#!/usr/bin/env python3
"""
NeuroSploitv2 - AI-Powered Penetration Testing Framework
Author: Security Research Team
License: MIT
Version: 2.0.0
"""

import os
import sys
import argparse
import json
import re
from pathlib import Path
from typing import Dict, List, Optional
import logging
from datetime import datetime
import readline

import mistune

# Setup logging. The logs/ directory must exist before FileHandler opens the
# log file, and this runs at import time, before _setup_directories().
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/neurosploit.log'),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

from core.llm_manager import LLMManager
from core.tool_installer import ToolInstaller, run_installer_menu, PENTEST_TOOLS
from core.pentest_executor import PentestExecutor
from core.report_generator import ReportGenerator
from core.context_builder import ReconContextBuilder
from agents.base_agent import BaseAgent
from tools.recon.recon_tools import FullReconRunner


class Completer:
    """Readline tab-completer for the interactive shell."""

    def __init__(self, neurosploit):
        self.neurosploit = neurosploit
        self.commands = [
            "help", "run_agent", "config", "list_roles", "list_profiles",
            "set_profile", "set_agent", "discover_ollama", "install_tools",
            "scan", "quick_scan", "recon", "full_recon", "check_tools",
            "experience", "wizard", "analyze", "exit", "quit"
        ]
        self.agent_roles = list(self.neurosploit.config.get('agent_roles', {}).keys())
        self.llm_profiles = list(self.neurosploit.config.get('llm', {}).get('profiles', {}).keys())
        self.matches: List[str] = []

    def complete(self, text, state):
        # readline calls this repeatedly with increasing `state`; compute the
        # candidate list once on state == 0 and replay it on later calls.
        if state == 0:
            line = readline.get_line_buffer()
            parts = line.split()
            options = []
            if not parts or (len(parts) == 1 and not line.endswith(' ')):
                # Completing the command word itself
                options = [c + ' ' for c in self.commands if c.startswith(text)]
            elif parts[0] in ('run_agent', 'set_agent'):
                if len(parts) == 1 and line.endswith(' '):
                    options = [a + ' ' for a in self.agent_roles]
                elif len(parts) == 2 and not line.endswith(' '):
                    options = [a + ' ' for a in self.agent_roles if a.startswith(parts[1])]
            elif parts[0] == 'set_profile':
                if len(parts) == 1 and line.endswith(' '):
                    options = [p + ' ' for p in self.llm_profiles]
                elif len(parts) == 2 and not line.endswith(' '):
                    options = [p + ' ' for p in self.llm_profiles if p.startswith(parts[1])]
            self.matches = options
        if state < len(self.matches):
            return self.matches[state]
        return None
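
# Sketch of the config.json layout this module reads. Key names are taken from
# the accesses below; the authoritative schema is config/config-example.json,
# so treat the values here as illustrative assumptions:
#
#   {
#     "llm": {
#       "default_profile": "claude_opus",
#       "profiles": {"claude_opus": {"provider": "anthropic", "model": "..."}}
#     },
#     "agent_roles": {
#       "bug_bounty_hunter": {
#         "enabled": true,
#         "description": "...",
#         "llm_profile": "claude_opus",
#         "tools_allowed": []
#       }
#     },
#     "tools": {"nmap": "/usr/bin/nmap"}
#   }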

class NeuroSploitv2:
    """Main framework class for NeuroSploitv2"""

    def __init__(self, config_path: str = "config/config.json"):
        """Initialize the framework"""
        self.config_path = config_path
        self.config = self._load_config()
        self.session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        self._setup_directories()
        # LLMManager instances are created dynamically per agent role so each
        # role can select a specific profile.
        self.llm_manager_instance: Optional[LLMManager] = None
        self.selected_agent_role: Optional[str] = None
        # Initialize tool installer
        self.tool_installer = ToolInstaller()
        logger.info(f"NeuroSploitv2 initialized - Session: {self.session_id}")

    def experience_mode(self):
        """
        Experience/Wizard Mode - guided step-by-step configuration.
        Navigate through the options to build your pentest configuration.
        """
        print("""
╔═══════════════════════════════════════════════════════════╗
║         NEUROSPLOIT - EXPERIENCE MODE (WIZARD)            ║
║              Step-by-step Configuration                   ║
╚═══════════════════════════════════════════════════════════╝
        """)
        config = {
            "target": None,
            "context_file": None,
            "llm_profile": None,
            "agent_role": None,
            "prompt": None,
            "mode": None
        }

        # Step 1: Choose Mode
        print("\n[STEP 1/6] Choose Operation Mode")
        print("-" * 50)
        print("  1. AI Analysis - Analyze recon context with LLM (no tools)")
        print("  2. Full Scan   - Run real pentest tools + AI analysis")
        print("  3. Quick Scan  - Fast essential checks + AI analysis")
        print("  4. Recon Only  - Run reconnaissance tools, save context")
        print("  0. Cancel")

        while True:
            choice = input("\n  Select mode [1-4]: ").strip()
            if choice == "0":
                print("\n[!] Cancelled.")
                return
            if choice in ["1", "2", "3", "4"]:
                config["mode"] = {"1": "analysis", "2": "full_scan",
                                  "3": "quick_scan", "4": "recon"}[choice]
                break
            print("  Invalid choice. Enter 1-4 or 0 to cancel.")

        # Step 2: Target
        print("\n[STEP 2/6] Set Target")
        print("-" * 50)
        target = input("  Enter target URL or domain: ").strip()
        if not target:
            print("\n[!] Target is required. Cancelled.")
            return
        config["target"] = target

        # Step 3: Context File (required for analysis mode)
        if config["mode"] == "analysis":
            print("\n[STEP 3/6] Context File")
            print("-" * 50)
            print("  Context file contains recon data collected previously.")
            # List available context files (at most the 10 most recent)
            context_files = list(Path("results").glob("context_*.json"))
            if context_files:
                recent = context_files[-10:]
                print("\n  Available context files:")
                for i, f in enumerate(recent, 1):
                    print(f"  {i}. {f.name}")
                print(f"  {len(recent) + 1}. Enter custom path")
                choice = input(f"\n  Select file [1-{len(recent) + 1}]: ").strip()
                try:
                    idx = int(choice) - 1
                    if 0 <= idx < len(recent):
                        config["context_file"] = str(recent[idx])
                    else:
                        custom = input("  Enter context file path: ").strip()
                        if custom:
                            config["context_file"] = custom
                except ValueError:
                    custom = input("  Enter context file path: ").strip()
                    if custom:
                        config["context_file"] = custom
            else:
                custom = input("  Enter context file path (or press Enter to skip): ").strip()
                if custom:
                    config["context_file"] = custom
            if not config["context_file"]:
                print("\n[!] Analysis mode requires a context file. Cancelled.")
                return
        else:
            print("\n[STEP 3/6] Context File (Optional)")
            print("-" * 50)
            use_context = input("  Load existing context file? [y/N]: ").strip().lower()
            if use_context == 'y':
                context_files = list(Path("results").glob("context_*.json"))
                if context_files:
                    recent = context_files[-10:]
                    print("\n  Available context files:")
                    for i, f in enumerate(recent, 1):
                        print(f"  {i}. {f.name}")
                    choice = input(f"\n  Select file [1-{len(recent)}] or path: ").strip()
                    try:
                        idx = int(choice) - 1
                        if 0 <= idx < len(recent):
                            config["context_file"] = str(recent[idx])
                    except ValueError:
                        if choice:
                            config["context_file"] = choice

        # Step 4: LLM Profile
        print("\n[STEP 4/6] LLM Profile")
        print("-" * 50)
        profiles = list(self.config.get('llm', {}).get('profiles', {}).keys())
        default_profile = self.config.get('llm', {}).get('default_profile', '')
        if profiles:
            print("  Available LLM profiles:")
            for i, p in enumerate(profiles, 1):
                marker = " (default)" if p == default_profile else ""
                print(f"  {i}. {p}{marker}")
            choice = input(f"\n  Select profile [1-{len(profiles)}] or Enter for default: ").strip()
            if choice:
                try:
                    idx = int(choice) - 1
                    if 0 <= idx < len(profiles):
                        config["llm_profile"] = profiles[idx]
                except ValueError:
                    pass
            if not config["llm_profile"]:
                config["llm_profile"] = default_profile
        else:
            print("  No LLM profiles configured. Using default.")
            config["llm_profile"] = default_profile

        # Step 5: Agent Role (optional)
        print("\n[STEP 5/6] Agent Role (Optional)")
        print("-" * 50)
        roles = list(self.config.get('agent_roles', {}).keys())
        if roles:
            print("  Available agent roles:")
            for i, r in enumerate(roles, 1):
                desc = self.config['agent_roles'][r].get('description', '')[:50]
                print(f"  {i}. {r} - {desc}")
            print(f"  {len(roles) + 1}. None (use default)")
            choice = input(f"\n  Select role [1-{len(roles) + 1}]: ").strip()
            try:
                idx = int(choice) - 1
                if 0 <= idx < len(roles):
                    config["agent_role"] = roles[idx]
            except ValueError:
                pass

        # Step 6: Custom Prompt
        if config["mode"] in ["analysis", "full_scan", "quick_scan"]:
            print("\n[STEP 6/6] Custom Prompt")
            print("-" * 50)
            print("  Enter your instructions for the AI agent.")
            print("  (What should it analyze, test, or look for?)")
            print("  Press Enter twice to finish.\n")
            lines = []
            while True:
                line = input("  > ")
                if line == "" and lines and lines[-1] == "":
                    break
                lines.append(line)
            config["prompt"] = "\n".join(lines).strip()
            if not config["prompt"]:
                config["prompt"] = f"Perform comprehensive security assessment on {config['target']}"
        else:
            print("\n[STEP 6/6] Skipped (Recon mode)")
            config["prompt"] = None

        # Summary and confirmation
        print(f"\n{'=' * 60}")
        print("  CONFIGURATION SUMMARY")
        print(f"{'=' * 60}")
        print(f"  Mode:         {config['mode']}")
        print(f"  Target:       {config['target']}")
        print(f"  Context File: {config['context_file'] or 'None'}")
        print(f"  LLM Profile:  {config['llm_profile']}")
        print(f"  Agent Role:   {config['agent_role'] or 'default'}")
        if config["prompt"]:
            print(f"  Prompt:       {config['prompt'][:60]}...")
        print(f"{'=' * 60}")

        confirm = input("\n  Execute with this configuration? [Y/n]: ").strip().lower()
        if confirm == 'n':
            print("\n[!] Cancelled.")
            return

        # Execute based on mode
        print("\n[*] Starting execution...")
        context = None
        if config["context_file"]:
            from core.context_builder import load_context_from_file
            context = load_context_from_file(config["context_file"])
            if context:
                print(f"[+] Loaded context from: {config['context_file']}")

        if config["mode"] == "recon":
            self.run_full_recon(config["target"], with_ai_analysis=bool(config["agent_role"]))
        elif config["mode"] == "analysis":
            agent_role = config["agent_role"] or "bug_bounty_hunter"
            self.execute_agent_role(
                agent_role,
                config["prompt"],
                llm_profile_override=config["llm_profile"],
                recon_context=context
            )
        elif config["mode"] == "full_scan":
            self.execute_real_scan(
                config["target"],
                scan_type="full",
                agent_role=config["agent_role"],
                recon_context=context
            )
        elif config["mode"] == "quick_scan":
            self.execute_real_scan(
                config["target"],
                scan_type="quick",
                agent_role=config["agent_role"],
                recon_context=context
            )

        print("\n[+] Execution complete!")

    def _setup_directories(self):
        """Create necessary directories"""
        dirs = ['logs', 'reports', 'data', 'custom_agents', 'results']
        for d in dirs:
            Path(d).mkdir(exist_ok=True)

    def _load_config(self) -> Dict:
        """Load configuration from file"""
        if not os.path.exists(self.config_path):
            if os.path.exists("config/config-example.json"):
                import shutil
                shutil.copy("config/config-example.json", self.config_path)
                logger.info(f"Created default configuration at {self.config_path}")
            else:
                logger.error("config-example.json not found. Cannot create default configuration.")
                return {}
        with open(self.config_path, 'r') as f:
            return json.load(f)

    def _initialize_llm_manager(self, agent_llm_profile: Optional[str] = None):
        """Initialize LLMManager with a specific profile, or the default one."""
        llm_config = self.config.get('llm', {})
        if agent_llm_profile:
            # Temporarily swap the default profile so LLMManager picks up the
            # requested one during init, then restore the original default.
            original_default = llm_config.get('default_profile')
            llm_config['default_profile'] = agent_llm_profile
            self.llm_manager_instance = LLMManager({"llm": llm_config})
            llm_config['default_profile'] = original_default
        else:
            self.llm_manager_instance = LLMManager({"llm": llm_config})
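
    # Illustrative use of the method below (role name and prompt are
    # hypothetical; any role with a matching <role>.md prompt file works even
    # if it is absent from config.json):
    #
    #   framework = NeuroSploitv2()
    #   framework.execute_agent_role(
    #       "bug_bounty_hunter",
    #       "Review the recon context for IDOR and auth-bypass candidates",
    #   )
    #
    # Results are written to results/campaign_<session_id>.json and rendered
    # to reports/report_<session_id>.html by _save_results()/_generate_report().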
    def execute_agent_role(self, agent_role_name: str, user_input: str,
                           additional_context: Optional[Dict] = None,
                           llm_profile_override: Optional[str] = None,
                           recon_context: Optional[Dict] = None):
        """
        Execute a specific agent role with a given input.

        Args:
            agent_role_name: Name of the agent role to use
            user_input: The prompt/task for the agent
            additional_context: Additional campaign data
            llm_profile_override: Override the default LLM profile
            recon_context: Pre-collected recon context (skips discovery if provided)
        """
        logger.info(f"Starting execution for agent role: {agent_role_name}")
        agent_roles_config = self.config.get('agent_roles', {})
        role_config = agent_roles_config.get(agent_role_name)

        # If the role is not in config, build a default entry. This allows
        # dynamic roles defined only by a prompt .md file.
        if not role_config:
            logger.info(f"Agent role '{agent_role_name}' not in config.json, using dynamic mode with prompt file.")
            role_config = {
                "enabled": True,
                "tools_allowed": [],
                "description": f"Dynamic agent role loaded from {agent_role_name}.md"
            }

        if not role_config.get('enabled', True):
            logger.warning(f"Agent role '{agent_role_name}' is disabled in configuration.")
            return {"warning": f"Agent role '{agent_role_name}' is disabled."}

        llm_profile_name = (llm_profile_override
                            or role_config.get('llm_profile',
                                               self.config.get('llm', {}).get('default_profile')))
        self._initialize_llm_manager(llm_profile_name)
        if not self.llm_manager_instance:
            logger.error("LLM Manager could not be initialized.")
            return {"error": "LLM Manager initialization failed."}

        # Get the prompts for the selected agent role. The role name is
        # assumed to map directly to the .md filename.
        agent_prompts = self.llm_manager_instance.prompts.get("md_prompts", {}).get(agent_role_name)
        if not agent_prompts:
            logger.error(f"Prompts for agent role '{agent_role_name}' not found in MD library.")
            return {"error": f"Prompts for agent role '{agent_role_name}' not found."}

        # Instantiate and execute the BaseAgent
        agent_instance = BaseAgent(agent_role_name, self.config, self.llm_manager_instance, agent_prompts)

        # Execute with recon_context if provided (uses the context-based flow)
        results = agent_instance.execute(user_input, additional_context, recon_context=recon_context)

        # Save results
        campaign_results = {
            "session_id": self.session_id,
            "agent_role": agent_role_name,
            "input": user_input,
            "timestamp": datetime.now().isoformat(),
            "results": results
        }
        self._save_results(campaign_results)
        return campaign_results

    def _save_results(self, results: Dict):
        """Save campaign results"""
        output_file = f"results/campaign_{self.session_id}.json"
        with open(output_file, 'w') as f:
            json.dump(results, f, indent=4)
        logger.info(f"Results saved to {output_file}")
        # Generate report
        self._generate_report(results)

    def _generate_report(self, results: Dict):
        """Generate a self-contained HTML report with summary metrics"""
        report_file = f"reports/report_{self.session_id}.html"

        # Get data
        llm_response = results.get('results', {}).get('llm_response', '')
        if isinstance(llm_response, dict):
            llm_response = json.dumps(llm_response, indent=2)
        report_content = mistune.html(llm_response)

        # Extract metrics from the report
        targets = results.get('results', {}).get('targets', [results.get('input', 'N/A')])
        if isinstance(targets, str):
            targets = [targets]
        tools_executed = results.get('results', {}).get('tools_executed', 0)

        # Count severities from the report text. This is a rough heuristic: it
        # counts every occurrence of each severity word, bracketed or not.
        critical = len(re.findall(r'\[?Critical\]?', llm_response, re.IGNORECASE))
        high = len(re.findall(r'\[?High\]?', llm_response, re.IGNORECASE))
        medium = len(re.findall(r'\[?Medium\]?', llm_response, re.IGNORECASE))
        low = len(re.findall(r'\[?Low\]?', llm_response, re.IGNORECASE))
        info = len(re.findall(r'\[?Info\]?', llm_response, re.IGNORECASE))
        total_vulns = critical + high + medium + low

        # Risk score calculation: weighted sum of findings, capped at 100
        risk_score = min(100, (critical * 25) + (high * 15) + (medium * 8) + (low * 3))
        risk_level = ("Critical" if risk_score >= 70 else
                      "High" if risk_score >= 50 else
                      "Medium" if risk_score >= 25 else "Low")
        risk_color = ("#e74c3c" if risk_score >= 70 else
                      "#e67e22" if risk_score >= 50 else
                      "#f1c40f" if risk_score >= 25 else "#27ae60")
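
        # Worked example of the weighting above (numbers are illustrative):
        #   1 critical + 2 high + 1 medium -> min(100, 25 + 30 + 8) = 63 -> "High"
        #   5 critical                     -> min(100, 125) = 100     -> "Critical"
        # "Info" matches are counted but add nothing to the score.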
        # Minimal inline-styled HTML; risk_color drives the risk score colour.
        html = f"""<!DOCTYPE html>
<html>
<head>
    <meta charset="utf-8">
    <title>Security Assessment Report - {self.session_id}</title>
</head>
<body style="font-family: sans-serif; max-width: 960px; margin: auto;">
    <h1>AI-Powered Security Assessment Report</h1>
    <p>
        <strong>Report ID:</strong> {self.session_id}<br>
        <strong>Date:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M')}<br>
        <strong>Agent:</strong> {results.get('agent_role', 'Security Analyst')}
    </p>
    <p><strong>Targets:</strong> {', '.join(str(t) for t in targets[:5])}</p>
    <table cellpadding="8">
        <tr style="text-align: center; font-size: 1.4em;">
            <td>{critical}</td><td>{high}</td><td>{medium}</td><td>{low}</td><td>{tools_executed}</td>
        </tr>
        <tr style="text-align: center;">
            <td>Critical</td><td>High</td><td>Medium</td><td>Low</td><td>Tests Run</td>
        </tr>
    </table>
    <h2>Risk Score</h2>
    <p style="font-size: 2.5em; font-weight: bold; color: {risk_color};">{risk_score}</p>
    <p>Overall risk level: {risk_level}</p>
    <h2>Severity Distribution</h2>
    <p>{total_vulns} scored findings: {critical} critical, {high} high,
       {medium} medium, {low} low (plus {info} informational).</p>
    <h2>Vulnerability Report</h2>
    {report_content}
</body>
</html>"""
        with open(report_file, 'w') as f:
            f.write(html)
        logger.info(f"Report generated: {report_file}")

    def execute_real_scan(self, target: str, scan_type: str = "full",
                          agent_role: str = None, recon_context: Dict = None) -> Dict:
        """
        Execute a real penetration test with actual tools and generate a professional report.

        Args:
            target: The target URL or IP to scan
            scan_type: "full" for a comprehensive scan, "quick" for essential checks
            agent_role: Optional agent role for AI analysis of the results
            recon_context: Optional pre-collected recon context
        """
        print(f"\n{'=' * 70}")
        print("  NeuroSploitv2 - Real Penetration Test Execution")
        print(f"{'=' * 70}")
        print(f"\n[*] Target: {target}")
        print(f"[*] Scan Type: {scan_type}")
        print(f"[*] Session ID: {self.session_id}\n")

        # Check for required tools
        print("[*] Checking required tools...")
        missing_tools = []
        essential_tools = ["nmap", "curl"]
        for tool in essential_tools:
            installed, path = self.tool_installer.check_tool_installed(tool)
            if not installed:
                missing_tools.append(tool)
                print(f"    [-] {tool}: NOT INSTALLED")
            else:
                print(f"    [+] {tool}: {path}")

        if missing_tools:
            print(f"\n[!] Missing required tools: {', '.join(missing_tools)}")
            print("[!] Run 'install_tools' to install required tools.")
            return {"error": f"Missing tools: {missing_tools}"}

        # Execute the scan
        executor = PentestExecutor(target, self.config, recon_context=recon_context)
        if recon_context:
            attack_surface = recon_context.get('attack_surface', {})
            print("[+] Using recon context with "
                  f"{attack_surface.get('total_subdomains', 0)} subdomains, "
                  f"{attack_surface.get('live_hosts', 0)} live hosts")

        if scan_type == "quick":
            scan_result = executor.run_quick_scan()
        else:
            scan_result = executor.run_full_scan()

        # Get results as a dictionary
        results_dict = executor.to_dict()

        # Get AI analysis if an agent role was specified
        llm_analysis = ""
        if agent_role:
            print(f"\n[*] Running AI analysis with {agent_role}...")
            llm_profile = self.config.get('agent_roles', {}).get(agent_role, {}).get('llm_profile')
            self._initialize_llm_manager(llm_profile)
            if self.llm_manager_instance:
                agent_prompts = self.llm_manager_instance.prompts.get("md_prompts", {}).get(agent_role, {})
                if agent_prompts:
                    agent = BaseAgent(agent_role, self.config, self.llm_manager_instance, agent_prompts)
                    analysis_input = f"""
Analyze the following penetration test results and provide a detailed security assessment:

Target: {target}
Scan Type: {scan_type}

SCAN RESULTS:
{json.dumps(results_dict, indent=2)}

Provide:
1. Executive summary of findings
2. Risk assessment
3. Detailed analysis of each vulnerability
4. Prioritized remediation recommendations
5. Additional attack vectors to explore
"""
                    analysis_result = agent.execute(analysis_input, results_dict)
                    llm_analysis = analysis_result.get("llm_response", "")

        # Generate the professional report
        print("\n[*] Generating professional report...")
        report_gen = ReportGenerator(results_dict, llm_analysis)
        html_report = report_gen.save_report("reports")
        json_report = report_gen.save_json_report("results")

        print(f"\n{'=' * 70}")
        print("[+] Scan Complete!")
        print(f"    - Vulnerabilities Found: {len(results_dict.get('vulnerabilities', []))}")
        print(f"    - HTML Report: {html_report}")
        print(f"    - JSON Results: {json_report}")
        print(f"{'=' * 70}\n")

        return {
            "session_id": self.session_id,
            "target": target,
            "scan_type": scan_type,
            "results": results_dict,
            "html_report": html_report,
            "json_report": json_report
        }
    def run_full_recon(self, target: str, with_ai_analysis: bool = True) -> Dict:
        """
        Run full advanced recon and consolidate all outputs.

        This command runs all recon tools:
        - Subdomain enumeration (subfinder, amass, assetfinder)
        - HTTP probing (httpx, httprobe)
        - URL collection (gau, waybackurls, waymore)
        - Web crawling (katana, gospider)
        - Port scanning (naabu, nmap)
        - DNS enumeration
        - Vulnerability scanning (nuclei)

        All results are consolidated into a single context file that the LLM
        uses to enhance testing.
        """
        print(f"\n{'=' * 70}")
        print("  NEUROSPLOIT - FULL ADVANCED RECON")
        print(f"{'=' * 70}")
        print(f"\n[*] Target: {target}")
        print(f"[*] Session ID: {self.session_id}")
        print(f"[*] With AI analysis: {with_ai_analysis}\n")

        # Execute full recon
        recon_runner = FullReconRunner(self.config)

        # Determine target type
        target_type = "url" if target.startswith(('http://', 'https://')) else "domain"
        recon_results = recon_runner.run(target, target_type)

        # If requested, run AI analysis
        llm_analysis = ""
        if with_ai_analysis and self.selected_agent_role:
            print(f"\n[*] Running AI analysis with {self.selected_agent_role}...")
            llm_profile = self.config.get('agent_roles', {}).get(self.selected_agent_role, {}).get('llm_profile')
            self._initialize_llm_manager(llm_profile)
            if self.llm_manager_instance:
                agent_prompts = self.llm_manager_instance.prompts.get("md_prompts", {}).get(self.selected_agent_role, {})
                if agent_prompts:
                    agent = BaseAgent(self.selected_agent_role, self.config,
                                      self.llm_manager_instance, agent_prompts)
                    analysis_prompt = f"""
Analyze the following reconnaissance context and identify:
1. The most promising attack vectors
2. Potential vulnerabilities based on the detected technologies
3. Priority endpoints to test
4. Recommended next steps for the pentest

RECON CONTEXT:
{recon_results.get('context_text', '')}
"""
                    analysis_result = agent.execute(analysis_prompt, recon_results.get('context', {}))
                    llm_analysis = analysis_result.get("llm_response", "")

        # Generate a report if vulnerabilities were found
        context = recon_results.get('context', {})
        vulns = context.get('vulnerabilities', {}).get('all', [])
        if vulns or llm_analysis:
            print("\n[*] Generating report...")
            from core.report_generator import ReportGenerator
            report_data = {
                "target": target,
                "scan_started": datetime.now().isoformat(),
                "scan_completed": datetime.now().isoformat(),
                "attack_surface": context.get('attack_surface', {}),
                "vulnerabilities": vulns,
                "technologies": context.get('data', {}).get('technologies', []),
                "open_ports": context.get('data', {}).get('open_ports', [])
            }
            report_gen = ReportGenerator(report_data, llm_analysis)
            html_report = report_gen.save_report("reports")
            print(f"[+] HTML Report: {html_report}")

        print(f"\n{'=' * 70}")
        print("[+] ADVANCED RECON COMPLETE!")
        print(f"[+] Consolidated context: {recon_results.get('context_file', '')}")
        print(f"[+] Text context: {recon_results.get('context_text_file', '')}")
        print(f"{'=' * 70}\n")

        return {
            "session_id": self.session_id,
            "target": target,
            "recon_results": recon_results,
            "llm_analysis": llm_analysis,
            "context_file": recon_results.get('context_file', ''),
            "context_text_file": recon_results.get('context_text_file', '')
        }
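
    # Shape of the consolidated recon context produced by run_full_recon()
    # above and consumed by execute_real_scan(). Only the keys this module
    # actually reads are shown; the full structure comes from
    # core.context_builder (values are illustrative):
    #
    #   {
    #     "attack_surface": {"total_subdomains": 42, "live_hosts": 17},
    #     "vulnerabilities": {"all": [...]},
    #     "data": {"technologies": [...], "open_ports": [...]}
    #   }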
    def check_tools_status(self):
        """Check and display the status of all pentest tools"""
        print("\n" + "=" * 60)
        print("  PENTEST TOOLS STATUS")
        print("=" * 60 + "\n")

        status = self.tool_installer.get_tools_status()
        installed_count = 0
        missing_count = 0
        for tool_name, info in status.items():
            if info["installed"]:
                print(f"  [+] {tool_name:15} - INSTALLED ({info['path']})")
                installed_count += 1
            else:
                print(f"  [-] {tool_name:15} - NOT INSTALLED")
                missing_count += 1

        print("\n" + "-" * 60)
        print(f"  Total: {installed_count} installed, {missing_count} missing")
        print("-" * 60)
        if missing_count > 0:
            print("\n  [!] Run 'install_tools' to install missing tools")
        return status

    def update_tools_config(self):
        """Update the config with discovered tool paths"""
        status = self.tool_installer.get_tools_status()
        for tool_name, info in status.items():
            if info["installed"] and info["path"]:
                # setdefault guards against a config with no 'tools' section yet
                self.config.setdefault('tools', {})[tool_name] = info["path"]
        # Save the updated config
        with open(self.config_path, 'w') as f:
            json.dump(self.config, f, indent=4)
        logger.info("Tools configuration updated")

    def list_agent_roles(self):
        """List all available agent roles."""
        print("\nAvailable Agent Roles:")
        for role_name, role_details in self.config.get('agent_roles', {}).items():
            status = "Enabled" if role_details.get("enabled") else "Disabled"
            print(f"  - {role_name} ({status}): {role_details.get('description', 'No description.')}")

    def list_llm_profiles(self):
        """List all available LLM profiles."""
        print("\nAvailable LLM Profiles:")
        for profile_name in self.config.get('llm', {}).get('profiles', {}).keys():
            print(f"  - {profile_name}")
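
    # Typical interactive session (illustrative; mirrors the recommended
    # workflow in the help text below):
    #
    #   NeuroSploit> set_agent bug_bounty_hunter
    #   NeuroSploit> recon example.com
    #   NeuroSploit> analyze results/context_X.json
    #   Enter analysis prompt: Focus on auth endpoints and exposed admin panels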
    def interactive_mode(self):
        """Start interactive mode"""
        completer = Completer(self)
        readline.set_completer(completer.complete)
        readline.parse_and_bind("tab: complete")

        print("""
╔═══════════════════════════════════════════════════════════╗
║          NeuroSploitv2 - AI Offensive Security            ║
║                    Interactive Mode                       ║
╚═══════════════════════════════════════════════════════════╝
        """)

        while True:
            try:
                cmd = input("\nNeuroSploit> ").strip()

                if cmd.lower() in ['exit', 'quit']:
                    break

                elif cmd.lower() == 'help':
                    self._show_help()

                elif cmd.startswith('run_agent'):
                    # e.g., run_agent red_team_agent "scan example.com"
                    parts = cmd.split(maxsplit=2)
                    if len(parts) >= 2:
                        if len(parts) == 2:
                            if self.selected_agent_role:
                                user_input = parts[1].strip('"')
                                self.execute_agent_role(self.selected_agent_role, user_input)
                            else:
                                print("No agent selected. Use 'set_agent <role>' or 'run_agent <role> \"<input>\"'")
                        else:
                            agent_role_name = parts[1]
                            user_input = parts[2].strip('"')
                            self.execute_agent_role(agent_role_name, user_input)
                    else:
                        print("Usage: run_agent <role> \"<input>\"")

                elif cmd.startswith('config'):
                    print(json.dumps(self.config, indent=2))

                elif cmd.lower() == 'list_roles':
                    print("\nAvailable Agent Roles:")
                    for role_name, role_details in self.config.get('agent_roles', {}).items():
                        status = "Enabled" if role_details.get("enabled") else "Disabled"
                        marker = "*" if role_name == self.selected_agent_role else " "
                        print(f"  {marker} {role_name} ({status}): {role_details.get('description', 'No description.')}")

                elif cmd.lower() == 'list_profiles':
                    print("\nAvailable LLM Profiles:")
                    default_profile = self.config.get('llm', {}).get('default_profile')
                    for profile_name in self.config.get('llm', {}).get('profiles', {}).keys():
                        marker = "*" if profile_name == default_profile else " "
                        print(f"  {marker} {profile_name}")

                elif cmd.startswith('set_profile'):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        profile_name = parts[1].strip()
                        if profile_name in self.config.get('llm', {}).get('profiles', {}):
                            self.config['llm']['default_profile'] = profile_name
                            print(f"Default LLM profile set to: {profile_name}")
                        else:
                            print(f"Profile '{profile_name}' not found.")
                    else:
                        print("Usage: set_profile <profile_name>")

                elif cmd.startswith('set_agent'):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        agent_name = parts[1].strip()
                        if agent_name in self.config.get('agent_roles', {}):
                            self.selected_agent_role = agent_name
                            print(f"Default agent set to: {agent_name}")
                        else:
                            print(f"Agent '{agent_name}' not found.")
                    else:
                        print("Usage: set_agent <agent_name>")

                elif cmd.lower() == 'discover_ollama':
                    self.discover_ollama_models()

                elif cmd.lower() == 'install_tools':
                    run_installer_menu()
                    self.update_tools_config()

                elif cmd.lower() == 'check_tools':
                    self.check_tools_status()

                elif cmd.startswith('scan '):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        target = parts[1].strip().strip('"')
                        agent_role = self.selected_agent_role or "bug_bounty_hunter"
                        self.execute_real_scan(target, scan_type="full", agent_role=agent_role)
                    else:
                        print("Usage: scan <target>")

                elif cmd.startswith('quick_scan '):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        target = parts[1].strip().strip('"')
                        agent_role = self.selected_agent_role or "bug_bounty_hunter"
                        self.execute_real_scan(target, scan_type="quick", agent_role=agent_role)
                    else:
                        print("Usage: quick_scan <target>")

                elif cmd.startswith('recon ') or cmd.startswith('full_recon '):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        target = parts[1].strip().strip('"')
                        with_ai = self.selected_agent_role is not None
                        self.run_full_recon(target, with_ai_analysis=with_ai)
                    else:
                        print("Usage: recon <target>")
                        print("       full_recon <target>")
                        print("\nThis command runs all recon tools:")
                        print("  - Subdomain enumeration (subfinder, amass, assetfinder)")
                        print("  - HTTP probing (httpx)")
                        print("  - URL collection (gau, waybackurls)")
                        print("  - Web crawling (katana, gospider)")
                        print("  - Port scanning (naabu, nmap)")
                        print("  - Vulnerability scanning (nuclei)")
                        print("\nAll outputs are consolidated into a single context file")
                        print("for use by the LLM.")

                elif cmd.lower() in ['experience', 'wizard']:
                    self.experience_mode()

                elif cmd.startswith('analyze '):
                    parts = cmd.split(maxsplit=1)
                    if len(parts) > 1:
                        context_file = parts[1].strip().strip('"')
                        if os.path.exists(context_file):
                            from core.context_builder import load_context_from_file
                            context = load_context_from_file(context_file)
                            if context:
                                prompt = input("Enter analysis prompt: ").strip()
                                if prompt:
                                    agent_role = self.selected_agent_role or "bug_bounty_hunter"
                                    self.execute_agent_role(agent_role, prompt, recon_context=context)
                        else:
                            print(f"Context file not found: {context_file}")
                    else:
                        print("Usage: analyze <context_file>")
                        print("       Then enter your analysis prompt")

                else:
                    print("Unknown command. Type 'help' for available commands.")

            except KeyboardInterrupt:
                print("\nOperation cancelled.")
                continue
            except Exception as e:
                logger.error(f"Error: {e}")

    def discover_ollama_models(self):
        """Discover local Ollama models and add them to the configuration."""
        try:
            import requests
        except ImportError:
            print("The 'requests' library is not installed. Please install it with 'pip3 install requests'")
            return

        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=5)
            response.raise_for_status()
            models = response.json().get("models", [])
        except requests.exceptions.RequestException:
            # Covers connection errors, HTTP errors, and timeouts
            print("Ollama server not found. Please make sure Ollama is running.")
            return

        if not models:
            print("No Ollama models found.")
            return

        print("Available Ollama models:")
        for i, model in enumerate(models):
            print(f"  {i + 1}. {model['name']}")

        try:
            selections = input("Enter the numbers of the models to add (e.g., 1,3,4): ")
            selected_indices = [int(s.strip()) - 1 for s in selections.split(',')]
        except ValueError:
            print("Invalid input. Please enter a comma-separated list of numbers.")
            return

        for i in selected_indices:
            if 0 <= i < len(models):
                model_name = models[i]['name']
                profile_name = f"ollama_{model_name.replace(':', '_').replace('-', '_')}"
                self.config['llm']['profiles'][profile_name] = {
                    "provider": "ollama",
                    "model": model_name,
                    "api_key": "",
                    "temperature": 0.7,
                    "max_tokens": 4096,
                    "input_token_limit": 8000,
                    "output_token_limit": 4000,
                    "cache_enabled": True,
                    "search_context_level": "medium",
                    "pdf_support_enabled": False,
                    "guardrails_enabled": True,
                    "hallucination_mitigation_strategy": None
                }
                print(f"Added profile '{profile_name}' for model '{model_name}'.")

        with open(self.config_path, 'w') as f:
            json.dump(self.config, f, indent=4)
        print("Configuration updated.")
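
    # Example of the profile naming above (the model tag is hypothetical):
    #
    #   Ollama model "llama3:8b"  ->  profile "ollama_llama3_8b"
    #
    # The generated profile can then be selected in the interactive shell:
    #   NeuroSploit> set_profile ollama_llama3_8b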
    def _show_help(self):
        """Show the help menu"""
        print("""
=======================================================================
  NeuroSploitv2 - Command Reference
=======================================================================

MODES:
  experience / wizard        - GUIDED step-by-step setup (recommended!)
  analyze <context_file>     - LLM-only analysis with a context file

RECON COMMANDS (Data Collection):
  recon <target>             - Run FULL RECON and consolidate outputs
  full_recon <target>        - Alias for recon

  The recon command runs ALL reconnaissance tools:
    - Subdomain enumeration (subfinder, amass, assetfinder)
    - HTTP probing (httpx, httprobe)
    - URL collection (gau, waybackurls, waymore)
    - Web crawling (katana, gospider)
    - Port scanning (naabu, nmap)
    - DNS enumeration
    - Vulnerability scanning (nuclei)
  All outputs are CONSOLIDATED into a single context file for use by the LLM!

SCANNING COMMANDS (Execute Real Tools):
  scan <target>              - Run FULL pentest scan with real tools
  quick_scan <target>        - Run QUICK scan (essential checks only)

TOOL MANAGEMENT:
  install_tools              - Install required pentest tools
  check_tools                - Check which tools are installed

AGENT COMMANDS (AI Analysis):
  run_agent <role> "<input>" - Execute AI agent with input
  set_agent <role>           - Set default agent for AI analysis

CONFIGURATION:
  list_roles                 - List all available agent roles
  list_profiles              - List all LLM profiles
  set_profile <profile>      - Set the default LLM profile
  discover_ollama            - Discover and configure local Ollama models
  config                     - Show current configuration

GENERAL:
  help                       - Show this help menu
  exit/quit                  - Exit the framework

RECOMMENDED WORKFLOW:
  1. recon example.com                - First run full recon
  2. analyze results/context_X.json   - LLM-only analysis with context
  OR
  1. experience                       - Use guided wizard mode

EXAMPLES:
  experience                          - Start guided wizard
  recon example.com                   - Full recon with consolidated output
  analyze results/context_X.json      - LLM analysis of context file
  scan https://example.com            - Full pentest scan
  quick_scan 192.168.1.1              - Quick vulnerability check
=======================================================================
        """)


def main():
    """Main entry point"""
    parser = argparse.ArgumentParser(
        description='NeuroSploitv2 - AI-Powered Penetration Testing Framework',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
3 EXECUTION MODES:
==================
1. CLI MODE (direct command line):
   python neurosploit.py --input "Your prompt" -cf context.json --llm-profile PROFILE

2. INTERACTIVE MODE (-i):
   python neurosploit.py -i
   Then use commands: recon, analyze, scan, etc.

3. EXPERIENCE/WIZARD MODE (-e):
   python neurosploit.py -e
   Guided step-by-step configuration - RECOMMENDED for beginners!

EXAMPLES:
=========
# Step 1: Run recon to collect data
python neurosploit.py --recon example.com

# Step 2: LLM-only analysis (no tool execution)
python neurosploit.py --input "Analyze for SQLi and XSS" -cf results/context_X.json --llm-profile claude_opus

# Or use wizard mode
python neurosploit.py -e

# Run full pentest scan with tools
python neurosploit.py --scan https://example.com

# Interactive mode
python neurosploit.py -i
"""
    )

    # Recon options
    parser.add_argument('--recon', metavar='TARGET',
                        help='Run FULL RECON on target (subdomain enum, http probe, url collection, etc.)')

    # Context file option
    parser.add_argument('--context-file', '-cf', metavar='FILE',
                        help='Load recon context from JSON file (use with --scan or run_agent)')

    # Target option (for use with context or agent without running recon)
    parser.add_argument('--target', '-t', metavar='TARGET',
                        help='Specify target URL/domain (use with -cf or --input)')

    # Scanning options
    parser.add_argument('--scan', metavar='TARGET',
                        help='Run FULL pentest scan on target (executes real tools)')
    parser.add_argument('--quick-scan', metavar='TARGET',
                        help='Run QUICK pentest scan on target')

    # Tool management
    parser.add_argument('--install-tools', action='store_true',
                        help='Install required pentest tools (nmap, sqlmap, nuclei, etc.)')
    parser.add_argument('--check-tools', action='store_true',
                        help='Check status of installed tools')

    # Agent options
    parser.add_argument('-r', '--agent-role',
                        help='Name of the agent role to execute (optional)')
    parser.add_argument('-i', '--interactive', action='store_true',
                        help='Start in interactive mode')
    parser.add_argument('-e', '--experience', action='store_true',
                        help='Start in experience/wizard mode (guided setup)')
    parser.add_argument('--input',
                        help='Input prompt/task for the agent role')
    parser.add_argument('--llm-profile',
                        help='LLM profile to use for the execution')

    # Configuration
    parser.add_argument('-c', '--config', default='config/config.json',
                        help='Configuration file path')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Enable verbose output')
    parser.add_argument('--list-agents', action='store_true',
                        help='List all available agent roles and exit')
    parser.add_argument('--list-profiles', action='store_true',
                        help='List all available LLM profiles and exit')

    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Initialize framework
    framework = NeuroSploitv2(config_path=args.config)

    # Handle tool installation
    if args.install_tools:
        run_installer_menu()
        framework.update_tools_config()

    # Handle tool check
    elif args.check_tools:
        framework.check_tools_status()

    # Handle recon
    elif args.recon:
        framework.run_full_recon(args.recon, with_ai_analysis=bool(args.agent_role))

    # Handle full scan
    elif args.scan:
        agent_role = args.agent_role or "bug_bounty_hunter"
        context = None
        if args.context_file:
            from core.context_builder import load_context_from_file
            context = load_context_from_file(args.context_file)
            if context:
                print(f"[+] Loaded context from: {args.context_file}")
        framework.execute_real_scan(args.scan, scan_type="full",
                                    agent_role=agent_role, recon_context=context)

    # Handle quick scan
    elif args.quick_scan:
        agent_role = args.agent_role or "bug_bounty_hunter"
        context = None
        if args.context_file:
            from core.context_builder import load_context_from_file
            context = load_context_from_file(args.context_file)
            if context:
                print(f"[+] Loaded context from: {args.context_file}")
        framework.execute_real_scan(args.quick_scan, scan_type="quick",
                                    agent_role=agent_role, recon_context=context)

    # Handle list commands
    elif args.list_agents:
        framework.list_agent_roles()
    elif args.list_profiles:
        framework.list_llm_profiles()

    # Handle experience/wizard mode
    elif args.experience:
        framework.experience_mode()

    # Handle interactive mode
    elif args.interactive:
        framework.interactive_mode()

    # Handle agent execution with optional context
    elif args.agent_role and args.input:
        context = None
        if args.context_file:
            from core.context_builder import load_context_from_file
            context = load_context_from_file(args.context_file)
            if context:
                print(f"[+] Loaded context from: {args.context_file}")
        framework.execute_agent_role(
            args.agent_role,
            args.input,
            llm_profile_override=args.llm_profile,
            recon_context=context
        )

    # Handle input-only mode with a context file (no role specified):
    # fall back to the default agent
    elif args.input and args.context_file:
        from core.context_builder import load_context_from_file
        context = load_context_from_file(args.context_file)
        if context:
            print(f"[+] Loaded context from: {args.context_file}")
            agent_role = args.agent_role or "bug_bounty_hunter"
            framework.execute_agent_role(
                agent_role,
                args.input,
                llm_profile_override=args.llm_profile,
                recon_context=context
            )
        else:
            print("[!] Failed to load context file")

    # Handle target with context file (AI pentest without recon)
    elif args.target and args.context_file:
        from core.context_builder import load_context_from_file
        context = load_context_from_file(args.context_file)
        if context:
            print(f"[+] Loaded context from: {args.context_file}")
            agent_role = args.agent_role or "bug_bounty_hunter"
            input_prompt = args.input or f"Perform security assessment on {args.target}"
            framework.execute_agent_role(
                agent_role,
                input_prompt,
                llm_profile_override=args.llm_profile,
                recon_context=context
            )
        else:
            print("[!] Failed to load context file")

    else:
        parser.print_help()
        print("\n" + "=" * 70)
        print("QUICK START:")
        print("  1. Install tools:  python neurosploit.py --install-tools")
        print("  2. Run scan:       python neurosploit.py --scan https://target.com")
        print("  3. Interactive:    python neurosploit.py -i")
        print("=" * 70)


if __name__ == "__main__":
    main()