#!/usr/bin/env python3
"""
NeuroSploitv2 - AI-Powered Penetration Testing Framework
Author: Security Research Team
License: MIT
Version: 2.0.0
"""
import os
import sys
import argparse
import json
import re
from pathlib import Path
from typing import Dict, List, Optional
import logging
from datetime import datetime
import readline
import mistune
# Setup logging (create the log directory first so the FileHandler does not fail on a fresh checkout)
Path('logs').mkdir(exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/neurosploit.log'),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)
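# Framework modules are imported after the logging setup above, so any log
# messages they emit at import time are picked up by these handlers.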
from core.llm_manager import LLMManager
from core.tool_installer import ToolInstaller, run_installer_menu, PENTEST_TOOLS
from core.pentest_executor import PentestExecutor
from core.report_generator import ReportGenerator
from agents.base_agent import BaseAgent
class Completer:
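    """Readline tab-completion for the interactive console commands.

    Intended wiring (illustrative sketch; assumes ``app`` is a NeuroSploitv2 instance)::

        readline.set_completer(Completer(app).complete)
        readline.parse_and_bind('tab: complete')
    """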
def __init__(self, neurosploit):
self.neurosploit = neurosploit
self.commands = [
"help", "run_agent", "config", "list_roles", "list_profiles",
"set_profile", "set_agent", "discover_ollama", "install_tools",
"scan", "quick_scan", "check_tools", "exit", "quit"
]
self.agent_roles = list(self.neurosploit.config.get('agent_roles', {}).keys())
        self.llm_profiles = list(self.neurosploit.config.get('llm', {}).get('profiles', {}).keys())
        self.matches: List[str] = []
    def complete(self, text, state):
        """readline completion hook.

        Candidates are computed once per completion cycle (state == 0) and cached in
        self.matches, so readline can retrieve every match on subsequent calls.
        """
        if state == 0:
            line = readline.get_line_buffer()
            parts = line.split()
            options = []
            if not parts or (len(parts) == 1 and not line.endswith(' ')):
                # Completing the command name itself
                options = [c + ' ' for c in self.commands if c.startswith(text)]
            else:
                # Completing the first argument of commands that take one
                if parts[0] in ('run_agent', 'set_agent'):
                    candidates = self.agent_roles
                elif parts[0] == 'set_profile':
                    candidates = self.llm_profiles
                else:
                    candidates = []
                if len(parts) == 1 and line.endswith(' '):
                    options = [c + ' ' for c in candidates]
                elif len(parts) == 2 and not line.endswith(' '):
                    options = [c + ' ' for c in candidates if c.startswith(parts[1])]
            self.matches = options
        if state < len(self.matches):
            return self.matches[state]
        return None
class NeuroSploitv2:
"""Main framework class for NeuroSploitv2"""
def __init__(self, config_path: str = "config/config.json"):
"""Initialize the framework"""
self.config_path = config_path
self.config = self._load_config()
# self.agents = {} # Removed as agents will be dynamically created per role
self.session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
self._setup_directories()
# LLMManager instance will be created dynamically per agent role to select specific profiles
self.llm_manager_instance: Optional[LLMManager] = None
self.selected_agent_role: Optional[str] = None
# Initialize tool installer
self.tool_installer = ToolInstaller()
logger.info(f"NeuroSploitv2 initialized - Session: {self.session_id}")
def _setup_directories(self):
"""Create necessary directories"""
dirs = ['logs', 'reports', 'data', 'custom_agents', 'results']
for d in dirs:
Path(d).mkdir(exist_ok=True)
def _load_config(self) -> Dict:
"""Load configuration from file"""
if not os.path.exists(self.config_path):
if os.path.exists("config/config-example.json"):
import shutil
shutil.copy("config/config-example.json", self.config_path)
logger.info(f"Created default configuration at {self.config_path}")
else:
logger.error("config-example.json not found. Cannot create default configuration.")
return {}
with open(self.config_path, 'r') as f:
return json.load(f)
def _initialize_llm_manager(self, agent_llm_profile: Optional[str] = None):
"""Initializes LLMManager with a specific profile or default."""
llm_config = self.config.get('llm', {})
if agent_llm_profile:
# Temporarily modify config to set the default profile for LLMManager init
original_default = llm_config.get('default_profile')
llm_config['default_profile'] = agent_llm_profile
self.llm_manager_instance = LLMManager({"llm": llm_config})
llm_config['default_profile'] = original_default # Restore original default
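            # llm_config is the same dict object stored in self.config, hence the save/restore above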
else:
self.llm_manager_instance = LLMManager({"llm": llm_config})
def execute_agent_role(self, agent_role_name: str, user_input: str, additional_context: Optional[Dict] = None, llm_profile_override: Optional[str] = None):
"""Execute a specific agent role with a given input."""
logger.info(f"Starting execution for agent role: {agent_role_name}")
agent_roles_config = self.config.get('agent_roles', {})
role_config = agent_roles_config.get(agent_role_name)
# If role not in config, create a default config (allows dynamic roles from .md files)
if not role_config:
logger.info(f"Agent role '{agent_role_name}' not in config.json, using dynamic mode with prompt file.")
role_config = {
"enabled": True,
"tools_allowed": [],
"description": f"Dynamic agent role loaded from {agent_role_name}.md"
}
if not role_config.get('enabled', True):
logger.warning(f"Agent role '{agent_role_name}' is disabled in configuration.")
return {"warning": f"Agent role '{agent_role_name}' is disabled."}
        llm_profile_name = llm_profile_override or role_config.get('llm_profile', self.config.get('llm', {}).get('default_profile'))
self._initialize_llm_manager(llm_profile_name)
if not self.llm_manager_instance:
logger.error("LLM Manager could not be initialized.")
return {"error": "LLM Manager initialization failed."}
# Get the prompts for the selected agent role
# Assuming agent_role_name directly maps to the .md filename
agent_prompts = self.llm_manager_instance.prompts.get("md_prompts", {}).get(agent_role_name)
if not agent_prompts:
logger.error(f"Prompts for agent role '{agent_role_name}' not found in MD library.")
return {"error": f"Prompts for agent role '{agent_role_name}' not found."}
# Instantiate and execute the BaseAgent
agent_instance = BaseAgent(agent_role_name, self.config, self.llm_manager_instance, agent_prompts)
results = agent_instance.execute(user_input, additional_context)
# Save results
campaign_results = {
"session_id": self.session_id,
"agent_role": agent_role_name,
"input": user_input,
"timestamp": datetime.now().isoformat(),
"results": results
}
self._save_results(campaign_results)
return campaign_results
def _save_results(self, results: Dict):
"""Save campaign results"""
output_file = f"results/campaign_{self.session_id}.json"
with open(output_file, 'w') as f:
json.dump(results, f, indent=4)
logger.info(f"Results saved to {output_file}")
# Generate report
self._generate_report(results)
def _generate_report(self, results: Dict):
"""Generate professional HTML report with charts and modern CSS"""
report_file = f"reports/report_{self.session_id}.html"
        # Render the LLM's markdown response as HTML for the report body
llm_response = results.get('results', {}).get('llm_response', '')
if isinstance(llm_response, dict):
llm_response = json.dumps(llm_response, indent=2)
report_content = mistune.html(llm_response)
# Extract metrics from report
targets = results.get('results', {}).get('targets', [results.get('input', 'N/A')])
if isinstance(targets, str):
targets = [targets]
tools_executed = results.get('results', {}).get('tools_executed', 0)
        # Count severities from the report text (keyword heuristic; plain mentions of a severity word are counted too)
critical = len(re.findall(r'\[?Critical\]?', llm_response, re.IGNORECASE))
high = len(re.findall(r'\[?High\]?', llm_response, re.IGNORECASE))
medium = len(re.findall(r'\[?Medium\]?', llm_response, re.IGNORECASE))
low = len(re.findall(r'\[?Low\]?', llm_response, re.IGNORECASE))
info = len(re.findall(r'\[?Info\]?', llm_response, re.IGNORECASE))
total_vulns = critical + high + medium + low
        # Risk score: weighted severity sum capped at 100 (critical=25, high=15, medium=8, low=3)
risk_score = min(100, (critical * 25) + (high * 15) + (medium * 8) + (low * 3))
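        # e.g. 2 critical + 1 high -> min(100, 2*25 + 1*15) = 65, which maps to "High" below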
risk_level = "Critical" if risk_score >= 70 else "High" if risk_score >= 50 else "Medium" if risk_score >= 25 else "Low"
risk_color = "#e74c3c" if risk_score >= 70 else "#e67e22" if risk_score >= 50 else "#f1c40f" if risk_score >= 25 else "#27ae60"
html = f"""