"""
NeuroSploit v3 - Professional HTML Report Generator
Generates beautiful, comprehensive security assessment reports
"""
import json
import base64
from datetime import datetime
from pathlib import Path
from typing import Dict, List, Any, Optional
from dataclasses import dataclass
import html
@dataclass
class ReportConfig:
    """Report generation configuration.

    Controls branding and which optional sections are rendered into the
    generated HTML report.
    """
    # Name shown in the report branding/header.
    company_name: str = "NeuroSploit Security"
    # Optional base64-encoded logo image; None means no logo.
    logo_base64: Optional[str] = None
    # Toggles for optional report sections.
    include_executive_summary: bool = True
    include_methodology: bool = True
    include_recommendations: bool = True
    # Color theme: "dark" or "light".
    theme: str = "dark"
class HTMLReportGenerator:
    """Generate professional HTML security reports.

    NOTE(review): the HTML/CSS/JS template markup appears to have been
    stripped from the string literals in this file — only the dynamic text
    content remains, and several computed values (colors, cards, findings
    HTML) are built but never interpolated. The templates should be
    restored from version control; string content below is preserved as-is.
    """

    # Per-severity badge colors (background, text, border).
    SEVERITY_COLORS = {
        "critical": {"bg": "#dc2626", "text": "#ffffff", "border": "#991b1b"},
        "high": {"bg": "#ea580c", "text": "#ffffff", "border": "#c2410c"},
        "medium": {"bg": "#ca8a04", "text": "#ffffff", "border": "#a16207"},
        "low": {"bg": "#2563eb", "text": "#ffffff", "border": "#1d4ed8"},
        "info": {"bg": "#6b7280", "text": "#ffffff", "border": "#4b5563"}
    }

    # Sort rank per severity: lower value = more severe.
    SEVERITY_ORDER = {"critical": 0, "high": 1, "medium": 2, "low": 3, "info": 4}

    def __init__(self, config: Optional[ReportConfig] = None):
        """Create a generator; a default ReportConfig is used when none is given."""
        self.config = config or ReportConfig()

    def generate_report(
        self,
        session_data: Dict,
        findings: List[Dict],
        scan_results: Optional[List[Dict]] = None
    ) -> str:
        """Generate the complete HTML report and return it as a string.

        Args:
            session_data: Session metadata (expects keys like 'name',
                'target', 'created_at', 'recon_data' — all optional).
            findings: List of finding dicts (severity, title, evidence, ...).
            scan_results: Optional raw tool outputs to append as a section.
        """
        # Sort findings most-severe first; unknown severities sort last (rank 4).
        sorted_findings = sorted(
            findings,
            key=lambda x: self.SEVERITY_ORDER.get(x.get('severity', 'info'), 4)
        )
        # Aggregate counts and risk score for the summary sections.
        stats = self._calculate_stats(sorted_findings)
        # Assemble all report sections in order.
        html_content = f"""
Security Assessment Report - {html.escape(session_data.get('name', 'Unknown'))}
{self._get_styles()}
{self._generate_header(session_data)}
{self._generate_executive_summary(session_data, stats, sorted_findings)}
{self._generate_scope_section(session_data)}
{self._generate_findings_summary(stats)}
{self._generate_findings_detail(sorted_findings)}
{self._generate_scan_results(scan_results) if scan_results else ''}
{self._generate_recommendations(sorted_findings)}
{self._generate_methodology()}
{self._generate_footer(session_data)}
{self._get_scripts()}
"""
        return html_content

    def _get_styles(self) -> str:
        """Get CSS styles for the report.

        NOTE(review): the theme palette is computed but the CSS template
        body is missing (presumably stripped) — confirm against VCS.
        """
        is_dark = self.config.theme == "dark"
        bg_color = "#0f172a" if is_dark else "#ffffff"
        card_bg = "#1e293b" if is_dark else "#f8fafc"
        text_color = "#e2e8f0" if is_dark else "#1e293b"
        text_muted = "#94a3b8" if is_dark else "#64748b"
        border_color = "#334155" if is_dark else "#e2e8f0"
        accent = "#3b82f6"
        return f"""
"""

    def _get_scripts(self) -> str:
        """Get JavaScript for report interactivity (template body missing)."""
        return """
"""

    def _generate_header(self, session_data: Dict) -> str:
        """Generate the report header section."""
        target = session_data.get('target', 'Unknown Target')
        name = session_data.get('name', 'Security Assessment')
        created = session_data.get('created_at', datetime.utcnow().isoformat())
        try:
            # Accept 'Z' suffix by normalizing to an explicit UTC offset.
            created_dt = datetime.fromisoformat(created.replace('Z', '+00:00'))
            created_str = created_dt.strftime('%B %d, %Y')
        except (ValueError, AttributeError):
            # Not ISO-8601 (or not a string) — fall back to the raw value.
            created_str = created
        return f"""
"""

    def _calculate_stats(self, findings: List[Dict]) -> Dict:
        """Calculate finding statistics: per-severity counts, risk score, risk level."""
        stats = {
            "total": len(findings),
            "critical": 0,
            "high": 0,
            "medium": 0,
            "low": 0,
            "info": 0
        }
        for finding in findings:
            # Severity is case-normalized; unknown severities are ignored here.
            severity = finding.get('severity', 'info').lower()
            if severity in stats:
                stats[severity] += 1
        # Weighted risk score, capped at 100.
        risk_score = (
            stats['critical'] * 25 +
            stats['high'] * 15 +
            stats['medium'] * 8 +
            stats['low'] * 3 +
            stats['info'] * 1
        )
        stats['risk_score'] = min(100, risk_score)
        # Any critical finding forces HIGH regardless of score.
        if stats['risk_score'] >= 70 or stats['critical'] > 0:
            stats['risk_level'] = 'HIGH'
            stats['risk_class'] = 'risk-high'
        elif stats['risk_score'] >= 40 or stats['high'] > 1:
            stats['risk_level'] = 'MEDIUM'
            stats['risk_class'] = 'risk-medium'
        else:
            stats['risk_level'] = 'LOW'
            stats['risk_class'] = 'risk-low'
        return stats

    def _generate_executive_summary(self, session_data: Dict, stats: Dict, findings: List[Dict]) -> str:
        """Generate the executive summary section, worded by worst severity present."""
        target = session_data.get('target', 'the target')
        # Pick summary wording based on the most severe class of findings.
        if stats['critical'] > 0:
            summary = f"The security assessment of {html.escape(target)} revealed {stats['critical']} critical vulnerabilities that require immediate attention. These findings pose significant risk to the application's security posture and could lead to severe data breaches or system compromise."
        elif stats['high'] > 0:
            summary = f"The security assessment identified {stats['high']} high-severity issues that should be addressed promptly. While no critical vulnerabilities were found, the identified issues could be exploited by attackers to gain unauthorized access or compromise sensitive data."
        elif stats['medium'] > 0:
            summary = f"The assessment found {stats['medium']} medium-severity findings that represent moderate risk. These issues should be included in the remediation roadmap and addressed according to priority."
        else:
            summary = f"The security assessment completed with {stats['total']} findings, primarily informational in nature. The overall security posture appears reasonable, though continuous monitoring is recommended."
        return f"""
{summary}
Overall Risk Score: {stats['risk_score']}/100 ({stats['risk_level']})
{stats['total']}
Total Findings
"""

    def _generate_scope_section(self, session_data: Dict) -> str:
        """Generate the scope section: target, endpoint count, detected tech."""
        target = session_data.get('target', 'Unknown')
        recon = session_data.get('recon_data', {})
        technologies = recon.get('technologies', [])
        endpoints = recon.get('endpoints', [])
        tech_html = ""
        if technologies:
            # Show at most 15 technologies, HTML-escaped.
            tech_html = f"""
Detected Technologies
{"".join(f'{html.escape(t)}' for t in technologies[:15])}
"""
        return f"""
| Target URL |
{html.escape(target)} |
| Endpoints Tested |
{len(endpoints)} |
| Assessment Type |
Automated Security Scan + AI Analysis |
{tech_html}
"""

    def _generate_findings_summary(self, stats: Dict) -> str:
        """Generate the findings summary stat cards."""
        return f"""
{stats['critical']}
Critical
"""

    def _generate_findings_detail(self, findings: List[Dict]) -> str:
        """Generate detailed findings section with CVSS, CWE, and OWASP data.

        NOTE(review): findings_html is built but not interpolated into the
        final return value — likely lost with the stripped template; confirm
        against VCS before relying on this section's output.
        """
        if not findings:
            return """
No vulnerabilities were identified during this assessment.
"""
        findings_html = ""
        for i, finding in enumerate(findings):
            severity = finding.get('severity', 'info').lower()
            colors = self.SEVERITY_COLORS.get(severity, self.SEVERITY_COLORS['info'])
            # CVSS defaults to a severity-derived score when not supplied.
            cvss_score = finding.get('cvss_score', self._get_default_cvss(severity))
            cvss_vector = finding.get('cvss_vector', '')
            cwe_id = finding.get('cwe_id', '')
            owasp = finding.get('owasp', '')
            # Build the technical-info cards (CVSS / CWE / OWASP).
            tech_info_html = ""
            if cvss_score or cwe_id or owasp:
                tech_items = []
                if cvss_score:
                    cvss_color = self._get_cvss_color(cvss_score)
                    tech_items.append(f'''
CVSS Score
{cvss_score}
{self._get_cvss_rating(cvss_score)}
{f'{html.escape(cvss_vector)}' if cvss_vector else ''}
''')
                if cwe_id:
                    # Link to MITRE only for well-formed "CWE-NNN" identifiers.
                    cwe_link = f"https://cwe.mitre.org/data/definitions/{cwe_id.replace('CWE-', '')}.html" if cwe_id.startswith('CWE-') else '#'
                    tech_items.append(f'''
''')
                if owasp:
                    tech_items.append(f'''
OWASP Top 10
{html.escape(owasp)}
''')
                tech_info_html = f'''
{''.join(tech_items)}
'''
            findings_html += f"""
{tech_info_html}
Vulnerability Type
{html.escape(finding.get('vulnerability_type', 'Unknown'))}
Description
{html.escape(finding.get('description', 'No description available'))}
{f'''
Affected Endpoint
{html.escape(finding.get('affected_endpoint', ''))}
''' if finding.get('affected_endpoint') else ''}
{f'''
Evidence / Proof of Concept
{html.escape(finding.get('evidence', ''))}
''' if finding.get('evidence') else ''}
{self._generate_screenshots_html(finding)}
{f'''
Impact
{html.escape(finding.get('impact', ''))}
''' if finding.get('impact') else ''}
Remediation
{html.escape(finding.get('remediation', 'Review and address this finding'))}
{self._generate_references_html(finding.get('references', []))}
"""
        return f"""
"""

    def _get_default_cvss(self, severity: str) -> float:
        """Get default CVSS score for a severity label (case-insensitive)."""
        defaults = {
            'critical': 9.5,
            'high': 7.5,
            'medium': 5.0,
            'low': 3.0,
            'info': 0.0
        }
        # Unknown labels get a middle-of-the-road 5.0.
        return defaults.get(severity.lower(), 5.0)

    def _get_cvss_color(self, score: float) -> str:
        """Get display color for a CVSS score (CVSS v3 rating bands)."""
        if score >= 9.0:
            return '#dc2626'  # Critical - Red
        elif score >= 7.0:
            return '#ea580c'  # High - Orange
        elif score >= 4.0:
            return '#ca8a04'  # Medium - Yellow
        elif score > 0:
            return '#2563eb'  # Low - Blue
        else:
            return '#6b7280'  # Info - Gray

    def _get_cvss_rating(self, score: float) -> str:
        """Get the CVSS rating text for a score (CVSS v3 rating bands)."""
        if score >= 9.0:
            return 'Critical'
        elif score >= 7.0:
            return 'High'
        elif score >= 4.0:
            return 'Medium'
        elif score > 0:
            return 'Low'
        else:
            return 'None'

    def _generate_references_html(self, references: List[str]) -> str:
        """Generate the references section HTML (at most 5 references)."""
        if not references:
            return ''
        refs_html = ''
        for ref in references[:5]:  # Limit to 5 references
            if ref.startswith('http'):
                # Truncate long URLs to 60 chars with an ellipsis.
                refs_html += f'{html.escape(ref[:60])}{"..." if len(ref) > 60 else ""}'
            else:
                refs_html += f'{html.escape(ref)}'
        return f'''
'''

    def _generate_screenshots_html(self, finding: Dict) -> str:
        """Generate screenshot grid HTML for a finding.

        Supports two sources:
        1. finding['screenshots'] list with base64 data URIs (from agent capture)
        2. Filesystem lookup in reports/screenshots/{finding_id}/ (from BrowserValidator)
        """
        # Copy so appending filesystem screenshots never mutates the caller's dict.
        screenshots = list(finding.get('screenshots', []))
        # Filesystem lookup is a fallback used only when none were supplied inline.
        finding_id = finding.get('id', '')
        if finding_id and not screenshots:
            ss_dir = Path('reports/screenshots') / finding_id
            if ss_dir.exists():
                for ss_file in sorted(ss_dir.glob('*.png'))[:5]:
                    try:
                        with open(ss_file, 'rb') as f:
                            data = base64.b64encode(f.read()).decode('ascii')
                        screenshots.append(f"data:image/png;base64,{data}")
                    except Exception:
                        # Best-effort: skip unreadable screenshot files.
                        pass
        if not screenshots:
            return ''
        cards = ''
        for i, ss in enumerate(screenshots[:5]):  # Cap at 5 screenshots
            # First two screenshots get descriptive labels.
            label = f"Screenshot {i + 1}"
            if i == 0:
                label = "Evidence Capture"
            elif i == 1:
                label = "Exploitation Proof"
            cards += f'''
{label}
'''
        return f'''
'''

    def _generate_scan_results(self, scan_results: List[Dict]) -> str:
        """Generate the tool scan results section."""
        if not scan_results:
            return ""
        results_html = ""
        for result in scan_results:
            tool = result.get('tool', 'Unknown')
            status = result.get('status', 'unknown')
            output = result.get('output', '')[:2000]  # Limit output size
            # Green for completed runs, red otherwise.
            status_color = "#22c55e" if status == "completed" else "#ef4444"
            results_html += f"""
{html.escape(tool)}
{status}
{html.escape(output)}
"""
        return f"""
"""

    def _generate_recommendations(self, findings: List[Dict]) -> str:
        """Generate prioritized remediation recommendations grouped by severity."""
        recommendations = []
        # Group findings by severity; case-normalized to match _calculate_stats.
        critical = [fnd for fnd in findings if (fnd.get('severity') or '').lower() == 'critical']
        high = [fnd for fnd in findings if (fnd.get('severity') or '').lower() == 'high']
        medium = [fnd for fnd in findings if (fnd.get('severity') or '').lower() == 'medium']
        if critical:
            recommendations.append({
                "priority": "Immediate",
                "color": "#dc2626",
                "items": [f"Fix: {f.get('title', 'Unknown')} - {f.get('remediation', 'Review and fix')}" for f in critical]
            })
        if high:
            recommendations.append({
                "priority": "Short-term (1-2 weeks)",
                "color": "#ea580c",
                "items": [f"Address: {f.get('title', 'Unknown')}" for f in high]
            })
        if medium:
            recommendations.append({
                "priority": "Medium-term (1 month)",
                "color": "#ca8a04",
                "items": [f"Plan fix for: {f.get('title', 'Unknown')}" for f in medium[:5]]
            })
        # Always add general, ongoing recommendations.
        recommendations.append({
            "priority": "Ongoing",
            "color": "#3b82f6",
            "items": [
                "Implement regular security scanning",
                "Keep all software and dependencies updated",
                "Review and strengthen authentication mechanisms",
                "Implement proper logging and monitoring",
                "Conduct periodic penetration testing"
            ]
        })
        rec_html = ""
        for rec in recommendations:
            items_html = "".join(f"{html.escape(item)}" for item in rec['items'])
            rec_html += f"""
"""
        return f"""
"""

    def _generate_methodology(self) -> str:
        """Generate the static methodology section."""
        return """
1. Reconnaissance
Technology fingerprinting, endpoint discovery, and information gathering
2. Vulnerability Scanning
Automated scanning for known vulnerabilities and misconfigurations
3. AI Analysis
LLM-powered analysis of findings for context and remediation
4. Verification
Manual verification of critical findings to eliminate false positives
"""

    def _generate_footer(self, session_data: Dict) -> str:
        """Generate the report footer."""
        return f"""
"""