Mirror of https://github.com/CyberSecurityUP/NeuroSploit.git, synced 2026-02-12 14:02:45 +00:00
Add files via upload
install_tools.sh (new file, 544 lines)
@@ -0,0 +1,544 @@
#!/bin/bash
#
# NeuroSploit v2 - Reconnaissance Tools Installer
# Installs all required tools for advanced reconnaissance
#

# Note: strict 'set -e' is intentionally not used here. Many of the installs
# below are best-effort (guarded with '2>/dev/null' or '|| true'), and a single
# failed optional tool or a missing tool in the status summary must not abort
# the whole installer.

# Colors
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
CYAN='\033[0;36m'
NC='\033[0m' # No Color

# Banner
echo -e "${CYAN}"
echo "╔═══════════════════════════════════════════════════════════════╗"
echo "║ NEUROSPLOIT v2 - TOOLS INSTALLER ║"
echo "║ Advanced Reconnaissance Tools Setup ║"
echo "╚═══════════════════════════════════════════════════════════════╝"
echo -e "${NC}"

# Detect OS
detect_os() {
    if [[ "$OSTYPE" == "darwin"* ]]; then
        OS="macos"
        PKG_MANAGER="brew"
    elif [ -f /etc/debian_version ]; then
        OS="debian"
        PKG_MANAGER="apt"
    elif [ -f /etc/redhat-release ]; then
        OS="redhat"
        PKG_MANAGER="dnf"
    elif [ -f /etc/arch-release ]; then
        OS="arch"
        PKG_MANAGER="pacman"
    else
        OS="unknown"
        PKG_MANAGER="unknown"
    fi
    echo -e "${BLUE}[*] Detected OS: ${OS} (Package Manager: ${PKG_MANAGER})${NC}"
}

# Check if command exists
command_exists() {
    command -v "$1" &> /dev/null
}

# Print status
print_status() {
    if command_exists "$1"; then
        echo -e " ${GREEN}[✓]${NC} $1 - installed"
        return 0
    else
        echo -e " ${RED}[✗]${NC} $1 - not found"
        return 1
    fi
}

# Install Go if not present
install_go() {
    if command_exists go; then
        echo -e "${GREEN}[✓] Go is already installed${NC}"
        return 0
    fi

    echo -e "${YELLOW}[*] Installing Go...${NC}"

    if [ "$OS" == "macos" ]; then
        brew install go
    elif [ "$OS" == "debian" ]; then
        sudo apt update && sudo apt install -y golang-go
    elif [ "$OS" == "redhat" ]; then
        sudo dnf install -y golang
    elif [ "$OS" == "arch" ]; then
        sudo pacman -S --noconfirm go
    else
        # Manual installation
        GO_VERSION="1.21.5"
        wget "https://go.dev/dl/go${GO_VERSION}.linux-amd64.tar.gz"
        sudo rm -rf /usr/local/go
        sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz"
        rm "go${GO_VERSION}.linux-amd64.tar.gz"
        export PATH=$PATH:/usr/local/go/bin
        echo 'export PATH=$PATH:/usr/local/go/bin' >> ~/.bashrc
        echo 'export PATH=$PATH:$(go env GOPATH)/bin' >> ~/.bashrc
    fi

    # Set GOPATH
    export GOPATH=$HOME/go
    export PATH=$PATH:$GOPATH/bin
}

# Install Rust if not present
install_rust() {
    if command_exists cargo; then
        echo -e "${GREEN}[✓] Rust is already installed${NC}"
        return 0
    fi

    echo -e "${YELLOW}[*] Installing Rust...${NC}"
    curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
    source "$HOME/.cargo/env"
}

# Install Python packages
install_python_packages() {
    echo -e "${BLUE}[*] Installing Python packages...${NC}"

    pip3 install --upgrade pip 2>/dev/null || pip install --upgrade pip

    # Core packages
    pip3 install requests dnspython urllib3 2>/dev/null || pip install requests dnspython urllib3

    # Security tools
    pip3 install wafw00f 2>/dev/null || echo -e "${YELLOW} [!] wafw00f installation failed, try: pip install wafw00f${NC}"
    pip3 install paramspider 2>/dev/null || echo -e "${YELLOW} [!] paramspider installation failed${NC}"
}

# Install tool via Go
install_go_tool() {
    local tool_name=$1
    local repo=$2

    if command_exists "$tool_name"; then
        echo -e " ${GREEN}[✓]${NC} $tool_name - already installed"
        return 0
    fi

    echo -e " ${YELLOW}[~]${NC} Installing $tool_name..."
    go install "$repo@latest" 2>/dev/null

    if command_exists "$tool_name"; then
        echo -e " ${GREEN}[✓]${NC} $tool_name - installed successfully"
    else
        echo -e " ${RED}[✗]${NC} $tool_name - installation failed"
    fi
}

# Install tool via Cargo (Rust)
install_cargo_tool() {
    local tool_name=$1
    local crate_name=${2:-$tool_name}

    if command_exists "$tool_name"; then
        echo -e " ${GREEN}[✓]${NC} $tool_name - already installed"
        return 0
    fi

    echo -e " ${YELLOW}[~]${NC} Installing $tool_name..."
    cargo install "$crate_name" 2>/dev/null

    if command_exists "$tool_name"; then
        echo -e " ${GREEN}[✓]${NC} $tool_name - installed successfully"
    else
        echo -e " ${RED}[✗]${NC} $tool_name - installation failed"
    fi
}

# Install system packages
install_system_packages() {
    echo -e "${BLUE}[*] Installing system packages...${NC}"

    if [ "$OS" == "macos" ]; then
        brew update
        brew install nmap curl wget jq git python3 2>/dev/null || true
        brew install feroxbuster 2>/dev/null || true
        brew install nikto 2>/dev/null || true
        brew install whatweb 2>/dev/null || true

    elif [ "$OS" == "debian" ]; then
        sudo apt update
        sudo apt install -y nmap curl wget jq git python3 python3-pip dnsutils whois
        sudo apt install -y nikto whatweb 2>/dev/null || true

    elif [ "$OS" == "redhat" ]; then
        sudo dnf install -y nmap curl wget jq git python3 python3-pip bind-utils whois

    elif [ "$OS" == "arch" ]; then
        sudo pacman -Syu --noconfirm nmap curl wget jq git python python-pip dnsutils whois
        sudo pacman -S --noconfirm nikto whatweb 2>/dev/null || true
    fi
}

# Install Go-based tools
install_go_tools() {
    echo -e "\n${BLUE}[*] Installing Go-based reconnaissance tools...${NC}"

    # Ensure Go paths are set
    export GOPATH=${GOPATH:-$HOME/go}
    export PATH=$PATH:$GOPATH/bin

    # ProjectDiscovery tools
    install_go_tool "subfinder" "github.com/projectdiscovery/subfinder/v2/cmd/subfinder"
    install_go_tool "httpx" "github.com/projectdiscovery/httpx/cmd/httpx"
    install_go_tool "nuclei" "github.com/projectdiscovery/nuclei/v3/cmd/nuclei"
    install_go_tool "naabu" "github.com/projectdiscovery/naabu/v2/cmd/naabu"
    install_go_tool "katana" "github.com/projectdiscovery/katana/cmd/katana"
    install_go_tool "dnsx" "github.com/projectdiscovery/dnsx/cmd/dnsx"
    install_go_tool "shuffledns" "github.com/projectdiscovery/shuffledns/cmd/shuffledns"

    # Other Go tools
    install_go_tool "amass" "github.com/owasp-amass/amass/v4/..."
    install_go_tool "assetfinder" "github.com/tomnomnom/assetfinder"
    install_go_tool "waybackurls" "github.com/tomnomnom/waybackurls"
    install_go_tool "gau" "github.com/lc/gau/v2/cmd/gau"
    install_go_tool "httprobe" "github.com/tomnomnom/httprobe"
    install_go_tool "ffuf" "github.com/ffuf/ffuf/v2"
    install_go_tool "gobuster" "github.com/OJ/gobuster/v3"
    install_go_tool "gospider" "github.com/jaeles-project/gospider"
    install_go_tool "hakrawler" "github.com/hakluke/hakrawler"
    install_go_tool "subjack" "github.com/haccer/subjack"
    install_go_tool "gowitness" "github.com/sensepost/gowitness"
    install_go_tool "findomain" "github.com/Findomain/Findomain"
}

# Install Rust-based tools
install_rust_tools() {
    echo -e "\n${BLUE}[*] Installing Rust-based tools...${NC}"

    source "$HOME/.cargo/env" 2>/dev/null || true

    install_cargo_tool "rustscan" "rustscan"
    install_cargo_tool "feroxbuster" "feroxbuster"
}

# Install Nuclei templates
install_nuclei_templates() {
    echo -e "\n${BLUE}[*] Updating Nuclei templates...${NC}"

    if command_exists nuclei; then
        nuclei -update-templates 2>/dev/null || echo -e "${YELLOW} [!] Template update failed, run manually: nuclei -update-templates${NC}"
        echo -e " ${GREEN}[✓]${NC} Nuclei templates updated"
    else
        echo -e " ${RED}[✗]${NC} Nuclei not installed, skipping templates"
    fi
}

# Install SecLists
install_seclists() {
    echo -e "\n${BLUE}[*] Checking SecLists...${NC}"

    SECLISTS_PATH="/opt/wordlists/SecLists"

    if [ -d "$SECLISTS_PATH" ]; then
        echo -e " ${GREEN}[✓]${NC} SecLists already installed at $SECLISTS_PATH"
        return 0
    fi

    echo -e " ${YELLOW}[~]${NC} Installing SecLists..."
    sudo mkdir -p /opt/wordlists
    sudo git clone --depth 1 https://github.com/danielmiessler/SecLists.git "$SECLISTS_PATH" 2>/dev/null || {
        echo -e " ${RED}[✗]${NC} SecLists installation failed"
        return 1
    }

    # Create symlinks for common wordlists
    sudo ln -sf "$SECLISTS_PATH/Discovery/Web-Content/common.txt" /opt/wordlists/common.txt 2>/dev/null
    sudo ln -sf "$SECLISTS_PATH/Discovery/Web-Content/raft-medium-directories.txt" /opt/wordlists/directories.txt 2>/dev/null
    sudo ln -sf "$SECLISTS_PATH/Discovery/DNS/subdomains-top1million-5000.txt" /opt/wordlists/subdomains.txt 2>/dev/null

    echo -e " ${GREEN}[✓]${NC} SecLists installed"
}

# Install additional tools via package managers or manual
install_additional_tools() {
    echo -e "\n${BLUE}[*] Installing additional tools...${NC}"

    # wafw00f
    if ! command_exists wafw00f; then
        echo -e " ${YELLOW}[~]${NC} Installing wafw00f..."
        pip3 install wafw00f 2>/dev/null || pip install wafw00f 2>/dev/null
    fi
    print_status "wafw00f"

    # paramspider
    if ! command_exists paramspider; then
        echo -e " ${YELLOW}[~]${NC} Installing paramspider..."
        pip3 install paramspider 2>/dev/null || {
            git clone https://github.com/devanshbatham/ParamSpider.git /tmp/paramspider 2>/dev/null
            cd /tmp/paramspider && pip3 install . 2>/dev/null
            cd -
        }
    fi
    print_status "paramspider"

    # whatweb
    if ! command_exists whatweb; then
        if [ "$OS" == "macos" ]; then
            brew install whatweb 2>/dev/null
        elif [ "$OS" == "debian" ]; then
            sudo apt install -y whatweb 2>/dev/null
        fi
    fi
    print_status "whatweb"

    # nikto
    if ! command_exists nikto; then
        if [ "$OS" == "macos" ]; then
            brew install nikto 2>/dev/null
        elif [ "$OS" == "debian" ]; then
            sudo apt install -y nikto 2>/dev/null
        fi
    fi
    print_status "nikto"

    # sqlmap
    if ! command_exists sqlmap; then
        echo -e " ${YELLOW}[~]${NC} Installing sqlmap..."
        if [ "$OS" == "macos" ]; then
            brew install sqlmap 2>/dev/null
        elif [ "$OS" == "debian" ]; then
            sudo apt install -y sqlmap 2>/dev/null
        else
            pip3 install sqlmap 2>/dev/null
        fi
    fi
    print_status "sqlmap"

    # eyewitness
    if ! command_exists eyewitness; then
        echo -e " ${YELLOW}[~]${NC} Installing EyeWitness..."
        git clone https://github.com/RedSiege/EyeWitness.git /opt/EyeWitness 2>/dev/null || true
        if [ -d "/opt/EyeWitness" ]; then
            cd /opt/EyeWitness/Python/setup
            sudo ./setup.sh 2>/dev/null || true
            sudo ln -sf /opt/EyeWitness/Python/EyeWitness.py /usr/local/bin/eyewitness 2>/dev/null
            cd -
        fi
    fi
    print_status "eyewitness"

    # wpscan
    if ! command_exists wpscan; then
        echo -e " ${YELLOW}[~]${NC} Installing wpscan..."
        if [ "$OS" == "macos" ]; then
            brew install wpscan 2>/dev/null
        else
            sudo gem install wpscan 2>/dev/null || true
        fi
    fi
    print_status "wpscan"

    # dirsearch
    if ! command_exists dirsearch; then
        echo -e " ${YELLOW}[~]${NC} Installing dirsearch..."
        pip3 install dirsearch 2>/dev/null || {
            git clone https://github.com/maurosoria/dirsearch.git /opt/dirsearch 2>/dev/null
            sudo ln -sf /opt/dirsearch/dirsearch.py /usr/local/bin/dirsearch 2>/dev/null
        }
    fi
    print_status "dirsearch"

    # massdns (for shuffledns/puredns)
    if ! command_exists massdns; then
        echo -e " ${YELLOW}[~]${NC} Installing massdns..."
        git clone https://github.com/blechschmidt/massdns.git /tmp/massdns 2>/dev/null
        cd /tmp/massdns && make 2>/dev/null && sudo make install 2>/dev/null
        cd -
    fi
    print_status "massdns"

    # puredns
    if ! command_exists puredns; then
        echo -e " ${YELLOW}[~]${NC} Installing puredns..."
        go install github.com/d3mondev/puredns/v2@latest 2>/dev/null
    fi
    print_status "puredns"

    # waymore
    if ! command_exists waymore; then
        echo -e " ${YELLOW}[~]${NC} Installing waymore..."
        pip3 install waymore 2>/dev/null || pip install waymore 2>/dev/null
    fi
    print_status "waymore"
}

# Check all tools status
check_tools_status() {
    echo -e "\n${CYAN}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "${CYAN} TOOLS STATUS SUMMARY ${NC}"
    echo -e "${CYAN}═══════════════════════════════════════════════════════════════${NC}\n"

    echo -e "${BLUE}[Subdomain Enumeration]${NC}"
    print_status "subfinder"
    print_status "amass"
    print_status "assetfinder"
    print_status "findomain"
    print_status "puredns"
    print_status "shuffledns"
    print_status "massdns"

    echo -e "\n${BLUE}[HTTP Probing]${NC}"
    print_status "httpx"
    print_status "httprobe"

    echo -e "\n${BLUE}[URL Collection]${NC}"
    print_status "gau"
    print_status "waybackurls"
    print_status "waymore"
    print_status "hakrawler"

    echo -e "\n${BLUE}[Web Crawling]${NC}"
    print_status "katana"
    print_status "gospider"

    echo -e "\n${BLUE}[Directory Bruteforce]${NC}"
    print_status "feroxbuster"
    print_status "gobuster"
    print_status "ffuf"
    print_status "dirsearch"

    echo -e "\n${BLUE}[Port Scanning]${NC}"
    print_status "rustscan"
    print_status "naabu"
    print_status "nmap"

    echo -e "\n${BLUE}[Vulnerability Scanning]${NC}"
    print_status "nuclei"
    print_status "nikto"
    print_status "sqlmap"
    print_status "wpscan"

    echo -e "\n${BLUE}[WAF Detection]${NC}"
    print_status "wafw00f"

    echo -e "\n${BLUE}[Parameter Discovery]${NC}"
    print_status "paramspider"

    echo -e "\n${BLUE}[Fingerprinting]${NC}"
    print_status "whatweb"

    echo -e "\n${BLUE}[Screenshot]${NC}"
    print_status "gowitness"
    print_status "eyewitness"

    echo -e "\n${BLUE}[Subdomain Takeover]${NC}"
    print_status "subjack"

    echo -e "\n${BLUE}[DNS Tools]${NC}"
    print_status "dnsx"
    print_status "dig"

    echo -e "\n${BLUE}[Utilities]${NC}"
    print_status "curl"
    print_status "wget"
    print_status "jq"
    print_status "git"

    echo -e "\n${BLUE}[Wordlists]${NC}"
    if [ -d "/opt/wordlists/SecLists" ]; then
        echo -e " ${GREEN}[✓]${NC} SecLists - installed at /opt/wordlists/SecLists"
    else
        echo -e " ${RED}[✗]${NC} SecLists - not found"
    fi
}

# Update PATH
update_path() {
    echo -e "\n${BLUE}[*] Updating PATH...${NC}"

    # Add Go bin to PATH
    if ! grep -q 'GOPATH' ~/.bashrc 2>/dev/null; then
        echo 'export GOPATH=$HOME/go' >> ~/.bashrc
        echo 'export PATH=$PATH:$GOPATH/bin' >> ~/.bashrc
    fi

    if ! grep -q 'GOPATH' ~/.zshrc 2>/dev/null; then
        echo 'export GOPATH=$HOME/go' >> ~/.zshrc 2>/dev/null || true
        echo 'export PATH=$PATH:$GOPATH/bin' >> ~/.zshrc 2>/dev/null || true
    fi

    # Add Cargo bin to PATH
    if ! grep -q '.cargo/bin' ~/.bashrc 2>/dev/null; then
        echo 'export PATH=$PATH:$HOME/.cargo/bin' >> ~/.bashrc
    fi

    # Source for current session
    export GOPATH=$HOME/go
    export PATH=$PATH:$GOPATH/bin:$HOME/.cargo/bin

    echo -e " ${GREEN}[✓]${NC} PATH updated"
}

# Main installation function
main() {
    echo -e "${BLUE}[*] Starting NeuroSploit tools installation...${NC}\n"

    detect_os

    # Parse arguments
    INSTALL_ALL=false
    CHECK_ONLY=false

    while [[ "$#" -gt 0 ]]; do
        case $1 in
            --all) INSTALL_ALL=true ;;
            --check) CHECK_ONLY=true ;;
            --help|-h)
                echo "Usage: $0 [OPTIONS]"
                echo ""
                echo "Options:"
                echo " --all Install all tools (full installation)"
                echo " --check Only check tool status, don't install"
                echo " --help Show this help message"
                echo ""
                exit 0
                ;;
            *) echo "Unknown parameter: $1"; exit 1 ;;
        esac
        shift
    done

    if [ "$CHECK_ONLY" = true ]; then
        check_tools_status
        exit 0
    fi

    # Installation steps
    install_system_packages
    install_go
    install_rust
    install_python_packages
    install_go_tools
    install_rust_tools
    install_additional_tools
    install_seclists
    install_nuclei_templates
    update_path

    # Final status check
    check_tools_status

    echo -e "\n${GREEN}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "${GREEN} INSTALLATION COMPLETE! ${NC}"
    echo -e "${GREEN}═══════════════════════════════════════════════════════════════${NC}"
    echo -e "\n${YELLOW}[!] Please restart your terminal or run: source ~/.bashrc${NC}"
    echo -e "${YELLOW}[!] Some tools may require sudo privileges to run${NC}\n"
}

# Run main
main "$@"
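For reference, a minimal usage sketch for the installer above. It relies only on the options the script itself parses (--check, --all) and on its closing hint about reloading the shell; making the uploaded file executable first is an assumption.

    # Check which tools are already present without installing anything
    chmod +x install_tools.sh
    ./install_tools.sh --check

    # Full installation, then reload the shell so the Go and Cargo bin paths apply
    ./install_tools.sh --all
    source ~/.bashrc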
neurosploit.py (765 lines changed)
@@ -37,6 +37,23 @@ from core.context_builder import ReconContextBuilder
from agents.base_agent import BaseAgent
from tools.recon.recon_tools import FullReconRunner

# Import AI Agents
try:
    from backend.core.ai_pentest_agent import AIPentestAgent
except ImportError:
    AIPentestAgent = None

try:
    from backend.core.autonomous_agent import AutonomousAgent, OperationMode
    from backend.core.task_library import get_task_library, Task, TaskCategory
except ImportError:
    AutonomousAgent = None
    OperationMode = None
    get_task_library = None
    Task = None
    TaskCategory = None


class Completer:
    def __init__(self, neurosploit):
        self.neurosploit = neurosploit
@@ -44,7 +61,12 @@ class Completer:
            "help", "run_agent", "config", "list_roles", "list_profiles",
            "set_profile", "set_agent", "discover_ollama", "install_tools",
            "scan", "quick_scan", "recon", "full_recon", "check_tools",
            "experience", "wizard", "analyze", "exit", "quit"
            "experience", "wizard", "analyze", "agent", "ai_agent",
            # New autonomous agent modes
            "pentest", "full_auto", "recon_only", "prompt_only", "analyze_only",
            # Task library
            "tasks", "task", "list_tasks", "create_task", "run_task",
            "exit", "quit"
        ]
        self.agent_roles = list(self.neurosploit.config.get('agent_roles', {}).keys())
        self.llm_profiles = list(self.neurosploit.config.get('llm', {}).get('profiles', {}).keys())
@@ -950,6 +972,465 @@ CONTEXTO DE RECON:
            "context_text_file": recon_results.get('context_text_file', '')
        }

    def run_ai_agent(
        self,
        target: str,
        prompt_file: Optional[str] = None,
        context_file: Optional[str] = None,
        llm_profile: Optional[str] = None
    ) -> Dict:
        """
        Run the AI Offensive Security Agent.

        This is an autonomous agent that:
        - Uses LLM for intelligent vulnerability testing
        - Confirms vulnerabilities with AI (no false positives)
        - Uses recon data to inform testing
        - Accepts custom .md prompt files
        - Generates PoC code

        Args:
            target: Target URL to test
            prompt_file: Optional .md file with custom testing instructions
            context_file: Optional recon context JSON file
            llm_profile: Optional LLM profile to use
        """
        if not AIPentestAgent:
            print("[!] AI Agent not available. Check backend installation.")
            return {"error": "AI Agent not installed"}

        print(f"\n{'='*70}")
        print(" NEUROSPLOIT AI OFFENSIVE SECURITY AGENT")
        print(f"{'='*70}")
        print(f"\n[*] Target: {target}")
        if prompt_file:
            print(f"[*] Prompt file: {prompt_file}")
        if context_file:
            print(f"[*] Context file: {context_file}")
        print(f"[*] Session ID: {self.session_id}")
        print()

        # Load recon context if provided
        recon_context = None
        if context_file:
            from core.context_builder import load_context_from_file
            recon_context = load_context_from_file(context_file)
            if recon_context:
                print(f"[+] Loaded recon context: {len(recon_context.get('data', {}).get('endpoints', []))} endpoints")

        # Initialize LLM manager
        profile = llm_profile or self.config.get('llm', {}).get('default_profile')
        self._initialize_llm_manager(profile)

        # Run the agent
        import asyncio

        async def run_agent():
            async def log_callback(level: str, message: str):
                prefix = {
                    "info": "[*]",
                    "warning": "[!]",
                    "error": "[X]",
                    "debug": "[D]",
                }.get(level, "[*]")
                print(f"{prefix} {message}")

            async with AIPentestAgent(
                target=target,
                llm_manager=self.llm_manager_instance,
                log_callback=log_callback,
                prompt_file=prompt_file,
                recon_context=recon_context,
                config=self.config,
                max_depth=5
            ) as agent:
                report = await agent.run()
                return report

        try:
            report = asyncio.run(run_agent())
        except Exception as e:
            logger.error(f"Agent error: {e}")
            import traceback
            traceback.print_exc()
            return {"error": str(e)}

        # Save results
        if report and report.get("findings"):
            result_file = f"results/agent_{self.session_id}.json"
            with open(result_file, 'w') as f:
                json.dump(report, f, indent=2, default=str)
            print(f"\n[+] Results saved: {result_file}")

            # Generate HTML report
            self._generate_agent_report(report)

        print(f"\n{'='*70}")
        print("[+] AI AGENT COMPLETE!")
        print(f" Vulnerabilities found: {len(report.get('findings', []))}")
        print(f"{'='*70}\n")

        return report

    def run_autonomous_agent(
        self,
        target: str,
        mode: str = "full_auto",
        task_id: Optional[str] = None,
        prompt: Optional[str] = None,
        prompt_file: Optional[str] = None,
        context_file: Optional[str] = None,
        llm_profile: Optional[str] = None
    ) -> Dict:
        """
        Run the Autonomous AI Security Agent.

        Modes:
        - recon_only: Just reconnaissance, no testing
        - full_auto: Complete workflow (Recon -> Analyze -> Test -> Report)
        - prompt_only: AI decides everything based on prompt (HIGH TOKEN USAGE!)
        - analyze_only: Analysis of provided data, no active testing

        Args:
            target: Target URL/domain
            mode: Operation mode
            task_id: Task from library to execute
            prompt: Custom prompt
            prompt_file: Path to .md prompt file
            context_file: Path to recon context JSON
            llm_profile: LLM profile to use
        """
        if not AutonomousAgent:
            print("[!] Autonomous Agent not available. Check installation.")
            return {"error": "Agent not installed"}

        print(f"\n{'='*70}")
        print(" NEUROSPLOIT AUTONOMOUS AI AGENT")
        print(f"{'='*70}")
        print(f"\n[*] Target: {target}")
        print(f"[*] Mode: {mode.upper()}")

        # Warning for prompt_only mode
        if mode == "prompt_only":
            print("\n[!] WARNING: PROMPT-ONLY MODE")
            print("[!] This mode uses significantly more tokens than other modes.")
            print("[!] The AI will decide what tools to use based on your prompt.\n")

        # Load task from library
        task = None
        if task_id and get_task_library:
            library = get_task_library()
            task = library.get_task(task_id)
            if task:
                print(f"[*] Task: {task.name}")
                prompt = task.prompt
            else:
                print(f"[!] Task not found: {task_id}")

        # Load prompt from file
        if prompt_file:
            print(f"[*] Prompt file: {prompt_file}")
            try:
                path = Path(prompt_file)
                for search in [path, Path("prompts") / path, Path("prompts/md_library") / path]:
                    if search.exists():
                        prompt = search.read_text()
                        break
            except Exception as e:
                print(f"[!] Error loading prompt file: {e}")

        # Load recon context
        recon_context = None
        if context_file:
            from core.context_builder import load_context_from_file
            recon_context = load_context_from_file(context_file)
            if recon_context:
                print(f"[+] Loaded context: {context_file}")

        # Get operation mode
        mode_map = {
            "recon_only": OperationMode.RECON_ONLY,
            "full_auto": OperationMode.FULL_AUTO,
            "prompt_only": OperationMode.PROMPT_ONLY,
            "analyze_only": OperationMode.ANALYZE_ONLY,
        }
        op_mode = mode_map.get(mode, OperationMode.FULL_AUTO)

        # Initialize LLM
        profile = llm_profile or self.config.get('llm', {}).get('default_profile')
        self._initialize_llm_manager(profile)

        print(f"[*] Session: {self.session_id}\n")

        # Run agent
        import asyncio

        async def run():
            async def log_cb(level: str, message: str):
                prefix = {"info": "[*]", "warning": "[!]", "error": "[X]", "debug": "[D]"}.get(level, "[*]")
                print(f"{prefix} {message}")

            async def progress_cb(progress: int, message: str):
                bar = "=" * (progress // 5) + ">" + " " * (20 - progress // 5)
                print(f"\r[{bar}] {progress}% - {message}", end="", flush=True)
                if progress == 100:
                    print()

            async with AutonomousAgent(
                target=target,
                mode=op_mode,
                llm_manager=self.llm_manager_instance,
                log_callback=log_cb,
                progress_callback=progress_cb,
                task=task,
                custom_prompt=prompt,
                recon_context=recon_context,
                config=self.config,
                prompt_file=prompt_file
            ) as agent:
                return await agent.run()

        try:
            report = asyncio.run(run())
        except Exception as e:
            logger.error(f"Agent error: {e}")
            import traceback
            traceback.print_exc()
            return {"error": str(e)}

        # Save results
        result_file = f"results/autonomous_{self.session_id}.json"
        with open(result_file, 'w') as f:
            json.dump(report, f, indent=2, default=str)
        print(f"\n[+] Results saved: {result_file}")

        # Generate HTML report
        if report.get("findings"):
            self._generate_autonomous_report(report)

        return report

    def list_tasks(self):
        """List all available tasks from library"""
        if not get_task_library:
            print("[!] Task library not available")
            return

        library = get_task_library()
        tasks = library.list_tasks()

        print(f"\n{'='*70}")
        print(" TASK LIBRARY")
        print(f"{'='*70}\n")

        # Group by category
        by_category = {}
        for task in tasks:
            cat = task.category
            if cat not in by_category:
                by_category[cat] = []
            by_category[cat].append(task)

        for category, cat_tasks in by_category.items():
            print(f"[{category.upper()}]")
            for task in cat_tasks:
                preset = " (preset)" if task.is_preset else ""
                print(f" {task.id:<25} - {task.name}{preset}")
            print()

        print(f"Total: {len(tasks)} tasks")
        print("\nUse: run_task <task_id> <target>")

    def create_task(self, name: str, prompt: str, category: str = "custom"):
        """Create a new task in the library"""
        if not get_task_library or not Task:
            print("[!] Task library not available")
            return

        library = get_task_library()
        task = Task(
            id=f"custom_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            name=name,
            description=prompt[:100],
            category=category,
            prompt=prompt,
            is_preset=False
        )
        library.create_task(task)
        print(f"[+] Task created: {task.id}")
        return task

    def _generate_autonomous_report(self, report: Dict):
        """Generate HTML report from autonomous agent results"""
        from core.report_generator import ReportGenerator

        # Convert to scan format
        scan_data = {
            "target": report.get("target", ""),
            "scan_started": report.get("scan_date", ""),
            "scan_completed": datetime.now().isoformat(),
            "vulnerabilities": [],
            "technologies": report.get("summary", {}).get("technologies", []),
        }

        for finding in report.get("findings", []):
            vuln = {
                "title": finding.get("title", "Unknown"),
                "severity": finding.get("severity", "medium"),
                "description": finding.get("description", ""),
                "technical_details": finding.get("technical_details", ""),
                "affected_endpoint": finding.get("endpoint", ""),
                "payload": finding.get("payload", ""),
                "evidence": finding.get("evidence", ""),
                "impact": finding.get("impact", ""),
                "poc_code": finding.get("poc_code", ""),
                "exploitation_steps": finding.get("exploitation_steps", []),
                "remediation": finding.get("remediation", ""),
                "references": finding.get("references", []),
            }

            # Add CVSS
            if finding.get("cvss"):
                cvss = finding["cvss"]
                vuln["cvss_score"] = cvss.get("score", 0)
                vuln["cvss_vector"] = cvss.get("vector", "")

            # Add CWE
            if finding.get("cwe_id"):
                vuln["cwe_id"] = finding["cwe_id"]

            scan_data["vulnerabilities"].append(vuln)

        # Generate LLM analysis summary
        summary = report.get("summary", {})
        llm_analysis = f"""
## Autonomous AI Agent Assessment Report

**Target:** {report.get('target', '')}
**Mode:** {report.get('mode', 'full_auto').upper()}
**Scan Date:** {report.get('scan_date', '')}

### Executive Summary
Risk Level: **{summary.get('risk_level', 'UNKNOWN')}**

### Findings Summary
| Severity | Count |
|----------|-------|
| Critical | {summary.get('critical', 0)} |
| High | {summary.get('high', 0)} |
| Medium | {summary.get('medium', 0)} |
| Low | {summary.get('low', 0)} |
| Info | {summary.get('info', 0)} |

**Total Findings:** {summary.get('total_findings', 0)}
**Endpoints Tested:** {summary.get('endpoints_tested', 0)}

### Technologies Detected
{', '.join(summary.get('technologies', [])) or 'None detected'}

### Detailed Findings
"""
        for i, finding in enumerate(report.get("findings", []), 1):
            cvss_info = ""
            if finding.get("cvss"):
                cvss_info = f"**CVSS:** {finding['cvss'].get('score', 'N/A')} ({finding['cvss'].get('vector', '')})"

            llm_analysis += f"""
---
#### {i}. {finding.get('title', 'Unknown')} [{finding.get('severity', 'medium').upper()}]
{cvss_info}
**CWE:** {finding.get('cwe_id', 'N/A')}
**Endpoint:** `{finding.get('endpoint', 'N/A')}`

**Description:**
{finding.get('description', 'No description')}

**Impact:**
{finding.get('impact', 'No impact assessment')}

**Evidence:**
```
{finding.get('evidence', 'No evidence')}
```

**Proof of Concept:**
```python
{finding.get('poc_code', '# No PoC available')}
```

**Remediation:**
{finding.get('remediation', 'No remediation provided')}

"""

        # Recommendations
        if report.get("recommendations"):
            llm_analysis += "\n### Recommendations\n"
            for rec in report["recommendations"]:
                llm_analysis += f"- {rec}\n"

        report_gen = ReportGenerator(scan_data, llm_analysis)
        html_report = report_gen.save_report("reports")
        print(f"[+] HTML Report: {html_report}")

    def _generate_agent_report(self, report: Dict):
        """Generate HTML report from AI agent results"""
        from core.report_generator import ReportGenerator

        # Convert agent report to scan format
        scan_data = {
            "target": report.get("target", ""),
            "scan_started": report.get("scan_date", ""),
            "scan_completed": datetime.now().isoformat(),
            "vulnerabilities": [],
            "technologies": report.get("summary", {}).get("technologies", []),
        }

        for finding in report.get("findings", []):
            scan_data["vulnerabilities"].append({
                "title": f"{finding['type'].upper()} - {finding['severity'].upper()}",
                "severity": finding["severity"],
                "description": finding.get("evidence", ""),
                "affected_endpoint": finding.get("endpoint", ""),
                "payload": finding.get("payload", ""),
                "poc_code": finding.get("poc_code", ""),
                "exploitation_steps": finding.get("exploitation_steps", []),
                "llm_analysis": finding.get("llm_analysis", ""),
            })

        # Generate LLM analysis summary
        llm_analysis = f"""
## AI Agent Analysis Summary

**Target:** {report.get('target', '')}
**Scan Date:** {report.get('scan_date', '')}
**LLM Enabled:** {report.get('llm_enabled', False)}

### Summary
- Total Endpoints: {report.get('summary', {}).get('total_endpoints', 0)}
- Total Parameters: {report.get('summary', {}).get('total_parameters', 0)}
- Vulnerabilities Found: {report.get('summary', {}).get('total_vulnerabilities', 0)}
- Critical: {report.get('summary', {}).get('critical', 0)}
- High: {report.get('summary', {}).get('high', 0)}
- Medium: {report.get('summary', {}).get('medium', 0)}
- Low: {report.get('summary', {}).get('low', 0)}

### Findings
"""
        for i, finding in enumerate(report.get("findings", []), 1):
            llm_analysis += f"""
#### {i}. {finding['type'].upper()} [{finding['severity'].upper()}]
- **Endpoint:** {finding.get('endpoint', '')}
- **Payload:** `{finding.get('payload', '')}`
- **Evidence:** {finding.get('evidence', '')}
- **Confidence:** {finding.get('confidence', 'medium')}
- **LLM Analysis:** {finding.get('llm_analysis', 'N/A')}
"""

        report_gen = ReportGenerator(scan_data, llm_analysis)
        html_report = report_gen.save_report("reports")
        print(f"[+] HTML Report: {html_report}")

    def check_tools_status(self):
        """Check and display status of all pentest tools"""
        print("\n" + "="*60)
@@ -1138,6 +1619,181 @@ CONTEXTO DE RECON:
                    else:
                        print("Usage: analyze <context_file.json>")
                        print(" Then enter your analysis prompt")
                elif cmd.startswith('agent ') or cmd.startswith('ai_agent '):
                    # AI Agent command
                    # Format: agent <target> [--prompt <file.md>] [--context <context.json>]
                    parts = cmd.split()
                    if len(parts) >= 2:
                        target = parts[1].strip().strip('"')
                        prompt_file = None
                        context_file = None

                        # Parse optional arguments
                        i = 2
                        while i < len(parts):
                            if parts[i] in ['--prompt', '-p'] and i + 1 < len(parts):
                                prompt_file = parts[i + 1].strip().strip('"')
                                i += 2
                            elif parts[i] in ['--context', '-c'] and i + 1 < len(parts):
                                context_file = parts[i + 1].strip().strip('"')
                                i += 2
                            else:
                                i += 1

                        # Get LLM profile
                        llm_profile = self.config.get('llm', {}).get('default_profile')
                        self.run_ai_agent(target, prompt_file, context_file, llm_profile)
                    else:
                        print("Usage: agent <target_url> [--prompt <file.md>] [--context <context.json>]")
                        print("")
                        print("Examples:")
                        print(" agent https://example.com")
                        print(" agent https://example.com --prompt bug_bounty.md")
                        print(" agent https://example.com --context results/context_X.json")
                        print("")
                        print("The AI Agent will:")
                        print(" 1. Use LLM for intelligent vulnerability testing")
                        print(" 2. Confirm findings with AI (no false positives)")
                        print(" 3. Generate PoC code for exploits")
                        print(" 4. Use recon data if context file provided")

                # === NEW AUTONOMOUS AGENT MODES ===
                elif cmd.startswith('pentest ') or cmd.startswith('full_auto '):
                    # Full autonomous pentest mode
                    parts = cmd.split()
                    if len(parts) >= 2:
                        target = parts[1].strip().strip('"')
                        task_id = None
                        prompt_file = None
                        context_file = None

                        i = 2
                        while i < len(parts):
                            if parts[i] in ['--task', '-t'] and i + 1 < len(parts):
                                task_id = parts[i + 1].strip()
                                i += 2
                            elif parts[i] in ['--prompt', '-p'] and i + 1 < len(parts):
                                prompt_file = parts[i + 1].strip().strip('"')
                                i += 2
                            elif parts[i] in ['--context', '-c'] and i + 1 < len(parts):
                                context_file = parts[i + 1].strip().strip('"')
                                i += 2
                            else:
                                i += 1

                        self.run_autonomous_agent(target, "full_auto", task_id, None, prompt_file, context_file)
                    else:
                        print("Usage: pentest <target> [--task <task_id>] [--prompt <file.md>] [--context <file.json>]")
                        print("")
                        print("Full autonomous pentest: Recon -> Analyze -> Test -> Report")

                elif cmd.startswith('recon_only '):
                    # Recon-only mode
                    parts = cmd.split()
                    if len(parts) >= 2:
                        target = parts[1].strip().strip('"')
                        self.run_autonomous_agent(target, "recon_only")
                    else:
                        print("Usage: recon_only <target>")
                        print("Just reconnaissance, no vulnerability testing")

                elif cmd.startswith('prompt_only '):
                    # Prompt-only mode (high token usage)
                    parts = cmd.split()
                    if len(parts) >= 2:
                        target = parts[1].strip().strip('"')
                        prompt = None
                        prompt_file = None

                        i = 2
                        while i < len(parts):
                            if parts[i] in ['--prompt', '-p'] and i + 1 < len(parts):
                                prompt_file = parts[i + 1].strip().strip('"')
                                i += 2
                            else:
                                i += 1

                        if not prompt_file:
                            print("Enter your prompt (end with empty line):")
                            lines = []
                            while True:
                                line = input()
                                if not line:
                                    break
                                lines.append(line)
                            prompt = "\n".join(lines)

                        print("\n[!] WARNING: PROMPT-ONLY MODE uses more tokens!")
                        self.run_autonomous_agent(target, "prompt_only", None, prompt, prompt_file)
                    else:
                        print("Usage: prompt_only <target> [--prompt <file.md>]")
                        print("")
                        print("WARNING: This mode uses significantly more tokens!")
                        print("The AI will decide what tools to use based on your prompt.")

                elif cmd.startswith('analyze_only '):
                    # Analyze-only mode
                    parts = cmd.split()
                    if len(parts) >= 2:
                        target = parts[1].strip().strip('"')
                        context_file = None

                        i = 2
                        while i < len(parts):
                            if parts[i] in ['--context', '-c'] and i + 1 < len(parts):
                                context_file = parts[i + 1].strip().strip('"')
                                i += 2
                            else:
                                i += 1

                        self.run_autonomous_agent(target, "analyze_only", None, None, None, context_file)
                    else:
                        print("Usage: analyze_only <target> [--context <file.json>]")
                        print("Analysis only, no active testing")

                # === TASK LIBRARY COMMANDS ===
                elif cmd in ['tasks', 'list_tasks']:
                    self.list_tasks()

                elif cmd.startswith('run_task '):
                    parts = cmd.split()
                    if len(parts) >= 3:
                        task_id = parts[1].strip()
                        target = parts[2].strip().strip('"')
                        context_file = None

                        i = 3
                        while i < len(parts):
                            if parts[i] in ['--context', '-c'] and i + 1 < len(parts):
                                context_file = parts[i + 1].strip().strip('"')
                                i += 2
                            else:
                                i += 1

                        self.run_autonomous_agent(target, "full_auto", task_id, None, None, context_file)
                    else:
                        print("Usage: run_task <task_id> <target> [--context <file.json>]")
                        print("Use 'tasks' to list available tasks")

                elif cmd.startswith('create_task'):
                    print("Create a new task for the library")
                    name = input("Task name: ").strip()
                    if not name:
                        print("Cancelled")
                        continue
                    print("Enter task prompt (end with empty line):")
                    lines = []
                    while True:
                        line = input()
                        if not line:
                            break
                        lines.append(line)
                    prompt = "\n".join(lines)
                    if prompt:
                        self.create_task(name, prompt)
                    else:
                        print("Cancelled - no prompt provided")

                else:
                    print("Unknown command. Type 'help' for available commands.")
            except KeyboardInterrupt:
@@ -1236,9 +1892,32 @@ TOOL MANAGEMENT:
  install_tools - Install required pentest tools
  check_tools - Check which tools are installed

AGENT COMMANDS (AI Analysis):
  run_agent <role> "<input>" - Execute AI agent with input
  set_agent <agent_name> - Set default agent for AI analysis
AUTONOMOUS AI AGENT (Like PentAGI):
  pentest <url> - Full auto: Recon -> Analyze -> Test -> Report
  pentest <url> --task <id> - Use preset task from library
  recon_only <url> - Just reconnaissance, no testing
  prompt_only <url> - AI decides everything (HIGH TOKEN USAGE!)
  analyze_only <url> -c <f> - Analysis only, no active testing

  The autonomous agent generates:
  - CVSS scores with vector strings
  - Detailed descriptions and impact analysis
  - Working PoC code
  - Remediation recommendations
  - Professional HTML reports

TASK LIBRARY:
  tasks / list_tasks - List all available tasks
  run_task <id> <url> - Run a task from the library
  create_task - Create and save a new task

  Preset tasks include: full_bug_bounty, vuln_owasp_top10,
  vuln_api_security, recon_full, etc.

LEGACY AGENT:
  agent <url> - Simple AI agent (basic testing)
  run_agent <role> "<input>" - Execute an agent role
  set_agent <agent_name> - Set default agent

CONFIGURATION:
  list_roles - List all available agent roles
@@ -1263,6 +1942,8 @@ EXAMPLES:
  analyze results/context_X.json - LLM analysis of context file
  scan https://example.com - Full pentest scan
  quick_scan 192.168.1.1 - Quick vulnerability check
  agent https://target.com - AI Agent pentest (uses LLM)
  agent https://target.com -p bug_bounty.md -c context.json
=======================================================================
""")

@@ -1324,6 +2005,26 @@ EXAMPLES:
    parser.add_argument('--quick-scan', metavar='TARGET',
                        help='Run QUICK pentest scan on target')

    # Autonomous AI Agent options
    parser.add_argument('--pentest', metavar='TARGET',
                        help='Run full autonomous pentest: Recon -> Analyze -> Test -> Report')
    parser.add_argument('--recon-only', metavar='TARGET',
                        help='Run reconnaissance only, no vulnerability testing')
    parser.add_argument('--prompt-only', metavar='TARGET',
                        help='AI decides everything based on prompt (WARNING: High token usage!)')
    parser.add_argument('--analyze-only', metavar='TARGET',
                        help='Analysis only mode, no active testing')
    parser.add_argument('--task', metavar='TASK_ID',
                        help='Task ID from library to execute')
    parser.add_argument('--prompt-file', '-pf', metavar='FILE',
                        help='Custom .md prompt file for AI agent')
    parser.add_argument('--list-tasks', action='store_true',
                        help='List all available tasks from library')

    # Legacy AI Agent options
    parser.add_argument('--agent', metavar='TARGET',
                        help='Run simple AI Agent on target')

    # Tool management
    parser.add_argument('--install-tools', action='store_true',
                        help='Install required pentest tools (nmap, sqlmap, nuclei, etc.)')
@@ -1393,6 +2094,62 @@ EXAMPLES:
            print(f"[+] Loaded context from: {args.context_file}")
        framework.execute_real_scan(args.quick_scan, scan_type="quick", agent_role=agent_role, recon_context=context)

    # Handle Autonomous Pentest (Full Auto)
    elif args.pentest:
        framework.run_autonomous_agent(
            target=args.pentest,
            mode="full_auto",
            task_id=args.task,
            prompt_file=args.prompt_file,
            context_file=args.context_file,
            llm_profile=args.llm_profile
        )

    # Handle Recon Only
    elif args.recon_only:
        framework.run_autonomous_agent(
            target=args.recon_only,
            mode="recon_only",
            llm_profile=args.llm_profile
        )

    # Handle Prompt Only (High Token Usage Warning)
    elif args.prompt_only:
        print("\n" + "!"*70)
        print(" WARNING: PROMPT-ONLY MODE")
        print(" This mode uses significantly more tokens than other modes.")
        print(" The AI will decide what tools to use based on your prompt.")
        print("!"*70 + "\n")
        framework.run_autonomous_agent(
            target=args.prompt_only,
            mode="prompt_only",
            prompt_file=args.prompt_file,
            context_file=args.context_file,
            llm_profile=args.llm_profile
        )

    # Handle Analyze Only
    elif args.analyze_only:
        framework.run_autonomous_agent(
            target=args.analyze_only,
            mode="analyze_only",
            context_file=args.context_file,
            llm_profile=args.llm_profile
        )

    # Handle List Tasks
    elif args.list_tasks:
        framework.list_tasks()

    # Handle Legacy AI Agent
    elif args.agent:
        framework.run_ai_agent(
            target=args.agent,
            prompt_file=args.prompt_file,
            context_file=args.context_file,
            llm_profile=args.llm_profile
        )

    # Handle list commands
    elif args.list_agents:
        framework.list_agent_roles()
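A hedged sketch of how the CLI flags added in this diff might be invoked. The flag names are taken from the argparse definitions and args.* attributes above (--context-file is inferred from args.context_file and may be spelled differently elsewhere in the file); running with python3 from the repository root and the full_bug_bounty preset task are assumptions based on the built-in help text.

    # Full autonomous pentest using a preset task and a custom prompt file
    python3 neurosploit.py --pentest https://example.com --task full_bug_bounty --prompt-file bug_bounty.md

    # Reconnaissance only, then analysis of a saved recon context without active testing
    python3 neurosploit.py --recon-only https://example.com
    python3 neurosploit.py --analyze-only https://example.com --context-file results/context_X.json

    # List the preset tasks shipped with the task library
    python3 neurosploit.py --list-tasks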
start.sh (new file, 104 lines)
@@ -0,0 +1,104 @@
#!/bin/bash

# NeuroSploit v3 Startup Script

echo "================================================"
echo " NeuroSploit v3 - AI-Powered Penetration Testing"
echo "================================================"
echo ""

# Check for .env file
if [ ! -f ".env" ]; then
    echo "[!] No .env file found. Creating from .env.example..."
    cp .env.example .env
    echo ""
    echo "=========================================="
    echo " IMPORTANT: Configure your API key!"
    echo "=========================================="
    echo ""
    echo "Edit the .env file and add your Claude API key:"
    echo " ANTHROPIC_API_KEY=sk-ant-..."
    echo ""
    echo "Get your API key at: https://console.anthropic.com/"
    echo ""
    read -p "Press Enter to continue (or Ctrl+C to exit and configure)..."
    echo ""
fi

# Check if API key is configured
if grep -q "^ANTHROPIC_API_KEY=$" .env 2>/dev/null || grep -q "^ANTHROPIC_API_KEY=your-" .env 2>/dev/null; then
    echo "[WARNING] ANTHROPIC_API_KEY not configured in .env"
    echo "The AI agent will not work without an API key!"
    echo ""
fi

# Check for lite mode
COMPOSE_FILE="docker-compose.yml"
if [ "$1" = "--lite" ] || [ "$1" = "-l" ]; then
    echo "[INFO] Using LITE mode (faster build, no security tools)"
    COMPOSE_FILE="docker-compose.lite.yml"
fi

# Check if docker-compose is available
if command -v docker-compose &> /dev/null; then
    echo "Starting with Docker Compose..."
    docker-compose -f $COMPOSE_FILE up -d
    echo ""
    echo "NeuroSploit is starting..."
    echo " - Backend API: http://localhost:8000"
    echo " - Web Interface: http://localhost:3000"
    echo " - API Docs: http://localhost:8000/api/docs"
    echo " - LLM Status: http://localhost:8000/api/v1/agent/status"
    echo ""
    echo "Run 'docker-compose logs -f' to view logs"
    echo ""
    echo "To check if LLM is configured:"
    echo " curl http://localhost:8000/api/v1/agent/status"
# Detect the Docker Compose v2 plugin: 'docker compose version' succeeds when the
# plugin is available, whereas 'command -v docker compose' only looks for a
# standalone 'compose' binary that normally does not exist.
elif command -v docker &> /dev/null && docker compose version &> /dev/null; then
echo "Starting with Docker Compose (v2)..."
|
||||
docker compose -f $COMPOSE_FILE up -d
|
||||
echo ""
|
||||
echo "NeuroSploit is starting..."
|
||||
echo " - Backend API: http://localhost:8000"
|
||||
echo " - Web Interface: http://localhost:3000"
|
||||
echo " - API Docs: http://localhost:8000/api/docs"
|
||||
echo " - LLM Status: http://localhost:8000/api/v1/agent/status"
|
||||
else
|
||||
echo "Docker not found. Starting manually..."
|
||||
echo ""
|
||||
|
||||
# Start backend
|
||||
echo "Starting backend..."
|
||||
cd backend
|
||||
if [ ! -d "venv" ]; then
|
||||
python3 -m venv venv
|
||||
source venv/bin/activate
|
||||
pip install -r requirements.txt
|
||||
else
|
||||
source venv/bin/activate
|
||||
fi
|
||||
python -m uvicorn backend.main:app --host 0.0.0.0 --port 8000 &
|
||||
BACKEND_PID=$!
|
||||
cd ..
|
||||
|
||||
# Start frontend
|
||||
echo "Starting frontend..."
|
||||
cd frontend
|
||||
if [ ! -d "node_modules" ]; then
|
||||
npm install
|
||||
fi
|
||||
npm run dev &
|
||||
FRONTEND_PID=$!
|
||||
cd ..
|
||||
|
||||
echo ""
|
||||
echo "NeuroSploit is running:"
|
||||
echo " - Backend API: http://localhost:8000"
|
||||
echo " - Web Interface: http://localhost:3000"
|
||||
echo ""
|
||||
echo "Press Ctrl+C to stop"
|
||||
|
||||
# Wait for Ctrl+C
|
||||
trap "kill $BACKEND_PID $FRONTEND_PID 2>/dev/null" EXIT
|
||||
wait
|
||||
fi
|
||||
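For completeness, a small usage sketch for the startup script above, assuming Docker is installed and the file has been made executable:

    chmod +x start.sh
    ./start.sh           # full stack via Docker Compose
    ./start.sh --lite    # lite compose file, faster build

    # Once the containers are up, confirm the LLM-backed agent is reachable
    curl http://localhost:8000/api/v1/agent/status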
File diff suppressed because it is too large