From 9f75e1d8d290cb82ce50e598931c533c1c21737c Mon Sep 17 00:00:00 2001
From: Joas A Santos <34966120+CyberSecurityUP@users.noreply.github.com>
Date: Fri, 19 Dec 2025 13:26:15 -0300
Subject: [PATCH] Add files via upload

---
 agents/base_agent.py                    |  87 +++--
 config/config.json                      |  96 +++---
 core/llm_manager.py                     | 308 +++++-------------
 logs/neurosploit.log                    | 251 +++++++++++++++
 neurosploit.py                          | 406 ++++++++++++++++--------
 prompts/library.json                    |  22 +-
 prompts/md_library/bug_bounty_hunter.md |   5 +-
 prompts/md_library/cwe_expert.md        |  16 +
 prompts/md_library/exploit_expert.md    |   2 +-
 prompts/md_library/owasp_expert.md      |  18 ++
 requirements.txt                        |  12 +-
 11 files changed, 777 insertions(+), 446 deletions(-)
 create mode 100644 logs/neurosploit.log
 create mode 100644 prompts/md_library/cwe_expert.md
 create mode 100644 prompts/md_library/owasp_expert.md

diff --git a/agents/base_agent.py b/agents/base_agent.py
index c307392..e7b045a 100644
--- a/agents/base_agent.py
+++ b/agents/base_agent.py
@@ -1,6 +1,8 @@
 import json
 import logging
 from typing import Dict, Any, List, Optional
+import re
+import subprocess
 
 from core.llm_manager import LLMManager
 
@@ -33,20 +35,27 @@ class BaseAgent:
             logger.warning(f"No user prompt template found for agent {self.agent_name}.")
             return user_input # Fallback to raw user input
 
-        # Format the user prompt with dynamic context
-        # Use a safe way to format, ensuring all expected keys are present or handled.
-        # This assumes the template uses specific placeholders like {target_info_json}, {recon_data_json} etc.
-        # For a generic solution, we pass all additional_context as a single JSON.
-        try:
-            formatted_prompt = user_prompt_template.format(
-                user_input=user_input,
-                additional_context_json=json.dumps(additional_context or {}, indent=2)
-                # Add more specific placeholders if needed, like target_info_json, recon_data_json etc.
-                # E.g., target_info_json=json.dumps(additional_context.get('target_info', {}), indent=2)
-            )
-        except KeyError as e:
-            logger.error(f"Missing key in prompt template for {self.agent_name}: {e}. Falling back to basic prompt.")
-            formatted_prompt = f"{user_prompt_template}\n\nContext: {json.dumps(additional_context or {}, indent=2)}\n\nInput: {user_input}"
+        # Create a dictionary with all the possible placeholders
+        format_dict = {
+            "user_input": user_input,
+            "target_info_json": user_input, # for bug_bounty_hunter
+            "recon_data_json": json.dumps(additional_context or {}, indent=2), # for bug_bounty_hunter
+            "additional_context_json": json.dumps(additional_context or {}, indent=2)
+        }
+
+        if additional_context:
+            for key, value in additional_context.items():
+                if isinstance(value, (dict, list)):
+                    format_dict[f"{key}_json"] = json.dumps(value, indent=2)
+                else:
+                    format_dict[key] = value
+
+        # Use a safe way to format: placeholders missing from format_dict are left as-is instead of raising KeyError
+        class _SafeDict(dict):
+            def __missing__(self, key):
+                return "{" + key + "}"
+
+        formatted_prompt = user_prompt_template.format_map(_SafeDict(format_dict))
 
         return formatted_prompt
 
@@ -65,14 +70,59 @@ class BaseAgent:
         # Prepare the user prompt with current input and campaign data
         prepared_user_prompt = self._prepare_prompt(user_input, campaign_data)
 
-        llm_response_text = self.llm_manager.generate(prepared_user_prompt, system_prompt)
-
-        # Here's where we would integrate tool usage based on llm_response_text
-        # and self.tools_allowed. This will be more complex and potentially involve
-        # re-prompting the LLM or using a function-calling mechanism.
-        # For now, just return the LLM's direct response.
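+        # The LLM is expected to request a tool on a single line of the form
+        # "[TOOL] <name>: <args>" (for example, "[TOOL] nmap: -sV example.com" -- an illustrative request),
+        # which _parse_llm_response() extracts below; a response without that marker is treated as the final answer.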
+        # Loop for tool usage
+        for _ in range(5): # Limit to 5 iterations to prevent infinite loops
+            llm_response_text = self.llm_manager.generate(prepared_user_prompt, system_prompt)
+
+            tool_name, tool_args = self._parse_llm_response(llm_response_text)
+
+            if tool_name:
+                if tool_name in self.config.get('tools', {}):
+                    tool_path = self.config['tools'][tool_name]
+                    tool_output = self._execute_tool(tool_path, tool_args)
+                    prepared_user_prompt += f"\n\n[TOOL_OUTPUT]\n{tool_output}"
+                else:
+                    if self._ask_for_permission(f"Tool '{tool_name}' not found. Do you want to try to download it?"):
+                        self.download_tool(tool_name)
+                        # We don't execute the tool in this iteration, but the LLM can try again in the next one
+                        prepared_user_prompt += f"\n\n[TOOL_DOWNLOAD] Tool '{tool_name}' downloaded."
+                    else:
+                        prepared_user_prompt += f"\n\n[TOOL_ERROR] Tool '{tool_name}' not found and permission to download was denied."
+            else:
+                return {"agent_name": self.agent_name, "input": user_input, "llm_response": llm_response_text}
         return {"agent_name": self.agent_name, "input": user_input, "llm_response": llm_response_text}
+
+    def _parse_llm_response(self, response: str) -> (Optional[str], Optional[str]):
+        """Parses the LLM response to find a tool to use."""
+        match = re.search(r"\[TOOL\]\s*(\w+)\s*:\s*(.*)", response)
+        if match:
+            return match.group(1), match.group(2)
+        return None, None
+
+    def _execute_tool(self, tool_path: str, args: str) -> str:
+        """Executes a tool and returns the output."""
+        try:
+            result = subprocess.run(f"{tool_path} {args}", shell=True, capture_output=True, text=True)
+            return result.stdout + result.stderr
+        except Exception as e:
+            return f"Error executing tool: {e}"
+
+    def _ask_for_permission(self, message: str) -> bool:
+        """Asks the user for permission."""
+        response = input(f"{message} (y/n): ").lower()
+        return response == 'y'
+
+    def download_tool(self, tool_name: str):
+        """Downloads a tool."""
+        # This is a placeholder for a more sophisticated tool download mechanism.
+        # For now, we'll just log the request.
+        logger.info(f"User requested to download tool: {tool_name}")
+        print(f"Downloading tool '{tool_name}'... (This is a placeholder, no actual download will be performed)")
+
     def get_allowed_tools(self) -> List[str]:
         """Returns the list of tools allowed for this agent role."""
         return self.tools_allowed
diff --git a/config/config.json b/config/config.json
index e833f2b..06abe42 100644
--- a/config/config.json
+++ b/config/config.json
@@ -14,21 +14,7 @@
         "search_context_level": "medium",
         "pdf_support_enabled": false,
         "guardrails_enabled": true,
-        "hallucination_mitigation_strategy": "grounding"
-      },
-      "claude_opus_default": {
-        "provider": "claude",
-        "model": "claude-3-opus-20240229",
-        "api_key": "${ANTHROPIC_API_KEY}",
-        "temperature": 0.3,
-        "max_tokens": 4096,
-        "input_token_limit": 200000,
-        "output_token_limit": 4000,
-        "cache_enabled": true,
-        "search_context_level": "high",
-        "pdf_support_enabled": true,
-        "guardrails_enabled": true,
-        "hallucination_mitigation_strategy": "self_reflection"
+        "hallucination_mitigation_strategy": null
       },
       "gemini_pro_default": {
         "provider": "gemini",
@@ -44,74 +30,104 @@
         "guardrails_enabled": true,
         "hallucination_mitigation_strategy": "consistency_check"
       },
-      "gpt_4o_default": {
-        "provider": "gpt",
-        "model": "gpt-4o",
-        "api_key": "${OPENAI_API_KEY}",
-        "temperature": 0.5,
+      "claude_opus_default": {
+        "provider": "claude",
+        "model": "claude-3-opus-20240229",
+        "api_key": "${ANTHROPIC_API_KEY}",
+        "temperature": 0.7,
         "max_tokens": 4096,
-        "input_token_limit": 128000,
-        "output_token_limit": 4000,
+        "input_token_limit": 200000,
+        "output_token_limit": 4096,
         "cache_enabled": true,
         "search_context_level": "high",
         "pdf_support_enabled": true,
         "guardrails_enabled": true,
-        "hallucination_mitigation_strategy": "grounding"
+        "hallucination_mitigation_strategy": "self_reflection"
+      },
+      "gpt_4o_default": {
+        "provider": "gpt",
+        "model": "gpt-4o",
+        "api_key": "${OPENAI_API_KEY}",
+        "temperature": 0.7,
+        "max_tokens": 4096,
+        "input_token_limit": 128000,
+        "output_token_limit": 4096,
+        "cache_enabled": true,
+        "search_context_level": "high",
+        "pdf_support_enabled": true,
+        "guardrails_enabled": true,
+        "hallucination_mitigation_strategy": "consistency_check"
+      }
     }
   },
   "agent_roles": {
     "bug_bounty_hunter": {
       "enabled": true,
-      "llm_profile": "gemini_pro_default",
-      "tools_allowed": ["subfinder", "nuclei", "burpsuite", "sqlmap"],
+      "tools_allowed": [
+        "subfinder",
+        "nuclei",
+        "burpsuite",
+        "sqlmap"
+      ],
       "description": "Focuses on web application vulnerabilities, leveraging recon and exploitation tools."
     },
     "blue_team_agent": {
       "enabled": true,
-      "llm_profile": "claude_opus_default",
       "tools_allowed": [],
       "description": "Analyzes logs and telemetry for threats, provides defensive strategies."
     },
     "exploit_expert": {
       "enabled": true,
-      "llm_profile": "gpt_4o_default",
-      "tools_allowed": ["metasploit", "nmap"],
+      "tools_allowed": [
+        "metasploit",
+        "nmap"
+      ],
       "description": "Devises exploitation strategies and payloads for identified vulnerabilities."
     },
     "red_team_agent": {
       "enabled": true,
-      "llm_profile": "gemini_pro_default",
-      "tools_allowed": ["nmap", "metasploit", "hydra"],
+      "tools_allowed": [
+        "nmap",
+        "metasploit",
+        "hydra"
+      ],
       "description": "Plans and executes simulated attacks to test an organization's defenses."
     },
     "replay_attack_specialist": {
       "enabled": true,
-      "llm_profile": "ollama_llama3_default",
-      "tools_allowed": ["burpsuite"],
+      "tools_allowed": [
+        "burpsuite"
+      ],
       "description": "Identifies and leverages replay attack vectors in network traffic or authentication."
}, "pentest_generalist": { "enabled": true, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["nmap", "subfinder", "nuclei", "metasploit", "burpsuite", "sqlmap", "hydra"], + "tools_allowed": [ + "nmap", + "subfinder", + "nuclei", + "metasploit", + "burpsuite", + "sqlmap", + "hydra" + ], "description": "Performs comprehensive penetration tests across various domains." }, "owasp_expert": { "enabled": true, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["burpsuite", "sqlmap"], + "tools_allowed": [ + "burpsuite", + "sqlmap" + ], "description": "Specializes in assessing web applications against OWASP Top 10 vulnerabilities." }, "cwe_expert": { "enabled": true, - "llm_profile": "claude_opus_default", "tools_allowed": [], "description": "Analyzes code and reports for weaknesses based on MITRE CWE Top 25." }, "malware_analyst": { "enabled": true, - "llm_profile": "gpt_4o_default", "tools_allowed": [], "description": "Examines malware samples to understand functionality and identify IOCs." } @@ -128,9 +144,7 @@ "metasploit": "/usr/bin/msfconsole", "burpsuite": "/usr/bin/burpsuite", "sqlmap": "/usr/bin/sqlmap", - "hydra": "/usr/bin/hydra", - "subfinder": "/usr/local/bin/subfinder", - "nuclei": "/usr/local/bin/nuclei" + "hydra": "/usr/bin/hydra" }, "output": { "format": "json", diff --git a/core/llm_manager.py b/core/llm_manager.py index 1b63459..f31b47d 100644 --- a/core/llm_manager.py +++ b/core/llm_manager.py @@ -43,215 +43,76 @@ class LLMManager: self.guardrails_enabled = self.active_profile.get('guardrails_enabled', False) self.hallucination_mitigation_strategy = self.active_profile.get('hallucination_mitigation_strategy', None) - - # New prompt loading - - - self.json_prompts_file_path = Path("prompts/library.json") - - - self.md_prompts_dir_path = Path("prompts/md_library") - - - self.prompts = self._load_all_prompts() # New method to load both - - - - - - logger.info(f"Initialized LLM Manager - Provider: {self.provider}, Model: {self.model}, Profile: {self.default_profile_name}") - - - - - - def _get_api_key(self, api_key_config: str) -> str: - - - """Helper to get API key from config or environment variable""" - - - if api_key_config.startswith('${') and api_key_config.endswith('}'): - - - env_var = api_key_config[2:-1] - - - return os.getenv(env_var, '') - - - return api_key_config - - - - - - def _load_all_prompts(self) -> Dict: - - - """Load prompts from both JSON library and Markdown library files.""" - - - all_prompts = { - - - "json_prompts": {}, - - - "md_prompts": {} - - - } - - - - - - # Load from JSON library - - - if self.json_prompts_file_path.exists(): - - - try: - - - with open(self.json_prompts_file_path, 'r') as f: - - - all_prompts["json_prompts"] = json.load(f) - - - logger.info(f"Loaded prompts from JSON library: {self.json_prompts_file_path}") - - - except Exception as e: - - - logger.error(f"Error loading prompts from {self.json_prompts_file_path}: {e}") - - - else: - - - logger.warning(f"JSON prompts file not found at {self.json_prompts_file_path}. 
Some AI functionalities might be limited.") - - - - - - # Load from Markdown library - - - if self.md_prompts_dir_path.is_dir(): - - - for md_file in self.md_prompts_dir_path.glob("*.md"): - - - try: - - - content = md_file.read_text() - - - prompt_name = md_file.stem # Use filename as prompt name - - - - - - user_prompt_match = re.search(r"## User Prompt\n(.*?)(?=\n## System Prompt|\Z)", content, re.DOTALL) - - - system_prompt_match = re.search(r"## System Prompt\n(.*?)(?=\n## User Prompt|\Z)", content, re.DOTALL) - - - - - - user_prompt = user_prompt_match.group(1).strip() if user_prompt_match else "" - - - system_prompt = system_prompt_match.group(1).strip() if system_prompt_match else "" - - - - - - if user_prompt or system_prompt: - - - all_prompts["md_prompts"][prompt_name] = { - - - "user_prompt": user_prompt, - - - "system_prompt": system_prompt - - - } - - - else: - - - logger.warning(f"No valid User or System Prompt found in {md_file.name}. Skipping.") - - - - - - except Exception as e: - - - logger.error(f"Error loading prompt from {md_file.name}: {e}") - - - logger.info(f"Loaded {len(all_prompts['md_prompts'])} prompts from Markdown library.") - - - else: - - - logger.warning(f"Markdown prompts directory not found at {self.md_prompts_dir_path}. Some AI functionalities might be limited.") - - - - - - return all_prompts - - - - - - def get_prompt(self, library_type: str, category: str, name: str, default: str = "") -> str: - - - """Retrieve a specific prompt by library type, category, and name. - - - `library_type` can be "json_prompts" or "md_prompts". - - - `category` can be a JSON top-level key (e.g., 'exploitation') or an MD filename (e.g., 'red_team_agent'). - - - `name` can be a JSON sub-key (e.g., 'ai_exploit_planning_user') or 'user_prompt'/'system_prompt' for MD. - - - """ - - - return self.prompts.get(library_type, {}).get(category, {}).get(name, default) - - - - - - def generate(self, prompt: str, system_prompt: Optional[str] = None) -> str: + # New prompt loading + self.json_prompts_file_path = Path("prompts/library.json") + self.md_prompts_dir_path = Path("prompts/md_library") + self.prompts = self._load_all_prompts() # New method to load both + + logger.info(f"Initialized LLM Manager - Provider: {self.provider}, Model: {self.model}, Profile: {self.default_profile_name}") + + def _get_api_key(self, api_key_config: str) -> str: + """Helper to get API key from config or environment variable""" + if api_key_config.startswith('${') and api_key_config.endswith('}'): + env_var = api_key_config[2:-1] + return os.getenv(env_var, '') + return api_key_config + + def _load_all_prompts(self) -> Dict: + """Load prompts from both JSON library and Markdown library files.""" + all_prompts = { + "json_prompts": {}, + "md_prompts": {} + } + + # Load from JSON library + if self.json_prompts_file_path.exists(): + try: + with open(self.json_prompts_file_path, 'r') as f: + all_prompts["json_prompts"] = json.load(f) + logger.info(f"Loaded prompts from JSON library: {self.json_prompts_file_path}") + except Exception as e: + logger.error(f"Error loading prompts from {self.json_prompts_file_path}: {e}") + else: + logger.warning(f"JSON prompts file not found at {self.json_prompts_file_path}. 
Some AI functionalities might be limited.") + + # Load from Markdown library + if self.md_prompts_dir_path.is_dir(): + for md_file in self.md_prompts_dir_path.glob("*.md"): + try: + content = md_file.read_text() + prompt_name = md_file.stem # Use filename as prompt name + + user_prompt_match = re.search(r"## User Prompt\n(.*?)(?=\n## System Prompt|\Z)", content, re.DOTALL) + system_prompt_match = re.search(r"## System Prompt\n(.*?)(?=\n## User Prompt|\Z)", content, re.DOTALL) + + user_prompt = user_prompt_match.group(1).strip() if user_prompt_match else "" + system_prompt = system_prompt_match.group(1).strip() if system_prompt_match else "" + + if user_prompt or system_prompt: + all_prompts["md_prompts"][prompt_name] = { + "user_prompt": user_prompt, + "system_prompt": system_prompt + } + else: + logger.warning(f"No valid User or System Prompt found in {md_file.name}. Skipping.") + + except Exception as e: + logger.error(f"Error loading prompt from {md_file.name}: {e}") + logger.info(f"Loaded {len(all_prompts['md_prompts'])} prompts from Markdown library.") + else: + logger.warning(f"Markdown prompts directory not found at {self.md_prompts_dir_path}. Some AI functionalities might be limited.") + + return all_prompts + + def get_prompt(self, library_type: str, category: str, name: str, default: str = "") -> str: + """Retrieve a specific prompt by library type, category, and name. + `library_type` can be "json_prompts" or "md_prompts". + `category` can be a JSON top-level key (e.g., 'exploitation') or an MD filename (e.g., 'red_team_agent'). + `name` can be a JSON sub-key (e.g., 'ai_exploit_planning_user') or 'user_prompt'/'system_prompt' for MD. + """ + return self.prompts.get(library_type, {}).get(category, {}).get(name, default) + + def generate(self, prompt: str, system_prompt: Optional[str] = None) -> str: """Generate response from LLM and apply hallucination mitigation if configured.""" raw_response = "" try: @@ -323,24 +184,25 @@ class LLMManager: try: if strategy == "grounding": - verification_prompt = ( - f"Review the following response:\n\n---\n{raw_response}\n---\n\n" - f"Based *only* on the context provided in the original prompt (user: '{original_prompt}', system: '{original_system_prompt or "None"}'), " - f"is this response factual and directly supported by the context? If not, correct it to be factual. " - f"If the response is completely unsourced or makes claims beyond the context, state 'UNSOURCED'." - ) + verification_prompt = f"""Review the following response: + +--- +{raw_response} +--- + +Based *only* on the context provided in the original prompt (user: '{original_prompt}', system: '{original_system_prompt or "None"}'), is this response factual and directly supported by the context? If not, correct it to be factual. 
If the response is completely unsourced or makes claims beyond the context, state 'UNSOURCED'.""" logger.debug("Applying grounding strategy: Re-prompting for factual verification.") return self.generate(verification_prompt, "You are a fact-checker whose sole purpose is to verify LLM output against provided context.") elif strategy == "self_reflection": - reflection_prompt = ( - f"Critically review the following response for accuracy, logical consistency, and adherence to the original prompt's instructions:\n\n" - f"Original Prompt (User): {original_prompt}\n" - f"Original Prompt (System): {original_system_prompt or "None"}\n\n" - f"Generated Response: {raw_response}\n\n" - f"Identify any potential hallucinations, inconsistencies, or areas where the response might have deviated from facts or instructions. " - f"If you find issues, provide a corrected and more reliable version of the response. If the response is good, state 'ACCURATE'." - ) + reflection_prompt = f"""Critically review the following response for accuracy, logical consistency, and adherence to the original prompt's instructions: + +Original Prompt (User): {original_prompt} +Original Prompt (System): {original_system_prompt or "None"} + +Generated Response: {raw_response} + +Identify any potential hallucinations, inconsistencies, or areas where the response might have deviated from facts or instructions. If you find issues, provide a corrected and more reliable version of the response. If the response is good, state 'ACCURATE'.""" logger.debug("Applying self-reflection strategy: Re-prompting for self-critique.") return self.generate(reflection_prompt, "You are an AI assistant designed to critically evaluate and improve other AI-generated content.") diff --git a/logs/neurosploit.log b/logs/neurosploit.log new file mode 100644 index 0000000..62e7c34 --- /dev/null +++ b/logs/neurosploit.log @@ -0,0 +1,251 @@ +2025-12-19 11:32:18,555 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113218 +2025-12-19 11:32:55,262 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113255 +2025-12-19 11:33:54,241 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113354 +2025-12-19 11:34:29,519 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113429 +2025-12-19 11:35:39,664 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113539 +2025-12-19 11:35:39,664 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:35:39,666 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:35:39,668 - core.llm_manager - INFO - Loaded 8 prompts from Markdown library. +2025-12-19 11:35:39,668 - core.llm_manager - INFO - Initialized LLM Manager - Provider: gemini, Model: gemini-pro, Profile: gemini_pro_default +2025-12-19 11:35:39,668 - __main__ - ERROR - Prompts for agent role 'owasp_expert' not found in MD library. +2025-12-19 11:37:59,476 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113759 +2025-12-19 11:38:04,329 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113804 +2025-12-19 11:38:04,329 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:38:04,330 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:38:04,331 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. 
+2025-12-19 11:38:04,331 - core.llm_manager - INFO - Initialized LLM Manager - Provider: gemini, Model: gemini-pro, Profile: gemini_pro_default +2025-12-19 11:38:04,331 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:38:04,331 - agents.base_agent - INFO - Executing owasp_expert agent for input: Realize um teste no site http://testphp.vulnweb.co... +2025-12-19 11:38:04,331 - agents.base_agent - ERROR - Missing key in prompt template for owasp_expert: 'web_app_details_json'. Falling back to basic prompt. +2025-12-19 11:38:13,483 - core.llm_manager - ERROR - Error generating raw response: + No API_KEY or ADC found. Please either: + - Set the `GOOGLE_API_KEY` environment variable. + - Manually pass the key with `genai.configure(api_key=my_api_key)`. + - Or set up Application Default Credentials, see https://ai.google.dev/gemini-api/docs/oauth for more information. +2025-12-19 11:38:13,484 - __main__ - INFO - Results saved to results/campaign_20251219_113804.json +2025-12-19 11:38:13,484 - __main__ - INFO - Report generated: reports/report_20251219_113804.html +2025-12-19 11:38:40,109 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113840 +2025-12-19 11:38:40,109 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:38:40,109 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:38:40,110 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:38:40,110 - core.llm_manager - INFO - Initialized LLM Manager - Provider: gemini, Model: gemini-pro, Profile: gemini_pro_default +2025-12-19 11:38:40,110 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:38:40,110 - agents.base_agent - INFO - Executing owasp_expert agent for input: Realize um teste no site http://testphp.vulnweb.co... +2025-12-19 11:38:49,301 - core.llm_manager - ERROR - Error generating raw response: + No API_KEY or ADC found. Please either: + - Set the `GOOGLE_API_KEY` environment variable. + - Manually pass the key with `genai.configure(api_key=my_api_key)`. + - Or set up Application Default Credentials, see https://ai.google.dev/gemini-api/docs/oauth for more information. +2025-12-19 11:38:49,301 - __main__ - INFO - Results saved to results/campaign_20251219_113840.json +2025-12-19 11:38:49,302 - __main__ - INFO - Report generated: reports/report_20251219_113840.html +2025-12-19 11:39:42,429 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_113942 +2025-12-19 11:39:42,430 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:39:42,430 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:39:42,430 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:39:42,430 - core.llm_manager - INFO - Initialized LLM Manager - Provider: gemini, Model: gemini-pro, Profile: gemini_pro_default +2025-12-19 11:39:42,430 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:39:42,430 - agents.base_agent - INFO - Executing owasp_expert agent for input: Realize um teste no site http://testphp.vulnweb.co... 
+2025-12-19 11:39:51,400 - core.llm_manager - ERROR - Error generating raw response: + No API_KEY or ADC found. Please either: + - Set the `GOOGLE_API_KEY` environment variable. + - Manually pass the key with `genai.configure(api_key=my_api_key)`. + - Or set up Application Default Credentials, see https://ai.google.dev/gemini-api/docs/oauth for more information. +2025-12-19 11:39:51,401 - __main__ - INFO - Results saved to results/campaign_20251219_113942.json +2025-12-19 11:39:51,402 - __main__ - INFO - Report generated: reports/report_20251219_113942.html +2025-12-19 11:40:25,811 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114025 +2025-12-19 11:44:45,527 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114445 +2025-12-19 11:45:10,765 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114510 +2025-12-19 11:45:21,124 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114521 +2025-12-19 11:46:17,722 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114617 +2025-12-19 11:47:37,765 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:47:37,766 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:47:37,770 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:47:37,770 - core.llm_manager - INFO - Initialized LLM Manager - Provider: gemini, Model: gemini-pro, Profile: gemini_pro_default +2025-12-19 11:47:37,770 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:47:37,770 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan http://testphp.vulnweb.com/... +2025-12-19 11:47:47,262 - core.llm_manager - ERROR - Error generating raw response: + No API_KEY or ADC found. Please either: + - Set the `GOOGLE_API_KEY` environment variable. + - Manually pass the key with `genai.configure(api_key=my_api_key)`. + - Or set up Application Default Credentials, see https://ai.google.dev/gemini-api/docs/oauth for more information. +2025-12-19 11:47:47,263 - __main__ - INFO - Results saved to results/campaign_20251219_114617.json +2025-12-19 11:47:47,263 - __main__ - INFO - Report generated: reports/report_20251219_114617.html +2025-12-19 11:49:23,054 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_114923 +2025-12-19 11:49:23,054 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:49:23,054 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:49:23,055 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:49:23,055 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 11:49:23,055 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:49:23,055 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan example.com... 
+2025-12-19 11:49:48,488 - __main__ - INFO - Results saved to results/campaign_20251219_114923.json +2025-12-19 11:49:48,489 - __main__ - INFO - Report generated: reports/report_20251219_114923.html +2025-12-19 11:50:08,882 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115008 +2025-12-19 11:50:08,882 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:50:08,882 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:50:08,884 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:50:08,884 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 11:50:08,884 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:50:08,884 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan hackersec.com... +2025-12-19 11:50:29,383 - __main__ - INFO - Results saved to results/campaign_20251219_115008.json +2025-12-19 11:50:29,384 - __main__ - INFO - Report generated: reports/report_20251219_115008.html +2025-12-19 11:56:34,904 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115634 +2025-12-19 11:56:34,904 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:56:34,904 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:56:34,906 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:56:34,906 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 11:56:34,906 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:56:34,906 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan hackersec.com... +2025-12-19 11:56:54,137 - __main__ - INFO - Results saved to results/campaign_20251219_115634.json +2025-12-19 11:56:54,138 - __main__ - INFO - Report generated: reports/report_20251219_115634.html +2025-12-19 11:57:13,435 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115713 +2025-12-19 11:57:13,435 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 11:57:13,436 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 11:57:13,438 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 11:57:13,438 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 11:57:13,438 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 11:57:13,438 - agents.base_agent - INFO - Executing owasp_expert agent for input: identifique vulnerabilidades no dominio hackersec.... 
+2025-12-19 11:57:36,170 - __main__ - INFO - Results saved to results/campaign_20251219_115713.json +2025-12-19 11:57:36,170 - __main__ - INFO - Report generated: reports/report_20251219_115713.html +2025-12-19 11:57:56,516 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115756 +2025-12-19 11:58:01,802 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115801 +2025-12-19 11:58:11,144 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115811 +2025-12-19 11:58:22,784 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115822 +2025-12-19 11:58:51,778 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_115851 +2025-12-19 12:02:00,697 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_120200 +2025-12-19 12:02:00,697 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:02:00,697 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:02:00,699 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:02:00,699 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:02:00,700 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:02:00,700 - agents.base_agent - INFO - Executing owasp_expert agent for input: identifique vulnerabilidades no dominio hackersec.... +2025-12-19 12:02:24,246 - __main__ - INFO - Results saved to results/campaign_20251219_120200.json +2025-12-19 12:02:24,247 - __main__ - INFO - Report generated: reports/report_20251219_120200.html +2025-12-19 12:02:39,920 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_120239 +2025-12-19 12:02:39,920 - __main__ - INFO - Starting execution for agent role: owasp_expert_profile +2025-12-19 12:02:39,920 - __main__ - ERROR - Agent role 'owasp_expert_profile' not found in configuration. +2025-12-19 12:03:53,173 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_120353 +2025-12-19 12:03:53,173 - __main__ - INFO - Starting execution for agent role: owasp_expert_profile +2025-12-19 12:03:53,173 - __main__ - ERROR - Agent role 'owasp_expert_profile' not found in configuration. +2025-12-19 12:03:57,672 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_120357 +2025-12-19 12:03:57,672 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:03:57,673 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:03:57,676 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:03:57,676 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:03:57,676 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:03:57,676 - agents.base_agent - INFO - Executing owasp_expert agent for input: identifique vulnerabilidades no dominio hackersec.... 
+2025-12-19 12:04:20,276 - __main__ - INFO - Results saved to results/campaign_20251219_120357.json +2025-12-19 12:04:20,277 - __main__ - INFO - Report generated: reports/report_20251219_120357.html +2025-12-19 12:09:45,332 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_120945 +2025-12-19 12:10:28,397 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121028 +2025-12-19 12:13:17,354 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121317 +2025-12-19 12:13:32,185 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121332 +2025-12-19 12:14:31,136 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121431 +2025-12-19 12:14:31,136 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:14:31,137 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:14:31,139 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:14:31,139 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:14:31,139 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:14:31,139 - agents.base_agent - INFO - Executing owasp_expert agent for input: identifique vulnerabilidades no dominio hackersec.... +2025-12-19 12:14:58,217 - __main__ - INFO - Results saved to results/campaign_20251219_121431.json +2025-12-19 12:14:58,218 - __main__ - INFO - Report generated: reports/report_20251219_121431.html +2025-12-19 12:15:43,666 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121543 +2025-12-19 12:15:43,667 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:15:43,667 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:15:43,669 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:15:43,670 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:15:43,670 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:15:43,670 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... +2025-12-19 12:16:11,774 - __main__ - INFO - Results saved to results/campaign_20251219_121543.json +2025-12-19 12:16:11,775 - __main__ - INFO - Report generated: reports/report_20251219_121543.html +2025-12-19 12:19:12,710 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_121912 +2025-12-19 12:19:12,710 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:19:12,711 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:19:12,713 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:19:12,713 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:19:12,713 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:19:12,713 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... 
+2025-12-19 12:19:55,720 - __main__ - INFO - Results saved to results/campaign_20251219_121912.json +2025-12-19 12:19:55,721 - __main__ - INFO - Report generated: reports/report_20251219_121912.html +2025-12-19 12:31:03,782 - __main__ - INFO - Created default configuration at config/config.json +2025-12-19 12:31:03,782 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_123103 +2025-12-19 12:31:03,783 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:31:03,783 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:31:03,785 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:31:03,785 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:31:03,785 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:31:03,785 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... +2025-12-19 12:31:23,207 - __main__ - INFO - Results saved to results/campaign_20251219_123103.json +2025-12-19 12:31:23,208 - __main__ - INFO - Report generated: reports/report_20251219_123103.html +2025-12-19 12:33:07,023 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_123307 +2025-12-19 12:33:07,023 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:33:07,024 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:33:07,026 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:33:07,026 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:33:07,026 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:33:07,026 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target http://testphp.vulnweb.com and identif... +2025-12-19 12:33:25,214 - __main__ - INFO - Results saved to results/campaign_20251219_123307.json +2025-12-19 12:33:25,215 - __main__ - INFO - Report generated: reports/report_20251219_123307.html +2025-12-19 12:36:29,020 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_123629 +2025-12-19 12:36:29,020 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:36:29,021 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:36:29,023 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:36:29,023 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:36:29,023 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:36:29,023 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... 
+2025-12-19 12:36:45,283 - __main__ - INFO - Results saved to results/campaign_20251219_123629.json +2025-12-19 12:37:01,705 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_123701 +2025-12-19 12:37:01,705 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:37:01,705 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:37:01,707 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:37:01,707 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:37:01,707 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:37:01,707 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... +2025-12-19 12:37:16,413 - __main__ - INFO - Results saved to results/campaign_20251219_123701.json +2025-12-19 12:43:25,362 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_124325 +2025-12-19 12:43:25,362 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 12:43:25,363 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 12:43:25,365 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 12:43:25,365 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 12:43:25,365 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 12:43:25,365 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... +2025-12-19 12:43:47,234 - __main__ - INFO - Results saved to results/campaign_20251219_124325.json +2025-12-19 12:43:47,235 - __main__ - INFO - Report generated: reports/report_20251219_124325.html +2025-12-19 12:46:24,533 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_124624 +2025-12-19 12:51:12,912 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_125112 +2025-12-19 13:07:54,046 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_130754 +2025-12-19 13:08:09,699 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_130809 +2025-12-19 13:08:39,156 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_130839 +2025-12-19 13:08:39,156 - __main__ - INFO - Starting execution for agent role: owasp_expert +2025-12-19 13:08:39,157 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 13:08:39,160 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 13:08:39,160 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 13:08:39,160 - agents.base_agent - INFO - Initialized owasp_expert agent. Description: Specializes in assessing web applications against OWASP Top 10 vulnerabilities. +2025-12-19 13:08:39,160 - agents.base_agent - INFO - Executing owasp_expert agent for input: scan target hackersec.com... 
+2025-12-19 13:08:59,868 - __main__ - INFO - Results saved to results/campaign_20251219_130839.json +2025-12-19 13:08:59,893 - __main__ - INFO - Report generated: reports/report_20251219_130839.html +2025-12-19 13:09:57,106 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_130957 +2025-12-19 13:10:51,790 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_131051 +2025-12-19 13:10:51,790 - __main__ - INFO - Starting execution for agent role: bug_bounty_hunter +2025-12-19 13:10:51,791 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 13:10:51,794 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 13:10:51,794 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 13:10:51,794 - agents.base_agent - INFO - Initialized bug_bounty_hunter agent. Description: Focuses on web application vulnerabilities, leveraging recon and exploitation tools. +2025-12-19 13:10:51,794 - agents.base_agent - INFO - Executing bug_bounty_hunter agent for input: identify vulnerability in target testphp.vulnweb.c... +2025-12-19 13:12:27,308 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_131227 +2025-12-19 13:12:27,308 - __main__ - INFO - Starting execution for agent role: bug_bounty_hunter +2025-12-19 13:12:27,308 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 13:12:27,310 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 13:12:27,310 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 13:12:27,310 - agents.base_agent - INFO - Initialized bug_bounty_hunter agent. Description: Focuses on web application vulnerabilities, leveraging recon and exploitation tools. +2025-12-19 13:12:27,310 - agents.base_agent - INFO - Executing bug_bounty_hunter agent for input: identify vulnerability in target testphp.vulnweb.c... +2025-12-19 13:12:41,925 - __main__ - INFO - Results saved to results/campaign_20251219_131227.json +2025-12-19 13:12:41,946 - __main__ - INFO - Report generated: reports/report_20251219_131227.html +2025-12-19 13:24:05,659 - __main__ - INFO - NeuroSploitv2 initialized - Session: 20251219_132405 +2025-12-19 13:24:05,659 - __main__ - INFO - Starting execution for agent role: bug_bounty_hunter +2025-12-19 13:24:05,659 - core.llm_manager - INFO - Loaded prompts from JSON library: prompts/library.json +2025-12-19 13:24:05,661 - core.llm_manager - INFO - Loaded 9 prompts from Markdown library. +2025-12-19 13:24:05,661 - core.llm_manager - INFO - Initialized LLM Manager - Provider: ollama, Model: llama3:8b, Profile: ollama_llama3_default +2025-12-19 13:24:05,661 - agents.base_agent - INFO - Initialized bug_bounty_hunter agent. Description: Focuses on web application vulnerabilities, leveraging recon and exploitation tools. +2025-12-19 13:24:05,661 - agents.base_agent - INFO - Executing bug_bounty_hunter agent for input: identify vulnerability in target testphp.vulnweb.c... 
+2025-12-19 13:24:18,057 - __main__ - INFO - Results saved to results/campaign_20251219_132405.json +2025-12-19 13:24:18,078 - __main__ - INFO - Report generated: reports/report_20251219_132405.html diff --git a/neurosploit.py b/neurosploit.py index 17b080c..1cd4baa 100644 --- a/neurosploit.py +++ b/neurosploit.py @@ -14,6 +14,8 @@ from pathlib import Path from typing import Dict, List, Optional import logging from datetime import datetime +import readline +import mistune # Setup logging logging.basicConfig( @@ -29,6 +31,44 @@ logger = logging.getLogger(__name__) from core.llm_manager import LLMManager from agents.base_agent import BaseAgent +class Completer: + def __init__(self, neurosploit): + self.neurosploit = neurosploit + self.commands = ["help", "run_agent", "config", "list_roles", "list_profiles", "set_profile", "set_agent", "discover_ollama", "exit", "quit"] + self.agent_roles = list(self.neurosploit.config.get('agent_roles', {}).keys()) + self.llm_profiles = list(self.neurosploit.config.get('llm', {}).get('profiles', {}).keys()) + + def complete(self, text, state): + line = readline.get_line_buffer() + parts = line.split() + + options = [] + if state == 0: + if not parts or (len(parts) == 1 and not line.endswith(' ')): + options = [c + ' ' for c in self.commands if c.startswith(text)] + elif len(parts) > 0: + if parts[0] == 'run_agent': + if len(parts) == 1 and line.endswith(' '): + options = [a + ' ' for a in self.agent_roles] + elif len(parts) == 2 and not line.endswith(' '): + options = [a + ' ' for a in self.agent_roles if a.startswith(parts[1])] + elif parts[0] == 'set_agent': + if len(parts) == 1 and line.endswith(' '): + options = [a + ' ' for a in self.agent_roles] + elif len(parts) == 2 and not line.endswith(' '): + options = [a + ' ' for a in self.agent_roles if a.startswith(parts[1])] + elif parts[0] == 'set_profile': + if len(parts) == 1 and line.endswith(' '): + options = [p + ' ' for p in self.llm_profiles] + elif len(parts) == 2 and not line.endswith(' '): + options = [p + ' ' for p in self.llm_profiles if p.startswith(parts[1])] + + if state < len(options): + return options[state] + else: + return None + + class NeuroSploitv2: """Main framework class for NeuroSploitv2""" @@ -41,7 +81,8 @@ class NeuroSploitv2: self._setup_directories() # LLMManager instance will be created dynamically per agent role to select specific profiles - self.llm_manager_instance: Optional[LLMManager] = None + self.llm_manager_instance: Optional[LLMManager] = None + self.selected_agent_role: Optional[str] = None logger.info(f"NeuroSploitv2 initialized - Session: {self.session_id}") @@ -53,107 +94,17 @@ class NeuroSploitv2: def _load_config(self) -> Dict: """Load configuration from file""" - if os.path.exists(self.config_path): - with open(self.config_path, 'r') as f: - return json.load(f) - return self._create_default_config() - - def _create_default_config(self) -> Dict: - """Create default configuration""" - config = { - "llm": { - "provider": "gemini", - "model": "gemini-pro", - "api_key": "", - "temperature": 0.7, - "max_tokens": 4096 - }, - "agent_roles": { - "bug_bounty_hunter": { - "enabled": True, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["subfinder", "nuclei", "burpsuite", "sqlmap"], - "description": "Focuses on web application vulnerabilities, leveraging recon and exploitation tools." 
- }, - "blue_team_agent": { - "enabled": True, - "llm_profile": "claude_opus_default", - "tools_allowed": [], - "description": "Analyzes logs and telemetry for threats, provides defensive strategies." - }, - "exploit_expert": { - "enabled": True, - "llm_profile": "gpt_4o_default", - "tools_allowed": ["metasploit", "nmap"], - "description": "Devises exploitation strategies and payloads for identified vulnerabilities." - }, - "red_team_agent": { - "enabled": True, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["nmap", "metasploit", "hydra"], - "description": "Plans and executes simulated attacks to test an organization's defenses." - }, - "replay_attack_specialist": { - "enabled": True, - "llm_profile": "ollama_llama3_default", - "tools_allowed": ["burpsuite"], - "description": "Identifies and leverages replay attack vectors in network traffic or authentication." - }, - "pentest_generalist": { - "enabled": True, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["nmap", "subfinder", "nuclei", "metasploit", "burpsuite", "sqlmap", "hydra"], - "description": "Performs comprehensive penetration tests across various domains." - }, - "owasp_expert": { - "enabled": True, - "llm_profile": "gemini_pro_default", - "tools_allowed": ["burpsuite", "sqlmap"], - "description": "Specializes in assessing web applications against OWASP Top 10 vulnerabilities." - }, - "cwe_expert": { - "enabled": True, - "llm_profile": "claude_opus_default", - "tools_allowed": [], - "description": "Analyzes code and reports for weaknesses based on MITRE CWE Top 25." - }, - "malware_analyst": { - "enabled": True, - "llm_profile": "gpt_4o_default", - "tools_allowed": [], - "description": "Examines malware samples to understand functionality and identify IOCs." - } - }, - "methodologies": { - "owasp_top10": True, - "cwe_top25": True, - "network_pentest": True, - "ad_pentest": True, - "web_security": True - }, - "tools": { - "nmap": "/usr/bin/nmap", - "metasploit": "/usr/bin/msfconsole", - "burpsuite": "/usr/bin/burpsuite", - "sqlmap": "/usr/bin/sqlmap", - "hydra": "/usr/bin/hydra" - }, - "output": { - "format": "json", - "verbose": True, - "save_artifacts": True - } - } + if not os.path.exists(self.config_path): + if os.path.exists("config/config-example.json"): + import shutil + shutil.copy("config/config-example.json", self.config_path) + logger.info(f"Created default configuration at {self.config_path}") + else: + logger.error("config-example.json not found. 
Cannot create default configuration.") + return {} - # Save default config - os.makedirs(os.path.dirname(self.config_path), exist_ok=True) - with open(self.config_path, 'w') as f: - json.dump(config, f, indent=4) - - logger.info(f"Created default configuration at {self.config_path}") - return config - - + with open(self.config_path, 'r') as f: + return json.load(f) def _initialize_llm_manager(self, agent_llm_profile: Optional[str] = None): """Initializes LLMManager with a specific profile or default.""" @@ -167,7 +118,7 @@ class NeuroSploitv2: else: self.llm_manager_instance = LLMManager({"llm": llm_config}) - def execute_agent_role(self, agent_role_name: str, user_input: str, additional_context: Optional[Dict] = None): + def execute_agent_role(self, agent_role_name: str, user_input: str, additional_context: Optional[Dict] = None, llm_profile_override: Optional[str] = None): """Execute a specific agent role with a given input.""" logger.info(f"Starting execution for agent role: {agent_role_name}") @@ -182,7 +133,7 @@ class NeuroSploitv2: logger.warning(f"Agent role '{agent_role_name}' is disabled in configuration.") return {"warning": f"Agent role '{agent_role_name}' is disabled."} - llm_profile_name = role_config.get('llm_profile', self.config['llm']['default_profile']) + llm_profile_name = llm_profile_override or role_config.get('llm_profile', self.config['llm']['default_profile']) self._initialize_llm_manager(llm_profile_name) if not self.llm_manager_instance: @@ -226,43 +177,111 @@ class NeuroSploitv2: """Generate HTML report for agent role execution""" report_file = f"reports/report_{self.session_id}.html" + llm_response = results.get('results', {}).get('llm_response', '') + if isinstance(llm_response, dict): + llm_response = json.dumps(llm_response, indent=2) + + report_content = mistune.html(llm_response) + html = f""" - +
+ +Agent Role: {results.get('agent_role', 'N/A')}
-Input: {results.get('input', 'N/A')}
-Session: {results['session_id']}
-Timestamp: {results['timestamp']}
-Agent Role: {results.get('agent_role', 'N/A')}
+Input: {results.get('input', 'N/A')}
+Timestamp: {results['timestamp']}
+{json.dumps(results.get('results', {}), indent=2)}