mirror of
https://github.com/CyberSecurityUP/NeuroSploit.git
synced 2026-03-31 00:20:44 +02:00
116 modules | 100 vuln types | 18 API routes | 18 frontend pages Major features: - VulnEngine: 100 vuln types, 526+ payloads, 12 testers, anti-hallucination prompts - Autonomous Agent: 3-stream auto pentest, multi-session (5 concurrent), pause/resume/stop - CLI Agent: Claude Code / Gemini CLI / Codex CLI inside Kali containers - Validation Pipeline: negative controls, proof of execution, confidence scoring, judge - AI Reasoning: ReACT engine, token budget, endpoint classifier, CVE hunter, deep recon - Multi-Agent: 5 specialists + orchestrator + researcher AI + vuln type agents - RAG System: BM25/TF-IDF/ChromaDB vectorstore, few-shot, reasoning templates - Smart Router: 20 providers (8 CLI OAuth + 12 API), tier failover, token refresh - Kali Sandbox: container-per-scan, 56 tools, VPN support, on-demand install - Full IA Testing: methodology-driven comprehensive pentest sessions - Notifications: Discord, Telegram, WhatsApp/Twilio multi-channel alerts - Frontend: React/TypeScript with 18 pages, real-time WebSocket updates
78 lines
2.0 KiB
Python
Executable File
78 lines
2.0 KiB
Python
Executable File
"""
|
|
NeuroSploit v3 - Prompt Schemas
|
|
"""
|
|
from datetime import datetime
|
|
from typing import Optional, List
|
|
from pydantic import BaseModel, Field
|
|
|
|
|
|
class PromptCreate(BaseModel):
    """Request schema for creating a new prompt.

    Fields:
        name: Prompt name; required, capped at 255 characters.
        description: Optional free-form description.
        content: Prompt body text; required, at least 10 characters
            (rejects empty/trivial prompts at validation time).
        category: Optional category label; no fixed value set is
            enforced here.
    """

    name: str = Field(..., max_length=255, description="Prompt name")
    description: Optional[str] = Field(None, description="Prompt description")
    content: str = Field(..., min_length=10, description="Prompt content")
    category: Optional[str] = Field(None, description="Prompt category")
|
|
|
|
|
|
class PromptUpdate(BaseModel):
    """Request schema for updating an existing prompt.

    Every field defaults to ``None`` — presumably unset fields are left
    unchanged by the update handler (partial update); confirm against the
    route that consumes this schema. Note that unlike ``PromptCreate``,
    no length constraints are enforced here.
    """

    name: Optional[str] = None
    description: Optional[str] = None
    content: Optional[str] = None
    category: Optional[str] = None
|
|
|
|
|
|
class PromptParse(BaseModel):
    """Request schema for parsing raw prompt text.

    Fields:
        content: Prompt text to parse; required, at least 10 characters
            (same minimum as ``PromptCreate.content``).
    """

    content: str = Field(..., min_length=10, description="Prompt content to parse")
|
|
|
|
|
|
class VulnerabilityTypeExtracted(BaseModel):
    """A single vulnerability type extracted from a parsed prompt.

    Fields:
        type: Vulnerability type identifier.
        category: Category the vulnerability belongs to.
        confidence: Extraction confidence score; no range is enforced
            here — presumably 0.0–1.0, confirm against the parser.
        context: Optional snippet of the prompt text the extraction
            was based on.
    """

    type: str
    category: str
    confidence: float
    context: Optional[str] = None
|
|
|
|
|
|
class TestingScope(BaseModel):
    """Testing scope extracted from a parsed prompt.

    Fields:
        include_recon: Whether a reconnaissance phase is included
            (defaults to True).
        depth: Scan depth level; expected values are listed in the
            inline comment but are not enforced by the type (a plain
            ``str``, not a ``Literal``).
        max_requests_per_endpoint: Optional cap on requests sent per
            endpoint; ``None`` means no limit is set here.
        time_limit_minutes: Optional overall time budget in minutes;
            ``None`` means no limit is set here.
    """

    include_recon: bool = True
    depth: str = "standard"  # quick, standard, thorough, exhaustive
    max_requests_per_endpoint: Optional[int] = None
    time_limit_minutes: Optional[int] = None
|
|
|
|
|
|
class PromptParseResult(BaseModel):
    """Result of parsing a prompt into an actionable test plan.

    Fields:
        vulnerabilities_to_test: Vulnerability types extracted from the
            prompt (required).
        testing_scope: Scope parameters extracted from the prompt
            (required).
        special_instructions: Free-form instruction strings; defaults to
            an empty list.
        target_filters: Arbitrary target-filtering options; defaults to
            an empty dict. Key schema is not defined here.
        output_preferences: Arbitrary output-formatting options;
            defaults to an empty dict. Key schema is not defined here.
    """

    vulnerabilities_to_test: List[VulnerabilityTypeExtracted]
    testing_scope: TestingScope
    # default_factory is the explicit, idiomatic pydantic form for mutable
    # defaults: each instance gets its own fresh list/dict. (pydantic
    # deep-copies plain mutable defaults too, but bare `= []` / `= {}`
    # reads like the classic shared-mutable-default bug and breaks if the
    # class ever stops being a BaseModel.)
    special_instructions: List[str] = Field(default_factory=list)
    target_filters: dict = Field(default_factory=dict)
    output_preferences: dict = Field(default_factory=dict)
|
|
|
|
|
|
class PromptResponse(BaseModel):
    """Response schema for a stored prompt.

    Fields:
        id: Prompt identifier (string form).
        name: Prompt name.
        description: Optional description.
        content: Prompt body text.
        is_preset: Whether this prompt is a built-in preset.
        category: Optional category label.
        parsed_vulnerabilities: Previously parsed vulnerability entries;
            element type is not constrained here (bare ``List``) —
            presumably dicts or ``VulnerabilityTypeExtracted``-shaped
            data, confirm against the producer.
        created_at: Creation timestamp.
        updated_at: Last-update timestamp.
    """

    id: str
    name: str
    description: Optional[str]
    content: str
    is_preset: bool
    category: Optional[str]
    parsed_vulnerabilities: List
    created_at: datetime
    updated_at: datetime

    class Config:
        # Allow building this model directly from ORM objects
        # (attribute access instead of dict keys).
        from_attributes = True
|
|
|
|
|
|
class PromptPreset(BaseModel):
    """Schema for a built-in preset prompt.

    Unlike ``PromptResponse``, ``description`` and ``category`` are
    required here, and instead of the prompt content a summary count of
    covered vulnerability types is exposed.

    Fields:
        id: Preset identifier (string form).
        name: Preset name.
        description: Preset description.
        category: Preset category.
        vulnerability_count: Number of vulnerability types the preset
            covers.
    """

    id: str
    name: str
    description: str
    category: str
    vulnerability_count: int
|