Mirror of https://github.com/0xMarcio/PentestPilot.git
Initial commit of PentestPilot: AI-assisted pentest recon and orchestration toolkit.

Highlights:
- Resumable pipelines (full_pipeline) with manifest state and elapsed timings
- Rich dashboard (colors, severity bars, durations, compact/json modes)
- Web helpers: httpx→nuclei auto, tech routing + quick scanners
- Agents: multi-task orchestrator (web/full/ad/notes/post) with resume
- AD/SMB, password utils, shells, transfer, privesc, tunnels
- QoL scripts: proxy toggle, cleanup, tmux init, URL extractor
- Docs: README (Quick Start + Docs Index), HOWTO (deep guide), TOOLKIT (catalog with examples)

Structure:
- bin/automation: pipelines, dashboard, manifest, resume, tech_actions
- bin/web: routing, scanners, helpers
- bin/ai: orchestrators + robust AI utils
- bin/ad, bin/passwords, bin/shells, bin/transfer, bin/privesc, bin/misc, bin/dns, bin/scan, bin/windows, bin/hashes
- HOWTO.md and TOOLKIT.md cross-linked with examples

Use:
- settarget <target>; agent full <domain|hosts.txt>; dashboard --compact
- See HOWTO.md for setup, semantics, and examples.
35 lines
1.8 KiB
Python
Executable File
#!/usr/bin/env python3
import os, json, requests, time


def ai_complete(prompt, system='You are a helpful pentest copilot.', temperature=0.2, max_chars=12000, retries=2, timeout=60):
    # Keep only the tail of oversized prompts so long recon output still fits.
    text = prompt[-max_chars:] if len(prompt) > max_chars else prompt
    # Provider selection: honor an explicit PROVIDER, otherwise use OpenAI when a
    # key is present and fall back to a local Ollama instance.
    provider = os.environ.get('PROVIDER') or ('openai' if os.environ.get('OPENAI_API_KEY') else 'ollama')
    last_err = ''
    for _ in range(retries):
        try:
            if provider == 'openai' and os.environ.get('OPENAI_API_KEY'):
                url = 'https://api.openai.com/v1/chat/completions'
                headers = {'Authorization': f"Bearer {os.environ['OPENAI_API_KEY']}", 'Content-Type': 'application/json'}
                body = {'model': os.environ.get('OPENAI_MODEL', 'gpt-4o-mini'),
                        'messages': [{'role': 'system', 'content': system}, {'role': 'user', 'content': text}],
                        'temperature': temperature}
                r = requests.post(url, headers=headers, data=json.dumps(body), timeout=timeout)
                if r.ok:
                    return r.json()['choices'][0]['message']['content']
                last_err = f"HTTP {r.status_code}: {r.text[:200]}"
            else:
                host = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
                model = os.environ.get('OLLAMA_MODEL', 'llama3.1')
                # 'stream': False makes /api/chat return one JSON object; without it
                # Ollama streams NDJSON and r.json() fails on the multi-line body.
                r = requests.post(f'{host}/api/chat',
                                  json={'model': model, 'stream': False,
                                        'messages': [{'role': 'system', 'content': system}, {'role': 'user', 'content': text}]},
                                  timeout=timeout)
                if r.ok:
                    try:
                        return r.json()['message']['content']
                    except Exception:
                        return r.text  # fall back to the raw body if parsing fails
                last_err = f"HTTP {r.status_code}: {r.text[:200]}"
        except Exception as e:
            last_err = str(e)
        time.sleep(1)  # brief pause before the next attempt
    return f"[AI error] {last_err}"
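Usage sketch: a minimal caller, assuming this helper is importable as a module named ai_utils (the filename is not shown on this page, so the import path is an assumption). Both providers are driven purely by environment variables; with no OPENAI_API_KEY set, the call goes to the local Ollama instance.

import os
os.environ.setdefault('OLLAMA_MODEL', 'llama3.1')  # local default; set OPENAI_API_KEY to route via OpenAI instead
from ai_utils import ai_complete                   # hypothetical module name

print(ai_complete('Suggest nuclei templates for an exposed Jenkins instance.'))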