Mirror of https://github.com/0xMarcio/PentestPilot.git
Initial commit of PentestPilot — AI-assisted pentest recon and orchestration toolkit.

Highlights:
- Resumable pipelines (full_pipeline) with manifest state and elapsed timings
- Rich dashboard (colors, severity bars, durations, compact/json modes)
- Web helpers: httpx→nuclei auto, tech routing + quick scanners
- Agents: multi-task orchestrator (web/full/ad/notes/post) with resume
- AD/SMB, password utils, shells, transfer, privesc, tunnels
- QoL scripts: proxy toggle, cleanup, tmux init, URL extractor
- Docs: README (Quick Start + Docs Index), HOWTO (deep guide), TOOLKIT (catalog with examples)

Structure:
- bin/automation: pipelines, dashboard, manifest, resume, tech_actions
- bin/web: routing, scanners, helpers
- bin/ai: orchestrators + robust AI utils
- bin/ad, bin/passwords, bin/shells, bin/transfer, bin/privesc, bin/misc, bin/dns, bin/scan, bin/windows, bin/hashes
- HOWTO.md and TOOLKIT.md cross-linked with examples

Use:
- settarget <target>; agent full <domain|hosts.txt>; dashboard --compact
- See HOWTO.md for setup, semantics, and examples.
74 lines · 2.5 KiB · Python · Executable File
#!/usr/bin/env python3
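# ask.py: one-shot LLM prompt helper for PentestPilot. Sends a single prompt
# (from an argument or stdin) to the OpenAI chat completions API or a local
# Ollama server, chosen via environment variables, and prints the reply.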
import os, sys, json, requests


def usage():
    # All help text goes to stderr so stdout stays clean for model output.
    print("Usage: ask.py [-m model] [-s system] [prompt | -]", file=sys.stderr)
    print("Env:", file=sys.stderr)
    print("  PROVIDER=openai|ollama (default: openai if OPENAI_API_KEY set else ollama)", file=sys.stderr)
    print("  OPENAI_API_KEY, OPENAI_MODEL (default: gpt-4o-mini)", file=sys.stderr)
    print("  OLLAMA_HOST (default: http://localhost:11434), OLLAMA_MODEL (default: llama3.1)", file=sys.stderr)
    sys.exit(1)

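# Illustrative invocations (file names and values here are examples only):
#   ask.py "summarize the open ports in scan.txt"
#   cat nuclei-findings.txt | ask.py -s "You triage web findings" -
#   PROVIDER=ollama ask.py "suggest next recon steps"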

# Minimal flag parsing: -m overrides the model, -s overrides the system prompt.
model = None
system = os.environ.get('AI_SYSTEM', 'You are a helpful pentest copilot.')
args = sys.argv[1:]
while args and args[0].startswith('-'):
    if args[0] == '-':
        break  # bare '-' is the "read prompt from stdin" sentinel, not a flag
    if args[0] == '-m' and len(args) > 1:
        model = args[1]; args = args[2:]
    elif args[0] == '-s' and len(args) > 1:
        system = args[1]; args = args[2:]
    else:
        usage()

if not args:
    usage()

prompt = args[0]
if prompt == '-':
    prompt = sys.stdin.read()  # read the prompt from stdin
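
# Provider selection: an explicit PROVIDER env var wins; otherwise prefer
# OpenAI when an API key is present, and fall back to a local Ollama server.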
provider = os.environ.get('PROVIDER')
if not provider:
    provider = 'openai' if os.environ.get('OPENAI_API_KEY') else 'ollama'

if provider == 'openai':
    key = os.environ.get('OPENAI_API_KEY')
    if not key:
        print('[!] OPENAI_API_KEY not set; falling back to ollama', file=sys.stderr)
        provider = 'ollama'
    else:
        model = model or os.environ.get('OPENAI_MODEL', 'gpt-4o-mini')
        url = 'https://api.openai.com/v1/chat/completions'
        headers = {'Authorization': f'Bearer {key}', 'Content-Type': 'application/json'}
        body = {
            'model': model,
            'messages': [
                {'role': 'system', 'content': system},
                {'role': 'user', 'content': prompt},
            ],
            'temperature': 0.2,  # low temperature keeps answers focused and repeatable
        }
        r = requests.post(url, headers=headers, json=body, timeout=60)
        r.raise_for_status()
        print(r.json()['choices'][0]['message']['content'].strip())
        sys.exit(0)

# Ollama fallback: POST to the local /api/chat endpoint. 'stream': False asks
# for a single JSON object instead of the default line-by-line streaming body,
# which would break r.json() below.
host = os.environ.get('OLLAMA_HOST', 'http://localhost:11434')
model = model or os.environ.get('OLLAMA_MODEL', 'llama3.1')
url = f'{host}/api/chat'
body = {
    'model': model,
    'messages': [{'role': 'system', 'content': system}, {'role': 'user', 'content': prompt}],
    'stream': False,
}
r = requests.post(url, json=body, timeout=60)
if r.ok:
    data = r.json()
    # non-streaming responses carry the reply in data['message']['content']
    if 'message' in data and 'content' in data['message']:
        print(data['message']['content'].strip())
    else:
        print(json.dumps(data, indent=2))  # unexpected shape: dump raw JSON for debugging
else:
    print(f'[!] Ollama request failed: {r.status_code}', file=sys.stderr)
    sys.exit(2)