mirror of
https://github.com/Vyntral/god-eye.git
synced 2026-05-16 13:39:10 +02:00
3a4c230aa7
Complete architectural overhaul. Replaces the v0.1 monolithic scanner with an event-driven pipeline of auto-registered modules. Foundation (internal/): - eventbus: typed pub/sub, 20 event types, race-safe, drop counter - module: registry with phase-based selection - store: thread-safe host store with per-host locks + deep-copy reads - pipeline: coordinator with phase barriers + panic recovery - config: 5 scan profiles + 3 AI tiers + YAML loader + auto-discovery Modules (26 auto-registered across 6 phases): - Discovery: passive (26 sources), bruteforce, recursive, AXFR, GitHub dorks, CT streaming, permutation, reverse DNS, vhost, ASN, supply chain (npm + PyPI) - Enrichment: HTTP probe + tech fingerprint + TLS appliance ID, ports - Analysis: security checks, takeover (110+ sigs), cloud, JavaScript, GraphQL, JWT, headers (OWASP), HTTP smuggling, AI cascade, Nuclei - Reporting: TXT/JSON/CSV writer + AI scan brief AI layer (internal/ai/ + internal/modules/ai/): - Three profiles: lean (16 GB), balanced (32 GB MoE), heavy (64 GB) - Six event-driven handlers: CVE, JS file, HTTP response, secret filter, multi-agent vuln enrichment, anomaly + executive report - Content-hash cache dedups Ollama calls across hosts - Auto-pull of missing models via /api/pull with streaming progress - End-of-scan AI SCAN BRIEF in terminal with top chains + next actions Nuclei compat layer (internal/nucleitpl/): - Executes ~13k community templates (HTTP subset) - Auto-download of nuclei-templates ZIP to ~/.god-eye/nuclei-templates - Scope filter rejects off-host templates (eliminates OSINT FPs) Operations: - Interactive wizard (internal/wizard/) — zero-flag launch - LivePrinter (internal/tui/) — colorized event stream - Diff engine + scheduler (internal/diff, internal/scheduler) for continuous ASM monitoring with webhook alerts - Proxy support (internal/proxyconf/): http / https / socks5 / socks5h + basic auth Fixes #1 — native SOCKS5 / Tor compatibility via --proxy flag. 
185 unit tests across 15 packages, all race-detector clean.
260 lines
7.9 KiB
Go
260 lines
7.9 KiB
Go
// Package report writes the final scan output. It consumes the store (not
|
|
// events) at ScanCompleted time and emits TXT / JSON / CSV via the existing
|
|
// v1 output.WriteOutput function. To preserve v1 output shape during the
|
|
// Fase 0.6 migration, store.Host records are projected to the legacy
|
|
// config.SubdomainResult type before serialization.
|
|
package report
|
|
|
|
import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"sort"
	"strings"
	"time"

	"god-eye/internal/config"
	"god-eye/internal/eventbus"
	"god-eye/internal/module"
	"god-eye/internal/output"
)
|
|
|
|
// NOTE: the former `var _ = time.Now` keep-alive is gone — time is used
// directly in Run (EventMeta.At), so the import never needed pinning.
|
|
|
|
// ModuleName uniquely identifies this module in the registry and is used
// as the Source/Module field of ModuleError events it publishes.
const ModuleName = "report.output"

// reportModule is a stateless reporting module; everything it needs
// (store, bus, config, target) arrives via the module.Context given to Run.
type reportModule struct{}
|
|
|
|
// Register adds the report module to the global module registry so the
// pipeline coordinator can auto-discover and schedule it.
func Register() { module.Register(&reportModule{}) }
|
|
|
|
// Name returns the unique module identifier.
func (*reportModule) Name() string { return ModuleName }

// Phase places the module in the final (reporting) pipeline phase.
func (*reportModule) Phase() module.Phase { return module.PhaseReporting }

// Consumes declares interest in the scan-completed event.
func (*reportModule) Consumes() []eventbus.EventType { return []eventbus.EventType{eventbus.EventScanCompleted} }

// Produces returns nil: this module emits no domain events of its own
// (errors are still reported through the bus as ModuleError).
func (*reportModule) Produces() []eventbus.EventType { return nil }

// DefaultEnabled reports that the module is on by default in every profile.
func (*reportModule) DefaultEnabled() bool { return true }
|
|
|
|
func (*reportModule) Run(mctx module.Context) error {
|
|
// Block until the scan is complete — we're last in the pipeline and the
|
|
// coordinator guarantees reporting runs after every earlier phase.
|
|
done := make(chan struct{}, 1)
|
|
sub := mctx.Bus.Subscribe(eventbus.EventScanCompleted, func(_ context.Context, _ eventbus.Event) {
|
|
select {
|
|
case done <- struct{}{}:
|
|
default:
|
|
}
|
|
})
|
|
defer sub.Unsubscribe()
|
|
|
|
// The report module itself runs in PhaseReporting which is the last
|
|
// phase. ScanCompleted fires right after this phase ends, so we can't
|
|
// rely on it — write output directly from the store instead.
|
|
_ = done
|
|
|
|
results := projectStoreToResults(mctx)
|
|
if len(results) == 0 {
|
|
return nil
|
|
}
|
|
|
|
silent := mctx.Config.Bool("silent", false)
|
|
jsonStdout := mctx.Config.Bool("json", false)
|
|
onlyActive := mctx.Config.Bool("only_active", false)
|
|
outPath := mctx.Config.String("output", "")
|
|
format := mctx.Config.String("format", "txt")
|
|
|
|
if jsonStdout {
|
|
// Project a minimal JSON report to stdout, shape-compatible with v1.
|
|
writeJSONStdout(mctx, results)
|
|
return nil
|
|
}
|
|
|
|
// Console presentation — only when not silent / not JSON-only mode.
|
|
if !silent {
|
|
printResults(results, onlyActive)
|
|
}
|
|
|
|
if outPath != "" {
|
|
if err := writeFile(outPath, format, results); err != nil {
|
|
mctx.Bus.Publish(mctx.Ctx, eventbus.ModuleError{
|
|
EventMeta: eventbus.EventMeta{At: time.Now(), Source: ModuleName, Target: mctx.Target},
|
|
Module: ModuleName,
|
|
Err: fmt.Sprintf("write output %s: %v", outPath, err),
|
|
})
|
|
return err
|
|
}
|
|
}
|
|
|
|
return nil
|
|
}
|
|
|
|
// projectStoreToResults converts store.Host records to the legacy
|
|
// config.SubdomainResult shape expected by output.WriteOutput. Doing the
|
|
// projection here keeps the store schema decoupled from the v1 output format.
|
|
func projectStoreToResults(mctx module.Context) map[string]*config.SubdomainResult {
|
|
hosts := mctx.Store.All(mctx.Ctx)
|
|
out := make(map[string]*config.SubdomainResult, len(hosts))
|
|
for _, h := range hosts {
|
|
r := &config.SubdomainResult{
|
|
Subdomain: h.Subdomain,
|
|
IPs: append([]string(nil), h.IPs...),
|
|
CNAME: h.CNAME,
|
|
PTR: h.PTR,
|
|
ASN: h.ASN,
|
|
Org: h.Org,
|
|
Country: h.Country,
|
|
City: h.City,
|
|
StatusCode: h.StatusCode,
|
|
ContentLength: h.ContentLength,
|
|
Title: h.Title,
|
|
Server: h.Server,
|
|
Tech: append([]string(nil), h.Technologies...),
|
|
WAF: h.WAF,
|
|
TLSVersion: h.TLSVersion,
|
|
TLSIssuer: h.TLSIssuer,
|
|
TLSSelfSigned: h.TLSSelfSigned,
|
|
Ports: append([]int(nil), h.Ports...),
|
|
ResponseMs: h.ResponseMs,
|
|
CloudProvider: h.CloudProvider,
|
|
}
|
|
if !h.TLSExpiry.IsZero() {
|
|
r.TLSExpiry = h.TLSExpiry.Format("2006-01-02")
|
|
}
|
|
if h.TLSFingerprint != nil {
|
|
r.TLSFingerprint = &config.TLSFingerprint{
|
|
Vendor: h.TLSFingerprint.Vendor,
|
|
Product: h.TLSFingerprint.Product,
|
|
Version: h.TLSFingerprint.Version,
|
|
ApplianceType: h.TLSFingerprint.ApplianceKind,
|
|
InternalHosts: append([]string(nil), h.TLSFingerprint.InternalHosts...),
|
|
}
|
|
}
|
|
if h.Takeover != nil {
|
|
r.Takeover = h.Takeover.Service
|
|
}
|
|
// Flatten vulnerabilities → scalar fields v1 consumers expect.
|
|
for _, v := range h.Vulnerabilities {
|
|
switch v.ID {
|
|
case "open-redirect":
|
|
r.OpenRedirect = true
|
|
case "cors-misconfig":
|
|
r.CORSMisconfig = v.Description
|
|
case "dangerous-http-methods":
|
|
r.DangerousMethods = append(r.DangerousMethods, strings.Split(v.Evidence, ", ")...)
|
|
case "git-exposed":
|
|
r.GitExposed = true
|
|
case "svn-exposed":
|
|
r.SvnExposed = true
|
|
case "backup-file":
|
|
r.BackupFiles = append(r.BackupFiles, v.URL)
|
|
}
|
|
}
|
|
// Secrets → legacy field
|
|
for _, s := range h.Secrets {
|
|
r.JSSecrets = append(r.JSSecrets, s.Match)
|
|
}
|
|
// CVEs / AI
|
|
for _, c := range h.CVEs {
|
|
r.CVEFindings = append(r.CVEFindings, c.ID)
|
|
}
|
|
for _, a := range h.AIFindings {
|
|
r.AIFindings = append(r.AIFindings, a.Title)
|
|
if r.AISeverity == "" {
|
|
r.AISeverity = a.Severity
|
|
}
|
|
if r.AIModel == "" {
|
|
r.AIModel = a.Model
|
|
}
|
|
}
|
|
out[h.Subdomain] = r
|
|
}
|
|
return out
|
|
}
|
|
|
|
// printResults is a minimal, non-colorful table print. The full v1
|
|
// presentation is re-introduced when the TUI module lands in Fase 4.
|
|
func printResults(results map[string]*config.SubdomainResult, onlyActive bool) {
|
|
// Sorted output for determinism.
|
|
names := make([]string, 0, len(results))
|
|
for n := range results {
|
|
names = append(names, n)
|
|
}
|
|
// sort by status desc, then name
|
|
sortResultsForPrint(names, results)
|
|
|
|
active := 0
|
|
for _, n := range names {
|
|
r := results[n]
|
|
if r.StatusCode == 0 {
|
|
if onlyActive {
|
|
continue
|
|
}
|
|
fmt.Printf(" %s %s\n", output.Dim("○"), r.Subdomain)
|
|
continue
|
|
}
|
|
active++
|
|
marker := output.Green("●")
|
|
if r.StatusCode >= 300 && r.StatusCode < 400 {
|
|
marker = output.Yellow("◐")
|
|
} else if r.StatusCode >= 400 {
|
|
marker = output.Red("○")
|
|
}
|
|
tech := ""
|
|
if len(r.Tech) > 0 {
|
|
tech = output.Dim(" [" + strings.Join(r.Tech, ", ") + "]")
|
|
}
|
|
fmt.Printf(" %s %s %s%s\n", marker, r.Subdomain, output.Dim(fmt.Sprintf("[%d]", r.StatusCode)), tech)
|
|
}
|
|
fmt.Println()
|
|
fmt.Printf(" %s total, %s active\n", output.BoldWhite(fmt.Sprintf("%d", len(results))), output.BoldGreen(fmt.Sprintf("%d", active)))
|
|
}
|
|
|
|
func sortResultsForPrint(names []string, results map[string]*config.SubdomainResult) {
|
|
// Simple insertion-sort quality ok for small lists; stable enough.
|
|
n := len(names)
|
|
for i := 1; i < n; i++ {
|
|
j := i
|
|
for j > 0 && lessResult(results[names[j]], results[names[j-1]]) {
|
|
names[j], names[j-1] = names[j-1], names[j]
|
|
j--
|
|
}
|
|
}
|
|
}
|
|
|
|
func lessResult(a, b *config.SubdomainResult) bool {
|
|
// Active first, then by subdomain name.
|
|
aActive := a.StatusCode >= 200 && a.StatusCode < 400
|
|
bActive := b.StatusCode >= 200 && b.StatusCode < 400
|
|
if aActive != bActive {
|
|
return aActive && !bActive
|
|
}
|
|
return a.Subdomain < b.Subdomain
|
|
}
|
|
|
|
func writeFile(path, format string, results map[string]*config.SubdomainResult) error {
|
|
// v1 exposes SaveOutput (void); we funnel through it but surface errors
|
|
// by re-checking file writability up front.
|
|
format = strings.ToLower(strings.TrimSpace(format))
|
|
if format == "" {
|
|
format = "txt"
|
|
}
|
|
// Pre-flight: make sure we can create the target file before delegating.
|
|
f, err := os.Create(path)
|
|
if err != nil {
|
|
return err
|
|
}
|
|
f.Close()
|
|
output.SaveOutput(path, format, results)
|
|
return nil
|
|
}
|
|
|
|
// writeJSONStdout emits a v2-native minimal JSON dump to stdout. This is
|
|
// intentionally simpler than v1's ReportBuilder — when the full report
|
|
// generator lands in Fase 4 (Reporting), this is where it'll be wired.
|
|
func writeJSONStdout(mctx module.Context, results map[string]*config.SubdomainResult) {
|
|
enc := json.NewEncoder(os.Stdout)
|
|
enc.SetIndent("", " ")
|
|
_ = enc.Encode(map[string]interface{}{
|
|
"target": mctx.Target,
|
|
"subdomains": results,
|
|
})
|
|
}
|