mirror of https://github.com/Vyntral/god-eye.git (synced 2026-05-16 13:39:10 +02:00)
3a4c230aa7
Complete architectural overhaul.

Replaces the v0.1 monolithic scanner with an event-driven pipeline of auto-registered modules (the registration contract is sketched below).

Foundation (internal/):
- eventbus: typed pub/sub, 20 event types, race-safe, drop counter (surface sketched after the source file below)
- module: registry with phase-based selection
- store: thread-safe host store with per-host locks + deep-copy reads
- pipeline: coordinator with phase barriers + panic recovery
- config: 5 scan profiles + 3 AI tiers + YAML loader + auto-discovery

Modules (26 auto-registered across 6 phases):
- Discovery: passive (26 sources), bruteforce, recursive, AXFR, GitHub dorks, CT streaming, permutation, reverse DNS, vhost, ASN, supply chain (npm + PyPI)
- Enrichment: HTTP probe + tech fingerprint + TLS appliance ID, ports
- Analysis: security checks, takeover (110+ sigs), cloud, JavaScript, GraphQL, JWT, headers (OWASP), HTTP smuggling, AI cascade, Nuclei
- Reporting: TXT/JSON/CSV writer + AI scan brief

AI layer (internal/ai/ + internal/modules/ai/):
- Three profiles: lean (16 GB), balanced (32 GB MoE), heavy (64 GB)
- Six event-driven handlers: CVE, JS file, HTTP response, secret filter, multi-agent vuln enrichment, anomaly + executive report
- Content-hash cache dedups Ollama calls across hosts
- Auto-pull of missing models via /api/pull with streaming progress
- End-of-scan AI SCAN BRIEF in terminal with top chains + next actions

Nuclei compat layer (internal/nucleitpl/):
- Executes ~13k community templates (HTTP subset)
- Auto-download of nuclei-templates ZIP to ~/.god-eye/nuclei-templates
- Scope filter rejects off-host templates (eliminates OSINT FPs)

Operations:
- Interactive wizard (internal/wizard/) — zero-flag launch
- LivePrinter (internal/tui/) — colorized event stream
- Diff engine + scheduler (internal/diff, internal/scheduler) for continuous ASM monitoring with webhook alerts
- Proxy support (internal/proxyconf/): http / https / socks5 / socks5h + basic auth

Fixes #1 — native SOCKS5 / Tor compatibility via --proxy flag.

185 unit tests across 15 packages, all race-detector clean.
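For orientation, here is a minimal sketch of the auto-registration contract as it can be inferred from the takeover module below. Everything except module.Register, module.Phase, PhaseAnalysis, the field names of module.Context, and the Module method set is an assumption, not the repository's actual code.

// Hypothetical sketch of internal/module, inferred from the file below.
package module

import (
    "context"

    "god-eye/internal/eventbus"
)

// Phase orders modules into pipeline stages. The commit message says there
// are six phases; only PhaseAnalysis is visible in the file below, the rest
// are assumed names.
type Phase int

const (
    PhaseDiscovery Phase = iota
    PhaseEnrichment
    PhaseAnalysis
    PhaseReporting
)

// Context bundles the scan-wide dependencies handed to Run. Only the field
// names Ctx, Config, Store, and Bus are visible below; the types here are
// placeholder interfaces.
type Context struct {
    Ctx    context.Context
    Config Config
    Store  interface{} // stands in for the real internal/store handle
    Bus    interface{} // stands in for the real internal/eventbus handle
}

// Config matches the Bool/Int lookups the module performs.
type Config interface {
    Bool(key string, def bool) bool
    Int(key string, def int) int
}

// Module is the contract every auto-registered module satisfies; this
// method set is taken verbatim from the takeover module in this commit.
type Module interface {
    Name() string
    Phase() Phase
    Consumes() []eventbus.EventType
    Produces() []eventbus.EventType
    DefaultEnabled() bool
    Run(Context) error
}

// registry is assumed to be filled at startup, before any concurrency,
// so no locking is shown here.
var registry []Module

// Register appends a module; each module package exposes a Register()
// helper like the one in the file below.
func Register(m Module) { registry = append(registry, m) }

// ByPhase shows how phase-based selection could work (assumed helper).
func ByPhase(p Phase) []Module {
    var out []Module
    for _, m := range registry {
        if m.Phase() == p {
            out = append(out, m)
        }
    }
    return out
}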
125 lines
2.9 KiB
Go
// Package takeover runs v1 takeover detection on every host with a CNAME.
// It reads candidates from the store and listens for late DNSResolved
// events from concurrently running modules.
package takeover

import (
    "context"
    "sync"
    "time"

    "god-eye/internal/eventbus"
    "god-eye/internal/module"
    "god-eye/internal/scanner"
    "god-eye/internal/store"
)

const ModuleName = "takeover.cname"

type takeoverModule struct{}

func Register() { module.Register(&takeoverModule{}) }

func (*takeoverModule) Name() string { return ModuleName }

func (*takeoverModule) Phase() module.Phase { return module.PhaseAnalysis }

func (*takeoverModule) Consumes() []eventbus.EventType {
    return []eventbus.EventType{eventbus.EventDNSResolved}
}

func (*takeoverModule) Produces() []eventbus.EventType {
    return []eventbus.EventType{eventbus.EventTakeoverCandidate}
}

func (*takeoverModule) DefaultEnabled() bool { return true }

func (*takeoverModule) Run(mctx module.Context) error {
    if mctx.Config.Bool("no_takeover", false) {
        return nil
    }
    conc := mctx.Config.Int("concurrency", 100)
    if conc <= 0 {
        conc = 100
    }
    timeout := mctx.Config.Int("timeout", 5)

    // A host can arrive twice: once from the store drain and once from a
    // late DNSResolved event. Check each host at most once.
    processed := make(map[string]struct{})
    var processedMu sync.Mutex
    shouldProcess := func(host string) bool {
        processedMu.Lock()
        defer processedMu.Unlock()
        if _, dup := processed[host]; dup {
            return false
        }
        processed[host] = struct{}{}
        return true
    }

    // Worker pool: each worker fingerprints the CNAME target and, on a hit,
    // records the candidate in the store and publishes it on the bus.
    work := make(chan string, conc*2)
    var wg sync.WaitGroup
    for i := 0; i < conc; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            for host := range work {
                if mctx.Ctx.Err() != nil {
                    return
                }
                service := scanner.CheckTakeover(host, timeout)
                if service == "" {
                    continue
                }
                _ = mctx.Store.Upsert(mctx.Ctx, host, func(h *store.Host) {
                    h.Takeover = &store.Takeover{
                        Service:   service,
                        CNAME:     h.CNAME,
                        Confirmed: false,
                        FoundAt:   time.Now(),
                    }
                })
                mctx.Bus.Publish(mctx.Ctx, eventbus.TakeoverCandidate{
                    EventMeta: eventbus.EventMeta{At: time.Now(), Source: ModuleName, Target: host},
                    Subdomain: host,
                    Service:   service,
                })
            }
        }()
    }

    // Drain: every host with a CNAME is a takeover candidate.
    for _, h := range mctx.Store.All(mctx.Ctx) {
        if h == nil || h.CNAME == "" {
            continue
        }
        if !shouldProcess(h.Subdomain) {
            continue
        }
        select {
        case work <- h.Subdomain:
        case <-mctx.Ctx.Done():
            close(work)
            wg.Wait()
            return nil
        }
    }

    // Catch stragglers: modules still resolving DNS may publish CNAMEs
    // after the store drain above.
    sub := mctx.Bus.Subscribe(eventbus.EventDNSResolved, func(_ context.Context, e eventbus.Event) {
        ev, ok := e.(eventbus.DNSResolved)
        if !ok || ev.CNAME == "" {
            return
        }
        if !shouldProcess(ev.Subdomain) {
            return
        }
        select {
        case work <- ev.Subdomain:
        case <-mctx.Ctx.Done():
        }
    })

    // Grace window for late events before shutting the pool down.
    select {
    case <-time.After(500 * time.Millisecond):
    case <-mctx.Ctx.Done():
    }

    // Unsubscribe before closing the work channel so a handler that fires
    // in between cannot send on a closed channel and panic.
    sub.Unsubscribe()
    close(work)
    wg.Wait()
    return nil
}
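The module above leans entirely on the internal/eventbus surface. As a reading aid, here is a minimal sketch of that surface as inferred from the call sites above; the dispatch strategy, the shape of the Event interface, and every identifier not visible above (New, Handler, the Type methods, Subscription internals) are assumptions about the real package, which per the commit message also carries a drop counter and 18 more event types.

// Hypothetical sketch of the internal/eventbus surface used above.
package eventbus

import (
    "context"
    "sync"
    "time"
)

type EventType int

const (
    EventDNSResolved EventType = iota
    EventTakeoverCandidate
)

// EventMeta is embedded in every event (fields visible above).
type EventMeta struct {
    At     time.Time
    Source string
    Target string
}

func (m EventMeta) Meta() EventMeta { return m }

// Event is assumed here to report its own type; the real bus might
// type-switch on concrete events instead.
type Event interface {
    Meta() EventMeta
    Type() EventType
}

type DNSResolved struct {
    EventMeta
    Subdomain string
    CNAME     string
}

func (DNSResolved) Type() EventType { return EventDNSResolved }

type TakeoverCandidate struct {
    EventMeta
    Subdomain string
    Service   string
}

func (TakeoverCandidate) Type() EventType { return EventTakeoverCandidate }

type Handler func(context.Context, Event)

// Subscription detaches a handler; the takeover module unsubscribes
// before closing its work channel.
type Subscription struct {
    bus *Bus
    typ EventType
    id  int
}

func (s *Subscription) Unsubscribe() {
    s.bus.mu.Lock()
    defer s.bus.mu.Unlock()
    delete(s.bus.handlers[s.typ], s.id)
}

// Bus is a race-safe typed pub/sub. Dispatch is synchronous in this
// sketch; the real bus evidently drops under pressure instead of blocking.
type Bus struct {
    mu       sync.RWMutex
    nextID   int
    handlers map[EventType]map[int]Handler
}

func New() *Bus { return &Bus{handlers: map[EventType]map[int]Handler{}} }

func (b *Bus) Subscribe(t EventType, h Handler) *Subscription {
    b.mu.Lock()
    defer b.mu.Unlock()
    if b.handlers[t] == nil {
        b.handlers[t] = map[int]Handler{}
    }
    b.nextID++
    b.handlers[t][b.nextID] = h
    return &Subscription{bus: b, typ: t, id: b.nextID}
}

func (b *Bus) Publish(ctx context.Context, e Event) {
    // Snapshot handlers under the read lock, then dispatch outside it so a
    // handler may itself Subscribe or Unsubscribe without deadlocking.
    b.mu.RLock()
    hs := make([]Handler, 0, len(b.handlers[e.Type()]))
    for _, h := range b.handlers[e.Type()] {
        hs = append(hs, h)
    }
    b.mu.RUnlock()
    for _, h := range hs {
        h(ctx, e)
    }
}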