mirror of
https://github.com/Vyntral/god-eye.git
synced 2026-05-16 05:29:11 +02:00
3a4c230aa7
Complete architectural overhaul. Replaces the v0.1 monolithic scanner with an event-driven pipeline of auto-registered modules. Foundation (internal/): - eventbus: typed pub/sub, 20 event types, race-safe, drop counter - module: registry with phase-based selection - store: thread-safe host store with per-host locks + deep-copy reads - pipeline: coordinator with phase barriers + panic recovery - config: 5 scan profiles + 3 AI tiers + YAML loader + auto-discovery Modules (26 auto-registered across 6 phases): - Discovery: passive (26 sources), bruteforce, recursive, AXFR, GitHub dorks, CT streaming, permutation, reverse DNS, vhost, ASN, supply chain (npm + PyPI) - Enrichment: HTTP probe + tech fingerprint + TLS appliance ID, ports - Analysis: security checks, takeover (110+ sigs), cloud, JavaScript, GraphQL, JWT, headers (OWASP), HTTP smuggling, AI cascade, Nuclei - Reporting: TXT/JSON/CSV writer + AI scan brief AI layer (internal/ai/ + internal/modules/ai/): - Three profiles: lean (16 GB), balanced (32 GB MoE), heavy (64 GB) - Six event-driven handlers: CVE, JS file, HTTP response, secret filter, multi-agent vuln enrichment, anomaly + executive report - Content-hash cache dedups Ollama calls across hosts - Auto-pull of missing models via /api/pull with streaming progress - End-of-scan AI SCAN BRIEF in terminal with top chains + next actions Nuclei compat layer (internal/nucleitpl/): - Executes ~13k community templates (HTTP subset) - Auto-download of nuclei-templates ZIP to ~/.god-eye/nuclei-templates - Scope filter rejects off-host templates (eliminates OSINT FPs) Operations: - Interactive wizard (internal/wizard/) — zero-flag launch - LivePrinter (internal/tui/) — colorized event stream - Diff engine + scheduler (internal/diff, internal/scheduler) for continuous ASM monitoring with webhook alerts - Proxy support (internal/proxyconf/): http / https / socks5 / socks5h + basic auth Fixes #1 — native SOCKS5 / Tor compatibility via --proxy flag. 
185 unit tests across 15 packages, all race-detector clean.
105 lines
2.6 KiB
Go
105 lines
2.6 KiB
Go
// Package cloud wraps v1 cloud detection + S3 bucket discovery.
|
|
// Drains the store, plus listens for late DNSResolved events.
|
|
package cloud
|
|
|
|
import (
|
|
"context"
|
|
"sync"
|
|
"time"
|
|
|
|
"god-eye/internal/eventbus"
|
|
gohttp "god-eye/internal/http"
|
|
"god-eye/internal/module"
|
|
"god-eye/internal/scanner"
|
|
"god-eye/internal/store"
|
|
)
|
|
|
|
// ModuleName is the registry identifier for this module, also used as
// the Source field on every event it publishes.
const ModuleName = "cloud.detect"
|
|
|
|
// cloudModule is a stateless analysis-phase module; all per-run state
// lives inside Run, so the zero value is the only value needed.
type cloudModule struct{}
|
|
|
|
// Register adds the cloud-detection module to the global module registry.
func Register() {
	module.Register(&cloudModule{})
}
|
|
|
|
// Name returns the module's registry identifier.
func (*cloudModule) Name() string {
	return ModuleName
}
|
|
// Phase places this module in the analysis stage of the pipeline.
func (*cloudModule) Phase() module.Phase {
	return module.PhaseAnalysis
}
|
|
func (*cloudModule) Consumes() []eventbus.EventType {
|
|
return []eventbus.EventType{eventbus.EventDNSResolved, eventbus.EventHTTPProbed}
|
|
}
|
|
// Produces lists the event types this module may publish.
func (*cloudModule) Produces() []eventbus.EventType {
	return []eventbus.EventType{eventbus.EventCloudAsset}
}
|
|
// DefaultEnabled reports that this module runs unless explicitly disabled.
func (*cloudModule) DefaultEnabled() bool {
	return true
}
|
|
|
|
func (*cloudModule) Run(mctx module.Context) error {
|
|
timeout := mctx.Config.Int("timeout", 5)
|
|
client := gohttp.GetSharedClient(timeout)
|
|
|
|
handled := make(map[string]struct{})
|
|
var mu sync.Mutex
|
|
shouldHandle := func(host string) bool {
|
|
mu.Lock()
|
|
defer mu.Unlock()
|
|
if _, ok := handled[host]; ok {
|
|
return false
|
|
}
|
|
handled[host] = struct{}{}
|
|
return true
|
|
}
|
|
|
|
handle := func(host string, ips []string, cname string) {
|
|
if !shouldHandle(host) {
|
|
return
|
|
}
|
|
provider := scanner.DetectCloudProvider(ips, cname, "")
|
|
if provider != "" {
|
|
_ = mctx.Store.Upsert(mctx.Ctx, host, func(h *store.Host) {
|
|
if h.CloudProvider == "" {
|
|
h.CloudProvider = provider
|
|
}
|
|
})
|
|
}
|
|
|
|
if buckets := scanner.CheckS3BucketsWithClient(host, client); len(buckets) > 0 {
|
|
for _, url := range buckets {
|
|
mctx.Bus.Publish(mctx.Ctx, eventbus.CloudAssetFound{
|
|
EventMeta: eventbus.EventMeta{At: time.Now(), Source: ModuleName, Target: host},
|
|
Provider: "AWS",
|
|
Kind: "s3-bucket",
|
|
Name: host,
|
|
URL: url,
|
|
Status: "accessible",
|
|
})
|
|
}
|
|
}
|
|
}
|
|
|
|
var wg sync.WaitGroup
|
|
|
|
// Drain: every host already in the store with an IP.
|
|
for _, h := range mctx.Store.All(mctx.Ctx) {
|
|
if h == nil || h.Subdomain == "" || len(h.IPs) == 0 {
|
|
continue
|
|
}
|
|
h := h
|
|
wg.Add(1)
|
|
go func() { defer wg.Done(); handle(h.Subdomain, h.IPs, h.CNAME) }()
|
|
}
|
|
|
|
// Late DNSResolved events.
|
|
sub := mctx.Bus.Subscribe(eventbus.EventDNSResolved, func(_ context.Context, e eventbus.Event) {
|
|
ev, ok := e.(eventbus.DNSResolved)
|
|
if !ok {
|
|
return
|
|
}
|
|
wg.Add(1)
|
|
go func() { defer wg.Done(); handle(ev.Subdomain, ev.IPs, ev.CNAME) }()
|
|
})
|
|
defer sub.Unsubscribe()
|
|
|
|
select {
|
|
case <-time.After(500 * time.Millisecond):
|
|
case <-mctx.Ctx.Done():
|
|
}
|
|
|
|
wg.Wait()
|
|
return nil
|
|
}
|