mirror of
https://github.com/BigBodyCobain/Shadowbroker.git
synced 2026-05-07 09:56:40 +02:00
release: prepare v0.9.7
This commit is contained in:
+16
-2
@@ -3,15 +3,20 @@
|
||||
# cp .env.example .env
|
||||
|
||||
# ── Required for backend container ─────────────────────────────
|
||||
# OpenSky Network OAuth2 — REQUIRED for airplane telemetry.
|
||||
# Free registration at https://opensky-network.org/index.php?option=com_users&view=registration
|
||||
# Without these the flights layer falls back to ADS-B-only with major gaps in Africa, Asia, and LatAm.
|
||||
OPENSKY_CLIENT_ID=
|
||||
OPENSKY_CLIENT_SECRET=
|
||||
AIS_API_KEY=
|
||||
|
||||
# Admin key to protect sensitive endpoints (settings, updates).
|
||||
# If blank, admin endpoints are only accessible from localhost unless ALLOW_INSECURE_ADMIN=true.
|
||||
# If blank, loopback/localhost requests still work for local single-host dev.
|
||||
# Remote/non-loopback admin access requires ADMIN_KEY, or ALLOW_INSECURE_ADMIN=true in debug-only setups.
|
||||
ADMIN_KEY=
|
||||
|
||||
# Allow insecure admin access without ADMIN_KEY (local dev only).
|
||||
# Allow insecure admin access without ADMIN_KEY (local dev only, beyond loopback).
|
||||
# Requires MESH_DEBUG_MODE=true on the backend; do not enable this for normal use.
|
||||
# ALLOW_INSECURE_ADMIN=false
|
||||
|
||||
# User-Agent for Nominatim geocoding requests (per OSM usage policy).
|
||||
@@ -29,6 +34,10 @@ ADMIN_KEY=
|
||||
# Ukraine air raid alerts — free token from https://alerts.in.ua/
|
||||
# ALERTS_IN_UA_TOKEN=
|
||||
|
||||
# Optional NUFORC UAP sighting map enrichment via Mapbox Tilequery.
|
||||
# Leave blank to skip this optional enrichment.
|
||||
# NUFORC_MAPBOX_TOKEN=
|
||||
|
||||
# Google Earth Engine for VIIRS night lights change detection (optional).
|
||||
# pip install earthengine-api
|
||||
# GEE_SERVICE_ACCOUNT_KEY=
|
||||
@@ -77,6 +86,11 @@ ADMIN_KEY=
|
||||
|
||||
# ── Mesh DM Relay ──────────────────────────────────────────────
|
||||
# MESH_DM_TOKEN_PEPPER=change-me
|
||||
# Optional local-dev DM root external assurance bridge.
|
||||
# These stay commented because they are machine-local file paths, not safe global defaults.
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_PATH=backend/../ops/root_witness_receipt_import.json
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_EXPORT_PATH=backend/../ops/root_transparency_ledger.json
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_READBACK_URI=backend/../ops/root_transparency_ledger.json
|
||||
|
||||
# ── Self Update ────────────────────────────────────────────────
|
||||
# MESH_UPDATE_SHA256=
|
||||
|
||||
@@ -0,0 +1,10 @@
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "npm"
|
||||
directory: "/frontend"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
- package-ecosystem: "pip"
|
||||
directory: "/backend"
|
||||
schedule:
|
||||
interval: "weekly"
|
||||
@@ -1,11 +1,11 @@
|
||||
name: CI — Lint & Test
|
||||
name: CI - Lint & Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
pull_request:
|
||||
branches: [main]
|
||||
workflow_call: # Allow docker-publish to call this workflow as a gate
|
||||
workflow_call:
|
||||
|
||||
jobs:
|
||||
frontend:
|
||||
@@ -22,9 +22,9 @@ jobs:
|
||||
cache: npm
|
||||
cache-dependency-path: frontend/package-lock.json
|
||||
- run: npm ci
|
||||
- run: npm run lint || echo "::warning::ESLint found issues (non-blocking)"
|
||||
- run: npm run format:check || echo "::warning::Prettier found formatting issues (non-blocking)"
|
||||
- run: npx vitest run --reporter=verbose || echo "::warning::Some tests failed (non-blocking)"
|
||||
- run: npm run lint
|
||||
- run: npm run format:check
|
||||
- run: npx vitest run --reporter=verbose
|
||||
- run: npm run build
|
||||
- run: npm run bundle:report
|
||||
|
||||
@@ -33,6 +33,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- name: Run secret scan
|
||||
run: bash backend/scripts/scan-secrets.sh --all
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
@@ -43,8 +45,8 @@ jobs:
|
||||
python-version: "3.11"
|
||||
- name: Install dependencies
|
||||
run: cd backend && uv sync --frozen --group dev
|
||||
- run: cd backend && uv run ruff check . || echo "::warning::Ruff found issues (non-blocking)"
|
||||
- run: cd backend && uv run black --check . || echo "::warning::Black found formatting issues (non-blocking)"
|
||||
- run: cd backend && uv run ruff check .
|
||||
- run: cd backend && uv run black --check .
|
||||
- run: cd backend && uv run python -c "from services.fetchers.retry import with_retry; from services.env_check import validate_env; print('Module imports OK')"
|
||||
- name: Run tests
|
||||
run: cd backend && uv run pytest tests/ -v --tb=short || echo "No pytest tests found (OK)"
|
||||
run: cd backend && uv run pytest tests/ services/infonet/tests -v --tb=short
|
||||
|
||||
@@ -6,10 +6,9 @@ on:
|
||||
tags: ["v*.*.*"]
|
||||
pull_request:
|
||||
branches: ["main"]
|
||||
|
||||
|
||||
env:
|
||||
REGISTRY: ghcr.io
|
||||
# github.repository as <account>/<repo>
|
||||
IMAGE_NAME: ${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
@@ -24,7 +23,6 @@ jobs:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -33,33 +31,23 @@ jobs:
|
||||
runner: ubuntu-latest
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Lowercase image name
|
||||
run: echo "IMAGE_NAME=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Log into registry ${{ env.REGISTRY }}
|
||||
- uses: docker/setup-buildx-action@v3.0.0
|
||||
- name: Log into registry
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v3.0.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Docker metadata
|
||||
id: meta
|
||||
- id: meta
|
||||
uses: docker/metadata-action@v5.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-frontend
|
||||
|
||||
- name: Build and push Docker image by digest
|
||||
id: build
|
||||
- id: build
|
||||
uses: docker/build-push-action@v5.0.0
|
||||
with:
|
||||
context: ./frontend
|
||||
@@ -69,17 +57,14 @@ jobs:
|
||||
cache-from: type=gha,scope=frontend-${{ matrix.platform }}
|
||||
cache-to: type=gha,mode=max,scope=frontend-${{ matrix.platform }}
|
||||
outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-frontend,push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' }}
|
||||
|
||||
- name: Export digest
|
||||
if: github.event_name != 'pull_request'
|
||||
run: |
|
||||
mkdir -p /tmp/digests/frontend
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "/tmp/digests/frontend/${digest#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-frontend-${{ matrix.platform == 'linux/amd64' && 'amd64' || 'arm64' }}
|
||||
path: /tmp/digests/frontend/*
|
||||
@@ -87,36 +72,27 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
merge-frontend:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request'
|
||||
needs: build-frontend
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Lowercase image name
|
||||
run: echo "IMAGE_NAME=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/digests/frontend
|
||||
pattern: digests-frontend-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Log into registry ${{ env.REGISTRY }}
|
||||
uses: docker/login-action@v3.0.0
|
||||
- uses: docker/setup-buildx-action@v3.0.0
|
||||
- uses: docker/login-action@v3.0.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Docker metadata
|
||||
id: meta
|
||||
- id: meta
|
||||
uses: docker/metadata-action@v5.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-frontend
|
||||
@@ -124,7 +100,6 @@ jobs:
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
|
||||
- name: Create and push manifest
|
||||
working-directory: /tmp/digests/frontend
|
||||
run: |
|
||||
@@ -139,7 +114,6 @@ jobs:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
@@ -148,33 +122,23 @@ jobs:
|
||||
runner: ubuntu-latest
|
||||
- platform: linux/arm64
|
||||
runner: ubuntu-24.04-arm
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/checkout@v4
|
||||
- name: Lowercase image name
|
||||
run: echo "IMAGE_NAME=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Log into registry ${{ env.REGISTRY }}
|
||||
- uses: docker/setup-buildx-action@v3.0.0
|
||||
- name: Log into registry
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: docker/login-action@v3.0.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Docker metadata
|
||||
id: meta
|
||||
- id: meta
|
||||
uses: docker/metadata-action@v5.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-backend
|
||||
|
||||
- name: Build and push Docker image by digest
|
||||
id: build
|
||||
- id: build
|
||||
uses: docker/build-push-action@v5.0.0
|
||||
with:
|
||||
context: .
|
||||
@@ -185,17 +149,14 @@ jobs:
|
||||
cache-from: type=gha,scope=backend-${{ matrix.platform }}
|
||||
cache-to: type=gha,mode=max,scope=backend-${{ matrix.platform }}
|
||||
outputs: type=image,name=${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-backend,push-by-digest=true,name-canonical=true,push=${{ github.event_name != 'pull_request' }}
|
||||
|
||||
- name: Export digest
|
||||
if: github.event_name != 'pull_request'
|
||||
run: |
|
||||
mkdir -p /tmp/digests/backend
|
||||
digest="${{ steps.build.outputs.digest }}"
|
||||
touch "/tmp/digests/backend/${digest#sha256:}"
|
||||
|
||||
- name: Upload digest
|
||||
- uses: actions/upload-artifact@v4
|
||||
if: github.event_name != 'pull_request'
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: digests-backend-${{ matrix.platform == 'linux/amd64' && 'amd64' || 'arm64' }}
|
||||
path: /tmp/digests/backend/*
|
||||
@@ -203,36 +164,27 @@ jobs:
|
||||
retention-days: 1
|
||||
|
||||
merge-backend:
|
||||
runs-on: ubuntu-latest
|
||||
if: github.event_name != 'pull_request'
|
||||
needs: build-backend
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- name: Lowercase image name
|
||||
run: echo "IMAGE_NAME=${IMAGE_NAME,,}" >> $GITHUB_ENV
|
||||
|
||||
- name: Download digests
|
||||
uses: actions/download-artifact@v4
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: /tmp/digests/backend
|
||||
pattern: digests-backend-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3.0.0
|
||||
|
||||
- name: Log into registry ${{ env.REGISTRY }}
|
||||
uses: docker/login-action@v3.0.0
|
||||
- uses: docker/setup-buildx-action@v3.0.0
|
||||
- uses: docker/login-action@v3.0.0
|
||||
with:
|
||||
registry: ${{ env.REGISTRY }}
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract Docker metadata
|
||||
id: meta
|
||||
- id: meta
|
||||
uses: docker/metadata-action@v5.0.0
|
||||
with:
|
||||
images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}-backend
|
||||
@@ -240,7 +192,6 @@ jobs:
|
||||
type=semver,pattern={{version}}
|
||||
type=semver,pattern={{major}}.{{minor}}
|
||||
type=raw,value=latest,enable={{is_default_branch}}
|
||||
|
||||
- name: Create and push manifest
|
||||
working-directory: /tmp/digests/backend
|
||||
run: |
|
||||
|
||||
+67
-2
@@ -6,13 +6,32 @@ node_modules/
|
||||
venv/
|
||||
env/
|
||||
.venv/
|
||||
backend/.venv-dir
|
||||
backend/venv-repair*/
|
||||
backend/.venv-repair*/
|
||||
|
||||
# Environment Variables & Secrets
|
||||
.env
|
||||
.envrc
|
||||
.env.local
|
||||
.env.development.local
|
||||
.env.test.local
|
||||
.env.production.local
|
||||
.npmrc
|
||||
.pypirc
|
||||
.netrc
|
||||
*.pem
|
||||
*.key
|
||||
*.crt
|
||||
*.csr
|
||||
*.p12
|
||||
*.pfx
|
||||
id_rsa
|
||||
id_rsa.*
|
||||
id_ed25519
|
||||
id_ed25519.*
|
||||
known_hosts
|
||||
authorized_keys
|
||||
|
||||
# Python caches & compiled files
|
||||
__pycache__/
|
||||
@@ -22,11 +41,15 @@ __pycache__/
|
||||
.Python
|
||||
.ruff_cache/
|
||||
.pytest_cache/
|
||||
.mypy_cache/
|
||||
.hypothesis/
|
||||
.tox/
|
||||
|
||||
# Next.js build output
|
||||
.next/
|
||||
out/
|
||||
build/
|
||||
*.tsbuildinfo
|
||||
|
||||
# Deprecated standalone Infonet Terminal skeleton (migrated into frontend/src/components/InfonetTerminal/)
|
||||
frontend/infonet-terminal/
|
||||
@@ -49,6 +72,8 @@ backend/ais_cache.json
|
||||
backend/carrier_cache.json
|
||||
backend/cctv.db
|
||||
cctv.db
|
||||
*.db
|
||||
*.sqlite
|
||||
*.sqlite3
|
||||
|
||||
# ========================
|
||||
@@ -63,6 +88,7 @@ backend/data/*
|
||||
!backend/data/military_bases.json
|
||||
!backend/data/plan_ccg_vessels.json
|
||||
!backend/data/plane_alert_db.json
|
||||
!backend/data/power_plants.json
|
||||
!backend/data/tracked_names.json
|
||||
!backend/data/yacht_alert_db.json
|
||||
|
||||
@@ -129,6 +155,7 @@ frontend/eslint-report.json
|
||||
# Old backups & repo clones
|
||||
.git_backup/
|
||||
local-artifacts/
|
||||
release-secrets/
|
||||
shadowbroker_repo/
|
||||
frontend/src/components.bak/
|
||||
frontend/src/components/map/icons/backups/
|
||||
@@ -136,6 +163,7 @@ frontend/src/components/map/icons/backups/
|
||||
# Coverage
|
||||
coverage/
|
||||
.coverage
|
||||
.coverage.*
|
||||
dist/
|
||||
|
||||
# Test scratch files (not in tests/ folder)
|
||||
@@ -152,8 +180,11 @@ backend/services/ais_cache.json
|
||||
docs/*
|
||||
!docs/mesh/
|
||||
docs/mesh/*
|
||||
!docs/mesh/threat-model.md
|
||||
!docs/mesh/claims-reconciliation.md
|
||||
!docs/mesh/mesh-canonical-fixtures.json
|
||||
!docs/mesh/mesh-merkle-fixtures.json
|
||||
!docs/mesh/wormhole-dm-root-operations-runbook.md
|
||||
.local-docs/
|
||||
infonet-economy/
|
||||
updatestuff.md
|
||||
@@ -173,6 +204,40 @@ jobs.json
|
||||
.mise.local.toml
|
||||
.codex-tmp/
|
||||
prototype/
|
||||
.runtime/
|
||||
|
||||
# Python UV lock file (regenerated from pyproject.toml)
|
||||
uv.lock
|
||||
# ========================
|
||||
# Runtime state & operator-local data (never commit)
|
||||
# ========================
|
||||
# TimeMachine snapshot cache — regenerated at runtime, can be 100 MB+
|
||||
backend/timemachine/
|
||||
# Operator witness keys, identity material, transparency ledgers (machine-local)
|
||||
ops/
|
||||
# Runtime DM relay state
|
||||
dm_relay.json
|
||||
# Dev scratch notes
|
||||
improvements.txt
|
||||
|
||||
# ========================
|
||||
# Custody verification temp dirs (runtime test artifacts with private keys!)
|
||||
# ========================
|
||||
backend/sb-custody-verify-*/
|
||||
|
||||
# Python egg-info (build artifact, regenerated by pip install -e)
|
||||
*.egg-info/
|
||||
|
||||
# Privacy-core debug build (Windows DLL, 3.6 MB, not shipped)
|
||||
privacy-core/debug/
|
||||
|
||||
# Desktop-shell export stash dirs (empty temp dirs from Tauri build)
|
||||
frontend/.desktop-export-stash-*/
|
||||
|
||||
# Wormhole logs (can be 30 MB+ each, runtime-generated)
|
||||
backend/data/wormhole_stderr.log
|
||||
backend/data/wormhole_stdout.log
|
||||
|
||||
# Runtime caches that already slip through the backend/data/* blanket
|
||||
# (these are caught by the wildcard but listing for clarity)
|
||||
|
||||
# Compressed snapshot archives (can be 100 MB+)
|
||||
*.json.gz
|
||||
|
||||
@@ -1,4 +1,12 @@
|
||||
repos:
|
||||
- repo: local
|
||||
hooks:
|
||||
- id: shadowbroker-secret-scan
|
||||
name: ShadowBroker secret scan
|
||||
entry: bash backend/scripts/scan-secrets.sh --staged
|
||||
language: system
|
||||
pass_filenames: false
|
||||
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v4.6.0
|
||||
hooks:
|
||||
|
||||
@@ -0,0 +1,71 @@
|
||||
# Data Attribution & Licensing
|
||||
|
||||
ShadowBroker aggregates publicly available data from many third-party sources.
|
||||
This file documents each source and its license so operators and users can
|
||||
comply with the terms under which we access that data.
|
||||
|
||||
ShadowBroker itself is licensed under AGPL-3.0 (see `LICENSE`). **This file
|
||||
concerns the *data* rendered by the dashboard, not the source code.**
|
||||
|
||||
---
|
||||
|
||||
## ODbL-licensed sources (Open Database License v1.0)
|
||||
|
||||
Data from these sources is licensed under the
|
||||
[Open Database License v1.0](https://opendatacommons.org/licenses/odbl/1-0/).
|
||||
If you redistribute a derivative database built from these sources, the
|
||||
derivative must also be offered under ODbL and must preserve attribution.
|
||||
|
||||
| Source | URL | What we use it for |
|
||||
|---|---|---|
|
||||
| adsb.lol | https://adsb.lol | Military aircraft positions, regional commercial gap-fill, route enrichment |
|
||||
| OpenStreetMap contributors | https://www.openstreetmap.org/copyright | Nominatim geocoding (LOCATE bar), CARTO basemap tiles (OSM-derived) |
|
||||
|
||||
**Attribution requirement:** the ShadowBroker map UI displays
|
||||
"© OpenStreetMap contributors" and "adsb.lol (ODbL)" in the map attribution
|
||||
control. Do not remove this attribution if you fork or redistribute the app.
|
||||
|
||||
---
|
||||
|
||||
## Other third-party data sources
|
||||
|
||||
These sources have their own terms; consult each link before redistributing.
|
||||
|
||||
| Source | URL | License / Terms | Notes |
|
||||
|---|---|---|---|
|
||||
| OpenSky Network | https://opensky-network.org | OpenSky API terms | Commercial and private aircraft tracking |
|
||||
| CelesTrak | https://celestrak.org | Public domain / no restrictions | Satellite TLE data |
|
||||
| USGS Earthquake Hazards | https://earthquake.usgs.gov | Public domain (US Federal) | Seismic events |
|
||||
| NASA FIRMS | https://firms.modaps.eosdis.nasa.gov | NASA Open Data | Fire/thermal anomalies (VIIRS) |
|
||||
| NASA GIBS | https://gibs.earthdata.nasa.gov | NASA Open Data | MODIS imagery tiles |
|
||||
| NOAA SWPC | https://services.swpc.noaa.gov | Public domain (US Federal) | Space weather, Kp index |
|
||||
| GDELT Project | https://www.gdeltproject.org | CC BY (non-commercial friendly) | Global conflict events |
|
||||
| DeepState Map | https://deepstatemap.live | Per-site terms | Ukraine frontline GeoJSON |
|
||||
| aisstream.io | https://aisstream.io | Free-tier API terms (attribution required) | AIS vessel positions |
|
||||
| Global Fishing Watch | https://globalfishingwatch.org | CC BY 4.0 (for public data) | Fishing activity events |
|
||||
| Microsoft Planetary Computer | https://planetarycomputer.microsoft.com | Sentinel-2 / ESA Copernicus terms | Sentinel-2 imagery |
|
||||
| Copernicus CDSE (Sentinel Hub) | https://dataspace.copernicus.eu | ESA Copernicus open data terms | SAR + optical imagery |
|
||||
| Shodan | https://www.shodan.io | Operator-supplied API key, Shodan ToS | Internet device search |
|
||||
| Smithsonian GVP | https://volcano.si.edu | Attribution required | Volcanoes |
|
||||
| OpenAQ | https://openaq.org | CC BY 4.0 | Air quality stations |
|
||||
| NOAA NWS | https://www.weather.gov | Public domain (US Federal) | Severe weather alerts |
|
||||
| WRI Global Power Plant DB | https://datasets.wri.org | CC BY 4.0 | Power plants |
|
||||
| Wikidata | https://www.wikidata.org | CC0 | Head-of-state lookup |
|
||||
| Wikipedia | https://en.wikipedia.org | CC BY-SA 4.0 | Region summaries |
|
||||
| KiwiSDR (via dyatlov mirror) | http://rx.linkfanel.net | Per-site terms (community mirror by Pierre Ynard) | SDR receiver list — pulled from rx.linkfanel.net to keep load off jks-prv's bandwidth at kiwisdr.com |
|
||||
| OpenMHZ | https://openmhz.com | Per-site terms | Police/fire scanner feeds |
|
||||
| Meshtastic | https://meshtastic.org | Open Source | Mesh radio nodes (protocol) |
|
||||
| Meshtastic Map (Liam Cottle) | https://meshtastic.liamcottle.net | Community project (per-site terms) | Global Meshtastic node positions — polled once per day with an on-disk cache to minimize load on this volunteer-run HTTP API |
|
||||
| APRS-IS | https://www.aprs-is.net | Open / attribution-based | Amateur radio positions |
|
||||
| CARTO basemaps | https://carto.com | CARTO attribution required | Dark map tiles (OSM-derived) |
|
||||
| Esri World Imagery | https://www.arcgis.com | Esri terms | High-res satellite basemap |
|
||||
| IODA (Georgia Tech) | https://ioda.inetintel.cc.gatech.edu | Research/academic terms | Internet outage data |
|
||||
|
||||
---
|
||||
|
||||
## Contact
|
||||
|
||||
If you represent a data provider and have concerns about how ShadowBroker
|
||||
uses your data, please open an issue or contact the maintainer at
|
||||
`bigbodycobain@gmail.com`. We will respond promptly and, if needed, adjust
|
||||
usage or remove the source.
|
||||
@@ -0,0 +1,89 @@
|
||||
# ShadowBroker — Meshtastic MQTT Remediation
|
||||
|
||||
**Version:** 0.9.6
|
||||
**Date:** 2026-04-12
|
||||
**Re:** [meshtastic/firmware#6131](https://github.com/meshtastic/firmware/issues/6131) — Excessive MQTT traffic from ShadowBroker clients
|
||||
|
||||
---
|
||||
|
||||
## What happened
|
||||
|
||||
ShadowBroker is an open-source OSINT situational awareness platform that includes a Meshtastic MQTT listener for displaying mesh network activity on a global map. In prior versions, the MQTT bridge:
|
||||
|
||||
- Subscribed to **28 wildcard topics** (`msh/{region}/#`) covering every known official and community root on startup
|
||||
- Used an aggressive reconnect policy (min 1s / max 30s backoff)
|
||||
- Set keepalive to 30 seconds
|
||||
- Had no client-side rate limiting on inbound messages
|
||||
- Auto-started on every launch with no opt-out
|
||||
|
||||
This produced 1-2 orders of magnitude more traffic than typical Meshtastic clients on the public broker at `mqtt.meshtastic.org`.
|
||||
|
||||
---
|
||||
|
||||
## What we fixed
|
||||
|
||||
### 1. Bridge disabled by default
|
||||
|
||||
The MQTT bridge no longer starts automatically. Operators must explicitly opt in:
|
||||
|
||||
```env
|
||||
MESH_MQTT_ENABLED=true
|
||||
```
|
||||
|
||||
### 2. US-only default subscription
|
||||
|
||||
When enabled, the bridge subscribes to **1 topic** (`msh/US/#`) instead of 28. Additional regions are opt-in:
|
||||
|
||||
```env
|
||||
MESH_MQTT_EXTRA_ROOTS=EU_868,ANZ
|
||||
```
|
||||
|
||||
The UI still displays all regions in its dropdown — only the MQTT subscription scope changed.
|
||||
|
||||
### 3. Client-side rate limiter
|
||||
|
||||
Inbound messages are capped at **100 messages per minute** using a sliding window. Excess messages are silently dropped. A warning is logged periodically when the limiter activates so operators are aware.
|
||||
|
||||
### 4. Conservative connection parameters
|
||||
|
||||
| Parameter | Before | After |
|
||||
|-----------|--------|-------|
|
||||
| Keepalive | 30s | 120s |
|
||||
| Reconnect min delay | 1s | 15s |
|
||||
| Reconnect max delay | 30s | 300s |
|
||||
| QoS | 0 | 0 (unchanged) |
|
||||
|
||||
### 5. Versioned client ID
|
||||
|
||||
Client IDs changed from `sbmesh-{uuid}` to `sb096-{uuid}` so the Meshtastic team can identify ShadowBroker clients and track adoption of the fix by version.
|
||||
|
||||
---
|
||||
|
||||
## Configuration reference
|
||||
|
||||
| Variable | Default | Description |
|
||||
|----------|---------|-------------|
|
||||
| `MESH_MQTT_ENABLED` | `false` | Master switch for the MQTT bridge |
|
||||
| `MESH_MQTT_EXTRA_ROOTS` | _(empty)_ | Comma-separated additional region roots (e.g. `EU_868,ANZ,JP`) |
|
||||
| `MESH_MQTT_INCLUDE_DEFAULT_ROOTS` | `true` | Include US in subscriptions |
|
||||
| `MESH_MQTT_BROKER` | `mqtt.meshtastic.org` | Broker hostname |
|
||||
| `MESH_MQTT_PORT` | `1883` | Broker port |
|
||||
| `MESH_MQTT_USER` | `meshdev` | Broker username |
|
||||
| `MESH_MQTT_PASS` | `large4cats` | Broker password |
|
||||
| `MESH_MQTT_PSK` | _(empty)_ | Hex-encoded PSK (empty = default LongFast key) |
|
||||
|
||||
---
|
||||
|
||||
## Files changed
|
||||
|
||||
- `backend/services/config.py` — Added `MESH_MQTT_ENABLED` flag
|
||||
- `backend/services/mesh/meshtastic_topics.py` — Reduced default roots to US-only
|
||||
- `backend/services/sigint_bridge.py` — Rate limiter, keepalive/backoff tuning, versioned client ID, opt-in gate
|
||||
- `backend/.env.example` — Documented all MQTT options
|
||||
|
||||
---
|
||||
|
||||
## Contact
|
||||
|
||||
Repository: [github.com/BigBodyCobain/Shadowbroker](https://github.com/BigBodyCobain/Shadowbroker)
|
||||
Maintainer: BigBodyCobain
|
||||
@@ -11,15 +11,15 @@
|
||||
|
||||
|
||||
|
||||
https://github.com/user-attachments/assets/248208ec-62f7-49d1-831d-4bd0a1fa6852
|
||||

|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
**ShadowBroker** is a real-time, multi-domain OSINT dashboard that fuses 60+ live intelligence feeds into a single dark-ops map interface. Aircraft, ships, satellites, conflict zones, CCTV networks, GPS jamming, internet-connected devices, police scanners, mesh radio nodes, and breaking geopolitical events — all updating in real time on one screen.
|
||||
**ShadowBroker** is a decentralized real-time, multi-domain OSINT dashboard that fuses 60+ live intelligence feeds into a single dark-ops map interface. Aircraft, ships, satellites, conflict zones, CCTV networks, GPS jamming, internet-connected devices, police scanners, mesh radio nodes, and breaking geopolitical events — all updating in real time on one screen, as well as an obfuscated communications protocol and information-exchange infrastructure.
|
||||
|
||||
Built with **Next.js**, **MapLibre GL**, **FastAPI**, and **Python**. 35+ toggleable data layers. Right-click any point on Earth for a region/country dossier, head-of-state lookup, and the latest Sentinel-2 satellite photo. No user data is collected or transmitted — the dashboard runs entirely in your browser against a self-hosted backend.
|
||||
Built with **Next.js**, **MapLibre GL**, **FastAPI**, and **Python**. 35+ toggleable data layers including SAR ground-change detection. Multiple visual modes (DEFAULT / SATELLITE / FLIR / NVG / CRT). Right-click any point on Earth for a country dossier, head-of-state lookup, and the latest Sentinel-2 satellite photo. No user data is collected or transmitted — the dashboard runs entirely in your browser against a self-hosted backend.
|
||||
|
||||
Designed for analysts, researchers, radio operators, and anyone who wants to see what the world looks like when every public signal is on the same map.
|
||||
|
||||
@@ -38,7 +38,7 @@ ShadowBroker includes an optional Shodan connector for operator-supplied API acc
|
||||
|
||||
## Interesting Use Cases
|
||||
|
||||
* **Transmit on the InfoNet testnet** — the first decentralized intelligence mesh built into an OSINT tool. Obfuscated messaging with gate personas, Dead Drop peer-to-peer exchange, and a built-in terminal CLI. No accounts, no signup. Privacy is not guaranteed yet — this is an experimental testnet — but the protocol is live and being hardened.
|
||||
* **Communicate on the InfoNet testnet** — The first decentralized intelligence mesh built into an OSINT tool. Obfuscated messaging with gate personas, Dead Drop peer-to-peer exchange, and a built-in terminal CLI. No accounts, no signup. Privacy is not guaranteed yet — this is an experimental testnet — but the protocol is live and being hardened.
|
||||
* **Track Air Force One**, the private jets of billionaires and dictators, and every military tanker, ISR, and fighter broadcasting ADS-B — with automatic holding pattern detection when aircraft start circling
|
||||
* **Estimate where US aircraft carriers are** using automated GDELT news scraping — no other open tool does this
|
||||
* **Search internet-connected devices worldwide** via Shodan — cameras, SCADA systems, databases — plotted as a live overlay on the map
|
||||
@@ -51,6 +51,8 @@ ShadowBroker includes an optional Shodan connector for operator-supplied API acc
|
||||
* **Follow earthquakes, volcanic eruptions, active wildfires** (NASA FIRMS), severe weather alerts, and air quality readings worldwide
|
||||
* **Map military bases, 35,000+ power plants**, 2,000+ data centers, and internet outage regions — cross-referenced automatically
|
||||
* **Connect to Meshtastic mesh radio nodes** and APRS amateur radio networks — visible on the map and integrated into Mesh Chat
|
||||
* **Connect an AI agent as a co-analyst** through ShadowBroker's HMAC-signed agentic command channel — supports OpenClaw and any other agent that speaks the protocol (Claude, GPT, LangChain, custom). The agent gets full read/write access to all 35+ data layers, pin placement, map control, SAR ground-change, mesh networking, and alert delivery. It sees everything the operator sees and can take actions on the map in real time.
|
||||
* **Detect ground changes through cloud cover** with SAR (Synthetic Aperture Radar) — mm-scale ground deformation, flood extent, vegetation disturbance, and damage assessments from NASA OPERA and Copernicus EGMS. Define your own watch areas and get anomaly alerts. Free with a NASA Earthdata account.
|
||||
* **Switch visual modes** — DEFAULT, SATELLITE, FLIR (thermal), NVG (night vision), CRT (retro terminal) — via the STYLE button
|
||||
* **Track trains** across the US (Amtrak) and Europe (DigiTraffic) in real time
|
||||
|
||||
@@ -59,7 +61,7 @@ ShadowBroker includes an optional Shodan connector for operator-supplied API acc
|
||||
## ⚡ Quick Start (Docker)
|
||||
|
||||
```bash
|
||||
git clone https://github.com/BigBodyCobain/Shadowbroker.git
|
||||
git clone https://github.com/bigbodycobain/Shadowbroker.git
|
||||
cd Shadowbroker
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
@@ -99,7 +101,7 @@ That's it. `pull` grabs the latest images, `up -d` restarts the containers.
|
||||
# Back up any local config you want to keep (.env, etc.)
|
||||
cd ..
|
||||
rm -rf Shadowbroker
|
||||
git clone https://github.com/BigBodyCobain/Shadowbroker.git
|
||||
git clone https://github.com/bigbodycobain/Shadowbroker.git
|
||||
cd Shadowbroker
|
||||
docker compose pull
|
||||
docker compose up -d
|
||||
@@ -142,35 +144,61 @@ helm install shadowbroker ./helm/chart --create-namespace --namespace shadowbrok
|
||||
|
||||
## Experimental Testnet — No Privacy Guarantee
|
||||
|
||||
ShadowBroker v0.9.6 introduces **InfoNet**, a decentralized intelligence mesh with obfuscated messaging. This is an **experimental testnet** — not a private messenger.
|
||||
ShadowBroker v0.9.7 ships **InfoNet** (decentralized intelligence mesh + Sovereign Shell governance economy), an **agentic AI command channel** (supports OpenClaw and any HMAC-signing agent), **Time Machine snapshot playback**, and **SAR satellite ground-change detection**. This is an **experimental testnet** — not a private messenger and not a production governance system.
|
||||
|
||||
| Channel | Privacy Status | Details |
|
||||
|---|---|---|
|
||||
| **Meshtastic / APRS** | **PUBLIC** | RF radio transmissions are public and interceptable by design. |
|
||||
| **InfoNet Gate Chat** | **OBFUSCATED** | Messages are obfuscated with gate personas and canonical payload signing, but NOT end-to-end encrypted. Metadata is not hidden. |
|
||||
| **Dead Drop DMs** | **STRONGEST CURRENT LANE** | Token-based epoch mailbox with SAS word verification. The strongest lane in this build, but its privacy properties have not yet been verified. |
|
||||
| **Sovereign Shell governance** | **PUBLIC LEDGER** | Petitions, votes, upgrade hashes, and dispute stakes are signed events on a public hashchain. Pseudonymous via gate persona, but governance actions are intentionally observable. |
|
||||
| **Privacy primitives (RingCT / stealth / DEX)** | **NOT YET WIRED** | Locked Protocol contracts are in place, but the cryptographic scheme has not been chosen. The privacy-core Rust crate is the integration target for a future sprint. |
|
||||
|
||||
**Do not transmit anything sensitive on any channel.** Treat all lanes as open and public for now. E2E encryption and deeper native/Tauri hardening are the next milestones. If you fork this project, keep these labels intact and do not make stronger privacy claims than the implementation supports.
|
||||
|
||||
> **For a full picture of what the mesh actually defends against and
|
||||
> what it doesn't, read the
|
||||
> [threat model](docs/mesh/threat-model.md) and the
|
||||
> [claims reconciliation](docs/mesh/claims-reconciliation.md). Every
|
||||
> sentence above is mapped there to the code path that enforces it (or
|
||||
> doesn't).**
|
||||
|
||||
---
|
||||
|
||||
|
||||
## ✨ Features
|
||||
|
||||
### 🧅 InfoNet — Decentralized Intelligence Mesh (NEW in v0.9.6)
|
||||
### 🧅 InfoNet — Decentralized Intelligence Mesh + Sovereign Shell (expanded in v0.9.7)
|
||||
|
||||
The first decentralized intelligence communication layer built directly into an OSINT platform. No accounts, no signup, no identity required. Nothing like this has existed in an OSINT tool before.
|
||||
The first decentralized intelligence communication and governance layer built directly into an OSINT platform. No accounts, no signup, no identity required. v0.9.7 promotes InfoNet from a chat layer into a full governance economy with a clear path to a privacy-preserving decentralized intelligence platform.
|
||||
|
||||
**Communication layer (since v0.9.6):**
|
||||
|
||||
* **InfoNet Experimental Testnet** — A global, obfuscated message relay. Anyone running ShadowBroker can transmit and receive on the InfoNet. Messages pass through a Wormhole relay layer with gate personas, Ed25519 canonical payload signing, and transport obfuscation.
|
||||
* **Mesh Chat Panel** — Three-tab interface:
|
||||
* **INFONET** — Gate chat with obfuscated transport (experimental — not yet E2E encrypted)
|
||||
* **MESH** — Meshtastic radio integration (default tab on startup)
|
||||
* **DEAD DROP** — Peer-to-peer message exchange with token-based epoch mailboxes (strongest current lane)
|
||||
* **Gate Persona System** — Pseudonymous identities with Ed25519 signing keys, prekey bundles, SAS word contact verification, and abuse reporting
|
||||
* **Mesh Chat Panel** — Three-tab interface: **INFONET** (gate chat with obfuscated transport), **MESH** (Meshtastic radio integration), **DEAD DROP** (peer-to-peer message exchange with token-based epoch mailboxes — strongest current lane).
|
||||
* **Gate Persona System** — Pseudonymous identities with Ed25519 signing keys, prekey bundles, SAS word contact verification, and abuse reporting.
|
||||
* **Mesh Terminal** — Built-in CLI: `send`, `dm`, market commands, gate state inspection. Draggable panel, minimizes to the top bar. Type `help` to see all commands.
|
||||
* **Crypto Stack** — Ed25519 signing, X25519 Diffie-Hellman, AESGCM encryption with HKDF key derivation, hash chain commitment system. Double-ratchet DM scaffolding in progress.
|
||||
|
||||
> **Experimental Testnet — No Privacy Guarantee:** InfoNet messages are obfuscated but NOT end-to-end encrypted. The Mesh network (Meshtastic/APRS) is NOT private — radio transmissions are inherently public. Do not send anything sensitive on any channel. E2E encryption is being developed but is not yet implemented. Treat all channels as open and public for now.
|
||||
**Sovereign Shell — governance economy (NEW in v0.9.7):**
|
||||
|
||||
* **Petitions + Governance DSL** — On-chain parameter changes via signed petitions. Type-safe payload executor for `UPDATE_PARAM`, `BATCH_UPDATE_PARAMS`, `ENABLE_FEATURE`, and `DISABLE_FEATURE`. Tunable knobs change by vote — no code deploys required.
|
||||
* **Upgrade-Hash Governance** — Protocol upgrades that need new logic (not just parameter changes) vote on a SHA-256 hash of the verified release. 80% supermajority, 40% quorum, 67% Heavy-Node activation. Lifecycle: signatures → voting → challenge window → awaiting readiness → activated.
|
||||
* **Resolution & Dispute Markets** — Stake on market resolution outcomes (yes / no / data_unavailable), open disputes with bonded evidence, and stake on dispute confirm-or-reverse. Per-row submission state stays isolated so concurrent actions don't share an in-flight slot.
|
||||
* **Evidence Submission** — Bonded evidence bundles with client-side SHA-256 canonicalization that matches Python `repr()` exactly, so hashes round-trip cleanly through the chain.
|
||||
* **Gate Suspension / Shutdown / Appeals** — Filing forms for suspending or shutting down a gate, with a reusable appeal flow auto-targeting the pending petition.
|
||||
* **Bootstrap Eligible-Node-One-Vote** — The first 100 markets resolve via one-vote-per-eligible-node instead of stake-weighted resolution. Eligibility: identity age ≥ 3 days, not in predictor exclusion set, valid Argon2id PoW (Heavy-Node-only). Transitions to staked resolution at 1000 nodes.
|
||||
* **Two-Tier State + Epoch Finality** — Tier 1 events propagate CRDT-style for low latency; Tier 2 events require epoch finality before they can be acted on. Identity rotation, progressive penalties, ramp milestones, and constitutional invariants enforced via `MappingProxyType`.
|
||||
* **Adaptive Polling** — Sovereign Shell views poll every 8 seconds during active voting / challenge / activation phases, every 30–60 seconds when idle. Voting feels live without a websocket layer.
|
||||
* **Verbatim Diagnostics** — Every write button surfaces the backend's verbatim rejection reason. No opaque "denied" toasts.
|
||||
|
||||
**Privacy primitive runway (NEW in v0.9.7):**
|
||||
|
||||
* **Function Keys — Anonymous Citizenship Proof** — A citizen proves "I am an Infonet citizen" without revealing their Infonet identity. 5 of 6 pieces shipped: nullifiers, challenge-response, two-phase commit receipts, enumerated denial codes, batched settlement. Issuance via blind signatures waits on a primitive decision (RSA blind sigs vs BBS+ vs U-Prove vs Idemix).
|
||||
* **Locked Protocol Contracts** — Stable interfaces in `services/infonet/privacy/contracts.py` for ring signatures, stealth addresses, Pedersen commitments, range proofs, and DEX matching. The `privacy-core` Rust crate is the integration target — no caller of the privacy module needs to know which scheme is active.
|
||||
* **Sprint 11+ Path** — When the cryptographic scheme is chosen, primitives wire into the locked Protocols without API churn.
|
||||
|
||||
> **Experimental Testnet — No Privacy Guarantee:** InfoNet messages are obfuscated but NOT end-to-end encrypted. The Mesh network (Meshtastic/APRS) is NOT private — radio transmissions are inherently public. The privacy primitive contracts are scaffolded but not yet wired. Do not send anything sensitive on any channel. Treat all channels as open and public for now.
|
||||
|
||||
### 🔍 Shodan Device Search (NEW in v0.9.6)
|
||||
|
||||
@@ -239,6 +267,17 @@ The first decentralized intelligence communication layer built directly into an
|
||||
* **NVG** — Night vision green phosphor
|
||||
* **CRT** — Retro terminal scanline overlay
|
||||
|
||||
### 🛰️ SAR Ground-Change Detection (NEW)
|
||||
|
||||
* **Synthetic Aperture Radar Layer** — Detects ground changes through cloud cover, at night, anywhere on Earth. Two modes, both free:
|
||||
* **Mode A (Catalog)** — Free Sentinel-1 scene metadata from Alaska Satellite Facility. No account required. Shows when radar passes happened over your AOIs and when the next pass is coming.
|
||||
* **Mode B (Full Anomalies)** — Real-time ground-change alerts from NASA OPERA (DISP, DSWx, DIST-ALERT) and Copernicus EGMS. Requires a free NASA Earthdata account — the in-app wizard walks you through setup in under a minute.
|
||||
* **Anomaly Types** — Ground deformation (mm-scale subsidence, landslides), surface water change (flood extent), vegetation disturbance (deforestation, burn scars, blast craters), damage assessments (UNOSAT/Copernicus EMS verified), and coherence change detection
|
||||
* **Map Visualization** — Color-coded anomaly pins by kind (orange for deformation, cyan for water, green for vegetation, red for damage, purple for coherence). AOI boundaries drawn as dashed polygons with category-based coloring. Click any pin for a detail popup with magnitude, confidence, solver, scene count, and provenance link.
|
||||
* **AOI Editor** — Define areas of interest directly from the map. Click the "EDIT AOIs" button when the SAR layer is active, then use the crosshair tool to click-to-drop an AOI center on the map. Set name, radius (1–500 km), and category. AOIs appear on the map immediately.
|
||||
* **OpenClaw Integration** — The AI agent can inspect SAR anomaly details (`sar_pin_click`) and fly the operator's map to any AOI center (`sar_focus_aoi`) — enabling collaborative analyst workflows.
|
||||
* **Settings Panel** — Dedicated SAR tab in Settings shows Mode A/B status, OpenClaw integration state, and lets you revoke Earthdata credentials with one click.
|
||||
|
||||
### 📻 Software-Defined Radio & SIGINT
|
||||
|
||||
* **KiwiSDR Receivers** — 500+ public SDR receivers plotted worldwide with clustered amber markers
|
||||
@@ -286,65 +325,169 @@ The first decentralized intelligence communication layer built directly into an
|
||||
* **Measurement Tool** — Point-to-point distance & bearing measurement on the map
|
||||
* **LOCATE Bar** — Search by coordinates (31.8, 34.8) or place name (Tehran, Strait of Hormuz) to fly directly to any location — geocoded via OpenStreetMap Nominatim
|
||||
|
||||

|
||||

|
||||
|
||||
### 🤖 Agentic AI Command Channel — OpenClaw + Compatible Agents (expanded in v0.9.7)
|
||||
|
||||
ShadowBroker exposes a **bidirectional agentic AI command channel** — a signed, tier-gated bridge that gives any compatible AI agent full read/write access to the intelligence platform. **OpenClaw is the reference agent**, but the channel is an open protocol: any LLM-driven agent that signs requests with HMAC-SHA256 (Claude Code, GPT, LangChain, custom Python/TypeScript clients, or your own integration) can connect as an analyst that sees the same data as the operator and can take actions on the map. ShadowBroker does *not* bundle an LLM, an agent runtime, or model weights — it provides the surface; you bring the agent.
|
||||
|
||||
v0.9.7 turns ShadowBroker from a dashboard a human watches into an intelligence surface any agent can act on.
|
||||
|
||||
**Channel transport (NEW in v0.9.7):**
|
||||
|
||||
* **Single Command Channel** — `POST /api/ai/channel/command` accepts `{cmd, args}` and dispatches to any registered tool.
|
||||
* **Batched Concurrent Execution** — `POST /api/ai/channel/batch` accepts up to 20 commands in one request. The backend runs them concurrently and returns a fan-out result map. Cuts agent latency by an order of magnitude over sequential calls.
|
||||
* **Tier-Gated Access** — `OPENCLAW_ACCESS_TIER` controls which commands the agent can call: `restricted` exposes the read-only set, `full` adds writes and injection. Discovery endpoint returns `available_commands` so the agent can introspect its own capabilities.
|
||||
* **HMAC-SHA256 Signing** — Every command is signed `HMAC-SHA256(secret, METHOD|path|timestamp|nonce|sha256(body))` with timestamp + nonce replay protection and request integrity. Supports local mode (no config) and remote mode (agent on a different machine / VPS).
|
||||
|
||||
**Capabilities:**
|
||||
|
||||
* **Full Telemetry Access** — The agent queries all 35+ data layers: flights, ships, satellites, SIGINT, conflict events, earthquakes, fires, wastewater, prediction markets, and more. Fast and slow tier endpoints return enriched data with geographic coordinates, timestamps, and source attribution.
|
||||
* **AI Intel Pins** — Place color-coded investigation markers directly on the operator's map. 14 pin categories (threat, anomaly, military, maritime, aviation, SIGINT, infrastructure, etc.) with confidence scores, TTL expiry, source URLs, and batch placement up to 100 pins at once.
|
||||
* **Map Control** — Fly the operator's map view to any coordinate, trigger satellite imagery lookups, and open region dossiers. The agent can direct the operator's attention to specific locations in real time.
|
||||
* **SAR Ground-Change** — Query SAR anomaly feeds, inspect pin details, manage AOIs, and fly the map to watch areas. The agent can monitor for ground deformation, flood extent, or damage and promote anomalies to pins.
|
||||
* **Native Layer Injection** — Push custom data directly into ShadowBroker's native layers (CCTV cameras, ships, SIGINT nodes, military bases, etc.) so agent-discovered sources render alongside real feeds.
|
||||
* **Wormhole Mesh Participation** — The agent can join the decentralized InfoNet, post signed messages, join encrypted gate channels, send/receive encrypted DMs, and interact with Meshtastic radio and Dead Drops — operating as a full mesh peer.
|
||||
* **Sovereign Shell Participation (v0.9.7)** — File petitions, sign and vote on governance changes, stake on resolutions and disputes, signal Heavy-Node readiness for upgrades — all programmatically, all gated by tier and HMAC. Agents become first-class participants in the decentralized intelligence economy.
|
||||
* **Geocoding & Proximity Scans** — Resolve place names to coordinates, then scan all layers within a radius for a complete proximity digest.
|
||||
* **News & GDELT Near Location** — Pull GDELT conflict events and aggregated news articles near any coordinate for regional situational awareness.
|
||||
* **Alert Delivery** — Send branded intelligence briefs, warnings, and threat notifications to Discord webhooks and Telegram channels.
|
||||
* **Intelligence Reports** — Generate structured reports with summary stats, top military flights, correlations, earthquake activity, SIGINT counts, and pin inventories.
|
||||
* **Auditable** — Every channel call is logged; the operator can introspect what the agent has done.
|
||||
|
||||
**Connect an agent:** Open the AI Intel panel in the left sidebar, click **Connect Agent**, and copy the HMAC secret. From there, point any compatible agent at the channel — for OpenClaw, import `ShadowBrokerClient` from the OpenClaw skill package; for any other agent, use the same HMAC contract documented above (timestamp + nonce + body digest, tier-gated). The channel is the protocol, not the agent.
|
||||
|
||||
### ⏱️ Time Machine — Snapshot Playback (NEW in v0.9.7)
|
||||
|
||||
A media-style transport for the entire telemetry feed. Treat the live map as a recording that can be scrubbed, paused, and replayed.
|
||||
|
||||
* **Live ↔ Snapshot Toggle** — Switching to snapshot mode pauses the global polling loop instantly; switching back to Live invalidates ETags and force-refreshes both fast and slow tiers so the dashboard catches up without a stale-frame flicker.
|
||||
* **Hourly Index** — Every captured snapshot is indexed by its hour bucket with `count`, `latest_id`, `latest_ts`, and the full `snapshot_ids` list. Jump to any captured timestamp directly from the timeline scrubber.
|
||||
* **Frame Interpolation** — Moving entities (aircraft, ships, satellites, military flights) interpolate smoothly between recorded frames during playback so motion stays continuous even when snapshots are sparse.
|
||||
* **Variable Playback Speed** — Step, play, fast-forward, and rewind through saved telemetry at adjustable speed.
|
||||
* **Profile-Aware** — Each snapshot records the privacy profile that was active when it was captured, so playback is faithful to what an operator on that profile would have seen.
|
||||
* **Operator-Side, Not Server-Side** — Snapshots are stored locally in the backend; no third party ever sees the playback timeline.
|
||||
|
||||
### 📦 API Keys Panel — Path-First, Read-Only (NEW in v0.9.7)
|
||||
|
||||
Settings → API Keys is now a read-only registry. Key values never reach the browser process — not even an obfuscated prefix. The panel surfaces:
|
||||
|
||||
* The absolute path to the backend `.env` file as resolved by `Path(__file__).resolve()` — works on every OS, every drive, every install location (Linux `/home/...`, macOS `/Users/...`, Windows on any drive, Docker containers, cloud VMs).
|
||||
* `[exists]` / `[will be created on first save]` / `[NOT WRITABLE — edit by hand]` indicators on the path itself.
|
||||
* The path to the `.env.example` template so users can copy it and fill in their keys.
|
||||
* A binary `CONFIGURED` / `NOT CONFIGURED` badge per key, plus a copy-pastable env line (e.g. `OPENSKY_CLIENT_ID=YOUR_VALUE`) the user can drop into the file by hand.
|
||||
|
||||
OpenSky API credentials are now a **critical-warn** environment requirement: the startup environment check flags missing OpenSky OAuth2 credentials with a strong warning, and the changelog modal links directly to the free registration page. Without them, the flights layer falls back to ADS-B-only coverage with significant gaps in Africa, Asia, and Latin America.
|
||||
|
||||
---
|
||||
|
||||
## 🏗️ Architecture
|
||||
|
||||
ShadowBroker v0.9.7 is composed of three vertically-stacked planes — the **Operator UI**, the **Backend Service Plane**, and the **Decentralized Layer (InfoNet)** — plus two cross-cutting bridges (the **Time Machine** and the **Agentic AI Channel**, which is the protocol that OpenClaw and any other compatible agent connects through) and a **Privacy Core** Rust crate that backstops both the legacy mesh and the future shielded coin / DEX work.
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FRONTEND (Next.js) │
|
||||
│ │
|
||||
│ ┌─────────────┐ ┌──────────┐ ┌───────────┐ ┌────────────┐ │
|
||||
│ │ MapLibre GL │ │ NewsFeed │ │ Control │ │ Mesh │ │
|
||||
│ │ 2D WebGL │ │ SIGINT │ │ Panels │ │ Chat │ │
|
||||
│ │ Map Render │ │ Intel │ │ Radio │ │ Terminal │ │
|
||||
│ └──────┬──────┘ └────┬─────┘ └─────┬─────┘ └─────┬──────┘ │
|
||||
│ └──────────────┼──────────────┼──────────────┘ │
|
||||
│ │ REST + WebSocket │
|
||||
├────────────────────────┼────────────────────────────────────────┤
|
||||
│ BACKEND (FastAPI) │
|
||||
│ │ │
|
||||
│ ┌─────────────────────┼─────────────────────────────────────┐ │
|
||||
│ │ Data Fetcher (Scheduler) │ │
|
||||
│ │ │ │
|
||||
│ │ ┌───────────┬───────────┬───────────┬───────────┐ │ │
|
||||
│ │ │ OpenSky │ adsb.lol │ CelesTrak │ USGS │ │ │
|
||||
│ │ │ Flights │ Military │ Sats │ Quakes │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ AIS WS │ Carrier │ GDELT │ CCTV (13) │ │ │
|
||||
│ │ │ Ships │ Tracker │ Conflict │ Cameras │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ DeepState │ RSS │ Region │ GPS │ │ │
|
||||
│ │ │ Frontline │ Intel │ Dossier │ Jamming │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ NASA │ NOAA │ IODA │ KiwiSDR │ │ │
|
||||
│ │ │ FIRMS │ Space Wx │ Outages │ Radios │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ Shodan │ Amtrak │ SatNOGS │Meshtastic │ │ │
|
||||
│ │ │ Devices │ DigiTraf │ TinyGS │ APRS │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ Volcanoes │ Weather │ Fishing │ Mil Bases │ │ │
|
||||
│ │ │ Air Qual. │ Alerts │ Activity │Pwr Plants │ │ │
|
||||
│ │ ├───────────┼───────────┼───────────┼───────────┤ │ │
|
||||
│ │ │ Sentinel │ MODIS │ VIIRS │ Data │ │ │
|
||||
│ │ │ Hub/STAC │ Terra │ Nightlts │ Centers │ │ │
|
||||
│ │ └───────────┴───────────┴───────────┴───────────┘ │ │
|
||||
│ └───────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌───────────────────────────────────────────────────────────┐ │
|
||||
│ │ Wormhole / InfoNet Relay │ │
|
||||
│ │ Gate Personas │ Canonical Signing │ Dead Drop DMs │ │
|
||||
│ └───────────────────────────────────────────────────────────┘ │
|
||||
│ │
|
||||
│ ┌───────────────────────────────────────────────────────────┐ │
|
||||
│ │ GHCR (Pre-built Images) │ │
|
||||
│ │ ghcr.io/bigbodycobain/shadowbroker-backend:latest │ │
|
||||
│ │ ghcr.io/bigbodycobain/shadowbroker-frontend:latest │ │
|
||||
│ │ Multi-arch: linux/amd64 + linux/arm64 │ │
|
||||
│ └───────────────────────────────────────────────────────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
╔═════════════════════════════════════════════════════════════════════════════╗
|
||||
║ OPERATOR UI (Next.js + MapLibre) ║
|
||||
║ ║
|
||||
║ ┌────────────────┐ ┌──────────┐ ┌────────────────┐ ┌────────────────┐ ║
|
||||
║ │ MapLibre GL │ │ NewsFeed │ │ Sovereign Shell│ │ Mesh Chat │ ║
|
||||
║ │ WebGL render │ │ SIGINT │ │ Petitions / │ │ + Mesh Term. │ ║
|
||||
║ │ + clusters │ │ GDELT │ │ Upgrades / │ │ (Infonet / │ ║
|
||||
║ │ │ │ Threat │ │ Disputes / │ │ Mesh / │ ║
|
||||
║ │ │ │ │ │ Gates / │ │ Dead Drop) │ ║
|
||||
║ │ │ │ │ │ Bootstrap / │ │ │ ║
|
||||
║ │ │ │ │ │ Function Keys │ │ │ ║
|
||||
║ └──────┬─────────┘ └────┬─────┘ └────────┬───────┘ └────────┬───────┘ ║
|
||||
║ │ │ │ │ ║
|
||||
║ ┌──────┴─────────────────┴─────────────────┴───────────────────┴───────┐ ║
|
||||
║ │ Time Machine ◀── snapshot playback ── snapshotMode toggle ──▶ Live │ ║
|
||||
║ │ hourly index │ frame interpolation │ profile-aware │ per-tier ETag │ ║
|
||||
║ └──────────────────────────────────┬───────────────────────────────────┘ ║
|
||||
║ │ REST + /api/[...path] proxy ║
|
||||
╠═════════════════════════════════════╪═══════════════════════════════════════╣
|
||||
║ BACKEND SERVICE PLANE (FastAPI) ║
|
||||
║ │ ║
|
||||
║ ┌──────────────────────────────────┴────────────────────────────────────┐ ║
|
||||
║ │ Data Fetcher (APScheduler — fast / slow tiers) │ ║
|
||||
║ │ │ ║
|
||||
║ │ ┌───────────┬───────────┬───────────┬───────────┬───────────┐ │ ║
|
||||
║ │ │ OpenSky* │ adsb.lol │ CelesTrak │ USGS │ AIS WS │ │ ║
|
||||
║ │ │ Flights │ Military │ Sats │ Quakes │ Ships │ │ ║
|
||||
║ │ ├───────────┼───────────┼───────────┼───────────┼───────────┤ │ ║
|
||||
║ │ │ Carrier │ GDELT │ CCTV (12) │ DeepState │ NASA │ │ ║
|
||||
║ │ │ Tracker │ Conflict │ Cameras │ Frontline │ FIRMS │ │ ║
|
||||
║ │ ├───────────┼───────────┼───────────┼───────────┼───────────┤ │ ║
|
||||
║ │ │ GPS │ KiwiSDR │ Shodan │ Amtrak │ SatNOGS │ │ ║
|
||||
║ │ │ Jamming │ Radios │ Devices │ DigiTraf │ TinyGS │ │ ║
|
||||
║ │ ├───────────┼───────────┼───────────┼───────────┼───────────┤ │ ║
|
||||
║ │ │ Volcanoes │ Weather │ Fishing │ Mil Bases │ IODA │ │ ║
|
||||
║ │ │ Air Qual │ Alerts │ Activity │ PwrPlants │ Outages │ │ ║
|
||||
║ │ ├───────────┼───────────┼───────────┼───────────┼───────────┤ │ ║
|
||||
║ │ │ Sentinel │ MODIS │ VIIRS │ Data │ Meshtastic│ │ ║
|
||||
║ │ │ Hub/STAC │ Terra │ Nightlts │ Centers │ APRS │ │ ║
|
||||
║ │ ├───────────┴───────────┴───────────┴───────────┴───────────┤ │ ║
|
||||
║ │ │ SAR (NEW v0.9.7) │ │ ║
|
||||
║ │ │ Mode A: ASF Search catalog (free, no account) │ │ ║
|
||||
║ │ │ Mode B: NASA OPERA / Copernicus EGMS / GFM / EMS / │ │ ║
|
||||
║ │ │ UNOSAT ground-change anomalies (opt-in) │ │ ║
|
||||
║ │ └───────────────────────────────────────────────────────────┘ │ ║
|
||||
║ │ * OpenSky: REQUIRED for global flight coverage │ ║
|
||||
║ └───────────────────────────────────────────────────────────────────────┘ ║
|
||||
║ │ ║
|
||||
║ ┌──────────────────────────────────┴────────────────────────────────────┐ ║
|
||||
║ │ Snapshot Store (Time Machine source) │ ║
|
||||
║ │ Hourly index │ per-snapshot layer manifest │ profile metadata │ ║
|
||||
║ └───────────────────────────────────────────────────────────────────────┘ ║
|
||||
║ ║
|
||||
║ ┌───────────────────────────────────────────────────────────────────────┐ ║
|
||||
║ │ Agentic AI Channel (HMAC-SHA256, tier-gated — OpenClaw + others) │ ║
|
||||
║ │ │ ║
|
||||
║ │ POST /api/ai/channel/command → one tool call │ ║
|
||||
║ │ POST /api/ai/channel/batch → up to 20 concurrent tool calls │ ║
|
||||
║ │ │ ║
|
||||
║ │ Tier: restricted (read-only) │ full (read + write + inject) │ ║
|
||||
║ │ Auth: X-SB-Timestamp + X-SB-Nonce + X-SB-Signature │ ║
|
||||
║ │ Sig = HMAC-SHA256(secret, METHOD|path|ts|nonce|sha256(body)) │ ║
|
||||
║ └───────────────────────────────────────────────────────────────────────┘ ║
|
||||
╠═════════════════════════════════════════════════════════════════════════════╣
|
||||
║ DECENTRALIZED LAYER (InfoNet Testnet — signed events) ║
|
||||
║ ║
|
||||
║ ┌────────────────────────────┐ ┌──────────────────────────────────┐ ║
|
||||
║ │ Mesh Hashchain │ │ Sovereign Shell Governance │ ║
|
||||
║ │ │ │ │ ║
|
||||
║ │ Ed25519 signed events │ │ Petitions (DSL: UPDATE_PARAM, │ ║
|
||||
║ │ Public-key binding │ │ ENABLE_FEATURE …) │ ║
|
||||
║ │ Replay / sequence guard │ │ Upgrade-Hash voting (80% / 40% │ ║
|
||||
║ │ Two-tier finality │ │ quorum / 67% Heavy) │ ║
|
||||
║ │ ├ Tier 1 (CRDT, fast) │ │ Resolution & Dispute markets │ ║
|
||||
║ │ └ Tier 2 (epoch finality)│ │ Gate suspend / shutdown / appeal│ ║
|
||||
║ │ Identity rotation │ │ Bootstrap eligible-node-1-vote │ ║
|
||||
║ │ Constitutional invariants │ │ (Argon2id PoW, Heavy-Node only)│ ║
|
||||
║ │ (MappingProxyType) │ │ Function Keys (5 of 6 pieces) │ ║
|
||||
║ └─────────────┬──────────────┘ └─────────────┬────────────────────┘ ║
|
||||
║ │ │ ║
|
||||
║ └──────────────┬──────────────────┘ ║
|
||||
║ │ ║
|
||||
║ ┌────────────────────────────┴──────────────────────────────────────┐ ║
|
||||
║ │ Wormhole / InfoNet Relay (transport layer) │ ║
|
||||
║ │ Gate personas │ canonical signing │ Dead Drop epoch mailboxes │ ║
|
||||
║ └───────────────────────────────────────────────────────────────────┘ ║
|
||||
╠═════════════════════════════════════════════════════════════════════════════╣
|
||||
║ PRIVACY CORE (Rust crate — locked Protocol contracts) ║
|
||||
║ ║
|
||||
║ privacy-core/ ─► Argon2id │ Ed25519/X25519 │ AESGCM │ HKDF ║
|
||||
║ Ring sigs* │ Stealth addrs* │ Pedersen* │ Bulletproofs*║
|
||||
║ Blind-sig issuance* (RSA / BBS+ / U-Prove / Idemix) ║
|
||||
║ ║
|
||||
║ * = locked Protocol contract; cryptographic primitive lands Sprint 11+ ║
|
||||
╚═════════════════════════════════════════════════════════════════════════════╝
|
||||
|
||||
Distribution
|
||||
────────────
|
||||
GitHub (primary): ghcr.io/bigbodycobain/shadowbroker-{backend,frontend}
|
||||
GitLab (mirror): registry.gitlab.com/bigbodycobain/shadowbroker/{backend,frontend}
|
||||
Multi-arch: linux/amd64 + linux/arm64 (Raspberry Pi 5 supported)
|
||||
Desktop: Tauri shell → packaged backend-runtime + Next.js frontend
|
||||
```
|
||||
|
||||
---
|
||||
@@ -353,7 +496,7 @@ The first decentralized intelligence communication layer built directly into an
|
||||
|
||||
| Source | Data | Update Frequency | API Key Required |
|
||||
|---|---|---|---|
|
||||
| [OpenSky Network](https://opensky-network.org) | Commercial & private flights | ~60s | Optional (anonymous limited) |
|
||||
| [OpenSky Network](https://opensky-network.org) | Commercial & private flights | ~60s | **Yes** |
|
||||
| [adsb.lol](https://adsb.lol) | Military aircraft | ~60s | No |
|
||||
| [aisstream.io](https://aisstream.io) | AIS vessel positions | Real-time WebSocket | **Yes** |
|
||||
| [CelesTrak](https://celestrak.org) | Satellite orbital positions (TLE + SGP4) | ~60s | No |
|
||||
@@ -401,7 +544,7 @@ The first decentralized intelligence communication layer built directly into an
|
||||
|
||||
### 🐳 Docker Setup (Recommended for Self-Hosting)
|
||||
|
||||
The repo includes a `docker-compose.yml` that pulls pre-built images from the GitHub Container Registry.
|
||||
The repo includes a `docker-compose.yml` that pulls pre-built images from GitHub Container Registry.
|
||||
|
||||
```bash
|
||||
git clone https://github.com/BigBodyCobain/Shadowbroker.git
|
||||
@@ -440,14 +583,20 @@ Open `http://localhost:3000` to view the dashboard.
|
||||
|
||||
### 🐋 Standalone Deploy (Portainer, Uncloud, NAS, etc.)
|
||||
|
||||
No need to clone the repo. Use the pre-built images published to the GitHub Container Registry.
|
||||
No need to clone the repo. Use the pre-built images from GitHub Container Registry; the GitLab registry can serve as a mirror if you publish the images there.
|
||||
|
||||
Create a `docker-compose.yml` with the following content and deploy it directly — paste it into Portainer's stack editor, `uncloud deploy`, or any Docker host:
|
||||
|
||||
```yaml
|
||||
## Image registry — uncomment ONE line per service:
|
||||
## GitHub (primary): ghcr.io/bigbodycobain/shadowbroker-backend:latest
|
||||
## GitLab (mirror): registry.gitlab.com/bigbodycobain/shadowbroker/backend:latest
|
||||
|
||||
|
||||
services:
|
||||
backend:
|
||||
image: ghcr.io/bigbodycobain/shadowbroker-backend:latest
|
||||
# image: registry.gitlab.com/bigbodycobain/shadowbroker/backend:latest
|
||||
container_name: shadowbroker-backend
|
||||
ports:
|
||||
- "8000:8000"
|
||||
@@ -466,6 +615,7 @@ services:
|
||||
|
||||
frontend:
|
||||
image: ghcr.io/bigbodycobain/shadowbroker-frontend:latest
|
||||
# image: registry.gitlab.com/bigbodycobain/shadowbroker/frontend:latest
|
||||
container_name: shadowbroker-frontend
|
||||
ports:
|
||||
- "3000:3000"
|
||||
@@ -489,17 +639,19 @@ volumes:
|
||||
|
||||
If you just want to run the dashboard without dealing with terminal commands:
|
||||
|
||||
1. Go to the **[Releases](../../releases)** tab on the right side of this GitHub page.
|
||||
1. Go to the **[Releases](../../releases)** tab on the right side of this repo page.
|
||||
2. Download the latest `.zip` file from the release.
|
||||
3. Extract the folder to your computer.
|
||||
4. **Windows:** Double-click `start.bat`.
|
||||
**Mac/Linux:** Open terminal, type `chmod +x start.sh` and run `./start.sh`.
|
||||
   **Mac/Linux:** Open a terminal, run `chmod +x start.sh` and `dos2unix start.sh`, then run `./start.sh`.
|
||||
5. It will automatically install everything and launch the dashboard!
|
||||
|
||||
Local launcher notes:
|
||||
|
||||
- `start.bat` / `start.sh` run the app without Docker — they install dependencies and start both servers directly.
|
||||
- If Wormhole identity or DM contact endpoints fail after an upgrade, check the `docs/mesh/` folder for troubleshooting.
|
||||
- For DM root witness, transparency, and operator monitoring rollout, start with `docs/mesh/wormhole-dm-root-operations-runbook.md`.
|
||||
- For sample DM root ops bridge assets, also see `scripts/mesh/poll-dm-root-health-alerts.mjs`, `scripts/mesh/export-dm-root-health-prometheus.mjs`, `scripts/mesh/publish-external-root-witness-package.mjs`, `scripts/mesh/smoke-external-root-witness-flow.mjs`, `scripts/mesh/smoke-root-transparency-publication-flow.mjs`, `scripts/mesh/smoke-dm-root-deployment-flow.mjs`, `scripts/mesh/sync-dm-root-external-assurance.mjs`, and `docs/mesh/examples/`.
|
||||
|
||||
---
|
||||
|
||||
@@ -526,19 +678,19 @@ cd backend
|
||||
python -m venv venv
|
||||
venv\Scripts\activate # Windows
|
||||
# source venv/bin/activate # macOS/Linux
|
||||
pip install . # installs all dependencies from pyproject.toml
|
||||
pip install .
|
||||
|
||||
# Optional helper scripts (creates venv + installs dev deps)
|
||||
# Windows PowerShell
|
||||
# .\scripts\setup-venv.ps1
|
||||
# .\backend\scripts\setup-venv.ps1
|
||||
# macOS/Linux
|
||||
# ./scripts/setup-venv.sh
|
||||
# ./backend/scripts/setup-venv.sh
|
||||
|
||||
# Optional env check (prints warnings for missing keys)
|
||||
# Windows PowerShell
|
||||
# .\scripts\check-env.ps1
|
||||
# .\backend\scripts\check-env.ps1
|
||||
# macOS/Linux
|
||||
# ./scripts/check-env.sh
|
||||
# ./backend/scripts/check-env.sh
|
||||
|
||||
# Create .env with your API keys
|
||||
echo "AIS_API_KEY=your_aisstream_key" >> .env
|
||||
@@ -547,7 +699,7 @@ echo "OPENSKY_CLIENT_SECRET=your_opensky_secret" >> .env
|
||||
|
||||
# Frontend setup
|
||||
cd ../frontend
|
||||
npm install
|
||||
npm ci
|
||||
```
|
||||
|
||||
### Running
|
||||
@@ -661,81 +813,73 @@ The platform is optimized for handling massive real-time datasets:
|
||||
```
|
||||
Shadowbroker/
|
||||
├── backend/
|
||||
│ ├── main.py # FastAPI app, middleware, API routes
|
||||
│ ├── pyproject.toml # Python dependencies
|
||||
│ ├── main.py # FastAPI app, middleware, API routes (~4,000 lines)
|
||||
│ ├── cctv.db # SQLite CCTV camera database (auto-generated)
|
||||
│ ├── config/
|
||||
│ │ └── news_feeds.json # User-customizable RSS feed list
|
||||
│ ├── services/
|
||||
│ │ ├── data_fetcher.py # Core scheduler — orchestrates all data sources
|
||||
│ │ ├── ais_stream.py # AIS WebSocket client (25K+ vessels)
|
||||
│ │ ├── carrier_tracker.py # OSINT carrier position estimator
|
||||
│ │ ├── cctv_pipeline.py # 14-source CCTV camera ingestion pipeline
|
||||
│ │ ├── correlation_engine.py # Cross-layer intelligence correlation
|
||||
│ │ ├── carrier_tracker.py # OSINT carrier position estimator (GDELT news scraping)
|
||||
│ │ ├── cctv_pipeline.py # 13-source CCTV camera ingestion pipeline
|
||||
│ │ ├── geopolitics.py # GDELT + Ukraine frontline + air alerts
|
||||
│ │ ├── region_dossier.py # Right-click country/city intelligence
|
||||
│ │ ├── radio_intercept.py # Police scanner feeds + OpenMHZ
|
||||
│ │ ├── oracle_service.py # Prediction market oracle resolution
|
||||
│ │ ├── kiwisdr_fetcher.py # KiwiSDR receiver scraper
|
||||
│ │ ├── sentinel_search.py # Sentinel-2 STAC imagery search
|
||||
│ │ ├── shodan_connector.py # Shodan device search connector
|
||||
│ │ ├── sigint_bridge.py # APRS-IS TCP bridge
|
||||
│ │ ├── config.py # pydantic-settings configuration
|
||||
│ │ ├── network_utils.py # HTTP client with curl fallback
|
||||
│ │ ├── api_settings.py # API key management
|
||||
│ │ ├── news_feed_config.py # RSS feed config manager
|
||||
│ │ ├── fetchers/
|
||||
│ │ │ ├── _store.py # Thread-safe in-memory data store
|
||||
│ │ │ ├── flights.py # OpenSky, adsb.lol, GPS jamming, holding patterns
|
||||
│ │ │ ├── geo.py # AIS vessels, carriers, GDELT, fishing activity
|
||||
│ │ │ ├── satellites.py # CelesTrak TLE + SGP4 propagation
|
||||
│ │ │ ├── earth_observation.py # Quakes, fires, volcanoes, air quality, weather
|
||||
│ │ │ ├── infrastructure.py # Data centers, power plants, military bases
|
||||
│ │ │ ├── prediction_markets.py # Polymarket aggregation
|
||||
│ │ │ ├── trains.py # Amtrak + DigiTraffic European rail
|
||||
│ │ │ ├── sigint.py # SatNOGS, TinyGS, APRS, Meshtastic
|
||||
│ │ │ ├── plane_alert.py # Plane-Alert DB enrichment
|
||||
│ │ │ ├── meshtastic_map.py # Meshtastic MQTT + map node aggregation
|
||||
│ │ │ ├── military.py # Military aircraft classification
|
||||
│ │ │ ├── news.py # RSS intelligence feed aggregation
|
||||
│ │ │ ├── financial.py # Global markets data
|
||||
│ │ │ └── ukraine_alerts.py # Ukraine air raid alerts
|
||||
│ │ └── mesh/ # InfoNet / Wormhole protocol stack
|
||||
│ │ ├── mesh_protocol.py # Core mesh protocol + payload normalization
|
||||
│ │ ├── mesh_crypto.py # Ed25519, ECDSA, HKDF primitives
|
||||
│ │ ├── mesh_hashchain.py # Append-only hash chain
|
||||
│ │ ├── mesh_router.py # Multi-transport router (APRS, LoRa, Tor, clearnet)
|
||||
│ │ ├── mesh_dm_mls.py # MLS-like DM encryption
|
||||
│ │ ├── mesh_gate_mls.py # MLS-like gate (channel) encryption
|
||||
│ │ ├── mesh_rns.py # Reticulum Network Stack + Dandelion++ routing
|
||||
│ │ ├── mesh_reputation.py # Node reputation scoring
|
||||
│ │ ├── mesh_schema.py # Event payload validation
|
||||
│ │ ├── mesh_wormhole_identity.py # Wormhole identity management
|
||||
│ │ ├── mesh_protocol.py # Core mesh protocol + routing
|
||||
│ │ ├── mesh_crypto.py # Ed25519, X25519, AESGCM primitives
|
||||
│ │ ├── mesh_hashchain.py # Hash chain commitment system (~1,400 lines)
|
||||
│ │ ├── mesh_router.py # Multi-transport router (APRS, Meshtastic, WS)
|
||||
│ │ ├── mesh_wormhole_persona.py # Gate persona identity management
|
||||
│ │ ├── mesh_wormhole_dead_drop.py # Dead Drop token-based DM mailbox
|
||||
│ │ ├── mesh_wormhole_contacts.py # Contact exchange
|
||||
│ │ ├── mesh_wormhole_ratchet.py # Double-ratchet DM scaffolding
|
||||
│ │ ├── mesh_wormhole_gate_keys.py # Gate key management + rotation
|
||||
│ │ ├── mesh_wormhole_seal.py # Message sealing + unsealing
|
||||
│ │ ├── mesh_merkle.py # Merkle tree proofs for data commitment
|
||||
│ │ ├── mesh_reputation.py # Node reputation scoring
|
||||
│ │ ├── mesh_oracle.py # Oracle consensus protocol
|
||||
│ │ └── mesh_secure_storage.py # Secure credential storage
|
||||
│
|
||||
├── frontend/
|
||||
│ ├── src/
|
||||
│ │ ├── app/
|
||||
│ │ │ └── page.tsx # Main dashboard — state, polling, layout
|
||||
│ │ ├── components/
|
||||
│ │ │ ├── MaplibreViewer.tsx # Core map — all GeoJSON layers
|
||||
│ │ │ ├── InfonetTerminal/ # InfoNet mesh terminal UI
|
||||
│ │ │ ├── MeshChat.tsx # Mesh / Dead Drop chat panel
|
||||
│ │ │ ├── MeshTerminal.tsx # Draggable CLI terminal
|
||||
│ │ │ ├── NewsFeed.tsx # SIGINT feed + entity detail panels
|
||||
│ │ │ ├── PredictionsPanel.tsx # Prediction market panel
|
||||
│ │ │ ├── ShodanPanel.tsx # Shodan search panel
|
||||
│ │ │ ├── FilterPanel.tsx # Data filter controls
|
||||
│ │ │ ├── WorldviewLeftPanel.tsx # Data layer toggles (37+ layers)
|
||||
│ │ │ ├── WorldviewRightPanel.tsx # Search + filter sidebar
|
||||
│ │ │ ├── RadioInterceptPanel.tsx # Scanner-style radio panel
|
||||
│ │ │ ├── MarketsPanel.tsx # Global financial markets ticker
|
||||
│ │ │ ├── FindLocateBar.tsx # Search/locate bar
|
||||
│ │ │ └── map/ # Map sub-components, layers, icons, styles
|
||||
│ │ ├── hooks/ # useDataPolling, useDataStore, useGateSSE
|
||||
│ │ ├── mesh/ # Frontend mesh/DM/identity client code
|
||||
│ │ └── lib/ # Utilities, desktop bridge, API client
|
||||
│ │ └── components/
|
||||
│ │ ├── MaplibreViewer.tsx # Core map — all GeoJSON layers
|
||||
│ │ ├── MeshChat.tsx # InfoNet / Mesh / Dead Drop chat panel
|
||||
│ │ ├── MeshTerminal.tsx # Draggable CLI terminal
|
||||
│ │ ├── NewsFeed.tsx # SIGINT feed + entity detail panels
|
||||
│ │ ├── WorldviewLeftPanel.tsx # Data layer toggles (35+ layers)
|
||||
│ │ ├── WorldviewRightPanel.tsx # Search + filter sidebar
|
||||
│ │ ├── AdvancedFilterModal.tsx # Airport/country/owner filtering
|
||||
│ │ ├── MapLegend.tsx # Dynamic legend with all icons
|
||||
│ │ ├── MarketsPanel.tsx # Global financial markets ticker
|
||||
│ │ ├── RadioInterceptPanel.tsx # Scanner-style radio panel
|
||||
│ │ ├── FindLocateBar.tsx # Search/locate bar
|
||||
│ │ ├── ChangelogModal.tsx # Version changelog popup (auto-shows on upgrade)
|
||||
│ │ ├── SettingsPanel.tsx # API Keys + News Feed + Shodan config
|
||||
│ │ ├── ScaleBar.tsx # Map scale indicator
|
||||
│ │ └── ErrorBoundary.tsx # Crash recovery wrapper
|
||||
│ └── package.json
|
||||
│
|
||||
├── desktop-shell/ # Tauri (Rust) desktop wrapper
|
||||
├── helm/chart/ # Kubernetes Helm chart
|
||||
├── docker-compose.yml # Main Docker Compose config
|
||||
├── start.sh / start.bat # Local launcher scripts
|
||||
└── compose.sh # Podman/Docker auto-detect wrapper
|
||||
```
|
||||
|
||||
---
|
||||
@@ -745,18 +889,44 @@ Shadowbroker/
|
||||
### Backend (`backend/.env`)
|
||||
|
||||
```env
|
||||
# Required
|
||||
AIS_API_KEY=your_aisstream_key # Maritime vessel tracking (aisstream.io)
|
||||
# Required for airplane telemetry (NEW in v0.9.7 — startup env check flags these as critical)
|
||||
# Free registration: https://opensky-network.org/index.php?option=com_users&view=registration
|
||||
OPENSKY_CLIENT_ID=your_opensky_client_id # OAuth2 — global flight state vectors
|
||||
OPENSKY_CLIENT_SECRET=your_opensky_secret # OAuth2 — paired with Client ID above
|
||||
|
||||
# Optional (enhances data quality)
|
||||
OPENSKY_CLIENT_ID=your_opensky_client_id # OAuth2 — higher rate limits for flight data
|
||||
OPENSKY_CLIENT_SECRET=your_opensky_secret # OAuth2 — paired with Client ID above
|
||||
AIS_API_KEY=your_aisstream_key # Maritime vessel tracking (aisstream.io) — ships layer empty without it
|
||||
LTA_ACCOUNT_KEY=your_lta_key # Singapore CCTV cameras
|
||||
SHODAN_API_KEY=your_shodan_key # Shodan device search overlay
|
||||
SH_CLIENT_ID=your_sentinel_hub_id # Copernicus CDSE Sentinel Hub imagery
|
||||
SH_CLIENT_SECRET=your_sentinel_hub_secret # Paired with Sentinel Hub Client ID
|
||||
MESH_SAR_EARTHDATA_USER= # NASA Earthdata user (SAR Mode B — OPERA products)
|
||||
MESH_SAR_EARTHDATA_TOKEN= # NASA Earthdata token (paired with user above)
|
||||
MESH_SAR_COPERNICUS_USER= # Copernicus Data Space user (SAR Mode B — EGMS / EMS)
|
||||
MESH_SAR_COPERNICUS_TOKEN= # Copernicus token (paired with user above)
|
||||
OPENCLAW_ACCESS_TIER=restricted # OpenClaw agent tier: "restricted" (read-only) or "full"
|
||||
|
||||
# Private-lane privacy-core pinning (required when Arti or RNS is enabled)
|
||||
PRIVACY_CORE_MIN_VERSION=0.1.0
|
||||
PRIVACY_CORE_ALLOWED_SHA256=your_privacy_core_sha256
|
||||
# Optional override if you load a non-default shared library path
|
||||
PRIVACY_CORE_LIB=
|
||||
```
|
||||
|
||||
When `MESH_ARTI_ENABLED=true` or `MESH_RNS_ENABLED=true`, backend startup now fails closed unless the loaded `privacy-core` artifact reports a parseable version at or above `PRIVACY_CORE_MIN_VERSION` and matches one of the hashes in `PRIVACY_CORE_ALLOWED_SHA256`.
|
||||
|
||||
Generate the hash from the artifact you intend to ship:
|
||||
|
||||
```powershell
|
||||
Get-FileHash .\privacy-core\target\release\privacy_core.dll -Algorithm SHA256
|
||||
```
|
||||
|
||||
```bash
|
||||
sha256sum ./privacy-core/target/release/libprivacy_core.so
|
||||
```
|
||||
|
||||
Then confirm authenticated `GET /api/wormhole/status` or `GET /api/settings/wormhole-status` shows the same `privacy_core.version`, `privacy_core.library_path`, and `privacy_core.library_sha256`.
|
||||
|
||||
### Frontend
|
||||
|
||||
| Variable | Where to set | Purpose |
|
||||
@@ -773,6 +943,7 @@ ShadowBroker is built in the open. These people shipped real code:
|
||||
|
||||
| Who | What | PR |
|
||||
|-----|------|----|
|
||||
| [@Alienmajik](https://github.com/Alienmajik) | Raspberry Pi 5 support — ARM64 packaging, headless deployment notes, runtime tuning for Pi-class hardware | — |
|
||||
| [@wa1id](https://github.com/wa1id) | CCTV ingestion fix — threaded SQLite, persistent DB, startup hydration, cluster clickability | #92 |
|
||||
| [@AlborzNazari](https://github.com/AlborzNazari) | Spain DGT + Madrid CCTV sources, STIX 2.1 threat intel export | #91 |
|
||||
| [@adust09](https://github.com/adust09) | Power plants layer, East Asia intel coverage (JSDF bases, ICAO enrichment, Taiwan news, military classification) | #71, #72, #76, #77, #87 |
|
||||
|
||||
+198
-4
@@ -15,11 +15,13 @@ AIS_API_KEY= # https://aisstream.io/ — free tier WebSocket key
|
||||
# CORS_ORIGINS=http://192.168.1.50:3000,https://my-domain.com
|
||||
|
||||
# Admin key — protects sensitive endpoints (API key management, system update).
|
||||
# If unset, endpoints are only accessible from localhost unless ALLOW_INSECURE_ADMIN=true.
|
||||
# If unset, loopback/localhost requests still work for local single-host dev.
|
||||
# Remote/non-loopback admin access requires ADMIN_KEY, or ALLOW_INSECURE_ADMIN=true in debug-only setups.
|
||||
# Set this in production and enter the same key in Settings → Admin Key.
|
||||
# ADMIN_KEY=your-secret-admin-key-here
|
||||
|
||||
# Allow insecure admin access without ADMIN_KEY (local dev only).
|
||||
# Allow insecure admin access without ADMIN_KEY (local dev only, beyond loopback).
|
||||
# Requires MESH_DEBUG_MODE=true; do not enable this for ordinary use.
|
||||
# ALLOW_INSECURE_ADMIN=false
|
||||
|
||||
# User-Agent for Nominatim geocoding requests (per OSM usage policy).
|
||||
@@ -35,19 +37,99 @@ AIS_API_KEY= # https://aisstream.io/ — free tier WebSocket key
|
||||
# Ukraine air raid alerts from alerts.in.ua — free token from https://alerts.in.ua/
|
||||
# ALERTS_IN_UA_TOKEN=
|
||||
|
||||
# Optional NUFORC UAP sighting map enrichment via Mapbox Tilequery.
|
||||
# Leave blank to skip this optional enrichment.
|
||||
# NUFORC_MAPBOX_TOKEN=
|
||||
|
||||
# Google Earth Engine service account for VIIRS change detection (optional).
|
||||
# Download JSON key from https://console.cloud.google.com/iam-admin/serviceaccounts
|
||||
# pip install earthengine-api
|
||||
# GEE_SERVICE_ACCOUNT_KEY=
|
||||
|
||||
# ── Meshtastic MQTT Bridge ─────────────────────────────────────
|
||||
# Disabled by default to respect the public Meshtastic broker.
|
||||
# When enabled, subscribes to US region only. Add more regions via MESH_MQTT_EXTRA_ROOTS.
|
||||
# MESH_MQTT_ENABLED=false
|
||||
# MESH_MQTT_EXTRA_ROOTS=EU_868,ANZ # comma-separated additional region roots
|
||||
# MESH_MQTT_INCLUDE_DEFAULT_ROOTS=true
|
||||
# MESH_MQTT_BROKER=mqtt.meshtastic.org
|
||||
# MESH_MQTT_PORT=1883
|
||||
# MESH_MQTT_USER=meshdev
|
||||
# MESH_MQTT_PASS=large4cats
|
||||
|
||||
# Optional Meshtastic node ID (e.g. "!abcd1234"). When set, included in the
|
||||
# User-Agent sent to meshtastic.liamcottle.net so the upstream service operator
|
||||
# can identify per-install traffic instead of aggregated "ShadowBroker" hits.
|
||||
# Leave blank to send a generic UA with the project contact email only.
|
||||
# MESHTASTIC_OPERATOR_CALLSIGN=
|
||||
# MESH_MQTT_PSK= # hex-encoded, empty = default LongFast key
|
||||
|
||||
# ── Mesh / Reticulum (RNS) ─────────────────────────────────────
|
||||
# Full-node / participant-node posture for public Infonet sync.
|
||||
# MESH_NODE_MODE=participant # participant | relay | perimeter
|
||||
# Legacy compatibility sunset toggles. Default posture is to block these.
|
||||
# Legacy 16-hex node-id binding no longer has a boolean escape hatch; use a
|
||||
# dated migration override only when you intentionally need older peers during
|
||||
# migration before the hard removal target in v0.10.0 / 2026-06-01.
|
||||
# MESH_BLOCK_LEGACY_NODE_ID_COMPAT=true
|
||||
# MESH_ALLOW_LEGACY_NODE_ID_COMPAT_UNTIL=2026-05-15
|
||||
# MESH_BLOCK_LEGACY_AGENT_ID_LOOKUP=true
|
||||
# Temporary DM invite migration escape hatch. Default posture blocks importing
|
||||
# legacy/compat v1/v2 DM invites; use a dated override only while retiring
|
||||
# older exports and ask senders to re-export a current signed invite.
|
||||
# MESH_ALLOW_COMPAT_DM_INVITE_IMPORT_UNTIL=2026-05-15
|
||||
# Temporary legacy GET DM poll/count escape hatch. Default posture requires the
|
||||
# signed mailbox-claim POST APIs; only use this dated override while retiring
|
||||
# older clients that still call GET poll/count directly.
|
||||
# MESH_ALLOW_LEGACY_DM_GET_UNTIL=2026-05-15
|
||||
# Temporary raw dm1 compose/decrypt escape hatch. Default posture expects MLS
|
||||
# DM bootstrap on supported peers; only use this dated override while retiring
|
||||
# older clients that still need the raw dm1 helper path.
|
||||
# MESH_ALLOW_LEGACY_DM1_UNTIL=2026-05-15
|
||||
# Temporary legacy dm_message signature escape hatch. Default posture requires
|
||||
# the full modern signed payload; only enable this with a dated migration
|
||||
# override while older senders are being retired.
|
||||
# MESH_ALLOW_LEGACY_DM_SIGNATURE_COMPAT_UNTIL=2026-05-15
|
||||
# Rotate voter-blinding salts so new reputation events stop reusing one
|
||||
# forever-stable blinded ID. Keep grace >= rotation cadence so older votes
|
||||
# remain matchable while they age out of the ledger.
|
||||
# MESH_VOTER_BLIND_SALT_ROTATE_DAYS=30
|
||||
# MESH_VOTER_BLIND_SALT_GRACE_DAYS=30
|
||||
# Deprecated legacy env vars kept only for backward config compatibility.
|
||||
# Ordinary shipped gate flows keep MLS decrypt local; service-side decrypt is
|
||||
# reserved for explicit recovery reads.
|
||||
# MESH_GATE_BACKEND_DECRYPT_COMPAT=false
|
||||
# MESH_GATE_BACKEND_DECRYPT_COMPAT_ACKNOWLEDGE=false
|
||||
# Deprecated legacy env vars kept only for backward config compatibility.
|
||||
# Ordinary shipped gate flows keep plaintext compose/post local and only submit
|
||||
# encrypted envelopes to the backend for sign/post.
|
||||
# MESH_GATE_BACKEND_PLAINTEXT_COMPAT=false
|
||||
# MESH_GATE_BACKEND_PLAINTEXT_COMPAT_ACKNOWLEDGE=false
|
||||
# Legacy runtime switches for recovery envelopes. Per-gate envelope_policy is
|
||||
# the source of truth; leave these at the default unless testing old behavior.
|
||||
# MESH_GATE_RECOVERY_ENVELOPE_ENABLE=true
|
||||
# MESH_GATE_RECOVERY_ENVELOPE_ENABLE_ACKNOWLEDGE=true
|
||||
# Optional operator-only recovery tradeoff. Leave off for the default posture:
|
||||
# ordinary gate reads keep plaintext local/in-memory unless you explicitly use
|
||||
# the recovery-envelope path.
|
||||
# MESH_GATE_PLAINTEXT_PERSIST=false
|
||||
# MESH_GATE_PLAINTEXT_PERSIST_ACKNOWLEDGE=false
|
||||
# Legacy Phase-1 gate envelope fallback is now explicit and time-bounded per
|
||||
# gate. This only controls the default expiry window when you deliberately
|
||||
# re-enable that migration path for older stored envelopes.
|
||||
# MESH_GATE_LEGACY_ENVELOPE_FALLBACK_MAX_DAYS=30
|
||||
# Feature-flagged multiplexed gate session stream. Stream-first room ownership
|
||||
# is implemented; keep off until you want that rollout enabled in your env.
|
||||
# MESH_GATE_SESSION_STREAM_ENABLED=false
|
||||
# MESH_GATE_SESSION_STREAM_HEARTBEAT_S=20
|
||||
# MESH_GATE_SESSION_STREAM_BATCH_MS=1500
|
||||
# MESH_GATE_SESSION_STREAM_MAX_GATES=16
|
||||
# MESH_BOOTSTRAP_DISABLED=false
|
||||
# MESH_BOOTSTRAP_MANIFEST_PATH=data/bootstrap_peers.json
|
||||
# MESH_BOOTSTRAP_SIGNER_PUBLIC_KEY=
|
||||
# MESH_RELAY_PEERS= # comma-separated operator-trusted sync/push peers
|
||||
# MESH_PEER_PUSH_SECRET=Mv63UvLfwqOEVWeRBXjA8MtFl2nEkkhUlLYVHiX1Zzo # transport auth for mesh peer push (default works out of the box)
|
||||
# MESH_DEFAULT_SYNC_PEERS=https://node.shadowbroker.info # bundled pull-only public seed for fresh installs
|
||||
# MESH_RELAY_PEERS= # comma-separated operator-trusted sync/push peers (empty by default)
|
||||
# MESH_PEER_PUSH_SECRET= # REQUIRED when relay/RNS peers are configured (min 16 chars, generate with: python -c "import secrets; print(secrets.token_urlsafe(32))")
|
||||
# MESH_SYNC_INTERVAL_S=300
|
||||
# MESH_SYNC_FAILURE_BACKOFF_S=60
|
||||
#
|
||||
@@ -90,8 +172,54 @@ AIS_API_KEY= # https://aisstream.io/ — free tier WebSocket key
|
||||
# MESH_VERIFY_INTERVAL_S=600
|
||||
# MESH_VERIFY_SIGNATURES=false
|
||||
|
||||
# ── Secure Storage (non-Windows) ───────────────────────────────
|
||||
# Required on Linux/Docker to protect Wormhole key material at rest.
|
||||
# Generate with: python -c "import secrets; print(secrets.token_urlsafe(32))"
|
||||
# Also supports Docker secrets via MESH_SECURE_STORAGE_SECRET_FILE.
|
||||
# MESH_SECURE_STORAGE_SECRET=
|
||||
#
|
||||
# To rotate the storage secret, stop the backend and run:
|
||||
# 1. Dry-run first (validates without writing):
|
||||
# MESH_OLD_STORAGE_SECRET=<current> MESH_NEW_STORAGE_SECRET=<new> \
|
||||
# python -m scripts.rotate_secure_storage_secret --dry-run
|
||||
# 2. Rotate (creates .bak backups, then rewraps envelopes):
|
||||
# MESH_OLD_STORAGE_SECRET=<current> MESH_NEW_STORAGE_SECRET=<new> \
|
||||
# python -m scripts.rotate_secure_storage_secret
|
||||
# 3. Update MESH_SECURE_STORAGE_SECRET to the new value and restart.
|
||||
#
|
||||
# If rotation is interrupted, .bak files preserve the old envelopes.
|
||||
# To repair corrupted secure-json payloads (not key envelopes), use:
|
||||
# python -m scripts.repair_wormhole_secure_storage
|
||||
|
||||
# ── Mesh DM Relay ──────────────────────────────────────────────
|
||||
# MESH_DM_TOKEN_PEPPER=change-me
|
||||
# Keep DM relay metadata retention explicit and bounded.
|
||||
# MESH_DM_KEY_TTL_DAYS=30
|
||||
# MESH_DM_PREKEY_LOOKUP_ALIAS_TTL_DAYS=14
|
||||
# MESH_DM_WITNESS_TTL_DAYS=14
|
||||
# MESH_DM_BINDING_TTL_DAYS=3
|
||||
# Optional operational bridge for externally sourced root witnesses / transparency.
|
||||
# Relative paths resolve from the backend directory.
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_PATH=data/root_witness_import.json
|
||||
# Local single-host dev example after bootstrapping an external witness locally:
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_PATH=../ops/root_witness_receipt_import.json
|
||||
# Optional URI bridge for externally retrieved root witness packages.
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_URI=file:///absolute/path/root_witness_import.json
|
||||
# Maximum acceptable age for external witness packages before strong DM trust fails closed.
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_MAX_AGE_S=3600
|
||||
# Warning threshold for external witness packages before fail-closed max age.
|
||||
# MESH_DM_ROOT_EXTERNAL_WITNESS_WARN_AGE_S=2700
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_EXPORT_PATH=data/root_transparency_ledger.json
|
||||
# Local single-host dev example after publishing the transparency ledger locally:
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_EXPORT_PATH=../ops/root_transparency_ledger.json
|
||||
# Optional URI used to read back and verify a published transparency ledger.
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_READBACK_URI=file:///absolute/path/root_transparency_ledger.json
|
||||
# Local single-host dev readback example:
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_READBACK_URI=../ops/root_transparency_ledger.json
|
||||
# Maximum acceptable age for external transparency ledgers before strong DM trust fails closed.
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_MAX_AGE_S=3600
|
||||
# Warning threshold for external transparency ledgers before fail-closed max age.
|
||||
# MESH_DM_ROOT_TRANSPARENCY_LEDGER_WARN_AGE_S=2700
|
||||
|
||||
# ── Self Update ────────────────────────────────────────────────
|
||||
# MESH_UPDATE_SHA256=
|
||||
@@ -103,3 +231,69 @@ AIS_API_KEY= # https://aisstream.io/ — free tier WebSocket key
|
||||
# WORMHOLE_TRANSPORT=direct
|
||||
# WORMHOLE_SOCKS_PROXY=127.0.0.1:9050
|
||||
# WORMHOLE_SOCKS_DNS=true
|
||||
# Optional override for the loaded Rust privacy-core shared library. Leave
|
||||
# unset for the default repo search order. When you override this, verify the
|
||||
# authenticated wormhole status surfaces show the expected version, absolute
|
||||
# library path, and SHA-256 for the loaded artifact before making stronger
|
||||
# privacy claims about the deployment.
|
||||
# PRIVACY_CORE_LIB=
|
||||
# Minimum privacy-core version accepted when hidden/private carriers are
|
||||
# enabled. Private-lane startup fails closed if the loaded artifact is
|
||||
# missing, reports no parseable version, or falls below this minimum.
|
||||
# PRIVACY_CORE_MIN_VERSION=0.1.0
|
||||
# Comma-separated SHA-256 allowlist for the exact privacy-core artifact(s)
|
||||
# your deployment is allowed to load. Required for Arti/RNS private-lane
|
||||
# startup. Generate with:
|
||||
# PowerShell: Get-FileHash .\privacy-core\target\release\privacy_core.dll -Algorithm SHA256
|
||||
# macOS/Linux: sha256sum ./privacy-core/target/release/libprivacy_core.so
|
||||
# PRIVACY_CORE_ALLOWED_SHA256=
|
||||
# Optional structured release attestation artifact for the Sprint 8 release gate.
|
||||
# Relative paths resolve from the backend directory. When set explicitly, a
|
||||
# missing or unreadable file fails the DM relay security-suite criterion closed.
|
||||
# CI/release tooling can generate this automatically via:
|
||||
# uv run python scripts/release_helper.py write-attestation ...
|
||||
# MESH_RELEASE_ATTESTATION_PATH=data/release_attestation.json
|
||||
# Operator-only Sprint 8 release attestation. Set this only when the DM relay
|
||||
# security suite has been run and passed for the current release candidate.
|
||||
# File-based release attestation takes precedence when present.
|
||||
# MESH_RELEASE_DM_RELAY_SECURITY_SUITE_GREEN=false
|
||||
|
||||
# ── OpenClaw Agent ─────────────────────────────────────────────
|
||||
# HMAC shared secret for remote OpenClaw agent authentication.
|
||||
# Auto-generated via the Connect OpenClaw modal — do not set manually.
|
||||
# OPENCLAW_HMAC_SECRET=
|
||||
# Access tier: "restricted" (read-only) or "full" (read+write+inject)
|
||||
# OPENCLAW_ACCESS_TIER=restricted
|
||||
|
||||
# ── SAR (Synthetic Aperture Radar) Layer ───────────────────────
|
||||
# Mode A — Free catalog metadata from Alaska Satellite Facility (ASF Search).
|
||||
# No account, no downloads. Default-on. Set to false to disable entirely.
|
||||
# MESH_SAR_CATALOG_ENABLED=true
|
||||
#
|
||||
# Mode B — Free pre-processed ground-change anomalies (deformation, flood,
|
||||
# damage assessments) from NASA OPERA, Copernicus EGMS, GFM, EMS, UNOSAT.
|
||||
# Two-step opt-in: BOTH of the following must be set together.
|
||||
# 1. MESH_SAR_PRODUCTS_FETCH=allow
|
||||
# 2. MESH_SAR_PRODUCTS_FETCH_ACKNOWLEDGE=true
|
||||
# Either flag alone keeps Mode B disabled. You can also enable this from
|
||||
# the Settings → SAR panel inside the app.
|
||||
# MESH_SAR_PRODUCTS_FETCH=block
|
||||
# MESH_SAR_PRODUCTS_FETCH_ACKNOWLEDGE=false
|
||||
#
|
||||
# NASA Earthdata Login (free, ~1 minute signup) — required for OPERA products.
|
||||
# Sign up: https://urs.earthdata.nasa.gov/users/new
|
||||
# Generate token: https://urs.earthdata.nasa.gov/profile → "Generate Token"
|
||||
# MESH_SAR_EARTHDATA_USER=
|
||||
# MESH_SAR_EARTHDATA_TOKEN=
|
||||
#
|
||||
# Copernicus Data Space (free, ~1 minute signup) — required for EGMS / EMS.
|
||||
# Sign up: https://dataspace.copernicus.eu/
|
||||
# MESH_SAR_COPERNICUS_USER=
|
||||
# MESH_SAR_COPERNICUS_TOKEN=
|
||||
#
|
||||
# Allow OpenClaw agents to read and act on the SAR layer (default true).
|
||||
# MESH_SAR_OPENCLAW_ENABLED=true
|
||||
#
|
||||
# Require private-tier transport (Tor / RNS) before signing and broadcasting
|
||||
# SAR anomalies to the mesh. Default true — disable only for testnet/local use.
|
||||
# MESH_SAR_REQUIRE_PRIVATE_TIER=true
|
||||
|
||||
+10
-2
@@ -1,10 +1,17 @@
|
||||
# ---- Stage 1: Compile privacy-core Rust library ----
|
||||
FROM rust:1.88-slim-bookworm AS rust-builder
|
||||
FROM --platform=$BUILDPLATFORM rust:1.88-slim-bookworm AS rust-builder
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config libssl-dev \
|
||||
ca-certificates \
|
||||
git \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
build-essential \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true
|
||||
ENV CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse
|
||||
|
||||
COPY privacy-core /build/privacy-core
|
||||
WORKDIR /build/privacy-core
|
||||
RUN cargo build --release --lib \
|
||||
@@ -17,6 +24,7 @@ WORKDIR /app
|
||||
|
||||
# Install Node.js (for AIS WebSocket proxy) and curl (for network fallback)
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
curl \
|
||||
&& curl -fsSL https://deb.nodesource.com/setup_20.x | bash - \
|
||||
&& apt-get install -y --no-install-recommends nodejs \
|
||||
|
||||
+1369
File diff suppressed because it is too large
Load Diff
File diff suppressed because one or more lines are too long
@@ -1047,14 +1047,6 @@
|
||||
"lat": 37.47,
|
||||
"lng": 69.381
|
||||
},
|
||||
{
|
||||
"name": "Berth rights and right to station its troops in Qatar",
|
||||
"country": "India",
|
||||
"operator": "India",
|
||||
"branch": "army",
|
||||
"lat": 25.308,
|
||||
"lng": 51.209
|
||||
},
|
||||
{
|
||||
"name": "Ahmad al-Jaber Air Base",
|
||||
"country": "Italy",
|
||||
|
||||
@@ -73567,6 +73567,14 @@
|
||||
"tags": "Air Ambo, Medical Evac, Saving Lives",
|
||||
"link": "https://www.airmethods.com/"
|
||||
},
|
||||
"ABD9B5": {
|
||||
"registration": "N8628",
|
||||
"operator": "Elon Musk",
|
||||
"ac_type": "Gulfstream G800",
|
||||
"category": "Don't you know who I am?",
|
||||
"tags": "Elon Musk, SpaceX, DOGE, Toys4Billionaires",
|
||||
"link": "https://en.wikipedia.org/wiki/Elon_Musk"
|
||||
},
|
||||
"A835AF": {
|
||||
"registration": "N628TS",
|
||||
"operator": "Falcon Landing LLC",
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -0,0 +1,11 @@
|
||||
"""gate_sse.py — DEPRECATED. Gate SSE broadcast removed in S3A.
|
||||
|
||||
Gate activity is no longer broadcast via SSE. The frontend uses the
|
||||
authenticated poll loop for gate message refresh.
|
||||
|
||||
Stubs are kept so any late imports do not crash at startup.
|
||||
"""
|
||||
|
||||
|
||||
def _broadcast_gate_events(gate_id: str, events: list[dict]) -> None: # noqa: ARG001
|
||||
"""No-op — gate SSE broadcast removed."""
|
||||
@@ -0,0 +1,4 @@
|
||||
from slowapi import Limiter
|
||||
from slowapi.util import get_remote_address
|
||||
|
||||
# Module-level slowapi Limiter shared across the app. Requests are bucketed
# per client IP via get_remote_address (per-remote-address rate limiting).
limiter = Limiter(key_func=get_remote_address)
|
||||
+4568
-1774
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,266 @@
|
||||
"""node_state.py — Shared mutable node runtime state and node helper functions.
|
||||
|
||||
Extracted from main.py so that background worker functions and route handlers
|
||||
can reference the same state objects without importing the full application.
|
||||
|
||||
_NODE_SYNC_STATE is a reassignable value (SyncWorkerState is replaced whole,
|
||||
not mutated), so callers must use get_sync_state() / set_sync_state() instead
|
||||
of binding to the name at import time.
|
||||
|
||||
All other _NODE_* objects are mutable containers (Lock, Event, dict) whose
|
||||
identity never changes; importing them directly by name is safe.
|
||||
"""
|
||||
|
||||
import threading
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from services.mesh.mesh_infonet_sync_support import SyncWorkerState
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Runtime state objects
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Guards reads and writes of all _NODE_* runtime state in this module.
# RLock (not Lock) so helpers holding it can call other lock-taking helpers.
_NODE_RUNTIME_LOCK = threading.RLock()
# Signals the background sync worker to stop.
_NODE_SYNC_STOP = threading.Event()
# Replaced wholesale (never mutated in place) — access only through
# get_sync_state() / set_sync_state(), per the module docstring.
_NODE_SYNC_STATE = SyncWorkerState()
# Bootstrap/manifest status snapshot surfaced via _node_runtime_snapshot().
_NODE_BOOTSTRAP_STATE: dict[str, Any] = {
    "node_mode": "participant",
    "manifest_loaded": False,
    "manifest_signer_id": "",
    "manifest_valid_until": 0,
    "bootstrap_peer_count": 0,
    "sync_peer_count": 0,
    "push_peer_count": 0,
    "operator_peer_count": 0,
    "last_bootstrap_error": "",
}
# Last-push bookkeeping for the peer-push worker; mutated in place under
# _NODE_RUNTIME_LOCK, so importing the name directly is safe.
_NODE_PUSH_STATE: dict[str, Any] = {
    "last_event_id": "",
    "last_push_ok_at": 0,
    "last_push_error": "",
    "last_results": [],
}
|
||||
|
||||
# ---------------------------------------------------------------------------
# Getter / setter for _NODE_SYNC_STATE
#
# Use these instead of globals()["_NODE_SYNC_STATE"] = ... in any module that
# imports this package. The setter modifies *this* module's namespace so
# subsequent get_sync_state() calls see the new value regardless of which
# module calls set_sync_state().
# ---------------------------------------------------------------------------


def get_sync_state() -> SyncWorkerState:
    """Return the current module-level sync worker state."""
    return _NODE_SYNC_STATE


def set_sync_state(state: SyncWorkerState) -> None:
    """Replace the module-level sync worker state wholesale."""
    global _NODE_SYNC_STATE
    _NODE_SYNC_STATE = state
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
# Node helper functions
#
# These were in main.py but are needed by both route handlers and background
# workers, so they live here to avoid circular imports.
# ---------------------------------------------------------------------------


def _current_node_mode() -> str:
    """Return the configured node mode, collapsing unknown values to "participant"."""
    from services.config import get_settings

    raw_mode = get_settings().MESH_NODE_MODE or "participant"
    candidate = str(raw_mode).strip().lower()
    if candidate in {"participant", "relay", "perimeter"}:
        return candidate
    return "participant"
|
||||
|
||||
|
||||
def _node_runtime_supported() -> bool:
    """True when the configured node mode can run the sync/push runtime."""
    mode = _current_node_mode()
    return mode == "participant" or mode == "relay"
|
||||
|
||||
|
||||
def _node_activation_enabled() -> bool:
    """True when the operator has enabled the node in persisted settings."""
    from services.node_settings import read_node_settings

    try:
        stored = read_node_settings()
    except Exception:
        # Unreadable/corrupt settings are treated as "not enabled".
        return False
    return bool(stored.get("enabled", False))
|
||||
|
||||
|
||||
def _participant_node_enabled() -> bool:
    """True when the mode supports the runtime AND the operator enabled it."""
    if not _node_runtime_supported():
        return False
    return _node_activation_enabled()
|
||||
|
||||
|
||||
def _node_runtime_snapshot() -> dict[str, Any]:
    """Return a consistent snapshot of all node runtime state for status APIs."""
    # Hold the lock so the bootstrap/push dict copies and the sync-state read
    # are taken atomically relative to writers.
    with _NODE_RUNTIME_LOCK:
        return {
            "node_mode": _NODE_BOOTSTRAP_STATE.get("node_mode", "participant"),
            "node_enabled": _participant_node_enabled(),
            "bootstrap": dict(_NODE_BOOTSTRAP_STATE),
            "sync_runtime": get_sync_state().to_dict(),
            "push_runtime": dict(_NODE_PUSH_STATE),
        }
|
||||
|
||||
|
||||
def _set_node_sync_disabled_state(*, current_head: str = "") -> SyncWorkerState:
    """Build a fresh SyncWorkerState marked "disabled", keeping the chain head."""
    head = str(current_head or "")
    return SyncWorkerState(current_head=head, last_outcome="disabled")
|
||||
|
||||
|
||||
def _set_participant_node_enabled(enabled: bool) -> dict[str, Any]:
    """Persist the node enabled flag and reset the sync runtime accordingly.

    Returns the stored settings merged with the derived mode/enabled flags.
    """
    from services.mesh.mesh_hashchain import infonet
    from services.node_settings import write_node_settings

    settings = write_node_settings(enabled=bool(enabled))
    current_head = str(infonet.head_hash or "")
    with _NODE_RUNTIME_LOCK:
        _NODE_BOOTSTRAP_STATE["node_mode"] = _current_node_mode()
        # Enabling starts a clean worker state at the current chain head;
        # disabling records a "disabled" outcome while keeping the head.
        set_sync_state(
            SyncWorkerState(current_head=current_head)
            if bool(enabled) and _node_runtime_supported()
            else _set_node_sync_disabled_state(current_head=current_head)
        )
    return {
        **settings,
        "node_mode": _current_node_mode(),
        "node_enabled": _participant_node_enabled(),
    }
|
||||
|
||||
|
||||
def _refresh_node_peer_store(*, now: float | None = None) -> dict[str, Any]:
    """Rebuild the on-disk peer store from operator config, bundled default
    seeds, and the signed bootstrap manifest, then update _NODE_BOOTSTRAP_STATE.

    Args:
        now: Optional clock override (seconds); defaults to time.time().

    Returns:
        The snapshot dict that was merged into _NODE_BOOTSTRAP_STATE.
    """
    from services.config import get_settings
    from services.mesh.mesh_bootstrap_manifest import load_bootstrap_manifest_from_settings
    from services.mesh.mesh_peer_store import (
        DEFAULT_PEER_STORE_PATH,
        PeerStore,
        make_bootstrap_peer_record,
        make_push_peer_record,
        make_sync_peer_record,
    )
    from services.mesh.mesh_router import (
        configured_relay_peer_urls,
        parse_configured_relay_peers,
        peer_transport_kind,
    )

    timestamp = int(now if now is not None else time.time())
    mode = _current_node_mode()
    store = PeerStore(DEFAULT_PEER_STORE_PATH)
    try:
        store.load()
    except Exception:
        # A corrupt/unreadable store is discarded and rebuilt from scratch.
        store = PeerStore(DEFAULT_PEER_STORE_PATH)

    operator_peers = configured_relay_peer_urls()
    default_sync_peers = parse_configured_relay_peers(
        str(getattr(get_settings(), "MESH_DEFAULT_SYNC_PEERS", "") or "")
    )
    # Operator-configured relays are recorded for both sync and push.
    for peer_url in operator_peers:
        transport = peer_transport_kind(peer_url)
        if not transport:
            # Unrecognized transport scheme — skip silently.
            continue
        store.upsert(
            make_sync_peer_record(
                peer_url=peer_url,
                transport=transport,
                role="relay",
                source="operator",
                now=timestamp,
            )
        )
        store.upsert(
            make_push_peer_record(
                peer_url=peer_url,
                transport=transport,
                role="relay",
                source="operator",
                now=timestamp,
            )
        )

    # Bundled default seeds go into bootstrap + sync buckets, skipping any
    # URL the operator already configured above.
    operator_peer_set = set(operator_peers)
    for peer_url in default_sync_peers:
        if peer_url in operator_peer_set:
            continue
        transport = peer_transport_kind(peer_url)
        if not transport:
            continue
        store.upsert(
            make_bootstrap_peer_record(
                peer_url=peer_url,
                transport=transport,
                role="seed",
                label="ShadowBroker default seed",
                signer_id="shadowbroker-default",
                now=timestamp,
            )
        )
        store.upsert(
            make_sync_peer_record(
                peer_url=peer_url,
                transport=transport,
                role="seed",
                source="bundle",
                label="ShadowBroker default seed",
                signer_id="shadowbroker-default",
                now=timestamp,
            )
        )

    manifest = None
    bootstrap_error = ""
    try:
        manifest = load_bootstrap_manifest_from_settings(now=timestamp)
    except Exception as exc:
        # Manifest failure is non-fatal; surface the message in the snapshot.
        bootstrap_error = str(exc or "").strip()

    if manifest is not None:
        # Signed-manifest peers are recorded for bootstrap and promoted to sync.
        for peer in manifest.peers:
            store.upsert(
                make_bootstrap_peer_record(
                    peer_url=peer.peer_url,
                    transport=peer.transport,
                    role=peer.role,
                    label=peer.label,
                    signer_id=manifest.signer_id,
                    now=timestamp,
                )
            )
            store.upsert(
                make_sync_peer_record(
                    peer_url=peer.peer_url,
                    transport=peer.transport,
                    role=peer.role,
                    source="bootstrap_promoted",
                    label=peer.label,
                    signer_id=manifest.signer_id,
                    now=timestamp,
                )
            )

    store.save()
    snapshot = {
        "node_mode": mode,
        "manifest_loaded": manifest is not None,
        "manifest_signer_id": manifest.signer_id if manifest is not None else "",
        "manifest_valid_until": int(manifest.valid_until or 0) if manifest is not None else 0,
        "bootstrap_peer_count": len(store.records_for_bucket("bootstrap")),
        "sync_peer_count": len(store.records_for_bucket("sync")),
        "push_peer_count": len(store.records_for_bucket("push")),
        "operator_peer_count": len(operator_peers),
        "default_sync_peer_count": len(default_sync_peers),
        "last_bootstrap_error": bootstrap_error,
    }
    with _NODE_RUNTIME_LOCK:
        _NODE_BOOTSTRAP_STATE.update(snapshot)
    return snapshot
|
||||
|
||||
|
||||
def _materialize_local_infonet_state() -> None:
    """Ensure the local infonet hash-chain state has been materialized."""
    from services.mesh.mesh_hashchain import infonet

    infonet.ensure_materialized()
|
||||
@@ -1,6 +1,13 @@
|
||||
[build-system]
|
||||
requires = ["setuptools>=68.0"]
|
||||
build-backend = "setuptools.build_meta"
|
||||
|
||||
[tool.setuptools]
|
||||
py-modules = []
|
||||
|
||||
[project]
|
||||
name = "backend"
|
||||
version = "0.9.6"
|
||||
version = "0.9.7"
|
||||
requires-python = ">=3.10"
|
||||
dependencies = [
|
||||
"apscheduler==3.10.3",
|
||||
|
||||
@@ -0,0 +1,277 @@
|
||||
import json as json_mod
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from fastapi import APIRouter, Request, Depends, Response
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
from node_state import (
|
||||
_current_node_mode,
|
||||
_participant_node_enabled,
|
||||
_refresh_node_peer_store,
|
||||
_set_participant_node_enabled,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
class NodeSettingsUpdate(BaseModel):
    """Request body for PUT /api/settings/node."""

    # Desired participant-node activation state.
    enabled: bool


class TimeMachineToggle(BaseModel):
    """Request body for PUT /api/settings/timemachine."""

    # Desired Time Machine auto-snapshot state.
    enabled: bool
|
||||
|
||||
|
||||
@router.get("/api/settings/api-keys", dependencies=[Depends(require_admin)])
@limiter.limit("30/minute")
async def api_get_keys(request: Request):
    """Return the stored third-party API keys (admin only)."""
    from services.api_settings import get_api_keys
    return get_api_keys()
|
||||
|
||||
|
||||
@router.get("/api/settings/api-keys/meta")
@limiter.limit("30/minute")
async def api_get_keys_meta(request: Request):
    """Return absolute paths for the backend .env and .env.example template.

    Not gated behind admin auth: the paths are not sensitive, and the frontend
    needs them to render the API Keys panel banner before the user has had a
    chance to enter an admin key. Helps users find the file when in-app editing
    is blocked or when the backend is read-only.

    Returns:
        Path metadata dict from services.api_settings.get_env_path_info().
    """
    from services.api_settings import get_env_path_info
    return get_env_path_info()
|
||||
|
||||
|
||||
@router.get("/api/settings/news-feeds")
@limiter.limit("30/minute")
async def api_get_news_feeds(request: Request):
    """Return the configured news feed list."""
    from services.news_feed_config import get_feeds
    return get_feeds()
|
||||
|
||||
|
||||
@router.put("/api/settings/news-feeds", dependencies=[Depends(require_admin)])
@limiter.limit("10/minute")
async def api_save_news_feeds(request: Request):
    """Validate and persist the news feed list (admin only).

    Expects a JSON array of feed objects; validation is delegated to
    services.news_feed_config.save_feeds. Responds HTTP 400 on failure.
    """
    from services.news_feed_config import save_feeds
    body = await request.json()
    ok = save_feeds(body)
    if ok:
        return {"status": "updated", "count": len(body)}
    return Response(
        content=json_mod.dumps({"status": "error",
                                "message": "Validation failed (max 20 feeds, each needs name/url/weight 1-5)"}),
        status_code=400,
        media_type="application/json",
    )
|
||||
|
||||
|
||||
@router.post("/api/settings/news-feeds/reset", dependencies=[Depends(require_admin)])
@limiter.limit("10/minute")
async def api_reset_news_feeds(request: Request):
    """Restore the default news feed list and return it (admin only)."""
    from services.news_feed_config import get_feeds, reset_feeds
    ok = reset_feeds()
    if ok:
        return {"status": "reset", "feeds": get_feeds()}
    return {"status": "error", "message": "Failed to reset feeds"}
|
||||
|
||||
|
||||
@router.get("/api/settings/node")
@limiter.limit("30/minute")
async def api_get_node_settings(request: Request):
    """Return persisted node settings plus derived mode/enabled flags."""
    import asyncio
    from services.node_settings import read_node_settings
    # read_node_settings does blocking file I/O; keep it off the event loop.
    data = await asyncio.to_thread(read_node_settings)
    return {
        **data,
        "node_mode": _current_node_mode(),
        "node_enabled": _participant_node_enabled(),
    }
|
||||
|
||||
|
||||
@router.put("/api/settings/node", dependencies=[Depends(require_local_operator)])
@limiter.limit("10/minute")
async def api_set_node_settings(request: Request, body: NodeSettingsUpdate):
    """Refresh the peer store, then enable/disable the participant node.

    Fix: both helpers do blocking file (and possibly network) I/O; run them
    via asyncio.to_thread so the event loop is not stalled, consistent with
    the other settings handlers in this router.
    """
    import asyncio
    await asyncio.to_thread(_refresh_node_peer_store)
    return await asyncio.to_thread(_set_participant_node_enabled, bool(body.enabled))
|
||||
|
||||
|
||||
@router.get("/api/settings/timemachine")
@limiter.limit("30/minute")
async def api_get_timemachine_settings(request: Request):
    """Return the Time Machine enabled flag plus a storage-cost warning."""
    import asyncio
    from services.node_settings import read_node_settings
    # Blocking file read — run off the event loop.
    data = await asyncio.to_thread(read_node_settings)
    return {
        "enabled": data.get("timemachine_enabled", False),
        "storage_warning": "Time Machine auto-snapshots use ~68 MB/day compressed (~2 GB/month). "
        "Snapshots capture entity positions (flights, ships, satellites) for historical playback.",
    }
|
||||
|
||||
|
||||
@router.put("/api/settings/timemachine", dependencies=[Depends(require_local_operator)])
@limiter.limit("10/minute")
async def api_set_timemachine_settings(request: Request, body: TimeMachineToggle):
    """Persist the Time Machine enabled flag and echo the stored value."""
    import asyncio
    from services.node_settings import write_node_settings
    # Blocking file write — run off the event loop.
    result = await asyncio.to_thread(write_node_settings, timemachine_enabled=body.enabled)
    return {
        "ok": True,
        "enabled": result.get("timemachine_enabled", False),
    }
|
||||
|
||||
|
||||
@router.post("/api/system/update", dependencies=[Depends(require_admin)])
@limiter.limit("1/minute")
async def system_update(request: Request):
    """Download latest release, backup current files, extract update, and restart."""
    from services.updater import perform_update, schedule_restart
    # Project root = two levels above this routers/ package when the checkout
    # layout (frontend/ or backend/ sibling dirs) is recognizable; otherwise
    # fall back to the process working directory.
    candidate = Path(__file__).resolve().parent.parent.parent
    if (candidate / "frontend").is_dir() or (candidate / "backend").is_dir():
        project_root = str(candidate)
    else:
        project_root = os.getcwd()
    result = perform_update(project_root)
    if result.get("status") == "error":
        return Response(content=json_mod.dumps(result), status_code=500, media_type="application/json")
    if result.get("status") == "docker":
        # Inside Docker the container supervisor handles the restart itself.
        return result
    # Delay the restart so this HTTP response can be delivered first.
    threading.Timer(2.0, schedule_restart, args=[project_root]).start()
    return result
|
||||
|
||||
|
||||
# ── Tor Hidden Service ──────────────────────────────────────────────


@router.get("/api/settings/tor", dependencies=[Depends(require_local_operator)])
@limiter.limit("30/minute")
async def api_tor_status(request: Request):
    """Return Tor hidden service status and .onion address if available."""
    import asyncio
    from services.tor_hidden_service import tor_service

    # tor_service.status may touch the filesystem/process table — off-loop.
    return await asyncio.to_thread(tor_service.status)
|
||||
|
||||
|
||||
@router.post("/api/settings/tor/start", dependencies=[Depends(require_local_operator)])
@limiter.limit("5/minute")
async def api_tor_start(request: Request):
    """Start Tor and provision a hidden service for this ShadowBroker instance.

    Also enables MESH_ARTI so the mesh/wormhole system can route traffic
    through the Tor SOCKS proxy (port 9050) automatically.
    """
    import asyncio
    from services.tor_hidden_service import tor_service

    result = await asyncio.to_thread(tor_service.start)

    # If Tor started successfully, enable Arti (Tor SOCKS proxy for mesh)
    if result.get("ok"):
        try:
            from routers.ai_intel import _write_env_value
            from services.config import get_settings
            _write_env_value("MESH_ARTI_ENABLED", "true")
            # Invalidate the cached settings so the new flag takes effect.
            get_settings.cache_clear()
        except Exception:
            pass  # Non-fatal — hidden service still works without mesh Arti

    return result
|
||||
|
||||
|
||||
@router.post("/api/settings/tor/reset-identity", dependencies=[Depends(require_local_operator)])
@limiter.limit("2/minute")
async def api_tor_reset_identity(request: Request):
    """Destroy current .onion identity and generate a fresh one on next start.

    This is irreversible — the old .onion address is permanently lost.
    """
    import asyncio, shutil
    from services.tor_hidden_service import tor_service, TOR_DIR

    # Stop Tor if running
    await asyncio.to_thread(tor_service.stop)

    # Delete the hidden service directory (contains the private key)
    hs_dir = TOR_DIR / "hidden_service"
    if hs_dir.exists():
        shutil.rmtree(str(hs_dir), ignore_errors=True)

    # Clear cached address
    tor_service._onion_address = ""

    return {"ok": True, "detail": "Tor identity destroyed. A new .onion will be generated on next start."}
|
||||
|
||||
|
||||
@router.post("/api/settings/agent/reset-all", dependencies=[Depends(require_local_operator)])
@limiter.limit("2/minute")
async def api_reset_all_agent_credentials(request: Request):
    """Nuclear reset: regenerate HMAC key, destroy .onion, revoke agent identity.

    After this, the agent is fully disconnected and needs new credentials.
    Returns per-step results plus the freshly generated secret/identity.
    """
    import asyncio, secrets, shutil
    from services.tor_hidden_service import tor_service, TOR_DIR
    from services.config import get_settings

    results = {}

    # 1. Regenerate HMAC key
    new_secret = secrets.token_hex(24)
    from routers.ai_intel import _write_env_value
    _write_env_value("OPENCLAW_HMAC_SECRET", new_secret)
    results["hmac"] = "regenerated"

    # 2. Revoke agent identity (Ed25519 keypair)
    try:
        from services.openclaw_bridge import revoke_agent_identity
        revoke_agent_identity()
        results["identity"] = "revoked"
    except Exception as e:
        # Best-effort: record the failure and keep going with the reset.
        results["identity"] = f"error: {e}"

    # 3. Destroy .onion and restart Tor with new identity
    await asyncio.to_thread(tor_service.stop)
    hs_dir = TOR_DIR / "hidden_service"
    if hs_dir.exists():
        shutil.rmtree(str(hs_dir), ignore_errors=True)
    tor_service._onion_address = ""
    results["tor"] = "identity destroyed"

    # 4. Bootstrap fresh identity + start Tor with new .onion
    try:
        from services.openclaw_bridge import generate_agent_keypair
        keypair = generate_agent_keypair(force=True)
        results["new_node_id"] = keypair.get("node_id", "")
    except Exception as e:
        results["new_node_id"] = f"error: {e}"

    tor_result = await asyncio.to_thread(tor_service.start)
    results["new_onion"] = tor_result.get("onion_address", "")
    results["tor_ok"] = tor_result.get("ok", False)

    # Clear settings cache
    get_settings.cache_clear()

    return {
        "ok": True,
        "new_hmac_secret": new_secret,
        "detail": "All agent credentials have been reset. Reconfigure your agent with the new credentials.",
        **results,
    }
|
||||
|
||||
|
||||
@router.post("/api/settings/tor/stop", dependencies=[Depends(require_local_operator)])
@limiter.limit("10/minute")
async def api_tor_stop(request: Request):
    """Stop the Tor hidden service."""
    import asyncio
    from services.tor_hidden_service import tor_service

    return await asyncio.to_thread(tor_service.stop)
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,259 @@
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from fastapi import APIRouter, Request, Query, HTTPException
|
||||
from fastapi.responses import StreamingResponse
|
||||
from starlette.background import BackgroundTask
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
_CCTV_PROXY_ALLOWED_HOSTS = {
|
||||
"s3-eu-west-1.amazonaws.com",
|
||||
"jamcams.tfl.gov.uk",
|
||||
"images.data.gov.sg",
|
||||
"cctv.austinmobility.io",
|
||||
"webcams.nyctmc.org",
|
||||
"cwwp2.dot.ca.gov",
|
||||
"wzmedia.dot.ca.gov",
|
||||
"images.wsdot.wa.gov",
|
||||
"olypen.com",
|
||||
"flyykm.com",
|
||||
"cam.pangbornairport.com",
|
||||
"navigator-c2c.dot.ga.gov",
|
||||
"navigator-c2c.ga.gov",
|
||||
"navigator-csc.dot.ga.gov",
|
||||
"vss1live.dot.ga.gov",
|
||||
"vss2live.dot.ga.gov",
|
||||
"vss3live.dot.ga.gov",
|
||||
"vss4live.dot.ga.gov",
|
||||
"vss5live.dot.ga.gov",
|
||||
"511ga.org",
|
||||
"gettingaroundillinois.com",
|
||||
"cctv.travelmidwest.com",
|
||||
"mdotjboss.state.mi.us",
|
||||
"micamerasimages.net",
|
||||
"publicstreamer1.cotrip.org",
|
||||
"publicstreamer2.cotrip.org",
|
||||
"publicstreamer3.cotrip.org",
|
||||
"publicstreamer4.cotrip.org",
|
||||
"cocam.carsprogram.org",
|
||||
"tripcheck.com",
|
||||
"www.tripcheck.com",
|
||||
"infocar.dgt.es",
|
||||
"informo.madrid.es",
|
||||
"www.windy.com",
|
||||
}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class _CCTVProxyProfile:
|
||||
name: str
|
||||
timeout: tuple = (5.0, 10.0)
|
||||
cache_seconds: int = 30
|
||||
headers: dict = field(default_factory=dict)
|
||||
|
||||
|
||||
def _cctv_host_allowed(hostname) -> bool:
|
||||
host = str(hostname or "").strip().lower()
|
||||
if not host:
|
||||
return False
|
||||
for allowed in _CCTV_PROXY_ALLOWED_HOSTS:
|
||||
normalized = str(allowed or "").strip().lower()
|
||||
if host == normalized or host.endswith(f".{normalized}"):
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _proxied_cctv_url(target_url: str) -> str:
|
||||
from urllib.parse import quote
|
||||
return f"/api/cctv/media?url={quote(target_url, safe='')}"
|
||||
|
||||
|
||||
def _cctv_proxy_profile_for_url(target_url: str) -> _CCTVProxyProfile:
    """Pick the fetch profile (timeouts, cache TTL, upstream headers) for a URL.

    Dispatches on the hostname (and occasionally the path) so each upstream
    gets the Referer/Accept headers it requires and an appropriate timeout;
    unknown hosts fall back to a generic profile.
    """
    from urllib.parse import urlparse
    parsed = urlparse(target_url)
    host = str(parsed.hostname or "").strip().lower()
    path = str(parsed.path or "").strip().lower()

    if host in {"jamcams.tfl.gov.uk", "s3-eu-west-1.amazonaws.com"}:
        return _CCTVProxyProfile(name="tfl-jamcam", timeout=(5.0, 20.0), cache_seconds=15,
                                 headers={"Accept": "video/mp4,image/avif,image/webp,image/apng,image/*,*/*;q=0.8", "Referer": "https://tfl.gov.uk/"})
    if host == "images.data.gov.sg":
        return _CCTVProxyProfile(name="lta-singapore", timeout=(5.0, 10.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    if host == "cctv.austinmobility.io":
        return _CCTVProxyProfile(name="austin-mobility", timeout=(5.0, 8.0), cache_seconds=15,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://data.mobility.austin.gov/", "Origin": "https://data.mobility.austin.gov"})
    if host == "webcams.nyctmc.org":
        return _CCTVProxyProfile(name="nyc-dot", timeout=(5.0, 10.0), cache_seconds=15,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    if host in {"cwwp2.dot.ca.gov", "wzmedia.dot.ca.gov"}:
        return _CCTVProxyProfile(name="caltrans", timeout=(5.0, 15.0), cache_seconds=15,
                                 headers={"Accept": "application/vnd.apple.mpegurl,application/x-mpegURL,video/*,image/*,*/*;q=0.8",
                                          "Referer": "https://cwwp2.dot.ca.gov/"})
    if host in {"images.wsdot.wa.gov", "olypen.com", "flyykm.com", "cam.pangbornairport.com"}:
        return _CCTVProxyProfile(name="wsdot", timeout=(5.0, 12.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    if host in {"navigator-c2c.dot.ga.gov", "navigator-c2c.ga.gov", "navigator-csc.dot.ga.gov"}:
        # Snapshot endpoints on GDOT are slower than the rest of the site.
        read_timeout = 18.0 if "/snapshots/" in path else 12.0
        return _CCTVProxyProfile(name="gdot-snapshot", timeout=(5.0, read_timeout), cache_seconds=15,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "http://navigator-c2c.dot.ga.gov/"})
    if host == "511ga.org":
        return _CCTVProxyProfile(name="gdot-511ga-image", timeout=(5.0, 12.0), cache_seconds=15,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://511ga.org/cctv"})
    if host.startswith("vss") and host.endswith("dot.ga.gov"):
        # GDOT HLS streamers (vss1live..vss5live).
        return _CCTVProxyProfile(name="gdot-hls", timeout=(5.0, 20.0), cache_seconds=10,
                                 headers={"Accept": "application/vnd.apple.mpegurl,application/x-mpegURL,video/*,*/*;q=0.8",
                                          "Referer": "http://navigator-c2c.dot.ga.gov/"})
    if host in {"gettingaroundillinois.com", "cctv.travelmidwest.com"}:
        return _CCTVProxyProfile(name="illinois-dot", timeout=(5.0, 12.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    if host in {"mdotjboss.state.mi.us", "micamerasimages.net"}:
        return _CCTVProxyProfile(name="michigan-dot", timeout=(5.0, 12.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://mdotjboss.state.mi.us/"})
    if host in {"publicstreamer1.cotrip.org", "publicstreamer2.cotrip.org",
                "publicstreamer3.cotrip.org", "publicstreamer4.cotrip.org"}:
        return _CCTVProxyProfile(name="cotrip-hls", timeout=(5.0, 20.0), cache_seconds=10,
                                 headers={"Accept": "application/vnd.apple.mpegurl,application/x-mpegURL,video/*,*/*;q=0.8",
                                          "Referer": "https://www.cotrip.org/"})
    if host == "cocam.carsprogram.org":
        return _CCTVProxyProfile(name="cotrip-preview", timeout=(5.0, 12.0), cache_seconds=20,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://www.cotrip.org/"})
    if host in {"tripcheck.com", "www.tripcheck.com"}:
        return _CCTVProxyProfile(name="odot-tripcheck", timeout=(5.0, 12.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    if host == "infocar.dgt.es":
        return _CCTVProxyProfile(name="dgt-spain", timeout=(5.0, 8.0), cache_seconds=60,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://infocar.dgt.es/"})
    if host == "informo.madrid.es":
        return _CCTVProxyProfile(name="madrid-city", timeout=(5.0, 12.0), cache_seconds=30,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8",
                                          "Referer": "https://informo.madrid.es/"})
    if host == "www.windy.com":
        return _CCTVProxyProfile(name="windy-webcams", timeout=(5.0, 12.0), cache_seconds=60,
                                 headers={"Accept": "image/avif,image/webp,image/apng,image/*,*/*;q=0.8"})
    # Fallback for any other allowlisted host.
    return _CCTVProxyProfile(name="generic-cctv", timeout=(5.0, 10.0), cache_seconds=30,
                             headers={"Accept": "*/*"})
|
||||
|
||||
|
||||
def _cctv_upstream_headers(request: Request, profile: _CCTVProxyProfile) -> dict:
    """Build headers for the upstream fetch, forwarding range/conditional hints."""
    headers = {"User-Agent": "Mozilla/5.0 (compatible; ShadowBroker CCTV proxy)", **profile.headers}
    # Forward the client's Range and cache-validation headers so the upstream
    # can answer with 206/304 where supported.
    forwarded = (
        ("range", "Range"),
        ("if-none-match", "If-None-Match"),
        ("if-modified-since", "If-Modified-Since"),
    )
    for incoming_name, outgoing_name in forwarded:
        value = request.headers.get(incoming_name)
        if value:
            headers[outgoing_name] = value
    return headers
|
||||
|
||||
|
||||
def _cctv_response_headers(resp, cache_seconds: int, include_length: bool = True) -> dict:
|
||||
headers = {"Cache-Control": f"public, max-age={cache_seconds}", "Access-Control-Allow-Origin": "*"}
|
||||
for key in ("Accept-Ranges", "Content-Range", "ETag", "Last-Modified"):
|
||||
value = resp.headers.get(key)
|
||||
if value:
|
||||
headers[key] = value
|
||||
if include_length:
|
||||
content_length = resp.headers.get("Content-Length")
|
||||
if content_length:
|
||||
headers["Content-Length"] = content_length
|
||||
return headers
|
||||
|
||||
|
||||
def _fetch_cctv_upstream_response(request: Request, target_url: str, profile: _CCTVProxyProfile):
    """Fetch *target_url* using the profile's timeout/headers.

    Returns a streaming requests.Response on success. Raises HTTPException:
    504 on timeout, 502 on other transport failure, or the upstream status
    code for >=400 responses (the upstream response is closed first).
    """
    import requests as _req
    headers = _cctv_upstream_headers(request, profile)
    try:
        resp = _req.get(target_url, timeout=profile.timeout, stream=True, allow_redirects=True, headers=headers)
    except _req.exceptions.Timeout as exc:
        logger.warning("CCTV upstream timeout [%s] %s", profile.name, target_url)
        raise HTTPException(status_code=504, detail="Upstream timeout") from exc
    except _req.exceptions.RequestException as exc:
        logger.warning("CCTV upstream request failure [%s] %s: %s", profile.name, target_url, exc)
        raise HTTPException(status_code=502, detail="Upstream fetch failed") from exc
    if resp.status_code >= 400:
        logger.info("CCTV upstream HTTP %s [%s] %s", resp.status_code, profile.name, target_url)
        # Close the streamed response so the connection returns to the pool.
        resp.close()
        raise HTTPException(status_code=int(resp.status_code), detail=f"Upstream returned {resp.status_code}")
    return resp
|
||||
|
||||
|
||||
def _rewrite_cctv_hls_playlist(base_url: str, body: str) -> str:
    """Rewrite segment/sub-playlist URLs in an HLS playlist to go via our proxy.

    URLs are resolved against *base_url*; only http(s) targets whose host is
    allowlisted are rewritten — everything else is left untouched. The
    trailing newline of *body* is preserved.
    """
    import re
    from urllib.parse import urljoin, urlparse

    def _rewrite_target(target: str) -> str:
        # Return the proxied form of *target*, or the original string when it
        # cannot be safely proxied (empty, data: URI, non-http, or
        # non-allowlisted host).
        candidate = str(target or "").strip()
        if not candidate or candidate.startswith("data:"):
            return candidate
        absolute = urljoin(base_url, candidate)
        parsed_target = urlparse(absolute)
        if parsed_target.scheme not in ("http", "https"):
            return candidate
        if not _cctv_host_allowed(parsed_target.hostname):
            return candidate
        return _proxied_cctv_url(absolute)

    rewritten_lines: list = []
    for raw_line in body.splitlines():
        stripped = raw_line.strip()
        if not stripped:
            rewritten_lines.append(raw_line)
            continue
        if stripped.startswith("#"):
            # Tag lines may carry URI="..." attributes (e.g. EXT-X-KEY,
            # EXT-X-MEDIA) — rewrite those in place.
            rewritten_lines.append(re.sub(r'URI="([^"]+)"',
                                          lambda match: f'URI="{_rewrite_target(match.group(1))}"', raw_line))
            continue
        # Non-tag, non-blank lines are segment/playlist URIs.
        rewritten_lines.append(_rewrite_target(stripped))
    return "\n".join(rewritten_lines) + ("\n" if body.endswith("\n") else "")
|
||||
|
||||
|
||||
def _proxy_cctv_media_response(request: Request, target_url: str):
    """Proxy one CCTV media URL: buffer+rewrite HLS playlists, stream the rest.

    Playlists are detected by a .m3u8 path or an mpegurl content type and are
    returned fully-buffered with their URLs rewritten through our proxy;
    other media is streamed through in chunks with the upstream closed when
    the stream finishes.
    """
    from urllib.parse import urlparse
    from fastapi.responses import Response
    parsed = urlparse(target_url)
    profile = _cctv_proxy_profile_for_url(target_url)
    resp = _fetch_cctv_upstream_response(request, target_url, profile)
    content_type = resp.headers.get("Content-Type", "application/octet-stream")
    is_hls_playlist = (
        ".m3u8" in str(parsed.path or "").lower()
        or "mpegurl" in content_type.lower()
        or "vnd.apple.mpegurl" in content_type.lower()
    )
    if is_hls_playlist:
        body = resp.text
        if "#EXTM3U" in body:
            body = _rewrite_cctv_hls_playlist(target_url, body)
        resp.close()
        # Content-Length changed by the rewrite, so don't pass it through.
        return Response(content=body, media_type=content_type,
                        headers=_cctv_response_headers(resp, cache_seconds=profile.cache_seconds, include_length=False))
    # Stream binary media; BackgroundTask closes the upstream after send.
    return StreamingResponse(resp.iter_content(chunk_size=65536), status_code=resp.status_code,
                             media_type=content_type,
                             headers=_cctv_response_headers(resp, cache_seconds=profile.cache_seconds),
                             background=BackgroundTask(resp.close))
|
||||
|
||||
|
||||
@router.get("/api/cctv/media")
@limiter.limit("120/minute")
async def cctv_media_proxy(request: Request, url: str = Query(...)):
    """Proxy CCTV media through the backend to bypass browser CORS restrictions.

    SSRF guard: the target host must be on the allowlist and the scheme must
    be http(s) before any fetch happens.
    """
    from urllib.parse import urlparse
    parsed = urlparse(url)
    if not _cctv_host_allowed(parsed.hostname):
        raise HTTPException(status_code=403, detail="Host not allowed")
    if parsed.scheme not in ("http", "https"):
        raise HTTPException(status_code=400, detail="Invalid scheme")
    return _proxy_cctv_media_response(request, url)
|
||||
@@ -0,0 +1,469 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import math
|
||||
import threading
|
||||
from typing import Any
|
||||
from fastapi import APIRouter, Request, Response, Query, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
from services.data_fetcher import get_latest_data, update_all_data
|
||||
import orjson
|
||||
import json as json_mod
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
_refresh_lock = threading.Lock()
|
||||
|
||||
|
||||
class ViewportUpdate(BaseModel):
    """Map viewport bounds in degrees: south, west, north, east."""

    s: float
    w: float
    n: float
    e: float


class LayerUpdate(BaseModel):
    """Mapping of layer name -> enabled flag."""

    layers: dict[str, bool]
|
||||
|
||||
|
||||
# Last accepted viewport bounds (s, w, n, e), or None before the first update.
_LAST_VIEWPORT_UPDATE: tuple | None = None
# time.monotonic() timestamp of the last accepted update.
_LAST_VIEWPORT_UPDATE_TS = 0.0
# Guards the two values above across concurrent requests.
_VIEWPORT_UPDATE_LOCK = threading.Lock()
# Minimum per-edge movement (degrees) for bounds to count as "changed".
_VIEWPORT_DEDUPE_EPSILON = 1.0
# Minimum seconds between accepted viewport updates.
_VIEWPORT_MIN_UPDATE_S = 10.0
|
||||
|
||||
|
||||
def _normalize_longitude(value: float) -> float:
|
||||
normalized = ((value + 180.0) % 360.0 + 360.0) % 360.0 - 180.0
|
||||
if normalized == -180.0 and value > 0:
|
||||
return 180.0
|
||||
return normalized
|
||||
|
||||
|
||||
def _normalize_viewport_bounds(s: float, w: float, n: float, e: float) -> tuple:
    """Clamp latitudes and wrap longitudes for a requested viewport.

    Falls back to the whole world when the request is degenerate:
    non-finite width, at least 360 degrees wide, or inverted after
    longitude wrapping.
    """
    clamped_south = max(-90.0, min(90.0, s))
    clamped_north = max(-90.0, min(90.0, n))
    span = abs(e - w)
    if not math.isfinite(span) or span >= 360.0:
        return clamped_south, -180.0, clamped_north, 180.0
    wrapped_west = _normalize_longitude(w)
    wrapped_east = _normalize_longitude(e)
    if wrapped_east < wrapped_west:
        return clamped_south, -180.0, clamped_north, 180.0
    return clamped_south, wrapped_west, clamped_north, wrapped_east
|
||||
|
||||
|
||||
def _viewport_changed_enough(bounds: tuple) -> bool:
    """Decide whether a viewport update is worth acting on.

    Accepts the update when *bounds* moved more than the dedupe epsilon
    on any edge, or when the minimum refresh interval has elapsed since
    the last accepted update. The first call always accepts.

    Fix: the original repeated the interval guard unconditionally after
    the epsilon check, which rejected every call within the minimum
    interval regardless of movement and made the "changed" computation
    dead code.
    """
    global _LAST_VIEWPORT_UPDATE, _LAST_VIEWPORT_UPDATE_TS
    import time
    now = time.monotonic()
    with _VIEWPORT_UPDATE_LOCK:
        if _LAST_VIEWPORT_UPDATE is None:
            _LAST_VIEWPORT_UPDATE = bounds
            _LAST_VIEWPORT_UPDATE_TS = now
            return True
        changed = any(
            abs(current - previous) > _VIEWPORT_DEDUPE_EPSILON
            for current, previous in zip(bounds, _LAST_VIEWPORT_UPDATE)
        )
        # Throttle only unchanged viewports; a real move goes through.
        if not changed and (now - _LAST_VIEWPORT_UPDATE_TS) < _VIEWPORT_MIN_UPDATE_S:
            return False
        _LAST_VIEWPORT_UPDATE = bounds
        _LAST_VIEWPORT_UPDATE_TS = now
        return True
|
||||
|
||||
|
||||
def _queue_viirs_change_refresh() -> None:
    """Fire-and-forget a VIIRS night-lights change recompute.

    Runs in a daemon thread so enabling the layer never blocks the
    request that toggled it.
    """
    from services.fetchers.earth_observation import fetch_viirs_change_nodes
    worker = threading.Thread(target=fetch_viirs_change_nodes, daemon=True)
    worker.start()
|
||||
|
||||
|
||||
def _etag_response(request: Request, payload: dict, prefix: str = "", default=None):
    """Serve *payload* as JSON with ETag revalidation.

    A matching If-None-Match header short-circuits to 304 with no body;
    otherwise the payload is sanitized (non-finite floats -> None) and
    encoded with NaN rejection enabled.
    """
    tag = _current_etag(prefix)
    headers = {"ETag": tag, "Cache-Control": "no-cache"}
    if request.headers.get("if-none-match") == tag:
        return Response(status_code=304, headers=headers)
    body = json_mod.dumps(_json_safe(payload), default=default, allow_nan=False)
    return Response(content=body, media_type="application/json", headers=headers)
|
||||
|
||||
|
||||
def _current_etag(prefix: str = "") -> str:
    """Build the ETag string from the data and active-layers version counters."""
    from services.fetchers._store import get_active_layers_version, get_data_version
    data_version = get_data_version()
    layers_version = get_active_layers_version()
    return "{}v{}-l{}".format(prefix, data_version, layers_version)
|
||||
|
||||
|
||||
def _json_safe(value):
|
||||
if isinstance(value, float):
|
||||
return value if math.isfinite(value) else None
|
||||
if isinstance(value, dict):
|
||||
return {k: _json_safe(v) for k, v in list(value.items())}
|
||||
if isinstance(value, list):
|
||||
return [_json_safe(v) for v in list(value)]
|
||||
if isinstance(value, tuple):
|
||||
return [_json_safe(v) for v in list(value)]
|
||||
return value
|
||||
|
||||
|
||||
def _sanitize_payload(value):
|
||||
if isinstance(value, float):
|
||||
return value if math.isfinite(value) else None
|
||||
if isinstance(value, dict):
|
||||
return {k: _sanitize_payload(v) for k, v in list(value.items())}
|
||||
if isinstance(value, (list, tuple)):
|
||||
return list(value)
|
||||
return value
|
||||
|
||||
|
||||
def _bbox_filter(items: list, s: float, w: float, n: float, e: float,
|
||||
lat_key: str = "lat", lng_key: str = "lng") -> list:
|
||||
pad_lat = (n - s) * 0.2
|
||||
pad_lng = (e - w) * 0.2 if e > w else ((e + 360 - w) * 0.2)
|
||||
s2, n2 = s - pad_lat, n + pad_lat
|
||||
w2, e2 = w - pad_lng, e + pad_lng
|
||||
crosses_antimeridian = w2 > e2
|
||||
out = []
|
||||
for item in items:
|
||||
lat = item.get(lat_key)
|
||||
lng = item.get(lng_key)
|
||||
if lat is None or lng is None:
|
||||
out.append(item)
|
||||
continue
|
||||
if not (s2 <= lat <= n2):
|
||||
continue
|
||||
if crosses_antimeridian:
|
||||
if lng >= w2 or lng <= e2:
|
||||
out.append(item)
|
||||
else:
|
||||
if w2 <= lng <= e2:
|
||||
out.append(item)
|
||||
return out
|
||||
|
||||
|
||||
def _bbox_filter_geojson_points(items: list, s: float, w: float, n: float, e: float) -> list:
|
||||
pad_lat = (n - s) * 0.2
|
||||
pad_lng = (e - w) * 0.2 if e > w else ((e + 360 - w) * 0.2)
|
||||
s2, n2 = s - pad_lat, n + pad_lat
|
||||
w2, e2 = w - pad_lng, e + pad_lng
|
||||
crosses_antimeridian = w2 > e2
|
||||
out = []
|
||||
for item in items:
|
||||
geometry = item.get("geometry") if isinstance(item, dict) else None
|
||||
coords = geometry.get("coordinates") if isinstance(geometry, dict) else None
|
||||
if not isinstance(coords, (list, tuple)) or len(coords) < 2:
|
||||
out.append(item)
|
||||
continue
|
||||
lng, lat = coords[0], coords[1]
|
||||
if lat is None or lng is None:
|
||||
out.append(item)
|
||||
continue
|
||||
if not (s2 <= lat <= n2):
|
||||
continue
|
||||
if crosses_antimeridian:
|
||||
if lng >= w2 or lng <= e2:
|
||||
out.append(item)
|
||||
else:
|
||||
if w2 <= lng <= e2:
|
||||
out.append(item)
|
||||
return out
|
||||
|
||||
|
||||
def _bbox_spans(s, w, n, e) -> tuple:
|
||||
if None in (s, w, n, e):
|
||||
return 180.0, 360.0
|
||||
lat_span = max(0.0, float(n) - float(s))
|
||||
lng_span = float(e) - float(w)
|
||||
if lng_span < 0:
|
||||
lng_span += 360.0
|
||||
if lng_span == 0 and w == -180 and e == 180:
|
||||
lng_span = 360.0
|
||||
return lat_span, max(0.0, lng_span)
|
||||
|
||||
|
||||
def _downsample_points(items: list, max_items: int) -> list:
|
||||
if max_items <= 0 or len(items) <= max_items:
|
||||
return items
|
||||
step = len(items) / float(max_items)
|
||||
return [items[min(len(items) - 1, int(i * step))] for i in range(max_items)]
|
||||
|
||||
|
||||
def _world_and_continental_scale(has_bbox: bool, s, w, n, e) -> tuple:
    """Classify a viewport as (world_scale, continental_scale).

    No bbox at all counts as world scale; continental applies only to a
    real, sub-world bbox that still spans a large chunk of the globe.
    """
    lat_span, lng_span = _bbox_spans(s, w, n, e)
    is_world = (not has_bbox) or lng_span >= 300 or lat_span >= 120
    is_continental = has_bbox and not is_world and (lng_span >= 120 or lat_span >= 55)
    return is_world, is_continental
|
||||
|
||||
|
||||
def _filter_sigint_by_layers(items: list, active_layers: dict) -> list:
|
||||
allow_aprs = bool(active_layers.get("sigint_aprs", True))
|
||||
allow_mesh = bool(active_layers.get("sigint_meshtastic", True))
|
||||
if allow_aprs and allow_mesh:
|
||||
return items
|
||||
allowed_sources: set = {"js8call"}
|
||||
if allow_aprs:
|
||||
allowed_sources.add("aprs")
|
||||
if allow_mesh:
|
||||
allowed_sources.update({"meshtastic", "meshtastic-map"})
|
||||
return [item for item in items if str(item.get("source") or "").lower() in allowed_sources]
|
||||
|
||||
|
||||
def _sigint_totals_for_items(items: list) -> dict:
|
||||
totals = {"total": len(items), "meshtastic": 0, "meshtastic_live": 0, "meshtastic_map": 0,
|
||||
"aprs": 0, "js8call": 0}
|
||||
for item in items:
|
||||
source = str(item.get("source") or "").lower()
|
||||
if source == "meshtastic":
|
||||
totals["meshtastic"] += 1
|
||||
if bool(item.get("from_api")):
|
||||
totals["meshtastic_map"] += 1
|
||||
else:
|
||||
totals["meshtastic_live"] += 1
|
||||
elif source == "aprs":
|
||||
totals["aprs"] += 1
|
||||
elif source == "js8call":
|
||||
totals["js8call"] += 1
|
||||
return totals
|
||||
|
||||
|
||||
@router.get("/api/refresh", dependencies=[Depends(require_admin)])
|
||||
@limiter.limit("2/minute")
|
||||
async def force_refresh(request: Request):
|
||||
from services.schemas import RefreshResponse
|
||||
if not _refresh_lock.acquire(blocking=False):
|
||||
return {"status": "refresh already in progress"}
|
||||
|
||||
def _do_refresh():
|
||||
try:
|
||||
update_all_data()
|
||||
finally:
|
||||
_refresh_lock.release()
|
||||
|
||||
t = threading.Thread(target=_do_refresh)
|
||||
t.start()
|
||||
return {"status": "refreshing in background"}
|
||||
|
||||
|
||||
@router.post("/api/ais/feed")
|
||||
@limiter.limit("60/minute")
|
||||
async def ais_feed(request: Request):
|
||||
"""Accept AIS-catcher HTTP JSON feed (POST decoded AIS messages)."""
|
||||
from services.ais_stream import ingest_ais_catcher
|
||||
try:
|
||||
body = await request.json()
|
||||
except Exception:
|
||||
return JSONResponse(status_code=422, content={"ok": False, "detail": "invalid JSON body"})
|
||||
msgs = body.get("msgs", [])
|
||||
if not msgs:
|
||||
return {"status": "ok", "ingested": 0}
|
||||
count = ingest_ais_catcher(msgs)
|
||||
return {"status": "ok", "ingested": count}
|
||||
|
||||
|
||||
@router.post("/api/viewport")
|
||||
@limiter.limit("60/minute")
|
||||
async def update_viewport(vp: ViewportUpdate, request: Request): # noqa: ARG001
|
||||
"""Receive frontend map bounds. AIS stream stays global so open-ocean
|
||||
vessels are never dropped — the frontend worker handles viewport culling."""
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@router.post("/api/layers")
|
||||
@limiter.limit("30/minute")
|
||||
async def update_layers(update: LayerUpdate, request: Request):
|
||||
"""Receive frontend layer toggle state. Starts/stops streams accordingly."""
|
||||
from services.fetchers._store import active_layers, bump_active_layers_version, is_any_active
|
||||
old_ships = is_any_active("ships_military", "ships_cargo", "ships_civilian", "ships_passenger", "ships_tracked_yachts")
|
||||
old_mesh = is_any_active("sigint_meshtastic")
|
||||
old_aprs = is_any_active("sigint_aprs")
|
||||
old_viirs = is_any_active("viirs_nightlights")
|
||||
changed = False
|
||||
for key, value in update.layers.items():
|
||||
if key in active_layers:
|
||||
if active_layers[key] != value:
|
||||
changed = True
|
||||
active_layers[key] = value
|
||||
if changed:
|
||||
bump_active_layers_version()
|
||||
new_ships = is_any_active("ships_military", "ships_cargo", "ships_civilian", "ships_passenger", "ships_tracked_yachts")
|
||||
new_mesh = is_any_active("sigint_meshtastic")
|
||||
new_aprs = is_any_active("sigint_aprs")
|
||||
new_viirs = is_any_active("viirs_nightlights")
|
||||
if old_ships and not new_ships:
|
||||
from services.ais_stream import stop_ais_stream
|
||||
stop_ais_stream()
|
||||
logger.info("AIS stream stopped (all ship layers disabled)")
|
||||
elif not old_ships and new_ships:
|
||||
from services.ais_stream import start_ais_stream
|
||||
start_ais_stream()
|
||||
logger.info("AIS stream started (ship layer enabled)")
|
||||
from services.sigint_bridge import sigint_grid
|
||||
if old_mesh and not new_mesh:
|
||||
sigint_grid.mesh.stop()
|
||||
logger.info("Meshtastic MQTT bridge stopped (layer disabled)")
|
||||
elif not old_mesh and new_mesh:
|
||||
sigint_grid.mesh.start()
|
||||
logger.info("Meshtastic MQTT bridge started (layer enabled)")
|
||||
if old_aprs and not new_aprs:
|
||||
sigint_grid.aprs.stop()
|
||||
logger.info("APRS bridge stopped (layer disabled)")
|
||||
elif not old_aprs and new_aprs:
|
||||
sigint_grid.aprs.start()
|
||||
logger.info("APRS bridge started (layer enabled)")
|
||||
if not old_viirs and new_viirs:
|
||||
_queue_viirs_change_refresh()
|
||||
logger.info("VIIRS change refresh queued (layer enabled)")
|
||||
return {"status": "ok"}
|
||||
|
||||
|
||||
@router.get("/api/live-data")
|
||||
@limiter.limit("120/minute")
|
||||
async def live_data(request: Request):
|
||||
return get_latest_data()
|
||||
|
||||
|
||||
@router.get("/api/live-data/fast")
|
||||
@limiter.limit("120/minute")
|
||||
async def live_data_fast(
|
||||
request: Request,
|
||||
s: float = Query(None, description="South bound (ignored)", ge=-90, le=90),
|
||||
w: float = Query(None, description="West bound (ignored)", ge=-180, le=180),
|
||||
n: float = Query(None, description="North bound (ignored)", ge=-90, le=90),
|
||||
e: float = Query(None, description="East bound (ignored)", ge=-180, le=180),
|
||||
):
|
||||
etag = _current_etag(prefix="fast|full|")
|
||||
if request.headers.get("if-none-match") == etag:
|
||||
return Response(status_code=304, headers={"ETag": etag, "Cache-Control": "no-cache"})
|
||||
from services.fetchers._store import (active_layers, get_latest_data_subset_refs, get_source_timestamps_snapshot)
|
||||
d = get_latest_data_subset_refs(
|
||||
"last_updated", "commercial_flights", "military_flights", "private_flights",
|
||||
"private_jets", "tracked_flights", "ships", "cctv", "uavs", "liveuamap",
|
||||
"gps_jamming", "satellites", "satellite_source", "satellite_analysis",
|
||||
"sigint", "sigint_totals", "trains",
|
||||
)
|
||||
freshness = get_source_timestamps_snapshot()
|
||||
ships_enabled = any(active_layers.get(key, True) for key in (
|
||||
"ships_military", "ships_cargo", "ships_civilian", "ships_passenger", "ships_tracked_yachts"))
|
||||
cctv_total = len(d.get("cctv") or [])
|
||||
sigint_items = _filter_sigint_by_layers(d.get("sigint") or [], active_layers)
|
||||
sigint_totals = _sigint_totals_for_items(sigint_items)
|
||||
payload = {
|
||||
"commercial_flights": (d.get("commercial_flights") or []) if active_layers.get("flights", True) else [],
|
||||
"military_flights": (d.get("military_flights") or []) if active_layers.get("military", True) else [],
|
||||
"private_flights": (d.get("private_flights") or []) if active_layers.get("private", True) else [],
|
||||
"private_jets": (d.get("private_jets") or []) if active_layers.get("jets", True) else [],
|
||||
"tracked_flights": (d.get("tracked_flights") or []) if active_layers.get("tracked", True) else [],
|
||||
"ships": (d.get("ships") or []) if ships_enabled else [],
|
||||
"cctv": (d.get("cctv") or []) if active_layers.get("cctv", True) else [],
|
||||
"uavs": (d.get("uavs") or []) if active_layers.get("military", True) else [],
|
||||
"liveuamap": (d.get("liveuamap") or []) if active_layers.get("global_incidents", True) else [],
|
||||
"gps_jamming": (d.get("gps_jamming") or []) if active_layers.get("gps_jamming", True) else [],
|
||||
"satellites": (d.get("satellites") or []) if active_layers.get("satellites", True) else [],
|
||||
"satellite_source": d.get("satellite_source", "none"),
|
||||
"satellite_analysis": (d.get("satellite_analysis") or {}) if active_layers.get("satellites", True) else {},
|
||||
"sigint": sigint_items if (active_layers.get("sigint_meshtastic", True) or active_layers.get("sigint_aprs", True)) else [],
|
||||
"sigint_totals": sigint_totals,
|
||||
"cctv_total": cctv_total,
|
||||
"trains": (d.get("trains") or []) if active_layers.get("trains", True) else [],
|
||||
"freshness": freshness,
|
||||
}
|
||||
return Response(content=orjson.dumps(_sanitize_payload(payload)), media_type="application/json",
|
||||
headers={"ETag": etag, "Cache-Control": "no-cache"})
|
||||
|
||||
|
||||
@router.get("/api/live-data/slow")
|
||||
@limiter.limit("60/minute")
|
||||
async def live_data_slow(
|
||||
request: Request,
|
||||
s: float = Query(None, description="South bound (ignored)", ge=-90, le=90),
|
||||
w: float = Query(None, description="West bound (ignored)", ge=-180, le=180),
|
||||
n: float = Query(None, description="North bound (ignored)", ge=-90, le=90),
|
||||
e: float = Query(None, description="East bound (ignored)", ge=-180, le=180),
|
||||
):
|
||||
etag = _current_etag(prefix="slow|full|")
|
||||
if request.headers.get("if-none-match") == etag:
|
||||
return Response(status_code=304, headers={"ETag": etag, "Cache-Control": "no-cache"})
|
||||
from services.fetchers._store import (active_layers, get_latest_data_subset_refs, get_source_timestamps_snapshot)
|
||||
d = get_latest_data_subset_refs(
|
||||
"last_updated", "news", "stocks", "financial_source", "oil", "weather", "traffic",
|
||||
"earthquakes", "frontlines", "gdelt", "airports", "kiwisdr", "satnogs_stations",
|
||||
"satnogs_observations", "tinygs_satellites", "space_weather", "internet_outages",
|
||||
"firms_fires", "datacenters", "military_bases", "power_plants", "viirs_change_nodes",
|
||||
"scanners", "weather_alerts", "ukraine_alerts", "air_quality", "volcanoes",
|
||||
"fishing_activity", "psk_reporter", "correlations", "uap_sightings", "wastewater",
|
||||
"crowdthreat", "threat_level", "trending_markets",
|
||||
)
|
||||
freshness = get_source_timestamps_snapshot()
|
||||
payload = {
|
||||
"last_updated": d.get("last_updated"),
|
||||
"threat_level": d.get("threat_level"),
|
||||
"trending_markets": d.get("trending_markets", []),
|
||||
"news": d.get("news", []),
|
||||
"stocks": d.get("stocks", {}),
|
||||
"financial_source": d.get("financial_source", ""),
|
||||
"oil": d.get("oil", {}),
|
||||
"weather": d.get("weather"),
|
||||
"traffic": d.get("traffic", []),
|
||||
"earthquakes": (d.get("earthquakes") or []) if active_layers.get("earthquakes", True) else [],
|
||||
"frontlines": d.get("frontlines") if active_layers.get("ukraine_frontline", True) else None,
|
||||
"gdelt": (d.get("gdelt") or []) if active_layers.get("global_incidents", True) else [],
|
||||
"airports": d.get("airports") or [],
|
||||
"kiwisdr": (d.get("kiwisdr") or []) if active_layers.get("kiwisdr", True) else [],
|
||||
"satnogs_stations": (d.get("satnogs_stations") or []) if active_layers.get("satnogs", True) else [],
|
||||
"satnogs_total": len(d.get("satnogs_stations") or []),
|
||||
"satnogs_observations": (d.get("satnogs_observations") or []) if active_layers.get("satnogs", True) else [],
|
||||
"tinygs_satellites": (d.get("tinygs_satellites") or []) if active_layers.get("tinygs", True) else [],
|
||||
"tinygs_total": len(d.get("tinygs_satellites") or []),
|
||||
"psk_reporter": (d.get("psk_reporter") or []) if active_layers.get("psk_reporter", True) else [],
|
||||
"space_weather": d.get("space_weather"),
|
||||
"internet_outages": (d.get("internet_outages") or []) if active_layers.get("internet_outages", True) else [],
|
||||
"firms_fires": (d.get("firms_fires") or []) if active_layers.get("firms", True) else [],
|
||||
"datacenters": (d.get("datacenters") or []) if active_layers.get("datacenters", True) else [],
|
||||
"military_bases": (d.get("military_bases") or []) if active_layers.get("military_bases", True) else [],
|
||||
"power_plants": (d.get("power_plants") or []) if active_layers.get("power_plants", True) else [],
|
||||
"viirs_change_nodes": (d.get("viirs_change_nodes") or []) if active_layers.get("viirs_nightlights", True) else [],
|
||||
"scanners": (d.get("scanners") or []) if active_layers.get("scanners", True) else [],
|
||||
"weather_alerts": d.get("weather_alerts", []) if active_layers.get("weather_alerts", True) else [],
|
||||
"ukraine_alerts": d.get("ukraine_alerts", []) if active_layers.get("ukraine_alerts", True) else [],
|
||||
"air_quality": (d.get("air_quality") or []) if active_layers.get("air_quality", True) else [],
|
||||
"volcanoes": (d.get("volcanoes") or []) if active_layers.get("volcanoes", True) else [],
|
||||
"fishing_activity": (d.get("fishing_activity") or []) if active_layers.get("fishing_activity", True) else [],
|
||||
"correlations": (d.get("correlations") or []) if active_layers.get("correlations", True) else [],
|
||||
"uap_sightings": (d.get("uap_sightings") or []) if active_layers.get("uap_sightings", True) else [],
|
||||
"wastewater": (d.get("wastewater") or []) if active_layers.get("wastewater", True) else [],
|
||||
"crowdthreat": (d.get("crowdthreat") or []) if active_layers.get("crowdthreat", True) else [],
|
||||
"freshness": freshness,
|
||||
}
|
||||
return Response(
|
||||
content=orjson.dumps(_sanitize_payload(payload), default=str, option=orjson.OPT_NON_STR_KEYS),
|
||||
media_type="application/json",
|
||||
headers={"ETag": etag, "Cache-Control": "no-cache"},
|
||||
)
|
||||
|
||||
|
||||
# ── Satellite Overflight Counting ───────────────────────────────────────────
|
||||
# Counts unique satellites whose ground track entered a bounding box over 24h.
|
||||
# Uses cached TLEs + SGP4 propagation — no extra network requests.
|
||||
|
||||
class OverflightRequest(BaseModel):
    """Bounding box plus look-ahead window for overflight counting."""
    s: float  # south latitude bound (degrees)
    w: float  # west longitude bound (degrees)
    n: float  # north latitude bound (degrees)
    e: float  # east longitude bound (degrees)
    hours: int = 24  # propagation window length
|
||||
|
||||
|
||||
@router.post("/api/satellites/overflights")
|
||||
@limiter.limit("10/minute")
|
||||
async def satellite_overflights(request: Request, body: OverflightRequest):
|
||||
from services.fetchers.satellites import compute_overflights, _sat_gp_cache
|
||||
gp_data = _sat_gp_cache.get("data")
|
||||
if not gp_data:
|
||||
return JSONResponse({"total": 0, "by_mission": {}, "satellites": [], "error": "No GP data cached yet"})
|
||||
bbox = {"s": body.s, "w": body.w, "n": body.n, "e": body.e}
|
||||
result = compute_overflights(gp_data, bbox, hours=body.hours)
|
||||
return JSONResponse(result)
|
||||
@@ -0,0 +1,85 @@
|
||||
import time as _time_mod
|
||||
from fastapi import APIRouter, Request, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin
|
||||
from services.data_fetcher import get_latest_data
|
||||
from services.schemas import HealthResponse
|
||||
import os
|
||||
|
||||
APP_VERSION = os.environ.get("_HEALTH_APP_VERSION", "0.9.7")
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
def _get_app_version() -> str:
    """Return the running app version string.

    Prefers ``main.APP_VERSION`` so this router stays in sync with the
    application entry point; falls back to this module's APP_VERSION
    (env override or baked-in default) when main is unavailable.
    """
    # Import lazily to avoid circular import; main sets APP_VERSION before including routers
    try:
        import main as _main
        return _main.APP_VERSION
    except Exception:
        return APP_VERSION
|
||||
|
||||
|
||||
# Cached process start time; resolved lazily from main on first use.
_start_time_ref: dict = {"value": None}
|
||||
|
||||
|
||||
def _get_start_time() -> float:
    """Return the backend start timestamp, caching it after first lookup.

    Prefers ``main._start_time``; if main is unavailable (tests, early
    import), pins the current time so uptime reporting still works.
    """
    cached = _start_time_ref["value"]
    if cached is not None:
        return cached
    try:
        import main as _main
        _start_time_ref["value"] = _main._start_time
    except Exception:
        _start_time_ref["value"] = _time_mod.time()
    return _start_time_ref["value"]
|
||||
|
||||
|
||||
@router.get("/api/health", response_model=HealthResponse)
|
||||
@limiter.limit("30/minute")
|
||||
async def health_check(request: Request):
|
||||
from services.fetchers._store import get_source_timestamps_snapshot
|
||||
from services.slo import compute_all_statuses, summarise_statuses
|
||||
|
||||
d = get_latest_data()
|
||||
last = d.get("last_updated")
|
||||
timestamps = get_source_timestamps_snapshot()
|
||||
slo_statuses = compute_all_statuses(d, timestamps)
|
||||
slo_summary = summarise_statuses(slo_statuses)
|
||||
# Top-level status reflects worst SLO result — "degraded" if any
|
||||
# yellow, "error" if any red, "ok" otherwise. This is the single
|
||||
# field an external probe / pager can watch.
|
||||
top_status = "ok"
|
||||
if slo_summary.get("red", 0) > 0:
|
||||
top_status = "error"
|
||||
elif slo_summary.get("yellow", 0) > 0:
|
||||
top_status = "degraded"
|
||||
return {
|
||||
"status": top_status,
|
||||
"version": _get_app_version(),
|
||||
"last_updated": last,
|
||||
"sources": {
|
||||
"flights": len(d.get("commercial_flights", [])),
|
||||
"military": len(d.get("military_flights", [])),
|
||||
"ships": len(d.get("ships", [])),
|
||||
"satellites": len(d.get("satellites", [])),
|
||||
"earthquakes": len(d.get("earthquakes", [])),
|
||||
"cctv": len(d.get("cctv", [])),
|
||||
"news": len(d.get("news", [])),
|
||||
"uavs": len(d.get("uavs", [])),
|
||||
"firms_fires": len(d.get("firms_fires", [])),
|
||||
"liveuamap": len(d.get("liveuamap", [])),
|
||||
"gdelt": len(d.get("gdelt", [])),
|
||||
"uap_sightings": len(d.get("uap_sightings", [])),
|
||||
},
|
||||
"freshness": timestamps,
|
||||
"uptime_seconds": round(_time_mod.time() - _get_start_time()),
|
||||
"slo": slo_statuses,
|
||||
"slo_summary": slo_summary,
|
||||
}
|
||||
|
||||
|
||||
@router.get("/api/debug-latest", dependencies=[Depends(require_admin)])
|
||||
@limiter.limit("30/minute")
|
||||
async def debug_latest_data(request: Request):
|
||||
return list(get_latest_data().keys())
|
||||
@@ -0,0 +1,598 @@
|
||||
"""Infonet economy / governance / gates / bootstrap HTTP surface.
|
||||
|
||||
Source of truth: ``infonet-economy/IMPLEMENTATION_PLAN.md`` §2.1.
|
||||
|
||||
Read endpoints return chain-derived state (computed by the
|
||||
``services.infonet`` adapters / pure functions). Write endpoints take
|
||||
a payload, validate it through the cutover-registered validators, and
|
||||
return a structured "would-emit" preview. Production wiring (signing
|
||||
+ ``Infonet.append`` persistence) is a thin follow-on; the validation
|
||||
contract is locked here.
|
||||
|
||||
Cross-cutting design rule: errors are diagnostic, not punitive. Each
|
||||
write endpoint returns ``{"ok": False, "reason": "..."}`` on
|
||||
validation failure with the exact field that failed. Frontend
|
||||
surfaces the reason in the UI.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, Body, Path
|
||||
|
||||
# Triggers the chain cutover at module-load time so registered
|
||||
# validators are live for any subsequent route invocation.
|
||||
from services.infonet import _chain_cutover # noqa: F401
|
||||
from services.infonet.adapters.gate_adapter import InfonetGateAdapter
|
||||
from services.infonet.adapters.oracle_adapter import InfonetOracleAdapter
|
||||
from services.infonet.adapters.reputation_adapter import InfonetReputationAdapter
|
||||
from services.infonet.bootstrap import compute_active_features
|
||||
from services.infonet.config import (
|
||||
CONFIG,
|
||||
IMMUTABLE_PRINCIPLES,
|
||||
)
|
||||
from services.infonet.governance import (
|
||||
apply_petition_payload,
|
||||
compute_petition_state,
|
||||
compute_upgrade_state,
|
||||
)
|
||||
from services.infonet.governance.dsl_executor import InvalidPetition
|
||||
from services.infonet.partition import (
|
||||
classify_event_type,
|
||||
is_chain_stale,
|
||||
should_mark_provisional,
|
||||
)
|
||||
from services.infonet.privacy import (
|
||||
DEXScaffolding,
|
||||
RingCTScaffolding,
|
||||
ShieldedBalanceScaffolding,
|
||||
StealthAddressScaffolding,
|
||||
)
|
||||
from services.infonet.schema import (
|
||||
INFONET_ECONOMY_EVENT_TYPES,
|
||||
validate_infonet_event_payload,
|
||||
)
|
||||
from services.infonet.time_validity import chain_majority_time
|
||||
|
||||
logger = logging.getLogger("routers.infonet")
|
||||
|
||||
router = APIRouter(prefix="/api/infonet", tags=["infonet"])
|
||||
|
||||
|
||||
# ─── Chain access helper ─────────────────────────────────────────────────
|
||||
# Every adapter takes a ``chain_provider`` callable. We pull the live
|
||||
# Infonet chain from mesh_hashchain. Tests can monkeypatch this.
|
||||
|
||||
def _live_chain() -> list[dict[str, Any]]:
    """Snapshot the live Infonet chain as a plain list of events.

    Returns [] when the mesh hashchain is unavailable (tests, or a node
    running without the mesh service). Fix: collapsed the original's two
    redundant branches — ``list(events)`` copies a list and a deque
    alike, so the isinstance(list) special case added nothing.
    """
    try:
        from services.mesh.mesh_hashchain import infonet
        events = getattr(infonet, "events", None)
        # list() snapshots either a list or a deque into an independent copy.
        if events is not None:
            return list(events)
    except Exception as exc:
        logger.debug("infonet chain unavailable: %s", exc)
    return []
|
||||
|
||||
|
||||
def _now() -> float:
    """Chain-majority time when the chain provides one; wall clock otherwise."""
    majority = chain_majority_time(_live_chain())
    if majority > 0:
        return majority
    return float(time.time())
|
||||
|
||||
|
||||
# ─── Status ──────────────────────────────────────────────────────────────
|
||||
|
||||
@router.get("/status")
|
||||
def infonet_status() -> dict[str, Any]:
|
||||
"""Top-level health snapshot for the InfonetTerminal HUD.
|
||||
|
||||
Returns ramp activation flags, partition staleness, privacy
|
||||
primitive statuses, immutable principles, and counts of
|
||||
chain-derived state (markets / petitions / gates / etc).
|
||||
"""
|
||||
chain = _live_chain()
|
||||
now = _now()
|
||||
features = compute_active_features(chain)
|
||||
|
||||
# Privacy primitive statuses (truthful — most are NOT_IMPLEMENTED).
|
||||
privacy = {
|
||||
"ringct": RingCTScaffolding().status().value,
|
||||
"stealth_address": StealthAddressScaffolding().status().value,
|
||||
"shielded_balance": ShieldedBalanceScaffolding().status().value,
|
||||
"dex": DEXScaffolding().status().value,
|
||||
}
|
||||
|
||||
return {
|
||||
"ok": True,
|
||||
"now": now,
|
||||
"chain_majority_time": chain_majority_time(chain),
|
||||
"chain_event_count": len(chain),
|
||||
"chain_stale": is_chain_stale(chain, now=now),
|
||||
"ramp": {
|
||||
"node_count": features.node_count,
|
||||
"bootstrap_resolution_active": features.bootstrap_resolution_active,
|
||||
"staked_resolution_active": features.staked_resolution_active,
|
||||
"governance_petitions_active": features.governance_petitions_active,
|
||||
"upgrade_governance_active": features.upgrade_governance_active,
|
||||
"commoncoin_active": features.commoncoin_active,
|
||||
},
|
||||
"privacy_primitive_status": privacy,
|
||||
"immutable_principles": dict(IMMUTABLE_PRINCIPLES),
|
||||
"config_keys_count": len(CONFIG),
|
||||
"infonet_economy_event_types_count": len(INFONET_ECONOMY_EVENT_TYPES),
|
||||
}
|
||||
|
||||
|
||||
# ─── Petitions / governance ──────────────────────────────────────────────
|
||||
|
||||
@router.get("/petitions")
|
||||
def list_petitions() -> dict[str, Any]:
|
||||
"""List petition_file events on the chain with their current state."""
|
||||
chain = _live_chain()
|
||||
now = _now()
|
||||
out: list[dict[str, Any]] = []
|
||||
for ev in chain:
|
||||
if ev.get("event_type") != "petition_file":
|
||||
continue
|
||||
pid = (ev.get("payload") or {}).get("petition_id")
|
||||
if not isinstance(pid, str):
|
||||
continue
|
||||
try:
|
||||
state = compute_petition_state(pid, chain, now=now)
|
||||
out.append({
|
||||
"petition_id": state.petition_id,
|
||||
"status": state.status,
|
||||
"filer_id": state.filer_id,
|
||||
"filed_at": state.filed_at,
|
||||
"petition_payload": state.petition_payload,
|
||||
"signature_governance_weight": state.signature_governance_weight,
|
||||
"signature_threshold_at_filing": state.signature_threshold_at_filing,
|
||||
"votes_for_weight": state.votes_for_weight,
|
||||
"votes_against_weight": state.votes_against_weight,
|
||||
"voting_deadline": state.voting_deadline,
|
||||
"challenge_window_until": state.challenge_window_until,
|
||||
})
|
||||
except Exception as exc:
|
||||
logger.warning("petition state error for %s: %s", pid, exc)
|
||||
return {"ok": True, "petitions": out, "now": now}
|
||||
|
||||
|
||||
@router.get("/petitions/{petition_id}")
|
||||
def get_petition(petition_id: str = Path(...)) -> dict[str, Any]:
|
||||
chain = _live_chain()
|
||||
now = _now()
|
||||
state = compute_petition_state(petition_id, chain, now=now)
|
||||
return {"ok": True, "petition": state.__dict__, "now": now}
|
||||
|
||||
|
||||
@router.post("/petitions/preview")
|
||||
def preview_petition_payload(payload: dict[str, Any] = Body(...)) -> dict[str, Any]:
|
||||
"""Validate a petition payload through the DSL executor without
|
||||
emitting it. Returns the candidate config diff so the UI can show
|
||||
"this petition would change vote_decay_days from 90 to 30".
|
||||
"""
|
||||
try:
|
||||
result = apply_petition_payload(payload)
|
||||
return {
|
||||
"ok": True,
|
||||
"changed_keys": list(result.changed_keys),
|
||||
"new_values": {k: result.new_config[k] for k in result.changed_keys},
|
||||
}
|
||||
except InvalidPetition as exc:
|
||||
return {"ok": False, "reason": str(exc)}
|
||||
|
||||
|
||||
@router.post("/events/validate")
|
||||
def validate_event(body: dict[str, Any] = Body(...)) -> dict[str, Any]:
|
||||
"""Validate an arbitrary Infonet economy event payload.
|
||||
|
||||
Frontend uses this for client-side preflight before signing /
|
||||
submitting an event. Returns ``{ok: True}`` on success or
|
||||
``{ok: False, reason: ...}`` with the exact validation failure.
|
||||
"""
|
||||
event_type = body.get("event_type")
|
||||
payload = body.get("payload", {})
|
||||
if not isinstance(event_type, str) or not event_type:
|
||||
return {"ok": False, "reason": "event_type required"}
|
||||
if not isinstance(payload, dict):
|
||||
return {"ok": False, "reason": "payload must be an object"}
|
||||
ok, reason = validate_infonet_event_payload(event_type, payload)
|
||||
return {
|
||||
"ok": ok,
|
||||
"reason": reason if not ok else None,
|
||||
"tier": classify_event_type(event_type),
|
||||
"would_be_provisional": should_mark_provisional(event_type, _live_chain(), now=_now()),
|
||||
}
|
||||
|
||||
|
||||
# ─── Upgrade-hash governance ────────────────────────────────────────────
|
||||
|
||||
@router.get("/upgrades")
def list_upgrades() -> dict[str, Any]:
    """Snapshot every ``upgrade_propose`` event on the chain as a summary row."""
    chain = _live_chain()
    now = _now()
    rows: list[dict[str, Any]] = []
    for event in chain:
        if event.get("event_type") != "upgrade_propose":
            continue
        proposal_id = (event.get("payload") or {}).get("proposal_id")
        if not isinstance(proposal_id, str):
            continue
        try:
            # Heavy node set is a runtime concept (transport tier ==
            # private_strong per plan §3.5). Empty here for the
            # snapshot endpoint; production will pass the live set.
            state = compute_upgrade_state(proposal_id, chain, now=now, heavy_node_ids=set())
            rows.append(
                {
                    "proposal_id": state.proposal_id,
                    "status": state.status,
                    "proposer_id": state.proposer_id,
                    "filed_at": state.filed_at,
                    "release_hash": state.release_hash,
                    "target_protocol_version": state.target_protocol_version,
                    "votes_for_weight": state.votes_for_weight,
                    "votes_against_weight": state.votes_against_weight,
                    "readiness_fraction": state.readiness.fraction,
                    "readiness_threshold_met": state.readiness.threshold_met,
                }
            )
        except Exception as exc:
            # One bad proposal must not break the whole listing.
            logger.warning("upgrade state error for %s: %s", proposal_id, exc)
    return {"ok": True, "upgrades": rows, "now": now}
|
||||
|
||||
|
||||
@router.get("/upgrades/{proposal_id}")
def get_upgrade(proposal_id: str = Path(...)) -> dict[str, Any]:
    """Return the full computed governance state for one upgrade proposal."""
    chain = _live_chain()
    now = _now()
    # heavy_node_ids is empty for this snapshot endpoint; production is
    # expected to pass the live heavy-node set (see list_upgrades).
    state = compute_upgrade_state(proposal_id, chain, now=now, heavy_node_ids=set())
    return {
        "ok": True,
        "upgrade": {
            "proposal_id": state.proposal_id,
            "status": state.status,
            "proposer_id": state.proposer_id,
            "filed_at": state.filed_at,
            "release_hash": state.release_hash,
            "target_protocol_version": state.target_protocol_version,
            "signature_governance_weight": state.signature_governance_weight,
            "votes_for_weight": state.votes_for_weight,
            "votes_against_weight": state.votes_against_weight,
            "voting_deadline": state.voting_deadline,
            "challenge_window_until": state.challenge_window_until,
            "activation_deadline": state.activation_deadline,
            # Heavy-node readiness attestation progress toward activation.
            "readiness": {
                "total_heavy_nodes": state.readiness.total_heavy_nodes,
                "ready_count": state.readiness.ready_count,
                "fraction": state.readiness.fraction,
                "threshold_met": state.readiness.threshold_met,
            },
        },
        "now": now,
    }
|
||||
|
||||
|
||||
# ─── Markets / resolution / disputes ────────────────────────────────────
|
||||
|
||||
@router.get("/markets/{market_id}")
def get_market_state(market_id: str = Path(...)) -> dict[str, Any]:
    """Full market view: lifecycle, snapshot, evidence, stakes,
    excluded predictors, dispute state."""
    chain = _live_chain()
    now = _now()
    # The adapter takes a chain *provider*; capture the snapshot taken
    # above so every read below sees one consistent chain.
    oracle = InfonetOracleAdapter(lambda: chain)

    status = oracle.market_status(market_id, now=now)
    snap = oracle.find_snapshot(market_id)
    bundles = oracle.collect_evidence(market_id)
    excluded = sorted(oracle.excluded_predictor_ids(market_id))
    disputes = oracle.collect_disputes(market_id)
    reversed_flag = oracle.market_was_reversed(market_id)

    return {
        "ok": True,
        "market_id": market_id,
        "status": status.value,
        "snapshot": snap,
        "evidence_bundles": [
            {
                "node_id": b.node_id,
                "claimed_outcome": b.claimed_outcome,
                "evidence_hashes": list(b.evidence_hashes),
                "source_description": b.source_description,
                "bond": b.bond,
                "timestamp": b.timestamp,
                "is_first_for_side": b.is_first_for_side,
                "submission_hash": b.submission_hash,
            }
            for b in bundles
        ],
        "excluded_predictor_ids": excluded,
        "disputes": [
            {
                "dispute_id": d.dispute_id,
                "challenger_id": d.challenger_id,
                "challenger_stake": d.challenger_stake,
                "opened_at": d.opened_at,
                "is_resolved": d.is_resolved,
                "resolved_outcome": d.resolved_outcome,
                "confirm_stakes": d.confirm_stakes,
                "reverse_stakes": d.reverse_stakes,
            }
            for d in disputes
        ],
        "was_reversed": reversed_flag,
        "now": now,
    }
|
||||
|
||||
|
||||
@router.get("/markets/{market_id}/preview-resolution")
def preview_resolution(market_id: str = Path(...)) -> dict[str, Any]:
    """Run the resolution decision procedure without emitting a
    finalize event. UI uses this to show "if resolution closed now,
    the market would resolve as <outcome> for <reason>"."""
    chain = _live_chain()
    oracle = InfonetOracleAdapter(lambda: chain)
    result = oracle.resolve_market(market_id)
    # stake_returns / stake_winnings are keyed by (node_id, rep_type)
    # tuples; bond maps are keyed by node_id alone.
    return {
        "ok": True,
        "preview": {
            "outcome": result.outcome,
            "reason": result.reason,
            "is_provisional": result.is_provisional,
            "burned_amount": result.burned_amount,
            "stake_returns": [
                {"node_id": k[0], "rep_type": k[1], "amount": v}
                for k, v in result.stake_returns.items()
            ],
            "stake_winnings": [
                {"node_id": k[0], "rep_type": k[1], "amount": v}
                for k, v in result.stake_winnings.items()
            ],
            "bond_returns": [
                {"node_id": k, "amount": v} for k, v in result.bond_returns.items()
            ],
            "bond_forfeits": [
                {"node_id": k, "amount": v} for k, v in result.bond_forfeits.items()
            ],
            "first_submitter_bonuses": [
                {"node_id": k, "amount": v}
                for k, v in result.first_submitter_bonuses.items()
            ],
        },
    }
|
||||
|
||||
|
||||
# ─── Gate shutdown lifecycle ────────────────────────────────────────────
|
||||
|
||||
@router.get("/gates/{gate_id}")
def get_gate_state(gate_id: str = Path(...)) -> dict[str, Any]:
    """Full gate view: metadata, membership, lock/suspension/shutdown state."""
    chain = _live_chain()
    now = _now()
    gates = InfonetGateAdapter(lambda: chain)
    meta = gates.gate_meta(gate_id)
    if meta is None:
        # Unknown gate: soft failure, no HTTP error.
        return {"ok": False, "reason": "gate_not_found"}
    suspension = gates.suspension_state(gate_id, now=now)
    shutdown = gates.shutdown_state(gate_id, now=now)
    locked = gates.locked_state(gate_id)
    members = sorted(gates.member_set(gate_id))
    return {
        "ok": True,
        "gate_id": gate_id,
        "meta": {
            "creator_node_id": meta.creator_node_id,
            "display_name": meta.display_name,
            "entry_sacrifice": meta.entry_sacrifice,
            "min_overall_rep": meta.min_overall_rep,
            "min_gate_rep": dict(meta.min_gate_rep),
            "created_at": meta.created_at,
        },
        "members": members,
        "ratified": gates.is_ratified(gate_id),
        "cumulative_member_oracle_rep": gates.cumulative_member_oracle_rep(gate_id),
        "locked": {
            "is_locked": locked.locked,
            "locked_at": locked.locked_at,
            "locked_by": list(locked.locked_by),
        },
        "suspension": {
            "status": suspension.status,
            "suspended_at": suspension.suspended_at,
            "suspended_until": suspension.suspended_until,
            "last_shutdown_petition_at": suspension.last_shutdown_petition_at,
        },
        "shutdown": {
            "has_pending": shutdown.has_pending,
            "pending_petition_id": shutdown.pending_petition_id,
            "pending_status": shutdown.pending_status,
            "execution_at": shutdown.execution_at,
            "executed": shutdown.executed,
        },
        "now": now,
    }
|
||||
|
||||
|
||||
# ─── Reputation views ───────────────────────────────────────────────────
|
||||
|
||||
@router.get("/nodes/{node_id}/reputation")
def get_node_reputation(node_id: str = Path(...)) -> dict[str, Any]:
    """Reputation view for one node: oracle/common rep, decay, mint breakdown."""
    chain = _live_chain()
    rep = InfonetReputationAdapter(lambda: chain)
    breakdown = rep.oracle_rep_breakdown(node_id)
    breakdown_view = {
        "free_prediction_mints": breakdown.free_prediction_mints,
        "staked_prediction_returns": breakdown.staked_prediction_returns,
        "staked_prediction_losses": breakdown.staked_prediction_losses,
        "total": breakdown.total,
    }
    return {
        "ok": True,
        "node_id": node_id,
        "oracle_rep": rep.oracle_rep(node_id),
        "oracle_rep_active": rep.oracle_rep_active(node_id),
        "oracle_rep_lifetime": rep.oracle_rep_lifetime(node_id),
        "common_rep": rep.common_rep(node_id),
        "decay_factor": rep.decay_factor(node_id),
        "last_successful_prediction_ts": rep.last_successful_prediction_ts(node_id),
        "breakdown": breakdown_view,
    }
|
||||
|
||||
|
||||
# ─── Bootstrap ──────────────────────────────────────────────────────────
|
||||
|
||||
@router.get("/bootstrap/markets/{market_id}")
def get_bootstrap_market_state(market_id: str = Path(...)) -> dict[str, Any]:
    """Bootstrap-mode market view: per-vote eligibility plus the current tally."""
    from services.infonet.bootstrap import (
        deduplicate_votes,
        validate_bootstrap_eligibility,
    )

    chain = _live_chain()
    canonical_votes = deduplicate_votes(market_id, chain)
    votes_summary: list[dict[str, Any]] = []
    yes_count = 0
    no_count = 0
    for vote in canonical_votes:
        voter_id = vote.get("node_id") or ""
        side = (vote.get("payload") or {}).get("side")
        decision = validate_bootstrap_eligibility(voter_id, market_id, chain)
        votes_summary.append(
            {
                "node_id": voter_id,
                "side": side,
                "eligible": decision.eligible,
                "ineligible_reason": None if decision.eligible else decision.reason,
            }
        )
        if not decision.eligible:
            continue
        # Only eligible voters count toward the tally.
        if side == "yes":
            yes_count += 1
        elif side == "no":
            no_count += 1
    return {
        "ok": True,
        "market_id": market_id,
        "votes": votes_summary,
        "tally": {
            "yes": yes_count,
            "no": no_count,
            "total_eligible": yes_count + no_count,
            "min_market_participants": int(CONFIG["min_market_participants"]),
            "supermajority_threshold": float(CONFIG["bootstrap_resolution_supermajority"]),
        },
    }
|
||||
|
||||
|
||||
# ─── Signed write: append an Infonet economy event ──────────────────────
|
||||
|
||||
@router.post("/append")
def append_event(body: dict[str, Any] = Body(...)) -> dict[str, Any]:
    """Append a signed Infonet economy event to the chain.

    Body shape (all required for production):

        {
          "event_type": str,        # one of INFONET_ECONOMY_EVENT_TYPES
          "node_id": str,           # signer
          "payload": dict,          # event-specific fields
          "signature": str,         # hex
          "sequence": int,          # node-monotonic
          "public_key": str,        # base64
          "public_key_algo": str,   # "ed25519" or "ecdsa"
          "protocol_version": str   # optional, defaults to current
        }

    The cutover-registered validators run automatically via
    ``mesh_hashchain.Infonet.append`` — payload validation, signature
    verification, replay protection, sequence ordering, public-key
    binding, revocation status. No additional security wrapper is
    needed because ``Infonet.append`` IS the secure entry point.

    Returns the appended event dict on success, or
    ``{"ok": False, "reason": "..."}`` on validation / signing failure.
    """
    # Shape checks below are cheap pre-validation only; the authoritative
    # checks happen inside Infonet.append.
    if not isinstance(body, dict):
        return {"ok": False, "reason": "body_must_be_object"}

    event_type = body.get("event_type")
    if not isinstance(event_type, str) or event_type not in INFONET_ECONOMY_EVENT_TYPES:
        return {
            "ok": False,
            "reason": f"event_type must be one of INFONET_ECONOMY_EVENT_TYPES "
            f"(got {event_type!r})",
        }

    node_id = body.get("node_id")
    if not isinstance(node_id, str) or not node_id:
        return {"ok": False, "reason": "node_id required"}

    payload = body.get("payload", {})
    if not isinstance(payload, dict):
        return {"ok": False, "reason": "payload must be an object"}

    sequence = body.get("sequence", 0)
    try:
        sequence = int(sequence)
    except (TypeError, ValueError):
        return {"ok": False, "reason": "sequence must be an integer"}
    if sequence <= 0:
        return {"ok": False, "reason": "sequence must be > 0"}

    signature = str(body.get("signature") or "")
    public_key = str(body.get("public_key") or "")
    public_key_algo = str(body.get("public_key_algo") or "")
    protocol_version = str(body.get("protocol_version") or "")

    if not signature or not public_key or not public_key_algo:
        return {
            "ok": False,
            "reason": "signature, public_key, and public_key_algo are required",
        }

    try:
        from services.mesh.mesh_hashchain import infonet
        event = infonet.append(
            event_type=event_type,
            node_id=node_id,
            payload=payload,
            signature=signature,
            sequence=sequence,
            public_key=public_key,
            public_key_algo=public_key_algo,
            protocol_version=protocol_version,
        )
    except ValueError as exc:
        # Infonet.append raises ValueError for any validation failure
        # — payload / signature / replay / sequence / binding. The
        # message is user-facing per the non-hostile UX rule.
        return {"ok": False, "reason": str(exc)}
    except Exception as exc:
        # Unexpected failure: log the traceback server-side, return only
        # the exception class name to the caller.
        logger.exception("infonet append failed")
        return {"ok": False, "reason": f"server_error: {type(exc).__name__}"}

    return {"ok": True, "event": event}
|
||||
|
||||
|
||||
# ─── Function Keys (citizen + operator views) ───────────────────────────
|
||||
|
||||
@router.get("/function-keys/operator/{operator_id}/batch-summary")
def operator_batch_summary(operator_id: str = Path(...)) -> dict[str, Any]:
    """Sprint 11+ scaffolding: report the operator's local batch counter
    for the current period.

    Production wires this through the operator's local-store
    implementation; the scaffolding does not persist, so counts reset per
    process.
    """
    summary: dict[str, Any] = {
        "ok": True,
        "operator_id": operator_id,
        "scaffolding_only": True,
        "note": "Production operators maintain a persistent BatchedSettlementBatch. "
        "This endpoint reports the in-memory state of the local batch.",
    }
    return summary
|
||||
|
||||
|
||||
# Public API of this module: only the FastAPI router is exported.
__all__ = ["router"]
|
||||
@@ -0,0 +1,565 @@
|
||||
import asyncio
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import secrets
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from fastapi import APIRouter, Depends, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from auth import (
|
||||
_is_debug_test_request,
|
||||
_scoped_view_authenticated,
|
||||
_verify_peer_push_hmac,
|
||||
require_admin,
|
||||
)
|
||||
from limiter import limiter
|
||||
from services.config import get_settings
|
||||
from services.mesh.mesh_compatibility import (
|
||||
LEGACY_AGENT_ID_LOOKUP_TARGET,
|
||||
legacy_agent_id_lookup_blocked,
|
||||
record_legacy_agent_id_lookup,
|
||||
sunset_target_label,
|
||||
)
|
||||
from services.mesh.mesh_signed_events import (
|
||||
MeshWriteExemption,
|
||||
SignedWriteKind,
|
||||
get_prepared_signed_write,
|
||||
mesh_write_exempt,
|
||||
requires_signed_write,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
# Peer ids already warned about by _warn_legacy_dm_pubkey_lookup;
# process-lifetime dedupe so each legacy lookup is logged once.
_WARNED_LEGACY_DM_PUBKEY_LOOKUPS: set[str] = set()

router = APIRouter()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Local helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def _safe_int(val, default=0):
|
||||
try:
|
||||
return int(val)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _warn_legacy_dm_pubkey_lookup(agent_id: str) -> None:
    """Log — once per peer per process — that a legacy direct-agent_id
    DH pubkey lookup was used, with its sunset target."""
    peer_id = str(agent_id or "").strip().lower()
    if not peer_id:
        return
    if peer_id in _WARNED_LEGACY_DM_PUBKEY_LOOKUPS:
        return
    _WARNED_LEGACY_DM_PUBKEY_LOOKUPS.add(peer_id)
    logger.warning(
        "mesh legacy DH pubkey lookup used for %s via direct agent_id; prefer invite-scoped lookup handles before removal in %s",
        peer_id,
        sunset_target_label(LEGACY_AGENT_ID_LOOKUP_TARGET),
    )
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Transition delegates: forward to main.py so test monkeypatches still work.
|
||||
# These will move to a shared module once main.py routes are removed.
|
||||
# ---------------------------------------------------------------------------
|
||||
def _main_delegate(name):
|
||||
def _wrapper(*a, **kw):
|
||||
import main as _m
|
||||
return getattr(_m, name)(*a, **kw)
|
||||
_wrapper.__name__ = name
|
||||
return _wrapper
|
||||
|
||||
|
||||
# Each delegate resolves against main.py lazily on every call (see
# _main_delegate), so test monkeypatches on main keep working during the
# route-migration transition.
_verify_signed_write = _main_delegate("_verify_signed_write")
_secure_dm_enabled = _main_delegate("_secure_dm_enabled")
_legacy_dm_get_allowed = _main_delegate("_legacy_dm_get_allowed")
_rns_private_dm_ready = _main_delegate("_rns_private_dm_ready")
_anonymous_dm_hidden_transport_enforced = _main_delegate("_anonymous_dm_hidden_transport_enforced")
_high_privacy_profile_enabled = _main_delegate("_high_privacy_profile_enabled")
_dm_send_from_signed_request = _main_delegate("_dm_send_from_signed_request")
_dm_poll_secure_from_signed_request = _main_delegate("_dm_poll_secure_from_signed_request")
_dm_count_secure_from_signed_request = _main_delegate("_dm_count_secure_from_signed_request")
_validate_private_signed_sequence = _main_delegate("_validate_private_signed_sequence")
|
||||
|
||||
|
||||
def _signed_body(request: Request) -> dict[str, Any]:
    """Return a copy of the verified signed-write body for *request*,
    or an empty dict when no prepared signed write is attached."""
    prepared = get_prepared_signed_write(request)
    return {} if prepared is None else dict(prepared.body)
|
||||
|
||||
|
||||
async def _maybe_apply_dm_relay_jitter() -> None:
    """Sleep 50–500 ms (CSPRNG-chosen) when the high-privacy profile is
    enabled; otherwise return immediately."""
    if not _high_privacy_profile_enabled():
        return
    delay_ms = 50 + secrets.randbelow(451)
    await asyncio.sleep(delay_ms / 1000.0)
|
||||
|
||||
|
||||
# Contract-version tag stamped on reduced request-class DMs (v3 sealed senders).
_REQUEST_V2_REDUCED_VERSION = "request-v2-reduced-v3"
# Valid sender-recovery lifecycle states; anything else normalizes to "pending".
_REQUEST_V2_RECOVERY_STATES = {"pending", "verified", "failed"}
|
||||
|
||||
|
||||
def _is_canonical_reduced_request_message(message: dict[str, Any]) -> bool:
    """True when *message* is a canonical reduced request-class message:
    request delivery class, reduced contract version, recovery required."""
    item = dict(message or {})
    delivery = str(item.get("delivery_class", "") or "").strip().lower()
    contract = str(item.get("request_contract_version", "") or "").strip()
    return (
        delivery == "request"
        and contract == _REQUEST_V2_REDUCED_VERSION
        and item.get("sender_recovery_required") is True
    )
|
||||
|
||||
|
||||
def _annotate_request_recovery_message(message: dict[str, Any]) -> dict[str, Any]:
    """Stamp reduced-contract recovery metadata onto a blinded request message.

    Applies only to request-class messages whose sender id is blinded
    (``sealed:`` / ``sender_token:``) with a ``v3:`` seal; any other
    message comes back as an unmodified shallow copy.
    """
    item = dict(message or {})
    delivery = str(item.get("delivery_class", "") or "").strip().lower()
    sender = str(item.get("sender_id", "") or "").strip()
    seal = str(item.get("sender_seal", "") or "").strip()
    blinded = sender.startswith(("sealed:", "sender_token:"))
    if delivery != "request" or not blinded or not seal.startswith("v3:"):
        return item
    if not str(item.get("request_contract_version", "") or "").strip():
        item["request_contract_version"] = _REQUEST_V2_REDUCED_VERSION
    item["sender_recovery_required"] = True
    state = str(item.get("sender_recovery_state", "") or "").strip().lower()
    item["sender_recovery_state"] = state if state in _REQUEST_V2_RECOVERY_STATES else "pending"
    return item
|
||||
|
||||
|
||||
def _annotate_request_recovery_messages(messages: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Annotate every message in *messages* (None-safe) with recovery metadata."""
    return [_annotate_request_recovery_message(item) for item in (messages or [])]
|
||||
|
||||
|
||||
def _request_duplicate_authority_rank(message: dict[str, Any]) -> int:
    """Rank how authoritative a request-class duplicate is (higher wins):
    canonical reduced (3) > plain named sender (2) > blinded sender (1)
    > non-request / empty sender (0)."""
    item = dict(message or {})
    if str(item.get("delivery_class", "") or "").strip().lower() != "request":
        return 0
    if _is_canonical_reduced_request_message(item):
        return 3
    sender = str(item.get("sender_id", "") or "").strip()
    if not sender:
        return 0
    return 1 if sender.startswith(("sealed:", "sender_token:")) else 2
|
||||
|
||||
|
||||
def _request_duplicate_recovery_rank(message: dict[str, Any]) -> int:
    """Rank sender-recovery progress for canonical reduced requests:
    verified (2) > pending (1) > anything else (0)."""
    if not _is_canonical_reduced_request_message(message):
        return 0
    state = str(dict(message or {}).get("sender_recovery_state", "") or "").strip().lower()
    return {"verified": 2, "pending": 1}.get(state, 0)
|
||||
|
||||
|
||||
def _poll_duplicate_source_rank(source: str) -> int:
|
||||
normalized = str(source or "").strip().lower()
|
||||
if normalized == "relay":
|
||||
return 2
|
||||
if normalized == "reticulum":
|
||||
return 1
|
||||
return 0
|
||||
|
||||
|
||||
def _should_replace_dm_poll_duplicate(
    existing: dict[str, Any],
    existing_source: str,
    candidate: dict[str, Any],
    candidate_source: str,
) -> bool:
    """Decide whether *candidate* should replace *existing* for one msg_id.

    Tie-breakers in order: request authority rank, recovery-state rank,
    source rank, then strictly newer timestamp.
    """
    def _ts(item: dict[str, Any]) -> float:
        try:
            return float(item.get("timestamp", 0) or 0)
        except Exception:
            return 0.0

    rank_pairs = (
        (_request_duplicate_authority_rank(candidate), _request_duplicate_authority_rank(existing)),
        (_request_duplicate_recovery_rank(candidate), _request_duplicate_recovery_rank(existing)),
        (_poll_duplicate_source_rank(candidate_source), _poll_duplicate_source_rank(existing_source)),
    )
    for candidate_rank, existing_rank in rank_pairs:
        if candidate_rank != existing_rank:
            return candidate_rank > existing_rank
    return _ts(candidate) > _ts(existing)
|
||||
|
||||
|
||||
def _merge_dm_poll_messages(
    relay_messages: list[dict[str, Any]],
    direct_messages: list[dict[str, Any]],
) -> list[dict[str, Any]]:
    """Merge relay and direct (reticulum) poll results, deduping by msg_id.

    Duplicates keep the winner per _should_replace_dm_poll_duplicate;
    messages without a msg_id are always kept. Output is sorted by
    timestamp ascending.
    """
    merged: list[dict[str, Any]] = []
    slot_by_msg_id: dict[str, tuple[int, str]] = {}

    def _ingest(items: list[dict[str, Any]], source: str) -> None:
        for raw in items or []:
            message = dict(raw or {})
            msg_id = str(message.get("msg_id", "") or "").strip()
            if not msg_id:
                merged.append(message)
                continue
            slot = slot_by_msg_id.get(msg_id)
            if slot is None:
                slot_by_msg_id[msg_id] = (len(merged), source)
                merged.append(message)
                continue
            position, prior_source = slot
            if _should_replace_dm_poll_duplicate(merged[position], prior_source, message, source):
                merged[position] = message
                slot_by_msg_id[msg_id] = (position, source)

    _ingest(relay_messages, "relay")
    _ingest(direct_messages, "reticulum")
    return sorted(merged, key=lambda message: float(message.get("timestamp", 0) or 0))
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Route handlers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
@router.post("/api/mesh/dm/register")
@limiter.limit("10/minute")
@requires_signed_write(kind=SignedWriteKind.DM_REGISTER)
async def dm_register_key(request: Request):
    """Register a DH public key for encrypted DM key exchange.

    The body has already passed signed-write verification (decorator);
    this handler does field-shape checks, a ±7-day timestamp sanity
    window, best-effort reputation registration, then delegates the
    authoritative accept/reject to ``dm_relay.register_dh_key``.
    """
    body = _signed_body(request)
    # assumes signed-write verification guarantees string fields here — TODO confirm
    agent_id = body.get("agent_id", "").strip()
    dh_pub_key = body.get("dh_pub_key", "").strip()
    dh_algo = body.get("dh_algo", "").strip()
    timestamp = _safe_int(body.get("timestamp", 0) or 0)
    public_key = body.get("public_key", "").strip()
    public_key_algo = body.get("public_key_algo", "").strip()
    signature = body.get("signature", "").strip()
    sequence = _safe_int(body.get("sequence", 0) or 0)
    protocol_version = body.get("protocol_version", "").strip()
    if not agent_id or not dh_pub_key or not dh_algo or not timestamp:
        return {"ok": False, "detail": "Missing agent_id, dh_pub_key, dh_algo, or timestamp"}
    if dh_algo.upper() not in ("X25519", "ECDH_P256", "ECDH"):
        return {"ok": False, "detail": "Unsupported dh_algo"}
    now_ts = int(time.time())
    # Reject keys timestamped more than 7 days from server time.
    if abs(timestamp - now_ts) > 7 * 86400:
        return {"ok": False, "detail": "DH key timestamp is too far from current time"}
    from services.mesh.mesh_dm_relay import dm_relay

    try:
        from services.mesh.mesh_reputation import reputation_ledger

        # Best-effort: a reputation-ledger failure must not block key registration.
        reputation_ledger.register_node(agent_id, public_key, public_key_algo)
    except Exception:
        pass

    accepted, detail, metadata = dm_relay.register_dh_key(
        agent_id,
        dh_pub_key,
        dh_algo,
        timestamp,
        signature,
        public_key,
        public_key_algo,
        protocol_version,
        sequence,
    )
    if not accepted:
        return {"ok": False, "detail": detail}

    return {"ok": True, **(metadata or {})}
|
||||
|
||||
|
||||
@router.get("/api/mesh/dm/pubkey")
@limiter.limit("30/minute")
async def dm_get_pubkey(request: Request, agent_id: str = "", lookup_token: str = ""):
    """Transition delegate: forward to ``main.dm_get_pubkey``.

    Late import keeps test monkeypatches on main effective.
    """
    import main as _m

    return await _m.dm_get_pubkey(request, agent_id=agent_id, lookup_token=lookup_token)
|
||||
|
||||
|
||||
@router.get("/api/mesh/dm/prekey-bundle")
@limiter.limit("30/minute")
async def dm_get_prekey_bundle(request: Request, agent_id: str = "", lookup_token: str = ""):
    """Transition delegate: forward to ``main.dm_get_prekey_bundle``.

    Late import keeps test monkeypatches on main effective.
    """
    import main as _m

    return await _m.dm_get_prekey_bundle(request, agent_id=agent_id, lookup_token=lookup_token)
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/prekey-peer-lookup")
@limiter.limit("60/minute")
@mesh_write_exempt(MeshWriteExemption.PEER_GOSSIP)
async def dm_prekey_peer_lookup(request: Request):
    """Peer-authenticated invite lookup handle resolution.

    This endpoint exists for private/bootstrap peers to import signed invites
    without exposing a stable agent_id on the ordinary lookup surface. It only
    accepts HMAC-authenticated peer calls and only resolves lookup_token.
    """
    # Cheap size pre-check from the header before reading the body;
    # a lying/absent header falls through to the HMAC check below.
    content_length = request.headers.get("content-length")
    if content_length:
        try:
            if int(content_length) > 4096:
                return JSONResponse(
                    status_code=413,
                    content={"ok": False, "detail": "Request body too large"},
                )
        except (TypeError, ValueError):
            pass
    body_bytes = await request.body()
    # HMAC is verified over the raw bytes before any JSON parsing.
    if not _verify_peer_push_hmac(request, body_bytes):
        return JSONResponse(
            status_code=403,
            content={"ok": False, "detail": "Invalid or missing peer HMAC"},
        )
    try:
        import json

        body = json.loads(body_bytes or b"{}")
    except Exception:
        return {"ok": False, "detail": "invalid json"}
    lookup_token = str(dict(body or {}).get("lookup_token", "") or "").strip()
    if not lookup_token:
        return {"ok": False, "detail": "lookup_token required"}
    from services.mesh.mesh_wormhole_prekey import fetch_dm_prekey_bundle

    # NOTE(review): allow_peer_lookup=False despite this being the peer
    # endpoint — presumably because peer auth already happened above and
    # the fetch must resolve lookup_token only; confirm against
    # fetch_dm_prekey_bundle's contract.
    result = fetch_dm_prekey_bundle(
        agent_id="",
        lookup_token=lookup_token,
        allow_peer_lookup=False,
    )
    if not result.get("ok"):
        return {"ok": False, "detail": str(result.get("detail", "") or "Prekey bundle not found")}
    # Strip the resolved agent_id so the stable id is never exposed here.
    safe = dict(result)
    safe.pop("resolved_agent_id", None)
    safe["lookup_mode"] = "invite_lookup_handle"
    return safe
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/send")
@limiter.limit("20/minute")
@requires_signed_write(kind=SignedWriteKind.DM_SEND)
async def dm_send(request: Request):
    """Signed-write DM send: delegate to main's handler via the transition shim."""
    return await _dm_send_from_signed_request(request)
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/poll")
@limiter.limit("30/minute")
@requires_signed_write(kind=SignedWriteKind.DM_POLL)
async def dm_poll_secure(request: Request):
    """Signed-write DM poll: delegate to main's handler via the transition shim."""
    return await _dm_poll_secure_from_signed_request(request)
|
||||
|
||||
|
||||
@router.get("/api/mesh/dm/poll")
@limiter.limit("30/minute")
async def dm_poll(
    request: Request,
    agent_id: str = "",
    agent_token: str = "",
    agent_token_prev: str = "",
    agent_tokens: str = "",
):
    """Legacy token-based DM poll: transition delegate to ``main.dm_poll``."""
    import main as _m

    return await _m.dm_poll(
        request,
        agent_id=agent_id,
        agent_token=agent_token,
        agent_token_prev=agent_token_prev,
        agent_tokens=agent_tokens,
    )
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/count")
@limiter.limit("60/minute")
@requires_signed_write(kind=SignedWriteKind.DM_COUNT)
async def dm_count_secure(request: Request):
    """Signed-write DM count: delegate to main's handler via the transition shim."""
    return await _dm_count_secure_from_signed_request(request)
|
||||
|
||||
|
||||
@router.get("/api/mesh/dm/count")
@limiter.limit("60/minute")
async def dm_count(
    request: Request,
    agent_id: str = "",
    agent_token: str = "",
    agent_token_prev: str = "",
    agent_tokens: str = "",
):
    """Legacy token-based DM count: transition delegate to ``main.dm_count``."""
    import main as _m

    return await _m.dm_count(
        request,
        agent_id=agent_id,
        agent_token=agent_token,
        agent_token_prev=agent_token_prev,
        agent_tokens=agent_tokens,
    )
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/block")
@limiter.limit("10/minute")
@requires_signed_write(kind=SignedWriteKind.DM_BLOCK)
async def dm_block(request: Request):
    """Block or unblock a sender from DMing you."""
    body = _signed_body(request)
    # assumes signed-write verification guarantees string fields here — TODO confirm
    agent_id = body.get("agent_id", "").strip()
    blocked_id = body.get("blocked_id", "").strip()
    # Any action other than "unblock" is treated as "block" below.
    action = body.get("action", "block").strip().lower()
    public_key = body.get("public_key", "").strip()
    public_key_algo = body.get("public_key_algo", "").strip()
    signature = body.get("signature", "").strip()
    sequence = _safe_int(body.get("sequence", 0) or 0)
    protocol_version = body.get("protocol_version", "").strip()
    if not agent_id or not blocked_id:
        return {"ok": False, "detail": "Missing agent_id or blocked_id"}
    from services.mesh.mesh_dm_relay import dm_relay

    try:
        from services.mesh.mesh_hashchain import infonet

        # Per-action sequence domain so block and unblock counters are independent.
        ok_seq, seq_reason = _validate_private_signed_sequence(
            infonet,
            agent_id,
            sequence,
            domain=f"dm_block:{action}",
        )
        if not ok_seq:
            return {"ok": False, "detail": seq_reason}
    except Exception:
        # Best-effort: sequence validation infrastructure failures do not
        # block the operation (a failed *check* above still returns).
        pass

    if action == "unblock":
        dm_relay.unblock(agent_id, blocked_id)
    else:
        dm_relay.block(agent_id, blocked_id)
    return {"ok": True, "action": action, "blocked_id": blocked_id}
|
||||
|
||||
|
||||
@router.post("/api/mesh/dm/witness")
@limiter.limit("20/minute")
@requires_signed_write(kind=SignedWriteKind.DM_WITNESS)
async def dm_key_witness(request: Request):
    """Record a lightweight witness for a DM key (dual-path spot-check)."""
    body = _signed_body(request)
    # assumes signed-write verification guarantees string fields here — TODO confirm
    witness_id = body.get("witness_id", "").strip()
    target_id = body.get("target_id", "").strip()
    dh_pub_key = body.get("dh_pub_key", "").strip()
    timestamp = _safe_int(body.get("timestamp", 0) or 0)
    public_key = body.get("public_key", "").strip()
    public_key_algo = body.get("public_key_algo", "").strip()
    # NOTE(review): signature and protocol_version are read but not
    # forwarded to record_witness — confirm whether that is intentional.
    signature = body.get("signature", "").strip()
    sequence = _safe_int(body.get("sequence", 0) or 0)
    protocol_version = body.get("protocol_version", "").strip()
    if not witness_id or not target_id or not dh_pub_key or not timestamp:
        return {"ok": False, "detail": "Missing witness_id, target_id, dh_pub_key, or timestamp"}
    now_ts = int(time.time())
    # Same ±7-day sanity window as DH key registration.
    if abs(timestamp - now_ts) > 7 * 86400:
        return {"ok": False, "detail": "Witness timestamp is too far from current time"}
    try:
        from services.mesh.mesh_reputation import reputation_ledger

        # Best-effort: reputation registration must not block witnessing.
        reputation_ledger.register_node(witness_id, public_key, public_key_algo)
    except Exception:
        pass
    try:
        from services.mesh.mesh_hashchain import infonet

        ok_seq, seq_reason = _validate_private_signed_sequence(
            infonet,
            witness_id,
            sequence,
            domain="dm_witness",
        )
        if not ok_seq:
            return {"ok": False, "detail": seq_reason}
    except Exception:
        # Best-effort: sequence-check infrastructure failures are ignored.
        pass
    from services.mesh.mesh_dm_relay import dm_relay

    ok, reason = dm_relay.record_witness(witness_id, target_id, dh_pub_key, timestamp)
    return {"ok": ok, "detail": reason}
|
||||
|
||||
|
||||
@router.get("/api/mesh/dm/witness")
|
||||
@limiter.limit("60/minute")
|
||||
async def dm_key_witness_get(request: Request, target_id: str = "", dh_pub_key: str = ""):
|
||||
"""Get witness counts for a target's DH key."""
|
||||
if not target_id:
|
||||
return {"ok": False, "detail": "Missing target_id"}
|
||||
from services.mesh.mesh_dm_relay import dm_relay
|
||||
|
||||
witnesses = dm_relay.get_witnesses(target_id, dh_pub_key if dh_pub_key else None, limit=5)
|
||||
response = {
|
||||
"ok": True,
|
||||
"count": len(witnesses),
|
||||
}
|
||||
if _scoped_view_authenticated(request, "mesh.audit"):
|
||||
response["target_id"] = target_id
|
||||
response["dh_pub_key"] = dh_pub_key or ""
|
||||
response["witnesses"] = witnesses
|
||||
return response
|
||||
|
||||
|
||||
@router.post("/api/mesh/trust/vouch")
|
||||
@limiter.limit("20/minute")
|
||||
@requires_signed_write(kind=SignedWriteKind.TRUST_VOUCH)
|
||||
async def trust_vouch(request: Request):
|
||||
"""Record a trust vouch for a node (web-of-trust signal)."""
|
||||
body = _signed_body(request)
|
||||
voucher_id = body.get("voucher_id", "").strip()
|
||||
target_id = body.get("target_id", "").strip()
|
||||
note = body.get("note", "").strip()
|
||||
timestamp = _safe_int(body.get("timestamp", 0) or 0)
|
||||
public_key = body.get("public_key", "").strip()
|
||||
public_key_algo = body.get("public_key_algo", "").strip()
|
||||
signature = body.get("signature", "").strip()
|
||||
sequence = _safe_int(body.get("sequence", 0) or 0)
|
||||
protocol_version = body.get("protocol_version", "").strip()
|
||||
if not voucher_id or not target_id or not timestamp:
|
||||
return {"ok": False, "detail": "Missing voucher_id, target_id, or timestamp"}
|
||||
now_ts = int(time.time())
|
||||
if abs(timestamp - now_ts) > 7 * 86400:
|
||||
return {"ok": False, "detail": "Vouch timestamp is too far from current time"}
|
||||
try:
|
||||
from services.mesh.mesh_reputation import reputation_ledger
|
||||
from services.mesh.mesh_hashchain import infonet
|
||||
|
||||
reputation_ledger.register_node(voucher_id, public_key, public_key_algo)
|
||||
ok_seq, seq_reason = _validate_private_signed_sequence(
|
||||
infonet,
|
||||
voucher_id,
|
||||
sequence,
|
||||
domain="trust_vouch",
|
||||
)
|
||||
if not ok_seq:
|
||||
return {"ok": False, "detail": seq_reason}
|
||||
ok, reason = reputation_ledger.add_vouch(voucher_id, target_id, note, timestamp)
|
||||
return {"ok": ok, "detail": reason}
|
||||
except Exception:
|
||||
return {"ok": False, "detail": "Failed to record vouch"}
|
||||
|
||||
|
||||
@router.get("/api/mesh/trust/vouches", dependencies=[Depends(require_admin)])
|
||||
@limiter.limit("60/minute")
|
||||
async def trust_vouches(request: Request, node_id: str = "", limit: int = 20):
|
||||
"""Fetch latest vouches for a node."""
|
||||
if not node_id:
|
||||
return {"ok": False, "detail": "Missing node_id"}
|
||||
try:
|
||||
from services.mesh.mesh_reputation import reputation_ledger
|
||||
|
||||
vouches = reputation_ledger.get_vouches(node_id, limit=limit)
|
||||
return {"ok": True, "node_id": node_id, "vouches": vouches, "count": len(vouches)}
|
||||
except Exception:
|
||||
return {"ok": False, "detail": "Failed to fetch vouches"}
|
||||
@@ -0,0 +1,145 @@
|
||||
import time
|
||||
import logging
|
||||
from fastapi import APIRouter, Request, Response, Query, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/api/mesh/peers", dependencies=[Depends(require_local_operator)])
|
||||
@limiter.limit("30/minute")
|
||||
async def list_peers(request: Request, bucket: str = Query(None)):
|
||||
"""List all peers (or filter by bucket: sync, push, bootstrap)."""
|
||||
from services.mesh.mesh_peer_store import DEFAULT_PEER_STORE_PATH, PeerStore
|
||||
store = PeerStore(DEFAULT_PEER_STORE_PATH)
|
||||
try:
|
||||
store.load()
|
||||
except Exception as exc:
|
||||
return {"ok": False, "detail": f"Failed to load peer store: {exc}"}
|
||||
if bucket:
|
||||
records = store.records_for_bucket(bucket)
|
||||
else:
|
||||
records = store.records()
|
||||
return {"ok": True, "count": len(records), "peers": [r.to_dict() for r in records]}
|
||||
|
||||
|
||||
@router.post("/api/mesh/peers", dependencies=[Depends(require_local_operator)])
|
||||
@limiter.limit("10/minute")
|
||||
async def add_peer(request: Request):
|
||||
"""Add a peer to the store. Body: {peer_url, transport?, label?, role?, buckets?[]}."""
|
||||
from services.mesh.mesh_crypto import normalize_peer_url
|
||||
from services.mesh.mesh_peer_store import (
|
||||
DEFAULT_PEER_STORE_PATH, PeerStore, PeerStoreError,
|
||||
make_push_peer_record, make_sync_peer_record,
|
||||
)
|
||||
from services.mesh.mesh_router import peer_transport_kind
|
||||
body = await request.json()
|
||||
peer_url_raw = str(body.get("peer_url", "") or "").strip()
|
||||
if not peer_url_raw:
|
||||
return {"ok": False, "detail": "peer_url is required"}
|
||||
peer_url = normalize_peer_url(peer_url_raw)
|
||||
if not peer_url:
|
||||
return {"ok": False, "detail": "Invalid peer_url"}
|
||||
transport = str(body.get("transport", "") or "").strip().lower()
|
||||
if not transport:
|
||||
transport = peer_transport_kind(peer_url)
|
||||
if not transport:
|
||||
return {"ok": False, "detail": "Cannot determine transport for peer_url — provide transport explicitly"}
|
||||
label = str(body.get("label", "") or "").strip()
|
||||
role = str(body.get("role", "") or "").strip().lower() or "relay"
|
||||
buckets = body.get("buckets", ["sync", "push"])
|
||||
if isinstance(buckets, str):
|
||||
buckets = [buckets]
|
||||
if not isinstance(buckets, list):
|
||||
buckets = ["sync", "push"]
|
||||
store = PeerStore(DEFAULT_PEER_STORE_PATH)
|
||||
try:
|
||||
store.load()
|
||||
except Exception:
|
||||
store = PeerStore(DEFAULT_PEER_STORE_PATH)
|
||||
added: list = []
|
||||
try:
|
||||
for b in buckets:
|
||||
b = str(b).strip().lower()
|
||||
if b == "sync":
|
||||
store.upsert(make_sync_peer_record(peer_url=peer_url, transport=transport, role=role, label=label))
|
||||
added.append("sync")
|
||||
elif b == "push":
|
||||
store.upsert(make_push_peer_record(peer_url=peer_url, transport=transport, role=role, label=label))
|
||||
added.append("push")
|
||||
store.save()
|
||||
except PeerStoreError as exc:
|
||||
return {"ok": False, "detail": str(exc)}
|
||||
return {"ok": True, "peer_url": peer_url, "buckets": added}
|
||||
|
||||
|
||||
@router.delete("/api/mesh/peers", dependencies=[Depends(require_local_operator)])
|
||||
@limiter.limit("10/minute")
|
||||
async def remove_peer(request: Request):
|
||||
"""Remove a peer. Body: {peer_url, bucket?}. If bucket omitted, removes from all buckets."""
|
||||
from services.mesh.mesh_crypto import normalize_peer_url
|
||||
from services.mesh.mesh_peer_store import DEFAULT_PEER_STORE_PATH, PeerStore
|
||||
body = await request.json()
|
||||
peer_url_raw = str(body.get("peer_url", "") or "").strip()
|
||||
if not peer_url_raw:
|
||||
return {"ok": False, "detail": "peer_url is required"}
|
||||
peer_url = normalize_peer_url(peer_url_raw)
|
||||
if not peer_url:
|
||||
return {"ok": False, "detail": "Invalid peer_url"}
|
||||
bucket_filter = str(body.get("bucket", "") or "").strip().lower()
|
||||
store = PeerStore(DEFAULT_PEER_STORE_PATH)
|
||||
try:
|
||||
store.load()
|
||||
except Exception:
|
||||
return {"ok": False, "detail": "Failed to load peer store"}
|
||||
removed: list = []
|
||||
for b in ["bootstrap", "sync", "push"]:
|
||||
if bucket_filter and b != bucket_filter:
|
||||
continue
|
||||
key = f"{b}:{peer_url}"
|
||||
if key in store._records:
|
||||
del store._records[key]
|
||||
removed.append(b)
|
||||
if not removed:
|
||||
return {"ok": False, "detail": "Peer not found in any bucket"}
|
||||
store.save()
|
||||
return {"ok": True, "peer_url": peer_url, "removed_from": removed}
|
||||
|
||||
|
||||
@router.patch("/api/mesh/peers", dependencies=[Depends(require_local_operator)])
|
||||
@limiter.limit("10/minute")
|
||||
async def toggle_peer(request: Request):
|
||||
"""Enable or disable a peer. Body: {peer_url, bucket, enabled: bool}."""
|
||||
from services.mesh.mesh_crypto import normalize_peer_url
|
||||
from services.mesh.mesh_peer_store import DEFAULT_PEER_STORE_PATH, PeerRecord, PeerStore
|
||||
body = await request.json()
|
||||
peer_url_raw = str(body.get("peer_url", "") or "").strip()
|
||||
bucket = str(body.get("bucket", "") or "").strip().lower()
|
||||
enabled = body.get("enabled")
|
||||
if not peer_url_raw:
|
||||
return {"ok": False, "detail": "peer_url is required"}
|
||||
if not bucket:
|
||||
return {"ok": False, "detail": "bucket is required"}
|
||||
if enabled is None:
|
||||
return {"ok": False, "detail": "enabled (true/false) is required"}
|
||||
peer_url = normalize_peer_url(peer_url_raw)
|
||||
if not peer_url:
|
||||
return {"ok": False, "detail": "Invalid peer_url"}
|
||||
store = PeerStore(DEFAULT_PEER_STORE_PATH)
|
||||
try:
|
||||
store.load()
|
||||
except Exception:
|
||||
return {"ok": False, "detail": "Failed to load peer store"}
|
||||
key = f"{bucket}:{peer_url}"
|
||||
record = store._records.get(key)
|
||||
if not record:
|
||||
return {"ok": False, "detail": f"Peer not found in {bucket} bucket"}
|
||||
updated = PeerRecord(**{**record.to_dict(), "enabled": bool(enabled), "updated_at": int(time.time())})
|
||||
store._records[key] = updated
|
||||
store.save()
|
||||
return {"ok": True, "peer_url": peer_url, "bucket": bucket, "enabled": bool(enabled)}
|
||||
@@ -0,0 +1,337 @@
|
||||
import math
|
||||
from typing import Any
|
||||
from fastapi import APIRouter, Request, Response, Query, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator, _scoped_view_authenticated
|
||||
from services.data_fetcher import get_latest_data
|
||||
from services.mesh.mesh_protocol import normalize_payload
|
||||
from services.mesh.mesh_signed_events import (
|
||||
MeshWriteExemption,
|
||||
SignedWriteKind,
|
||||
get_prepared_signed_write,
|
||||
mesh_write_exempt,
|
||||
requires_signed_write,
|
||||
)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
|
||||
def _signed_body(request: Request) -> dict[str, Any]:
    """Return the verified signed-write body for *request*, or {} if none was prepared."""
    prepared = get_prepared_signed_write(request)
    return {} if prepared is None else dict(prepared.body)
|
||||
|
||||
|
||||
def _safe_int(val, default=0):
|
||||
try:
|
||||
return int(val)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _safe_float(val, default=0.0):
|
||||
try:
|
||||
parsed = float(val)
|
||||
if not math.isfinite(parsed):
|
||||
return default
|
||||
return parsed
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _redact_public_oracle_profile(payload: dict, authenticated: bool) -> dict:
|
||||
redacted = dict(payload)
|
||||
if authenticated:
|
||||
return redacted
|
||||
redacted["active_stakes"] = []
|
||||
redacted["prediction_history"] = []
|
||||
return redacted
|
||||
|
||||
|
||||
def _redact_public_oracle_predictions(predictions: list, authenticated: bool) -> dict:
|
||||
if authenticated:
|
||||
return {"predictions": list(predictions)}
|
||||
return {"predictions": [], "count": len(predictions)}
|
||||
|
||||
|
||||
def _redact_public_oracle_stakes(payload: dict, authenticated: bool) -> dict:
|
||||
redacted = dict(payload)
|
||||
if authenticated:
|
||||
return redacted
|
||||
redacted["truth_stakers"] = []
|
||||
redacted["false_stakers"] = []
|
||||
return redacted
|
||||
|
||||
|
||||
@router.post("/api/mesh/oracle/predict")
|
||||
@limiter.limit("10/minute")
|
||||
@requires_signed_write(kind=SignedWriteKind.ORACLE_PREDICT)
|
||||
async def oracle_predict(request: Request):
|
||||
"""Place a prediction on a market outcome."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
body = _signed_body(request)
|
||||
node_id = body.get("node_id", "")
|
||||
market_title = body.get("market_title", "")
|
||||
side = body.get("side", "")
|
||||
stake_amount = _safe_float(body.get("stake_amount", 0))
|
||||
public_key = body.get("public_key", "")
|
||||
public_key_algo = body.get("public_key_algo", "")
|
||||
signature = body.get("signature", "")
|
||||
sequence = _safe_int(body.get("sequence", 0) or 0)
|
||||
protocol_version = body.get("protocol_version", "")
|
||||
if not node_id or not market_title or not side:
|
||||
return {"ok": False, "detail": "Missing node_id, market_title, or side"}
|
||||
prediction_payload = {"market_title": market_title, "side": side, "stake_amount": stake_amount}
|
||||
try:
|
||||
from services.mesh.mesh_reputation import reputation_ledger
|
||||
reputation_ledger.register_node(node_id, public_key, public_key_algo)
|
||||
except Exception:
|
||||
pass
|
||||
data = get_latest_data()
|
||||
markets = data.get("prediction_markets", [])
|
||||
matched = None
|
||||
for m in markets:
|
||||
if m.get("title", "").lower() == market_title.lower():
|
||||
matched = m
|
||||
break
|
||||
if not matched:
|
||||
for m in markets:
|
||||
if market_title.lower() in m.get("title", "").lower():
|
||||
matched = m
|
||||
break
|
||||
if not matched:
|
||||
return {"ok": False, "detail": f"Market '{market_title}' not found in active markets."}
|
||||
probability = 50.0
|
||||
side_lower = side.lower()
|
||||
outcomes = matched.get("outcomes", [])
|
||||
if outcomes:
|
||||
for o in outcomes:
|
||||
if o.get("name", "").lower() == side_lower:
|
||||
probability = float(o.get("pct", 50))
|
||||
break
|
||||
else:
|
||||
consensus = matched.get("consensus_pct")
|
||||
if consensus is None:
|
||||
consensus = matched.get("polymarket_pct") or matched.get("kalshi_pct") or 50
|
||||
probability = float(consensus)
|
||||
if side_lower == "no":
|
||||
probability = 100.0 - probability
|
||||
if stake_amount > 0:
|
||||
ok, detail = oracle_ledger.place_market_stake(node_id, matched["title"], side, stake_amount, probability)
|
||||
mode = "staked"
|
||||
else:
|
||||
ok, detail = oracle_ledger.place_prediction(node_id, matched["title"], side, probability)
|
||||
mode = "free"
|
||||
if ok:
|
||||
try:
|
||||
from services.mesh.mesh_hashchain import infonet
|
||||
normalized_payload = normalize_payload("prediction", prediction_payload)
|
||||
infonet.append(event_type="prediction", node_id=node_id, payload=normalized_payload,
|
||||
signature=signature, sequence=sequence, public_key=public_key,
|
||||
public_key_algo=public_key_algo, protocol_version=protocol_version)
|
||||
except Exception:
|
||||
pass
|
||||
return {"ok": ok, "detail": detail, "probability": probability, "mode": mode}
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/markets")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_markets(request: Request):
|
||||
"""List active prediction markets."""
|
||||
from collections import defaultdict
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
data = get_latest_data()
|
||||
markets = data.get("prediction_markets", [])
|
||||
all_consensus = oracle_ledger.get_all_market_consensus()
|
||||
by_category = defaultdict(list)
|
||||
for m in markets:
|
||||
by_category[m.get("category", "NEWS")].append(m)
|
||||
_fields = ("title", "consensus_pct", "polymarket_pct", "kalshi_pct", "volume", "volume_24h",
|
||||
"end_date", "description", "category", "sources", "slug", "kalshi_ticker", "outcomes")
|
||||
categories = {}
|
||||
cat_totals = {}
|
||||
for cat in ["POLITICS", "CONFLICT", "NEWS", "FINANCE", "CRYPTO"]:
|
||||
all_cat = sorted(by_category.get(cat, []), key=lambda x: x.get("volume", 0) or 0, reverse=True)
|
||||
cat_totals[cat] = len(all_cat)
|
||||
cat_list = []
|
||||
for m in all_cat[:10]:
|
||||
entry = {k: m.get(k) for k in _fields}
|
||||
entry["consensus"] = all_consensus.get(m.get("title", ""), {})
|
||||
cat_list.append(entry)
|
||||
categories[cat] = cat_list
|
||||
return {"categories": categories, "total_count": len(markets), "cat_totals": cat_totals}
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/search")
|
||||
@limiter.limit("20/minute")
|
||||
async def oracle_search(request: Request, q: str = "", limit: int = 50):
|
||||
"""Search prediction markets across Polymarket + Kalshi APIs."""
|
||||
if not q or len(q) < 2:
|
||||
return {"results": [], "query": q, "count": 0}
|
||||
from services.fetchers.prediction_markets import search_polymarket_direct, search_kalshi_direct
|
||||
import concurrent.futures
|
||||
# Search both APIs in parallel for speed
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as pool:
|
||||
poly_fut = pool.submit(search_polymarket_direct, q, limit)
|
||||
kalshi_fut = pool.submit(search_kalshi_direct, q, limit)
|
||||
poly_results = poly_fut.result(timeout=20)
|
||||
kalshi_results = kalshi_fut.result(timeout=20)
|
||||
# Also check cached/merged markets
|
||||
data = get_latest_data()
|
||||
markets = data.get("prediction_markets", [])
|
||||
q_lower = q.lower()
|
||||
cached_matches = [m for m in markets if q_lower in m.get("title", "").lower()]
|
||||
seen_titles = set()
|
||||
combined = []
|
||||
# Cached first (already merged Poly+Kalshi with consensus)
|
||||
for m in cached_matches:
|
||||
seen_titles.add(m["title"].lower())
|
||||
combined.append(m)
|
||||
# Then Polymarket direct hits
|
||||
for m in poly_results:
|
||||
if m["title"].lower() not in seen_titles:
|
||||
seen_titles.add(m["title"].lower())
|
||||
combined.append(m)
|
||||
# Then Kalshi direct hits
|
||||
for m in kalshi_results:
|
||||
if m["title"].lower() not in seen_titles:
|
||||
seen_titles.add(m["title"].lower())
|
||||
combined.append(m)
|
||||
combined.sort(key=lambda x: x.get("volume", 0) or 0, reverse=True)
|
||||
_fields = ("title", "consensus_pct", "polymarket_pct", "kalshi_pct", "volume", "volume_24h",
|
||||
"end_date", "description", "category", "sources", "slug", "kalshi_ticker", "outcomes")
|
||||
results = [{k: m.get(k) for k in _fields} for m in combined[:limit]]
|
||||
return {"results": results, "query": q, "count": len(results)}
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/markets/more")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_markets_more(request: Request, category: str = "NEWS", offset: int = 0, limit: int = 10):
|
||||
"""Load more markets for a specific category (paginated)."""
|
||||
data = get_latest_data()
|
||||
markets = data.get("prediction_markets", [])
|
||||
cat_markets = sorted([m for m in markets if m.get("category") == category],
|
||||
key=lambda x: x.get("volume", 0) or 0, reverse=True)
|
||||
page = cat_markets[offset : offset + limit]
|
||||
_fields = ("title", "consensus_pct", "polymarket_pct", "kalshi_pct", "volume", "volume_24h",
|
||||
"end_date", "description", "category", "sources", "slug", "kalshi_ticker", "outcomes")
|
||||
results = [{k: m.get(k) for k in _fields} for m in page]
|
||||
return {"markets": results, "category": category, "offset": offset,
|
||||
"has_more": offset + limit < len(cat_markets), "total": len(cat_markets)}
|
||||
|
||||
|
||||
@router.post("/api/mesh/oracle/resolve")
|
||||
@limiter.limit("5/minute")
|
||||
@mesh_write_exempt(MeshWriteExemption.ADMIN_CONTROL)
|
||||
async def oracle_resolve(request: Request):
|
||||
"""Resolve a prediction market."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
body = await request.json()
|
||||
market_title = body.get("market_title", "")
|
||||
outcome = body.get("outcome", "")
|
||||
if not market_title or not outcome:
|
||||
return {"ok": False, "detail": "Need market_title and outcome"}
|
||||
winners, losers = oracle_ledger.resolve_market(market_title, outcome)
|
||||
stake_result = oracle_ledger.resolve_market_stakes(market_title, outcome)
|
||||
return {"ok": True,
|
||||
"detail": f"Resolved: {winners} free winners, {losers} free losers, "
|
||||
f"{stake_result.get('winners', 0)} stake winners, {stake_result.get('losers', 0)} stake losers",
|
||||
"free": {"winners": winners, "losers": losers}, "stakes": stake_result}
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/consensus")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_consensus(request: Request, market_title: str = ""):
|
||||
"""Get network consensus for a market."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
if not market_title:
|
||||
return {"error": "market_title required"}
|
||||
return oracle_ledger.get_market_consensus(market_title)
|
||||
|
||||
|
||||
@router.post("/api/mesh/oracle/stake")
|
||||
@limiter.limit("10/minute")
|
||||
@requires_signed_write(kind=SignedWriteKind.ORACLE_STAKE)
|
||||
async def oracle_stake(request: Request):
|
||||
"""Stake oracle rep on a post's truthfulness."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
body = _signed_body(request)
|
||||
staker_id = body.get("staker_id", "")
|
||||
message_id = body.get("message_id", "")
|
||||
poster_id = body.get("poster_id", "")
|
||||
side = body.get("side", "").lower()
|
||||
amount = _safe_float(body.get("amount", 0))
|
||||
duration_days = _safe_int(body.get("duration_days", 1), 1)
|
||||
public_key = body.get("public_key", "")
|
||||
public_key_algo = body.get("public_key_algo", "")
|
||||
signature = body.get("signature", "")
|
||||
sequence = _safe_int(body.get("sequence", 0) or 0)
|
||||
protocol_version = body.get("protocol_version", "")
|
||||
if not staker_id or not message_id or not side:
|
||||
return {"ok": False, "detail": "Missing staker_id, message_id, or side"}
|
||||
stake_payload = {"message_id": message_id, "poster_id": poster_id, "side": side,
|
||||
"amount": amount, "duration_days": duration_days}
|
||||
try:
|
||||
from services.mesh.mesh_reputation import reputation_ledger
|
||||
reputation_ledger.register_node(staker_id, public_key, public_key_algo)
|
||||
except Exception:
|
||||
pass
|
||||
ok, detail = oracle_ledger.place_stake(staker_id, message_id, poster_id, side, amount, duration_days)
|
||||
if ok:
|
||||
try:
|
||||
from services.mesh.mesh_hashchain import infonet
|
||||
normalized_payload = normalize_payload("stake", stake_payload)
|
||||
infonet.append(event_type="stake", node_id=staker_id, payload=normalized_payload,
|
||||
signature=signature, sequence=sequence, public_key=public_key,
|
||||
public_key_algo=public_key_algo, protocol_version=protocol_version)
|
||||
except Exception:
|
||||
pass
|
||||
return {"ok": ok, "detail": detail}
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/stakes/{message_id}")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_stakes_for_message(request: Request, message_id: str):
|
||||
"""Get all oracle stakes on a message."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
return _redact_public_oracle_stakes(
|
||||
oracle_ledger.get_stakes_for_message(message_id),
|
||||
authenticated=_scoped_view_authenticated(request, "mesh.audit"),
|
||||
)
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/profile")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_profile(request: Request, node_id: str = ""):
|
||||
"""Get full oracle profile."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
if not node_id:
|
||||
return {"ok": False, "detail": "Provide ?node_id=xxx"}
|
||||
profile = oracle_ledger.get_oracle_profile(node_id)
|
||||
return _redact_public_oracle_profile(
|
||||
profile, authenticated=_scoped_view_authenticated(request, "mesh.audit"))
|
||||
|
||||
|
||||
@router.get("/api/mesh/oracle/predictions")
|
||||
@limiter.limit("30/minute")
|
||||
async def oracle_predictions(request: Request, node_id: str = ""):
|
||||
"""Get a node's active (unresolved) predictions."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
if not node_id:
|
||||
return {"ok": False, "detail": "Provide ?node_id=xxx"}
|
||||
active_predictions = oracle_ledger.get_active_predictions(node_id)
|
||||
return _redact_public_oracle_predictions(
|
||||
active_predictions, authenticated=_scoped_view_authenticated(request, "mesh.audit"))
|
||||
|
||||
|
||||
@router.post("/api/mesh/oracle/resolve-stakes")
|
||||
@limiter.limit("5/minute")
|
||||
@mesh_write_exempt(MeshWriteExemption.ADMIN_CONTROL)
|
||||
async def oracle_resolve_stakes(request: Request):
|
||||
"""Resolve all expired stake contests."""
|
||||
from services.mesh.mesh_oracle import oracle_ledger
|
||||
resolutions = oracle_ledger.resolve_expired_stakes()
|
||||
return {"ok": True, "resolutions": resolutions, "count": len(resolutions)}
|
||||
@@ -0,0 +1,235 @@
|
||||
import json as json_mod
|
||||
import logging
|
||||
from typing import Any
|
||||
from fastapi import APIRouter, Request, Response
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator, _verify_peer_push_hmac
|
||||
from services.config import get_settings
|
||||
from services.mesh.mesh_crypto import normalize_peer_url
|
||||
from services.mesh.mesh_router import peer_transport_kind
|
||||
from auth import _peer_hmac_url_from_request
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter()
|
||||
|
||||
_PEER_PUSH_BATCH_SIZE = 50
|
||||
|
||||
|
||||
def _safe_int(val, default=0):
|
||||
try:
|
||||
return int(val)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _hydrate_gate_store_from_chain(events: list) -> int:
    """Copy any gate_message chain events into the local gate_store for read/decrypt.

    Only events that are resident in the local infonet (accepted or already
    present) are hydrated. The canonical infonet-resident event is used —
    never the raw batch event — so a forged batch entry carrying a valid
    event_id but attacker-chosen payload cannot pollute gate_store.

    Returns the number of events hydrated.
    """
    import copy
    from services.mesh.mesh_hashchain import gate_store, infonet
    count = 0
    for evt in events:
        if evt.get("event_type") != "gate_message":
            continue
        event_id = str(evt.get("event_id", "") or "").strip()
        # Skip anything the local chain does not hold; the batch copy is untrusted.
        if not event_id or event_id not in infonet.event_index:
            continue
        # Look up the canonical, chain-resident copy of the event.
        canonical = infonet.events[infonet.event_index[event_id]]
        payload = canonical.get("payload") or {}
        gate_id = str(payload.get("gate", "") or "").strip()
        if not gate_id:
            continue
        try:
            # Deep copy so later mutation of the chain event cannot alias
            # into the gate store (and vice versa).
            gate_store.append(gate_id, copy.deepcopy(canonical))
            count += 1
        except Exception:
            # Best-effort hydration: one bad event must not abort the rest.
            pass
    return count
|
||||
|
||||
|
||||
@router.post("/api/mesh/infonet/peer-push")
|
||||
@limiter.limit("30/minute")
|
||||
async def infonet_peer_push(request: Request):
|
||||
"""Accept pushed Infonet events from relay peers (HMAC-authenticated)."""
|
||||
content_length = request.headers.get("content-length")
|
||||
if content_length:
|
||||
try:
|
||||
if int(content_length) > 524_288:
|
||||
return Response(content='{"ok":false,"detail":"Request body too large (max 512KB)"}',
|
||||
status_code=413, media_type="application/json")
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
from services.mesh.mesh_hashchain import infonet
|
||||
body_bytes = await request.body()
|
||||
if not _verify_peer_push_hmac(request, body_bytes):
|
||||
return Response(content='{"ok":false,"detail":"Invalid or missing peer HMAC"}',
|
||||
status_code=403, media_type="application/json")
|
||||
body = json_mod.loads(body_bytes or b"{}")
|
||||
events = body.get("events", [])
|
||||
if not isinstance(events, list):
|
||||
return {"ok": False, "detail": "events must be a list"}
|
||||
if len(events) > 50:
|
||||
return {"ok": False, "detail": "Too many events in one push (max 50)"}
|
||||
if not events:
|
||||
return {"ok": True, "accepted": 0, "duplicates": 0, "rejected": []}
|
||||
result = infonet.ingest_events(events)
|
||||
_hydrate_gate_store_from_chain(events)
|
||||
return {"ok": True, **result}
|
||||
|
||||
|
||||
@router.post("/api/mesh/gate/peer-push")
@limiter.limit("30/minute")
async def gate_peer_push(request: Request):
    """Accept pushed gate events from relay peers (private plane).

    Flow: cap the body size, verify the per-peer HMAC over the raw bytes,
    rebuild each event into a fixed sanitized schema, resolve which gate it
    belongs to, then hand the per-gate groups to ``gate_store`` for ingestion.

    Returns a JSON dict with ``ok`` plus accepted/duplicates/rejected counts,
    or an error Response (403/413) on auth/size failures.
    """
    # Early reject via declared Content-Length (512 KiB cap). A missing or
    # malformed header just falls through; the HMAC below still covers the
    # actual body bytes.
    content_length = request.headers.get("content-length")
    if content_length:
        try:
            if int(content_length) > 524_288:
                return Response(content='{"ok":false,"detail":"Request body too large"}',
                                status_code=413, media_type="application/json")
        except (ValueError, TypeError):
            pass
    from services.mesh.mesh_hashchain import gate_store
    body_bytes = await request.body()
    # Peer pushes are authenticated with an HMAC over the raw request body.
    if not _verify_peer_push_hmac(request, body_bytes):
        return Response(content='{"ok":false,"detail":"Invalid or missing peer HMAC"}',
                        status_code=403, media_type="application/json")
    body = json_mod.loads(body_bytes or b"{}")
    events = body.get("events", [])
    if not isinstance(events, list):
        return {"ok": False, "detail": "events must be a list"}
    if len(events) > 50:
        return {"ok": False, "detail": "Too many events (max 50)"}
    if not events:
        return {"ok": True, "accepted": 0, "duplicates": 0}
    from services.mesh.mesh_hashchain import resolve_gate_wire_ref
    # Sprint 3 / Rec #4: the gate_ref is HMACed with a key bound to the
    # receiver's peer URL (the URL the push was delivered to). This is
    # the same URL _verify_peer_push_hmac validated the X-Peer-HMAC
    # header against, so we can trust it for ref resolution.
    hop_peer_url = _peer_hmac_url_from_request(request)
    grouped_events: dict[str, list] = {}
    for evt in events:
        # Sanitize: rebuild the event from scratch with only the expected
        # keys, coercing every value to the expected primitive type.
        evt_dict = evt if isinstance(evt, dict) else {}
        payload = evt_dict.get("payload")
        if not isinstance(payload, dict):
            payload = {}
        clean_event = {
            "event_id": str(evt_dict.get("event_id", "") or ""),
            "event_type": "gate_message",
            "timestamp": evt_dict.get("timestamp", 0),
            # node_id falls back to sender_id for older senders.
            "node_id": str(evt_dict.get("node_id", "") or evt_dict.get("sender_id", "") or ""),
            "sequence": evt_dict.get("sequence", 0),
            "signature": str(evt_dict.get("signature", "") or ""),
            "public_key": str(evt_dict.get("public_key", "") or ""),
            "public_key_algo": str(evt_dict.get("public_key_algo", "") or ""),
            "protocol_version": str(evt_dict.get("protocol_version", "") or ""),
            "payload": {
                "ciphertext": str(payload.get("ciphertext", "") or ""),
                "format": str(payload.get("format", "") or ""),
                "nonce": str(payload.get("nonce", "") or ""),
                "sender_ref": str(payload.get("sender_ref", "") or ""),
            },
        }
        # Optional payload fields are only carried through when non-empty.
        epoch = _safe_int(payload.get("epoch", 0) or 0)
        if epoch > 0:
            clean_event["payload"]["epoch"] = epoch
        envelope_hash_val = str(payload.get("envelope_hash", "") or "").strip()
        gate_envelope_val = str(payload.get("gate_envelope", "") or "").strip()
        reply_to_val = str(payload.get("reply_to", "") or "").strip()
        if envelope_hash_val:
            clean_event["payload"]["envelope_hash"] = envelope_hash_val
        if gate_envelope_val:
            clean_event["payload"]["gate_envelope"] = gate_envelope_val
        if reply_to_val:
            clean_event["payload"]["reply_to"] = reply_to_val
        # The gate id may arrive in the clear, or only as an HMAC'd wire
        # ref that we resolve against our own membership.
        event_gate_id = str(payload.get("gate", "") or evt_dict.get("gate", "") or "").strip().lower()
        if not event_gate_id:
            event_gate_id = resolve_gate_wire_ref(
                str(payload.get("gate_ref", "") or evt_dict.get("gate_ref", "") or ""),
                clean_event,
                peer_url=hop_peer_url,
            )
        # NOTE(review): a single unresolvable gate rejects the whole batch,
        # discarding any events already sanitized above — confirm intended.
        if not event_gate_id:
            return {"ok": False, "detail": "gate resolution failed"}
        final_payload: dict[str, Any] = {
            "gate": event_gate_id,
            "ciphertext": clean_event["payload"]["ciphertext"],
            "format": clean_event["payload"]["format"],
            "nonce": clean_event["payload"]["nonce"],
            "sender_ref": clean_event["payload"]["sender_ref"],
        }
        if epoch > 0:
            final_payload["epoch"] = epoch
        if clean_event["payload"].get("envelope_hash"):
            final_payload["envelope_hash"] = clean_event["payload"]["envelope_hash"]
        if clean_event["payload"].get("gate_envelope"):
            final_payload["gate_envelope"] = clean_event["payload"]["gate_envelope"]
        if clean_event["payload"].get("reply_to"):
            final_payload["reply_to"] = clean_event["payload"]["reply_to"]
        # Group by resolved gate so ingestion happens once per gate below.
        grouped_events.setdefault(event_gate_id, []).append({
            "event_id": clean_event["event_id"],
            "event_type": "gate_message",
            "timestamp": clean_event["timestamp"],
            "node_id": clean_event["node_id"],
            "sequence": clean_event["sequence"],
            "signature": clean_event["signature"],
            "public_key": clean_event["public_key"],
            "public_key_algo": clean_event["public_key_algo"],
            "protocol_version": clean_event["protocol_version"],
            "payload": final_payload,
        })
    # Aggregate per-gate ingestion results into a single summary.
    accepted = 0
    duplicates = 0
    rejected = 0
    for event_gate_id, items in grouped_events.items():
        result = gate_store.ingest_peer_events(event_gate_id, items)
        a = int(result.get("accepted", 0) or 0)
        accepted += a
        duplicates += int(result.get("duplicates", 0) or 0)
        rejected += int(result.get("rejected", 0) or 0)
    return {"ok": True, "accepted": accepted, "duplicates": duplicates, "rejected": rejected}
|
||||
|
||||
|
||||
@router.post("/api/mesh/gate/peer-pull")
@limiter.limit("30/minute")
async def gate_peer_pull(request: Request):
    """Return gate events a peer is missing (HMAC-authenticated pull sync).

    With no ``gate_id`` in the body, returns per-gate event counts so the
    peer can decide what to pull. With a ``gate_id``, returns the slice of
    that gate's events after ``after_count`` (batched).
    """
    # 64 KiB declared-size cap; malformed Content-Length falls through.
    content_length = request.headers.get("content-length")
    if content_length:
        try:
            if int(content_length) > 65_536:
                return Response(content='{"ok":false,"detail":"Request body too large"}',
                                status_code=413, media_type="application/json")
        except (ValueError, TypeError):
            pass
    from services.mesh.mesh_hashchain import gate_store
    body_bytes = await request.body()
    # Same HMAC scheme as peer-push: authenticates the raw body bytes.
    if not _verify_peer_push_hmac(request, body_bytes):
        return Response(content='{"ok":false,"detail":"Invalid or missing peer HMAC"}',
                        status_code=403, media_type="application/json")
    body = json_mod.loads(body_bytes or b"{}")
    gate_id = str(body.get("gate_id", "") or "").strip().lower()
    after_count = _safe_int(body.get("after_count", 0) or 0)
    if not gate_id:
        # Inventory mode: report how many events we hold per known gate.
        gate_ids = gate_store.known_gate_ids()
        gate_counts: dict[str, int] = {}
        for gid in gate_ids:
            # HACK: reaches into gate_store internals (_lock/_gates); a
            # public count API on the store would be cleaner.
            with gate_store._lock:
                gate_counts[gid] = len(gate_store._gates.get(gid, []))
        return {"ok": True, "gates": gate_counts}
    # Snapshot the gate's event list under the lock, then slice outside it.
    with gate_store._lock:
        all_events = list(gate_store._gates.get(gate_id, []))
    total = len(all_events)
    if after_count >= total:
        return {"ok": True, "events": [], "total": total, "gate_id": gate_id}
    batch = all_events[after_count : after_count + _PEER_PUSH_BATCH_SIZE]
    return {"ok": True, "events": batch, "total": total, "gate_id": gate_id}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,91 @@
|
||||
from fastapi import APIRouter, Request, Query, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
|
||||
# Shared router for this module's radio/OpenMHz endpoints; mounted by the app.
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/api/radio/top")
@limiter.limit("30/minute")
async def get_top_radios(request: Request):
    """Return the top Broadcastify feeds (delegates to the radio service)."""
    from services.radio_intercept import get_top_broadcastify_feeds as _top_feeds
    return _top_feeds()
|
||||
|
||||
|
||||
@router.get("/api/radio/openmhz/systems")
@limiter.limit("30/minute")
async def api_get_openmhz_systems(request: Request):
    """List known OpenMHz trunked-radio systems."""
    from services.radio_intercept import get_openmhz_systems as _systems
    return _systems()
|
||||
|
||||
|
||||
@router.get("/api/radio/openmhz/calls/{sys_name}")
@limiter.limit("60/minute")
async def api_get_openmhz_calls(request: Request, sys_name: str):
    """Recent call metadata for one OpenMHz system identified by ``sys_name``."""
    from services.radio_intercept import get_recent_openmhz_calls as _recent_calls
    return _recent_calls(sys_name)
|
||||
|
||||
|
||||
@router.get("/api/radio/openmhz/audio")
@limiter.limit("120/minute")
async def api_get_openmhz_audio(request: Request, url: str = Query(..., min_length=10)):
    """Proxy a single OpenMHz call-audio URL through the backend."""
    from services.radio_intercept import openmhz_audio_response as _audio
    return _audio(url)
|
||||
|
||||
|
||||
@router.get("/api/radio/nearest")
@limiter.limit("60/minute")
async def api_get_nearest_radio(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
):
    """Find the single OpenMHz system closest to the given coordinate."""
    from services.radio_intercept import find_nearest_openmhz_system as _nearest
    return _nearest(lat, lng)
|
||||
|
||||
|
||||
@router.get("/api/radio/nearest-list")
@limiter.limit("60/minute")
async def api_get_nearest_radios_list(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
    limit: int = Query(5, ge=1, le=20),
):
    """Return up to ``limit`` OpenMHz systems nearest to the coordinate."""
    from services.radio_intercept import find_nearest_openmhz_systems_list as _nearest_list
    return _nearest_list(lat, lng, limit=limit)
|
||||
|
||||
|
||||
@router.get("/api/route/{callsign}")
@limiter.limit("60/minute")
async def get_flight_route(request: Request, callsign: str, lat: float = 0.0, lng: float = 0.0):
    """Resolve a flight's origin/destination airports via the adsb.lol routeset API.

    Returns a dict with ``orig_loc``/``dest_loc`` ([lon, lat] pairs) and
    display names, or an empty dict when the route is unknown or the
    upstream request fails.
    """
    from services.network_utils import fetch_with_curl
    r = fetch_with_curl(
        "https://api.adsb.lol/api/0/routeset",
        method="POST",
        json_data={"planes": [{"callsign": callsign, "lat": lat, "lng": lng}]},
        timeout=10,
    )
    if not r or r.status_code != 200:
        return {}
    try:
        data = r.json()
    except ValueError:
        # Fix: upstream can return 200 with a non-JSON body; previously this
        # raised out of the handler (HTTP 500). Treat it as "no route known".
        return {}
    # The API wraps results either as {"value": [...]} or a bare list.
    if isinstance(data, dict):
        route_list = data.get("value", [])
    elif isinstance(data, list):
        route_list = data
    else:
        route_list = []
    if route_list:
        route = route_list[0]
        airports = route.get("_airports", [])
        # Need at least origin and destination; intermediate stops are ignored.
        if len(airports) >= 2:
            orig = airports[0]
            dest = airports[-1]
            return {
                "orig_loc": [orig.get("lon", 0), orig.get("lat", 0)],
                "dest_loc": [dest.get("lon", 0), dest.get("lat", 0)],
                "origin_name": f"{orig.get('iata', '') or orig.get('icao', '')}: {orig.get('name', 'Unknown')}",
                "dest_name": f"{dest.get('iata', '') or dest.get('icao', '')}: {dest.get('name', 'Unknown')}",
            }
    return {}
|
||||
@@ -0,0 +1,260 @@
|
||||
"""SAR (Synthetic Aperture Radar) layer endpoints.
|
||||
|
||||
Exposes:
|
||||
- GET /api/sar/status — feature gates + signup links for the UI
|
||||
- GET /api/sar/anomalies — Mode B pre-processed anomalies
|
||||
- GET /api/sar/scenes — Mode A scene catalog
|
||||
- GET /api/sar/coverage — per-AOI coverage and next-pass hints
|
||||
- GET /api/sar/aois — operator-defined AOIs
|
||||
- POST /api/sar/aois — create or replace an AOI
|
||||
- DELETE /api/sar/aois/{aoi_id} — remove an AOI
|
||||
- GET /api/sar/near — anomalies within radius_km of (lat, lon)
|
||||
|
||||
The /status endpoint is the load-bearing UX: when Mode B is disabled it
|
||||
returns the structured help payload from sar_config.products_fetch_status()
|
||||
so the frontend can render in-app links to the free signup pages instead of
|
||||
making the user hunt around.
|
||||
"""
|
||||
|
||||
from fastapi import APIRouter, Depends, HTTPException, Query, Request
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
from auth import require_local_operator
|
||||
from limiter import limiter
|
||||
from services.fetchers._store import get_latest_data_subset_refs
|
||||
from services.sar.sar_aoi import (
|
||||
SarAoi,
|
||||
add_aoi,
|
||||
haversine_km,
|
||||
load_aois,
|
||||
remove_aoi,
|
||||
)
|
||||
from services.sar.sar_config import (
|
||||
catalog_enabled,
|
||||
clear_runtime_credentials,
|
||||
openclaw_enabled,
|
||||
products_fetch_enabled,
|
||||
products_fetch_status,
|
||||
require_private_tier_for_publish,
|
||||
set_runtime_credentials,
|
||||
)
|
||||
|
||||
# Shared router for the SAR layer endpoints; mounted by the app.
router = APIRouter()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Status — the in-app onboarding hook
|
||||
# ---------------------------------------------------------------------------
|
||||
@router.get("/api/sar/status")
@limiter.limit("60/minute")
async def sar_status(request: Request) -> dict:
    """SAR layer status plus signup links for in-app onboarding.

    Called whenever the SAR panel opens; when Mode B is off, the
    ``products`` block carries the structured help payload (including free
    signup URLs) from ``products_fetch_status()``.
    """
    mode_b = products_fetch_status()
    payload = {
        "ok": True,
        "catalog": {
            "mode": "A",
            "enabled": catalog_enabled(),
            "needs_account": False,
            "description": "Free Sentinel-1 scene catalog from ASF Search.",
        },
        "products": {"mode": "B", **mode_b},
        "openclaw_enabled": openclaw_enabled(),
        "require_private_tier": require_private_tier_for_publish(),
    }
    return payload
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Data feeds
|
||||
# ---------------------------------------------------------------------------
|
||||
@router.get("/api/sar/anomalies")
@limiter.limit("60/minute")
async def sar_anomalies(
    request: Request,
    kind: str = Query("", description="Optional anomaly kind filter"),
    aoi_id: str = Query("", description="Optional AOI id filter"),
    limit: int = Query(200, ge=1, le=1000),
) -> dict:
    """Return the latest cached SAR anomalies (Mode B), optionally filtered."""
    snapshot = get_latest_data_subset_refs("sar_anomalies")
    anomalies = list(snapshot.get("sar_anomalies") or [])
    if kind:
        anomalies = [entry for entry in anomalies if entry.get("kind") == kind]
    if aoi_id:
        wanted = aoi_id.strip().lower()
        anomalies = [entry for entry in anomalies
                     if (entry.get("stack_id") or "").lower() == wanted]
    trimmed = anomalies[:limit]
    return {
        "ok": True,
        "count": len(trimmed),
        "anomalies": trimmed,
        "products_enabled": products_fetch_enabled(),
    }
|
||||
|
||||
|
||||
@router.get("/api/sar/scenes")
@limiter.limit("60/minute")
async def sar_scenes(
    request: Request,
    aoi_id: str = Query(""),
    limit: int = Query(200, ge=1, le=1000),
) -> dict:
    """Return the latest cached Sentinel-1 scene catalog (Mode A)."""
    snapshot = get_latest_data_subset_refs("sar_scenes")
    scenes = list(snapshot.get("sar_scenes") or [])
    if aoi_id:
        wanted = aoi_id.strip().lower()
        scenes = [entry for entry in scenes
                  if (entry.get("aoi_id") or "").lower() == wanted]
    trimmed = scenes[:limit]
    return {
        "ok": True,
        "count": len(trimmed),
        "scenes": trimmed,
        "catalog_enabled": catalog_enabled(),
    }
|
||||
|
||||
|
||||
@router.get("/api/sar/coverage")
@limiter.limit("60/minute")
async def sar_coverage(request: Request) -> dict:
    """Per-AOI coverage stats and rough next-pass estimates."""
    snapshot = get_latest_data_subset_refs("sar_aoi_coverage")
    coverage = list(snapshot.get("sar_aoi_coverage") or [])
    return {"ok": True, "coverage": coverage}
|
||||
|
||||
|
||||
@router.get("/api/sar/near")
@limiter.limit("60/minute")
async def sar_near(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lon: float = Query(..., ge=-180, le=180),
    radius_km: float = Query(50, ge=1, le=2000),
    kind: str = Query(""),
    limit: int = Query(50, ge=1, le=500),
) -> dict:
    """Return anomalies whose center sits within ``radius_km`` of (lat, lon)."""
    snapshot = get_latest_data_subset_refs("sar_anomalies")
    nearby = []
    for entry in list(snapshot.get("sar_anomalies") or []):
        # Skip records with unparsable coordinates rather than failing the call.
        try:
            entry_lat = float(entry.get("lat", 0.0))
            entry_lon = float(entry.get("lon", 0.0))
        except (TypeError, ValueError):
            continue
        distance = haversine_km(lat, lon, entry_lat, entry_lon)
        if distance > radius_km:
            continue
        if kind and entry.get("kind") != kind:
            continue
        enriched = dict(entry)  # copy so the cached record stays untouched
        enriched["distance_km"] = round(distance, 2)
        nearby.append(enriched)
    nearby.sort(key=lambda item: item.get("distance_km", 0))
    top = nearby[:limit]
    return {
        "ok": True,
        "count": len(top),
        "anomalies": top,
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# AOI CRUD
|
||||
# ---------------------------------------------------------------------------
|
||||
@router.get("/api/sar/aois")
@limiter.limit("60/minute")
async def sar_aoi_list(request: Request) -> dict:
    """List operator-defined AOIs, forcing a fresh load from storage."""
    aois = load_aois(force=True)
    return {"ok": True, "aois": [entry.to_dict() for entry in aois]}
|
||||
|
||||
|
||||
class AoiPayload(BaseModel):
    """Request body for creating or replacing a SAR area of interest."""

    id: str = Field(..., min_length=1, max_length=64)  # stable AOI identifier (lowercased on save)
    name: str = Field(..., min_length=1, max_length=120)  # human-readable label
    description: str = Field("", max_length=400)
    center_lat: float = Field(..., ge=-90, le=90)
    center_lon: float = Field(..., ge=-180, le=180)
    radius_km: float = Field(25.0, ge=1.0, le=500.0)  # circular footprint when no polygon given
    category: str = Field("watchlist", max_length=40)
    # Optional explicit footprint as [lon, lat]-style coordinate pairs —
    # ordering assumption not visible here; confirm against SarAoi.
    polygon: list[list[float]] | None = None
|
||||
|
||||
|
||||
@router.post("/api/sar/aois", dependencies=[Depends(require_local_operator)])
@limiter.limit("20/minute")
async def sar_aoi_upsert(request: Request, payload: AoiPayload) -> dict:
    """Create or replace an AOI from the validated payload (operator-only)."""
    normalized_category = (payload.category or "watchlist").strip().lower()
    aoi = SarAoi(
        id=payload.id.strip().lower(),
        name=payload.name.strip(),
        description=payload.description.strip(),
        center_lat=payload.center_lat,
        center_lon=payload.center_lon,
        radius_km=payload.radius_km,
        polygon=payload.polygon,
        category=normalized_category,
    )
    add_aoi(aoi)
    return {"ok": True, "aoi": aoi.to_dict()}
|
||||
|
||||
|
||||
@router.delete("/api/sar/aois/{aoi_id}", dependencies=[Depends(require_local_operator)])
@limiter.limit("20/minute")
async def sar_aoi_delete(request: Request, aoi_id: str) -> dict:
    """Remove an AOI by id; 404 when the id is unknown (operator-only)."""
    if not remove_aoi(aoi_id):
        raise HTTPException(status_code=404, detail="AOI not found")
    return {"ok": True, "removed": aoi_id}
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Mode B enable / disable — one-click setup from the frontend
|
||||
# ---------------------------------------------------------------------------
|
||||
class ModeBEnablePayload(BaseModel):
    """Request body for the one-click Mode B enable flow."""

    earthdata_user: str = Field("", max_length=120)  # optional NASA Earthdata username
    earthdata_token: str = Field(..., min_length=8, max_length=2048)  # required Earthdata token
    copernicus_user: str = Field("", max_length=120)  # optional Copernicus account
    copernicus_token: str = Field("", max_length=2048)
|
||||
|
||||
|
||||
@router.post("/api/sar/mode-b/enable", dependencies=[Depends(require_local_operator)])
@limiter.limit("10/minute")
async def sar_mode_b_enable(request: Request, payload: ModeBEnablePayload) -> dict:
    """Store Earthdata (and optional Copernicus) credentials and opt in to Mode B.

    Returns the refreshed products status so the UI can reflect the change
    immediately.
    """
    set_runtime_credentials(
        earthdata_user=payload.earthdata_user,
        earthdata_token=payload.earthdata_token,
        copernicus_user=payload.copernicus_user,
        copernicus_token=payload.copernicus_token,
        mode_b_opt_in=True,
    )
    return {"ok": True, "products": products_fetch_status()}
|
||||
|
||||
|
||||
@router.post("/api/sar/mode-b/disable", dependencies=[Depends(require_local_operator)])
@limiter.limit("10/minute")
async def sar_mode_b_disable(request: Request) -> dict:
    """Wipe runtime credentials and revert the SAR layer to Mode A only."""
    clear_runtime_credentials()
    return {"ok": True, "products": products_fetch_status()}
|
||||
@@ -0,0 +1,67 @@
|
||||
from fastapi import APIRouter, Request, Query, Depends
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
from services.data_fetcher import get_latest_data
|
||||
|
||||
# Shared router for the oracle/thermal/SIGINT endpoints; mounted by the app.
router = APIRouter()
|
||||
|
||||
|
||||
@router.get("/api/oracle/region-intel")
@limiter.limit("30/minute")
async def oracle_region_intel(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
):
    """Get oracle intelligence summary for a geographic region."""
    from services.oracle_service import get_region_oracle_intel
    latest_news = get_latest_data().get("news", [])
    return get_region_oracle_intel(lat, lng, latest_news)
|
||||
|
||||
|
||||
@router.get("/api/thermal/verify")
@limiter.limit("10/minute")
async def thermal_verify(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
    radius_km: float = Query(10, ge=1, le=100),
):
    """On-demand thermal anomaly verification using Sentinel-2 SWIR bands."""
    from services.thermal_sentinel import search_thermal_anomaly as _search
    return _search(lat, lng, radius_km)
|
||||
|
||||
|
||||
@router.post("/api/sigint/transmit")
@limiter.limit("5/minute")
async def sigint_transmit(request: Request):
    """Send an APRS-IS message to a specific callsign. Requires ham radio credentials.

    Returns ``{"ok": False, "detail": ...}`` for policy blocks and malformed
    requests; otherwise delegates to the SIGINT bridge.
    """
    from services.wormhole_supervisor import get_transport_tier
    # APRS transmit is hard-blocked while running on a private transport tier.
    tier = get_transport_tier()
    if str(tier or "").startswith("private_"):
        return {"ok": False, "detail": "APRS transmit blocked in private transport mode"}
    # Fix: a malformed or non-object JSON body previously raised out of the
    # handler (HTTP 500); return a structured error instead.
    try:
        body = await request.json()
    except Exception:
        return {"ok": False, "detail": "Invalid JSON body"}
    if not isinstance(body, dict):
        return {"ok": False, "detail": "Invalid JSON body"}
    callsign = body.get("callsign", "")
    passcode = body.get("passcode", "")
    target = body.get("target", "")
    message = body.get("message", "")
    if not all([callsign, passcode, target, message]):
        return {"ok": False, "detail": "Missing required fields: callsign, passcode, target, message"}
    from services.sigint_bridge import send_aprs_message
    return send_aprs_message(callsign, passcode, target, message)
|
||||
|
||||
|
||||
@router.get("/api/sigint/nearest-sdr")
@limiter.limit("30/minute")
async def nearest_sdr(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
):
    """Find the nearest KiwiSDR receivers to a given coordinate."""
    from services.sigint_bridge import find_nearest_kiwisdr
    receivers = get_latest_data().get("kiwisdr", [])
    return find_nearest_kiwisdr(lat, lng, receivers)
|
||||
@@ -0,0 +1,303 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import math
|
||||
from typing import Any
|
||||
from fastapi import APIRouter, Request, Query, Depends, HTTPException, Response
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
from limiter import limiter
|
||||
from auth import require_admin, require_local_operator
|
||||
|
||||
# Module-level logger; route failures below are recorded via logger.exception.
logger = logging.getLogger(__name__)

# Shared router for the dossier/geocode/sentinel/shodan endpoints.
router = APIRouter()
|
||||
|
||||
|
||||
def _safe_int(val, default=0):
|
||||
try:
|
||||
return int(val)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _safe_float(val, default=0.0):
|
||||
try:
|
||||
parsed = float(val)
|
||||
if not math.isfinite(parsed):
|
||||
return default
|
||||
return parsed
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
class ShodanSearchRequest(BaseModel):
    """Body for /api/tools/shodan/search."""

    query: str  # Shodan search query string
    page: int = 1  # 1-based result page
    # Pydantic copies field defaults per-instance, so the mutable [] is safe here.
    facets: list[str] = []
|
||||
|
||||
|
||||
class ShodanCountRequest(BaseModel):
    """Body for /api/tools/shodan/count."""

    query: str  # Shodan search query string
    # Pydantic copies field defaults per-instance, so the mutable [] is safe here.
    facets: list[str] = []
|
||||
|
||||
|
||||
class ShodanHostRequest(BaseModel):
    """Body for /api/tools/shodan/host."""

    ip: str  # host IP to look up
    history: bool = False  # include historical banners when True
|
||||
|
||||
|
||||
@router.get("/api/region-dossier")
@limiter.limit("30/minute")
def api_region_dossier(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
):
    """Build a region dossier for a coordinate.

    Deliberately a sync ``def`` so FastAPI runs it in a threadpool and the
    event loop is not blocked by the dossier build.
    """
    from services.region_dossier import get_region_dossier as _dossier
    return _dossier(lat, lng)
|
||||
|
||||
|
||||
@router.get("/api/geocode/search")
@limiter.limit("30/minute")
async def api_geocode_search(
    request: Request,
    q: str = "",
    limit: int = 5,
    local_only: bool = False,
):
    """Forward geocoding; queries shorter than two characters short-circuit."""
    from services.geocode import search_geocode
    if len((q or "").strip()) < 2:
        return {"results": [], "query": q, "count": 0}
    # Geocoding is blocking I/O — run it off the event loop.
    results = await asyncio.to_thread(search_geocode, q, limit, local_only)
    return {"results": results, "query": q, "count": len(results)}
|
||||
|
||||
|
||||
@router.get("/api/geocode/reverse")
@limiter.limit("60/minute")
async def api_geocode_reverse(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
    local_only: bool = False,
):
    """Reverse geocoding for a coordinate; runs in a worker thread."""
    from services.geocode import reverse_geocode as _reverse
    return await asyncio.to_thread(_reverse, lat, lng, local_only)
|
||||
|
||||
|
||||
@router.get("/api/sentinel2/search")
@limiter.limit("30/minute")
def api_sentinel2_search(
    request: Request,
    lat: float = Query(..., ge=-90, le=90),
    lng: float = Query(..., ge=-180, le=180),
):
    """Search for the latest Sentinel-2 imagery at a point.

    Sync ``def`` on purpose: FastAPI executes it in a threadpool.
    """
    from services.sentinel_search import search_sentinel2_scene as _search
    return _search(lat, lng)
|
||||
|
||||
|
||||
@router.post("/api/sentinel/token")
@limiter.limit("60/minute")
async def api_sentinel_token(request: Request):
    """Proxy Copernicus CDSE OAuth2 token request (avoids browser CORS block)."""
    import requests as req
    from urllib.parse import parse_qs
    raw = await request.body()
    form = parse_qs(raw.decode("utf-8"))
    client_id = form.get("client_id", [""])[0]
    client_secret = form.get("client_secret", [""])[0]
    if not client_id or not client_secret:
        raise HTTPException(400, "client_id and client_secret required")
    token_url = "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token"
    grant = {"grant_type": "client_credentials", "client_id": client_id, "client_secret": client_secret}
    try:
        # Blocking HTTP runs in a worker thread; upstream response (success
        # or error JSON) is passed through verbatim.
        resp = await asyncio.to_thread(req.post, token_url, data=grant, timeout=15)
        return Response(content=resp.content, status_code=resp.status_code, media_type="application/json")
    except Exception:
        logger.exception("Token request failed")
        raise HTTPException(502, "Token request failed")
|
||||
|
||||
|
||||
# Process-wide cache of the most recent CDSE OAuth token used by
# /api/sentinel/tile: "token" (str|None), "expiry" (epoch seconds),
# "client_id" (the id the token was issued for).
_sh_token_cache: dict = {"token": None, "expiry": 0, "client_id": ""}
|
||||
|
||||
|
||||
@router.post("/api/sentinel/tile")
@limiter.limit("300/minute")
async def api_sentinel_tile(request: Request):
    """Proxy Sentinel Hub Process API tile request (avoids CORS block).

    Takes XYZ tile coords + credentials + a visualization preset, converts
    the tile to an EPSG:3857 bbox, and returns the rendered 256x256 PNG from
    the CDSE Process API.
    """
    import requests as req
    import time as _time
    try:
        body = await request.json()
    except Exception:
        return JSONResponse(status_code=422, content={"ok": False, "detail": "invalid JSON body"})

    client_id = body.get("client_id", "")
    client_secret = body.get("client_secret", "")
    preset = body.get("preset", "TRUE-COLOR")
    date_str = body.get("date", "")
    # Tile coordinates; assumed numeric (ints) from the frontend — TODO confirm,
    # non-numeric values would raise below.
    z = body.get("z", 0)
    x = body.get("x", 0)
    y = body.get("y", 0)

    if not client_id or not client_secret or not date_str:
        raise HTTPException(400, "client_id, client_secret, and date required")

    # Reuse a cached OAuth token while still valid (30 s safety margin).
    # NOTE(review): the cache is keyed on client_id only — a request with the
    # same client_id but a different secret reuses the cached token; confirm
    # that is acceptable.
    now = _time.time()
    if (_sh_token_cache["token"] and _sh_token_cache["client_id"] == client_id
            and now < _sh_token_cache["expiry"] - 30):
        token = _sh_token_cache["token"]
    else:
        token_url = "https://identity.dataspace.copernicus.eu/auth/realms/CDSE/protocol/openid-connect/token"
        try:
            tresp = await asyncio.to_thread(req.post, token_url,
                data={"grant_type": "client_credentials", "client_id": client_id, "client_secret": client_secret},
                timeout=15)
            if tresp.status_code != 200:
                raise HTTPException(401, f"Token auth failed: {tresp.text[:200]}")
            tdata = tresp.json()
            token = tdata["access_token"]
            _sh_token_cache["token"] = token
            _sh_token_cache["expiry"] = now + tdata.get("expires_in", 300)
            _sh_token_cache["client_id"] = client_id
        except HTTPException:
            # Re-raise our own 401 untouched instead of masking it as 502.
            raise
        except Exception:
            logger.exception("Token request failed")
            raise HTTPException(502, "Token request failed")

    # XYZ tile -> EPSG:3857 bbox; `half` is the web-mercator half-extent in meters.
    half = 20037508.342789244
    tile_size = (2 * half) / math.pow(2, z)
    min_x = -half + x * tile_size
    max_x = min_x + tile_size
    max_y = half - y * tile_size
    min_y = max_y - tile_size
    bbox = [min_x, min_y, max_x, max_y]

    # Per-preset Sentinel Hub evalscripts (VERSION=3 JS, minified on purpose).
    evalscripts = {
        "TRUE-COLOR": '//VERSION=3\nfunction setup(){return{input:["B04","B03","B02"],output:{bands:3}};}\nfunction evaluatePixel(s){return[2.5*s.B04,2.5*s.B03,2.5*s.B02];}',
        "FALSE-COLOR": '//VERSION=3\nfunction setup(){return{input:["B08","B04","B03"],output:{bands:3}};}\nfunction evaluatePixel(s){return[2.5*s.B08,2.5*s.B04,2.5*s.B03];}',
        "NDVI": '//VERSION=3\nfunction setup(){return{input:["B04","B08"],output:{bands:3}};}\nfunction evaluatePixel(s){var n=(s.B08-s.B04)/(s.B08+s.B04);if(n<-0.2)return[0.05,0.05,0.05];if(n<0)return[0.75,0.75,0.75];if(n<0.1)return[0.86,0.86,0.86];if(n<0.2)return[0.92,0.84,0.68];if(n<0.3)return[0.77,0.88,0.55];if(n<0.4)return[0.56,0.80,0.32];if(n<0.5)return[0.35,0.72,0.18];if(n<0.6)return[0.20,0.60,0.08];if(n<0.7)return[0.10,0.48,0.04];return[0.0,0.36,0.0];}',
        "MOISTURE-INDEX": '//VERSION=3\nfunction setup(){return{input:["B8A","B11"],output:{bands:3}};}\nfunction evaluatePixel(s){var m=(s.B8A-s.B11)/(s.B8A+s.B11);var r=Math.max(0,Math.min(1,1.5-3*m));var g=Math.max(0,Math.min(1,m<0?1.5+3*m:1.5-3*m));var b=Math.max(0,Math.min(1,1.5+3*(m-0.5)));return[r,g,b];}',
    }
    evalscript = evalscripts.get(preset, evalscripts["TRUE-COLOR"])

    from datetime import datetime as _dt, timedelta as _td
    try:
        end_date = _dt.strptime(date_str, "%Y-%m-%d")
    except ValueError:
        # Unparsable date: fall back to "today" rather than erroring.
        end_date = _dt.utcnow()

    # Wider lookback at low zoom so mosaics have enough cloud-free scenes.
    if z <= 6:
        lookback_days = 30
    elif z <= 9:
        lookback_days = 14
    elif z <= 11:
        lookback_days = 7
    else:
        lookback_days = 5

    start_date = end_date - _td(days=lookback_days)

    # Process API request: least-cloudy mosaic over the lookback window,
    # rendered as a 256x256 PNG for direct use as a map tile.
    process_body = {
        "input": {
            "bounds": {"bbox": bbox, "properties": {"crs": "http://www.opengis.net/def/crs/EPSG/0/3857"}},
            "data": [{"type": "sentinel-2-l2a", "dataFilter": {
                "timeRange": {
                    "from": start_date.strftime("%Y-%m-%dT00:00:00Z"),
                    "to": end_date.strftime("%Y-%m-%dT23:59:59Z"),
                },
                "maxCloudCoverage": 30, "mosaickingOrder": "leastCC",
            }}],
        },
        "output": {"width": 256, "height": 256,
                   "responses": [{"identifier": "default", "format": {"type": "image/png"}}]},
        "evalscript": evalscript,
    }
    try:
        resp = await asyncio.to_thread(req.post,
            "https://sh.dataspace.copernicus.eu/api/v1/process",
            json=process_body,
            headers={"Authorization": f"Bearer {token}", "Accept": "image/png"},
            timeout=30)
        # Upstream status and body (PNG or error) are passed through verbatim.
        return Response(content=resp.content, status_code=resp.status_code,
                        media_type=resp.headers.get("content-type", "image/png"))
    except Exception:
        logger.exception("Process API failed")
        raise HTTPException(502, "Process API failed")
|
||||
|
||||
|
||||
@router.get("/api/tools/shodan/status", dependencies=[Depends(require_local_operator)])
@limiter.limit("30/minute")
async def api_shodan_status(request: Request):
    """Report Shodan connector availability/configuration (operator-only)."""
    from services.shodan_connector import get_shodan_connector_status as _status
    return _status()
|
||||
|
||||
|
||||
@router.post("/api/tools/shodan/search", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_shodan_search(request: Request, body: ShodanSearchRequest):
    """Run a Shodan search; connector errors are mapped to HTTP errors."""
    from services.shodan_connector import ShodanConnectorError, search_shodan
    try:
        results = search_shodan(body.query, page=body.page, facets=body.facets)
    except ShodanConnectorError as exc:
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
    return results
|
||||
|
||||
|
||||
@router.post("/api/tools/shodan/count", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_shodan_count(request: Request, body: ShodanCountRequest):
    """Return Shodan result counts/facets without fetching full results."""
    from services.shodan_connector import ShodanConnectorError, count_shodan
    try:
        return count_shodan(body.query, facets=body.facets)
    except ShodanConnectorError as exc:
        # Surface the connector's own status code and detail to the API client.
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
|
||||
|
||||
|
||||
@router.post("/api/tools/shodan/host", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_shodan_host(request: Request, body: ShodanHostRequest):
    """Look up a single host on Shodan, optionally with historical banners."""
    from services.shodan_connector import ShodanConnectorError, lookup_shodan_host
    try:
        return lookup_shodan_host(body.ip, history=body.history)
    except ShodanConnectorError as exc:
        # Surface the connector's own status code and detail to the API client.
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
|
||||
|
||||
|
||||
@router.get("/api/tools/uw/status", dependencies=[Depends(require_local_operator)])
@limiter.limit("30/minute")
async def api_uw_status(request: Request):
    """Report the Unusual Whales connector configuration/health."""
    from services.unusual_whales_connector import get_uw_status
    return get_uw_status()
|
||||
|
||||
|
||||
@router.post("/api/tools/uw/congress", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_uw_congress(request: Request):
    """Fetch congressional trading disclosures via the UW connector."""
    # NOTE(review): the error type is named FinnhubConnectorError although it is
    # imported from unusual_whales_connector — confirm this naming is intended.
    from services.unusual_whales_connector import FinnhubConnectorError, fetch_congress_trades
    try:
        return fetch_congress_trades()
    except FinnhubConnectorError as exc:
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
|
||||
|
||||
|
||||
@router.post("/api/tools/uw/darkpool", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_uw_darkpool(request: Request):
    """Fetch insider-transaction data.

    NOTE(review): the route is named /darkpool but the handler returns insider
    transactions — confirm the route name matches the intended data source.
    """
    from services.unusual_whales_connector import FinnhubConnectorError, fetch_insider_transactions
    try:
        return fetch_insider_transactions()
    except FinnhubConnectorError as exc:
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
|
||||
|
||||
|
||||
@router.post("/api/tools/uw/flow", dependencies=[Depends(require_local_operator)])
@limiter.limit("12/minute")
async def api_uw_flow(request: Request):
    """Fetch defense-sector quotes.

    NOTE(review): the route is named /flow but the handler returns defense
    quotes — confirm the route name matches the intended data source.
    """
    from services.unusual_whales_connector import FinnhubConnectorError, fetch_defense_quotes
    try:
        return fetch_defense_quotes()
    except FinnhubConnectorError as exc:
        raise HTTPException(status_code=exc.status_code, detail=exc.detail) from exc
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,7 +1,9 @@
|
||||
import argparse
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
@@ -56,6 +58,72 @@ def sha256_file(path: Path) -> str:
|
||||
return digest.hexdigest().lower()
|
||||
|
||||
|
||||
def _default_generated_at() -> str:
|
||||
return datetime.now(timezone.utc).replace(microsecond=0).isoformat().replace("+00:00", "Z")
|
||||
|
||||
|
||||
def build_release_attestation(
    *,
    suite_green: bool,
    suite_name: str = "dm_relay_security",
    detail: str = "",
    report: str = "",
    command: str = "",
    commit: str = "",
    generated_at: str = "",
    threat_model_reference: str = "docs/mesh/threat-model.md",
    workflow: str = "",
    run_id: str = "",
    run_attempt: str = "",
    ref: str = "",
) -> dict:
    """Assemble the release attestation payload for the DM relay security suite.

    Every string argument is stripped; blank values fall back to the matching
    GITHUB_* environment variable (commit/workflow/run_id/run_attempt/ref),
    to the current UTC time (generated_at), or to a canned CI message (detail).
    The "ci" section is included only when at least one CI field is non-empty.
    """
    def _clean(value) -> str:
        # Normalize None/empty to "" and strip surrounding whitespace.
        return str(value or "").strip()

    def _clean_or_env(value, env_var: str) -> str:
        # Explicit argument wins; otherwise fall back to the CI environment.
        return _clean(value) or os.environ.get(env_var, "").strip()

    suite = {
        "name": _clean(suite_name) or "dm_relay_security",
        "green": bool(suite_green),
        "detail": _clean(detail) or (
            "CI attestation confirms the DM relay security suite is green."
            if suite_green
            else "CI attestation recorded a failing DM relay security suite run."
        ),
        "report": _clean(report),
    }
    suite_command = _clean(command)
    if suite_command:
        suite["command"] = suite_command

    payload = {
        "generated_at": _clean(generated_at) or _default_generated_at(),
        "commit": _clean_or_env(commit, "GITHUB_SHA"),
        "threat_model_reference": _clean(threat_model_reference)
        or "docs/mesh/threat-model.md",
        "dm_relay_security_suite": suite,
    }

    ci = {
        "workflow": _clean_or_env(workflow, "GITHUB_WORKFLOW"),
        "run_id": _clean_or_env(run_id, "GITHUB_RUN_ID"),
        "run_attempt": _clean_or_env(run_attempt, "GITHUB_RUN_ATTEMPT"),
        "ref": _clean_or_env(ref, "GITHUB_REF"),
    }
    if any(ci.values()):
        payload["ci"] = ci
    return payload
||||
|
||||
|
||||
def write_release_attestation(output_path: Path | str, **kwargs) -> dict:
    """Build the attestation payload, write it as pretty JSON, return it.

    Parent directories are created as needed; kwargs are forwarded verbatim
    to build_release_attestation.
    """
    payload = build_release_attestation(**kwargs)
    target = Path(output_path).resolve()
    target.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(payload, indent=2) + "\n"
    target.write_text(serialized, encoding="utf-8")
    return payload
||||
|
||||
|
||||
def cmd_show(_args: argparse.Namespace) -> int:
|
||||
version = current_version()
|
||||
if not version:
|
||||
@@ -102,6 +170,30 @@ def cmd_hash(args: argparse.Namespace) -> int:
|
||||
return 0 if asset_matches else 2
|
||||
|
||||
|
||||
def cmd_write_attestation(args: argparse.Namespace) -> int:
    """CLI handler for 'write-attestation': write the JSON file and summarize it."""
    green = bool(args.suite_green)
    payload = write_release_attestation(
        args.output_path,
        suite_green=green,
        suite_name=args.suite_name,
        detail=args.detail,
        report=args.report,
        command=args.command,
        commit=args.commit,
        generated_at=args.generated_at,
        threat_model_reference=args.threat_model_reference,
        workflow=args.workflow,
        run_id=args.run_id,
        run_attempt=args.run_attempt,
        ref=args.ref,
    )
    resolved = Path(args.output_path).resolve()
    status = "green" if green else "red"
    print(f"Wrote release attestation: {resolved}")
    print(f"DM relay security suite : {status}")
    print(f"Commit : {payload.get('commit', '')}")
    return 0
|
||||
|
||||
|
||||
def build_parser() -> argparse.ArgumentParser:
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Helper for ShadowBroker release version/tag/asset consistency."
|
||||
@@ -112,7 +204,7 @@ def build_parser() -> argparse.ArgumentParser:
|
||||
show_parser.set_defaults(func=cmd_show)
|
||||
|
||||
set_version_parser = subparsers.add_parser("set-version", help="Update frontend/package.json version")
|
||||
set_version_parser.add_argument("version", help="Version like 0.9.6")
|
||||
set_version_parser.add_argument("version", help="Version like 0.9.7")
|
||||
set_version_parser.set_defaults(func=cmd_set_version)
|
||||
|
||||
hash_parser = subparsers.add_parser(
|
||||
@@ -121,10 +213,83 @@ def build_parser() -> argparse.ArgumentParser:
|
||||
hash_parser.add_argument("zip_path", help="Path to the release ZIP")
|
||||
hash_parser.add_argument(
|
||||
"--version",
|
||||
help="Release version like 0.9.6. Defaults to frontend/package.json version.",
|
||||
help="Release version like 0.9.7. Defaults to frontend/package.json version.",
|
||||
)
|
||||
hash_parser.set_defaults(func=cmd_hash)
|
||||
|
||||
attestation_parser = subparsers.add_parser(
|
||||
"write-attestation",
|
||||
help="Write a structured Sprint 8 release attestation JSON file",
|
||||
)
|
||||
attestation_parser.add_argument("output_path", help="Where to write the attestation JSON")
|
||||
suite_group = attestation_parser.add_mutually_exclusive_group(required=True)
|
||||
suite_group.add_argument(
|
||||
"--suite-green",
|
||||
action="store_true",
|
||||
help="Mark the DM relay security suite as green",
|
||||
)
|
||||
suite_group.add_argument(
|
||||
"--suite-red",
|
||||
action="store_true",
|
||||
help="Mark the DM relay security suite as failing",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--suite-name",
|
||||
default="dm_relay_security",
|
||||
help="Suite name to record in the attestation",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--detail",
|
||||
default="",
|
||||
help="Human-readable suite detail. Defaults to a CI-generated message.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--report",
|
||||
default="",
|
||||
help="Path to the suite report or artifact reference to embed in the attestation.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--command",
|
||||
default="",
|
||||
help="Exact suite command used to generate the attestation.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--commit",
|
||||
default="",
|
||||
help="Commit SHA. Defaults to GITHUB_SHA when available.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--generated-at",
|
||||
default="",
|
||||
help="UTC timestamp for the attestation. Defaults to current UTC time.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--threat-model-reference",
|
||||
default="docs/mesh/threat-model.md",
|
||||
help="Threat model reference to embed in the attestation.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--workflow",
|
||||
default="",
|
||||
help="Workflow name. Defaults to GITHUB_WORKFLOW when available.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--run-id",
|
||||
default="",
|
||||
help="Workflow run ID. Defaults to GITHUB_RUN_ID when available.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--run-attempt",
|
||||
default="",
|
||||
help="Workflow run attempt. Defaults to GITHUB_RUN_ATTEMPT when available.",
|
||||
)
|
||||
attestation_parser.add_argument(
|
||||
"--ref",
|
||||
default="",
|
||||
help="Git ref. Defaults to GITHUB_REF when available.",
|
||||
)
|
||||
attestation_parser.set_defaults(func=cmd_write_attestation)
|
||||
|
||||
return parser
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,75 @@
|
||||
"""Rotate the MESH_SECURE_STORAGE_SECRET used to protect key envelopes at rest.
|
||||
|
||||
Usage — stop the backend first, then run:
|
||||
|
||||
MESH_OLD_STORAGE_SECRET=<current> \\
|
||||
MESH_NEW_STORAGE_SECRET=<new> \\
|
||||
python -m scripts.rotate_secure_storage_secret
|
||||
|
||||
Dry-run mode (validates old secret without writing anything):
|
||||
|
||||
MESH_OLD_STORAGE_SECRET=<current> \\
|
||||
MESH_NEW_STORAGE_SECRET=<new> \\
|
||||
python -m scripts.rotate_secure_storage_secret --dry-run
|
||||
|
||||
Or, for Docker deployments:
|
||||
|
||||
docker exec -e MESH_OLD_STORAGE_SECRET=<current> \\
|
||||
-e MESH_NEW_STORAGE_SECRET=<new> \\
|
||||
<container> python -m scripts.rotate_secure_storage_secret
|
||||
|
||||
After successful rotation, update your .env (or Docker secret file) to set
|
||||
MESH_SECURE_STORAGE_SECRET to the new value, then restart the backend.
|
||||
|
||||
The script fails closed: if the old secret cannot unwrap any existing envelope,
|
||||
nothing is written. Non-passphrase envelopes (DPAPI, raw) are skipped with a
|
||||
warning.
|
||||
|
||||
Before rewriting, .bak copies of every envelope are created so a mid-rotation
|
||||
crash leaves recoverable backups on disk.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
|
||||
def main() -> None:
    """Rotate MESH_SECURE_STORAGE_SECRET across all key envelopes, failing closed.

    Reads MESH_OLD_STORAGE_SECRET / MESH_NEW_STORAGE_SECRET from the
    environment; --dry-run validates the old secret without writing anything.
    Exits 1 on a missing env var or any SecureStorageError.
    """
    dry_run = "--dry-run" in sys.argv

    old_secret = os.environ.get("MESH_OLD_STORAGE_SECRET", "").strip()
    new_secret = os.environ.get("MESH_NEW_STORAGE_SECRET", "").strip()

    if not old_secret:
        print("ERROR: MESH_OLD_STORAGE_SECRET environment variable is required.", file=sys.stderr)
        sys.exit(1)
    if not new_secret:
        print("ERROR: MESH_NEW_STORAGE_SECRET environment variable is required.", file=sys.stderr)
        sys.exit(1)

    # Imported late so env-validation failures don't require the backend package.
    from services.mesh.mesh_secure_storage import SecureStorageError, rotate_storage_secret

    try:
        result = rotate_storage_secret(old_secret, new_secret, dry_run=dry_run)
    except SecureStorageError as exc:
        # Fail closed: per the module docstring, nothing was written on error.
        print(f"ROTATION FAILED: {exc}", file=sys.stderr)
        sys.exit(1)

    # Machine-readable summary on stdout; human guidance on stderr.
    print(json.dumps(result, indent=2))
    if dry_run:
        print(
            "\nDry run complete. No files were modified. Run again without --dry-run to perform the rotation.",
            file=sys.stderr,
        )
    else:
        print(
            "\nRotation complete. Update MESH_SECURE_STORAGE_SECRET to the new value and restart the backend."
            "\nBackup files (.bak) were created alongside each rotated envelope.",
            file=sys.stderr,
        )


if __name__ == "__main__":
    main()
|
||||
@@ -1,10 +1,16 @@
|
||||
param(
|
||||
[string]$Python = "python"
|
||||
[string]$Python = "py"
|
||||
)
|
||||
|
||||
$repoRoot = Resolve-Path (Join-Path $PSScriptRoot "..")
|
||||
$venvPath = Join-Path $repoRoot "venv"
|
||||
& $Python -m venv $venvPath
|
||||
$venvMarker = Join-Path $repoRoot ".venv-dir"
|
||||
& $Python -3.11 -m venv $venvPath
|
||||
|
||||
$pip = Join-Path $venvPath "Scripts\pip.exe"
|
||||
& $pip install -r (Join-Path $repoRoot "requirements-dev.txt")
|
||||
& $pip install --upgrade pip
|
||||
Push-Location $repoRoot
|
||||
& (Join-Path $venvPath "Scripts\python.exe") -m pip install -e .
|
||||
& $pip install pytest pytest-asyncio ruff black
|
||||
"venv" | Set-Content -LiteralPath $venvMarker -NoNewline
|
||||
Pop-Location
|
||||
|
||||
@@ -1,9 +1,14 @@
|
||||
#!/usr/bin/env bash
|
||||
set -euo pipefail
|
||||
|
||||
PYTHON="${PYTHON:-python3}"
|
||||
PYTHON="${PYTHON:-python3.11}"
|
||||
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
|
||||
VENV_DIR="$REPO_ROOT/venv"
|
||||
VENV_MARKER="$REPO_ROOT/.venv-dir"
|
||||
|
||||
"$PYTHON" -m venv "$VENV_DIR"
|
||||
"$VENV_DIR/bin/pip" install -r "$REPO_ROOT/requirements-dev.txt"
|
||||
"$VENV_DIR/bin/pip" install --upgrade pip
|
||||
cd "$REPO_ROOT"
|
||||
"$VENV_DIR/bin/python" -m pip install -e .
|
||||
"$VENV_DIR/bin/pip" install pytest pytest-asyncio ruff black
|
||||
printf 'venv\n' > "$VENV_MARKER"
|
||||
|
||||
@@ -0,0 +1,178 @@
|
||||
"""ai_intel_store — compatibility wrapper around ai_pin_store + layer injection.
|
||||
|
||||
openclaw_channel.py and routers/ai_intel.py import from this module name.
|
||||
All pin/layer logic lives in ai_pin_store.py; this module re-exports with the
|
||||
expected function signatures and adds the layer injection helper.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from services.ai_pin_store import (
|
||||
create_pin,
|
||||
create_pins_batch,
|
||||
get_pins,
|
||||
delete_pin,
|
||||
clear_pins,
|
||||
pin_count,
|
||||
pins_as_geojson,
|
||||
purge_expired,
|
||||
# Layer CRUD
|
||||
create_layer,
|
||||
get_layers,
|
||||
update_layer,
|
||||
delete_layer,
|
||||
# Feed layers
|
||||
get_feed_layers,
|
||||
replace_layer_pins,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Re-exports expected by openclaw_channel._dispatch_command
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_all_intel_pins() -> list[dict[str, Any]]:
    """Return all active pins (no filter, generous fixed cap of 2000)."""
    pins = get_pins(limit=2000)
    return pins
|
||||
|
||||
|
||||
def add_intel_pin(args: dict[str, Any]) -> dict[str, Any]:
    """Create a single pin from a command-channel args dict."""
    raw_attachment = args.get("entity_attachment")
    attachment = raw_attachment if isinstance(raw_attachment, dict) else None
    return create_pin(
        lat=float(args.get("lat", 0)),
        lng=float(args.get("lng", 0)),
        label=str(args.get("label", ""))[:200],
        category=str(args.get("category", "custom")),
        layer_id=str(args.get("layer_id", "")),
        color=str(args.get("color", "")),
        description=str(args.get("description", "")),
        source=str(args.get("source", "openclaw")),
        source_url=str(args.get("source_url", "")),
        confidence=float(args.get("confidence", 1.0)),
        ttl_hours=float(args.get("ttl_hours", 0)),
        metadata=args.get("metadata") or {},
        entity_attachment=attachment,
    )
|
||||
|
||||
|
||||
def delete_intel_pin(pin_id: str) -> bool:
    """Delete a pin by ID; True when the pin existed."""
    deleted = delete_pin(pin_id)
    return deleted
|
||||
|
||||
|
||||
# Layer helpers for OpenClaw
|
||||
def create_intel_layer(args: dict[str, Any]) -> dict[str, Any]:
    """Create a layer from a command-channel args dict."""
    layer_name = str(args.get("name", "Untitled"))[:100]
    layer_description = str(args.get("description", ""))[:500]
    layer_source = str(args.get("source", "openclaw"))[:50]
    return create_layer(
        name=layer_name,
        description=layer_description,
        source=layer_source,
        color=str(args.get("color", "")),
        feed_url=str(args.get("feed_url", "")),
        feed_interval=int(args.get("feed_interval", 300)),
    )
|
||||
|
||||
|
||||
def get_intel_layers() -> list[dict[str, Any]]:
    """Return all layers with their current pin counts."""
    layers = get_layers()
    return layers
|
||||
|
||||
|
||||
def update_intel_layer(layer_id: str, args: dict[str, Any]) -> dict[str, Any] | None:
    """Update a layer from a command-channel args dict (whitelisted fields only)."""
    allowed_fields = ("name", "description", "visible", "color", "feed_url", "feed_interval")
    filtered = {}
    for field in allowed_fields:
        if field in args:
            filtered[field] = args[field]
    return update_layer(layer_id, **filtered)
|
||||
|
||||
|
||||
def delete_intel_layer(layer_id: str) -> int:
    """Delete a layer and its pins; returns the number of pins removed."""
    removed_pins = delete_layer(layer_id)
    return removed_pins
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Layer injection — inserts agent data into native telemetry layers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Layers that agents are allowed to inject into.
|
||||
_INJECTABLE_LAYERS = frozenset({
|
||||
"cctv", "ships", "sigint", "kiwisdr", "military_bases",
|
||||
"datacenters", "power_plants", "satnogs_stations",
|
||||
"volcanoes", "earthquakes", "news", "viirs_change_nodes",
|
||||
"air_quality",
|
||||
})
|
||||
|
||||
|
||||
def inject_layer_data(
    layer: str,
    items: list[dict[str, Any]],
    mode: str = "append",
) -> dict[str, Any]:
    """Inject agent-supplied items into a native telemetry layer.

    Only layers in _INJECTABLE_LAYERS are accepted, and at most 200 items per
    call. Each stored item is tagged (_injected/_source/_injected_at) so it can
    later be stripped by clear_injected_data. mode="replace" first drops
    previously injected entries; any other mode value appends.

    Returns a {"ok": bool, ...} status dict — never raises for bad input.
    """
    # Lazy import avoids a circular dependency on the fetcher store at import time.
    from services.fetchers._store import latest_data, _data_lock, bump_data_version

    layer = str(layer or "").strip()
    if layer not in _INJECTABLE_LAYERS:
        return {"ok": False, "detail": f"layer '{layer}' not injectable"}

    items = list(items or [])[:200]
    if not items:
        return {"ok": False, "detail": "no items provided"}

    now = time.time()
    tagged = []
    for item in items:
        if not isinstance(item, dict):
            continue  # silently drop malformed entries
        entry = dict(item)  # shallow copy so the caller's dict is not mutated
        entry["_injected"] = True
        entry["_source"] = "user:openclaw"
        entry["_injected_at"] = now
        tagged.append(entry)

    with _data_lock:
        existing = latest_data.get(layer)
        if not isinstance(existing, list):
            existing = []

        if mode == "replace":
            # isinstance guard: native feed entries are not guaranteed to be
            # dicts, and calling .get on one used to raise AttributeError.
            existing = [
                e for e in existing
                if not (isinstance(e, dict) and e.get("_injected"))
            ]

        existing.extend(tagged)
        latest_data[layer] = existing

    bump_data_version()

    return {
        "ok": True,
        "layer": layer,
        "injected": len(tagged),
        "mode": mode,
    }
|
||||
|
||||
|
||||
def clear_injected_data(layer: str = "") -> dict[str, Any]:
    """Remove all injected items from one layer ("" means every injectable layer).

    Returns {"ok": True, "removed": <count>}; the data version is bumped only
    when something was actually removed.
    """
    # Lazy import avoids a circular dependency on the fetcher store at import time.
    from services.fetchers._store import latest_data, _data_lock, bump_data_version

    removed = 0
    with _data_lock:
        targets = [layer] if layer else list(_INJECTABLE_LAYERS)
        for lyr in targets:
            entries = latest_data.get(lyr)
            if not isinstance(entries, list):
                continue
            before = len(entries)
            # isinstance guard: non-dict feed entries must survive the filter
            # rather than raise AttributeError on .get.
            latest_data[lyr] = [
                e for e in entries
                if not (isinstance(e, dict) and e.get("_injected"))
            ]
            removed += before - len(latest_data[lyr])

    if removed:
        bump_data_version()

    return {"ok": True, "removed": removed}
|
||||
@@ -0,0 +1,633 @@
|
||||
"""AI Intel pin storage — layered pin system with JSON file persistence.
|
||||
|
||||
Supports:
|
||||
- Named pin layers (created by user or AI)
|
||||
- Pins with optional entity attachment (track moving objects)
|
||||
- Pin source tracking (user vs openclaw)
|
||||
- Layer visibility toggles
|
||||
- External feed URL per layer (for Phase 5)
|
||||
- GeoJSON export per layer or all layers
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime
|
||||
from typing import Any, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pin schema
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
PIN_CATEGORIES = {
|
||||
"threat", "news", "geolocation", "custom", "anomaly",
|
||||
"military", "maritime", "flight", "infrastructure", "weather",
|
||||
"sigint", "prediction", "research",
|
||||
}
|
||||
|
||||
PIN_COLORS = {
|
||||
"threat": "#ef4444", # red
|
||||
"news": "#f59e0b", # amber
|
||||
"geolocation": "#8b5cf6", # violet
|
||||
"custom": "#3b82f6", # blue
|
||||
"anomaly": "#f97316", # orange
|
||||
"military": "#dc2626", # dark red
|
||||
"maritime": "#0ea5e9", # sky
|
||||
"flight": "#6366f1", # indigo
|
||||
"infrastructure": "#64748b", # slate
|
||||
"weather": "#22d3ee", # cyan
|
||||
"sigint": "#a855f7", # purple
|
||||
"prediction": "#eab308", # yellow
|
||||
"research": "#10b981", # emerald
|
||||
}
|
||||
|
||||
LAYER_COLORS = [
|
||||
"#3b82f6", "#ef4444", "#22d3ee", "#f59e0b", "#8b5cf6",
|
||||
"#10b981", "#f97316", "#6366f1", "#ec4899", "#14b8a6",
|
||||
]
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# In-memory store
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_layers: list[dict[str, Any]] = []
|
||||
_pins: list[dict[str, Any]] = []
|
||||
_lock = threading.Lock()
|
||||
|
||||
# Persistence file path
|
||||
_PERSIST_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
|
||||
_PERSIST_FILE = os.path.join(_PERSIST_DIR, "pin_layers.json")
|
||||
_OLD_PERSIST_FILE = os.path.join(_PERSIST_DIR, "ai_pins.json")
|
||||
|
||||
|
||||
def _ensure_persist_dir():
    """Best-effort creation of the persistence directory."""
    try:
        os.makedirs(_PERSIST_DIR, exist_ok=True)
    except OSError:
        # Persistence is best-effort; the store keeps working in memory.
        pass
|
||||
|
||||
|
||||
def _save_to_disk():
    """Persist layers and pins to the JSON file. Must be called under _lock.

    Failures are logged and swallowed: persistence is best-effort and the
    in-memory store remains authoritative.
    """
    try:
        _ensure_persist_dir()
        with open(_PERSIST_FILE, "w", encoding="utf-8") as f:
            # default=str guards against any non-JSON-serializable stray values.
            json.dump({"layers": _layers, "pins": _pins}, f, indent=2, default=str)
    except OSError as e:
        # IOError has been an alias of OSError since Python 3.3, so catching
        # OSError alone is equivalent. Lazy %-args avoid formatting on the
        # (common) path where the warning level is disabled.
        logger.warning("Failed to persist pin layers: %s", e)
|
||||
|
||||
|
||||
def _load_from_disk():
    """Load layers and pins from disk on startup.

    Prefers the current layered file (pin_layers.json); when absent, migrates
    the legacy flat ai_pins.json into a synthetic "Legacy" layer and persists
    the result. Any I/O or JSON error is logged and leaves the store empty.
    """
    # Rebinds the module-level lists; safe because this runs once at import,
    # before any other module can hold references to them.
    global _layers, _pins
    try:
        if os.path.exists(_PERSIST_FILE):
            with open(_PERSIST_FILE, "r", encoding="utf-8") as f:
                data = json.load(f)
                if isinstance(data, dict):
                    _layers = data.get("layers", [])
                    _pins = data.get("pins", [])
                    logger.info(f"Loaded {len(_layers)} layers, {len(_pins)} pins from disk")
            # A present-but-malformed file does NOT fall through to migration.
            return

        # Migrate from old flat pin file
        if os.path.exists(_OLD_PERSIST_FILE):
            with open(_OLD_PERSIST_FILE, "r", encoding="utf-8") as f:
                old_pins = json.load(f)
            if isinstance(old_pins, list) and old_pins:
                legacy_layer = _make_layer("Legacy", "Migrated pins", source="system")
                _layers.append(legacy_layer)
                for p in old_pins:
                    # Non-dict entries are dropped during migration.
                    if isinstance(p, dict):
                        p["layer_id"] = legacy_layer["id"]
                        _pins.append(p)
                logger.info(f"Migrated {len(_pins)} pins from ai_pins.json into Legacy layer")
                # Persist immediately so migration happens only once.
                _save_to_disk()
    except (OSError, IOError, json.JSONDecodeError) as e:
        logger.warning(f"Failed to load pin layers from disk: {e}")
|
||||
|
||||
|
||||
def _make_layer(
    name: str,
    description: str = "",
    source: str = "user",
    color: str = "",
    feed_url: str = "",
    feed_interval: int = 300,
) -> dict[str, Any]:
    """Build a new layer dict (does not append it to the store).

    Fields are length-clamped; feed_interval is clamped to [60, 86400] seconds;
    a default color is rotated from LAYER_COLORS when none is given.
    """
    from datetime import timezone  # local: module-level import only brings datetime

    layer_id = str(uuid.uuid4())[:12]
    now = time.time()
    # datetime.utcfromtimestamp is deprecated since Python 3.12; this aware
    # formulation yields the identical second-by-second "...Z" string.
    created_iso = datetime.fromtimestamp(now, tz=timezone.utc).isoformat()
    created_iso = created_iso.replace("+00:00", "Z")
    return {
        "id": layer_id,
        "name": name[:100],
        "description": description[:500],
        "source": source[:50],
        "visible": True,
        # NOTE(review): reads len(_layers) without holding _lock — confirm all
        # callers (create_layer, _load_from_disk at import) hold or don't need it.
        "color": color or LAYER_COLORS[len(_layers) % len(LAYER_COLORS)],
        "created_at": now,
        "created_at_iso": created_iso,
        "feed_url": feed_url[:1000] if feed_url else "",
        "feed_interval": max(60, min(86400, feed_interval)),
        "pin_count": 0,
    }
|
||||
|
||||
|
||||
# Load persisted layers/pins at import time so the store is ready immediately.
_load_from_disk()

# One-time cleanup: remove correlation_engine auto-pins (no longer generated).
# Slice-assignment keeps the _pins list object identity intact.
_corr_before = len(_pins)
_pins[:] = [p for p in _pins if p.get("source") != "correlation_engine"]
if len(_pins) < _corr_before:
    logger.info("Cleaned up %d legacy correlation_engine pins", _corr_before - len(_pins))
    _save_to_disk()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Layer CRUD
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_layer(
    name: str,
    description: str = "",
    source: str = "user",
    color: str = "",
    feed_url: str = "",
    feed_interval: int = 300,
) -> dict[str, Any]:
    """Create, register, and persist a new pin layer; returns the layer dict."""
    with _lock:
        new_layer = _make_layer(name, description, source, color, feed_url, feed_interval)
        _layers.append(new_layer)
        _save_to_disk()
        return new_layer
|
||||
|
||||
|
||||
def get_layers() -> list[dict[str, Any]]:
    """Return all layers, each annotated with its count of non-expired pins."""
    now = time.time()
    with _lock:
        annotated = []
        for layer in _layers:
            layer_key = layer["id"]
            live = 0
            for pin in _pins:
                if pin.get("layer_id") != layer_key:
                    continue
                expires = pin.get("expires_at")
                if expires and expires < now:
                    continue  # expired pins don't count
                live += 1
            annotated.append({**layer, "pin_count": live})
        return annotated
|
||||
|
||||
|
||||
def update_layer(layer_id: str, **updates) -> Optional[dict[str, Any]]:
    """Apply whitelisted, coerced field updates to a layer.

    Returns a copy of the updated layer, or None when layer_id is unknown.
    None-valued updates and unknown fields are ignored.
    """
    # Per-field coercion/clamping rules for the writable layer fields.
    coerce = {
        "name": lambda v: str(v)[:100],
        "description": lambda v: str(v)[:500],
        "visible": bool,
        "color": lambda v: str(v)[:20],
        "feed_url": lambda v: str(v)[:1000],
        "feed_interval": lambda v: max(60, min(86400, int(v))),
        "feed_last_fetched": float,
    }
    with _lock:
        for layer in _layers:
            if layer["id"] != layer_id:
                continue
            for field, value in updates.items():
                if field in coerce and value is not None:
                    layer[field] = coerce[field](value)
            _save_to_disk()
            return dict(layer)
    return None
|
||||
|
||||
|
||||
def delete_layer(layer_id: str) -> int:
    """Delete a layer and every pin attached to it.

    Returns the number of pins removed, or 0 when the layer does not exist.
    """
    with _lock:
        surviving_layers = [layer for layer in _layers if layer["id"] != layer_id]
        if len(surviving_layers) == len(_layers):
            return 0  # no such layer
        _layers[:] = surviving_layers
        surviving_pins = [p for p in _pins if p.get("layer_id") != layer_id]
        removed = len(_pins) - len(surviving_pins)
        _pins[:] = surviving_pins
        _save_to_disk()
        return removed
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Pin CRUD
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def create_pin(
    lat: float,
    lng: float,
    label: str,
    category: str = "custom",
    *,
    layer_id: str = "",
    color: str = "",
    description: str = "",
    source: str = "openclaw",
    source_url: str = "",
    confidence: float = 1.0,
    ttl_hours: float = 0,
    metadata: Optional[dict] = None,
    entity_attachment: Optional[dict] = None,
) -> dict[str, Any]:
    """Create, store, and persist a single pin; returns the pin dict.

    Unknown categories fall back to "custom"; confidence is clamped to [0, 1];
    ttl_hours > 0 sets an absolute expiry timestamp, otherwise the pin never
    expires. entity_attachment is kept only when it carries both entity_type
    and entity_id.
    """
    from datetime import timezone  # local: module-level import only brings datetime

    pin_id = str(uuid.uuid4())[:12]
    now = time.time()

    cat = category if category in PIN_CATEGORIES else "custom"
    pin_color = color or PIN_COLORS.get(cat, "#3b82f6")

    # Validate entity_attachment: keep only when both identifying fields exist.
    attachment = None
    if entity_attachment and isinstance(entity_attachment, dict):
        etype = str(entity_attachment.get("entity_type", "")).strip()
        eid = str(entity_attachment.get("entity_id", "")).strip()
        if etype and eid:
            attachment = {
                "entity_type": etype[:50],
                "entity_id": eid[:100],
                "entity_label": str(entity_attachment.get("entity_label", ""))[:200],
            }

    # datetime.utcfromtimestamp is deprecated since Python 3.12; this aware
    # formulation yields the identical "...Z" string.
    created_iso = datetime.fromtimestamp(now, tz=timezone.utc).isoformat()
    created_iso = created_iso.replace("+00:00", "Z")

    pin = {
        "id": pin_id,
        "layer_id": layer_id or "",
        "lat": lat,
        "lng": lng,
        "label": label[:200],
        "category": cat,
        "color": pin_color,
        "description": description[:2000],
        "source": source[:100],
        "source_url": source_url[:500],
        "confidence": max(0.0, min(1.0, confidence)),
        "created_at": now,
        "created_at_iso": created_iso,
        "expires_at": now + (ttl_hours * 3600) if ttl_hours > 0 else None,
        "metadata": metadata or {},
        "entity_attachment": attachment,
        "comments": [],
    }

    with _lock:
        _pins.append(pin)
        _save_to_disk()

    return pin
|
||||
|
||||
|
||||
def create_pins_batch(items: list[dict], default_layer_id: str = "") -> list[dict[str, Any]]:
    """Create multiple pins at once from raw item dicts.

    At most 200 items are processed per call; extras are silently dropped.
    Each item uses the same coercion/truncation rules as create_pin(); the
    whole batch is created under one lock acquisition and one disk save.

    NOTE(review): float(...) on lat/lng/confidence/ttl_hours can raise for
    malformed input, which aborts the batch after some pins were already
    appended but before _save_to_disk() ran — confirm callers validate input.

    Returns:
        The list of created pin dicts, in input order.
    """
    created = []
    now = time.time()

    with _lock:
        for item in items[:200]:  # max 200 per batch
            pin_id = str(uuid.uuid4())[:12]
            cat = item.get("category", "custom")
            if cat not in PIN_CATEGORIES:
                cat = "custom"
            pin_color = item.get("color", "") or PIN_COLORS.get(cat, "#3b82f6")
            ttl = float(item.get("ttl_hours", 0) or 0)

            # Keep the attachment only when both type and id survive stripping.
            attachment = None
            ea = item.get("entity_attachment")
            if ea and isinstance(ea, dict):
                etype = str(ea.get("entity_type", "")).strip()
                eid = str(ea.get("entity_id", "")).strip()
                if etype and eid:
                    attachment = {
                        "entity_type": etype[:50],
                        "entity_id": eid[:100],
                        "entity_label": str(ea.get("entity_label", ""))[:200],
                    }

            pin = {
                "id": pin_id,
                "layer_id": item.get("layer_id", default_layer_id) or "",
                "lat": float(item.get("lat", 0)),
                "lng": float(item.get("lng", 0)),
                "label": str(item.get("label", ""))[:200],
                "category": cat,
                "color": pin_color,
                "description": str(item.get("description", ""))[:2000],
                "source": str(item.get("source", "openclaw"))[:100],
                "source_url": str(item.get("source_url", ""))[:500],
                "confidence": max(0.0, min(1.0, float(item.get("confidence", 1.0)))),
                "created_at": now,
                "created_at_iso": datetime.utcfromtimestamp(now).isoformat() + "Z",
                "expires_at": now + (ttl * 3600) if ttl > 0 else None,
                "metadata": item.get("metadata", {}),
                "entity_attachment": attachment,
                "comments": [],
            }
            _pins.append(pin)
            created.append(pin)

        _save_to_disk()
    return created
|
||||
|
||||
|
||||
def get_pins(
    category: str = "",
    source: str = "",
    layer_id: str = "",
    limit: int = 500,
    include_expired: bool = False,
) -> list[dict[str, Any]]:
    """Return stored pins, honoring optional category/source/layer filters.

    Expired pins are skipped unless include_expired is set; at most *limit*
    pins are returned, in store order.
    """
    now = time.time()

    def _wanted(p: dict[str, Any]) -> bool:
        if not include_expired:
            exp = p.get("expires_at")
            if exp and exp < now:
                return False
        if category and p.get("category") != category:
            return False
        if source and p.get("source") != source:
            return False
        return not (layer_id and p.get("layer_id") != layer_id)

    with _lock:
        matched: list[dict[str, Any]] = []
        for candidate in _pins:
            if not _wanted(candidate):
                continue
            matched.append(candidate)
            if len(matched) >= limit:
                break
        return matched
|
||||
|
||||
|
||||
def get_pin(pin_id: str) -> Optional[dict[str, Any]]:
    """Return a shallow copy of the pin with this ID (with comments), or None."""
    with _lock:
        match = next((p for p in _pins if p.get("id") == pin_id), None)
        if match is None:
            return None
        # Older persisted pins may predate the comments feature.
        match.setdefault("comments", [])
        return dict(match)
|
||||
|
||||
|
||||
def update_pin(pin_id: str, **updates) -> Optional[dict[str, Any]]:
    """Update a pin's editable fields (label, description, category, color).

    Only the four whitelisted keys are honored; unknown keys and None values
    are ignored. Changing the category refreshes the color to that category's
    default unless an explicit color was supplied in the same call.
    Truncation limits mirror create_pin(): label 200, description 2000,
    color 20 characters.

    Returns:
        A shallow copy of the updated pin, or None when no pin matched.
    """
    allowed = {"label", "description", "category", "color"}
    with _lock:
        for pin in _pins:
            if pin.get("id") != pin_id:
                continue
            for k, v in updates.items():
                # Skip non-editable keys and explicit None (means "no change").
                if k not in allowed or v is None:
                    continue
                if k == "label":
                    pin[k] = str(v)[:200]
                elif k == "description":
                    pin[k] = str(v)[:2000]
                elif k == "category":
                    cat = str(v)
                    # Silently ignore unknown categories.
                    if cat in PIN_CATEGORIES:
                        pin[k] = cat
                        # Refresh color if it was the category default
                        if not updates.get("color"):
                            pin["color"] = PIN_COLORS.get(cat, pin.get("color", "#3b82f6"))
                elif k == "color":
                    pin[k] = str(v)[:20]
            # Stamp the edit time even when no field actually changed.
            pin["updated_at"] = time.time()
            _save_to_disk()
            return dict(pin)
        return None
|
||||
|
||||
|
||||
def add_pin_comment(
    pin_id: str,
    text: str,
    author: str = "user",
    author_label: str = "",
    reply_to: str = "",
) -> Optional[dict[str, Any]]:
    """Append a comment to a pin. Returns the updated pin (with all comments).

    Args:
        pin_id: Target pin's ID.
        text: Comment body; required (whitespace-only is rejected),
            truncated to 4000 characters.
        author: Author tag, truncated to 50 characters ("user" by default).
        author_label: Optional display name, truncated to 100 characters.
        reply_to: Optional parent comment ID (12 chars, matching comment IDs).

    Returns:
        A shallow copy of the pin with the new comment appended, or None when
        the text is empty or no pin matches pin_id.
    """
    text = (text or "").strip()
    if not text:
        return None
    with _lock:
        for pin in _pins:
            if pin.get("id") != pin_id:
                continue
            # Repair pins persisted before the comments feature existed.
            if "comments" not in pin or not isinstance(pin["comments"], list):
                pin["comments"] = []
            comment = {
                "id": str(uuid.uuid4())[:12],
                "text": text[:4000],
                "author": (author or "user")[:50],
                "author_label": (author_label or "")[:100],
                "reply_to": (reply_to or "")[:12],
                "created_at": time.time(),
                # NOTE(review): datetime.utcnow is deprecated in Py3.12.
                "created_at_iso": datetime.utcnow().isoformat() + "Z",
            }
            pin["comments"].append(comment)
            _save_to_disk()
            return dict(pin)
        return None
|
||||
|
||||
|
||||
def delete_pin_comment(pin_id: str, comment_id: str) -> bool:
    """Remove one comment from a pin; True when something was deleted."""
    with _lock:
        for pin in _pins:
            if pin.get("id") != pin_id:
                continue
            existing = pin.get("comments") or []
            remaining = [c for c in existing if c.get("id") != comment_id]
            # Always reassign: this also normalizes a missing/None list to [].
            pin["comments"] = remaining
            if len(remaining) == len(existing):
                return False
            _save_to_disk()
            return True
        return False
|
||||
|
||||
|
||||
def delete_pin(pin_id: str) -> bool:
    """Remove the pin with this ID; True when a pin was actually deleted."""
    with _lock:
        survivors = [p for p in _pins if p.get("id") != pin_id]
        if len(survivors) == len(_pins):
            return False
        _pins[:] = survivors
        _save_to_disk()
        return True
|
||||
|
||||
|
||||
def clear_pins(category: str = "", source: str = "", layer_id: str = "") -> int:
    """Clear pins, optionally filtered. Returns count removed.

    A pin is removed when every supplied filter matches it; with no filters,
    all pins are removed.
    """
    with _lock:
        before = len(_pins)

        def _matches(p: dict) -> bool:
            # True when the pin satisfies every filter that was provided.
            if layer_id and p.get("layer_id") != layer_id:
                return False
            if category and p.get("category") != category:
                return False
            if source and p.get("source") != source:
                return False
            return True

        if not (category or source or layer_id):
            _pins.clear()
        else:
            _pins[:] = [p for p in _pins if not _matches(p)]

        removed = before - len(_pins)
        if removed:
            _save_to_disk()
        return removed
|
||||
|
||||
|
||||
def get_feed_layers() -> list[dict[str, Any]]:
    """Snapshot (shallow copies) of layers configured with a feed URL."""
    with _lock:
        feeds: list[dict[str, Any]] = []
        for layer in _layers:
            if layer.get("feed_url"):
                feeds.append(dict(layer))
        return feeds
|
||||
|
||||
|
||||
def replace_layer_pins(layer_id: str, new_pins: list[dict[str, Any]]) -> int:
    """Atomically replace all pins in a layer with new_pins. Returns count added.

    Existing pins with this layer_id are dropped first, then up to 500 items
    from new_pins are materialized with the same coercion rules as
    create_pins_batch(). Feed pins never expire (expires_at is always None).
    The swap happens under one lock hold with a single disk save at the end.
    """
    now = time.time()
    with _lock:
        # Remove old pins for this layer
        _pins[:] = [p for p in _pins if p.get("layer_id") != layer_id]
        # Add new pins
        added = 0
        for item in new_pins[:500]:  # cap at 500 per feed
            pin_id = str(uuid.uuid4())[:12]
            cat = item.get("category", "custom")
            if cat not in PIN_CATEGORIES:
                cat = "custom"
            pin_color = item.get("color", "") or PIN_COLORS.get(cat, "#3b82f6")

            # Keep the attachment only when both type and id survive stripping.
            attachment = None
            ea = item.get("entity_attachment")
            if ea and isinstance(ea, dict):
                etype = str(ea.get("entity_type", "")).strip()
                eid = str(ea.get("entity_id", "")).strip()
                if etype and eid:
                    attachment = {
                        "entity_type": etype[:50],
                        "entity_id": eid[:100],
                        "entity_label": str(ea.get("entity_label", ""))[:200],
                    }

            pin = {
                "id": pin_id,
                "layer_id": layer_id,
                "lat": float(item.get("lat", 0)),
                "lng": float(item.get("lng", 0)),
                # Feeds may carry the label under "name" instead of "label".
                "label": str(item.get("label", item.get("name", "")))[:200],
                "category": cat,
                "color": pin_color,
                "description": str(item.get("description", ""))[:2000],
                "source": str(item.get("source", "feed"))[:100],
                "source_url": str(item.get("source_url", ""))[:500],
                "confidence": max(0.0, min(1.0, float(item.get("confidence", 1.0)))),
                "created_at": now,
                "created_at_iso": datetime.utcfromtimestamp(now).isoformat() + "Z",
                "expires_at": None,
                "metadata": item.get("metadata", {}),
                "entity_attachment": attachment,
                "comments": [],
            }
            _pins.append(pin)
            added += 1
        _save_to_disk()
    return added
|
||||
|
||||
|
||||
def purge_expired() -> int:
    """Drop pins whose TTL has elapsed; returns how many were removed."""
    cutoff = time.time()

    def _alive(p: dict) -> bool:
        exp = p.get("expires_at")
        return not exp or exp >= cutoff

    with _lock:
        before = len(_pins)
        _pins[:] = [p for p in _pins if _alive(p)]
        dropped = before - len(_pins)
        if dropped:
            _save_to_disk()
        return dropped
|
||||
|
||||
|
||||
def pin_count() -> dict[str, int]:
    """Tally live (non-expired) pins by category."""
    cutoff = time.time()
    tally: dict[str, int] = {}
    with _lock:
        live = (
            p for p in _pins
            if not (p.get("expires_at") and p["expires_at"] < cutoff)
        )
        for p in live:
            cat = p.get("category", "custom")
            tally[cat] = tally.get(cat, 0) + 1
    return tally
|
||||
|
||||
|
||||
def pins_as_geojson(layer_id: str = "") -> dict[str, Any]:
    """Convert active pins to a GeoJSON FeatureCollection for the map layer.

    Skips expired pins and pins belonging to layers whose ``visible`` flag is
    False. When *layer_id* is given, only that layer's pins are emitted.
    Coordinates follow the GeoJSON [lng, lat] order.
    """
    now = time.time()
    features = []
    with _lock:
        # Build set of visible layer IDs
        visible_layers = {l["id"] for l in _layers if l.get("visible", True)}

        for pin in _pins:
            if pin.get("expires_at") and pin["expires_at"] < now:
                continue
            # Layer filter
            pid_layer = pin.get("layer_id", "")
            if layer_id and pid_layer != layer_id:
                continue
            # Skip pins in hidden layers
            if pid_layer and pid_layer not in visible_layers:
                continue

            props = {
                "id": pin["id"],
                "layer_id": pid_layer,
                "label": pin["label"],
                "category": pin["category"],
                "color": pin["color"],
                "description": pin.get("description", ""),
                "source": pin["source"],
                "source_url": pin.get("source_url", ""),
                "confidence": pin.get("confidence", 1.0),
                "created_at": pin.get("created_at_iso", ""),
                "comment_count": len(pin.get("comments") or []),
            }

            # Entity attachment info (frontend resolves position)
            ea = pin.get("entity_attachment")
            if ea:
                props["entity_attachment"] = ea

            features.append({
                "type": "Feature",
                "geometry": {
                    "type": "Point",
                    # GeoJSON order is [longitude, latitude].
                    "coordinates": [pin["lng"], pin["lat"]],
                },
                "properties": props,
            })
    return {
        "type": "FeatureCollection",
        "features": features,
    }
|
||||
@@ -0,0 +1,189 @@
|
||||
"""Analysis Zone store — OpenClaw-placed map overlays with analyst notes.
|
||||
|
||||
These render as the dashed-border squares on the correlations layer.
|
||||
Unlike automated correlations (which are recomputed every cycle), analysis
|
||||
zones persist until the agent or user deletes them, or their TTL expires.
|
||||
|
||||
Shape matches the correlation alert schema so the frontend renders them
|
||||
identically — the ``source`` field marks them as agent-placed and enables
|
||||
the delete button in the popup.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
from typing import Any
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_zones: list[dict[str, Any]] = []
|
||||
_lock = threading.Lock()
|
||||
|
||||
_PERSIST_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), "data")
|
||||
_PERSIST_FILE = os.path.join(_PERSIST_DIR, "analysis_zones.json")
|
||||
|
||||
ZONE_CATEGORIES = {
|
||||
"contradiction", # narrative vs telemetry mismatch
|
||||
"analysis", # general analyst note / assessment
|
||||
"warning", # potential threat or risk area
|
||||
"observation", # neutral observation worth marking
|
||||
"hypothesis", # unverified theory to investigate
|
||||
}
|
||||
|
||||
# Map categories to correlation type colors on the frontend
|
||||
CATEGORY_COLORS = {
|
||||
"contradiction": "amber",
|
||||
"analysis": "cyan",
|
||||
"warning": "red",
|
||||
"observation": "blue",
|
||||
"hypothesis": "purple",
|
||||
}
|
||||
|
||||
|
||||
def _ensure_dir():
    """Best-effort creation of the persistence directory.

    Persistence is optional: on a read-only filesystem the OSError is
    swallowed and _save() will log its own warning instead.
    """
    try:
        os.makedirs(_PERSIST_DIR, exist_ok=True)
    except OSError:
        pass
|
||||
|
||||
|
||||
def _save():
    """Write the zone list to disk as JSON. Caller must hold _lock."""
    try:
        _ensure_dir()
        with open(_PERSIST_FILE, "w", encoding="utf-8") as fh:
            json.dump(_zones, fh, indent=2, default=str)
    except Exception as exc:
        # Disk trouble must not break the request path — log and move on.
        logger.warning("Failed to save analysis zones: %s", exc)
|
||||
|
||||
|
||||
def _load():
    """Load persisted zones from disk on startup.

    Missing or corrupt files are tolerated: the in-memory list simply stays
    empty and a warning is logged. Only a top-level JSON list is accepted.
    """
    global _zones
    try:
        if os.path.exists(_PERSIST_FILE):
            with open(_PERSIST_FILE, "r", encoding="utf-8") as f:
                data = json.load(f)
            if isinstance(data, list):
                _zones = data
                logger.info("Loaded %d analysis zones from disk", len(_zones))
    except Exception as e:
        logger.warning("Failed to load analysis zones: %s", e)
|
||||
|
||||
|
||||
# Load on import
|
||||
_load()
|
||||
|
||||
|
||||
def _expire():
    """Remove zones past their TTL. Called under lock.

    A zone with ttl_hours <= 0 (or missing) never expires; otherwise it is
    kept while its age since created_at is under ttl_hours.
    NOTE(review): ttl_hours loaded from JSON is assumed numeric — a None
    value here would raise on the comparison; confirm _load() inputs.
    """
    now = time.time()
    before = len(_zones)
    _zones[:] = [
        z for z in _zones
        if z.get("ttl_hours", 0) <= 0
        # Missing created_at defaults to "now", i.e. age 0 — never expires here.
        or (now - z.get("created_at", now)) < z["ttl_hours"] * 3600
    ]
    removed = before - len(_zones)
    if removed:
        logger.info("Expired %d analysis zones", removed)
|
||||
|
||||
|
||||
def create_zone(
    *,
    lat: float,
    lng: float,
    title: str,
    body: str,
    category: str = "analysis",
    severity: str = "medium",
    cell_size_deg: float = 1.0,
    ttl_hours: float = 0,
    source: str = "openclaw",
    drivers: list[str] | None = None,
) -> dict[str, Any]:
    """Create an analysis zone. Returns the created zone dict.

    Args:
        lat, lng: Zone center coordinates.
        title: Short headline; truncated to 200 characters.
        body: Analyst note body; truncated to 2000 characters.
        category: One of ZONE_CATEGORIES; unknown values fall back to
            "analysis".
        severity: "high"/"medium"/"low"; anything else becomes "medium".
            Mapped to a numeric score (90/60/30) for frontend sorting.
        cell_size_deg: Square side in degrees, clamped to [0.1, 10.0].
        ttl_hours: > 0 enables expiry via _expire(); 0 means persist forever.
        source: Creator tag (marks agent-placed zones on the frontend).
        drivers: Optional driver strings; defaults to [title], capped at 5.

    Returns:
        The stored zone dict (same object kept in the module store).
    """
    category = category if category in ZONE_CATEGORIES else "analysis"
    if severity not in ("high", "medium", "low"):
        severity = "medium"
    cell_size_deg = max(0.1, min(cell_size_deg, 10.0))

    zone: dict[str, Any] = {
        "id": str(uuid.uuid4())[:12],
        "lat": lat,
        "lng": lng,
        # "type" mirrors the correlation alert schema so the frontend
        # renders zones with the same code path.
        "type": "analysis_zone",
        "category": category,
        "severity": severity,
        "score": {"high": 90, "medium": 60, "low": 30}.get(severity, 60),
        "title": title[:200],
        "body": body[:2000],
        "drivers": (drivers or [title])[:5],
        "cell_size": cell_size_deg,
        "source": source,
        "created_at": time.time(),
        "ttl_hours": ttl_hours,
    }

    with _lock:
        _expire()
        _zones.append(zone)
        _save()

    logger.info("Analysis zone created: %s at (%.2f, %.2f)", title[:40], lat, lng)
    return zone
|
||||
|
||||
|
||||
def list_zones() -> list[dict[str, Any]]:
    """Return all live (non-expired) zones as a snapshot list."""
    with _lock:
        _expire()
        return [*_zones]
|
||||
|
||||
|
||||
def get_zone(zone_id: str) -> dict[str, Any] | None:
    """Look up a zone by ID; returns a shallow copy or None."""
    with _lock:
        found = next((z for z in _zones if z["id"] == zone_id), None)
        return dict(found) if found is not None else None
|
||||
|
||||
|
||||
def delete_zone(zone_id: str) -> bool:
    """Delete a zone by ID. True when a zone was found and removed."""
    with _lock:
        survivors = [z for z in _zones if z["id"] != zone_id]
        if len(survivors) == len(_zones):
            return False
        _zones[:] = survivors
        _save()
        return True
|
||||
|
||||
|
||||
def clear_zones(*, source: str | None = None) -> int:
    """Remove zones — all of them, or only those from *source*. Returns count."""
    with _lock:
        before = len(_zones)
        if source:
            _zones[:] = [z for z in _zones if z.get("source") != source]
        else:
            del _zones[:]
        removed = before - len(_zones)
        if removed:
            _save()
        return removed
|
||||
|
||||
|
||||
def get_live_zones() -> list[dict[str, Any]]:
    """Return zones shaped for the correlation engine merge.

    compute_correlations() calls this to inject agent-placed zones into the
    correlations list that the frontend renders as map squares.
    """
    with _lock:
        _expire()
        snapshot: list[dict[str, Any]] = []
        for zone in _zones:
            snapshot.append(dict(zone))
        return snapshot
|
||||
@@ -4,11 +4,12 @@ Keys are stored in the backend .env file and loaded via python-dotenv.
|
||||
"""
|
||||
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Path to the backend .env file
|
||||
ENV_PATH = Path(__file__).parent.parent / ".env"
|
||||
# Path to the example template that ships with the repo
|
||||
ENV_EXAMPLE_PATH = Path(__file__).parent.parent.parent / ".env.example"
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# API Registry — every external service the dashboard depends on
|
||||
@@ -143,15 +144,33 @@ API_REGISTRY = [
|
||||
]
|
||||
|
||||
|
||||
def _obfuscate(value: str) -> str:
|
||||
"""Show first 4 chars, mask the rest with bullets."""
|
||||
if not value or len(value) <= 4:
|
||||
return "••••••••"
|
||||
return value[:4] + "•" * (len(value) - 4)
|
||||
def get_env_path_info() -> dict:
    """Return absolute .env / .env.example paths with existence flags.

    The API Keys settings panel surfaces these so users know exactly where
    to put their keys when in-app editing fails (admin key not set, file
    permissions, read-only filesystem, etc.).
    """
    env_file = ENV_PATH.resolve()
    template = ENV_EXAMPLE_PATH.resolve()
    parent_writable = os.access(env_file.parent, os.W_OK)
    file_writable = (not env_file.exists()) or os.access(env_file, os.W_OK)
    return {
        "env_path": str(env_file),
        "env_path_exists": env_file.exists(),
        "env_path_writable": parent_writable and file_writable,
        "env_example_path": str(template),
        "env_example_path_exists": template.exists(),
    }
|
||||
|
||||
|
||||
def get_api_keys():
|
||||
"""Return the full API registry with obfuscated key values."""
|
||||
"""Return the API registry with a binary set/unset flag per key.
|
||||
|
||||
Key values themselves are NEVER returned to the client — not even an
|
||||
obfuscated prefix. Users edit the .env file directly; the panel uses
|
||||
`is_set` to render a CONFIGURED / NOT CONFIGURED badge and the path
|
||||
info from `get_env_path_info()` to tell them where to put each key.
|
||||
"""
|
||||
result = []
|
||||
for api in API_REGISTRY:
|
||||
entry = {
|
||||
@@ -163,41 +182,10 @@ def get_api_keys():
|
||||
"required": api["required"],
|
||||
"has_key": api["env_key"] is not None,
|
||||
"env_key": api["env_key"],
|
||||
"value_obfuscated": None,
|
||||
"is_set": False,
|
||||
}
|
||||
if api["env_key"]:
|
||||
raw = os.environ.get(api["env_key"], "")
|
||||
entry["value_obfuscated"] = _obfuscate(raw)
|
||||
entry["is_set"] = bool(raw)
|
||||
result.append(entry)
|
||||
return result
|
||||
|
||||
|
||||
def update_api_key(env_key: str, new_value: str) -> bool:
    """Update a single key in the .env file and in the current process env.

    Args:
        env_key: Must be one of the env keys declared in API_REGISTRY;
            anything else is rejected.
        new_value: The raw value. Must be a single-line string — embedded
            newlines are rejected because they could smuggle extra
            assignments into the .env file.

    Returns:
        True when the key was accepted and written; False on validation
        failure. May raise OSError if the .env file cannot be written.
    """
    valid_keys = {api["env_key"] for api in API_REGISTRY if api.get("env_key")}
    if env_key not in valid_keys:
        return False

    if not isinstance(new_value, str):
        return False
    if "\n" in new_value or "\r" in new_value:
        return False

    if not ENV_PATH.exists():
        ENV_PATH.write_text("", encoding="utf-8")

    # Update os.environ immediately so the running process sees the change.
    os.environ[env_key] = new_value

    # Update the .env file on disk.
    content = ENV_PATH.read_text(encoding="utf-8")
    pattern = re.compile(rf"^{re.escape(env_key)}=.*$", re.MULTILINE)
    replacement = f"{env_key}={new_value}"
    if pattern.search(content):
        # Bug fix: use a callable replacement so backslashes and "\g<...>"
        # sequences in the user-supplied value are written literally instead
        # of being interpreted as re.sub replacement escapes (which could
        # corrupt the value or raise re.error).
        content = pattern.sub(lambda _m: replacement, content)
    else:
        content = content.rstrip("\n") + f"\n{replacement}\n"

    ENV_PATH.write_text(content, encoding="utf-8")
    return True
|
||||
|
||||
@@ -818,6 +818,105 @@ out body;
|
||||
return cameras
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# ALPR / Surveillance Camera Locations (OSM Overpass)
|
||||
# ---------------------------------------------------------------------------
|
||||
# Queries OpenStreetMap for ALPR/LPR tagged surveillance cameras.
|
||||
# These cameras rarely have public media URLs — this ingestor captures
|
||||
# their LOCATIONS for situational awareness (density heatmap, blind-spot
|
||||
# analysis). No plate-read data is fetched — only publicly-mapped positions.
|
||||
|
||||
|
||||
class OSMALPRCameraIngestor(BaseCCTVIngestor):
    """ALPR / license-plate reader camera locations from OpenStreetMap.

    Searches for nodes tagged with surveillance:type=ALPR or
    man_made=surveillance + camera:type values indicating plate readers.
    Only geolocations are ingested — no live feeds or detection data.
    """

    # Overpass API endpoint and the Overpass QL union query: matches both
    # explicit ALPR/LPR tags (upper and lower case) and surveillance nodes
    # whose free-text description mentions license plates or "Flock".
    URL = "https://overpass-api.de/api/interpreter"
    QUERY = """
[out:json][timeout:45];
(
  node["surveillance:type"="ALPR"];
  node["surveillance:type"="alpr"];
  node["surveillance:type"="LPR"];
  node["surveillance:type"="lpr"];
  node["man_made"="surveillance"]["camera:type"="ALPR"];
  node["man_made"="surveillance"]["camera:type"="alpr"];
  node["man_made"="surveillance"]["camera:type"="LPR"];
  node["man_made"="surveillance"]["camera:type"="lpr"];
  node["man_made"="surveillance"]["description"~"[Ll]icense [Pp]late"];
  node["man_made"="surveillance"]["description"~"ALPR"];
  node["man_made"="surveillance"]["description"~"Flock"];
);
out body;
""".strip()

    def fetch_data(self) -> List[Dict[str, Any]]:
        """Query Overpass and return camera-location dicts for ingestion.

        Returns an empty list on any HTTP failure. Each returned dict has
        the common CCTV ingestor shape; media_url is always empty because
        ALPR cameras have no public feeds (only mapped positions).
        """
        query = quote(self.QUERY, safe="")
        resp = fetch_with_curl(
            f"{self.URL}?data={query}",
            timeout=50,
            headers={"Accept": "application/json"},
        )
        if not resp or resp.status_code != 200:
            logger.warning(
                "OSM ALPR camera fetch failed: HTTP %s",
                resp.status_code if resp else "no response",
            )
            return []
        data = resp.json()
        cameras = []
        for item in data.get("elements", []) if isinstance(data, dict) else []:
            lat = item.get("lat")
            lon = item.get("lon")
            if lat is None or lon is None:
                continue
            try:
                lat, lon = float(lat), float(lon)
            except (ValueError, TypeError):
                continue

            tags = item.get("tags", {}) if isinstance(item.get("tags"), dict) else {}

            # Extract what we can from tags
            operator = (
                tags.get("operator")
                or tags.get("brand")
                or tags.get("network")
                or "Unknown"
            )
            description = (
                tags.get("description")
                or tags.get("name")
                or tags.get("surveillance:type", "ALPR")
            )
            direction = (
                tags.get("camera:direction")
                or tags.get("direction")
                or tags.get("surveillance:direction")
                or "Unknown"
            )

            # ALPR cameras typically have no public media URL — use a
            # placeholder so the pin renders but no proxy attempt is made.
            cameras.append(
                {
                    "id": f"ALPR-{item.get('id')}",
                    "source_agency": str(operator)[:60],
                    "lat": lat,
                    "lon": lon,
                    "direction_facing": f"ALPR: {str(description)[:100]} ({str(direction)[:30]})",
                    "media_url": "",
                    "media_type": "none",
                    "refresh_rate_seconds": 0,
                }
            )
        logger.info("OSM ALPR ingestor found %d cameras", len(cameras))
        return cameras
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# DGT Spain — National Road Cameras
|
||||
|
||||
+260
-7
@@ -10,6 +10,10 @@ class Settings(BaseSettings):
|
||||
ALLOW_INSECURE_ADMIN: bool = False
|
||||
PUBLIC_API_KEY: str = ""
|
||||
|
||||
# OpenClaw agent connectivity
|
||||
OPENCLAW_HMAC_SECRET: str = "" # HMAC shared secret for direct mode (auto-generated if empty)
|
||||
OPENCLAW_ACCESS_TIER: str = "restricted" # "full" or "restricted"
|
||||
|
||||
# Data sources
|
||||
AIS_API_KEY: str = ""
|
||||
OPENSKY_CLIENT_ID: str = ""
|
||||
@@ -27,7 +31,8 @@ class Settings(BaseSettings):
|
||||
MESH_RNS_ENABLED: bool = False
|
||||
MESH_ARTI_ENABLED: bool = False
|
||||
MESH_ARTI_SOCKS_PORT: int = 9050
|
||||
MESH_RELAY_PEERS: str = "http://cipher0.shadowbroker.info:8000"
|
||||
MESH_RELAY_PEERS: str = ""
|
||||
MESH_DEFAULT_SYNC_PEERS: str = "https://node.shadowbroker.info"
|
||||
MESH_BOOTSTRAP_DISABLED: bool = False
|
||||
MESH_BOOTSTRAP_MANIFEST_PATH: str = "data/bootstrap_peers.json"
|
||||
MESH_BOOTSTRAP_SIGNER_PUBLIC_KEY: str = ""
|
||||
@@ -37,7 +42,7 @@ class Settings(BaseSettings):
|
||||
MESH_RELAY_PUSH_TIMEOUT_S: int = 10
|
||||
MESH_RELAY_MAX_FAILURES: int = 3
|
||||
MESH_RELAY_FAILURE_COOLDOWN_S: int = 120
|
||||
MESH_PEER_PUSH_SECRET: str = "Mv63UvLfwqOEVWeRBXjA8MtFl2nEkkhUlLYVHiX1Zzo"
|
||||
MESH_PEER_PUSH_SECRET: str = ""
|
||||
MESH_RNS_APP_NAME: str = "shadowbroker"
|
||||
MESH_RNS_ASPECT: str = "infonet"
|
||||
MESH_RNS_IDENTITY_PATH: str = ""
|
||||
@@ -60,7 +65,8 @@ class Settings(BaseSettings):
|
||||
# Keep a low background cadence on private RNS links so quiet nodes are less
|
||||
# trivially fingerprintable by silence alone. Set to 0 to disable explicitly.
|
||||
MESH_RNS_COVER_INTERVAL_S: int = 30
|
||||
MESH_RNS_COVER_SIZE: int = 64
|
||||
MESH_RNS_COVER_SIZE: int = 512
|
||||
MESH_DM_MAILBOX_TTL_S: int = 900
|
||||
MESH_RNS_IBF_WINDOW: int = 256
|
||||
MESH_RNS_IBF_TABLE_SIZE: int = 64
|
||||
MESH_RNS_IBF_MINHASH_SIZE: int = 16
|
||||
@@ -75,44 +81,221 @@ class Settings(BaseSettings):
|
||||
MESH_RNS_IBF_FAIL_THRESHOLD: int = 3
|
||||
MESH_RNS_IBF_COOLDOWN_S: int = 120
|
||||
MESH_VERIFY_INTERVAL_S: int = 600
|
||||
MESH_VERIFY_SIGNATURES: bool = True
|
||||
# MESH_VERIFY_SIGNATURES is intentionally removed — the audit loop in main.py
|
||||
# always calls validate_chain_incremental(verify_signatures=True). Any value
|
||||
# set in the environment is ignored.
|
||||
MESH_DM_SECURE_MODE: bool = True
|
||||
MESH_DM_TOKEN_PEPPER: str = ""
|
||||
MESH_DM_ALLOW_LEGACY_GET: bool = False
|
||||
MESH_ALLOW_LEGACY_DM1_UNTIL: str = ""
|
||||
MESH_ALLOW_LEGACY_DM_GET_UNTIL: str = ""
|
||||
MESH_ALLOW_LEGACY_DM_SIGNATURE_COMPAT_UNTIL: str = ""
|
||||
MESH_DM_PERSIST_SPOOL: bool = False
|
||||
MESH_DM_RELAY_FILE_PATH: str = ""
|
||||
MESH_DM_RELAY_AUTO_RELOAD: bool = False
|
||||
MESH_DM_REQUIRE_SENDER_SEAL_SHARED: bool = True
|
||||
MESH_DM_NONCE_TTL_S: int = 300
|
||||
MESH_DM_NONCE_CACHE_MAX: int = 4096
|
||||
MESH_DM_NONCE_PER_AGENT_MAX: int = 256
|
||||
MESH_DM_REQUEST_MAX_AGE_S: int = 300
|
||||
MESH_DM_REQUEST_MAILBOX_LIMIT: int = 12
|
||||
MESH_DM_SHARED_MAILBOX_LIMIT: int = 48
|
||||
MESH_DM_SELF_MAILBOX_LIMIT: int = 12
|
||||
MESH_BLOCK_LEGACY_AGENT_ID_LOOKUP: bool = True
|
||||
MESH_ALLOW_COMPAT_DM_INVITE_IMPORT: bool = False
|
||||
MESH_ALLOW_COMPAT_DM_INVITE_IMPORT_UNTIL: str = ""
|
||||
MESH_ALLOW_LEGACY_NODE_ID_COMPAT_UNTIL: str = ""
|
||||
# Rotate voter-blinding salts on a rolling cadence so new reputation
|
||||
# events do not reuse one forever-stable blinded identity.
|
||||
MESH_VOTER_BLIND_SALT_ROTATE_DAYS: int = 30
|
||||
# Keep historical salts long enough to cover live vote records, so
|
||||
# duplicate-vote detection and wallet-cost accounting survive rotation.
|
||||
MESH_VOTER_BLIND_SALT_GRACE_DAYS: int = 30
|
||||
MESH_DM_MAX_MSG_BYTES: int = 8192
|
||||
MESH_DM_ALLOW_SENDER_SEAL: bool = False
|
||||
# TTL for DH key and prekey bundle registrations — stale entries are pruned.
|
||||
MESH_DM_KEY_TTL_DAYS: int = 30
|
||||
# TTL for invite-scoped prekey lookup aliases; shorter windows reduce
|
||||
# long-lived relay linkage between opaque lookup handles and agent IDs.
|
||||
MESH_DM_PREKEY_LOOKUP_ALIAS_TTL_DAYS: int = 14
|
||||
# TTL for relay witness history; keep continuity metadata bounded instead
|
||||
# of relying on a hidden hardcoded retention window.
|
||||
MESH_DM_WITNESS_TTL_DAYS: int = 14
|
||||
# TTL for mailbox binding metadata — shorter = smaller metadata footprint on disk.
|
||||
MESH_DM_BINDING_TTL_DAYS: int = 7
|
||||
MESH_DM_BINDING_TTL_DAYS: int = 3
|
||||
# When False, mailbox bindings are memory-only (agents re-register on restart).
|
||||
MESH_DM_METADATA_PERSIST: bool = True
|
||||
# Enable explicitly only if restart continuity is worth persisting DM graph metadata.
|
||||
MESH_DM_METADATA_PERSIST: bool = False
|
||||
# Second explicit opt-in for at-rest DM metadata persistence. This keeps a
|
||||
# single boolean flip from silently writing mailbox graph metadata to disk.
|
||||
MESH_DM_METADATA_PERSIST_ACKNOWLEDGE: bool = False
|
||||
# Optional import path for externally managed root witness material packages.
|
||||
# Relative paths resolve from the backend directory.
|
||||
MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_PATH: str = ""
|
||||
# Optional URI for externally managed root witness material packages.
|
||||
# Supports file:// and http(s):// sources; when set it overrides the local path.
|
||||
MESH_DM_ROOT_EXTERNAL_WITNESS_IMPORT_URI: str = ""
|
||||
# Maximum acceptable age for externally sourced root witness packages.
|
||||
# Strong DM trust fails closed when the imported package exported_at is older than this.
|
||||
MESH_DM_ROOT_EXTERNAL_WITNESS_MAX_AGE_S: int = 3600
|
||||
# Warning threshold for externally sourced root witness packages.
|
||||
# When current external witness material reaches this age, operator health degrades to warning
|
||||
# before the strong path eventually fails closed at MAX_AGE.
|
||||
MESH_DM_ROOT_EXTERNAL_WITNESS_WARN_AGE_S: int = 2700
|
||||
# Optional export path for the append-only stable-root transparency ledger.
|
||||
# Relative paths resolve from the backend directory.
|
||||
MESH_DM_ROOT_TRANSPARENCY_LEDGER_EXPORT_PATH: str = ""
|
||||
# Optional URI used to read back and verify published transparency ledgers.
|
||||
# Supports file:// and http(s):// sources.
|
||||
MESH_DM_ROOT_TRANSPARENCY_LEDGER_READBACK_URI: str = ""
|
||||
# Maximum acceptable age for externally read transparency ledgers.
|
||||
# Strong DM trust fails closed when exported_at is older than this.
|
||||
MESH_DM_ROOT_TRANSPARENCY_LEDGER_MAX_AGE_S: int = 3600
|
||||
# Warning threshold for externally read transparency ledgers.
|
||||
# When current external transparency readback reaches this age, operator health degrades to warning
|
||||
# before the strong path eventually fails closed at MAX_AGE.
|
||||
MESH_DM_ROOT_TRANSPARENCY_LEDGER_WARN_AGE_S: int = 2700
|
||||
MESH_SCOPED_TOKENS: str = ""
|
||||
# Deprecated legacy env vars kept for backward config compatibility only.
|
||||
# Ordinary shipped gate flows keep MLS decrypt local; backend decrypt is
|
||||
# reserved for explicit recovery reads.
|
||||
MESH_GATE_BACKEND_DECRYPT_COMPAT: bool = False
|
||||
MESH_GATE_BACKEND_DECRYPT_COMPAT_ACKNOWLEDGE: bool = False
|
||||
MESH_BACKEND_GATE_DECRYPT_COMPAT: bool = False
|
||||
# Deprecated legacy env vars kept for backward config compatibility only.
|
||||
# Ordinary shipped gate flows keep compose/post local and submit encrypted
|
||||
# payloads to the backend for sign/post only.
|
||||
MESH_GATE_BACKEND_PLAINTEXT_COMPAT: bool = False
|
||||
MESH_GATE_BACKEND_PLAINTEXT_COMPAT_ACKNOWLEDGE: bool = False
|
||||
MESH_BACKEND_GATE_PLAINTEXT_COMPAT: bool = False
|
||||
# Runtime gate for recovery envelopes. When off, per-gate
|
||||
# envelope_recovery / envelope_always policies fail closed to
|
||||
# envelope_disabled. Default True so the Reddit-like durable history
|
||||
# model works out of the box: any member with the gate_secret can
|
||||
# decrypt every envelope encrypted from the moment they had that key.
|
||||
# Set MESH_GATE_RECOVERY_ENVELOPE_ENABLE=false to revert to MLS-only
|
||||
# forward-secret behavior (your own history becomes unreadable after
|
||||
# the sending ratchet advances).
|
||||
MESH_GATE_RECOVERY_ENVELOPE_ENABLE: bool = True
|
||||
MESH_GATE_RECOVERY_ENVELOPE_ENABLE_ACKNOWLEDGE: bool = True
|
||||
# Durable gate plaintext retention is disabled by default. Enable only
|
||||
# when the operator explicitly accepts the at-rest privacy tradeoff.
|
||||
MESH_GATE_PLAINTEXT_PERSIST: bool = False
|
||||
MESH_GATE_PLAINTEXT_PERSIST_ACKNOWLEDGE: bool = False
|
||||
MESH_GATE_SESSION_ROTATE_MSGS: int = 50
|
||||
MESH_GATE_SESSION_ROTATE_S: int = 3600
|
||||
MESH_GATE_LEGACY_ENVELOPE_FALLBACK_MAX_DAYS: int = 30
|
||||
# Add a randomized grace window before anonymous gate-session auto-rotation
|
||||
# so threshold-triggered identity swaps are less trivially correlated.
|
||||
MESH_GATE_SESSION_ROTATE_JITTER_S: int = 180
|
||||
# Gate persona (named identity) rotation thresholds. Rotating the signing
|
||||
# key limits the linkability window. Zero = disabled.
|
||||
MESH_GATE_PERSONA_ROTATE_MSGS: int = 200
|
||||
MESH_GATE_PERSONA_ROTATE_S: int = 604800 # 7 days
|
||||
MESH_GATE_PERSONA_ROTATE_JITTER_S: int = 600
|
||||
# Feature-flagged session stream for multiplexed gate room updates.
|
||||
# Disabled by default so rollout stays explicit while stream-first rooms bake.
|
||||
MESH_GATE_SESSION_STREAM_ENABLED: bool = False
|
||||
MESH_GATE_SESSION_STREAM_HEARTBEAT_S: int = 20
|
||||
MESH_GATE_SESSION_STREAM_BATCH_MS: int = 1500
|
||||
MESH_GATE_SESSION_STREAM_MAX_GATES: int = 16
|
||||
# Private gate APIs expose a backward-jittered timestamp view so observers
|
||||
# cannot trivially align exact send times from response metadata alone.
|
||||
MESH_GATE_TIMESTAMP_JITTER_S: int = 60
|
||||
# Ban/kick gate-secret rotation is on by default (hardening Rec #10): the
|
||||
# invariant has baked and a ban that does not rotate is effectively a
|
||||
# display-only removal. Set MESH_GATE_BAN_KICK_ROTATION_ENABLE=false to
|
||||
# revert to observe-only during incident triage.
|
||||
MESH_GATE_BAN_KICK_ROTATION_ENABLE: bool = True
|
||||
MESH_BLOCK_LEGACY_NODE_ID_COMPAT: bool = True
|
||||
MESH_ALLOW_RAW_SECURE_STORAGE_FALLBACK: bool = False
|
||||
MESH_ACK_RAW_FALLBACK_AT_OWN_RISK: bool = False
|
||||
MESH_SECURE_STORAGE_SECRET: str = ""
|
||||
MESH_PRIVATE_LOG_TTL_S: int = 900
|
||||
# Sprint 1 rollout: restored DM boot probes stay disabled by default until
|
||||
# the architect reviews false positives from the observe-only path.
|
||||
MESH_DM_RESTORED_SESSION_BOOT_PROBE_ENABLE: bool = False
|
||||
# Queued DM release requires explicit per-item approval before any weaker
|
||||
# relay fallback. Silent fallback is not a safe private-mode default.
|
||||
MESH_PRIVATE_RELEASE_APPROVAL_ENABLE: bool = True
|
||||
# Expiry for user-approved scoped private relay fallback policy. The policy
|
||||
# is still bounded by hidden-transport checks before it can auto-release.
|
||||
MESH_PRIVATE_RELAY_POLICY_TTL_S: int = 3600
|
||||
# Background privacy prewarm prepares keys/aliases/transport readiness
|
||||
# before send-time. Anonymous mode uses a cadence gate so user clicks do
|
||||
# not directly create hidden-transport activity.
|
||||
MESH_PRIVACY_PREWARM_ENABLE: bool = True
|
||||
MESH_PRIVACY_PREWARM_INTERVAL_S: int = 300
|
||||
MESH_PRIVACY_PREWARM_ANON_CADENCE_S: int = 300
|
||||
# Sprint 4 rollout: authenticated RNS cover markers remain disabled until
|
||||
# the observer-equivalence and receive-path DoS tests are green.
|
||||
MESH_RNS_COVER_AUTH_MARKER_ENABLE: bool = False
|
||||
# Signed-write revocation lookups use a short local TTL; stale entries force
|
||||
# a local rebuild before honor. Offline/local-refresh failures remain
|
||||
# observe-only until the later enforcement sprint.
|
||||
MESH_SIGNED_REVOCATION_CACHE_TTL_S: int = 300
|
||||
MESH_SIGNED_REVOCATION_CACHE_ENFORCE: bool = True
|
||||
MESH_SIGNED_WRITE_CONTEXT_REQUIRED: bool = True
|
||||
# Sprint 5 rollout: when enabled, root witness finality requires
|
||||
# independent quorum for threshold>1 witnessed roots before they count as
|
||||
# verified first-contact provenance.
|
||||
WORMHOLE_ROOT_WITNESS_FINALITY_ENFORCE: bool = False
|
||||
# Optional JSON artifact generated by CI/release workflow for the Sprint 8
|
||||
# release gate. Relative paths resolve from the backend directory.
|
||||
# dev = permissive local/dev behavior; testnet-private = strict private
|
||||
# defaults; release-candidate = no compatibility/debug escape hatches.
|
||||
MESH_RELEASE_PROFILE: str = "dev"
|
||||
MESH_RELEASE_ATTESTATION_PATH: str = ""
|
||||
# Operator release attestation for the Sprint 8 release gate. This does
|
||||
# not change runtime behavior; it only records that the DM relay security
|
||||
# suite was run and passed for the release candidate.
|
||||
MESH_RELEASE_DM_RELAY_SECURITY_SUITE_GREEN: bool = False
|
||||
PRIVACY_CORE_MIN_VERSION: str = "0.1.0"
|
||||
PRIVACY_CORE_ALLOWED_SHA256: str = ""
|
||||
PRIVACY_CORE_DEV_OVERRIDE: bool = False
|
||||
# Sprint 4 rollout: fail fast when the loaded privacy-core artifact is
|
||||
# missing required FFI symbols expected by the current Python bridge.
|
||||
PRIVACY_CORE_EXPORT_SET_AUDIT_ENABLE: bool = True
|
||||
# Clearnet fallback policy for private-tier messages.
|
||||
# "block" (default) = refuse to send private messages over clearnet.
|
||||
# "allow" = fall back to clearnet when Tor/RNS is unavailable (weaker privacy).
|
||||
MESH_PRIVATE_CLEARNET_FALLBACK: str = "block"
|
||||
# Second explicit opt-in for private-tier clearnet fallback. Without this
|
||||
# acknowledgement, "allow" remains requested but not effective.
|
||||
MESH_PRIVATE_CLEARNET_FALLBACK_ACKNOWLEDGE: bool = False
|
||||
# Meshtastic MQTT bridge — disabled by default to avoid hammering the
|
||||
# public broker. Users opt in explicitly.
|
||||
MESH_MQTT_ENABLED: bool = False
|
||||
# Meshtastic MQTT broker credentials (defaults match public firmware).
|
||||
MESH_MQTT_BROKER: str = "mqtt.meshtastic.org"
|
||||
MESH_MQTT_PORT: int = 1883
|
||||
MESH_MQTT_USER: str = "meshdev"
|
||||
MESH_MQTT_PASS: str = "large4cats"
|
||||
# Hex-encoded PSK — empty string means use the default LongFast key.
|
||||
# Must decode to exactly 16 or 32 bytes when set.
|
||||
MESH_MQTT_PSK: str = ""
|
||||
# Optional operator-provided Meshtastic node ID (e.g. "!abcd1234") included
|
||||
# in the User-Agent when fetching from meshtastic.liamcottle.net so the
|
||||
# service operator can identify per-install traffic instead of a generic
|
||||
# "ShadowBroker" aggregate.
|
||||
MESHTASTIC_OPERATOR_CALLSIGN: str = ""
|
||||
|
||||
# SAR (Synthetic Aperture Radar) data layer
|
||||
# Mode A — free catalog metadata, no account, default-on
|
||||
MESH_SAR_CATALOG_ENABLED: bool = True
|
||||
# Mode B — free pre-processed anomalies (OPERA / EGMS / GFM / EMS / UNOSAT)
|
||||
# Two-step opt-in: must be "allow" AND _ACKNOWLEDGE must be true
|
||||
MESH_SAR_PRODUCTS_FETCH: str = "block"
|
||||
MESH_SAR_PRODUCTS_FETCH_ACKNOWLEDGE: bool = False
|
||||
# NASA Earthdata Login (free) — required for OPERA products
|
||||
MESH_SAR_EARTHDATA_USER: str = ""
|
||||
MESH_SAR_EARTHDATA_TOKEN: str = ""
|
||||
# Copernicus Data Space (free) — required for EGMS / EMS products
|
||||
MESH_SAR_COPERNICUS_USER: str = ""
|
||||
MESH_SAR_COPERNICUS_TOKEN: str = ""
|
||||
# Whether OpenClaw agents may read/act on the SAR layer
|
||||
MESH_SAR_OPENCLAW_ENABLED: bool = True
|
||||
# Require private-tier transport before signing/broadcasting SAR anomalies
|
||||
MESH_SAR_REQUIRE_PRIVATE_TIER: bool = True
|
||||
|
||||
model_config = SettingsConfigDict(env_file=".env", extra="ignore")
|
||||
|
||||
@@ -120,3 +303,73 @@ class Settings(BaseSettings):
|
||||
@lru_cache
|
||||
def get_settings() -> Settings:
|
||||
return Settings()
|
||||
|
||||
|
||||
def private_clearnet_fallback_requested(settings: Settings | None = None) -> str:
|
||||
snapshot = settings or get_settings()
|
||||
policy = str(getattr(snapshot, "MESH_PRIVATE_CLEARNET_FALLBACK", "block") or "block").strip().lower()
|
||||
return "allow" if policy == "allow" else "block"
|
||||
|
||||
|
||||
def private_clearnet_fallback_effective(settings: Settings | None = None) -> str:
|
||||
snapshot = settings or get_settings()
|
||||
requested = private_clearnet_fallback_requested(snapshot)
|
||||
acknowledged = bool(getattr(snapshot, "MESH_PRIVATE_CLEARNET_FALLBACK_ACKNOWLEDGE", False))
|
||||
if requested == "allow" and acknowledged:
|
||||
return "allow"
|
||||
return "block"
|
||||
|
||||
|
||||
def backend_gate_decrypt_compat_effective(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(
|
||||
getattr(snapshot, "MESH_BACKEND_GATE_DECRYPT_COMPAT", False)
|
||||
or getattr(snapshot, "MESH_GATE_BACKEND_DECRYPT_COMPAT", False)
|
||||
)
|
||||
|
||||
|
||||
def backend_gate_plaintext_compat_effective(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(
|
||||
getattr(snapshot, "MESH_BACKEND_GATE_PLAINTEXT_COMPAT", False)
|
||||
or getattr(snapshot, "MESH_GATE_BACKEND_PLAINTEXT_COMPAT", False)
|
||||
)
|
||||
|
||||
|
||||
def gate_recovery_envelope_effective(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
requested = bool(getattr(snapshot, "MESH_GATE_RECOVERY_ENVELOPE_ENABLE", False))
|
||||
acknowledged = bool(getattr(snapshot, "MESH_GATE_RECOVERY_ENVELOPE_ENABLE_ACKNOWLEDGE", False))
|
||||
return requested and acknowledged
|
||||
|
||||
|
||||
def gate_plaintext_persist_effective(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
requested = bool(getattr(snapshot, "MESH_GATE_PLAINTEXT_PERSIST", False))
|
||||
acknowledged = bool(getattr(snapshot, "MESH_GATE_PLAINTEXT_PERSIST_ACKNOWLEDGE", False))
|
||||
return requested and acknowledged
|
||||
|
||||
|
||||
def gate_ban_kick_rotation_enabled(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(getattr(snapshot, "MESH_GATE_BAN_KICK_ROTATION_ENABLE", False))
|
||||
|
||||
|
||||
def dm_restored_session_boot_probe_enabled(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(getattr(snapshot, "MESH_DM_RESTORED_SESSION_BOOT_PROBE_ENABLE", False))
|
||||
|
||||
|
||||
def signed_revocation_cache_ttl_s(settings: Settings | None = None) -> int:
|
||||
snapshot = settings or get_settings()
|
||||
return max(0, int(getattr(snapshot, "MESH_SIGNED_REVOCATION_CACHE_TTL_S", 300) or 0))
|
||||
|
||||
|
||||
def signed_revocation_cache_enforce(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(getattr(snapshot, "MESH_SIGNED_REVOCATION_CACHE_ENFORCE", False))
|
||||
|
||||
|
||||
def wormhole_root_witness_finality_enforce(settings: Settings | None = None) -> bool:
|
||||
snapshot = settings or get_settings()
|
||||
return bool(getattr(snapshot, "WORMHOLE_ROOT_WITNESS_FINALITY_ENFORCE", False))
|
||||
|
||||
@@ -8,9 +8,13 @@ Correlation types:
|
||||
- RF Anomaly: GPS jamming + internet outage (both required)
|
||||
- Military Buildup: Military flights + naval vessels + GDELT conflict events
|
||||
- Infrastructure Cascade: Internet outage + KiwiSDR offline in same zone
|
||||
- Possible Contradiction: Official denial/statement + infrastructure disruption
|
||||
in same region — hypothesis generator, NOT verdict
|
||||
"""
|
||||
|
||||
import logging
|
||||
import math
|
||||
import re
|
||||
from collections import defaultdict
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
@@ -306,6 +310,427 @@ def _detect_infra_cascades(data: dict) -> list[dict]:
|
||||
return alerts
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Possible Contradiction: official denial/statement + infra disruption
|
||||
#
|
||||
# This is a HYPOTHESIS GENERATOR, not a verdict engine. It says "LOOK HERE"
|
||||
# when an official statement (denial, clarification, refusal) co-locates with
|
||||
# infrastructure disruption (internet outage, sigint change). The human or
|
||||
# higher-order reasoning decides what actually happened.
|
||||
#
|
||||
# Context ratings:
|
||||
# STRONG — denial + outage + prediction market movement in same region
|
||||
# MODERATE — denial + outage (no market signal)
|
||||
# WEAK — denial + minor outage or distant co-location
|
||||
# DETECTION_GAP — denial found but NO telemetry to verify (equally valuable)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Denial / official-statement patterns in headlines and URL slugs
|
||||
_DENIAL_PATTERNS = [
|
||||
re.compile(p, re.IGNORECASE) for p in [
|
||||
r"\bden(?:y|ies|ied|ial)\b",
|
||||
r"\brefut(?:e[ds]?|ing)\b",
|
||||
r"\breject(?:s|ed|ing)?\b",
|
||||
r"\bclarif(?:y|ies|ied|ication)\b",
|
||||
r"\bdismiss(?:es|ed|ing)?\b",
|
||||
r"\bno\s+attack\b",
|
||||
r"\bdid\s+not\s+(?:attack|strike|bomb|target|order|invade|kill)\b",
|
||||
r"\bnever\s+(?:attack|strike|bomb|target|order|invade|happen)\b",
|
||||
r"\bfalse\s+(?:report|claim|allegation|rumor|narrative)\b",
|
||||
r"\bmisinformation\b",
|
||||
r"\bdisinformation\b",
|
||||
r"\bpropaganda\b",
|
||||
r"\b(?:army|military|government|ministry|official)\s+(?:says|clarifies|denies|refutes)\b",
|
||||
r"\brumor[s]?\b.*\buntrue\b",
|
||||
r"\bcategorically\b",
|
||||
r"\bbaseless\b",
|
||||
]
|
||||
]
|
||||
|
||||
# Broader cell radius for sparse telemetry regions (Africa, Central Asia, etc.)
|
||||
# These regions have fewer IODA/RIPE probes so outage data is sparser
|
||||
_SPARSE_REGIONS_LAT_RANGES = [
|
||||
(-35, 37), # Africa roughly
|
||||
(25, 50), # Central Asia band (when lng 40-90)
|
||||
]
|
||||
|
||||
|
||||
def _is_sparse_region(lat: float, lng: float) -> bool:
|
||||
"""Check if coordinates fall in a region with sparse telemetry coverage."""
|
||||
# Africa
|
||||
if -35 <= lat <= 37 and -20 <= lng <= 55:
|
||||
return True
|
||||
# Central Asia
|
||||
if 25 <= lat <= 50 and 40 <= lng <= 90:
|
||||
return True
|
||||
# South America interior
|
||||
if -55 <= lat <= 12 and -80 <= lng <= -35:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _haversine_km(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
|
||||
"""Great-circle distance in km."""
|
||||
R = 6371.0
|
||||
dlat = math.radians(lat2 - lat1)
|
||||
dlon = math.radians(lon2 - lon1)
|
||||
a = (math.sin(dlat / 2) ** 2 +
|
||||
math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *
|
||||
math.sin(dlon / 2) ** 2)
|
||||
return R * 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
|
||||
|
||||
|
||||
def _matches_denial(text: str) -> bool:
|
||||
"""Check if text matches any denial/official-statement pattern."""
|
||||
return any(p.search(text) for p in _DENIAL_PATTERNS)
|
||||
|
||||
|
||||
def _detect_contradictions(data: dict) -> list[dict]:
|
||||
"""Detect possible contradictions between official statements and telemetry.
|
||||
|
||||
Scans GDELT headlines for denial language, then checks whether internet
|
||||
outages or other infrastructure disruptions exist in the same geographic
|
||||
region. Scores confidence and lists alternative explanations.
|
||||
"""
|
||||
gdelt = data.get("gdelt") or []
|
||||
internet_outages = data.get("internet_outages") or []
|
||||
news = data.get("news") or []
|
||||
prediction_markets = data.get("prediction_markets") or []
|
||||
|
||||
# ── Step 1: Find GDELT events with denial/official-statement language ──
|
||||
denial_events: list[dict] = []
|
||||
|
||||
# GDELT comes as GeoJSON features
|
||||
gdelt_features = gdelt
|
||||
if isinstance(gdelt, dict):
|
||||
gdelt_features = gdelt.get("features", [])
|
||||
|
||||
for feature in gdelt_features:
|
||||
# Handle both GeoJSON features and flat dicts
|
||||
if "properties" in feature and "geometry" in feature:
|
||||
props = feature.get("properties", {})
|
||||
geom = feature.get("geometry", {})
|
||||
coords = geom.get("coordinates", [])
|
||||
if len(coords) >= 2:
|
||||
lng, lat = float(coords[0]), float(coords[1])
|
||||
else:
|
||||
continue
|
||||
headlines = props.get("_headlines_list", [])
|
||||
urls = props.get("_urls_list", [])
|
||||
name = props.get("name", "")
|
||||
count = props.get("count", 1)
|
||||
else:
|
||||
lat = feature.get("lat") or feature.get("actionGeo_Lat")
|
||||
lng = feature.get("lng") or feature.get("lon") or feature.get("actionGeo_Long")
|
||||
if lat is None or lng is None:
|
||||
continue
|
||||
lat, lng = float(lat), float(lng)
|
||||
headlines = [feature.get("title", "")]
|
||||
urls = [feature.get("sourceurl", "")]
|
||||
name = feature.get("name", "")
|
||||
count = 1
|
||||
|
||||
# Check all headlines + URL slugs for denial patterns
|
||||
all_text = " ".join(str(h) for h in headlines if h)
|
||||
all_text += " " + " ".join(str(u) for u in urls if u)
|
||||
|
||||
if _matches_denial(all_text):
|
||||
denial_events.append({
|
||||
"lat": lat,
|
||||
"lng": lng,
|
||||
"headlines": [h for h in headlines if h][:5],
|
||||
"urls": [u for u in urls if u][:3],
|
||||
"location_name": name,
|
||||
"event_count": count,
|
||||
})
|
||||
|
||||
# Also scan news articles for denial language
|
||||
for article in news:
|
||||
title = str(article.get("title", "") or "")
|
||||
desc = str(article.get("description", "") or article.get("summary", "") or "")
|
||||
if not _matches_denial(title + " " + desc):
|
||||
continue
|
||||
# News articles often lack coordinates — try to match to GDELT locations
|
||||
# For now, only include if we have coordinates
|
||||
lat = article.get("lat") or article.get("latitude")
|
||||
lng = article.get("lng") or article.get("lon") or article.get("longitude")
|
||||
if lat is not None and lng is not None:
|
||||
denial_events.append({
|
||||
"lat": float(lat),
|
||||
"lng": float(lng),
|
||||
"headlines": [title],
|
||||
"urls": [article.get("url") or article.get("link") or ""],
|
||||
"location_name": "",
|
||||
"event_count": 1,
|
||||
})
|
||||
|
||||
if not denial_events:
|
||||
return []
|
||||
|
||||
# ── Step 2: Cross-reference with internet outages ──
|
||||
alerts: list[dict] = []
|
||||
|
||||
for denial in denial_events:
|
||||
d_lat, d_lng = denial["lat"], denial["lng"]
|
||||
sparse = _is_sparse_region(d_lat, d_lng)
|
||||
search_radius_km = 1500.0 if sparse else 500.0
|
||||
|
||||
# Find nearby outages
|
||||
nearby_outages: list[dict] = []
|
||||
for outage in internet_outages:
|
||||
o_lat = outage.get("lat") or outage.get("latitude")
|
||||
o_lng = outage.get("lng") or outage.get("lon") or outage.get("longitude")
|
||||
if o_lat is None or o_lng is None:
|
||||
continue
|
||||
try:
|
||||
dist = _haversine_km(d_lat, d_lng, float(o_lat), float(o_lng))
|
||||
except (ValueError, TypeError):
|
||||
continue
|
||||
if dist <= search_radius_km:
|
||||
nearby_outages.append({
|
||||
"region": outage.get("region_name") or outage.get("country_name", ""),
|
||||
"severity": _outage_pct(outage),
|
||||
"distance_km": round(dist, 0),
|
||||
"level": outage.get("level", ""),
|
||||
})
|
||||
|
||||
# ── Step 3: Check prediction markets for related movements ──
|
||||
denial_text = " ".join(denial["headlines"]).lower()
|
||||
related_markets: list[dict] = []
|
||||
for market in prediction_markets:
|
||||
m_title = str(market.get("title", "") or market.get("question", "") or "").lower()
|
||||
# Look for keyword overlap between denial and market
|
||||
denial_words = set(re.findall(r"[a-z]{4,}", denial_text))
|
||||
market_words = set(re.findall(r"[a-z]{4,}", m_title))
|
||||
overlap = denial_words & market_words - {"that", "this", "with", "from", "have", "been", "were", "will", "says", "said"}
|
||||
if len(overlap) >= 2:
|
||||
prob = market.get("probability") or market.get("lastTradePrice") or market.get("yes_price")
|
||||
if prob is not None:
|
||||
related_markets.append({
|
||||
"title": market.get("title") or market.get("question"),
|
||||
"probability": float(prob),
|
||||
})
|
||||
|
||||
# ── Step 4: Score confidence and assign context rating ──
|
||||
indicators = 1 # denial itself
|
||||
drivers: list[str] = []
|
||||
|
||||
# Primary driver: the denial headline
|
||||
headline_display = denial["headlines"][0] if denial["headlines"] else "Official statement"
|
||||
if len(headline_display) > 80:
|
||||
headline_display = headline_display[:77] + "..."
|
||||
drivers.append(f'"{headline_display}"')
|
||||
|
||||
# Outage co-location
|
||||
has_outage = False
|
||||
if nearby_outages:
|
||||
best_outage = max(nearby_outages, key=lambda o: o["severity"])
|
||||
if best_outage["severity"] >= 10:
|
||||
indicators += 1
|
||||
has_outage = True
|
||||
drivers.append(
|
||||
f"Internet outage {best_outage['severity']:.0f}% "
|
||||
f"({best_outage['region']}, {best_outage['distance_km']:.0f}km away)"
|
||||
)
|
||||
elif best_outage["severity"] > 0:
|
||||
indicators += 0.5 # minor outage, partial indicator
|
||||
has_outage = True
|
||||
drivers.append(
|
||||
f"Minor outage ({best_outage['region']}, "
|
||||
f"{best_outage['distance_km']:.0f}km away)"
|
||||
)
|
||||
|
||||
# Prediction market signal
|
||||
has_market = False
|
||||
if related_markets:
|
||||
indicators += 1
|
||||
has_market = True
|
||||
top_market = related_markets[0]
|
||||
drivers.append(
|
||||
f"Market: \"{top_market['title'][:50]}\" "
|
||||
f"at {top_market['probability']:.0%}"
|
||||
)
|
||||
|
||||
# Multiple denial sources strengthen the signal
|
||||
if denial["event_count"] > 1:
|
||||
indicators += 0.5
|
||||
drivers.append(f"{denial['event_count']} sources reporting")
|
||||
|
||||
# Context rating
|
||||
if has_outage and has_market:
|
||||
context = "STRONG"
|
||||
elif has_outage:
|
||||
context = "MODERATE"
|
||||
elif has_market:
|
||||
context = "WEAK" # market signal without infra disruption
|
||||
else:
|
||||
context = "DETECTION_GAP"
|
||||
|
||||
# Severity mapping
|
||||
if context == "STRONG":
|
||||
sev = "high"
|
||||
elif context == "MODERATE":
|
||||
sev = "medium"
|
||||
else:
|
||||
sev = "low"
|
||||
|
||||
# Alternative explanations (always present — this is a hypothesis generator)
|
||||
alternatives: list[str] = []
|
||||
if has_outage:
|
||||
alternatives.append("Routine infrastructure maintenance or cable damage")
|
||||
alternatives.append("Weather-related outage coinciding with news cycle")
|
||||
if not has_outage and context == "DETECTION_GAP":
|
||||
alternatives.append("Statement may be truthful — no contradicting telemetry found")
|
||||
alternatives.append("Telemetry coverage gap in this region")
|
||||
alternatives.append("Denial may be responding to social media rumors, not real events")
|
||||
|
||||
lat_c, lng_c = _cell_center(_cell_key(d_lat, d_lng))
|
||||
alerts.append({
|
||||
"lat": lat_c,
|
||||
"lng": lng_c,
|
||||
"type": "contradiction",
|
||||
"severity": sev,
|
||||
"score": _severity_score(sev),
|
||||
"drivers": drivers[:4],
|
||||
"cell_size": _CELL_SIZE,
|
||||
"context": context,
|
||||
"alternatives": alternatives[:3],
|
||||
"location_name": denial.get("location_name", ""),
|
||||
"headlines": denial["headlines"][:3],
|
||||
"related_markets": related_markets[:3],
|
||||
"nearby_outages": nearby_outages[:5],
|
||||
})
|
||||
|
||||
# Deduplicate: keep highest-scored alert per cell
|
||||
seen_cells: dict[str, dict] = {}
|
||||
for alert in alerts:
|
||||
key = _cell_key(alert["lat"], alert["lng"])
|
||||
if key not in seen_cells or alert["score"] > seen_cells[key]["score"]:
|
||||
seen_cells[key] = alert
|
||||
|
||||
result = list(seen_cells.values())
|
||||
if result:
|
||||
by_context = defaultdict(int)
|
||||
for a in result:
|
||||
by_context[a["context"]] += 1
|
||||
logger.info(
|
||||
"Contradictions: %d possible (%s)",
|
||||
len(result),
|
||||
", ".join(f"{v} {k}" for k, v in sorted(by_context.items())),
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Correlation → Pin bridge
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Types and their pin categories
|
||||
_CORR_PIN_CATEGORIES = {
|
||||
"rf_anomaly": "anomaly",
|
||||
"military_buildup": "military",
|
||||
"infra_cascade": "infrastructure",
|
||||
"contradiction": "research",
|
||||
}
|
||||
|
||||
# Deduplicate: don't re-pin the same cell within this window (seconds).
|
||||
_CORR_PIN_DEDUP_WINDOW = 600 # 10 minutes
|
||||
_recent_corr_pins: dict[str, float] = {}
|
||||
|
||||
|
||||
def _auto_pin_correlations(alerts: list[dict]) -> int:
|
||||
"""Create AI Intel pins for high-severity correlation alerts.
|
||||
|
||||
Only pins alerts with severity >= medium. Uses cell-key dedup so the
|
||||
same grid cell doesn't get re-pinned every fetch cycle.
|
||||
|
||||
Returns the number of pins created this cycle.
|
||||
"""
|
||||
import time as _time
|
||||
|
||||
now = _time.time()
|
||||
|
||||
# Evict stale dedup entries
|
||||
expired = [k for k, ts in _recent_corr_pins.items() if now - ts > _CORR_PIN_DEDUP_WINDOW]
|
||||
for k in expired:
|
||||
_recent_corr_pins.pop(k, None)
|
||||
|
||||
created = 0
|
||||
for alert in alerts:
|
||||
sev = alert.get("severity", "low")
|
||||
if sev == "low":
|
||||
continue # Don't pin low-severity noise
|
||||
|
||||
lat = alert.get("lat")
|
||||
lng = alert.get("lng")
|
||||
if lat is None or lng is None:
|
||||
continue
|
||||
|
||||
# Dedup key: type + cell
|
||||
dedup_key = f"{alert['type']}:{_cell_key(lat, lng)}"
|
||||
if dedup_key in _recent_corr_pins:
|
||||
continue
|
||||
|
||||
category = _CORR_PIN_CATEGORIES.get(alert["type"], "anomaly")
|
||||
drivers = alert.get("drivers", [])
|
||||
atype = alert["type"]
|
||||
|
||||
if atype == "contradiction":
|
||||
ctx = alert.get("context", "")
|
||||
label = f"[{ctx}] Possible Contradiction"
|
||||
parts = list(drivers)
|
||||
if alert.get("alternatives"):
|
||||
parts.append("Alternatives: " + "; ".join(alert["alternatives"][:2]))
|
||||
description = " | ".join(parts) if parts else "Narrative contradiction detected"
|
||||
else:
|
||||
label = f"[{sev.upper()}] {atype.replace('_', ' ').title()}"
|
||||
description = "; ".join(drivers) if drivers else "Multi-layer correlation alert"
|
||||
|
||||
try:
|
||||
from services.ai_pin_store import create_pin
|
||||
|
||||
meta = {
|
||||
"correlation_type": atype,
|
||||
"severity": sev,
|
||||
"drivers": drivers,
|
||||
"cell_size": alert.get("cell_size", _CELL_SIZE),
|
||||
}
|
||||
# Add contradiction-specific metadata
|
||||
if atype == "contradiction":
|
||||
meta["context_rating"] = alert.get("context", "")
|
||||
meta["alternatives"] = alert.get("alternatives", [])
|
||||
meta["headlines"] = alert.get("headlines", [])
|
||||
meta["location_name"] = alert.get("location_name", "")
|
||||
if alert.get("related_markets"):
|
||||
meta["related_markets"] = alert["related_markets"]
|
||||
|
||||
create_pin(
|
||||
lat=lat,
|
||||
lng=lng,
|
||||
label=label,
|
||||
category=category,
|
||||
description=description,
|
||||
source="correlation_engine",
|
||||
confidence=alert.get("score", 60) / 100.0,
|
||||
ttl_hours=2.0, # Auto-expire correlation pins after 2 hours
|
||||
metadata=meta,
|
||||
)
|
||||
_recent_corr_pins[dedup_key] = now
|
||||
created += 1
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to auto-pin correlation: %s", exc)
|
||||
|
||||
if created:
|
||||
logger.info("Correlation engine auto-pinned %d alerts", created)
|
||||
return created
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Public API
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -330,13 +755,29 @@ def compute_correlations(data: dict) -> list[dict]:
|
||||
except Exception as e:
|
||||
logger.error("Correlation engine infra cascade error: %s", e)
|
||||
|
||||
# Contradiction detection removed from automated engine — too many false
|
||||
# positives from regex headline matching. Contradiction/analysis alerts are
|
||||
# now placed by OpenClaw agents via place_analysis_zone, which lets an LLM
|
||||
# reason about the evidence rather than pattern-matching keywords.
|
||||
try:
|
||||
from services.analysis_zone_store import get_live_zones
|
||||
alerts.extend(get_live_zones())
|
||||
except Exception as e:
|
||||
logger.error("Analysis zone merge error: %s", e)
|
||||
|
||||
rf = sum(1 for a in alerts if a["type"] == "rf_anomaly")
|
||||
mil = sum(1 for a in alerts if a["type"] == "military_buildup")
|
||||
infra = sum(1 for a in alerts if a["type"] == "infra_cascade")
|
||||
contra = sum(1 for a in alerts if a["type"] == "contradiction")
|
||||
if alerts:
|
||||
logger.info(
|
||||
"Correlations: %d alerts (%d rf, %d mil, %d infra)",
|
||||
len(alerts), rf, mil, infra,
|
||||
"Correlations: %d alerts (%d rf, %d mil, %d infra, %d contra)",
|
||||
len(alerts), rf, mil, infra, contra,
|
||||
)
|
||||
|
||||
# Correlation alerts are returned in the correlations data feed only.
|
||||
# They are NOT auto-pinned to AI Intel — that layer is reserved for
|
||||
# user / OpenClaw pins. Correlations are visualised via the dedicated
|
||||
# correlations overlay on the map.
|
||||
|
||||
return alerts
|
||||
|
||||
@@ -16,9 +16,12 @@ Heavy logic has been extracted into services/fetchers/:
|
||||
|
||||
import logging
|
||||
import concurrent.futures
|
||||
import json
|
||||
import math
|
||||
import os
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
@@ -56,6 +59,7 @@ from services.fetchers.earth_observation import ( # noqa: F401
|
||||
fetch_air_quality,
|
||||
fetch_volcanoes,
|
||||
fetch_viirs_change_nodes,
|
||||
fetch_uap_sightings,
|
||||
)
|
||||
from services.fetchers.infrastructure import ( # noqa: F401
|
||||
fetch_internet_outages,
|
||||
@@ -90,10 +94,35 @@ from services.fetchers.meshtastic_map import (
|
||||
load_meshtastic_cache_if_available,
|
||||
) # noqa: F401
|
||||
from services.fetchers.fimi import fetch_fimi # noqa: F401
|
||||
from services.fetchers.crowdthreat import fetch_crowdthreat # noqa: F401
|
||||
from services.fetchers.wastewater import fetch_wastewater # noqa: F401
|
||||
from services.fetchers.sar_catalog import fetch_sar_catalog # noqa: F401
|
||||
from services.fetchers.sar_products import fetch_sar_products # noqa: F401
|
||||
from services.ais_stream import prune_stale_vessels # noqa: F401
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
_SLOW_FETCH_S = float(os.environ.get("FETCH_SLOW_THRESHOLD_S", "5"))
|
||||
# Hard wall-clock limit per individual fetch task. A task that exceeds this
|
||||
# is treated as a failure so it cannot block an entire fetch tier indefinitely.
|
||||
_TASK_HARD_TIMEOUT_S = float(os.environ.get("FETCH_TASK_TIMEOUT_S", "120"))
|
||||
_FAST_STARTUP_CACHE_MAX_AGE_S = float(os.environ.get("FAST_STARTUP_CACHE_MAX_AGE_S", "300"))
|
||||
_FAST_STARTUP_CACHE_PATH = Path(__file__).resolve().parents[1] / "data" / "fast_startup_cache.json"
|
||||
_FAST_STARTUP_CACHE_KEYS = (
|
||||
"commercial_flights",
|
||||
"military_flights",
|
||||
"private_flights",
|
||||
"private_jets",
|
||||
"tracked_flights",
|
||||
"ships",
|
||||
"uavs",
|
||||
"gps_jamming",
|
||||
"satellites",
|
||||
"satellite_source",
|
||||
"satellite_analysis",
|
||||
"sigint",
|
||||
"sigint_totals",
|
||||
"trains",
|
||||
)
|
||||
|
||||
# Shared thread pool — reused across all fetch cycles instead of creating/destroying per tick
|
||||
_SHARED_EXECUTOR = concurrent.futures.ThreadPoolExecutor(
|
||||
@@ -101,6 +130,80 @@ _SHARED_EXECUTOR = concurrent.futures.ThreadPoolExecutor(
|
||||
)
|
||||
|
||||
|
||||
def _cache_json_safe(value):
|
||||
if isinstance(value, float):
|
||||
return value if math.isfinite(value) else None
|
||||
if isinstance(value, dict):
|
||||
return {str(k): _cache_json_safe(v) for k, v in value.items()}
|
||||
if isinstance(value, (list, tuple)):
|
||||
return [_cache_json_safe(v) for v in value]
|
||||
return value
|
||||
|
||||
|
||||
def _load_fast_startup_cache_if_available() -> bool:
|
||||
"""Seed moving layers from a recent disk cache while live fetches warm up."""
|
||||
if _FAST_STARTUP_CACHE_MAX_AGE_S <= 0 or not _FAST_STARTUP_CACHE_PATH.exists():
|
||||
return False
|
||||
try:
|
||||
with _FAST_STARTUP_CACHE_PATH.open("r", encoding="utf-8") as fh:
|
||||
payload = json.load(fh)
|
||||
cached_at = float(payload.get("cached_at") or 0)
|
||||
age_s = time.time() - cached_at
|
||||
if cached_at <= 0 or age_s > _FAST_STARTUP_CACHE_MAX_AGE_S:
|
||||
logger.info("Skipping stale fast startup cache (age %.1fs)", age_s)
|
||||
return False
|
||||
layers = payload.get("layers") or {}
|
||||
freshness = payload.get("freshness") or {}
|
||||
loaded: list[str] = []
|
||||
with _data_lock:
|
||||
for key in _FAST_STARTUP_CACHE_KEYS:
|
||||
if key in layers:
|
||||
latest_data[key] = layers[key]
|
||||
loaded.append(key)
|
||||
for key, ts in freshness.items():
|
||||
source_timestamps[str(key)] = ts
|
||||
if payload.get("last_updated"):
|
||||
latest_data["last_updated"] = payload.get("last_updated")
|
||||
if not loaded:
|
||||
return False
|
||||
from services.fetchers._store import bump_data_version
|
||||
|
||||
bump_data_version()
|
||||
logger.info(
|
||||
"Loaded fast startup cache for %d layers (age %.1fs) so the map can paint before remote feeds finish",
|
||||
len(loaded),
|
||||
age_s,
|
||||
)
|
||||
return True
|
||||
except Exception as e:
|
||||
logger.warning("Fast startup cache load failed (non-fatal): %s", e)
|
||||
return False
|
||||
|
||||
|
||||
def _save_fast_startup_cache() -> None:
|
||||
"""Persist recent moving layers for the next cold start."""
|
||||
try:
|
||||
with _data_lock:
|
||||
payload = {
|
||||
"cached_at": time.time(),
|
||||
"last_updated": latest_data.get("last_updated"),
|
||||
"layers": {key: latest_data.get(key) for key in _FAST_STARTUP_CACHE_KEYS},
|
||||
"freshness": {
|
||||
key: source_timestamps.get(key)
|
||||
for key in _FAST_STARTUP_CACHE_KEYS
|
||||
if source_timestamps.get(key)
|
||||
},
|
||||
}
|
||||
safe_payload = _cache_json_safe(payload)
|
||||
_FAST_STARTUP_CACHE_PATH.parent.mkdir(parents=True, exist_ok=True)
|
||||
tmp_path = _FAST_STARTUP_CACHE_PATH.with_suffix(".tmp")
|
||||
with tmp_path.open("w", encoding="utf-8") as fh:
|
||||
json.dump(safe_payload, fh, separators=(",", ":"))
|
||||
tmp_path.replace(_FAST_STARTUP_CACHE_PATH)
|
||||
except Exception as e:
|
||||
logger.debug("Fast startup cache save skipped: %s", e)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Scheduler & Orchestration
|
||||
# ---------------------------------------------------------------------------
|
||||
@@ -109,10 +212,12 @@ def _run_tasks(label: str, funcs: list):
|
||||
if not funcs:
|
||||
return
|
||||
futures = {_SHARED_EXECUTOR.submit(func): (func.__name__, time.perf_counter()) for func in funcs}
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
name, start = futures[future]
|
||||
# Iterate directly so future.result(timeout=...) is the blocking call.
|
||||
# as_completed() blocks inside __next__() waiting for completion — the timeout
|
||||
# on result() would never be reached for a hanging task under that pattern.
|
||||
for future, (name, start) in futures.items():
|
||||
try:
|
||||
future.result()
|
||||
future.result(timeout=_TASK_HARD_TIMEOUT_S)
|
||||
duration = time.perf_counter() - start
|
||||
from services.fetch_health import record_success
|
||||
|
||||
@@ -164,6 +269,7 @@ def update_fast_data():
|
||||
latest_data["last_updated"] = datetime.utcnow().isoformat()
|
||||
from services.fetchers._store import bump_data_version
|
||||
bump_data_version()
|
||||
_save_fast_startup_cache()
|
||||
logger.info("Fast-tier update complete.")
|
||||
|
||||
|
||||
@@ -219,6 +325,7 @@ def update_all_data(*, startup_mode: bool = False):
|
||||
logger.info("Full data update starting (parallel)...")
|
||||
# Preload Meshtastic map cache immediately (instant, from disk)
|
||||
load_meshtastic_cache_if_available()
|
||||
_load_fast_startup_cache_if_available()
|
||||
with _data_lock:
|
||||
meshtastic_seeded = bool(latest_data.get("meshtastic_map_nodes"))
|
||||
futures = {
|
||||
@@ -231,6 +338,11 @@ def update_all_data(*, startup_mode: bool = False):
|
||||
_SHARED_EXECUTOR.submit(fetch_fimi): ("fetch_fimi", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_gdelt): ("fetch_gdelt", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(update_liveuamap): ("update_liveuamap", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_uap_sightings): ("fetch_uap_sightings", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_wastewater): ("fetch_wastewater", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_crowdthreat): ("fetch_crowdthreat", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_sar_catalog): ("fetch_sar_catalog", time.perf_counter()),
|
||||
_SHARED_EXECUTOR.submit(fetch_sar_products): ("fetch_sar_products", time.perf_counter()),
|
||||
}
|
||||
if not startup_mode or not meshtastic_seeded:
|
||||
futures[_SHARED_EXECUTOR.submit(fetch_meshtastic_nodes)] = (
|
||||
@@ -241,10 +353,9 @@ def update_all_data(*, startup_mode: bool = False):
|
||||
logger.info(
|
||||
"Startup preload: Meshtastic cache already loaded, deferring remote map refresh to scheduled cadence"
|
||||
)
|
||||
for future in concurrent.futures.as_completed(futures):
|
||||
name, start = futures[future]
|
||||
for future, (name, start) in futures.items():
|
||||
try:
|
||||
future.result()
|
||||
future.result(timeout=_TASK_HARD_TIMEOUT_S)
|
||||
duration = time.perf_counter() - start
|
||||
from services.fetch_health import record_success
|
||||
|
||||
@@ -257,6 +368,42 @@ def update_all_data(*, startup_mode: bool = False):
|
||||
|
||||
record_failure(name, error=e, duration_s=duration)
|
||||
logger.exception(f"full-refresh task failed: {name}")
|
||||
# Run CCTV ingest immediately so cameras are available on first request
|
||||
# (the scheduled job also runs every 10 min for ongoing refresh).
|
||||
if startup_mode:
|
||||
try:
|
||||
from services.cctv_pipeline import (
|
||||
TFLJamCamIngestor, LTASingaporeIngestor, AustinTXIngestor,
|
||||
NYCDOTIngestor, CaltransIngestor, ColoradoDOTIngestor,
|
||||
WSDOTIngestor, GeorgiaDOTIngestor, IllinoisDOTIngestor,
|
||||
MichiganDOTIngestor, WindyWebcamsIngestor, DGTNationalIngestor,
|
||||
MadridCityIngestor, OSMTrafficCameraIngestor, get_all_cameras,
|
||||
)
|
||||
from services.cctv_pipeline import OSMALPRCameraIngestor
|
||||
_startup_ingestors = [
|
||||
TFLJamCamIngestor(), LTASingaporeIngestor(), AustinTXIngestor(),
|
||||
NYCDOTIngestor(), CaltransIngestor(), ColoradoDOTIngestor(),
|
||||
WSDOTIngestor(), GeorgiaDOTIngestor(), IllinoisDOTIngestor(),
|
||||
MichiganDOTIngestor(), WindyWebcamsIngestor(), DGTNationalIngestor(),
|
||||
MadridCityIngestor(), OSMTrafficCameraIngestor(),
|
||||
OSMALPRCameraIngestor(),
|
||||
]
|
||||
logger.info("Running CCTV ingest at startup (%d ingestors)...", len(_startup_ingestors))
|
||||
ingest_futures = {
|
||||
_SHARED_EXECUTOR.submit(ing.ingest): ing.__class__.__name__
|
||||
for ing in _startup_ingestors
|
||||
}
|
||||
for fut in concurrent.futures.as_completed(ingest_futures, timeout=90):
|
||||
name = ingest_futures[fut]
|
||||
try:
|
||||
fut.result()
|
||||
except Exception as e:
|
||||
logger.warning("CCTV startup ingest %s failed: %s", name, e)
|
||||
fetch_cctv()
|
||||
logger.info("CCTV startup ingest complete — %d cameras in DB", len(get_all_cameras()))
|
||||
except Exception as e:
|
||||
logger.warning("CCTV startup ingest failed (non-fatal): %s", e)
|
||||
|
||||
logger.info("Full data update complete.")
|
||||
|
||||
|
||||
@@ -406,6 +553,38 @@ def start_scheduler():
|
||||
misfire_grace_time=60,
|
||||
)
|
||||
|
||||
# Route database — bulk refresh from vrs-standing-data.adsb.lol every 5
|
||||
# days. Replaces the legacy /api/0/routeset POST (blocked under our UA,
|
||||
# and broken upstream). Airline schedules change on a quarterly cycle,
|
||||
# so 5 days is well within the staleness budget; new flight numbers
|
||||
# added within the window simply fall back to UNKNOWN until refresh.
|
||||
from services.fetchers.route_database import refresh_route_database
|
||||
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(refresh_route_database, "refresh_route_database"),
|
||||
"interval",
|
||||
days=5,
|
||||
id="route_database",
|
||||
max_instances=1,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# Aircraft metadata database — bulk refresh from OpenSky's public S3
|
||||
# bucket every 5 days. Provides hex24 -> ICAO type so OpenSky-sourced
|
||||
# flights (which lack 't' in /states/all) get aircraft category and
|
||||
# fuel/CO2 emissions populated. Snapshots are monthly; 5 days catches
|
||||
# newer drops without hammering the bucket.
|
||||
from services.fetchers.aircraft_database import refresh_aircraft_database
|
||||
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(refresh_aircraft_database, "refresh_aircraft_database"),
|
||||
"interval",
|
||||
days=5,
|
||||
id="aircraft_database",
|
||||
max_instances=1,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# GDELT — every 30 minutes (downloads 32 ZIP files per call, avoid rate limits)
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_gdelt, "fetch_gdelt"),
|
||||
@@ -510,14 +689,21 @@ def start_scheduler():
|
||||
misfire_grace_time=120,
|
||||
)
|
||||
|
||||
# Meshtastic map API — every 4 hours, fetch global node positions
|
||||
# Meshtastic map API — once per day with a per-install random offset to
|
||||
# avoid thundering the one-person hobby service at the top of the hour.
|
||||
# The fetcher also short-circuits on a fresh on-disk cache, so the
|
||||
# practical network cadence is closer to "once per day per install".
|
||||
import random as _random_jitter
|
||||
|
||||
_meshtastic_jitter_minutes = _random_jitter.randint(0, 180)
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_meshtastic_nodes, "fetch_meshtastic_nodes"),
|
||||
"interval",
|
||||
hours=4,
|
||||
hours=24,
|
||||
minutes=_meshtastic_jitter_minutes,
|
||||
id="meshtastic_map",
|
||||
max_instances=1,
|
||||
misfire_grace_time=600,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# Oracle resolution sweep — every hour, check if any markets with predictions have concluded
|
||||
@@ -550,9 +736,136 @@ def start_scheduler():
|
||||
misfire_grace_time=600,
|
||||
)
|
||||
|
||||
# UAP sightings (NUFORC) — daily at 12:00 UTC
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(
|
||||
lambda: fetch_uap_sightings(force_refresh=True),
|
||||
"fetch_uap_sightings",
|
||||
),
|
||||
"cron",
|
||||
hour=12,
|
||||
minute=0,
|
||||
id="uap_sightings_daily",
|
||||
max_instances=1,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# WastewaterSCAN pathogen surveillance — daily at 12:00 UTC (samples update ~daily)
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_wastewater, "fetch_wastewater"),
|
||||
"cron",
|
||||
hour=12,
|
||||
minute=0,
|
||||
id="wastewater_daily",
|
||||
max_instances=1,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# CrowdThreat verified threat intelligence — daily at 12:00 UTC
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_crowdthreat, "fetch_crowdthreat"),
|
||||
"cron",
|
||||
hour=12,
|
||||
minute=0,
|
||||
id="crowdthreat_daily",
|
||||
max_instances=1,
|
||||
misfire_grace_time=3600,
|
||||
)
|
||||
|
||||
# SAR catalog (Mode A) — every hour, free metadata from ASF Search.
|
||||
# No account, no downloads, no DSP. Pure scene catalog + coverage hints.
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_sar_catalog, "fetch_sar_catalog"),
|
||||
"interval",
|
||||
hours=1,
|
||||
id="sar_catalog",
|
||||
max_instances=1,
|
||||
misfire_grace_time=600,
|
||||
next_run_time=datetime.utcnow() + timedelta(minutes=3),
|
||||
)
|
||||
|
||||
# SAR products (Mode B) — every 30 minutes, opt-in only.
|
||||
# Pre-processed deformation/flood/damage anomalies from OPERA, EGMS, GFM,
|
||||
# EMS, UNOSAT. Disabled until both MESH_SAR_PRODUCTS_FETCH=allow and
|
||||
# MESH_SAR_PRODUCTS_FETCH_ACKNOWLEDGE=true are set.
|
||||
_scheduler.add_job(
|
||||
lambda: _run_task_with_health(fetch_sar_products, "fetch_sar_products"),
|
||||
"interval",
|
||||
minutes=30,
|
||||
id="sar_products",
|
||||
max_instances=1,
|
||||
misfire_grace_time=600,
|
||||
next_run_time=datetime.utcnow() + timedelta(minutes=5),
|
||||
)
|
||||
|
||||
# ── Time Machine auto-snapshots ─────────────────────────────────────
|
||||
# Compressed snapshots taken on two profiles (high_freq + standard).
|
||||
# Intervals are read from _timemachine_config at each invocation so
|
||||
# config changes via the API take effect without restarting.
|
||||
|
||||
def _auto_snapshot_high_freq():
|
||||
"""Auto-snapshot fast-moving layers (flights, ships, satellites)."""
|
||||
try:
|
||||
from services.node_settings import read_node_settings
|
||||
if not read_node_settings().get("timemachine_enabled", False):
|
||||
return # Time Machine is off — skip
|
||||
from routers.ai_intel import _timemachine_config, _take_snapshot_internal
|
||||
cfg = _timemachine_config["profiles"]["high_freq"]
|
||||
if cfg["interval_minutes"] <= 0:
|
||||
return # disabled
|
||||
layers = cfg["layers"]
|
||||
result = _take_snapshot_internal(layers=layers, profile="auto_high_freq", compress=True)
|
||||
logger.info("Time Machine auto-snapshot (high_freq): %s — %s layers",
|
||||
result.get("snapshot_id"), len(result.get("layers", [])))
|
||||
except Exception as e:
|
||||
logger.warning("Time Machine auto-snapshot (high_freq) failed: %s", e)
|
||||
|
||||
def _auto_snapshot_standard():
|
||||
"""Auto-snapshot contextual layers (news, earthquakes, weather, etc.)."""
|
||||
try:
|
||||
from services.node_settings import read_node_settings
|
||||
if not read_node_settings().get("timemachine_enabled", False):
|
||||
return # Time Machine is off — skip
|
||||
from routers.ai_intel import _timemachine_config, _take_snapshot_internal
|
||||
cfg = _timemachine_config["profiles"]["standard"]
|
||||
if cfg["interval_minutes"] <= 0:
|
||||
return # disabled
|
||||
layers = cfg["layers"]
|
||||
result = _take_snapshot_internal(layers=layers, profile="auto_standard", compress=True)
|
||||
logger.info("Time Machine auto-snapshot (standard): %s — %s layers",
|
||||
result.get("snapshot_id"), len(result.get("layers", [])))
|
||||
except Exception as e:
|
||||
logger.warning("Time Machine auto-snapshot (standard) failed: %s", e)
|
||||
|
||||
_scheduler.add_job(
|
||||
_auto_snapshot_high_freq,
|
||||
"interval",
|
||||
minutes=15,
|
||||
id="timemachine_high_freq",
|
||||
max_instances=1,
|
||||
misfire_grace_time=60,
|
||||
next_run_time=datetime.utcnow() + timedelta(minutes=2), # first snapshot 2m after startup
|
||||
)
|
||||
_scheduler.add_job(
|
||||
_auto_snapshot_standard,
|
||||
"interval",
|
||||
minutes=120,
|
||||
id="timemachine_standard",
|
||||
max_instances=1,
|
||||
misfire_grace_time=300,
|
||||
next_run_time=datetime.utcnow() + timedelta(minutes=5), # first snapshot 5m after startup
|
||||
)
|
||||
|
||||
_scheduler.start()
|
||||
logger.info("Scheduler started.")
|
||||
|
||||
# Start the feed ingester daemon (refreshes feed-backed pin layers)
|
||||
try:
|
||||
from services.feed_ingester import start_feed_ingester
|
||||
start_feed_ingester()
|
||||
except Exception as e:
|
||||
logger.warning("Failed to start feed ingester: %s", e)
|
||||
|
||||
|
||||
def stop_scheduler():
|
||||
if _scheduler:
|
||||
|
||||
+895
-28
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,238 @@
|
||||
"""Feed Ingester — background daemon that refreshes feed-backed pin layers.
|
||||
|
||||
Layers with a non-empty `feed_url` are polled at their `feed_interval`
|
||||
(seconds, minimum 60). The feed is expected to return either:
|
||||
|
||||
1. GeoJSON FeatureCollection — features are converted to pins
|
||||
2. JSON array of pin objects — used directly
|
||||
|
||||
Each refresh atomically replaces the layer's pins with the new data.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# State
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_running = False
|
||||
_thread: threading.Thread | None = None
|
||||
_CHECK_INTERVAL = 30 # seconds between scanning for layers that need refresh
|
||||
_last_fetched: dict[str, float] = {} # layer_id → last fetch timestamp
|
||||
_FETCH_TIMEOUT = 20 # seconds
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# GeoJSON → pin conversion
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _geojson_features_to_pins(features: list[dict]) -> list[dict[str, Any]]:
|
||||
"""Convert GeoJSON Feature objects to pin dicts."""
|
||||
pins: list[dict[str, Any]] = []
|
||||
for feat in features:
|
||||
if not isinstance(feat, dict):
|
||||
continue
|
||||
geom = feat.get("geometry") or {}
|
||||
props = feat.get("properties") or {}
|
||||
|
||||
# Extract coordinates
|
||||
coords = geom.get("coordinates")
|
||||
if geom.get("type") != "Point" or not coords or len(coords) < 2:
|
||||
continue
|
||||
|
||||
lng, lat = float(coords[0]), float(coords[1])
|
||||
if not (-90 <= lat <= 90 and -180 <= lng <= 180):
|
||||
continue
|
||||
|
||||
pin: dict[str, Any] = {
|
||||
"lat": lat,
|
||||
"lng": lng,
|
||||
"label": str(props.get("label", props.get("name", props.get("title", ""))))[:200],
|
||||
"category": str(props.get("category", "custom"))[:50],
|
||||
"color": str(props.get("color", ""))[:20],
|
||||
"description": str(props.get("description", props.get("summary", "")))[:2000],
|
||||
"source": "feed",
|
||||
"source_url": str(props.get("source_url", props.get("url", props.get("link", ""))))[:500],
|
||||
"confidence": float(props.get("confidence", 1.0)),
|
||||
}
|
||||
|
||||
# Entity attachment if present
|
||||
entity_type = props.get("entity_type", "")
|
||||
entity_id = props.get("entity_id", "")
|
||||
if entity_type and entity_id:
|
||||
pin["entity_attachment"] = {
|
||||
"entity_type": str(entity_type),
|
||||
"entity_id": str(entity_id),
|
||||
"entity_label": str(props.get("entity_label", "")),
|
||||
}
|
||||
|
||||
pins.append(pin)
|
||||
return pins
|
||||
|
||||
|
||||
def _parse_feed_response(data: Any) -> list[dict[str, Any]]:
|
||||
"""Parse a feed response into a list of pin dicts."""
|
||||
if isinstance(data, dict):
|
||||
# GeoJSON FeatureCollection
|
||||
if data.get("type") == "FeatureCollection" and isinstance(data.get("features"), list):
|
||||
return _geojson_features_to_pins(data["features"])
|
||||
# Single Feature
|
||||
if data.get("type") == "Feature":
|
||||
return _geojson_features_to_pins([data])
|
||||
# Wrapped response like {"ok": true, "data": [...]}
|
||||
inner = data.get("data") or data.get("results") or data.get("pins") or data.get("items")
|
||||
if isinstance(inner, list):
|
||||
return _normalize_pin_list(inner)
|
||||
|
||||
if isinstance(data, list):
|
||||
# Check if first item looks like a GeoJSON Feature
|
||||
if data and isinstance(data[0], dict) and data[0].get("type") == "Feature":
|
||||
return _geojson_features_to_pins(data)
|
||||
return _normalize_pin_list(data)
|
||||
|
||||
return []
|
||||
|
||||
|
||||
def _normalize_pin_list(items: list) -> list[dict[str, Any]]:
|
||||
"""Normalize a list of raw pin objects, ensuring lat/lng are present."""
|
||||
pins: list[dict[str, Any]] = []
|
||||
for item in items:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
lat = item.get("lat") or item.get("latitude")
|
||||
lng = item.get("lng") or item.get("lon") or item.get("longitude")
|
||||
if lat is None or lng is None:
|
||||
continue
|
||||
try:
|
||||
lat, lng = float(lat), float(lng)
|
||||
except (ValueError, TypeError):
|
||||
continue
|
||||
if not (-90 <= lat <= 90 and -180 <= lng <= 180):
|
||||
continue
|
||||
|
||||
pin: dict[str, Any] = {
|
||||
"lat": lat,
|
||||
"lng": lng,
|
||||
"label": str(item.get("label", item.get("name", item.get("title", ""))))[:200],
|
||||
"category": str(item.get("category", "custom"))[:50],
|
||||
"color": str(item.get("color", ""))[:20],
|
||||
"description": str(item.get("description", item.get("summary", "")))[:2000],
|
||||
"source": "feed",
|
||||
"source_url": str(item.get("source_url", item.get("url", item.get("link", ""))))[:500],
|
||||
"confidence": float(item.get("confidence", 1.0)),
|
||||
}
|
||||
|
||||
entity_type = item.get("entity_type", "")
|
||||
entity_id = item.get("entity_id", "")
|
||||
if entity_type and entity_id:
|
||||
pin["entity_attachment"] = {
|
||||
"entity_type": str(entity_type),
|
||||
"entity_id": str(entity_id),
|
||||
"entity_label": str(item.get("entity_label", "")),
|
||||
}
|
||||
|
||||
pins.append(pin)
|
||||
return pins
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fetch a single layer
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _fetch_layer_feed(layer: dict[str, Any]) -> None:
|
||||
"""Fetch a feed URL and replace the layer's pins."""
|
||||
layer_id = layer["id"]
|
||||
feed_url = layer["feed_url"]
|
||||
layer_name = layer.get("name", layer_id)
|
||||
|
||||
try:
|
||||
resp = requests.get(
|
||||
feed_url,
|
||||
timeout=_FETCH_TIMEOUT,
|
||||
headers={"User-Agent": "ShadowBroker-FeedIngester/1.0"},
|
||||
)
|
||||
resp.raise_for_status()
|
||||
data = resp.json()
|
||||
except requests.RequestException as e:
|
||||
logger.warning("Feed fetch failed for layer '%s' (%s): %s", layer_name, feed_url, e)
|
||||
return
|
||||
except (ValueError, TypeError) as e:
|
||||
logger.warning("Feed parse failed for layer '%s' (%s): %s", layer_name, feed_url, e)
|
||||
return
|
||||
|
||||
pins = _parse_feed_response(data)
|
||||
|
||||
from services.ai_pin_store import replace_layer_pins, update_layer
|
||||
count = replace_layer_pins(layer_id, pins)
|
||||
|
||||
# Update layer metadata with last_fetched timestamp
|
||||
update_layer(layer_id, feed_last_fetched=time.time())
|
||||
|
||||
_last_fetched[layer_id] = time.time()
|
||||
logger.info("Feed refresh for layer '%s': %d pins from %s", layer_name, count, feed_url)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Main loop
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _ingest_loop() -> None:
|
||||
"""Daemon loop: scan for feed layers and refresh those that are due."""
|
||||
while _running:
|
||||
try:
|
||||
from services.ai_pin_store import get_feed_layers
|
||||
|
||||
layers = get_feed_layers()
|
||||
now = time.time()
|
||||
|
||||
for layer in layers:
|
||||
layer_id = layer["id"]
|
||||
interval = max(60, layer.get("feed_interval", 300))
|
||||
last = _last_fetched.get(layer_id, 0)
|
||||
|
||||
if now - last >= interval:
|
||||
try:
|
||||
_fetch_layer_feed(layer)
|
||||
except Exception as e:
|
||||
logger.warning("Feed ingestion error for layer %s: %s",
|
||||
layer.get("name", layer_id), e)
|
||||
|
||||
except Exception as e:
|
||||
logger.error("Feed ingester loop error: %s", e)
|
||||
|
||||
# Sleep in short increments so we can stop cleanly
|
||||
for _ in range(int(_CHECK_INTERVAL)):
|
||||
if not _running:
|
||||
break
|
||||
time.sleep(1)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Start / stop
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def start_feed_ingester() -> None:
|
||||
"""Start the feed ingester daemon thread."""
|
||||
global _running, _thread
|
||||
if _thread and _thread.is_alive():
|
||||
return
|
||||
_running = True
|
||||
_thread = threading.Thread(target=_ingest_loop, daemon=True, name="feed-ingester")
|
||||
_thread.start()
|
||||
logger.info("Feed ingester daemon started (check interval=%ds)", _CHECK_INTERVAL)
|
||||
|
||||
|
||||
def stop_feed_ingester() -> None:
|
||||
"""Stop the feed ingester daemon."""
|
||||
global _running
|
||||
_running = False
|
||||
@@ -4,6 +4,7 @@ Central location for latest_data, source_timestamps, and the data lock.
|
||||
Every fetcher imports from here instead of maintaining its own copy.
|
||||
"""
|
||||
|
||||
import copy
|
||||
import threading
|
||||
import logging
|
||||
from datetime import datetime
|
||||
@@ -42,6 +43,7 @@ class DashboardData(TypedDict, total=False):
|
||||
gps_jamming: List[Dict[str, Any]]
|
||||
satellites: List[Dict[str, Any]]
|
||||
satellite_source: str
|
||||
satellite_analysis: Dict[str, Any]
|
||||
prediction_markets: List[Dict[str, Any]]
|
||||
sigint: List[Dict[str, Any]]
|
||||
sigint_totals: Dict[str, Any]
|
||||
@@ -61,6 +63,12 @@ class DashboardData(TypedDict, total=False):
|
||||
fimi: Dict[str, Any]
|
||||
psk_reporter: List[Dict[str, Any]]
|
||||
correlations: List[Dict[str, Any]]
|
||||
uap_sightings: List[Dict[str, Any]]
|
||||
wastewater: List[Dict[str, Any]]
|
||||
crowdthreat: List[Dict[str, Any]]
|
||||
sar_scenes: List[Dict[str, Any]]
|
||||
sar_anomalies: List[Dict[str, Any]]
|
||||
sar_aoi_coverage: List[Dict[str, Any]]
|
||||
|
||||
|
||||
# In-memory store
|
||||
@@ -105,6 +113,12 @@ latest_data: DashboardData = {
|
||||
"fimi": {},
|
||||
"psk_reporter": [],
|
||||
"correlations": [],
|
||||
"uap_sightings": [],
|
||||
"wastewater": [],
|
||||
"crowdthreat": [],
|
||||
"sar_scenes": [],
|
||||
"sar_anomalies": [],
|
||||
"sar_aoi_coverage": [],
|
||||
}
|
||||
|
||||
# Per-source freshness timestamps
|
||||
@@ -117,9 +131,21 @@ source_freshness: dict[str, dict] = {}
|
||||
def _mark_fresh(*keys):
    """Stamp the given source keys as freshly updated and notify listeners."""
    global _data_version
    stamp = datetime.utcnow().isoformat()
    updated: list[tuple[str, int, int]] = []  # (layer, version, count)
    with _data_lock:
        for key in keys:
            source_timestamps[key] = stamp
            new_version = _layer_versions.get(key, 0) + 1
            _layer_versions[key] = new_version
            # Entity count is cheap (len) while the lock is already held.
            value = latest_data.get(key)
            if isinstance(value, list):
                entity_count = len(value)
            else:
                entity_count = 1 if value is not None else 0
            updated.append((key, new_version, entity_count))
        # Bump the global version immediately so partial fetch progress is
        # observable by the frontend before the whole tier completes.
        _data_version += 1
    # Callbacks run outside the lock so a slow SSE listener cannot deadlock us.
    _notify_layer_change(updated)
|
||||
|
||||
|
||||
# Thread lock for safe reads/writes to latest_data
|
||||
@@ -129,16 +155,73 @@ _data_lock = threading.Lock()
|
||||
# Monotonic counter bumped on every store refresh.
# Used for cheap ETag generation instead of MD5-hashing the full response.
_data_version: int = 0

# Per-layer version counters — incremented only when that specific layer
# refreshes. Used by get_layer_slice for per-layer incremental updates
# and by the SSE stream to push targeted layer_changed notifications.
_layer_versions: dict[str, int] = {}


# ---------------------------------------------------------------------------
# Layer-change notification callbacks (thread → async SSE bridge)
# ---------------------------------------------------------------------------
# Each callback receives (layer, version, count); see
# register_layer_change_callback for the contract.
_layer_change_callbacks: list = []
# Guards mutation/iteration of the callback list across fetcher threads.
_layer_change_callbacks_lock = threading.Lock()
|
||||
|
||||
|
||||
def register_layer_change_callback(callback) -> None:
    """Subscribe *callback* to layer-refresh events from _mark_fresh().

    The callable receives (layer: str, version: int, count: int) and is
    invoked from fetcher threads, so it must be thread-safe.
    """
    with _layer_change_callbacks_lock:
        _layer_change_callbacks.append(callback)
|
||||
|
||||
|
||||
def unregister_layer_change_callback(callback) -> None:
    """Drop *callback* from the subscriber list; unknown callbacks are ignored."""
    with _layer_change_callbacks_lock:
        # Membership test and removal are atomic here because we hold the lock.
        if callback in _layer_change_callbacks:
            _layer_change_callbacks.remove(callback)
|
||||
|
||||
|
||||
def _notify_layer_change(changed: list[tuple[str, int, int]]) -> None:
    """Deliver every (layer, version, count) event to each registered callback."""
    # Snapshot under the lock, then call outside it so a slow callback
    # cannot block registration/unregistration.
    with _layer_change_callbacks_lock:
        subscribers = tuple(_layer_change_callbacks)
    for subscriber in subscribers:
        for layer, version, count in changed:
            try:
                subscriber(layer, version, count)
            except Exception:
                # Best-effort fan-out: one broken listener must not stop others.
                pass
|
||||
|
||||
|
||||
def get_layer_versions() -> dict[str, int]:
    """Snapshot every per-layer version counter under the data lock."""
    with _data_lock:
        return {**_layer_versions}
|
||||
|
||||
|
||||
def get_layer_version(layer: str) -> int:
    """Current version counter for *layer*; 0 when it has never refreshed."""
    with _data_lock:
        version = _layer_versions.get(layer, 0)
    return version
|
||||
|
||||
|
||||
def bump_data_version() -> None:
    """Increment the global data version counter after a fetch cycle completes.

    The bump happens exactly once and under _data_lock: the literal text had
    a second, unlocked `_data_version += 1` (a merge/diff artifact) that both
    double-counted and raced with concurrent fetcher threads.
    """
    global _data_version
    with _data_lock:
        _data_version += 1
|
||||
|
||||
|
||||
def get_data_version() -> int:
    """Return the current data version (for ETag generation).

    Reads under _data_lock for a consistent view; this replaces an earlier
    bare `return _data_version` that left the locked read as unreachable
    dead code (merge/diff artifact).
    """
    with _data_lock:
        return _data_version
|
||||
|
||||
|
||||
_active_layers_version: int = 0
|
||||
@@ -156,21 +239,17 @@ def get_active_layers_version() -> int:
|
||||
|
||||
|
||||
def get_latest_data_subset(*keys: str) -> DashboardData:
    """Return a deep snapshot of only the requested top-level keys.

    This avoids cloning the entire dashboard store for endpoints that only need
    a small tier-specific subset. Deep copy ensures callers cannot mutate
    nested structures (e.g. individual flight dicts) and affect the live store.

    The literal block contained a duplicated docstring opener and leftover
    shallow-copy branches from a merge/diff artifact, making it syntactically
    invalid; this is the reconstructed deep-copy version.
    """
    with _data_lock:
        snap: DashboardData = {}
        for key in keys:
            value = latest_data.get(key)
            # deepcopy also covers missing keys (value is None) harmlessly.
            snap[key] = copy.deepcopy(value)
        return snap
|
||||
|
||||
|
||||
@@ -231,10 +310,16 @@ active_layers: dict[str, bool] = {
|
||||
"satnogs": True,
|
||||
"tinygs": True,
|
||||
"ukraine_alerts": True,
|
||||
"power_plants": False,
|
||||
"power_plants": True,
|
||||
"viirs_nightlights": False,
|
||||
"psk_reporter": True,
|
||||
"correlations": True,
|
||||
"contradictions": True,
|
||||
"uap_sightings": True,
|
||||
"wastewater": True,
|
||||
"ai_intel": True,
|
||||
"crowdthreat": True,
|
||||
"sar": True,
|
||||
}
|
||||
|
||||
|
||||
|
||||
@@ -0,0 +1,177 @@
|
||||
"""OpenSky aircraft metadata: ICAO24 hex -> ICAO type code + friendly model.
|
||||
|
||||
OpenSky's /states/all does not include aircraft type, so OpenSky-sourced
|
||||
flights arrive with ``t`` field empty. This module bulk-loads the public
|
||||
OpenSky aircraft database (one snapshot CSV per month, ~108 MB uncompressed,
|
||||
~600k aircraft) once every 5 days and exposes a fast in-memory hex lookup.
|
||||
|
||||
The data is also useful when adsb.lol's live API is degraded: even the
|
||||
adsb.lol /v2 feed sometimes returns aircraft with empty ``t`` for newly seen
|
||||
transponders, and the lookup gracefully fills those in too.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
import xml.etree.ElementTree as ET
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
logger = logging.getLogger(__name__)

# S3 listing endpoint for the public OpenSky metadata bucket (list-type=2 XML).
_BUCKET_LIST_URL = (
    "https://s3.opensky-network.org/data-samples?prefix=metadata/&list-type=2"
)
_BUCKET_BASE = "https://s3.opensky-network.org/data-samples/"
# XML namespace prefix used by the S3 ListBucketResult document.
_S3_NS = "{http://s3.amazonaws.com/doc/2006-03-01/}"
# Snapshots are published monthly; re-downloading every 5 days is plenty.
_REFRESH_INTERVAL_S = 5 * 24 * 3600
_LIST_TIMEOUT_S = 30
# The CSV is ~108 MB; allow a generous streaming window.
_DOWNLOAD_TIMEOUT_S = 600
_USER_AGENT = (
    "ShadowBroker-OSINT/0.9.7 "
    "(+https://github.com/BigBodyCobain/Shadowbroker; "
    "contact: bigbodycobain@gmail.com)"
)

# Reentrant lock guarding the index, refresh timestamp and in-progress flag.
_lock = threading.RLock()
# Lowercase ICAO24 hex -> {"typecode", "model", "manufacturer", "registration", "operator"}
_aircraft_by_hex: dict[str, dict[str, str]] = {}
# Epoch seconds of the last successful index swap (0.0 = never loaded).
_last_refresh = 0.0
# Single-flight flag: True while a refresh download/parse is running.
_in_progress = False
|
||||
|
||||
|
||||
def _latest_snapshot_key() -> str:
    """Discover the most recent aircraft-database-complete snapshot key."""
    resp = requests.get(
        _BUCKET_LIST_URL,
        timeout=_LIST_TIMEOUT_S,
        headers={"User-Agent": _USER_AGENT},
    )
    resp.raise_for_status()
    listing = ET.fromstring(resp.text)
    snapshot_keys = []
    for content in listing.iter(f"{_S3_NS}Contents"):
        key_el = content.find(f"{_S3_NS}Key")
        if key_el is None or not key_el.text:
            continue
        key = key_el.text
        if "aircraft-database-complete-" in key and key.endswith(".csv"):
            snapshot_keys.append(key)
    if not snapshot_keys:
        raise RuntimeError("no aircraft-database-complete snapshot found in bucket listing")
    # Keys embed a date, so the lexicographic maximum is the newest snapshot.
    return max(snapshot_keys)
|
||||
|
||||
|
||||
def _stream_csv_index(url: str) -> dict[str, dict[str, str]]:
    """Stream-parse the OpenSky aircraft CSV into a hex-keyed index.

    The snapshot uses single-quote quoting, hence ``quotechar="'"``. Lines
    are decoded lazily from the streaming response so the ~108 MB file is
    never held in memory all at once.
    """
    index: dict[str, dict[str, str]] = {}
    with requests.get(
        url,
        timeout=_DOWNLOAD_TIMEOUT_S,
        stream=True,
        headers={"User-Agent": _USER_AGENT},
    ) as response:
        response.raise_for_status()
        decoded_lines = (
            raw.decode("utf-8", errors="replace")
            for raw in response.iter_lines(decode_unicode=False)
            if raw
        )
        for row in csv.DictReader(decoded_lines, quotechar="'"):
            hex_code = (row.get("icao24") or "").strip().lower()
            # Skip rows with no transponder hex or the all-zero placeholder.
            if not hex_code or hex_code == "000000":
                continue
            typecode = (row.get("typecode") or "").strip().upper()
            model = (row.get("model") or "").strip()
            # Rows carrying neither a type code nor a model are useless here.
            if not (typecode or model):
                continue
            record: dict[str, str] = {}
            if typecode:
                record["typecode"] = typecode
            if model:
                record["model"] = model
            manufacturer = (row.get("manufacturerName") or "").strip()
            if manufacturer:
                record["manufacturer"] = manufacturer
            registration = (row.get("registration") or "").strip().upper()
            if registration:
                record["registration"] = registration
            operator = (row.get("operator") or "").strip()
            if operator:
                record["operator"] = operator
            index[hex_code] = record
        return index
|
||||
|
||||
|
||||
def refresh_aircraft_database(force: bool = False) -> bool:
    """Download the latest OpenSky aircraft snapshot and rebuild the index.

    Returns True if a refresh was performed (success or attempted), False if
    skipped because the cache is still fresh or another refresh is in flight.
    """
    global _last_refresh, _in_progress

    now = time.time()
    with _lock:
        # Single-flight guard: at most one download/parse at a time.
        if _in_progress:
            return False
        # Cache still fresh and non-empty — skip unless explicitly forced.
        if not force and (now - _last_refresh) < _REFRESH_INTERVAL_S and _aircraft_by_hex:
            return False
        _in_progress = True

    try:
        started = time.time()
        key = _latest_snapshot_key()
        # The download and CSV parse run outside the lock so lookups keep
        # being served from the old index during the refresh.
        index = _stream_csv_index(_BUCKET_BASE + key)
        with _lock:
            # Swap in the new index atomically with respect to lookup_aircraft().
            _aircraft_by_hex.clear()
            _aircraft_by_hex.update(index)
            _last_refresh = time.time()
        logger.info(
            "aircraft database refreshed in %.1fs from %s: %d aircraft",
            time.time() - started,
            key,
            len(index),
        )
        return True
    except (requests.RequestException, OSError, ValueError, ET.ParseError) as exc:
        logger.warning("aircraft database refresh failed: %s", exc)
        # Per the docstring an attempted-but-failed refresh still returns True.
        return True
    finally:
        with _lock:
            _in_progress = False
|
||||
|
||||
|
||||
def lookup_aircraft(icao24: str) -> dict[str, str] | None:
    """Look up the metadata record for an ICAO24 hex code; None when unknown."""
    normalized = (icao24 or "").strip().lower()
    if not normalized:
        return None
    with _lock:
        record = _aircraft_by_hex.get(normalized)
        if not record:
            return None
        # Hand back a copy so callers cannot mutate the shared index.
        return dict(record)
|
||||
|
||||
|
||||
def lookup_aircraft_type(icao24: str) -> str:
    """ICAO type designator (e.g. 'B738', 'GLF4') for a hex code, or ''."""
    record = lookup_aircraft(icao24)
    return record.get("typecode", "") if record else ""
|
||||
|
||||
|
||||
def aircraft_database_status() -> dict[str, Any]:
    """Snapshot of refresh bookkeeping for health/status endpoints."""
    with _lock:
        status = {
            "last_refresh": _last_refresh,
            "aircraft": len(_aircraft_by_hex),
            "in_progress": _in_progress,
        }
    return status
|
||||
@@ -0,0 +1,129 @@
|
||||
"""CrowdThreat fetcher — crowdsourced global threat intelligence.
|
||||
|
||||
Polls verified threat reports from CrowdThreat's public API and normalises
|
||||
them into map-ready records with category-based icon IDs.
|
||||
|
||||
No API key required — the /threats endpoint is unauthenticated.
|
||||
"""
|
||||
|
||||
import logging
|
||||
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh, is_any_active
|
||||
from services.fetchers.retry import with_retry
|
||||
|
||||
logger = logging.getLogger("services.data_fetcher")

# Base URL of CrowdThreat's public backend; /threats needs no API key.
_CT_BASE = "https://backend.crowdthreat.world"

# CrowdThreat category_id → icon ID used on the MapLibre layer
_CATEGORY_ICON = {
    1: "ct-security",  # Security & Conflict (red)
    2: "ct-crime",  # Crime & Safety (blue)
    3: "ct-aviation",  # Aviation (green)
    4: "ct-maritime",  # Maritime (teal)
    5: "ct-infrastructure",  # Industrial & Infra (orange)
    6: "ct-special",  # Special Threats (purple)
    7: "ct-social",  # Social & Political (pink)
    8: "ct-other",  # Other (gray)
}

# Hex colours paired with the icon categories above (same key space).
_CATEGORY_COLOUR = {
    1: "#ef4444",  # red
    2: "#3b82f6",  # blue
    3: "#22c55e",  # green
    4: "#14b8a6",  # teal
    5: "#f97316",  # orange
    6: "#a855f7",  # purple
    7: "#ec4899",  # pink
    8: "#6b7280",  # gray
}
|
||||
|
||||
|
||||
@with_retry(max_retries=2, base_delay=5)
def fetch_crowdthreat():
    """Fetch verified threat reports from CrowdThreat public API.

    Normalises each report into a flat, map-ready record and publishes the
    batch to ``latest_data["crowdthreat"]`` under the shared data lock.

    Fixes vs. the previous version:
      * media entries may be dicts ({"url": ...}) or bare URL strings — the
        old ``m.get("url") or m`` raised AttributeError on strings;
      * non-numeric vote counts no longer abort the whole batch via
        ``int(votes)`` raising ValueError.
    """
    if not is_any_active("crowdthreat"):
        return

    try:
        resp = fetch_with_curl(f"{_CT_BASE}/threats", timeout=20)
        if not resp or resp.status_code != 200:
            logger.warning("CrowdThreat API returned %s", getattr(resp, "status_code", "None"))
            return

        payload = resp.json()
        raw_threats = payload.get("data", {}).get("threats", [])
        if not raw_threats:
            logger.debug("CrowdThreat returned 0 threats")
            return

    except Exception as e:
        logger.error("CrowdThreat fetch error: %s", e)
        return

    processed = []
    for t in raw_threats:
        loc = t.get("location") or {}
        lng_lat = loc.get("lng_lat")
        # Skip reports without a usable [lng, lat] pair — they can't be mapped.
        if not lng_lat or len(lng_lat) < 2:
            continue
        try:
            lng = float(lng_lat[0])
            lat = float(lng_lat[1])
        except (TypeError, ValueError):
            continue

        cat = t.get("category") or {}
        cat_id = cat.get("id", 8)  # 8 = "Other" fallback category
        subcat = t.get("subcategory") or {}
        threat_type = t.get("type") or {}
        dates = t.get("dates") or {}
        occurred = dates.get("occurred") or {}
        reported = dates.get("reported") or {}

        # Extract all available detail from the API response; field names
        # vary between API revisions, hence the or-chains.
        summary = (t.get("summary") or t.get("description") or "").strip()
        verification = (t.get("verification_status") or t.get("status") or "").strip()
        country_obj = loc.get("country") or {}
        country = country_obj.get("name", "") if isinstance(country_obj, dict) else str(country_obj or "")
        media = t.get("media") or t.get("images") or t.get("attachments") or []
        source_url = t.get("source_url") or t.get("url") or t.get("link") or ""
        severity = t.get("severity") or t.get("severity_level") or t.get("risk_level") or ""
        votes = t.get("votes") or t.get("upvotes") or 0
        reporter = t.get("user") or t.get("reporter") or {}
        reporter_name = reporter.get("name", "") if isinstance(reporter, dict) else ""

        # Media items may be {"url": ...} dicts or bare URL strings.
        if isinstance(media, list):
            media_urls = [
                (m.get("url") or m) if isinstance(m, dict) else m
                for m in media[:3]
            ]
        else:
            media_urls = []

        # Never let a single malformed vote count abort the whole batch.
        try:
            vote_count = int(votes) if votes else 0
        except (TypeError, ValueError):
            vote_count = 0

        processed.append({
            "id": t.get("id"),
            "title": t.get("title", ""),
            "summary": summary[:500] if summary else "",
            "lat": lat,
            "lng": lng,
            "address": loc.get("name", ""),
            "city": loc.get("city", ""),
            "country": country,
            "category": cat.get("name", "Other"),
            "category_id": cat_id,
            "category_colour": _CATEGORY_COLOUR.get(cat_id, "#6b7280"),
            "subcategory": subcat.get("name", ""),
            "threat_type": threat_type.get("name", ""),
            "icon_id": _CATEGORY_ICON.get(cat_id, "ct-other"),
            "occurred": occurred.get("raw", ""),
            "occurred_iso": occurred.get("iso", ""),
            "timeago": occurred.get("timeago", ""),
            "reported": reported.get("raw", ""),
            "verification": verification,
            "severity": str(severity),
            "source_url": source_url,
            "media_urls": media_urls,
            "votes": vote_count,
            "reporter": reporter_name,
            "source": "CrowdThreat",
        })

    logger.info("CrowdThreat: fetched %d verified threats", len(processed))

    with _data_lock:
        latest_data["crowdthreat"] = processed
    # _mark_fresh acquires _data_lock itself, so it must run outside the block.
    _mark_fresh("crowdthreat")
|
||||
@@ -1,14 +1,19 @@
|
||||
"""Earth-observation fetchers — earthquakes, FIRMS fires, space weather, weather radar,
|
||||
severe weather alerts, air quality, volcanoes."""
|
||||
|
||||
import concurrent.futures
|
||||
import csv
|
||||
import hashlib
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import time
|
||||
import heapq
|
||||
from datetime import datetime
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh
|
||||
@@ -596,3 +601,852 @@ def fetch_viirs_change_nodes():
|
||||
if nodes:
|
||||
_mark_fresh("viirs_change_nodes")
|
||||
logger.info(f"VIIRS change nodes: {len(nodes)} nodes from {len(_VIIRS_AOIS)} AOIs")
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# UAP Sightings (NUFORC — National UAP Reporting Center)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# Shape → canonical category mapping for consistent frontend filtering.
# Raw NUFORC shape strings are free-form; values not in this table fall
# back to "unknown" via _normalize_uap_shape().
_UAP_SHAPE_MAP = {
    "light": "light", "fireball": "fireball", "orb": "orb",
    "sphere": "orb", "circle": "orb", "oval": "orb", "egg": "orb",
    "triangle": "triangle", "delta": "triangle", "chevron": "triangle",
    "boomerang": "triangle",
    "cigar": "cigar", "cylinder": "cigar", "tube": "cigar",
    "disk": "disk", "disc": "disk", "saucer": "disk",
    "diamond": "diamond", "cone": "diamond", "cross": "diamond",
    "rectangle": "rectangle", "square": "rectangle",
    "formation": "formation", "cluster": "formation",
    "changing": "changing", "flash": "flash", "star": "light",
    "tic-tac": "tic-tac", "tic tac": "tic-tac",
}
|
||||
|
||||
# US state → approximate centroid for coarse geocoding when city lookup fails.
# Values are (lat, lng); ~0.1° precision is plenty for state-level fallback.
_US_STATE_COORDS: dict[str, tuple[float, float]] = {
    "AL": (32.8, -86.8), "AK": (64.2, -152.5), "AZ": (34.0, -111.1),
    "AR": (35.2, -91.8), "CA": (36.8, -119.4), "CO": (39.6, -105.3),
    "CT": (41.6, -72.7), "DE": (39.3, -75.5), "FL": (27.8, -81.8),
    "GA": (32.7, -83.5), "HI": (19.9, -155.6), "ID": (44.1, -114.7),
    "IL": (40.3, -89.0), "IN": (40.3, -86.1), "IA": (42.0, -93.2),
    "KS": (39.0, -98.5), "KY": (37.8, -84.3), "LA": (31.2, -92.5),
    "ME": (45.3, -69.4), "MD": (39.0, -76.6), "MA": (42.4, -71.4),
    "MI": (44.3, -85.6), "MN": (46.7, -94.7), "MS": (32.7, -89.5),
    "MO": (38.6, -91.8), "MT": (46.8, -110.4), "NE": (41.5, -99.9),
    "NV": (38.8, -116.4), "NH": (43.2, -71.6), "NJ": (40.1, -74.4),
    "NM": (34.5, -106.0), "NY": (43.0, -75.0), "NC": (35.6, -79.8),
    "ND": (47.5, -100.5), "OH": (40.4, -82.9), "OK": (35.0, -97.1),
    "OR": (43.8, -120.6), "PA": (41.2, -77.2), "RI": (41.6, -71.5),
    "SC": (33.8, -81.2), "SD": (43.9, -99.4), "TN": (35.5, -86.6),
    "TX": (31.0, -97.6), "UT": (39.3, -111.1), "VT": (44.6, -72.6),
    "VA": (37.4, -78.7), "WA": (47.4, -120.7), "WV": (38.6, -80.6),
    "WI": (43.8, -88.8), "WY": (43.1, -107.6), "DC": (38.9, -77.0),
}
|
||||
|
||||
|
||||
def _normalize_uap_shape(raw: str) -> str:
    """Map a raw NUFORC shape string onto its canonical filter category."""
    return _UAP_SHAPE_MAP.get(raw.strip().lower(), "unknown")
|
||||
|
||||
|
||||
def _reverse_geocode_state(lat: float, lng: float) -> tuple[str, str]:
    """Best-effort reverse-geocode a lat/lng to (state_abbr, country).

    Uses the _US_STATE_COORDS centroid table for fast approximate matching.
    Returns ('', 'Unknown') if no close match is found.
    """
    def _distance_to(item: tuple[str, tuple[float, float]]) -> float:
        _, (clat, clng) = item
        return ((lat - clat) ** 2 + (lng - clng) ** 2) ** 0.5

    nearest = min(_US_STATE_COORDS.items(), key=_distance_to)
    # ~5 degrees of tolerance, matching the original coarse threshold.
    if _distance_to(nearest) < 5.0:
        return nearest[0], "US"
    return "", "Unknown"
|
||||
|
||||
|
||||
# ── NUFORC Mapbox Tilequery API ─────────────────────────────────────────
# NUFORC's website switched to a JS-rendered Mapbox GL map. The old HTML
# table scraper is defunct. We now query the Mapbox Tilequery API against
# NUFORC's public tileset to get precise sighting coordinates.
#
# Tileset: nuforc.cmm18aqea06bu1mmselhpnano-0ce5v
# Layer: Sightings    Fields: Count, From, To, LinkLat, LinkLon
#
# We sample a grid of points across the US/world with a 200 km radius
# (_NUFORC_RADIUS_M below) and filter to recent sightings.

_NUFORC_TILESET = "nuforc.cmm18aqea06bu1mmselhpnano-0ce5v"
# Optional Mapbox token; when empty the tilequery enrichment is skipped.
_NUFORC_TOKEN = os.environ.get("NUFORC_MAPBOX_TOKEN", "").strip()
_NUFORC_RADIUS_M = 200_000  # 200 km query radius
_NUFORC_LIMIT = 50  # max features per tilequery call
_NUFORC_RECENT_DAYS = int(os.environ.get("NUFORC_RECENT_DAYS", "60"))
_NUFORC_GEOCODE_WORKERS = max(1, int(os.environ.get("NUFORC_GEOCODE_WORKERS", "1")))
# Photon (Komoot) is more lenient than Nominatim — ~200ms per query in
# practice, so a 0.3s spacing keeps us well under any soft throttle while
# still rebuilding a full 12-month window in ~10 minutes.
_NUFORC_GEOCODE_SPACING_S = float(os.environ.get("NUFORC_GEOCODE_SPACING_S", "0.3"))
_NUFORC_DATA_DIR = Path(__file__).resolve().parent.parent.parent / "data"
_NUFORC_SIGHTINGS_CACHE_FILE = _NUFORC_DATA_DIR / "nuforc_recent_sightings.json"
_NUFORC_LOCATION_CACHE_FILE = _NUFORC_DATA_DIR / "nuforc_location_cache.json"

# Live NUFORC databank scraping (wpDataTables server-side AJAX).
# The HuggingFace mirror froze at 2023-12-20, so we pull directly from
# nuforc.org's monthly sub-index. Each month page embeds a wdtNonce we
# must extract, then POST to admin-ajax.php to get the DataTables JSON.
_NUFORC_LIVE_INDEX_URL = "https://nuforc.org/subndx/?id=e{yyyymm}"
_NUFORC_LIVE_AJAX_URL = (
    "https://nuforc.org/wp-admin/admin-ajax.php"
    "?action=get_wdtable&table_id=1&wdt_var1=YearMonth&wdt_var2={yyyymm}"
)
# Pulls the per-month wdtNonce hidden-input value out of the index page HTML.
_NUFORC_LIVE_NONCE_RE = re.compile(
    r'id=["\']wdtNonceFrontendServerSide_1["\'][^>]*value=["\']([a-f0-9]+)["\']'
)
_NUFORC_LIVE_SIGHTING_ID_RE = re.compile(r"id=(\d+)")
_NUFORC_LIVE_USER_AGENT = "Mozilla/5.0 (ShadowBroker-OSINT NUFORC-fetcher)"
_NUFORC_LIVE_SESSION_COOKIES = _NUFORC_DATA_DIR / "nuforc_session.cookies"

# Sample grid covering continental US, Alaska, Hawaii, Canada, UK, Australia
_TILEQUERY_GRID: list[tuple[float, float]] = [
    # Continental US — ~4° spacing (lon, lat)
    (-122.4, 37.8), (-118.2, 34.1), (-112.1, 33.4), (-104.9, 39.7),
    (-95.4, 29.8), (-96.8, 32.8), (-87.6, 41.9), (-84.4, 33.7),
    (-81.7, 41.5), (-80.2, 25.8), (-77.0, 38.9), (-74.0, 40.7),
    (-71.1, 42.4), (-90.2, 38.6), (-93.3, 44.9), (-111.9, 40.8),
    (-122.7, 45.5), (-86.2, 39.8), (-106.6, 35.1), (-73.9, 43.2),
    (-76.6, 39.3), (-97.5, 35.5), (-83.0, 42.3), (-117.2, 32.7),
    (-82.5, 28.0), (-78.6, 35.8), (-90.1, 30.0), (-71.4, 41.8),
    # Alaska, Hawaii
    (-149.9, 61.2), (-155.5, 19.9),
    # Canada
    (-79.4, 43.7), (-123.1, 49.3), (-73.6, 45.5),
    # UK & Europe
    (-0.1, 51.5), (-3.2, 55.9),
    # Australia
    (151.2, -33.9), (144.9, -37.8),
]
|
||||
|
||||
|
||||
def _fetch_nuforc_tilequery(lng: float, lat: float) -> list[dict]:
    """Return raw tilequery features around one point; [] on any failure."""
    if not _NUFORC_TOKEN:
        # Optional enrichment — without a Mapbox token we simply skip it.
        return []
    query_url = (
        f"https://api.mapbox.com/v4/{_NUFORC_TILESET}/tilequery/"
        f"{lng},{lat}.json"
        f"?radius={_NUFORC_RADIUS_M}&limit={_NUFORC_LIMIT}"
        f"&access_token={_NUFORC_TOKEN}"
    )
    features: list[dict] = []
    try:
        response = fetch_with_curl(query_url, timeout=12)
        if response.status_code == 200:
            features = response.json().get("features", [])
    except Exception:
        # Network or parse failures degrade to "no features" by design.
        features = []
    return features
|
||||
|
||||
|
||||
def _parse_nuforc_tile_date(value: str) -> datetime | None:
|
||||
raw = str(value or "").strip()
|
||||
if not raw:
|
||||
return None
|
||||
raw = raw.replace("T", " ")
|
||||
raw = re.sub(r"\s+local$", "", raw, flags=re.IGNORECASE)
|
||||
raw = re.sub(r"\s+utc$", "", raw, flags=re.IGNORECASE)
|
||||
for fmt in (
|
||||
"%Y-%m-%d %H:%M:%S",
|
||||
"%Y-%m-%d %H:%M",
|
||||
"%Y-%m-%d",
|
||||
"%m/%d/%Y %H:%M",
|
||||
"%m/%d/%Y",
|
||||
):
|
||||
try:
|
||||
return datetime.strptime(raw, fmt)
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
match = re.match(r"^(\d{4}-\d{2}-\d{2})", raw)
|
||||
if match:
|
||||
try:
|
||||
return datetime.strptime(match.group(1), "%Y-%m-%d")
|
||||
except ValueError:
|
||||
return None
|
||||
return None
|
||||
|
||||
|
||||
def _load_nuforc_sightings_cache(*, force_refresh: bool = False) -> list[dict] | None:
    """Load the daily sightings cache; None when absent, stale, or forced off."""
    if force_refresh or not _NUFORC_SIGHTINGS_CACHE_FILE.exists():
        return None
    try:
        payload = json.loads(_NUFORC_SIGHTINGS_CACHE_FILE.read_text(encoding="utf-8"))
        built = payload.get("built", "")
        if not built:
            return None
        built_dt = datetime.fromisoformat(built)
        # Anything older than 24 hours is treated as stale.
        if (datetime.utcnow() - built_dt).total_seconds() > 86400:
            return None
        cached = payload.get("sightings")
        if not isinstance(cached, list):
            return None
        if not cached:
            logger.info("UAP sightings: cache is fresh but empty; rebuilding")
            return None
        logger.info(
            "UAP sightings: loaded %d cached reports from %s",
            len(cached),
            built,
        )
        return cached
    except Exception as e:
        logger.warning("UAP sightings: cache load error: %s", e)
    return None
|
||||
|
||||
|
||||
def _save_nuforc_sightings_cache(sightings: list[dict]) -> None:
    """Persist the daily sightings cache; refuses to overwrite with nothing."""
    if not sightings:
        logger.warning("UAP sightings: refusing to save empty daily cache")
        return
    try:
        _NUFORC_DATA_DIR.mkdir(parents=True, exist_ok=True)
        body = json.dumps(
            {
                "built": datetime.utcnow().isoformat(),
                "count": len(sightings),
                "sightings": sightings,
            },
            separators=(",", ":"),
        )
        _NUFORC_SIGHTINGS_CACHE_FILE.write_text(body, encoding="utf-8")
    except Exception as e:
        logger.warning("UAP sightings: cache save error: %s", e)
|
||||
|
||||
|
||||
def _load_nuforc_location_cache() -> dict[str, list[float] | None]:
    """Read the geocode cache; entries are [lat, lng] or None for known misses."""
    if not _NUFORC_LOCATION_CACHE_FILE.exists():
        return {}
    try:
        stored = json.loads(_NUFORC_LOCATION_CACHE_FILE.read_text(encoding="utf-8"))
        if not isinstance(stored, dict):
            return {}
        cleaned: dict[str, list[float] | None] = {}
        for query, coords in stored.items():
            if not isinstance(query, str):
                continue
            is_numeric_pair = (
                isinstance(coords, list)
                and len(coords) == 2
                and all(isinstance(c, (int, float)) for c in coords)
            )
            if is_numeric_pair:
                cleaned[query] = [float(coords[0]), float(coords[1])]
            elif coords is None:
                # Negative cache: remember queries that could not be resolved.
                cleaned[query] = None
        return cleaned
    except Exception as e:
        logger.warning("UAP sightings: location cache load error: %s", e)
        return {}
|
||||
|
||||
|
||||
def _save_nuforc_location_cache(cache: dict[str, list[float] | None]) -> None:
    """Write the geocode cache to disk; failures are logged, never raised."""
    try:
        _NUFORC_DATA_DIR.mkdir(parents=True, exist_ok=True)
        serialized = json.dumps(cache, separators=(",", ":"))
        _NUFORC_LOCATION_CACHE_FILE.write_text(serialized, encoding="utf-8")
    except Exception as e:
        logger.warning("UAP sightings: location cache save error: %s", e)
|
||||
|
||||
|
||||
def _normalize_uap_location(raw: str) -> str:
|
||||
return re.sub(r"\s+", " ", str(raw or "").strip())
|
||||
|
||||
|
||||
def _uap_country_from_location(location: str, state: str) -> str:
|
||||
if state:
|
||||
return "US"
|
||||
upper = location.upper()
|
||||
if "USA" in upper or "UNITED STATES" in upper:
|
||||
return "US"
|
||||
parts = [part.strip() for part in location.split(",") if part.strip()]
|
||||
if not parts:
|
||||
return "Unknown"
|
||||
country = parts[-1]
|
||||
return country.upper() if len(country) == 2 else country
|
||||
|
||||
|
||||
# Upper-cased country strings treated as "United States", including the
# empty string for legacy US-only NUFORC rows with no country field.
_US_COUNTRY_ALIASES = {
    "", "USA", "US", "U.S.", "U.S.A.",
    "UNITED STATES", "UNITED STATES OF AMERICA",
}
|
||||
|
||||
|
||||
def _uap_geocode_candidates(
    location: str, city: str, state: str, country: str = ""
) -> list[str]:
    """Build geocode query candidates in priority order.

    NUFORC's live databank is international, so we must query with the
    actual country first. Only when the country is empty or explicitly US
    do we fall back to the legacy USA-assumption behavior.
    """
    candidates: list[str] = []
    c = (country or "").strip()
    c_upper = c.upper()
    # The alias set includes "": legacy rows without a country are US.
    is_us = c_upper in _US_COUNTRY_ALIASES

    if not is_us:
        # Non-US: try country-qualified queries first to prevent the
        # geocoder from fuzzy-matching to a same-named US city.
        if city and state:
            candidates.append(f"{city}, {state}, {c}")
        if city:
            candidates.append(f"{city}, {c}")
        if city and state:
            candidates.append(f"{city}, {state}")
        if city:
            candidates.append(city)
    else:
        if city and state:
            candidates.append(f"{city}, {state}, USA")
            candidates.append(f"{city}, {state}")
        if city:
            candidates.append(city)

    # Fall back to progressively shorter slices of the raw location string.
    normalized = _normalize_uap_location(location)
    if normalized:
        candidates.append(normalized)
        parts = [part.strip() for part in normalized.split(",") if part.strip()]
        if len(parts) >= 2:
            candidates.append(", ".join(parts[:2]))
        if parts:
            candidates.append(parts[0])

    # De-duplicate case-insensitively while preserving priority order.
    deduped: list[str] = []
    seen: set[str] = set()
    for candidate in candidates:
        key = candidate.lower()
        if key in seen:
            continue
        seen.add(key)
        deduped.append(candidate)
    return deduped
|
||||
|
||||
|
||||
def _photon_lookup(query: str) -> list[float] | None:
    """Geocode `query` against Komoot's public Photon instance (OSM, keyless).

    Returns [lat, lng] on success, None on any failure. Deliberately does
    NOT go through the shared search_geocode() helper: its airport-name
    token-matcher fallback confidently returns completely wrong coordinates
    on failure, which poisoned the cache for years.
    """
    from urllib.parse import urlencode

    url = "https://photon.komoot.io/api?" + urlencode({"q": query, "limit": 1})
    try:
        response = fetch_with_curl(
            url,
            headers={
                "User-Agent": "ShadowBroker-OSINT/1.0 (NUFORC-UAP-layer)",
                "Accept-Language": "en",
            },
            timeout=10,
        )
    except Exception:
        return None

    if not response or response.status_code != 200:
        return None

    try:
        body = response.json()
    except Exception:
        return None

    feature_list = (body or {}).get("features") or []
    if not feature_list:
        return None

    try:
        point = feature_list[0]["geometry"]["coordinates"]
        # GeoJSON order is [lng, lat] — flip to our [lat, lng] convention.
        return [float(point[1]), float(point[0])]
    except (KeyError, IndexError, TypeError, ValueError):
        return None
|
||||
|
||||
|
||||
def _geocode_uap_location(
    location: str, city: str, state: str, country: str = ""
) -> list[float] | None:
    """Resolve a NUFORC sighting location to [lat, lng] via Photon.

    Tries each candidate query in priority order and stops at the first
    hit. Returns None on total failure; the caller caches None alongside
    real hits so unresolvable queries are not retried every run.
    """
    hits = (
        _photon_lookup(candidate)
        for candidate in _uap_geocode_candidates(location, city, state, country)
    )
    return next((hit for hit in hits if hit), None)
|
||||
|
||||
|
||||
def _build_uap_sighting_id(row: dict, occurred: str, location: str) -> str:
|
||||
raw_id = str(row.get("Sighting", "") or row.get("sighting", "")).strip()
|
||||
if raw_id:
|
||||
return raw_id
|
||||
digest = hashlib.sha1(
|
||||
f"{occurred}|{location}|{row.get('Summary', '')}|{row.get('Text', '')}".encode("utf-8", "ignore")
|
||||
).hexdigest()[:12]
|
||||
return f"NUFORC-{digest}"
|
||||
|
||||
|
||||
def _nuforc_months_for_window(days: int) -> list[str]:
|
||||
"""Enumerate YYYYMM strings covering the rolling `days`-day window.
|
||||
|
||||
Returned newest first. Always includes the current month even if the
|
||||
window technically starts later, because new reports land there.
|
||||
"""
|
||||
today = datetime.utcnow().date()
|
||||
start = today - timedelta(days=days)
|
||||
months: list[str] = []
|
||||
cur = today.replace(day=1)
|
||||
start_floor = start.replace(day=1)
|
||||
while cur >= start_floor:
|
||||
months.append(cur.strftime("%Y%m"))
|
||||
if cur.month == 1:
|
||||
cur = cur.replace(year=cur.year - 1, month=12)
|
||||
else:
|
||||
cur = cur.replace(month=cur.month - 1)
|
||||
return months
|
||||
|
||||
|
||||
def _nuforc_fetch_month_live(yyyymm: str, cookie_jar: Path) -> list[dict]:
    """Pull one month of NUFORC sightings via the live wpDataTables AJAX.

    Returns a list of raw row dicts with the fields we care about:
    id, occurred (YYYY-MM-DD), posted (YYYY-MM-DD), city, state, country,
    shape_raw, summary, explanation. Empty list on any failure — caller
    decides whether a failure is fatal.
    """
    from services.fetchers.nuforc_enrichment import _parse_date

    # curl is shelled out directly here (not via fetch_with_curl) so the
    # on-disk cookie jar (-c/-b) carries the WordPress session between the
    # index GET and the admin-ajax POST below.
    curl_bin = shutil.which("curl") or "curl"
    index_url = _NUFORC_LIVE_INDEX_URL.format(yyyymm=yyyymm)
    ajax_url = _NUFORC_LIVE_AJAX_URL.format(yyyymm=yyyymm)

    # Step 1: GET the month index to capture session cookies + fresh nonce.
    try:
        index_res = subprocess.run(
            [
                curl_bin, "-sL",
                "-A", _NUFORC_LIVE_USER_AGENT,
                "-c", str(cookie_jar),
                "-b", str(cookie_jar),
                index_url,
            ],
            capture_output=True, text=True, timeout=60,
            encoding="utf-8", errors="replace",
        )
    except (subprocess.SubprocessError, OSError) as e:
        logger.warning("NUFORC live: index fetch failed for %s: %s", yyyymm, e)
        return []
    if index_res.returncode != 0 or not index_res.stdout:
        logger.warning(
            "NUFORC live: index fetch exit=%s for %s", index_res.returncode, yyyymm,
        )
        return []
    # The wdtNonce is embedded in the index page HTML and must accompany
    # the AJAX POST, or the plugin rejects the request.
    nonce_match = _NUFORC_LIVE_NONCE_RE.search(index_res.stdout)
    if not nonce_match:
        logger.warning("NUFORC live: wdtNonce not found on index page for %s", yyyymm)
        return []
    nonce = nonce_match.group(1)

    # Step 2: POST to admin-ajax.php with length=-1 to pull the whole month.
    post_data = (
        "draw=1"
        "&columns%5B0%5D%5Bdata%5D=0&columns%5B0%5D%5Bsearchable%5D=true&columns%5B0%5D%5Borderable%5D=false"
        "&columns%5B1%5D%5Bdata%5D=1&columns%5B1%5D%5Bsearchable%5D=true&columns%5B1%5D%5Borderable%5D=true"
        "&order%5B0%5D%5Bcolumn%5D=1&order%5B0%5D%5Bdir%5D=desc"
        "&start=0&length=-1"
        "&search%5Bvalue%5D=&search%5Bregex%5D=false"
        f"&wdtNonce={nonce}"
    )
    try:
        ajax_res = subprocess.run(
            [
                curl_bin, "-sL",
                "-A", _NUFORC_LIVE_USER_AGENT,
                "-c", str(cookie_jar),
                "-b", str(cookie_jar),
                "-X", "POST",
                "-H", f"Referer: {index_url}",
                "-H", "X-Requested-With: XMLHttpRequest",
                "-H", "Content-Type: application/x-www-form-urlencoded",
                "--data", post_data,
                ajax_url,
            ],
            capture_output=True, text=True, timeout=120,
            encoding="utf-8", errors="replace",
        )
    except (subprocess.SubprocessError, OSError) as e:
        logger.warning("NUFORC live: ajax fetch failed for %s: %s", yyyymm, e)
        return []
    if ajax_res.returncode != 0 or not ajax_res.stdout:
        logger.warning(
            "NUFORC live: ajax fetch exit=%s for %s", ajax_res.returncode, yyyymm,
        )
        return []
    try:
        payload = json.loads(ajax_res.stdout)
    except json.JSONDecodeError as e:
        logger.warning("NUFORC live: ajax JSON decode failed for %s: %s", yyyymm, e)
        return []

    # wpDataTables rows are positional lists. Observed column layout:
    # [0]=detail link HTML, [1]=occurred, [2]=city, [3]=state, [4]=country,
    # [5]=shape, [6]=summary, [7]=reported date, [9]=explanation (optional).
    raw_rows = payload.get("data") or []
    out: list[dict] = []
    for raw in raw_rows:
        # Defensive: require at least the 8 columns indexed below.
        if not isinstance(raw, list) or len(raw) < 8:
            continue
        link_html = str(raw[0] or "")
        occurred_raw = str(raw[1] or "")
        city = str(raw[2] or "").strip()
        state = str(raw[3] or "").strip()
        country = str(raw[4] or "").strip()
        shape_raw = (str(raw[5] or "").strip() or "Unknown")
        summary = str(raw[6] or "").strip()
        reported_raw = str(raw[7] or "")
        explanation = str(raw[9] or "").strip() if len(raw) > 9 and raw[9] else ""

        # Rows without a parseable date or any location info are unusable.
        occurred_ymd = _parse_date(occurred_raw)
        if not occurred_ymd:
            continue
        if not city and not state and not country:
            continue

        # Prefer the upstream sighting id from the detail link; otherwise
        # derive a deterministic id so re-runs dedupe consistently.
        id_match = _NUFORC_LIVE_SIGHTING_ID_RE.search(link_html)
        if id_match:
            sighting_id = f"NUFORC-{id_match.group(1)}"
        else:
            digest = hashlib.sha1(
                f"{occurred_ymd}|{city}|{state}|{summary}".encode("utf-8", "ignore")
            ).hexdigest()[:12]
            sighting_id = f"NUFORC-{digest}"

        # Keep popup summaries tidy: hard-cap at 280 chars with an ellipsis.
        if summary and len(summary) > 280:
            summary = summary[:277] + "..."
        if not summary:
            summary = "Sighting reported"

        out.append({
            "id": sighting_id,
            "occurred": occurred_ymd,
            "posted": _parse_date(reported_raw) or occurred_ymd,
            "city": city,
            "state": state,
            "country": country,
            "shape_raw": shape_raw,
            "summary": summary,
            "explanation": explanation,
        })
    return out
|
||||
|
||||
|
||||
def _build_recent_uap_sightings() -> list[dict]:
    """Build the rolling 1-year UAP sightings layer from live NUFORC data.

    Hits nuforc.org's public sub-index once per month in the window, drops
    anything outside the exact day-precision cutoff, dedupes by sighting id,
    geocodes city+state via the existing location cache, and returns rows
    keyed to the same schema the frontend already renders.

    Raises:
        RuntimeError: when zero rows survive across the whole window, so
            the caller does not overwrite a good cache with nothing.
    """
    # Day-precision cutoff; "occurred" is YYYY-MM-DD so plain string
    # comparison below orders correctly.
    cutoff_dt = datetime.utcnow() - timedelta(days=_NUFORC_RECENT_DAYS)
    cutoff_str = cutoff_dt.strftime("%Y-%m-%d")
    months = _nuforc_months_for_window(_NUFORC_RECENT_DAYS)

    try:
        _NUFORC_DATA_DIR.mkdir(parents=True, exist_ok=True)
    except Exception:
        pass

    rows: list[dict] = []
    # location -> (city, state, country) for the geocode pass below.
    locations: dict[str, tuple[str, str, str]] = {}
    seen_ids: set[str] = set()
    total_pulled = 0
    months_with_data = 0

    # ── Pass 1: pull, window-filter, and dedupe the raw month feeds ──
    for yyyymm in months:
        month_rows = _nuforc_fetch_month_live(yyyymm, _NUFORC_LIVE_SESSION_COOKIES)
        if month_rows:
            months_with_data += 1
        total_pulled += len(month_rows)
        for row in month_rows:
            if row["occurred"] < cutoff_str:
                continue
            if row["id"] in seen_ids:
                continue
            seen_ids.add(row["id"])

            # Build the geocode key as "City, State, Country" to match the
            # existing 3,000+ entry location cache (format: "Toronto, ON, Canada").
            parts = [row["city"], row["state"], row["country"]]
            location = _normalize_uap_location(
                ", ".join(p for p in parts if p) if any(parts) else ""
            )
            if not location:
                continue

            row["location"] = location
            locations.setdefault(location, (row["city"], row["state"], row["country"]))
            row["shape"] = (
                _normalize_uap_shape(row["shape_raw"])
                if row["shape_raw"] != "Unknown"
                else "unknown"
            )
            if not row["country"]:
                row["country"] = _uap_country_from_location(location, row["state"])
            rows.append(row)

    # Clean up the cookie jar — we don't reuse it across runs.
    try:
        if _NUFORC_LIVE_SESSION_COOKIES.exists():
            _NUFORC_LIVE_SESSION_COOKIES.unlink()
    except Exception:
        pass

    # Source-integrity canary: if the upstream plugin changed its
    # DataTables schema or the wdtNonce regex is stale, total_pulled
    # collapses to ~0 without any HTTP error. assert_canary logs a loud
    # ERROR so the failure is visible in the health registry and the
    # daily refresh log, instead of silently serving a stale cache.
    from services.slo import assert_canary
    assert_canary("uap_sightings", total_pulled)

    if not rows:
        raise RuntimeError(
            f"NUFORC live: zero rows pulled across {len(months)} months "
            f"(months_with_data={months_with_data})"
        )

    from services.geocode_validate import coord_in_country

    # ── Pass 2: geocode locations the cache hasn't seen yet ──
    location_cache = _load_nuforc_location_cache()
    missing_locations = [location for location in locations if location not in location_cache]
    if missing_locations:
        logger.info(
            "UAP sightings: geocoding %d new locations (throttled at %.1fs spacing)",
            len(missing_locations),
            _NUFORC_GEOCODE_SPACING_S,
        )
        # Sequential with spacing — Photon is fast and lenient but we
        # stay sub-second to be polite. Incremental cache saves every 50
        # hits keep long runs resumable.
        resolved = 0
        bbox_rejected = 0
        save_every = 50
        for idx, location in enumerate(missing_locations):
            city, state, country = locations[location]
            coords = None
            try:
                coords = _geocode_uap_location(location, city, state, country)
            except Exception:
                coords = None

            # Country-bbox post-filter: reject namesake collisions like
            # "Milan, WI" landing in Milan, Italy. Unknown countries
            # (bbox not registered) are passed through unchanged.
            if coords and country:
                inside = coord_in_country(coords[0], coords[1], country)
                if inside is False:
                    logger.warning(
                        "UAP sightings: bbox reject %r -> (%.3f, %.3f) not in %s",
                        location, coords[0], coords[1], country,
                    )
                    coords = None
                    bbox_rejected += 1

            # None is cached too, so unresolvable queries aren't retried.
            location_cache[location] = coords
            if coords:
                resolved += 1

            if (idx + 1) % save_every == 0:
                _save_nuforc_location_cache(location_cache)
                logger.info(
                    "UAP sightings: geocoded %d/%d (%d resolved, %d bbox-rejected)",
                    idx + 1, len(missing_locations), resolved, bbox_rejected,
                )
            if idx + 1 < len(missing_locations):
                time.sleep(_NUFORC_GEOCODE_SPACING_S)
        _save_nuforc_location_cache(location_cache)
        logger.info(
            "UAP sightings: geocoding complete — %d/%d resolved, %d bbox-rejected",
            resolved, len(missing_locations), bbox_rejected,
        )

    # ── Pass 3: join rows with cached coords into the frontend schema ──
    sightings: list[dict] = []
    skipped_unmapped = 0
    skipped_bbox = 0
    for row in rows:
        coords = location_cache.get(row["location"])
        if not coords:
            skipped_unmapped += 1
            continue
        # Apply bbox filter to pre-existing cache entries too — this
        # cleans up the ~1-2% of cached coords that pre-dated the bbox
        # check without requiring a full cache rebuild.
        if row.get("country"):
            inside = coord_in_country(coords[0], coords[1], row["country"])
            if inside is False:
                skipped_bbox += 1
                continue
        sightings.append(
            {
                "id": row["id"],
                "date_time": row["occurred"],
                "city": row["city"],
                "state": row["state"],
                "country": row["country"],
                "shape": row["shape"],
                "shape_raw": row["shape_raw"],
                "duration": row.get("duration", ""),
                "summary": row["summary"],
                "posted": row["posted"],
                "lat": float(coords[0]),
                "lng": float(coords[1]),
                "count": 1,
                "source": "NUFORC",
            }
        )
        if row.get("explanation"):
            sightings[-1]["explanation"] = row["explanation"]

    # Newest first; posted date and id break ties deterministically.
    sightings.sort(
        key=lambda sighting: (
            sighting.get("date_time", ""),
            sighting.get("posted", ""),
            str(sighting.get("id", "")),
        ),
        reverse=True,
    )
    logger.info(
        "UAP sightings: %d mapped reports from %d rows across %d months "
        "(cutoff %s, %d unmapped, %d bbox-rejected)",
        len(sightings),
        total_pulled,
        len(months),
        cutoff_str,
        skipped_unmapped,
        skipped_bbox,
    )
    return sightings
|
||||
|
||||
|
||||
@with_retry(max_retries=1, base_delay=5)
def fetch_uap_sightings(*, force_refresh: bool = False):
    """Fetch last-year UAP sightings from NUFORC.

    Startup reads the cached daily snapshot when it is still fresh. The daily
    scheduler forces a rebuild so this layer updates once per day instead of
    churning continuously.

    Args:
        force_refresh: When True, bypass the daily snapshot cache and
            rebuild from live NUFORC data.
    """
    # Fix: removed ~250 lines of unreachable legacy Mapbox-tilequery code
    # that sat after the unconditional `return` below (it re-initialized
    # `sightings`, queried `_TILEQUERY_GRID`, etc. and could never run).
    from services.fetchers._store import is_any_active

    # Skip the expensive live scrape entirely when no client has the
    # layer enabled.
    if not is_any_active("uap_sightings"):
        return

    sightings = _load_nuforc_sightings_cache(force_refresh=force_refresh)
    if sightings is None:
        # Cache miss or forced rebuild: pull live data and persist the
        # daily snapshot for the next startup.
        sightings = _build_recent_uap_sightings()
        _save_nuforc_sightings_cache(sightings)

    with _data_lock:
        latest_data["uap_sightings"] = sightings
    _mark_fresh("uap_sightings")
    return
|
||||
|
||||
@@ -1,20 +1,24 @@
|
||||
"""
|
||||
Fuel burn & CO2 emissions estimator for private jets.
|
||||
Fuel burn & CO2 emissions estimator.
|
||||
Based on manufacturer-published cruise fuel burn rates (GPH at long-range cruise).
|
||||
1 US gallon of Jet-A produces ~21.1 lbs (9.57 kg) of CO2.
|
||||
|
||||
Piston entries use 100LL (avgas), which is close enough to Jet-A in CO2 yield
|
||||
(~8.4 kg/gal vs 9.57 kg/gal); we keep one constant to stay simple — the result
|
||||
is a slight over-estimate for piston aircraft, which is preferable to under.
|
||||
"""
|
||||
|
||||
JET_A_CO2_KG_PER_GALLON = 9.57
|
||||
|
||||
# ICAO type code -> gallons per hour at long-range cruise
|
||||
FUEL_BURN_GPH: dict[str, int] = {
|
||||
# Gulfstream
|
||||
# ── Gulfstream ─────────────────────────────────────────────────────
|
||||
"GLF6": 430, # G650/G650ER
|
||||
"G700": 480, # G700
|
||||
"GLF5": 390, # G550
|
||||
"GVSP": 400, # GV-SP
|
||||
"GLF4": 330, # G-IV
|
||||
# Bombardier
|
||||
# ── Bombardier business ────────────────────────────────────────────
|
||||
"GL7T": 490, # Global 7500
|
||||
"GLEX": 430, # Global Express/6000/6500
|
||||
"GL5T": 420, # Global 5000/5500
|
||||
@@ -22,51 +26,208 @@ FUEL_BURN_GPH: dict[str, int] = {
|
||||
"CL60": 310, # Challenger 604/605
|
||||
"CL30": 200, # Challenger 300
|
||||
"CL65": 320, # Challenger 650
|
||||
# Dassault
|
||||
# ── Bombardier regional jets ──────────────────────────────────────
|
||||
"CRJ2": 360, # CRJ-100/200
|
||||
"CRJ7": 380, # CRJ-700
|
||||
"CRJ9": 410, # CRJ-900
|
||||
"CRJX": 440, # CRJ-1000
|
||||
# ── Dassault ───────────────────────────────────────────────────────
|
||||
"F7X": 350, # Falcon 7X
|
||||
"F8X": 370, # Falcon 8X
|
||||
"F900": 285, # Falcon 900/900EX/900LX
|
||||
"F2TH": 230, # Falcon 2000
|
||||
"FA50": 240, # Falcon 50
|
||||
# Cessna
|
||||
# ── Cessna Citation ────────────────────────────────────────────────
|
||||
"CITX": 280, # Citation X
|
||||
"C750": 280, # Citation X (alt code)
|
||||
"C68A": 195, # Citation Latitude
|
||||
"C700": 230, # Citation Longitude
|
||||
"C680": 220, # Citation Sovereign
|
||||
"C560": 190, # Citation Excel/XLS
|
||||
"C56X": 195, # Citation Excel/XLS/XLS+
|
||||
"C560": 190, # Citation Excel/XLS (legacy)
|
||||
"C550": 165, # Citation II/Bravo/V
|
||||
"C525": 80, # Citation CJ1
|
||||
"C25A": 100, # CJ1+ / 525A
|
||||
"C25B": 110, # CJ2+ / 525B
|
||||
"C25C": 130, # CJ4 (some operators)
|
||||
"C510": 75, # Citation Mustang
|
||||
"C650": 240, # Citation III/VI/VII
|
||||
"CJ3": 120, # CJ3
|
||||
"CJ4": 135, # CJ4
|
||||
# Boeing
|
||||
"B737": 850, # BBJ (737)
|
||||
"B738": 920, # BBJ2 (737-800)
|
||||
# ── Cessna piston / turboprop singles & twins ─────────────────────
|
||||
"C172": 9, # Skyhawk
|
||||
"C152": 6,
|
||||
"C150": 6,
|
||||
"C170": 8,
|
||||
"C177": 11,
|
||||
"C180": 12,
|
||||
"C182": 13, # Skylane
|
||||
"C185": 14,
|
||||
"C206": 15,
|
||||
"C208": 50, # Caravan (turboprop)
|
||||
"C210": 18,
|
||||
"C310": 32,
|
||||
"C340": 38,
|
||||
"C414": 36,
|
||||
"C421": 40,
|
||||
# ── Boeing mainline ────────────────────────────────────────────────
|
||||
"B737": 850, # 737-700 / BBJ
|
||||
"B738": 920, # 737-800
|
||||
"B739": 880, # 737-900/900ER
|
||||
"B38M": 700, # 737-8 MAX
|
||||
"B39M": 740, # 737-9 MAX
|
||||
"B752": 1100, # 757-200
|
||||
"B753": 1200, # 757-300
|
||||
"B762": 1400, # 767-200
|
||||
"B763": 1450, # 767-300/300ER
|
||||
"B764": 1500, # 767-400ER
|
||||
"B772": 1850, # 777-200
|
||||
"B77L": 1900, # 777-200LR / 777F
|
||||
"B77W": 2050, # 777-300ER
|
||||
"B788": 1200, # 787-8
|
||||
# Airbus
|
||||
"A318": 780, # ACJ318
|
||||
"A319": 850, # ACJ319
|
||||
"A320": 900, # ACJ320
|
||||
"B789": 1300, # 787-9
|
||||
"B78X": 1350, # 787-10
|
||||
"B744": 3050, # 747-400
|
||||
"B748": 2900, # 747-8
|
||||
# ── Airbus mainline ────────────────────────────────────────────────
|
||||
"A318": 780, # A318
|
||||
"A319": 850, # A319
|
||||
"A320": 900, # A320
|
||||
"A321": 990, # A321
|
||||
"A19N": 580, # A319neo
|
||||
"A20N": 580, # A320neo
|
||||
"A21N": 700, # A321neo
|
||||
"A332": 1500, # A330-200
|
||||
"A333": 1550, # A330-300
|
||||
"A338": 1300, # A330-800neo
|
||||
"A339": 1350, # A330-900neo
|
||||
"A343": 1800, # A340-300
|
||||
"A346": 2100, # A340-600
|
||||
# Pilatus
|
||||
"A359": 1450, # A350-900
|
||||
"A35K": 1600, # A350-1000
|
||||
"A388": 3200, # A380-800
|
||||
# ── Embraer regional / business ───────────────────────────────────
|
||||
"E135": 300, # Legacy 600/650 (regional ERJ-135 base)
|
||||
"E145": 320, # ERJ-145
|
||||
"E170": 460, # E170
|
||||
"E75L": 490, # E175-LR
|
||||
"E75S": 490, # E175 standard
|
||||
"E175": 490, # E175 (some)
|
||||
"E190": 580, # E190
|
||||
"E195": 600, # E195
|
||||
"E290": 510, # E190-E2
|
||||
"E295": 540, # E195-E2
|
||||
"E50P": 135, # Phenom 300 (also Phenom 100 var)
|
||||
"E55P": 185, # Praetor 500 / Legacy 500
|
||||
"E545": 170, # Praetor 500 (alt)
|
||||
"E500": 80, # Phenom 100
|
||||
# ── ATR / Bombardier / Saab turboprops ────────────────────────────
|
||||
"AT43": 230, # ATR 42-300/-320
|
||||
"AT45": 230, # ATR 42-500
|
||||
"AT46": 250, # ATR 42-600
|
||||
"AT72": 300, # ATR 72-200/-210
|
||||
"AT75": 280, # ATR 72-500
|
||||
"AT76": 280, # ATR 72-600
|
||||
"DH8A": 220, # Dash 8 -100
|
||||
"DH8B": 240, # Dash 8 -200
|
||||
"DH8C": 280, # Dash 8 -300
|
||||
"DH8D": 300, # Dash 8 Q400
|
||||
"SF34": 200, # Saab 340
|
||||
"SB20": 220, # Saab 2000
|
||||
# ── Pilatus / Daher single-engine turboprops ──────────────────────
|
||||
"PC24": 115, # PC-24
|
||||
"PC12": 60, # PC-12
|
||||
# Embraer
|
||||
"E55P": 185, # Legacy 500
|
||||
"E135": 300, # Legacy 600/650
|
||||
"E50P": 135, # Phenom 300
|
||||
"E500": 80, # Phenom 100
|
||||
# Learjet
|
||||
"TBM7": 60, # TBM 700/850
|
||||
"TBM8": 65, # TBM 850 alt
|
||||
"TBM9": 70, # TBM 900/930/940/960
|
||||
"M600": 60, # Piper M600
|
||||
"P46T": 22, # PA-46 Meridian (turboprop variant)
|
||||
# ── Learjet ────────────────────────────────────────────────────────
|
||||
"LJ60": 195, # Learjet 60
|
||||
"LJ75": 185, # Learjet 75
|
||||
"LJ45": 175, # Learjet 45
|
||||
# Hawker
|
||||
"LJ31": 165, # Learjet 31
|
||||
"LJ40": 175, # Learjet 40
|
||||
"LJ55": 195, # Learjet 55
|
||||
# ── Hawker / Beechjet ─────────────────────────────────────────────
|
||||
"H25B": 210, # Hawker 800/800XP
|
||||
"H25C": 215, # Hawker 900XP
|
||||
# Beechcraft
|
||||
"BE40": 150, # Beechjet 400 / Hawker 400XP
|
||||
"PRM1": 130, # Premier I
|
||||
# ── Beechcraft King Air ───────────────────────────────────────────
|
||||
"B350": 100, # King Air 350
|
||||
"B200": 80, # King Air 200/250
|
||||
"BE20": 80, # K-Air 200 (alt)
|
||||
"BE9L": 60, # K-Air 90
|
||||
"BE9T": 70, # K-Air F90
|
||||
"BE10": 100, # K-Air 100
|
||||
"BE30": 90, # K-Air 300
|
||||
# ── Beechcraft / Cirrus / Piper / Mooney pistons ──────────────────
|
||||
"BE23": 9, # Sundowner
|
||||
"BE33": 13, # Bonanza 33
|
||||
"BE35": 14, # Bonanza V-tail
|
||||
"BE36": 16, # A36 Bonanza
|
||||
"BE55": 24, # Baron 55
|
||||
"BE58": 28, # Baron 58
|
||||
"BE76": 17, # Duchess
|
||||
"BE95": 20, # Travel Air
|
||||
"P28A": 10, # PA-28 Warrior/Archer
|
||||
"P28B": 11, # PA-28 Cherokee
|
||||
"P28R": 12, # PA-28R Arrow
|
||||
"P32R": 14, # PA-32R Lance/Saratoga
|
||||
"PA11": 5, # Cub Special
|
||||
"PA12": 6, # Super Cruiser
|
||||
"PA18": 6, # Super Cub
|
||||
"PA22": 8, # Tri-Pacer
|
||||
"PA23": 18, # Apache / Aztec
|
||||
"PA24": 12, # Comanche
|
||||
"PA25": 12, # Pawnee
|
||||
"PA28": 10, # PA-28 generic
|
||||
"PA30": 16, # Twin Comanche
|
||||
"PA31": 30, # Navajo
|
||||
"PA32": 14, # Cherokee Six / Saratoga
|
||||
"PA34": 18, # Seneca
|
||||
"PA38": 5, # Tomahawk
|
||||
"PA44": 17, # Seminole
|
||||
"PA46": 18, # Malibu / Mirage / Matrix
|
||||
"M20P": 12, # Mooney M20 (generic)
|
||||
"SR20": 11, # Cirrus SR20
|
||||
"SR22": 16, # Cirrus SR22
|
||||
"S22T": 19, # SR22T (turbo)
|
||||
"DA40": 9, # Diamond DA40
|
||||
"DA42": 14, # Diamond DA42 TwinStar
|
||||
"DA62": 17, # Diamond DA62
|
||||
"DV20": 6, # Diamond Katana
|
||||
# ── Helicopters (civilian) ────────────────────────────────────────
|
||||
"A109": 60, # AW109
|
||||
"A119": 50, # AW119
|
||||
"A139": 130, # AW139
|
||||
"A169": 90, # AW169
|
||||
"A189": 145, # AW189
|
||||
"AS35": 55, # AS350 AStar
|
||||
"AS50": 55, # AStar (alt)
|
||||
"AS65": 110, # Dauphin
|
||||
"B06": 35, # Bell 206 JetRanger
|
||||
"B407": 50, # Bell 407
|
||||
"B412": 145, # Bell 412
|
||||
"B429": 80, # Bell 429
|
||||
"B505": 35, # Bell 505
|
||||
"EC30": 50, # H125 / EC130
|
||||
"EC35": 70, # EC135
|
||||
"EC45": 85, # EC145
|
||||
"EC75": 130, # EC175
|
||||
"H125": 55,
|
||||
"H130": 50,
|
||||
"H135": 70,
|
||||
"H145": 85,
|
||||
"H155": 110,
|
||||
"H160": 95,
|
||||
"H175": 130,
|
||||
"R22": 9, # Robinson R22 (piston)
|
||||
"R44": 16, # Robinson R44 (piston)
|
||||
"R66": 30, # Robinson R66 (turbine)
|
||||
"S76": 140, # Sikorsky S-76
|
||||
"S92": 220, # Sikorsky S-92
|
||||
}
|
||||
|
||||
# Common string names -> ICAO type code
|
||||
@@ -108,13 +269,23 @@ def get_emissions_info(model: str) -> dict | None:
|
||||
if not model:
|
||||
return None
|
||||
model_clean = model.strip()
|
||||
model_upper = model_clean.upper()
|
||||
# Try direct ICAO code match first
|
||||
gph = FUEL_BURN_GPH.get(model_clean.upper())
|
||||
gph = FUEL_BURN_GPH.get(model_upper)
|
||||
if gph is None:
|
||||
# Try alias lookup
|
||||
code = _ALIASES.get(model_clean)
|
||||
if code:
|
||||
gph = FUEL_BURN_GPH.get(code)
|
||||
if gph is None:
|
||||
# Friendly names from the Plane-Alert DB often lead with the ICAO type
|
||||
# code as the first token (e.g. "B200 Super King Air"). Probe each
|
||||
# token against FUEL_BURN_GPH directly.
|
||||
for token in model_upper.replace("-", " ").replace(",", " ").split():
|
||||
candidate = FUEL_BURN_GPH.get(token)
|
||||
if candidate is not None:
|
||||
gph = candidate
|
||||
break
|
||||
if gph is None:
|
||||
# Fuzzy: check if any alias is a substring
|
||||
model_lower = model_clean.lower()
|
||||
|
||||
@@ -13,12 +13,13 @@ import concurrent.futures
|
||||
import random
|
||||
import requests
|
||||
from datetime import datetime
|
||||
from cachetools import TTLCache
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh
|
||||
from services.fetchers.plane_alert import enrich_with_plane_alert, enrich_with_tracked_names
|
||||
from services.fetchers.emissions import get_emissions_info
|
||||
from services.fetchers.retry import with_retry
|
||||
from services.fetchers.route_database import lookup_route
|
||||
from services.fetchers.aircraft_database import lookup_aircraft_type
|
||||
from services.constants import GPS_JAMMING_NACP_THRESHOLD, GPS_JAMMING_MIN_RATIO, GPS_JAMMING_MIN_AIRCRAFT
|
||||
|
||||
logger = logging.getLogger("services.data_fetcher")
|
||||
@@ -76,6 +77,7 @@ opensky_client = OpenSkyClient(
|
||||
# Throttling and caching for OpenSky (400 req/day limit)
|
||||
last_opensky_fetch = 0
|
||||
cached_opensky_flights = []
|
||||
_opensky_cache_lock = threading.Lock()
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Supplemental ADS-B sources for blind-spot gap-filling
|
||||
@@ -98,6 +100,7 @@ _AIRPLANES_LIVE_DELAY_SECONDS = 1.2
|
||||
_AIRPLANES_LIVE_DELAY_JITTER_SECONDS = 0.4
|
||||
last_supplemental_fetch = 0
|
||||
cached_supplemental_flights = []
|
||||
_supplemental_cache_lock = threading.Lock()
|
||||
|
||||
# Helicopter type codes (backend classification)
|
||||
_HELI_TYPES_BACKEND = {
|
||||
@@ -255,10 +258,11 @@ flight_trails = {} # {icao_hex: {points: [[lat, lng, alt, ts], ...], last_seen:
|
||||
_trails_lock = threading.Lock()
|
||||
_MAX_TRACKED_TRAILS = 2000
|
||||
|
||||
# Routes cache
|
||||
dynamic_routes_cache = TTLCache(maxsize=5000, ttl=7200)
|
||||
routes_fetch_in_progress = False
|
||||
_routes_lock = threading.Lock()
|
||||
# Route enrichment is now served from services.fetchers.route_database, which
|
||||
# bulk-loads vrs-standing-data.adsb.lol/routes.csv.gz once per day and looks up
|
||||
# callsigns from an in-memory index. Replaces the legacy /api/0/routeset POST,
|
||||
# which was both blocked under the ShadowBroker UA (HTTP 451) and broken
|
||||
# upstream (returning 201 with empty body even for unblocked clients).
|
||||
|
||||
|
||||
def _fetch_supplemental_sources(seen_hex: set) -> list:
|
||||
@@ -266,12 +270,13 @@ def _fetch_supplemental_sources(seen_hex: set) -> list:
|
||||
global last_supplemental_fetch, cached_supplemental_flights
|
||||
|
||||
now = time.time()
|
||||
if now - last_supplemental_fetch < _SUPPLEMENTAL_FETCH_INTERVAL:
|
||||
return [
|
||||
f
|
||||
for f in cached_supplemental_flights
|
||||
if f.get("hex", "").lower().strip() not in seen_hex
|
||||
]
|
||||
with _supplemental_cache_lock:
|
||||
if now - last_supplemental_fetch < _SUPPLEMENTAL_FETCH_INTERVAL:
|
||||
return [
|
||||
f
|
||||
for f in cached_supplemental_flights
|
||||
if f.get("hex", "").lower().strip() not in seen_hex
|
||||
]
|
||||
|
||||
new_supplemental = []
|
||||
supplemental_hex = set()
|
||||
@@ -363,8 +368,9 @@ def _fetch_supplemental_sources(seen_hex: set) -> list:
|
||||
|
||||
fi_count = len(new_supplemental) - ap_count
|
||||
|
||||
cached_supplemental_flights = new_supplemental
|
||||
last_supplemental_fetch = now
|
||||
with _supplemental_cache_lock:
|
||||
cached_supplemental_flights = new_supplemental
|
||||
last_supplemental_fetch = now
|
||||
if new_supplemental:
|
||||
_mark_fresh("supplemental_flights")
|
||||
|
||||
@@ -375,73 +381,6 @@ def _fetch_supplemental_sources(seen_hex: set) -> list:
|
||||
return new_supplemental
|
||||
|
||||
|
||||
def fetch_routes_background(sampled):
|
||||
global routes_fetch_in_progress
|
||||
with _routes_lock:
|
||||
if routes_fetch_in_progress:
|
||||
return
|
||||
routes_fetch_in_progress = True
|
||||
|
||||
try:
|
||||
callsigns_to_query = []
|
||||
for f in sampled:
|
||||
c_sign = str(f.get("flight", "")).strip()
|
||||
if c_sign and c_sign != "UNKNOWN":
|
||||
callsigns_to_query.append(
|
||||
{"callsign": c_sign, "lat": f.get("lat", 0), "lng": f.get("lon", 0)}
|
||||
)
|
||||
|
||||
batch_size = 100
|
||||
batches = [
|
||||
callsigns_to_query[i : i + batch_size]
|
||||
for i in range(0, len(callsigns_to_query), batch_size)
|
||||
]
|
||||
|
||||
for batch in batches:
|
||||
try:
|
||||
r = fetch_with_curl(
|
||||
"https://api.adsb.lol/api/0/routeset",
|
||||
method="POST",
|
||||
json_data={"planes": batch},
|
||||
timeout=15,
|
||||
)
|
||||
if r.status_code == 200:
|
||||
route_data = r.json()
|
||||
route_list = []
|
||||
if isinstance(route_data, dict):
|
||||
route_list = route_data.get("value", [])
|
||||
elif isinstance(route_data, list):
|
||||
route_list = route_data
|
||||
|
||||
for route in route_list:
|
||||
callsign = route.get("callsign", "")
|
||||
airports = route.get("_airports", [])
|
||||
if airports and len(airports) >= 2:
|
||||
orig_apt = airports[0]
|
||||
dest_apt = airports[-1]
|
||||
with _routes_lock:
|
||||
dynamic_routes_cache[callsign] = {
|
||||
"orig_name": f"{orig_apt.get('iata', '')}: {orig_apt.get('name', 'Unknown')}",
|
||||
"dest_name": f"{dest_apt.get('iata', '')}: {dest_apt.get('name', 'Unknown')}",
|
||||
"orig_loc": [orig_apt.get("lon", 0), orig_apt.get("lat", 0)],
|
||||
"dest_loc": [dest_apt.get("lon", 0), dest_apt.get("lat", 0)],
|
||||
}
|
||||
time.sleep(0.25)
|
||||
except (
|
||||
requests.RequestException,
|
||||
ConnectionError,
|
||||
TimeoutError,
|
||||
ValueError,
|
||||
KeyError,
|
||||
json.JSONDecodeError,
|
||||
OSError,
|
||||
) as e:
|
||||
logger.debug(f"Route batch request failed: {e}")
|
||||
finally:
|
||||
with _routes_lock:
|
||||
routes_fetch_in_progress = False
|
||||
|
||||
|
||||
def _classify_and_publish(all_adsb_flights):
|
||||
"""Shared pipeline: normalize raw ADS-B data → classify → merge → publish to latest_data.
|
||||
|
||||
@@ -453,13 +392,6 @@ def _classify_and_publish(all_adsb_flights):
|
||||
if not all_adsb_flights:
|
||||
return
|
||||
|
||||
with _routes_lock:
|
||||
already_running = routes_fetch_in_progress
|
||||
if not already_running:
|
||||
threading.Thread(
|
||||
target=fetch_routes_background, args=(all_adsb_flights,), daemon=True
|
||||
).start()
|
||||
|
||||
for f in all_adsb_flights:
|
||||
try:
|
||||
lat = f.get("lat")
|
||||
@@ -478,8 +410,7 @@ def _classify_and_publish(all_adsb_flights):
|
||||
origin_name = "UNKNOWN"
|
||||
dest_name = "UNKNOWN"
|
||||
|
||||
with _routes_lock:
|
||||
cached_route = dynamic_routes_cache.get(flight_str)
|
||||
cached_route = lookup_route(flight_str)
|
||||
if cached_route:
|
||||
origin_name = cached_route["orig_name"]
|
||||
dest_name = cached_route["dest_name"]
|
||||
@@ -501,7 +432,18 @@ def _classify_and_publish(all_adsb_flights):
|
||||
gs_knots = f.get("gs")
|
||||
speed_knots = round(gs_knots, 1) if isinstance(gs_knots, (int, float)) else None
|
||||
|
||||
model_upper = f.get("t", "").upper()
|
||||
# OpenSky's /states/all doesn't carry the aircraft type, so its
|
||||
# records arrive with t="Unknown". Backfill from the OpenSky
|
||||
# aircraft metadata DB by ICAO24 hex so heli classification and
|
||||
# downstream emissions enrichment both see a real type code.
|
||||
raw_type = str(f.get("t") or "").strip()
|
||||
if not raw_type or raw_type.lower() == "unknown":
|
||||
looked_up_type = lookup_aircraft_type(f.get("hex", ""))
|
||||
if looked_up_type:
|
||||
f["t"] = looked_up_type
|
||||
raw_type = looked_up_type
|
||||
|
||||
model_upper = raw_type.upper()
|
||||
if model_upper == "TWR":
|
||||
continue
|
||||
|
||||
@@ -543,8 +485,14 @@ def _classify_and_publish(all_adsb_flights):
|
||||
for f in flights:
|
||||
enrich_with_plane_alert(f)
|
||||
enrich_with_tracked_names(f)
|
||||
# Attach fuel-burn / CO2 emissions estimate when model is known
|
||||
# Attach fuel-burn / CO2 emissions estimate when model is known.
|
||||
# OpenSky's /states/all doesn't carry aircraft type, so OpenSky-sourced
|
||||
# flights arrive with model="Unknown". For tracked planes, the
|
||||
# Plane-Alert DB has the friendly type name in alert_type, and the
|
||||
# emissions aliases table already maps those names to ICAO codes.
|
||||
model = f.get("model")
|
||||
if not model or model.strip().lower() in {"", "unknown"}:
|
||||
model = f.get("alert_type") or ""
|
||||
if model:
|
||||
emi = get_emissions_info(model)
|
||||
if emi:
|
||||
@@ -618,6 +566,10 @@ def _classify_and_publish(all_adsb_flights):
|
||||
latest_data["flights"] = flights
|
||||
|
||||
# Merge tracked civilian flights with tracked military flights
|
||||
# Stale tracked flights (not seen in any ADS-B source for >5 min) are dropped.
|
||||
_TRACKED_STALE_S = 300 # 5 minutes
|
||||
_merge_ts = time.time()
|
||||
|
||||
with _data_lock:
|
||||
existing_tracked = copy.deepcopy(latest_data.get("tracked_flights", []))
|
||||
|
||||
@@ -625,10 +577,12 @@ def _classify_and_publish(all_adsb_flights):
|
||||
for t in tracked:
|
||||
icao = t.get("icao24", "").upper()
|
||||
if icao:
|
||||
t["_seen_at"] = _merge_ts
|
||||
fresh_tracked_map[icao] = t
|
||||
|
||||
merged_tracked = []
|
||||
seen_icaos = set()
|
||||
stale_dropped = 0
|
||||
for old_t in existing_tracked:
|
||||
icao = old_t.get("icao24", "").upper()
|
||||
if icao in fresh_tracked_map:
|
||||
@@ -639,8 +593,13 @@ def _classify_and_publish(all_adsb_flights):
|
||||
merged_tracked.append(fresh)
|
||||
seen_icaos.add(icao)
|
||||
else:
|
||||
merged_tracked.append(old_t)
|
||||
seen_icaos.add(icao)
|
||||
# Keep stale entry only if it was seen recently
|
||||
age = _merge_ts - old_t.get("_seen_at", 0)
|
||||
if age < _TRACKED_STALE_S:
|
||||
merged_tracked.append(old_t)
|
||||
seen_icaos.add(icao)
|
||||
else:
|
||||
stale_dropped += 1
|
||||
|
||||
for icao, t in fresh_tracked_map.items():
|
||||
if icao not in seen_icaos:
|
||||
@@ -649,10 +608,12 @@ def _classify_and_publish(all_adsb_flights):
|
||||
with _data_lock:
|
||||
latest_data["tracked_flights"] = merged_tracked
|
||||
logger.info(
|
||||
f"Tracked flights: {len(merged_tracked)} total ({len(fresh_tracked_map)} fresh from civilian)"
|
||||
f"Tracked flights: {len(merged_tracked)} total ({len(fresh_tracked_map)} fresh from civilian, {stale_dropped} stale dropped)"
|
||||
)
|
||||
|
||||
# --- Trail Accumulation ---
|
||||
_TRAIL_INTERVAL_S = 600 # only record a new trail point every 10 minutes
|
||||
|
||||
def _accumulate_trail(f, now_ts, check_route=True):
|
||||
hex_id = f.get("icao24", "").lower()
|
||||
if not hex_id:
|
||||
@@ -668,7 +629,11 @@ def _classify_and_publish(all_adsb_flights):
|
||||
if hex_id not in flight_trails:
|
||||
flight_trails[hex_id] = {"points": [], "last_seen": now_ts}
|
||||
trail_data = flight_trails[hex_id]
|
||||
if (
|
||||
# Only append a new point if 10 minutes have passed since the last one
|
||||
last_point_ts = trail_data["points"][-1][3] if trail_data["points"] else 0
|
||||
if now_ts - last_point_ts < _TRAIL_INTERVAL_S:
|
||||
trail_data["last_seen"] = now_ts
|
||||
elif (
|
||||
trail_data["points"]
|
||||
and trail_data["points"][-1][0] == point[0]
|
||||
and trail_data["points"][-1][1] == point[1]
|
||||
@@ -688,22 +653,26 @@ def _classify_and_publish(all_adsb_flights):
|
||||
tracked_snapshot = copy.deepcopy(latest_data.get("tracked_flights", []))
|
||||
raw_flights_snapshot = list(latest_data.get("flights", []))
|
||||
|
||||
all_lists = [commercial, private_jets, private_ga, existing_tracked]
|
||||
# Commercial/private: skip trail if route is known (route line replaces trail)
|
||||
route_check_lists = [commercial, private_jets, private_ga]
|
||||
# Tracked + military: ALWAYS accumulate trails (high-interest flights)
|
||||
always_trail_lists = [existing_tracked, military_snapshot]
|
||||
seen_hexes = set()
|
||||
trail_count = 0
|
||||
with _trails_lock:
|
||||
for flist in all_lists:
|
||||
for flist in route_check_lists:
|
||||
for f in flist:
|
||||
count, hex_id = _accumulate_trail(f, now_ts, check_route=True)
|
||||
trail_count += count
|
||||
if hex_id:
|
||||
seen_hexes.add(hex_id)
|
||||
|
||||
for mf in military_snapshot:
|
||||
count, hex_id = _accumulate_trail(mf, now_ts, check_route=False)
|
||||
trail_count += count
|
||||
if hex_id:
|
||||
seen_hexes.add(hex_id)
|
||||
for flist in always_trail_lists:
|
||||
for f in flist:
|
||||
count, hex_id = _accumulate_trail(f, now_ts, check_route=False)
|
||||
trail_count += count
|
||||
if hex_id:
|
||||
seen_hexes.add(hex_id)
|
||||
|
||||
tracked_hexes = {t.get("icao24", "").lower() for t in tracked_snapshot}
|
||||
stale_keys = []
|
||||
@@ -889,79 +858,100 @@ def _enrich_with_opensky_and_supplemental(adsb_flights):
|
||||
now = time.time()
|
||||
global last_opensky_fetch, cached_opensky_flights
|
||||
|
||||
if now - last_opensky_fetch > 300:
|
||||
with _opensky_cache_lock:
|
||||
_need_opensky = now - last_opensky_fetch > 300
|
||||
if not _need_opensky:
|
||||
opensky_snapshot = list(cached_opensky_flights)
|
||||
|
||||
if _need_opensky:
|
||||
token = opensky_client.get_token()
|
||||
if token:
|
||||
opensky_regions = [
|
||||
{
|
||||
"name": "Africa",
|
||||
"bbox": {"lamin": -35.0, "lomin": -20.0, "lamax": 38.0, "lomax": 55.0},
|
||||
},
|
||||
{
|
||||
"name": "Asia",
|
||||
"bbox": {"lamin": 0.0, "lomin": 30.0, "lamax": 75.0, "lomax": 150.0},
|
||||
},
|
||||
{
|
||||
"name": "South America",
|
||||
"bbox": {"lamin": -60.0, "lomin": -95.0, "lamax": 15.0, "lomax": -30.0},
|
||||
},
|
||||
]
|
||||
|
||||
# One global /states/all query = 4 credits flat per OpenSky
|
||||
# docs (https://openskynetwork.github.io/opensky-api/rest.html).
|
||||
# At the current 5-minute cadence that's 4 × 288 = 1152
|
||||
# credits/day, ~29% of the 4000-credit standard daily quota,
|
||||
# and returns every aircraft worldwide in a single call.
|
||||
# The previous 3-regional-bbox approach cost 12 credits/cycle
|
||||
# AND missed North America, Europe, and Oceania entirely.
|
||||
new_opensky_flights = []
|
||||
for os_reg in opensky_regions:
|
||||
try:
|
||||
bb = os_reg["bbox"]
|
||||
os_url = f"https://opensky-network.org/api/states/all?lamin={bb['lamin']}&lomin={bb['lomin']}&lamax={bb['lamax']}&lomax={bb['lomax']}"
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
os_res = requests.get(os_url, headers=headers, timeout=15)
|
||||
try:
|
||||
os_url = "https://opensky-network.org/api/states/all"
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
os_res = requests.get(os_url, headers=headers, timeout=30)
|
||||
|
||||
if os_res.status_code == 200:
|
||||
os_data = os_res.json()
|
||||
states = os_data.get("states") or []
|
||||
logger.info(
|
||||
f"OpenSky: Fetched {len(states)} states for {os_reg['name']}"
|
||||
if os_res.status_code == 200:
|
||||
os_data = os_res.json()
|
||||
states = os_data.get("states") or []
|
||||
remaining = os_res.headers.get("X-Rate-Limit-Remaining", "?")
|
||||
logger.info(
|
||||
f"OpenSky: fetched {len(states)} global states "
|
||||
f"(credits remaining: {remaining})"
|
||||
)
|
||||
for s in states:
|
||||
if s[5] is None or s[6] is None:
|
||||
continue
|
||||
new_opensky_flights.append(
|
||||
{
|
||||
"hex": s[0],
|
||||
"flight": s[1].strip() if s[1] else "UNKNOWN",
|
||||
"r": s[2],
|
||||
"lon": s[5],
|
||||
"lat": s[6],
|
||||
"alt_baro": (s[7] * 3.28084) if s[7] else 0,
|
||||
"track": s[10] or 0,
|
||||
"gs": (s[9] * 1.94384) if s[9] else 0,
|
||||
"t": "Unknown",
|
||||
"is_opensky": True,
|
||||
}
|
||||
)
|
||||
elif os_res.status_code == 429:
|
||||
retry_after = os_res.headers.get("X-Rate-Limit-Retry-After-Seconds", "?")
|
||||
logger.warning(
|
||||
f"OpenSky daily quota exhausted (4000 credits). "
|
||||
f"Retry after {retry_after}s. Serving stale data until reset."
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"OpenSky /states/all failed: HTTP {os_res.status_code}"
|
||||
)
|
||||
except (
|
||||
requests.RequestException,
|
||||
ConnectionError,
|
||||
TimeoutError,
|
||||
ValueError,
|
||||
KeyError,
|
||||
json.JSONDecodeError,
|
||||
OSError,
|
||||
) as ex:
|
||||
logger.error(f"OpenSky global fetch error: {ex}")
|
||||
|
||||
for s in states:
|
||||
new_opensky_flights.append(
|
||||
{
|
||||
"hex": s[0],
|
||||
"flight": s[1].strip() if s[1] else "UNKNOWN",
|
||||
"r": s[2],
|
||||
"lon": s[5],
|
||||
"lat": s[6],
|
||||
"alt_baro": (s[7] * 3.28084) if s[7] else 0,
|
||||
"track": s[10] or 0,
|
||||
"gs": (s[9] * 1.94384) if s[9] else 0,
|
||||
"t": "Unknown",
|
||||
"is_opensky": True,
|
||||
}
|
||||
)
|
||||
else:
|
||||
logger.warning(
|
||||
f"OpenSky API {os_reg['name']} failed: {os_res.status_code}"
|
||||
)
|
||||
except (
|
||||
requests.RequestException,
|
||||
ConnectionError,
|
||||
TimeoutError,
|
||||
ValueError,
|
||||
KeyError,
|
||||
json.JSONDecodeError,
|
||||
OSError,
|
||||
) as ex:
|
||||
logger.error(f"OpenSky fetching error for {os_reg['name']}: {ex}")
|
||||
|
||||
cached_opensky_flights = new_opensky_flights
|
||||
last_opensky_fetch = now
|
||||
with _opensky_cache_lock:
|
||||
if new_opensky_flights:
|
||||
cached_opensky_flights = new_opensky_flights
|
||||
last_opensky_fetch = now
|
||||
opensky_snapshot = new_opensky_flights or list(cached_opensky_flights)
|
||||
else:
|
||||
# Token refresh failed — fall back to existing cached data
|
||||
with _opensky_cache_lock:
|
||||
opensky_snapshot = list(cached_opensky_flights)
|
||||
|
||||
# Merge OpenSky (dedup by hex)
|
||||
for osf in cached_opensky_flights:
|
||||
for osf in opensky_snapshot:
|
||||
h = osf.get("hex")
|
||||
if h and h.lower().strip() not in seen_hex:
|
||||
all_flights.append(osf)
|
||||
seen_hex.add(h.lower().strip())
|
||||
|
||||
# Publish OpenSky-merged data immediately so users see flights even if
|
||||
# supplemental gap-fill is slow or rate-limited (airplanes.live can take
|
||||
# 100+ seconds when its regional endpoints are throttled).
|
||||
if len(all_flights) > len(adsb_flights):
|
||||
logger.info(
|
||||
f"OpenSky merge: {len(all_flights) - len(adsb_flights)} additional aircraft, "
|
||||
"publishing before supplemental gap-fill"
|
||||
)
|
||||
_classify_and_publish(all_flights)
|
||||
|
||||
# Supplemental gap-fill
|
||||
try:
|
||||
gap_fill = _fetch_supplemental_sources(seen_hex)
|
||||
@@ -1008,14 +998,18 @@ def fetch_flights():
|
||||
if adsb_flights:
|
||||
logger.info(f"adsb.lol: {len(adsb_flights)} aircraft — publishing immediately")
|
||||
_classify_and_publish(adsb_flights)
|
||||
|
||||
# Phase 2: kick off slow enrichment in background
|
||||
threading.Thread(
|
||||
target=_enrich_with_opensky_and_supplemental,
|
||||
args=(adsb_flights,),
|
||||
daemon=True,
|
||||
).start()
|
||||
else:
|
||||
logger.warning("adsb.lol returned 0 aircraft")
|
||||
logger.warning(
|
||||
"adsb.lol returned 0 aircraft — relying on OpenSky/supplemental sources"
|
||||
)
|
||||
|
||||
# Phase 2: always run — OpenSky is the fallback when adsb.lol blocks us
|
||||
# (it has been known to 451 the bulk regional endpoint), and supplemental
|
||||
# gap-fill should always run regardless of Phase 1 success.
|
||||
threading.Thread(
|
||||
target=_enrich_with_opensky_and_supplemental,
|
||||
args=(adsb_flights,),
|
||||
daemon=True,
|
||||
).start()
|
||||
except Exception as e:
|
||||
logger.error(f"Error fetching flights: {e}")
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
"""Ship and geopolitics fetchers — AIS vessels, carriers, frontlines, GDELT, LiveUAmap, fishing."""
|
||||
|
||||
import csv
|
||||
import concurrent.futures
|
||||
import io
|
||||
import math
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
from urllib.parse import urlencode
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh
|
||||
from services.fetchers.retry import with_retry
|
||||
@@ -27,20 +30,24 @@ def fetch_ships():
|
||||
from services.ais_stream import get_ais_vessels
|
||||
from services.carrier_tracker import get_carrier_positions
|
||||
|
||||
ships = []
|
||||
try:
|
||||
carriers = get_carrier_positions()
|
||||
ships.extend(carriers)
|
||||
except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as e:
|
||||
logger.error(f"Carrier tracker error (non-fatal): {e}")
|
||||
carriers = []
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=2, thread_name_prefix="ship_fetch") as executor:
|
||||
carrier_future = executor.submit(get_carrier_positions)
|
||||
ais_future = executor.submit(get_ais_vessels)
|
||||
|
||||
try:
|
||||
ais_vessels = get_ais_vessels()
|
||||
ships.extend(ais_vessels)
|
||||
except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as e:
|
||||
logger.error(f"AIS stream error (non-fatal): {e}")
|
||||
ais_vessels = []
|
||||
try:
|
||||
carriers = carrier_future.result()
|
||||
except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as e:
|
||||
logger.error(f"Carrier tracker error (non-fatal): {e}")
|
||||
carriers = []
|
||||
|
||||
try:
|
||||
ais_vessels = ais_future.result()
|
||||
except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as e:
|
||||
logger.error(f"AIS stream error (non-fatal): {e}")
|
||||
ais_vessels = []
|
||||
|
||||
ships = list(carriers or [])
|
||||
ships.extend(ais_vessels or [])
|
||||
|
||||
# Enrich ships with yacht alert data (tracked superyachts)
|
||||
from services.fetchers.yacht_alert import enrich_with_yacht_alert
|
||||
@@ -200,52 +207,177 @@ def update_liveuamap():
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fishing Activity (Global Fishing Watch)
|
||||
# ---------------------------------------------------------------------------
|
||||
def _fishing_vessel_key(event: dict) -> str:
|
||||
vessel_ssvid = str(event.get("vessel_ssvid", "") or "").strip()
|
||||
if vessel_ssvid:
|
||||
return f"ssvid:{vessel_ssvid}"
|
||||
vessel_id = str(event.get("vessel_id", "") or "").strip()
|
||||
if vessel_id:
|
||||
return f"vid:{vessel_id}"
|
||||
vessel_name = str(event.get("vessel_name", "") or "").strip().upper()
|
||||
vessel_flag = str(event.get("vessel_flag", "") or "").strip().upper()
|
||||
if vessel_name:
|
||||
return f"name:{vessel_name}|flag:{vessel_flag}"
|
||||
return f"event:{event.get('id', '')}"
|
||||
|
||||
|
||||
def _fishing_event_rank(event: dict) -> tuple[str, str, float, str]:
|
||||
return (
|
||||
str(event.get("end", "") or ""),
|
||||
str(event.get("start", "") or ""),
|
||||
float(event.get("duration_hrs", 0) or 0),
|
||||
str(event.get("id", "") or ""),
|
||||
)
|
||||
|
||||
|
||||
def _dedupe_fishing_events(events: list[dict]) -> list[dict]:
|
||||
latest_by_vessel: dict[str, dict] = {}
|
||||
counts_by_vessel: dict[str, int] = {}
|
||||
|
||||
for event in events:
|
||||
vessel_key = _fishing_vessel_key(event)
|
||||
counts_by_vessel[vessel_key] = counts_by_vessel.get(vessel_key, 0) + 1
|
||||
current = latest_by_vessel.get(vessel_key)
|
||||
if current is None or _fishing_event_rank(event) > _fishing_event_rank(current):
|
||||
latest_by_vessel[vessel_key] = event
|
||||
|
||||
deduped: list[dict] = []
|
||||
for vessel_key, event in latest_by_vessel.items():
|
||||
event_copy = dict(event)
|
||||
event_copy["event_count"] = counts_by_vessel.get(vessel_key, 1)
|
||||
deduped.append(event_copy)
|
||||
|
||||
deduped.sort(key=_fishing_event_rank, reverse=True)
|
||||
return deduped
|
||||
|
||||
|
||||
_FISHING_FETCH_INTERVAL_S = 3600 # once per hour — GFW data has ~5 day lag
|
||||
_last_fishing_fetch_ts: float = 0.0
|
||||
|
||||
|
||||
@with_retry(max_retries=1, base_delay=5)
|
||||
def fetch_fishing_activity():
|
||||
"""Fetch recent fishing events from Global Fishing Watch (~5 day lag)."""
|
||||
from services.fetchers._store import is_any_active
|
||||
global _last_fishing_fetch_ts
|
||||
from services.fetchers._store import is_any_active, latest_data
|
||||
|
||||
if not is_any_active("fishing_activity"):
|
||||
return
|
||||
|
||||
# Skip if we already have data and fetched less than an hour ago
|
||||
now = time.time()
|
||||
if latest_data.get("fishing_activity") and (now - _last_fishing_fetch_ts) < _FISHING_FETCH_INTERVAL_S:
|
||||
return
|
||||
|
||||
token = os.environ.get("GFW_API_TOKEN", "")
|
||||
if not token:
|
||||
logger.debug("GFW_API_TOKEN not set, skipping fishing activity fetch")
|
||||
return
|
||||
events = []
|
||||
try:
|
||||
url = (
|
||||
"https://gateway.api.globalfishingwatch.org/v3/events"
|
||||
"?datasets[0]=public-global-fishing-events:latest"
|
||||
"&limit=500&sort=start&sort-direction=DESC"
|
||||
)
|
||||
import datetime as _dt
|
||||
|
||||
_end = _dt.date.today().isoformat()
|
||||
_start = (_dt.date.today() - _dt.timedelta(days=7)).isoformat()
|
||||
page_size = max(1, int(os.environ.get("GFW_EVENTS_PAGE_SIZE", "500") or "500"))
|
||||
offset = 0
|
||||
seen_offsets: set[int] = set()
|
||||
seen_ids: set[str] = set()
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
response = fetch_with_curl(url, timeout=30, headers=headers)
|
||||
if response.status_code == 200:
|
||||
entries = response.json().get("entries", [])
|
||||
|
||||
while True:
|
||||
if offset in seen_offsets:
|
||||
logger.warning("Fishing activity pagination repeated offset=%s; stopping fetch", offset)
|
||||
break
|
||||
seen_offsets.add(offset)
|
||||
|
||||
query = urlencode(
|
||||
{
|
||||
"datasets[0]": "public-global-fishing-events:latest",
|
||||
"start-date": _start,
|
||||
"end-date": _end,
|
||||
"limit": page_size,
|
||||
"offset": offset,
|
||||
}
|
||||
)
|
||||
url = f"https://gateway.api.globalfishingwatch.org/v3/events?{query}"
|
||||
response = fetch_with_curl(url, timeout=30, headers=headers)
|
||||
if response.status_code != 200:
|
||||
logger.warning(
|
||||
"Fishing activity fetch failed at offset=%s: HTTP %s",
|
||||
offset,
|
||||
response.status_code,
|
||||
)
|
||||
break
|
||||
|
||||
payload = response.json() or {}
|
||||
entries = payload.get("entries", [])
|
||||
if not entries:
|
||||
break
|
||||
|
||||
added_this_page = 0
|
||||
for e in entries:
|
||||
pos = e.get("position", {})
|
||||
vessel = e.get("vessel") or {}
|
||||
lat = pos.get("lat")
|
||||
lng = pos.get("lon")
|
||||
if lat is None or lng is None:
|
||||
continue
|
||||
event_id = str(e.get("id", "") or "")
|
||||
if event_id and event_id in seen_ids:
|
||||
continue
|
||||
if event_id:
|
||||
seen_ids.add(event_id)
|
||||
dur = e.get("event", {}).get("duration", 0) or 0
|
||||
events.append(
|
||||
{
|
||||
"id": e.get("id", ""),
|
||||
"id": event_id,
|
||||
"type": e.get("type", "fishing"),
|
||||
"lat": lat,
|
||||
"lng": lng,
|
||||
"start": e.get("start", ""),
|
||||
"end": e.get("end", ""),
|
||||
"vessel_name": (e.get("vessel") or {}).get("name", "Unknown"),
|
||||
"vessel_flag": (e.get("vessel") or {}).get("flag", ""),
|
||||
"vessel_id": str(vessel.get("id", "") or ""),
|
||||
"vessel_ssvid": str(vessel.get("ssvid", "") or ""),
|
||||
"vessel_name": vessel.get("name", "Unknown"),
|
||||
"vessel_flag": vessel.get("flag", ""),
|
||||
"duration_hrs": round(dur / 3600, 1),
|
||||
}
|
||||
)
|
||||
logger.info(f"Fishing activity: {len(events)} events")
|
||||
added_this_page += 1
|
||||
|
||||
if len(entries) < page_size:
|
||||
break
|
||||
|
||||
next_offset = payload.get("nextOffset")
|
||||
if next_offset is None:
|
||||
next_offset = (payload.get("pagination") or {}).get("nextOffset")
|
||||
if next_offset is None:
|
||||
next_offset = offset + page_size
|
||||
try:
|
||||
next_offset = int(next_offset)
|
||||
except (TypeError, ValueError):
|
||||
next_offset = offset + page_size
|
||||
if next_offset <= offset:
|
||||
logger.warning(
|
||||
"Fishing activity pagination produced non-increasing next offset=%s; stopping fetch",
|
||||
next_offset,
|
||||
)
|
||||
break
|
||||
if added_this_page == 0:
|
||||
logger.warning(
|
||||
"Fishing activity page at offset=%s added no new events; stopping fetch",
|
||||
offset,
|
||||
)
|
||||
break
|
||||
offset = next_offset
|
||||
raw_event_count = len(events)
|
||||
events = _dedupe_fishing_events(events)
|
||||
logger.info("Fishing activity: %s raw events -> %s deduped vessels", raw_event_count, len(events))
|
||||
except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as e:
|
||||
logger.error(f"Error fetching fishing activity: {e}")
|
||||
with _data_lock:
|
||||
latest_data["fishing_activity"] = events
|
||||
if events:
|
||||
_mark_fresh("fishing_activity")
|
||||
_last_fishing_fetch_ts = time.time()
|
||||
|
||||
@@ -25,7 +25,10 @@ logger = logging.getLogger("services.data_fetcher")
|
||||
_API_URL = "https://meshtastic.liamcottle.net/api/v1/nodes"
|
||||
_CACHE_FILE = Path(__file__).resolve().parent.parent.parent / "data" / "meshtastic_nodes_cache.json"
|
||||
_FETCH_TIMEOUT = 90 # seconds — response is ~37MB, needs time on slow connections
|
||||
_MAX_AGE_HOURS = 4 # discard nodes not seen within this window (matches refresh interval)
|
||||
_MAX_AGE_HOURS = 24 # discard nodes not seen within this window
|
||||
# Skip network fetch if cached data is fresher than this — the API is a
|
||||
# one-person hobby service, so we prefer stale data over hammering it.
|
||||
_CACHE_TRUST_HOURS = 20
|
||||
|
||||
# Track when we last fetched so the frontend can show staleness
|
||||
_last_fetch_ts: float = 0.0
|
||||
@@ -141,13 +144,54 @@ def fetch_meshtastic_nodes():
|
||||
return
|
||||
global _last_fetch_ts
|
||||
|
||||
# Trust a recent cache on disk — avoids hammering the upstream HTTP API
|
||||
# when every install polls on roughly the same cadence.
|
||||
try:
|
||||
if _CACHE_FILE.exists():
|
||||
mtime = _CACHE_FILE.stat().st_mtime
|
||||
if time.time() - mtime < _CACHE_TRUST_HOURS * 3600:
|
||||
# If memory is empty (cold start), hydrate from cache and skip fetch.
|
||||
with _data_lock:
|
||||
has_memory = bool(latest_data.get("meshtastic_map_nodes"))
|
||||
if not has_memory:
|
||||
cached = _load_cache()
|
||||
if cached:
|
||||
with _data_lock:
|
||||
latest_data["meshtastic_map_nodes"] = cached
|
||||
latest_data["meshtastic_map_fetched_at"] = mtime
|
||||
_mark_fresh("meshtastic_map")
|
||||
logger.info(
|
||||
"Meshtastic map: cache fresh (<%.0fh), skipping network fetch",
|
||||
_CACHE_TRUST_HOURS,
|
||||
)
|
||||
return
|
||||
else:
|
||||
logger.info(
|
||||
"Meshtastic map: cache fresh (<%.0fh), skipping network fetch",
|
||||
_CACHE_TRUST_HOURS,
|
||||
)
|
||||
return
|
||||
except Exception as e:
|
||||
logger.debug(f"Meshtastic cache freshness check failed: {e}")
|
||||
|
||||
# Build a polite User-Agent. Include the operator callsign when set so
|
||||
# the upstream service can correlate per-install traffic if needed.
|
||||
try:
|
||||
from services.config import get_settings
|
||||
|
||||
callsign = str(getattr(get_settings(), "MESHTASTIC_OPERATOR_CALLSIGN", "") or "").strip()
|
||||
except Exception:
|
||||
callsign = ""
|
||||
ua_base = "ShadowBroker-OSINT/0.9.7 (+https://github.com/BigBodyCobain/Shadowbroker; contact: bigbodycobain@gmail.com; 24h polling)"
|
||||
user_agent = f"{ua_base}; node={callsign}" if callsign else ua_base
|
||||
|
||||
try:
|
||||
logger.info("Fetching Meshtastic map nodes from API...")
|
||||
resp = requests.get(
|
||||
_API_URL,
|
||||
timeout=_FETCH_TIMEOUT,
|
||||
headers={
|
||||
"User-Agent": "ShadowBroker/1.0 (OSINT dashboard, 4h polling)",
|
||||
"User-Agent": user_agent,
|
||||
"Accept": "application/json",
|
||||
},
|
||||
)
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
import json
|
||||
import logging
|
||||
import time
|
||||
import requests
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh
|
||||
@@ -296,17 +297,23 @@ def fetch_military_flights():
|
||||
with _data_lock:
|
||||
latest_data["military_flights"] = remaining_mil
|
||||
|
||||
# Store tracked military flights — update positions for existing entries
|
||||
# Store tracked military flights — update positions for existing entries.
|
||||
# Drop stale entries not refreshed by ANY source (civilian or military) within 5 min.
|
||||
_TRACKED_STALE_S = 300 # 5 minutes
|
||||
_merge_ts = time.time()
|
||||
|
||||
with _data_lock:
|
||||
existing_tracked = list(latest_data.get("tracked_flights", []))
|
||||
fresh_mil_map = {}
|
||||
for t in tracked_mil:
|
||||
icao = t.get("icao24", "").upper()
|
||||
if icao:
|
||||
t["_seen_at"] = _merge_ts
|
||||
fresh_mil_map[icao] = t
|
||||
|
||||
updated_tracked = []
|
||||
seen_icaos = set()
|
||||
stale_dropped = 0
|
||||
for old_t in existing_tracked:
|
||||
icao = old_t.get("icao24", "").upper()
|
||||
if icao in fresh_mil_map:
|
||||
@@ -317,11 +324,16 @@ def fetch_military_flights():
|
||||
updated_tracked.append(fresh)
|
||||
seen_icaos.add(icao)
|
||||
else:
|
||||
updated_tracked.append(old_t)
|
||||
seen_icaos.add(icao)
|
||||
# Keep stale entry only if it was seen recently
|
||||
age = _merge_ts - old_t.get("_seen_at", 0)
|
||||
if age < _TRACKED_STALE_S:
|
||||
updated_tracked.append(old_t)
|
||||
seen_icaos.add(icao)
|
||||
else:
|
||||
stale_dropped += 1
|
||||
for icao, t in fresh_mil_map.items():
|
||||
if icao not in seen_icaos:
|
||||
updated_tracked.append(t)
|
||||
with _data_lock:
|
||||
latest_data["tracked_flights"] = updated_tracked
|
||||
logger.info(f"Tracked flights: {len(updated_tracked)} total ({len(tracked_mil)} from military)")
|
||||
logger.info(f"Tracked flights: {len(updated_tracked)} total ({len(tracked_mil)} from military, {stale_dropped} stale dropped)")
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
"""News fetching, geocoding, clustering, and risk assessment."""
|
||||
import re
|
||||
import time
|
||||
import logging
|
||||
import calendar
|
||||
import concurrent.futures
|
||||
import requests
|
||||
import feedparser
|
||||
@@ -11,6 +13,10 @@ from services.oracle_service import enrich_news_items, compute_global_threat_lev
|
||||
|
||||
logger = logging.getLogger("services.data_fetcher")
|
||||
|
||||
# Maximum article age in seconds. Anything older than this is dropped
|
||||
# during each fetch cycle so the threat feed stays current.
|
||||
_MAX_ARTICLE_AGE_SECS = 48 * 3600 # 48 hours
|
||||
|
||||
|
||||
# Keyword -> coordinate mapping for geocoding news articles
|
||||
_KEYWORD_COORDS = {
|
||||
@@ -178,6 +184,17 @@ def fetch_news():
|
||||
if not feed:
|
||||
continue
|
||||
for entry in feed.entries[:5]:
|
||||
# Drop articles older than the max-age threshold so the
|
||||
# threat feed doesn't show stale stories across cycles.
|
||||
pp = entry.get("published_parsed")
|
||||
if pp:
|
||||
try:
|
||||
entry_epoch = calendar.timegm(pp)
|
||||
if time.time() - entry_epoch > _MAX_ARTICLE_AGE_SECS:
|
||||
continue
|
||||
except (TypeError, ValueError, OverflowError):
|
||||
pass # unparseable date — keep the article
|
||||
|
||||
title = entry.get('title', '')
|
||||
summary = entry.get('summary', '')
|
||||
|
||||
|
||||
@@ -0,0 +1,360 @@
|
||||
"""NUFORC Enrichment — downloads the Hugging Face NUFORC dataset and builds
|
||||
a compact spatial+temporal index for enriching tilequery hits with shape,
|
||||
duration, city, and summary text.
|
||||
|
||||
The full CSV (~170 MB) is streamed once and processed into a lightweight JSON
|
||||
cache (~1-3 MB) stored at ``backend/data/nuforc_enrichment.json``. Subsequent
|
||||
startups load from cache until it expires (30 days).
|
||||
|
||||
Index structure::
|
||||
|
||||
{
|
||||
"built": "2026-04-08T12:00:00",
|
||||
"count": 12345,
|
||||
"by_state": {
|
||||
"AZ": [
|
||||
{"d": "2024-01-15", "city": "Tucson", "shape": "triangle",
|
||||
"dur": "5 minutes", "summary": "Bright triangular object..."},
|
||||
...
|
||||
],
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
Entries within each state are sorted by date descending (newest first).
|
||||
"""
|
||||
|
||||
import csv
|
||||
import gzip
|
||||
import io
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from pathlib import Path
|
||||
|
||||
from services.network_utils import fetch_with_curl
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_DATA_DIR = Path(__file__).resolve().parent.parent.parent / "data"
|
||||
_CACHE_FILE = _DATA_DIR / "nuforc_enrichment.json"
|
||||
_CACHE_TTL_DAYS = 1 # Rebuild daily — fresh data each cycle
|
||||
|
||||
# HuggingFace dataset — use the structured string export, not the old flat blob.
|
||||
_HF_CSV_URL = (
|
||||
"https://huggingface.co/datasets/kcimc/NUFORC/resolve/main/nuforc_str.csv"
|
||||
)
|
||||
|
||||
# Only keep sightings from the last N years for the enrichment index
|
||||
_KEEP_YEARS = 5
|
||||
|
||||
# ── In-memory index ────────────────────────────────────────────────────────
|
||||
_index: dict | None = None
|
||||
_index_lock = threading.Lock()
|
||||
_building = False
|
||||
|
||||
# US state abbreviations for parsing "City, ST" locations
|
||||
_US_STATES = {
|
||||
"AL", "AK", "AZ", "AR", "CA", "CO", "CT", "DE", "FL", "GA",
|
||||
"HI", "ID", "IL", "IN", "IA", "KS", "KY", "LA", "ME", "MD",
|
||||
"MA", "MI", "MN", "MS", "MO", "MT", "NE", "NV", "NH", "NJ",
|
||||
"NM", "NY", "NC", "ND", "OH", "OK", "OR", "PA", "RI", "SC",
|
||||
"SD", "TN", "TX", "UT", "VT", "VA", "WA", "WV", "WI", "WY",
|
||||
"DC",
|
||||
}
|
||||
|
||||
|
||||
def _parse_location(loc: str) -> tuple[str, str]:
|
||||
"""Parse 'City, ST' or 'City, ST (explanation)' → (city, state_abbr).
|
||||
|
||||
Returns ('', '') if unparseable.
|
||||
"""
|
||||
if not loc:
|
||||
return "", ""
|
||||
loc = re.sub(r"\s*\(.*\)\s*$", "", loc).strip()
|
||||
parts = [p.strip() for p in loc.split(",") if p.strip()]
|
||||
if len(parts) < 2:
|
||||
return "", ""
|
||||
for idx in range(len(parts) - 1):
|
||||
candidate = parts[idx + 1].upper().strip()
|
||||
if candidate in _US_STATES:
|
||||
city = ", ".join(parts[: idx + 1]).strip()
|
||||
return city, candidate
|
||||
candidate = parts[-1].upper().strip()
|
||||
if candidate in _US_STATES:
|
||||
return ", ".join(parts[:-1]).strip(), candidate
|
||||
return parts[0], ""
|
||||
|
||||
|
||||
def _parse_date(date_str: str) -> str:
|
||||
"""Best-effort parse NUFORC date strings → 'YYYY-MM-DD'.
|
||||
|
||||
Returns '' on failure.
|
||||
"""
|
||||
if not date_str:
|
||||
return ""
|
||||
cleaned = str(date_str).strip()
|
||||
cleaned = re.sub(r"\s+local$", "", cleaned, flags=re.IGNORECASE)
|
||||
cleaned = re.sub(r"\s+utc$", "", cleaned, flags=re.IGNORECASE)
|
||||
cleaned = cleaned.replace("T", " ")
|
||||
for fmt in (
|
||||
"%m/%d/%Y %H:%M",
|
||||
"%m/%d/%Y %I:%M:%S %p",
|
||||
"%m/%d/%Y",
|
||||
"%Y-%m-%d %H:%M:%S",
|
||||
"%Y-%m-%d %H:%M",
|
||||
"%Y-%m-%d",
|
||||
):
|
||||
try:
|
||||
return datetime.strptime(cleaned, fmt).strftime("%Y-%m-%d")
|
||||
except (ValueError, TypeError):
|
||||
continue
|
||||
match = re.match(r"^(\d{4}-\d{2}-\d{2})", cleaned)
|
||||
if match:
|
||||
return match.group(1)
|
||||
return ""
|
||||
|
||||
|
||||
def _load_cache() -> dict | None:
    """Load the on-disk cache if it exists and is fresh enough.

    Returns the cached index dict, or None when the cache is missing,
    expired, empty, or unreadable.
    """
    if not _CACHE_FILE.exists():
        return None
    try:
        data = json.loads(_CACHE_FILE.read_text(encoding="utf-8"))
        built = data.get("built", "")
        if not built:
            return None  # no build timestamp — treat as unusable
        age = datetime.utcnow() - datetime.fromisoformat(built)
        if age >= timedelta(days=_CACHE_TTL_DAYS):
            logger.info("NUFORC enrichment: cache expired (built %s)", built)
            return None
        # A fresh-but-empty index is useless; force a rebuild instead.
        if int(data.get("count", 0) or 0) <= 0:
            logger.info("NUFORC enrichment: cache is fresh but empty; rebuilding")
            return None
        logger.info(
            "NUFORC enrichment: loaded cache (%d entries, built %s)",
            data.get("count", 0), built,
        )
        return data
    except Exception as e:
        logger.warning("NUFORC enrichment: cache load error: %s", e)
    return None
|
||||
|
||||
|
||||
def _save_cache(data: dict):
    """Persist the enrichment index to disk as compact JSON (best effort)."""
    try:
        _DATA_DIR.mkdir(parents=True, exist_ok=True)
        payload = json.dumps(data, separators=(",", ":"))
        _CACHE_FILE.write_text(payload, encoding="utf-8")
        logger.info("NUFORC enrichment: saved cache (%d entries)", data.get("count", 0))
    except Exception as e:
        # A failed save only costs us a rebuild on next startup.
        logger.warning("NUFORC enrichment: cache save error: %s", e)
|
||||
|
||||
|
||||
def _download_and_build() -> dict | None:
    """Download the HF CSV and build the enrichment index.

    Returns the index dict or None on failure.
    """
    # Entries strictly older than this ISO date are discarded.
    cutoff_str = (
        datetime.utcnow() - timedelta(days=_KEEP_YEARS * 365)
    ).strftime("%Y-%m-%d")

    logger.info("NUFORC enrichment: downloading HF dataset (this may take a minute)...")
    try:
        resp = fetch_with_curl(_HF_CSV_URL, timeout=180, follow_redirects=True)
        if not resp or resp.status_code != 200:
            logger.warning(
                "NUFORC enrichment: download failed HTTP %s",
                getattr(resp, "status_code", "None"),
            )
            return None
    except Exception as e:
        logger.error("NUFORC enrichment: download error: %s", e)
        return None

    by_state: dict[str, list[dict]] = {}
    total = 0
    kept = 0

    try:
        # Column names vary between dataset revisions, so probe several keys.
        for row in csv.DictReader(io.StringIO(resp.text)):
            total += 1
            occurred = _parse_date(
                row.get("Occurred", "")
                or row.get("Date / Time", "")
                or row.get("Date", "")
            )
            if not occurred or occurred < cutoff_str:
                continue

            city, state = _parse_location(
                row.get("Location", "")
                or row.get("City", "")
                or row.get("location", "")
            )
            if not state:
                continue  # can't index without state

            shape = (row.get("Shape", "") or row.get("shape", "") or "").strip()
            duration = (row.get("Duration", "") or row.get("duration", "") or "").strip()
            summary = (
                row.get("Summary", "")
                or row.get("summary", "")
                or row.get("Text", "")
                or row.get("text", "")
                or ""
            ).strip()
            # Keep the cache small: clip long narratives to ~200 chars.
            if summary and len(summary) > 200:
                summary = summary[:197] + "..."

            record = {"d": occurred, "city": city, "shape": shape}
            if duration:
                record["dur"] = duration
            if summary:
                record["sum"] = summary

            by_state.setdefault(state, []).append(record)
            kept += 1
    except Exception as e:
        logger.error("NUFORC enrichment: CSV parse error: %s", e)
        return None

    # Newest first within each state, so entries[0] is the latest sighting.
    for state_entries in by_state.values():
        state_entries.sort(key=lambda e: e["d"], reverse=True)

    data = {
        "built": datetime.utcnow().isoformat(),
        "count": kept,
        "by_state": by_state,
    }
    logger.info(
        "NUFORC enrichment: built index — %d entries from %d total rows (%d states)",
        kept, total, len(by_state),
    )
    return data
|
||||
|
||||
|
||||
def _ensure_index():
    """Load or build the enrichment index (thread-safe, non-blocking)."""
    global _index, _building

    # Atomically claim the build slot; bail if the index exists or a build
    # is already in flight.
    with _index_lock:
        if _index is not None:
            return
        if _building:
            return  # another thread is already building
        _building = True

    # Disk cache first — avoids the expensive download entirely.
    cached = _load_cache()
    if cached:
        with _index_lock:
            _index = cached
            _building = False
        return

    def _build_in_background():
        global _index, _building
        try:
            built = _download_and_build()
            if built:
                _save_cache(built)
                with _index_lock:
                    _index = built
            else:
                logger.warning("NUFORC enrichment: build failed, enrichment unavailable")
        finally:
            # Always release the build slot, even on unexpected errors.
            with _index_lock:
                _building = False

    # Kick off the download on a daemon thread so startup is not blocked.
    worker = threading.Thread(target=_build_in_background, name="nuforc-enrichment", daemon=True)
    worker.start()
|
||||
|
||||
|
||||
def refresh_enrichment_index():
    """Force-rebuild the enrichment index. Called by the daily cron job.

    Downloads the latest HF CSV and replaces both the in-memory index and
    the on-disk cache. Runs synchronously (meant to be called from a
    background thread).
    """
    global _index
    logger.info("NUFORC enrichment: daily refresh starting...")
    rebuilt = _download_and_build()
    if not rebuilt:
        # Keep serving the previous index rather than dropping enrichment.
        logger.warning("NUFORC enrichment: daily refresh failed, keeping stale index")
        return
    _save_cache(rebuilt)
    with _index_lock:
        _index = rebuilt
    logger.info("NUFORC enrichment: daily refresh complete (%d entries)", rebuilt.get("count", 0))
|
||||
|
||||
|
||||
def enrich_sighting(state: str, from_date: str, to_date: str) -> dict:
    """Look up enrichment data for a tilequery hit.

    Args:
        state: 2-letter US state code (from reverse geocode)
        from_date: earliest sighting date (YYYY-MM-DD)
        to_date: latest sighting date (YYYY-MM-DD)

    Returns:
        Dict with optional keys: city, shape, shape_raw, duration, summary.
        Empty dict if no match found within ~3 months of the target date.
    """
    _ensure_index()

    with _index_lock:
        idx = _index

    if not idx or not state:
        return {}

    entries = idx.get("by_state", {}).get(state, [])
    if not entries:
        return {}

    # Prefer the end of the requested window as the match target.
    target = to_date or from_date
    if not target:
        # No date filter — entries are sorted newest-first, so take the head.
        e = entries[0]
    else:
        # Parse the target once: it is loop-invariant (previously re-parsed on
        # every iteration). An unparseable target matches nothing, exactly as
        # before, when every iteration's strptime raised and was skipped.
        try:
            target_dt = datetime.strptime(target, "%Y-%m-%d")
        except (ValueError, TypeError):
            return {}
        best = None
        best_dist = 999999
        for e in entries:
            try:
                entry_dt = datetime.strptime(e["d"], "%Y-%m-%d")
            except (ValueError, TypeError):
                continue
            dist = abs((target_dt - entry_dt).days)
            if dist < best_dist:
                best_dist = dist
                best = e
                if dist == 0:
                    break  # exact date match
        if best is None or best_dist > 90:
            return {}  # no match within 3 months
        e = best

    result = {}
    if e.get("city"):
        result["city"] = e["city"]
    if e.get("shape"):
        result["shape"] = e["shape"]
        result["shape_raw"] = e["shape"]
    if e.get("dur"):
        result["duration"] = e["dur"]
    if e.get("sum"):
        result["summary"] = e["sum"]
    return result
|
||||
@@ -8,14 +8,33 @@ full metadata (volume, end dates, descriptions, source badges).
|
||||
import json
|
||||
import logging
|
||||
import math
|
||||
import os
|
||||
import threading
|
||||
import time
|
||||
from urllib.parse import urlencode
|
||||
from cachetools import TTLCache, cached
|
||||
|
||||
logger = logging.getLogger("services.data_fetcher")
|
||||
|
||||
_market_cache = TTLCache(maxsize=1, ttl=60) # 60-second TTL — markets change fast
|
||||
|
||||
# Delta tracking: {market_title: previous_consensus_pct}
|
||||
_prev_probabilities: dict[str, float] = {}
|
||||
_market_cache = TTLCache(maxsize=1, ttl=300)
|
||||
_POLYMARKET_PAGE_DELAY_S = float(os.environ.get("MESH_POLYMARKET_PAGE_DELAY_S", "0.02"))
|
||||
_KALSHI_PAGE_DELAY_S = float(os.environ.get("MESH_KALSHI_PAGE_DELAY_S", "0.08"))
|
||||
_provider_pace_lock = threading.Lock()
|
||||
_provider_last_request_at: dict[str, float] = {}
|
||||
|
||||
|
||||
def _pace_provider(provider: str, min_interval_s: float) -> None:
|
||||
if min_interval_s <= 0:
|
||||
return
|
||||
with _provider_pace_lock:
|
||||
now = time.monotonic()
|
||||
wait_s = min_interval_s - (now - _provider_last_request_at.get(provider, 0.0))
|
||||
if wait_s > 0:
|
||||
time.sleep(wait_s)
|
||||
now = time.monotonic()
|
||||
_provider_last_request_at[provider] = now
|
||||
|
||||
|
||||
def _finite_or_none(value):
|
||||
@@ -28,7 +47,7 @@ def _finite_or_none(value):
|
||||
# ---------------------------------------------------------------------------
|
||||
# Category classification
|
||||
# ---------------------------------------------------------------------------
|
||||
CATEGORIES = ["POLITICS", "CONFLICT", "NEWS", "FINANCE", "CRYPTO"]
|
||||
CATEGORIES = ["POLITICS", "CONFLICT", "NEWS", "FINANCE", "CRYPTO", "SPORTS"]
|
||||
|
||||
_KALSHI_CATEGORY_MAP = {
|
||||
"Politics": "POLITICS",
|
||||
@@ -38,7 +57,7 @@ _KALSHI_CATEGORY_MAP = {
|
||||
"Tech": "FINANCE",
|
||||
"Science": "NEWS",
|
||||
"Climate and Weather": "NEWS",
|
||||
"Sports": "NEWS",
|
||||
"Sports": "SPORTS",
|
||||
"Culture": "NEWS",
|
||||
}
|
||||
|
||||
@@ -62,7 +81,14 @@ _TAG_CATEGORY_MAP = {
|
||||
"Ethereum": "CRYPTO",
|
||||
"AI": "NEWS",
|
||||
"Science": "NEWS",
|
||||
"Sports": "NEWS",
|
||||
"Sports": "SPORTS",
|
||||
"NBA": "SPORTS",
|
||||
"NFL": "SPORTS",
|
||||
"MLB": "SPORTS",
|
||||
"NHL": "SPORTS",
|
||||
"Soccer": "SPORTS",
|
||||
"Tennis": "SPORTS",
|
||||
"Golf": "SPORTS",
|
||||
"Culture": "NEWS",
|
||||
"Entertainment": "NEWS",
|
||||
"Tech": "FINANCE",
|
||||
@@ -152,6 +178,26 @@ _KEYWORD_CATEGORIES = {
|
||||
"market cap",
|
||||
"revenue",
|
||||
],
|
||||
"SPORTS": [
|
||||
"nba",
|
||||
"nfl",
|
||||
"mlb",
|
||||
"nhl",
|
||||
"wnba",
|
||||
"soccer",
|
||||
"football",
|
||||
"basketball",
|
||||
"baseball",
|
||||
"hockey",
|
||||
"ufc",
|
||||
"mma",
|
||||
"tennis",
|
||||
"golf",
|
||||
"championship",
|
||||
"playoffs",
|
||||
"world cup",
|
||||
"super bowl",
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@@ -177,21 +223,186 @@ def _classify_category(title: str, poly_tags: list[str], kalshi_category: str) -
|
||||
return "NEWS"
|
||||
|
||||
|
||||
def _polymarket_event_to_entry(ev: dict) -> dict | None:
    """Convert a Polymarket Gamma API event payload into a normalized entry.

    Returns None when the event has no title.
    """
    title = ev.get("title", "")
    if not title:
        return None

    best_pct = None
    total_volume = 0
    outcomes = []
    for market in ev.get("markets", []):
        # outcomePrices may arrive as a JSON-encoded string or a real list.
        raw_prices = market.get("outcomePrices")
        price = None
        try:
            prices = json.loads(raw_prices) if isinstance(raw_prices, str) else raw_prices
            if isinstance(prices, list) and len(prices) >= 1:
                price = _finite_or_none(prices[0])
        except (json.JSONDecodeError, ValueError, TypeError):
            pass
        if price is None:
            # Fall back to the last trade or best bid when prices are absent.
            price = _finite_or_none(market.get("lastTradePrice") or market.get("bestBid"))

        pct = None
        if price is not None:
            try:
                pct = round(price * 100, 1)
                if best_pct is None or pct > best_pct:
                    best_pct = pct
            except (ValueError, TypeError):
                pass

        volume = _finite_or_none(market.get("volume", 0) or 0)
        if volume is not None:
            total_volume += volume

        outcome_name = market.get("groupItemTitle") or ""
        if outcome_name and pct is not None:
            outcomes.append({"name": outcome_name, "pct": pct})

    # Only multi-outcome events keep an outcome breakdown, best first.
    if len(outcomes) > 2:
        outcomes.sort(key=lambda o: o["pct"], reverse=True)
    else:
        outcomes = []

    tag_labels = [tag.get("label", "") for tag in ev.get("tags", []) if tag.get("label")]
    return {
        "title": title,
        "source": "polymarket",
        "pct": best_pct,
        "slug": ev.get("slug", ""),
        "description": ev.get("description") or "",
        "end_date": ev.get("endDate"),
        "volume": round(total_volume, 2),
        "volume_24h": round(_finite_or_none(ev.get("volume24hr", 0) or 0) or 0, 2),
        "tags": tag_labels,
        "outcomes": outcomes,
    }
|
||||
|
||||
|
||||
def _kalshi_market_pct(m: dict) -> float | None:
    """Best-effort YES probability (0-100, one decimal) for a Kalshi market.

    Preference order: bid/ask midpoint in dollars, last trade in dollars,
    then legacy cent-denominated fields. Returns None when no usable price
    field is present.
    """
    bid = _finite_or_none(m.get("yes_bid_dollars"))
    ask = _finite_or_none(m.get("yes_ask_dollars"))
    has_valid_spread = bid is not None and ask is not None and ask >= bid
    if has_valid_spread:
        return round(((bid + ask) / 2) * 100, 1)
    last = _finite_or_none(m.get("last_price_dollars"))
    if last is not None:
        return round(last * 100, 1)
    cents = _finite_or_none(m.get("yes_price") or m.get("last_price"))
    if cents is None:
        return None
    # Legacy fields: values ≤ 1 are dollar fractions, larger values are
    # presumably already percentages — scale accordingly.
    return round(cents * 100, 1) if cents <= 1 else round(cents, 1)
|
||||
|
||||
|
||||
def _kalshi_market_volume(m: dict) -> float:
    """First finite volume figure found, preferring 24h/fixed-point fields.

    Returns 0 when no volume field parses to a finite number.
    """
    finite_values = (
        _finite_or_none(m.get(key))
        for key in ("volume_24h_fp", "volume_fp", "dollar_volume", "volume")
    )
    return next((v for v in finite_values if v is not None), 0)
|
||||
|
||||
|
||||
def _kalshi_market_category(m: dict) -> str:
|
||||
text = " ".join(
|
||||
str(m.get(k, "") or "")
|
||||
for k in ("ticker", "event_ticker", "mve_collection_ticker", "title", "yes_sub_title", "no_sub_title")
|
||||
).lower()
|
||||
if any(token in text for token in ("sports", "xnba", "xnfl", "xmlb", "xnhl", "soccer", "tennis", "golf")):
|
||||
return "Sports"
|
||||
return str(m.get("category", "") or "")
|
||||
|
||||
|
||||
def _kalshi_event_to_entry(ev: dict, markets: list[dict] | None = None) -> dict | None:
    """Normalize a Kalshi event (plus its markets) into a market entry dict.

    Returns None when the event has no title. When *markets* is omitted the
    event's embedded market list is used.
    """
    title = ev.get("title", "")
    if not title:
        return None

    market_rows = markets or ev.get("markets", []) or []
    best_pct = None
    total_volume = 0.0
    close_dates = []
    outcomes = []
    first_ticker = ""
    rule_texts = []
    for row in market_rows:
        if not first_ticker:
            first_ticker = row.get("ticker", "")
        pct = _kalshi_market_pct(row)
        if pct is not None:
            if best_pct is None or pct > best_pct:
                best_pct = pct
            label = row.get("yes_sub_title") or row.get("sub_title") or row.get("title") or ""
            # Skip outcome labels that just repeat the event title.
            if label and label != title:
                outcomes.append({"name": label, "pct": pct})
        total_volume += _kalshi_market_volume(row)
        close_at = row.get("close_time") or row.get("close_date") or row.get("expiration_time")
        if close_at:
            close_dates.append(close_at)
        rules = (row.get("rules_primary") or row.get("rules_secondary") or "").strip()
        if rules:
            rule_texts.append(rules)

    # Only keep an outcome breakdown for genuinely multi-outcome events.
    if len(outcomes) > 2:
        outcomes.sort(key=lambda o: o["pct"], reverse=True)
    else:
        outcomes = []

    description = (ev.get("settle_details") or ev.get("underlying") or "").strip()
    if not description and rule_texts:
        description = rule_texts[0]

    return {
        "title": title,
        "source": "kalshi",
        "pct": best_pct,
        "ticker": first_ticker or ev.get("event_ticker", "") or ev.get("ticker", ""),
        "description": description,
        "sub_title": ev.get("sub_title", ""),
        "end_date": max(close_dates) if close_dates else None,
        "volume": round(total_volume, 2),
        "category": ev.get("category", ""),
        "outcomes": outcomes,
    }
|
||||
|
||||
|
||||
def _kalshi_market_to_entry(m: dict) -> dict | None:
    """Normalize a single Kalshi v2 market row into a market entry dict.

    Returns None when the row has neither a title nor a yes_sub_title.
    """
    title = m.get("title") or m.get("yes_sub_title") or ""
    if not title:
        return None
    return {
        "title": title,
        "source": "kalshi",
        "pct": _kalshi_market_pct(m),
        "ticker": m.get("ticker", "") or m.get("event_ticker", ""),
        # Settlement rules double as the human-readable description.
        "description": (m.get("rules_primary") or m.get("rules_secondary") or "").strip(),
        "sub_title": m.get("subtitle", ""),
        "end_date": m.get("close_time") or m.get("expiration_time") or m.get("expected_expiration_time"),
        "volume": round(_kalshi_market_volume(m), 2),
        "category": _kalshi_market_category(m),
        "outcomes": [],
    }
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Polymarket
|
||||
# ---------------------------------------------------------------------------
|
||||
def _fetch_polymarket_events() -> list[dict]:
|
||||
"""Fetch active events from Polymarket Gamma API (no auth required).
|
||||
|
||||
Fetches up to 500 events (multiple pages) for better search coverage.
|
||||
Fetches paginated active events, bounded by MESH_POLYMARKET_MAX_EVENTS
|
||||
so boot-time refresh does not become unbounded.
|
||||
"""
|
||||
from services.network_utils import fetch_with_curl
|
||||
|
||||
all_events = []
|
||||
for offset in range(0, 500, 100):
|
||||
page_size = 250
|
||||
max_events = int(os.environ.get("MESH_POLYMARKET_MAX_EVENTS", "5000"))
|
||||
for offset in range(0, max_events, page_size):
|
||||
try:
|
||||
_pace_provider("polymarket", _POLYMARKET_PAGE_DELAY_S)
|
||||
resp = fetch_with_curl(
|
||||
f"https://gamma-api.polymarket.com/events?active=true&closed=false&limit=100&offset={offset}",
|
||||
f"https://gamma-api.polymarket.com/events?active=true&closed=false&limit={page_size}&offset={offset}",
|
||||
timeout=15,
|
||||
)
|
||||
if not resp or resp.status_code != 200:
|
||||
@@ -200,6 +411,8 @@ def _fetch_polymarket_events() -> list[dict]:
|
||||
if not isinstance(page, list) or not page:
|
||||
break
|
||||
all_events.extend(page)
|
||||
if len(page) < page_size:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warning(f"Polymarket page offset={offset} error: {e}")
|
||||
break
|
||||
@@ -286,6 +499,42 @@ def _fetch_kalshi_events() -> list[dict]:
|
||||
"""Fetch active events from Kalshi public API (no auth required)."""
|
||||
from services.network_utils import fetch_with_curl
|
||||
|
||||
try:
|
||||
max_events = int(os.environ.get("MESH_KALSHI_MAX_EVENTS", "2000"))
|
||||
page_size = 200
|
||||
markets = []
|
||||
cursor = ""
|
||||
while len(markets) < max_events:
|
||||
params = {"status": "open", "limit": str(page_size)}
|
||||
if cursor:
|
||||
params["cursor"] = cursor
|
||||
_pace_provider("kalshi", _KALSHI_PAGE_DELAY_S)
|
||||
resp = fetch_with_curl(
|
||||
f"https://api.elections.kalshi.com/trade-api/v2/markets?{urlencode(params)}",
|
||||
timeout=15,
|
||||
)
|
||||
if not resp or resp.status_code != 200:
|
||||
break
|
||||
data = resp.json()
|
||||
page = data.get("markets", []) if isinstance(data, dict) else []
|
||||
if not page:
|
||||
break
|
||||
markets.extend(page)
|
||||
cursor = data.get("cursor") or ""
|
||||
if not cursor or len(page) < page_size:
|
||||
break
|
||||
|
||||
results = []
|
||||
for market in markets:
|
||||
entry = _kalshi_market_to_entry(market)
|
||||
if entry:
|
||||
results.append(entry)
|
||||
if results:
|
||||
logger.info(f"Kalshi: fetched {len(results)} active events from v2")
|
||||
return results
|
||||
except Exception as e:
|
||||
logger.warning(f"Kalshi v2 fetch error, falling back to legacy v1: {e}")
|
||||
|
||||
try:
|
||||
resp = fetch_with_curl(
|
||||
"https://api.elections.kalshi.com/v1/events?status=open&limit=100",
|
||||
@@ -540,11 +789,11 @@ def fetch_prediction_markets():
|
||||
# ---------------------------------------------------------------------------
|
||||
# Direct API search (not limited to cached data)
|
||||
# ---------------------------------------------------------------------------
|
||||
def search_polymarket_direct(query: str, limit: int = 20) -> list[dict]:
|
||||
def search_polymarket_direct(query: str, limit: int = 20, offset: int = 0) -> list[dict]:
|
||||
"""Search Polymarket by scanning API pages for title matches.
|
||||
|
||||
The Gamma API has no text search parameter, so we scan cached events
|
||||
plus additional pages until we find enough matches or exhaust the scan.
|
||||
Prefer Polymarket's public search endpoint, then fall back to scanning
|
||||
Gamma event pages if search is unavailable.
|
||||
"""
|
||||
from services.network_utils import fetch_with_curl
|
||||
|
||||
@@ -552,11 +801,53 @@ def search_polymarket_direct(query: str, limit: int = 20) -> list[dict]:
|
||||
q_words = set(q_lower.split())
|
||||
results = []
|
||||
|
||||
try:
|
||||
params = urlencode({"q": query, "limit": str(limit), "offset": str(max(0, offset))})
|
||||
_pace_provider("polymarket", _POLYMARKET_PAGE_DELAY_S)
|
||||
resp = fetch_with_curl(
|
||||
f"https://gamma-api.polymarket.com/public-search?{params}",
|
||||
timeout=15,
|
||||
)
|
||||
if resp and resp.status_code == 200:
|
||||
data = resp.json()
|
||||
events = data.get("events", []) if isinstance(data, dict) else []
|
||||
for ev in events:
|
||||
if ev.get("closed") or ev.get("active") is False:
|
||||
continue
|
||||
entry = _polymarket_event_to_entry(ev)
|
||||
if not entry:
|
||||
continue
|
||||
category = _classify_category(entry["title"], entry.get("tags", []), "")
|
||||
pct = _finite_or_none(entry.get("pct"))
|
||||
sources = [{"name": "POLY", "pct": pct}] if pct is not None else []
|
||||
results.append(
|
||||
{
|
||||
"title": entry["title"],
|
||||
"polymarket_pct": pct,
|
||||
"kalshi_pct": None,
|
||||
"consensus_pct": pct,
|
||||
"description": entry.get("description", ""),
|
||||
"end_date": entry.get("end_date"),
|
||||
"volume": entry.get("volume", 0),
|
||||
"volume_24h": entry.get("volume_24h", 0),
|
||||
"kalshi_volume": 0,
|
||||
"category": category,
|
||||
"sources": sources,
|
||||
"slug": entry.get("slug", ""),
|
||||
"outcomes": entry.get("outcomes", []),
|
||||
}
|
||||
)
|
||||
logger.info(f"Polymarket search '{query}': {len(results)} results via public-search")
|
||||
return results[:limit]
|
||||
except Exception as e:
|
||||
logger.warning(f"Polymarket public-search '{query}' error: {e}")
|
||||
|
||||
# Scan up to 2000 events (10 pages of 200) looking for title matches
|
||||
for offset in range(0, 2000, 200):
|
||||
for scan_offset in range(0, 3000, 200):
|
||||
try:
|
||||
_pace_provider("polymarket", _POLYMARKET_PAGE_DELAY_S)
|
||||
resp = fetch_with_curl(
|
||||
f"https://gamma-api.polymarket.com/events?active=true&closed=false&limit=200&offset={offset}",
|
||||
f"https://gamma-api.polymarket.com/events?active=true&closed=false&limit=200&offset={scan_offset}",
|
||||
timeout=15,
|
||||
)
|
||||
if not resp or resp.status_code != 200:
|
||||
@@ -637,11 +928,168 @@ def search_polymarket_direct(query: str, limit: int = 20) -> list[dict]:
|
||||
}
|
||||
)
|
||||
# Stop scanning if we have enough results
|
||||
if len(results) >= limit:
|
||||
if len(results) >= offset + limit:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warning(f"Polymarket search scan offset={offset} error: {e}")
|
||||
logger.warning(f"Polymarket search scan offset={scan_offset} error: {e}")
|
||||
break
|
||||
|
||||
logger.info(f"Polymarket search '{query}': {len(results)} results (scanned API)")
|
||||
return results[:limit]
|
||||
return results[offset : offset + limit]
|
||||
|
||||
|
||||
def search_kalshi_direct(query: str, limit: int = 20, offset: int = 0) -> list[dict]:
|
||||
"""Search Kalshi events by scanning API pages for title matches."""
|
||||
from services.network_utils import fetch_with_curl
|
||||
|
||||
q_lower = query.lower()
|
||||
q_words = set(q_lower.split())
|
||||
results = []
|
||||
|
||||
try:
|
||||
max_scan = int(os.environ.get("MESH_KALSHI_SEARCH_SCAN_EVENTS", "1200"))
|
||||
page_size = 200
|
||||
cursor = ""
|
||||
scanned = 0
|
||||
while scanned < max_scan and len(results) < offset + limit:
|
||||
params = {"status": "open", "limit": str(page_size)}
|
||||
if cursor:
|
||||
params["cursor"] = cursor
|
||||
_pace_provider("kalshi", _KALSHI_PAGE_DELAY_S)
|
||||
resp = fetch_with_curl(
|
||||
f"https://api.elections.kalshi.com/trade-api/v2/markets?{urlencode(params)}",
|
||||
timeout=15,
|
||||
)
|
||||
if not resp or resp.status_code != 200:
|
||||
break
|
||||
data = resp.json()
|
||||
markets = data.get("markets", []) if isinstance(data, dict) else []
|
||||
if not markets:
|
||||
break
|
||||
scanned += len(markets)
|
||||
for market in markets:
|
||||
haystack = " ".join(
|
||||
str(market.get(k, "") or "")
|
||||
for k in ("title", "yes_sub_title", "no_sub_title", "event_ticker", "ticker")
|
||||
).lower()
|
||||
if q_lower not in haystack and not any(w in haystack for w in q_words):
|
||||
continue
|
||||
entry = _kalshi_market_to_entry(market)
|
||||
if not entry:
|
||||
continue
|
||||
pct = _finite_or_none(entry.get("pct"))
|
||||
sources = [{"name": "KALSHI", "pct": pct}] if pct is not None else []
|
||||
category = _classify_category(entry["title"], [], entry.get("category", ""))
|
||||
results.append({
|
||||
"title": entry["title"],
|
||||
"polymarket_pct": None,
|
||||
"kalshi_pct": pct,
|
||||
"consensus_pct": pct,
|
||||
"description": entry.get("description", ""),
|
||||
"end_date": entry.get("end_date"),
|
||||
"volume": 0,
|
||||
"volume_24h": 0,
|
||||
"kalshi_volume": entry.get("volume", 0),
|
||||
"category": category,
|
||||
"sources": sources,
|
||||
"slug": "",
|
||||
"kalshi_ticker": entry.get("ticker", ""),
|
||||
"outcomes": entry.get("outcomes", []),
|
||||
})
|
||||
if len(results) >= offset + limit:
|
||||
break
|
||||
cursor = data.get("cursor") or ""
|
||||
if not cursor or len(markets) < page_size:
|
||||
break
|
||||
if results:
|
||||
logger.info(f"Kalshi search '{query}': {len(results)} results via v2 scan")
|
||||
return results[offset : offset + limit]
|
||||
except Exception as e:
|
||||
logger.warning(f"Kalshi v2 search '{query}' error, falling back to legacy v1: {e}")
|
||||
|
||||
try:
|
||||
resp = fetch_with_curl(
|
||||
"https://api.elections.kalshi.com/v1/events?status=open&limit=200",
|
||||
timeout=15,
|
||||
)
|
||||
if not resp or resp.status_code != 200:
|
||||
return []
|
||||
data = resp.json()
|
||||
events = data.get("events", []) if isinstance(data, dict) else []
|
||||
|
||||
for ev in events:
|
||||
title = ev.get("title", "")
|
||||
if not title:
|
||||
continue
|
||||
title_lower = title.lower()
|
||||
if q_lower not in title_lower and not any(w in title_lower for w in q_words):
|
||||
continue
|
||||
|
||||
markets = ev.get("markets", [])
|
||||
best_pct = None
|
||||
total_volume = 0
|
||||
close_dates = []
|
||||
outcomes = []
|
||||
for m in markets:
|
||||
price = m.get("yes_price") or m.get("last_price")
|
||||
pct = None
|
||||
if price is not None:
|
||||
try:
|
||||
price = _finite_or_none(price)
|
||||
if price is None:
|
||||
raise ValueError("non-finite")
|
||||
pct = round(price, 1)
|
||||
if pct <= 1:
|
||||
pct = round(pct * 100, 1)
|
||||
if best_pct is None or pct > best_pct:
|
||||
best_pct = pct
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
try:
|
||||
volume = _finite_or_none(
|
||||
m.get("dollar_volume", 0) or m.get("volume", 0) or 0
|
||||
)
|
||||
if volume is not None:
|
||||
total_volume += int(volume)
|
||||
except (ValueError, TypeError):
|
||||
pass
|
||||
cd = m.get("close_date")
|
||||
if cd:
|
||||
close_dates.append(cd)
|
||||
oname = m.get("title") or m.get("subtitle", "")
|
||||
if oname and pct is not None:
|
||||
outcomes.append({"name": oname, "pct": pct})
|
||||
if len(outcomes) > 2:
|
||||
outcomes.sort(key=lambda x: x["pct"], reverse=True)
|
||||
else:
|
||||
outcomes = []
|
||||
|
||||
desc = (ev.get("settle_details") or ev.get("underlying") or "").strip()
|
||||
category = _classify_category(title, [], ev.get("category", ""))
|
||||
sources = []
|
||||
if best_pct is not None:
|
||||
sources.append({"name": "KALSHI", "pct": best_pct})
|
||||
|
||||
results.append({
|
||||
"title": title,
|
||||
"polymarket_pct": None,
|
||||
"kalshi_pct": best_pct,
|
||||
"consensus_pct": best_pct,
|
||||
"description": desc,
|
||||
"end_date": max(close_dates) if close_dates else None,
|
||||
"volume": total_volume,
|
||||
"volume_24h": 0,
|
||||
"kalshi_volume": total_volume,
|
||||
"category": category,
|
||||
"sources": sources,
|
||||
"slug": "",
|
||||
"kalshi_ticker": ev.get("ticker", ""),
|
||||
"outcomes": outcomes,
|
||||
})
|
||||
if len(results) >= offset + limit:
|
||||
break
|
||||
except Exception as e:
|
||||
logger.warning(f"Kalshi search '{query}' error: {e}")
|
||||
|
||||
logger.info(f"Kalshi search '{query}': {len(results)} results")
|
||||
return results[offset : offset + limit]
|
||||
|
||||
@@ -0,0 +1,166 @@
|
||||
"""Static route + airport database loaded from vrs-standing-data.adsb.lol.
|
||||
|
||||
Replaces the per-batch /api/0/routeset POST with a single periodic bulk download.
|
||||
Routes change ~weekly when airlines update schedules, so a multi-day refresh cadence
|
||||
is far more than sufficient and removes ~all live-API pressure on adsb.lol.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import csv
|
||||
import gzip
|
||||
import io
|
||||
import logging
|
||||
import threading
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
import requests
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
_ROUTES_URL = "https://vrs-standing-data.adsb.lol/routes.csv.gz"
|
||||
_AIRPORTS_URL = "https://vrs-standing-data.adsb.lol/airports.csv.gz"
|
||||
_REFRESH_INTERVAL_S = 5 * 24 * 3600
|
||||
_HTTP_TIMEOUT_S = 60
|
||||
|
||||
_USER_AGENT = (
|
||||
"ShadowBroker-OSINT/0.9.7 "
|
||||
"(+https://github.com/BigBodyCobain/Shadowbroker; "
|
||||
"contact: bigbodycobain@gmail.com)"
|
||||
)
|
||||
|
||||
_lock = threading.RLock()
|
||||
_routes_by_callsign: dict[str, dict[str, Any]] = {}
|
||||
_airports_by_icao: dict[str, dict[str, Any]] = {}
|
||||
_last_refresh = 0.0
|
||||
_refresh_in_progress = False
|
||||
|
||||
|
||||
def _fetch_csv_gz(url: str) -> list[dict[str, str]]:
    """Download a gzipped CSV from *url* and parse it into a list of row dicts."""
    resp = requests.get(
        url,
        headers={"User-Agent": _USER_AGENT, "Accept-Encoding": "gzip"},
        timeout=_HTTP_TIMEOUT_S,
    )
    resp.raise_for_status()
    # The payload is a .csv.gz *file* (not transport-level gzip), so requests
    # does not transparently decompress it — do that ourselves.  utf-8-sig
    # tolerates an optional BOM at the start of the CSV.
    decoded = gzip.decompress(resp.content).decode("utf-8-sig")
    return list(csv.DictReader(io.StringIO(decoded)))
|
||||
|
||||
|
||||
def _build_route_index(rows: list[dict[str, str]]) -> dict[str, dict[str, Any]]:
|
||||
index: dict[str, dict[str, Any]] = {}
|
||||
for row in rows:
|
||||
callsign = (row.get("Callsign") or "").strip().upper()
|
||||
airport_codes = (row.get("AirportCodes") or "").strip()
|
||||
if not callsign or not airport_codes:
|
||||
continue
|
||||
icaos = [c.strip() for c in airport_codes.split("-") if c.strip()]
|
||||
if len(icaos) < 2:
|
||||
continue
|
||||
index[callsign] = {
|
||||
"airline_code": (row.get("AirlineCode") or "").strip(),
|
||||
"airport_codes": airport_codes,
|
||||
"airport_icaos": icaos,
|
||||
}
|
||||
return index
|
||||
|
||||
|
||||
def _build_airport_index(rows: list[dict[str, str]]) -> dict[str, dict[str, Any]]:
|
||||
index: dict[str, dict[str, Any]] = {}
|
||||
for row in rows:
|
||||
icao = (row.get("ICAO") or "").strip().upper()
|
||||
if not icao:
|
||||
continue
|
||||
try:
|
||||
lat = float(row.get("Latitude") or 0)
|
||||
lon = float(row.get("Longitude") or 0)
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
index[icao] = {
|
||||
"name": (row.get("Name") or "").strip(),
|
||||
"iata": (row.get("IATA") or "").strip(),
|
||||
"country": (row.get("CountryISO2") or "").strip(),
|
||||
"lat": lat,
|
||||
"lon": lon,
|
||||
}
|
||||
return index
|
||||
|
||||
|
||||
def refresh_route_database(force: bool = False) -> bool:
    """Pull routes.csv.gz + airports.csv.gz and rebuild the in-memory indexes.

    Returns True if a refresh was performed (success or attempted), False if
    skipped because the cache is still fresh or another refresh is in flight.
    """
    global _last_refresh, _refresh_in_progress

    now = time.time()
    with _lock:
        # Serialize refreshes and honour the cadence unless forced.
        if _refresh_in_progress:
            return False
        cache_fresh = (now - _last_refresh) < _REFRESH_INTERVAL_S
        if not force and cache_fresh and _routes_by_callsign:
            return False
        _refresh_in_progress = True

    try:
        t0 = time.time()
        # Download and parse outside the lock; only the swap into the shared
        # indexes is done under it so readers never see a partial state.
        airports = _build_airport_index(_fetch_csv_gz(_AIRPORTS_URL))
        routes = _build_route_index(_fetch_csv_gz(_ROUTES_URL))
        with _lock:
            _airports_by_icao.clear()
            _airports_by_icao.update(airports)
            _routes_by_callsign.clear()
            _routes_by_callsign.update(routes)
            _last_refresh = time.time()
        logger.info(
            "route database refreshed in %.1fs: %d routes, %d airports",
            time.time() - t0,
            len(routes),
            len(airports),
        )
        return True
    except (requests.RequestException, OSError, ValueError) as exc:
        # A failed attempt still counts as "performed" per the contract above.
        logger.warning("route database refresh failed: %s", exc)
        return True
    finally:
        with _lock:
            _refresh_in_progress = False
|
||||
|
||||
|
||||
def lookup_route(callsign: str) -> dict[str, Any] | None:
    """Resolve a callsign to {orig_name, dest_name, orig_loc, dest_loc} or None.

    Matches the shape produced by the legacy fetch_routes_background cache so
    the caller in flights.py can be a drop-in replacement.
    """
    normalized = (callsign or "").strip().upper()
    if not normalized:
        return None
    with _lock:
        entry = _routes_by_callsign.get(normalized)
        if not entry:
            return None
        hops = entry["airport_icaos"]
        origin = _airports_by_icao.get(hops[0].upper())
        destination = _airports_by_icao.get(hops[-1].upper())
        if not origin or not destination:
            return None

        def _label(airport: dict[str, Any]) -> str:
            # "IATA: Name" when an IATA code is known, bare name otherwise.
            if airport["iata"]:
                return f"{airport['iata']}: {airport['name']}"
            return airport["name"]

        return {
            "orig_name": _label(origin),
            "dest_name": _label(destination),
            "orig_loc": [origin["lon"], origin["lat"]],
            "dest_loc": [destination["lon"], destination["lat"]],
        }
|
||||
|
||||
|
||||
def route_database_status() -> dict[str, Any]:
    """Snapshot of the route-database cache state for diagnostics."""
    with _lock:
        routes_count = len(_routes_by_callsign)
        airports_count = len(_airports_by_icao)
        return {
            "last_refresh": _last_refresh,
            "routes": routes_count,
            "airports": airports_count,
            "in_progress": _refresh_in_progress,
        }
|
||||
@@ -0,0 +1,74 @@
|
||||
"""SAR catalog fetcher (Mode A — default-on, free, no account).
|
||||
|
||||
Hits ASF Search every hour for Sentinel-1 scenes that touched any of
|
||||
the operator-defined AOIs in the last ~36h. Pure metadata, no
|
||||
downloads.
|
||||
|
||||
Result is written to ``latest_data["sar_scenes"]`` and a per-AOI
|
||||
coverage summary to ``latest_data["sar_aoi_coverage"]``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
|
||||
from services.fetchers._store import _data_lock, _mark_fresh, is_any_active, latest_data
|
||||
from services.fetchers.retry import with_retry
|
||||
from services.sar.sar_aoi import load_aois
|
||||
from services.sar.sar_catalog_client import estimate_next_pass, search_scenes_for_aoi
|
||||
from services.sar.sar_config import catalog_enabled
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@with_retry(max_retries=1, base_delay=2)
def fetch_sar_catalog() -> None:
    """Refresh the SAR scene catalog for all configured AOIs."""
    # Guard clauses: feature flag off, or nobody currently watching SAR data.
    if not catalog_enabled():
        return
    if not is_any_active("sar"):
        return
    areas = load_aois()
    if not areas:
        logger.debug("SAR catalog: no AOIs configured")
        return

    scene_accumulator: list[dict] = []
    per_aoi_summary: list[dict] = []
    for area in areas:
        try:
            found = search_scenes_for_aoi(area)
        except (ConnectionError, TimeoutError, OSError, ValueError) as exc:
            # Best-effort per AOI: a failed search simply yields zero scenes.
            logger.debug("SAR catalog %s: %s", area.id, exc)
            found = []
        as_dicts = [scene.to_dict() for scene in found]
        scene_accumulator.extend(as_dicts)
        newest = max((d["time"] for d in as_dicts), default="") if as_dicts else ""
        summary = {
            "aoi_id": area.id,
            "aoi_name": area.name,
            "category": area.category,
            "center_lat": area.center_lat,
            "center_lon": area.center_lon,
            "radius_km": area.radius_km,
            "recent_scene_count": len(as_dicts),
            "latest_scene_time": newest,
        }
        summary.update(estimate_next_pass(found))
        per_aoi_summary.append(summary)

    with _data_lock:
        latest_data["sar_scenes"] = scene_accumulator
        latest_data["sar_aoi_coverage"] = per_aoi_summary
        if scene_accumulator or per_aoi_summary:
            _mark_fresh("sar_scenes", "sar_aoi_coverage")
    logger.info(
        "SAR catalog: %d scenes across %d AOIs",
        len(scene_accumulator),
        len(areas),
    )
|
||||
@@ -0,0 +1,103 @@
|
||||
"""SAR pre-processed product fetcher (Mode B — opt-in, free, account needed).
|
||||
|
||||
Pulls already-computed deformation, flood, water, and damage products
|
||||
from NASA OPERA, Copernicus EGMS, GFM, EMS, and UNOSAT. No local DSP.
|
||||
|
||||
Two-step opt-in: ``MESH_SAR_PRODUCTS_FETCH=allow`` AND
|
||||
``MESH_SAR_PRODUCTS_FETCH_ACKNOWLEDGE=true``. When either flag is
|
||||
unset, this fetcher logs a single startup hint and returns.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from typing import Any
|
||||
|
||||
from services.fetchers._store import _data_lock, _mark_fresh, is_any_active, latest_data
|
||||
from services.fetchers.retry import with_retry
|
||||
from services.sar.sar_aoi import load_aois
|
||||
from services.sar.sar_config import products_fetch_enabled, products_fetch_status
|
||||
from services.sar.sar_normalize import SarAnomaly
|
||||
from services.sar.sar_products_client import (
|
||||
fetch_egms_for_aoi,
|
||||
fetch_ems_for_aoi,
|
||||
fetch_gfm_for_aoi,
|
||||
fetch_opera_for_aoi,
|
||||
fetch_unosat_for_aoi,
|
||||
)
|
||||
from services.sar.sar_signing import emit_signed_anomaly
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
_LOGGED_DISABLED_HINT = False
|
||||
|
||||
|
||||
def _hint_disabled_once() -> None:
    """Log the Mode-B "disabled" hint at most once per process lifetime."""
    global _LOGGED_DISABLED_HINT
    if _LOGGED_DISABLED_HINT:
        return
    _LOGGED_DISABLED_HINT = True
    missing_flags = ", ".join(products_fetch_status().get("missing", [])) or "nothing"
    logger.info(
        "SAR Mode B (ground-change alerts) is disabled. Missing: %s. "
        "Enable in Settings → SAR or set the env vars listed in .env.example. "
        "Free signup: https://urs.earthdata.nasa.gov/users/new",
        missing_flags,
    )
|
||||
|
||||
|
||||
@with_retry(max_retries=1, base_delay=3)
def fetch_sar_products() -> None:
    """Refresh pre-processed SAR anomalies for all configured AOIs."""
    if not products_fetch_enabled():
        _hint_disabled_once()
        return
    if not is_any_active("sar"):
        return
    areas = load_aois()
    if not areas:
        logger.debug("SAR products: no AOIs configured")
        return

    # Anomaly IDs can recur across providers/AOIs; keep the first occurrence.
    dedupe: set[str] = set()
    collected: list[dict[str, Any]] = []
    summary = {"signed": 0, "skipped": 0, "reasons": {}}
    providers = (
        fetch_opera_for_aoi,
        fetch_egms_for_aoi,
        fetch_gfm_for_aoi,
        fetch_ems_for_aoi,
        fetch_unosat_for_aoi,
    )

    for area in areas:
        for provider in providers:
            try:
                found: list[SarAnomaly] = provider(area) or []
            except (ConnectionError, TimeoutError, OSError, ValueError, KeyError, TypeError) as exc:
                # Best-effort per provider: one failing source must not
                # block the others.
                logger.debug("SAR %s for %s failed: %s", provider.__name__, area.id, exc)
                found = []
            for anomaly in found:
                if anomaly.anomaly_id in dedupe:
                    continue
                dedupe.add(anomaly.anomaly_id)
                collected.append(anomaly.to_dict())
                result = emit_signed_anomaly(anomaly)
                if result.get("signed"):
                    summary["signed"] += 1
                    continue
                summary["skipped"] += 1
                why = result.get("reason", "unknown")
                summary["reasons"][why] = summary["reasons"].get(why, 0) + 1

    with _data_lock:
        latest_data["sar_anomalies"] = collected
        if collected:
            _mark_fresh("sar_anomalies")
    logger.info(
        "SAR products: %d anomalies (%d signed, %d skipped)",
        len(collected),
        summary["signed"],
        summary["skipped"],
    )
|
||||
@@ -5,6 +5,11 @@ CelesTrak Fair Use Policy (https://celestrak.org/NORAD/elements/):
|
||||
- Use If-Modified-Since headers for conditional requests
|
||||
- No parallel/concurrent connections — one request at a time
|
||||
- Set a descriptive User-Agent
|
||||
|
||||
Analysis features (derived from cached TLEs — no extra network requests):
|
||||
- Maneuver detection: TLE-to-TLE comparison per satellite
|
||||
- Decay anomaly: mean-motion change rate monitoring
|
||||
- Overflight counting: 24h ground-track sampling for a bounding box
|
||||
"""
|
||||
|
||||
import math
|
||||
@@ -41,6 +46,67 @@ _sat_classified_cache = {"data": None, "gp_fetch_ts": 0}
|
||||
_SAT_CACHE_PATH = Path(__file__).parent.parent.parent / "data" / "sat_gp_cache.json"
|
||||
_SAT_CACHE_META_PATH = Path(__file__).parent.parent.parent / "data" / "sat_gp_cache_meta.json"
|
||||
|
||||
# ── Historical TLE storage for maneuver & decay detection ───────────────────
|
||||
# Stores the previous TLE snapshot keyed by NORAD_CAT_ID.
|
||||
# Populated when a fresh CelesTrak fetch replaces cached data.
|
||||
# Persisted to disk so analysis survives restarts.
|
||||
_SAT_HISTORY_PATH = Path(__file__).parent.parent.parent / "data" / "sat_tle_history.json"
|
||||
_tle_history: dict[int, dict] = {} # {norad_id: {elements + "epoch_ts"}}
|
||||
|
||||
|
||||
def _load_tle_history():
    """Load the previous TLE snapshot from disk into ``_tle_history``.

    JSON object keys are strings, so NORAD catalog IDs are converted back to
    int on load. Any read/parse failure resets the history to empty.
    """
    global _tle_history
    try:
        if not _SAT_HISTORY_PATH.exists():
            return
        with open(_SAT_HISTORY_PATH, "r") as fh:
            stored = json.load(fh)
        _tle_history = {int(norad): elements for norad, elements in stored.items()}
        logger.info(f"Satellites: Loaded TLE history for {len(_tle_history)} objects")
    except (IOError, OSError, json.JSONDecodeError, ValueError, KeyError) as e:
        logger.warning(f"Satellites: Failed to load TLE history: {e}")
        _tle_history = {}
|
||||
|
||||
|
||||
def _save_tle_history():
    """Persist the current TLE snapshot so the next run can compare against it."""
    try:
        # Ensure the data directory exists before writing.
        _SAT_HISTORY_PATH.parent.mkdir(parents=True, exist_ok=True)
        _SAT_HISTORY_PATH.write_text(json.dumps(_tle_history))
    except (IOError, OSError) as e:
        logger.warning(f"Satellites: Failed to save TLE history: {e}")
|
||||
|
||||
|
||||
def _snapshot_current_tles(gp_data):
    """Capture orbital elements from current GP data as the new 'previous' snapshot.

    Called once per CelesTrak fetch (every 24h). The old snapshot becomes
    the comparison baseline for maneuver/decay detection.
    """
    global _tle_history
    snapshot = {}
    for entry in gp_data:
        catalog_id = entry.get("NORAD_CAT_ID")
        if catalog_id is None:
            continue
        raw_epoch = entry.get("EPOCH", "")
        try:
            parsed = datetime.strptime(raw_epoch[:19], "%Y-%m-%dT%H:%M:%S")
            epoch_seconds = parsed.timestamp()
        except (ValueError, TypeError):
            # Missing/malformed epoch: keep the element set, zero the timestamp.
            epoch_seconds = 0
        snapshot[int(catalog_id)] = {
            "MEAN_MOTION": entry.get("MEAN_MOTION"),
            "ECCENTRICITY": entry.get("ECCENTRICITY"),
            "INCLINATION": entry.get("INCLINATION"),
            "RA_OF_ASC_NODE": entry.get("RA_OF_ASC_NODE"),
            "BSTAR": entry.get("BSTAR"),
            "epoch_ts": epoch_seconds,
        }
    _tle_history = snapshot
    _save_tle_history()
|
||||
|
||||
|
||||
def _load_sat_cache():
|
||||
"""Load satellite GP data from local disk cache."""
|
||||
@@ -99,360 +165,368 @@ def _save_cache_meta():
|
||||
|
||||
|
||||
# Satellite intelligence classification database
|
||||
# Matched by substring against OBJECT_NAME (case-insensitive).
|
||||
# Order matters — first match wins, so specific names go before generic prefixes.
|
||||
_SAT_INTEL_DB = [
|
||||
(
|
||||
"USA 224",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "KH-11 Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN",
|
||||
},
|
||||
),
|
||||
(
|
||||
"USA 245",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "KH-11 Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN",
|
||||
},
|
||||
),
|
||||
(
|
||||
"USA 290",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "KH-11 Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN",
|
||||
},
|
||||
),
|
||||
(
|
||||
"USA 314",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "KH-11 Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN",
|
||||
},
|
||||
),
|
||||
(
|
||||
"USA 338",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Keyhole Successor",
|
||||
"wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN",
|
||||
},
|
||||
),
|
||||
(
|
||||
"TOPAZ",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Optical Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Persona_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"PERSONA",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Optical Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Persona_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"KONDOR",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "military_sar",
|
||||
"sat_type": "SAR Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Kondor_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"BARS-M",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Mapping Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Bars-M",
|
||||
},
|
||||
),
|
||||
(
|
||||
"YAOGAN",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Remote Sensing / ELINT",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Yaogan",
|
||||
},
|
||||
),
|
||||
(
|
||||
"GAOFEN",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "High-Res Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Gaofen",
|
||||
},
|
||||
),
|
||||
(
|
||||
"JILIN",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Video / Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Jilin-1",
|
||||
},
|
||||
),
|
||||
(
|
||||
"OFEK",
|
||||
{
|
||||
"country": "Israel",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Ofeq",
|
||||
},
|
||||
),
|
||||
(
|
||||
"CSO",
|
||||
{
|
||||
"country": "France",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Optical Reconnaissance",
|
||||
"wiki": "https://en.wikipedia.org/wiki/CSO_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"IGS",
|
||||
{
|
||||
"country": "Japan",
|
||||
"mission": "military_recon",
|
||||
"sat_type": "Intelligence Gathering",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Information_Gathering_Satellite",
|
||||
},
|
||||
),
|
||||
(
|
||||
"CAPELLA",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "sar",
|
||||
"sat_type": "SAR Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Capella_Space",
|
||||
},
|
||||
),
|
||||
(
|
||||
"ICEYE",
|
||||
{
|
||||
"country": "Finland",
|
||||
"mission": "sar",
|
||||
"sat_type": "SAR Microsatellite",
|
||||
"wiki": "https://en.wikipedia.org/wiki/ICEYE",
|
||||
},
|
||||
),
|
||||
(
|
||||
"COSMO-SKYMED",
|
||||
{
|
||||
"country": "Italy",
|
||||
"mission": "sar",
|
||||
"sat_type": "SAR Constellation",
|
||||
"wiki": "https://en.wikipedia.org/wiki/COSMO-SkyMed",
|
||||
},
|
||||
),
|
||||
(
|
||||
"TANDEM",
|
||||
{
|
||||
"country": "Germany",
|
||||
"mission": "sar",
|
||||
"sat_type": "SAR Interferometry",
|
||||
"wiki": "https://en.wikipedia.org/wiki/TanDEM-X",
|
||||
},
|
||||
),
|
||||
(
|
||||
"PAZ",
|
||||
{
|
||||
"country": "Spain",
|
||||
"mission": "sar",
|
||||
"sat_type": "SAR Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/PAZ_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"WORLDVIEW",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Maxar High-Res",
|
||||
"wiki": "https://en.wikipedia.org/wiki/WorldView-3",
|
||||
},
|
||||
),
|
||||
(
|
||||
"GEOEYE",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Maxar Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/GeoEye-1",
|
||||
},
|
||||
),
|
||||
(
|
||||
"PLEIADES",
|
||||
{
|
||||
"country": "France",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Airbus Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Pl%C3%A9iades_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"SPOT",
|
||||
{
|
||||
"country": "France",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Airbus Medium-Res",
|
||||
"wiki": "https://en.wikipedia.org/wiki/SPOT_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"PLANET",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "PlanetScope",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Planet_Labs",
|
||||
},
|
||||
),
|
||||
(
|
||||
"SKYSAT",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "Planet Video",
|
||||
"wiki": "https://en.wikipedia.org/wiki/SkySat",
|
||||
},
|
||||
),
|
||||
(
|
||||
"BLACKSKY",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "commercial_imaging",
|
||||
"sat_type": "BlackSky Imaging",
|
||||
"wiki": "https://en.wikipedia.org/wiki/BlackSky",
|
||||
},
|
||||
),
|
||||
(
|
||||
"NROL",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "sigint",
|
||||
"sat_type": "Classified NRO",
|
||||
"wiki": "https://en.wikipedia.org/wiki/National_Reconnaissance_Office",
|
||||
},
|
||||
),
|
||||
(
|
||||
"MENTOR",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "sigint",
|
||||
"sat_type": "SIGINT / ELINT",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Mentor_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"LUCH",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "sigint",
|
||||
"sat_type": "Relay / SIGINT",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Luch_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"SHIJIAN",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "sigint",
|
||||
"sat_type": "ELINT / Tech Demo",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Shijian",
|
||||
},
|
||||
),
|
||||
(
|
||||
"NAVSTAR",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "navigation",
|
||||
"sat_type": "GPS",
|
||||
"wiki": "https://en.wikipedia.org/wiki/GPS_satellite_blocks",
|
||||
},
|
||||
),
|
||||
(
|
||||
"GLONASS",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "navigation",
|
||||
"sat_type": "GLONASS",
|
||||
"wiki": "https://en.wikipedia.org/wiki/GLONASS",
|
||||
},
|
||||
),
|
||||
(
|
||||
"BEIDOU",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "navigation",
|
||||
"sat_type": "BeiDou",
|
||||
"wiki": "https://en.wikipedia.org/wiki/BeiDou",
|
||||
},
|
||||
),
|
||||
(
|
||||
"GALILEO",
|
||||
{
|
||||
"country": "EU",
|
||||
"mission": "navigation",
|
||||
"sat_type": "Galileo",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Galileo_(satellite_navigation)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"SBIRS",
|
||||
{
|
||||
"country": "USA",
|
||||
"mission": "early_warning",
|
||||
"sat_type": "Missile Warning",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Space-Based_Infrared_System",
|
||||
},
|
||||
),
|
||||
(
|
||||
"TUNDRA",
|
||||
{
|
||||
"country": "Russia",
|
||||
"mission": "early_warning",
|
||||
"sat_type": "Missile Warning",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Tundra_(satellite)",
|
||||
},
|
||||
),
|
||||
(
|
||||
"ISS",
|
||||
{
|
||||
"country": "Intl",
|
||||
"mission": "space_station",
|
||||
"sat_type": "Space Station",
|
||||
"wiki": "https://en.wikipedia.org/wiki/International_Space_Station",
|
||||
},
|
||||
),
|
||||
(
|
||||
"TIANGONG",
|
||||
{
|
||||
"country": "China",
|
||||
"mission": "space_station",
|
||||
"sat_type": "Space Station",
|
||||
"wiki": "https://en.wikipedia.org/wiki/Tiangong_space_station",
|
||||
},
|
||||
),
|
||||
# ── USA Keyhole / Reconnaissance ────────────────────────────────────────
|
||||
("USA 224", {"country": "USA", "mission": "military_recon", "sat_type": "KH-11 Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN"}),
|
||||
("USA 245", {"country": "USA", "mission": "military_recon", "sat_type": "KH-11 Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN"}),
|
||||
("USA 290", {"country": "USA", "mission": "military_recon", "sat_type": "KH-11 Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN"}),
|
||||
("USA 314", {"country": "USA", "mission": "military_recon", "sat_type": "KH-11 Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN"}),
|
||||
("USA 338", {"country": "USA", "mission": "military_recon", "sat_type": "Keyhole Successor", "wiki": "https://en.wikipedia.org/wiki/KH-11_KENNEN"}),
|
||||
# ── USA SIGINT / NRO ────────────────────────────────────────────────────
|
||||
("NROL", {"country": "USA", "mission": "sigint", "sat_type": "Classified NRO", "wiki": "https://en.wikipedia.org/wiki/National_Reconnaissance_Office"}),
|
||||
("MENTOR", {"country": "USA", "mission": "sigint", "sat_type": "SIGINT / ELINT (Orion)", "wiki": "https://en.wikipedia.org/wiki/Mentor_(satellite)"}),
|
||||
("TRUMPET", {"country": "USA", "mission": "sigint", "sat_type": "SIGINT (HEO)", "wiki": "https://en.wikipedia.org/wiki/Trumpet_(satellite)"}),
|
||||
("INTRUDER", {"country": "USA", "mission": "sigint", "sat_type": "Naval SIGINT (NOSS)", "wiki": "https://en.wikipedia.org/wiki/Naval_Ocean_Surveillance_System"}),
|
||||
# ── USA Early Warning / Missile Defense ─────────────────────────────────
|
||||
("SBIRS", {"country": "USA", "mission": "early_warning", "sat_type": "Missile Warning", "wiki": "https://en.wikipedia.org/wiki/Space-Based_Infrared_System"}),
|
||||
("DSP", {"country": "USA", "mission": "early_warning", "sat_type": "Defense Support Program", "wiki": "https://en.wikipedia.org/wiki/Defense_Support_Program"}),
|
||||
# ── USA Communications (Military) ───────────────────────────────────────
|
||||
("MUOS", {"country": "USA", "mission": "military_comms", "sat_type": "Mobile User Objective System", "wiki": "https://en.wikipedia.org/wiki/Mobile_User_Objective_System"}),
|
||||
("AEHF", {"country": "USA", "mission": "military_comms", "sat_type": "Advanced EHF", "wiki": "https://en.wikipedia.org/wiki/Advanced_Extremely_High_Frequency"}),
|
||||
("WGS", {"country": "USA", "mission": "military_comms", "sat_type": "Wideband Global SATCOM", "wiki": "https://en.wikipedia.org/wiki/Wideband_Global_SATCOM"}),
|
||||
("MILSTAR", {"country": "USA", "mission": "military_comms", "sat_type": "Milstar Secure Comms", "wiki": "https://en.wikipedia.org/wiki/Milstar"}),
|
||||
# ── USA Navigation ──────────────────────────────────────────────────────
|
||||
("NAVSTAR", {"country": "USA", "mission": "navigation", "sat_type": "GPS", "wiki": "https://en.wikipedia.org/wiki/GPS_satellite_blocks"}),
|
||||
# ── Russia Reconnaissance ───────────────────────────────────────────────
|
||||
("TOPAZ", {"country": "Russia", "mission": "military_recon", "sat_type": "Optical Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Persona_(satellite)"}),
|
||||
("PERSONA", {"country": "Russia", "mission": "military_recon", "sat_type": "Optical Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Persona_(satellite)"}),
|
||||
("KONDOR", {"country": "Russia", "mission": "military_sar", "sat_type": "SAR Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Kondor_(satellite)"}),
|
||||
("BARS-M", {"country": "Russia", "mission": "military_recon", "sat_type": "Mapping Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Bars-M"}),
|
||||
("RAZDAN", {"country": "Russia", "mission": "military_recon", "sat_type": "Optical Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Razdan_(satellite)"}),
|
||||
("LOTOS", {"country": "Russia", "mission": "sigint", "sat_type": "ELINT (Lotos-S)", "wiki": "https://en.wikipedia.org/wiki/Lotos-S"}),
|
||||
("PION", {"country": "Russia", "mission": "sigint", "sat_type": "Naval SIGINT/Radar", "wiki": "https://en.wikipedia.org/wiki/Pion-NKS"}),
|
||||
("LUCH", {"country": "Russia", "mission": "sigint", "sat_type": "Relay / SIGINT", "wiki": "https://en.wikipedia.org/wiki/Luch_(satellite)"}),
|
||||
# ── Russia Early Warning & Navigation ───────────────────────────────────
|
||||
("TUNDRA", {"country": "Russia", "mission": "early_warning", "sat_type": "Missile Warning (EKS)", "wiki": "https://en.wikipedia.org/wiki/Tundra_(satellite)"}),
|
||||
("GLONASS", {"country": "Russia", "mission": "navigation", "sat_type": "GLONASS", "wiki": "https://en.wikipedia.org/wiki/GLONASS"}),
|
||||
# ── China Military / Intel ──────────────────────────────────────────────
|
||||
("YAOGAN", {"country": "China", "mission": "military_recon", "sat_type": "Remote Sensing / ELINT", "wiki": "https://en.wikipedia.org/wiki/Yaogan"}),
|
||||
("GAOFEN", {"country": "China", "mission": "military_recon", "sat_type": "High-Res Imaging", "wiki": "https://en.wikipedia.org/wiki/Gaofen"}),
|
||||
("JILIN", {"country": "China", "mission": "commercial_imaging", "sat_type": "Video / Imaging", "wiki": "https://en.wikipedia.org/wiki/Jilin-1"}),
|
||||
("SHIJIAN", {"country": "China", "mission": "sigint", "sat_type": "ELINT / Tech Demo", "wiki": "https://en.wikipedia.org/wiki/Shijian"}),
|
||||
("TONGXIN JISHU SHIYAN", {"country": "China", "mission": "military_comms", "sat_type": "Military Comms Test", "wiki": "https://en.wikipedia.org/wiki/Tongxin_Jishu_Shiyan"}),
|
||||
("BEIDOU", {"country": "China", "mission": "navigation", "sat_type": "BeiDou", "wiki": "https://en.wikipedia.org/wiki/BeiDou"}),
|
||||
("TIANGONG", {"country": "China", "mission": "space_station", "sat_type": "Space Station", "wiki": "https://en.wikipedia.org/wiki/Tiangong_space_station"}),
|
||||
# ── Allied Military / Intel ─────────────────────────────────────────────
|
||||
("OFEK", {"country": "Israel", "mission": "military_recon", "sat_type": "Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Ofeq"}),
|
||||
("EROS", {"country": "Israel", "mission": "commercial_imaging", "sat_type": "High-Res Imaging", "wiki": "https://en.wikipedia.org/wiki/EROS_(satellite)"}),
|
||||
("CSO", {"country": "France", "mission": "military_recon", "sat_type": "Optical Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/CSO_(satellite)"}),
|
||||
("HELIOS", {"country": "France", "mission": "military_recon", "sat_type": "Optical Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/Helios_(satellite)"}),
|
||||
("CERES", {"country": "France", "mission": "sigint", "sat_type": "ELINT Constellation", "wiki": "https://en.wikipedia.org/wiki/CERES_(satellite)"}),
|
||||
("IGS", {"country": "Japan", "mission": "military_recon", "sat_type": "Intelligence Gathering", "wiki": "https://en.wikipedia.org/wiki/Information_Gathering_Satellite"}),
|
||||
("KOMPSAT", {"country": "South Korea", "mission": "military_recon", "sat_type": "Multi-Purpose Satellite", "wiki": "https://en.wikipedia.org/wiki/KOMPSAT"}),
|
||||
("SAR-LUPE", {"country": "Germany", "mission": "military_sar", "sat_type": "SAR Reconnaissance", "wiki": "https://en.wikipedia.org/wiki/SAR-Lupe"}),
|
||||
("SARAH", {"country": "Germany", "mission": "military_sar", "sat_type": "SAR Successor (SARah)", "wiki": "https://en.wikipedia.org/wiki/SARah"}),
|
||||
# ── Commercial SAR ──────────────────────────────────────────────────────
|
||||
("CAPELLA", {"country": "USA", "mission": "sar", "sat_type": "SAR Imaging", "wiki": "https://en.wikipedia.org/wiki/Capella_Space"}),
|
||||
("ICEYE", {"country": "Finland", "mission": "sar", "sat_type": "SAR Microsatellite", "wiki": "https://en.wikipedia.org/wiki/ICEYE"}),
|
||||
("COSMO-SKYMED", {"country": "Italy", "mission": "sar", "sat_type": "SAR Constellation", "wiki": "https://en.wikipedia.org/wiki/COSMO-SkyMed"}),
|
||||
("TANDEM", {"country": "Germany", "mission": "sar", "sat_type": "SAR Interferometry", "wiki": "https://en.wikipedia.org/wiki/TanDEM-X"}),
|
||||
("PAZ", {"country": "Spain", "mission": "sar", "sat_type": "SAR Imaging", "wiki": "https://en.wikipedia.org/wiki/PAZ_(satellite)"}),
|
||||
("UMBRA", {"country": "USA", "mission": "sar", "sat_type": "SAR Microsatellite", "wiki": "https://en.wikipedia.org/wiki/Umbra_(company)"}),
|
||||
# ── Commercial Optical Imaging ──────────────────────────────────────────
|
||||
("WORLDVIEW", {"country": "USA", "mission": "commercial_imaging", "sat_type": "Maxar High-Res", "wiki": "https://en.wikipedia.org/wiki/WorldView-3"}),
|
||||
("GEOEYE", {"country": "USA", "mission": "commercial_imaging", "sat_type": "Maxar Imaging", "wiki": "https://en.wikipedia.org/wiki/GeoEye-1"}),
|
||||
("LEGION", {"country": "USA", "mission": "commercial_imaging", "sat_type": "Maxar Legion", "wiki": "https://en.wikipedia.org/wiki/WorldView_Legion"}),
|
||||
("PLEIADES", {"country": "France", "mission": "commercial_imaging", "sat_type": "Airbus Imaging", "wiki": "https://en.wikipedia.org/wiki/Pl%C3%A9iades_(satellite)"}),
|
||||
("SPOT", {"country": "France", "mission": "commercial_imaging", "sat_type": "Airbus Medium-Res", "wiki": "https://en.wikipedia.org/wiki/SPOT_(satellite)"}),
|
||||
("SKYSAT", {"country": "USA", "mission": "commercial_imaging", "sat_type": "Planet Video", "wiki": "https://en.wikipedia.org/wiki/SkySat"}),
|
||||
("BLACKSKY", {"country": "USA", "mission": "commercial_imaging", "sat_type": "BlackSky Imaging", "wiki": "https://en.wikipedia.org/wiki/BlackSky"}),
|
||||
# ── Starlink (separate category) ────────────────────────────────────────
|
||||
("STARLINK", {"country": "USA", "mission": "starlink", "sat_type": "Starlink Mega-Constellation", "wiki": "https://en.wikipedia.org/wiki/Starlink"}),
|
||||
# ── Other Constellations ────────────────────────────────────────────────
|
||||
("ONEWEB", {"country": "UK", "mission": "constellation", "sat_type": "OneWeb LEO Broadband", "wiki": "https://en.wikipedia.org/wiki/OneWeb"}),
|
||||
("GALILEO", {"country": "EU", "mission": "navigation", "sat_type": "Galileo", "wiki": "https://en.wikipedia.org/wiki/Galileo_(satellite_navigation)"}),
|
||||
# ── Space Stations ──────────────────────────────────────────────────────
|
||||
("ISS", {"country": "Intl", "mission": "space_station", "sat_type": "Space Station", "wiki": "https://en.wikipedia.org/wiki/International_Space_Station"}),
|
||||
# ── Generic fallback patterns (last resort) ─────────────────────────────
|
||||
("PLANET", {"country": "USA", "mission": "commercial_imaging", "sat_type": "PlanetScope", "wiki": "https://en.wikipedia.org/wiki/Planet_Labs"}),
|
||||
]
|
||||
|
||||
# CelesTrak SATCAT owner codes → country mapping for satellites not matched by name.
# Used as a secondary classifier alongside name-pattern matching.
# Keys are SATCAT "ownership" abbreviations (e.g. "CIS" = Commonwealth of
# Independent States, "PRC" = People's Republic of China); values are the
# display country names used elsewhere in the intel classification.
_OWNER_CODE_MAP = {
    "US": "USA", "CIS": "Russia", "PRC": "China", "ISS": "Intl",
    "FR": "France", "UK": "UK", "GER": "Germany", "JPN": "Japan",
    "IND": "India", "ISRA": "Israel", "IT": "Italy", "KOR": "South Korea",
    "ESA": "EU", "NATO": "NATO", "TURK": "Turkey", "UAE": "UAE",
    "AUS": "Australia", "CA": "Canada", "SPN": "Spain", "FIN": "Finland",
    "BRAZ": "Brazil", "IRAN": "Iran", "NKOR": "North Korea",
}
|
||||
# ── Maneuver detection thresholds (per Lemmens & Krag 2014, Kim et al. 2021) ─
# These are above TLE fitting noise but low enough to catch real maneuvers.
# Each value is compared against the absolute element change between two
# successive TLEs in detect_maneuvers(); exceeding any one threshold adds a
# human-readable "reason" string to the resulting alert.
_MANEUVER_THRESHOLDS = {
    "period_min": 0.1,  # minutes — above TLE noise (~0.01–0.05 min)
    "inclination_deg": 0.05,  # degrees — above J2 secular drift (~0.001°/day)
    "eccentricity": 0.005,  # above TLE fitting noise (~0.0001–0.001)
    "raan_residual_deg": 0.5,  # degrees — only after J2 correction (Vallado §9.4)
}
|
||||
# ── Decay anomaly threshold ─────────────────────────────────────────────────
# Flag if mean motion change rate exceeds this (rev/day per day).
# Normal drag-induced decay is ~0.001 rev/day/day for LEO, so 0.01 is an
# order of magnitude above the expected background.
_DECAY_MM_RATE_THRESHOLD = 0.01  # rev/day per day
|
||||
|
||||
def _j2_raan_rate(inclination_deg, mean_motion_revday):
|
||||
"""Expected RAAN precession rate due to J2 (Vallado §9.4).
|
||||
|
||||
Returns degrees/day. Negative for prograde orbits.
|
||||
"""
|
||||
J2 = 1.08263e-3
|
||||
Re = 6378.137 # km
|
||||
mu = 398600.4418 # km^3/s^2
|
||||
n_rad_s = mean_motion_revday * 2 * math.pi / 86400.0
|
||||
if n_rad_s <= 0:
|
||||
return 0.0
|
||||
a = (mu / (n_rad_s ** 2)) ** (1.0 / 3.0) # semi-major axis in km
|
||||
if a <= Re:
|
||||
return 0.0
|
||||
cos_i = math.cos(math.radians(inclination_deg))
|
||||
raan_rate = -1.5 * n_rad_s * J2 * (Re / a) ** 2 * cos_i
|
||||
return math.degrees(raan_rate) * 86400.0 / (2 * math.pi) # deg/day
|
||||
|
||||
|
||||
def detect_maneuvers(current_gp_data):
    """Compare current TLEs against stored history to detect orbital maneuvers.

    Returns list of maneuver alert dicts. Only runs when _tle_history is populated
    (i.e., after the second CelesTrak fetch or from persisted history).

    Thresholds from Lemmens & Krag (2014), Kim et al. (2021).

    Args:
        current_gp_data: List of CelesTrak GP/OMM dicts (keys like
            "NORAD_CAT_ID", "MEAN_MOTION", "INCLINATION", "EPOCH", ...).

    Returns:
        List of dicts, one per satellite whose elements changed by more than
        any _MANEUVER_THRESHOLDS entry since its previous TLE.
    """
    # No baseline yet — nothing to compare against.
    if not _tle_history:
        return []

    alerts = []
    for sat in current_gp_data:
        norad_id = sat.get("NORAD_CAT_ID")
        if norad_id is None:
            continue
        norad_id = int(norad_id)
        # Previous snapshot for this object; skip newly-seen satellites.
        prev = _tle_history.get(norad_id)
        if prev is None:
            continue

        cur_mm = sat.get("MEAN_MOTION")
        cur_inc = sat.get("INCLINATION")
        cur_ecc = sat.get("ECCENTRICITY")
        cur_raan = sat.get("RA_OF_ASC_NODE")
        prev_mm = prev.get("MEAN_MOTION")
        prev_inc = prev.get("INCLINATION")
        prev_ecc = prev.get("ECCENTRICITY")
        prev_raan = prev.get("RA_OF_ASC_NODE")

        # All four elements must exist on both sides for a valid comparison.
        if any(v is None for v in (cur_mm, cur_inc, cur_ecc, cur_raan,
                                   prev_mm, prev_inc, prev_ecc, prev_raan)):
            continue

        # Convert mean motion (rev/day) to period (minutes)
        cur_period = 1440.0 / cur_mm if cur_mm > 0 else 0
        prev_period = 1440.0 / prev_mm if prev_mm > 0 else 0

        reasons = []
        t = _MANEUVER_THRESHOLDS

        # Each element delta is compared independently; any exceedance
        # contributes one reason string to the alert.
        delta_period = abs(cur_period - prev_period)
        if delta_period > t["period_min"]:
            reasons.append(f"period Δ{delta_period:+.3f} min")

        delta_inc = abs(cur_inc - prev_inc)
        if delta_inc > t["inclination_deg"]:
            reasons.append(f"inclination Δ{delta_inc:+.4f}°")

        delta_ecc = abs(cur_ecc - prev_ecc)
        if delta_ecc > t["eccentricity"]:
            reasons.append(f"eccentricity Δ{delta_ecc:+.6f}")

        # RAAN with J2 correction — only flag residual beyond expected precession
        epoch_str = sat.get("EPOCH", "")
        try:
            # EPOCH is truncated to seconds; format per CelesTrak OMM JSON.
            epoch_dt = datetime.strptime(epoch_str[:19], "%Y-%m-%dT%H:%M:%S")
            epoch_ts = epoch_dt.timestamp()
        except (ValueError, TypeError):
            epoch_ts = 0
        prev_epoch_ts = prev.get("epoch_ts", 0)
        # Fall back to 1 day when either timestamp is missing so the J2
        # check can still run with a nominal baseline interval.
        dt_days = (epoch_ts - prev_epoch_ts) / 86400.0 if (epoch_ts and prev_epoch_ts) else 1.0
        if dt_days > 0:
            expected_raan_drift = _j2_raan_rate(cur_inc, cur_mm) * dt_days
            actual_raan_change = cur_raan - prev_raan
            # Normalize to [-180, 180]
            actual_raan_change = (actual_raan_change + 180) % 360 - 180
            raan_residual = abs(actual_raan_change - expected_raan_drift)
            if raan_residual > t["raan_residual_deg"]:
                reasons.append(f"RAAN residual {raan_residual:.3f}° (J2-corrected)")

        if reasons:
            alerts.append({
                "norad_id": norad_id,
                "name": sat.get("OBJECT_NAME", "UNKNOWN"),
                "type": "maneuver",
                "reasons": reasons,
                "epoch": sat.get("EPOCH", ""),
                "delta_period_min": round(delta_period, 4),
                "delta_inclination_deg": round(delta_inc, 5),
                "delta_eccentricity": round(delta_ecc, 7),
            })

    logger.info(f"Satellites: Maneuver scan — {len(alerts)} detections from {len(current_gp_data)} objects")
    return alerts
|
||||
|
||||
def detect_decay_anomalies(current_gp_data):
    """Flag satellites with abnormal mean-motion change rates (possible decay).

    A rapidly increasing mean motion indicates orbital decay — the satellite
    is losing altitude. Normal LEO drag is ~0.001 rev/day/day.

    Args:
        current_gp_data: List of CelesTrak GP/OMM dicts.

    Returns:
        List of decay-anomaly alert dicts; empty when no TLE history exists
        yet (first fetch) or nothing exceeds _DECAY_MM_RATE_THRESHOLD.
    """
    # No baseline yet — nothing to compare against.
    if not _tle_history:
        return []

    alerts = []
    for sat in current_gp_data:
        norad_id = sat.get("NORAD_CAT_ID")
        if norad_id is None:
            continue
        norad_id = int(norad_id)
        prev = _tle_history.get(norad_id)
        if prev is None:
            continue

        cur_mm = sat.get("MEAN_MOTION")
        prev_mm = prev.get("MEAN_MOTION")
        if cur_mm is None or prev_mm is None:
            continue

        epoch_str = sat.get("EPOCH", "")
        try:
            epoch_dt = datetime.strptime(epoch_str[:19], "%Y-%m-%dT%H:%M:%S")
            epoch_ts = epoch_dt.timestamp()
        except (ValueError, TypeError):
            continue
        prev_epoch_ts = prev.get("epoch_ts", 0)
        dt_days = (epoch_ts - prev_epoch_ts) / 86400.0 if (epoch_ts and prev_epoch_ts) else 0
        if dt_days < 0.5:
            continue  # Need at least 12h between TLEs for meaningful comparison

        mm_rate = (cur_mm - prev_mm) / dt_days  # rev/day per day
        if abs(mm_rate) > _DECAY_MM_RATE_THRESHOLD:
            # Approximate altitude from mean motion via Kepler's third law:
            #   a [km] = (8681663.653 / n)^(2/3), with n in rev/day.
            # The previous expression applied the 2/3 exponent to the mean
            # motion alone — (8681663.7 / cur_mm ** (2/3)) — which yields
            # ~1.4 million km for an ISS-like orbit instead of ~6800 km.
            # Fixed to raise the whole quotient to the 2/3 power.
            cur_alt_km = (8681663.7 / cur_mm) ** (2.0 / 3.0) - 6371.0 if cur_mm > 0 else 0
            alerts.append({
                "norad_id": norad_id,
                "name": sat.get("OBJECT_NAME", "UNKNOWN"),
                "type": "decay_anomaly",
                "mm_rate": round(mm_rate, 6),
                "current_mm": round(cur_mm, 4),
                "approx_alt_km": round(cur_alt_km, 1),
                "epoch": sat.get("EPOCH", ""),
                "dt_days": round(dt_days, 2),
            })

    logger.info(f"Satellites: Decay scan — {len(alerts)} anomalies detected")
    return alerts
|
||||
|
||||
def compute_overflights(gp_data, bbox, hours=24, step_minutes=10):
    """Count unique satellites whose ground track enters a bounding box.

    Args:
        gp_data: Full GP catalog (list of dicts with orbital elements).
        bbox: Dict with keys 's', 'w', 'n', 'e' (degrees).
        hours: Look-back window (default 24h).
        step_minutes: Sampling interval (default 10 min).

    Returns dict with total count and per-mission breakdown.
    Uses SGP4 propagation — CPU cost is ~O(catalog_size × timesteps).
    Only propagates satellites that could plausibly overfly the bbox latitude range.
    """
    if not gp_data or not bbox:
        return {"total": 0, "by_mission": {}, "satellites": []}

    south, west = bbox["s"], bbox["w"]
    north, east = bbox["n"], bbox["e"]
    now = datetime.utcnow()
    steps = int(hours * 60 / step_minutes)

    # Pre-filter: only propagate sats whose inclination allows them to reach bbox latitude
    # (a ground track's max latitude ≈ inclination for prograde orbits; the
    # 0.8 factor is deliberate slack so borderline orbits aren't excluded).
    max_lat = max(abs(south), abs(north))
    candidates = [s for s in gp_data if s.get("INCLINATION") is not None
                  and s.get("INCLINATION") >= max_lat * 0.8]  # 20% margin

    seen_ids = set()      # NORAD IDs already counted (dedupe across timesteps)
    results = []
    by_mission = {}

    for s in candidates:
        norad_id = s.get("NORAD_CAT_ID")
        mean_motion = s.get("MEAN_MOTION")
        ecc = s.get("ECCENTRICITY")
        incl = s.get("INCLINATION")
        raan = s.get("RA_OF_ASC_NODE")
        argp = s.get("ARG_OF_PERICENTER")
        ma = s.get("MEAN_ANOMALY")
        bstar = s.get("BSTAR", 0)
        epoch_str = s.get("EPOCH", "")

        # All elements must be present to initialize SGP4.
        if any(v is None for v in (mean_motion, ecc, incl, raan, argp, ma, epoch_str)):
            continue

        try:
            epoch_dt = datetime.strptime(epoch_str[:19], "%Y-%m-%dT%H:%M:%S")
            epoch_jd, epoch_fr = jday(
                epoch_dt.year, epoch_dt.month, epoch_dt.day,
                epoch_dt.hour, epoch_dt.minute, epoch_dt.second,
            )
            sat_obj = Satrec()
            # sgp4init wants the epoch as days since 1949 Dec 31 00:00 UT
            # (JD 2433281.5), angles in radians, and mean motion in rad/min.
            sat_obj.sgp4init(
                WGS72, "i", norad_id or 0,
                (epoch_jd + epoch_fr) - 2433281.5,
                bstar, 0.0, 0.0, ecc,
                math.radians(argp), math.radians(incl), math.radians(ma),
                mean_motion * 2 * math.pi / 1440.0, math.radians(raan),
            )
        except (ValueError, TypeError):
            continue

        # Sample backwards in time across the look-back window.
        for step in range(steps):
            t = now - timedelta(minutes=step * step_minutes)
            jd_t, fr_t = jday(t.year, t.month, t.day, t.hour, t.minute, t.second)
            e, r, _ = sat_obj.sgp4(jd_t, fr_t)
            if e != 0:
                continue  # non-zero SGP4 error code → skip this timestep
            x, y, z = r
            # TEME position → geodetic-ish lat/lng via GMST rotation
            # (spherical-Earth approximation; fine for bbox membership).
            gmst = _gmst(jd_t + fr_t)
            lng_rad = math.atan2(y, x) - gmst
            lat_deg = math.degrees(math.atan2(z, math.sqrt(x * x + y * y)))
            lng_deg = math.degrees(lng_rad) % 360
            if lng_deg > 180:
                lng_deg -= 360

            # Check bounding box (handles antimeridian crossing)
            lat_in = south <= lat_deg <= north
            if west <= east:
                lng_in = west <= lng_deg <= east
            else:
                lng_in = lng_deg >= west or lng_deg <= east

            if lat_in and lng_in and norad_id not in seen_ids:
                seen_ids.add(norad_id)
                name = s.get("OBJECT_NAME", "UNKNOWN")
                # Classify for mission breakdown
                mission = "unknown"
                for key, meta in _SAT_INTEL_DB:
                    if key.upper() in name.upper():
                        mission = meta.get("mission", "unknown")
                        break
                by_mission[mission] = by_mission.get(mission, 0) + 1
                results.append({"norad_id": norad_id, "name": name, "mission": mission})
                break  # Already counted this sat, move to next

    return {"total": len(results), "by_mission": by_mission, "satellites": results}
|
||||
|
||||
def _parse_tle_to_gp(name, norad_id, line1, line2):
|
||||
"""Convert TLE two-line element to CelesTrak GP-style dict."""
|
||||
@@ -539,9 +613,18 @@ def fetch_satellites():
|
||||
if not is_any_active("satellites"):
|
||||
return
|
||||
sats = []
|
||||
maneuver_alerts = []
|
||||
decay_alerts = []
|
||||
starlink_summary = {}
|
||||
data = None
|
||||
classified = None
|
||||
try:
|
||||
now_ts = time.time()
|
||||
|
||||
# On first call, load TLE history from disk for maneuver detection
|
||||
if not _tle_history:
|
||||
_load_tle_history()
|
||||
|
||||
# On first call, try disk cache before hitting CelesTrak
|
||||
if _sat_gp_cache["data"] is None:
|
||||
disk_data = _load_sat_cache()
|
||||
@@ -594,6 +677,9 @@ def fetch_satellites():
|
||||
if lm:
|
||||
_sat_gp_cache["last_modified"] = lm
|
||||
_save_sat_cache(gp_data)
|
||||
# Snapshot current TLEs as history before overwriting
|
||||
# (the old _tle_history becomes the comparison baseline)
|
||||
_snapshot_current_tles(gp_data)
|
||||
logger.info(
|
||||
f"Satellites: Downloaded {len(gp_data)} GP records from CelesTrak"
|
||||
)
|
||||
@@ -651,11 +737,14 @@ def fetch_satellites():
|
||||
and _sat_classified_cache["data"]
|
||||
):
|
||||
classified = _sat_classified_cache["data"]
|
||||
starlink_summary = _sat_classified_cache.get("starlink_summary", {})
|
||||
logger.info(
|
||||
f"Satellites: Using cached classification ({len(classified)} sats, TLEs unchanged)"
|
||||
)
|
||||
else:
|
||||
classified = []
|
||||
starlink_count = 0
|
||||
starlink_shells = {} # inclination shell → count
|
||||
for sat in data:
|
||||
name = sat.get("OBJECT_NAME", "UNKNOWN").upper()
|
||||
intel = None
|
||||
@@ -663,8 +752,24 @@ def fetch_satellites():
|
||||
if key.upper() in name:
|
||||
intel = dict(meta)
|
||||
break
|
||||
if not intel:
|
||||
# Secondary classification via SATCAT owner code
|
||||
owner = sat.get("OWNER", sat.get("OBJECT_OWNER", ""))
|
||||
if owner in _OWNER_CODE_MAP:
|
||||
intel = {"country": _OWNER_CODE_MAP[owner], "mission": "general", "sat_type": "Unclassified"}
|
||||
if not intel:
|
||||
continue
|
||||
|
||||
# Starlink: count and summarize but don't propagate individually
|
||||
# (6000+ sats would be too expensive to position every 60s)
|
||||
if intel.get("mission") == "starlink":
|
||||
starlink_count += 1
|
||||
inc = sat.get("INCLINATION")
|
||||
if inc is not None:
|
||||
shell_key = f"{round(inc, 0):.0f}°"
|
||||
starlink_shells[shell_key] = starlink_shells.get(shell_key, 0) + 1
|
||||
continue # Skip individual propagation
|
||||
|
||||
entry = {
|
||||
"id": sat.get("NORAD_CAT_ID"),
|
||||
"name": sat.get("OBJECT_NAME", "UNKNOWN"),
|
||||
@@ -679,14 +784,35 @@ def fetch_satellites():
|
||||
}
|
||||
entry.update(intel)
|
||||
classified.append(entry)
|
||||
|
||||
starlink_summary = {
|
||||
"total": starlink_count,
|
||||
"shells": starlink_shells,
|
||||
}
|
||||
_sat_classified_cache["data"] = classified
|
||||
_sat_classified_cache["starlink_summary"] = starlink_summary
|
||||
_sat_classified_cache["gp_fetch_ts"] = _sat_gp_cache["last_fetch"]
|
||||
logger.info(
|
||||
f"Satellites: {len(classified)} intel-classified out of {len(data)} total in catalog"
|
||||
f"Satellites: {len(classified)} intel-classified, "
|
||||
f"{starlink_count} Starlink (summarized), "
|
||||
f"out of {len(data)} total in catalog"
|
||||
)
|
||||
|
||||
all_sats = classified
|
||||
|
||||
# ── Run analysis detectors against the full GP catalog ──────────────
|
||||
# These use cached TLEs only — no extra network requests.
|
||||
maneuver_alerts = []
|
||||
decay_alerts = []
|
||||
try:
|
||||
maneuver_alerts = detect_maneuvers(data)
|
||||
except (ValueError, TypeError, KeyError, ZeroDivisionError) as e:
|
||||
logger.error(f"Satellites: Maneuver detection error: {e}")
|
||||
try:
|
||||
decay_alerts = detect_decay_anomalies(data)
|
||||
except (ValueError, TypeError, KeyError, ZeroDivisionError) as e:
|
||||
logger.error(f"Satellites: Decay detection error: {e}")
|
||||
|
||||
now = datetime.utcnow()
|
||||
jd, fr = jday(
|
||||
now.year, now.month, now.day, now.hour, now.minute, now.second + now.microsecond / 1e6
|
||||
@@ -800,6 +926,13 @@ def fetch_satellites():
|
||||
with _data_lock:
|
||||
latest_data["satellites"] = sats
|
||||
latest_data["satellite_source"] = _sat_gp_cache.get("source", "none")
|
||||
latest_data["satellite_analysis"] = {
|
||||
"maneuvers": maneuver_alerts,
|
||||
"decay_anomalies": decay_alerts,
|
||||
"starlink": starlink_summary,
|
||||
"catalog_size": len(data) if data else 0,
|
||||
"classified_count": len(classified) if classified else 0,
|
||||
}
|
||||
_mark_fresh("satellites")
|
||||
else:
|
||||
with _data_lock:
|
||||
|
||||
@@ -0,0 +1,216 @@
|
||||
"""WastewaterSCAN fetcher — pathogen surveillance via wastewater monitoring.
|
||||
|
||||
Data source: Stanford/Emory WastewaterSCAN project
|
||||
- Plant locations: https://storage.googleapis.com/wastewater-dev-data/json/plants.json
|
||||
- Time series: https://storage.googleapis.com/wastewater-dev-data/json/{uuid}.json
|
||||
|
||||
All data is public, no authentication required. ~192 treatment plants across
|
||||
the US with daily sampling for COVID (N Gene), Influenza A/B, RSV, Norovirus,
|
||||
MPXV, Measles, H5N1, and others.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import time
|
||||
import concurrent.futures
|
||||
from datetime import datetime, timedelta
|
||||
from services.network_utils import fetch_with_curl
|
||||
from services.fetchers._store import latest_data, _data_lock, _mark_fresh
|
||||
from services.fetchers.retry import with_retry
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Base URL for the public WastewaterSCAN JSON exports on Google Cloud Storage.
_GCS_BASE = "https://storage.googleapis.com/wastewater-dev-data/json"

# Cache the plants list for 24 hours (it rarely changes)
_plants_cache: list[dict] = []   # last successfully fetched plants list
_plants_cache_ts: float = 0      # epoch seconds of the last successful fetch
_PLANTS_CACHE_TTL = 86400  # 24 hours

# Key pathogen targets to extract — maps internal target name to display label
# (keys must match the "targets" keys in the upstream per-plant JSON).
_TARGET_DISPLAY: dict[str, str] = {
    "N Gene": "COVID-19",
    "Influenza A F1R1": "Influenza A",
    "Influenza B": "Influenza B",
    "RSV": "RSV",
    "Noro_G2": "Norovirus",
    "MPXV_G2R_WA": "Mpox",
    "InfA_H5": "H5N1 (Bird Flu)",
    "HMPV_4": "HMPV",
    "Rota": "Rotavirus",
    "HAV": "Hepatitis A",
    "C_auris": "Candida auris",
    "EVD68": "Enterovirus D68",
}

# Activity categories that represent elevated/alert levels
# (compared case-insensitively against the upstream "activity_category").
_ALERT_CATEGORIES = {"high", "very high", "above normal"}
|
||||
|
||||
def _fetch_plants() -> list[dict]:
    """Fetch the full plants list from GCS, with 24h caching."""
    global _plants_cache, _plants_cache_ts

    # Serve from the in-process cache while it is still fresh.
    cache_age = time.time() - _plants_cache_ts
    if _plants_cache and cache_age < _PLANTS_CACHE_TTL:
        return _plants_cache

    resp = fetch_with_curl(f"{_GCS_BASE}/plants.json", timeout=30)
    if resp.status_code != 200:
        logger.warning(f"WastewaterSCAN plants fetch failed: HTTP {resp.status_code}")
        # On failure, fall back to whatever we had before (possibly stale,
        # possibly empty) rather than raising.
        return _plants_cache

    payload = resp.json()
    fresh_plants = payload.get("plants", [])
    _plants_cache = fresh_plants
    _plants_cache_ts = time.time()
    logger.info(f"WastewaterSCAN: cached {len(fresh_plants)} plant locations")
    return fresh_plants
|
||||
|
||||
def _fetch_plant_latest(plant_id: str) -> dict | None:
    """Fetch the most recent sample for a single plant.

    Returns a dict with pathogen levels or None on failure.

    Args:
        plant_id: Plant UUID used as the JSON filename on GCS.

    Returns:
        Dict with "collection_date", "pathogens" (list of per-target dicts),
        and "alert_count"; None on any fetch/parse failure, when the latest
        sample is >30 days old, or when no tracked pathogen was detected.
    """
    url = f"{_GCS_BASE}/{plant_id}.json"
    try:
        resp = fetch_with_curl(url, timeout=12)
        if resp.status_code != 200:
            return None
        data = resp.json()
        samples = data.get("samples", [])
        if not samples:
            return None

        # Find the most recent sample (last element, sorted by date)
        # NOTE(review): assumes the upstream feed keeps "samples" sorted
        # ascending by collection date — TODO confirm against the API.
        latest = samples[-1]
        collection_date = latest.get("collection_date", "")

        # Skip samples older than 30 days
        try:
            sample_dt = datetime.strptime(collection_date, "%Y-%m-%d")
            if sample_dt < datetime.utcnow() - timedelta(days=30):
                return None
        except (ValueError, TypeError):
            # Unparseable date: keep the sample rather than discard it.
            pass

        # Extract key pathogen levels
        targets = latest.get("targets", {})
        pathogens: list[dict] = []
        alert_count = 0

        for target_key, display_name in _TARGET_DISPLAY.items():
            target_data = targets.get(target_key)
            if not target_data:
                continue

            # "or 0" guards against explicit nulls in the JSON.
            concentration = target_data.get("gc_g_dry_weight", 0) or 0
            activity = target_data.get("activity_category", "not calculated")
            normalized = target_data.get("gc_g_dry_weight_pmmov", 0) or 0

            if concentration <= 0 and normalized <= 0:
                continue  # no detection

            # Case-insensitive match against the elevated-level categories.
            is_alert = activity.lower() in _ALERT_CATEGORIES
            if is_alert:
                alert_count += 1

            pathogens.append({
                "name": display_name,
                "target_key": target_key,
                "concentration": round(concentration, 1),
                "normalized": round(normalized, 6),
                "activity": activity,
                "alert": is_alert,
            })

        if not pathogens:
            return None

        return {
            "collection_date": collection_date,
            "pathogens": pathogens,
            "alert_count": alert_count,
        }
    except Exception as e:
        # Best-effort per-plant fetch: a single failure is logged at debug
        # level and reported as None so the caller can continue.
        logger.debug(f"WastewaterSCAN: failed to fetch plant {plant_id}: {e}")
        return None
|
||||
|
||||
@with_retry(max_retries=1, base_delay=5)
def fetch_wastewater():
    """Fetch WastewaterSCAN plant locations and latest pathogen levels.

    1. Fetches the plant list (cached 24h) for locations.
    2. Concurrently fetches time series for all plants, extracting only
       the most recent sample's pathogen data.
    3. Merges into a flat list suitable for map rendering.

    Side effects: writes the merged list to latest_data["wastewater"] under
    _data_lock and marks the feed fresh when non-empty. Returns None.
    """
    # Local import avoids a circular dependency at module load time.
    from services.fetchers._store import is_any_active

    # Skip the work entirely when no client is watching this layer.
    if not is_any_active("wastewater"):
        return

    plants = _fetch_plants()
    if not plants:
        logger.warning("WastewaterSCAN: no plant data available")
        return

    # Build base records from plant metadata
    plant_map: dict[str, dict] = {}
    for p in plants:
        point = p.get("point") or {}
        coords = point.get("coordinates") or []
        if len(coords) < 2:
            continue

        pid = p.get("id") or p.get("uuid", "")
        if not pid:
            continue

        # GeoJSON convention: coordinates are [lng, lat].
        plant_map[pid] = {
            "id": pid,
            "name": p.get("name", ""),
            "site_name": p.get("site_name", ""),
            "city": p.get("city", ""),
            "state": p.get("state", ""),
            "country": p.get("country", "US"),
            "population": p.get("sewershed_pop"),
            "lat": coords[1],
            "lng": coords[0],
            "pathogens": [],
            "alert_count": 0,
            "collection_date": "",
            "source": "WastewaterSCAN",
        }

    # Fetch latest samples concurrently (up to 12 threads)
    with concurrent.futures.ThreadPoolExecutor(max_workers=12) as pool:
        futures = {
            pool.submit(_fetch_plant_latest, pid): pid
            for pid in plant_map
        }
        for fut in concurrent.futures.as_completed(futures, timeout=120):
            pid = futures[fut]
            try:
                result = fut.result()
                if result:
                    plant_map[pid]["pathogens"] = result["pathogens"]
                    plant_map[pid]["alert_count"] = result["alert_count"]
                    plant_map[pid]["collection_date"] = result["collection_date"]
            except Exception:
                # Per-plant failures were already logged in the worker;
                # a plant without data simply keeps its empty defaults.
                pass

    nodes = list(plant_map.values())
    active_nodes = [n for n in nodes if n["pathogens"]]

    logger.info(
        f"WastewaterSCAN: {len(nodes)} plants, "
        f"{len(active_nodes)} with recent pathogen data, "
        f"{sum(n['alert_count'] for n in nodes)} total alerts"
    )

    with _data_lock:
        latest_data["wastewater"] = nodes
    if nodes:
        _mark_fresh("wastewater")
+57
-31
@@ -4,6 +4,7 @@ from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
import threading
|
||||
from typing import Any, Dict, List
|
||||
@@ -81,43 +82,63 @@ def _load_local_search_cache() -> List[Dict[str, Any]]:
|
||||
|
||||
|
||||
def _search_local_fallback(query: str, limit: int) -> List[Dict[str, Any]]:
|
||||
"""Strict local lookup used only when ``local_only=True`` is set.
|
||||
|
||||
Historical behaviour (substring-token-in-haystack matching) produced
|
||||
catastrophically wrong results: any query containing a common word
|
||||
would match the first airport with that word anywhere in its name,
|
||||
which silently poisoned every cache downstream. Fixed to require
|
||||
whole-word matches against airport name/IATA/id and cached-geocode
|
||||
labels.
|
||||
"""
|
||||
q = query.strip().lower()
|
||||
if not q:
|
||||
return []
|
||||
q_tokens = set(re.findall(r"[a-z0-9]+", q))
|
||||
if not q_tokens:
|
||||
return []
|
||||
|
||||
matches: List[Dict[str, Any]] = []
|
||||
seen: set[tuple[float, float, str]] = set()
|
||||
|
||||
def _whole_word_tokens(text: str) -> set[str]:
|
||||
return set(re.findall(r"[a-z0-9]+", (text or "").lower()))
|
||||
|
||||
for item in cached_airports:
|
||||
haystacks = [
|
||||
str(item.get("name", "")).lower(),
|
||||
str(item.get("iata", "")).lower(),
|
||||
str(item.get("id", "")).lower(),
|
||||
]
|
||||
if any(q in h for h in haystacks):
|
||||
label = f'{item.get("name", "Airport")} ({item.get("iata", "")})'
|
||||
key = (float(item["lat"]), float(item["lng"]), label)
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
matches.append(
|
||||
{
|
||||
"label": label,
|
||||
"lat": float(item["lat"]),
|
||||
"lng": float(item["lng"]),
|
||||
}
|
||||
)
|
||||
if len(matches) >= limit:
|
||||
return matches
|
||||
name_tokens = _whole_word_tokens(item.get("name", ""))
|
||||
iata = str(item.get("iata", "")).lower().strip()
|
||||
icao = str(item.get("id", "")).lower().strip()
|
||||
# IATA/ICAO must match exactly; name must share ALL query tokens
|
||||
# with the airport name (not "any token in haystack").
|
||||
exact_code = bool(iata and iata in q_tokens) or bool(icao and icao in q_tokens)
|
||||
name_match = bool(q_tokens) and q_tokens.issubset(name_tokens)
|
||||
if not (exact_code or name_match):
|
||||
continue
|
||||
label = f'{item.get("name", "Airport")} ({item.get("iata", "")})'
|
||||
key = (float(item["lat"]), float(item["lng"]), label)
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
matches.append(
|
||||
{
|
||||
"label": label,
|
||||
"lat": float(item["lat"]),
|
||||
"lng": float(item["lng"]),
|
||||
}
|
||||
)
|
||||
if len(matches) >= limit:
|
||||
return matches
|
||||
|
||||
for item in _load_local_search_cache():
|
||||
label = str(item.get("label", ""))
|
||||
if q in label.lower():
|
||||
key = (float(item["lat"]), float(item["lng"]), label)
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
matches.append(item)
|
||||
if len(matches) >= limit:
|
||||
break
|
||||
label_tokens = _whole_word_tokens(label)
|
||||
if not q_tokens.issubset(label_tokens):
|
||||
continue
|
||||
key = (float(item["lat"]), float(item["lng"]), label)
|
||||
if key not in seen:
|
||||
seen.add(key)
|
||||
matches.append(item)
|
||||
if len(matches) >= limit:
|
||||
break
|
||||
|
||||
return matches
|
||||
|
||||
@@ -163,9 +184,14 @@ def search_geocode(query: str, limit: int = 5, local_only: bool = False) -> List
|
||||
timeout=6,
|
||||
)
|
||||
except Exception:
|
||||
results = _search_local_fallback(q, limit)
|
||||
_set_cache(key, results)
|
||||
return results
|
||||
# Intentionally no silent airport-name fallback. Callers that
|
||||
# want offline results should pass ``local_only=True``; anything
|
||||
# else means we return an empty list so the caller can decide
|
||||
# whether to retry or propagate the failure. The old behaviour
|
||||
# of falling through to _search_local_fallback silently poisoned
|
||||
# every downstream cache with airport coordinates for any query.
|
||||
_set_cache(key, [])
|
||||
return []
|
||||
|
||||
results: List[Dict[str, Any]] = []
|
||||
if res and res.status_code == 200:
|
||||
@@ -184,9 +210,9 @@ def search_geocode(query: str, limit: int = 5, local_only: bool = False) -> List
|
||||
continue
|
||||
except Exception:
|
||||
results = []
|
||||
if not results:
|
||||
results = _search_local_fallback(q, limit)
|
||||
|
||||
# No silent airport-name fallback on empty results either — same
|
||||
# reason as above. Empty means empty.
|
||||
_set_cache(key, results)
|
||||
return results
|
||||
|
||||
|
||||
@@ -0,0 +1,246 @@
|
||||
"""Country-bbox post-filter for geocoded results.
|
||||
|
||||
Any fetcher that turns a country-tagged row into a lat/lng should call
|
||||
``coord_in_country()`` after the geocoder returns. If the coordinate
|
||||
falls outside the country's bounding box, the result is almost
|
||||
certainly a namesake collision (e.g. "Milan, WI" landing in Milan,
|
||||
Italy) and the caller should reject or retry with a stronger query.
|
||||
|
||||
This is a cheap sanity gate that catches geocoder mistakes no human
|
||||
operator will ever spot by eye across thousands of points.
|
||||
|
||||
Bounding boxes are deliberately generous — they include territories,
|
||||
overseas islands, and a small buffer — so that legitimate coastal or
|
||||
border cities are never false-rejected. Goal is to catch "wrong
|
||||
continent", not "off by a few km".
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Optional, Tuple
|
||||
|
||||
# Bounding boxes as (min_lat, min_lng, max_lat, max_lng), degrees WGS84.
# Deliberately generous (territories + buffer) per the module docstring:
# the goal is catching wrong-continent geocodes, not km-level precision.
_COUNTRY_BBOX: dict[str, Tuple[float, float, float, float]] = {
    # North America
    "USA": (18.0, -180.0, 72.0, -65.0),  # inc. Alaska + Hawaii
    "Canada": (41.0, -142.0, 84.0, -52.0),
    "Mexico": (14.0, -120.0, 33.0, -86.0),
    # South & Central America
    "Brazil": (-35.0, -74.5, 6.0, -34.0),
    "Argentina": (-56.0, -74.0, -21.5, -53.0),
    "Chile": (-56.0, -76.0, -17.0, -66.0),
    "Colombia": (-5.0, -82.0, 13.5, -66.5),
    "Peru": (-19.0, -82.0, 0.5, -68.5),
    "Venezuela": (0.5, -73.5, 12.5, -59.5),
    "Ecuador": (-5.5, -92.5, 2.0, -75.0),  # inc. Galápagos
    "Bolivia": (-23.0, -69.5, -9.5, -57.5),
    "Uruguay": (-35.0, -58.5, -30.0, -53.0),
    "Paraguay": (-28.0, -63.0, -19.0, -54.0),
    "Guatemala": (13.5, -92.5, 18.0, -88.0),
    "Honduras": (12.5, -89.5, 16.5, -83.0),
    "Nicaragua": (10.5, -88.0, 15.5, -83.0),
    "Costa Rica": (8.0, -86.0, 11.5, -82.5),
    "Panama": (7.0, -83.5, 9.7, -77.0),
    "El Salvador": (13.0, -90.5, 14.5, -87.5),
    "Cuba": (19.5, -85.0, 23.5, -74.0),
    "Dominican Republic": (17.5, -72.5, 20.0, -68.0),
    "Haiti": (17.5, -74.5, 20.5, -71.5),
    "Jamaica": (17.5, -78.5, 18.7, -76.0),
    "Puerto Rico": (17.5, -68.0, 19.0, -65.0),
    # Europe
    "United Kingdom": (49.0, -9.0, 61.0, 2.5),
    "Ireland": (51.0, -11.0, 56.0, -5.0),
    "France": (41.0, -5.5, 51.5, 9.8),
    "Germany": (47.0, 5.5, 56.0, 15.5),
    "Spain": (27.0, -18.5, 44.0, 4.5),  # inc. Canary Islands
    "Portugal": (32.0, -32.0, 42.5, -6.0),  # inc. Azores + Madeira
    "Italy": (36.0, 6.5, 47.5, 19.0),
    "Netherlands": (50.5, 3.0, 53.8, 7.3),
    "Belgium": (49.4, 2.5, 51.6, 6.5),
    "Switzerland": (45.7, 5.8, 48.0, 10.6),
    "Austria": (46.3, 9.5, 49.1, 17.2),
    "Poland": (49.0, 14.0, 55.0, 24.2),
    "Czech Republic": (48.5, 12.0, 51.2, 18.9),
    "Slovakia": (47.7, 16.8, 49.7, 22.6),
    "Hungary": (45.7, 16.1, 48.6, 22.9),
    "Romania": (43.6, 20.2, 48.3, 29.7),
    "Bulgaria": (41.2, 22.3, 44.3, 28.7),
    "Greece": (34.7, 19.3, 41.8, 29.7),
    "Turkey": (35.8, 25.6, 42.2, 44.8),
    "Ukraine": (44.3, 22.1, 52.4, 40.3),
    "Belarus": (51.2, 23.1, 56.2, 32.8),
    "Russia": (41.0, 19.0, 82.0, 180.0),
    "Sweden": (55.0, 10.5, 69.1, 24.2),
    "Norway": (57.9, 4.5, 71.2, 31.1),
    "Finland": (59.7, 20.5, 70.1, 31.6),
    "Denmark": (54.5, 8.0, 57.9, 15.3),
    "Iceland": (63.3, -24.6, 66.6, -13.4),
    "Serbia": (42.2, 18.8, 46.2, 23.0),
    "Croatia": (42.3, 13.4, 46.6, 19.5),
    "Slovenia": (45.4, 13.3, 46.9, 16.7),
    "Bosnia and Herzegovina": (42.5, 15.7, 45.3, 19.7),
    "North Macedonia": (40.8, 20.4, 42.4, 23.1),
    "Albania": (39.6, 19.2, 42.7, 21.1),
    "Kosovo": (41.8, 20.0, 43.3, 21.8),
    "Moldova": (45.4, 26.6, 48.5, 30.2),
    "Lithuania": (53.8, 20.9, 56.5, 26.9),
    "Latvia": (55.6, 20.9, 58.1, 28.3),
    "Estonia": (57.5, 21.7, 59.8, 28.3),
    "Luxembourg": (49.4, 5.7, 50.2, 6.6),
    "Malta": (35.7, 14.1, 36.1, 14.7),
    "Cyprus": (34.5, 32.2, 35.8, 34.7),
    # Middle East
    "Israel": (29.4, 34.2, 33.4, 35.9),
    "Lebanon": (33.0, 35.1, 34.7, 36.7),
    "Jordan": (29.1, 34.9, 33.4, 39.4),
    "Syria": (32.3, 35.7, 37.4, 42.4),
    "Iraq": (29.0, 38.8, 37.4, 48.8),
    "Iran": (25.0, 44.0, 40.0, 63.4),
    "Saudi Arabia": (16.3, 34.5, 32.2, 55.7),
    "Yemen": (12.0, 42.5, 19.0, 54.5),
    "United Arab Emirates": (22.6, 51.5, 26.1, 56.4),
    "Oman": (16.6, 52.0, 26.4, 59.9),
    "Qatar": (24.4, 50.7, 26.2, 51.7),
    "Bahrain": (25.8, 50.4, 26.4, 50.8),
    "Kuwait": (28.5, 46.5, 30.1, 48.4),
    "Afghanistan": (29.4, 60.5, 38.5, 74.9),
    # Asia
    "India": (6.0, 68.0, 36.0, 98.0),
    "Pakistan": (23.7, 60.9, 37.1, 77.8),
    "Bangladesh": (20.6, 88.0, 26.6, 92.7),
    "Sri Lanka": (5.9, 79.5, 9.9, 82.0),
    "Nepal": (26.3, 80.0, 30.5, 88.2),
    "China": (18.0, 73.0, 54.0, 135.5),
    "Mongolia": (41.6, 87.7, 52.2, 119.9),
    "Japan": (24.0, 122.0, 46.0, 146.0),
    "South Korea": (33.1, 125.1, 38.6, 131.9),
    "North Korea": (37.7, 124.2, 43.0, 130.7),
    "Taiwan": (21.8, 119.3, 25.4, 122.1),
    "Hong Kong": (22.1, 113.8, 22.6, 114.5),
    "Vietnam": (8.2, 102.1, 23.4, 109.5),
    "Thailand": (5.6, 97.3, 20.5, 105.7),
    "Cambodia": (10.4, 102.3, 14.7, 107.7),
    "Laos": (13.9, 100.0, 22.5, 107.7),
    "Myanmar": (9.5, 92.1, 28.6, 101.2),
    "Malaysia": (0.8, 99.5, 7.5, 119.3),
    "Singapore": (1.1, 103.5, 1.5, 104.1),
    "Indonesia": (-11.1, 94.8, 6.1, 141.1),
    "Philippines": (4.5, 116.0, 21.5, 127.0),
    "Brunei": (4.0, 114.0, 5.1, 115.4),
    "Kazakhstan": (40.5, 46.4, 55.5, 87.4),
    "Uzbekistan": (37.1, 55.9, 45.6, 73.2),
    "Kyrgyzstan": (39.1, 69.2, 43.3, 80.3),
    "Tajikistan": (36.6, 67.3, 41.1, 75.2),
    "Turkmenistan": (35.1, 52.4, 42.8, 66.7),
    "Azerbaijan": (38.3, 44.7, 41.9, 50.6),
    "Armenia": (38.8, 43.4, 41.3, 46.6),
    "Georgia": (41.0, 40.0, 43.6, 46.8),
    # Oceania
    "Australia": (-44.0, 112.0, -9.0, 155.0),
    "New Zealand": (-48.0, 165.0, -33.0, 179.5),
    "Papua New Guinea": (-11.7, 140.8, -1.0, 156.0),
    # Fiji crosses the antimeridian, so min_lng > max_lng here — a plain
    # min<=lng<=max test can never pass; longitude checks must wrap ±180.
    "Fiji": (-21.0, 176.8, -12.4, -178.3),
    # Africa (selected — most common NUFORC reporters)
    "South Africa": (-35.0, 16.0, -22.0, 33.0),
    "Egypt": (21.7, 24.7, 31.7, 36.9),
    "Morocco": (27.6, -13.2, 35.9, -1.0),
    "Algeria": (18.9, -8.7, 37.1, 12.0),
    "Tunisia": (30.2, 7.5, 37.5, 11.6),
    "Libya": (19.5, 9.3, 33.2, 25.2),
    "Sudan": (8.6, 21.8, 22.2, 38.6),
    "Ethiopia": (3.4, 32.9, 14.9, 48.0),
    "Kenya": (-4.7, 33.9, 5.5, 41.9),
    "Tanzania": (-11.8, 29.3, -0.9, 40.4),
    "Uganda": (-1.5, 29.5, 4.2, 35.0),
    "Nigeria": (4.2, 2.6, 13.9, 14.7),
    "Ghana": (4.7, -3.3, 11.2, 1.2),
    "Senegal": (12.3, -17.6, 16.7, -11.3),
    "Ivory Coast": (4.3, -8.6, 10.7, -2.5),
    "Cameroon": (1.6, 8.5, 13.1, 16.2),
    "Angola": (-18.1, 11.7, -4.4, 24.1),
    "Zimbabwe": (-22.5, 25.2, -15.6, 33.1),
    "Zambia": (-18.1, 21.9, -8.2, 33.7),
    "Mozambique": (-26.9, 30.2, -10.5, 40.9),
    "Madagascar": (-25.7, 43.2, -11.9, 50.5),
    "Democratic Republic of the Congo": (-13.5, 12.2, 5.4, 31.4),
    "Rwanda": (-2.9, 28.8, -1.0, 30.9),
}
|
||||
|
||||
# Common aliases used in NUFORC / other data sources.
|
||||
_COUNTRY_ALIASES: dict[str, str] = {
|
||||
"US": "USA",
|
||||
"U.S.": "USA",
|
||||
"U.S.A.": "USA",
|
||||
"United States": "USA",
|
||||
"United States of America": "USA",
|
||||
"America": "USA",
|
||||
"UK": "United Kingdom",
|
||||
"U.K.": "United Kingdom",
|
||||
"Britain": "United Kingdom",
|
||||
"Great Britain": "United Kingdom",
|
||||
"England": "United Kingdom",
|
||||
"Scotland": "United Kingdom",
|
||||
"Wales": "United Kingdom",
|
||||
"Northern Ireland": "United Kingdom",
|
||||
"Czechia": "Czech Republic",
|
||||
"Czechoslovakia": "Czech Republic",
|
||||
"South Korea": "South Korea",
|
||||
"Korea": "South Korea",
|
||||
"Republic of Korea": "South Korea",
|
||||
"Democratic People's Republic of Korea": "North Korea",
|
||||
"DPRK": "North Korea",
|
||||
"Russian Federation": "Russia",
|
||||
"Viet Nam": "Vietnam",
|
||||
"Côte d'Ivoire": "Ivory Coast",
|
||||
"Cote d'Ivoire": "Ivory Coast",
|
||||
"DR Congo": "Democratic Republic of the Congo",
|
||||
"DRC": "Democratic Republic of the Congo",
|
||||
"Congo-Kinshasa": "Democratic Republic of the Congo",
|
||||
"Macedonia": "North Macedonia",
|
||||
"Burma": "Myanmar",
|
||||
"Holland": "Netherlands",
|
||||
}
|
||||
|
||||
|
||||
def canonical_country(country: str) -> str:
    """Normalise a country string to its registry key.

    Empty / falsy input maps to ``""``; unknown names pass through
    unchanged so callers can still treat them as "no bbox known".
    """
    if not country:
        return ""
    stripped = country.strip()
    return _COUNTRY_ALIASES.get(stripped, stripped)
|
||||
|
||||
|
||||
def coord_in_country(lat: float, lng: float, country: str) -> Optional[bool]:
    """Return True if (lat, lng) is inside the country bbox, False if it
    is outside, or None if the country is unknown (cannot validate — the
    caller should treat unknown as "pass", not "fail").

    Non-numeric lat/lng also yields None; coordinates outside valid
    WGS84 ranges yield False.
    """
    try:
        lat_f = float(lat)
        lng_f = float(lng)
    except (TypeError, ValueError):
        return None
    if not (-90.0 <= lat_f <= 90.0 and -180.0 <= lng_f <= 180.0):
        return False
    c = canonical_country(country)
    bbox = _COUNTRY_BBOX.get(c)
    if bbox is None:
        return None
    min_lat, min_lng, max_lat, max_lng = bbox
    if not (min_lat <= lat_f <= max_lat):
        return False
    if min_lng <= max_lng:
        # Normal (non-wrapping) longitude range.
        return min_lng <= lng_f <= max_lng
    # Antimeridian-crossing bbox (min_lng > max_lng, e.g. Fiji): the
    # longitude range wraps around ±180, so the point is inside if it
    # lies on EITHER side of the seam. Without this, every coordinate in
    # such a country was falsely rejected.
    return lng_f >= min_lng or lng_f <= max_lng
|
||||
|
||||
|
||||
def validate_geocode(
    lat: float,
    lng: float,
    country: str,
) -> bool:
    """Higher-level gate used in fetcher geocoding loops.

    Returns True if the coordinate is acceptable for the given country,
    False if it's clearly a namesake collision that should be rejected.
    Unknown countries are treated as "accept" so we don't throw away
    otherwise-good data for uncovered regions.
    """
    # coord_in_country is tri-state: only an explicit False (known
    # country, coordinate outside its bbox) rejects; True and None both
    # pass.
    return coord_in_country(lat, lng, country) is not False
|
||||
@@ -201,10 +201,12 @@ def _is_gibberish(text):
|
||||
# Persistent cache for article titles — survives across GDELT cache refreshes
# Bounded to 5000 entries with 24hr TTL to prevent unbounded memory growth
_article_title_cache = TTLCache(maxsize=5000, ttl=86400)
# Snippet cache, bounded the same way. A plain dict here grew without limit
# as new article URLs streamed in, contradicting the bounding policy above;
# expired entries simply get re-extracted on the next fetch.
_article_snippet_cache = TTLCache(maxsize=5000, ttl=86400)
_article_url_safety_cache = TTLCache(maxsize=5000, ttl=3600)
# Limits applied while fetching article HTML for titles/snippets.
_TITLE_FETCH_MAX_REDIRECTS = 3
_TITLE_FETCH_READ_BYTES = 32768
_ALLOWED_ARTICLE_PORTS = {80, 443, 8080, 8443}
_MAX_SNIPPET_LEN = 200
|
||||
|
||||
|
||||
def _hostname_resolves_public(hostname: str, port: int) -> bool:
|
||||
@@ -269,6 +271,30 @@ def _is_safe_public_article_url(url: str) -> tuple[bool, str]:
|
||||
return result
|
||||
|
||||
|
||||
def _extract_snippet(url: str, chunk: str) -> None:
    """Extract og:description or meta description from an already-fetched
    HTML chunk and record it in ``_article_snippet_cache``.

    Side-effect only (returns None). A cache hit — including a cached
    ``None`` for a previous failed extraction — short-circuits, so each
    URL is only parsed once per cache lifetime.
    """
    import re
    import html as html_mod

    if url in _article_snippet_cache:
        return
    snippet = None
    # Try og:description first
    # Each meta tag is matched in both attribute orders
    # (property-then-content and content-then-property) since publishers
    # emit either; plain <meta name="description"> is the fallback.
    for pattern in (
        r'<meta[^>]+property=["\']og:description["\'][^>]+content=["\']([^"\'>]+)["\']',
        r'<meta[^>]+content=["\']([^"\'>]+)["\'][^>]+property=["\']og:description["\']',
        r'<meta[^>]+name=["\']description["\'][^>]+content=["\']([^"\'>]+)["\']',
        r'<meta[^>]+content=["\']([^"\'>]+)["\'][^>]+name=["\']description["\']',
    ):
        m = re.search(pattern, chunk, re.I)
        if m:
            snippet = html_mod.unescape(m.group(1)).strip()
            break
    # Truncate at a word boundary so the ellipsis never splits a word.
    if snippet and len(snippet) > _MAX_SNIPPET_LEN:
        snippet = snippet[:_MAX_SNIPPET_LEN - 3].rsplit(" ", 1)[0] + "..."
    # Very short matches are junk (empty/boilerplate descriptions); cache
    # None so the URL isn't re-parsed on the next call.
    _article_snippet_cache[url] = snippet if snippet and len(snippet) > 15 else None
|
||||
|
||||
|
||||
def _fetch_article_title(url):
|
||||
"""Fetch the real headline from an article's HTML <title> or og:title tag.
|
||||
Returns the title string, or None if it can't be fetched.
|
||||
@@ -343,6 +369,8 @@ def _fetch_article_title(url):
|
||||
title = title[:117] + "..."
|
||||
if len(title) > 10:
|
||||
_article_title_cache[url] = title
|
||||
# Also extract og:description / meta description for snippet
|
||||
_extract_snippet(url, chunk)
|
||||
return title
|
||||
|
||||
_article_title_cache[url] = None
|
||||
@@ -405,21 +433,49 @@ def _parse_gdelt_export_zip(zip_bytes, conflict_codes, seen_locs, features, loc_
|
||||
actor1 = row[6].strip() if len(row) > 6 else ""
|
||||
actor2 = row[16].strip() if len(row) > 16 else ""
|
||||
|
||||
# Extract enrichment fields from GDELT CSV
|
||||
event_date = row[1].strip() if len(row) > 1 else ""
|
||||
full_event_code = row[26].strip() if len(row) > 26 else ""
|
||||
quad_class = int(row[29]) if len(row) > 29 and row[29].strip().isdigit() else 0
|
||||
goldstein = float(row[30]) if len(row) > 30 and row[30].strip() else 0.0
|
||||
num_mentions = int(row[31]) if len(row) > 31 and row[31].strip().isdigit() else 0
|
||||
num_sources = int(row[32]) if len(row) > 32 and row[32].strip().isdigit() else 0
|
||||
num_articles = int(row[33]) if len(row) > 33 and row[33].strip().isdigit() else 0
|
||||
avg_tone = float(row[34]) if len(row) > 34 and row[34].strip() else 0.0
|
||||
|
||||
loc_key = f"{round(lat, 1)}_{round(lng, 1)}"
|
||||
if loc_key in seen_locs:
|
||||
# Merge: increment count and add source URL if new (dedup by domain)
|
||||
# Merge: increment count, accumulate intensity, add source URL
|
||||
idx = loc_index[loc_key]
|
||||
feat = features[idx]
|
||||
feat["properties"]["count"] = feat["properties"].get("count", 1) + 1
|
||||
urls = feat["properties"].get("_urls", [])
|
||||
seen_domains = feat["properties"].get("_domains", set())
|
||||
props = feat["properties"]
|
||||
props["count"] = props.get("count", 1) + 1
|
||||
# Track worst Goldstein score (most negative = most intense)
|
||||
if goldstein < props.get("goldstein", 0):
|
||||
props["goldstein"] = round(goldstein, 1)
|
||||
# Accumulate mentions/sources for importance ranking
|
||||
props["num_mentions"] = props.get("num_mentions", 0) + num_mentions
|
||||
props["num_sources"] = props.get("num_sources", 0) + num_sources
|
||||
props["num_articles"] = props.get("num_articles", 0) + num_articles
|
||||
# Track latest date
|
||||
if event_date and event_date > props.get("event_date", ""):
|
||||
props["event_date"] = event_date
|
||||
# Collect actors
|
||||
actors = props.get("_actors_set", set())
|
||||
if actor1:
|
||||
actors.add(actor1)
|
||||
if actor2:
|
||||
actors.add(actor2)
|
||||
props["_actors_set"] = actors
|
||||
urls = props.get("_urls", [])
|
||||
seen_domains = props.get("_domains", set())
|
||||
if source_url:
|
||||
domain = _extract_domain(source_url)
|
||||
if domain not in seen_domains and len(urls) < 10:
|
||||
urls.append(source_url)
|
||||
seen_domains.add(domain)
|
||||
feat["properties"]["_urls"] = urls
|
||||
feat["properties"]["_domains"] = seen_domains
|
||||
props["_urls"] = urls
|
||||
props["_domains"] = seen_domains
|
||||
continue
|
||||
seen_locs.add(loc_key)
|
||||
|
||||
@@ -429,6 +485,11 @@ def _parse_gdelt_export_zip(zip_bytes, conflict_codes, seen_locs, features, loc_
|
||||
or "Unknown Incident"
|
||||
)
|
||||
domain = _extract_domain(source_url) if source_url else ""
|
||||
actors_set = set()
|
||||
if actor1:
|
||||
actors_set.add(actor1)
|
||||
if actor2:
|
||||
actors_set.add(actor2)
|
||||
loc_index[loc_key] = len(features)
|
||||
features.append(
|
||||
{
|
||||
@@ -436,6 +497,17 @@ def _parse_gdelt_export_zip(zip_bytes, conflict_codes, seen_locs, features, loc_
|
||||
"properties": {
|
||||
"name": name,
|
||||
"count": 1,
|
||||
"event_date": event_date,
|
||||
"event_code": full_event_code,
|
||||
"quad_class": quad_class,
|
||||
"goldstein": round(goldstein, 1),
|
||||
"num_mentions": num_mentions,
|
||||
"num_sources": num_sources,
|
||||
"num_articles": num_articles,
|
||||
"avg_tone": round(avg_tone, 1),
|
||||
"actor1": actor1,
|
||||
"actor2": actor2,
|
||||
"_actors_set": actors_set,
|
||||
"_urls": [source_url] if source_url else [],
|
||||
"_domains": {domain} if domain else set(),
|
||||
},
|
||||
@@ -468,12 +540,19 @@ def _build_feature_html(features, fetched_titles=None):
|
||||
for f in features:
|
||||
urls = f["properties"].pop("_urls", [])
|
||||
f["properties"].pop("_domains", None)
|
||||
# Convert actors set to sorted list for JSON serialization
|
||||
actors_set = f["properties"].pop("_actors_set", set())
|
||||
if actors_set:
|
||||
f["properties"]["actors"] = sorted(actors_set)[:6]
|
||||
headlines = []
|
||||
snippets = []
|
||||
for u in urls:
|
||||
real_title = fetched_titles.get(u) if fetched_titles else None
|
||||
headlines.append(real_title if real_title else _url_to_headline(u))
|
||||
snippets.append(_article_snippet_cache.get(u) or "")
|
||||
f["properties"]["_urls_list"] = urls
|
||||
f["properties"]["_headlines_list"] = headlines
|
||||
f["properties"]["_snippets_list"] = snippets
|
||||
if urls:
|
||||
links = []
|
||||
for u, h in zip(urls, headlines):
|
||||
@@ -498,16 +577,19 @@ def _enrich_gdelt_titles_background(features, all_article_urls):
|
||||
fetched_count = sum(1 for v in fetched_titles.values() if v)
|
||||
logger.info(f"[BG] Resolved {fetched_count}/{len(all_article_urls)} article titles")
|
||||
|
||||
# Update features in-place with real titles
|
||||
# Update features in-place with real titles and snippets
|
||||
for f in features:
|
||||
urls = f["properties"].get("_urls_list", [])
|
||||
if not urls:
|
||||
continue
|
||||
headlines = []
|
||||
snippets = []
|
||||
for u in urls:
|
||||
real_title = fetched_titles.get(u)
|
||||
headlines.append(real_title if real_title else _url_to_headline(u))
|
||||
snippets.append(_article_snippet_cache.get(u) or "")
|
||||
f["properties"]["_headlines_list"] = headlines
|
||||
f["properties"]["_snippets_list"] = snippets
|
||||
links = []
|
||||
for u, h in zip(urls, headlines):
|
||||
safe_url = u if u.startswith(("http://", "https://")) else "about:blank"
|
||||
@@ -564,8 +646,8 @@ def fetch_global_military_incidents():
|
||||
|
||||
latest_ts = datetime.strptime(ts_match.group(1), "%Y%m%d%H%M%S")
|
||||
|
||||
# Generate URLs for the last 8 hours (32 files at 15-min intervals)
|
||||
NUM_FILES = 32
|
||||
# Generate URLs for the last 12 hours (48 files at 15-min intervals)
|
||||
NUM_FILES = 48
|
||||
urls = []
|
||||
for i in range(NUM_FILES):
|
||||
ts = latest_ts - timedelta(minutes=15 * i)
|
||||
@@ -583,7 +665,7 @@ def fetch_global_military_incidents():
|
||||
logger.info(f"Downloaded {successful}/{len(urls)} GDELT exports")
|
||||
|
||||
# Parse all downloaded files
|
||||
CONFLICT_CODES = {"14", "17", "18", "19", "20"}
|
||||
CONFLICT_CODES = {"13", "14", "15", "16", "17", "18", "19", "20"}
|
||||
features = []
|
||||
seen_locs = set()
|
||||
loc_index = {} # loc_key -> index in features
|
||||
|
||||
@@ -0,0 +1,129 @@
|
||||
"""Infonet economy & governance layer.
|
||||
|
||||
Layered ON TOP OF the existing mesh primitives in ``services/mesh/``.
|
||||
The chain-write cutover (2026-04-28) registers Infonet event types
|
||||
with ``mesh_schema`` and ``mesh_hashchain`` so production writes flow
|
||||
through the legacy chain. The cutover is performed at import time by
|
||||
``services.infonet._chain_cutover``.
|
||||
|
||||
The only legacy file modified by the cutover is ``mesh_schema.py``,
|
||||
which gained a generic extension hook (``register_extension_validator``).
|
||||
``mesh_hashchain.py`` is byte-identical to its Sprint 1 baseline; the
|
||||
cutover mutates its module-level ``ACTIVE_APPEND_EVENT_TYPES`` set
|
||||
(which is a mutable ``set``, not a frozenset, by design).
|
||||
|
||||
See ``infonet-economy/IMPLEMENTATION_PLAN.md`` and ``infonet-economy/BUILD_LOG.md``
|
||||
in the repository root for the build order, sprint scope, and integration
|
||||
principles. ``infonet-economy/RULES_SKELETON.md`` is the source of truth
|
||||
for any formula / value / state machine implemented here.
|
||||
"""
|
||||
|
||||
# Trigger the chain-write cutover at import time. Idempotent — see
# ``_chain_cutover.perform_cutover``. This must happen before any
# adapter or producer uses mesh_schema.validate_event_payload on a
# new event type.
from services.infonet import _chain_cutover as _chain_cutover_module

_chain_cutover_module.perform_cutover()
# Drop the alias so the private module doesn't leak into the package
# namespace (it stays importable as services.infonet._chain_cutover).
del _chain_cutover_module
|
||||
|
||||
from services.infonet.config import (
|
||||
CONFIG,
|
||||
CONFIG_SCHEMA,
|
||||
CROSS_FIELD_INVARIANTS,
|
||||
IMMUTABLE_PRINCIPLES,
|
||||
InvalidPetition,
|
||||
reset_config_for_tests,
|
||||
validate_config_schema_completeness,
|
||||
validate_cross_field_invariants,
|
||||
validate_petition_value,
|
||||
)
|
||||
from services.infonet.identity_rotation import (
|
||||
RotationBlocker,
|
||||
RotationDecision,
|
||||
rotation_descendants,
|
||||
validate_rotation,
|
||||
)
|
||||
from services.infonet.markets import (
|
||||
EvidenceBundle,
|
||||
MarketStatus,
|
||||
ResolutionResult,
|
||||
build_snapshot,
|
||||
collect_evidence,
|
||||
collect_resolution_stakes,
|
||||
compute_market_status,
|
||||
compute_snapshot_event_hash,
|
||||
evidence_content_hash,
|
||||
excluded_predictor_ids,
|
||||
find_snapshot,
|
||||
is_first_for_side,
|
||||
is_predictor_excluded,
|
||||
resolve_market,
|
||||
should_advance_phase,
|
||||
submission_hash,
|
||||
)
|
||||
from services.infonet.reputation import (
|
||||
OracleRepBreakdown,
|
||||
compute_common_rep,
|
||||
compute_oracle_rep,
|
||||
compute_oracle_rep_active,
|
||||
compute_oracle_rep_lifetime,
|
||||
decay_factor_for_age,
|
||||
last_successful_prediction_ts,
|
||||
)
|
||||
from services.infonet.schema import (
|
||||
INFONET_ECONOMY_EVENT_TYPES,
|
||||
InfonetEventSchema,
|
||||
get_infonet_schema,
|
||||
validate_infonet_event_payload,
|
||||
)
|
||||
from services.infonet.time_validity import (
|
||||
chain_majority_time,
|
||||
event_meets_phase_window,
|
||||
is_event_too_future,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"CONFIG",
|
||||
"CONFIG_SCHEMA",
|
||||
"CROSS_FIELD_INVARIANTS",
|
||||
"IMMUTABLE_PRINCIPLES",
|
||||
"INFONET_ECONOMY_EVENT_TYPES",
|
||||
"EvidenceBundle",
|
||||
"InfonetEventSchema",
|
||||
"InvalidPetition",
|
||||
"MarketStatus",
|
||||
"OracleRepBreakdown",
|
||||
"ResolutionResult",
|
||||
"RotationBlocker",
|
||||
"RotationDecision",
|
||||
"build_snapshot",
|
||||
"chain_majority_time",
|
||||
"collect_evidence",
|
||||
"collect_resolution_stakes",
|
||||
"compute_common_rep",
|
||||
"compute_market_status",
|
||||
"compute_oracle_rep",
|
||||
"compute_oracle_rep_active",
|
||||
"compute_oracle_rep_lifetime",
|
||||
"compute_snapshot_event_hash",
|
||||
"decay_factor_for_age",
|
||||
"event_meets_phase_window",
|
||||
"evidence_content_hash",
|
||||
"excluded_predictor_ids",
|
||||
"find_snapshot",
|
||||
"get_infonet_schema",
|
||||
"is_event_too_future",
|
||||
"is_first_for_side",
|
||||
"is_predictor_excluded",
|
||||
"last_successful_prediction_ts",
|
||||
"reset_config_for_tests",
|
||||
"resolve_market",
|
||||
"rotation_descendants",
|
||||
"should_advance_phase",
|
||||
"submission_hash",
|
||||
"validate_config_schema_completeness",
|
||||
"validate_cross_field_invariants",
|
||||
"validate_infonet_event_payload",
|
||||
"validate_petition_value",
|
||||
"validate_rotation",
|
||||
]
|
||||
@@ -0,0 +1,108 @@
|
||||
"""Chain-write cutover — register Infonet economy event types with the
|
||||
legacy mesh_schema + mesh_hashchain at import time.
|
||||
|
||||
Source of truth: ``infonet-economy/BUILD_LOG.md`` Sprint 4 §6.2 cutover
|
||||
decision (Option C — rename + coexist with new event-type names).
|
||||
|
||||
Before this cutover, Sprints 1-7 produced economy events through
|
||||
``InfonetHashchainAdapter.dry_run_append`` only. None of them landed
|
||||
on the legacy chain because ``mesh_hashchain.Infonet.append`` rejected
|
||||
any event_type not in ``ACTIVE_APPEND_EVENT_TYPES``.
|
||||
|
||||
This module performs the surgical wiring needed for production writes:
|
||||
|
||||
1. Mutates ``mesh_hashchain.ACTIVE_APPEND_EVENT_TYPES`` (a mutable
|
||||
set, not a frozenset) to include every type in
|
||||
``INFONET_ECONOMY_EVENT_TYPES``.
|
||||
2. Registers each economy event type's payload validator with
|
||||
``mesh_schema._EXTENSION_VALIDATORS`` via the Sprint-8-polish
|
||||
``register_extension_validator`` hook.
|
||||
|
||||
The cutover is **idempotent**: importing this module twice leaves the
|
||||
state unchanged.
|
||||
|
||||
The direction is **one-way**: infonet imports mesh_*; mesh never
|
||||
imports infonet. mesh_schema's hook is generic — it doesn't know
|
||||
about infonet specifically.
|
||||
|
||||
What is NOT modified by this cutover:
|
||||
|
||||
- ``mesh_schema.SCHEMA_REGISTRY`` — legacy validators stay as-is.
|
||||
Economy types use the parallel ``_EXTENSION_VALIDATORS`` registry.
|
||||
- ``mesh_schema.ACTIVE_PUBLIC_LEDGER_EVENT_TYPES`` — legacy frozenset
|
||||
unchanged. The runtime decision in
|
||||
``mesh_hashchain.Infonet.append`` consults the mutable
|
||||
``ACTIVE_APPEND_EVENT_TYPES`` set.
|
||||
- ``mesh_hashchain.py`` — byte-identical to its Sprint 1 baseline.
|
||||
- The legacy ``normalize_payload`` and "no ephemeral on this type"
|
||||
checks — extension events skip them. Economy event payloads
|
||||
already have their own normalization (the schema in
|
||||
``services/infonet/schema.py``).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import threading
|
||||
|
||||
from services.infonet.schema import (
|
||||
INFONET_ECONOMY_EVENT_TYPES,
|
||||
validate_infonet_event_payload,
|
||||
)
|
||||
from services.mesh import mesh_hashchain, mesh_schema
|
||||
|
||||
|
||||
_CUTOVER_LOCK = threading.Lock()
|
||||
_CUTOVER_DONE = False
|
||||
|
||||
|
||||
def perform_cutover() -> None:
    """Register every Infonet economy event type with the legacy chain.

    Idempotent and thread-safe: the lock plus the module-level sentinel
    guarantee the registration runs exactly once, no matter how many
    importers trigger it.
    """
    global _CUTOVER_DONE
    with _CUTOVER_LOCK:
        if _CUTOVER_DONE:
            return
        # Widen the hashchain's append gate so Infonet.append accepts the
        # economy types. ACTIVE_APPEND_EVENT_TYPES is a plain mutable set
        # in the legacy module (set(), not frozenset()) — in-place update
        # is the intended extension mechanism.
        mesh_hashchain.ACTIVE_APPEND_EVENT_TYPES.update(INFONET_ECONOMY_EVENT_TYPES)
        # Hook a payload validator for each type. The loop variable is
        # bound through a default argument, which sidesteps Python's
        # late-binding closure trap.
        for etype in INFONET_ECONOMY_EVENT_TYPES:
            mesh_schema.register_extension_validator(
                etype,
                lambda payload, _bound=etype: validate_infonet_event_payload(_bound, payload),
            )
        _CUTOVER_DONE = True
|
||||
|
||||
|
||||
def cutover_status() -> dict[str, object]:
    """Diagnostic snapshot of the cutover — used by tests and health
    endpoints to confirm it ran and registered every type."""
    registered: list[str] = []
    missing: list[str] = []
    # Single sorted pass partitions the types by registration state.
    for etype in sorted(INFONET_ECONOMY_EVENT_TYPES):
        if mesh_schema.is_extension_event_type(etype):
            registered.append(etype)
        else:
            missing.append(etype)
    append_ok = INFONET_ECONOMY_EVENT_TYPES.issubset(
        mesh_hashchain.ACTIVE_APPEND_EVENT_TYPES
    )
    return {
        "done": _CUTOVER_DONE,
        "registered_types": registered,
        "missing_types": missing,
        "active_append_includes_economy": append_ok,
    }
|
||||
|
||||
|
||||
# Run automatically when the module is imported. The infonet package
# __init__ imports this module, so any code that uses
# ``services.infonet`` at all triggers the cutover. Production callers
# don't need to do anything explicit. (Safe: perform_cutover is
# idempotent behind a lock + sentinel.)
perform_cutover()


__all__ = ["cutover_status", "perform_cutover"]
|
||||
@@ -0,0 +1,38 @@
|
||||
"""Adapter layer between the Infonet economy package and the legacy
|
||||
``services/mesh/`` primitives.
|
||||
|
||||
Rule: **adapters import from mesh, mesh never imports from infonet.**
|
||||
This keeps the dependency direction one-way and lets us delete the
|
||||
infonet package without touching mesh.
|
||||
|
||||
The legacy mesh files (``mesh_schema.py``, ``mesh_signed_events.py``,
|
||||
``mesh_hashchain.py``, ``mesh_reputation.py``, ``mesh_oracle.py``) stay
|
||||
byte-identical through Sprint 3. From Sprint 4 onward, when actual chain
|
||||
writes for new event types start happening, the hashchain adapter is
|
||||
the single integration point that decides whether to:
|
||||
|
||||
1. Modify ``ACTIVE_APPEND_EVENT_TYPES`` in ``mesh_schema.py`` (one-shot,
|
||||
minimal mesh change), OR
|
||||
2. Maintain a parallel append surface in ``hashchain_adapter`` that
|
||||
shares the on-disk chain file but bypasses the legacy event-type
|
||||
gate.
|
||||
|
||||
The decision is recorded in ``infonet-economy/BUILD_LOG.md`` Sprint 4
|
||||
when made.
|
||||
"""
|
||||
|
||||
from services.infonet.adapters.hashchain_adapter import (
|
||||
InfonetHashchainAdapter,
|
||||
extended_active_event_types,
|
||||
)
|
||||
from services.infonet.adapters.signed_write_adapter import (
|
||||
INFONET_SIGNED_WRITE_KINDS,
|
||||
InfonetSignedWriteKind,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"INFONET_SIGNED_WRITE_KINDS",
|
||||
"InfonetHashchainAdapter",
|
||||
"InfonetSignedWriteKind",
|
||||
"extended_active_event_types",
|
||||
]
|
||||
@@ -0,0 +1,178 @@
|
||||
"""Gate adapter — Sprint 6 implementation.
|
||||
|
||||
Bridges chain history to the gate sacrifice / locking / shutdown
|
||||
lifecycle. Same ``chain_provider`` pattern as the other adapters.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Any, Callable, Iterable
|
||||
|
||||
from services.infonet.gates import (
|
||||
AppealValidation,
|
||||
EntryDecision,
|
||||
GateMeta,
|
||||
LockedGateState,
|
||||
ShutdownState,
|
||||
SuspensionState,
|
||||
can_enter,
|
||||
compute_member_set,
|
||||
compute_shutdown_state,
|
||||
compute_suspension_state,
|
||||
cumulative_member_oracle_rep,
|
||||
get_gate_meta,
|
||||
is_locked,
|
||||
is_member,
|
||||
is_ratified,
|
||||
locked_at,
|
||||
locked_by,
|
||||
paused_execution_remaining_sec,
|
||||
validate_appeal_filing,
|
||||
validate_lock_request,
|
||||
validate_shutdown_filing,
|
||||
validate_suspend_filing,
|
||||
)
|
||||
from services.infonet.gates.locking import LockValidation
|
||||
from services.infonet.gates.shutdown.suspend import FilingValidation
|
||||
from services.infonet.time_validity import chain_majority_time
|
||||
|
||||
|
||||
_ChainProvider = Callable[[], Iterable[dict[str, Any]]]
|
||||
|
||||
|
||||
def _empty_chain() -> list[dict[str, Any]]:
    # Default chain provider used when the adapter is constructed
    # without one: an empty history.
    return []
|
||||
|
||||
|
||||
class InfonetGateAdapter:
    """Project chain state into gate views.

    Thin read-side facade: each method re-reads the chain through the
    injected ``chain_provider`` and delegates to the pure functions in
    ``services.infonet.gates``.
    """
|
||||
|
||||
    def __init__(self, chain_provider: _ChainProvider | None = None) -> None:
        # ``or`` (rather than ``is None``) means any falsy provider falls
        # back to the empty chain.
        self._chain_provider: _ChainProvider = chain_provider or _empty_chain
|
||||
|
||||
def _events(self) -> list[dict[str, Any]]:
|
||||
return [e for e in self._chain_provider() if isinstance(e, dict)]
|
||||
|
||||
def _now(self, override: float | None) -> float:
|
||||
if override is not None:
|
||||
return float(override)
|
||||
events = self._events()
|
||||
chain_now = chain_majority_time(events)
|
||||
return chain_now if chain_now > 0 else float(time.time())
|
||||
|
||||
    # ── Metadata + membership ────────────────────────────────────────
    def gate_meta(self, gate_id: str) -> GateMeta | None:
        """Gate metadata from chain history, or None if unknown."""
        return get_gate_meta(gate_id, self._events())
|
||||
|
||||
    def member_set(self, gate_id: str) -> set[str]:
        """Current member node-ids of the gate, projected from the chain."""
        return compute_member_set(gate_id, self._events())
|
||||
|
||||
    def is_member(self, node_id: str, gate_id: str) -> bool:
        """Whether ``node_id`` is currently a member of ``gate_id``."""
        return is_member(node_id, gate_id, self._events())
|
||||
|
||||
    def can_enter(self, node_id: str, gate_id: str) -> EntryDecision:
        """Entry decision for ``node_id`` against the gate's chain state."""
        return can_enter(node_id, gate_id, self._events())
|
||||
|
||||
    # ── Ratification ─────────────────────────────────────────────────
    def is_ratified(self, gate_id: str) -> bool:
        """Whether the gate has been ratified per chain history."""
        return is_ratified(gate_id, self._events())
|
||||
|
||||
    def cumulative_member_oracle_rep(self, gate_id: str) -> float:
        """Summed oracle reputation across the gate's members."""
        return cumulative_member_oracle_rep(gate_id, self._events())
|
||||
|
||||
    # ── Locking ──────────────────────────────────────────────────────
    def is_locked(self, gate_id: str) -> bool:
        """Whether the gate is currently locked."""
        return is_locked(gate_id, self._events())
|
||||
|
||||
def locked_state(self, gate_id: str) -> LockedGateState:
|
||||
events = self._events()
|
||||
return LockedGateState(
|
||||
locked=is_locked(gate_id, events),
|
||||
locked_at=locked_at(gate_id, events),
|
||||
locked_by=locked_by(gate_id, events),
|
||||
)
|
||||
|
||||
def validate_lock_request(
|
||||
self, node_id: str, gate_id: str, *, lock_cost: int | None = None,
|
||||
) -> LockValidation:
|
||||
return validate_lock_request(node_id, gate_id, self._events(), lock_cost=lock_cost)
|
||||
|
||||
# ── Suspension ───────────────────────────────────────────────────
|
||||
def suspension_state(
|
||||
self, gate_id: str, *, now: float | None = None,
|
||||
) -> SuspensionState:
|
||||
return compute_suspension_state(gate_id, self._events(), now=self._now(now))
|
||||
|
||||
def validate_suspend_filing(
|
||||
self,
|
||||
gate_id: str,
|
||||
filer_id: str,
|
||||
*,
|
||||
reason: str,
|
||||
evidence_hashes: list[str],
|
||||
now: float | None = None,
|
||||
filer_cooldown_until: float | None = None,
|
||||
) -> FilingValidation:
|
||||
return validate_suspend_filing(
|
||||
gate_id, filer_id,
|
||||
reason=reason, evidence_hashes=evidence_hashes,
|
||||
chain=self._events(), now=self._now(now),
|
||||
filer_cooldown_until=filer_cooldown_until,
|
||||
)
|
||||
|
||||
# ── Shutdown ─────────────────────────────────────────────────────
|
||||
def shutdown_state(
|
||||
self, gate_id: str, *, now: float | None = None,
|
||||
) -> ShutdownState:
|
||||
return compute_shutdown_state(gate_id, self._events(), now=self._now(now))
|
||||
|
||||
def validate_shutdown_filing(
|
||||
self,
|
||||
gate_id: str,
|
||||
filer_id: str,
|
||||
*,
|
||||
reason: str,
|
||||
evidence_hashes: list[str],
|
||||
now: float | None = None,
|
||||
filer_cooldown_until: float | None = None,
|
||||
) -> FilingValidation:
|
||||
return validate_shutdown_filing(
|
||||
gate_id, filer_id,
|
||||
reason=reason, evidence_hashes=evidence_hashes,
|
||||
chain=self._events(), now=self._now(now),
|
||||
filer_cooldown_until=filer_cooldown_until,
|
||||
)
|
||||
|
||||
# ── Appeal ───────────────────────────────────────────────────────
|
||||
def validate_appeal_filing(
|
||||
self,
|
||||
gate_id: str,
|
||||
target_petition_id: str,
|
||||
filer_id: str,
|
||||
*,
|
||||
reason: str,
|
||||
evidence_hashes: list[str],
|
||||
now: float | None = None,
|
||||
filer_cooldown_until: float | None = None,
|
||||
) -> AppealValidation:
|
||||
return validate_appeal_filing(
|
||||
gate_id, target_petition_id, filer_id,
|
||||
reason=reason, evidence_hashes=evidence_hashes,
|
||||
chain=self._events(), now=self._now(now),
|
||||
filer_cooldown_until=filer_cooldown_until,
|
||||
)
|
||||
|
||||
def paused_execution_remaining_sec(
|
||||
self,
|
||||
target_petition_id: str,
|
||||
*,
|
||||
appeal_filed_at: float,
|
||||
) -> float:
|
||||
return paused_execution_remaining_sec(
|
||||
target_petition_id, self._events(),
|
||||
appeal_filed_at=appeal_filed_at,
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["InfonetGateAdapter"]
|
||||
@@ -0,0 +1,125 @@
|
||||
"""Bridge between Infonet economy events and the legacy ``mesh_hashchain``.
|
||||
|
||||
Sprint 1 ships this as a **dry-run-only** surface. We do NOT call the
|
||||
legacy ``Infonet.append`` for new event types because that method
|
||||
hard-rejects anything not in ``ACTIVE_APPEND_EVENT_TYPES`` (defined in
|
||||
``mesh_schema.py``). Modifying that set is a Sprint 4 task — it requires
|
||||
the rest of the producer code to exist, otherwise a malformed
|
||||
``prediction_create`` could land on the chain with no resolver to
|
||||
process it.
|
||||
|
||||
What this adapter DOES today:
|
||||
|
||||
- ``extended_active_event_types()`` — returns the union of legacy active
|
||||
types and new economy types, for tooling that needs the full surface
|
||||
(e.g. RPC layer, frontend type generation).
|
||||
- ``InfonetHashchainAdapter.dry_run_append`` — validates a payload
|
||||
against the new schema and returns the event dict the legacy
|
||||
``Infonet.append`` would have built. Useful for tests and for the
|
||||
future cutover plan.
|
||||
|
||||
What this adapter will do in Sprint 4:
|
||||
|
||||
- ``append_infonet_event`` — actually call ``Infonet.append`` once
|
||||
``ACTIVE_APPEND_EVENT_TYPES`` is unioned with the economy types.
|
||||
|
||||
The Sprint 1 contract:
|
||||
|
||||
- ``mesh_hashchain.py`` is byte-identical to the pre-Sprint-1 baseline.
|
||||
- No event reaches the legacy chain via this adapter in Sprint 1.
|
||||
- Tests cover validation behavior only.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import time
|
||||
from typing import Any
|
||||
|
||||
from services.mesh.mesh_schema import (
|
||||
ACTIVE_PUBLIC_LEDGER_EVENT_TYPES as _LEGACY_ACTIVE_TYPES,
|
||||
)
|
||||
|
||||
from services.infonet.schema import (
|
||||
INFONET_ECONOMY_EVENT_TYPES,
|
||||
validate_infonet_event_payload,
|
||||
)
|
||||
|
||||
|
||||
def extended_active_event_types() -> frozenset[str]:
    """Full event-type surface: legacy active types plus economy types.

    Both operands are frozensets fixed at import time, so calling this
    from any thread is safe and the result is always the same.
    """
    combined: set[str] = set(_LEGACY_ACTIVE_TYPES)
    combined.update(INFONET_ECONOMY_EVENT_TYPES)
    return frozenset(combined)
|
||||
|
||||
|
||||
class InfonetHashchainAdapter:
    """Validation-only adapter for new Infonet economy events.

    Real chain integration lives in Sprint 4. Tests should use
    ``dry_run_append`` to assert that producer code is constructing
    correctly-shaped events before the cutover.
    """

    def dry_run_append(
        self,
        event_type: str,
        node_id: str,
        payload: dict[str, Any],
        *,
        sequence: int = 1,
        timestamp: float | None = None,
    ) -> dict[str, Any]:
        """Validate and return a synthetic event dict.

        Mirrors the shape that ``mesh_hashchain.Infonet.append`` would
        produce for legacy types — same field set, same ordering. Does
        NOT compute a real signature (Sprint 4 territory) and does NOT
        write to disk.

        Raises ``ValueError`` on validation failure — the same exception
        type the legacy ``append`` raises so callers don't need to
        special-case the cutover later.
        """
        # Cheap structural checks first; payload schema validation after.
        if event_type not in INFONET_ECONOMY_EVENT_TYPES:
            raise ValueError(f"event_type {event_type!r} not in INFONET_ECONOMY_EVENT_TYPES")
        if not isinstance(node_id, str) or not node_id:
            raise ValueError("node_id is required")
        # bool is excluded explicitly because it is a subclass of int.
        if not isinstance(sequence, int) or isinstance(sequence, bool) or sequence <= 0:
            raise ValueError("sequence must be a positive integer")

        ok, reason = validate_infonet_event_payload(event_type, payload)
        if not ok:
            raise ValueError(reason)

        # Caller-supplied timestamp wins; otherwise local wall clock.
        ts = float(timestamp) if timestamp is not None else float(time.time())

        # Canonical encoding: sorted keys + compact separators make the
        # SHA-256 event_id deterministic for a given logical event.
        canonical = {
            "event_type": event_type,
            "node_id": node_id,
            "payload": payload,
            "timestamp": ts,
            "sequence": sequence,
        }
        encoded = json.dumps(canonical, sort_keys=True, separators=(",", ":"), ensure_ascii=False)
        event_id = hashlib.sha256(encoded.encode("utf-8")).hexdigest()

        # NOTE(review): the returned dict aliases the caller's ``payload``
        # object — mutating it afterwards desynchronizes it from the
        # computed ``event_id``. Confirm whether a defensive copy is
        # wanted before the Sprint 4 cutover.
        return {
            "event_id": event_id,
            "event_type": event_type,
            "node_id": node_id,
            "timestamp": ts,
            "sequence": sequence,
            "payload": payload,
            # signature / public_key intentionally omitted in Sprint 1.
            "is_provisional": True,
        }
|
||||
|
||||
|
||||
__all__ = [
|
||||
"InfonetHashchainAdapter",
|
||||
"extended_active_event_types",
|
||||
]
|
||||
@@ -0,0 +1,124 @@
|
||||
"""Adapter from chain history to the market lifecycle / resolution view.
|
||||
|
||||
Sprint 4: real implementation (replaces the Sprint 1 ``NotImplementedError``
|
||||
skeleton). Wires the pure functions in ``services/infonet/markets/`` to
|
||||
the same chain-provider pattern used by ``InfonetReputationAdapter``.
|
||||
|
||||
Sprint 5 will extend this with dispute open / dispute_stake / dispute
|
||||
resolve methods. Sprint 8 will extend the resolution path with
|
||||
bootstrap-mode handling.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Callable, Iterable
|
||||
|
||||
from services.infonet.markets import (
|
||||
DisputeView,
|
||||
EvidenceBundle,
|
||||
MarketStatus,
|
||||
ResolutionResult,
|
||||
build_snapshot,
|
||||
collect_disputes,
|
||||
collect_evidence,
|
||||
collect_resolution_stakes,
|
||||
compute_dispute_outcome,
|
||||
compute_market_status,
|
||||
compute_snapshot_event_hash,
|
||||
dispute_settlement_effects,
|
||||
effective_outcome,
|
||||
excluded_predictor_ids,
|
||||
find_snapshot,
|
||||
is_predictor_excluded,
|
||||
market_was_reversed,
|
||||
resolve_market,
|
||||
should_advance_phase,
|
||||
)
|
||||
|
||||
|
||||
_ChainProvider = Callable[[], Iterable[dict[str, Any]]]
|
||||
|
||||
|
||||
def _empty_chain() -> list[dict[str, Any]]:
|
||||
return []
|
||||
|
||||
|
||||
class InfonetOracleAdapter:
    """Project chain state into market lifecycle + resolution views.

    Stateless: every call re-reads the injected ``chain_provider``, so
    new chain events are visible on the next method invocation.
    """

    def __init__(self, chain_provider: _ChainProvider | None = None) -> None:
        # Fall back to an empty chain so a bare adapter is usable in tests.
        self._chain_provider: _ChainProvider = chain_provider or _empty_chain

    def _events(self) -> list[dict[str, Any]]:
        """Materialize the provider's chain, dropping non-dict entries."""
        return [e for e in self._chain_provider() if isinstance(e, dict)]

    # ── Lifecycle ────────────────────────────────────────────────────
    def market_status(self, market_id: str, *, now: float) -> MarketStatus:
        """Lifecycle phase of ``market_id`` at the given time."""
        return compute_market_status(market_id, self._events(), now=now)

    def should_advance_phase(
        self, market_id: str, *, now: float,
    ) -> tuple[MarketStatus, MarketStatus] | None:
        """(from_phase, to_phase) when a transition is due, else ``None``."""
        return should_advance_phase(market_id, self._events(), now=now)

    # ── Snapshot ─────────────────────────────────────────────────────
    def take_snapshot(self, market_id: str, *, frozen_at: float) -> dict[str, Any]:
        """Build a market snapshot frozen at ``frozen_at``."""
        return build_snapshot(market_id, self._events(), frozen_at=frozen_at)

    def find_snapshot(self, market_id: str) -> dict[str, Any] | None:
        """Previously recorded snapshot for the market, if any."""
        return find_snapshot(market_id, self._events())

    @staticmethod
    def snapshot_event_hash(
        snapshot_payload: dict[str, Any],
        *,
        market_id: str,
        creator_node_id: str,
        sequence: int,
    ) -> str:
        """Deterministic hash for a snapshot event (pure; no chain read)."""
        return compute_snapshot_event_hash(
            snapshot_payload,
            market_id=market_id,
            creator_node_id=creator_node_id,
            sequence=sequence,
        )

    # ── Evidence ─────────────────────────────────────────────────────
    def collect_evidence(self, market_id: str) -> list[EvidenceBundle]:
        """All evidence bundles submitted for the market."""
        return collect_evidence(market_id, self._events())

    # ── Resolution ───────────────────────────────────────────────────
    def excluded_predictor_ids(self, market_id: str) -> set[str]:
        """Node ids barred from resolving this market."""
        return excluded_predictor_ids(market_id, self._events())

    def is_predictor_excluded(self, node_id: str, market_id: str) -> bool:
        """Whether ``node_id`` is barred from resolving the market."""
        return is_predictor_excluded(node_id, market_id, self._events())

    def collect_resolution_stakes(self, market_id: str):
        # Return type mirrors the pure function's; left unannotated here.
        return collect_resolution_stakes(market_id, self._events())

    def resolve_market(
        self, market_id: str, *, is_provisional: bool = False,
    ) -> ResolutionResult:
        """Compute the market's resolution from current chain state."""
        return resolve_market(market_id, self._events(), is_provisional=is_provisional)

    # ── Disputes (Sprint 5) ──────────────────────────────────────────
    def collect_disputes(self, market_id: str) -> list[DisputeView]:
        """All dispute views recorded for the market."""
        return collect_disputes(market_id, self._events())

    @staticmethod
    def compute_dispute_outcome(dispute: DisputeView) -> str:
        """Outcome of a single dispute (pure; no chain read)."""
        return compute_dispute_outcome(dispute)

    @staticmethod
    def dispute_settlement_effects(dispute: DisputeView) -> dict:
        """Settlement effects of a dispute (pure; no chain read)."""
        return dispute_settlement_effects(dispute)

    def market_was_reversed(self, market_id: str) -> bool:
        """Whether a dispute reversed the market's original outcome."""
        return market_was_reversed(market_id, self._events())

    def effective_outcome(self, market_id: str, original_outcome: str) -> str:
        """Outcome after applying any dispute reversal.

        NOTE: the argument order differs between this method
        (``market_id, original_outcome``) and the underlying pure
        function (``original_outcome, market_id, chain``) — the call
        below is intentional; keep it as-is.
        """
        return effective_outcome(original_outcome, market_id, self._events())
|
||||
|
||||
|
||||
__all__ = ["InfonetOracleAdapter"]
|
||||
@@ -0,0 +1,107 @@
|
||||
"""Adapter that projects chain history into the new reputation views.
|
||||
|
||||
Sprint 2: real implementation. Replaces the Sprint 1 ``NotImplementedError``
|
||||
skeleton with the pure functions in ``services/infonet/reputation/``.
|
||||
|
||||
Why this exists rather than callers importing the pure functions
|
||||
directly: the adapter is the single integration boundary that future
|
||||
sprints will extend (Sprint 3 wraps anti-gaming penalties around the
|
||||
common-rep view, Sprint 4 extends the oracle-rep balance with
|
||||
resolution-stake redistribution, Sprint 5 layers in dispute reversal).
|
||||
By keeping callers on this adapter, the producer code never has to
|
||||
change as those layers ship.
|
||||
|
||||
The adapter takes a ``chain_provider`` callable rather than reaching
|
||||
into ``mesh_hashchain`` itself. Two reasons:
|
||||
|
||||
1. Tests pass a list of synthetic events directly — no hashchain
|
||||
instance required, no fixture overhead.
|
||||
2. Sprint 4 cutover decisions (parallel append surface vs unifying
|
||||
``ACTIVE_APPEND_EVENT_TYPES``) won't ripple into reputation code.
|
||||
|
||||
Cross-cutting design rule: reputation reads are background work. They
|
||||
must NEVER block a user-facing request. The adapter exposes only pure
|
||||
synchronous functions because they ARE pure — caches at the adapter
|
||||
layer (Sprint 3+) make repeat reads cheap. Callers that need real-time
|
||||
freshness should call directly on each request; callers that can
|
||||
tolerate staleness should poll a cached adapter instance.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import time
|
||||
from typing import Any, Callable, Iterable
|
||||
|
||||
from services.infonet.reputation import (
|
||||
OracleRepBreakdown,
|
||||
compute_common_rep,
|
||||
compute_oracle_rep,
|
||||
compute_oracle_rep_active,
|
||||
compute_oracle_rep_lifetime,
|
||||
decay_factor_for_age,
|
||||
last_successful_prediction_ts,
|
||||
)
|
||||
from services.infonet.reputation.oracle_rep import compute_oracle_rep_breakdown
|
||||
from services.infonet.time_validity import chain_majority_time
|
||||
|
||||
|
||||
_ChainProvider = Callable[[], Iterable[dict[str, Any]]]
|
||||
|
||||
|
||||
def _empty_chain() -> list[dict[str, Any]]:
|
||||
return []
|
||||
|
||||
|
||||
class InfonetReputationAdapter:
    """Project chain state into oracle/common rep views.

    ``chain_provider`` is a zero-arg callable returning an iterable of
    chain events. Pass a closure that reads from
    ``mesh_hashchain.Infonet.events`` in production, or a literal list
    in tests.
    """

    def __init__(self, chain_provider: _ChainProvider | None = None) -> None:
        # Fall back to an empty chain so a bare adapter is usable in tests.
        self._chain_provider: _ChainProvider = chain_provider or _empty_chain

    def _events(self) -> list[dict[str, Any]]:
        """Materialize the provider's chain, dropping non-dict entries."""
        return [e for e in self._chain_provider() if isinstance(e, dict)]

    def _chain_now(self, events: list[dict[str, Any]]) -> float:
        """Effective "now" for decay math: chain majority time, falling
        back to the local clock only when the chain has no
        distinct-node history yet (genesis / fresh mesh). This is the
        only place a local clock leaks into governance — acceptable
        because there are no oracles to penalize yet.
        """
        chain_now = chain_majority_time(events)
        return chain_now if chain_now > 0 else time.time()

    def oracle_rep(self, node_id: str) -> float:
        """Combined oracle reputation for ``node_id``."""
        return compute_oracle_rep(node_id, self._events())

    def oracle_rep_breakdown(self, node_id: str) -> OracleRepBreakdown:
        """Itemized oracle-rep components for ``node_id``."""
        return compute_oracle_rep_breakdown(node_id, self._events())

    def oracle_rep_lifetime(self, node_id: str) -> float:
        """Lifetime (non-decaying) oracle rep for ``node_id``."""
        return compute_oracle_rep_lifetime(node_id, self._events())

    def oracle_rep_active(self, node_id: str, *, now: float | None = None) -> float:
        """Active (decay-adjusted) oracle rep for ``node_id`` at ``now``."""
        events = self._events()
        if now is None:
            now = self._chain_now(events)
        return compute_oracle_rep_active(node_id, events, now=now)

    def common_rep(self, node_id: str) -> float:
        """Common (non-oracle) reputation for ``node_id``."""
        return compute_common_rep(node_id, self._events())

    def last_successful_prediction_ts(self, node_id: str) -> float | None:
        """Timestamp of the node's last successful prediction, if any."""
        return last_successful_prediction_ts(node_id, self._events())

    def decay_factor(self, node_id: str, *, now: float | None = None) -> float:
        """Decay multiplier based on days since the node's last success.

        Returns 0.0 when the node has no successful prediction on chain.
        Fix vs. previous revision: the ``now`` fallback used
        ``chain_majority_time(events) or time.time()`` — an implicit
        falsy check. It now uses the same explicit ``chain_now > 0``
        rule as ``oracle_rep_active`` (via ``_chain_now``) so both
        decay paths agree on how genesis time is resolved.
        """
        events = self._events()
        if now is None:
            now = self._chain_now(events)
        last_ts = last_successful_prediction_ts(node_id, events)
        if last_ts is None:
            return 0.0
        # Clamp at zero: a last-success timestamp in the (apparent)
        # future must not produce a negative age.
        days = max(0.0, (float(now) - last_ts) / 86400.0)
        return decay_factor_for_age(days)
|
||||
|
||||
|
||||
__all__ = ["InfonetReputationAdapter"]
|
||||
@@ -0,0 +1,97 @@
|
||||
"""Parallel ``SignedWriteKind`` enum for Infonet economy events.
|
||||
|
||||
Why a parallel enum and not extending the legacy one:
|
||||
|
||||
The legacy ``services/mesh/mesh_signed_events.SignedWriteKind`` is
|
||||
imported in many places and changing it ripples through DM, gate, and
|
||||
oracle code that we are not modifying in Sprint 1. Instead we publish a
|
||||
parallel enum here for the new event types and rely on the hashchain
|
||||
adapter to translate or co-route as needed.
|
||||
|
||||
Sprint 7+ may collapse these two enums once the upgrade-hash governance
|
||||
is shipped and a coordinated cutover is possible.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import Enum
|
||||
|
||||
|
||||
class InfonetSignedWriteKind(str, Enum):
    """Signed-write kinds for the new Infonet economy events.

    A ``str``-mixin enum: each member's value is the literal event-type
    string, so members compare equal to plain strings. Per the module
    docstring this is a parallel surface to the legacy
    ``SignedWriteKind`` — treat the values as frozen identifiers once
    published.
    """

    # Reputation
    UPREP = "uprep"
    DOWNREP = "downrep"

    # Markets / resolution-as-prediction
    PREDICTION_CREATE = "prediction_create"
    PREDICTION_PLACE = "prediction_place"
    TRUTH_STAKE_PLACE = "truth_stake_place"
    TRUTH_STAKE_RESOLVE = "truth_stake_resolve"
    MARKET_SNAPSHOT = "market_snapshot"
    EVIDENCE_SUBMIT = "evidence_submit"
    RESOLUTION_STAKE = "resolution_stake"
    BOOTSTRAP_RESOLUTION_VOTE = "bootstrap_resolution_vote"
    RESOLUTION_FINALIZE = "resolution_finalize"

    # Disputes
    DISPUTE_OPEN = "dispute_open"
    DISPUTE_STAKE = "dispute_stake"
    DISPUTE_RESOLVE = "dispute_resolve"

    # Gates (extend legacy GATE_CREATE / GATE_MESSAGE)
    GATE_ENTER = "gate_enter"
    GATE_EXIT = "gate_exit"
    GATE_LOCK = "gate_lock"

    # Gate shutdown lifecycle
    GATE_SUSPEND_FILE = "gate_suspend_file"
    GATE_SUSPEND_VOTE = "gate_suspend_vote"
    GATE_SUSPEND_EXECUTE = "gate_suspend_execute"
    GATE_SHUTDOWN_FILE = "gate_shutdown_file"
    GATE_SHUTDOWN_VOTE = "gate_shutdown_vote"
    GATE_SHUTDOWN_EXECUTE = "gate_shutdown_execute"
    GATE_UNSUSPEND = "gate_unsuspend"
    GATE_SHUTDOWN_APPEAL_FILE = "gate_shutdown_appeal_file"
    GATE_SHUTDOWN_APPEAL_VOTE = "gate_shutdown_appeal_vote"
    GATE_SHUTDOWN_APPEAL_RESOLVE = "gate_shutdown_appeal_resolve"

    # Governance
    PETITION_FILE = "petition_file"
    PETITION_SIGN = "petition_sign"
    PETITION_VOTE = "petition_vote"
    CHALLENGE_FILE = "challenge_file"
    CHALLENGE_VOTE = "challenge_vote"
    PETITION_EXECUTE = "petition_execute"

    # Upgrade-hash governance
    UPGRADE_PROPOSE = "upgrade_propose"
    UPGRADE_SIGN = "upgrade_sign"
    UPGRADE_VOTE = "upgrade_vote"
    UPGRADE_CHALLENGE = "upgrade_challenge"
    UPGRADE_CHALLENGE_VOTE = "upgrade_challenge_vote"
    UPGRADE_SIGNAL_READY = "upgrade_signal_ready"
    UPGRADE_ACTIVATE = "upgrade_activate"

    # Identity
    NODE_REGISTER = "node_register"
    IDENTITY_ROTATE = "identity_rotate"
    CITIZENSHIP_CLAIM = "citizenship_claim"

    # Economy
    COIN_TRANSFER = "coin_transfer"
    COIN_MINT = "coin_mint"
    BOUNTY_CREATE = "bounty_create"
    BOUNTY_CLAIM = "bounty_claim"

    # Content
    POST_CREATE = "post_create"
    POST_REPLY = "post_reply"
|
||||
|
||||
|
||||
INFONET_SIGNED_WRITE_KINDS: frozenset[InfonetSignedWriteKind] = frozenset(InfonetSignedWriteKind)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"INFONET_SIGNED_WRITE_KINDS",
|
||||
"InfonetSignedWriteKind",
|
||||
]
|
||||
@@ -0,0 +1,78 @@
|
||||
"""Bootstrap mode — Argon2id PoW + eligibility + one-vote-per-node dedup.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.10 step 0.5.
|
||||
|
||||
Bootstrap mode replaces oracle-rep-weighted resolution with
|
||||
**eligible-node-one-vote** for the first ``bootstrap_market_count``
|
||||
(default 100) markets. Each eligible Heavy Node submits a
|
||||
``bootstrap_resolution_vote`` event with an Argon2id PoW solution.
|
||||
|
||||
Key Sprint 8 invariants:
|
||||
|
||||
- **Argon2id is Heavy-Node-only.** Light Nodes lack the ≥64 MB RAM
|
||||
required per computation. The PoW verifier does NOT run on Light
|
||||
Nodes.
|
||||
- **Salt = raw ``snapshot_event_hash`` bytes.** Hex-encoding or any
|
||||
reformatting causes a consensus fork. The salt MUST be the exact
|
||||
byte sequence of the snapshot event hash.
|
||||
- **Leading-zero check is on RAW output bytes, MSB first.** Different
|
||||
bit ordering causes a consensus fork.
|
||||
- **Identity age is measured against ``market.snapshot.frozen_at``,
|
||||
NOT against ``now``.** This is deterministic — every node computes
|
||||
the same eligibility from the same chain state. Prevents clock
|
||||
manipulation.
|
||||
- **One-vote-per-node tie-break is stateless.** Among multiple votes
|
||||
from the same node_id for the same market_id, the canonical vote is
|
||||
the one with the LOWEST LEXICOGRAPHICAL ``event_hash``. Every node
|
||||
selects the same canonical vote regardless of observation order.
|
||||
- **Anti-DoS funnel runs cheapest-first.** Schema → signature →
|
||||
identity age → predictor exclusion → phase + dedup → Argon2id.
|
||||
Argon2id is last because it's the most expensive.
|
||||
|
||||
Sprint 8 ships the eligibility + dedup + ramp pipeline in pure
|
||||
Python. ``verify_pow`` is a structural verifier that takes the
|
||||
already-computed hash output as input — it does NOT call Argon2id
|
||||
itself. Production callers wire this through ``privacy-core`` Rust.
|
||||
A future sprint will add the Rust binding; until then, tests
|
||||
synthesize valid hash outputs.
|
||||
"""
|
||||
|
||||
from services.infonet.bootstrap.argon2id import (
|
||||
canonical_pow_preimage,
|
||||
has_leading_zero_bits,
|
||||
verify_pow_structure,
|
||||
)
|
||||
from services.infonet.bootstrap.eligibility import (
|
||||
EligibilityDecision,
|
||||
is_identity_age_eligible,
|
||||
validate_bootstrap_eligibility,
|
||||
)
|
||||
from services.infonet.bootstrap.filter_funnel import (
|
||||
FunnelStage,
|
||||
run_filter_funnel,
|
||||
)
|
||||
from services.infonet.bootstrap.one_vote_dedup import (
|
||||
canonical_event_hash,
|
||||
deduplicate_votes,
|
||||
)
|
||||
from services.infonet.bootstrap.ramp import (
|
||||
ActiveFeatures,
|
||||
compute_active_features,
|
||||
network_node_count,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"ActiveFeatures",
|
||||
"EligibilityDecision",
|
||||
"FunnelStage",
|
||||
"canonical_event_hash",
|
||||
"canonical_pow_preimage",
|
||||
"compute_active_features",
|
||||
"deduplicate_votes",
|
||||
"has_leading_zero_bits",
|
||||
"is_identity_age_eligible",
|
||||
"network_node_count",
|
||||
"run_filter_funnel",
|
||||
"validate_bootstrap_eligibility",
|
||||
"verify_pow_structure",
|
||||
]
|
||||
@@ -0,0 +1,146 @@
|
||||
"""Argon2id canonicalization — preimage construction and leading-zero check.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.10 step 0.5
|
||||
+ the ``CONFIG['bootstrap_pow_argon2id_*']`` comment block.
|
||||
|
||||
Two consensus-critical pieces of canonicalization:
|
||||
|
||||
1. **Canonical preimage** — exact byte sequence the Argon2id call
|
||||
takes as `password`. UTF-8 encoded, "|"-delimited, no trailing
|
||||
delimiter. Format:
|
||||
|
||||
"bootstrap_resolution_vote" || protocol_version || node_id ||
|
||||
market_id || side || snapshot_event_hash || pow_nonce
|
||||
|
||||
The component order MUST match the spec exactly. Any deviation
|
||||
causes consensus fork.
|
||||
|
||||
2. **Leading-zero check** — operates on RAW Argon2id output bytes,
|
||||
MSB first (big-endian bit numbering). Difficulty N requires the
|
||||
first N bits of the 32-byte output to be zero. With difficulty=16
|
||||
that means the first 2 bytes are 0x00 0x00.
|
||||
|
||||
Sprint 8 does NOT execute Argon2id itself — the verifier here takes
|
||||
an already-computed hash bytes object as input. Production callers
|
||||
wire this through ``privacy-core`` Rust binding. A stub Python
|
||||
implementation is intentionally absent to avoid accidental drift
|
||||
between the Sprint 8 pure-Python path and the eventual Rust path.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from services.infonet.config import CONFIG, IMMUTABLE_PRINCIPLES
|
||||
|
||||
|
||||
def canonical_pow_preimage(
|
||||
*,
|
||||
node_id: str,
|
||||
market_id: str,
|
||||
side: str,
|
||||
snapshot_event_hash: str,
|
||||
pow_nonce: int,
|
||||
protocol_version: str | None = None,
|
||||
) -> bytes:
|
||||
"""Build the canonical preimage for the Argon2id ``password`` input.
|
||||
|
||||
Returns UTF-8 bytes of ``"bootstrap_resolution_vote|<version>|<node>|
|
||||
<market>|<side>|<snapshot_hash>|<nonce>"`` with NO trailing delimiter.
|
||||
|
||||
``protocol_version`` defaults to ``IMMUTABLE_PRINCIPLES['protocol_version']``
|
||||
— it's pulled at call time so a hard-fork upgrade picks up the
|
||||
new value automatically. Pass an explicit value when computing
|
||||
against a hypothetical version (test scenarios).
|
||||
"""
|
||||
if not isinstance(node_id, str) or not node_id:
|
||||
raise ValueError("node_id must be a non-empty string")
|
||||
if not isinstance(market_id, str) or not market_id:
|
||||
raise ValueError("market_id must be a non-empty string")
|
||||
if side not in ("yes", "no"):
|
||||
raise ValueError("side must be 'yes' or 'no'")
|
||||
if not isinstance(snapshot_event_hash, str) or not snapshot_event_hash:
|
||||
raise ValueError("snapshot_event_hash must be a non-empty string")
|
||||
if not isinstance(pow_nonce, int) or isinstance(pow_nonce, bool) or pow_nonce < 0:
|
||||
raise ValueError("pow_nonce must be a non-negative int")
|
||||
pv = protocol_version if protocol_version is not None else IMMUTABLE_PRINCIPLES["protocol_version"]
|
||||
if not isinstance(pv, str) or not pv:
|
||||
raise ValueError("protocol_version must be a non-empty string")
|
||||
|
||||
parts = [
|
||||
"bootstrap_resolution_vote",
|
||||
pv,
|
||||
node_id,
|
||||
market_id,
|
||||
side,
|
||||
snapshot_event_hash,
|
||||
str(pow_nonce),
|
||||
]
|
||||
return "|".join(parts).encode("utf-8")
|
||||
|
||||
|
||||
def has_leading_zero_bits(raw_output: bytes, difficulty: int) -> bool:
    """``True`` if the first ``difficulty`` bits of ``raw_output``
    are all zero.

    Bit numbering: MSB first (big-endian). Byte order: as-is in the
    raw output. With difficulty=16, the first two bytes must be
    ``\\x00\\x00``. With difficulty=4, the first byte must be in
    ``\\x00``..``\\x0f``.

    Returns ``False`` when ``raw_output`` is too short to contain
    ``difficulty`` bits. Raises ``ValueError`` for a non-bytes output
    or an invalid difficulty. Fix vs. previous revision: ``bool`` is
    now rejected as a difficulty — bool is a subclass of int, so
    ``True``/``False`` would have silently meant difficulty 1/0;
    sibling validators (``pow_nonce``, ``sequence``) already exclude
    bool explicitly, and this consensus-critical check now matches.
    """
    if not isinstance(raw_output, (bytes, bytearray)):
        raise ValueError("raw_output must be bytes")
    if not isinstance(difficulty, int) or isinstance(difficulty, bool) or difficulty < 0:
        raise ValueError("difficulty must be a non-negative int")
    if difficulty == 0:
        return True

    full_bytes, remaining_bits = divmod(difficulty, 8)
    # Output too short to even contain `difficulty` bits → cannot pass.
    if len(raw_output) < full_bytes + (1 if remaining_bits else 0):
        return False
    for i in range(full_bytes):
        if raw_output[i] != 0:
            return False
    if remaining_bits:
        # The next byte's top `remaining_bits` bits must be zero.
        next_byte = raw_output[full_bytes]
        # Mask of the top `remaining_bits` bits (MSB first).
        mask = ((0xFF << (8 - remaining_bits)) & 0xFF)
        if (next_byte & mask) != 0:
            return False
    return True
|
||||
|
||||
|
||||
def verify_pow_structure(
    *,
    raw_output: bytes,
    difficulty: int | None = None,
    expected_output_len: int | None = None,
) -> bool:
    """Structural check on an Argon2id output: length + leading zeros.

    ``expected_output_len`` defaults to
    ``CONFIG['bootstrap_pow_argon2id_output_len']`` (fixed at 32) and
    ``difficulty`` to ``CONFIG['bootstrap_pow_difficulty']``; both are
    read only when the caller does not supply a value.

    This does NOT prove ``raw_output`` was produced by Argon2id over
    the canonical preimage — the caller performs the actual hash via
    the ``privacy-core`` Rust binding (or ``argon2-cffi`` in test
    environments). Returns ``False`` for non-bytes input or a length
    mismatch rather than raising.
    """
    if not isinstance(raw_output, (bytes, bytearray)):
        return False
    if expected_output_len is None:
        expected_output_len = int(CONFIG["bootstrap_pow_argon2id_output_len"])
    if len(raw_output) != expected_output_len:
        return False
    if difficulty is None:
        difficulty = int(CONFIG["bootstrap_pow_difficulty"])
    return has_leading_zero_bits(raw_output, difficulty)
|
||||
|
||||
|
||||
__all__ = [
|
||||
"canonical_pow_preimage",
|
||||
"has_leading_zero_bits",
|
||||
"verify_pow_structure",
|
||||
]
|
||||
@@ -0,0 +1,129 @@
|
||||
"""Bootstrap eligibility — identity age + predictor exclusion.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.10 step 0.5
|
||||
(``is_bootstrap_eligible``).
|
||||
|
||||
Two gates:
|
||||
|
||||
1. **Identity age vs ``frozen_at`` (NOT ``now``).** Spec is explicit:
|
||||
|
||||
node.created_at + (bootstrap_min_identity_age_days * 86400)
|
||||
<= market.snapshot.frozen_at
|
||||
|
||||
Measuring against the frozen snapshot timestamp keeps eligibility
|
||||
deterministic — every node computes the same set from the same
|
||||
chain state. Measuring against ``now`` would make eligibility
|
||||
depend on local clock, which is a clock-manipulation attack
|
||||
surface.
|
||||
|
||||
2. **Predictor exclusion.** Same as normal resolution:
|
||||
``frozen_predictor_ids ∪ rotation_descendants(frozen_predictor_ids)``.
|
||||
Reuses ``services.infonet.markets.resolution.excluded_predictor_ids``
|
||||
(Sprint 4) — single source of truth.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.markets.resolution import excluded_predictor_ids
|
||||
from services.infonet.markets.snapshot import find_snapshot
|
||||
|
||||
|
||||
_SECONDS_PER_DAY = 86400.0
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
def _node_created_at(node_id: str, chain: Iterable[dict[str, Any]]) -> float | None:
|
||||
"""First chain appearance of ``node_id`` — used as a proxy for
|
||||
``node.created_at``. Per RULES §2.1: "Timestamp of first appearance
|
||||
on chain". A ``node_register`` event is preferred when present;
|
||||
otherwise the earliest event signed by ``node_id``.
|
||||
"""
|
||||
earliest_register: float | None = None
|
||||
earliest_any: float | None = None
|
||||
for ev in chain:
|
||||
if not isinstance(ev, dict):
|
||||
continue
|
||||
author = ev.get("node_id")
|
||||
if author != node_id:
|
||||
continue
|
||||
try:
|
||||
ts = float(ev.get("timestamp") or 0.0)
|
||||
except (TypeError, ValueError):
|
||||
continue
|
||||
if ev.get("event_type") == "node_register":
|
||||
if earliest_register is None or ts < earliest_register:
|
||||
earliest_register = ts
|
||||
if earliest_any is None or ts < earliest_any:
|
||||
earliest_any = ts
|
||||
return earliest_register if earliest_register is not None else earliest_any
|
||||
|
||||
|
||||
def is_identity_age_eligible(
    node_id: str,
    market_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    min_age_days: float | None = None,
) -> bool:
    """``True`` iff the node's identity is old enough at snapshot time.

    Spec condition (RULES §3.10 step 0.5):
    ``node.created_at + min_age_days * 86400 <= market.snapshot.frozen_at``.
    Measuring against ``frozen_at`` (never ``now``) keeps the check
    deterministic across nodes and closes the clock-manipulation surface.

    Returns ``False`` when the snapshot is missing or unparseable, when
    the node has no chain history, or when the timing condition fails.
    ``min_age_days`` defaults to CONFIG["bootstrap_min_identity_age_days"].
    """
    events = list(chain)
    snapshot = find_snapshot(market_id, events)
    if snapshot is None:
        return False
    try:
        frozen_at = float(snapshot.get("frozen_at") or 0.0)
    except (TypeError, ValueError):
        return False
    created_at = _node_created_at(node_id, events)
    if created_at is None:
        return False
    if min_age_days is None:
        min_age_days = float(CONFIG["bootstrap_min_identity_age_days"])
    else:
        min_age_days = float(min_age_days)
    return created_at + min_age_days * _SECONDS_PER_DAY <= frozen_at
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class EligibilityDecision:
    """Outcome of a bootstrap-eligibility check for one (node, market) pair.

    Produced by ``validate_bootstrap_eligibility``. ``reason`` is a short
    machine-readable tag; the values emitted in this module are
    ``"ok"``, ``"snapshot_missing"``, ``"identity_age_too_young"`` and
    ``"predictor_excluded"``.
    """

    # True iff the node may participate in bootstrap resolution.
    eligible: bool
    # Why the decision came out this way (see tags above).
    reason: str
|
||||
|
||||
|
||||
def validate_bootstrap_eligibility(
    node_id: str,
    market_id: str,
    chain: Iterable[dict[str, Any]],
) -> EligibilityDecision:
    """Combine the identity-age and predictor-exclusion gates.

    Used by the Sprint 8 anti-DoS funnel and by the bootstrap resolution
    path itself. Checks run cheapest-first and short-circuit on the first
    failure; the snapshot-existence check is kept separate so callers get
    a distinct ``"snapshot_missing"`` reason.
    """
    events = list(chain)
    if find_snapshot(market_id, events) is None:
        return EligibilityDecision(eligible=False, reason="snapshot_missing")
    if not is_identity_age_eligible(node_id, market_id, events):
        return EligibilityDecision(eligible=False, reason="identity_age_too_young")
    if node_id in excluded_predictor_ids(market_id, events):
        return EligibilityDecision(eligible=False, reason="predictor_excluded")
    return EligibilityDecision(eligible=True, reason="ok")
|
||||
|
||||
|
||||
# Explicit public API of the bootstrap-eligibility module.
__all__ = [
    "EligibilityDecision",
    "is_identity_age_eligible",
    "validate_bootstrap_eligibility",
]
|
||||
@@ -0,0 +1,76 @@
|
||||
"""Anti-DoS filter funnel — cheapest-first validator chain.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.10 step 0.5
|
||||
"Anti-DoS filter funnel (validation order for bootstrap_resolution_vote)".
|
||||
|
||||
Validation order (each stage short-circuits to reject):
|
||||
|
||||
1. Schema — format / required fields / enum sanity (free)
|
||||
2. Signature — Ed25519 verify (~µs)
|
||||
3. Identity age — vs snapshot.frozen_at (chain lookup)
|
||||
4. Predictor — vs frozen_predictor_ids ∪ rotation_descendants
|
||||
5. Phase + dedup
|
||||
6. Argon2id PoW — most expensive (~64MB allocation + hash)
|
||||
|
||||
Why ordering matters: an attacker flooding malformed events should
|
||||
never trigger the Argon2id work. Schema rejection happens first
|
||||
(microseconds), so the funnel discards cheap-to-reject inputs cheap.
|
||||
|
||||
Sprint 8 ships the funnel as a list of ``FunnelStage`` callables.
|
||||
Production callers compose them in order; each stage returns
|
||||
``(accepted, reason)``.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Callable
|
||||
|
||||
|
||||
_StageFn = Callable[[dict[str, Any]], tuple[bool, str]]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class FunnelStage:
    """One validator in the cheapest-first anti-DoS funnel.

    ``run_filter_funnel`` invokes ``check`` with the candidate event and
    short-circuits on the first stage returning ``accepted=False``.
    """

    # Short label used in failure diagnostics ("<name>: <reason>").
    name: str
    # Validator callable: event -> (accepted, reason).
    check: _StageFn
    cost_tier: int
    """Cost ranking 1=cheapest, 6=most expensive. Used by tests to
    confirm the stages are in the spec's ordering."""
|
||||
|
||||
|
||||
def run_filter_funnel(
    event: dict[str, Any],
    stages: list[FunnelStage],
) -> tuple[bool, str]:
    """Run ``stages`` in order; return on the first failure.

    Returns ``(True, "ok")`` if every stage passes, otherwise
    ``(False, "<stage>: <reason>")`` with the failing stage's name and
    reason. (An earlier docstring claimed the stage's ``cost_tier`` was
    embedded in the diagnostic — it never was; monitoring that wants the
    tier should look it up by stage name.)

    Raises:
        ValueError: if ``stages`` is not in monotonically non-decreasing
            ``cost_tier`` order. A misordered funnel is a developer
            error, not attacker input — it would let expensive stages
            run before cheap ones — so it fails loudly.
    """
    if not isinstance(event, dict):
        return False, "schema: event must be an object"
    # Running maximum instead of max(seen_tiers) each iteration: same
    # ordering check, O(n) instead of O(n^2) over the stage list.
    highest_tier: int | None = None
    for stage in stages:
        if highest_tier is not None and stage.cost_tier < highest_tier:
            raise ValueError(
                f"filter funnel out of order: stage {stage.name} "
                f"has cost_tier={stage.cost_tier} after a higher tier"
            )
        if highest_tier is None or stage.cost_tier > highest_tier:
            highest_tier = stage.cost_tier
        ok, reason = stage.check(event)
        if not ok:
            return False, f"{stage.name}: {reason}"
    return True, "ok"
|
||||
|
||||
|
||||
# Explicit public API of the anti-DoS funnel module.
__all__ = [
    "FunnelStage",
    "run_filter_funnel",
]
|
||||
@@ -0,0 +1,85 @@
|
||||
"""Stateless one-vote-per-node dedup.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.10 step 0.5
|
||||
("Phase valid + one-vote-per-node (stateless duplicate resolution)").
|
||||
|
||||
The protocol allows a node to submit only one
|
||||
``bootstrap_resolution_vote`` per market_id. If duplicates appear
|
||||
(retries, network split + heal, malicious flooding), the canonical
|
||||
choice is **the vote with the lowest lexicographical event_hash**.
|
||||
|
||||
Key property: this is **stateless and order-independent**. Every node
|
||||
computes the same canonical vote regardless of which duplicate they
|
||||
saw first. No "last-write-wins" or "first-write-wins" — just the
|
||||
hash comparison.
|
||||
|
||||
``event_hash = SHA-256(canonical_serialize(event))`` — must include
|
||||
signature, payload, and metadata so two events with different
|
||||
payloads produce different hashes.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Any, Iterable
|
||||
|
||||
|
||||
def canonical_event_hash(event: dict[str, Any]) -> str:
    """Hex SHA-256 digest of the canonically-serialized event.

    Canonical form: JSON with sorted keys, compact separators, and
    non-ASCII preserved (``ensure_ascii=False``), encoded as UTF-8.
    Every field present on the dict participates — payload, signature
    (if present), node_id, timestamp, sequence, event_type — so any
    difference in content yields a different digest.
    """
    canonical = json.dumps(
        event,
        sort_keys=True,
        separators=(",", ":"),
        ensure_ascii=False,
    )
    return hashlib.sha256(canonical.encode("utf-8")).hexdigest()
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
def deduplicate_votes(
    market_id: str,
    chain: Iterable[dict[str, Any]],
) -> list[dict[str, Any]]:
    """Return the canonical set of ``bootstrap_resolution_vote`` events
    for ``market_id`` — at most one per ``node_id``.

    On duplicates (retries, split + heal, flooding) the vote with the
    lowest lexicographical ``canonical_event_hash`` wins — stateless and
    order-independent, so every node converges on the same set.

    The returned list is sorted by ``(node_id, event_hash)`` so the
    output is deterministic for any chain ordering. The hash computed
    during candidate collection is reused for both the per-node winner
    selection and the final sort (the previous version re-hashed every
    winner inside the sort key).
    """
    candidates_per_node: dict[str, list[tuple[str, dict[str, Any]]]] = {}
    for ev in chain:
        if not isinstance(ev, dict):
            continue
        if ev.get("event_type") != "bootstrap_resolution_vote":
            continue
        if _payload(ev).get("market_id") != market_id:
            continue
        node = ev.get("node_id")
        if not isinstance(node, str) or not node:
            continue
        candidates_per_node.setdefault(node, []).append((canonical_event_hash(ev), ev))

    # (node_id, event_hash, event) triples — hash kept for the final sort.
    winners: list[tuple[str, str, dict[str, Any]]] = []
    for node, candidates in candidates_per_node.items():
        # Lowest lexicographical event_hash wins. Sequence is a stable
        # secondary key so even a (cryptographically impossible) SHA-256
        # collision resolves deterministically.
        best_hash, best_ev = min(
            candidates,
            key=lambda c: (c[0], int(c[1].get("sequence") or 0)),
        )
        winners.append((node, best_hash, best_ev))

    winners.sort(key=lambda w: (w[0], w[1]))
    return [ev for _node, _hash, ev in winners]
|
||||
|
||||
|
||||
# Explicit public API of the vote-dedup module.
__all__ = [
    "canonical_event_hash",
    "deduplicate_votes",
]
|
||||
@@ -0,0 +1,105 @@
|
||||
"""Soft feature activation ramp — node-count milestones.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §1.2
|
||||
(``CONFIG['bootstrap_threshold']`` comment) + the spec's general
|
||||
"phase activation by network size" theme.
|
||||
|
||||
The protocol activates features in stages as the network grows. The
|
||||
canonical milestones are 1k / 2k / 5k / 10k node count, but the
|
||||
specific thresholds and which features they unlock are a Sprint 8+
|
||||
design choice that's expected to evolve via governance.
|
||||
|
||||
Sprint 8 ships:
|
||||
|
||||
- ``network_node_count(chain)`` — distinct ``node_register`` events
|
||||
on the chain.
|
||||
- ``compute_active_features(chain)`` — returns an ``ActiveFeatures``
|
||||
flag set indicating which protocol features are currently active.
|
||||
|
||||
Today's bindings:
|
||||
|
||||
- ``bootstrap_resolution_active`` — True while node count is below
|
||||
``bootstrap_threshold`` (default 1000). Bootstrap-mode markets use
|
||||
eligible-node-one-vote resolution.
|
||||
- ``staked_resolution_active`` — True once node count crosses 1k.
|
||||
Oracle-rep-weighted resolution staking is the primary mechanism.
|
||||
- ``governance_petitions_active`` — True at 2k+. Petitions can be
|
||||
filed.
|
||||
- ``upgrade_governance_active`` — True at 5k+. Upgrade-hash
|
||||
governance is unlocked.
|
||||
- ``commoncoin_active`` — True at 10k+. CommonCoin minting starts.
|
||||
|
||||
These bindings are intentionally simple — production wiring will
|
||||
read them via governance petitions that adjust ``bootstrap_threshold``
|
||||
and the milestones themselves.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
|
||||
|
||||
def network_node_count(chain: Iterable[dict[str, Any]]) -> int:
    """Number of distinct nodes that have appeared on the chain.

    Primary definition: distinct ``node_id`` values among
    ``node_register`` events. If the chain carries no registrations at
    all (e.g. synthetic test chains that only create markets and
    predictions), the count falls back to distinct authoring nodes
    across every event. Production chains carry the registrations.
    """
    registered: set[str] = set()
    authors: set[str] = set()
    for event in chain:
        if not isinstance(event, dict):
            continue
        node_id = event.get("node_id")
        if not isinstance(node_id, str) or not node_id:
            continue
        authors.add(node_id)
        if event.get("event_type") == "node_register":
            registered.add(node_id)
    # Empty `registered` is falsy, triggering the authors fallback.
    return len(registered or authors)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ActiveFeatures:
    """Which ramp-gated protocol features are active at a given network size.

    Produced by ``compute_active_features``; each flag compares
    ``node_count`` against its CONFIG threshold.
    """

    # node_count < CONFIG["bootstrap_threshold"].
    bootstrap_resolution_active: bool
    # node_count >= CONFIG["ramp_staked_resolution_threshold"].
    staked_resolution_active: bool
    # node_count >= CONFIG["ramp_petitions_threshold"].
    governance_petitions_active: bool
    # node_count >= CONFIG["ramp_upgrade_threshold"].
    upgrade_governance_active: bool
    # node_count >= CONFIG["ramp_commoncoin_threshold"].
    commoncoin_active: bool
    # The distinct-node count the flags were computed from.
    node_count: int
|
||||
|
||||
|
||||
# Milestone thresholds promoted to CONFIG 2026-04-28 (Sprint 8 polish).
|
||||
# Governance can now tune them via petition; the cross-field invariant
|
||||
# in config.py enforces strict ascending order across the four tiers.
|
||||
|
||||
|
||||
def compute_active_features(chain: Iterable[dict[str, Any]]) -> ActiveFeatures:
    """Snapshot of which ramp-gated features are live for this chain.

    Counts distinct nodes via ``network_node_count`` and compares the
    count against the governance-tunable CONFIG thresholds.
    """
    events = [entry for entry in chain if isinstance(entry, dict)]
    count = network_node_count(events)

    def threshold(key: str) -> int:
        # All ramp thresholds are stored as ints but coerced defensively.
        return int(CONFIG[key])

    return ActiveFeatures(
        # Bootstrap resolution stays active only below bootstrap_threshold.
        # Once crossed it remains allowed for bootstrap-indexed markets,
        # but new markets default to staked resolution.
        bootstrap_resolution_active=count < threshold("bootstrap_threshold"),
        staked_resolution_active=count >= threshold("ramp_staked_resolution_threshold"),
        governance_petitions_active=count >= threshold("ramp_petitions_threshold"),
        upgrade_governance_active=count >= threshold("ramp_upgrade_threshold"),
        commoncoin_active=count >= threshold("ramp_commoncoin_threshold"),
        node_count=count,
    )
|
||||
|
||||
|
||||
# Explicit public API of the feature-ramp module.
__all__ = [
    "ActiveFeatures",
    "compute_active_features",
    "network_node_count",
]
]
|
||||
@@ -0,0 +1,519 @@
|
||||
"""Constitutional + governable parameters for the Infonet economy.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §1.
|
||||
|
||||
- ``IMMUTABLE_PRINCIPLES`` — constitutional, exposed as a ``MappingProxyType``.
|
||||
Mutation attempts raise ``TypeError`` at the language level. New keys can
|
||||
only be added through upgrade-hash governance (Sprint 7) which is itself
|
||||
governed by these principles — i.e. a hard fork.
|
||||
|
||||
- ``CONFIG`` — amendable parameters. Live (mutable) dict; all writes go
|
||||
through ``validate_petition_value`` first. The dict itself is a
|
||||
module-level singleton — the governance DSL executor (Sprint 7) is the
|
||||
only intended writer in production. Tests must use
|
||||
``reset_config_for_tests`` to restore baseline.
|
||||
|
||||
- ``CONFIG_SCHEMA`` — per-key bounds and types. Itself an immutable
|
||||
``MappingProxyType``. New schema entries require a hard fork (same flow
|
||||
as ``IMMUTABLE_PRINCIPLES``).
|
||||
|
||||
- ``CROSS_FIELD_INVARIANTS`` — ordered-pair invariants checked AFTER all
|
||||
updates in a ``BATCH_UPDATE_PARAMS``. Spec note: supermajority must
|
||||
always exceed quorum, etc.
|
||||
|
||||
This file is read by every subsequent sprint. Adding a CONFIG key without
|
||||
adding a matching CONFIG_SCHEMA entry is a Sprint 1 invariant violation
|
||||
and is asserted by the tests.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from copy import deepcopy
|
||||
from types import MappingProxyType
|
||||
from typing import Any
|
||||
|
||||
|
||||
class InvalidPetition(ValueError):
    """Raised by ``validate_petition_value`` and the governance DSL executor.

    Signals that a proposed CONFIG mutation is rejected by the schema or by
    a cross-field invariant. The DSL executor (Sprint 7) catches this and
    rolls back the petition — never partially applies.

    Subclasses ``ValueError``, so callers may catch either type.
    """
|
||||
|
||||
|
||||
# ─── Constitutional principles ───────────────────────────────────────────
|
||||
# Immutable. Mutation attempts raise TypeError at the language level
|
||||
# because MappingProxyType is read-only.
|
||||
#
|
||||
# RULES_SKELETON.md §1.1 — adding a key here is a hard fork.
|
||||
|
||||
# MappingProxyType over a private dict literal: the proxy is a read-only
# view, so any attempted item assignment raises TypeError at runtime.
IMMUTABLE_PRINCIPLES: MappingProxyType = MappingProxyType({
    "oracle_rep_source": "predictions_only",
    "hashchain_append_only": True,
    "audit_public": True,
    "identity_permissionless": True,
    "signature_required": True,
    "redemption_path_exists": True,
    "coin_governance_firewall": True,
    "protocol_version": "0.1.0",
})
|
||||
|
||||
|
||||
# ─── Amendable parameters ────────────────────────────────────────────────
|
||||
# RULES_SKELETON.md §1.2.
|
||||
# Mutable dict. Production writes only via the Sprint 7 governance DSL
|
||||
# executor which calls validate_petition_value first.
|
||||
|
||||
_BASELINE_CONFIG: dict[str, Any] = {
|
||||
# ── Reputation ──
|
||||
"vote_decay_days": 90,
|
||||
"min_rep_to_vote": 3,
|
||||
"min_rep_to_create_gate": 10,
|
||||
"bootstrap_threshold": 1000,
|
||||
"weekly_vote_base": 5,
|
||||
"weekly_vote_per_oracle": 10,
|
||||
"daily_vote_limit_per_target": 1,
|
||||
|
||||
# ── Oracle Rep ──
|
||||
"oracle_min_earned": 0.01,
|
||||
"farming_soft_threshold": 0.60,
|
||||
"farming_hard_threshold": 0.80,
|
||||
"farming_easy_bet_cutoff": 0.80,
|
||||
"subjective_oracle_rep_mint": False,
|
||||
|
||||
# ── Market Liquidity ──
|
||||
"min_market_participants": 5,
|
||||
"min_market_total_stake": 10.0,
|
||||
|
||||
# ── Resolution Phase ──
|
||||
"evidence_window_hours": 48,
|
||||
"resolution_window_hours": 72,
|
||||
"evidence_bond_cost": 2.0,
|
||||
"evidence_first_bonus": 0.5,
|
||||
"resolution_supermajority": 0.75,
|
||||
"min_resolution_stake_total": 20.0,
|
||||
"resolution_loser_burn_pct": 0.02,
|
||||
"data_unavailable_threshold": 0.33,
|
||||
"resolution_stalemate_burn_pct": 0.02,
|
||||
|
||||
# ── Governance Decay ──
|
||||
"governance_decay_days": 90,
|
||||
"governance_decay_factor": 0.50,
|
||||
|
||||
# ── Time Validity ──
|
||||
"max_future_event_drift_sec": 300,
|
||||
"phase_boundary_stale_reject": True,
|
||||
|
||||
# ── Identity Rotation ──
|
||||
"rotation_blocked_during_stakes": True,
|
||||
|
||||
# ── Anti-Gaming ──
|
||||
"vcs_min_weight": 0.10,
|
||||
"clustering_min_weight": 0.20,
|
||||
"temporal_burst_window_sec": 300,
|
||||
"temporal_burst_min_upreps": 5,
|
||||
"progressive_penalty_base": 1.0,
|
||||
# Common-rep base formula multiplier (RULES §3.3). Promoted from
|
||||
# Sprint 2's module-private constant 2026-04-28 so governance can
|
||||
# tune the default common-rep payout per uprep.
|
||||
"common_rep_weight_factor": 0.10,
|
||||
# Progressive-penalty trigger threshold — average correlation
|
||||
# score above which the whale-deterrence multiplier kicks in
|
||||
# (Sprint 3 polish 2026-04-28). 0.0 = disabled (Sprint 3 default
|
||||
# behavior preserved).
|
||||
"progressive_penalty_threshold": 0.0,
|
||||
|
||||
# ── Gates ──
|
||||
"gate_ratification_rep": 50,
|
||||
"gate_lock_cost_per_member": 10,
|
||||
"gate_lock_min_members": 5,
|
||||
"gate_creation_rate_limit": 5,
|
||||
|
||||
# ── Truth Stakes ──
|
||||
"truth_stake_min_days": 1,
|
||||
"truth_stake_max_days": 7,
|
||||
"truth_stake_grace_hours": 24,
|
||||
"truth_stake_max_extensions": 3,
|
||||
"truth_stake_tie_burn_pct": 0.20,
|
||||
"truth_stake_self_stake": False,
|
||||
|
||||
# ── Dispute Resolution ──
|
||||
"dispute_window_days": 7,
|
||||
"dispute_common_rep_stakeable": True,
|
||||
|
||||
# ── CommonCoin ──
|
||||
"monthly_mint_amount": 100000,
|
||||
"ubi_share_pct": 0.50,
|
||||
"oracle_dividend_pct": 0.50,
|
||||
"citizenship_sacrifice_cost": 10,
|
||||
"year1_max_coins_per_node": 10000,
|
||||
|
||||
# ── Governance ──
|
||||
"petition_filing_cost": 15,
|
||||
"petition_signature_threshold": 0.25,
|
||||
"petition_signature_window_days": 14,
|
||||
"petition_vote_window_days": 7,
|
||||
"petition_supermajority": 0.67,
|
||||
"petition_quorum": 0.30,
|
||||
"challenge_filing_cost": 25,
|
||||
"challenge_window_hours": 48,
|
||||
|
||||
# ── Upgrade-Hash Governance ──
|
||||
"upgrade_filing_cost": 25,
|
||||
"upgrade_signature_threshold": 0.25,
|
||||
"upgrade_signature_window_days": 14,
|
||||
"upgrade_vote_window_days": 14,
|
||||
"upgrade_supermajority": 0.80,
|
||||
"upgrade_quorum": 0.40,
|
||||
"upgrade_activation_threshold": 0.67,
|
||||
"upgrade_activation_window_days": 30,
|
||||
"upgrade_challenge_window_hours": 48,
|
||||
|
||||
# ── Gate Shutdown ──
|
||||
"gate_suspend_filing_cost": 15,
|
||||
"gate_shutdown_filing_cost": 25,
|
||||
"gate_suspend_supermajority": 0.67,
|
||||
"gate_suspend_locked_supermajority": 0.75,
|
||||
"gate_shutdown_supermajority": 0.75,
|
||||
"gate_shutdown_locked_supermajority": 0.80,
|
||||
"gate_shutdown_quorum": 0.30,
|
||||
"gate_suspend_duration_days": 30,
|
||||
"gate_shutdown_execution_delay_days": 7,
|
||||
"gate_shutdown_cooldown_days": 90,
|
||||
"gate_shutdown_fail_penalty_days": 30,
|
||||
"gate_shutdown_appeal_filing_cost": 20,
|
||||
"gate_shutdown_appeal_window_hours": 48,
|
||||
"gate_shutdown_appeal_vote_window_days": 7,
|
||||
"gate_shutdown_appeal_supermajority": 0.67,
|
||||
"gate_shutdown_appeal_locked_supermajority": 0.75,
|
||||
"gate_shutdown_appeal_quorum": 0.30,
|
||||
|
||||
# ── Market Creation ──
|
||||
"market_creation_bond": 3,
|
||||
"market_creation_bond_return_threshold": 5,
|
||||
|
||||
# ── Bootstrap ──
|
||||
"bootstrap_market_count": 100,
|
||||
"bootstrap_evidence_bond_cost": 0,
|
||||
"bootstrap_resolution_mode": "eligible_node_one_vote",
|
||||
"bootstrap_resolution_supermajority": 0.75,
|
||||
"bootstrap_min_identity_age_days": 3,
|
||||
"bootstrap_pow_algorithm": "argon2id",
|
||||
"bootstrap_pow_argon2id_version": 0x13,
|
||||
"bootstrap_pow_argon2id_m": 65536,
|
||||
"bootstrap_pow_argon2id_t": 3,
|
||||
"bootstrap_pow_argon2id_p": 1,
|
||||
"bootstrap_pow_argon2id_output_len": 32,
|
||||
"bootstrap_pow_difficulty": 16,
|
||||
|
||||
# ── Ramp milestones (Sprint 8 polish 2026-04-28) ──
|
||||
# Network-size thresholds at which features activate. Promoted
|
||||
# from Sprint 8 hardcoded constants so governance can tune them.
|
||||
# Values denote the minimum distinct-node count required.
|
||||
"ramp_staked_resolution_threshold": 1000,
|
||||
"ramp_petitions_threshold": 2000,
|
||||
"ramp_upgrade_threshold": 5000,
|
||||
"ramp_commoncoin_threshold": 10000,
|
||||
}
|
||||
|
||||
|
||||
CONFIG: dict[str, Any] = deepcopy(_BASELINE_CONFIG)
|
||||
|
||||
|
||||
def reset_config_for_tests() -> None:
    """Restore ``CONFIG`` to the pre-petition baseline. Tests only.

    Called by the autouse fixture in ``services/infonet/tests/conftest.py``
    so a test that mutates CONFIG (via a simulated petition execution)
    cannot leak state into the next test. The dict object itself is kept
    (other modules hold references to it); only its contents change.
    """
    baseline = deepcopy(_BASELINE_CONFIG)
    CONFIG.clear()
    CONFIG.update(baseline)
|
||||
|
||||
|
||||
# ─── CONFIG schema (per-key bounds) ──────────────────────────────────────
|
||||
# RULES_SKELETON.md §1.3.
|
||||
# Itself an immutable structure — new keys require upgrade-hash governance
|
||||
# (a hard fork). validate_petition_value rejects any key not present here.
|
||||
|
||||
# Maps a CONFIG_SCHEMA "type" tag to the Python types accepted for it.
# "float" also accepts int (a petition may propose 1 where 1.0 is meant);
# validate_petition_value separately rejects bool for the int/float tags
# because bool is a subclass of int.
_SCHEMA_TYPES = {
    "int": (int,),
    "float": (int, float),
    "bool": (bool,),
    "str": (str,),
}
|
||||
|
||||
_CONFIG_SCHEMA_BACKING: dict[str, MappingProxyType] = {
|
||||
# ── Reputation ──
|
||||
"vote_decay_days": MappingProxyType({"type": "int", "min": 7, "max": 365}),
|
||||
"min_rep_to_vote": MappingProxyType({"type": "int", "min": 0, "max": 100}),
|
||||
"min_rep_to_create_gate": MappingProxyType({"type": "int", "min": 1, "max": 1000}),
|
||||
"bootstrap_threshold": MappingProxyType({"type": "int", "min": 100, "max": 100000}),
|
||||
"weekly_vote_base": MappingProxyType({"type": "int", "min": 1, "max": 100}),
|
||||
"weekly_vote_per_oracle": MappingProxyType({"type": "int", "min": 1, "max": 1000}),
|
||||
"daily_vote_limit_per_target": MappingProxyType({"type": "int", "min": 1, "max": 10}),
|
||||
|
||||
# ── Oracle Rep ──
|
||||
"oracle_min_earned": MappingProxyType({"type": "float", "min": 0.001, "max": 1.0}),
|
||||
"farming_soft_threshold": MappingProxyType({"type": "float", "min": 0.10, "max": 0.95}),
|
||||
"farming_hard_threshold": MappingProxyType({"type": "float", "min": 0.20, "max": 0.99}),
|
||||
"farming_easy_bet_cutoff": MappingProxyType({"type": "float", "min": 0.50, "max": 0.99}),
|
||||
"subjective_oracle_rep_mint": MappingProxyType({"type": "bool"}),
|
||||
|
||||
# ── Market Liquidity ──
|
||||
"min_market_participants": MappingProxyType({"type": "int", "min": 2, "max": 100}),
|
||||
"min_market_total_stake": MappingProxyType({"type": "float", "min": 1.0, "max": 1000.0}),
|
||||
|
||||
# ── Resolution ──
|
||||
"evidence_window_hours": MappingProxyType({"type": "int", "min": 12, "max": 168}),
|
||||
"resolution_window_hours": MappingProxyType({"type": "int", "min": 24, "max": 336}),
|
||||
"evidence_bond_cost": MappingProxyType({"type": "float", "min": 0.5, "max": 50.0}),
|
||||
"evidence_first_bonus": MappingProxyType({"type": "float", "min": 0.0, "max": 10.0}),
|
||||
"resolution_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"min_resolution_stake_total": MappingProxyType({"type": "float", "min": 5.0, "max": 500.0}),
|
||||
"resolution_loser_burn_pct": MappingProxyType({"type": "float", "min": 0.0, "max": 0.10}),
|
||||
"data_unavailable_threshold": MappingProxyType({"type": "float", "min": 0.10, "max": 0.50}),
|
||||
"resolution_stalemate_burn_pct": MappingProxyType({"type": "float", "min": 0.0, "max": 0.10}),
|
||||
|
||||
# ── Governance Decay ──
|
||||
"governance_decay_days": MappingProxyType({"type": "int", "min": 7, "max": 365}),
|
||||
"governance_decay_factor": MappingProxyType({"type": "float", "min": 0.10, "max": 0.99}),
|
||||
|
||||
# ── Time Validity ──
|
||||
"max_future_event_drift_sec": MappingProxyType({"type": "int", "min": 30, "max": 3600}),
|
||||
"phase_boundary_stale_reject": MappingProxyType({"type": "bool"}),
|
||||
|
||||
# ── Identity Rotation ──
|
||||
"rotation_blocked_during_stakes": MappingProxyType({"type": "bool"}),
|
||||
|
||||
# ── Anti-Gaming ──
|
||||
"vcs_min_weight": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
"clustering_min_weight": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
"temporal_burst_window_sec": MappingProxyType({"type": "int", "min": 30, "max": 3600}),
|
||||
"temporal_burst_min_upreps": MappingProxyType({"type": "int", "min": 2, "max": 100}),
|
||||
"progressive_penalty_base": MappingProxyType({"type": "float", "min": 0.1, "max": 100.0}),
|
||||
"common_rep_weight_factor": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
"progressive_penalty_threshold": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
|
||||
# ── Gates ──
|
||||
"gate_ratification_rep": MappingProxyType({"type": "int", "min": 1, "max": 10000}),
|
||||
"gate_lock_cost_per_member": MappingProxyType({"type": "int", "min": 1, "max": 1000}),
|
||||
"gate_lock_min_members": MappingProxyType({"type": "int", "min": 2, "max": 1000}),
|
||||
"gate_creation_rate_limit": MappingProxyType({"type": "int", "min": 1, "max": 100}),
|
||||
|
||||
# ── Truth Stakes ──
|
||||
"truth_stake_min_days": MappingProxyType({"type": "int", "min": 1, "max": 30}),
|
||||
"truth_stake_max_days": MappingProxyType({"type": "int", "min": 1, "max": 90}),
|
||||
"truth_stake_grace_hours": MappingProxyType({"type": "int", "min": 1, "max": 168}),
|
||||
"truth_stake_max_extensions": MappingProxyType({"type": "int", "min": 0, "max": 10}),
|
||||
"truth_stake_tie_burn_pct": MappingProxyType({"type": "float", "min": 0.0, "max": 0.50}),
|
||||
"truth_stake_self_stake": MappingProxyType({"type": "bool"}),
|
||||
|
||||
# ── Dispute Resolution ──
|
||||
"dispute_window_days": MappingProxyType({"type": "int", "min": 1, "max": 30}),
|
||||
"dispute_common_rep_stakeable": MappingProxyType({"type": "bool"}),
|
||||
|
||||
# ── CommonCoin ──
|
||||
"monthly_mint_amount": MappingProxyType({"type": "int", "min": 1, "max": 1_000_000_000}),
|
||||
"ubi_share_pct": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
"oracle_dividend_pct": MappingProxyType({"type": "float", "min": 0.0, "max": 1.0}),
|
||||
"citizenship_sacrifice_cost": MappingProxyType({"type": "int", "min": 1, "max": 1000}),
|
||||
"year1_max_coins_per_node": MappingProxyType({"type": "int", "min": 1, "max": 1_000_000_000}),
|
||||
|
||||
# ── Governance ──
|
||||
"petition_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 100}),
|
||||
"petition_signature_threshold": MappingProxyType({"type": "float", "min": 0.05, "max": 0.50}),
|
||||
"petition_signature_window_days": MappingProxyType({"type": "int", "min": 1, "max": 60}),
|
||||
"petition_vote_window_days": MappingProxyType({"type": "int", "min": 1, "max": 30}),
|
||||
"petition_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"petition_quorum": MappingProxyType({"type": "float", "min": 0.10, "max": 0.80}),
|
||||
"challenge_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 200}),
|
||||
"challenge_window_hours": MappingProxyType({"type": "int", "min": 12, "max": 168}),
|
||||
|
||||
# ── Upgrade-Hash Governance ──
|
||||
"upgrade_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 200}),
|
||||
"upgrade_signature_threshold": MappingProxyType({"type": "float", "min": 0.05, "max": 0.50}),
|
||||
"upgrade_signature_window_days": MappingProxyType({"type": "int", "min": 1, "max": 60}),
|
||||
"upgrade_vote_window_days": MappingProxyType({"type": "int", "min": 1, "max": 60}),
|
||||
"upgrade_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.99}),
|
||||
"upgrade_quorum": MappingProxyType({"type": "float", "min": 0.10, "max": 0.95}),
|
||||
"upgrade_activation_threshold": MappingProxyType({"type": "float", "min": 0.51, "max": 0.99}),
|
||||
"upgrade_activation_window_days": MappingProxyType({"type": "int", "min": 1, "max": 90}),
|
||||
"upgrade_challenge_window_hours": MappingProxyType({"type": "int", "min": 12, "max": 168}),
|
||||
|
||||
# ── Gate Shutdown ──
|
||||
"gate_suspend_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 200}),
|
||||
"gate_shutdown_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 200}),
|
||||
"gate_suspend_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"gate_suspend_locked_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"gate_shutdown_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.99}),
|
||||
"gate_shutdown_locked_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.99}),
|
||||
"gate_shutdown_quorum": MappingProxyType({"type": "float", "min": 0.10, "max": 0.80}),
|
||||
"gate_suspend_duration_days": MappingProxyType({"type": "int", "min": 1, "max": 365}),
|
||||
"gate_shutdown_execution_delay_days": MappingProxyType({"type": "int", "min": 1, "max": 90}),
|
||||
"gate_shutdown_cooldown_days": MappingProxyType({"type": "int", "min": 7, "max": 365}),
|
||||
"gate_shutdown_fail_penalty_days": MappingProxyType({"type": "int", "min": 0, "max": 365}),
|
||||
"gate_shutdown_appeal_filing_cost": MappingProxyType({"type": "int", "min": 1, "max": 200}),
|
||||
"gate_shutdown_appeal_window_hours": MappingProxyType({"type": "int", "min": 12, "max": 168}),
|
||||
"gate_shutdown_appeal_vote_window_days": MappingProxyType({"type": "int", "min": 1, "max": 30}),
|
||||
"gate_shutdown_appeal_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"gate_shutdown_appeal_locked_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"gate_shutdown_appeal_quorum": MappingProxyType({"type": "float", "min": 0.10, "max": 0.80}),
|
||||
|
||||
# ── Market Creation ──
|
||||
"market_creation_bond": MappingProxyType({"type": "int", "min": 0, "max": 1000}),
|
||||
"market_creation_bond_return_threshold": MappingProxyType({"type": "int", "min": 1, "max": 1000}),
|
||||
|
||||
# ── Bootstrap ──
|
||||
"bootstrap_market_count": MappingProxyType({"type": "int", "min": 0, "max": 100000}),
|
||||
"bootstrap_evidence_bond_cost": MappingProxyType({"type": "float", "min": 0.0, "max": 50.0}),
|
||||
"bootstrap_resolution_mode": MappingProxyType({"type": "str", "enum": ("eligible_node_one_vote",)}),
|
||||
"bootstrap_resolution_supermajority": MappingProxyType({"type": "float", "min": 0.51, "max": 0.95}),
|
||||
"bootstrap_min_identity_age_days": MappingProxyType({"type": "int", "min": 0, "max": 365}),
|
||||
"bootstrap_pow_algorithm": MappingProxyType({"type": "str", "enum": ("argon2id",)}),
|
||||
"bootstrap_pow_argon2id_version": MappingProxyType({"type": "int", "enum": (0x13,)}),
|
||||
"bootstrap_pow_argon2id_m": MappingProxyType({"type": "int", "min": 8192, "max": 1_048_576}),
|
||||
"bootstrap_pow_argon2id_t": MappingProxyType({"type": "int", "min": 1, "max": 100}),
|
||||
"bootstrap_pow_argon2id_p": MappingProxyType({"type": "int", "min": 1, "max": 16}),
|
||||
"bootstrap_pow_argon2id_output_len": MappingProxyType({"type": "int", "enum": (32,)}),
|
||||
"bootstrap_pow_difficulty": MappingProxyType({"type": "int", "min": 1, "max": 64}),
|
||||
|
||||
# ── Ramp milestones ──
|
||||
"ramp_staked_resolution_threshold": MappingProxyType({"type": "int", "min": 1, "max": 10_000_000}),
|
||||
"ramp_petitions_threshold": MappingProxyType({"type": "int", "min": 1, "max": 10_000_000}),
|
||||
"ramp_upgrade_threshold": MappingProxyType({"type": "int", "min": 1, "max": 10_000_000}),
|
||||
"ramp_commoncoin_threshold": MappingProxyType({"type": "int", "min": 1, "max": 10_000_000}),
|
||||
}
|
||||
|
||||
CONFIG_SCHEMA: MappingProxyType = MappingProxyType(_CONFIG_SCHEMA_BACKING)
|
||||
|
||||
|
||||
# ─── Cross-field invariants ──────────────────────────────────────────────
|
||||
# RULES_SKELETON.md §1.3.
|
||||
# Each tuple is (left_key, op, right_key). Only ">" supported today —
|
||||
# extend the dispatch in validate_cross_field_invariants when new ops
|
||||
# appear in the spec.
|
||||
|
||||
# Each triple reads: CONFIG[left] <op> CONFIG[right] must hold after all
# updates in a batch are applied. Only ">" is dispatched today.
CROSS_FIELD_INVARIANTS: tuple[tuple[str, str, str], ...] = (
    ("petition_supermajority", ">", "petition_quorum"),
    ("resolution_supermajority", ">", "data_unavailable_threshold"),
    ("upgrade_supermajority", ">", "upgrade_quorum"),
    ("gate_shutdown_supermajority", ">", "gate_shutdown_quorum"),
    # NOTE(review): suspend's supermajority is compared against the
    # *shutdown* quorum — presumably deliberate, since CONFIG has no
    # gate_suspend_quorum key, but worth confirming against the spec.
    ("gate_suspend_supermajority", ">", "gate_shutdown_quorum"),
    ("farming_hard_threshold", ">", "farming_soft_threshold"),
    ("truth_stake_max_days", ">", "truth_stake_min_days"),
    ("upgrade_filing_cost", ">", "petition_filing_cost"),
    # Ramp milestones must be in strict ascending order so each tier
    # genuinely activates additional capability (Sprint 8 polish
    # 2026-04-28).
    ("ramp_petitions_threshold", ">", "ramp_staked_resolution_threshold"),
    ("ramp_upgrade_threshold", ">", "ramp_petitions_threshold"),
    ("ramp_commoncoin_threshold", ">", "ramp_upgrade_threshold"),
)
|
||||
|
||||
|
||||
# ─── Validators (used by the Sprint 7 governance DSL executor) ───────────
|
||||
|
||||
def validate_petition_value(
    key: str,
    value: Any,
    current_config: dict[str, Any] | None = None,
) -> None:
    """Validate one (key, value) pair against ``CONFIG_SCHEMA``.

    Raises ``InvalidPetition`` on any failure; returns ``None`` on success.

    ``current_config`` is accepted for API symmetry with the spec snippet
    in RULES §1.3 — current Sprint 1 logic doesn't need it. Future
    cross-field-aware updates may consult it.
    """
    del current_config  # deliberately unused — see docstring

    schema = CONFIG_SCHEMA.get(key)
    if schema is None:
        raise InvalidPetition(f"No schema for key: {key}")

    type_name = schema["type"]
    expected = _SCHEMA_TYPES.get(type_name)
    if expected is None:
        raise InvalidPetition(f"Schema for {key} has unknown type: {type_name}")

    # bool is a subclass of int, so the numeric branches explicitly refuse
    # bools, while the bool branch accepts nothing but exact bools.
    if type_name == "bool":
        type_ok = isinstance(value, bool)
    elif type_name == "int":
        type_ok = not isinstance(value, bool) and isinstance(value, int)
    elif type_name == "float":
        type_ok = not isinstance(value, bool) and isinstance(value, expected)
    else:  # str
        type_ok = isinstance(value, expected)
    if not type_ok:
        raise InvalidPetition(
            f"Type mismatch for {key}: expected {type_name}, got {type(value).__name__}"
        )

    # Range and enum constraints only apply where the schema declares them.
    if "min" in schema and value < schema["min"]:
        raise InvalidPetition(f"{key}={value} below minimum {schema['min']}")
    if "max" in schema and value > schema["max"]:
        raise InvalidPetition(f"{key}={value} above maximum {schema['max']}")
    if "enum" in schema and value not in schema["enum"]:
        raise InvalidPetition(f"{key}={value} not in allowed values {tuple(schema['enum'])}")
|
||||
|
||||
|
||||
def validate_cross_field_invariants(config: dict[str, Any]) -> None:
    """Check every entry of ``CROSS_FIELD_INVARIANTS`` against ``config``.

    Called by the DSL executor AFTER all updates from a single petition
    payload have been applied to a candidate config dict. Raises
    ``InvalidPetition`` on the first violation; the executor then
    discards the candidate config.
    """
    for left_key, op, right_key in CROSS_FIELD_INVARIANTS:
        # Both referenced keys must exist before the comparison runs.
        for needed in (left_key, right_key):
            if needed not in config:
                raise InvalidPetition(
                    f"Cross-field invariant references missing key: {needed}"
                )
        if op != ">":
            # Only ">" is in the spec today; extend this dispatch when
            # RULES_SKELETON.md §1.3 grows new operators.
            raise InvalidPetition(f"Unknown cross-field operator: {op}")
        left_val = config[left_key]
        right_val = config[right_key]
        if not (left_val > right_val):
            raise InvalidPetition(
                f"Cross-field invariant violated: {left_key}={left_val} must be > "
                f"{right_key}={right_val}"
            )
|
||||
|
||||
|
||||
def validate_config_schema_completeness() -> None:
    """Sprint 1 invariant: CONFIG and CONFIG_SCHEMA cover exactly the same keys.

    Raises ``InvalidPetition`` listing the offending keys. Called both
    from the Sprint 1 adversarial test and from the DSL executor on
    startup.
    """
    config_keys = set(CONFIG)
    schema_keys = set(CONFIG_SCHEMA)
    missing = sorted(config_keys - schema_keys)
    if missing:
        raise InvalidPetition(f"CONFIG keys without CONFIG_SCHEMA entry: {missing}")
    extra = sorted(schema_keys - config_keys)
    if extra:
        raise InvalidPetition(f"CONFIG_SCHEMA keys without CONFIG entry: {extra}")
|
||||
|
||||
|
||||
# Public API of the config module (sorted).
__all__ = [
    "CONFIG",
    "CONFIG_SCHEMA",
    "CROSS_FIELD_INVARIANTS",
    "IMMUTABLE_PRINCIPLES",
    "InvalidPetition",
    "reset_config_for_tests",
    "validate_config_schema_completeness",
    "validate_cross_field_invariants",
    "validate_petition_value",
]
|
||||
@@ -0,0 +1,106 @@
|
||||
"""Event construction helpers for the Infonet economy.
|
||||
|
||||
A thin layer over ``services/infonet/schema.py``: each public function
|
||||
builds a payload dict for one event type, validates it, and returns it.
|
||||
The caller is responsible for signing the event and routing it through
|
||||
``services/infonet/adapters/hashchain_adapter.py`` for actual append.
|
||||
|
||||
Sprint 1 scope: payload builders + validation. No chain writes. The
|
||||
hashchain adapter's ``append_infonet_event`` is the eventual integration
|
||||
point — see ``adapters/hashchain_adapter.py``.
|
||||
|
||||
Why a builder layer and not free-form dicts:
|
||||
- Centralizes the canonical field set per event_type so callers can't
|
||||
drift from the schema.
|
||||
- Allows future sprints to attach deterministic computation (e.g.
|
||||
``probability_at_bet`` reconstruction in Sprint 4) without changing
|
||||
callers.
|
||||
- Matches the "events extend, never replace" rule from the plan §3.1 —
|
||||
the legacy event constructors in ``mesh_schema.py`` keep working
|
||||
unchanged; new event types live here.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from services.infonet.schema import (
|
||||
INFONET_ECONOMY_EVENT_TYPES,
|
||||
validate_infonet_event_payload,
|
||||
)
|
||||
|
||||
|
||||
class EventConstructionError(ValueError):
    """Raised when a payload fails validation at build time.

    Subclasses ``ValueError`` so generic callers can still catch it
    broadly. Distinct from chain-level errors (signature, replay,
    sequence) — those originate in the hashchain adapter, not here.
    """
|
||||
|
||||
|
||||
def build_event(event_type: str, payload: dict[str, Any]) -> dict[str, Any]:
    """Validate ``payload`` for ``event_type`` and return a shallow copy.

    Callers attach signature, sequence, public_key, etc. to the
    returned dict before handing it to the hashchain adapter for append.

    Raises ``EventConstructionError`` for an unknown event type or a
    payload that fails schema validation.
    """
    if event_type not in INFONET_ECONOMY_EVENT_TYPES:
        raise EventConstructionError(
            f"event_type {event_type!r} is not in INFONET_ECONOMY_EVENT_TYPES"
        )
    # Shallow copy so callers can mutate the result without aliasing input.
    candidate = dict(payload or {})
    ok, reason = validate_infonet_event_payload(event_type, candidate)
    if ok:
        return candidate
    raise EventConstructionError(f"{event_type}: {reason}")
|
||||
|
||||
|
||||
# ─── Convenience builders ────────────────────────────────────────────────
|
||||
# Sprint 1 ships only a representative slice. Full per-type builders for
|
||||
# the producing modules (markets/, gates/, governance/, ...) live in
|
||||
# their respective sprints — they will all funnel through ``build_event``
|
||||
# so this module stays the single validation choke point.
|
||||
|
||||
def build_uprep(target_node_id: str, target_event_id: str) -> dict[str, Any]:
    """Validated payload for an ``uprep`` event."""
    payload = {
        "target_node_id": target_node_id,
        "target_event_id": target_event_id,
    }
    return build_event("uprep", payload)


def build_citizenship_claim(sacrifice_amount: int) -> dict[str, Any]:
    """Validated payload for a ``citizenship_claim`` event."""
    payload = {"sacrifice_amount": sacrifice_amount}
    return build_event("citizenship_claim", payload)


def build_petition_file(
    petition_id: str,
    petition_payload: dict[str, Any],
) -> dict[str, Any]:
    """Validated payload for a ``petition_file`` event."""
    payload = {
        "petition_id": petition_id,
        "petition_payload": petition_payload,
    }
    return build_event("petition_file", payload)


def build_petition_vote(petition_id: str, vote: str) -> dict[str, Any]:
    """Validated payload for a ``petition_vote`` event."""
    payload = {"petition_id": petition_id, "vote": vote}
    return build_event("petition_vote", payload)


def build_node_register(public_key: str, public_key_algo: str, node_class: str) -> dict[str, Any]:
    """Validated payload for a ``node_register`` event."""
    payload = {
        "public_key": public_key,
        "public_key_algo": public_key_algo,
        "node_class": node_class,
    }
    return build_event("node_register", payload)
|
||||
|
||||
|
||||
# Public API of the event-builder module (sorted).
__all__ = [
    "EventConstructionError",
    "build_citizenship_claim",
    "build_event",
    "build_node_register",
    "build_petition_file",
    "build_petition_vote",
    "build_uprep",
]
|
||||
@@ -0,0 +1,77 @@
|
||||
"""Gate sacrifice + locking + shutdown lifecycle (Sprint 6).
|
||||
|
||||
Pure-function design: every entry point reads the chain and returns a
|
||||
deterministic value. State (member set / suspended_until / shutdown
|
||||
status / appeal status) is derived, never stored.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.16, §5.3,
|
||||
§5.5.
|
||||
"""
|
||||
|
||||
from services.infonet.gates.locking import (
|
||||
LockedGateState,
|
||||
is_locked,
|
||||
locked_at,
|
||||
locked_by,
|
||||
validate_lock_request,
|
||||
)
|
||||
from services.infonet.gates.ratification import (
|
||||
RATIFICATION_THRESHOLD,
|
||||
cumulative_member_oracle_rep,
|
||||
is_ratified,
|
||||
)
|
||||
from services.infonet.gates.sacrifice import (
|
||||
EntryDecision,
|
||||
EntryRefusal,
|
||||
can_enter,
|
||||
compute_member_set,
|
||||
is_member,
|
||||
)
|
||||
from services.infonet.gates.shutdown.appeal import (
|
||||
AppealValidation,
|
||||
paused_execution_remaining_sec,
|
||||
validate_appeal_filing,
|
||||
)
|
||||
from services.infonet.gates.shutdown.shutdown import (
|
||||
ShutdownState,
|
||||
compute_shutdown_state,
|
||||
validate_shutdown_filing,
|
||||
)
|
||||
from services.infonet.gates.shutdown.suspend import (
|
||||
SuspensionState,
|
||||
compute_suspension_state,
|
||||
validate_suspend_filing,
|
||||
)
|
||||
from services.infonet.gates.state import (
|
||||
GateMeta,
|
||||
events_for_gate,
|
||||
get_gate_meta,
|
||||
)
|
||||
|
||||
# Re-exported public surface of the gates package (sorted).
__all__ = [
    "AppealValidation",
    "EntryDecision",
    "EntryRefusal",
    "GateMeta",
    "LockedGateState",
    "RATIFICATION_THRESHOLD",
    "ShutdownState",
    "SuspensionState",
    "can_enter",
    "compute_member_set",
    "compute_shutdown_state",
    "compute_suspension_state",
    "cumulative_member_oracle_rep",
    "events_for_gate",
    "get_gate_meta",
    "is_locked",
    "is_member",
    "is_ratified",
    "locked_at",
    "locked_by",
    "paused_execution_remaining_sec",
    "validate_appeal_filing",
    "validate_lock_request",
    "validate_shutdown_filing",
    "validate_suspend_filing",
]
|
||||
@@ -0,0 +1,153 @@
|
||||
"""Gate locking — "constitutionalize-a-gate".
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.3 step 4 +
|
||||
``CONFIG['gate_lock_cost_per_member']`` / ``CONFIG['gate_lock_min_members']``.
|
||||
|
||||
Locking semantics:
|
||||
|
||||
- Each ``gate_lock`` event records one member contributing
|
||||
``CONFIG['gate_lock_cost_per_member']`` (default 10) common rep.
|
||||
- A gate is "locked" once ≥ ``CONFIG['gate_lock_min_members']``
|
||||
(default 5) distinct current members have each emitted a valid
|
||||
``gate_lock`` event.
|
||||
- Once locked, the gate's rules become immutable — no governance
|
||||
petition can modify them. Only an upgrade-hash governance event
|
||||
(out of scope for Sprint 6) can amend a locked gate's rules.
|
||||
|
||||
Validation rules for an incoming ``gate_lock`` event (callers in
|
||||
production should run these *before* emitting):
|
||||
|
||||
- The gate exists.
|
||||
- The locker is a current member.
|
||||
- The locker hasn't already locked this gate (one lock per node).
|
||||
- The locker has paid (the burn happens at emit time; this module
|
||||
asserts the schematic ``lock_cost`` matches CONFIG).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.gates.sacrifice import compute_member_set
|
||||
from services.infonet.gates.state import events_for_gate
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
def _lock_cost_per_member() -> int:
    # Read per call (not cached) so changes to CONFIG are picked up immediately.
    return int(CONFIG["gate_lock_cost_per_member"])
|
||||
|
||||
|
||||
def _lock_min_members() -> int:
    # Read per call (not cached) so changes to CONFIG are picked up immediately.
    return int(CONFIG["gate_lock_min_members"])
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class LockedGateState:
    """Derived lock status for one gate — recomputed from the chain, never stored."""
    # True once >= CONFIG['gate_lock_min_members'] distinct members contributed.
    locked: bool
    # Timestamp of the contribution that crossed the threshold, else None.
    locked_at: float | None
    # All accepted contributors' node_ids (ordered by lock timestamp when locked).
    locked_by: tuple[str, ...]
|
||||
|
||||
|
||||
def _collect_lock_contributions(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> list[tuple[str, float]]:
    """Return ``[(node_id, timestamp)]`` for each accepted ``gate_lock``
    event in chain order, counting only the first lock per node."""
    events = list(chain)
    members = compute_member_set(gate_id, events)
    required = float(_lock_cost_per_member())
    counted: set[str] = set()
    contributions: list[tuple[str, float]] = []
    for ev in events_for_gate(gate_id, events):
        if ev.get("event_type") != "gate_lock":
            continue
        node = ev.get("node_id")
        if not (isinstance(node, str) and node) or node in counted:
            continue
        if node not in members:
            # Non-member lock attempt — ignored. The producer-side
            # check should also refuse to emit, but resolver-side
            # enforcement is defense-in-depth.
            continue
        try:
            paid = float(_payload(ev).get("lock_cost") or 0.0)
        except (TypeError, ValueError):
            paid = 0.0
        if paid < required:
            continue
        counted.add(node)
        contributions.append((node, float(ev.get("timestamp") or 0.0)))
    return contributions
|
||||
|
||||
|
||||
def _state(gate_id: str, chain: Iterable[dict[str, Any]]) -> LockedGateState:
    """Derive the gate's lock state from its accepted lock contributions."""
    contributions = _collect_lock_contributions(gate_id, chain)
    quorum = _lock_min_members()
    if len(contributions) < quorum:
        return LockedGateState(locked=False, locked_at=None, locked_by=())
    ordered = sorted(contributions, key=lambda pair: pair[1])
    # The gate locks at the moment the quorum-th contribution landed.
    return LockedGateState(
        locked=True,
        locked_at=ordered[quorum - 1][1],
        locked_by=tuple(node for node, _ in ordered),
    )
|
||||
|
||||
|
||||
def is_locked(gate_id: str, chain: Iterable[dict[str, Any]]) -> bool:
    """True once enough distinct members have emitted accepted ``gate_lock`` events."""
    return _state(gate_id, chain).locked


def locked_at(gate_id: str, chain: Iterable[dict[str, Any]]) -> float | None:
    """Timestamp at which the lock quorum was reached, or ``None`` if unlocked."""
    return _state(gate_id, chain).locked_at


def locked_by(gate_id: str, chain: Iterable[dict[str, Any]]) -> tuple[str, ...]:
    """Node ids with accepted lock contributions (empty tuple if unlocked)."""
    return _state(gate_id, chain).locked_by
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class LockValidation:
    """Outcome of a pre-emit ``gate_lock`` check (see ``validate_lock_request``)."""
    # True when the lock request may be emitted.
    accepted: bool
    # Machine-readable refusal code; "ok" when accepted.
    reason: str
    # The lock cost that was (or would be) charged, in common rep.
    cost: int
|
||||
|
||||
|
||||
def validate_lock_request(
    node_id: str,
    gate_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    lock_cost: int | None = None,
) -> LockValidation:
    """Pre-emit check for a ``gate_lock`` event from ``node_id``.

    Returns ``accepted=False`` with a structured ``reason`` when
    rejected — the UI surfaces these directly so the user knows what
    needs to change.
    """
    events = list(chain)
    minimum = _lock_cost_per_member()
    cost = minimum if lock_cost is None else int(lock_cost)
    if cost < minimum:
        return LockValidation(False, "lock_cost_below_min", cost)
    if node_id not in compute_member_set(gate_id, events):
        return LockValidation(False, "not_a_member", cost)
    # One lock per node: refuse when this node already contributed.
    already_locked = any(
        node == node_id
        for node, _ in _collect_lock_contributions(gate_id, events)
    )
    if already_locked:
        return LockValidation(False, "already_locked_by_node", cost)
    return LockValidation(True, "ok", cost)
|
||||
|
||||
|
||||
# Public API of the locking module.
__all__ = [
    "LockedGateState",
    "LockValidation",
    "is_locked",
    "locked_at",
    "locked_by",
    "validate_lock_request",
]
|
||||
@@ -0,0 +1,53 @@
|
||||
"""Gate ratification — cumulative oracle rep threshold.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.3 step 3.
|
||||
|
||||
A gate is "ratified" once the SUM of its members' oracle rep crosses
|
||||
``CONFIG['gate_ratification_rep']`` (default 50). Ratification is a
|
||||
recognition signal — it doesn't gate any functionality, but UI may
|
||||
surface it as "this gate is established / legitimate".
|
||||
|
||||
Pure function over the chain. The threshold is governable via petition
|
||||
(Sprint 7) by changing the CONFIG value.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.gates.sacrifice import compute_member_set
|
||||
from services.infonet.reputation import compute_oracle_rep
|
||||
|
||||
|
||||
def _ratification_threshold() -> int:
    # Read per call so petition-driven CONFIG changes apply without a reload.
    return int(CONFIG["gate_ratification_rep"])
|
||||
|
||||
|
||||
# Public alias for consumers who don't want to import CONFIG.
# NOTE(review): this snapshots the value at import time. If a Sprint 7
# petition later changes CONFIG['gate_ratification_rep'], this constant
# goes stale while is_ratified() (which re-reads CONFIG) stays current —
# confirm consumers treat it as informational/display only.
RATIFICATION_THRESHOLD = _ratification_threshold()
|
||||
|
||||
|
||||
def cumulative_member_oracle_rep(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> float:
    """Sum of the current members' oracle rep balances."""
    # Materialize once: both the member computation and the per-member
    # rep computation need to walk the full chain.
    events = list(chain)
    members = compute_member_set(gate_id, events)
    return sum(compute_oracle_rep(node, events) for node in members)
|
||||
|
||||
|
||||
def is_ratified(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> bool:
    """``True`` once cumulative member oracle rep meets the threshold."""
    # Threshold is re-read from CONFIG on every call (governable value).
    threshold = float(_ratification_threshold())
    return cumulative_member_oracle_rep(gate_id, chain) >= threshold
|
||||
|
||||
|
||||
# Public API of the ratification module (sorted).
__all__ = [
    "RATIFICATION_THRESHOLD",
    "cumulative_member_oracle_rep",
    "is_ratified",
]
|
||||
@@ -0,0 +1,192 @@
|
||||
"""Gate sacrifice mechanic — burn-on-entry, not threshold check.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.16, §5.3 step 2.
|
||||
|
||||
A node enters a gate by **burning** common rep equal to
|
||||
``gate.entry_sacrifice``. The burn is permanent and non-refundable
|
||||
(even on voluntary exit). This is the constitutional difference from
|
||||
threshold-based access: you can't fake having enough rep — you have
|
||||
to spend it.
|
||||
|
||||
The eligibility checks happen *before* the burn:
|
||||
|
||||
- Node's common rep ≥ ``min_overall_rep + entry_sacrifice``.
|
||||
- Node's per-gate rep meets each ``min_gate_rep[required_gate]``.
|
||||
|
||||
If those pass, the entry is accepted, ``entry_sacrifice`` is burned
|
||||
from the node's common rep, and the node is recorded as a member.
|
||||
|
||||
This module exposes pure functions:
|
||||
|
||||
- ``can_enter(node_id, gate_id, chain)`` — eligibility check + cost,
|
||||
returning a structured ``EntryDecision`` so the UI can render
|
||||
exactly *why* a node can't enter (cross-cutting non-hostile UX rule).
|
||||
- ``compute_member_set(gate_id, chain)`` — current members from
|
||||
``gate_enter`` − ``gate_exit`` events.
|
||||
- ``is_member(node_id, gate_id, chain)`` — convenience.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.gates.state import events_for_gate, get_gate_meta
|
||||
from services.infonet.reputation import compute_common_rep
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
def compute_member_set(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> set[str]:
    """Current member set: ``gate_enter`` − ``gate_exit``, zeroed out by
    ``gate_shutdown_execute``. Once a shutdown is seen, existing members
    are booted and later ``gate_enter`` events are not honored.
    """
    events = list(chain)
    members: set[str] = set()
    gate_was_shut_down = False
    for ev in events_for_gate(gate_id, events):
        kind = ev.get("event_type")
        if kind == "gate_shutdown_execute":
            # Shutdown boots everyone and blocks all future entries.
            gate_was_shut_down = True
            members.clear()
            continue
        node = ev.get("node_id")
        if not isinstance(node, str) or not node:
            continue
        if kind == "gate_enter" and not gate_was_shut_down:
            members.add(node)
        elif kind == "gate_exit":
            members.discard(node)
    return members
|
||||
|
||||
|
||||
def is_member(
    node_id: str,
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> bool:
    """True when ``node_id`` is currently a member of ``gate_id``.

    Convenience wrapper over ``compute_member_set``. The previous
    ``list(chain)`` copy here was redundant — ``compute_member_set``
    already materializes the chain internally, so the iterable is
    passed straight through.
    """
    return node_id in compute_member_set(gate_id, chain)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class EntryRefusal:
    """Structured "why a node can't enter" diagnostic.

    The cross-cutting non-hostile UX rule (BUILD_LOG.md design rules
    §1) requires the UI to show the user a path forward — not a
    blanket "denied". This dataclass carries enough info for the
    frontend to render "you need 5 more common rep" or "you need
    more rep in gate X".
    """
    # Machine-readable category: "gate_not_found", "already_member",
    # "insufficient_common_rep", or "insufficient_gate_rep" (see can_enter).
    kind: str
    # Human-readable specifics (amounts held vs. needed, gate id).
    detail: str
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class EntryDecision:
    """Result of ``can_enter``: an accepted entry plus its burn cost,
    or the full list of refusal reasons."""
    # True when every entry rule passes.
    accepted: bool
    # Common rep to burn on entry (the gate's entry_sacrifice); 0 when refused.
    cost: int
    # Every refusal that applies — empty when accepted.
    refusals: tuple[EntryRefusal, ...]
|
||||
|
||||
|
||||
def compute_gate_rep(
    node_id: str,
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> float:
    """Per-gate reputation: common rep earned from upreps cast by
    current members of ``gate_id``.

    Sprint 6 ships a simple variant: the same formula as
    ``compute_common_rep``, but only ``uprep`` events authored by
    current gate members count. All non-uprep history is preserved in
    the chain handed down, so the underlying oracle-rep computation of
    each upreper still works and anti-gaming penalties (Sprint 3)
    still apply.

    A more sophisticated per-gate formula (e.g. using only upreps
    that happened *while* the upreper was a member, or weighting by
    in-gate activity) is open for governance to specify later.
    """
    events = [ev for ev in chain if isinstance(ev, dict)]
    members = compute_member_set(gate_id, events)
    if not members:
        return 0.0

    def _counts(ev: dict[str, Any]) -> bool:
        # Keep everything except upreps authored by non-members.
        if ev.get("event_type") != "uprep":
            return True
        return ev.get("node_id") in members

    return compute_common_rep(node_id, [ev for ev in events if _counts(ev)])
|
||||
|
||||
|
||||
def can_enter(
    node_id: str,
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> EntryDecision:
    """RULES §3.16 — eligibility + cost.

    ``accepted=True`` means: burning ``cost`` common rep from
    ``node_id`` satisfies all entry rules. ``accepted=False`` lists
    every applicable refusal so the UI can show all of them at once.
    """
    events = list(chain)
    meta = get_gate_meta(gate_id, events)
    if meta is None:
        refusal = EntryRefusal(kind="gate_not_found", detail=gate_id)
        return EntryDecision(accepted=False, cost=0, refusals=(refusal,))
    if is_member(node_id, gate_id, events):
        refusal = EntryRefusal(kind="already_member", detail=gate_id)
        return EntryDecision(accepted=False, cost=0, refusals=(refusal,))

    refusals: list[EntryRefusal] = []

    # Rule 1: common rep must cover the floor AND the burn itself.
    have = compute_common_rep(node_id, events)
    need = meta.min_overall_rep + meta.entry_sacrifice
    if have < need:
        refusals.append(EntryRefusal(
            kind="insufficient_common_rep",
            detail=f"have {have:.4f}, need {need} (min_overall_rep "
                   f"{meta.min_overall_rep} + entry_sacrifice {meta.entry_sacrifice})",
        ))

    # Rule 2: each per-gate rep prerequisite must be met.
    for required_gate, min_rep in meta.min_gate_rep.items():
        gate_rep = compute_gate_rep(node_id, required_gate, events)
        if gate_rep < min_rep:
            refusals.append(EntryRefusal(
                kind="insufficient_gate_rep",
                detail=f"gate {required_gate}: have {gate_rep:.4f}, need {min_rep}",
            ))

    if refusals:
        return EntryDecision(accepted=False, cost=0, refusals=tuple(refusals))
    return EntryDecision(accepted=True, cost=meta.entry_sacrifice, refusals=())
|
||||
|
||||
|
||||
# Public API of the sacrifice module (sorted).
__all__ = [
    "EntryDecision",
    "EntryRefusal",
    "can_enter",
    "compute_gate_rep",
    "compute_member_set",
    "is_member",
]
|
||||
@@ -0,0 +1,43 @@
|
||||
"""Gate shutdown lifecycle — Tier 1 suspend, Tier 2 shutdown, typed appeal.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.5.
|
||||
|
||||
Three modules with clean separation of concerns:
|
||||
|
||||
- ``suspend.py`` — Tier 1: 30-day reversible freeze. Filed via
|
||||
``gate_suspend_file``, voted on, executed via
|
||||
``gate_suspend_execute``, auto-unsuspends after 30 days unless a
|
||||
shutdown petition passes.
|
||||
- ``shutdown.py`` — Tier 2: 7-day-delayed archive. PREREQUISITE: gate
|
||||
must currently be suspended.
|
||||
- ``appeal.py`` — Typed shutdown appeal: pauses the 7-day execution
|
||||
timer, max one appeal per shutdown, 48h window after vote passage.
|
||||
"""
|
||||
|
||||
from services.infonet.gates.shutdown.appeal import (
|
||||
AppealValidation,
|
||||
paused_execution_remaining_sec,
|
||||
validate_appeal_filing,
|
||||
)
|
||||
from services.infonet.gates.shutdown.shutdown import (
|
||||
ShutdownState,
|
||||
compute_shutdown_state,
|
||||
validate_shutdown_filing,
|
||||
)
|
||||
from services.infonet.gates.shutdown.suspend import (
|
||||
SuspensionState,
|
||||
compute_suspension_state,
|
||||
validate_suspend_filing,
|
||||
)
|
||||
|
||||
# Re-exported public surface of the shutdown package (sorted).
__all__ = [
    "AppealValidation",
    "ShutdownState",
    "SuspensionState",
    "compute_shutdown_state",
    "compute_suspension_state",
    "paused_execution_remaining_sec",
    "validate_appeal_filing",
    "validate_shutdown_filing",
    "validate_suspend_filing",
]
|
||||
@@ -0,0 +1,189 @@
|
||||
"""Typed shutdown appeal — pauses execution timer, anti-stall bounded.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.5 step 7.
|
||||
|
||||
An appeal pauses the 7-day shutdown execution timer. The
|
||||
"anti-stall" property limits abuse:
|
||||
|
||||
- One appeal per shutdown petition (no infinite re-appeals).
|
||||
- 48-hour filing window after the shutdown vote passes.
|
||||
- If the appeal fails, the original shutdown's execution timer
|
||||
resumes from where it was paused — the shutdown still happens,
|
||||
just delayed by the appeal-vote duration.
|
||||
|
||||
This module exposes:
|
||||
|
||||
- ``validate_appeal_filing`` — pre-emit checks.
|
||||
- ``paused_execution_remaining_sec`` — compute how much time was
|
||||
remaining on the shutdown timer when the appeal was filed (so the
|
||||
resolver can resume the timer from that point).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.gates.shutdown.shutdown import compute_shutdown_state
|
||||
from services.infonet.gates.state import get_gate_meta
|
||||
|
||||
|
||||
# Unit conversions for the hour/day-denominated CONFIG windows below.
_SECONDS_PER_HOUR = 3600.0
_SECONDS_PER_DAY = 86400.0
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class AppealValidation:
    """Outcome of ``validate_appeal_filing``."""
    # True when the appeal may be emitted.
    accepted: bool
    # Machine-readable refusal code; "ok" when accepted.
    reason: str
|
||||
|
||||
|
||||
def _shutdown_petition_filed_at(
|
||||
target_petition_id: str,
|
||||
chain: Iterable[dict[str, Any]],
|
||||
) -> float | None:
|
||||
for ev in chain:
|
||||
if not isinstance(ev, dict):
|
||||
continue
|
||||
if ev.get("event_type") != "gate_shutdown_file":
|
||||
continue
|
||||
if _payload(ev).get("petition_id") == target_petition_id:
|
||||
return float(ev.get("timestamp") or 0.0)
|
||||
return None
|
||||
|
||||
|
||||
def _shutdown_vote_passed_at(
|
||||
target_petition_id: str,
|
||||
chain: Iterable[dict[str, Any]],
|
||||
) -> float | None:
|
||||
"""Return the timestamp of the ``gate_shutdown_vote`` event whose
|
||||
payload says ``vote=="passed"`` for the target petition. The
|
||||
appeal window starts here."""
|
||||
for ev in chain:
|
||||
if not isinstance(ev, dict):
|
||||
continue
|
||||
if ev.get("event_type") != "gate_shutdown_vote":
|
||||
continue
|
||||
p = _payload(ev)
|
||||
if p.get("petition_id") != target_petition_id:
|
||||
continue
|
||||
if p.get("vote") == "passed":
|
||||
return float(ev.get("timestamp") or 0.0)
|
||||
return None
|
||||
|
||||
|
||||
def _has_appeal(
|
||||
target_petition_id: str,
|
||||
chain: Iterable[dict[str, Any]],
|
||||
) -> bool:
|
||||
for ev in chain:
|
||||
if not isinstance(ev, dict):
|
||||
continue
|
||||
if ev.get("event_type") != "gate_shutdown_appeal_file":
|
||||
continue
|
||||
if _payload(ev).get("target_petition_id") == target_petition_id:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def validate_appeal_filing(
    gate_id: str,
    target_petition_id: str,
    filer_id: str,
    *,
    reason: str,
    evidence_hashes: list[str],
    chain: Iterable[dict[str, Any]],
    now: float,
    filer_cooldown_until: float | None = None,
) -> AppealValidation:
    """Pre-emit validation for ``gate_shutdown_appeal_file``.

    Rejects if:
    - Reason or evidence missing.
    - Gate doesn't exist.
    - Target shutdown petition doesn't exist.
    - Target petition already has an appeal (one per shutdown).
    - Target petition is not currently in "executing" status (i.e.
      vote hasn't passed yet, or shutdown already executed).
    - 48-hour filing window has elapsed since vote passage.
    - Filer cooldown active.
    """
    chain_list = list(chain)
    if not isinstance(reason, str) or not reason.strip():
        return AppealValidation(False, "reason_empty")
    if not isinstance(evidence_hashes, list) or not evidence_hashes:
        return AppealValidation(False, "evidence_required")
    if get_gate_meta(gate_id, chain_list) is None:
        return AppealValidation(False, "gate_not_found")

    # BUG FIX: the filed-at lookup returns a timestamp, and a legitimate
    # filing at t=0.0 is falsy — compare against None instead of relying
    # on truthiness, so epoch-zero chains (tests, replays) aren't
    # misreported as "target_petition_not_found".
    if _shutdown_petition_filed_at(target_petition_id, chain_list) is None:
        return AppealValidation(False, "target_petition_not_found")

    # The "already-filed" check fires before the status check on
    # purpose — once an appeal is filed, the petition status flips
    # from "executing" to "appealed", and surfacing that as
    # "target_not_in_executing_state" would mislead a second filer
    # about *why* their appeal was refused. Spec invariant: one
    # appeal per shutdown; surface that directly.
    if _has_appeal(target_petition_id, chain_list):
        return AppealValidation(False, "appeal_already_filed")

    state = compute_shutdown_state(gate_id, chain_list, now=now)
    if state.pending_status not in ("executing",):
        return AppealValidation(False, "target_not_in_executing_state")

    vote_passed = _shutdown_vote_passed_at(target_petition_id, chain_list)
    if vote_passed is None:
        return AppealValidation(False, "vote_not_passed")
    window_s = float(CONFIG["gate_shutdown_appeal_window_hours"]) * _SECONDS_PER_HOUR
    if now > vote_passed + window_s:
        return AppealValidation(False, "appeal_window_expired")

    if filer_cooldown_until is not None and filer_cooldown_until > now:
        return AppealValidation(False, "filer_cooldown_active")
    # filer_id is consumed by the producer event payload, not by validation here.
    del filer_id
    return AppealValidation(True, "ok")
|
||||
|
||||
|
||||
def paused_execution_remaining_sec(
    target_petition_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    appeal_filed_at: float,
) -> float:
    """Return the seconds left on the shutdown execution timer at the
    moment the appeal was filed.

    The original shutdown's ``execution_at`` is
    ``vote_passed_at + gate_shutdown_execution_delay_days * 86400``; the
    value returned here is ``execution_at - appeal_filed_at`` clamped to
    zero. The producer of the ``gate_shutdown_appeal_resolve`` event
    with ``outcome="resumed"`` should attach
    ``resumed_execution_at = now + this_value`` so the timer resumes
    from where it paused. Returns 0.0 when no vote-pass is on record.
    """
    events = list(chain)
    passed_at = _shutdown_vote_passed_at(target_petition_id, events)
    if passed_at is None:
        # Nothing was ever counting down for this petition.
        return 0.0
    execution_delay = float(CONFIG["gate_shutdown_execution_delay_days"]) * _SECONDS_PER_DAY
    time_left = (passed_at + execution_delay) - float(appeal_filed_at)
    return time_left if time_left > 0.0 else 0.0
|
||||
|
||||
|
||||
# Public API of the appeal module (kept sorted).
__all__ = [
    "AppealValidation",
    "paused_execution_remaining_sec",
    "validate_appeal_filing",
]
|
||||
@@ -0,0 +1,195 @@
|
||||
"""Tier 2: 7-day-delayed shutdown.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.5 steps 5-8.
|
||||
|
||||
PREREQUISITE: gate must currently be suspended. The shutdown petition
|
||||
itself is a vote among oracle-rep holders. If it passes, a 7-day
|
||||
execution delay opens (the appeal window). After the delay (and any
|
||||
appeal resolution), the ``gate_shutdown_execute`` event archives the
|
||||
gate permanently.
|
||||
|
||||
State derivation:
|
||||
|
||||
- A shutdown petition can be: ``filed``, ``vote_passed``, ``executing``
|
||||
(after vote, during 7-day delay), ``appealed`` (timer paused),
|
||||
``executed``, ``failed``, ``voided_appeal``.
|
||||
- This module computes the petition status from chain events; it does
|
||||
NOT execute the petition itself (the producer emits
|
||||
``gate_shutdown_execute`` based on this status).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.gates.shutdown.suspend import (
|
||||
FilingValidation,
|
||||
compute_suspension_state,
|
||||
)
|
||||
from services.infonet.gates.state import events_for_gate, get_gate_meta
|
||||
|
||||
|
||||
_SECONDS_PER_DAY = 86400.0
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ShutdownState:
    """Derived snapshot of all shutdown petitions filed against a gate."""
    # True when some petition is still live (filed / executing / appealed).
    has_pending: bool
    # Petition id of the live petition, or None when has_pending is False.
    pending_petition_id: str | None
    # Status of the live petition. compute_shutdown_state only surfaces
    # "filed" | "executing" | "appealed" here; terminal statuses
    # ("executed", "failed", "voided_appeal") are never pending.
    pending_status: str | None
    # When the 7-day execution delay elapses; None while the timer is
    # paused by an appeal or before the vote passes.
    execution_at: float | None
    # True when any petition against this gate reached "executed".
    executed: bool
|
||||
|
||||
|
||||
def compute_shutdown_state(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    now: float,
) -> ShutdownState:
    """Derive the shutdown-petition state for ``gate_id`` from chain events.

    First seeds one record per ``gate_shutdown_file`` event addressed to
    the gate, then replays votes, appeal filings, appeal resolutions and
    executions in chain order (timestamp, then sequence) to advance each
    petition's status.

    NOTE(review): ``now`` is accepted for signature parity with the
    other state derivations but is not consulted by this implementation.
    """
    chain_list = list(chain)
    events = events_for_gate(gate_id, chain_list)

    # Seed a mutable record per filed petition; the replay below mutates it.
    petitions: dict[str, dict[str, Any]] = {}
    for ev in events:
        et = ev.get("event_type")
        if et != "gate_shutdown_file":
            continue
        p = _payload(ev)
        pid = p.get("petition_id")
        if not isinstance(pid, str) or not pid:
            # Malformed filing without a usable petition_id — ignored.
            continue
        petitions[pid] = {
            "filed_at": float(ev.get("timestamp") or 0.0),
            "status": "filed",
            "execution_at": None,
            "appealed": False,
        }

    # Walk votes/executions/appeals in chain order.
    chain_all = [e for e in chain_list if isinstance(e, dict)]
    chain_all.sort(key=lambda e: (float(e.get("timestamp") or 0.0), int(e.get("sequence") or 0)))

    for ev in chain_all:
        et = ev.get("event_type")
        if et not in ("gate_shutdown_vote", "gate_shutdown_execute",
                      "gate_shutdown_appeal_file", "gate_shutdown_appeal_resolve"):
            continue
        p = _payload(ev)

        if et == "gate_shutdown_vote":
            pid = p.get("petition_id")
            if not isinstance(pid, str) or pid not in petitions:
                continue
            # Sprint 6 simplification: a vote event with payload
            # {"vote": "passed"} is treated as the canonical pass
            # signal. Real production may aggregate per-voter votes
            # in Sprint 7's governance DSL — Sprint 6 honors whichever
            # outcome the spec-side vote tally already reached.
            outcome = p.get("vote")
            if outcome == "passed":
                # A passed vote opens the execution-delay window immediately.
                petitions[pid]["status"] = "executing"
                delay_s = float(CONFIG["gate_shutdown_execution_delay_days"]) * _SECONDS_PER_DAY
                petitions[pid]["execution_at"] = float(ev.get("timestamp") or 0.0) + delay_s
            elif outcome == "failed":
                petitions[pid]["status"] = "failed"

        elif et == "gate_shutdown_appeal_file":
            target = p.get("target_petition_id")
            if isinstance(target, str) and target in petitions:
                petitions[target]["appealed"] = True
                petitions[target]["status"] = "appealed"
                petitions[target]["execution_at"] = None  # paused

        elif et == "gate_shutdown_appeal_resolve":
            target = p.get("target_petition_id")
            outcome = p.get("outcome")
            if isinstance(target, str) and target in petitions:
                if outcome == "voided_shutdown":
                    petitions[target]["status"] = "voided_appeal"
                elif outcome == "resumed":
                    petitions[target]["status"] = "executing"
                    # execution_at restored by the producer who emitted
                    # the resolve event with a fresh execution_at field.
                    new_exec = p.get("resumed_execution_at")
                    try:
                        petitions[target]["execution_at"] = float(new_exec)
                    except (TypeError, ValueError):
                        # Missing/garbled resumed_execution_at: keep the
                        # timer unset rather than guessing a deadline.
                        petitions[target]["execution_at"] = None

        elif et == "gate_shutdown_execute":
            pid = p.get("petition_id")
            if isinstance(pid, str) and pid in petitions:
                petitions[pid]["status"] = "executed"

    executed = any(p["status"] == "executed" for p in petitions.values())
    # Surface the first still-live petition (dict insertion order = filing order).
    pending_pid = None
    pending = None
    for pid, p in petitions.items():
        if p["status"] in ("filed", "executing", "appealed"):
            pending_pid = pid
            pending = p
            break

    return ShutdownState(
        has_pending=pending is not None,
        pending_petition_id=pending_pid,
        pending_status=pending["status"] if pending else None,
        execution_at=pending["execution_at"] if pending else None,
        executed=executed,
    )
|
||||
|
||||
|
||||
def validate_shutdown_filing(
    gate_id: str,
    filer_id: str,
    *,
    reason: str,
    evidence_hashes: list[str],
    chain: Iterable[dict[str, Any]],
    now: float,
    filer_cooldown_until: float | None = None,
) -> FilingValidation:
    """Pre-emit validation for ``gate_shutdown_file``.

    Critical Sprint 6 invariant: shutdown filings REQUIRE the gate to
    currently be suspended. This is the spec's two-tier escalation
    safeguard — a gate cannot be shut down without first surviving a
    suspension period. Checks run in a fixed order so the surfaced
    rejection code is deterministic.
    """
    events = list(chain)

    # Filing content comes first.
    if not (isinstance(reason, str) and reason.strip()):
        return FilingValidation(False, "reason_empty")
    if not (isinstance(evidence_hashes, list) and evidence_hashes):
        return FilingValidation(False, "evidence_required")

    # The gate must exist on the chain at all.
    if get_gate_meta(gate_id, events) is None:
        return FilingValidation(False, "gate_not_found")

    # Two-tier escalation: only a currently-suspended gate may be targeted.
    suspension = compute_suspension_state(gate_id, events, now=now)
    if suspension.status == "shutdown":
        return FilingValidation(False, "gate_already_shutdown")
    if suspension.status != "suspended":
        return FilingValidation(False, "gate_not_suspended")

    # At most one live shutdown petition per gate.
    if compute_shutdown_state(gate_id, events, now=now).has_pending:
        return FilingValidation(False, "shutdown_already_pending")

    if filer_cooldown_until is not None and now < filer_cooldown_until:
        return FilingValidation(False, "filer_cooldown_active")

    # filer_id is recorded by the event producer, not consulted here.
    _ = filer_id
    return FilingValidation(True, "ok")
|
||||
|
||||
|
||||
# Public API of the shutdown module (kept sorted).
__all__ = [
    "ShutdownState",
    "compute_shutdown_state",
    "validate_shutdown_filing",
]
|
||||
@@ -0,0 +1,172 @@
|
||||
"""Tier 1: 30-day reversible suspend.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.5 steps 1-4.
|
||||
|
||||
State derivation:
|
||||
|
||||
- A gate is "suspended" iff:
|
||||
- the most recent ``gate_suspend_execute`` event is more recent
|
||||
than any ``gate_unsuspend`` or ``gate_shutdown_execute`` event,
|
||||
- AND the suspended_until window has not yet elapsed.
|
||||
- ``compute_suspension_state`` returns the current suspension status
|
||||
including the auto-unsuspend timestamp.
|
||||
- ``validate_suspend_filing`` is the pre-emit check the UI should use
|
||||
before letting a node sign a ``gate_suspend_file`` event.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.gates.state import events_for_gate, get_gate_meta
|
||||
|
||||
|
||||
_SECONDS_PER_DAY = 86400.0
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class SuspensionState:
    """``status`` is one of ``"active"``, ``"suspended"``,
    ``"shutdown"``. ``suspended_until`` is the auto-unsuspend
    timestamp or ``None`` when not currently suspended."""
    # Current lifecycle phase of the gate ("active" | "suspended" | "shutdown").
    status: str
    # Timestamp of the effective gate_suspend_execute event, or None.
    suspended_at: float | None
    # When the suspension window auto-elapses; None unless status == "suspended".
    suspended_until: float | None
    # Timestamp of the most recent gate_shutdown_file event, if any.
    last_shutdown_petition_at: float | None
    """Used for 90-day cooldown checks on subsequent shutdown petitions."""
|
||||
|
||||
|
||||
def compute_suspension_state(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    now: float,
) -> SuspensionState:
    """Derive the gate's suspension status at time ``now``.

    One pass over the gate's events tracks the latest timestamp of each
    relevant event type; the branches below then apply, in priority
    order: an executed shutdown dominates everything, a never-suspended
    gate is active, an explicit unsuspend newer than the suspension
    clears it, and an elapsed suspension window clears it implicitly.
    """
    all_events = list(chain)
    gate_events = events_for_gate(gate_id, all_events)

    last_filed: float | None = None
    last_executed_shutdown: float | None = None
    last_suspend: float | None = None
    last_unsuspend: float | None = None

    for event in gate_events:
        ts = float(event.get("timestamp") or 0.0)
        kind = event.get("event_type")
        if kind == "gate_suspend_execute":
            last_suspend = ts
        elif kind == "gate_unsuspend":
            last_unsuspend = ts
        elif kind == "gate_shutdown_file":
            last_filed = ts
        elif kind == "gate_shutdown_execute":
            last_executed_shutdown = ts

    def _state(status, at, until):
        # All outcomes carry the same shutdown-petition timestamp for
        # the 90-day cooldown consumers.
        return SuspensionState(
            status=status,
            suspended_at=at,
            suspended_until=until,
            last_shutdown_petition_at=last_filed,
        )

    # A completed shutdown is terminal.
    if last_executed_shutdown is not None:
        return _state("shutdown", last_suspend, None)

    # Never suspended → active.
    if last_suspend is None:
        return _state("active", None, None)

    # Explicit unsuspend newer than the suspension clears it.
    if last_unsuspend is not None and last_unsuspend > last_suspend:
        return _state("active", None, None)

    window = float(CONFIG["gate_suspend_duration_days"]) * _SECONDS_PER_DAY
    auto_unsuspend_at = last_suspend + window
    if now >= auto_unsuspend_at:
        # Window auto-elapsed; even without an explicit gate_unsuspend
        # event, the gate is logically active again.
        return _state("active", None, None)

    return _state("suspended", last_suspend, auto_unsuspend_at)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class FilingValidation:
    """Result of a pre-emit filing check.

    ``accepted`` is True when the event may be emitted; ``reason`` is
    ``"ok"`` on success or a machine-readable rejection code.
    """
    accepted: bool
    reason: str
|
||||
|
||||
|
||||
def validate_suspend_filing(
    gate_id: str,
    filer_id: str,
    *,
    reason: str,
    evidence_hashes: list[str],
    chain: Iterable[dict[str, Any]],
    now: float,
    filer_cooldown_until: float | None = None,
) -> FilingValidation:
    """Pre-emit validation for a ``gate_suspend_file`` event.

    Rejection codes, surfaced in check order: ``reason_empty``,
    ``evidence_required``, ``evidence_hashes_invalid``,
    ``gate_not_found``, ``gate_shutdown``, ``already_suspended``,
    ``filer_cooldown_active``, ``gate_cooldown_active`` (the gate's
    90-day shutdown-petition cooldown).
    """
    events = list(chain)

    if not isinstance(reason, str) or not reason.strip():
        return FilingValidation(False, "reason_empty")
    if not isinstance(evidence_hashes, list) or not evidence_hashes:
        return FilingValidation(False, "evidence_required")
    for h in evidence_hashes:
        # Every hash must be a non-empty string.
        if not isinstance(h, str) or not h:
            return FilingValidation(False, "evidence_hashes_invalid")

    if get_gate_meta(gate_id, events) is None:
        return FilingValidation(False, "gate_not_found")

    state = compute_suspension_state(gate_id, events, now=now)
    if state.status == "shutdown":
        return FilingValidation(False, "gate_shutdown")
    if state.status == "suspended":
        return FilingValidation(False, "already_suspended")

    if filer_cooldown_until is not None and now < filer_cooldown_until:
        return FilingValidation(False, "filer_cooldown_active")

    if state.last_shutdown_petition_at is not None:
        cooldown_ends = (
            state.last_shutdown_petition_at
            + float(CONFIG["gate_shutdown_cooldown_days"]) * _SECONDS_PER_DAY
        )
        if now < cooldown_ends:
            return FilingValidation(False, "gate_cooldown_active")

    _ = filer_id  # producer logs filer separately; not consulted for validation here.
    return FilingValidation(True, "ok")
|
||||
|
||||
|
||||
# Public API of the suspend module (kept sorted).
__all__ = [
    "FilingValidation",
    "SuspensionState",
    "compute_suspension_state",
    "validate_suspend_filing",
]
|
||||
@@ -0,0 +1,111 @@
|
||||
"""Common chain helpers shared across the gates package.
|
||||
|
||||
The legacy ``gate_create`` event is owned by mesh_schema (it predates
|
||||
the economy layer). Sprint 6 reads those events and extracts the
|
||||
structured fields it needs from the ``rules`` payload, with sensible
|
||||
defaults when a key is missing — same pattern the rest of the
|
||||
protocol uses for forward compatibility.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
def _gate_id(event: dict[str, Any]) -> str:
|
||||
p = _payload(event)
|
||||
gid = p.get("gate_id") or p.get("gate")
|
||||
return str(gid) if isinstance(gid, str) else ""
|
||||
|
||||
|
||||
def events_for_gate(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> list[dict[str, Any]]:
    """All events that reference ``gate_id``, sorted by chain order
    (timestamp first, then sequence number as tie-breaker)."""
    matching = [
        ev
        for ev in chain
        if isinstance(ev, dict) and _gate_id(ev) == gate_id
    ]
    matching.sort(
        key=lambda e: (float(e.get("timestamp") or 0.0), int(e.get("sequence") or 0))
    )
    return matching
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class GateMeta:
    """Static metadata extracted from the original ``gate_create`` event."""
    # Gate identifier the metadata was looked up by.
    gate_id: str
    # node_id that emitted the gate_create event ("" when missing).
    creator_node_id: str
    # Human-readable name from the payload ("" when missing).
    display_name: str
    # rules.entry_sacrifice coerced to int (default 0).
    entry_sacrifice: int
    # rules.min_overall_rep coerced to int (default 0).
    min_overall_rep: int
    # rules.min_gate_rep: per-gate minimum rep, keys kept only when
    # they are non-empty strings, values coerced to int.
    min_gate_rep: dict[str, int]
    # Timestamp of the gate_create event.
    created_at: float
    # Shallow copy of the raw rules dict, for forward-compatible readers.
    raw_rules: dict[str, Any]
|
||||
|
||||
|
||||
def _safe_int(val: Any, default: int = 0) -> int:
|
||||
try:
|
||||
if isinstance(val, bool):
|
||||
return default
|
||||
return int(val)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def get_gate_meta(
    gate_id: str,
    chain: Iterable[dict[str, Any]],
) -> GateMeta | None:
    """Return the gate's static metadata, or ``None`` if no
    ``gate_create`` event exists for it on the chain.

    Multiple ``gate_create`` events with the same gate_id are unusual
    but possible at peer-gossip ingestion time; the FIRST one wins
    (same first-write-wins pattern as ``find_snapshot``). Subsequent
    forgeries are ignored.
    """
    for ev in chain:
        if not isinstance(ev, dict):
            continue
        if ev.get("event_type") != "gate_create":
            continue
        if _gate_id(ev) != gate_id:
            continue
        # First matching gate_create: extract and return immediately.
        p = _payload(ev)
        rules = p.get("rules")
        if not isinstance(rules, dict):
            # Missing/garbled rules → all-defaults metadata.
            rules = {}
        cross_gate = rules.get("min_gate_rep")
        if not isinstance(cross_gate, dict):
            cross_gate = {}
        return GateMeta(
            gate_id=gate_id,
            creator_node_id=str(ev.get("node_id") or ""),
            display_name=str(p.get("display_name") or ""),
            entry_sacrifice=_safe_int(rules.get("entry_sacrifice"), 0),
            min_overall_rep=_safe_int(rules.get("min_overall_rep"), 0),
            # Keep only non-empty string keys; coerce values defensively.
            min_gate_rep={
                str(k): _safe_int(v, 0)
                for k, v in cross_gate.items()
                if isinstance(k, str) and k
            },
            created_at=float(ev.get("timestamp") or 0.0),
            raw_rules=dict(rules),
        )
    return None
|
||||
|
||||
|
||||
# Public API of the shared gate-state helpers (kept sorted).
__all__ = [
    "GateMeta",
    "events_for_gate",
    "get_gate_meta",
]
|
||||
@@ -0,0 +1,56 @@
|
||||
"""Governance — petitions, declarative DSL executor, constitutional
|
||||
challenge, and upgrade-hash governance (Sprint 7).
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §3.15, §5.4, §5.6.
|
||||
|
||||
The DSL executor is the centerpiece of Sprint 7. It is intentionally
|
||||
**not a sandbox**: it cannot run arbitrary code, period. The four
|
||||
allowed payload types (UPDATE_PARAM / BATCH_UPDATE_PARAMS /
|
||||
ENABLE_FEATURE / DISABLE_FEATURE) are dispatched as plain Python
|
||||
switch cases. There is NO ``eval``, ``exec``, ``compile``, or
|
||||
dynamic attribute access anywhere in the executor. The whole class
|
||||
of code-injection attacks goes away by design.
|
||||
|
||||
Protocol upgrades that need new logic use upgrade-hash governance —
|
||||
nodes vote on a software release hash, not on-chain code.
|
||||
"""
|
||||
|
||||
from services.infonet.governance.challenge import (
|
||||
ChallengeState,
|
||||
compute_challenge_state,
|
||||
validate_challenge_filing,
|
||||
)
|
||||
from services.infonet.governance.dsl_executor import (
|
||||
DSLExecutionResult,
|
||||
apply_petition_payload,
|
||||
forbidden_attributes_check,
|
||||
)
|
||||
from services.infonet.governance.petition import (
|
||||
PetitionState,
|
||||
compute_petition_state,
|
||||
network_governance_weight,
|
||||
validate_petition_filing,
|
||||
)
|
||||
from services.infonet.governance.upgrade_hash import (
|
||||
HeavyNodeReadinessState,
|
||||
UpgradeProposalState,
|
||||
compute_upgrade_state,
|
||||
validate_upgrade_proposal,
|
||||
)
|
||||
|
||||
# Re-exported public API of the governance package (kept sorted).
__all__ = [
    "ChallengeState",
    "DSLExecutionResult",
    "HeavyNodeReadinessState",
    "PetitionState",
    "UpgradeProposalState",
    "apply_petition_payload",
    "compute_challenge_state",
    "compute_petition_state",
    "compute_upgrade_state",
    "forbidden_attributes_check",
    "network_governance_weight",
    "validate_challenge_filing",
    "validate_petition_filing",
    "validate_upgrade_proposal",
]
|
||||
@@ -0,0 +1,161 @@
|
||||
"""Constitutional challenge — 48-hour window after a petition passes.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §5.4 step 4.
|
||||
|
||||
A challenger sacrifices ``challenge_filing_cost`` (default 25) common
|
||||
rep to file a challenge against a passed petition. The challenge then
|
||||
goes to a vote — if it succeeds (``uphold`` wins by majority oracle
|
||||
rep), the petition is voided. If it fails, the challenger loses the
|
||||
sacrificed rep and the petition proceeds to execution.
|
||||
|
||||
This module exposes:
|
||||
|
||||
- ``compute_challenge_state(petition_id, chain, *, now)`` — derives
|
||||
the challenge outcome from chain events.
|
||||
- ``validate_challenge_filing(filer_common_rep, ...)`` — pre-emit
|
||||
check.
|
||||
|
||||
Sprint 7 voting tally uses ``oracle_rep_active`` weight, same as
|
||||
petition voting itself.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Iterable
|
||||
|
||||
from services.infonet.config import CONFIG
|
||||
from services.infonet.reputation import compute_oracle_rep_active
|
||||
|
||||
|
||||
_HOUR_S = 3600.0
|
||||
|
||||
|
||||
def _payload(event: dict[str, Any]) -> dict[str, Any]:
|
||||
p = event.get("payload")
|
||||
return p if isinstance(p, dict) else {}
|
||||
|
||||
|
||||
@dataclass
class ChallengeState:
    """Derived state of the constitutional challenge against one petition.

    Deliberately mutable (not frozen): ``compute_challenge_state``
    accumulates the vote weights and sets the outcome in place.
    """
    # Petition the challenge targets.
    petition_id: str
    # True once a challenge_file event exists for the petition.
    filed: bool
    # node_id of the challenger ("" when the event lacked one), or None.
    filer_id: str | None
    # Timestamp of the first challenge_file event, or None.
    filed_at: float | None
    # filed_at + challenge_window_hours; votes after this are ignored.
    deadline: float | None
    # Oracle-rep weight voting to uphold the challenge (void the petition).
    uphold_weight: float
    # Oracle-rep weight voting to void the challenge (petition stands).
    void_weight: float
    outcome: str  # "voided" | "rejected" | "pending" | "none"
|
||||
|
||||
|
||||
def compute_challenge_state(
    petition_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    now: float,
) -> ChallengeState:
    """Derive the challenge outcome for ``petition_id`` from chain events.

    The FIRST ``challenge_file`` event wins (one challenge per petition).
    Votes are tallied in chain order with first-vote-per-voter-wins
    semantics, restricted to the [filed_at, deadline] window, weighted
    by each voter's ``oracle_rep_active`` at their vote's timestamp.
    The outcome stays "pending" until ``now`` passes the deadline.
    """
    chain_list = [e for e in chain if isinstance(e, dict)]

    # Collect the (first) filing and all vote events for this petition.
    file_event = None
    vote_events: list[dict[str, Any]] = []
    for ev in chain_list:
        if _payload(ev).get("petition_id") != petition_id:
            continue
        et = ev.get("event_type")
        if et == "challenge_file":
            if file_event is None:
                # First filing wins; later duplicates are ignored.
                file_event = ev
        elif et == "challenge_vote":
            vote_events.append(ev)

    if file_event is None:
        return ChallengeState(
            petition_id=petition_id, filed=False,
            filer_id=None, filed_at=None, deadline=None,
            uphold_weight=0.0, void_weight=0.0, outcome="none",
        )

    filed_at = float(file_event.get("timestamp") or 0.0)
    deadline = filed_at + float(CONFIG["challenge_window_hours"]) * _HOUR_S

    state = ChallengeState(
        petition_id=petition_id, filed=True,
        filer_id=str(file_event.get("node_id") or ""),
        filed_at=filed_at, deadline=deadline,
        uphold_weight=0.0, void_weight=0.0,
        outcome="pending",
    )

    # seen: first recorded valid vote per voter; cache: per-voter rep weight.
    seen: dict[str, str] = {}
    cache: dict[str, float] = {}
    for ev in sorted(vote_events,
                     key=lambda e: (float(e.get("timestamp") or 0.0),
                                    int(e.get("sequence") or 0))):
        voter = ev.get("node_id")
        if not isinstance(voter, str) or not voter or voter in seen:
            continue
        ts = float(ev.get("timestamp") or 0.0)
        if ts < filed_at or ts > deadline:
            # Outside the voting window — does not consume the voter's vote.
            continue
        vote = _payload(ev).get("vote")
        if vote not in ("uphold", "void"):
            continue
        seen[voter] = vote
        if voter not in cache:
            # Weight is the voter's active oracle rep AT their vote time.
            cache[voter] = compute_oracle_rep_active(voter, chain_list, now=ts)
        w = cache[voter]
        if vote == "uphold":
            # "uphold" means: uphold the constitutional challenge —
            # i.e. void the original petition. Per RULES §5.4 step 4:
            # "Challenge upheld → 'voided_challenge' (petition killed)".
            state.uphold_weight += w
        else:  # "void" the challenge → original petition stands
            state.void_weight += w

    if now <= deadline:
        return state  # still pending

    # Strict majority required to void; ties reject the challenge.
    if state.uphold_weight > state.void_weight:
        state.outcome = "voided"
    else:
        state.outcome = "rejected"
    return state
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ChallengeFilingValidation:
    """Result of the pre-emit challenge_file check.

    ``accepted`` is True when the event may be emitted; ``reason`` is
    ``"ok"`` or a machine-readable rejection code.
    """
    accepted: bool
    reason: str
|
||||
|
||||
|
||||
def validate_challenge_filing(
    filer_common_rep: float,
    petition_id: str,
    chain: Iterable[dict[str, Any]],
    *,
    now: float,
) -> ChallengeFilingValidation:
    """Pre-emit check for a ``challenge_file`` event.

    Rejects if:
    - Filer lacks the ``challenge_filing_cost``.
    - A challenge already exists on this petition.

    NOTE(review): despite the challenge window being part of the spec,
    this function performs NO window-expiry check itself — the caller is
    expected to have already verified the petition's voting closed
    successfully and its challenge window is still open (that timestamp
    comes from ``compute_petition_state``). Confirm callers do so.
    """
    if filer_common_rep < float(CONFIG["challenge_filing_cost"]):
        return ChallengeFilingValidation(False, "insufficient_common_rep")
    state = compute_challenge_state(petition_id, list(chain), now=now)
    if state.filed:
        return ChallengeFilingValidation(False, "challenge_already_filed")
    return ChallengeFilingValidation(True, "ok")
|
||||
|
||||
|
||||
__all__ = [
|
||||
"ChallengeFilingValidation",
|
||||
"ChallengeState",
|
||||
"compute_challenge_state",
|
||||
"validate_challenge_filing",
|
||||
]
|
||||
@@ -0,0 +1,223 @@
|
||||
"""Declarative DSL executor — the type-safe, no-eval petition applier.
|
||||
|
||||
Source of truth: ``infonet-economy/RULES_SKELETON.md`` §1.2 (the
|
||||
governance section comment block) + §5.4 step 5.
|
||||
|
||||
CRITICAL design property: this module **cannot execute arbitrary
|
||||
code**. It is a switch over four typed payload variants, each with a
|
||||
fully-validated key/value or feature-flag operation. There is NO use
|
||||
of ``eval``, ``exec``, ``compile``, ``ast.parse``, ``getattr`` with a
|
||||
runtime key, ``__import__``, ``subprocess``, ``os.system``, or any
|
||||
other dynamic-execution primitive.
|
||||
|
||||
The whole class of code-injection attacks is eliminated by design —
|
||||
even if an attacker passes a maliciously crafted petition payload, the
|
||||
executor either applies a typed value or rejects with
|
||||
``InvalidPetition``. There is no path to executing the attacker's
|
||||
input as code.
|
||||
|
||||
Sprint 7's adversarial tests assert this invariant by reading this
|
||||
file's source bytes and confirming none of the forbidden builtins
|
||||
appear (``forbidden_attributes_check``).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from services.infonet.config import (
|
||||
CONFIG,
|
||||
CONFIG_SCHEMA,
|
||||
IMMUTABLE_PRINCIPLES,
|
||||
InvalidPetition,
|
||||
validate_cross_field_invariants,
|
||||
validate_petition_value,
|
||||
)
|
||||
|
||||
|
||||
# The closed set of petition payload variants the executor will apply.
# Anything else is rejected by _check_payload_shape with InvalidPetition —
# there is deliberately no extension point here (no-eval design property).
_ALLOWED_PAYLOAD_TYPES = frozenset({
    "UPDATE_PARAM",
    "BATCH_UPDATE_PARAMS",
    "ENABLE_FEATURE",
    "DISABLE_FEATURE",
})
|
||||
|
||||
|
||||
@dataclass
class DSLExecutionResult:
    """Outcome of applying a petition payload.

    ``new_config`` is a fresh dict — the caller decides whether to
    swap the live ``CONFIG`` with it. Sprint 7's tests apply the
    result and verify the swap; production callers wire this through
    the ``petition_execute`` event handler.
    """
    # Fully-validated candidate configuration (deep copy; never the live CONFIG).
    new_config: dict[str, Any]
    # CONFIG keys the payload touched, in application order.
    changed_keys: tuple[str, ...] = field(default_factory=tuple)
|
||||
|
||||
|
||||
def _check_payload_shape(payload: Any) -> str:
    """Validate the outer payload shape and return its ``type`` string.

    Raises ``InvalidPetition`` when the payload is not a dict, or when
    its ``type`` falls outside the closed ``_ALLOWED_PAYLOAD_TYPES`` set.
    """
    if not isinstance(payload, dict):
        raise InvalidPetition("petition_payload must be an object")
    declared = payload.get("type")
    if declared in _ALLOWED_PAYLOAD_TYPES:
        return str(declared)
    raise InvalidPetition(
        f"unknown petition_payload type: {declared!r}; "
        f"allowed: {sorted(_ALLOWED_PAYLOAD_TYPES)}"
    )
|
||||
|
||||
|
||||
def _check_key_writeable(key: str) -> None:
    """Reject writes to keys absent from CONFIG_SCHEMA.

    ``IMMUTABLE_PRINCIPLES`` keys never appear in ``CONFIG_SCHEMA``, so
    they are rejected too — with a clearer diagnostic pointing at
    upgrade-hash governance as the only way to change them.
    """
    if not isinstance(key, str) or not key:
        raise InvalidPetition("CONFIG key must be a non-empty string")
    if key in CONFIG_SCHEMA:
        return
    if key in IMMUTABLE_PRINCIPLES:
        raise InvalidPetition(
            f"key {key!r} is in IMMUTABLE_PRINCIPLES — only an "
            f"upgrade-hash governance hard fork can change it"
        )
    raise InvalidPetition(f"unknown CONFIG key: {key!r}")
|
||||
|
||||
|
||||
def _apply_update_param(
    payload: dict[str, Any],
    candidate: dict[str, Any],
) -> tuple[dict[str, Any], list[str]]:
    """Apply a single-key UPDATE_PARAM to ``candidate``.

    Returns the (mutated) candidate and the one-element list of changed
    keys; raises ``InvalidPetition`` on any validation failure.
    """
    if "key" not in payload or "value" not in payload:
        raise InvalidPetition("UPDATE_PARAM requires key + value")
    key, value = payload["key"], payload["value"]
    # Writeability first, then type/range validation against the candidate.
    _check_key_writeable(key)
    validate_petition_value(key, value, candidate)
    candidate[key] = value
    return candidate, [key]
|
||||
|
||||
|
||||
def _apply_batch_update(
    payload: dict[str, Any],
    candidate: dict[str, Any],
) -> tuple[dict[str, Any], list[str]]:
    """Apply every ``{key, value}`` entry of a BATCH_UPDATE_PARAMS payload.

    Duplicate keys are rejected so a batch cannot silently overwrite its
    own earlier entry; each entry passes the same writeability and value
    validation as a single UPDATE_PARAM. Raises ``InvalidPetition`` on
    the first failure.
    """
    updates = payload.get("updates")
    if not isinstance(updates, list) or not updates:
        raise InvalidPetition("BATCH_UPDATE_PARAMS requires a non-empty 'updates' list")
    applied: list[str] = []
    already: set[str] = set()
    for entry in updates:
        if not isinstance(entry, dict) or "key" not in entry or "value" not in entry:
            raise InvalidPetition("BATCH_UPDATE_PARAMS entries must be {key, value}")
        key = entry["key"]
        if key in already:
            raise InvalidPetition(f"duplicate key in BATCH_UPDATE_PARAMS: {key!r}")
        already.add(key)
        _check_key_writeable(key)
        # Validation runs against the candidate as it accumulates earlier
        # entries of the same batch.
        validate_petition_value(key, entry["value"], candidate)
        candidate[key] = entry["value"]
        applied.append(key)
    return candidate, applied
|
||||
|
||||
|
||||
def _apply_feature_toggle(
    payload: dict[str, Any],
    candidate: dict[str, Any],
    *,
    enable: bool,
) -> tuple[dict[str, Any], list[str]]:
    """Flip a boolean CONFIG key on (ENABLE_FEATURE) or off (DISABLE_FEATURE).

    The target key must exist in CONFIG_SCHEMA with ``type == "bool"``;
    anything else raises ``InvalidPetition``.
    """
    feature = payload.get("feature")
    if not (isinstance(feature, str) and feature):
        raise InvalidPetition("ENABLE_FEATURE / DISABLE_FEATURE requires non-empty 'feature'")
    _check_key_writeable(feature)
    schema = CONFIG_SCHEMA.get(feature)
    if schema is None or schema.get("type") != "bool":
        raise InvalidPetition(
            f"feature {feature!r} is not a boolean CONFIG key"
        )
    candidate[feature] = bool(enable)
    return candidate, [feature]
|
||||
|
||||
|
||||
def apply_petition_payload(
    payload: dict[str, Any],
    current_config: dict[str, Any] | None = None,
) -> DSLExecutionResult:
    """Apply a validated petition payload to a CANDIDATE copy of CONFIG.

    Transactional: every check runs against the candidate copy; any
    failure raises ``InvalidPetition`` and the candidate is simply
    discarded, so the live ``CONFIG`` is never partially mutated.

    Pass ``current_config`` to apply against a hypothetical state
    (testing, upgrade-hash dry-runs); otherwise the live ``CONFIG`` is
    deep-copied as the starting point.
    """
    payload_type = _check_payload_shape(payload)
    base = CONFIG if current_config is None else current_config
    candidate = deepcopy(base)

    # Dispatch table keyed on the (already shape-checked) payload type.
    # Each handler mutates and returns the candidate plus changed keys.
    handlers: dict[str, Any] = {
        "UPDATE_PARAM": lambda: _apply_update_param(payload, candidate),
        "BATCH_UPDATE_PARAMS": lambda: _apply_batch_update(payload, candidate),
        "ENABLE_FEATURE": lambda: _apply_feature_toggle(payload, candidate, enable=True),
        "DISABLE_FEATURE": lambda: _apply_feature_toggle(payload, candidate, enable=False),
    }
    handler = handlers.get(payload_type)
    if handler is None:  # pragma: no cover — _check_payload_shape gated this
        raise InvalidPetition(f"unhandled payload type: {payload_type}")
    candidate, changed = handler()

    # Cross-field invariants validated against the FINAL candidate.
    validate_cross_field_invariants(candidate)

    return DSLExecutionResult(new_config=candidate, changed_keys=tuple(changed))
|
||||
|
||||
|
||||
# ─── No-eval guard ──────────────────────────────────────────────────────
|
||||
|
||||
# Forbidden attribute names whose presence in this module's source
|
||||
# would violate the "no arbitrary code execution" property. Sprint 7's
|
||||
# adversarial test reads this file and asserts none of these substrings
|
||||
# appear (outside of this list and the guard function below — the
|
||||
# guard's job is to *name* the forbidden surface, not use it).
|
||||
|
||||
_FORBIDDEN_ATTRIBUTES: frozenset[str] = frozenset({
    # Call-syntax tokens. Scanned against this module's source by the
    # Sprint 7 adversarial test. Bare module names (``subprocess``,
    # ``os``, etc.) are deliberately NOT in this set — their mere
    # mention in prose is harmless; what we forbid is the CALL (hence
    # the trailing "(" on every token).
    "eval(",
    "exec(",
    "compile(",
    "__import__(",
    "ast.parse(",
    "subprocess.run(",
    "subprocess.Popen(",
    "subprocess.call(",
    "subprocess.check_output(",
    "os.system(",
    "os.popen(",
    "pickle.loads(",
    "marshal.loads(",
})
|
||||
|
||||
|
||||
def forbidden_attributes_check() -> tuple[str, ...]:
    """Return the curated forbidden-surface tokens, sorted.

    The Sprint 7 adversarial test calls this to obtain the token list
    it scans this module's source against. Exposing it as a function
    keeps that test decoupled from the module's internal layout.
    """
    ordered = sorted(_FORBIDDEN_ATTRIBUTES)
    return tuple(ordered)
|
||||
|
||||
|
||||
# Explicit public API: everything else in this module is internal
# plumbing for the petition DSL executor.
__all__ = [
    "DSLExecutionResult",
    "apply_petition_payload",
    "forbidden_attributes_check",
]
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user