mirror of https://github.com/PlaneQuery/OpenAirframes.git
synced 2026-05-03 08:15:08 +02:00
Compare commits
13 Commits
| SHA1 |
|---|
| 6a250a63fb |
| 9e24fcbc63 |
| 8ce04f1f83 |
| 9441761ac9 |
| ccf55b2308 |
| 76eaf118ef |
| 0fcbad0fbc |
| 0c7484e7bf |
| 8c60ac611d |
| 145f1006be |
| f5465f0552 |
| 17098ae39a |
| 6f6b65780a |
@@ -8,8 +8,8 @@ body:
  - type: markdown
    attributes:
      value: |
-       Submit **one object** or an **array of objects** that matches the community submission schema.
+       Submit **one object** or an **array of objects** that matches the community submission [schema](https://github.com/PlaneQuery/OpenAirframes/blob/main/schemas/community_submission.v1.schema.json). Reuse existing tags from the schema when possible.

        **Rules (enforced on review/automation):**
        - Each object must include **at least one** of:
          - `registration_number`
@@ -27,7 +27,7 @@ body:
        ```json
        {
          "registration_number": "N12345",
-         "tags": {"owner": "John Doe"},
+         "tags": {"owner": "John Doe", "photo": "https://example.com/photo.jpg"},
          "start_date": "2025-01-01"
        }
        ```
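Since the template accepts either shape, here is a hedged sketch of the array form (only fields shown in the single-object example above are reused; the second entry's values are purely illustrative):

```json
[
  {
    "registration_number": "N12345",
    "tags": {"owner": "John Doe", "photo": "https://example.com/photo.jpg"},
    "start_date": "2025-01-01"
  },
  {
    "registration_number": "N67890",
    "tags": {"owner": "Jane Roe"},
    "start_date": "2025-03-15"
  }
]
```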
@@ -95,20 +95,27 @@ jobs:
          # Verify tar integrity
          tar -tf extracted_data.tar > /dev/null && echo "Tar integrity check passed" || { echo "Tar integrity check FAILED"; exit 1; }

-         # Create checksum of the FULL tar before splitting (for verification after reassembly)
-         echo "=== Creating checksum of full tar ==="
-         sha256sum extracted_data.tar > full_tar.sha256
-         cat full_tar.sha256
+         # Record tar size and checksum for verification after reassembly
+         echo "=== Recording tar metadata ==="
+         ORIGINAL_SIZE=$(stat --format=%s extracted_data.tar)
+         ORIGINAL_SHA=$(sha256sum extracted_data.tar | awk '{print $1}')
+         echo "Size: $ORIGINAL_SIZE"
+         echo "SHA256: $ORIGINAL_SHA"

          # Split into 500MB chunks to avoid artifact upload issues
          echo "=== Splitting tar into 500MB chunks ==="
          mkdir -p tar_chunks
          split -b 500M extracted_data.tar tar_chunks/extracted_data.tar.part_
          rm extracted_data.tar
-         mv full_tar.sha256 tar_chunks/

+         # Write metadata file (plain text so artifact upload won't skip it)
+         echo "$ORIGINAL_SHA extracted_data.tar" > tar_chunks/checksum.txt
+         echo "$ORIGINAL_SIZE" >> tar_chunks/checksum.txt
+
          echo "=== Chunks created ==="
          ls -lah tar_chunks/
+         echo "=== Checksum file ==="
+         cat tar_chunks/checksum.txt
          else
            echo "ERROR: No extracted directories found, cannot create tar"
            exit 1
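The step above stores the original tar's metadata in `tar_chunks/checksum.txt`: line 1 holds `<sha256> extracted_data.tar`, line 2 the byte size. For reproducing the reassembly-and-verify logic outside CI, a hedged Python sketch (function and path names are illustrative; chunk naming follows the `split` invocation above):

```python
import hashlib
import shutil
from pathlib import Path


def sha256_of(path: Path) -> str:
    """Stream a file through SHA-256 without loading it all into memory."""
    h = hashlib.sha256()
    with path.open("rb") as f:
        for block in iter(lambda: f.read(1 << 20), b""):
            h.update(block)
    return h.hexdigest()


def reassemble_and_verify(chunks_dir: str = "tar_chunks",
                          out: str = "extracted_data.tar") -> None:
    chunks = Path(chunks_dir)
    # Concatenate parts in lexicographic order, matching split's part_aa, part_ab, ... suffixes
    with open(out, "wb") as dst:
        for part in sorted(chunks.glob("extracted_data.tar.part_*")):
            with part.open("rb") as src:
                shutil.copyfileobj(src, dst)
    # checksum.txt: line 1 is "<sha256> <name>", line 2 is the byte size
    lines = (chunks / "checksum.txt").read_text().splitlines()
    expected_sha, expected_size = lines[0].split()[0], int(lines[1])
    out_path = Path(out)
    if sha256_of(out_path) != expected_sha or out_path.stat().st_size != expected_size:
        raise RuntimeError("Reassembled tar does not match original")
    print("Checksum and size verified")
```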
@@ -179,19 +186,30 @@ jobs:
          echo "=== Reassembled tar file info ==="
          ls -lah extracted_data.tar

-         # Verify checksum of reassembled tar matches original
-         echo "=== Verifying reassembled tar checksum ==="
-         echo "Original checksum:"
-         cat tar_chunks/full_tar.sha256
-         echo "Reassembled checksum:"
-         sha256sum extracted_data.tar
-         sha256sum -c tar_chunks/full_tar.sha256 || { echo "ERROR: Reassembled tar checksum mismatch - data corrupted during transfer"; exit 1; }
-         echo "Checksum verified - data integrity confirmed"
+         # Verify integrity
+         echo "=== Verifying reassembled tar ==="
+         if [ -f tar_chunks/checksum.txt ]; then
+           EXPECTED_SHA=$(head -1 tar_chunks/checksum.txt | awk '{print $1}')
+           EXPECTED_SIZE=$(sed -n '2p' tar_chunks/checksum.txt)
+           ACTUAL_SHA=$(sha256sum extracted_data.tar | awk '{print $1}')
+           ACTUAL_SIZE=$(stat --format=%s extracted_data.tar)
+           echo "Expected: SHA=$EXPECTED_SHA Size=$EXPECTED_SIZE"
+           echo "Actual: SHA=$ACTUAL_SHA Size=$ACTUAL_SIZE"
+           if [ "$EXPECTED_SHA" != "$ACTUAL_SHA" ] || [ "$EXPECTED_SIZE" != "$ACTUAL_SIZE" ]; then
+             echo "ERROR: Reassembled tar does not match original - data corrupted during transfer"
+             exit 1
+           fi
+           echo "Checksum and size verified"
+         else
+           echo "WARNING: No checksum file found, falling back to tar integrity check"
+           tar -tf extracted_data.tar > /dev/null || { echo "ERROR: Tar file is corrupted"; exit 1; }
+           echo "Tar integrity check passed"
+         fi

          rm -rf tar_chunks

          echo "=== Extracting ==="
-         tar -xvf extracted_data.tar
+         tar -xf extracted_data.tar
          rm extracted_data.tar
          echo "has_data=true" >> "$GITHUB_OUTPUT"
          echo "=== Contents of data/output ==="
@@ -261,10 +261,64 @@ jobs:
          path: data/openairframes/openairframes_community_*.csv
          retention-days: 1

+  build-adsbexchange-json:
+    runs-on: ubuntu-latest
+    if: github.event_name != 'schedule'
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v6
+        with:
+          fetch-depth: 0
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: "3.14"
+
+      - name: Run ADS-B Exchange JSON release script
+        run: |
+          python -m src.contributions.create_daily_adsbexchange_release ${{ inputs.date && format('--date {0}', inputs.date) || '' }}
+          ls -lah data/openairframes
+
+      - name: Upload ADS-B Exchange JSON artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: adsbexchange-json
+          path: data/openairframes/basic-ac-db_*.json.gz
+          retention-days: 1
+
+  build-mictronics-db:
+    runs-on: ubuntu-latest
+    if: github.event_name != 'schedule'
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v6
+        with:
+          fetch-depth: 0
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: "3.14"
+
+      - name: Run Mictronics DB release script
+        continue-on-error: true
+        run: |
+          python -m src.contributions.create_daily_microtonics_release ${{ inputs.date && format('--date {0}', inputs.date) || '' }}
+          ls -lah data/openairframes
+
+      - name: Upload Mictronics DB artifact
+        uses: actions/upload-artifact@v4
+        with:
+          name: mictronics-db
+          path: data/openairframes/mictronics-db_*.zip
+          retention-days: 1
+          if-no-files-found: ignore
+
  create-release:
    runs-on: ubuntu-latest
-   needs: [build-faa, adsb-reduce, build-community]
-   if: github.event_name != 'schedule'
+   needs: [build-faa, adsb-reduce, build-community, build-adsbexchange-json, build-mictronics-db]
+   if: github.event_name != 'schedule' && !failure() && !cancelled()
    steps:
      - name: Checkout for gh CLI
        uses: actions/checkout@v4
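Both new jobs gate the `--date` flag behind the same expression. As a rough Python analogue of what `${{ inputs.date && format('--date {0}', inputs.date) || '' }}` evaluates to (a hedged sketch, not GitHub's actual expression evaluator):

```python
def date_flag(date_input: str | None) -> str:
    """Rough analogue of ${{ inputs.date && format('--date {0}', inputs.date) || '' }}."""
    # A non-empty input yields the flag; a missing or empty input yields ''.
    return f"--date {date_input}" if date_input else ""

assert date_flag("2026-05-01") == "--date 2026-05-01"
assert date_flag("") == ""
assert date_flag(None) == ""
```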
@@ -291,6 +345,19 @@ jobs:
          name: community-release
          path: artifacts/community

+     - name: Download ADS-B Exchange JSON artifact
+       uses: actions/download-artifact@v4
+       with:
+         name: adsbexchange-json
+         path: artifacts/adsbexchange
+
+     - name: Download Mictronics DB artifact
+       uses: actions/download-artifact@v4
+       continue-on-error: true
+       with:
+         name: mictronics-db
+         path: artifacts/mictronics
+
      - name: Debug artifact structure
        run: |
          echo "=== Full artifacts tree ==="
@@ -301,6 +368,10 @@ jobs:
          find artifacts/adsb -type f 2>/dev/null || echo "No files found in artifacts/adsb"
          echo "=== Community artifacts ==="
          find artifacts/community -type f 2>/dev/null || echo "No files found in artifacts/community"
+         echo "=== ADS-B Exchange JSON artifacts ==="
+         find artifacts/adsbexchange -type f 2>/dev/null || echo "No files found in artifacts/adsbexchange"
+         echo "=== Mictronics DB artifacts ==="
+         find artifacts/mictronics -type f 2>/dev/null || echo "No files found in artifacts/mictronics"

      - name: Prepare release metadata
        id: meta
@@ -320,6 +391,8 @@ jobs:
          CSV_FILE_ADSB=$(find artifacts/adsb -name "openairframes_adsb_*.csv" -type f 2>/dev/null | head -1)
          CSV_FILE_COMMUNITY=$(find artifacts/community -name "openairframes_community_*.csv" -type f 2>/dev/null | head -1)
          ZIP_FILE=$(find artifacts/faa -name "ReleasableAircraft_*.zip" -type f 2>/dev/null | head -1)
+         JSON_FILE_ADSBX=$(find artifacts/adsbexchange -name "basic-ac-db_*.json.gz" -type f 2>/dev/null | head -1)
+         ZIP_FILE_MICTRONICS=$(find artifacts/mictronics -name "mictronics-db_*.zip" -type f 2>/dev/null | head -1)

          # Validate required files exist
          MISSING_FILES=""
@@ -332,12 +405,24 @@ jobs:
          if [ -z "$ZIP_FILE" ] || [ ! -f "$ZIP_FILE" ]; then
            MISSING_FILES="$MISSING_FILES FAA_ZIP"
          fi
+         if [ -z "$JSON_FILE_ADSBX" ] || [ ! -f "$JSON_FILE_ADSBX" ]; then
+           MISSING_FILES="$MISSING_FILES ADSBX_JSON"
+         fi
+
+         # Optional files - warn but don't fail
+         OPTIONAL_MISSING=""
+         if [ -z "$ZIP_FILE_MICTRONICS" ] || [ ! -f "$ZIP_FILE_MICTRONICS" ]; then
+           OPTIONAL_MISSING="$OPTIONAL_MISSING MICTRONICS_ZIP"
+           ZIP_FILE_MICTRONICS=""
+         fi

          if [ -n "$MISSING_FILES" ]; then
            echo "ERROR: Missing required release files:$MISSING_FILES"
            echo "FAA CSV: $CSV_FILE_FAA"
            echo "ADSB CSV: $CSV_FILE_ADSB"
            echo "ZIP: $ZIP_FILE"
+           echo "ADSBX JSON: $JSON_FILE_ADSBX"
+           echo "MICTRONICS ZIP: $ZIP_FILE_MICTRONICS"
            exit 1
          fi
@@ -346,6 +431,15 @@ jobs:
          CSV_BASENAME_ADSB=$(basename "$CSV_FILE_ADSB")
          CSV_BASENAME_COMMUNITY=$(basename "$CSV_FILE_COMMUNITY" 2>/dev/null || echo "")
          ZIP_BASENAME=$(basename "$ZIP_FILE")
+         JSON_BASENAME_ADSBX=$(basename "$JSON_FILE_ADSBX")
+         ZIP_BASENAME_MICTRONICS=""
+         if [ -n "$ZIP_FILE_MICTRONICS" ]; then
+           ZIP_BASENAME_MICTRONICS=$(basename "$ZIP_FILE_MICTRONICS")
+         fi
+
+         if [ -n "$OPTIONAL_MISSING" ]; then
+           echo "WARNING: Optional files missing:$OPTIONAL_MISSING (will continue without them)"
+         fi

          echo "date=$DATE" >> "$GITHUB_OUTPUT"
          echo "tag=$TAG" >> "$GITHUB_OUTPUT"
@@ -357,6 +451,10 @@ jobs:
          echo "csv_basename_community=$CSV_BASENAME_COMMUNITY" >> "$GITHUB_OUTPUT"
          echo "zip_file=$ZIP_FILE" >> "$GITHUB_OUTPUT"
          echo "zip_basename=$ZIP_BASENAME" >> "$GITHUB_OUTPUT"
+         echo "json_file_adsbx=$JSON_FILE_ADSBX" >> "$GITHUB_OUTPUT"
+         echo "json_basename_adsbx=$JSON_BASENAME_ADSBX" >> "$GITHUB_OUTPUT"
+         echo "zip_file_mictronics=$ZIP_FILE_MICTRONICS" >> "$GITHUB_OUTPUT"
+         echo "zip_basename_mictronics=$ZIP_BASENAME_MICTRONICS" >> "$GITHUB_OUTPUT"
          echo "name=OpenAirframes snapshot ($DATE)${BRANCH_SUFFIX}" >> "$GITHUB_OUTPUT"

          echo "Found files:"
@@ -364,6 +462,8 @@ jobs:
          echo " ADSB CSV: $CSV_FILE_ADSB"
          echo " Community CSV: $CSV_FILE_COMMUNITY"
          echo " ZIP: $ZIP_FILE"
+         echo " ADSBX JSON: $JSON_FILE_ADSBX"
+         echo " MICTRONICS ZIP: $ZIP_FILE_MICTRONICS"

      - name: Delete existing release if exists
        run: |
@@ -377,7 +477,7 @@ jobs:
        with:
          tag_name: ${{ steps.meta.outputs.tag }}
          name: ${{ steps.meta.outputs.name }}
-         fail_on_unmatched_files: true
+         fail_on_unmatched_files: false
          body: |
            Automated daily snapshot generated at 06:00 UTC for ${{ steps.meta.outputs.date }}.

@@ -386,10 +486,14 @@ jobs:
            - ${{ steps.meta.outputs.csv_basename_adsb }}
            - ${{ steps.meta.outputs.csv_basename_community }}
            - ${{ steps.meta.outputs.zip_basename }}
+           - ${{ steps.meta.outputs.json_basename_adsbx }}
+           ${{ steps.meta.outputs.zip_basename_mictronics && format('- {0}', steps.meta.outputs.zip_basename_mictronics) || '' }}
          files: |
            ${{ steps.meta.outputs.csv_file_faa }}
            ${{ steps.meta.outputs.csv_file_adsb }}
            ${{ steps.meta.outputs.csv_file_community }}
            ${{ steps.meta.outputs.zip_file }}
+           ${{ steps.meta.outputs.json_file_adsbx }}
+           ${{ steps.meta.outputs.zip_file_mictronics }}
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -48,29 +48,52 @@ jobs:
 git fetch origin "$branch_name"
 git checkout "$branch_name"

-# Merge main into PR branch
 git config user.name "github-actions[bot]"
 git config user.email "github-actions[bot]@users.noreply.github.com"

-if git merge origin/main -m "Merge main to update schema"; then
-    # Regenerate schema for this PR's submission (adds any new tags)
-    python -m src.contributions.regenerate_pr_schema || true
-
-    # If there are changes, commit and push
-    if [ -n "$(git status --porcelain schemas/)" ]; then
-        git add schemas/
-        git commit -m "Update schema with new tags"
-        git push origin "$branch_name"
-        echo " Updated PR #$pr_number with schema changes"
-    else
-        git push origin "$branch_name"
-        echo " Merged main into PR #$pr_number"
+# Get the community submission file(s) and schema from this branch
+community_files=$(git diff --name-only origin/main...HEAD -- 'community/' 'schemas/')
+
+if [ -z "$community_files" ]; then
+    echo " No community/schema files found in PR #$pr_number, skipping"
+    git checkout main
+    continue
+fi
+
+echo " Files to preserve: $community_files"
+
+# Save the community files content
+mkdir -p /tmp/pr_files
+for file in $community_files; do
+    if [ -f "$file" ]; then
+        mkdir -p "/tmp/pr_files/$(dirname "$file")"
+        cp "$file" "/tmp/pr_files/$file"
+    fi
+done
+
+# Reset branch to main (clean slate)
+git reset --hard origin/main
+
+# Restore the community files
+for file in $community_files; do
+    if [ -f "/tmp/pr_files/$file" ]; then
+        mkdir -p "$(dirname "$file")"
+        cp "/tmp/pr_files/$file" "$file"
+    fi
+done
+rm -rf /tmp/pr_files
+
+# Regenerate schema with current main + this submission's tags
+python -m src.contributions.regenerate_pr_schema || true
+
+# Stage and commit all changes
+git add community/ schemas/
+if ! git diff --cached --quiet; then
+    git commit -m "Community submission (rebased on main)"
+    git push --force origin "$branch_name"
+    echo " Rebased PR #$pr_number onto main"
 else
-    echo " Merge conflict in PR #$pr_number, adding comment"
-    gh pr comment "$pr_number" --body $'⚠️ **Merge Conflict**\n\nAnother community submission was merged and this PR has conflicts.\n\nA maintainer may need to:\n1. Close this PR\n2. Remove the `approved` label from the original issue\n3. Re-add the `approved` label to regenerate the PR'
-    git merge --abort
-fi
+    echo " No changes needed for PR #$pr_number"
 fi

 git checkout main
@@ -20,7 +20,7 @@ A daily release is created at **06:00 UTC** and includes:
  All [FAA registration data](https://www.faa.gov/licenses_certificates/aircraft_certification/aircraft_registry/releasable_aircraft_download) from 2023-08-16 to present (~260 MB)

- **openairframes_adsb.csv**
-  Airframe information derived from ADS-B messages on the [ADSB.lol](https://www.adsb.lol/) network, from 2026-02-12 to present. The airframe information originates from the [Mictronics aircraft database](https://www.mictronics.de/aircraft-database/) (~5 MB).
+  Airframe information derived from ADS-B messages on the [ADSB.lol](https://www.adsb.lol/) network, from 2026-02-12 to present (will extend back to 2024-01-01 soon). The airframe information originates from the [Mictronics aircraft database](https://www.mictronics.de/aircraft-database/) (~5 MB).

- **ReleasableAircraft_{date}.zip**
  A daily snapshot of the FAA database, which updates at **05:30 UTC**
@@ -43,7 +43,8 @@ Please try to follow the submission formatting guidelines. If you are struggling

## For Developers
All code, compute (GitHub Actions), and storage (releases) are in this GitHub repository. Improvements are welcome. Potential features include:
-- Web UI
+- Web UI for data
+- Web UI for contributors
- Additional export formats in the daily release
- Data fusion from multiple sources in the daily release
- Automated airframe data connectors, including (but not limited to) civil aviation authorities and airline APIs
@@ -82,8 +82,8 @@ def fetch_releases(version_date: str) -> list:
    if version_date == "v2024.12.31":
        year = "2025"
    BASE_URL = f"https://api.github.com/repos/adsblol/globe_history_{year}/releases"
-   # Match exact release name, exclude tmp releases
-   PATTERN = rf"^{re.escape(version_date)}-planes-readsb-prod-\d+$"
+   # Match both normal and tmp releases
+   PATTERN = rf"^{re.escape(version_date)}-planes-readsb-prod-\d+(tmp)?$"
    releases = []
    page = 1
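A quick sanity check of what the widened pattern accepts (a hedged sketch; the tag names follow the prod naming embedded in the pattern above):

```python
import re

version_date = "v2024.12.31"
PATTERN = rf"^{re.escape(version_date)}-planes-readsb-prod-\d+(tmp)?$"

# The optional (tmp)? suffix now admits both normal and tmp release tags...
assert re.match(PATTERN, "v2024.12.31-planes-readsb-prod-0")
assert re.match(PATTERN, "v2024.12.31-planes-readsb-prod-12tmp")
# ...while unrelated tags are still rejected.
assert not re.match(PATTERN, "v2024.12.31-planes-readsb-staging-0")
```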
@@ -582,6 +582,12 @@ def process_version_date(version_date: str, keep_folders: bool = False):
        print(f"No releases found for {vd}.")
        return None

+   # Prefer non-tmp releases; only use tmp if no normal releases exist
+   normal_releases = [r for r in releases if "tmp" not in r["tag_name"]]
+   tmp_releases = [r for r in releases if "tmp" in r["tag_name"]]
+   releases = normal_releases if normal_releases else tmp_releases
+   print(f"Using {'normal' if normal_releases else 'tmp'} releases ({len(releases)} found)")
+
    downloaded_files = []
    for release in releases:
        tag_name = release["tag_name"]
@@ -59,6 +59,12 @@ def download_and_extract(version_date: str) -> str | None:
        print(f"No releases found for {version_date}")
        return None

+   # Prefer non-tmp releases; only use tmp if no normal releases exist
+   normal_releases = [r for r in releases if "tmp" not in r["tag_name"]]
+   tmp_releases = [r for r in releases if "tmp" in r["tag_name"]]
+   releases = normal_releases if normal_releases else tmp_releases
+   print(f"Using {'normal' if normal_releases else 'tmp'} releases ({len(releases)} found)")
+
    downloaded_files = []
    for release in releases:
        tag_name = release["tag_name"]
@@ -0,0 +1,155 @@
#!/usr/bin/env python3
"""
Run the full ADS-B processing pipeline locally.

Downloads adsb.lol data, processes trace files, and outputs the openairframes_adsb CSV.

Usage:
    # Single day (yesterday by default)
    python -m src.adsb.run_local

    # Single day (specific date)
    python -m src.adsb.run_local 2024-01-15

    # Date range (inclusive)
    python -m src.adsb.run_local 2024-01-01 2024-01-07
"""
import argparse
import os
import subprocess
import sys
from datetime import datetime, timedelta


def run_cmd(cmd: list[str], description: str) -> None:
    """Run a command and exit on failure."""
    print(f"\n>>> {' '.join(cmd)}")
    result = subprocess.run(cmd)
    if result.returncode != 0:
        print(f"ERROR: {description} failed with exit code {result.returncode}")
        sys.exit(result.returncode)


def main():
    parser = argparse.ArgumentParser(
        description="Run full ADS-B processing pipeline locally",
        usage="python -m src.adsb.run_local [start_date] [end_date]"
    )
    parser.add_argument(
        "start_date",
        nargs="?",
        help="Start date (YYYY-MM-DD). Default: yesterday"
    )
    parser.add_argument(
        "end_date",
        nargs="?",
        help="End date (YYYY-MM-DD, inclusive). If omitted, processes a single day"
    )
    parser.add_argument(
        "--chunks",
        type=int,
        default=4,
        help="Number of parallel chunks (default: 4)"
    )
    parser.add_argument(
        "--skip-base",
        action="store_true",
        help="Skip downloading and merging with base release"
    )
    args = parser.parse_args()

    # Determine dates
    if args.start_date:
        start_date = datetime.strptime(args.start_date, "%Y-%m-%d")
    else:
        start_date = datetime.utcnow() - timedelta(days=1)

    end_date = None
    if args.end_date:
        end_date = datetime.strptime(args.end_date, "%Y-%m-%d")

    start_str = start_date.strftime("%Y-%m-%d")
    end_str = end_date.strftime("%Y-%m-%d") if end_date else None

    print("=" * 60)
    print("ADS-B Processing Pipeline")
    print("=" * 60)
    if end_str:
        print(f"Date range: {start_str} to {end_str}")
    else:
        print(f"Date: {start_str}")
    print(f"Chunks: {args.chunks}")
    print("=" * 60)

    # Step 1: Download and extract
    print("\n" + "=" * 60)
    print("Step 1: Download and Extract")
    print("=" * 60)

    if end_str:
        cmd = ["python", "-m", "src.adsb.download_and_list_icaos",
               "--start-date", start_str, "--end-date", end_str]
    else:
        cmd = ["python", "-m", "src.adsb.download_and_list_icaos",
               "--date", start_str]
    run_cmd(cmd, "Download and extract")

    # Step 2: Process chunks
    print("\n" + "=" * 60)
    print("Step 2: Process Chunks")
    print("=" * 60)

    for chunk_id in range(args.chunks):
        print(f"\n--- Chunk {chunk_id + 1}/{args.chunks} ---")
        if end_str:
            cmd = ["python", "-m", "src.adsb.process_icao_chunk",
                   "--chunk-id", str(chunk_id),
                   "--total-chunks", str(args.chunks),
                   "--start-date", start_str,
                   "--end-date", end_str]
        else:
            cmd = ["python", "-m", "src.adsb.process_icao_chunk",
                   "--chunk-id", str(chunk_id),
                   "--total-chunks", str(args.chunks),
                   "--date", start_str]
        run_cmd(cmd, f"Process chunk {chunk_id}")

    # Step 3: Combine chunks to CSV
    print("\n" + "=" * 60)
    print("Step 3: Combine to CSV")
    print("=" * 60)

    chunks_dir = "./data/output/adsb_chunks"
    cmd = ["python", "-m", "src.adsb.combine_chunks_to_csv",
           "--chunks-dir", chunks_dir]

    if end_str:
        cmd.extend(["--start-date", start_str, "--end-date", end_str])
    else:
        cmd.extend(["--date", start_str])

    if args.skip_base:
        cmd.append("--skip-base")

    run_cmd(cmd, "Combine chunks")

    print("\n" + "=" * 60)
    print("Done!")
    print("=" * 60)

    # Show output
    output_dir = "./data/openairframes"
    if end_str:
        output_file = f"openairframes_adsb_{start_str}_{end_str}.csv"
    else:
        output_file = f"openairframes_adsb_{start_str}_{start_str}.csv"

    output_path = os.path.join(output_dir, output_file)
    if os.path.exists(output_path):
        size_mb = os.path.getsize(output_path) / (1024 * 1024)
        print(f"Output: {output_path}")
        print(f"Size: {size_mb:.1f} MB")


if __name__ == "__main__":
    main()
@@ -0,0 +1,40 @@
#!/usr/bin/env python3
"""
Download ADS-B Exchange basic-ac-db.json.gz.

Usage:
    python -m src.contributions.create_daily_adsbexchange_release [--date YYYY-MM-DD]
"""
from __future__ import annotations

import argparse
import shutil
from datetime import datetime, timezone
from pathlib import Path
from urllib.request import Request, urlopen

URL = "https://downloads.adsbexchange.com/downloads/basic-ac-db.json.gz"
OUT_ROOT = Path("data/openairframes")


def main() -> None:
    parser = argparse.ArgumentParser(description="Create daily ADS-B Exchange JSON release")
    parser.add_argument("--date", type=str, help="Date to process (YYYY-MM-DD format, default: today UTC)")
    args = parser.parse_args()

    date_str = args.date or datetime.now(timezone.utc).strftime("%Y-%m-%d")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)

    gz_path = OUT_ROOT / f"basic-ac-db_{date_str}.json.gz"

    print(f"Downloading {URL}...")
    req = Request(URL, headers={"User-Agent": "openairframes-downloader/1.0"}, method="GET")
    with urlopen(req, timeout=300) as r, gz_path.open("wb") as f:
        shutil.copyfileobj(r, f)

    print(f"Wrote: {gz_path}")


if __name__ == "__main__":
    main()
@@ -0,0 +1,55 @@
#!/usr/bin/env python3
"""
Download Mictronics aircraft database zip.

Usage:
    python -m src.contributions.create_daily_microtonics_release [--date YYYY-MM-DD]
"""
from __future__ import annotations

import argparse
import shutil
import sys
import time
from datetime import datetime, timezone
from pathlib import Path
from urllib.error import URLError
from urllib.request import Request, urlopen

URL = "https://www.mictronics.de/aircraft-database/indexedDB_old.php"
OUT_ROOT = Path("data/openairframes")
MAX_RETRIES = 3
RETRY_DELAY = 30  # seconds


def main() -> None:
    parser = argparse.ArgumentParser(description="Create daily Mictronics database release")
    parser.add_argument("--date", type=str, help="Date to process (YYYY-MM-DD format, default: today UTC)")
    args = parser.parse_args()

    date_str = args.date or datetime.now(timezone.utc).strftime("%Y-%m-%d")

    OUT_ROOT.mkdir(parents=True, exist_ok=True)

    zip_path = OUT_ROOT / f"mictronics-db_{date_str}.zip"

    for attempt in range(1, MAX_RETRIES + 1):
        try:
            print(f"Downloading {URL} (attempt {attempt}/{MAX_RETRIES})...")
            req = Request(URL, headers={"User-Agent": "Mozilla/5.0 (compatible; openairframes-downloader/1.0)"}, method="GET")
            with urlopen(req, timeout=120) as r, zip_path.open("wb") as f:
                shutil.copyfileobj(r, f)
            print(f"Wrote: {zip_path}")
            return
        except (URLError, TimeoutError) as e:
            print(f"Attempt {attempt} failed: {e}")
            if attempt < MAX_RETRIES:
                print(f"Retrying in {RETRY_DELAY} seconds...")
                time.sleep(RETRY_DELAY)
            else:
                print("All retries exhausted. Mictronics download failed.")
                sys.exit(1)


if __name__ == "__main__":
    main()
@@ -47,6 +47,9 @@ def convert_faa_master_txt_to_df(zip_path: Path, date: str):

    # Convert all NaN to empty strings
    df = df.fillna("")
+   # The FAA parser can produce the literal string "None" for missing values;
+   # replace those so they match the empty-string convention used everywhere else.
+   df = df.replace("None", "")

    return df
@@ -84,8 +87,8 @@ def concat_faa_historical_df(df_base, df_new):
        # Convert to string
        val_str = str(val).strip()

-       # Handle empty strings
-       if val_str == "" or val_str == "nan":
+       # Handle empty strings and null-like literals
+       if val_str == "" or val_str == "nan" or val_str == "None":
            return ""

        # Check if it looks like a list representation (starts with [ )
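For context, a minimal sketch of the widened check's behavior (`normalize` is a hypothetical stand-in for the helper this hunk sits in):

```python
def normalize(val) -> str:
    # Hypothetical stand-in for the value-normalization helper shown above.
    val_str = str(val).strip()
    # Empty strings and null-like literals all collapse to ""
    if val_str in ("", "nan", "None"):
        return ""
    return val_str


assert normalize("None") == ""            # previously slipped through as the literal "None"
assert normalize(float("nan")) == ""
assert normalize(" N12345 ") == "N12345"
```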