mirror of
https://github.com/FoggedLens/deflock.git
synced 2026-02-12 15:02:45 +00:00
use arm64 for alpr_cache (renamed from alpr_cluster)
This commit is contained in:
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
|
||||
ECR_REPO_URL=912821578123.dkr.ecr.us-east-1.amazonaws.com/alpr_clusters-lambda
|
||||
ECR_REPO_URL=912821578123.dkr.ecr.us-east-1.amazonaws.com/alpr_cache-lambda
|
||||
|
||||
set -e
|
||||
|
||||
@@ -12,20 +12,15 @@ fi
|
||||
|
||||
cd src
|
||||
|
||||
# build Docker image
|
||||
docker build -t alpr_clusters .
|
||||
|
||||
# tag docker image with ECR repo
|
||||
docker tag alpr_clusters:latest $ECR_REPO_URL:latest
|
||||
|
||||
# login to ECR
|
||||
aws ecr get-login-password --region us-east-1 | docker login --username AWS --password-stdin $ECR_REPO_URL
|
||||
|
||||
# push Docker image to ECR
|
||||
# build and push Docker image to ECR for ARM64 using legacy format
|
||||
docker buildx build --platform linux/arm64 -t $ECR_REPO_URL:latest --load .
|
||||
docker push $ECR_REPO_URL:latest
|
||||
|
||||
# update lambda function
|
||||
export AWS_PAGER=""
|
||||
aws lambda update-function-code --function-name alpr_clusters --image-uri $ECR_REPO_URL:latest
|
||||
# export AWS_PAGER=""
|
||||
# aws lambda update-function-code --function-name alpr_cache --image-uri $ECR_REPO_URL:latest
|
||||
|
||||
echo "Deployed!"
|
||||
12
serverless/alpr_cache/src/Dockerfile
Normal file
12
serverless/alpr_cache/src/Dockerfile
Normal file
@@ -0,0 +1,12 @@
|
||||
# Use the official AWS Lambda Python 3.14 base image from public ECR
|
||||
FROM public.ecr.aws/lambda/python:3.14-arm64
|
||||
|
||||
# Copy function code
|
||||
COPY alpr_cache.py ${LAMBDA_TASK_ROOT}
|
||||
|
||||
# Install dependencies
|
||||
COPY requirements.txt .
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
# Set the CMD to your handler
|
||||
CMD ["alpr_cache.lambda_handler"]
|
||||
@@ -1,12 +0,0 @@
|
||||
# Use the official AWS Lambda Python 3.14 base image
|
||||
FROM amazon/aws-lambda-python:3.14-x86_64
|
||||
|
||||
# Copy function code
|
||||
COPY alpr_clusters.py ${LAMBDA_TASK_ROOT}
|
||||
|
||||
# Install dependencies
|
||||
COPY requirements.txt .
|
||||
RUN pip install -r requirements.txt
|
||||
|
||||
# Set the CMD to your handler
|
||||
CMD ["alpr_clusters.lambda_handler"]
|
||||
@@ -1,144 +0,0 @@
|
||||
import json
|
||||
from collections import defaultdict
|
||||
from typing import Any
|
||||
|
||||
import boto3
|
||||
import os
|
||||
import time
|
||||
import math
|
||||
import requests
|
||||
import re
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
def terraform_rate_expression_to_minutes(rate_expression: str) -> int:
    """Convert a Terraform/EventBridge ``rate(...)`` expression to minutes.

    Accepts expressions like ``rate(1 day)``, ``rate(2 hours)`` or
    ``rate(30 minutes)`` and returns the equivalent number of minutes.

    Raises:
        ValueError: if the expression does not match the rate() grammar
            or uses an unsupported time unit.
    """
    parsed = re.match(r"rate\((\d+)\s*(day|hour|minute)s?\)", rate_expression)
    if parsed is None:
        raise ValueError(f"Invalid rate expression: {rate_expression}")

    amount = int(parsed.group(1))
    unit = parsed.group(2)

    # Minutes per supported unit; the regex already restricts `unit`
    # to these keys, so the error branch is defensive only.
    minutes_per_unit = {"day": 24 * 60, "hour": 60, "minute": 1}
    if unit not in minutes_per_unit:
        raise ValueError(f"Unsupported time unit: {unit}")
    return amount * minutes_per_unit[unit]
|
||||
|
||||
# How often the caching schedule fires, parsed from the same Terraform rate()
# expression the infrastructure uses, so the tile-index expiration below stays
# in sync with the deploy configuration.
UPDATE_RATE_MINS = terraform_rate_expression_to_minutes(os.getenv("UPDATE_RATE_MINS", "rate(60 minutes)"))
GRACE_PERIOD_MINS = 2  # XXX: set expiration a few minutes after in case put object takes a while
TILE_SIZE_DEGREES = 20  # side length of each square region tile, in degrees
|
||||
|
||||
# OSM tags worth keeping on each cached node; everything else is stripped to
# keep the per-region JSON payloads small. A frozenset gives O(1) membership
# tests (this is checked once per tag per node during segmentation).
WHITELISTED_TAGS = frozenset({
    "operator",
    "manufacturer",
    "direction",
    "brand",
    "camera:direction",
    "surveillance:brand",
    "surveillance:operator",
    "surveillance:manufacturer",
})
|
||||
|
||||
def get_all_nodes():
    """Fetch every ALPR surveillance node from the Overpass API.

    Returns the raw list of Overpass "elements" (dicts carrying at least
    ``id``, ``lat``, ``lon`` and ``tags``, per the response shape consumed
    by segment_regions).

    Raises:
        requests.HTTPError: on a non-2xx response.
        requests.Timeout: if Overpass does not answer within the timeout.
    """
    # Set up the Overpass API query
    query = """
    [out:json];
    node["man_made"="surveillance"]["surveillance:type"="ALPR"];
    out body;
    """

    # Use HTTPS, and always set a timeout: requests otherwise waits
    # forever, which would silently burn the whole Lambda invocation.
    url = "https://overpass-api.de/api/interpreter"
    response = requests.get(
        url,
        params={"data": query},
        headers={"User-Agent": "DeFlock/1.0"},
        timeout=120,
    )
    response.raise_for_status()
    return response.json()["elements"]
|
||||
|
||||
def segment_regions(nodes: Any, tile_size_degrees: int) -> dict[str, list[Any]]:
    """Bucket Overpass nodes into square map tiles.

    Each tile is ``tile_size_degrees`` on a side and keyed by the
    "{lat}/{lon}" of its floor-aligned corner. Node dicts are trimmed to
    id/lat/lon plus whitelisted tags before being placed into a tile.
    """
    print("Segmenting regions...")
    tiles: dict[str, list[Any]] = defaultdict(list)
    for node in nodes:
        lat = node["lat"]
        lon = node["lon"]
        # math.floor aligns the tile corner correctly for negative
        # coordinates as well (e.g. -0.1 -> -tile_size_degrees).
        corner_lat = math.floor(lat / tile_size_degrees) * tile_size_degrees
        corner_lon = math.floor(lon / tile_size_degrees) * tile_size_degrees
        trimmed = {
            "id": node["id"],
            "lat": lat,
            "lon": lon,
            "tags": {key: value for key, value in node["tags"].items() if key in WHITELISTED_TAGS},
        }
        tiles[f"{corner_lat}/{corner_lon}"].append(trimmed)
    print("Region segmentation complete.")

    return tiles
|
||||
|
||||
def lambda_handler(event, context):
    """Fetch all ALPR nodes from Overpass, tile them, and publish to S3.

    Writes one JSON file per region tile under ``regions/{lat}/{lon}.json``
    plus a ``regions/index.json`` listing the tiles and an expiration
    timestamp derived from the schedule rate. Uploads run concurrently.

    Args:
        event: Lambda trigger payload (unused).
        context: Lambda runtime context (unused).

    Returns:
        A dict with ``statusCode`` 200 and a success message body.
    """
    nodes = get_all_nodes()
    regions_dict = segment_regions(nodes=nodes, tile_size_degrees=TILE_SIZE_DEGREES)

    print("Uploading data to S3...")

    s3 = boto3.client("s3")
    # Destination bucket; defaults to the CDN-backed bucket.
    bucket_new = os.getenv("OUTPUT_BUCKET", "cdn.deflock.me")

    # TODO: handle outdated index files when their referenced files are deleted
    epoch = int(time.time())
    # Index consumed by clients: expiration tells them when to re-fetch;
    # the grace period pads for upload latency. The ?v= query string
    # cache-busts the per-tile URLs on each refresh.
    tile_index = {
        "expiration_utc": epoch + (UPDATE_RATE_MINS + GRACE_PERIOD_MINS) * 60,
        "regions": list(regions_dict.keys()),
        "tile_url": "https://cdn.deflock.me/regions/{lat}/{lon}.json?v=" + str(epoch),
        "tile_size_degrees": TILE_SIZE_DEGREES,
    }

    print("Uploading regions to S3...")

    def upload_json_to_s3(bucket, key, body):
        # Single PUT of a JSON document; closure over the shared client.
        s3.put_object(
            Bucket=bucket,
            Key=key,
            Body=body,
            ContentType="application/json",
        )

    # Use ThreadPoolExecutor for concurrent uploads
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = []
        for latlng_string, elements in regions_dict.items():
            lat, lon = latlng_string.split("/")
            key = f"regions/{lat}/{lon}.json"
            body = json.dumps(elements)
            futures.append(executor.submit(upload_json_to_s3, bucket_new, key, body))

        # add index file
        futures.append(executor.submit(upload_json_to_s3, bucket_new, "regions/index.json", json.dumps(tile_index)))

        # Wait for all futures to complete
        # (future.result() re-raises any upload exception, failing the run)
        for future in futures:
            future.result()

    print("Regions uploaded to S3. Done!")

    return {
        "statusCode": 200,
        "body": "Successfully cached OSM nodes",
    }
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Local debugging entry point: cache the Overpass response in nodes.json
    # so repeated runs don't hammer the API, then write the segmented regions
    # to regions_dict.json for inspection.
    from pathlib import Path

    nodes_file_path = Path("nodes.json")
    if nodes_file_path.exists():
        # Use context managers so file handles are always closed
        # (the previous json.load(path.open()) leaked the handle).
        with nodes_file_path.open() as f:
            nodes = json.load(f)
    else:
        nodes = get_all_nodes()
        with nodes_file_path.open("w") as f:
            json.dump(nodes, f)

    regions_dict = segment_regions(nodes=nodes, tile_size_degrees=5)
    with open("regions_dict.json", "w") as f:
        json.dump(regions_dict, f)
|
||||
@@ -10,11 +10,11 @@ module "alpr_counts" {
|
||||
sns_topic_arn = aws_sns_topic.lambda_alarms.arn
|
||||
}
|
||||
|
||||
module "alpr_clusters" {
|
||||
module_name = "alpr_clusters"
|
||||
source = "./modules/alpr_clusters"
|
||||
module "alpr_cache" {
|
||||
module_name = "alpr_cache"
|
||||
source = "./modules/alpr_cache"
|
||||
deflock_cdn_bucket = var.deflock_cdn_bucket
|
||||
rate = "rate(1 hour)"
|
||||
rate = "rate(30 minutes)"
|
||||
sns_topic_arn = aws_sns_topic.lambda_alarms.arn
|
||||
}
|
||||
|
||||
|
||||
@@ -53,6 +53,7 @@ resource "aws_lambda_function" "overpass_lambda" {
|
||||
image_uri = "${aws_ecr_repository.lambda_repository.repository_url}:latest"
|
||||
timeout = 180
|
||||
memory_size = 512
|
||||
architectures = ["arm64"]
|
||||
environment {
|
||||
variables = {
|
||||
UPDATE_RATE_MINS = var.rate
|
||||
@@ -108,4 +109,4 @@ resource "aws_cloudwatch_metric_alarm" "lambda_error_alarm" {
|
||||
threshold = 0
|
||||
comparison_operator = "GreaterThanThreshold"
|
||||
alarm_actions = [var.sns_topic_arn]
|
||||
}
|
||||
}
|
||||
@@ -1,3 +1,3 @@
|
||||
output "ecr_repository_url" {
|
||||
value = aws_ecr_repository.lambda_repository.repository_url
|
||||
}
|
||||
}
|
||||
@@ -13,4 +13,4 @@ variable "rate" {
|
||||
variable "sns_topic_arn" {
|
||||
description = "The ARN of the SNS topic for Lambda alarms"
|
||||
type = string
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user