mirror of
https://github.com/garrytan/gstack.git
synced 2026-05-02 03:35:09 +02:00
64d5a3e424
* fix: drop all anon RLS policies + revoke view access + add cache table Migration 002 locks down the Supabase telemetry backend: - Drops all SELECT, INSERT, UPDATE policies for the anon role - Explicitly revokes SELECT on crash_clusters and skill_sequences views - Drops stale error_message/failed_step columns (exist live but not in migration) - Creates community_pulse_cache table for server-side aggregation caching * feat: extend community-pulse with full dashboard data + server-side cache community-pulse now returns top skills, crash clusters, version distribution, and weekly active count in a single aggregated response. Results are cached in the community_pulse_cache table (1-hour TTL) to prevent DoS via repeated expensive queries. * fix: route all telemetry through edge functions, not PostgREST - gstack-telemetry-sync: POST to /functions/v1/telemetry-ingest instead of /rest/v1/telemetry_events. Removes sed field-renaming (edge function expects raw JSONL names). Parses inserted count — holds cursor if zero inserted. - gstack-update-check: POST to /functions/v1/update-check. - gstack-community-dashboard: calls community-pulse edge function instead of direct PostgREST queries. - config.sh: removes GSTACK_TELEMETRY_ENDPOINT, fixes misleading comment. * test: RLS smoke test + telemetry field name verification - verify-rls.sh: 9-check smoke test (5 reads + 3 inserts + 1 update) verifying anon key is fully locked out after migration. - telemetry.test.ts: verifies JSONL uses raw field names (v, ts, sessions) that the edge function expects, not Postgres column names. - README.md: fixes privacy claim to match actual RLS policy. 
* chore: bump version and changelog (v0.11.16.0) Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: pre-landing review fixes — JSONB field order, version filter, RLS verification - Dashboard JSON parsing: use per-object grep instead of field-order-dependent regex (JSONB doesn't preserve key order) - Version distribution: filter to skill_run events only (was counting all types) - verify-rls.sh: only 401/403 count as PASS (not empty 200 or 5xx); add Authorization header to test as anon role properly - Remove dead empty loop in community-pulse * chore: untrack browse/dist binaries — 116MB of arm64-only Mach-O These compiled Bun binaries only work on arm64 macOS, and ./setup already rebuilds from source for every platform. They were tracked despite .gitignore due to being committed before the ignore rule. Untracking stops them from appearing as modified in every diff. Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * docs: tone down changelog — security hardening, not incident report * fix: keep INSERT policies for old client compat, preserve extra columns - Keep anon INSERT policies so pre-v0.11.16 clients can still sync telemetry via PostgREST while new clients use edge functions - Add error_message/failed_step columns to migration (reconcile repo with live schema) instead of dropping them - Security fix still lands: SELECT and UPDATE policies are dropped Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com> * fix: sync package.json version with VERSION file (0.11.16.0) --------- Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
139 lines
4.1 KiB
TypeScript
139 lines
4.1 KiB
TypeScript
// gstack community-pulse edge function
//
// Returns aggregated community stats for the dashboard:
// weekly active count, top skills, crash clusters, version distribution.
// Uses a server-side cache (community_pulse_cache table) to prevent DoS.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
|
|
|
|
const CACHE_MAX_AGE_MS = 60 * 60 * 1000; // 1 hour
|
|
|
|
Deno.serve(async () => {
|
|
const supabase = createClient(
|
|
Deno.env.get("SUPABASE_URL") ?? "",
|
|
Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? ""
|
|
);
|
|
|
|
try {
|
|
// Check cache first
|
|
const { data: cached } = await supabase
|
|
.from("community_pulse_cache")
|
|
.select("data, refreshed_at")
|
|
.eq("id", 1)
|
|
.single();
|
|
|
|
if (cached?.refreshed_at) {
|
|
const age = Date.now() - new Date(cached.refreshed_at).getTime();
|
|
if (age < CACHE_MAX_AGE_MS) {
|
|
return new Response(JSON.stringify(cached.data), {
|
|
status: 200,
|
|
headers: {
|
|
"Content-Type": "application/json",
|
|
"Cache-Control": "public, max-age=3600",
|
|
},
|
|
});
|
|
}
|
|
}
|
|
|
|
// Cache is stale or missing — recompute
|
|
const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
|
|
const twoWeeksAgo = new Date(Date.now() - 14 * 24 * 60 * 60 * 1000).toISOString();
|
|
|
|
// Weekly active (update checks this week)
|
|
const { count: thisWeek } = await supabase
|
|
.from("update_checks")
|
|
.select("*", { count: "exact", head: true })
|
|
.gte("checked_at", weekAgo);
|
|
|
|
// Last week (for change %)
|
|
const { count: lastWeek } = await supabase
|
|
.from("update_checks")
|
|
.select("*", { count: "exact", head: true })
|
|
.gte("checked_at", twoWeeksAgo)
|
|
.lt("checked_at", weekAgo);
|
|
|
|
const current = thisWeek ?? 0;
|
|
const previous = lastWeek ?? 0;
|
|
const changePct = previous > 0
|
|
? Math.round(((current - previous) / previous) * 100)
|
|
: 0;
|
|
|
|
// Top skills (last 7 days)
|
|
const { data: skillRows } = await supabase
|
|
.from("telemetry_events")
|
|
.select("skill")
|
|
.eq("event_type", "skill_run")
|
|
.gte("event_timestamp", weekAgo)
|
|
.not("skill", "is", null)
|
|
.limit(1000);
|
|
|
|
const skillCounts: Record<string, number> = {};
|
|
for (const row of skillRows ?? []) {
|
|
if (row.skill) {
|
|
skillCounts[row.skill] = (skillCounts[row.skill] ?? 0) + 1;
|
|
}
|
|
}
|
|
const topSkills = Object.entries(skillCounts)
|
|
.sort(([, a], [, b]) => b - a)
|
|
.slice(0, 10)
|
|
.map(([skill, count]) => ({ skill, count }));
|
|
|
|
// Crash clusters (top 5)
|
|
const { data: crashes } = await supabase
|
|
.from("crash_clusters")
|
|
.select("error_class, gstack_version, total_occurrences, identified_users")
|
|
.limit(5);
|
|
|
|
// Version distribution (last 7 days)
|
|
const versionCounts: Record<string, number> = {};
|
|
const { data: versionRows } = await supabase
|
|
.from("telemetry_events")
|
|
.select("gstack_version")
|
|
.eq("event_type", "skill_run")
|
|
.gte("event_timestamp", weekAgo)
|
|
.limit(1000);
|
|
|
|
for (const row of versionRows ?? []) {
|
|
if (row.gstack_version) {
|
|
versionCounts[row.gstack_version] = (versionCounts[row.gstack_version] ?? 0) + 1;
|
|
}
|
|
}
|
|
const topVersions = Object.entries(versionCounts)
|
|
.sort(([, a], [, b]) => b - a)
|
|
.slice(0, 5)
|
|
.map(([version, count]) => ({ version, count }));
|
|
|
|
const result = {
|
|
weekly_active: current,
|
|
change_pct: changePct,
|
|
top_skills: topSkills,
|
|
crashes: crashes ?? [],
|
|
versions: topVersions,
|
|
};
|
|
|
|
// Upsert cache
|
|
await supabase
|
|
.from("community_pulse_cache")
|
|
.upsert({
|
|
id: 1,
|
|
data: result,
|
|
refreshed_at: new Date().toISOString(),
|
|
});
|
|
|
|
return new Response(JSON.stringify(result), {
|
|
status: 200,
|
|
headers: {
|
|
"Content-Type": "application/json",
|
|
"Cache-Control": "public, max-age=3600",
|
|
},
|
|
});
|
|
} catch {
|
|
return new Response(
|
|
JSON.stringify({ weekly_active: 0, change_pct: 0, top_skills: [], crashes: [], versions: [] }),
|
|
{
|
|
status: 200,
|
|
headers: { "Content-Type": "application/json" },
|
|
}
|
|
);
|
|
}
|
|
});
|