merge: incorporate origin/main into community-mode branch

Resolved 10 conflicted files:
- VERSION and package.json: kept 0.12.0.0 (feature branch version)
- CHANGELOG.md: preserved both branch entry and main's new entries
- supabase/config.sh: kept GSTACK_WEB_URL, accepted TELEMETRY_ENDPOINT removal
- bin/gstack-{community-dashboard,telemetry-log,telemetry-sync,update-check}:
  took main's improved versions (edge function approach, safe cursor, UUID gen)
- supabase/functions/community-pulse: took main's count-based approach
- test/telemetry.test.ts: took main's structure with fingerprint field name

Post-merge fixes:
- Removed shadowed local RESOLVERS/functions in gen-skill-docs.ts (main's
  resolver imports now take precedence for tier-based preamble, coverage gates)
- Added 3 missing E2E_TIERS entries (ship-plan-*, review-plan-completion)
- Updated telemetry test to match current prompt text
- Regenerated all SKILL.md files

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Garry Tan committed 2026-03-24 21:01:19 -07:00
68 changed files with 5645 additions and 495 deletions
supabase/config.sh  +2 -4
@@ -1,13 +1,11 @@
 #!/usr/bin/env bash
 # Supabase project config for gstack telemetry
-# These are PUBLIC keys — safe to commit (like Firebase public config).
-# RLS policies restrict what the anon/publishable key can do (INSERT only).
+# RLS denies all access to the anon key. All reads and writes go through
+# edge functions (which use SUPABASE_SERVICE_ROLE_KEY server-side).
 GSTACK_SUPABASE_URL="https://frugpmstpnojnhfyimgv.supabase.co"
 GSTACK_SUPABASE_ANON_KEY="sb_publishable_tR4i6cyMIrYTE3s6OyHGHw_ppx2p6WK"
-# Telemetry ingest endpoint (Data API)
-GSTACK_TELEMETRY_ENDPOINT="${GSTACK_SUPABASE_URL}/rest/v1"
 # gstack.gg web app (auth + screenshot upload)
 GSTACK_WEB_URL="https://gstack.gg"
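
The new comment describes the target access model: the anon key can do nothing directly, and traffic goes through edge functions holding the service-role key. A minimal sketch of that pattern (a hypothetical handler for illustration, not the shipped gstack code):

```ts
// Hypothetical edge function showing the server-side service-role pattern.
// The service-role key bypasses RLS and never leaves the server.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";

Deno.serve(async (req) => {
  const supabase = createClient(
    Deno.env.get("SUPABASE_URL") ?? "",
    Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? "",
  );
  const event = await req.json();
  const { error } = await supabase.from("telemetry_events").insert(event);
  return new Response(JSON.stringify({ ok: !error }), {
    status: error ? 500 : 200,
    headers: { "Content-Type": "application/json" },
  });
});
```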
supabase/functions/community-pulse  +101 -43
@@ -1,9 +1,12 @@
 // gstack community-pulse edge function
-// Returns weekly active installation count for preamble display.
-// Cached for 1 hour via Cache-Control header.
+// Returns aggregated community stats for the dashboard:
+// weekly active count, top skills, crash clusters, version distribution.
+// Uses server-side cache (community_pulse_cache table) to prevent DoS.
 import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
+
+const CACHE_MAX_AGE_MS = 60 * 60 * 1000; // 1 hour
+
 Deno.serve(async () => {
   const supabase = createClient(
     Deno.env.get("SUPABASE_URL") ?? "",
@@ -11,68 +14,123 @@ Deno.serve(async () => {
   );
   try {
-    // Count unique update checks in the last 7 days (install base proxy)
+    // Check cache first
+    const { data: cached } = await supabase
+      .from("community_pulse_cache")
+      .select("data, refreshed_at")
+      .eq("id", 1)
+      .single();
+    if (cached?.refreshed_at) {
+      const age = Date.now() - new Date(cached.refreshed_at).getTime();
+      if (age < CACHE_MAX_AGE_MS) {
+        return new Response(JSON.stringify(cached.data), {
+          status: 200,
+          headers: {
+            "Content-Type": "application/json",
+            "Cache-Control": "public, max-age=3600",
+          },
+        });
+      }
+    }
+
+    // Cache is stale or missing — recompute
     const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
     const twoWeeksAgo = new Date(Date.now() - 14 * 24 * 60 * 60 * 1000).toISOString();
-    // This week's unique installs (by install_fingerprint, filtered to source=live)
-    const { data: thisWeekData } = await supabase
+
+    // Weekly active (update checks this week)
+    const { count: thisWeek } = await supabase
       .from("update_checks")
-      .select("install_fingerprint")
+      .select("install_fingerprint", { count: "exact", head: true })
       .eq("source", "live")
       .gte("checked_at", weekAgo);
-    // Last week's unique installs (for change %)
-    const { data: lastWeekData } = await supabase
+
+    // Last week (for change %)
+    const { count: lastWeek } = await supabase
       .from("update_checks")
-      .select("install_fingerprint")
+      .select("install_fingerprint", { count: "exact", head: true })
       .eq("source", "live")
       .gte("checked_at", twoWeeksAgo)
       .lt("checked_at", weekAgo);
-    let current = new Set((thisWeekData ?? []).map((e: { install_fingerprint: string }) => e.install_fingerprint).filter(Boolean)).size;
-    let previous = new Set((lastWeekData ?? []).map((e: { install_fingerprint: string }) => e.install_fingerprint).filter(Boolean)).size;
-    // Fallback: if no fingerprinted data, count distinct sessions from telemetry_events
-    if (current === 0) {
-      const { data: thisWeekSessions } = await supabase
-        .from("telemetry_events")
-        .select("session_id")
-        .eq("event_type", "skill_run")
-        .eq("source", "live")
-        .gte("event_timestamp", weekAgo);
-      const { data: lastWeekSessions } = await supabase
-        .from("telemetry_events")
-        .select("session_id")
-        .eq("event_type", "skill_run")
-        .eq("source", "live")
-        .gte("event_timestamp", twoWeeksAgo)
-        .lt("event_timestamp", weekAgo);
-      current = new Set((thisWeekSessions ?? []).map((e: { session_id: string }) => e.session_id)).size;
-      previous = new Set((lastWeekSessions ?? []).map((e: { session_id: string }) => e.session_id)).size;
-    }
+    const current = thisWeek ?? 0;
+    const previous = lastWeek ?? 0;
     const changePct = previous > 0
       ? Math.round(((current - previous) / previous) * 100)
       : 0;
-    return new Response(
-      JSON.stringify({
-        weekly_active: current,
-        change_pct: changePct,
-      }),
-      {
-        status: 200,
-        headers: {
-          "Content-Type": "application/json",
-          "Cache-Control": "public, max-age=3600", // 1 hour cache
-        },
-      }
-    );
+
+    // Top skills (last 7 days)
+    const { data: skillRows } = await supabase
+      .from("telemetry_events")
+      .select("skill")
+      .eq("event_type", "skill_run")
+      .gte("event_timestamp", weekAgo)
+      .not("skill", "is", null)
+      .limit(1000);
+    const skillCounts: Record<string, number> = {};
+    for (const row of skillRows ?? []) {
+      if (row.skill) {
+        skillCounts[row.skill] = (skillCounts[row.skill] ?? 0) + 1;
+      }
+    }
+    const topSkills = Object.entries(skillCounts)
+      .sort(([, a], [, b]) => b - a)
+      .slice(0, 10)
+      .map(([skill, count]) => ({ skill, count }));
+
+    // Crash clusters (top 5)
+    const { data: crashes } = await supabase
+      .from("crash_clusters")
+      .select("error_class, gstack_version, total_occurrences, identified_users")
+      .limit(5);
+
+    // Version distribution (last 7 days)
+    const versionCounts: Record<string, number> = {};
+    const { data: versionRows } = await supabase
+      .from("telemetry_events")
+      .select("gstack_version")
+      .eq("event_type", "skill_run")
+      .gte("event_timestamp", weekAgo)
+      .limit(1000);
+    for (const row of versionRows ?? []) {
+      if (row.gstack_version) {
+        versionCounts[row.gstack_version] = (versionCounts[row.gstack_version] ?? 0) + 1;
+      }
+    }
+    const topVersions = Object.entries(versionCounts)
+      .sort(([, a], [, b]) => b - a)
+      .slice(0, 5)
+      .map(([version, count]) => ({ version, count }));
+
+    const result = {
+      weekly_active: current,
+      change_pct: changePct,
+      top_skills: topSkills,
+      crashes: crashes ?? [],
+      versions: topVersions,
+    };
+
+    // Upsert cache
+    await supabase
+      .from("community_pulse_cache")
+      .upsert({
+        id: 1,
+        data: result,
+        refreshed_at: new Date().toISOString(),
+      });
+
+    return new Response(JSON.stringify(result), {
+      status: 200,
+      headers: {
+        "Content-Type": "application/json",
+        "Cache-Control": "public, max-age=3600",
+      },
+    });
   } catch {
     return new Response(
-      JSON.stringify({ weekly_active: 0, change_pct: 0 }),
+      JSON.stringify({ weekly_active: 0, change_pct: 0, top_skills: [], crashes: [], versions: [] }),
       {
         status: 200,
         headers: { "Content-Type": "application/json" },
002_tighten_rls.sql (new file)  +36
@@ -0,0 +1,36 @@
-- 002_tighten_rls.sql
-- Lock down read/update access. Keep INSERT policies so old clients can still
-- write via PostgREST while new clients migrate to edge functions.

-- Drop all SELECT policies (anon key should not read telemetry data)
DROP POLICY IF EXISTS "anon_select" ON telemetry_events;
DROP POLICY IF EXISTS "anon_select" ON installations;
DROP POLICY IF EXISTS "anon_select" ON update_checks;

-- Drop dangerous UPDATE policy (was unrestricted on all columns)
DROP POLICY IF EXISTS "anon_update_last_seen" ON installations;

-- Keep INSERT policies — old clients (pre-v0.11.16) still POST directly to
-- PostgREST. These will be dropped in a future migration once adoption of
-- edge-function-based sync is widespread.
-- (anon_insert_only ON telemetry_events — kept)
-- (anon_insert_only ON installations — kept)
-- (anon_insert_only ON update_checks — kept)

-- Explicitly revoke view access (belt-and-suspenders)
REVOKE SELECT ON crash_clusters FROM anon;
REVOKE SELECT ON skill_sequences FROM anon;

-- Keep error_message and failed_step columns (exist on live schema, may be
-- used in future). Add them to the migration record so repo matches live.
ALTER TABLE telemetry_events ADD COLUMN IF NOT EXISTS error_message TEXT;
ALTER TABLE telemetry_events ADD COLUMN IF NOT EXISTS failed_step TEXT;

-- Cache table for community-pulse aggregation (prevents DoS via repeated queries)
CREATE TABLE IF NOT EXISTS community_pulse_cache (
  id INTEGER PRIMARY KEY DEFAULT 1,
  data JSONB NOT NULL DEFAULT '{}'::jsonb,
  refreshed_at TIMESTAMPTZ DEFAULT now()
);
ALTER TABLE community_pulse_cache ENABLE ROW LEVEL SECURITY;
-- No anon policies — only service_role_key (used by edge functions) can read/write
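
A quick client-side sanity check of the lockdown (a sketch, assuming anon has no remaining SELECT path to the new table; the repository's real check is the verify-rls.sh script below):

```ts
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";

const anon = createClient(
  "https://frugpmstpnojnhfyimgv.supabase.co",
  "sb_publishable_tR4i6cyMIrYTE3s6OyHGHw_ppx2p6WK", // public anon key from config.sh
);
// With the migration applied this should return no rows or a permission
// error, never cached dashboard data.
const { data, error } = await anon.from("community_pulse_cache").select("data");
console.log({ rows: data?.length ?? 0, error: error?.message });
```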
supabase/verify-rls.sh (new file)  +143
@@ -0,0 +1,143 @@
#!/usr/bin/env bash
# verify-rls.sh — smoke test after deploying 002_tighten_rls.sql
#
# Verifies:
# - SELECT denied on all tables and views (security fix)
# - UPDATE denied on installations (security fix)
# - INSERT still allowed on tables (kept for old client compat)
#
# Run manually after deploying the migration:
# bash supabase/verify-rls.sh
set -uo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
. "$SCRIPT_DIR/config.sh"
URL="$GSTACK_SUPABASE_URL"
KEY="$GSTACK_SUPABASE_ANON_KEY"
PASS=0
FAIL=0
TOTAL=0
# check <description> <expected> <method> <path> [data]
# expected: "deny" (want 401/403) or "allow" (want 200/201)
check() {
local desc="$1"
local expected="$2"
local method="$3"
local path="$4"
local data="${5:-}"
TOTAL=$(( TOTAL + 1 ))
local resp_file
resp_file="$(mktemp 2>/dev/null || echo "/tmp/verify-rls-$$-$TOTAL")"
local http_code
if [ "$method" = "GET" ]; then
http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
"${URL}/rest/v1/${path}" \
-H "apikey: ${KEY}" \
-H "Authorization: Bearer ${KEY}" \
-H "Content-Type: application/json" 2>/dev/null)" || http_code="000"
elif [ "$method" = "POST" ]; then
http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
-X POST "${URL}/rest/v1/${path}" \
-H "apikey: ${KEY}" \
-H "Authorization: Bearer ${KEY}" \
-H "Content-Type: application/json" \
-H "Prefer: return=minimal" \
-d "$data" 2>/dev/null)" || http_code="000"
elif [ "$method" = "PATCH" ]; then
http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
-X PATCH "${URL}/rest/v1/${path}" \
-H "apikey: ${KEY}" \
-H "Authorization: Bearer ${KEY}" \
-H "Content-Type: application/json" \
-d "$data" 2>/dev/null)" || http_code="000"
fi
# Trim to last 3 chars (the HTTP code) in case of concatenation
http_code="$(echo "$http_code" | grep -oE '[0-9]{3}$' || echo "000")"
if [ "$expected" = "deny" ]; then
case "$http_code" in
401|403)
echo " PASS $desc (HTTP $http_code, denied)"
PASS=$(( PASS + 1 )) ;;
200|204)
# For GETs: 200+empty means RLS filtering (pass). 200+data means leak (fail).
# For PATCH: 204 means no rows matched — could be RLS or missing row.
if [ "$method" = "GET" ]; then
body="$(cat "$resp_file" 2>/dev/null || echo "")"
if [ "$body" = "[]" ] || [ -z "$body" ]; then
echo " PASS $desc (HTTP $http_code, empty — RLS filtering)"
PASS=$(( PASS + 1 ))
else
echo " FAIL $desc (HTTP $http_code, got data!)"
FAIL=$(( FAIL + 1 ))
fi
else
# PATCH 204 = no rows affected. RLS blocked the update or row doesn't exist.
# Either way, the attacker can't modify data.
echo " PASS $desc (HTTP $http_code, no rows affected)"
PASS=$(( PASS + 1 ))
fi ;;
000)
echo " WARN $desc (connection failed)"
FAIL=$(( FAIL + 1 )) ;;
*)
echo " WARN $desc (HTTP $http_code — unexpected)"
FAIL=$(( FAIL + 1 )) ;;
esac
elif [ "$expected" = "allow" ]; then
case "$http_code" in
200|201|204|409)
# 409 = conflict (duplicate key) — INSERT policy works, row already exists
echo " PASS $desc (HTTP $http_code, allowed as expected)"
PASS=$(( PASS + 1 )) ;;
401|403)
echo " FAIL $desc (HTTP $http_code, denied — should be allowed)"
FAIL=$(( FAIL + 1 )) ;;
000)
echo " WARN $desc (connection failed)"
FAIL=$(( FAIL + 1 )) ;;
*)
echo " WARN $desc (HTTP $http_code — unexpected)"
FAIL=$(( FAIL + 1 )) ;;
esac
fi
rm -f "$resp_file" 2>/dev/null || true
}
echo "RLS Verification (after 002_tighten_rls.sql)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Read denial (should be blocked):"
check "SELECT telemetry_events" deny GET "telemetry_events?select=*&limit=1"
check "SELECT installations" deny GET "installations?select=*&limit=1"
check "SELECT update_checks" deny GET "update_checks?select=*&limit=1"
check "SELECT crash_clusters" deny GET "crash_clusters?select=*&limit=1"
check "SELECT skill_sequences" deny GET "skill_sequences?select=skill_a&limit=1"
echo ""
echo "Update denial (should be blocked):"
check "UPDATE installations" deny PATCH "installations?installation_id=eq.test_verify_rls" '{"gstack_version":"hacked"}'
echo ""
echo "Insert allowed (kept for old client compat):"
check "INSERT telemetry_events" allow POST "telemetry_events" '{"gstack_version":"verify_rls_test","os":"test","event_timestamp":"2026-01-01T00:00:00Z","outcome":"test"}'
check "INSERT update_checks" allow POST "update_checks" '{"gstack_version":"verify_rls_test","os":"test"}'
check "INSERT installations" allow POST "installations" '{"installation_id":"verify_rls_test"}'
echo ""
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Results: $PASS passed, $FAIL failed (of $TOTAL checks)"
if [ "$FAIL" -gt 0 ]; then
echo "VERDICT: FAIL"
exit 1
else
echo "VERDICT: PASS — reads/updates blocked, inserts allowed"
exit 0
fi
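
The "insert allowed" checks exercise the legacy write path kept for pre-v0.11.16 clients. The same request in TypeScript, as a sketch of what those old clients send (payload values are the script's own test placeholders):

```ts
// Direct PostgREST insert: the compat path the kept INSERT policies still permit.
const url = "https://frugpmstpnojnhfyimgv.supabase.co/rest/v1/telemetry_events";
const key = "sb_publishable_tR4i6cyMIrYTE3s6OyHGHw_ppx2p6WK";
const res = await fetch(url, {
  method: "POST",
  headers: {
    apikey: key,
    Authorization: `Bearer ${key}`,
    "Content-Type": "application/json",
    Prefer: "return=minimal",
  },
  body: JSON.stringify({
    gstack_version: "verify_rls_test",
    os: "test",
    event_timestamp: "2026-01-01T00:00:00Z",
    outcome: "test",
  }),
});
console.log(res.status); // expect 201 while the compat policies remain
```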