Merge branch 'main' into garrytan/team-supabase-store

Brings in 55 commits from main (v0.12.x–v0.13.5.0): Factory Droid compat,
prompt injection defense, user sovereignty, security audit, design binary,
skill namespacing, modular resolvers, Chrome sidebar, and more.

Conflict resolution:
- .agents/ SKILL.md files: deleted (main moved to .factory/)
- 8 .tmpl templates: accepted main (new features: CDP mode, design tools,
  global retro, parallelization, distribution checks, plan audits)
- scripts/gen-skill-docs.ts: accepted main's modular resolver refactor
- test/helpers/session-runner.ts: accepted main + layered back CostEntry
  tracking from team branch
- Generated SKILL.md files: regenerated via bun run gen:skill-docs
- Updated tests to match main's gstack-slug output (2 lines, no PROJECTS_DIR)
  and review log mechanism (gstack-review-log, not $BRANCH.jsonl)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Garry Tan
2026-03-29 15:12:12 -07:00
267 changed files with 60292 additions and 12207 deletions
+2 -4
View File
@@ -1,10 +1,8 @@
#!/usr/bin/env bash
# Supabase project config for gstack telemetry
# These are PUBLIC keys — safe to commit (like Firebase public config).
# RLS policies restrict what the anon/publishable key can do (INSERT only).
# RLS denies all access to the anon key. All reads and writes go through
# edge functions (which use SUPABASE_SERVICE_ROLE_KEY server-side).
GSTACK_SUPABASE_URL="https://frugpmstpnojnhfyimgv.supabase.co"
GSTACK_SUPABASE_ANON_KEY="sb_publishable_tR4i6cyMIrYTE3s6OyHGHw_ppx2p6WK"
# Telemetry ingest endpoint (Data API)
GSTACK_TELEMETRY_ENDPOINT="${GSTACK_SUPABASE_URL}/rest/v1"
+97 -18
View File
@@ -1,9 +1,12 @@
// gstack community-pulse edge function
// Returns weekly active installation count for preamble display.
// Cached for 1 hour via Cache-Control header.
// Returns aggregated community stats for the dashboard:
// weekly active count, top skills, crash clusters, version distribution.
// Uses server-side cache (community_pulse_cache table) to prevent DoS.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
const CACHE_MAX_AGE_MS = 60 * 60 * 1000; // 1 hour
Deno.serve(async () => {
const supabase = createClient(
Deno.env.get("SUPABASE_URL") ?? "",
@@ -11,17 +14,37 @@ Deno.serve(async () => {
);
try {
// Count unique update checks in the last 7 days (install base proxy)
// Check cache first
const { data: cached } = await supabase
.from("community_pulse_cache")
.select("data, refreshed_at")
.eq("id", 1)
.single();
if (cached?.refreshed_at) {
const age = Date.now() - new Date(cached.refreshed_at).getTime();
if (age < CACHE_MAX_AGE_MS) {
return new Response(JSON.stringify(cached.data), {
status: 200,
headers: {
"Content-Type": "application/json",
"Cache-Control": "public, max-age=3600",
},
});
}
}
// Cache is stale or missing — recompute
const weekAgo = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString();
const twoWeeksAgo = new Date(Date.now() - 14 * 24 * 60 * 60 * 1000).toISOString();
// This week's active
// Weekly active (update checks this week)
const { count: thisWeek } = await supabase
.from("update_checks")
.select("*", { count: "exact", head: true })
.gte("checked_at", weekAgo);
// Last week's active (for change %)
// Last week (for change %)
const { count: lastWeek } = await supabase
.from("update_checks")
.select("*", { count: "exact", head: true })
@@ -34,22 +57,78 @@ Deno.serve(async () => {
? Math.round(((current - previous) / previous) * 100)
: 0;
return new Response(
JSON.stringify({
weekly_active: current,
change_pct: changePct,
}),
{
status: 200,
headers: {
"Content-Type": "application/json",
"Cache-Control": "public, max-age=3600", // 1 hour cache
},
// Top skills (last 7 days)
const { data: skillRows } = await supabase
.from("telemetry_events")
.select("skill")
.eq("event_type", "skill_run")
.gte("event_timestamp", weekAgo)
.not("skill", "is", null)
.limit(1000);
const skillCounts: Record<string, number> = {};
for (const row of skillRows ?? []) {
if (row.skill) {
skillCounts[row.skill] = (skillCounts[row.skill] ?? 0) + 1;
}
);
}
const topSkills = Object.entries(skillCounts)
.sort(([, a], [, b]) => b - a)
.slice(0, 10)
.map(([skill, count]) => ({ skill, count }));
// Crash clusters (top 5)
const { data: crashes } = await supabase
.from("crash_clusters")
.select("error_class, gstack_version, total_occurrences, identified_users")
.limit(5);
// Version distribution (last 7 days)
const versionCounts: Record<string, number> = {};
const { data: versionRows } = await supabase
.from("telemetry_events")
.select("gstack_version")
.eq("event_type", "skill_run")
.gte("event_timestamp", weekAgo)
.limit(1000);
for (const row of versionRows ?? []) {
if (row.gstack_version) {
versionCounts[row.gstack_version] = (versionCounts[row.gstack_version] ?? 0) + 1;
}
}
const topVersions = Object.entries(versionCounts)
.sort(([, a], [, b]) => b - a)
.slice(0, 5)
.map(([version, count]) => ({ version, count }));
const result = {
weekly_active: current,
change_pct: changePct,
top_skills: topSkills,
crashes: crashes ?? [],
versions: topVersions,
};
// Upsert cache
await supabase
.from("community_pulse_cache")
.upsert({
id: 1,
data: result,
refreshed_at: new Date().toISOString(),
});
return new Response(JSON.stringify(result), {
status: 200,
headers: {
"Content-Type": "application/json",
"Cache-Control": "public, max-age=3600",
},
});
} catch {
return new Response(
JSON.stringify({ weekly_active: 0, change_pct: 0 }),
JSON.stringify({ weekly_active: 0, change_pct: 0, top_skills: [], crashes: [], versions: [] }),
{
status: 200,
headers: { "Content-Type": "application/json" },
+36
View File
@@ -0,0 +1,36 @@
-- 002_tighten_rls.sql
-- Lock down read/update access. Keep INSERT policies so old clients can still
-- write via PostgREST while new clients migrate to edge functions.

-- Drop all SELECT policies (anon key should not read telemetry data)
DROP POLICY IF EXISTS "anon_select" ON telemetry_events;
DROP POLICY IF EXISTS "anon_select" ON installations;
DROP POLICY IF EXISTS "anon_select" ON update_checks;

-- Drop dangerous UPDATE policy (was unrestricted on all columns)
DROP POLICY IF EXISTS "anon_update_last_seen" ON installations;

-- Keep INSERT policies — old clients (pre-v0.11.16) still POST directly to
-- PostgREST. These will be dropped in a future migration once adoption of
-- edge-function-based sync is widespread.
-- (anon_insert_only ON telemetry_events — kept)
-- (anon_insert_only ON installations — kept)
-- (anon_insert_only ON update_checks — kept)

-- Explicitly revoke view access (belt-and-suspenders)
REVOKE SELECT ON crash_clusters FROM anon;
REVOKE SELECT ON skill_sequences FROM anon;

-- Keep error_message and failed_step columns (exist on live schema, may be
-- used in future). Add them to the migration record so repo matches live.
ALTER TABLE telemetry_events ADD COLUMN IF NOT EXISTS error_message TEXT;
ALTER TABLE telemetry_events ADD COLUMN IF NOT EXISTS failed_step TEXT;

-- Cache table for community-pulse aggregation (prevents DoS via repeated queries).
-- Singleton table: the edge function always upserts id = 1, so CHECK (id = 1)
-- enforces at most one row at the schema level. (No-op on deployments where the
-- table already exists, since CREATE TABLE IF NOT EXISTS skips them.)
CREATE TABLE IF NOT EXISTS community_pulse_cache (
  id INTEGER PRIMARY KEY DEFAULT 1 CHECK (id = 1),
  data JSONB NOT NULL DEFAULT '{}'::jsonb,
  refreshed_at TIMESTAMPTZ DEFAULT now()
);
ALTER TABLE community_pulse_cache ENABLE ROW LEVEL SECURITY;
-- No anon policies — only service_role_key (used by edge functions) can read/write
+143
View File
@@ -0,0 +1,143 @@
#!/usr/bin/env bash
# verify-rls.sh — smoke test after deploying 002_tighten_rls.sql
#
# Verifies:
# - SELECT denied on all tables and views (security fix)
# - UPDATE denied on installations (security fix)
# - INSERT still allowed on tables (kept for old client compat)
#
# Run manually after deploying the migration:
# bash supabase/verify-rls.sh

# -u: treat unset variables as errors; pipefail: don't mask pipeline failures.
# Deliberately no -e: each request's failure is handled per-check via the
# `|| http_code="000"` fallbacks in check(), rather than aborting the whole run.
set -uo pipefail

# Resolve sibling config.sh regardless of the caller's working directory.
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
. "$SCRIPT_DIR/config.sh"

# Use the public anon/publishable credentials — i.e. exactly what an
# external attacker would have.
URL="$GSTACK_SUPABASE_URL"
KEY="$GSTACK_SUPABASE_ANON_KEY"

# Result counters, mutated by check() below.
PASS=0
FAIL=0
TOTAL=0
# check <description> <expected> <method> <path> [data]
# expected: "deny" (want 401/403) or "allow" (want 200/201)
#
# Issues one anonymous-key request against the PostgREST API and scores the
# HTTP result, updating the PASS/FAIL/TOTAL globals and printing one line.
# Note: connection failures (code 000) print as WARN but count toward FAIL.
check() {
  local desc="$1"
  local expected="$2"
  local method="$3"
  local path="$4"
  local data="${5:-}"
  # Fix: 'body' was previously assigned without 'local', leaking a global
  # variable out of the function on every ambiguous GET-200 path.
  local body=""
  TOTAL=$(( TOTAL + 1 ))
  local resp_file
  resp_file="$(mktemp 2>/dev/null || echo "/tmp/verify-rls-$$-$TOTAL")"
  local http_code
  if [ "$method" = "GET" ]; then
    http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
      "${URL}/rest/v1/${path}" \
      -H "apikey: ${KEY}" \
      -H "Authorization: Bearer ${KEY}" \
      -H "Content-Type: application/json" 2>/dev/null)" || http_code="000"
  elif [ "$method" = "POST" ]; then
    http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
      -X POST "${URL}/rest/v1/${path}" \
      -H "apikey: ${KEY}" \
      -H "Authorization: Bearer ${KEY}" \
      -H "Content-Type: application/json" \
      -H "Prefer: return=minimal" \
      -d "$data" 2>/dev/null)" || http_code="000"
  elif [ "$method" = "PATCH" ]; then
    http_code="$(curl -s -o "$resp_file" -w '%{http_code}' --max-time 10 \
      -X PATCH "${URL}/rest/v1/${path}" \
      -H "apikey: ${KEY}" \
      -H "Authorization: Bearer ${KEY}" \
      -H "Content-Type: application/json" \
      -d "$data" 2>/dev/null)" || http_code="000"
  fi
  # Trim to last 3 chars (the HTTP code) in case of concatenation
  http_code="$(echo "$http_code" | grep -oE '[0-9]{3}$' || echo "000")"
  if [ "$expected" = "deny" ]; then
    case "$http_code" in
      401|403)
        echo "  PASS $desc (HTTP $http_code, denied)"
        PASS=$(( PASS + 1 )) ;;
      200|204)
        # For GETs: 200+empty means RLS filtering (pass). 200+data means leak (fail).
        # For PATCH: 204 means no rows matched — could be RLS or missing row.
        if [ "$method" = "GET" ]; then
          body="$(cat "$resp_file" 2>/dev/null || echo "")"
          if [ "$body" = "[]" ] || [ -z "$body" ]; then
            echo "  PASS $desc (HTTP $http_code, empty — RLS filtering)"
            PASS=$(( PASS + 1 ))
          else
            echo "  FAIL $desc (HTTP $http_code, got data!)"
            FAIL=$(( FAIL + 1 ))
          fi
        else
          # PATCH 204 = no rows affected. RLS blocked the update or row doesn't exist.
          # Either way, the attacker can't modify data.
          echo "  PASS $desc (HTTP $http_code, no rows affected)"
          PASS=$(( PASS + 1 ))
        fi ;;
      000)
        echo "  WARN $desc (connection failed)"
        FAIL=$(( FAIL + 1 )) ;;
      *)
        echo "  WARN $desc (HTTP $http_code — unexpected)"
        FAIL=$(( FAIL + 1 )) ;;
    esac
  elif [ "$expected" = "allow" ]; then
    case "$http_code" in
      200|201|204|409)
        # 409 = conflict (duplicate key) — INSERT policy works, row already exists
        echo "  PASS $desc (HTTP $http_code, allowed as expected)"
        PASS=$(( PASS + 1 )) ;;
      401|403)
        echo "  FAIL $desc (HTTP $http_code, denied — should be allowed)"
        FAIL=$(( FAIL + 1 )) ;;
      000)
        echo "  WARN $desc (connection failed)"
        FAIL=$(( FAIL + 1 )) ;;
      *)
        echo "  WARN $desc (HTTP $http_code — unexpected)"
        FAIL=$(( FAIL + 1 )) ;;
    esac
  fi
  rm -f "$resp_file" 2>/dev/null || true
}
echo "RLS Verification (after 002_tighten_rls.sql)"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Reads must be blocked for the anon key on every table and view.
echo "Read denial (should be blocked):"
check "SELECT telemetry_events" deny GET "telemetry_events?select=*&limit=1"
check "SELECT installations" deny GET "installations?select=*&limit=1"
check "SELECT update_checks" deny GET "update_checks?select=*&limit=1"
check "SELECT crash_clusters" deny GET "crash_clusters?select=*&limit=1"
check "SELECT skill_sequences" deny GET "skill_sequences?select=skill_a&limit=1"
echo ""

# The unrestricted anon UPDATE policy was dropped by the migration.
echo "Update denial (should be blocked):"
check "UPDATE installations" deny PATCH "installations?installation_id=eq.test_verify_rls" '{"gstack_version":"hacked"}'
echo ""

# INSERT policies are intentionally kept for pre-edge-function clients.
echo "Insert allowed (kept for old client compat):"
check "INSERT telemetry_events" allow POST "telemetry_events" '{"gstack_version":"verify_rls_test","os":"test","event_timestamp":"2026-01-01T00:00:00Z","outcome":"test"}'
check "INSERT update_checks" allow POST "update_checks" '{"gstack_version":"verify_rls_test","os":"test"}'
check "INSERT installations" allow POST "installations" '{"installation_id":"verify_rls_test"}'
echo ""

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Results: $PASS passed, $FAIL failed (of $TOTAL checks)"

# Exit non-zero on any failure so the script can gate CI/deploy steps.
if [ "$FAIL" -gt 0 ]; then
  echo "VERDICT: FAIL"
  exit 1
else
  echo "VERDICT: PASS — reads/updates blocked, inserts allowed"
  exit 0
fi