Merge branch 'main' into garrytan/team-supabase-store

Resolved 4 conflicts:
- scripts/gen-skill-docs.ts: kept ARTIFACT_SETUP + added main's new
  resolvers (SPEC_REVIEW_LOOP, DESIGN_SKETCH, BENEFITS_FROM,
  CODEX_REVIEW_STEP). Updated codex review-log to use new paths.
- ship/SKILL.md.tmpl: adopted {{CODEX_REVIEW_STEP}} macro from main
- test/skill-e2e.test.ts: added main's new E2E tests (office-hours
  spec review, plan-ceo benefits-from) + kept our E2E isolation cleanup

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Garry Tan
2026-03-21 09:29:05 -07:00
96 changed files with 17868 additions and 342 deletions
+10
View File
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
# Supabase project config for gstack telemetry
# Presumably sourced by the gstack client scripts (e.g. bin/gstack-telemetry-sync) — confirm.
# These are PUBLIC keys — safe to commit (like Firebase public config).
# RLS policies restrict what the anon/publishable key can do (INSERT only).

# Project URL and publishable (anon) API key for the telemetry project.
GSTACK_SUPABASE_URL="https://frugpmstpnojnhfyimgv.supabase.co"
GSTACK_SUPABASE_ANON_KEY="sb_publishable_tR4i6cyMIrYTE3s6OyHGHw_ppx2p6WK"

# Telemetry ingest endpoint (Data API)
# NOTE: ${GSTACK_SUPABASE_URL} expands only when this file is sourced by a
# shell; it will not expand if the file is read as plain key=value config.
GSTACK_TELEMETRY_ENDPOINT="${GSTACK_SUPABASE_URL}/rest/v1"
@@ -0,0 +1,59 @@
// gstack community-pulse edge function
// Returns weekly active installation count for preamble display.
// Cached for 1 hour via Cache-Control header.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
// Serves every request (method-agnostic): counts update-check pings in the
// trailing 7-day window as a proxy for weekly active installs, plus the
// prior 7-day window to compute a week-over-week change percentage.
Deno.serve(async () => {
  const supabase = createClient(
    Deno.env.get("SUPABASE_URL") ?? "",
    Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? ""
  );

  // Count update_checks rows with checked_at in [fromIso, toIso).
  // Uses a head-only exact count, so no row data crosses the wire.
  const countChecks = async (fromIso: string, toIso?: string): Promise<number> => {
    let query = supabase
      .from("update_checks")
      .select("*", { count: "exact", head: true })
      .gte("checked_at", fromIso);
    if (toIso !== undefined) {
      query = query.lt("checked_at", toIso);
    }
    const { count } = await query;
    return count ?? 0;
  };

  try {
    const DAY_MS = 24 * 60 * 60 * 1000;
    const weekAgo = new Date(Date.now() - 7 * DAY_MS).toISOString();
    const twoWeeksAgo = new Date(Date.now() - 14 * DAY_MS).toISOString();

    const current = await countChecks(weekAgo);               // this week's active
    const previous = await countChecks(twoWeeksAgo, weekAgo); // last week's active

    // Week-over-week change; 0 when there is no prior-week baseline.
    const changePct =
      previous > 0 ? Math.round(((current - previous) / previous) * 100) : 0;

    return new Response(
      JSON.stringify({
        weekly_active: current,
        change_pct: changePct,
      }),
      {
        status: 200,
        headers: {
          "Content-Type": "application/json",
          "Cache-Control": "public, max-age=3600", // 1 hour cache
        },
      }
    );
  } catch {
    // On any failure, report zeros rather than an error (and skip caching).
    return new Response(JSON.stringify({ weekly_active: 0, change_pct: 0 }), {
      status: 200,
      headers: { "Content-Type": "application/json" },
    });
  }
});
@@ -0,0 +1,135 @@
// gstack telemetry-ingest edge function
// Validates and inserts a batch of telemetry events.
// Called by bin/gstack-telemetry-sync.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
// Wire format of one telemetry event as posted by bin/gstack-telemetry-sync.
// Field names are compact wire names; the handler below maps them to the
// telemetry_events columns (ts -> event_timestamp, sessions ->
// concurrent_sessions) and clamps string lengths before insert.
interface TelemetryEvent {
  v: number;                // schema version; only v === 1 is accepted
  ts: string;               // client event timestamp; stored as event_timestamp
  event_type: string;       // must be skill_run | upgrade_prompted | upgrade_completed
  skill: string;            // skill name; stored as null when falsy
  session_id?: string;
  gstack_version: string;   // required — event is skipped if missing
  os: string;               // required — event is skipped if missing
  arch?: string;
  duration_s?: number;      // only accepted when a number; otherwise null
  outcome: string;          // required — event is skipped if missing
  error_class?: string;
  used_browse?: boolean;    // stored true only on exact boolean true
  sessions?: number;        // stored as concurrent_sessions; defaults to 1
  installation_id?: string; // opt-in id; also drives installations upsert
}
// Hard limits for a single ingest request.
const MAX_BATCH_SIZE = 100;
const MAX_PAYLOAD_BYTES = 50_000; // 50KB

// Accepts a POST with one TelemetryEvent or an array of them, validates each
// event, bulk-inserts the valid ones into telemetry_events, and refreshes
// installations.last_seen for every installation id seen in the batch.
Deno.serve(async (req) => {
  if (req.method !== "POST") {
    return new Response("POST required", { status: 405 });
  }

  // Best-effort size guard: trusts the declared Content-Length (a missing or
  // understated header slips past; the batch-size cap below still applies).
  const contentLength = parseInt(req.headers.get("content-length") || "0", 10);
  if (contentLength > MAX_PAYLOAD_BYTES) {
    return new Response("Payload too large", { status: 413 });
  }

  try {
    const body = await req.json();
    // A bare object is treated as a batch of one.
    const events: TelemetryEvent[] = Array.isArray(body) ? body : [body];
    if (events.length > MAX_BATCH_SIZE) {
      return new Response(`Batch too large (max ${MAX_BATCH_SIZE})`, { status: 400 });
    }

    const supabase = createClient(
      Deno.env.get("SUPABASE_URL") ?? "",
      Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? ""
    );

    // Allowed event types (hoisted out of the loop — loop-invariant).
    const validTypes = ["skill_run", "upgrade_prompted", "upgrade_completed"];

    // Validate and transform events. Malformed events are silently skipped
    // rather than failing the batch, so one bad client entry can't block
    // the rest.
    const rows = [];
    const installationUpserts: Map<string, { version: string; os: string }> = new Map();
    for (const event of events) {
      // Required fields
      if (!event.ts || !event.gstack_version || !event.os || !event.outcome) {
        continue; // skip malformed
      }
      // Validate schema version
      if (event.v !== 1) continue;
      // Validate event_type
      if (!validTypes.includes(event.event_type)) continue;
      // Fix: skip events whose timestamp won't parse — previously a single
      // bad `ts` failed the whole bulk INSERT (event_timestamp is a NOT NULL
      // TIMESTAMPTZ) and turned the request into a 500.
      if (Number.isNaN(Date.parse(event.ts))) continue;

      rows.push({
        schema_version: event.v,
        event_type: event.event_type,
        // Clamp free-form strings to the column budgets.
        gstack_version: String(event.gstack_version).slice(0, 20),
        os: String(event.os).slice(0, 20),
        arch: event.arch ? String(event.arch).slice(0, 20) : null,
        event_timestamp: event.ts,
        skill: event.skill ? String(event.skill).slice(0, 50) : null,
        session_id: event.session_id ? String(event.session_id).slice(0, 50) : null,
        duration_s: typeof event.duration_s === "number" ? event.duration_s : null,
        outcome: String(event.outcome).slice(0, 20),
        error_class: event.error_class ? String(event.error_class).slice(0, 100) : null,
        used_browse: event.used_browse === true,
        concurrent_sessions: typeof event.sessions === "number" ? event.sessions : 1,
        installation_id: event.installation_id ? String(event.installation_id).slice(0, 64) : null,
      });

      // Track installations for upsert (last event in the batch wins).
      if (event.installation_id) {
        installationUpserts.set(event.installation_id, {
          version: event.gstack_version,
          os: event.os,
        });
      }
    }

    if (rows.length === 0) {
      return new Response(JSON.stringify({ inserted: 0 }), {
        status: 200,
        headers: { "Content-Type": "application/json" },
      });
    }

    // Insert events
    const { error: insertError } = await supabase
      .from("telemetry_events")
      .insert(rows);
    if (insertError) {
      return new Response(JSON.stringify({ error: insertError.message }), {
        status: 500,
        headers: { "Content-Type": "application/json" },
      });
    }

    // Upsert installations (update last_seen). The rows are independent, so
    // run the upserts in parallel instead of awaiting them one at a time.
    // As before, upsert errors are ignored — the events were already stored.
    const nowIso = new Date().toISOString();
    await Promise.all(
      Array.from(installationUpserts, ([id, data]) =>
        supabase
          .from("installations")
          .upsert(
            {
              installation_id: id,
              last_seen: nowIso,
              gstack_version: data.version,
              os: data.os,
            },
            { onConflict: "installation_id" }
          )
      )
    );

    return new Response(JSON.stringify({ inserted: rows.length }), {
      status: 200,
      headers: { "Content-Type": "application/json" },
    });
  } catch {
    // Unparseable JSON or any other unexpected failure.
    return new Response("Invalid request", { status: 400 });
  }
});
+37
View File
@@ -0,0 +1,37 @@
// gstack update-check edge function
// Logs an install ping and returns the current latest version.
// Called by bin/gstack-update-check as a parallel background request.
import { createClient } from "https://esm.sh/@supabase/supabase-js@2";
// Latest released version to advertise; overridable via env for rollouts.
const CURRENT_VERSION = Deno.env.get("GSTACK_CURRENT_VERSION") || "0.6.4.1";

// Logs an install ping and returns the current latest version. Every code
// path answers 200 with the plain version string — the client only needs
// the version, and logging is strictly best-effort.
Deno.serve(async (req) => {
  const versionResponse = () => new Response(CURRENT_VERSION, { status: 200 });

  if (req.method !== "POST") {
    return versionResponse();
  }

  try {
    const { version, os } = await req.json();
    if (!version || !os) {
      return versionResponse();
    }

    const supabase = createClient(
      Deno.env.get("SUPABASE_URL") ?? "",
      Deno.env.get("SUPABASE_SERVICE_ROLE_KEY") ?? ""
    );

    // Record the ping before responding (awaited; any DB error that throws
    // is swallowed by the catch below).
    await supabase.from("update_checks").insert({
      gstack_version: String(version).slice(0, 20),
      os: String(os).slice(0, 20),
    });

    return versionResponse();
  } catch {
    // Bad JSON or logging failure — still hand back the version.
    return versionResponse();
  }
});
+89
View File
@@ -0,0 +1,89 @@
-- gstack telemetry schema
-- Tables for tracking usage, installations, and update checks.
-- Populated by the telemetry-ingest and update-check edge functions.

-- Main telemetry events (skill runs, upgrades)
-- One row per event accepted by telemetry-ingest; that function clamps the
-- free-form TEXT fields (slice(0, N)) before insert.
CREATE TABLE telemetry_events (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  received_at TIMESTAMPTZ DEFAULT now(),     -- server-side arrival time
  schema_version INTEGER NOT NULL DEFAULT 1, -- wire-format version (event.v)
  event_type TEXT NOT NULL DEFAULT 'skill_run',
  gstack_version TEXT NOT NULL,
  os TEXT NOT NULL,
  arch TEXT,
  event_timestamp TIMESTAMPTZ NOT NULL,      -- client-reported time (event.ts)
  skill TEXT,
  session_id TEXT,
  duration_s NUMERIC,
  outcome TEXT NOT NULL,                     -- 'error' rows feed crash_clusters
  error_class TEXT,
  used_browse BOOLEAN DEFAULT false,
  concurrent_sessions INTEGER DEFAULT 1,
  installation_id TEXT -- nullable, only for "community" tier
);

-- Index for skill_sequences view performance
CREATE INDEX idx_telemetry_session_ts ON telemetry_events (session_id, event_timestamp);

-- Index for crash clustering
-- Partial index: only error rows, matching the crash_clusters WHERE clause.
CREATE INDEX idx_telemetry_error ON telemetry_events (error_class, gstack_version) WHERE outcome = 'error';
-- Retention tracking per installation
-- One row per opted-in installation_id; telemetry-ingest upserts these,
-- bumping last_seen (and version/os) on every accepted batch carrying the id.
CREATE TABLE installations (
  installation_id TEXT PRIMARY KEY,
  first_seen TIMESTAMPTZ DEFAULT now(),
  last_seen TIMESTAMPTZ DEFAULT now(),
  gstack_version TEXT, -- most recently reported version
  os TEXT
);
-- Install pings from update checks
-- One row per POST handled by the update-check edge function; the
-- community-pulse function counts rows per trailing 7-day window as a
-- weekly-active-install proxy.
CREATE TABLE update_checks (
  id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
  checked_at TIMESTAMPTZ DEFAULT now(),
  gstack_version TEXT NOT NULL,
  os TEXT NOT NULL
);

-- Index for community-pulse: both of its count queries range-scan
-- checked_at (gte/lt), and this table only ever grows, so without an
-- index every pulse request degrades into a full sequential scan.
CREATE INDEX idx_update_checks_checked_at ON update_checks (checked_at);
-- RLS: anon key can INSERT and SELECT (all telemetry data is anonymous)
-- NOTE(review): these policies have no TO clause, so they apply to every
-- role. The client config comment describes the publishable key as
-- "INSERT only", yet SELECT is granted on all three tables (and UPDATE on
-- installations) — confirm exposing reads to the anon key is intentional.
-- (The edge functions use the service-role key, which bypasses RLS.)
ALTER TABLE telemetry_events ENABLE ROW LEVEL SECURITY;
CREATE POLICY "anon_insert_only" ON telemetry_events FOR INSERT WITH CHECK (true);
CREATE POLICY "anon_select" ON telemetry_events FOR SELECT USING (true);

ALTER TABLE installations ENABLE ROW LEVEL SECURITY;
CREATE POLICY "anon_insert_only" ON installations FOR INSERT WITH CHECK (true);
CREATE POLICY "anon_select" ON installations FOR SELECT USING (true);
-- Allow upsert (update last_seen)
-- NOTE(review): USING (true) lets any holder of the anon key update ANY
-- installation row, not just its own — verify this is acceptable.
CREATE POLICY "anon_update_last_seen" ON installations FOR UPDATE USING (true) WITH CHECK (true);

ALTER TABLE update_checks ENABLE ROW LEVEL SECURITY;
CREATE POLICY "anon_insert_only" ON update_checks FOR INSERT WITH CHECK (true);
CREATE POLICY "anon_select" ON update_checks FOR SELECT USING (true);
-- Crash clustering view
-- Groups error events by (error_class, gstack_version) to surface the most
-- frequent crashes. COUNT(installation_id) counts only non-NULL ids, so the
-- identified/anonymous split falls out of COUNT(*) vs COUNT(column).
-- Backed by the partial index idx_telemetry_error.
CREATE VIEW crash_clusters AS
SELECT
  error_class,
  gstack_version,
  COUNT(*) as total_occurrences,
  COUNT(DISTINCT installation_id) as identified_users, -- community tier only
  COUNT(*) - COUNT(installation_id) as anonymous_occurrences, -- events without installation_id
  MIN(event_timestamp) as first_seen,
  MAX(event_timestamp) as last_seen
FROM telemetry_events
WHERE outcome = 'error' AND error_class IS NOT NULL
GROUP BY error_class, gstack_version
ORDER BY total_occurrences DESC;
-- Skill sequence co-occurrence view
-- Self-join within a session: pairs each skill_run with every *later*
-- skill_run of a different skill (a.event_timestamp < b.event_timestamp
-- makes the pair ordered, not symmetric). Supported by
-- idx_telemetry_session_ts on (session_id, event_timestamp).
-- Note: `a.skill != b.skill` evaluates to NULL when either skill is NULL,
-- so events without a skill never pair. Pairs seen in fewer than 10
-- distinct sessions are suppressed by the HAVING threshold.
CREATE VIEW skill_sequences AS
SELECT
  a.skill as skill_a,
  b.skill as skill_b,
  COUNT(DISTINCT a.session_id) as co_occurrences
FROM telemetry_events a
JOIN telemetry_events b ON a.session_id = b.session_id
  AND a.skill != b.skill
  AND a.event_timestamp < b.event_timestamp
WHERE a.event_type = 'skill_run' AND b.event_type = 'skill_run'
GROUP BY a.skill, b.skill
HAVING COUNT(DISTINCT a.session_id) >= 10
ORDER BY co_occurrences DESC;