mirror of https://github.com/garrytan/gstack.git (synced 2026-05-07 05:56:41 +02:00)
Merge branch 'main' into garrytan/team-supabase-store
Brings in 55 commits from main (v0.12.x–v0.13.5.0): Factory Droid compat, prompt injection defense, user sovereignty, security audit, design binary, skill namespacing, modular resolvers, Chrome sidebar, and more.

Conflict resolution:
- .agents/ SKILL.md files: deleted (main moved to .factory/)
- 8 .tmpl templates: accepted main (new features: CDP mode, design tools, global retro, parallelization, distribution checks, plan audits)
- scripts/gen-skill-docs.ts: accepted main's modular resolver refactor
- test/helpers/session-runner.ts: accepted main + layered back CostEntry tracking from team branch
- Generated SKILL.md files: regenerated via bun run gen:skill-docs
- Updated tests to match main's gstack-slug output (2 lines, no PROJECTS_DIR) and review log mechanism (gstack-review-log, not $BRANCH.jsonl)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
Executable
+68
@@ -0,0 +1,68 @@
#!/bin/bash
# Launch Chrome with CDP (remote debugging) enabled.
# Usage: chrome-cdp [port]
#
# Chrome refuses --remote-debugging-port on its default data directory.
# We create a separate data dir with a symlink to the user's real profile,
# so Chrome thinks it's non-default but uses the same cookies/extensions.

PORT="${1:-9222}"
CHROME="/Applications/Google Chrome.app/Contents/MacOS/Google Chrome"
REAL_PROFILE="$HOME/Library/Application Support/Google/Chrome"
CDP_DATA_DIR="$HOME/.gstack/cdp-profile/chrome"

if ! [ -f "$CHROME" ]; then
  echo "Chrome not found at $CHROME" >&2
  exit 1
fi

# Check if Chrome is running
if pgrep -f "Google Chrome" >/dev/null 2>&1; then
  echo "Chrome is still running. Quitting..."
  osascript -e 'tell application "Google Chrome" to quit' 2>/dev/null

  # Wait for it to fully exit
  for i in $(seq 1 20); do
    pgrep -f "Google Chrome" >/dev/null 2>&1 || break
    sleep 0.5
  done

  if pgrep -f "Google Chrome" >/dev/null 2>&1; then
    echo "Chrome won't quit. Force-killing..." >&2
    pkill -f "Google Chrome"
    sleep 1
  fi
fi

# Set up CDP data dir with symlinked profile
# Chrome requires a "non-default" data dir for --remote-debugging-port.
# We symlink the real Default profile so cookies/extensions carry over.
mkdir -p "$CDP_DATA_DIR"
if [ -d "$REAL_PROFILE/Default" ] && ! [ -e "$CDP_DATA_DIR/Default" ]; then
  ln -s "$REAL_PROFILE/Default" "$CDP_DATA_DIR/Default"
  echo "Linked real Chrome profile into CDP data dir"
fi
# Also link Local State (contains crypto keys for cookie decryption, etc.)
if [ -f "$REAL_PROFILE/Local State" ] && ! [ -e "$CDP_DATA_DIR/Local State" ]; then
  ln -s "$REAL_PROFILE/Local State" "$CDP_DATA_DIR/Local State"
fi

echo "Launching Chrome with CDP on port $PORT..."
"$CHROME" \
  --remote-debugging-port="$PORT" \
  --user-data-dir="$CDP_DATA_DIR" \
  --restore-last-session &
disown

# Wait for CDP to be available
for i in $(seq 1 30); do
  if curl -s "http://127.0.0.1:$PORT/json/version" >/dev/null 2>&1; then
    echo "CDP ready on port $PORT"
    echo "Run: \$B connect chrome"
    exit 0
  fi
  sleep 1
done

echo "CDP not available after 30s." >&2
exit 1
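
If the wait loop times out, probing the endpoint by hand helps separate a slow launch from a port clash (a minimal sketch, assuming the default port; not part of the diff):

    curl -s "http://127.0.0.1:9222/json/version"
    # prints {"Browser":"Chrome/...","webSocketDebuggerUrl":"ws://127.0.0.1:9222/..."} once CDP is up
    lsof -nP -iTCP:9222 -sTCP:LISTEN
    # if the probe fails, shows which process (if any) already holds the port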
@@ -1,7 +1,7 @@
#!/usr/bin/env bash
# gstack-community-dashboard — community usage stats from Supabase
#
# Queries the Supabase REST API to show community-wide gstack usage:
# Calls the community-pulse edge function for aggregated stats:
# skill popularity, crash clusters, version distribution, retention.
#
# Env overrides (for testing):
@@ -30,51 +30,40 @@ if [ -z "$SUPABASE_URL" ] || [ -z "$ANON_KEY" ]; then
exit 0
fi

# ─── Helper: query Supabase REST API ─────────────────────────
query() {
local table="$1"
local params="${2:-}"
curl -sf --max-time 10 \
"${SUPABASE_URL}/rest/v1/${table}?${params}" \
-H "apikey: ${ANON_KEY}" \
-H "Authorization: Bearer ${ANON_KEY}" \
2>/dev/null || echo "[]"
}
# ─── Fetch aggregated stats from edge function ────────────────
DATA="$(curl -sf --max-time 15 \
"${SUPABASE_URL}/functions/v1/community-pulse" \
-H "apikey: ${ANON_KEY}" \
2>/dev/null || echo "{}")"

echo "gstack community dashboard"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# ─── Weekly active installs ──────────────────────────────────
WEEK_AGO="$(date -u -v-7d +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || date -u -d '7 days ago' +%Y-%m-%dT%H:%M:%SZ 2>/dev/null || echo "")"
if [ -n "$WEEK_AGO" ]; then
PULSE="$(curl -sf --max-time 10 \
"${SUPABASE_URL}/functions/v1/community-pulse" \
-H "Authorization: Bearer ${ANON_KEY}" \
2>/dev/null || echo '{"weekly_active":0}')"
WEEKLY="$(echo "$DATA" | grep -o '"weekly_active":[0-9]*' | grep -o '[0-9]*' || echo "0")"
CHANGE="$(echo "$DATA" | grep -o '"change_pct":[0-9-]*' | grep -o '[0-9-]*' || echo "0")"

WEEKLY="$(echo "$PULSE" | grep -o '"weekly_active":[0-9]*' | grep -o '[0-9]*' || echo "0")"
CHANGE="$(echo "$PULSE" | grep -o '"change_pct":[0-9-]*' | grep -o '[0-9-]*' || echo "0")"

echo "Weekly active installs: ${WEEKLY}"
if [ "$CHANGE" -gt 0 ] 2>/dev/null; then
echo " Change: +${CHANGE}%"
elif [ "$CHANGE" -lt 0 ] 2>/dev/null; then
echo " Change: ${CHANGE}%"
fi
echo ""
echo "Weekly active installs: ${WEEKLY}"
if [ "$CHANGE" -gt 0 ] 2>/dev/null; then
echo " Change: +${CHANGE}%"
elif [ "$CHANGE" -lt 0 ] 2>/dev/null; then
echo " Change: ${CHANGE}%"
fi
echo ""

# ─── Skill popularity (top 10) ───────────────────────────────
echo "Top skills (last 7 days)"
echo "────────────────────────"

# Query telemetry_events, group by skill
EVENTS="$(query "telemetry_events" "select=skill,gstack_version&event_type=eq.skill_run&event_timestamp=gte.${WEEK_AGO}&limit=1000" 2>/dev/null || echo "[]")"

if [ "$EVENTS" != "[]" ] && [ -n "$EVENTS" ]; then
echo "$EVENTS" | grep -o '"skill":"[^"]*"' | awk -F'"' '{print $4}' | sort | uniq -c | sort -rn | head -10 | while read -r COUNT SKILL; do
printf " /%-20s %d runs\n" "$SKILL" "$COUNT"
# Parse top_skills array from JSON
SKILLS="$(echo "$DATA" | grep -o '"top_skills":\[[^]]*\]' || echo "")"
if [ -n "$SKILLS" ] && [ "$SKILLS" != '"top_skills":[]' ]; then
# Parse each object — handle any key order (JSONB doesn't preserve order)
echo "$SKILLS" | grep -o '{[^}]*}' | while read -r OBJ; do
SKILL="$(echo "$OBJ" | grep -o '"skill":"[^"]*"' | awk -F'"' '{print $4}')"
COUNT="$(echo "$OBJ" | grep -o '"count":[0-9]*' | grep -o '[0-9]*')"
[ -n "$SKILL" ] && [ -n "$COUNT" ] && printf " /%-20s %s runs\n" "$SKILL" "$COUNT"
done
else
echo " No data yet"
@@ -85,12 +74,12 @@ echo ""
echo "Top crash clusters"
echo "──────────────────"

CRASHES="$(query "crash_clusters" "select=error_class,gstack_version,total_occurrences,identified_users&limit=5" 2>/dev/null || echo "[]")"

if [ "$CRASHES" != "[]" ] && [ -n "$CRASHES" ]; then
echo "$CRASHES" | grep -o '"error_class":"[^"]*"' | awk -F'"' '{print $4}' | head -5 | while read -r ERR; do
C="$(echo "$CRASHES" | grep -o "\"error_class\":\"$ERR\"[^}]*\"total_occurrences\":[0-9]*" | grep -o '"total_occurrences":[0-9]*' | head -1 | grep -o '[0-9]*')"
printf " %-30s %s occurrences\n" "$ERR" "${C:-?}"
CRASHES="$(echo "$DATA" | grep -o '"crashes":\[[^]]*\]' || echo "")"
if [ -n "$CRASHES" ] && [ "$CRASHES" != '"crashes":[]' ]; then
echo "$CRASHES" | grep -o '{[^}]*}' | head -5 | while read -r OBJ; do
ERR="$(echo "$OBJ" | grep -o '"error_class":"[^"]*"' | awk -F'"' '{print $4}')"
C="$(echo "$OBJ" | grep -o '"total_occurrences":[0-9]*' | grep -o '[0-9]*')"
[ -n "$ERR" ] && printf " %-30s %s occurrences\n" "$ERR" "${C:-?}"
done
else
echo " No crashes reported"
@@ -101,9 +90,12 @@ echo ""
echo "Version distribution (last 7 days)"
echo "───────────────────────────────────"

if [ "$EVENTS" != "[]" ] && [ -n "$EVENTS" ]; then
echo "$EVENTS" | grep -o '"gstack_version":"[^"]*"' | awk -F'"' '{print $4}' | sort | uniq -c | sort -rn | head -5 | while read -r COUNT VER; do
printf " v%-15s %d events\n" "$VER" "$COUNT"
VERSIONS="$(echo "$DATA" | grep -o '"versions":\[[^]]*\]' || echo "")"
if [ -n "$VERSIONS" ] && [ "$VERSIONS" != '"versions":[]' ]; then
echo "$VERSIONS" | grep -o '{[^}]*}' | head -5 | while read -r OBJ; do
VER="$(echo "$OBJ" | grep -o '"version":"[^"]*"' | awk -F'"' '{print $4}')"
COUNT="$(echo "$OBJ" | grep -o '"count":[0-9]*' | grep -o '[0-9]*')"
[ -n "$VER" ] && [ -n "$COUNT" ] && printf " v%-15s %s events\n" "$VER" "$COUNT"
done
else
echo " No data yet"
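
The dashboard's dependency-free JSON handling relies on grep -o pulling flat objects out of an array; a minimal illustration of that technique (a sketch, not part of the diff):

    SKILLS='"top_skills":[{"skill":"review","count":42},{"count":7,"skill":"plan"}]'
    echo "$SKILLS" | grep -o '{[^}]*}'
    # {"skill":"review","count":42}
    # {"count":7,"skill":"plan"}
    # the per-key grep inside the loop then tolerates either key order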
+17
-3
@@ -16,14 +16,28 @@ CONFIG_FILE="$STATE_DIR/config.yaml"
case "${1:-}" in
get)
KEY="${2:?Usage: gstack-config get <key>}"
grep -E "^${KEY}:" "$CONFIG_FILE" 2>/dev/null | tail -1 | awk '{print $2}' | tr -d '[:space:]' || true
# Validate key (alphanumeric + underscore only)
if ! printf '%s' "$KEY" | grep -qE '^[a-zA-Z0-9_]+$'; then
echo "Error: key must contain only alphanumeric characters and underscores" >&2
exit 1
fi
grep -F "${KEY}:" "$CONFIG_FILE" 2>/dev/null | tail -1 | awk '{print $2}' | tr -d '[:space:]' || true
;;
set)
KEY="${2:?Usage: gstack-config set <key> <value>}"
VALUE="${3:?Usage: gstack-config set <key> <value>}"
# Validate key (alphanumeric + underscore only)
if ! printf '%s' "$KEY" | grep -qE '^[a-zA-Z0-9_]+$'; then
echo "Error: key must contain only alphanumeric characters and underscores" >&2
exit 1
fi
mkdir -p "$STATE_DIR"
if grep -qE "^${KEY}:" "$CONFIG_FILE" 2>/dev/null; then
sed -i '' "s/^${KEY}:.*/${KEY}: ${VALUE}/" "$CONFIG_FILE"
# Escape sed special chars in value and drop embedded newlines
ESC_VALUE="$(printf '%s' "$VALUE" | head -1 | sed 's/[&/\]/\\&/g')"
if grep -qF "${KEY}:" "$CONFIG_FILE" 2>/dev/null; then
# Portable in-place edit (BSD sed uses -i '', GNU sed uses -i without arg)
_tmpfile="$(mktemp "${CONFIG_FILE}.XXXXXX")"
sed "s/^${KEY}:.*/${KEY}: ${ESC_VALUE}/" "$CONFIG_FILE" > "$_tmpfile" && mv "$_tmpfile" "$CONFIG_FILE"
else
echo "${KEY}: ${VALUE}" >> "$CONFIG_FILE"
fi
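
The escape step above is what keeps a user-supplied value from terminating the sed s/…/…/ expression early; one line shows the effect (a sketch, not part of the diff):

    printf '%s' 'path/with&specials' | sed 's/[&/\]/\\&/g'
    # path\/with\&specials (the / and & are now inert inside the replacement)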
Executable
+65
@@ -0,0 +1,65 @@
#!/bin/bash
# gstack-extension — helper to install the Chrome extension
#
# When using $B connect, the extension auto-loads. This script is for
# installing it in your regular Chrome (not the Playwright-controlled one).

set -e

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"

# Find the extension directory
EXT_DIR=""
if [ -f "$REPO_ROOT/extension/manifest.json" ]; then
  EXT_DIR="$REPO_ROOT/extension"
elif [ -f "$HOME/.claude/skills/gstack/extension/manifest.json" ]; then
  EXT_DIR="$HOME/.claude/skills/gstack/extension"
fi

if [ -z "$EXT_DIR" ]; then
  echo "Error: extension/ directory not found."
  echo "Expected at: $REPO_ROOT/extension/ or ~/.claude/skills/gstack/extension/"
  exit 1
fi

# Copy path to clipboard
echo -n "$EXT_DIR" | pbcopy 2>/dev/null

# Get browse server port
PORT=""
STATE_FILE="$REPO_ROOT/.gstack/browse.json"
if [ -f "$STATE_FILE" ]; then
  PORT=$(grep -o '"port":[0-9]*' "$STATE_FILE" | grep -o '[0-9]*')
fi

echo "gstack Chrome Extension Setup"
echo "=============================="
echo ""
echo "Extension path (copied to clipboard):"
echo " $EXT_DIR"
echo ""

if [ -n "$PORT" ]; then
  echo "Browse server port: $PORT"
  echo ""
fi

echo "Quick install (if using \$B connect):"
echo " The extension auto-loads when you run \$B connect."
echo " No manual installation needed!"
echo ""
echo "Manual install (for your regular Chrome):"
echo ""
echo " 1. Opening chrome://extensions now..."

# Open chrome://extensions
osascript -e 'tell application "Google Chrome" to open location "chrome://extensions"' 2>/dev/null || \
  open "chrome://extensions" 2>/dev/null || \
  echo " Could not open Chrome. Navigate to chrome://extensions manually."

echo " 2. Toggle 'Developer mode' ON (top-right)"
echo " 3. Click 'Load unpacked'"
echo " 4. In the file picker: Cmd+Shift+G → paste (path is in your clipboard) → Enter → Select"
echo " 5. Click the gstack puzzle icon in toolbar → enter port: ${PORT:-<check \$B status>}"
echo " 6. Click 'Open Side Panel'"
Executable
BIN
Binary file not shown.
@@ -0,0 +1,591 @@
#!/usr/bin/env bun
/**
 * gstack-global-discover — Discover AI coding sessions across Claude Code, Codex CLI, and Gemini CLI.
 * Resolves each session's working directory to a git repo, deduplicates by normalized remote URL,
 * and outputs structured JSON to stdout.
 *
 * Usage:
 *   gstack-global-discover --since 7d [--format json|summary]
 *   gstack-global-discover --help
 */

import { existsSync, readdirSync, statSync, readFileSync, openSync, readSync, closeSync } from "fs";
import { join, basename } from "path";
import { execSync } from "child_process";
import { homedir } from "os";

// ── Types ──────────────────────────────────────────────────────────────────

interface Session {
  tool: "claude_code" | "codex" | "gemini";
  cwd: string;
}

interface Repo {
  name: string;
  remote: string;
  paths: string[];
  sessions: { claude_code: number; codex: number; gemini: number };
}

interface DiscoveryResult {
  window: string;
  start_date: string;
  repos: Repo[];
  tools: {
    claude_code: { total_sessions: number; repos: number };
    codex: { total_sessions: number; repos: number };
    gemini: { total_sessions: number; repos: number };
  };
  total_sessions: number;
  total_repos: number;
}

// ── CLI parsing ────────────────────────────────────────────────────────────

function printUsage(): void {
  console.error(`Usage: gstack-global-discover --since <window> [--format json|summary]

  --since <window>   Time window: e.g. 7d, 14d, 30d, 24h
  --format <fmt>     Output format: json (default) or summary
  --help             Show this help

Examples:
  gstack-global-discover --since 7d
  gstack-global-discover --since 14d --format summary`);
}

function parseArgs(): { since: string; format: "json" | "summary" } {
  const args = process.argv.slice(2);
  let since = "";
  let format: "json" | "summary" = "json";

  for (let i = 0; i < args.length; i++) {
    if (args[i] === "--help" || args[i] === "-h") {
      printUsage();
      process.exit(0);
    } else if (args[i] === "--since" && args[i + 1]) {
      since = args[++i];
    } else if (args[i] === "--format" && args[i + 1]) {
      const f = args[++i];
      if (f !== "json" && f !== "summary") {
        console.error(`Invalid format: ${f}. Use 'json' or 'summary'.`);
        printUsage();
        process.exit(1);
      }
      format = f;
    } else {
      console.error(`Unknown argument: ${args[i]}`);
      printUsage();
      process.exit(1);
    }
  }

  if (!since) {
    console.error("Error: --since is required.");
    printUsage();
    process.exit(1);
  }

  if (!/^\d+(d|h|w)$/.test(since)) {
    console.error(`Invalid window format: ${since}. Use e.g. 7d, 24h, 2w.`);
    process.exit(1);
  }

  return { since, format };
}

function windowToDate(window: string): Date {
  const match = window.match(/^(\d+)(d|h|w)$/);
  if (!match) throw new Error(`Invalid window: ${window}`);
  const [, numStr, unit] = match;
  const num = parseInt(numStr, 10);
  const now = new Date();

  if (unit === "h") {
    return new Date(now.getTime() - num * 60 * 60 * 1000);
  } else if (unit === "w") {
    // weeks — midnight-aligned like days
    const d = new Date(now);
    d.setDate(d.getDate() - num * 7);
    d.setHours(0, 0, 0, 0);
    return d;
  } else {
    // days — midnight-aligned
    const d = new Date(now);
    d.setDate(d.getDate() - num);
    d.setHours(0, 0, 0, 0);
    return d;
  }
}
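
// For instance, with a local clock of 2026-05-07T15:00: windowToDate("24h") returns
// 2026-05-06T15:00, while windowToDate("1d") returns 2026-05-06T00:00 local
// (midnight-aligned), so "1d" can span more than 24 hours of history by design.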

// ── URL normalization ──────────────────────────────────────────────────────

export function normalizeRemoteUrl(url: string): string {
  let normalized = url.trim();

  // SSH → HTTPS: git@github.com:user/repo → https://github.com/user/repo
  const sshMatch = normalized.match(/^(?:ssh:\/\/)?git@([^:]+):(.+)$/);
  if (sshMatch) {
    normalized = `https://${sshMatch[1]}/${sshMatch[2]}`;
  }

  // Strip .git suffix
  if (normalized.endsWith(".git")) {
    normalized = normalized.slice(0, -4);
  }

  // Lowercase the host portion
  try {
    const parsed = new URL(normalized);
    parsed.hostname = parsed.hostname.toLowerCase();
    normalized = parsed.toString();
    // Remove trailing slash
    if (normalized.endsWith("/")) {
      normalized = normalized.slice(0, -1);
    }
  } catch {
    // Not a valid URL (e.g., local:<path>), return as-is
  }

  return normalized;
}

// ── Git helpers ────────────────────────────────────────────────────────────

function isGitRepo(dir: string): boolean {
  return existsSync(join(dir, ".git"));
}

function getGitRemote(cwd: string): string | null {
  if (!existsSync(cwd) || !isGitRepo(cwd)) return null;
  try {
    const remote = execSync("git remote get-url origin", {
      cwd,
      encoding: "utf-8",
      timeout: 5000,
      stdio: ["pipe", "pipe", "pipe"],
    }).trim();
    return remote || null;
  } catch {
    return null;
  }
}

// ── Scanners ───────────────────────────────────────────────────────────────

function scanClaudeCode(since: Date): Session[] {
  const projectsDir = join(homedir(), ".claude", "projects");
  if (!existsSync(projectsDir)) return [];

  const sessions: Session[] = [];

  let dirs: string[];
  try {
    dirs = readdirSync(projectsDir);
  } catch {
    return [];
  }

  for (const dirName of dirs) {
    const dirPath = join(projectsDir, dirName);
    try {
      const stat = statSync(dirPath);
      if (!stat.isDirectory()) continue;
    } catch {
      continue;
    }

    // Find JSONL files
    let jsonlFiles: string[];
    try {
      jsonlFiles = readdirSync(dirPath).filter((f) => f.endsWith(".jsonl"));
    } catch {
      continue;
    }
    if (jsonlFiles.length === 0) continue;

    // Coarse mtime pre-filter: check if any JSONL file is recent
    const hasRecentFile = jsonlFiles.some((f) => {
      try {
        return statSync(join(dirPath, f)).mtime >= since;
      } catch {
        return false;
      }
    });
    if (!hasRecentFile) continue;

    // Resolve cwd
    let cwd = resolveClaudeCodeCwd(dirPath, dirName, jsonlFiles);
    if (!cwd) continue;

    // Count only JSONL files modified within the window as sessions
    const recentFiles = jsonlFiles.filter((f) => {
      try {
        return statSync(join(dirPath, f)).mtime >= since;
      } catch {
        return false;
      }
    });
    for (let i = 0; i < recentFiles.length; i++) {
      sessions.push({ tool: "claude_code", cwd });
    }
  }

  return sessions;
}

function resolveClaudeCodeCwd(
  dirPath: string,
  dirName: string,
  jsonlFiles: string[]
): string | null {
  // Fast-path: decode directory name
  // e.g., -Users-garrytan-git-repo → /Users/garrytan/git/repo
  const decoded = dirName.replace(/^-/, "/").replace(/-/g, "/");
  if (existsSync(decoded)) return decoded;
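  // Note: the decode is lossy for paths whose components contain "-"
  // (e.g., /Users/x/my-repo decodes to /Users/x/my/repo, which won't exist),
  // so such projects fall through to the JSONL cwd lookup below.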

  // Fallback: read cwd from first JSONL file
  // Sort by mtime descending, pick most recent
  const sorted = jsonlFiles
    .map((f) => {
      try {
        return { name: f, mtime: statSync(join(dirPath, f)).mtime.getTime() };
      } catch {
        return null;
      }
    })
    .filter(Boolean)
    .sort((a, b) => b!.mtime - a!.mtime) as { name: string; mtime: number }[];

  for (const file of sorted.slice(0, 3)) {
    const cwd = extractCwdFromJsonl(join(dirPath, file.name));
    if (cwd && existsSync(cwd)) return cwd;
  }

  return null;
}

function extractCwdFromJsonl(filePath: string): string | null {
  try {
    // Read only the first 8KB to avoid loading huge JSONL files into memory
    const fd = openSync(filePath, "r");
    const buf = Buffer.alloc(8192);
    const bytesRead = readSync(fd, buf, 0, 8192, 0);
    closeSync(fd);
    const text = buf.toString("utf-8", 0, bytesRead);
    const lines = text.split("\n").slice(0, 15);
    for (const line of lines) {
      if (!line.trim()) continue;
      try {
        const obj = JSON.parse(line);
        if (obj.cwd) return obj.cwd;
      } catch {
        continue;
      }
    }
  } catch {
    // File read error
  }
  return null;
}

function scanCodex(since: Date): Session[] {
  const sessionsDir = join(homedir(), ".codex", "sessions");
  if (!existsSync(sessionsDir)) return [];

  const sessions: Session[] = [];

  // Walk YYYY/MM/DD directory structure
  try {
    const years = readdirSync(sessionsDir);
    for (const year of years) {
      const yearPath = join(sessionsDir, year);
      if (!statSync(yearPath).isDirectory()) continue;

      const months = readdirSync(yearPath);
      for (const month of months) {
        const monthPath = join(yearPath, month);
        if (!statSync(monthPath).isDirectory()) continue;

        const days = readdirSync(monthPath);
        for (const day of days) {
          const dayPath = join(monthPath, day);
          if (!statSync(dayPath).isDirectory()) continue;

          const files = readdirSync(dayPath).filter((f) =>
            f.startsWith("rollout-") && f.endsWith(".jsonl")
          );

          for (const file of files) {
            const filePath = join(dayPath, file);
            try {
              const stat = statSync(filePath);
              if (stat.mtime < since) continue;
            } catch {
              continue;
            }

            // Read first line for session_meta (only first 4KB)
            try {
              const fd = openSync(filePath, "r");
              const buf = Buffer.alloc(4096);
              const bytesRead = readSync(fd, buf, 0, 4096, 0);
              closeSync(fd);
              const firstLine = buf.toString("utf-8", 0, bytesRead).split("\n")[0];
              if (!firstLine) continue;
              const meta = JSON.parse(firstLine);
              if (meta.type === "session_meta" && meta.payload?.cwd) {
                sessions.push({ tool: "codex", cwd: meta.payload.cwd });
              }
            } catch {
              console.error(`Warning: could not parse Codex session ${filePath}`);
            }
          }
        }
      }
    }
  } catch {
    // Directory read error
  }

  return sessions;
}

function scanGemini(since: Date): Session[] {
  const tmpDir = join(homedir(), ".gemini", "tmp");
  if (!existsSync(tmpDir)) return [];

  // Load projects.json for path mapping
  const projectsPath = join(homedir(), ".gemini", "projects.json");
  let projectsMap: Record<string, string> = {}; // name → path
  if (existsSync(projectsPath)) {
    try {
      const data = JSON.parse(readFileSync(projectsPath, { encoding: "utf-8" }));
      // Format: { projects: { "/path": "name" } } — we want name → path
      const projects = data.projects || {};
      for (const [path, name] of Object.entries(projects)) {
        projectsMap[name as string] = path;
      }
    } catch {
      console.error("Warning: could not parse ~/.gemini/projects.json");
    }
  }

  const sessions: Session[] = [];
  const seenTimestamps = new Map<string, Set<string>>(); // projectName → Set<startTime>

  let projectDirs: string[];
  try {
    projectDirs = readdirSync(tmpDir);
  } catch {
    return [];
  }

  for (const projectName of projectDirs) {
    const chatsDir = join(tmpDir, projectName, "chats");
    if (!existsSync(chatsDir)) continue;

    // Resolve cwd from projects.json
    let cwd = projectsMap[projectName] || null;

    // Fallback: check .project_root
    if (!cwd) {
      const projectRootFile = join(tmpDir, projectName, ".project_root");
      if (existsSync(projectRootFile)) {
        try {
          cwd = readFileSync(projectRootFile, { encoding: "utf-8" }).trim();
        } catch {}
      }
    }

    if (!cwd || !existsSync(cwd)) continue;

    const seen = seenTimestamps.get(projectName) || new Set<string>();
    seenTimestamps.set(projectName, seen);

    let files: string[];
    try {
      files = readdirSync(chatsDir).filter((f) =>
        f.startsWith("session-") && f.endsWith(".json")
      );
    } catch {
      continue;
    }

    for (const file of files) {
      const filePath = join(chatsDir, file);
      try {
        const stat = statSync(filePath);
        if (stat.mtime < since) continue;
      } catch {
        continue;
      }

      try {
        const data = JSON.parse(readFileSync(filePath, { encoding: "utf-8" }));
        const startTime = data.startTime || "";

        // Deduplicate by startTime within project
        if (startTime && seen.has(startTime)) continue;
        if (startTime) seen.add(startTime);

        sessions.push({ tool: "gemini", cwd });
      } catch {
        console.error(`Warning: could not parse Gemini session ${filePath}`);
      }
    }
  }

  return sessions;
}

// ── Deduplication ──────────────────────────────────────────────────────────

async function resolveAndDeduplicate(sessions: Session[]): Promise<Repo[]> {
  // Group sessions by cwd
  const byCwd = new Map<string, Session[]>();
  for (const s of sessions) {
    const existing = byCwd.get(s.cwd) || [];
    existing.push(s);
    byCwd.set(s.cwd, existing);
  }

  // Resolve git remotes for each cwd
  const cwds = Array.from(byCwd.keys());
  const remoteMap = new Map<string, string>(); // cwd → normalized remote

  for (const cwd of cwds) {
    const raw = getGitRemote(cwd);
    if (raw) {
      remoteMap.set(cwd, normalizeRemoteUrl(raw));
    } else if (existsSync(cwd) && isGitRepo(cwd)) {
      remoteMap.set(cwd, `local:${cwd}`);
    }
  }

  // Group by normalized remote
  const byRemote = new Map<string, { paths: string[]; sessions: Session[] }>();
  for (const [cwd, cwdSessions] of byCwd) {
    const remote = remoteMap.get(cwd);
    if (!remote) continue;

    const existing = byRemote.get(remote) || { paths: [], sessions: [] };
    if (!existing.paths.includes(cwd)) existing.paths.push(cwd);
    existing.sessions.push(...cwdSessions);
    byRemote.set(remote, existing);
  }

  // Build Repo objects
  const repos: Repo[] = [];
  for (const [remote, data] of byRemote) {
    // Find first valid path
    const validPath = data.paths.find((p) => existsSync(p) && isGitRepo(p));
    if (!validPath) continue;

    // Derive name from remote URL
    let name: string;
    if (remote.startsWith("local:")) {
      name = basename(remote.replace("local:", ""));
    } else {
      try {
        const url = new URL(remote);
        name = basename(url.pathname);
      } catch {
        name = basename(remote);
      }
    }

    const sessionCounts = { claude_code: 0, codex: 0, gemini: 0 };
    for (const s of data.sessions) {
      sessionCounts[s.tool]++;
    }

    repos.push({
      name,
      remote,
      paths: data.paths,
      sessions: sessionCounts,
    });
  }

  // Sort by total sessions descending
  repos.sort(
    (a, b) =>
      b.sessions.claude_code + b.sessions.codex + b.sessions.gemini -
      (a.sessions.claude_code + a.sessions.codex + a.sessions.gemini)
  );

  return repos;
}

// ── Main ───────────────────────────────────────────────────────────────────

async function main() {
  const { since, format } = parseArgs();
  const sinceDate = windowToDate(since);
  const startDate = sinceDate.toISOString().split("T")[0];

  // Run all scanners
  const ccSessions = scanClaudeCode(sinceDate);
  const codexSessions = scanCodex(sinceDate);
  const geminiSessions = scanGemini(sinceDate);

  const allSessions = [...ccSessions, ...codexSessions, ...geminiSessions];

  // Summary to stderr
  console.error(
    `Discovered: ${ccSessions.length} CC sessions, ${codexSessions.length} Codex sessions, ${geminiSessions.length} Gemini sessions`
  );

  // Deduplicate
  const repos = await resolveAndDeduplicate(allSessions);

  console.error(`→ ${repos.length} unique repos`);

  // Count per-tool repo counts
  const ccRepos = new Set(repos.filter((r) => r.sessions.claude_code > 0).map((r) => r.remote)).size;
  const codexRepos = new Set(repos.filter((r) => r.sessions.codex > 0).map((r) => r.remote)).size;
  const geminiRepos = new Set(repos.filter((r) => r.sessions.gemini > 0).map((r) => r.remote)).size;

  const result: DiscoveryResult = {
    window: since,
    start_date: startDate,
    repos,
    tools: {
      claude_code: { total_sessions: ccSessions.length, repos: ccRepos },
      codex: { total_sessions: codexSessions.length, repos: codexRepos },
      gemini: { total_sessions: geminiSessions.length, repos: geminiRepos },
    },
    total_sessions: allSessions.length,
    total_repos: repos.length,
  };

  if (format === "json") {
    console.log(JSON.stringify(result, null, 2));
  } else {
    // Summary format
    console.log(`Window: ${since} (since ${startDate})`);
    console.log(`Sessions: ${allSessions.length} total (CC: ${ccSessions.length}, Codex: ${codexSessions.length}, Gemini: ${geminiSessions.length})`);
    console.log(`Repos: ${repos.length} unique`);
    console.log("");
    for (const repo of repos) {
      const total = repo.sessions.claude_code + repo.sessions.codex + repo.sessions.gemini;
      const tools = [];
      if (repo.sessions.claude_code > 0) tools.push(`CC:${repo.sessions.claude_code}`);
      if (repo.sessions.codex > 0) tools.push(`Codex:${repo.sessions.codex}`);
      if (repo.sessions.gemini > 0) tools.push(`Gemini:${repo.sessions.gemini}`);
      console.log(` ${repo.name} (${total} sessions) — ${tools.join(", ")}`);
      console.log(` Remote: ${repo.remote}`);
      console.log(` Paths: ${repo.paths.join(", ")}`);
    }
  }
}

// Only run main when executed directly (not when imported for testing)
if (import.meta.main) {
  main().catch((err) => {
    console.error(`Fatal error: ${err.message}`);
    process.exit(1);
  });
}
Executable
+20
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
set -euo pipefail

# gstack-platform-detect: show which AI coding agents are installed and gstack status
printf "%-16s %-10s %-40s %s\n" "Agent" "Version" "Skill Path" "gstack"
printf "%-16s %-10s %-40s %s\n" "-----" "-------" "----------" "------"
for entry in "claude:claude" "codex:codex" "droid:factory" "kiro-cli:kiro"; do
  bin="${entry%%:*}"; label="${entry##*:}"
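  # e.g. "droid:factory" sets bin=droid (strip the longest ":*" suffix) and label=factory (strip the longest "*:" prefix)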
  if command -v "$bin" >/dev/null 2>&1; then
    ver=$("$bin" --version 2>/dev/null | head -1 || echo "unknown")
    case "$label" in
      claude) spath="$HOME/.claude/skills/gstack" ;;
      codex) spath="$HOME/.codex/skills/gstack" ;;
      factory) spath="$HOME/.factory/skills/gstack" ;;
      kiro) spath="$HOME/.kiro/skills/gstack" ;;
    esac
    status=$([ -d "$spath" ] && echo "INSTALLED" || echo "NOT INSTALLED")
    printf "%-16s %-10s %-40s %s\n" "$label" "$ver" "$spath" "$status"
  fi
done
Executable
+93
@@ -0,0 +1,93 @@
#!/usr/bin/env bash
# gstack-repo-mode — detect solo vs collaborative repo mode
# Usage: source <(gstack-repo-mode) → sets REPO_MODE variable
# Or: gstack-repo-mode → prints REPO_MODE=... line
#
# Detection heuristic (90-day window):
#   Solo: top author >= 80% of commits
#   Collaborative: top author < 80%
#
# Override: gstack-config set repo_mode solo|collaborative
# Cache: ~/.gstack/projects/$SLUG/repo-mode.json (7-day TTL)
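#
# Worked example: 40 of 45 non-merge commits from one author gives
# 40*100/45 = 88 (integer division), and 88 >= 80, so the repo classifies as solo.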
set -euo pipefail

SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
# Compute SLUG directly (avoid eval of gstack-slug — branch names can contain shell metacharacters)
REMOTE_URL=$(git remote get-url origin 2>/dev/null || true)
if [ -z "$REMOTE_URL" ]; then
  echo "REPO_MODE=unknown"
  exit 0
fi
SLUG=$(echo "$REMOTE_URL" | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-')
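# e.g. git@github.com:garrytan/gstack.git yields garrytan/gstack, then garrytan-gstack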
[ -z "${SLUG:-}" ] && { echo "REPO_MODE=unknown"; exit 0; }

# Validate: only allow known values (prevent shell injection via source <(...))
validate_mode() {
  case "$1" in solo|collaborative|unknown) echo "$1" ;; *) echo "unknown" ;; esac
}

# Config override takes precedence
OVERRIDE=$("$SCRIPT_DIR/gstack-config" get repo_mode 2>/dev/null || true)
if [ -n "$OVERRIDE" ] && [ "$OVERRIDE" != "null" ]; then
  echo "REPO_MODE=$(validate_mode "$OVERRIDE")"
  exit 0
fi

# Check cache (7-day TTL)
CACHE_DIR="$HOME/.gstack/projects/$SLUG"
CACHE_FILE="$CACHE_DIR/repo-mode.json"
if [ -f "$CACHE_FILE" ]; then
  CACHE_AGE=$(( $(date +%s) - $(stat -f %m "$CACHE_FILE" 2>/dev/null || stat -c %Y "$CACHE_FILE" 2>/dev/null || echo 0) ))
  if [ "$CACHE_AGE" -lt 604800 ]; then # 7 days in seconds
    MODE=$(grep -o '"mode":"[^"]*"' "$CACHE_FILE" | head -1 | cut -d'"' -f4)
    [ -n "$MODE" ] && echo "REPO_MODE=$(validate_mode "$MODE")" && exit 0
  fi
fi

# Compute from git history (90-day window)
# Use default branch (not HEAD) to avoid feature-branch sampling bias
DEFAULT_BRANCH=$(git symbolic-ref refs/remotes/origin/HEAD 2>/dev/null | sed 's|refs/remotes/||' || true)
# Fallback: try origin/main, then origin/master, then HEAD
if [ -z "$DEFAULT_BRANCH" ]; then
  if git rev-parse --verify origin/main &>/dev/null; then
    DEFAULT_BRANCH="origin/main"
  elif git rev-parse --verify origin/master &>/dev/null; then
    DEFAULT_BRANCH="origin/master"
  else
    DEFAULT_BRANCH="HEAD"
  fi
fi
SHORTLOG=$(git shortlog -sn --since="90 days ago" --no-merges "$DEFAULT_BRANCH" 2>/dev/null)
if [ -z "$SHORTLOG" ]; then
  echo "REPO_MODE=unknown"
  exit 0
fi

# Compute TOTAL from ALL authors (not truncated) to avoid solo bias
TOTAL=$(echo "$SHORTLOG" | awk '{s+=$1} END {print s}')
TOP=$(echo "$SHORTLOG" | head -1 | awk '{print $1}')
AUTHORS=$(echo "$SHORTLOG" | wc -l | tr -d ' ')

# Minimum sample: need at least 5 commits to classify
if [ "$TOTAL" -lt 5 ]; then
  echo "REPO_MODE=unknown"
  exit 0
fi

TOP_PCT=$(( TOP * 100 / TOTAL ))

# Solo: top author >= 80% of commits (occasional outside PRs don't change mode)
if [ "$TOP_PCT" -ge 80 ]; then
  MODE=solo
else
  MODE=collaborative
fi

# Cache result atomically (fail silently if ~/.gstack is unwritable)
mkdir -p "$CACHE_DIR" 2>/dev/null || true
CACHE_TMP=$(mktemp "$CACHE_DIR/.repo-mode-XXXXXX" 2>/dev/null || true)
if [ -n "$CACHE_TMP" ]; then
  echo "{\"mode\":\"$MODE\",\"top_pct\":$TOP_PCT,\"authors\":$AUTHORS,\"total\":$TOTAL,\"computed\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > "$CACHE_TMP" 2>/dev/null && mv "$CACHE_TMP" "$CACHE_FILE" 2>/dev/null || rm -f "$CACHE_TMP" 2>/dev/null
fi

echo "REPO_MODE=$MODE"
+11
-2
@@ -3,7 +3,16 @@
# Usage: gstack-review-log '{"skill":"...","timestamp":"...","status":"..."}'
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
eval $("$SCRIPT_DIR/gstack-slug" 2>/dev/null)
eval "$("$SCRIPT_DIR/gstack-slug" 2>/dev/null)"
GSTACK_HOME="${GSTACK_HOME:-$HOME/.gstack}"
mkdir -p "$GSTACK_HOME/projects/$SLUG"
echo "$1" >> "$GSTACK_HOME/projects/$SLUG/$BRANCH-reviews.jsonl"

# Validate: input must be parseable JSON (reject malformed or injection attempts)
INPUT="$1"
if ! printf '%s' "$INPUT" | bun -e "JSON.parse(await Bun.stdin.text())" 2>/dev/null; then
# Not valid JSON — refuse to append
echo "gstack-review-log: invalid JSON, skipping" >&2
exit 1
fi

echo "$INPUT" >> "$GSTACK_HOME/projects/$SLUG/$BRANCH-reviews.jsonl"
@@ -3,7 +3,7 @@
# Usage: gstack-review-read
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
eval $("$SCRIPT_DIR/gstack-slug" 2>/dev/null)
eval "$("$SCRIPT_DIR/gstack-slug" 2>/dev/null)"
GSTACK_HOME="${GSTACK_HOME:-$HOME/.gstack}"
cat "$GSTACK_HOME/projects/$SLUG/$BRANCH-reviews.jsonl" 2>/dev/null || echo "NO_REVIEWS"
echo "---CONFIG---"
+13
-7
@@ -1,12 +1,18 @@
#!/usr/bin/env bash
# gstack-slug — output project slug and sanitized branch name
# Usage: source <(gstack-slug) → sets SLUG and BRANCH variables
# Or: gstack-slug → prints SLUG=... and BRANCH=... lines
# Usage: eval "$(gstack-slug)" → sets SLUG and BRANCH variables
# Or: gstack-slug → prints SLUG=... and BRANCH=... lines
#
# Security: output is sanitized to [a-zA-Z0-9._-] only, preventing
# shell injection when consumed via source or eval.
set -euo pipefail
SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-' | tr '[:upper:]' '[:lower:]')
BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null | tr '/' '-')
STATE_DIR="${GSTACK_STATE_DIR:-$HOME/.gstack}"
PROJECTS_DIR="${STATE_DIR}/projects"
RAW_SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') || true
RAW_BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null | tr '/' '-') || true
# Strip any characters that aren't alphanumeric, dot, hyphen, or underscore
SLUG=$(printf '%s' "${RAW_SLUG:-}" | tr -cd 'a-zA-Z0-9._-')
BRANCH=$(printf '%s' "${RAW_BRANCH:-}" | tr -cd 'a-zA-Z0-9._-')
# Fallback when git context is absent
SLUG="${SLUG:-$(basename "$PWD" | tr -cd 'a-zA-Z0-9._-')}"
BRANCH="${BRANCH:-unknown}"
echo "SLUG=$SLUG"
echo "BRANCH=$BRANCH"
echo "PROJECTS_DIR=$PROJECTS_DIR"
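
The tr -cd whitelist is what makes eval-ing this output safe; a quick illustration (a sketch, not part of the diff):

    printf '%s' 'feat/$(touch pwned)' | tr -cd 'a-zA-Z0-9._-'
    # feattouchpwned (every shell metacharacter is dropped before eval ever sees it)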
+66
-21
@@ -32,21 +32,30 @@ OUTCOME="unknown"
USED_BROWSE="false"
SESSION_ID=""
ERROR_CLASS=""
ERROR_MESSAGE=""
FAILED_STEP=""
EVENT_TYPE="skill_run"
SOURCE=""

while [ $# -gt 0 ]; do
case "$1" in
--skill) SKILL="$2"; shift 2 ;;
--duration) DURATION="$2"; shift 2 ;;
--outcome) OUTCOME="$2"; shift 2 ;;
--used-browse) USED_BROWSE="$2"; shift 2 ;;
--session-id) SESSION_ID="$2"; shift 2 ;;
--error-class) ERROR_CLASS="$2"; shift 2 ;;
--event-type) EVENT_TYPE="$2"; shift 2 ;;
--skill) SKILL="$2"; shift 2 ;;
--duration) DURATION="$2"; shift 2 ;;
--outcome) OUTCOME="$2"; shift 2 ;;
--used-browse) USED_BROWSE="$2"; shift 2 ;;
--session-id) SESSION_ID="$2"; shift 2 ;;
--error-class) ERROR_CLASS="$2"; shift 2 ;;
--error-message) ERROR_MESSAGE="$2"; shift 2 ;;
--failed-step) FAILED_STEP="$2"; shift 2 ;;
--event-type) EVENT_TYPE="$2"; shift 2 ;;
--source) SOURCE="$2"; shift 2 ;;
*) shift ;;
esac
done

# Source: flag > env > default 'live'
SOURCE="${SOURCE:-${GSTACK_TELEMETRY_SOURCE:-live}}"

# ─── Read telemetry tier ─────────────────────────────────────
TIER="$("$CONFIG_CMD" get telemetry 2>/dev/null || true)"
TIER="${TIER:-off}"
@@ -106,18 +115,29 @@ if [ -d "$STATE_DIR/sessions" ]; then
fi

# Generate installation_id for community tier
# Uses a random UUID stored locally — not derived from hostname/user so it
# can't be guessed or correlated by someone who knows your machine identity.
INSTALL_ID=""
if [ "$TIER" = "community" ]; then
HOST="$(hostname 2>/dev/null || echo "unknown")"
USER="$(whoami 2>/dev/null || echo "unknown")"
if command -v shasum >/dev/null 2>&1; then
INSTALL_ID="$(printf '%s-%s' "$HOST" "$USER" | shasum -a 256 | awk '{print $1}')"
elif command -v sha256sum >/dev/null 2>&1; then
INSTALL_ID="$(printf '%s-%s' "$HOST" "$USER" | sha256sum | awk '{print $1}')"
elif command -v openssl >/dev/null 2>&1; then
INSTALL_ID="$(printf '%s-%s' "$HOST" "$USER" | openssl dgst -sha256 | awk '{print $NF}')"
ID_FILE="$HOME/.gstack/installation-id"
if [ -f "$ID_FILE" ]; then
INSTALL_ID="$(cat "$ID_FILE" 2>/dev/null)"
fi
if [ -z "$INSTALL_ID" ]; then
# Generate a random UUID v4
if command -v uuidgen >/dev/null 2>&1; then
INSTALL_ID="$(uuidgen | tr '[:upper:]' '[:lower:]')"
elif [ -r /proc/sys/kernel/random/uuid ]; then
INSTALL_ID="$(cat /proc/sys/kernel/random/uuid)"
else
# Fallback: random hex from /dev/urandom
INSTALL_ID="$(od -An -tx1 -N16 /dev/urandom 2>/dev/null | tr -d ' \n')"
fi
if [ -n "$INSTALL_ID" ]; then
mkdir -p "$(dirname "$ID_FILE")" 2>/dev/null
printf '%s' "$INSTALL_ID" > "$ID_FILE" 2>/dev/null
fi
fi
# If no SHA-256 command available, install_id stays empty
fi

# Local-only fields (never sent remotely)
@@ -131,9 +151,33 @@ fi
# ─── Construct and append JSON ───────────────────────────────
mkdir -p "$ANALYTICS_DIR"

# Escape null fields
# Sanitize string fields for JSON safety (strip quotes, backslashes, control chars)
json_safe() { printf '%s' "$1" | tr -d '"\\\n\r\t' | head -c 200; }
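# e.g. json_safe 'fail: "disk"\full' outputs fail: diskfull (quotes and backslashes dropped, capped at 200 chars)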
SKILL="$(json_safe "$SKILL")"
OUTCOME="$(json_safe "$OUTCOME")"
SESSION_ID="$(json_safe "$SESSION_ID")"
SOURCE="$(json_safe "$SOURCE")"
EVENT_TYPE="$(json_safe "$EVENT_TYPE")"
REPO_SLUG="$(json_safe "$REPO_SLUG")"
BRANCH="$(json_safe "$BRANCH")"

# Escape null fields — sanitize ERROR_CLASS and FAILED_STEP via json_safe()
ERR_FIELD="null"
[ -n "$ERROR_CLASS" ] && ERR_FIELD="\"$ERROR_CLASS\""
[ -n "$ERROR_CLASS" ] && ERR_FIELD="\"$(json_safe "$ERROR_CLASS")\""

ERR_MSG_FIELD="null"
[ -n "$ERROR_MESSAGE" ] && ERR_MSG_FIELD="\"$(printf '%s' "$ERROR_MESSAGE" | head -c 200 | sed -e 's/\\/\\\\/g' -e 's/"/\\"/g' -e 's/	/\\t/g' | tr '\n\r' '  ')\""

STEP_FIELD="null"
[ -n "$FAILED_STEP" ] && STEP_FIELD="\"$(json_safe "$FAILED_STEP")\""

# Cap unreasonable durations
if [ -n "$DURATION" ] && [ "$DURATION" -gt 86400 ] 2>/dev/null; then
DURATION="" # null if > 24h
fi
if [ -n "$DURATION" ] && [ "$DURATION" -lt 0 ] 2>/dev/null; then
DURATION="" # null if negative
fi

DUR_FIELD="null"
[ -n "$DURATION" ] && DUR_FIELD="$DURATION"
@@ -144,10 +188,11 @@ INSTALL_FIELD="null"
BROWSE_BOOL="false"
[ "$USED_BROWSE" = "true" ] && BROWSE_BOOL="true"

printf '{"v":1,"ts":"%s","event_type":"%s","skill":"%s","session_id":"%s","gstack_version":"%s","os":"%s","arch":"%s","duration_s":%s,"outcome":"%s","error_class":%s,"used_browse":%s,"sessions":%s,"installation_id":%s,"_repo_slug":"%s","_branch":"%s"}\n' \
printf '{"v":1,"ts":"%s","event_type":"%s","skill":"%s","session_id":"%s","gstack_version":"%s","os":"%s","arch":"%s","duration_s":%s,"outcome":"%s","error_class":%s,"error_message":%s,"failed_step":%s,"used_browse":%s,"sessions":%s,"installation_id":%s,"source":"%s","_repo_slug":"%s","_branch":"%s"}\n' \
"$TS" "$EVENT_TYPE" "$SKILL" "$SESSION_ID" "$GSTACK_VERSION" "$OS" "$ARCH" \
"$DUR_FIELD" "$OUTCOME" "$ERR_FIELD" "$BROWSE_BOOL" "${SESSIONS:-1}" \
"$INSTALL_FIELD" "$REPO_SLUG" "$BRANCH" >> "$JSONL_FILE" 2>/dev/null || true
"$DUR_FIELD" "$OUTCOME" "$ERR_FIELD" "$ERR_MSG_FIELD" "$STEP_FIELD" \
"$BROWSE_BOOL" "${SESSIONS:-1}" \
"$INSTALL_FIELD" "$SOURCE" "$REPO_SLUG" "$BRANCH" >> "$JSONL_FILE" 2>/dev/null || true

# ─── Trigger sync if tier is not off ─────────────────────────
SYNC_CMD="$GSTACK_DIR/bin/gstack-telemetry-sync"
+26
-16
@@ -3,11 +3,12 @@
#
# Fire-and-forget, backgrounded, rate-limited to once per 5 minutes.
# Strips local-only fields before sending. Respects privacy tiers.
# Posts to the telemetry-ingest edge function (not PostgREST directly).
#
# Env overrides (for testing):
# GSTACK_STATE_DIR — override ~/.gstack state directory
# GSTACK_DIR — override auto-detected gstack root
# GSTACK_TELEMETRY_ENDPOINT — override Supabase endpoint URL
# GSTACK_SUPABASE_URL — override Supabase project URL
set -uo pipefail

GSTACK_DIR="${GSTACK_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
@@ -19,15 +20,15 @@ RATE_FILE="$ANALYTICS_DIR/.last-sync-time"
CONFIG_CMD="$GSTACK_DIR/bin/gstack-config"

# Source Supabase config if not overridden by env
if [ -z "${GSTACK_TELEMETRY_ENDPOINT:-}" ] && [ -f "$GSTACK_DIR/supabase/config.sh" ]; then
if [ -z "${GSTACK_SUPABASE_URL:-}" ] && [ -f "$GSTACK_DIR/supabase/config.sh" ]; then
. "$GSTACK_DIR/supabase/config.sh"
fi
ENDPOINT="${GSTACK_TELEMETRY_ENDPOINT:-}"
SUPABASE_URL="${GSTACK_SUPABASE_URL:-}"
ANON_KEY="${GSTACK_SUPABASE_ANON_KEY:-}"

# ─── Pre-checks ──────────────────────────────────────────────
# No endpoint configured yet → exit silently
[ -z "$ENDPOINT" ] && exit 0
# No Supabase URL configured yet → exit silently
[ -z "$SUPABASE_URL" ] && exit 0

# No JSONL file → nothing to sync
[ -f "$JSONL_FILE" ] || exit 0
@@ -66,6 +67,8 @@ UNSENT="$(tail -n "+$SKIP" "$JSONL_FILE" 2>/dev/null || true)"
[ -z "$UNSENT" ] && exit 0

# ─── Strip local-only fields and build batch ─────────────────
# Edge function expects raw JSONL field names (v, ts, sessions) —
# no column renaming needed (the function maps them internally).
BATCH="["
FIRST=true
COUNT=0
@@ -75,13 +78,10 @@ while IFS= read -r LINE; do
[ -z "$LINE" ] && continue
echo "$LINE" | grep -q '^{' || continue

# Strip local-only fields + map JSONL field names to Postgres column names
# Strip local-only fields (keep v, ts, sessions as-is for edge function)
CLEAN="$(echo "$LINE" | sed \
-e 's/,"_repo_slug":"[^"]*"//g' \
-e 's/,"_branch":"[^"]*"//g' \
-e 's/"v":/"schema_version":/g' \
-e 's/"ts":/"event_timestamp":/g' \
-e 's/"sessions":/"concurrent_sessions":/g' \
-e 's/,"repo":"[^"]*"//g')"

# If anonymous tier, strip installation_id
@@ -106,21 +106,31 @@ BATCH="$BATCH]"
# Nothing to send after filtering
[ "$COUNT" -eq 0 ] && exit 0

# ─── POST to Supabase ────────────────────────────────────────
HTTP_CODE="$(curl -s -o /dev/null -w '%{http_code}' --max-time 10 \
-X POST "${ENDPOINT}/telemetry_events" \
# ─── POST to edge function ───────────────────────────────────
RESP_FILE="$(mktemp /tmp/gstack-sync-XXXXXX 2>/dev/null || echo "/tmp/gstack-sync-$$")"
HTTP_CODE="$(curl -s -w '%{http_code}' --max-time 10 \
-X POST "${SUPABASE_URL}/functions/v1/telemetry-ingest" \
-H "Content-Type: application/json" \
-H "apikey: ${ANON_KEY}" \
-H "Authorization: Bearer ${ANON_KEY}" \
-H "Prefer: return=minimal" \
-o "$RESP_FILE" \
-d "$BATCH" 2>/dev/null || echo "000")"

# ─── Update cursor on success (2xx) ─────────────────────────
case "$HTTP_CODE" in
2*) NEW_CURSOR=$(( CURSOR + COUNT ))
echo "$NEW_CURSOR" > "$CURSOR_FILE" 2>/dev/null || true ;;
2*)
# Parse inserted count from response — only advance if events were actually inserted.
# Advance by SENT count (not inserted count) because we can't map inserted back to
# source lines. If inserted==0, something is systemically wrong — don't advance.
INSERTED="$(grep -o '"inserted":[0-9]*' "$RESP_FILE" 2>/dev/null | grep -o '[0-9]*' || echo "0")"
if [ "${INSERTED:-0}" -gt 0 ] 2>/dev/null; then
NEW_CURSOR=$(( CURSOR + COUNT ))
echo "$NEW_CURSOR" > "$CURSOR_FILE" 2>/dev/null || true
fi
;;
esac

rm -f "$RESP_FILE" 2>/dev/null || true

# Update rate limit marker
touch "$RATE_FILE" 2>/dev/null || true
Executable
+252
@@ -0,0 +1,252 @@
#!/usr/bin/env bash
# gstack-uninstall — remove gstack skills, state, and browse daemons
#
# Usage:
#   gstack-uninstall              — interactive uninstall (prompts before removing)
#   gstack-uninstall --force      — remove everything without prompting
#   gstack-uninstall --keep-state — remove skills but keep ~/.gstack/ data
#
# What gets REMOVED:
#   ~/.claude/skills/gstack    — global Claude skill install (git clone or vendored)
#   ~/.claude/skills/{skill}   — per-skill symlinks created by setup
#   ~/.codex/skills/gstack*    — Codex skill install + per-skill symlinks
#   ~/.factory/skills/gstack*  — Factory Droid skill install + per-skill symlinks
#   ~/.kiro/skills/gstack*     — Kiro skill install + per-skill symlinks
#   ~/.gstack/                 — global state (config, analytics, sessions, projects,
#                                repos, installation-id, browse error logs)
#   .claude/skills/gstack*     — project-local skill install (--local installs)
#   .gstack/                   — per-project browse state (in current git repo)
#   .gstack-worktrees/         — per-project test worktrees (in current git repo)
#   .agents/skills/gstack*     — Codex/Gemini/Cursor sidecar (in current git repo)
#   Running browse daemons     — stopped via SIGTERM before cleanup
#
# What is NOT REMOVED:
#   ~/Library/Caches/ms-playwright/ — Playwright Chromium (shared, may be used by other tools)
#   ~/.gstack-dev/             — developer eval artifacts (only present in gstack contributors)
#
# Env overrides (for testing):
#   GSTACK_DIR       — override auto-detected gstack root
#   GSTACK_STATE_DIR — override ~/.gstack state directory
#
# NOTE: Uses set -uo pipefail (no -e) — uninstall must never abort partway.
set -uo pipefail

if [ -z "${HOME:-}" ]; then
  echo "ERROR: \$HOME is not set" >&2
  exit 1
fi

GSTACK_DIR="${GSTACK_DIR:-$(cd "$(dirname "$0")/.." && pwd)}"
STATE_DIR="${GSTACK_STATE_DIR:-$HOME/.gstack}"
_GIT_ROOT="$(git rev-parse --show-toplevel 2>/dev/null || true)"

# ─── Parse flags ─────────────────────────────────────────────
FORCE=0
KEEP_STATE=0
while [ $# -gt 0 ]; do
  case "$1" in
    --force) FORCE=1; shift ;;
    --keep-state) KEEP_STATE=1; shift ;;
    -h|--help)
      sed -n '2,/^[^#]/{ /^#/s/^# \{0,1\}//p; }' "$0"
      exit 0
      ;;
    *)
      echo "Unknown option: $1" >&2
      echo "Usage: gstack-uninstall [--force] [--keep-state]" >&2
      exit 1
      ;;
  esac
done

# ─── Confirmation ────────────────────────────────────────────
if [ "$FORCE" -eq 0 ]; then
  echo "This will remove gstack from your system:"
  { [ -d "$HOME/.claude/skills/gstack" ] || [ -L "$HOME/.claude/skills/gstack" ]; } && echo " ~/.claude/skills/gstack (+ per-skill symlinks)"
  [ -d "$HOME/.codex/skills" ] && echo " ~/.codex/skills/gstack*"
  [ -d "$HOME/.factory/skills" ] && echo " ~/.factory/skills/gstack*"
  [ -d "$HOME/.kiro/skills" ] && echo " ~/.kiro/skills/gstack*"
  [ "$KEEP_STATE" -eq 0 ] && [ -d "$STATE_DIR" ] && echo " $STATE_DIR"

  if [ -n "$_GIT_ROOT" ]; then
    [ -d "$_GIT_ROOT/.claude/skills/gstack" ] && echo " $_GIT_ROOT/.claude/skills/gstack (project-local)"
    [ -d "$_GIT_ROOT/.gstack" ] && echo " $_GIT_ROOT/.gstack/ (browse state + reports)"
    [ -d "$_GIT_ROOT/.gstack-worktrees" ] && echo " $_GIT_ROOT/.gstack-worktrees/"
    [ -d "$_GIT_ROOT/.agents/skills" ] && echo " $_GIT_ROOT/.agents/skills/gstack*"
  fi

  # Preview running daemons
  if [ -n "$_GIT_ROOT" ] && [ -f "$_GIT_ROOT/.gstack/browse.json" ]; then
    _PREVIEW_PID="$(awk -F'[:,]' '/"pid"/ { for(i=1;i<=NF;i++) if($i ~ /"pid"/) { gsub(/[^0-9]/, "", $(i+1)); print $(i+1); exit } }' "$_GIT_ROOT/.gstack/browse.json" 2>/dev/null || true)"
    [ -n "$_PREVIEW_PID" ] && kill -0 "$_PREVIEW_PID" 2>/dev/null && echo " browse daemon (PID $_PREVIEW_PID) will be stopped"
  fi

  printf "\nContinue? [y/N] "
  read -r REPLY
  case "$REPLY" in
    y|Y|yes|YES) ;;
    *) echo "Aborted."; exit 0 ;;
  esac
fi

REMOVED=()

# ─── Stop running browse daemons ─────────────────────────────
# Browse servers write PID to {project}/.gstack/browse.json.
# Stop any we can find before removing state directories.
stop_browse_daemon() {
  local state_file="$1"
  if [ ! -f "$state_file" ]; then
    return
  fi
  local pid
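  # Pull the numeric "pid" value out of browse.json without jq: split on ":" and ",",
  # then strip non-digits from the field that follows the "pid" key.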
|
||||
pid="$(awk -F'[:,]' '/"pid"/ { for(i=1;i<=NF;i++) if($i ~ /"pid"/) { gsub(/[^0-9]/, "", $(i+1)); print $(i+1); exit } }' "$state_file" 2>/dev/null || true)"
|
||||
if [ -n "$pid" ] && kill -0 "$pid" 2>/dev/null; then
|
||||
kill "$pid" 2>/dev/null || true
|
||||
# Wait up to 2s for graceful shutdown
|
||||
local waited=0
|
||||
while [ "$waited" -lt 4 ] && kill -0 "$pid" 2>/dev/null; do
|
||||
sleep 0.5
|
||||
waited=$(( waited + 1 ))
|
||||
done
|
||||
if kill -0 "$pid" 2>/dev/null; then
|
||||
kill -9 "$pid" 2>/dev/null || true
|
||||
fi
|
||||
REMOVED+=("browse daemon (PID $pid)")
|
||||
fi
|
||||
}
|
||||
|
||||
# Stop daemon in current project
if [ -n "$_GIT_ROOT" ] && [ -f "$_GIT_ROOT/.gstack/browse.json" ]; then
  stop_browse_daemon "$_GIT_ROOT/.gstack/browse.json"
fi

# Stop daemons tracked in global projects directory
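# (Assumed layout for illustration: $STATE_DIR/projects/<slug>/.gstack/browse.json)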
if [ -d "$STATE_DIR/projects" ]; then
  while IFS= read -r _BJ; do
    stop_browse_daemon "$_BJ"
  done < <(find "$STATE_DIR/projects" -name browse.json -path '*/.gstack/*' 2>/dev/null || true)
fi

# ─── Remove global Claude skills ────────────────────────────
CLAUDE_SKILLS="$HOME/.claude/skills"
if [ -d "$CLAUDE_SKILLS/gstack" ] || [ -L "$CLAUDE_SKILLS/gstack" ]; then
  # Remove per-skill symlinks that point into gstack/
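  # e.g. a link like gstack-plan -> gstack/skills/gstack-plan (relative), or an
  # absolute target containing /gstack/, matches the case patterns below.
  # (Skill name illustrative.)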
  for _LINK in "$CLAUDE_SKILLS"/*; do
    [ -L "$_LINK" ] || continue
    _NAME="$(basename "$_LINK")"
    [ "$_NAME" = "gstack" ] && continue
    _TARGET="$(readlink "$_LINK" 2>/dev/null || true)"
    case "$_TARGET" in
      gstack/*|*/gstack/*) rm -f "$_LINK"; REMOVED+=("claude/$_NAME") ;;
    esac
  done

  rm -rf "$CLAUDE_SKILLS/gstack"
  REMOVED+=("~/.claude/skills/gstack")
fi

# ─── Remove project-local Claude skills (--local installs) ──
if [ -n "$_GIT_ROOT" ] && [ -d "$_GIT_ROOT/.claude/skills" ]; then
  for _LINK in "$_GIT_ROOT/.claude/skills"/*; do
    [ -L "$_LINK" ] || continue
    _TARGET="$(readlink "$_LINK" 2>/dev/null || true)"
    case "$_TARGET" in
      gstack/*|*/gstack/*) rm -f "$_LINK"; REMOVED+=("local claude/$(basename "$_LINK")") ;;
    esac
  done
  if [ -d "$_GIT_ROOT/.claude/skills/gstack" ] || [ -L "$_GIT_ROOT/.claude/skills/gstack" ]; then
    rm -rf "$_GIT_ROOT/.claude/skills/gstack"
    REMOVED+=("$_GIT_ROOT/.claude/skills/gstack")
  fi
fi

# ─── Remove Codex skills ────────────────────────────────────
CODEX_SKILLS="$HOME/.codex/skills"
if [ -d "$CODEX_SKILLS" ]; then
  for _ITEM in "$CODEX_SKILLS"/gstack*; do
    [ -e "$_ITEM" ] || [ -L "$_ITEM" ] || continue
    rm -rf "$_ITEM"
    REMOVED+=("codex/$(basename "$_ITEM")")
  done
fi

# ─── Remove Factory Droid skills ────────────────────────────
FACTORY_SKILLS="$HOME/.factory/skills"
if [ -d "$FACTORY_SKILLS" ]; then
  for _ITEM in "$FACTORY_SKILLS"/gstack*; do
    [ -e "$_ITEM" ] || [ -L "$_ITEM" ] || continue
    rm -rf "$_ITEM"
    REMOVED+=("factory/$(basename "$_ITEM")")
  done
fi

# ─── Remove Kiro skills ─────────────────────────────────────
KIRO_SKILLS="$HOME/.kiro/skills"
if [ -d "$KIRO_SKILLS" ]; then
  for _ITEM in "$KIRO_SKILLS"/gstack*; do
    [ -e "$_ITEM" ] || [ -L "$_ITEM" ] || continue
    rm -rf "$_ITEM"
    REMOVED+=("kiro/$(basename "$_ITEM")")
  done
fi

# ─── Remove per-project .agents/ sidecar ─────────────────────
if [ -n "$_GIT_ROOT" ] && [ -d "$_GIT_ROOT/.agents/skills" ]; then
  for _ITEM in "$_GIT_ROOT/.agents/skills"/gstack*; do
    [ -e "$_ITEM" ] || [ -L "$_ITEM" ] || continue
    rm -rf "$_ITEM"
    REMOVED+=("agents/$(basename "$_ITEM")")
  done

  rmdir "$_GIT_ROOT/.agents/skills" 2>/dev/null || true
  rmdir "$_GIT_ROOT/.agents" 2>/dev/null || true
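  # rmdir (not rm -rf): only removes these dirs if nothing else lives in them.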
fi

# ─── Remove per-project .factory/ sidecar ────────────────────
if [ -n "$_GIT_ROOT" ] && [ -d "$_GIT_ROOT/.factory/skills" ]; then
  for _ITEM in "$_GIT_ROOT/.factory/skills"/gstack*; do
    [ -e "$_ITEM" ] || [ -L "$_ITEM" ] || continue
    rm -rf "$_ITEM"
    REMOVED+=("factory/$(basename "$_ITEM")")
  done

  rmdir "$_GIT_ROOT/.factory/skills" 2>/dev/null || true
  rmdir "$_GIT_ROOT/.factory" 2>/dev/null || true
fi

# ─── Remove per-project state ───────────────────────────────
if [ -n "$_GIT_ROOT" ]; then
  if [ -d "$_GIT_ROOT/.gstack" ]; then
    rm -rf "$_GIT_ROOT/.gstack"
    REMOVED+=("$_GIT_ROOT/.gstack/")
  fi
  if [ -d "$_GIT_ROOT/.gstack-worktrees" ]; then
    rm -rf "$_GIT_ROOT/.gstack-worktrees"
    REMOVED+=("$_GIT_ROOT/.gstack-worktrees/")
  fi
fi

# ─── Remove global state ────────────────────────────────────
if [ "$KEEP_STATE" -eq 0 ] && [ -d "$STATE_DIR" ]; then
  rm -rf "$STATE_DIR"
  REMOVED+=("$STATE_DIR")
fi

# ─── Clean up temp files ────────────────────────────────────
for _TMP in /tmp/gstack-latest-version /tmp/gstack-sketch-*.html /tmp/gstack-sketch.png /tmp/gstack-sync-*; do
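  # Unmatched globs stay literal here (nullglob is off), so the -e test below
  # simply skips patterns that matched nothing.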
  if [ -e "$_TMP" ]; then
    rm -f "$_TMP"
    REMOVED+=("$(basename "$_TMP")")
  fi
done

# ─── Summary ────────────────────────────────────────────────
if [ ${#REMOVED[@]} -gt 0 ]; then
  echo "Removed: ${REMOVED[*]}"
  echo "gstack uninstalled."
else
  echo "Nothing to remove — gstack is not installed."
fi

exit 0
+29
-14
@@ -20,9 +20,10 @@ SNOOZE_FILE="$STATE_DIR/update-snoozed"
VERSION_FILE="$GSTACK_DIR/VERSION"
REMOTE_URL="${GSTACK_REMOTE_URL:-https://raw.githubusercontent.com/garrytan/gstack/main/VERSION}"

# ─── Force flag (busts cache for standalone /gstack-upgrade) ──
# ─── Force flag (busts cache + snooze for standalone /gstack-upgrade) ──
if [ "${1:-}" = "--force" ]; then
  rm -f "$CACHE_FILE"
  rm -f "$SNOOZE_FILE"
fi
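# (With --force, e.g. from a standalone /gstack-upgrade run, the check runs
#  fresh and ignores both the cached result and any snooze.)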

# ─── Step 0: Check if updates are disabled ────────────────────
@@ -31,6 +32,24 @@ if [ "$_UC" = "false" ]; then
  exit 0
fi

# ─── Migration: fix stale Codex descriptions (one-time) ───────
# Existing installs may have .agents/skills/gstack/SKILL.md with oversized
# descriptions (>1024 chars) that Codex rejects. We can't regenerate from
# the runtime root (no bun/scripts), so delete oversized files — the next
# ./setup or /gstack-upgrade will regenerate them properly.
# Marker file ensures this runs at most once per install.
if [ ! -f "$STATE_DIR/.codex-desc-healed" ]; then
  for _AGENTS_SKILL in "$GSTACK_DIR"/.agents/skills/*/SKILL.md; do
    [ -f "$_AGENTS_SKILL" ] || continue
    _DESC=$(awk '/^---$/{n++;next}n==1&&/^description:/{d=1;sub(/^description:[ \t]*/,"");if(length>0)print;next}d&&/^ /{sub(/^ /,"");print;next}d{d=0}' "$_AGENTS_SKILL" | wc -c | tr -d ' ')
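    # The awk above reads YAML frontmatter like (fields illustrative):
    #   ---
    #   name: gstack-plan
    #   description: first line of a long description
    #     indented continuation line
    #   ---
    # and prints the description value plus indented continuations;
    # wc -c then yields the byte count compared against 1024.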
if [ "${_DESC:-0}" -gt 1024 ]; then
|
||||
rm -f "$_AGENTS_SKILL"
|
||||
fi
|
||||
done
|
||||
mkdir -p "$STATE_DIR"
|
||||
touch "$STATE_DIR/.codex-desc-healed"
|
||||
fi
|
||||
|
||||
# ─── Snooze helper ──────────────────────────────────────────
# check_snooze <remote_version>
# Returns 0 if snoozed (should stay quiet), 1 if not snoozed (should output).
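# Illustrative call site (names assumed): check_snooze "$REMOTE_VERSION" && exit 0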
@@ -94,12 +113,11 @@ if [ -f "$MARKER_FILE" ]; then
  OLD="$(cat "$MARKER_FILE" 2>/dev/null | tr -d '[:space:]')"
  rm -f "$MARKER_FILE"
  rm -f "$SNOOZE_FILE"
  mkdir -p "$STATE_DIR"
  echo "UP_TO_DATE $LOCAL" > "$CACHE_FILE"
  if [ -n "$OLD" ]; then
    echo "JUST_UPGRADED $OLD $LOCAL"
  fi
  exit 0
  # Don't exit — fall through to remote check in case
  # more updates landed since the upgrade
fi

# ─── Step 3: Check cache freshness ──────────────────────────
@@ -141,25 +159,22 @@ fi
mkdir -p "$STATE_DIR"

# Fire Supabase install ping in background (parallel, non-blocking)
# This logs an update check event for community health metrics.
# If the endpoint isn't configured or Supabase is down, this is a no-op.
# Source Supabase config for install ping
if [ -z "${GSTACK_TELEMETRY_ENDPOINT:-}" ] && [ -f "$GSTACK_DIR/supabase/config.sh" ]; then
# This logs an update check event for community health metrics via edge function.
# If Supabase is not configured or telemetry is off, this is a no-op.
if [ -z "${GSTACK_SUPABASE_URL:-}" ] && [ -f "$GSTACK_DIR/supabase/config.sh" ]; then
  . "$GSTACK_DIR/supabase/config.sh"
fi
_SUPA_ENDPOINT="${GSTACK_TELEMETRY_ENDPOINT:-}"
_SUPA_URL="${GSTACK_SUPABASE_URL:-}"
_SUPA_KEY="${GSTACK_SUPABASE_ANON_KEY:-}"
# Respect telemetry opt-out — don't ping Supabase if user set telemetry: off
_TEL_TIER="$("$GSTACK_DIR/bin/gstack-config" get telemetry 2>/dev/null || true)"
if [ -n "$_SUPA_ENDPOINT" ] && [ -n "$_SUPA_KEY" ] && [ "${_TEL_TIER:-off}" != "off" ]; then
if [ -n "$_SUPA_URL" ] && [ -n "$_SUPA_KEY" ] && [ "${_TEL_TIER:-off}" != "off" ]; then
  _OS="$(uname -s | tr '[:upper:]' '[:lower:]')"
  curl -sf --max-time 5 \
    -X POST "${_SUPA_ENDPOINT}/update_checks" \
    -X POST "${_SUPA_URL}/functions/v1/update-check" \
    -H "Content-Type: application/json" \
    -H "apikey: ${_SUPA_KEY}" \
    -H "Authorization: Bearer ${_SUPA_KEY}" \
    -H "Prefer: return=minimal" \
    -d "{\"gstack_version\":\"$LOCAL\",\"os\":\"$_OS\"}" \
    -d "{\"version\":\"$LOCAL\",\"os\":\"$_OS\"}" \
    >/dev/null 2>&1 &
fi