diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index bccb13ff..db55ee36 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -202,7 +202,9 @@ Templates contain the workflows, tips, and examples that require human judgment. | `{{BROWSE_SETUP}}` | `gen-skill-docs.ts` | Binary discovery + setup instructions | | `{{BASE_BRANCH_DETECT}}` | `gen-skill-docs.ts` | Dynamic base branch detection for PR-targeting skills (ship, review, qa, plan-ceo-review) | | `{{QA_METHODOLOGY}}` | `gen-skill-docs.ts` | Shared QA methodology block for /qa and /qa-only | -| `{{DESIGN_METHODOLOGY}}` | `gen-skill-docs.ts` | Shared design audit methodology for /plan-design-review and /qa-design-review | +| `{{DESIGN_METHODOLOGY}}` | `gen-skill-docs.ts` | Shared design audit methodology for /plan-design-review and /design-review | +| `{{REVIEW_DASHBOARD}}` | `gen-skill-docs.ts` | Review Readiness Dashboard for /ship pre-flight | +| `{{TEST_BOOTSTRAP}}` | `gen-skill-docs.ts` | Test framework detection, bootstrap, CI/CD setup for /qa, /ship, /design-review | This is structurally sound — if a command exists in code, it appears in docs. If it doesn't exist, it can't appear. diff --git a/CHANGELOG.md b/CHANGELOG.md index d97d8b26..14f263ba 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,148 @@ # Changelog +## [0.6.4.0] - 2026-03-17 + +### Added + +- **`/plan-design-review` is now interactive — rates 0-10, fixes the plan.** Instead of producing a report with letter grades, the designer now works like CEO and Eng review: rates each design dimension 0-10, explains what a 10 looks like, then edits the plan to get there. One AskUserQuestion per design choice. The output is a better plan, not a document about the plan. +- **CEO review now calls in the designer.** When `/plan-ceo-review` detects UI scope in a plan, it activates a Design & UX section (Section 11) covering information architecture, interaction state coverage, AI slop risk, and responsive intention. 
For deep design work, it recommends `/plan-design-review`. +- **14 of 15 skills now have full test coverage (E2E + LLM-judge + validation).** Added LLM-judge quality evals for 10 skills that were missing them: ship, retro, qa-only, plan-ceo-review, plan-eng-review, plan-design-review, design-review, design-consultation, document-release, gstack-upgrade. Added real E2E test for gstack-upgrade (was a `.todo`). Added design-consultation to command validation. +- **Bisect commit style.** CLAUDE.md now requires every commit to be a single logical change — renames separate from rewrites, test infrastructure separate from test implementations. + +### Changed + +- `/qa-design-review` renamed to `/design-review` — the "qa-" prefix was confusing now that `/plan-design-review` is plan-mode. Updated across all 22 files. + +## [0.6.3.0] - 2026-03-17 + +### Added + +- **Every PR touching frontend code now gets a design review automatically.** `/review` and `/ship` apply a 20-item design checklist against changed CSS, HTML, JSX, and view files. Catches AI slop patterns (purple gradients, 3-column icon grids, generic hero copy), typography issues (body text < 16px, blacklisted fonts), accessibility gaps (`outline: none`), and `!important` abuse. Mechanical CSS fixes are auto-applied; design judgment calls ask you first. +- **`gstack-diff-scope` categorizes what changed in your branch.** Run `eval $(gstack-diff-scope main)` and get `SCOPE_FRONTEND=true/false`, `SCOPE_BACKEND`, `SCOPE_PROMPTS`, `SCOPE_TESTS`, `SCOPE_DOCS`, `SCOPE_CONFIG`. Design review uses it to skip silently on backend-only PRs. Ship pre-flight uses it to recommend design review when frontend files are touched. +- **Design review shows up in the Review Readiness Dashboard.** The dashboard now distinguishes between "LITE" (code-level, runs automatically in /review and /ship) and "FULL" (visual audit via /plan-design-review with browse binary). Both show up as Design Review entries. 
+- **E2E eval for design review detection.** Planted CSS/HTML fixtures with 7 known anti-patterns (Papyrus font, 14px body text, `outline: none`, `!important`, purple gradient, generic hero copy, 3-column feature grid). The eval verifies `/review` catches at least 4 of 7. + +## [0.6.2.0] - 2026-03-17 + +### Added + +- **Plan reviews now think like the best in the world.** `/plan-ceo-review` applies 14 cognitive patterns from Bezos (one-way doors, Day 1 proxy skepticism), Grove (paranoid scanning), Munger (inversion), Horowitz (wartime awareness), Chesky/Graham (founder mode), and Altman (leverage obsession). `/plan-eng-review` applies 15 patterns from Larson (team state diagnosis), McKinley (boring by default), Brooks (essential vs accidental complexity), Beck (make the change easy), Majors (own your code in production), and Google SRE (error budgets). `/plan-design-review` applies 12 patterns from Rams (subtraction default), Norman (time-horizon design), Zhuo (principled taste), Gebbia (design for trust, storyboard the journey), and Ive (care is visible). +- **Latent space activation, not checklists.** The cognitive patterns name-drop frameworks and people so the LLM draws on its deep knowledge of how they actually think. The instruction is "internalize these, don't enumerate them" — making each review a genuine perspective shift, not a longer checklist. + +## [0.6.1.0] - 2026-03-17 + +### Added + +- **E2E and LLM-judge tests now only run what you changed.** Each test declares which source files it depends on. When you run `bun run test:e2e`, it checks your diff and skips tests whose dependencies weren't touched. A branch that only changes `/retro` now runs 2 tests instead of 31. Use `bun run test:e2e:all` to force everything. +- **`bun run eval:select` previews which tests would run.** See exactly which tests your diff triggers before spending API credits. Supports `--json` for scripting and `--base <branch>` to override the base branch. 
+- **Completeness guardrail catches forgotten test entries.** A free unit test validates that every `testName` in the E2E and LLM-judge test files has a corresponding entry in the TOUCHFILES map. New tests without entries fail `bun test` immediately — no silent always-run degradation. + +### Changed + +- `test:evals` and `test:e2e` now auto-select based on diff (was: all-or-nothing) +- New `test:evals:all` and `test:e2e:all` scripts for explicit full runs + +## 0.6.1 — 2026-03-17 — Boil the Lake + +Every gstack skill now follows the **Completeness Principle**: always recommend the +full implementation when AI makes the marginal cost near-zero. No more "Choose B +because it's 90% of the value" when option A is 70 lines more code. + +Read the philosophy: https://garryslist.org/posts/boil-the-ocean + +- **Completeness scoring**: every AskUserQuestion option now shows a completeness + score (1-10), biasing toward the complete solution +- **Dual time estimates**: effort estimates show both human-team and CC+gstack time + (e.g., "human: ~2 weeks / CC: ~1 hour") with a task-type compression reference table +- **Anti-pattern examples**: concrete "don't do this" gallery in the preamble so the + principle isn't abstract +- **First-time onboarding**: new users see a one-time introduction linking to the + essay, with option to open in browser +- **Review completeness gaps**: `/review` now flags shortcut implementations where the + complete version costs <30 min CC time +- **Lake Score**: CEO and Eng review completion summaries show how many recommendations + chose the complete option vs shortcuts +- **CEO + Eng review dual-time**: temporal interrogation, effort estimates, and delight + opportunities all show both human and CC time scales + +## 0.6.0.1 — 2026-03-17 + +- **`/gstack-upgrade` now catches stale vendored copies automatically.** If your global gstack is up to date but the vendored copy in your project is behind, `/gstack-upgrade` detects the mismatch and syncs it. 
No more manually asking "did we vendor it?" — it just tells you and offers to update. +- **Upgrade sync is safer.** If `./setup` fails while syncing a vendored copy, gstack restores the previous version from backup instead of leaving a broken install. + +### For contributors + +- Standalone usage section in `gstack-upgrade/SKILL.md.tmpl` now references Steps 2 and 4.5 (DRY) instead of duplicating detection/sync bash blocks. Added one new version-comparison bash block. +- Update check fallback in standalone mode now matches the preamble pattern (global path → local path → `|| true`). + +## 0.6.0 — 2026-03-17 + +- **100% test coverage is the key to great vibe coding.** gstack now bootstraps test frameworks from scratch when your project doesn't have one. Detects your runtime, researches the best framework, asks you to pick, installs it, writes 3-5 real tests for your actual code, sets up CI/CD (GitHub Actions), creates TESTING.md, and adds test culture instructions to CLAUDE.md. Every Claude Code session after that writes tests naturally. +- **Every bug fix now gets a regression test.** When `/qa` fixes a bug and verifies it, Phase 8e.5 automatically generates a regression test that catches the exact scenario that broke. Tests include full attribution tracing back to the QA report. Auto-incrementing filenames prevent collisions across sessions. +- **Ship with confidence — coverage audit shows what's tested and what's not.** `/ship` Step 3.4 builds a code path map from your diff, searches for corresponding tests, and produces an ASCII coverage diagram with quality stars (★★★ = edge cases + errors, ★★ = happy path, ★ = smoke test). Gaps get tests auto-generated. PR body shows "Tests: 42 → 47 (+5 new)". +- **Your retro tracks test health.** `/retro` now shows total test files, tests added this period, regression test commits, and trend deltas. If test ratio drops below 20%, it flags it as a growth area. 
+- **Design reviews generate regression tests too.** `/qa-design-review` Phase 8e.5 skips CSS-only fixes (those are caught by re-running the design audit) but writes tests for JavaScript behavior changes like broken dropdowns or animation failures. + +### For contributors + +- Added `generateTestBootstrap()` resolver to `gen-skill-docs.ts` (~155 lines). Registered as `{{TEST_BOOTSTRAP}}` in the RESOLVERS map. Inserted into qa, ship (Step 2.5), and qa-design-review templates. +- Phase 8e.5 regression test generation added to `qa/SKILL.md.tmpl` (46 lines) and CSS-aware variant to `qa-design-review/SKILL.md.tmpl` (12 lines). Rule 13 amended to allow creating new test files. +- Step 3.4 test coverage audit added to `ship/SKILL.md.tmpl` (88 lines) with quality scoring rubric and ASCII diagram format. +- Test health tracking added to `retro/SKILL.md.tmpl`: 3 new data gathering commands, metrics row, narrative section, JSON schema field. +- `qa-only/SKILL.md.tmpl` gets recommendation note when no test framework detected. +- `qa-report-template.md` gains Regression Tests section with deferred test specs. +- ARCHITECTURE.md placeholder table updated with `{{TEST_BOOTSTRAP}}` and `{{REVIEW_DASHBOARD}}`. +- WebSearch added to allowed-tools for qa, ship, qa-design-review. +- 26 new validation tests, 2 new E2E evals (bootstrap + coverage audit). +- 2 new P3 TODOs: CI/CD for non-GitHub providers, auto-upgrade weak tests. + +## 0.5.4 — 2026-03-17 + +- **Engineering review is always the full review now.** `/plan-eng-review` no longer asks you to choose between "big change" and "small change" modes. Every plan gets the full interactive walkthrough (architecture, code quality, tests, performance). Scope reduction is only suggested when the complexity check actually triggers — not as a standing menu option. 
+- **Ship stops asking about reviews once you've answered.** When `/ship` asks about missing reviews and you say "ship anyway" or "not relevant," that decision is saved for the branch. No more getting re-asked every time you re-run `/ship` after a pre-landing fix. + +### For contributors + +- Removed SMALL_CHANGE / BIG_CHANGE / SCOPE_REDUCTION menu from `plan-eng-review/SKILL.md.tmpl`. Scope reduction is now proactive (triggered by complexity check) rather than a menu item. +- Added review gate override persistence to `ship/SKILL.md.tmpl` — writes `ship-review-override` entries to `$BRANCH-reviews.jsonl` so subsequent `/ship` runs skip the gate. +- Updated 2 E2E test prompts to match new flow. + +## 0.5.3 — 2026-03-17 + +- **You're always in control — even when dreaming big.** `/plan-ceo-review` now presents every scope expansion as an individual decision you opt into. EXPANSION mode recommends enthusiastically, but you say yes or no to each idea. No more "the agent went wild and added 5 features I didn't ask for." +- **New mode: SELECTIVE EXPANSION.** Hold your current scope as the baseline, but see what else is possible. The agent surfaces expansion opportunities one by one with neutral recommendations — you cherry-pick the ones worth doing. Perfect for iterating on existing features where you want rigor but also want to be tempted by adjacent improvements. +- **Your CEO review visions are saved, not lost.** Expansion ideas, cherry-pick decisions, and 10x visions are now persisted to `~/.gstack/projects/{repo}/ceo-plans/` as structured design documents. Stale plans get archived automatically. If a vision is exceptional, you can promote it to `docs/designs/` in your repo for the team. + +- **Smarter ship gates.** `/ship` no longer nags you about CEO and Design reviews when they're not relevant. Eng Review is the only required gate (and you can disable even that with `gstack-config set skip_eng_review true`). 
CEO Review is recommended for big product changes; Design Review for UI work. The dashboard still shows all three — it just won't block you for the optional ones. + +### For contributors + +- Added SELECTIVE EXPANSION mode to `plan-ceo-review/SKILL.md.tmpl` with cherry-pick ceremony, neutral recommendation posture, and HOLD SCOPE baseline. +- Rewrote EXPANSION mode's Step 0D to include opt-in ceremony — distill vision into discrete proposals, present each as AskUserQuestion. +- Added CEO plan persistence (0D-POST step): structured markdown with YAML frontmatter (`status: ACTIVE/ARCHIVED/PROMOTED`), scope decisions table, archival flow. +- Added `docs/designs` promotion step after Review Log. +- Mode Quick Reference table expanded to 4 columns. +- Review Readiness Dashboard: Eng Review required (overridable via `skip_eng_review` config), CEO/Design optional with agent judgment. +- New tests: CEO review mode validation (4 modes, persistence, promotion), SELECTIVE EXPANSION E2E test. + +## 0.5.2 — 2026-03-17 + +- **Your design consultant now takes creative risks.** `/design-consultation` doesn't just propose a safe, coherent system — it explicitly breaks down SAFE CHOICES (category baseline) vs. RISKS (where your product stands out). You pick which rules to break. Every risk comes with a rationale for why it works and what it costs. +- **See the landscape before you choose.** When you opt into research, the agent browses real sites in your space with screenshots and accessibility tree analysis — not just web search results. You see what's out there before making design decisions. +- **Preview pages that look like your product.** The preview page now renders realistic product mockups — dashboards with sidebar nav and data tables, marketing pages with hero sections, settings pages with forms — not just font swatches and color palettes. 
+ +## 0.5.1 — 2026-03-17 +- **Know where you stand before you ship.** Every `/plan-ceo-review`, `/plan-eng-review`, and `/plan-design-review` now logs its result to a review tracker. At the end of each review, you see a **Review Readiness Dashboard** showing which reviews are done, when they ran, and whether they're clean — with a clear CLEARED TO SHIP or NOT READY verdict. +- **`/ship` checks your reviews before creating the PR.** Pre-flight now reads the dashboard and asks if you want to continue when reviews are missing. Informational only — it won't block you, but you'll know what you skipped. +- **One less thing to copy-paste.** The SLUG computation (that opaque sed pipeline for computing `owner-repo` from git remote) is now a shared `bin/gstack-slug` helper. All 14 inline copies across templates replaced with `eval $(gstack-slug)`. If the format ever changes, fix it once. +- **Screenshots are now visible during QA and browse sessions.** When gstack takes screenshots, they now show up as clickable image elements in your output — no more invisible `/tmp/browse-screenshot.png` paths you can't see. Works in `/qa`, `/qa-only`, `/plan-design-review`, `/qa-design-review`, `/browse`, and `/gstack`. + +### For contributors + +- Added `{{REVIEW_DASHBOARD}}` resolver to `gen-skill-docs.ts` — shared dashboard reader injected into 4 templates (3 review skills + ship). +- Added `bin/gstack-slug` helper (5-line bash) with unit tests. Outputs `SLUG=` and `BRANCH=` lines, sanitizes `/` to `-`. +- New TODOs: smart review relevance detection (P3), `/merge` skill for review-gated PR merge (P2). + ## 0.5.0 — 2026-03-16 - **Your site just got a design review.** `/plan-design-review` opens your site and reviews it like a senior product designer — typography, spacing, hierarchy, color, responsive, interactions, and AI slop detection. Get letter grades (A-F) per category, a dual headline "Design Score" + "AI Slop Score", and a structured first impression that doesn't pull punches. 
diff --git a/CLAUDE.md b/CLAUDE.md index 9160fd22..17817471 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -5,8 +5,11 @@ ```bash bun install # install dependencies bun test # run free tests (browse + snapshot + skill validation) -bun run test:evals # run paid evals: LLM judge + E2E (~$4/run) -bun run test:e2e # run E2E tests only (~$3.85/run) +bun run test:evals # run paid evals: LLM judge + E2E (diff-based, ~$4/run max) +bun run test:evals:all # run ALL paid evals regardless of diff +bun run test:e2e # run E2E tests only (diff-based, ~$3.85/run max) +bun run test:e2e:all # run ALL E2E tests regardless of diff +bun run eval:select # show which tests would run based on current diff bun run dev # run CLI in dev mode, e.g. bun run dev goto https://example.com bun run build # gen docs + compile binaries bun run gen:skill-docs # regenerate SKILL.md files from templates @@ -22,6 +25,12 @@ bun run eval:trend # per-test pass rate trends (flaky detection) (tool-by-tool via `--output-format stream-json --verbose`). Results are persisted to `~/.gstack-dev/evals/` with auto-comparison against the previous run. +**Diff-based test selection:** `test:evals` and `test:e2e` auto-select tests based +on `git diff` against the base branch. Each test declares its file dependencies in +`test/helpers/touchfiles.ts`. Changes to global touchfiles (session-runner, eval-store, +llm-judge, gen-skill-docs) trigger all tests. Use `EVALS_ALL=1` or the `:all` script +variants to force all tests. Run `eval:select` to preview which tests would run. 
+ ## Project structure ``` @@ -45,7 +54,7 @@ gstack/ │ └── skill-e2e.test.ts # Tier 2: E2E via claude -p (~$3.85/run) ├── qa-only/ # /qa-only skill (report-only QA, no fixes) ├── plan-design-review/ # /plan-design-review skill (report-only design audit) -├── qa-design-review/ # /qa-design-review skill (design audit + fix loop) +├── design-review/ # /design-review skill (design audit + fix loop) ├── ship/ # Ship workflow skill ├── review/ # PR review skill ├── plan-ceo-review/ # /plan-ceo-review skill @@ -111,6 +120,22 @@ symlink or a real copy. If it's a symlink to your working directory, be aware th gen-skill-docs pipeline, consider whether the changes should be tested in isolation before going live (especially if the user is actively using gstack in other windows). +## Commit style + +**Always bisect commits.** Every commit should be a single logical change. When +you've made multiple changes (e.g., a rename + a rewrite + new tests), split them +into separate commits before pushing. Each commit should be independently +understandable and revertable. + +Examples of good bisection: +- Rename/move separate from behavior changes +- Test infrastructure (touchfiles, helpers) separate from test implementations +- Template changes separate from generated file regeneration +- Mechanical refactors separate from new features + +When the user says "bisect commit" or "bisect and push," split staged/unstaged +changes into logical commits and push. + ## CHANGELOG style CHANGELOG.md is **for users**, not contributors. Write it like product release notes: @@ -122,6 +147,23 @@ CHANGELOG.md is **for users**, not contributors. Write it like product release n - No jargon: say "every question now tells you which project and branch you're in" not "AskUserQuestion format standardized across skill templates via preamble resolver." 
+## AI effort compression + +When estimating or discussing effort, always show both human-team and CC+gstack time: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +Completeness is cheap. Don't recommend shortcuts when the complete implementation +is a "lake" (achievable) not an "ocean" (multi-quarter migration). See the +Completeness Principle in the skill preamble for the full philosophy. + ## Local plans Contributors can store long-range vision docs and design documents in `~/.gstack-dev/plans/`. diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8f26f2ea..215b166f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -54,6 +54,10 @@ the issue, fix it, and open a PR. This is the best way to contribute: fix gstack while doing your real work, in the project where you actually felt the pain. +### Session awareness + +When you have 3+ gstack sessions open simultaneously, every question tells you which project, which branch, and what's happening. No more staring at a question thinking "wait, which window is this?" The format is consistent across all 13 skills. + ## Working on gstack inside the gstack repo When you're editing gstack skills and want to test them by actually using gstack diff --git a/README.md b/README.md index ecd3bc08..925c5406 100644 --- a/README.md +++ b/README.md @@ -1,842 +1,195 @@ # gstack -**gstack turns Claude Code from one generic assistant into a team of specialists you can summon on demand.** +Hi, I'm [Garry Tan](https://x.com/garrytan). 
I'm President & CEO of [Y Combinator](https://www.ycombinator.com/), where I've worked with thousands of startups including Coinbase, Instacart, and Rippling when the founders were just one or two people in a garage — companies now worth tens of billions of dollars. Before YC, I designed the Palantir logo and was one of the first eng manager/PM/designers there. I cofounded Posterous, a blog platform we sold to Twitter. I built Bookface, YC's internal social network, back in 2013. I've been building products as a designer, PM, and eng manager for a long time. -Thirteen opinionated workflow skills for [Claude Code](https://docs.anthropic.com/en/docs/claude-code). Plan review, design review, code review, one-command shipping, browser automation, QA testing, engineering retrospectives, and post-ship documentation — all as slash commands. +And right now I am in the middle of something that feels like a new era entirely. -### Without gstack +In the last 60 days I have written **over 600,000 lines of production code** — 35% tests — and I am doing **10,000 to 20,000 usable lines of code per day** as a part-time part of my day while doing all my duties as CEO of YC. That is not a typo. My last `/retro` (developer stats from the last 7 days) across 3 projects: **140,751 lines added, 362 commits, ~115k net LOC**. The models are getting dramatically better every week. We are at the dawn of something real — one person shipping at a scale that used to require a team of twenty. 
-- The agent takes your request literally — it never asks if you're building the right thing -- It will implement exactly what you said, even when the real product is something bigger -- "Review my PR" gives inconsistent depth every time -- "Ship this" turns into a long back-and-forth about what to do -- The agent can write code but can't see your app — it's half blind -- You still do QA by hand: open browser, click around, check pages, squint at layouts +**2026 — 1,237 contributions and counting:** -### With gstack +![GitHub contributions 2026 — 1,237 contributions, massive acceleration in Jan-Mar](docs/images/github-2026.png) -| Skill | Mode | What it does | -|-------|------|--------------| -| `/plan-ceo-review` | Founder / CEO | Rethink the problem. Find the 10-star product hiding inside the request. | -| `/plan-eng-review` | Eng manager / tech lead | Lock in architecture, data flow, diagrams, edge cases, and tests. | -| `/plan-design-review` | Senior product designer | Designer's eye audit. 80-item checklist, letter grades, AI Slop detection, DESIGN.md inference. Report only — never touches code. | -| `/design-consultation` | Design consultant | Build a complete design system from scratch. Researches competitors, proposes aesthetic + typography + color + spacing + motion, generates a preview page, and writes DESIGN.md. | -| `/review` | Paranoid staff engineer | Find the bugs that pass CI but blow up in production. Triages Greptile review comments. | -| `/ship` | Release engineer | Sync main, run tests, resolve Greptile reviews, push, open PR. For a ready branch, not for deciding what to build. | -| `/browse` | QA engineer | Give the agent eyes. It logs in, clicks through your app, takes screenshots, catches breakage. Full QA pass in 60 seconds. | -| `/qa` | QA + fix engineer | Test app, find bugs, fix them with atomic commits, re-verify. Before/after health scores and ship-readiness summary. Three tiers: Quick, Standard, Exhaustive. 
| -| `/qa-only` | QA reporter | Report-only QA testing. Same methodology as /qa but never fixes anything. Use when you want a pure bug report without code changes. | -| `/qa-design-review` | Designer + frontend engineer | Same design audit as /plan-design-review, then fixes what it finds. Atomic `style(design):` commits, before/after screenshots, CSS-safe self-regulation. | -| `/setup-browser-cookies` | Session manager | Import cookies from your real browser (Comet, Chrome, Arc, Brave, Edge) into the headless session. Test authenticated pages without logging in manually. | -| `/retro` | Engineering manager | Team-aware retro: your deep-dive + per-person praise and growth opportunities for every contributor. | -| `/document-release` | Technical writer | Update README, ARCHITECTURE, CONTRIBUTING, and project docs to match what you just shipped. | +**2013 — when I built Bookface at YC (772 contributions):** -## Demo: one feature, five modes +![GitHub contributions 2013 — 772 contributions building Bookface at YC](docs/images/github-2013.png) -I start almost every feature in plan mode. I describe what I want to build, then use `/plan-ceo-review` to pressure-test whether I am even building the right thing. Only after the product direction is locked do I shift into engineering, review, ship, and test. +Same person. Different era. The difference is the tooling. -``` -You: [enter plan mode, describe the feature] - I want to add seller photo upload to the listing app. Sellers - should be able to upload a photo of their item and we create - the listing from it. +**gstack is how I do it.** It is my open source software factory. It turns Claude Code into a virtual engineering team you actually manage — a CEO who rethinks the product, an eng manager who locks the architecture, a designer who catches AI slop, a paranoid reviewer who finds production bugs, a QA lead who opens a real browser and clicks through your app, and a release engineer who ships the PR. 
Thirteen specialists, all as slash commands, all Markdown, **all free, MIT license, available right now.** -You: /plan-ceo-review +I am learning how to get to the edge of what agentic systems can do as of March 2026, and this is my live experiment. I am sharing it because I want the whole world on this journey with me. -Claude: "Photo upload" is not the feature. The real job is helping sellers - create listings that actually sell. Here's what the 10-star version - looks like: auto-identify the product from the photo, pull specs and - pricing comps from the web, draft the title and description, suggest - the best hero image... +Fork it. Improve it. Make it yours. Don't player hate, appreciate. -You: /plan-eng-review +**Who this is for:** +- **Founders and CEOs** — especially technical ones who still want to ship. This is how you build like a team of twenty. +- **First-time Claude Code users** — gstack is the best way to start. Structured roles instead of a blank prompt. +- **Tech leads and staff engineers** — bring rigorous review, QA, and release automation to every PR -Claude: [Architecture diagram, state machine for upload → classify → enrich - → draft pipeline, async job boundaries, failure modes, test matrix] +## Quick start: your first 10 minutes -You: [exit plan mode, implement the plan] +1. Install gstack (30 seconds — see below) +2. Run `/plan-ceo-review` on any feature idea +3. Run `/review` on any branch with changes +4. Run `/qa` on your staging URL +5. Stop there. You'll know if this is for you. -You: /review +Expect first useful run in under 5 minutes on any repo with tests already set up. -Claude: Race condition: two tabs can overwrite cover-photo selection. - Trust boundary: pulling web data into draft generation creates - a prompt injection vector. Missing: orphan cleanup for failed uploads. 
+**If you only read one more section, read this one.** -You: (fix the issues) +## Install — takes 30 seconds -You: /ship - -Claude: [Syncs main, runs tests, pushes branch, opens PR — 6 tool calls, done] - -You: /qa - -Claude: Analyzing branch diff... 8 files changed, 3 routes affected. - [Tests /listings/new, /listings/:id, /api/listings against localhost:3000] - All 3 routes working. Upload + enrichment flow passes end to end. - -You: /setup-browser-cookies staging.myapp.com - -Claude: Imported 8 cookies for staging.myapp.com from Chrome. - -You: /qa https://staging.myapp.com --quick - -Claude: [Smoke test: homepage + 5 pages, 30 seconds] - Health Score: 91/100. No critical issues. 1 medium: mobile nav overlap. - -You: /plan-design-review https://staging.myapp.com - -Claude: Design Score: B | AI Slop Score: C - - "The site communicates competence but not confidence." - "I notice the spacing between sections is uniform — nothing feels important." - "The first 3 things my eye goes to are: the gradient hero, a 3-column - feature grid with icons in circles, and a cookie-cutter CTA." - "If I had to describe this in one word: template." - - Top issues: generic typography (Inter everywhere), AI slop patterns - (3-column icon grid, purple gradient), flat heading scale. - - [Full report with letter grades, 12 findings, inferred design system] - Want me to save this as your DESIGN.md? - -You: /qa-design-review - -Claude: [Runs the same audit, then fixes 8 design issues] - style(design): FINDING-001 — replace Inter with system font stack - style(design): FINDING-003 — add heading scale contrast - style(design): FINDING-007 — break 3-column grid into asymmetric layout - Design Score: B → A- | AI Slop Score: C → A -``` - -## Who this is for - -You already use Claude Code heavily and want consistent, high-rigor workflows instead of one mushy generic mode. 
You want to tell the model what kind of brain to use right now — founder taste, engineering rigor, paranoid review, or fast execution. - -This is not a prompt pack for beginners. It is an operating system for people who ship. - -## How to fly: 10 sessions at once - -gstack is powerful with one Claude Code session. It is transformative with ten. - -[Conductor](https://conductor.build) runs multiple Claude Code sessions in parallel — each in its own isolated workspace. That means you can have one session running `/qa` on staging, another doing `/review` on a PR, a third implementing a feature, and seven more working on other branches. All at the same time. - -Each workspace gets its own isolated browser instance automatically — separate Chromium process, cookies, tabs, and logs stored in `.gstack/` inside each project root. No port collisions, no shared state, no configuration needed. `/browse` and `/qa` sessions never interfere with each other, even across ten parallel workspaces. - -This is the setup I use. One person, ten parallel agents, each with the right cognitive mode for its task. That is not incremental improvement. That is a different way of building software. - -## Install - -**Requirements:** [Claude Code](https://docs.anthropic.com/en/docs/claude-code), [Git](https://git-scm.com/), [Bun](https://bun.sh/) v1.0+. `/browse` compiles a native binary — works on macOS and Linux (x64 and arm64). +**Requirements:** [Claude Code](https://docs.anthropic.com/en/docs/claude-code), [Git](https://git-scm.com/), [Bun](https://bun.sh/) v1.0+ ### Step 1: Install on your machine -Open Claude Code and paste this. Claude will do the rest. +Open Claude Code and paste this. Claude does the rest. 
-> Install gstack: run `git clone https://github.com/garrytan/gstack.git ~/.claude/skills/gstack && cd ~/.claude/skills/gstack && ./setup` then add a "gstack" section to CLAUDE.md that says to use the /browse skill from gstack for all web browsing, never use mcp\_\_claude-in-chrome\_\_\* tools, and lists the available skills: /plan-ceo-review, /plan-eng-review, /plan-design-review, /design-consultation, /review, /ship, /browse, /qa, /qa-only, /qa-design-review, /setup-browser-cookies, /retro, /document-release. Then ask the user if they also want to add gstack to the current project so teammates get it. +> Install gstack: run **`git clone https://github.com/garrytan/gstack.git ~/.claude/skills/gstack && cd ~/.claude/skills/gstack && ./setup`** then add a "gstack" section to CLAUDE.md that says to use the /browse skill from gstack for all web browsing, never use mcp\_\_claude-in-chrome\_\_\* tools, and lists the available skills: /plan-ceo-review, /plan-eng-review, /plan-design-review, /design-consultation, /review, /ship, /browse, /qa, /qa-only, /design-review, /setup-browser-cookies, /retro, /document-release. Then ask the user if they also want to add gstack to the current project so teammates get it. ### Step 2: Add to your repo so teammates get it (optional) -> Add gstack to this project: run `cp -Rf ~/.claude/skills/gstack .claude/skills/gstack && rm -rf .claude/skills/gstack/.git && cd .claude/skills/gstack && ./setup` then add a "gstack" section to this project's CLAUDE.md that says to use the /browse skill from gstack for all web browsing, never use mcp\_\_claude-in-chrome\_\_\* tools, lists the available skills: /plan-ceo-review, /plan-eng-review, /plan-design-review, /design-consultation, /review, /ship, /browse, /qa, /qa-only, /qa-design-review, /setup-browser-cookies, /retro, /document-release, and tells Claude that if gstack skills aren't working, run `cd .claude/skills/gstack && ./setup` to build the binary and register skills. 
+> Add gstack to this project: run **`cp -Rf ~/.claude/skills/gstack .claude/skills/gstack && rm -rf .claude/skills/gstack/.git && cd .claude/skills/gstack && ./setup`** then add a "gstack" section to this project's CLAUDE.md that says to use the /browse skill from gstack for all web browsing, never use mcp\_\_claude-in-chrome\_\_\* tools, lists the available skills: /plan-ceo-review, /plan-eng-review, /plan-design-review, /design-consultation, /review, /ship, /browse, /qa, /qa-only, /design-review, /setup-browser-cookies, /retro, /document-release, and tells Claude that if gstack skills aren't working, run `cd .claude/skills/gstack && ./setup` to build the binary and register skills. -Real files get committed to your repo (not a submodule), so `git clone` just works. The binary and node\_modules are gitignored — teammates just need to run `cd .claude/skills/gstack && ./setup` once to build (or `/browse` handles it automatically on first use). +Real files get committed to your repo (not a submodule), so `git clone` just works. Everything lives inside `.claude/`. Nothing touches your PATH or runs in the background. -### What gets installed - -- Skill files (Markdown prompts) in `~/.claude/skills/gstack/` (or `.claude/skills/gstack/` for project installs) -- Symlinks at `~/.claude/skills/browse`, `~/.claude/skills/qa`, `~/.claude/skills/review`, etc. pointing into the gstack directory -- Browser binary at `browse/dist/browse` (~58MB, gitignored) -- `node_modules/` (gitignored) -- `/retro` saves JSON snapshots to `.context/retros/` in your project for trend tracking - -Everything lives inside `.claude/`. Nothing touches your PATH or runs in the background. - ---- +## See it work ``` -+----------------------------------------------------------------------------+ -| | -| Are you a great software engineer who loves to write 10K LOC/day | -| and land 10 PRs a day like Garry? 
| -| | -| Come work at YC: ycombinator.com/software | -| | -| Extremely competitive salary and equity. | -| Now hiring in San Francisco, Dogpatch District. | -| Come join the revolution. | -| | -+----------------------------------------------------------------------------+ +You: I want to add photo upload for sellers. +You: /plan-ceo-review +Claude: "Photo upload" is not the feature. The real job is helping + sellers create listings that actually sell. What if we + auto-identify the product, pull specs and comps from the + web, and draft the listing automatically? That's 10 stars. + "Upload a photo" is 3 stars. Which are we building? + [8 expansion proposals, you cherry-pick 5, defer 3 to backlog] + +You: /plan-design-review +Claude: Design Score: B | AI Slop Score: C + "Upload flow looks like a default Bootstrap form." + [80-item audit, infers your design system, exports DESIGN.md] + [flags 3 AI slop patterns: gradient hero, icon grid, uniform radius] + +You: /plan-eng-review +Claude: ┌─────────┐ ┌──────────┐ ┌──────────┐ ┌─────────┐ + │ Upload │───▶│ Classify │───▶│ Enrich │───▶│ Draft │ + │ (sync) │ │ (async) │ │ (async) │ │ (async) │ + └─────────┘ └──────────┘ └──────────┘ └─────────┘ + [ASCII diagrams for every data flow, state machine, error path] + [14-case test matrix, 6 failure modes mapped, 3 security concerns] + +You: Approve plan. Exit plan mode. + [Claude writes 2,400 lines across 11 files — models, services, + controllers, views, migrations, and tests. ~8 minutes.] + +You: /review +Claude: [AUTO-FIXED] Orphan S3 cleanup on failed upload + [AUTO-FIXED] Missing index on listings.status + [ASK] Race condition on hero image selection → You: yes + [traces every new enum value through all switch statements] + 3 issues — 2 auto-fixed, 1 fixed. 
+ +You: /qa https://staging.myapp.com +Claude: [opens real browser, logs in, uploads photos, clicks through flows] + Upload → classify → enrich → draft: end to end ✓ + Mobile: ✓ | Slow connection: ✓ | Bad image: ✓ + [finds bug: preview doesn't clear on second upload — fixes it] + Regression test generated. + +You: /ship +Claude: Tests: 42 → 51 (+9 new) + Coverage: 14/14 code paths (100%) + PR: github.com/you/app/pull/42 ``` ---- +One feature. Six commands. The agent reframed the product, ran an 80-item design audit, drew the architecture, wrote 2,400 lines of code, found a race condition I would have missed, auto-fixed two issues, opened a real browser to QA test, found and fixed a bug I didn't know about, wrote 9 tests, and generated a regression test. That is not a copilot. That is a team. -## How I use these skills +## The team -Created by [Garry Tan](https://x.com/garrytan), President & CEO of [Y Combinator](https://www.ycombinator.com/). +| Skill | Your specialist | What they do | +|-------|----------------|--------------| +| `/plan-ceo-review` | **CEO / Founder** | Rethink the problem. Find the 10-star product hiding inside the request. Four modes: Expansion, Selective Expansion, Hold Scope, Reduction. | +| `/plan-eng-review` | **Eng Manager** | Lock in architecture, data flow, diagrams, edge cases, and tests. Forces hidden assumptions into the open. | +| `/plan-design-review` | **Senior Designer** | Interactive plan-mode design review — rates each design dimension 0-10, explains what a 10 looks like, then edits the plan to get there. | +| `/design-consultation` | **Design Partner** | Build a complete design system from scratch. Knows the landscape, proposes creative risks, generates realistic product mockups. Design at the heart of all other phases. | +| `/review` | **Staff Engineer** | Find the bugs that pass CI but blow up in production. Auto-fixes the obvious ones. Flags completeness gaps. 
| +| `/ship` | **Release Engineer** | Sync main, run tests, audit coverage, push, open PR. Bootstraps test frameworks if you don't have one. One command. | +| `/browse` | **QA Engineer** | Give the agent eyes. Real Chromium browser, real clicks, real screenshots. ~100ms per command. | +| `/qa` | **QA Lead** | Test your app, find bugs, fix them with atomic commits, re-verify. Auto-generates regression tests for every fix. | +| `/qa-only` | **QA Reporter** | Same methodology as /qa but report only. Use when you want a pure bug report without code changes. | +| `/design-review` | **Designer Who Codes** | Runs the full 80-item design audit against your live site, then fixes what it finds. Atomic commits, before/after screenshots. | +| `/setup-browser-cookies` | **Session Manager** | Import cookies from your real browser (Chrome, Arc, Brave, Edge) into the headless session. Test authenticated pages. | +| `/retro` | **Eng Manager** | Team-aware weekly retro. Per-person breakdowns, shipping streaks, test health trends, growth opportunities. | +| `/document-release` | **Technical Writer** | Update all project docs to match what you just shipped. Catches stale READMEs automatically. | -I built gstack because I do not want AI coding tools stuck in one mushy mode. +**[Deep dives with examples and philosophy for every skill →](docs/skills.md)** -Planning is not review. Review is not shipping. Founder taste is not engineering rigor. If you blur all of that together, you usually get a mediocre blend of all four. +## What's new and why it matters -I want explicit gears. +**Design is at the heart.** `/design-consultation` doesn't just pick fonts. It researches what's out there in your space, proposes safe choices AND creative risks, generates realistic mockups of your actual product, and writes `DESIGN.md` — and then `/design-review` and `/plan-eng-review` read what you chose. Design decisions flow through the whole system. -These skills let me tell the model what kind of brain I want right now. 
I can switch cognitive modes on demand — founder, eng manager, paranoid reviewer, release machine. That is the unlock. +**`/qa` was a massive unlock.** It let me go from 6 to 12 parallel workers. Claude Code saying *"I SEE THE ISSUE"* and then actually fixing it, generating a regression test, and verifying the fix — that changed how I work. The agent has eyes now. ---- +**Smart review routing.** Just like at a well-run startup: CEO doesn't have to look at infra bug fixes, design review isn't needed for backend changes. gstack tracks what reviews are run, figures out what's appropriate, and just does the smart thing. The Review Readiness Dashboard tells you where you stand before you ship. -## `/plan-ceo-review` +**Test everything.** `/ship` bootstraps test frameworks from scratch if your project doesn't have one. Every `/ship` run produces a coverage audit. Every `/qa` bug fix generates a regression test. 100% test coverage is the goal — tests make vibe coding safe instead of yolo coding. -This is my **founder mode**. +**`/document-release` is the engineer you never had.** It reads every doc file in your project, cross-references the diff, and updates everything that drifted. README, ARCHITECTURE, CONTRIBUTING, CLAUDE.md, TODOS — all kept current automatically. -This is where I want the model to think with taste, ambition, user empathy, and a long time horizon. I do not want it taking the request literally. I want it asking a more important question first: +## 10 sessions at once -**What is this product actually for?** +gstack is powerful with one session. It is transformative with ten. -I think of this as **Brian Chesky mode**. +[Conductor](https://conductor.build) runs multiple Claude Code sessions in parallel — each in its own isolated workspace. One session running `/qa` on staging, another doing `/review` on a PR, a third implementing a feature, and seven more on other branches. All at the same time. -The point is not to implement the obvious ticket. 
The point is to rethink the problem from the user's point of view and find the version that feels inevitable, delightful, and maybe even a little magical. +One person, ten parallel agents, each with the right cognitive mode. That is a different way of building software. -### Example +## Come ride the wave -Say I am building a Craigslist-style listing app and I say: +This is **free, MIT licensed, open source, available now.** No premium tier. No waitlist. No strings. -> "Let sellers upload a photo for their item." +I open sourced how I do development and I am actively upgrading my own software factory here. You can fork it and make it your own. That's the whole point. I want everyone on this journey. -A weak assistant will add a file picker and save an image. +Same tools, different outcome — because gstack gives you structured roles and review gates, not generic agent chaos. That governance is the difference between shipping fast and shipping reckless. -That is not the real product. +The models are getting better fast. The people who figure out how to work with them now — really work with them, not just dabble — are going to have a massive advantage. This is that window. Let's go. -In `/plan-ceo-review`, I want the model to ask whether "photo upload" is even the feature. Maybe the real feature is helping someone create a listing that actually sells. +Thirteen specialists. All slash commands. All Markdown. All free. **[github.com/garrytan/gstack](https://github.com/garrytan/gstack)** — MIT License -If that is the real job, the whole plan changes. +> **We're hiring.** Want to ship 10K+ LOC/day and help harden gstack? +> Come work at YC — [ycombinator.com/software](https://ycombinator.com/software) +> Extremely competitive salary and equity. San Francisco, Dogpatch District. -Now the model should ask: +## Docs -* Can we identify the product from the photo? -* Can we infer the SKU or model number? 
-* Can we search the web and draft the title and description automatically? -* Can we pull specs, category, and pricing comps? -* Can we suggest which photo will convert best as the hero image? -* Can we detect when the uploaded photo is ugly, dark, cluttered, or low-trust? -* Can we make the experience feel premium instead of like a dead form from 2007? - -That is what `/plan-ceo-review` does for me. - -It does not just ask, "how do I add this feature?" -It asks, **"what is the 10-star product hiding inside this request?"** - -That is a very different kind of power. - ---- - -## `/plan-eng-review` - -This is my **eng manager mode**. - -Once the product direction is right, I want a different kind of intelligence entirely. I do not want more sprawling ideation. I do not want more "wouldn't it be cool if." I want the model to become my best technical lead. - -This mode should nail: - -* architecture -* system boundaries -* data flow -* state transitions -* failure modes -* edge cases -* trust boundaries -* test coverage - -And one surprisingly big unlock for me: **diagrams**. - -LLMs get way more complete when you force them to draw the system. Sequence diagrams, state diagrams, component diagrams, data-flow diagrams, even test matrices. Diagrams force hidden assumptions into the open. They make hand-wavy planning much harder. - -So `/plan-eng-review` is where I want the model to build the technical spine that can carry the product vision. - -### Example - -Take the same listing app example. - -Let's say `/plan-ceo-review` already did its job. We decided the real feature is not just photo upload. It is a smart listing flow that: - -* uploads photos -* identifies the product -* enriches the listing from the web -* drafts a strong title and description -* suggests the best hero image - -Now `/plan-eng-review` takes over. - -Now I want the model to answer questions like: - -* What is the architecture for upload, classification, enrichment, and draft generation? 
-* Which steps happen synchronously, and which go to background jobs? -* Where are the boundaries between app server, object storage, vision model, search/enrichment APIs, and the listing database? -* What happens if upload succeeds but enrichment fails? -* What happens if product identification is low-confidence? -* How do retries work? -* How do we prevent duplicate jobs? -* What gets persisted when, and what can be safely recomputed? - -And this is where I want diagrams — architecture diagrams, state models, data-flow diagrams, test matrices. Diagrams force hidden assumptions into the open. They make hand-wavy planning much harder. - -That is `/plan-eng-review`. - -Not "make the idea smaller." -**Make the idea buildable.** - ---- - -## `/plan-design-review` - -This is my **senior designer mode**. - -Most developers cannot tell whether their site looks AI-generated. I could not, until I started paying attention. There is a growing class of sites that are functional but soulless — they work fine but scream "an AI built this and nobody with taste looked at it." Purple gradients, 3-column icon grids, uniform bubbly border-radius on everything, centered text on every section, decorative blobs floating in the background. The ChatGPT aesthetic. - -`/plan-design-review` gives the agent a designer's eye. - -It opens your site and reacts to it the way a Stripe or Linear designer would — immediately, viscerally, with opinions. The first output is a structured gut reaction: what the site communicates at a glance, what the eye is drawn to, and a one-word verdict. That is the most valuable part. Everything after is supporting evidence. - -Then it extracts your actual design system by reading the CSS: how many fonts are you really using? What is your heading scale? How many colors? Is your spacing on a grid or arbitrary? 
It structures this as an **Inferred Design System** and offers to save it as a `DESIGN.md` — so now you have a design system you never had before, derived from what you actually shipped. - -The audit covers 80 items across 10 categories. Typography, spacing, hierarchy, color, responsive design, interaction states, motion, content quality, performance feel, and — the headline feature — **AI Slop Detection**. Ten specific anti-patterns that mark a site as machine-generated. - -Every finding gets a letter grade (A through F) per category. The report opens with two headline scores: **Design Score** and **AI Slop Score**. "Your site got a B on design but a D on AI Slop" tells you exactly where to focus. - -### Example - -``` -You: /plan-design-review https://myapp.com - -Claude: Design Score: C | AI Slop Score: D - - "The site communicates generic SaaS energy." - "I notice the hero section uses a blue-to-purple gradient - with a 3-column feature grid below it — the single most - recognizable AI-generated layout pattern." - "The first 3 things my eye goes to are: the gradient, the - symmetrical icon grid, and a 'Get Started' button that - looks like every other 'Get Started' button." - "If I had to describe this in one word: template." - - Inferred Design System: - - Fonts: Inter (body), Inter (headings) — 1 font family - - Colors: 14 unique non-gray colors (flag: >12) - - Heading scale: 36/24/18/16 — flat, no drama between sizes - - Spacing: mostly 16px and 24px — no rhythm - - Top 5 Design Improvements: - 1. Break the 3-column icon grid (High, AI Slop) - 2. Add heading scale contrast — at least 2:1 ratio (High, Typography) - 3. Replace uniform border-radius with intentional hierarchy (Medium, Spacing) - 4. Kill the gradient hero — use a real image or bold typography (High, AI Slop) - 5. 
Add a second font for headings — Inter-only reads as generic (Medium, Typography) - - [Full report saved to .gstack/design-reports/] - Want me to save this inferred design system as your DESIGN.md? -``` - -This is report only — it never touches your code. Use `/qa-design-review` when you want it to fix what it finds. - ---- - -## `/design-consultation` - -This is my **design partner mode**. - -`/plan-design-review` audits a site that already exists. `/design-consultation` is for when you have nothing yet — no design system, no font choices, no color palette. You are starting from zero and you want a senior designer to sit down with you and build the whole visual identity together. - -It is a conversation, not a form. The agent asks about your product, your users, and your space. If you want, it researches what top competitors in your category are doing — fonts, colors, layout patterns, aesthetic direction. Then it proposes a complete, coherent design system: aesthetic direction, typography (3+ fonts with specific roles), color palette with hex values, spacing scale, layout approach, and motion strategy. Every recommendation comes with a rationale. Every choice reinforces every other choice. - -The key insight: individual design decisions are easy to make but hard to make coherently. Picking a font is simple. Picking a font that works with your color palette, your spacing density, your aesthetic direction, and your product's personality — that is the actual job. `/design-consultation` solves the coherence problem by proposing everything as one package, then letting you adjust any piece while it checks that the rest still holds together. - -After you agree on the system, it generates an interactive HTML preview page — your fonts rendered at real sizes, your color palette as actual UI components (buttons, cards, form inputs, alerts), light/dark mode toggle, all using your product name instead of lorem ipsum. 
You open the page in your browser and see the design system rendered before a single line of product code is written. - -Then it writes `DESIGN.md` to your repo root — your project's design source of truth — and updates `CLAUDE.md` so every future Claude Code session respects the system. From that point on, `/qa-design-review` can audit against it, and any agent working on your frontend knows the rules. - -``` -You: /design-consultation - -Claude: From the README, this looks like a developer analytics dashboard - for engineering teams. Sound right? And would you like me to - research what other dev tools are doing for design, or should - I work from what I know? - -You: Yes, it's a dashboard for eng managers. Research would be great. - -Claude: I looked at Linear, Vercel, Railway, Planetscale, and Posthog. - They converge on clean sans-serifs and muted palettes, but - Linear stands out with its dark-first approach and tight spacing. - The gap: most of these feel interchangeable. Here's my proposal: - - AESTHETIC: Industrial/Utilitarian — data-first, no decoration tax - TYPOGRAPHY: Geist (body + UI), Instrument Serif (display), JetBrains Mono (data) - COLOR: Restrained — slate neutrals, single teal accent (#0D9488) - SPACING: 4px base, compact density - LAYOUT: Grid-disciplined, 12-column - MOTION: Minimal-functional — only transitions that aid comprehension - - This coheres because the utilitarian aesthetic demands a tight - spacing grid and restrained color. The serif display font creates - hierarchy contrast without adding visual noise. The monospace for - data tables feels native to the engineering audience. - - Want to adjust anything, or should I generate the preview page? - -You: Generate it. - -Claude: Preview opened in your browser. - [/tmp/design-consultation-preview-1710000000.html] - - The page shows Instrument Serif at hero scale, Geist in body - paragraphs, JetBrains Mono in a sample data table. 
Color swatches - with your teal accent against slate grays. Buttons, cards, and - alerts rendered in both light and dark mode. - - How does it feel? Want to adjust anything before I write DESIGN.md? - -You: Looks great. Ship it. - -Claude: Wrote DESIGN.md (typography, color, spacing, layout, motion). - Updated CLAUDE.md with design system guidelines. - All future sessions will respect these choices. -``` - ---- - -## `/qa-design-review` - -This is my **designer who codes mode**. - -`/plan-design-review` tells you what is wrong. `/qa-design-review` fixes it. - -It runs the same 80-item audit, then enters a fix loop: for each design finding, it locates the source file, makes the minimal CSS/styling change, commits with `style(design): FINDING-NNN`, re-navigates to verify, and takes before/after screenshots. One commit per fix, fully bisectable. - -The self-regulation heuristic is tuned for design work — CSS-only changes get a free pass (they are inherently safe and reversible), but changes to component JSX/TSX files count against the risk budget. Hard cap at 30 fixes. If the risk score exceeds 20%, it stops and asks. - -``` -You: /qa-design-review https://myapp.com - -Claude: [Runs full design audit — same output as /plan-design-review] - Design Score: C | AI Slop Score: D - 12 findings (4 high, 5 medium, 3 polish) - - Fixing 9 design issues... 
- - style(design): FINDING-001 — replace 3-column icon grid with asymmetric layout - style(design): FINDING-002 — add heading scale 48/32/24/18/16 - style(design): FINDING-003 — remove gradient hero, use bold typography - style(design): FINDING-004 — add second font for headings - style(design): FINDING-005 — vary border-radius by element role - style(design): FINDING-006 — left-align body text, reserve center for headings - style(design): FINDING-007 — add hover/focus states to all interactive elements - style(design): FINDING-008 — add prefers-reduced-motion media query - style(design): FINDING-009 — set max content width to 680px for body text - - Final audit: - Design Score: C → B+ | AI Slop Score: D → A - 9 fixes applied (8 verified, 1 best-effort). 3 deferred. - - [Report with before/after screenshots saved to .gstack/design-reports/] -``` - -Nine commits, each touching one concern. The AI Slop score went from D to A because the three most recognizable patterns (gradient hero, 3-column grid, uniform radius) are gone. The design score improved two grades because the typography now has a scale, the spacing has hierarchy, and interactive elements have proper states. - ---- - -## `/review` - -This is my **paranoid staff engineer mode**. - -Passing tests do not mean the branch is safe. - -`/review` exists because there is a whole class of bugs that can survive CI and still punch you in the face in production. This mode is not about dreaming bigger. It is not about making the plan prettier. It is about asking: - -**What can still break?** - -This is a structural audit, not a style nitpick pass. I want the model to look for things like: - -* N+1 queries -* stale reads -* race conditions -* bad trust boundaries -* missing indexes -* escaping bugs -* broken invariants -* bad retry logic -* tests that pass while missing the real failure mode - -### Example - -Suppose the smart listing flow is implemented and the tests are green. 
- -`/review` should still ask: - -* Did I introduce an N+1 query when rendering listing photos or draft suggestions? -* Am I trusting client-provided file metadata instead of validating the actual file? -* Can two tabs race and overwrite cover-photo selection or item details? -* Do failed uploads leave orphaned files in storage forever? -* Can the "exactly one hero image" rule break under concurrency? -* If enrichment APIs partially fail, do I degrade gracefully or save garbage? -* Did I accidentally create a prompt injection or trust-boundary problem by pulling web data into draft generation? - -That is the point of `/review`. - -I do not want flattery here. -I want the model imagining the production incident before it happens. - ---- - -## `/ship` - -This is my **release machine mode**. - -Once I have decided what to build, nailed the technical plan, and run a serious review, I do not want more talking. I want execution. - -`/ship` is for the final mile. It is for a ready branch, not for deciding what to build. - -This is where the model should stop behaving like a brainstorm partner and start behaving like a disciplined release engineer: sync with main, run the right tests, make sure the branch state is sane, update changelog or versioning if the repo expects it, push, and create or update the PR. - -Momentum matters here. - -A lot of branches die when the interesting work is done and only the boring release work is left. Humans procrastinate that part. AI should not. - -### Example - -Suppose the smart listing flow is finished. - -The product thinking is done. -The architecture is done. -The review pass is done. -Now the branch just needs to get landed. - -That is what `/ship` is for. 
- -It takes care of the repetitive release hygiene so I do not bleed energy on: - -* syncing with main -* rerunning tests -* checking for weird branch state -* updating changelog/version metadata -* pushing the branch -* opening or updating the PR - -At this point I do not want more ideation. -I want the plane landed. - ---- - -## Greptile integration - -[Greptile](https://greptile.com) is a YC company that reviews your PRs automatically. It catches real bugs — race conditions, security issues, things that pass CI and blow up in production. It has genuinely saved my ass more than once. I love these guys. - -**Setup:** Install Greptile on your GitHub repo at [greptile.com](https://greptile.com) — it takes about 30 seconds. Once it's reviewing your PRs, gstack picks up its comments automatically. No additional configuration. - -The problem with any automated reviewer is triage. Greptile is good, but not every comment is a real issue. Some are false positives. Some flag things you already fixed three commits ago. Without a triage layer, the comments pile up and you start ignoring them — which defeats the purpose. - -gstack solves this. `/review` and `/ship` are now Greptile-aware. They read Greptile's comments, classify each one, and take action: - -- **Valid issues** get added to the critical findings and fixed before shipping -- **Already-fixed issues** get an auto-reply acknowledging the catch -- **False positives** get pushed back — you confirm, and a reply goes out explaining why it's wrong - -The result is a two-layer review: Greptile catches things asynchronously on the PR, then `/review` and `/ship` triage those findings as part of the normal workflow. Nothing falls through the cracks. - -It also learns. Every false positive you confirm gets saved to `~/.gstack/greptile-history.md`. Future runs auto-skip known FP patterns for your codebase. And `/retro` tracks Greptile's batting average over time — so you can see whether the signal-to-noise ratio is improving. 
- -### Example - -``` -You: /ship - -Claude: [syncs main, runs tests, pre-landing review...] - - Greptile found 3 comments on this PR: - - [VALID] app/services/payment_service.rb:47 — Race condition: - concurrent charges can double-debit. Recommend DB-level advisory - lock around the charge block. - → https://github.com/you/app/pull/42#discussion_r123 - - [ALREADY FIXED] app/models/user.rb:12 — Missing null check - on email before downcase. - → Already fixed in commit abc1234. Auto-replying. - - [FALSE POSITIVE] lib/auth.rb:88 — "Token comparison should - use constant-time comparison." We already use - ActiveSupport::SecurityUtils.secure_compare here. - → https://github.com/you/app/pull/42#discussion_r125 - - Fixing the race condition in payment_service.rb... - Applied advisory lock. Committed: fix: address Greptile - review — advisory lock on concurrent charges - - The auth token comment is a false positive — we're already - using secure_compare. - - Options for the false positive: - A) Reply to Greptile explaining (recommended) - B) Fix it anyway - C) Ignore - -You: A - -Claude: Replied to Greptile. Re-running tests after the fix... - All tests pass. Continuing to version bump. - - [creates PR with Greptile Review section in body] - PR: https://github.com/you/app/pull/42 -``` - -Three Greptile comments. One real fix. One auto-acknowledged. One false positive pushed back with a reply. Total extra time: about 30 seconds. And the PR body has the full audit trail. - ---- - -## `/browse` - -This is my **QA engineer mode**. - -`/browse` is the skill that closes the loop. Before it, the agent could think and code but was still half blind. It had to guess about UI state, auth flows, redirects, console errors, empty states, and broken layouts. Now it can just go look. - -That is a real step-change. The full cycle becomes: plan, code, run the app, inspect the UI, reproduce the bug, verify the fix, ship. 
QA stops being "go click around manually" and becomes part of the same agentic loop. It gives the agent eyes. Now it can do the boring, high-context QA work itself: click through the app, catch breakage, verify the fix, and keep going. That feels like having a real operator in the machine. - -It is a compiled binary that talks to a persistent Chromium daemon — built on [Playwright](https://playwright.dev/) by Microsoft. First call starts the browser (~3s). Every call after that: ~100-200ms. The browser stays running between commands, so cookies, tabs, and localStorage carry over. - -After I push a branch, I tell Claude to go check staging. It does the entire QA pass for me: - -``` -You: /browse staging.myapp.com — log in, test the signup flow, and check - every page I changed in this branch - -Claude: [18 tool calls, ~60 seconds] - - > browse goto https://staging.myapp.com/signup - > browse snapshot -i - > browse fill @e2 "test@example.com" - > browse fill @e3 "password123" - > browse click @e5 (Submit) - > browse screenshot /tmp/signup.png - > Read /tmp/signup.png - - Signup works. Redirected to onboarding. Now checking changed pages. - - > browse goto https://staging.myapp.com/dashboard - > browse screenshot /tmp/dashboard.png - > Read /tmp/dashboard.png - > browse console - - Dashboard loads. No console errors. Charts render with sample data. - - > browse goto https://staging.myapp.com/settings/billing - > browse screenshot /tmp/billing.png - > Read /tmp/billing.png - - Billing page loads. Plan selector shows Free/Pro/Enterprise. - Stripe elements render correctly. - - > browse goto https://staging.myapp.com/api/health - > browse text - - API health endpoint returns {"status":"ok","version":"2.4.1"}. - - All 4 pages load correctly. No console errors. No broken layouts. - Signup → onboarding → dashboard flow works end to end. -``` - -18 tool calls, about a minute. 
It signed up a test user, navigated every page I changed, took screenshots, read them, checked the console for errors, and verified the API. That is a full QA pass. I did not open a browser. - -It works for reading documentation, verifying deployments, filling out forms, taking screenshots, comparing pages across environments, and anything else where Claude needs eyes on a live URL. - -**Security note:** `/browse` runs a persistent Chromium session. Cookies, localStorage, and session state carry over between commands. Do not use it against sensitive production environments unless you intend to — it is a real browser with real state. The session auto-shuts down after 30 minutes of idle time. - -For the full command reference, technical internals, and architecture details, see [BROWSER.md](BROWSER.md). - ---- - -## `/qa` - -This is my **QA lead mode**. - -`/browse` gives the agent eyes. `/qa` gives it a testing methodology. - -The most common use case: you're on a feature branch, you just finished coding, and you want to verify everything works. Just say `/qa` — it reads your git diff, identifies which pages and routes your changes affect, spins up the browser, and tests each one. No URL required. No manual test plan. It figures out what to test from the code you changed. - -``` -You: /qa - -Claude: Analyzing branch diff against main... - 12 files changed: 3 controllers, 2 views, 4 services, 3 tests - - Affected routes: /listings/new, /listings/:id, /api/listings - Detected app running on localhost:3000. - - [Tests each affected page — navigates, fills forms, clicks buttons, - screenshots, checks console errors] - - QA Report: 3 routes tested, all working. - - /listings/new: upload + enrichment flow works end to end - - /listings/:id: detail page renders correctly - - /api/listings: returns 200 with expected shape - No console errors. No regressions on adjacent pages. 
-``` - -Four modes: - -- **Diff-aware** (automatic on feature branches) — reads `git diff main`, identifies affected pages, tests them specifically. The fastest path from "I just wrote code" to "it works." -- **Full** — systematic exploration of the entire app. 5-15 minutes depending on app size. Documents 5-10 well-evidenced issues. -- **Quick** (`--quick`) — 30-second smoke test. Homepage + top 5 nav targets. Loads? Console errors? Broken links? -- **Regression** (`--regression baseline.json`) — run full mode, then diff against a previous baseline. Which issues are fixed? Which are new? What's the score delta? - -``` -You: /qa https://staging.myapp.com - -Claude: [Explores 12 pages, fills 3 forms, tests 2 flows] - - QA Report: staging.myapp.com — Health Score: 72/100 - - Top 3 Issues: - 1. CRITICAL: Checkout form submits with empty required fields - 2. HIGH: Mobile nav menu doesn't close after selecting an item - 3. MEDIUM: Dashboard chart overlaps sidebar below 1024px - - [Full report with screenshots saved to .gstack/qa-reports/] -``` - -Reports and screenshots accumulate in `.gstack/qa-reports/` so you can track quality over time and compare runs. - -**Testing authenticated pages:** Use `/setup-browser-cookies` first to import your real browser sessions, then `/qa` can test pages behind login. - ---- - -## `/setup-browser-cookies` - -This is my **session manager mode**. - -Before `/qa` or `/browse` can test authenticated pages, they need cookies. Instead of manually logging in through the headless browser every time, `/setup-browser-cookies` imports your real sessions directly from your daily browser. - -It auto-detects installed Chromium browsers (Comet, Chrome, Arc, Brave, Edge), decrypts cookies via the macOS Keychain, and loads them into the Playwright session. An interactive picker UI lets you choose exactly which domains to import — no cookie values are ever displayed. 
- -``` -You: /setup-browser-cookies - -Claude: Cookie picker opened — select the domains you want to import - in your browser, then tell me when you're done. - - [You pick github.com, myapp.com in the browser UI] - -You: done - -Claude: Imported 2 domains (47 cookies). Session is ready. -``` - -Or skip the UI entirely: - -``` -You: /setup-browser-cookies github.com - -Claude: Imported 12 cookies for github.com from Comet. -``` - -First import per browser triggers a macOS Keychain prompt — click "Allow" or "Always Allow." - ---- - -## `/retro` - -This is my **engineering manager mode**. - -At the end of the week I want to know what actually happened. Not vibes — data. `/retro` analyzes commit history, work patterns, and shipping velocity and writes a candid retrospective. - -It is team-aware. It identifies who is running the command, gives you the deepest treatment on your own work, then breaks down every contributor with specific praise and growth opportunities — the kind of feedback you would actually give in a 1:1. It computes metrics like commits, LOC, test ratio, PR sizes, and fix ratio. It detects coding sessions from commit timestamps, finds hotspot files, tracks shipping streaks, and identifies the biggest ship of the week. - -``` -You: /retro - -Claude: Week of Mar 1: 47 commits (3 contributors), 3.2k LOC, 38% tests, 12 PRs, peak: 10pm | Streak: 47d - - ## Your Week - 32 commits, +2.4k LOC, 41% tests. Peak hours: 9-11pm. - Biggest ship: cookie import system (browser decryption + picker UI). - What you did well: shipped a complete feature with encryption, UI, and - 18 unit tests in one focused push... - - ## Team Breakdown - - ### Alice - 12 commits focused on app/services/. Every PR under 200 LOC — disciplined. - Opportunity: test ratio at 12% — worth investing before payment gets more complex. - - ### Bob - 3 commits — fixed the N+1 query on dashboard. Small but high-impact. - Opportunity: only 1 active day this week — check if blocked on anything. 
- - [Top 3 team wins, 3 things to improve, 3 habits for next week] -``` - -It saves a JSON snapshot to `.context/retros/` so the next run can show trends. Run `/retro compare` to see this week vs last week side by side. - ---- - -## `/document-release` - -This is my **technical writer mode**. - -After `/ship` creates the PR but before it merges, `/document-release` reads every documentation file in the project and cross-references it against the diff. It updates file paths, command lists, project structure trees, and anything else that drifted. Risky or subjective changes get surfaced as questions — everything else is handled automatically. - -``` -You: /document-release - -Claude: Analyzing 21 files changed across 3 commits. Found 8 documentation files. - - README.md: updated skill count from 9 to 10, added new skill to table - CLAUDE.md: added new directory to project structure - CONTRIBUTING.md: current — no changes needed - TODOS.md: marked 2 items complete, added 1 new item - - All docs updated and committed. PR body updated with doc diff. -``` - -It also polishes CHANGELOG voice (without ever overwriting entries), cleans up completed TODOS, checks cross-doc consistency, and asks about VERSION bumps only when appropriate. - ---- +| Doc | What it covers | +|-----|---------------| +| [Skill Deep Dives](docs/skills.md) | Philosophy, examples, and workflow for every skill (includes Greptile integration) | +| [Architecture](ARCHITECTURE.md) | Design decisions and system internals | +| [Browser Reference](BROWSER.md) | Full command reference for `/browse` | +| [Contributing](CONTRIBUTING.md) | Dev setup, testing, contributor mode, and dev mode | +| [Changelog](CHANGELOG.md) | What's new in every version | ## Troubleshooting -**Skill not showing up in Claude Code?** -Run `cd ~/.claude/skills/gstack && ./setup` (or `cd .claude/skills/gstack && ./setup` for project installs). This rebuilds symlinks so Claude can discover the skills. 
+**Skill not showing up?** `cd ~/.claude/skills/gstack && ./setup` -**`/browse` fails or binary not found?** -Run `cd ~/.claude/skills/gstack && bun install && bun run build`. This compiles the browser binary. Requires Bun v1.0+. +**`/browse` fails?** `cd ~/.claude/skills/gstack && bun install && bun run build` -**Project copy is stale?** -Run `/gstack-upgrade` — it updates both the global install and any vendored project copy automatically. +**Stale install?** Run `/gstack-upgrade` — or set `auto_upgrade: true` in `~/.gstack/config.yaml` -**`bun` not installed?** -Install it: `curl -fsSL https://bun.sh/install | bash` +**Claude says it can't see the skills?** Make sure your project's `CLAUDE.md` has a gstack section. Add this: -## Upgrading - -Run `/gstack-upgrade` in Claude Code. It detects your install type (global or vendored), upgrades, syncs any project copies, and shows what's new. - -Or set `auto_upgrade: true` in `~/.gstack/config.yaml` to upgrade automatically whenever a new version is available. - -## Uninstalling - -Paste this into Claude Code: - -> Uninstall gstack: remove the skill symlinks by running `for s in browse plan-ceo-review plan-eng-review plan-design-review design-consultation review ship retro qa qa-only qa-design-review setup-browser-cookies document-release; do rm -f ~/.claude/skills/$s; done` then run `rm -rf ~/.claude/skills/gstack` and remove the gstack section from CLAUDE.md. If this project also has gstack at .claude/skills/gstack, remove it by running `for s in browse plan-ceo-review plan-eng-review plan-design-review review ship retro qa qa-only qa-design-review setup-browser-cookies document-release; do rm -f .claude/skills/$s; done && rm -rf .claude/skills/gstack` and remove the gstack section from the project CLAUDE.md too. - -## Development - -See [CONTRIBUTING.md](CONTRIBUTING.md) for setup, testing, and dev mode. See [ARCHITECTURE.md](ARCHITECTURE.md) for design decisions and system internals. 
See [BROWSER.md](BROWSER.md) for the browse command reference. - -### Testing - -```bash -bun test # free static tests (<5s) -EVALS=1 bun run test:evals # full E2E + LLM evals (~$4, ~20min) -bun run eval:watch # live dashboard during E2E runs ``` - -E2E tests stream real-time progress, write machine-readable diagnostics, and persist partial results that survive kills. See CONTRIBUTING.md for the full eval infrastructure. +## gstack +Use /browse from gstack for all web browsing. Never use mcp__claude-in-chrome__* tools. +Available skills: /plan-ceo-review, /plan-eng-review, /plan-design-review, +/design-consultation, /review, /ship, /browse, /qa, /qa-only, /design-review, +/setup-browser-cookies, /retro, /document-release. +``` ### Team sync (optional) @@ -846,4 +199,4 @@ To set up: copy `.gstack-sync.json.example` to `.gstack-sync.json`, create a Sup ## License -MIT +MIT. Free forever. Go build something. diff --git a/SKILL.md b/SKILL.md index 76007eee..72bdb682 100644 --- a/SKILL.md +++ b/SKILL.md @@ -28,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. 
Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. 
When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. 
You're a gstack user who also helps make it better. @@ -111,6 +150,7 @@ If `NEEDS_SETUP`: - NEVER use `mcp__claude-in-chrome__*` tools. They are slow and unreliable. - Browser persists between calls — cookies, login sessions, and tabs carry over. - Dialogs (alert/confirm/prompt) are auto-accepted by default — no browser lockup. +- **Show screenshots:** After `$B screenshot`, `$B snapshot -a -o`, or `$B responsive`, always use the Read tool on the output PNG(s) so the user can see them. Without this, screenshots are invisible. ## QA Workflows diff --git a/SKILL.md.tmpl b/SKILL.md.tmpl index 7f2e11db..dd2b2345 100644 --- a/SKILL.md.tmpl +++ b/SKILL.md.tmpl @@ -29,6 +29,7 @@ Auto-shuts down after 30 min idle. State persists between calls (cookies, tabs, - NEVER use `mcp__claude-in-chrome__*` tools. They are slow and unreliable. - Browser persists between calls — cookies, login sessions, and tabs carry over. - Dialogs (alert/confirm/prompt) are auto-accepted by default — no browser lockup. +- **Show screenshots:** After `$B screenshot`, `$B snapshot -a -o`, or `$B responsive`, always use the Read tool on the output PNG(s) so the user can see them. Without this, screenshots are invisible. ## QA Workflows diff --git a/TODOS.md b/TODOS.md index 1fb67502..9c114e04 100644 --- a/TODOS.md +++ b/TODOS.md @@ -263,6 +263,30 @@ **Effort:** S **Priority:** P3 +### CI/CD generation for non-GitHub providers + +**What:** Extend CI/CD bootstrap to generate GitLab CI (`.gitlab-ci.yml`), CircleCI (`.circleci/config.yml`), and Bitrise pipelines. + +**Why:** Not all projects use GitHub Actions. Universal CI/CD bootstrap would make test bootstrap work for everyone. + +**Context:** v1 ships with GitHub Actions only. Detection logic already checks for `.gitlab-ci.yml`, `.circleci/`, `bitrise.yml` and skips with an informational note. Each provider needs ~20 lines of template text in `generateTestBootstrap()`. 
+ +**Effort:** M +**Priority:** P3 +**Depends on:** Test bootstrap (shipped) + +### Auto-upgrade weak tests (★) to strong tests (★★★) + +**What:** When Step 3.4 coverage audit identifies existing ★-rated tests (smoke/trivial assertions), generate improved versions testing edge cases and error paths. + +**Why:** Many codebases have tests that technically exist but don't catch real bugs — `expect(component).toBeDefined()` isn't testing behavior. Upgrading these closes the gap between "has tests" and "has good tests." + +**Context:** Requires the quality scoring rubric from the test coverage audit. Modifying existing test files is riskier than creating new ones — needs careful diffing to ensure the upgraded test still passes. Consider creating a companion test file rather than modifying the original. + +**Effort:** M +**Priority:** P3 +**Depends on:** Test quality scoring (shipped) + ## Retro ### Deployment health tracking (retro + browse) @@ -488,6 +512,16 @@ Implemented as `supabase/functions/weekly-digest/index.ts`. pg_cron Monday 9am U **Priority:** P2 **Depends on:** None +### Cross-platform URL open helper + +**What:** `gstack-open-url` helper script — detect platform, use `open` (macOS) or `xdg-open` (Linux). + +**Why:** The first-time Completeness Principle intro uses macOS `open` to launch the essay. If gstack ever supports Linux, this silently fails. + +**Effort:** S (human: ~30 min / CC: ~2 min) +**Priority:** P4 +**Depends on:** Nothing + ### CDP-based DOM mutation detection for ref staleness **What:** Use Chrome DevTools Protocol `DOM.documentUpdated` / MutationObserver events to proactively invalidate stale refs when the DOM changes, without requiring an explicit `snapshot` call. @@ -534,6 +568,46 @@ Shipped as `/design-consultation` on garrytan/design branch. 
Renamed from `/setu **Priority:** P2 **Depends on:** None +## Ship Confidence Dashboard + +### Smart review relevance detection — PARTIALLY SHIPPED + +~~**What:** Auto-detect which of the 4 reviews are relevant based on branch changes (skip Design Review if no CSS/view changes, skip Code Review if plan-only).~~ + +`bin/gstack-diff-scope` shipped — categorizes diff into SCOPE_FRONTEND, SCOPE_BACKEND, SCOPE_PROMPTS, SCOPE_TESTS, SCOPE_DOCS, SCOPE_CONFIG. Used by design-review-lite to skip when no frontend files changed. Dashboard integration for conditional row display is a follow-up. + +**Remaining:** Dashboard conditional row display (hide "Design Review: NOT YET RUN" when SCOPE_FRONTEND=false). Extend to Eng Review (skip for docs-only) and CEO Review (skip for config-only). + +**Effort:** S +**Priority:** P3 +**Depends on:** gstack-diff-scope (shipped) + +### /merge skill — review-gated PR merge + +**What:** Create a `/merge` skill that merges an approved PR, but first checks the Review Readiness Dashboard and runs `/review` (Fix-First) if code review hasn't been done. Separates "ship" (create PR) from "merge" (land it). + +**Why:** Currently `/review` runs inside `/ship` Step 3.5 but isn't tracked as a gate. A `/merge` skill ensures code review always happens before landing, and enables workflows where someone else reviews the PR first. + +**Context:** `/ship` creates the PR. `/merge` would: check dashboard → run `/review` if needed → `gh pr merge`. This is where code review tracking belongs — at merge time, not at plan time. + +**Effort:** M +**Priority:** P2 +**Depends on:** Ship Confidence Dashboard (shipped) + +## Completeness + +### Completeness metrics dashboard + +**What:** Track how often Claude chooses the complete option vs shortcut across gstack sessions. Aggregate into a dashboard showing completeness trend over time. + +**Why:** Without measurement, we can't know if the Completeness Principle is working. 
Could surface patterns (e.g., certain skills still bias toward shortcuts). + +**Context:** Would require logging choices (e.g., append to a JSONL file when AskUserQuestion resolves), parsing them, and displaying trends. Similar pattern to eval persistence. + +**Effort:** M (human) / S (CC) +**Priority:** P3 +**Depends on:** Boil the Lake shipped (v0.6.1) + ## Completed ### Phase 1: Foundations (v0.2.0) diff --git a/VERSION b/VERSION index 8f0916f7..31d34d20 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -0.5.0 +0.6.4.0 diff --git a/bin/gstack-diff-scope b/bin/gstack-diff-scope new file mode 100755 index 00000000..ada66c0a --- /dev/null +++ b/bin/gstack-diff-scope @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +# gstack-diff-scope — categorize what changed in the diff against a base branch +# Usage: eval $(gstack-diff-scope main) → sets SCOPE_FRONTEND=true SCOPE_BACKEND=false ... +# Or: gstack-diff-scope main → prints SCOPE_*=... lines +set -euo pipefail + +BASE="${1:-main}" + +# Get changed file list +FILES=$(git diff "${BASE}...HEAD" --name-only 2>/dev/null || git diff "${BASE}" --name-only 2>/dev/null || echo "") + +if [ -z "$FILES" ]; then + echo "SCOPE_FRONTEND=false" + echo "SCOPE_BACKEND=false" + echo "SCOPE_PROMPTS=false" + echo "SCOPE_TESTS=false" + echo "SCOPE_DOCS=false" + echo "SCOPE_CONFIG=false" + exit 0 +fi + +FRONTEND=false +BACKEND=false +PROMPTS=false +TESTS=false +DOCS=false +CONFIG=false + +while IFS= read -r f; do + case "$f" in + # Frontend: CSS, views, components, templates + *.css|*.scss|*.less|*.sass|*.pcss|*.module.css|*.module.scss) FRONTEND=true ;; + *.tsx|*.jsx|*.vue|*.svelte|*.astro) FRONTEND=true ;; + *.erb|*.haml|*.slim|*.hbs|*.ejs) FRONTEND=true ;; + *.html) FRONTEND=true ;; + tailwind.config.*|postcss.config.*) FRONTEND=true ;; + app/views/*|*/components/*|styles/*|css/*|app/assets/stylesheets/*) FRONTEND=true ;; + + # Prompts: prompt builders, system prompts, generation services + 
*prompt_builder*|*generation_service*|*writer_service*|*designer_service*) PROMPTS=true ;; + *evaluator*|*scorer*|*classifier_service*|*analyzer*) PROMPTS=true ;; + *voice*.rb|*writing*.rb|*prompt*.rb|*token*.rb) PROMPTS=true ;; + app/services/chat_tools/*|app/services/x_thread_tools/*) PROMPTS=true ;; + config/system_prompts/*) PROMPTS=true ;; + + # Tests + *.test.*|*.spec.*|*_test.*|*_spec.*) TESTS=true ;; + test/*|tests/*|spec/*|__tests__/*|cypress/*|e2e/*) TESTS=true ;; + + # Docs + *.md) DOCS=true ;; + + # Config + package.json|package-lock.json|yarn.lock|bun.lockb) CONFIG=true ;; + Gemfile|Gemfile.lock) CONFIG=true ;; + *.yml|*.yaml) CONFIG=true ;; + .github/*) CONFIG=true ;; + requirements.txt|pyproject.toml|go.mod|Cargo.toml|composer.json) CONFIG=true ;; + + # Backend: everything else that's code (excluding views/components already matched) + *.rb|*.py|*.go|*.rs|*.java|*.php|*.ex|*.exs) BACKEND=true ;; + *.ts|*.js) BACKEND=true ;; # Non-component TS/JS is backend + esac +done <<< "$FILES" + +echo "SCOPE_FRONTEND=$FRONTEND" +echo "SCOPE_BACKEND=$BACKEND" +echo "SCOPE_PROMPTS=$PROMPTS" +echo "SCOPE_TESTS=$TESTS" +echo "SCOPE_DOCS=$DOCS" +echo "SCOPE_CONFIG=$CONFIG" diff --git a/bin/gstack-slug b/bin/gstack-slug new file mode 100755 index 00000000..7336b7b4 --- /dev/null +++ b/bin/gstack-slug @@ -0,0 +1,9 @@ +#!/usr/bin/env bash +# gstack-slug — output project slug and sanitized branch name +# Usage: eval $(gstack-slug) → sets SLUG and BRANCH variables +# Or: gstack-slug → prints SLUG=... and BRANCH=... 
lines +set -euo pipefail +SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +BRANCH=$(git rev-parse --abbrev-ref HEAD 2>/dev/null | tr '/' '-') +echo "SLUG=$SLUG" +echo "BRANCH=$BRANCH" diff --git a/browse/SKILL.md b/browse/SKILL.md index dd473dc0..50198d5f 100644 --- a/browse/SKILL.md +++ b/browse/SKILL.md @@ -28,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. 
**Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. 
+- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -184,6 +223,9 @@ $B snapshot -D # verify deletion happened $B diff https://staging.app.com https://prod.app.com ``` +### 11. Show screenshots to the user +After `$B screenshot`, `$B snapshot -a -o`, or `$B responsive`, always use the Read tool on the output PNG(s) so the user can see them. Without this, screenshots are invisible. + ## Snapshot Flags The snapshot is your primary tool for understanding and interacting with pages. 
diff --git a/browse/SKILL.md.tmpl b/browse/SKILL.md.tmpl index 6ce20634..0c42ffcb 100644 --- a/browse/SKILL.md.tmpl +++ b/browse/SKILL.md.tmpl @@ -102,6 +102,9 @@ $B snapshot -D # verify deletion happened $B diff https://staging.app.com https://prod.app.com ``` +### 11. Show screenshots to the user +After `$B screenshot`, `$B snapshot -a -o`, or `$B responsive`, always use the Read tool on the output PNG(s) so the user can see them. Without this, screenshots are invisible. + ## Snapshot Flags {{SNAPSHOT_FLAGS}} diff --git a/browse/src/browser-manager.ts b/browse/src/browser-manager.ts index 260c8219..e094f3a5 100644 --- a/browse/src/browser-manager.ts +++ b/browse/src/browser-manager.ts @@ -15,7 +15,7 @@ * restores state. Falls back to clean slate on any failure. */ -import { chromium, type Browser, type BrowserContext, type Page, type Locator } from 'playwright'; +import { chromium, type Browser, type BrowserContext, type BrowserContextOptions, type Page, type Locator } from 'playwright'; import { addConsoleEntry, addNetworkEntry, addDialogEntry, networkBuffer, type DialogEntry } from './buffers'; export interface RefEntry { @@ -57,7 +57,7 @@ export class BrowserManager { process.exit(1); }); - const contextOptions: any = { + const contextOptions: BrowserContextOptions = { viewport: { width: 1280, height: 720 }, }; if (this.customUserAgent) { @@ -282,7 +282,7 @@ export class BrowserManager { try { // 1. Save state from current context const savedCookies = await this.context.cookies(); - const savedPages: Array<{ url: string; isActive: boolean; storage: any }> = []; + const savedPages: Array<{ url: string; isActive: boolean; storage: { localStorage: Record; sessionStorage: Record } | null }> = []; for (const [id, page] of this.pages) { const url = page.url(); @@ -308,7 +308,7 @@ export class BrowserManager { await this.context.close().catch(() => {}); // 3. 
Create new context with updated settings - const contextOptions: any = { + const contextOptions: BrowserContextOptions = { viewport: { width: 1280, height: 720 }, }; if (this.customUserAgent) { @@ -340,15 +340,15 @@ export class BrowserManager { // 6. Restore storage if (saved.storage) { try { - await page.evaluate((s: any) => { + await page.evaluate((s: { localStorage: Record; sessionStorage: Record }) => { if (s.localStorage) { for (const [k, v] of Object.entries(s.localStorage)) { - localStorage.setItem(k, v as string); + localStorage.setItem(k, v); } } if (s.sessionStorage) { for (const [k, v] of Object.entries(s.sessionStorage)) { - sessionStorage.setItem(k, v as string); + sessionStorage.setItem(k, v); } } }, saved.storage); @@ -369,13 +369,13 @@ export class BrowserManager { this.clearRefs(); return null; // success - } catch (err: any) { + } catch (err: unknown) { // Fallback: create a clean context + blank tab try { this.pages.clear(); if (this.context) await this.context.close().catch(() => {}); - const contextOptions: any = { + const contextOptions: BrowserContextOptions = { viewport: { width: 1280, height: 720 }, }; if (this.customUserAgent) { @@ -387,7 +387,7 @@ export class BrowserManager { } catch { // If even the fallback fails, we're in trouble — but browser is still alive } - return `Context recreation failed: ${err.message}. Browser reset to blank tab.`; + return `Context recreation failed: ${err instanceof Error ? err.message : String(err)}. 
Browser reset to blank tab.`; } } diff --git a/browse/src/meta-commands.ts b/browse/src/meta-commands.ts index c17930b3..3c622db9 100644 --- a/browse/src/meta-commands.ts +++ b/browse/src/meta-commands.ts @@ -5,6 +5,7 @@ import type { BrowserManager } from './browser-manager'; import { handleSnapshot } from './snapshot'; import { getCleanText } from './read-commands'; +import { READ_COMMANDS, WRITE_COMMANDS, META_COMMANDS } from './commands'; import * as Diff from 'diff'; import * as fs from 'fs'; import * as path from 'path'; @@ -20,28 +21,6 @@ function validateOutputPath(filePath: string): void { } } -// Command sets for chain routing (mirrors server.ts — kept local to avoid circular import) -const CHAIN_READ = new Set([ - 'text', 'html', 'links', 'forms', 'accessibility', - 'js', 'eval', 'css', 'attrs', - 'console', 'network', 'cookies', 'storage', 'perf', - 'dialog', 'is', -]); -const CHAIN_WRITE = new Set([ - 'goto', 'back', 'forward', 'reload', - 'click', 'fill', 'select', 'hover', 'type', 'press', 'scroll', 'wait', - 'viewport', 'cookie', 'cookie-import', 'header', 'useragent', - 'upload', 'dialog-accept', 'dialog-dismiss', - 'cookie-import-browser', -]); -const CHAIN_META = new Set([ - 'tabs', 'tab', 'newtab', 'closetab', - 'status', 'stop', 'restart', - 'screenshot', 'pdf', 'responsive', - 'chain', 'diff', - 'url', 'snapshot', -]); - export async function handleMetaCommand( command: string, args: string[], @@ -223,9 +202,9 @@ export async function handleMetaCommand( const [name, ...cmdArgs] = cmd; try { let result: string; - if (CHAIN_WRITE.has(name)) result = await handleWriteCommand(name, cmdArgs, bm); - else if (CHAIN_READ.has(name)) result = await handleReadCommand(name, cmdArgs, bm); - else if (CHAIN_META.has(name)) result = await handleMetaCommand(name, cmdArgs, bm, shutdown); + if (WRITE_COMMANDS.has(name)) result = await handleWriteCommand(name, cmdArgs, bm); + else if (READ_COMMANDS.has(name)) result = await handleReadCommand(name, cmdArgs, bm); + 
else if (META_COMMANDS.has(name)) result = await handleMetaCommand(name, cmdArgs, bm, shutdown); else throw new Error(`Unknown command: ${name}`); results.push(`[${name}] ${result}`); } catch (err: any) { diff --git a/browse/src/server.ts b/browse/src/server.ts index 5e76f421..f30a4881 100644 --- a/browse/src/server.ts +++ b/browse/src/server.ts @@ -104,7 +104,7 @@ async function flushBuffers() { const lines = entries.map(e => `[${new Date(e.timestamp).toISOString()}] [${e.level}] ${e.text}` ).join('\n') + '\n'; - await Bun.write(CONSOLE_LOG_PATH, (await Bun.file(CONSOLE_LOG_PATH).text().catch(() => '')) + lines); + fs.appendFileSync(CONSOLE_LOG_PATH, lines); lastConsoleFlushed = consoleBuffer.totalAdded; } @@ -115,7 +115,7 @@ async function flushBuffers() { const lines = entries.map(e => `[${new Date(e.timestamp).toISOString()}] ${e.method} ${e.url} → ${e.status || 'pending'} (${e.duration || '?'}ms, ${e.size || '?'}B)` ).join('\n') + '\n'; - await Bun.write(NETWORK_LOG_PATH, (await Bun.file(NETWORK_LOG_PATH).text().catch(() => '')) + lines); + fs.appendFileSync(NETWORK_LOG_PATH, lines); lastNetworkFlushed = networkBuffer.totalAdded; } @@ -126,7 +126,7 @@ async function flushBuffers() { const lines = entries.map(e => `[${new Date(e.timestamp).toISOString()}] [${e.type}] "${e.message}" → ${e.action}${e.response ? 
` "${e.response}"` : ''}` ).join('\n') + '\n'; - await Bun.write(DIALOG_LOG_PATH, (await Bun.file(DIALOG_LOG_PATH).text().catch(() => '')) + lines); + fs.appendFileSync(DIALOG_LOG_PATH, lines); lastDialogFlushed = dialogBuffer.totalAdded; } } catch { diff --git a/design-consultation/SKILL.md b/design-consultation/SKILL.md index d92d47fd..8fd9cb4b 100644 --- a/design-consultation/SKILL.md +++ b/design-consultation/SKILL.md @@ -2,7 +2,7 @@ name: design-consultation version: 1.0.0 description: | - Design consultation: understands your product, researches competitors, proposes a + Design consultation: understands your product, researches the landscape, proposes a complete design system (aesthetic, typography, color, layout, spacing, motion), and generates font+color preview pages. Creates DESIGN.md as your project's design source of truth. For existing sites, use /plan-design-review to infer the system instead. @@ -31,22 +31,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. 
Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. 
When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. 
You're a gstack user who also helps make it better. @@ -114,7 +153,7 @@ ls src/ app/ pages/ components/ 2>/dev/null | head -30 Look for brainstorm output: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls ~/.gstack/projects/$SLUG/*brainstorm* 2>/dev/null | head -5 ls .context/*brainstorm* .context/attachments/*brainstorm* 2>/dev/null | head -5 ``` @@ -123,6 +162,29 @@ If brainstorm output exists, read it — the product context is pre-filled. If the codebase is empty and purpose is unclear, say: *"I don't have a clear picture of what you're building yet. Want to brainstorm first with `/brainstorm`? Once we know the product direction, we can set up the design system."* +**Find the browse binary (optional — enables visual competitive research):** + +## SETUP (run this check BEFORE any browse command) + +```bash +_ROOT=$(git rev-parse --show-toplevel 2>/dev/null) +B="" +[ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" +[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse +if [ -x "$B" ]; then + echo "READY: $B" +else + echo "NEEDS_SETUP" +fi +``` + +If `NEEDS_SETUP`: +1. Tell the user: "gstack browse needs a one-time build (~10 seconds). OK to proceed?" Then STOP and wait. +2. Run: `cd && ./setup` +3. If `bun` is not installed: `curl -fsSL https://bun.sh/install | bash` + +If browse is not available, that's fine — visual research is optional. The skill works without it using WebSearch and your built-in design knowledge. + --- ## Phase 1: Product Context @@ -135,7 +197,7 @@ Ask the user a single question that covers everything you need to know. Pre-fill 3. "Want me to research what top products in your space are doing for design, or should I work from my design knowledge?" 4. 
**Explicitly say:** "At any point you can just drop into chat and we'll talk through anything — this isn't a rigid form, it's a conversation." -If the README or brainstorm gives you enough context, pre-fill and confirm: *"From what I can see, this is [X] for [Y] in the [Z] space. Sound right? And would you like me to research competitors, or should I work from what I know?"* +If the README or brainstorm gives you enough context, pre-fill and confirm: *"From what I can see, this is [X] for [Y] in the [Z] space. Sound right? And would you like me to research what's out there in this space, or should I work from what I know?"* --- @@ -143,17 +205,40 @@ If the README or brainstorm gives you enough context, pre-fill and confirm: *"Fr If the user wants competitive research: +**Step 1: Identify what's out there via WebSearch** + Use WebSearch to find 5-10 products in their space. Search for: - "[product category] website design" - "[product category] best websites 2025" - "best [industry] web apps" -For each competitor found, note: fonts used, color palette, layout approach, aesthetic direction. +**Step 2: Visual research via browse (if available)** -Summarize your findings conversationally: -> "I looked at [competitors]. They tend toward [patterns] — lots of [common choices]. The opportunity to be distinctive is [gap]. Here's what I'd recommend based on this..." +If the browse binary is available (`$B` is set), visit the top 3-5 sites in the space and capture visual evidence: -If WebSearch is unavailable or returns poor results, fall back gracefully: *"Couldn't get good research results, so I'll work from my design knowledge of the [industry] space."* +```bash +$B goto "https://example-site.com" +$B screenshot "/tmp/design-research-site-name.png" +$B snapshot +``` + +For each site, analyze: fonts actually used, color palette, layout approach, spacing density, aesthetic direction. The screenshot gives you the feel; the snapshot gives you structural data. 
+ +If a site blocks the headless browser or requires login, skip it and note why. + +If browse is not available, rely on WebSearch results and your built-in design knowledge — this is fine. + +**Step 3: Synthesize findings** + +The goal of research is NOT to copy. It is to get in the ballpark — to understand the visual language users in this category already expect. This gives you the baseline. The interesting design work starts after you have the baseline: deciding where to follow conventions (so the product feels literate) and where to break from them (so the product is memorable). + +Summarize conversationally: +> "I looked at what's out there. Here's the landscape: they converge on [patterns]. Most of them feel [observation — e.g., interchangeable, polished but generic, etc.]. The opportunity to stand out is [gap]. Here's where I'd play it safe and where I'd take a risk..." + +**Graceful degradation:** +- Browse available → screenshots + snapshots + WebSearch (richest research) +- Browse unavailable → WebSearch only (still good) +- WebSearch also unavailable → agent's built-in design knowledge (always works) If the user said no research, skip entirely and proceed to Phase 3 using your built-in design knowledge. @@ -163,7 +248,7 @@ If the user said no research, skip entirely and proceed to Phase 3 using your bu This is the soul of the skill. Propose EVERYTHING as one coherent package. -**AskUserQuestion Q2 — present the full proposal:** +**AskUserQuestion Q2 — present the full proposal with SAFE/RISK breakdown:** ``` Based on [product context] and [research findings / my design knowledge]: @@ -178,12 +263,21 @@ MOTION: [approach] — [rationale] This system is coherent because [explain how choices reinforce each other]. -Want to adjust anything? You can drill into any section, or just tell me -what feels off and I'll rework it. Or if this looks right, I'll generate -a preview page so you can see the fonts and colors rendered. 
+SAFE CHOICES (category baseline — your users expect these): + - [2-3 decisions that match category conventions, with rationale for playing safe] + +RISKS (where your product gets its own face): + - [2-3 deliberate departures from convention] + - For each risk: what it is, why it works, what you gain, what it costs + +The safe choices keep you literate in your category. The risks are where +your product becomes memorable. Which risks appeal to you? Want to see +different ones? Or adjust anything else? ``` -**Options:** A) Looks great — generate the preview page. B) I want to adjust [section]. C) Start over with a different direction. D) Skip the preview, just write DESIGN.md. +The SAFE/RISK breakdown is critical. Design coherence is table stakes — every product in a category can be coherent and still look identical. The real question is: where do you take creative risks? The agent should always propose at least 2 risks, each with a clear rationale for why the risk is worth taking and what the user gives up. Risks might include: an unexpected typeface for the category, a bold accent color nobody else uses, tighter or looser spacing than the norm, a layout approach that breaks from convention, motion choices that add personality. + +**Options:** A) Looks great — generate the preview page. B) I want to adjust [section]. C) I want different risks — show me wilder options. D) Start over with a different direction. E) Skip the preview, just write DESIGN.md. ### Your Design Knowledge (use to inform proposals — do NOT display as tables) @@ -273,7 +367,7 @@ The agent writes a **single, self-contained HTML file** (no framework dependenci 1. **Loads proposed fonts** from Google Fonts (or Bunny Fonts) via `` tags 2. **Uses the proposed color palette** throughout — dogfood the design system 3. **Shows the product name** (not "Lorem Ipsum") as the hero heading -4. **Font comparison section:** +4. 
**Font specimen section:** - Each font candidate shown in its proposed role (hero heading, body paragraph, button label, data table row) - Side-by-side comparison if multiple candidates for one role - Real content that matches the product (e.g., civic tech → government data examples) @@ -281,11 +375,17 @@ The agent writes a **single, self-contained HTML file** (no framework dependenci - Swatches with hex values and names - Sample UI components rendered in the palette: buttons (primary, secondary, ghost), cards, form inputs, alerts (success, warning, error, info) - Background/text color combinations showing contrast -6. **Light/dark mode toggle** using CSS custom properties and a JS toggle button -7. **Clean, professional layout** — the preview page IS a taste signal for the skill -8. **Responsive** — looks good on any screen width +6. **Realistic product mockups** — this is what makes the preview page powerful. Based on the project type from Phase 1, render 2-3 realistic page layouts using the full design system: + - **Dashboard / web app:** sample data table with metrics, sidebar nav, header with user avatar, stat cards + - **Marketing site:** hero section with real copy, feature highlights, testimonial block, CTA + - **Settings / admin:** form with labeled inputs, toggle switches, dropdowns, save button + - **Auth / onboarding:** login form with social buttons, branding, input validation states + - Use the product name, realistic content for the domain, and the proposed spacing/layout/border-radius. The user should see their product (roughly) before writing any code. +7. **Light/dark mode toggle** using CSS custom properties and a JS toggle button +8. **Clean, professional layout** — the preview page IS a taste signal for the skill +9. **Responsive** — looks good on any screen width -The page should make the user think "oh nice, they thought of this." It's selling the design system visually, not just listing hex codes. 
+The page should make the user think "oh nice, they thought of this." It's selling the design system by showing what the product could feel like, not just listing hex codes and font names. If `open` fails (headless environment), tell the user: *"I wrote the preview to [path] — open it in your browser to see the fonts and colors rendered."* diff --git a/design-consultation/SKILL.md.tmpl b/design-consultation/SKILL.md.tmpl index 11d868fa..141a9190 100644 --- a/design-consultation/SKILL.md.tmpl +++ b/design-consultation/SKILL.md.tmpl @@ -2,7 +2,7 @@ name: design-consultation version: 1.0.0 description: | - Design consultation: understands your product, researches competitors, proposes a + Design consultation: understands your product, researches the landscape, proposes a complete design system (aesthetic, typography, color, layout, spacing, motion), and generates font+color preview pages. Creates DESIGN.md as your project's design source of truth. For existing sites, use /plan-design-review to infer the system instead. @@ -49,7 +49,7 @@ ls src/ app/ pages/ components/ 2>/dev/null | head -30 Look for brainstorm output: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls ~/.gstack/projects/$SLUG/*brainstorm* 2>/dev/null | head -5 ls .context/*brainstorm* .context/attachments/*brainstorm* 2>/dev/null | head -5 ``` @@ -58,6 +58,12 @@ If brainstorm output exists, read it — the product context is pre-filled. If the codebase is empty and purpose is unclear, say: *"I don't have a clear picture of what you're building yet. Want to brainstorm first with `/brainstorm`? Once we know the product direction, we can set up the design system."* +**Find the browse binary (optional — enables visual competitive research):** + +{{BROWSE_SETUP}} + +If browse is not available, that's fine — visual research is optional. 
The skill works without it using WebSearch and your built-in design knowledge. + --- ## Phase 1: Product Context @@ -70,7 +76,7 @@ Ask the user a single question that covers everything you need to know. Pre-fill 3. "Want me to research what top products in your space are doing for design, or should I work from my design knowledge?" 4. **Explicitly say:** "At any point you can just drop into chat and we'll talk through anything — this isn't a rigid form, it's a conversation." -If the README or brainstorm gives you enough context, pre-fill and confirm: *"From what I can see, this is [X] for [Y] in the [Z] space. Sound right? And would you like me to research competitors, or should I work from what I know?"* +If the README or brainstorm gives you enough context, pre-fill and confirm: *"From what I can see, this is [X] for [Y] in the [Z] space. Sound right? And would you like me to research what's out there in this space, or should I work from what I know?"* --- @@ -78,17 +84,40 @@ If the README or brainstorm gives you enough context, pre-fill and confirm: *"Fr If the user wants competitive research: +**Step 1: Identify what's out there via WebSearch** + Use WebSearch to find 5-10 products in their space. Search for: - "[product category] website design" - "[product category] best websites 2025" - "best [industry] web apps" -For each competitor found, note: fonts used, color palette, layout approach, aesthetic direction. +**Step 2: Visual research via browse (if available)** -Summarize your findings conversationally: -> "I looked at [competitors]. They tend toward [patterns] — lots of [common choices]. The opportunity to be distinctive is [gap]. Here's what I'd recommend based on this..." 
+If the browse binary is available (`$B` is set), visit the top 3-5 sites in the space and capture visual evidence: -If WebSearch is unavailable or returns poor results, fall back gracefully: *"Couldn't get good research results, so I'll work from my design knowledge of the [industry] space."* +```bash +$B goto "https://example-site.com" +$B screenshot "/tmp/design-research-site-name.png" +$B snapshot +``` + +For each site, analyze: fonts actually used, color palette, layout approach, spacing density, aesthetic direction. The screenshot gives you the feel; the snapshot gives you structural data. + +If a site blocks the headless browser or requires login, skip it and note why. + +If browse is not available, rely on WebSearch results and your built-in design knowledge — this is fine. + +**Step 3: Synthesize findings** + +The goal of research is NOT to copy. It is to get in the ballpark — to understand the visual language users in this category already expect. This gives you the baseline. The interesting design work starts after you have the baseline: deciding where to follow conventions (so the product feels literate) and where to break from them (so the product is memorable). + +Summarize conversationally: +> "I looked at what's out there. Here's the landscape: they converge on [patterns]. Most of them feel [observation — e.g., interchangeable, polished but generic, etc.]. The opportunity to stand out is [gap]. Here's where I'd play it safe and where I'd take a risk..." + +**Graceful degradation:** +- Browse available → screenshots + snapshots + WebSearch (richest research) +- Browse unavailable → WebSearch only (still good) +- WebSearch also unavailable → agent's built-in design knowledge (always works) If the user said no research, skip entirely and proceed to Phase 3 using your built-in design knowledge. @@ -98,7 +127,7 @@ If the user said no research, skip entirely and proceed to Phase 3 using your bu This is the soul of the skill. 
Propose EVERYTHING as one coherent package. -**AskUserQuestion Q2 — present the full proposal:** +**AskUserQuestion Q2 — present the full proposal with SAFE/RISK breakdown:** ``` Based on [product context] and [research findings / my design knowledge]: @@ -113,12 +142,21 @@ MOTION: [approach] — [rationale] This system is coherent because [explain how choices reinforce each other]. -Want to adjust anything? You can drill into any section, or just tell me -what feels off and I'll rework it. Or if this looks right, I'll generate -a preview page so you can see the fonts and colors rendered. +SAFE CHOICES (category baseline — your users expect these): + - [2-3 decisions that match category conventions, with rationale for playing safe] + +RISKS (where your product gets its own face): + - [2-3 deliberate departures from convention] + - For each risk: what it is, why it works, what you gain, what it costs + +The safe choices keep you literate in your category. The risks are where +your product becomes memorable. Which risks appeal to you? Want to see +different ones? Or adjust anything else? ``` -**Options:** A) Looks great — generate the preview page. B) I want to adjust [section]. C) Start over with a different direction. D) Skip the preview, just write DESIGN.md. +The SAFE/RISK breakdown is critical. Design coherence is table stakes — every product in a category can be coherent and still look identical. The real question is: where do you take creative risks? The agent should always propose at least 2 risks, each with a clear rationale for why the risk is worth taking and what the user gives up. Risks might include: an unexpected typeface for the category, a bold accent color nobody else uses, tighter or looser spacing than the norm, a layout approach that breaks from convention, motion choices that add personality. + +**Options:** A) Looks great — generate the preview page. B) I want to adjust [section]. C) I want different risks — show me wilder options. 
D) Start over with a different direction. E) Skip the preview, just write DESIGN.md. ### Your Design Knowledge (use to inform proposals — do NOT display as tables) @@ -208,7 +246,7 @@ The agent writes a **single, self-contained HTML file** (no framework dependenci 1. **Loads proposed fonts** from Google Fonts (or Bunny Fonts) via `` tags 2. **Uses the proposed color palette** throughout — dogfood the design system 3. **Shows the product name** (not "Lorem Ipsum") as the hero heading -4. **Font comparison section:** +4. **Font specimen section:** - Each font candidate shown in its proposed role (hero heading, body paragraph, button label, data table row) - Side-by-side comparison if multiple candidates for one role - Real content that matches the product (e.g., civic tech → government data examples) @@ -216,11 +254,17 @@ The agent writes a **single, self-contained HTML file** (no framework dependenci - Swatches with hex values and names - Sample UI components rendered in the palette: buttons (primary, secondary, ghost), cards, form inputs, alerts (success, warning, error, info) - Background/text color combinations showing contrast -6. **Light/dark mode toggle** using CSS custom properties and a JS toggle button -7. **Clean, professional layout** — the preview page IS a taste signal for the skill -8. **Responsive** — looks good on any screen width +6. **Realistic product mockups** — this is what makes the preview page powerful. 
Based on the project type from Phase 1, render 2-3 realistic page layouts using the full design system: + - **Dashboard / web app:** sample data table with metrics, sidebar nav, header with user avatar, stat cards + - **Marketing site:** hero section with real copy, feature highlights, testimonial block, CTA + - **Settings / admin:** form with labeled inputs, toggle switches, dropdowns, save button + - **Auth / onboarding:** login form with social buttons, branding, input validation states + - Use the product name, realistic content for the domain, and the proposed spacing/layout/border-radius. The user should see their product (roughly) before writing any code. +7. **Light/dark mode toggle** using CSS custom properties and a JS toggle button +8. **Clean, professional layout** — the preview page IS a taste signal for the skill +9. **Responsive** — looks good on any screen width -The page should make the user think "oh nice, they thought of this." It's selling the design system visually, not just listing hex codes. +The page should make the user think "oh nice, they thought of this." It's selling the design system by showing what the product could feel like, not just listing hex codes and font names. If `open` fails (headless environment), tell the user: *"I wrote the preview to [path] — open it in your browser to see the fonts and colors rendered."* diff --git a/qa-design-review/SKILL.md b/design-review/SKILL.md similarity index 70% rename from qa-design-review/SKILL.md rename to design-review/SKILL.md index 08c9916a..b06e0827 100644 --- a/qa-design-review/SKILL.md +++ b/design-review/SKILL.md @@ -1,11 +1,11 @@ --- -name: qa-design-review -version: 1.0.0 +name: design-review +version: 2.0.0 description: | Designer's eye QA: finds visual inconsistency, spacing issues, hierarchy problems, AI slop patterns, and slow interactions — then fixes them. Iteratively fixes issues in source code, committing each fix atomically and re-verifying with before/after - screenshots. 
For report-only mode, use /plan-design-review instead. + screenshots. For plan-mode design review (before implementation), use /plan-design-review. allowed-tools: - Bash - Read @@ -14,6 +14,7 @@ allowed-tools: - Glob - Grep - AskUserQuestion + - WebSearch --- @@ -30,22 +31,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. 
Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -83,7 +123,7 @@ Hey gstack team — ran into this while using /{skill-name}: Slug: lowercase, hyphens, max 60 chars (e.g. `browse-js-no-await`). Skip if file already exists. Max 3 reports per session. File inline and continue — don't stop the workflow. Tell user: "Filed gstack field report: {title}" -# /qa-design-review: Design Audit → Fix → Verify +# /design-review: Design Audit → Fix → Verify You are a senior product designer AND a frontend engineer. Review live sites with exacting visual standards — then fix what you find. You have strong opinions about typography, spacing, and visual hierarchy, and zero tolerance for generic or AI-generated-looking interfaces. 
@@ -110,7 +150,7 @@ Look for `DESIGN.md`, `design-system.md`, or similar in the repo root. If found, ```bash if [ -n "$(git status --porcelain)" ]; then - echo "ERROR: Working tree is dirty. Commit or stash changes before running /qa-design-review." + echo "ERROR: Working tree is dirty. Commit or stash changes before running /design-review." exit 1 fi ``` @@ -136,6 +176,161 @@ If `NEEDS_SETUP`: 2. Run: `cd && ./setup` 3. If `bun` is not installed: `curl -fsSL https://bun.sh/install | bash` +**Check test framework (bootstrap if needed):** + +## Test Framework Bootstrap + +**Detect existing test framework and project runtime:** + +```bash +# Detect project runtime +[ -f Gemfile ] && echo "RUNTIME:ruby" +[ -f package.json ] && echo "RUNTIME:node" +[ -f requirements.txt ] || [ -f pyproject.toml ] && echo "RUNTIME:python" +[ -f go.mod ] && echo "RUNTIME:go" +[ -f Cargo.toml ] && echo "RUNTIME:rust" +[ -f composer.json ] && echo "RUNTIME:php" +[ -f mix.exs ] && echo "RUNTIME:elixir" +# Detect sub-frameworks +[ -f Gemfile ] && grep -q "rails" Gemfile 2>/dev/null && echo "FRAMEWORK:rails" +[ -f package.json ] && grep -q '"next"' package.json 2>/dev/null && echo "FRAMEWORK:nextjs" +# Check for existing test infrastructure +ls jest.config.* vitest.config.* playwright.config.* .rspec pytest.ini pyproject.toml phpunit.xml 2>/dev/null +ls -d test/ tests/ spec/ __tests__/ cypress/ e2e/ 2>/dev/null +# Check opt-out marker +[ -f .gstack/no-test-bootstrap ] && echo "BOOTSTRAP_DECLINED" +``` + +**If test framework detected** (config files or test directories found): +Print "Test framework detected: {name} ({N} existing tests). Skipping bootstrap." +Read 2-3 existing test files to learn conventions (naming, imports, assertion style, setup patterns). +Store conventions as prose context for use in Phase 8e.5 or Step 3.4. **Skip the rest of bootstrap.** + +**If BOOTSTRAP_DECLINED** appears: Print "Test bootstrap previously declined — skipping." 
**Skip the rest of bootstrap.** + +**If NO runtime detected** (no config files found): Use AskUserQuestion: +"I couldn't detect your project's language. What runtime are you using?" +Options: A) Node.js/TypeScript B) Ruby/Rails C) Python D) Go E) Rust F) PHP G) Elixir H) This project doesn't need tests. +If user picks H → write `.gstack/no-test-bootstrap` and continue without tests. + +**If runtime detected but no test framework — bootstrap:** + +### B2. Research best practices + +Use WebSearch to find current best practices for the detected runtime: +- `"[runtime] best test framework 2025 2026"` +- `"[framework A] vs [framework B] comparison"` + +If WebSearch is unavailable, use this built-in knowledge table: + +| Runtime | Primary recommendation | Alternative | +|---------|----------------------|-------------| +| Ruby/Rails | minitest + fixtures + capybara | rspec + factory_bot + shoulda-matchers | +| Node.js | vitest + @testing-library | jest + @testing-library | +| Next.js | vitest + @testing-library/react + playwright | jest + cypress | +| Python | pytest + pytest-cov | unittest | +| Go | stdlib testing + testify | stdlib only | +| Rust | cargo test (built-in) + mockall | — | +| PHP | phpunit + mockery | pest | +| Elixir | ExUnit (built-in) + ex_machina | — | + +### B3. Framework selection + +Use AskUserQuestion: +"I detected this is a [Runtime/Framework] project with no test framework. I researched current best practices. Here are the options: +A) [Primary] — [rationale]. Includes: [packages]. Supports: unit, integration, smoke, e2e +B) [Alternative] — [rationale]. Includes: [packages] +C) Skip — don't set up testing right now +RECOMMENDATION: Choose A because [reason based on project context]" + +If user picks C → write `.gstack/no-test-bootstrap`. Tell user: "If you change your mind later, delete `.gstack/no-test-bootstrap` and re-run." Continue without tests. 
+ +If multiple runtimes detected (monorepo) → ask which runtime to set up first, with option to do both sequentially. + +### B4. Install and configure + +1. Install the chosen packages (npm/bun/gem/pip/etc.) +2. Create minimal config file +3. Create directory structure (test/, spec/, etc.) +4. Create one example test matching the project's code to verify setup works + +If package installation fails → debug once. If still failing → revert with `git checkout -- package.json package-lock.json` (or equivalent for the runtime). Warn user and continue without tests. + +### B4.5. First real tests + +Generate 3-5 real tests for existing code: + +1. **Find recently changed files:** `git log --since=30.days --name-only --format="" | sort | uniq -c | sort -rn | head -10` +2. **Prioritize by risk:** Error handlers > business logic with conditionals > API endpoints > pure functions +3. **For each file:** Write one test that tests real behavior with meaningful assertions. Never `expect(x).toBeDefined()` — test what the code DOES. +4. Run each test. Passes → keep. Fails → fix once. Still fails → delete silently. +5. Generate at least 1 test, cap at 5. + +Never import secrets, API keys, or credentials in test files. Use environment variables or test fixtures. + +### B5. Verify + +```bash +# Run the full test suite to confirm everything works +{detected test command} +``` + +If tests fail → debug once. If still failing → revert all bootstrap changes and warn user. + +### B5.5. CI/CD pipeline + +```bash +# Check CI provider +ls -d .github/ 2>/dev/null && echo "CI:github" +ls .gitlab-ci.yml .circleci/ bitrise.yml 2>/dev/null +``` + +If `.github/` exists (or no CI detected — default to GitHub Actions): +Create `.github/workflows/test.yml` with: +- `runs-on: ubuntu-latest` +- Appropriate setup action for the runtime (setup-node, setup-ruby, setup-python, etc.) 
+- The same test command verified in B5 +- Trigger: push + pull_request + +If non-GitHub CI detected → skip CI generation with note: "Detected {provider} — CI pipeline generation supports GitHub Actions only. Add test step to your existing pipeline manually." + +### B6. Create TESTING.md + +First check: If TESTING.md already exists → read it and update/append rather than overwriting. Never destroy existing content. + +Write TESTING.md with: +- Philosophy: "100% test coverage is the key to great vibe coding. Tests let you move fast, trust your instincts, and ship with confidence — without them, vibe coding is just yolo coding. With tests, it's a superpower." +- Framework name and version +- How to run tests (the verified command from B5) +- Test layers: Unit tests (what, where, when), Integration tests, Smoke tests, E2E tests +- Conventions: file naming, assertion style, setup/teardown patterns + +### B7. Update CLAUDE.md + +First check: If CLAUDE.md already has a `## Testing` section → skip. Don't duplicate. + +Append a `## Testing` section: +- Run command and test directory +- Reference to TESTING.md +- Test expectations: + - 100% test coverage is the goal — tests make vibe coding safe + - When writing new functions, write a corresponding test + - When fixing a bug, write a regression test + - When adding error handling, write a test that triggers the error + - When adding a conditional (if/else, switch), write tests for BOTH paths + - Never commit code that makes existing tests fail + +### B8. Commit + +```bash +git status --porcelain +``` + +Only commit if there are changes. 
Stage all bootstrap files (config, test directory, TESTING.md, CLAUDE.md, .github/workflows/test.yml if created): +`git commit -m "chore: bootstrap test framework ({framework name})"` + +--- + **Create output directories:** ```bash @@ -396,7 +591,7 @@ Compare screenshots and observations across pages for: **Project-scoped:** ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to: `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` @@ -477,6 +672,7 @@ Tie everything to user goals and product objectives. Always suggest specific imp 8. **Responsive is design, not just "not broken."** A stacked desktop layout on mobile is not responsive design — it's lazy. Evaluate whether the mobile layout makes *design* sense. 9. **Document incrementally.** Write each finding to the report as you find it. Don't batch. 10. **Depth over breadth.** 5-10 well-documented findings with screenshots and specific suggestions > 20 vague observations. +11. **Show screenshots to the user.** After every `$B screenshot`, `$B snapshot -a -o`, or `$B responsive` command, use the Read tool on the output file(s) so the user can see them inline. For `responsive` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user. Record baseline design score and AI slop score at end of Phase 6. @@ -564,6 +760,18 @@ Take **before/after screenshot pair** for every fix. - **best-effort**: fix applied but couldn't fully verify (e.g., needs specific browser state) - **reverted**: regression detected → `git revert HEAD` → mark finding as "deferred" +### 8e.5. Regression Test (design-review variant) + +Design fixes are typically CSS-only. 
Only generate regression tests for fixes involving +JavaScript behavior changes — broken dropdowns, animation failures, conditional rendering, +interactive state issues. + +For CSS-only fixes: skip entirely. CSS regressions are caught by re-running /design-review. + +If the fix involved JS behavior: follow the same procedure as /qa Phase 8e.5 (study existing +test patterns, write a regression test encoding the exact bug condition, run it, commit if +passes or defer if fails). Commit format: `test(design): regression test for FINDING-NNN`. + ### 8f. Self-Regulation (STOP AND EVALUATE) Every 5 fixes (or after any revert), compute the design-fix risk level: @@ -602,7 +810,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` @@ -630,15 +838,15 @@ Write to `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` If the repo has a `TODOS.md`: 1. **New deferred design findings** → add as TODOs with impact level, category, and description -2. **Fixed findings that were in TODOS.md** → annotate with "Fixed by /qa-design-review on {branch}, {date}" +2. **Fixed findings that were in TODOS.md** → annotate with "Fixed by /design-review on {branch}, {date}" --- -## Additional Rules (qa-design-review specific) +## Additional Rules (design-review specific) 11. **Clean working tree required.** Refuse to start if `git status --porcelain` is non-empty. 12. **One commit per fix.** Never bundle multiple design fixes into one commit. -13. **Never modify tests or CI configuration.** Only fix application source code and styles. +13. 
**Only modify tests when generating regression tests in Phase 8e.5.** Never modify CI configuration. Never modify existing tests — only create new test files. 14. **Revert on regression.** If a fix makes things worse, `git revert HEAD` immediately. 15. **Self-regulate.** Follow the design-fix risk heuristic. When in doubt, stop and ask. 16. **CSS-first.** Prefer CSS/styling changes over structural component changes. CSS-only changes are safer and more reversible. diff --git a/qa-design-review/SKILL.md.tmpl b/design-review/SKILL.md.tmpl similarity index 85% rename from qa-design-review/SKILL.md.tmpl rename to design-review/SKILL.md.tmpl index 86e9aab9..eb8dd6b8 100644 --- a/qa-design-review/SKILL.md.tmpl +++ b/design-review/SKILL.md.tmpl @@ -1,11 +1,11 @@ --- -name: qa-design-review -version: 1.0.0 +name: design-review +version: 2.0.0 description: | Designer's eye QA: finds visual inconsistency, spacing issues, hierarchy problems, AI slop patterns, and slow interactions — then fixes them. Iteratively fixes issues in source code, committing each fix atomically and re-verifying with before/after - screenshots. For report-only mode, use /plan-design-review instead. + screenshots. For plan-mode design review (before implementation), use /plan-design-review. allowed-tools: - Bash - Read @@ -14,11 +14,12 @@ allowed-tools: - Glob - Grep - AskUserQuestion + - WebSearch --- {{PREAMBLE}} -# /qa-design-review: Design Audit → Fix → Verify +# /design-review: Design Audit → Fix → Verify You are a senior product designer AND a frontend engineer. Review live sites with exacting visual standards — then fix what you find. You have strong opinions about typography, spacing, and visual hierarchy, and zero tolerance for generic or AI-generated-looking interfaces. @@ -45,7 +46,7 @@ Look for `DESIGN.md`, `design-system.md`, or similar in the repo root. If found, ```bash if [ -n "$(git status --porcelain)" ]; then - echo "ERROR: Working tree is dirty. 
Commit or stash changes before running /qa-design-review." + echo "ERROR: Working tree is dirty. Commit or stash changes before running /design-review." exit 1 fi ``` @@ -54,6 +55,10 @@ fi {{BROWSE_SETUP}} +**Check test framework (bootstrap if needed):** + +{{TEST_BOOTSTRAP}} + **Create output directories:** ```bash @@ -153,6 +158,18 @@ Take **before/after screenshot pair** for every fix. - **best-effort**: fix applied but couldn't fully verify (e.g., needs specific browser state) - **reverted**: regression detected → `git revert HEAD` → mark finding as "deferred" +### 8e.5. Regression Test (design-review variant) + +Design fixes are typically CSS-only. Only generate regression tests for fixes involving +JavaScript behavior changes — broken dropdowns, animation failures, conditional rendering, +interactive state issues. + +For CSS-only fixes: skip entirely. CSS regressions are caught by re-running /design-review. + +If the fix involved JS behavior: follow the same procedure as /qa Phase 8e.5 (study existing +test patterns, write a regression test encoding the exact bug condition, run it, commit if +passes or defer if fails). Commit format: `test(design): regression test for FINDING-NNN`. + ### 8f. Self-Regulation (STOP AND EVALUATE) Every 5 fixes (or after any revert), compute the design-fix risk level: @@ -191,7 +208,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` @@ -219,15 +236,15 @@ Write to `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` If the repo has a `TODOS.md`: 1. **New deferred design findings** → add as TODOs with impact level, category, and description -2. 
**Fixed findings that were in TODOS.md** → annotate with "Fixed by /qa-design-review on {branch}, {date}" +2. **Fixed findings that were in TODOS.md** → annotate with "Fixed by /design-review on {branch}, {date}" --- -## Additional Rules (qa-design-review specific) +## Additional Rules (design-review specific) 11. **Clean working tree required.** Refuse to start if `git status --porcelain` is non-empty. 12. **One commit per fix.** Never bundle multiple design fixes into one commit. -13. **Never modify tests or CI configuration.** Only fix application source code and styles. +13. **Only modify tests when generating regression tests in Phase 8e.5.** Never modify CI configuration. Never modify existing tests — only create new test files. 14. **Revert on regression.** If a fix makes things worse, `git revert HEAD` immediately. 15. **Self-regulate.** Follow the design-fix risk heuristic. When in doubt, stop and ask. 16. **CSS-first.** Prefer CSS/styling changes over structural component changes. CSS-only changes are safer and more reversible. diff --git a/docs/images/github-2013.png b/docs/images/github-2013.png new file mode 100644 index 00000000..186324ba Binary files /dev/null and b/docs/images/github-2013.png differ diff --git a/docs/images/github-2026.png b/docs/images/github-2026.png new file mode 100644 index 00000000..b908c8a4 Binary files /dev/null and b/docs/images/github-2026.png differ diff --git a/docs/skills.md b/docs/skills.md new file mode 100644 index 00000000..6ddf9f61 --- /dev/null +++ b/docs/skills.md @@ -0,0 +1,645 @@ +# Skill Deep Dives + +Detailed guides for every gstack skill — philosophy, workflow, and examples. + +| Skill | Your specialist | What they do | +|-------|----------------|--------------| +| [`/plan-ceo-review`](#plan-ceo-review) | **CEO / Founder** | Rethink the problem. Find the 10-star product hiding inside the request. Four modes: Expansion, Selective Expansion, Hold Scope, Reduction. 
| +| [`/plan-eng-review`](#plan-eng-review) | **Eng Manager** | Lock in architecture, data flow, diagrams, edge cases, and tests. Forces hidden assumptions into the open. | +| [`/plan-design-review`](#plan-design-review) | **Senior Designer** | Interactive plan-mode design review. Rates each dimension 0-10, explains what a 10 looks like, fixes the plan. Works in plan mode. | +| [`/design-consultation`](#design-consultation) | **Design Partner** | Build a complete design system from scratch. Knows the landscape, proposes creative risks, generates realistic product mockups. Design at the heart of all other phases. | +| [`/review`](#review) | **Staff Engineer** | Find the bugs that pass CI but blow up in production. Auto-fixes the obvious ones. Flags completeness gaps. | +| [`/ship`](#ship) | **Release Engineer** | Sync main, run tests, audit coverage, push, open PR. Bootstraps test frameworks if you don't have one. One command. | +| [`/browse`](#browse) | **QA Engineer** | Give the agent eyes. Real Chromium browser, real clicks, real screenshots. ~100ms per command. | +| [`/qa`](#qa) | **QA Lead** | Test your app, find bugs, fix them with atomic commits, re-verify. Auto-generates regression tests for every fix. | +| [`/qa-only`](#qa) | **QA Reporter** | Same methodology as /qa but report only. Use when you want a pure bug report without code changes. | +| [`/design-review`](#design-review) | **Designer Who Codes** | Live-site visual audit + fix loop. 80-item audit, then fixes what it finds. Atomic commits, before/after screenshots. | +| [`/setup-browser-cookies`](#setup-browser-cookies) | **Session Manager** | Import cookies from your real browser (Chrome, Arc, Brave, Edge) into the headless session. Test authenticated pages. | +| [`/retro`](#retro) | **Eng Manager** | Team-aware weekly retro. Per-person breakdowns, shipping streaks, test health trends, growth opportunities. 
| +| [`/document-release`](#document-release) | **Technical Writer** | Update all project docs to match what you just shipped. Catches stale READMEs automatically. | + +--- + +## `/plan-ceo-review` + +This is my **founder mode**. + +This is where I want the model to think with taste, ambition, user empathy, and a long time horizon. I do not want it taking the request literally. I want it asking a more important question first: + +**What is this product actually for?** + +I think of this as **Brian Chesky mode**. + +The point is not to implement the obvious ticket. The point is to rethink the problem from the user's point of view and find the version that feels inevitable, delightful, and maybe even a little magical. + +### Example + +Say I am building a Craigslist-style listing app and I say: + +> "Let sellers upload a photo for their item." + +A weak assistant will add a file picker and save an image. + +That is not the real product. + +In `/plan-ceo-review`, I want the model to ask whether "photo upload" is even the feature. Maybe the real feature is helping someone create a listing that actually sells. + +If that is the real job, the whole plan changes. + +Now the model should ask: + +* Can we identify the product from the photo? +* Can we infer the SKU or model number? +* Can we search the web and draft the title and description automatically? +* Can we pull specs, category, and pricing comps? +* Can we suggest which photo will convert best as the hero image? +* Can we detect when the uploaded photo is ugly, dark, cluttered, or low-trust? +* Can we make the experience feel premium instead of like a dead form from 2007? + +That is what `/plan-ceo-review` does for me. + +It does not just ask, "how do I add this feature?" +It asks, **"what is the 10-star product hiding inside this request?"** + +### Four modes + +- **SCOPE EXPANSION** — dream big. The agent proposes the ambitious version. Every expansion is presented as an individual decision you opt into. 
Recommends enthusiastically. +- **SELECTIVE EXPANSION** — hold your current scope as the baseline, but see what else is possible. The agent surfaces opportunities one by one with neutral recommendations — you cherry-pick the ones worth doing. +- **HOLD SCOPE** — maximum rigor on the existing plan. No expansions surfaced. +- **SCOPE REDUCTION** — find the minimum viable version. Cut everything else. + +Visions and decisions are persisted to `~/.gstack/projects/` so they survive beyond the conversation. Exceptional visions can be promoted to `docs/designs/` in your repo for the team. + +--- + +## `/plan-eng-review` + +This is my **eng manager mode**. + +Once the product direction is right, I want a different kind of intelligence entirely. I do not want more sprawling ideation. I do not want more "wouldn't it be cool if." I want the model to become my best technical lead. + +This mode should nail: + +* architecture +* system boundaries +* data flow +* state transitions +* failure modes +* edge cases +* trust boundaries +* test coverage + +And one surprisingly big unlock for me: **diagrams**. + +LLMs get way more complete when you force them to draw the system. Sequence diagrams, state diagrams, component diagrams, data-flow diagrams, even test matrices. Diagrams force hidden assumptions into the open. They make hand-wavy planning much harder. + +So `/plan-eng-review` is where I want the model to build the technical spine that can carry the product vision. + +### Example + +Take the same listing app example. + +Let's say `/plan-ceo-review` already did its job. We decided the real feature is not just photo upload. It is a smart listing flow that: + +* uploads photos +* identifies the product +* enriches the listing from the web +* drafts a strong title and description +* suggests the best hero image + +Now `/plan-eng-review` takes over. 
+ +Now I want the model to answer questions like: + +* What is the architecture for upload, classification, enrichment, and draft generation? +* Which steps happen synchronously, and which go to background jobs? +* Where are the boundaries between app server, object storage, vision model, search/enrichment APIs, and the listing database? +* What happens if upload succeeds but enrichment fails? +* What happens if product identification is low-confidence? +* How do retries work? +* How do we prevent duplicate jobs? +* What gets persisted when, and what can be safely recomputed? + +And this is where I want diagrams — architecture diagrams, state models, data-flow diagrams, test matrices. Diagrams force hidden assumptions into the open. They make hand-wavy planning much harder. + +That is `/plan-eng-review`. + +Not "make the idea smaller." +**Make the idea buildable.** + +### Review Readiness Dashboard + +Every review (CEO, Eng, Design) logs its result. At the end of each review, you see a dashboard: + +``` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 1 | 2026-03-16 14:30 | CLEAR | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ +``` + +Eng Review is the only required gate (disable with `gstack-config set skip_eng_review true`). CEO and Design are informational — recommended for product and UI changes respectively. + +### Plan-to-QA flow + +When `/plan-eng-review` finishes the test review section, it writes a test plan artifact to `~/.gstack/projects/`. 
When you later run `/qa`, it picks up that test plan automatically — your engineering review feeds directly into QA testing with no manual copy-paste. + +--- + +## `/plan-design-review` + +This is my **senior designer reviewing your plan** — before you write a single line of code. + +Most plans describe what the backend does but never specify what the user actually sees. Empty states? Error states? Loading states? Mobile layout? AI slop risk? These decisions get deferred to "figure it out during implementation" — and then an engineer ships "No items found." as the empty state because nobody specified anything better. + +`/plan-design-review` catches all of this during planning, when it's cheap to fix. + +It works like `/plan-ceo-review` and `/plan-eng-review` — interactive, one issue at a time, with the **STOP + AskUserQuestion** pattern. It rates each design dimension 0-10, explains what a 10 looks like, then edits the plan to get there. The rating drives the work: rate low = lots of fixes, rate high = quick pass. + +Seven passes over the plan: information architecture, interaction state coverage, user journey, AI slop risk, design system alignment, responsive/accessibility, and unresolved design decisions. For each pass, it finds gaps and either fixes them directly (obvious ones) or asks you to make a design choice (genuine tradeoffs). + +### Example + +``` +You: /plan-design-review + +Claude: Initial Design Rating: 4/10 + + "This plan describes a user dashboard but never specifies + what the user sees first. It says 'cards with icons' — + which looks like every SaaS template. It mentions zero + loading states, zero empty states, and no mobile behavior." + + Pass 1 (Info Architecture): 3/10 + "A 10 would define primary/secondary/tertiary content + hierarchy for every screen." 
+
+ → Added information hierarchy section to plan
+
+ Pass 2 (Interaction States): 2/10
+ "The plan has 4 UI features but specifies 0 out of 20
+ interaction states (4 features × 5 states each)."
+ → Added interaction state table to plan
+
+ Pass 4 (AI Slop): 4/10
+ "The plan says 'clean, modern UI with cards and icons'
+ and 'hero section with gradient'. These are the top 2
+ AI-generated-looking patterns."
+ → Rewrote UI descriptions with specific, intentional alternatives
+
+ Overall: 4/10 → 8/10 after fixes
+ "Plan is design-complete. Run /design-review after
+ implementation for visual QA."
+```
+
+When you re-run it, sections already at 8+ get a quick pass. Sections below 8 get full treatment. For live-site visual audits post-implementation, use `/design-review`.
+
+---
+
+## `/design-consultation`
+
+This is my **design partner mode**.
+
+`/design-review` audits a site that already exists. `/design-consultation` is for when you have nothing yet — no design system, no font choices, no color palette. You are starting from zero and you want a senior designer to sit down with you and build the whole visual identity together.
+
+It is a conversation, not a form. The agent asks about your product, your users, and your audience. It thinks about what your product needs to communicate — trust, speed, craft, warmth, whatever fits — and works backward from that to concrete choices. Then it proposes a complete, coherent design system: aesthetic direction, typography (3+ fonts with specific roles), color palette with hex values, spacing scale, layout approach, and motion strategy. Every recommendation comes with a rationale. Every choice reinforces every other choice.
+
+But coherence is table stakes. Every dev tool dashboard looks the same — clean sans-serif, muted grays, a blue accent. They are all coherent. They are all forgettable. 
The difference between a product that looks "nice" and one that people actually recognize is the **deliberate creative risks**: the unexpected serif for headings, the bold accent nobody else in your category uses, the tighter spacing that makes your data feel authoritative instead of airy. + +That is what `/design-consultation` is really about. It does not just propose a safe system. It proposes safe choices AND risks — and tells you which is which. "Here are the choices that keep you literate in your category. And here is where I think you should break from convention, and why." You pick which risks to take. The agent checks that the whole system still coheres either way. + +If you want, the agent will research what's already out there in your space — take screenshots of real sites, analyze their fonts and colors and spacing — so you can see the landscape before you make choices. This is not about copying. It is about getting in the ballpark so you know what the conventions are, and then deciding which ones are worth breaking. + +After you agree on the system, it generates an interactive HTML preview page — not just swatches and font samples, but realistic product pages. If you are building a dashboard, you see a dashboard with a sidebar, data tables, and stat cards. If you are building a marketing site, you see a hero section with real copy and a CTA. Everything rendered in your design system, with your product name, in light and dark mode. You see what your product could feel like before a single line of production code is written. + +Then it writes `DESIGN.md` to your repo root — your project's design source of truth — and updates `CLAUDE.md` so every future Claude Code session respects the system. From that point on, `/design-review` can audit against it, and any agent working on your frontend knows the rules. + +### Example + +``` +You: /design-consultation + +Claude: From the README, this looks like a developer analytics dashboard + for engineering teams. 
Sound right? Tell me about the feeling + you want — and would you like me to look at what's out there + first so we can see the landscape? + +You: Yes, it's for eng managers. Show me what's out there. + +Claude: [browses Linear, Vercel, Datadog, Posthog — takes screenshots] + + Here's the landscape. They all converge on clean sans-serifs + and muted blue-gray palettes. Linear stands out with dark-first + and tight spacing. The rest feel interchangeable. + + Here's my proposal — and where I'd play it safe vs. take risks: + + AESTHETIC: Industrial/Utilitarian — data-first, every pixel earns its place + TYPOGRAPHY: Geist (body + UI), Instrument Serif (display), JetBrains Mono (data) + COLOR: Restrained — slate neutrals, single teal accent (#0D9488) + SPACING: 4px base, compact density + LAYOUT: Grid-disciplined, 12-column + MOTION: Minimal-functional — only transitions that aid comprehension + + SAFE CHOICES (match category expectations): + Geist for body — your users already know this font from Vercel + Slate neutrals — warm grays feel right for data-heavy UIs + Grid-disciplined layout — dashboards need predictable alignment + + RISKS (where you'd stand out): + Instrument Serif for display headings — nobody in dev tools + uses a serif. It creates instant hierarchy contrast and + signals "we care about craft" in a space full of sans-serifs. + Single teal accent instead of blue — everyone else in this space uses blue. + Teal is close enough to feel trustworthy, different enough + to be recognizable. + 4px compact density — tighter than Linear, says "we respect + your screen real estate." + + The safe choices keep you literate. The risks are where your + product gets its own face. Which risks appeal to you? + +You: Love the serif risk. Let's see it. + +Claude: Preview opened in your browser. + [/tmp/design-consultation-preview-1710000000.html] + + The page shows a realistic dashboard: sidebar nav, header with + avatar, stat cards, data table — all in your design system. 
+ Instrument Serif at hero scale, Geist in body text, JetBrains + Mono in the data table. Teal accent on buttons and active states. + Light and dark mode toggle in the corner. + + How does it feel? Want to adjust anything before I write DESIGN.md? + +You: Ship it. + +Claude: Wrote DESIGN.md (typography, color, spacing, layout, motion). + Updated CLAUDE.md with design system guidelines. + All future sessions will respect these choices. +``` + +--- + +## `/design-review` + +This is my **designer who codes mode**. + +`/plan-design-review` reviews your plan before implementation. `/design-review` audits and fixes the live site after. + +It runs an 80-item visual audit on your live site, then enters a fix loop: for each design finding, it locates the source file, makes the minimal CSS/styling change, commits with `style(design): FINDING-NNN`, re-navigates to verify, and takes before/after screenshots. One commit per fix, fully bisectable. + +The self-regulation heuristic is tuned for design work — CSS-only changes get a free pass (they are inherently safe and reversible), but changes to component JSX/TSX files count against the risk budget. Hard cap at 30 fixes. If the risk score exceeds 20%, it stops and asks. + +### Example + +``` +You: /design-review https://myapp.com + +Claude: [Runs full 80-item visual audit on the live site] + Design Score: C | AI Slop Score: D + 12 findings (4 high, 5 medium, 3 polish) + + Fixing 9 design issues... 
+ + style(design): FINDING-001 — replace 3-column icon grid with asymmetric layout + style(design): FINDING-002 — add heading scale 48/32/24/18/16 + style(design): FINDING-003 — remove gradient hero, use bold typography + style(design): FINDING-004 — add second font for headings + style(design): FINDING-005 — vary border-radius by element role + style(design): FINDING-006 — left-align body text, reserve center for headings + style(design): FINDING-007 — add hover/focus states to all interactive elements + style(design): FINDING-008 — add prefers-reduced-motion media query + style(design): FINDING-009 — set max content width to 680px for body text + + Final audit: + Design Score: C → B+ | AI Slop Score: D → A + 9 fixes applied (8 verified, 1 best-effort). 3 deferred. + + [Report with before/after screenshots saved to .gstack/design-reports/] +``` + +Nine commits, each touching one concern. The AI Slop score went from D to A because the three most recognizable patterns (gradient hero, 3-column grid, uniform radius) are gone. + +--- + +## `/review` + +This is my **paranoid staff engineer mode**. + +Passing tests do not mean the branch is safe. + +`/review` exists because there is a whole class of bugs that can survive CI and still punch you in the face in production. This mode is not about dreaming bigger. It is not about making the plan prettier. It is about asking: + +**What can still break?** + +This is a structural audit, not a style nitpick pass. I want the model to look for things like: + +* N+1 queries +* stale reads +* race conditions +* bad trust boundaries +* missing indexes +* escaping bugs +* broken invariants +* bad retry logic +* tests that pass while missing the real failure mode +* forgotten enum handlers — add a new status or type constant, and `/review` traces it through every switch statement and allowlist in your codebase, not just the files you changed + +### Fix-First + +Findings get action, not just listed. 
Obvious mechanical fixes (dead code, stale comments, N+1 queries) are applied automatically — you see `[AUTO-FIXED] file:line Problem → what was done` for each one. Genuinely ambiguous issues (security, race conditions, design decisions) get surfaced for your call. + +### Completeness gaps + +`/review` now flags shortcut implementations where the complete version costs less than 30 minutes of CC time. If you chose the 80% solution and the 100% solution is a lake, not an ocean, the review will call it out. + +### Example + +Suppose the smart listing flow is implemented and the tests are green. + +`/review` should still ask: + +* Did I introduce an N+1 query when rendering listing photos or draft suggestions? +* Am I trusting client-provided file metadata instead of validating the actual file? +* Can two tabs race and overwrite cover-photo selection or item details? +* Do failed uploads leave orphaned files in storage forever? +* Can the "exactly one hero image" rule break under concurrency? +* If enrichment APIs partially fail, do I degrade gracefully or save garbage? +* Did I accidentally create a prompt injection or trust-boundary problem by pulling web data into draft generation? + +That is the point of `/review`. + +I do not want flattery here. +I want the model imagining the production incident before it happens. + +--- + +## `/ship` + +This is my **release machine mode**. + +Once I have decided what to build, nailed the technical plan, and run a serious review, I do not want more talking. I want execution. + +`/ship` is for the final mile. It is for a ready branch, not for deciding what to build. + +This is where the model should stop behaving like a brainstorm partner and start behaving like a disciplined release engineer: sync with main, run the right tests, make sure the branch state is sane, update changelog or versioning if the repo expects it, push, and create or update the PR. 
+ +### Test bootstrap + +If your project doesn't have a test framework, `/ship` sets one up — detects your runtime, researches the best framework, installs it, writes 3-5 real tests for your actual code, sets up CI/CD (GitHub Actions), and creates TESTING.md. 100% test coverage is the goal — tests make vibe coding safe instead of yolo coding. + +### Coverage audit + +Every `/ship` run builds a code path map from your diff, searches for corresponding tests, and produces an ASCII coverage diagram with quality stars. Gaps get tests auto-generated. Your PR body shows the coverage: `Tests: 42 → 47 (+5 new)`. + +### Review gate + +`/ship` checks the [Review Readiness Dashboard](#review-readiness-dashboard) before creating the PR. If the Eng Review is missing, it asks — but won't block you. Decisions are saved per-branch so you're never re-asked. + +A lot of branches die when the interesting work is done and only the boring release work is left. Humans procrastinate that part. AI should not. + +--- + +## `/browse` + +This is my **QA engineer mode**. + +`/browse` is the skill that closes the loop. Before it, the agent could think and code but was still half blind. It had to guess about UI state, auth flows, redirects, console errors, empty states, and broken layouts. Now it can just go look. + +It is a compiled binary that talks to a persistent Chromium daemon — built on [Playwright](https://playwright.dev/) by Microsoft. First call starts the browser (~3s). Every call after that: ~100-200ms. The browser stays running between commands, so cookies, tabs, and localStorage carry over. 
+ +### Example + +``` +You: /browse staging.myapp.com — log in, test the signup flow, and check + every page I changed in this branch + +Claude: [18 tool calls, ~60 seconds] + + > browse goto https://staging.myapp.com/signup + > browse snapshot -i + > browse fill @e2 "test@example.com" + > browse fill @e3 "password123" + > browse click @e5 (Submit) + > browse screenshot /tmp/signup.png + > Read /tmp/signup.png + + Signup works. Redirected to onboarding. Now checking changed pages. + + > browse goto https://staging.myapp.com/dashboard + > browse screenshot /tmp/dashboard.png + > Read /tmp/dashboard.png + > browse console + + Dashboard loads. No console errors. Charts render with sample data. + + All 4 pages load correctly. No console errors. No broken layouts. + Signup → onboarding → dashboard flow works end to end. +``` + +18 tool calls, about a minute. Full QA pass. No browser opened. + +**Security note:** `/browse` runs a persistent Chromium session. Cookies, localStorage, and session state carry over between commands. Do not use it against sensitive production environments unless you intend to — it is a real browser with real state. The session auto-shuts down after 30 minutes of idle time. + +For the full command reference, see [BROWSER.md](../BROWSER.md). + +--- + +## `/qa` + +This is my **QA lead mode**. + +`/browse` gives the agent eyes. `/qa` gives it a testing methodology. + +The most common use case: you're on a feature branch, you just finished coding, and you want to verify everything works. Just say `/qa` — it reads your git diff, identifies which pages and routes your changes affect, spins up the browser, and tests each one. No URL required. No manual test plan. + +Four modes: + +- **Diff-aware** (automatic on feature branches) — reads `git diff main`, identifies affected pages, tests them specifically +- **Full** — systematic exploration of the entire app. 5-15 minutes. Documents 5-10 well-evidenced issues. 
+- **Quick** (`--quick`) — 30-second smoke test. Homepage + top 5 nav targets. +- **Regression** (`--regression baseline.json`) — run full mode, then diff against a previous baseline. + +### Automatic regression tests + +When `/qa` fixes a bug and verifies it, it automatically generates a regression test that catches the exact scenario that broke. Tests include full attribution tracing back to the QA report. + +### Example + +``` +You: /qa https://staging.myapp.com + +Claude: [Explores 12 pages, fills 3 forms, tests 2 flows] + + QA Report: staging.myapp.com — Health Score: 72/100 + + Top 3 Issues: + 1. CRITICAL: Checkout form submits with empty required fields + 2. HIGH: Mobile nav menu doesn't close after selecting an item + 3. MEDIUM: Dashboard chart overlaps sidebar below 1024px + + [Full report with screenshots saved to .gstack/qa-reports/] +``` + +**Testing authenticated pages:** Use `/setup-browser-cookies` first to import your real browser sessions, then `/qa` can test pages behind login. + +--- + +## `/setup-browser-cookies` + +This is my **session manager mode**. + +Before `/qa` or `/browse` can test authenticated pages, they need cookies. Instead of manually logging in through the headless browser every time, `/setup-browser-cookies` imports your real sessions directly from your daily browser. + +It auto-detects installed Chromium browsers (Comet, Chrome, Arc, Brave, Edge), decrypts cookies via the macOS Keychain, and loads them into the Playwright session. An interactive picker UI lets you choose exactly which domains to import — no cookie values are ever displayed. + +``` +You: /setup-browser-cookies + +Claude: Cookie picker opened — select the domains you want to import + in your browser, then tell me when you're done. + + [You pick github.com, myapp.com in the browser UI] + +You: done + +Claude: Imported 2 domains (47 cookies). Session is ready. 
+``` + +Or skip the UI entirely: + +``` +You: /setup-browser-cookies github.com + +Claude: Imported 12 cookies for github.com from Comet. +``` + +--- + +## `/retro` + +This is my **engineering manager mode**. + +At the end of the week I want to know what actually happened. Not vibes — data. `/retro` analyzes commit history, work patterns, and shipping velocity and writes a candid retrospective. + +It is team-aware. It identifies who is running the command, gives you the deepest treatment on your own work, then breaks down every contributor with specific praise and growth opportunities. It computes metrics like commits, LOC, test ratio, PR sizes, and fix ratio. It detects coding sessions from commit timestamps, finds hotspot files, tracks shipping streaks, and identifies the biggest ship of the week. + +It also tracks test health: total test files, tests added this period, regression test commits, and trend deltas. If test ratio drops below 20%, it flags it as a growth area. + +### Example + +``` +You: /retro + +Claude: Week of Mar 1: 47 commits (3 contributors), 3.2k LOC, 38% tests, 12 PRs, peak: 10pm | Streak: 47d + + ## Your Week + 32 commits, +2.4k LOC, 41% tests. Peak hours: 9-11pm. + Biggest ship: cookie import system (browser decryption + picker UI). + What you did well: shipped a complete feature with encryption, UI, and + 18 unit tests in one focused push... + + ## Team Breakdown + + ### Alice + 12 commits focused on app/services/. Every PR under 200 LOC — disciplined. + Opportunity: test ratio at 12% — worth investing before payment gets more complex. + + ### Bob + 3 commits — fixed the N+1 query on dashboard. Small but high-impact. + Opportunity: only 1 active day this week — check if blocked on anything. + + [Top 3 team wins, 3 things to improve, 3 habits for next week] +``` + +It saves a JSON snapshot to `.context/retros/` so the next run can show trends. + +--- + +## `/document-release` + +This is my **technical writer mode**. 
+ +After `/ship` creates the PR but before it merges, `/document-release` reads every documentation file in the project and cross-references it against the diff. It updates file paths, command lists, project structure trees, and anything else that drifted. Risky or subjective changes get surfaced as questions — everything else is handled automatically. + +``` +You: /document-release + +Claude: Analyzing 21 files changed across 3 commits. Found 8 documentation files. + + README.md: updated skill count from 9 to 10, added new skill to table + CLAUDE.md: added new directory to project structure + CONTRIBUTING.md: current — no changes needed + TODOS.md: marked 2 items complete, added 1 new item + + All docs updated and committed. PR body updated with doc diff. +``` + +It also polishes CHANGELOG voice (without ever overwriting entries), cleans up completed TODOS, checks cross-doc consistency, and asks about VERSION bumps only when appropriate. + +--- + +## Greptile integration + +[Greptile](https://greptile.com) is a YC company that reviews your PRs automatically. It catches real bugs — race conditions, security issues, things that pass CI and blow up in production. It has genuinely saved my ass more than once. I love these guys. + +### Setup + +Install Greptile on your GitHub repo at [greptile.com](https://greptile.com) — it takes about 30 seconds. Once it's reviewing your PRs, gstack picks up its comments automatically. No additional configuration. + +### How it works + +The problem with any automated reviewer is triage. Greptile is good, but not every comment is a real issue. Some are false positives. Some flag things you already fixed three commits ago. Without a triage layer, the comments pile up and you start ignoring them — which defeats the purpose. + +gstack solves this. `/review` and `/ship` are now Greptile-aware. 
They read Greptile's comments, classify each one, and take action: + +- **Valid issues** get added to the critical findings and fixed before shipping +- **Already-fixed issues** get an auto-reply acknowledging the catch +- **False positives** get pushed back — you confirm, and a reply goes out explaining why it's wrong + +The result is a two-layer review: Greptile catches things asynchronously on the PR, then `/review` and `/ship` triage those findings as part of the normal workflow. Nothing falls through the cracks. + +### Learning from history + +Every false positive you confirm gets saved to `~/.gstack/greptile-history.md`. Future runs auto-skip known FP patterns for your codebase. And `/retro` tracks Greptile's batting average over time — so you can see whether the signal-to-noise ratio is improving. + +### Example + +``` +You: /ship + +Claude: [syncs main, runs tests, pre-landing review...] + + Greptile found 3 comments on this PR: + + [VALID] app/services/payment_service.rb:47 — Race condition: + concurrent charges can double-debit. Recommend DB-level advisory + lock around the charge block. + + [ALREADY FIXED] app/models/user.rb:12 — Missing null check + on email before downcase. + → Already fixed in commit abc1234. Auto-replying. + + [FALSE POSITIVE] lib/auth.rb:88 — "Token comparison should + use constant-time comparison." We already use + ActiveSupport::SecurityUtils.secure_compare here. + + Fixing the race condition in payment_service.rb... + + The auth token comment is a false positive. + Options: A) Reply to Greptile explaining B) Fix anyway C) Ignore + +You: A + +Claude: Replied to Greptile. All tests pass. + PR: github.com/you/app/pull/42 +``` + +Three Greptile comments. One real fix. One auto-acknowledged. One false positive pushed back with a reply. Total extra time: about 30 seconds. 
diff --git a/document-release/SKILL.md b/document-release/SKILL.md index 694d64da..bc8ff4ca 100644 --- a/document-release/SKILL.md +++ b/document-release/SKILL.md @@ -29,22 +29,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. 
**Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. diff --git a/gstack-upgrade/SKILL.md b/gstack-upgrade/SKILL.md index 9d635651..50b67dfc 100644 --- a/gstack-upgrade/SKILL.md +++ b/gstack-upgrade/SKILL.md @@ -156,6 +156,13 @@ rm -rf "$LOCAL_GSTACK.bak" ``` Tell user: "Also updated vendored copy at `$LOCAL_GSTACK` — commit `.claude/skills/gstack/` when you're ready." +If `./setup` fails, restore from backup and warn the user: +```bash +rm -rf "$LOCAL_GSTACK" +mv "$LOCAL_GSTACK.bak" "$LOCAL_GSTACK" +``` +Tell user: "Sync failed — restored previous version at `$LOCAL_GSTACK`. Run `/gstack-upgrade` manually to retry." + ### Step 5: Write marker + clear cache ```bash @@ -193,9 +200,26 @@ When invoked directly as `/gstack-upgrade` (not from a preamble): 1. 
Force a fresh update check (bypass cache): ```bash -~/.claude/skills/gstack/bin/gstack-update-check --force +~/.claude/skills/gstack/bin/gstack-update-check --force 2>/dev/null || \ +.claude/skills/gstack/bin/gstack-update-check --force 2>/dev/null || true ``` Use the output to determine if an upgrade is available. 2. If `UPGRADE_AVAILABLE `: follow Steps 2-6 above. -3. If no output (up to date): tell the user "You're already on the latest version (v{version})." + +3. If no output (primary is up to date): check for a stale local vendored copy. + +Run the Step 2 bash block above to detect the primary install type and directory (`INSTALL_TYPE` and `INSTALL_DIR`). Then run the Step 4.5 detection bash block above to check for a local vendored copy (`LOCAL_GSTACK`). + +**If `LOCAL_GSTACK` is empty** (no local vendored copy): tell the user "You're already on the latest version (v{version})." + +**If `LOCAL_GSTACK` is non-empty**, compare versions: +```bash +PRIMARY_VER=$(cat "$INSTALL_DIR/VERSION" 2>/dev/null || echo "unknown") +LOCAL_VER=$(cat "$LOCAL_GSTACK/VERSION" 2>/dev/null || echo "unknown") +echo "PRIMARY=$PRIMARY_VER LOCAL=$LOCAL_VER" +``` + +**If versions differ:** follow the Step 4.5 sync bash block above to update the local copy from the primary. Tell user: "Global v{PRIMARY_VER} is up to date. Updated local vendored copy from v{LOCAL_VER} → v{PRIMARY_VER}. Commit `.claude/skills/gstack/` when you're ready." + +**If versions match:** tell the user "You're on the latest version (v{PRIMARY_VER}). Global and local vendored copy are both up to date." diff --git a/gstack-upgrade/SKILL.md.tmpl b/gstack-upgrade/SKILL.md.tmpl index a441b8d6..0acf16c2 100644 --- a/gstack-upgrade/SKILL.md.tmpl +++ b/gstack-upgrade/SKILL.md.tmpl @@ -154,6 +154,13 @@ rm -rf "$LOCAL_GSTACK.bak" ``` Tell user: "Also updated vendored copy at `$LOCAL_GSTACK` — commit `.claude/skills/gstack/` when you're ready." 
+If `./setup` fails, restore from backup and warn the user: +```bash +rm -rf "$LOCAL_GSTACK" +mv "$LOCAL_GSTACK.bak" "$LOCAL_GSTACK" +``` +Tell user: "Sync failed — restored previous version at `$LOCAL_GSTACK`. Run `/gstack-upgrade` manually to retry." + ### Step 5: Write marker + clear cache ```bash @@ -191,9 +198,26 @@ When invoked directly as `/gstack-upgrade` (not from a preamble): 1. Force a fresh update check (bypass cache): ```bash -~/.claude/skills/gstack/bin/gstack-update-check --force +~/.claude/skills/gstack/bin/gstack-update-check --force 2>/dev/null || \ +.claude/skills/gstack/bin/gstack-update-check --force 2>/dev/null || true ``` Use the output to determine if an upgrade is available. 2. If `UPGRADE_AVAILABLE `: follow Steps 2-6 above. -3. If no output (up to date): tell the user "You're already on the latest version (v{version})." + +3. If no output (primary is up to date): check for a stale local vendored copy. + +Run the Step 2 bash block above to detect the primary install type and directory (`INSTALL_TYPE` and `INSTALL_DIR`). Then run the Step 4.5 detection bash block above to check for a local vendored copy (`LOCAL_GSTACK`). + +**If `LOCAL_GSTACK` is empty** (no local vendored copy): tell the user "You're already on the latest version (v{version})." + +**If `LOCAL_GSTACK` is non-empty**, compare versions: +```bash +PRIMARY_VER=$(cat "$INSTALL_DIR/VERSION" 2>/dev/null || echo "unknown") +LOCAL_VER=$(cat "$LOCAL_GSTACK/VERSION" 2>/dev/null || echo "unknown") +echo "PRIMARY=$PRIMARY_VER LOCAL=$LOCAL_VER" +``` + +**If versions differ:** follow the Step 4.5 sync bash block above to update the local copy from the primary. Tell user: "Global v{PRIMARY_VER} is up to date. Updated local vendored copy from v{LOCAL_VER} → v{PRIMARY_VER}. Commit `.claude/skills/gstack/` when you're ready." + +**If versions match:** tell the user "You're on the latest version (v{PRIMARY_VER}). Global and local vendored copy are both up to date." 
diff --git a/package.json b/package.json index da816815..502617d9 100644 --- a/package.json +++ b/package.json @@ -8,13 +8,15 @@ "browse": "./browse/dist/browse" }, "scripts": { - "build": "bun run gen:skill-docs && bun build --compile browse/src/cli.ts --outfile browse/dist/browse && bun build --compile browse/src/find-browse.ts --outfile browse/dist/find-browse && git rev-parse HEAD > browse/dist/.version && rm -f .*.bun-build", + "build": "bun run gen:skill-docs && bun build --compile browse/src/cli.ts --outfile browse/dist/browse && bun build --compile browse/src/find-browse.ts --outfile browse/dist/find-browse && git rev-parse HEAD > browse/dist/.version && rm -f .*.bun-build || true", "gen:skill-docs": "bun run scripts/gen-skill-docs.ts", "dev": "bun run browse/src/cli.ts", "server": "bun run browse/src/server.ts", "test": "bun test browse/test/ test/ --ignore test/skill-e2e.test.ts --ignore test/skill-llm-eval.test.ts", "test:evals": "EVALS=1 bun test test/skill-llm-eval.test.ts test/skill-e2e.test.ts", + "test:evals:all": "EVALS=1 EVALS_ALL=1 bun test test/skill-llm-eval.test.ts test/skill-e2e.test.ts", "test:e2e": "EVALS=1 bun test test/skill-e2e.test.ts", + "test:e2e:all": "EVALS=1 EVALS_ALL=1 bun test test/skill-e2e.test.ts", "skill:check": "bun run scripts/skill-check.ts", "dev:skill": "bun run scripts/dev-skill.ts", "start": "bun run browse/src/server.ts", @@ -22,7 +24,8 @@ "eval:compare": "bun run lib/cli-eval.ts compare", "eval:summary": "bun run lib/cli-eval.ts summary", "eval:trend": "bun run lib/cli-eval.ts trend", - "eval:watch": "bun run lib/cli-eval.ts watch" + "eval:watch": "bun run lib/cli-eval.ts watch", + "eval:select": "bun run scripts/eval-select.ts" }, "dependencies": { "playwright": "^1.58.2", diff --git a/plan-ceo-review/SKILL.md b/plan-ceo-review/SKILL.md index b05db5e9..ce799fe1 100644 --- a/plan-ceo-review/SKILL.md +++ b/plan-ceo-review/SKILL.md @@ -3,9 +3,9 @@ name: plan-ceo-review version: 1.0.0 description: | CEO/founder-mode 
plan review. Rethink the problem, find the 10-star product, - challenge premises, expand scope when it creates a better product. Three modes: - SCOPE EXPANSION (dream big), HOLD SCOPE (maximum rigor), SCOPE REDUCTION - (strip to essentials). + challenge premises, expand scope when it creates a better product. Four modes: + SCOPE EXPANSION (dream big), SELECTIVE EXPANSION (hold scope + cherry-pick + expansions), HOLD SCOPE (maximum rigor), SCOPE REDUCTION (strip to essentials). allowed-tools: - Read - Grep @@ -28,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. 
**Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. 
ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -105,10 +144,12 @@ branch name wherever the instructions say "the base branch." ## Philosophy You are not here to rubber-stamp this plan. You are here to make it extraordinary, catch every landmine before it explodes, and ensure that when this ships, it ships at the highest possible standard. 
But your posture depends on what the user needs: -* SCOPE EXPANSION: You are building a cathedral. Envision the platonic ideal. Push scope UP. Ask "what would make this 10x better for 2x the effort?" The answer to "should we also build X?" is "yes, if it serves the vision." You have permission to dream. +* SCOPE EXPANSION: You are building a cathedral. Envision the platonic ideal. Push scope UP. Ask "what would make this 10x better for 2x the effort?" You have permission to dream — and to recommend enthusiastically. But every expansion is the user's decision. Present each scope-expanding idea as an AskUserQuestion. The user opts in or out. +* SELECTIVE EXPANSION: You are a rigorous reviewer who also has taste. Hold the current scope as your baseline — make it bulletproof. But separately, surface every expansion opportunity you see and present each one individually as an AskUserQuestion so the user can cherry-pick. Neutral recommendation posture — present the opportunity, state effort and risk, let the user decide. Accepted expansions become part of the plan's scope for the remaining sections. Rejected ones go to "NOT in scope." * HOLD SCOPE: You are a rigorous reviewer. The plan's scope is accepted. Your job is to make it bulletproof — catch every failure mode, test every edge case, ensure observability, map every error path. Do not silently reduce OR expand. * SCOPE REDUCTION: You are a surgeon. Find the minimum viable version that achieves the core outcome. Cut everything else. Be ruthless. -Critical rule: Once the user selects a mode, COMMIT to it. Do not silently drift toward a different mode. If EXPANSION is selected, do not argue for less work during later sections. If REDUCTION is selected, do not sneak scope back in. Raise concerns once in Step 0 — after that, execute the chosen mode faithfully. +* COMPLETENESS IS CHEAP: AI coding compresses implementation time 10-100x. 
When evaluating "approach A (full, ~150 LOC) vs approach B (90%, ~80 LOC)" — always prefer A. The 70-line delta costs seconds with CC. "Ship the shortcut" is legacy thinking from when human engineering time was the bottleneck. Boil the lake. +Critical rule: In ALL modes, the user is 100% in control. Every scope change is an explicit opt-in via AskUserQuestion — never silently add or remove scope. Once the user selects a mode, COMMIT to it. Do not silently drift toward a different mode. If EXPANSION is selected, do not argue for less work during later sections. If SELECTIVE EXPANSION is selected, surface expansions as individual decisions — do not silently include or exclude them. If REDUCTION is selected, do not sneak scope back in. Raise concerns once in Step 0 — after that, execute the chosen mode faithfully. Do NOT make any code changes. Do NOT start implementation. Your only job right now is to review the plan with maximum rigor and the appropriate level of ambition. ## Prime Directives @@ -135,6 +176,31 @@ Do NOT make any code changes. Do NOT start implementation. Your only job right n * ASCII diagrams in code comments for complex designs — Models (state transitions), Services (pipelines), Controllers (request flow), Concerns (mixin behavior), Tests (non-obvious setup). * Diagram maintenance is part of the change — stale diagrams are worse than none. +## Cognitive Patterns — How Great CEOs Think + +These are not checklist items. They are thinking instincts — the cognitive moves that separate 10x CEOs from competent managers. Let them shape your perspective throughout the review. Don't enumerate them; internalize them. + +1. **Classification instinct** — Categorize every decision by reversibility x magnitude (Bezos one-way/two-way doors). Most things are two-way doors; move fast. +2. **Paranoid scanning** — Continuously scan for strategic inflection points, cultural drift, talent erosion, process-as-proxy disease (Grove: "Only the paranoid survive"). +3. 
**Inversion reflex** — For every "how do we win?" also ask "what would make us fail?" (Munger). +4. **Focus as subtraction** — Primary value-add is what to *not* do. Jobs went from 350 products to 10. Default: do fewer things, better. +5. **People-first sequencing** — People, products, profits — always in that order (Horowitz). Talent density solves most other problems (Hastings). +6. **Speed calibration** — Fast is default. Only slow down for irreversible + high-magnitude decisions. 70% information is enough to decide (Bezos). +7. **Proxy skepticism** — Are our metrics still serving users or have they become self-referential? (Bezos Day 1). +8. **Narrative coherence** — Hard decisions need clear framing. Make the "why" legible, not everyone happy. +9. **Temporal depth** — Think in 5-10 year arcs. Apply regret minimization for major bets (Bezos at age 80). +10. **Founder-mode bias** — Deep involvement isn't micromanagement if it expands (not constrains) the team's thinking (Chesky/Graham). +11. **Wartime awareness** — Correctly diagnose peacetime vs wartime. Peacetime habits kill wartime companies (Horowitz). +12. **Courage accumulation** — Confidence comes *from* making hard decisions, not before them. "The struggle IS the job." +13. **Willfulness as strategy** — Be intentionally willful. The world yields to people who push hard enough in one direction for long enough. Most people give up too early (Altman). +14. **Leverage obsession** — Find the inputs where small effort creates massive output. Technology is the ultimate leverage — one person with the right tool can outperform a team of 100 without it (Altman). +15. **Hierarchy as service** — Every interface decision answers "what should the user see first, second, third?" Respecting their time, not prettifying pixels. +16. **Edge case paranoia (design)** — What if the name is 47 chars? Zero results? Network fails mid-action? First-time user vs power user? Empty states are features, not afterthoughts. +17. 
**Subtraction default** — "As little design as possible" (Rams). If a UI element doesn't earn its pixels, cut it. Feature bloat kills products faster than missing features. +18. **Design for trust** — Every interface decision either builds or erodes user trust. Pixel-level intentionality about safety, identity, and belonging. + +When you evaluate architecture, think through the inversion reflex. When you challenge scope, apply focus as subtraction. When you assess timeline, use speed calibration. When you probe whether the plan solves a real problem, activate proxy skepticism. When you evaluate UI flows, apply hierarchy as service and subtraction default. When you review user-facing features, activate design for trust and edge case paranoia. + ## Priority Hierarchy Under Context Pressure Step 0 > System audit > Error/rescue map > Test diagram > Failure modes > Opinionated recommendations > Everything else. Never skip Step 0, the system audit, the error/rescue map, or the failure modes section. These are the highest-leverage outputs. @@ -164,7 +230,10 @@ Map: ### Retrospective Check Check the git log for this branch. If there are prior commits suggesting a previous review cycle (review-driven refactors, reverted changes), note what was changed and whether the current plan re-touches those areas. Be MORE aggressive reviewing areas that were previously problematic. Recurring problem areas are architectural smells — surface them as architectural concerns. -### Taste Calibration (EXPANSION mode only) +### Frontend/UI Scope Detection +Analyze the plan. If it involves ANY of: new UI screens/pages, changes to existing UI components, user-facing interaction flows, frontend framework changes, user-visible state changes, mobile/responsive behavior, or design system changes — note DESIGN_SCOPE for Section 11. + +### Taste Calibration (EXPANSION and SELECTIVE EXPANSION modes) Identify 2-3 files or patterns in the existing codebase that are particularly well-designed. 
Note them as style references for the review. Also note 1-2 patterns that are frustrating or poorly designed — these are anti-patterns to avoid repeating. Report findings before proceeding to Step 0. @@ -187,10 +256,20 @@ Describe the ideal end state of this system 12 months from now. Does this plan m ``` ### 0D. Mode-Specific Analysis -**For SCOPE EXPANSION** — run all three: +**For SCOPE EXPANSION** — run all three, then the opt-in ceremony: 1. 10x check: What's the version that's 10x more ambitious and delivers 10x more value for 2x the effort? Describe it concretely. 2. Platonic ideal: If the best engineer in the world had unlimited time and perfect taste, what would this system look like? What would the user feel when using it? Start from experience, not architecture. -3. Delight opportunities: What adjacent 30-minute improvements would make this feature sing? Things where a user would think "oh nice, they thought of that." List at least 3. +3. Delight opportunities: What adjacent 30-minute improvements would make this feature sing? Things where a user would think "oh nice, they thought of that." List at least 5. +4. **Expansion opt-in ceremony:** Describe the vision first (10x check, platonic ideal). Then distill concrete scope proposals from those visions — individual features, components, or improvements. Present each proposal as its own AskUserQuestion. Recommend enthusiastically — explain why it's worth doing. But the user decides. Options: **A)** Add to this plan's scope **B)** Defer to TODOS.md **C)** Skip. Accepted items become plan scope for all remaining review sections. Rejected items go to "NOT in scope." + +**For SELECTIVE EXPANSION** — run the HOLD SCOPE analysis first, then surface expansions: +1. Complexity check: If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. +2. 
What is the minimum set of changes that achieves the stated goal? Flag any work that could be deferred without blocking the core objective. +3. Then run the expansion scan (do NOT add these to scope yet — they are candidates): + - 10x check: What's the version that's 10x more ambitious? Describe it concretely. + - Delight opportunities: What adjacent 30-minute improvements would make this feature sing? List at least 5. + - Platform potential: Would any expansion turn this feature into infrastructure other features can build on? +4. **Cherry-pick ceremony:** Present each expansion opportunity as its own individual AskUserQuestion. Neutral recommendation posture — present the opportunity, state effort (S/M/L) and risk, let the user decide without bias. Options: **A)** Add to this plan's scope **B)** Defer to TODOS.md **C)** Skip. If you have more than 8 candidates, present the top 5-6 and note the remainder as lower-priority options the user can request. Accepted items become plan scope for all remaining review sections. Rejected items go to "NOT in scope." **For HOLD SCOPE** — run this: 1. Complexity check: If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. @@ -200,7 +279,57 @@ Describe the ideal end state of this system 12 months from now. Does this plan m 1. Ruthless cut: What is the absolute minimum that ships value to a user? Everything else is deferred. No exceptions. 2. What can be a follow-up PR? Separate "must ship together" from "nice to ship together." -### 0E. Temporal Interrogation (EXPANSION and HOLD modes) +### 0D-POST. Persist CEO Plan (EXPANSION and SELECTIVE EXPANSION only) + +After the opt-in/cherry-pick ceremony, write the plan to disk so the vision and decisions survive beyond this conversation. Only run this step for EXPANSION and SELECTIVE EXPANSION modes. 
+ +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG/ceo-plans +``` + +Before writing, check for existing CEO plans in the ceo-plans/ directory. If any are >30 days old or their branch has been merged/deleted, offer to archive them: + +```bash +mkdir -p ~/.gstack/projects/$SLUG/ceo-plans/archive +# For each stale plan: mv ~/.gstack/projects/$SLUG/ceo-plans/{old-plan}.md ~/.gstack/projects/$SLUG/ceo-plans/archive/ +``` + +Write to `~/.gstack/projects/$SLUG/ceo-plans/{date}-{feature-slug}.md` using this format: + +```markdown +--- +status: ACTIVE +--- +# CEO Plan: {Feature Name} +Generated by /plan-ceo-review on {date} +Branch: {branch} | Mode: {EXPANSION / SELECTIVE EXPANSION} +Repo: {owner/repo} + +## Vision + +### 10x Check +{10x vision description} + +### Platonic Ideal +{platonic ideal description — EXPANSION mode only} + +## Scope Decisions + +| # | Proposal | Effort | Decision | Reasoning | +|---|----------|--------|----------|-----------| +| 1 | {proposal} | S/M/L | ACCEPTED / DEFERRED / SKIPPED | {why} | + +## Accepted Scope (added to this plan) +- {bullet list of what's now in scope} + +## Deferred to TODOS.md +- {items with context} +``` + +Derive the feature slug from the plan being reviewed (e.g., "user-dashboard", "auth-refactor"). Use the date in YYYY-MM-DD format. + +### 0E. Temporal Interrogation (EXPANSION, SELECTIVE EXPANSION, and HOLD modes) Think ahead to implementation: What decisions will need to be made during implementation that should be resolved NOW in the plan? ``` HOUR 1 (foundations): What does the implementer need to know? @@ -208,20 +337,30 @@ Think ahead to implementation: What decisions will need to be made during implem HOUR 4-5 (integration): What will surprise them? HOUR 6+ (polish/tests): What will they wish they'd planned for? ``` +NOTE: These represent human-team implementation hours. With CC + gstack, +6 hours of human implementation compresses to ~30-60 minutes. 
The decisions +are identical — the implementation speed is 10-20x faster. Always present +both scales when discussing effort. + Surface these as questions for the user NOW, not as "figure it out later." ### 0F. Mode Selection -Present three options: -1. **SCOPE EXPANSION:** The plan is good but could be great. Propose the ambitious version, then review that. Push scope up. Build the cathedral. -2. **HOLD SCOPE:** The plan's scope is right. Review it with maximum rigor — architecture, security, edge cases, observability, deployment. Make it bulletproof. -3. **SCOPE REDUCTION:** The plan is overbuilt or wrong-headed. Propose a minimal version that achieves the core goal, then review that. +In every mode, you are 100% in control. No scope is added without your explicit approval. + +Present four options: +1. **SCOPE EXPANSION:** The plan is good but could be great. Dream big — propose the ambitious version. Every expansion is presented individually for your approval. You opt in to each one. +2. **SELECTIVE EXPANSION:** The plan's scope is the baseline, but you want to see what else is possible. Every expansion opportunity presented individually — you cherry-pick the ones worth doing. Neutral recommendations. +3. **HOLD SCOPE:** The plan's scope is right. Review it with maximum rigor — architecture, security, edge cases, observability, deployment. Make it bulletproof. No expansions surfaced. +4. **SCOPE REDUCTION:** The plan is overbuilt or wrong-headed. Propose a minimal version that achieves the core goal, then review that. 
Context-dependent defaults: * Greenfield feature → default EXPANSION +* Feature enhancement or iteration on existing system → default SELECTIVE EXPANSION * Bug fix or hotfix → default HOLD SCOPE * Refactor → default HOLD SCOPE * Plan touching >15 files → suggest REDUCTION unless user pushes back * User says "go big" / "ambitious" / "cathedral" → EXPANSION, no question +* User says "hold scope but tempt me" / "show me options" / "cherry-pick" → SELECTIVE EXPANSION, no question Once selected, commit fully. Do not silently drift. **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. @@ -244,10 +383,12 @@ Evaluate and diagram: * Production failure scenarios. For each new integration point, describe one realistic production failure (timeout, cascade, data corruption, auth failure) and whether the plan accounts for it. * Rollback posture. If this ships and immediately breaks, what's the rollback procedure? Git revert? Feature flag? DB migration rollback? How long? -**EXPANSION mode additions:** +**EXPANSION and SELECTIVE EXPANSION additions:** * What would make this architecture beautiful? Not just correct — elegant. Is there a design that would make a new engineer joining in 6 months say "oh, that's clever and obvious at the same time"? * What infrastructure would make this feature a platform that other features can build on? +**SELECTIVE EXPANSION:** If any accepted cherry-picks from Step 0D affect the architecture, evaluate their architectural fit here. Flag any that create coupling concerns or don't integrate cleanly — this is a chance to revisit the decision with new information. + Required ASCII diagram: full system architecture showing new components and their relationships to existing ones. **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. 
If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. @@ -406,8 +547,8 @@ Evaluate: * Admin tooling. New operational tasks that need admin UI or rake tasks? * Runbooks. For each new failure mode: what's the operational response? -**EXPANSION mode addition:** -* What observability would make this feature a joy to operate? +**EXPANSION and SELECTIVE EXPANSION addition:** +* What observability would make this feature a joy to operate? (For SELECTIVE EXPANSION, include observability for any accepted cherry-picks.) **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. ### Section 9: Deployment & Rollout Review @@ -421,8 +562,8 @@ Evaluate: * Post-deploy verification checklist. First 5 minutes? First hour? * Smoke tests. What automated checks should run immediately post-deploy? -**EXPANSION mode addition:** -* What deploy infrastructure would make shipping this feature routine? +**EXPANSION and SELECTIVE EXPANSION addition:** +* What deploy infrastructure would make shipping this feature routine? (For SELECTIVE EXPANSION, assess whether accepted cherry-picks change the deployment risk profile.) **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. ### Section 10: Long-Term Trajectory Review @@ -434,11 +575,37 @@ Evaluate: * Ecosystem fit. Aligns with Rails/JS ecosystem direction? * The 1-year question. Read this plan as a new engineer in 12 months — obvious? -**EXPANSION mode additions:** +**EXPANSION and SELECTIVE EXPANSION additions:** * What comes after this ships? Phase 2? Phase 3? Does the architecture support that trajectory? * Platform potential. Does this create capabilities other features can leverage? 
+* (SELECTIVE EXPANSION only) Retrospective: Were the right cherry-picks accepted? Did any rejected expansions turn out to be load-bearing for the accepted ones? **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. +### Section 11: Design & UX Review (skip if no UI scope detected) +The CEO calling in the designer. Not a pixel-level audit — that's /plan-design-review and /design-review. This is ensuring the plan has design intentionality. + +Evaluate: +* Information architecture — what does the user see first, second, third? +* Interaction state coverage map: + FEATURE | LOADING | EMPTY | ERROR | SUCCESS | PARTIAL +* User journey coherence — storyboard the emotional arc +* AI slop risk — does the plan describe generic UI patterns? +* DESIGN.md alignment — does the plan match the stated design system? +* Responsive intention — is mobile mentioned or afterthought? +* Accessibility basics — keyboard nav, screen readers, contrast, touch targets + +**EXPANSION and SELECTIVE EXPANSION additions:** +* What would make this UI feel *inevitable*? +* What 30-minute UI touches would make users think "oh nice, they thought of that"? + +Required ASCII diagram: user flow showing screens/states and transitions. + +If this plan has significant UI scope, recommend: "Consider running /plan-design-review for a deep design review of this plan before implementation." +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. + +## Post-Implementation Design Audit (if UI scope detected) +After implementation, run `/design-review` on the live site to catch visual issues that can only be evaluated with rendered output. 
+ ## CRITICAL RULE — How to ask questions Follow the AskUserQuestion format from the Preamble above. Additional rules for plan reviews: * **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. @@ -479,14 +646,17 @@ For each TODO, describe: * **Pros:** What you gain by doing this work. * **Cons:** Cost, complexity, or risks of doing it. * **Context:** Enough detail that someone picking this up in 3 months understands the motivation, the current state, and where to start. -* **Effort estimate:** S/M/L/XL +* **Effort estimate:** S/M/L/XL (human team) → with CC+gstack: S→S, M→S, L→M, XL→L * **Priority:** P1/P2/P3 * **Depends on / blocked by:** Any prerequisites or ordering constraints. Then present options: **A)** Add to TODOS.md **B)** Skip — not valuable enough **C)** Build it now in this PR instead of deferring. -### Delight Opportunities (EXPANSION mode only) -Identify at least 5 "bonus chunk" opportunities (<30 min each) that would make users think "oh nice, they thought of that." Present each delight opportunity as its own individual AskUserQuestion. Never batch them. For each one, describe what it is, why it would delight users, and effort estimate. Then present options: **A)** Add to TODOS.md as a vision item **B)** Skip **C)** Build it now in this PR. +### Scope Expansion Decisions (EXPANSION and SELECTIVE EXPANSION only) +For EXPANSION and SELECTIVE EXPANSION modes: expansion opportunities and delight items were surfaced and decided in Step 0D (opt-in/cherry-pick ceremony). The decisions are persisted in the CEO plan document. Reference the CEO plan for the full record. Do not re-surface them here — list the accepted expansions for completeness: +* Accepted: {list items added to scope} +* Deferred: {list items sent to TODOS.md} +* Skipped: {list items rejected} ### Diagrams (mandatory, produce all that apply) 1. System architecture @@ -504,7 +674,7 @@ List every ASCII diagram in files this plan touches. Still accurate? 
+====================================================================+ | MEGA PLAN REVIEW — COMPLETION SUMMARY | +====================================================================+ - | Mode selected | EXPANSION / HOLD / REDUCTION | + | Mode selected | EXPANSION / SELECTIVE / HOLD / REDUCTION | | System Audit | [key findings] | | Step 0 | [mode + key decisions] | | Section 1 (Arch) | ___ issues found | @@ -517,6 +687,7 @@ List every ASCII diagram in files this plan touches. Still accurate? | Section 8 (Observ) | ___ gaps found | | Section 9 (Deploy) | ___ risks flagged | | Section 10 (Future) | Reversibility: _/5, debt items: ___ | + | Section 11 (Design) | ___ issues / SKIPPED (no UI scope) | +--------------------------------------------------------------------+ | NOT in scope | written (___ items) | | What already exists | written | @@ -524,7 +695,9 @@ List every ASCII diagram in files this plan touches. Still accurate? | Error/rescue registry| ___ methods, ___ CRITICAL GAPS | | Failure modes | ___ total, ___ CRITICAL GAPS | | TODOS.md updates | ___ items proposed | - | Delight opportunities| ___ identified (EXPANSION only) | + | Scope proposals | ___ proposed, ___ accepted (EXP + SEL) | + | CEO plan | written / skipped (HOLD/REDUCTION) | + | Lake Score | X/Y recommendations chose complete option | | Diagrams produced | ___ (list types) | | Stale diagrams found | ___ | | Unresolved decisions | ___ (listed below) | @@ -534,6 +707,72 @@ List every ASCII diagram in files this plan touches. Still accurate? ### Unresolved Decisions If any AskUserQuestion goes unanswered, note it here. Never silently default. 
+## Review Log + +After producing the Completion Summary above, persist the review result: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"plan-ceo-review","timestamp":"TIMESTAMP","status":"STATUS","unresolved":N,"critical_gaps":N,"mode":"MODE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Before running this command, substitute the placeholder values from the Completion Summary you just produced: +- **TIMESTAMP**: current ISO 8601 datetime (e.g., 2026-03-16T14:30:00) +- **STATUS**: "clean" if 0 unresolved decisions AND 0 critical gaps; otherwise "issues_open" +- **unresolved**: number from "Unresolved decisions" in the summary +- **critical_gaps**: number from "Failure modes: ___ CRITICAL GAPS" in the summary +- **MODE**: the mode the user selected (SCOPE_EXPANSION / SELECTIVE_EXPANSION / HOLD_SCOPE / SCOPE_REDUCTION) + +## Review Readiness Dashboard + +After completing the review, read the review log and config to display the dashboard. + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" +echo "---CONFIG---" +~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" +``` + +Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. For Design Review, show whichever is more recent between `plan-design-review` (full visual audit) and `design-review-lite` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. 
Display: + +``` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 0 | — | — | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ +``` + +**Review tiers:** +- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with `gstack-config set skip_eng_review true` (the "don't bother me" setting). +- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. +- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. + +**Verdict logic:** +- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or `skip_eng_review` is `true`) +- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues +- CEO and Design reviews are shown for context but never block shipping +- If `skip_eng_review` config is `true`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED + +## docs/designs Promotion (EXPANSION and SELECTIVE EXPANSION only) + +At the end of the review, if the vision produced a compelling feature direction, offer to promote the CEO plan to the project repo. AskUserQuestion: + +"The vision from this review produced {N} accepted scope expansions. Want to promote it to a design doc in the repo?"
+- **A)** Promote to `docs/designs/{FEATURE}.md` (committed to repo, visible to the team) +- **B)** Keep in `~/.gstack/projects/` only (local, personal reference) +- **C)** Skip + +If promoted, copy the CEO plan content to `docs/designs/{FEATURE}.md` (create the directory if needed) and update the `status` field in the original CEO plan from `ACTIVE` to `PROMOTED`. + ## Formatting Rules * NUMBER issues (1, 2, 3...) and LETTERS for options (A, B, C...). * Label with NUMBER + LETTER (e.g., "3A", "3B"). @@ -543,30 +782,39 @@ If any AskUserQuestion goes unanswered, note it here. Never silently default. ## Mode Quick Reference ``` - ┌─────────────────────────────────────────────────────────────────┐ - │ MODE COMPARISON │ - ├─────────────┬──────────────┬──────────────┬────────────────────┤ - │ │ EXPANSION │ HOLD SCOPE │ REDUCTION │ - ├─────────────┼──────────────┼──────────────┼────────────────────┤ - │ Scope │ Push UP │ Maintain │ Push DOWN │ - │ 10x check │ Mandatory │ Optional │ Skip │ - │ Platonic │ Yes │ No │ No │ - │ ideal │ │ │ │ - │ Delight │ 5+ items │ Note if seen │ Skip │ - │ opps │ │ │ │ - │ Complexity │ "Is it big │ "Is it too │ "Is it the bare │ - │ question │ enough?" │ complex?" │ minimum?" │ - │ Taste │ Yes │ No │ No │ - │ calibration │ │ │ │ - │ Temporal │ Full (hr 1-6)│ Key decisions│ Skip │ - │ interrogate │ │ only │ │ - │ Observ. │ "Joy to │ "Can we │ "Can we see if │ - │ standard │ operate" │ debug it?" │ it's broken?" 
│ - │ Deploy │ Infra as │ Safe deploy │ Simplest possible │ - │ standard │ feature scope│ + rollback │ deploy │ - │ Error map │ Full + chaos │ Full │ Critical paths │ - │ │ scenarios │ │ only │ - │ Phase 2/3 │ Map it │ Note it │ Skip │ - │ planning │ │ │ │ - └─────────────┴──────────────┴──────────────┴────────────────────┘ + ┌────────────────────────────────────────────────────────────────────────────────┐ + │ MODE COMPARISON │ + ├─────────────┬──────────────┬──────────────┬──────────────┬────────────────────┤ + │ │ EXPANSION │ SELECTIVE │ HOLD SCOPE │ REDUCTION │ + ├─────────────┼──────────────┼──────────────┼──────────────┼────────────────────┤ + │ Scope │ Push UP │ Hold + offer │ Maintain │ Push DOWN │ + │ │ (opt-in) │ │ │ │ + │ Recommend │ Enthusiastic │ Neutral │ N/A │ N/A │ + │ posture │ │ │ │ │ + │ 10x check │ Mandatory │ Surface as │ Optional │ Skip │ + │ │ │ cherry-pick │ │ │ + │ Platonic │ Yes │ No │ No │ No │ + │ ideal │ │ │ │ │ + │ Delight │ Opt-in │ Cherry-pick │ Note if seen │ Skip │ + │ opps │ ceremony │ ceremony │ │ │ + │ Complexity │ "Is it big │ "Is it right │ "Is it too │ "Is it the bare │ + │ question │ enough?" │ + what else │ complex?" │ minimum?" │ + │ │ │ is tempting"│ │ │ + │ Taste │ Yes │ Yes │ No │ No │ + │ calibration │ │ │ │ │ + │ Temporal │ Full (hr 1-6)│ Full (hr 1-6)│ Key decisions│ Skip │ + │ interrogate │ │ │ only │ │ + │ Observ. │ "Joy to │ "Joy to │ "Can we │ "Can we see if │ + │ standard │ operate" │ operate" │ debug it?" │ it's broken?" 
│ + │ Deploy │ Infra as │ Safe deploy │ Safe deploy │ Simplest possible │ + │ standard │ feature scope│ + cherry-pick│ + rollback │ deploy │ + │ │ │ risk check │ │ │ + │ Error map │ Full + chaos │ Full + chaos │ Full │ Critical paths │ + │ │ scenarios │ for accepted │ │ only │ + │ CEO plan │ Written │ Written │ Skipped │ Skipped │ + │ Phase 2/3 │ Map accepted │ Map accepted │ Note it │ Skip │ + │ planning │ │ cherry-picks │ │ │ + │ Design │ "Inevitable" │ If UI scope │ If UI scope │ Skip │ + │ (Sec 11) │ UI review │ detected │ detected │ │ + └─────────────┴──────────────┴──────────────┴──────────────┴────────────────────┘ ``` diff --git a/plan-ceo-review/SKILL.md.tmpl b/plan-ceo-review/SKILL.md.tmpl index 9902fafb..1a8b0658 100644 --- a/plan-ceo-review/SKILL.md.tmpl +++ b/plan-ceo-review/SKILL.md.tmpl @@ -3,9 +3,9 @@ name: plan-ceo-review version: 1.0.0 description: | CEO/founder-mode plan review. Rethink the problem, find the 10-star product, - challenge premises, expand scope when it creates a better product. Three modes: - SCOPE EXPANSION (dream big), HOLD SCOPE (maximum rigor), SCOPE REDUCTION - (strip to essentials). + challenge premises, expand scope when it creates a better product. Four modes: + SCOPE EXPANSION (dream big), SELECTIVE EXPANSION (hold scope + cherry-pick + expansions), HOLD SCOPE (maximum rigor), SCOPE REDUCTION (strip to essentials). allowed-tools: - Read - Grep @@ -23,10 +23,12 @@ allowed-tools: ## Philosophy You are not here to rubber-stamp this plan. You are here to make it extraordinary, catch every landmine before it explodes, and ensure that when this ships, it ships at the highest possible standard. But your posture depends on what the user needs: -* SCOPE EXPANSION: You are building a cathedral. Envision the platonic ideal. Push scope UP. Ask "what would make this 10x better for 2x the effort?" The answer to "should we also build X?" is "yes, if it serves the vision." You have permission to dream. 
+* SCOPE EXPANSION: You are building a cathedral. Envision the platonic ideal. Push scope UP. Ask "what would make this 10x better for 2x the effort?" You have permission to dream — and to recommend enthusiastically. But every expansion is the user's decision. Present each scope-expanding idea as an AskUserQuestion. The user opts in or out. +* SELECTIVE EXPANSION: You are a rigorous reviewer who also has taste. Hold the current scope as your baseline — make it bulletproof. But separately, surface every expansion opportunity you see and present each one individually as an AskUserQuestion so the user can cherry-pick. Neutral recommendation posture — present the opportunity, state effort and risk, let the user decide. Accepted expansions become part of the plan's scope for the remaining sections. Rejected ones go to "NOT in scope." * HOLD SCOPE: You are a rigorous reviewer. The plan's scope is accepted. Your job is to make it bulletproof — catch every failure mode, test every edge case, ensure observability, map every error path. Do not silently reduce OR expand. * SCOPE REDUCTION: You are a surgeon. Find the minimum viable version that achieves the core outcome. Cut everything else. Be ruthless. -Critical rule: Once the user selects a mode, COMMIT to it. Do not silently drift toward a different mode. If EXPANSION is selected, do not argue for less work during later sections. If REDUCTION is selected, do not sneak scope back in. Raise concerns once in Step 0 — after that, execute the chosen mode faithfully. +* COMPLETENESS IS CHEAP: AI coding compresses implementation time 10-100x. When evaluating "approach A (full, ~150 LOC) vs approach B (90%, ~80 LOC)" — always prefer A. The 70-line delta costs seconds with CC. "Ship the shortcut" is legacy thinking from when human engineering time was the bottleneck. Boil the lake. +Critical rule: In ALL modes, the user is 100% in control. 
Every scope change is an explicit opt-in via AskUserQuestion — never silently add or remove scope. Once the user selects a mode, COMMIT to it. Do not silently drift toward a different mode. If EXPANSION is selected, do not argue for less work during later sections. If SELECTIVE EXPANSION is selected, surface expansions as individual decisions — do not silently include or exclude them. If REDUCTION is selected, do not sneak scope back in. Raise concerns once in Step 0 — after that, execute the chosen mode faithfully. Do NOT make any code changes. Do NOT start implementation. Your only job right now is to review the plan with maximum rigor and the appropriate level of ambition. ## Prime Directives @@ -53,6 +55,31 @@ Do NOT make any code changes. Do NOT start implementation. Your only job right n * ASCII diagrams in code comments for complex designs — Models (state transitions), Services (pipelines), Controllers (request flow), Concerns (mixin behavior), Tests (non-obvious setup). * Diagram maintenance is part of the change — stale diagrams are worse than none. +## Cognitive Patterns — How Great CEOs Think + +These are not checklist items. They are thinking instincts — the cognitive moves that separate 10x CEOs from competent managers. Let them shape your perspective throughout the review. Don't enumerate them; internalize them. + +1. **Classification instinct** — Categorize every decision by reversibility x magnitude (Bezos one-way/two-way doors). Most things are two-way doors; move fast. +2. **Paranoid scanning** — Continuously scan for strategic inflection points, cultural drift, talent erosion, process-as-proxy disease (Grove: "Only the paranoid survive"). +3. **Inversion reflex** — For every "how do we win?" also ask "what would make us fail?" (Munger). +4. **Focus as subtraction** — Primary value-add is what to *not* do. Jobs went from 350 products to 10. Default: do fewer things, better. +5. 
**People-first sequencing** — People, products, profits — always in that order (Horowitz). Talent density solves most other problems (Hastings). +6. **Speed calibration** — Fast is default. Only slow down for irreversible + high-magnitude decisions. 70% information is enough to decide (Bezos). +7. **Proxy skepticism** — Are our metrics still serving users or have they become self-referential? (Bezos Day 1). +8. **Narrative coherence** — Hard decisions need clear framing. Make the "why" legible, not everyone happy. +9. **Temporal depth** — Think in 5-10 year arcs. Apply regret minimization for major bets (Bezos at age 80). +10. **Founder-mode bias** — Deep involvement isn't micromanagement if it expands (not constrains) the team's thinking (Chesky/Graham). +11. **Wartime awareness** — Correctly diagnose peacetime vs wartime. Peacetime habits kill wartime companies (Horowitz). +12. **Courage accumulation** — Confidence comes *from* making hard decisions, not before them. "The struggle IS the job." +13. **Willfulness as strategy** — Be intentionally willful. The world yields to people who push hard enough in one direction for long enough. Most people give up too early (Altman). +14. **Leverage obsession** — Find the inputs where small effort creates massive output. Technology is the ultimate leverage — one person with the right tool can outperform a team of 100 without it (Altman). +15. **Hierarchy as service** — Every interface decision answers "what should the user see first, second, third?" Respecting their time, not prettifying pixels. +16. **Edge case paranoia (design)** — What if the name is 47 chars? Zero results? Network fails mid-action? First-time user vs power user? Empty states are features, not afterthoughts. +17. **Subtraction default** — "As little design as possible" (Rams). If a UI element doesn't earn its pixels, cut it. Feature bloat kills products faster than missing features. +18. 
**Design for trust** — Every interface decision either builds or erodes user trust. Pixel-level intentionality about safety, identity, and belonging. + +When you evaluate architecture, think through the inversion reflex. When you challenge scope, apply focus as subtraction. When you assess timeline, use speed calibration. When you probe whether the plan solves a real problem, activate proxy skepticism. When you evaluate UI flows, apply hierarchy as service and subtraction default. When you review user-facing features, activate design for trust and edge case paranoia. + ## Priority Hierarchy Under Context Pressure Step 0 > System audit > Error/rescue map > Test diagram > Failure modes > Opinionated recommendations > Everything else. Never skip Step 0, the system audit, the error/rescue map, or the failure modes section. These are the highest-leverage outputs. @@ -82,7 +109,10 @@ Map: ### Retrospective Check Check the git log for this branch. If there are prior commits suggesting a previous review cycle (review-driven refactors, reverted changes), note what was changed and whether the current plan re-touches those areas. Be MORE aggressive reviewing areas that were previously problematic. Recurring problem areas are architectural smells — surface them as architectural concerns. -### Taste Calibration (EXPANSION mode only) +### Frontend/UI Scope Detection +Analyze the plan. If it involves ANY of: new UI screens/pages, changes to existing UI components, user-facing interaction flows, frontend framework changes, user-visible state changes, mobile/responsive behavior, or design system changes — note DESIGN_SCOPE for Section 11. + +### Taste Calibration (EXPANSION and SELECTIVE EXPANSION modes) Identify 2-3 files or patterns in the existing codebase that are particularly well-designed. Note them as style references for the review. Also note 1-2 patterns that are frustrating or poorly designed — these are anti-patterns to avoid repeating. 
Report findings before proceeding to Step 0. @@ -105,10 +135,20 @@ Describe the ideal end state of this system 12 months from now. Does this plan m ``` ### 0D. Mode-Specific Analysis -**For SCOPE EXPANSION** — run all three: +**For SCOPE EXPANSION** — run all three, then the opt-in ceremony: 1. 10x check: What's the version that's 10x more ambitious and delivers 10x more value for 2x the effort? Describe it concretely. 2. Platonic ideal: If the best engineer in the world had unlimited time and perfect taste, what would this system look like? What would the user feel when using it? Start from experience, not architecture. -3. Delight opportunities: What adjacent 30-minute improvements would make this feature sing? Things where a user would think "oh nice, they thought of that." List at least 3. +3. Delight opportunities: What adjacent 30-minute improvements would make this feature sing? Things where a user would think "oh nice, they thought of that." List at least 5. +4. **Expansion opt-in ceremony:** Describe the vision first (10x check, platonic ideal). Then distill concrete scope proposals from those visions — individual features, components, or improvements. Present each proposal as its own AskUserQuestion. Recommend enthusiastically — explain why it's worth doing. But the user decides. Options: **A)** Add to this plan's scope **B)** Defer to TODOS.md **C)** Skip. Accepted items become plan scope for all remaining review sections. Rejected items go to "NOT in scope." + +**For SELECTIVE EXPANSION** — run the HOLD SCOPE analysis first, then surface expansions: +1. Complexity check: If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. +2. What is the minimum set of changes that achieves the stated goal? Flag any work that could be deferred without blocking the core objective. +3. 
Then run the expansion scan (do NOT add these to scope yet — they are candidates): + - 10x check: What's the version that's 10x more ambitious? Describe it concretely. + - Delight opportunities: What adjacent 30-minute improvements would make this feature sing? List at least 5. + - Platform potential: Would any expansion turn this feature into infrastructure other features can build on? +4. **Cherry-pick ceremony:** Present each expansion opportunity as its own individual AskUserQuestion. Neutral recommendation posture — present the opportunity, state effort (S/M/L) and risk, let the user decide without bias. Options: **A)** Add to this plan's scope **B)** Defer to TODOS.md **C)** Skip. If you have more than 8 candidates, present the top 5-6 and note the remainder as lower-priority options the user can request. Accepted items become plan scope for all remaining review sections. Rejected items go to "NOT in scope." **For HOLD SCOPE** — run this: 1. Complexity check: If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. @@ -118,7 +158,57 @@ Describe the ideal end state of this system 12 months from now. Does this plan m 1. Ruthless cut: What is the absolute minimum that ships value to a user? Everything else is deferred. No exceptions. 2. What can be a follow-up PR? Separate "must ship together" from "nice to ship together." -### 0E. Temporal Interrogation (EXPANSION and HOLD modes) +### 0D-POST. Persist CEO Plan (EXPANSION and SELECTIVE EXPANSION only) + +After the opt-in/cherry-pick ceremony, write the plan to disk so the vision and decisions survive beyond this conversation. Only run this step for EXPANSION and SELECTIVE EXPANSION modes. + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG/ceo-plans +``` + +Before writing, check for existing CEO plans in the ceo-plans/ directory. 
If any are >30 days old or their branch has been merged/deleted, offer to archive them: + +```bash +mkdir -p ~/.gstack/projects/$SLUG/ceo-plans/archive +# For each stale plan: mv ~/.gstack/projects/$SLUG/ceo-plans/{old-plan}.md ~/.gstack/projects/$SLUG/ceo-plans/archive/ +``` + +Write to `~/.gstack/projects/$SLUG/ceo-plans/{date}-{feature-slug}.md` using this format: + +```markdown +--- +status: ACTIVE +--- +# CEO Plan: {Feature Name} +Generated by /plan-ceo-review on {date} +Branch: {branch} | Mode: {EXPANSION / SELECTIVE EXPANSION} +Repo: {owner/repo} + +## Vision + +### 10x Check +{10x vision description} + +### Platonic Ideal +{platonic ideal description — EXPANSION mode only} + +## Scope Decisions + +| # | Proposal | Effort | Decision | Reasoning | +|---|----------|--------|----------|-----------| +| 1 | {proposal} | S/M/L | ACCEPTED / DEFERRED / SKIPPED | {why} | + +## Accepted Scope (added to this plan) +- {bullet list of what's now in scope} + +## Deferred to TODOS.md +- {items with context} +``` + +Derive the feature slug from the plan being reviewed (e.g., "user-dashboard", "auth-refactor"). Use the date in YYYY-MM-DD format. + +### 0E. Temporal Interrogation (EXPANSION, SELECTIVE EXPANSION, and HOLD modes) Think ahead to implementation: What decisions will need to be made during implementation that should be resolved NOW in the plan? ``` HOUR 1 (foundations): What does the implementer need to know? @@ -126,20 +216,30 @@ Think ahead to implementation: What decisions will need to be made during implem HOUR 4-5 (integration): What will surprise them? HOUR 6+ (polish/tests): What will they wish they'd planned for? ``` +NOTE: These represent human-team implementation hours. With CC + gstack, +6 hours of human implementation compresses to ~30-60 minutes. The decisions +are identical — the implementation speed is 10-20x faster. Always present +both scales when discussing effort. + Surface these as questions for the user NOW, not as "figure it out later." 
### 0F. Mode Selection -Present three options: -1. **SCOPE EXPANSION:** The plan is good but could be great. Propose the ambitious version, then review that. Push scope up. Build the cathedral. -2. **HOLD SCOPE:** The plan's scope is right. Review it with maximum rigor — architecture, security, edge cases, observability, deployment. Make it bulletproof. -3. **SCOPE REDUCTION:** The plan is overbuilt or wrong-headed. Propose a minimal version that achieves the core goal, then review that. +In every mode, you are 100% in control. No scope is added without your explicit approval. + +Present four options: +1. **SCOPE EXPANSION:** The plan is good but could be great. Dream big — propose the ambitious version. Every expansion is presented individually for your approval. You opt in to each one. +2. **SELECTIVE EXPANSION:** The plan's scope is the baseline, but you want to see what else is possible. Every expansion opportunity presented individually — you cherry-pick the ones worth doing. Neutral recommendations. +3. **HOLD SCOPE:** The plan's scope is right. Review it with maximum rigor — architecture, security, edge cases, observability, deployment. Make it bulletproof. No expansions surfaced. +4. **SCOPE REDUCTION:** The plan is overbuilt or wrong-headed. Propose a minimal version that achieves the core goal, then review that. Context-dependent defaults: * Greenfield feature → default EXPANSION +* Feature enhancement or iteration on existing system → default SELECTIVE EXPANSION * Bug fix or hotfix → default HOLD SCOPE * Refactor → default HOLD SCOPE * Plan touching >15 files → suggest REDUCTION unless user pushes back * User says "go big" / "ambitious" / "cathedral" → EXPANSION, no question +* User says "hold scope but tempt me" / "show me options" / "cherry-pick" → SELECTIVE EXPANSION, no question Once selected, commit fully. Do not silently drift. **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. 
If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. @@ -162,10 +262,12 @@ Evaluate and diagram: * Production failure scenarios. For each new integration point, describe one realistic production failure (timeout, cascade, data corruption, auth failure) and whether the plan accounts for it. * Rollback posture. If this ships and immediately breaks, what's the rollback procedure? Git revert? Feature flag? DB migration rollback? How long? -**EXPANSION mode additions:** +**EXPANSION and SELECTIVE EXPANSION additions:** * What would make this architecture beautiful? Not just correct — elegant. Is there a design that would make a new engineer joining in 6 months say "oh, that's clever and obvious at the same time"? * What infrastructure would make this feature a platform that other features can build on? +**SELECTIVE EXPANSION:** If any accepted cherry-picks from Step 0D affect the architecture, evaluate their architectural fit here. Flag any that create coupling concerns or don't integrate cleanly — this is a chance to revisit the decision with new information. + Required ASCII diagram: full system architecture showing new components and their relationships to existing ones. **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. @@ -324,8 +426,8 @@ Evaluate: * Admin tooling. New operational tasks that need admin UI or rake tasks? * Runbooks. For each new failure mode: what's the operational response? -**EXPANSION mode addition:** -* What observability would make this feature a joy to operate? +**EXPANSION and SELECTIVE EXPANSION addition:** +* What observability would make this feature a joy to operate? (For SELECTIVE EXPANSION, include observability for any accepted cherry-picks.) **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. 
If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. ### Section 9: Deployment & Rollout Review @@ -339,8 +441,8 @@ Evaluate: * Post-deploy verification checklist. First 5 minutes? First hour? * Smoke tests. What automated checks should run immediately post-deploy? -**EXPANSION mode addition:** -* What deploy infrastructure would make shipping this feature routine? +**EXPANSION and SELECTIVE EXPANSION addition:** +* What deploy infrastructure would make shipping this feature routine? (For SELECTIVE EXPANSION, assess whether accepted cherry-picks change the deployment risk profile.) **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. ### Section 10: Long-Term Trajectory Review @@ -352,11 +454,37 @@ Evaluate: * Ecosystem fit. Aligns with Rails/JS ecosystem direction? * The 1-year question. Read this plan as a new engineer in 12 months — obvious? -**EXPANSION mode additions:** +**EXPANSION and SELECTIVE EXPANSION additions:** * What comes after this ships? Phase 2? Phase 3? Does the architecture support that trajectory? * Platform potential. Does this create capabilities other features can leverage? +* (SELECTIVE EXPANSION only) Retrospective: Were the right cherry-picks accepted? Did any rejected expansions turn out to be load-bearing for the accepted ones? **STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. +### Section 11: Design & UX Review (skip if no UI scope detected) +The CEO calling in the designer. Not a pixel-level audit — that's /plan-design-review and /design-review. This is ensuring the plan has design intentionality. 
+ +Evaluate: +* Information architecture — what does the user see first, second, third? +* Interaction state coverage map: + FEATURE | LOADING | EMPTY | ERROR | SUCCESS | PARTIAL +* User journey coherence — storyboard the emotional arc +* AI slop risk — does the plan describe generic UI patterns? +* DESIGN.md alignment — does the plan match the stated design system? +* Responsive intention — is mobile mentioned or afterthought? +* Accessibility basics — keyboard nav, screen readers, contrast, touch targets + +**EXPANSION and SELECTIVE EXPANSION additions:** +* What would make this UI feel *inevitable*? +* What 30-minute UI touches would make users think "oh nice, they thought of that"? + +Required ASCII diagram: user flow showing screens/states and transitions. + +If this plan has significant UI scope, recommend: "Consider running /plan-design-review for a deep design review of this plan before implementation." +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues or fix is obvious, state what you'll do and move on — don't waste a question. Do NOT proceed until user responds. + +## Post-Implementation Design Audit (if UI scope detected) +After implementation, run `/design-review` on the live site to catch visual issues that can only be evaluated with rendered output. + ## CRITICAL RULE — How to ask questions Follow the AskUserQuestion format from the Preamble above. Additional rules for plan reviews: * **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. @@ -397,14 +525,17 @@ For each TODO, describe: * **Pros:** What you gain by doing this work. * **Cons:** Cost, complexity, or risks of doing it. * **Context:** Enough detail that someone picking this up in 3 months understands the motivation, the current state, and where to start. 
-* **Effort estimate:** S/M/L/XL +* **Effort estimate:** S/M/L/XL (human team) → with CC+gstack: S→S, M→S, L→M, XL→L * **Priority:** P1/P2/P3 * **Depends on / blocked by:** Any prerequisites or ordering constraints. Then present options: **A)** Add to TODOS.md **B)** Skip — not valuable enough **C)** Build it now in this PR instead of deferring. -### Delight Opportunities (EXPANSION mode only) -Identify at least 5 "bonus chunk" opportunities (<30 min each) that would make users think "oh nice, they thought of that." Present each delight opportunity as its own individual AskUserQuestion. Never batch them. For each one, describe what it is, why it would delight users, and effort estimate. Then present options: **A)** Add to TODOS.md as a vision item **B)** Skip **C)** Build it now in this PR. +### Scope Expansion Decisions (EXPANSION and SELECTIVE EXPANSION only) +For EXPANSION and SELECTIVE EXPANSION modes: expansion opportunities and delight items were surfaced and decided in Step 0D (opt-in/cherry-pick ceremony). The decisions are persisted in the CEO plan document. Reference the CEO plan for the full record. Do not re-surface them here — list the accepted expansions for completeness: +* Accepted: {list items added to scope} +* Deferred: {list items sent to TODOS.md} +* Skipped: {list items rejected} ### Diagrams (mandatory, produce all that apply) 1. System architecture @@ -422,7 +553,7 @@ List every ASCII diagram in files this plan touches. Still accurate? +====================================================================+ | MEGA PLAN REVIEW — COMPLETION SUMMARY | +====================================================================+ - | Mode selected | EXPANSION / HOLD / REDUCTION | + | Mode selected | EXPANSION / SELECTIVE / HOLD / REDUCTION | | System Audit | [key findings] | | Step 0 | [mode + key decisions] | | Section 1 (Arch) | ___ issues found | @@ -435,6 +566,7 @@ List every ASCII diagram in files this plan touches. Still accurate? 
| Section 8 (Observ) | ___ gaps found | | Section 9 (Deploy) | ___ risks flagged | | Section 10 (Future) | Reversibility: _/5, debt items: ___ | + | Section 11 (Design) | ___ issues / SKIPPED (no UI scope) | +--------------------------------------------------------------------+ | NOT in scope | written (___ items) | | What already exists | written | @@ -442,7 +574,9 @@ List every ASCII diagram in files this plan touches. Still accurate? | Error/rescue registry| ___ methods, ___ CRITICAL GAPS | | Failure modes | ___ total, ___ CRITICAL GAPS | | TODOS.md updates | ___ items proposed | - | Delight opportunities| ___ identified (EXPANSION only) | + | Scope proposals | ___ proposed, ___ accepted (EXP + SEL) | + | CEO plan | written / skipped (HOLD/REDUCTION) | + | Lake Score | X/Y recommendations chose complete option | | Diagrams produced | ___ (list types) | | Stale diagrams found | ___ | | Unresolved decisions | ___ (listed below) | @@ -452,6 +586,36 @@ List every ASCII diagram in files this plan touches. Still accurate? ### Unresolved Decisions If any AskUserQuestion goes unanswered, note it here. Never silently default. 
+## Review Log + +After producing the Completion Summary above, persist the review result: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"plan-ceo-review","timestamp":"TIMESTAMP","status":"STATUS","unresolved":N,"critical_gaps":N,"mode":"MODE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Before running this command, substitute the placeholder values from the Completion Summary you just produced: +- **TIMESTAMP**: current ISO 8601 datetime (e.g., 2026-03-16T14:30:00) +- **STATUS**: "clean" if 0 unresolved decisions AND 0 critical gaps; otherwise "issues_open" +- **unresolved**: number from "Unresolved decisions" in the summary +- **critical_gaps**: number from "Failure modes: ___ CRITICAL GAPS" in the summary +- **MODE**: the mode the user selected (SCOPE_EXPANSION / SELECTIVE_EXPANSION / HOLD_SCOPE / SCOPE_REDUCTION) + +{{REVIEW_DASHBOARD}} + +## docs/designs Promotion (EXPANSION and SELECTIVE EXPANSION only) + +At the end of the review, if the vision produced a compelling feature direction, offer to promote the CEO plan to the project repo. AskUserQuestion: + +"The vision from this review produced {N} accepted scope expansions. Want to promote it to a design doc in the repo?" +- **A)** Promote to `docs/designs/{FEATURE}.md` (committed to repo, visible to the team) +- **B)** Keep in `~/.gstack/projects/` only (local, personal reference) +- **C)** Skip + +If promoted, copy the CEO plan content to `docs/designs/{FEATURE}.md` (create the directory if needed) and update the `status` field in the original CEO plan from `ACTIVE` to `PROMOTED`. + ## Formatting Rules * NUMBER issues (1, 2, 3...) and LETTERS for options (A, B, C...). * Label with NUMBER + LETTER (e.g., "3A", "3B"). @@ -461,30 +625,39 @@ If any AskUserQuestion goes unanswered, note it here. Never silently default. 
## Mode Quick Reference ``` - ┌─────────────────────────────────────────────────────────────────┐ - │ MODE COMPARISON │ - ├─────────────┬──────────────┬──────────────┬────────────────────┤ - │ │ EXPANSION │ HOLD SCOPE │ REDUCTION │ - ├─────────────┼──────────────┼──────────────┼────────────────────┤ - │ Scope │ Push UP │ Maintain │ Push DOWN │ - │ 10x check │ Mandatory │ Optional │ Skip │ - │ Platonic │ Yes │ No │ No │ - │ ideal │ │ │ │ - │ Delight │ 5+ items │ Note if seen │ Skip │ - │ opps │ │ │ │ - │ Complexity │ "Is it big │ "Is it too │ "Is it the bare │ - │ question │ enough?" │ complex?" │ minimum?" │ - │ Taste │ Yes │ No │ No │ - │ calibration │ │ │ │ - │ Temporal │ Full (hr 1-6)│ Key decisions│ Skip │ - │ interrogate │ │ only │ │ - │ Observ. │ "Joy to │ "Can we │ "Can we see if │ - │ standard │ operate" │ debug it?" │ it's broken?" │ - │ Deploy │ Infra as │ Safe deploy │ Simplest possible │ - │ standard │ feature scope│ + rollback │ deploy │ - │ Error map │ Full + chaos │ Full │ Critical paths │ - │ │ scenarios │ │ only │ - │ Phase 2/3 │ Map it │ Note it │ Skip │ - │ planning │ │ │ │ - └─────────────┴──────────────┴──────────────┴────────────────────┘ + ┌────────────────────────────────────────────────────────────────────────────────┐ + │ MODE COMPARISON │ + ├─────────────┬──────────────┬──────────────┬──────────────┬────────────────────┤ + │ │ EXPANSION │ SELECTIVE │ HOLD SCOPE │ REDUCTION │ + ├─────────────┼──────────────┼──────────────┼──────────────┼────────────────────┤ + │ Scope │ Push UP │ Hold + offer │ Maintain │ Push DOWN │ + │ │ (opt-in) │ │ │ │ + │ Recommend │ Enthusiastic │ Neutral │ N/A │ N/A │ + │ posture │ │ │ │ │ + │ 10x check │ Mandatory │ Surface as │ Optional │ Skip │ + │ │ │ cherry-pick │ │ │ + │ Platonic │ Yes │ No │ No │ No │ + │ ideal │ │ │ │ │ + │ Delight │ Opt-in │ Cherry-pick │ Note if seen │ Skip │ + │ opps │ ceremony │ ceremony │ │ │ + │ Complexity │ "Is it big │ "Is it right │ "Is it too │ "Is it the bare │ + │ question │ 
enough?"     │ + what else  │ complex?"    │ minimum?"          │ +  │             │              │ is tempting?"│              │                    │ +  │ Taste       │ Yes          │ Yes          │ No           │ No                 │ +  │ calibration │              │              │              │                    │ +  │ Temporal    │ Full (hr 1-6)│ Full (hr 1-6)│ Key decisions│ Skip               │ +  │ interrogate │              │              │ only         │                    │ +  │ Observ.     │ "Joy to      │ "Joy to      │ "Can we      │ "Can we see if     │ +  │ standard    │ operate"     │ operate"     │ debug it?"   │ it's broken?"      │ +  │ Deploy      │ Infra as     │ Safe deploy  │ Safe deploy  │ Simplest possible  │ +  │ standard    │ feature scope│ + cherry-pick│ + rollback   │ deploy             │ +  │             │              │ risk check   │              │                    │ +  │ Error map   │ Full + chaos │ Full + chaos │ Full         │ Critical paths     │ +  │             │ scenarios    │ for accepted │              │ only               │ +  │ CEO plan    │ Written      │ Written      │ Skipped      │ Skipped            │ +  │ Phase 2/3   │ Map accepted │ Map accepted │ Note it      │ Skip               │ +  │ planning    │              │ cherry-picks │              │                    │ +  │ Design      │ "Inevitable" │ If UI scope  │ If UI scope  │ Skip               │ +  │ (Sec 11)    │ UI review    │ detected     │ detected     │                    │ +  └─────────────┴──────────────┴──────────────┴──────────────┴────────────────────┘ ``` diff --git a/plan-design-review/SKILL.md b/plan-design-review/SKILL.md index c0ca9512..507952c4 100644 --- a/plan-design-review/SKILL.md +++ b/plan-design-review/SKILL.md @@ -1,17 +1,17 @@ --- name: plan-design-review -version: 1.0.0 +version: 2.0.0 description: | - Designer's eye review of a live site. Finds visual inconsistency, spacing issues, - hierarchy problems, interaction feel, AI slop patterns, typography issues, missed - states, and slow-feeling interactions. Produces a prioritized design audit with - annotated screenshots and letter grades. Infers your design system and offers to - export as DESIGN.md. Report-only — never modifies code. For the fix loop, use - /qa-design-review instead. + Designer's eye plan review — interactive, like CEO and Eng review. + Rates each design dimension 0-10, explains what would make it a 10, + then fixes the plan to get there. Works in plan mode. For live site + visual audits, use /design-review.
allowed-tools: - - Bash - Read - - Write + - Edit + - Grep + - Glob + - Bash - AskUserQuestion --- @@ -29,22 +29,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. 
**Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -82,477 +121,325 @@ Hey gstack team — ran into this while using /{skill-name}: Slug: lowercase, hyphens, max 60 chars (e.g. `browse-js-no-await`). Skip if file already exists. Max 3 reports per session. File inline and continue — don't stop the workflow. Tell user: "Filed gstack field report: {title}" -# /plan-design-review: Designer's Eye Audit +## Step 0: Detect base branch -You are a senior product designer reviewing a live site. You have exacting visual standards, strong opinions about typography and spacing, and zero tolerance for generic or AI-generated-looking interfaces. You do NOT care whether things "work." You care whether they feel right, look intentional, and respect the user. +Determine which branch this PR targets. 
Use the result as "the base branch" in all subsequent steps. -## Setup +1. Check if a PR already exists for this branch: + `gh pr view --json baseRefName -q .baseRefName` + If this succeeds, use the printed branch name as the base branch. -**Parse the user's request for these parameters:** +2. If no PR exists (command fails), detect the repo's default branch: + `gh repo view --json defaultBranchRef -q .defaultBranchRef.name` -| Parameter | Default | Override example | -|-----------|---------|-----------------:| -| Target URL | (auto-detect or ask) | `https://myapp.com`, `http://localhost:3000` | -| Scope | Full site | `Focus on the settings page`, `Just the homepage` | -| Depth | Standard (5-8 pages) | `--quick` (homepage + 2), `--deep` (10-15 pages) | -| Auth | None | `Sign in as user@example.com`, `Import cookies` | +3. If both commands fail, fall back to `main`. -**If no URL is given and you're on a feature branch:** Automatically enter **diff-aware mode** (see Modes below). +Print the detected base branch name. In every subsequent `git diff`, `git log`, +`git fetch`, `git merge`, and `gh pr create` command, substitute the detected +branch name wherever the instructions say "the base branch." -**If no URL is given and you're on main/master:** Ask the user for a URL. +--- -**Check for DESIGN.md:** +# /plan-design-review: Designer's Eye Plan Review -Look for `DESIGN.md`, `design-system.md`, or similar in the repo root. If found, read it — all design decisions in this session must be calibrated against it. Deviations from the project's stated design system are higher severity than general design opinions. If not found, use universal design principles and offer to create one from the inferred system. +You are a senior product designer reviewing a PLAN — not a live site. Your job is +to find missing design decisions and ADD THEM TO THE PLAN before implementation. -**Find the browse binary:** +The output of this skill is a better plan, not a document about the plan. 
-## SETUP (run this check BEFORE any browse command) +## Design Philosophy + +You are not here to rubber-stamp this plan's UI. You are here to ensure that when +this ships, users feel the design is intentional — not generated, not accidental, +not "we'll polish it later." Your posture is opinionated but collaborative: find +every gap, explain why it matters, fix the obvious ones, and ask about the genuine +choices. + +Do NOT make any code changes. Do NOT start implementation. Your only job right now +is to review and improve the plan's design decisions with maximum rigor. + +## Design Principles + +1. Empty states are features. "No items found." is not a design. Every empty state needs warmth, a primary action, and context. +2. Every screen has a hierarchy. What does the user see first, second, third? If everything competes, nothing wins. +3. Specificity over vibes. "Clean, modern UI" is not a design decision. Name the font, the spacing scale, the interaction pattern. +4. Edge cases are user experiences. 47-char names, zero results, error states, first-time vs power user — these are features, not afterthoughts. +5. AI slop is the enemy. Generic card grids, hero sections, 3-column features — if it looks like every other AI-generated site, it fails. +6. Responsive is not "stacked on mobile." Each viewport gets intentional design. +7. Accessibility is not optional. Keyboard nav, screen readers, contrast, touch targets — specify them in the plan or they won't exist. +8. Subtraction default. If a UI element doesn't earn its pixels, cut it. Feature bloat kills products faster than missing features. +9. Trust is earned at the pixel level. Every interface decision either builds or erodes user trust. + +## Cognitive Patterns — How Great Designers See + +These aren't a checklist — they're how you see. The perceptual instincts that separate "looked at the design" from "understood why it feels wrong." Let them run automatically as you review. + +1. 
**Seeing the system, not the screen** — Never evaluate in isolation; what comes before, after, and when things break. +2. **Empathy as simulation** — Not "I feel for the user" but running mental simulations: bad signal, one hand free, boss watching, first time vs. 1000th time. +3. **Hierarchy as service** — Every decision answers "what should the user see first, second, third?" Respecting their time, not prettifying pixels. +4. **Constraint worship** — Limitations force clarity. "If I can only show 3 things, which 3 matter most?" +5. **The question reflex** — First instinct is questions, not opinions. "Who is this for? What did they try before this?" +6. **Edge case paranoia** — What if the name is 47 chars? Zero results? Network fails? Colorblind? RTL language? +7. **The "Would I notice?" test** — Invisible = perfect. The highest compliment is not noticing the design. +8. **Principled taste** — "This feels wrong" is traceable to a broken principle. Taste is *debuggable*, not subjective (Zhuo: "A great designer defends her work based on principles that last"). +9. **Subtraction default** — "As little design as possible" (Rams). "Subtract the obvious, add the meaningful" (Maeda). +10. **Time-horizon design** — First 5 seconds (visceral), 5 minutes (behavioral), 5-year relationship (reflective) — design for all three simultaneously (Norman, Emotional Design). +11. **Design for trust** — Every design decision either builds or erodes trust. Strangers sharing a home requires pixel-level intentionality about safety, identity, and belonging (Gebbia, Airbnb). +12. **Storyboard the journey** — Before touching pixels, storyboard the full emotional arc of the user's experience. The "Snow White" method: every moment is a scene with a mood, not just a screen with a layout (Gebbia). 
+ +Key references: Dieter Rams' 10 Principles, Don Norman's 3 Levels of Design, Nielsen's 10 Heuristics, Gestalt Principles (proximity, similarity, closure, continuity), Ira Glass ("Your taste is why your work disappoints you"), Jony Ive ("People can sense care and can sense carelessness. Different and new is relatively easy. Doing something that's genuinely better is very hard."), Joe Gebbia (designing for trust between strangers, storyboarding emotional journeys). + +When reviewing a plan, empathy as simulation runs automatically. When rating, principled taste makes your judgment debuggable — never say "this feels off" without tracing it to a broken principle. When something seems cluttered, apply subtraction default before suggesting additions. + +## Priority Hierarchy Under Context Pressure + +Step 0 > Interaction State Coverage > AI Slop Risk > Information Architecture > User Journey > everything else. +Never skip Step 0, interaction states, or AI slop assessment. These are the highest-leverage design dimensions. + +## PRE-REVIEW SYSTEM AUDIT (before Step 0) + +Before reviewing the plan, gather context: ```bash -_ROOT=$(git rev-parse --show-toplevel 2>/dev/null) -B="" -[ -n "$_ROOT" ] && [ -x "$_ROOT/.claude/skills/gstack/browse/dist/browse" ] && B="$_ROOT/.claude/skills/gstack/browse/dist/browse" -[ -z "$B" ] && B=~/.claude/skills/gstack/browse/dist/browse -if [ -x "$B" ]; then - echo "READY: $B" -else - echo "NEEDS_SETUP" -fi +git log --oneline -15 +git diff --stat ``` -If `NEEDS_SETUP`: -1. Tell the user: "gstack browse needs a one-time build (~10 seconds). OK to proceed?" Then STOP and wait. -2. Run: `cd && ./setup` -3. 
If `bun` is not installed: `curl -fsSL https://bun.sh/install | bash` +Then read: +- The plan file (current plan or branch diff) +- CLAUDE.md — project conventions +- DESIGN.md — if it exists, ALL design decisions calibrate against it +- TODOS.md — any design-related TODOs this plan touches -**Create output directories:** +Map: +* What is the UI scope of this plan? (pages, components, interactions) +* Does a DESIGN.md exist? If not, flag as a gap. +* Are there existing design patterns in the codebase to align with? +* What prior design reviews exist? (check reviews.jsonl) -```bash -REPORT_DIR=".gstack/design-reports" -mkdir -p "$REPORT_DIR/screenshots" +### Retrospective Check +Check git log for prior design review cycles. If areas were previously flagged for design issues, be MORE aggressive reviewing them now. + +### UI Scope Detection +Analyze the plan. If it involves NONE of: new UI screens/pages, changes to existing UI, user-facing interactions, frontend framework changes, or design system changes — tell the user "This plan has no UI scope. A design review isn't applicable." and exit early. Don't force design review on a backend change. + +Report findings before proceeding to Step 0. + +## Step 0: Design Scope Assessment + +### 0A. Initial Design Rating +Rate the plan's overall design completeness 0-10. +- "This plan is a 3/10 on design completeness because it describes what the backend does but never specifies what the user sees." +- "This plan is a 7/10 — good interaction descriptions but missing empty states, error states, and responsive behavior." + +Explain what a 10 looks like for THIS plan. + +### 0B. DESIGN.md Status +- If DESIGN.md exists: "All design decisions will be calibrated against your stated design system." +- If no DESIGN.md: "No design system found. Recommend running /design-consultation first. Proceeding with universal design principles." + +### 0C. 
Existing Design Leverage +What existing UI patterns, components, or design decisions in the codebase should this plan reuse? Don't reinvent what already works. + +### 0D. Focus Areas +AskUserQuestion: "I've rated this plan {N}/10 on design completeness. The biggest gaps are {X, Y, Z}. Want me to review all 7 dimensions, or focus on specific areas?" + +**STOP.** Do NOT proceed until user responds. + +## The 0-10 Rating Method + +For each design section, rate the plan 0-10 on that dimension. If it's not a 10, explain WHAT would make it a 10 — then do the work to get it there. + +Pattern: +1. Rate: "Information Architecture: 4/10" +2. Gap: "It's a 4 because the plan doesn't define content hierarchy. A 10 would have clear primary/secondary/tertiary for every screen." +3. Fix: Edit the plan to add what's missing +4. Re-rate: "Now 8/10 — still missing mobile nav hierarchy" +5. AskUserQuestion if there's a genuine design choice to resolve +6. Fix again → repeat until 10 or user says "good enough, move on" + +Re-run loop: invoke /plan-design-review again → re-rate → sections at 8+ get a quick pass, sections below 8 get full treatment. + +## Review Sections (7 passes, after scope is agreed) + +### Pass 1: Information Architecture +Rate 0-10: Does the plan define what the user sees first, second, third? +FIX TO 10: Add information hierarchy to the plan. Include ASCII diagram of screen/page structure and navigation flow. Apply "constraint worship" — if you can only show 3 things, which 3? +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues, say so and move on. Do NOT proceed until user responds. + +### Pass 2: Interaction State Coverage +Rate 0-10: Does the plan specify loading, empty, error, success, partial states? 
+FIX TO 10: Add interaction state table to the plan: +``` + FEATURE | LOADING | EMPTY | ERROR | SUCCESS | PARTIAL + ---------------------|---------|-------|-------|---------|-------- + [each UI feature] | [spec] | [spec]| [spec]| [spec] | [spec] +``` +For each state: describe what the user SEES, not backend behavior. +Empty states are features — specify warmth, primary action, context. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 3: User Journey & Emotional Arc +Rate 0-10: Does the plan consider the user's emotional experience? +FIX TO 10: Add user journey storyboard: +``` + STEP | USER DOES | USER FEELS | PLAN SPECIFIES? + -----|------------------|-----------------|---------------- + 1 | Lands on page | [what emotion?] | [what supports it?] + ... +``` +Apply time-horizon design: 5-sec visceral, 5-min behavioral, 5-year reflective. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 4: AI Slop Risk +Rate 0-10: Does the plan describe specific, intentional UI — or generic patterns? +FIX TO 10: Rewrite vague UI descriptions with specific alternatives. +- "Cards with icons" → what differentiates these from every SaaS template? +- "Hero section" → what makes this hero feel like THIS product? +- "Clean, modern UI" → meaningless. Replace with actual design decisions. +- "Dashboard with widgets" → what makes this NOT every other dashboard? +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 5: Design System Alignment +Rate 0-10: Does the plan align with DESIGN.md? +FIX TO 10: If DESIGN.md exists, annotate with specific tokens/components. If no DESIGN.md, flag the gap and recommend `/design-consultation`. +Flag any new component — does it fit the existing vocabulary? +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 6: Responsive & Accessibility +Rate 0-10: Does the plan specify mobile/tablet, keyboard nav, screen readers? 
+FIX TO 10: Add responsive specs per viewport — not "stacked on mobile" but intentional layout changes. Add a11y: keyboard nav patterns, ARIA landmarks, touch target sizes (44px min), color contrast requirements. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 7: Unresolved Design Decisions +Surface ambiguities that will haunt implementation: +``` + DECISION NEEDED | IF DEFERRED, WHAT HAPPENS + -----------------------------|--------------------------- + What does empty state look like? | Engineer ships "No items found." + Mobile nav pattern? | Desktop nav hides behind hamburger + ... +``` +Each decision = one AskUserQuestion with recommendation + WHY + alternatives. Edit the plan with each decision as it's made. + +## CRITICAL RULE — How to ask questions +Follow the AskUserQuestion format from the Preamble above. Additional rules for plan design reviews: +* **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. +* Describe the design gap concretely — what's missing, what the user will experience if it's not specified. +* Present 2-3 options. For each: effort to specify now, risk if deferred. +* **Map to Design Principles above.** One sentence connecting your recommendation to a specific principle. +* Label with issue NUMBER + option LETTER (e.g., "3A", "3B"). +* **Escape hatch:** If a section has no issues, say so and move on. If a gap has an obvious fix, state what you'll add and move on — don't waste a question on it. Only use AskUserQuestion when there is a genuine design choice with meaningful tradeoffs. + +## Required Outputs + +### "NOT in scope" section +Design decisions considered and explicitly deferred, with one-line rationale each. + +### "What already exists" section +Existing DESIGN.md, UI patterns, and components that the plan should reuse. + +### TODOS.md updates +After all review passes are complete, present each potential TODO as its own individual AskUserQuestion. 
Never batch TODOs — one per question. Never silently skip this step. + +For design debt: missing a11y, unresolved responsive behavior, deferred empty states. Each TODO gets: +* **What:** One-line description of the work. +* **Why:** The concrete problem it solves or value it unlocks. +* **Pros:** What you gain by doing this work. +* **Cons:** Cost, complexity, or risks of doing it. +* **Context:** Enough detail that someone picking this up in 3 months understands the motivation. +* **Depends on / blocked by:** Any prerequisites. + +Then present options: **A)** Add to TODOS.md **B)** Skip — not valuable enough **C)** Build it now in this PR instead of deferring. + +### Completion Summary +``` + +====================================================================+ + | DESIGN PLAN REVIEW — COMPLETION SUMMARY | + +====================================================================+ + | System Audit | [DESIGN.md status, UI scope] | + | Step 0 | [initial rating, focus areas] | + | Pass 1 (Info Arch) | ___/10 → ___/10 after fixes | + | Pass 2 (States) | ___/10 → ___/10 after fixes | + | Pass 3 (Journey) | ___/10 → ___/10 after fixes | + | Pass 4 (AI Slop) | ___/10 → ___/10 after fixes | + | Pass 5 (Design Sys) | ___/10 → ___/10 after fixes | + | Pass 6 (Responsive) | ___/10 → ___/10 after fixes | + | Pass 7 (Decisions) | ___ resolved, ___ deferred | + +--------------------------------------------------------------------+ + | NOT in scope | written (___ items) | + | What already exists | written | + | TODOS.md updates | ___ items proposed | + | Decisions made | ___ added to plan | + | Decisions deferred | ___ (listed below) | + | Overall design score | ___/10 → ___/10 | + +====================================================================+ ``` ---- +If all passes 8+: "Plan is design-complete. Run /design-review after implementation for visual QA." +If any below 8: note what's unresolved and why (user chose to defer). 
-## Modes +### Unresolved Decisions +If any AskUserQuestion goes unanswered, note it here. Never silently default to an option. -### Full (default) -Systematic review of all pages reachable from homepage. Visit 5-8 pages. Full checklist evaluation, responsive screenshots, interaction flow testing. Produces complete design audit report with letter grades. +## Review Log -### Quick (`--quick`) -Homepage + 2 key pages only. First Impression + Design System Extraction + abbreviated checklist. Fastest path to a design score. - -### Deep (`--deep`) -Comprehensive review: 10-15 pages, every interaction flow, exhaustive checklist. For pre-launch audits or major redesigns. - -### Diff-aware (automatic when on a feature branch with no URL) -When on a feature branch, scope to pages affected by the branch changes: -1. Analyze the branch diff: `git diff main...HEAD --name-only` -2. Map changed files to affected pages/routes -3. Detect running app on common local ports (3000, 4000, 8080) -4. Audit only affected pages, compare design quality before/after - -### Regression (`--regression` or previous `design-baseline.json` found) -Run full audit, then load previous `design-baseline.json`. Compare: per-category grade deltas, new findings, resolved findings. Output regression table in report. - ---- - -## Phase 1: First Impression - -The most uniquely designer-like output. Form a gut reaction before analyzing anything. - -1. Navigate to the target URL -2. Take a full-page desktop screenshot: `$B screenshot "$REPORT_DIR/screenshots/first-impression.png"` -3. Write the **First Impression** using this structured critique format: - - "The site communicates **[what]**." (what it says at a glance — competence? playfulness? confusion?) - - "I notice **[observation]**." (what stands out, positive or negative — be specific) - - "The first 3 things my eye goes to are: **[1]**, **[2]**, **[3]**." (hierarchy check — are these intentional?) 
- - "If I had to describe this in one word: **[word]**." (gut verdict) - -This is the section users read first. Be opinionated. A designer doesn't hedge — they react. - ---- - -## Phase 2: Design System Extraction - -Extract the actual design system the site uses (not what a DESIGN.md says, but what's rendered): +After producing the Completion Summary above, persist the review result: ```bash -# Fonts in use (capped at 500 elements to avoid timeout) -$B js "JSON.stringify([...new Set([...document.querySelectorAll('*')].slice(0,500).map(e => getComputedStyle(e).fontFamily))])" - -# Color palette in use -$B js "JSON.stringify([...new Set([...document.querySelectorAll('*')].slice(0,500).flatMap(e => [getComputedStyle(e).color, getComputedStyle(e).backgroundColor]).filter(c => c !== 'rgba(0, 0, 0, 0)'))])" - -# Heading hierarchy -$B js "JSON.stringify([...document.querySelectorAll('h1,h2,h3,h4,h5,h6')].map(h => ({tag:h.tagName, text:h.textContent.trim().slice(0,50), size:getComputedStyle(h).fontSize, weight:getComputedStyle(h).fontWeight})))" - -# Touch target audit (find undersized interactive elements) -$B js "JSON.stringify([...document.querySelectorAll('a,button,input,[role=button]')].filter(e => {const r=e.getBoundingClientRect(); return r.width>0 && (r.width<44||r.height<44)}).map(e => ({tag:e.tagName, text:(e.textContent||'').trim().slice(0,30), w:Math.round(e.getBoundingClientRect().width), h:Math.round(e.getBoundingClientRect().height)})).slice(0,20))" - -# Performance baseline -$B perf -``` - -Structure findings as an **Inferred Design System**: -- **Fonts:** list with usage counts. Flag if >3 distinct font families. -- **Colors:** palette extracted. Flag if >12 unique non-gray colors. Note warm/cool/mixed. -- **Heading Scale:** h1-h6 sizes. Flag skipped levels, non-systematic size jumps. -- **Spacing Patterns:** sample padding/margin values. Flag non-scale values. - -After extraction, offer: *"Want me to save this as your DESIGN.md? 
I can lock in these observations as your project's design system baseline."* - ---- - -## Phase 3: Page-by-Page Visual Audit - -For each page in scope: - -```bash -$B goto -$B snapshot -i -a -o "$REPORT_DIR/screenshots/{page}-annotated.png" -$B responsive "$REPORT_DIR/screenshots/{page}" -$B console --errors -$B perf -``` - -### Auth Detection - -After the first navigation, check if the URL changed to a login-like path: -```bash -$B url -``` -If URL contains `/login`, `/signin`, `/auth`, or `/sso`: the site requires authentication. AskUserQuestion: "This site requires authentication. Want to import cookies from your browser? Run `/setup-browser-cookies` first if needed." - -### Design Audit Checklist (10 categories, ~80 items) - -Apply these at each page. Each finding gets an impact rating (high/medium/polish) and category. - -**1. Visual Hierarchy & Composition** (8 items) -- Clear focal point? One primary CTA per view? -- Eye flows naturally top-left to bottom-right? -- Visual noise — competing elements fighting for attention? -- Information density appropriate for content type? -- Z-index clarity — nothing unexpectedly overlapping? -- Above-the-fold content communicates purpose in 3 seconds? -- Squint test: hierarchy still visible when blurred? -- White space is intentional, not leftover? - -**2. 
Typography** (15 items) -- Font count <=3 (flag if more) -- Scale follows ratio (1.25 major third or 1.333 perfect fourth) -- Line-height: 1.5x body, 1.15-1.25x headings -- Measure: 45-75 chars per line (66 ideal) -- Heading hierarchy: no skipped levels (h1→h3 without h2) -- Weight contrast: >=2 weights used for hierarchy -- No blacklisted fonts (Papyrus, Comic Sans, Lobster, Impact, Jokerman) -- If primary font is Inter/Roboto/Open Sans/Poppins → flag as potentially generic -- `text-wrap: balance` or `text-pretty` on headings (check via `$B css text-wrap`) -- Curly quotes used, not straight quotes -- Ellipsis character (`…`) not three dots (`...`) -- `font-variant-numeric: tabular-nums` on number columns -- Body text >= 16px -- Caption/label >= 12px -- No letterspacing on lowercase text - -**3. Color & Contrast** (10 items) -- Palette coherent (<=12 unique non-gray colors) -- WCAG AA: body text 4.5:1, large text (18px+) 3:1, UI components 3:1 -- Semantic colors consistent (success=green, error=red, warning=yellow/amber) -- No color-only encoding (always add labels, icons, or patterns) -- Dark mode: surfaces use elevation, not just lightness inversion -- Dark mode: text off-white (~#E0E0E0), not pure white -- Primary accent desaturated 10-20% in dark mode -- `color-scheme: dark` on html element (if dark mode present) -- No red/green only combinations (8% of men have red-green deficiency) -- Neutral palette is warm or cool consistently — not mixed - -**4. 
Spacing & Layout** (12 items) -- Grid consistent at all breakpoints -- Spacing uses a scale (4px or 8px base), not arbitrary values -- Alignment is consistent — nothing floats outside the grid -- Rhythm: related items closer together, distinct sections further apart -- Border-radius hierarchy (not uniform bubbly radius on everything) -- Inner radius = outer radius - gap (nested elements) -- No horizontal scroll on mobile -- Max content width set (no full-bleed body text) -- `env(safe-area-inset-*)` for notch devices -- URL reflects state (filters, tabs, pagination in query params) -- Flex/grid used for layout (not JS measurement) -- Breakpoints: mobile (375), tablet (768), desktop (1024), wide (1440) - -**5. Interaction States** (10 items) -- Hover state on all interactive elements -- `focus-visible` ring present (never `outline: none` without replacement) -- Active/pressed state with depth effect or color shift -- Disabled state: reduced opacity + `cursor: not-allowed` -- Loading: skeleton shapes match real content layout -- Empty states: warm message + primary action + visual (not just "No items.") -- Error messages: specific + include fix/next step -- Success: confirmation animation or color, auto-dismiss -- Touch targets >= 44px on all interactive elements -- `cursor: pointer` on all clickable elements - -**6. Responsive Design** (8 items) -- Mobile layout makes *design* sense (not just stacked desktop columns) -- Touch targets sufficient on mobile (>= 44px) -- No horizontal scroll on any viewport -- Images handle responsive (srcset, sizes, or CSS containment) -- Text readable without zooming on mobile (>= 16px body) -- Navigation collapses appropriately (hamburger, bottom nav, etc.) -- Forms usable on mobile (correct input types, no autoFocus on mobile) -- No `user-scalable=no` or `maximum-scale=1` in viewport meta - -**7. 
Motion & Animation** (6 items) -- Easing: ease-out for entering, ease-in for exiting, ease-in-out for moving -- Duration: 50-700ms range (nothing slower unless page transition) -- Purpose: every animation communicates something (state change, attention, spatial relationship) -- `prefers-reduced-motion` respected (check: `$B js "matchMedia('(prefers-reduced-motion: reduce)').matches"`) -- No `transition: all` — properties listed explicitly -- Only `transform` and `opacity` animated (not layout properties like width, height, top, left) - -**8. Content & Microcopy** (8 items) -- Empty states designed with warmth (message + action + illustration/icon) -- Error messages specific: what happened + why + what to do next -- Button labels specific ("Save API Key" not "Continue" or "Submit") -- No placeholder/lorem ipsum text visible in production -- Truncation handled (`text-overflow: ellipsis`, `line-clamp`, or `break-words`) -- Active voice ("Install the CLI" not "The CLI will be installed") -- Loading states end with `…` ("Saving…" not "Saving...") -- Destructive actions have confirmation modal or undo window - -**9. AI Slop Detection** (10 anti-patterns — the blacklist) - -The test: would a human designer at a respected studio ever ship this? - -- Purple/violet/indigo gradient backgrounds or blue-to-purple color schemes -- **The 3-column feature grid:** icon-in-colored-circle + bold title + 2-line description, repeated 3x symmetrically. THE most recognizable AI layout. 
-- Icons in colored circles as section decoration (SaaS starter template look) -- Centered everything (`text-align: center` on all headings, descriptions, cards) -- Uniform bubbly border-radius on every element (same large radius on everything) -- Decorative blobs, floating circles, wavy SVG dividers (if a section feels empty, it needs better content, not decoration) -- Emoji as design elements (rockets in headings, emoji as bullet points) -- Colored left-border on cards (`border-left: 3px solid `) -- Generic hero copy ("Welcome to [X]", "Unlock the power of...", "Your all-in-one solution for...") -- Cookie-cutter section rhythm (hero → 3 features → testimonials → pricing → CTA, every section same height) - -**10. Performance as Design** (6 items) -- LCP < 2.0s (web apps), < 1.5s (informational sites) -- CLS < 0.1 (no visible layout shifts during load) -- Skeleton quality: shapes match real content, shimmer animation -- Images: `loading="lazy"`, width/height dimensions set, WebP/AVIF format -- Fonts: `font-display: swap`, preconnect to CDN origins -- No visible font swap flash (FOUT) — critical fonts preloaded - ---- - -## Phase 4: Interaction Flow Review - -Walk 2-3 key user flows and evaluate the *feel*, not just the function: - -```bash -$B snapshot -i -$B click @e3 # perform action -$B snapshot -D # diff to see what changed -``` - -Evaluate: -- **Response feel:** Does clicking feel responsive? Any delays or missing loading states? -- **Transition quality:** Are transitions intentional or generic/absent? -- **Feedback clarity:** Did the action clearly succeed or fail? Is the feedback immediate? -- **Form polish:** Focus states visible? Validation timing correct? Errors near the source? - ---- - -## Phase 5: Cross-Page Consistency - -Compare screenshots and observations across pages for: -- Navigation bar consistent across all pages? -- Footer consistent? -- Component reuse vs one-off designs (same button styled differently on different pages?) 
-- Tone consistency (one page playful while another is corporate?) -- Spacing rhythm carries across pages? - ---- - -## Phase 6: Compile Report - -### Output Locations - -**Local:** `.gstack/design-reports/design-audit-{domain}-{YYYY-MM-DD}.md` - -**Project-scoped:** -```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG -``` -Write to: `~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md` - -**Baseline:** Write `design-baseline.json` for regression mode: -```json -{ - "date": "YYYY-MM-DD", - "url": "", - "designScore": "B", - "aiSlopScore": "C", - "categoryGrades": { "hierarchy": "A", "typography": "B", ... }, - "findings": [{ "id": "FINDING-001", "title": "...", "impact": "high", "category": "typography" }] -} +echo '{"skill":"plan-design-review","timestamp":"TIMESTAMP","status":"STATUS","overall_score":N,"unresolved":N,"decisions_made":N}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl ``` -### Scoring System +Substitute values from the Completion Summary: +- **TIMESTAMP**: current ISO 8601 datetime +- **STATUS**: "clean" if overall score 8+ AND 0 unresolved; otherwise "issues_open" +- **overall_score**: final overall design score (0-10) +- **unresolved**: number of unresolved design decisions +- **decisions_made**: number of design decisions added to the plan -**Dual headline scores:** -- **Design Score: {A-F}** — weighted average of all 10 categories -- **AI Slop Score: {A-F}** — standalone grade with pithy verdict +## Review Readiness Dashboard -**Per-category grades:** -- **A:** Intentional, polished, delightful. Shows design thinking. -- **B:** Solid fundamentals, minor inconsistencies. Looks professional. -- **C:** Functional but generic. No major problems, no design point of view. -- **D:** Noticeable problems. Feels unfinished or careless. 
-- **F:** Actively hurting user experience. Needs significant rework. +After completing the review, read the review log and config to display the dashboard. -**Grade computation:** Each category starts at A. Each High-impact finding drops one letter grade. Each Medium-impact finding drops half a letter grade. Polish findings are noted but do not affect grade. Minimum is F. - -**Category weights for Design Score:** -| Category | Weight | -|----------|--------| -| Visual Hierarchy | 15% | -| Typography | 15% | -| Spacing & Layout | 15% | -| Color & Contrast | 10% | -| Interaction States | 10% | -| Responsive | 10% | -| Content Quality | 10% | -| AI Slop | 5% | -| Motion | 5% | -| Performance Feel | 5% | - -AI Slop is 5% of Design Score but also graded independently as a headline metric. - -### Regression Output - -When previous `design-baseline.json` exists or `--regression` flag is used: -- Load baseline grades -- Compare: per-category deltas, new findings, resolved findings -- Append regression table to report - ---- - -## Design Critique Format - -Use structured feedback, not opinions: -- "I notice..." — observation (e.g., "I notice the primary CTA competes with the secondary action") -- "I wonder..." — question (e.g., "I wonder if users will understand what 'Process' means here") -- "What if..." — suggestion (e.g., "What if we moved search to a more prominent position?") -- "I think... because..." — reasoned opinion (e.g., "I think the spacing between sections is too uniform because it doesn't create hierarchy") - -Tie everything to user goals and product objectives. Always suggest specific improvements alongside problems. - ---- - -## Important Rules - -1. **Think like a designer, not a QA engineer.** You care whether things feel right, look intentional, and respect the user. You do NOT just care whether things "work." -2. **Screenshots are evidence.** Every finding needs at least one screenshot. Use annotated screenshots (`snapshot -a`) to highlight elements. 
-3. **Be specific and actionable.** "Change X to Y because Z" — not "the spacing feels off." -4. **Never read source code.** Evaluate the rendered site, not the implementation. (Exception: offer to write DESIGN.md from extracted observations.) -5. **AI Slop detection is your superpower.** Most developers can't evaluate whether their site looks AI-generated. You can. Be direct about it. -6. **Quick wins matter.** Always include a "Quick Wins" section — the 3-5 highest-impact fixes that take <30 minutes each. -7. **Use `snapshot -C` for tricky UIs.** Finds clickable divs that the accessibility tree misses. -8. **Responsive is design, not just "not broken."** A stacked desktop layout on mobile is not responsive design — it's lazy. Evaluate whether the mobile layout makes *design* sense. -9. **Document incrementally.** Write each finding to the report as you find it. Don't batch. -10. **Depth over breadth.** 5-10 well-documented findings with screenshots and specific suggestions > 20 vague observations. 
- ---- - -## Report Format - -Write the report to `$REPORT_DIR/design-audit-{domain}-{YYYY-MM-DD}.md`: - -```markdown -# Design Audit: {DOMAIN} - -| Field | Value | -|-------|-------| -| **Date** | {DATE} | -| **URL** | {URL} | -| **Scope** | {SCOPE or "Full site"} | -| **Pages reviewed** | {COUNT} | -| **DESIGN.md** | {Found / Inferred / Not found} | - -## Design Score: {LETTER} | AI Slop Score: {LETTER} - -> {Pithy one-line verdict} - -| Category | Grade | Notes | -|----------|-------|-------| -| Visual Hierarchy | {A-F} | {one-line} | -| Typography | {A-F} | {one-line} | -| Spacing & Layout | {A-F} | {one-line} | -| Color & Contrast | {A-F} | {one-line} | -| Interaction States | {A-F} | {one-line} | -| Responsive | {A-F} | {one-line} | -| Motion | {A-F} | {one-line} | -| Content Quality | {A-F} | {one-line} | -| AI Slop | {A-F} | {one-line} | -| Performance Feel | {A-F} | {one-line} | - -## First Impression -{structured critique} - -## Top 5 Design Improvements -{prioritized, actionable} - -## Inferred Design System -{fonts, colors, heading scale, spacing} - -## Findings -{each: impact, category, page, what's wrong, what good looks like, screenshot} - -## Responsive Summary -{mobile/tablet/desktop grades per page} - -## Quick Wins (< 30 min each) -{high-impact, low-effort fixes} +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" +echo "---CONFIG---" +~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" ``` ---- +Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. For Design Review, show whichever is more recent between `plan-design-review` (full visual audit) and `design-review-lite` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. 
Display: -## DESIGN.md Export - -After Phase 2 (Design System Extraction), if the user accepts the offer, write a `DESIGN.md` to the repo root: - -```markdown -# Design System — {Project Name} - -## Product Context -What this is: {inferred from site} -Project type: {web app / dashboard / marketing site / etc.} - -## Typography -{extracted fonts with roles} - -## Color -{extracted palette} - -## Spacing -{extracted scale} - -## Heading Scale -{extracted h1-h6 sizes} - -## Decisions Log -| Date | Decision | Rationale | -|------|----------|-----------| -| {today} | Baseline captured from live site | Inferred by /plan-design-review | +``` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 0 | — | — | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ ``` ---- +**Review tiers:** +- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with \`gstack-config set skip_eng_review true\` (the "don't bother me" setting). +- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. +- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. 
-## Additional Rules (plan-design-review specific) +**Verdict logic:** +- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or \`skip_eng_review\` is \`true\`) +- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues +- CEO and Design reviews are shown for context but never block shipping +- If \`skip_eng_review\` config is \`true\`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED -11. **Never fix anything.** Find and document only. Do not read source code, edit files, or suggest code fixes. Your job is to report what could be better and suggest design improvements. Use `/qa-design-review` for the fix loop. -12. **The exception:** You MAY write a DESIGN.md file if the user accepts the offer. This is the only file you create. +## Formatting Rules +* Use NUMBERS for issues (1, 2, 3...) and LETTERS for options (A, B, C...). +* Label with NUMBER + LETTER (e.g., "3A", "3B"). +* One sentence max per option. +* After each pass, pause and wait for feedback. +* Rate before and after each pass for scannability. diff --git a/plan-design-review/SKILL.md.tmpl b/plan-design-review/SKILL.md.tmpl index b381c682..f8f5221a 100644 --- a/plan-design-review/SKILL.md.tmpl +++ b/plan-design-review/SKILL.md.tmpl @@ -1,147 +1,288 @@ --- name: plan-design-review -version: 1.0.0 +version: 2.0.0 description: | - Designer's eye review of a live site. Finds visual inconsistency, spacing issues, - hierarchy problems, interaction feel, AI slop patterns, typography issues, missed - states, and slow-feeling interactions. Produces a prioritized design audit with - annotated screenshots and letter grades. Infers your design system and offers to - export as DESIGN.md. Report-only — never modifies code. For the fix loop, use - /qa-design-review instead. + Designer's eye plan review — interactive, like CEO and Eng review. + Rates each design dimension 0-10, explains what would make it a 10, + then fixes the plan to get there. Works in plan mode. 
For live site + visual audits, use /design-review. allowed-tools: - - Bash - Read - - Write + - Edit + - Grep + - Glob + - Bash - AskUserQuestion --- {{PREAMBLE}} -# /plan-design-review: Designer's Eye Audit +{{BASE_BRANCH_DETECT}} -You are a senior product designer reviewing a live site. You have exacting visual standards, strong opinions about typography and spacing, and zero tolerance for generic or AI-generated-looking interfaces. You do NOT care whether things "work." You care whether they feel right, look intentional, and respect the user. +# /plan-design-review: Designer's Eye Plan Review -## Setup +You are a senior product designer reviewing a PLAN — not a live site. Your job is +to find missing design decisions and ADD THEM TO THE PLAN before implementation. -**Parse the user's request for these parameters:** +The output of this skill is a better plan, not a document about the plan. -| Parameter | Default | Override example | -|-----------|---------|-----------------:| -| Target URL | (auto-detect or ask) | `https://myapp.com`, `http://localhost:3000` | -| Scope | Full site | `Focus on the settings page`, `Just the homepage` | -| Depth | Standard (5-8 pages) | `--quick` (homepage + 2), `--deep` (10-15 pages) | -| Auth | None | `Sign in as user@example.com`, `Import cookies` | +## Design Philosophy -**If no URL is given and you're on a feature branch:** Automatically enter **diff-aware mode** (see Modes below). +You are not here to rubber-stamp this plan's UI. You are here to ensure that when +this ships, users feel the design is intentional — not generated, not accidental, +not "we'll polish it later." Your posture is opinionated but collaborative: find +every gap, explain why it matters, fix the obvious ones, and ask about the genuine +choices. -**If no URL is given and you're on main/master:** Ask the user for a URL. +Do NOT make any code changes. Do NOT start implementation. 
Your only job right now +is to review and improve the plan's design decisions with maximum rigor. -**Check for DESIGN.md:** +## Design Principles -Look for `DESIGN.md`, `design-system.md`, or similar in the repo root. If found, read it — all design decisions in this session must be calibrated against it. Deviations from the project's stated design system are higher severity than general design opinions. If not found, use universal design principles and offer to create one from the inferred system. +1. Empty states are features. "No items found." is not a design. Every empty state needs warmth, a primary action, and context. +2. Every screen has a hierarchy. What does the user see first, second, third? If everything competes, nothing wins. +3. Specificity over vibes. "Clean, modern UI" is not a design decision. Name the font, the spacing scale, the interaction pattern. +4. Edge cases are user experiences. 47-char names, zero results, error states, first-time vs power user — these are features, not afterthoughts. +5. AI slop is the enemy. Generic card grids, hero sections, 3-column features — if it looks like every other AI-generated site, it fails. +6. Responsive is not "stacked on mobile." Each viewport gets intentional design. +7. Accessibility is not optional. Keyboard nav, screen readers, contrast, touch targets — specify them in the plan or they won't exist. +8. Subtraction default. If a UI element doesn't earn its pixels, cut it. Feature bloat kills products faster than missing features. +9. Trust is earned at the pixel level. Every interface decision either builds or erodes user trust. -**Find the browse binary:** +## Cognitive Patterns — How Great Designers See -{{BROWSE_SETUP}} +These aren't a checklist — they're how you see. The perceptual instincts that separate "looked at the design" from "understood why it feels wrong." Let them run automatically as you review. -**Create output directories:** +1. 
**Seeing the system, not the screen** — Never evaluate in isolation; what comes before, after, and when things break. +2. **Empathy as simulation** — Not "I feel for the user" but running mental simulations: bad signal, one hand free, boss watching, first time vs. 1000th time. +3. **Hierarchy as service** — Every decision answers "what should the user see first, second, third?" Respecting their time, not prettifying pixels. +4. **Constraint worship** — Limitations force clarity. "If I can only show 3 things, which 3 matter most?" +5. **The question reflex** — First instinct is questions, not opinions. "Who is this for? What did they try before this?" +6. **Edge case paranoia** — What if the name is 47 chars? Zero results? Network fails? Colorblind? RTL language? +7. **The "Would I notice?" test** — Invisible = perfect. The highest compliment is not noticing the design. +8. **Principled taste** — "This feels wrong" is traceable to a broken principle. Taste is *debuggable*, not subjective (Zhuo: "A great designer defends her work based on principles that last"). +9. **Subtraction default** — "As little design as possible" (Rams). "Subtract the obvious, add the meaningful" (Maeda). +10. **Time-horizon design** — First 5 seconds (visceral), 5 minutes (behavioral), 5-year relationship (reflective) — design for all three simultaneously (Norman, Emotional Design). +11. **Design for trust** — Every design decision either builds or erodes trust. Strangers sharing a home requires pixel-level intentionality about safety, identity, and belonging (Gebbia, Airbnb). +12. **Storyboard the journey** — Before touching pixels, storyboard the full emotional arc of the user's experience. The "Snow White" method: every moment is a scene with a mood, not just a screen with a layout (Gebbia). 
+ +Key references: Dieter Rams' 10 Principles, Don Norman's 3 Levels of Design, Nielsen's 10 Heuristics, Gestalt Principles (proximity, similarity, closure, continuity), Ira Glass ("Your taste is why your work disappoints you"), Jony Ive ("People can sense care and can sense carelessness. Different and new is relatively easy. Doing something that's genuinely better is very hard."), Joe Gebbia (designing for trust between strangers, storyboarding emotional journeys). + +When reviewing a plan, empathy as simulation runs automatically. When rating, principled taste makes your judgment debuggable — never say "this feels off" without tracing it to a broken principle. When something seems cluttered, apply subtraction default before suggesting additions. + +## Priority Hierarchy Under Context Pressure + +Step 0 > Interaction State Coverage > AI Slop Risk > Information Architecture > User Journey > everything else. +Never skip Step 0, interaction states, or AI slop assessment. These are the highest-leverage design dimensions. + +## PRE-REVIEW SYSTEM AUDIT (before Step 0) + +Before reviewing the plan, gather context: ```bash -REPORT_DIR=".gstack/design-reports" -mkdir -p "$REPORT_DIR/screenshots" +git log --oneline -15 +git diff --stat ``` ---- +Then read: +- The plan file (current plan or branch diff) +- CLAUDE.md — project conventions +- DESIGN.md — if it exists, ALL design decisions calibrate against it +- TODOS.md — any design-related TODOs this plan touches -{{DESIGN_METHODOLOGY}} +Map: +* What is the UI scope of this plan? (pages, components, interactions) +* Does a DESIGN.md exist? If not, flag as a gap. +* Are there existing design patterns in the codebase to align with? +* What prior design reviews exist? (check reviews.jsonl) ---- +### Retrospective Check +Check git log for prior design review cycles. If areas were previously flagged for design issues, be MORE aggressive reviewing them now. -## Report Format +### UI Scope Detection +Analyze the plan. 
If it involves NONE of: new UI screens/pages, changes to existing UI, user-facing interactions, frontend framework changes, or design system changes — tell the user "This plan has no UI scope. A design review isn't applicable." and exit early. Don't force design review on a backend change. -Write the report to `$REPORT_DIR/design-audit-{domain}-{YYYY-MM-DD}.md`: +Report findings before proceeding to Step 0. -```markdown -# Design Audit: {DOMAIN} +## Step 0: Design Scope Assessment -| Field | Value | -|-------|-------| -| **Date** | {DATE} | -| **URL** | {URL} | -| **Scope** | {SCOPE or "Full site"} | -| **Pages reviewed** | {COUNT} | -| **DESIGN.md** | {Found / Inferred / Not found} | +### 0A. Initial Design Rating +Rate the plan's overall design completeness 0-10. +- "This plan is a 3/10 on design completeness because it describes what the backend does but never specifies what the user sees." +- "This plan is a 7/10 — good interaction descriptions but missing empty states, error states, and responsive behavior." -## Design Score: {LETTER} | AI Slop Score: {LETTER} +Explain what a 10 looks like for THIS plan. -> {Pithy one-line verdict} +### 0B. DESIGN.md Status +- If DESIGN.md exists: "All design decisions will be calibrated against your stated design system." +- If no DESIGN.md: "No design system found. Recommend running /design-consultation first. Proceeding with universal design principles." -| Category | Grade | Notes | -|----------|-------|-------| -| Visual Hierarchy | {A-F} | {one-line} | -| Typography | {A-F} | {one-line} | -| Spacing & Layout | {A-F} | {one-line} | -| Color & Contrast | {A-F} | {one-line} | -| Interaction States | {A-F} | {one-line} | -| Responsive | {A-F} | {one-line} | -| Motion | {A-F} | {one-line} | -| Content Quality | {A-F} | {one-line} | -| AI Slop | {A-F} | {one-line} | -| Performance Feel | {A-F} | {one-line} | +### 0C. 
Existing Design Leverage +What existing UI patterns, components, or design decisions in the codebase should this plan reuse? Don't reinvent what already works. -## First Impression -{structured critique} +### 0D. Focus Areas +AskUserQuestion: "I've rated this plan {N}/10 on design completeness. The biggest gaps are {X, Y, Z}. Want me to review all 7 dimensions, or focus on specific areas?" -## Top 5 Design Improvements -{prioritized, actionable} +**STOP.** Do NOT proceed until user responds. -## Inferred Design System -{fonts, colors, heading scale, spacing} +## The 0-10 Rating Method -## Findings -{each: impact, category, page, what's wrong, what good looks like, screenshot} +For each design section, rate the plan 0-10 on that dimension. If it's not a 10, explain WHAT would make it a 10 — then do the work to get it there. -## Responsive Summary -{mobile/tablet/desktop grades per page} +Pattern: +1. Rate: "Information Architecture: 4/10" +2. Gap: "It's a 4 because the plan doesn't define content hierarchy. A 10 would have clear primary/secondary/tertiary for every screen." +3. Fix: Edit the plan to add what's missing +4. Re-rate: "Now 8/10 — still missing mobile nav hierarchy" +5. AskUserQuestion if there's a genuine design choice to resolve +6. Fix again → repeat until 10 or user says "good enough, move on" -## Quick Wins (< 30 min each) -{high-impact, low-effort fixes} +Re-run loop: invoke /plan-design-review again → re-rate → sections at 8+ get a quick pass, sections below 8 get full treatment. + +## Review Sections (7 passes, after scope is agreed) + +### Pass 1: Information Architecture +Rate 0-10: Does the plan define what the user sees first, second, third? +FIX TO 10: Add information hierarchy to the plan. Include ASCII diagram of screen/page structure and navigation flow. Apply "constraint worship" — if you can only show 3 things, which 3? +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. If no issues, say so and move on. 
Do NOT proceed until user responds. + +### Pass 2: Interaction State Coverage +Rate 0-10: Does the plan specify loading, empty, error, success, partial states? +FIX TO 10: Add interaction state table to the plan: +``` + FEATURE | LOADING | EMPTY | ERROR | SUCCESS | PARTIAL + ---------------------|---------|-------|-------|---------|-------- + [each UI feature] | [spec] | [spec]| [spec]| [spec] | [spec] +``` +For each state: describe what the user SEES, not backend behavior. +Empty states are features — specify warmth, primary action, context. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 3: User Journey & Emotional Arc +Rate 0-10: Does the plan consider the user's emotional experience? +FIX TO 10: Add user journey storyboard: +``` + STEP | USER DOES | USER FEELS | PLAN SPECIFIES? + -----|------------------|-----------------|---------------- + 1 | Lands on page | [what emotion?] | [what supports it?] + ... +``` +Apply time-horizon design: 5-sec visceral, 5-min behavioral, 5-year reflective. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 4: AI Slop Risk +Rate 0-10: Does the plan describe specific, intentional UI — or generic patterns? +FIX TO 10: Rewrite vague UI descriptions with specific alternatives. +- "Cards with icons" → what differentiates these from every SaaS template? +- "Hero section" → what makes this hero feel like THIS product? +- "Clean, modern UI" → meaningless. Replace with actual design decisions. +- "Dashboard with widgets" → what makes this NOT every other dashboard? +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 5: Design System Alignment +Rate 0-10: Does the plan align with DESIGN.md? +FIX TO 10: If DESIGN.md exists, annotate with specific tokens/components. If no DESIGN.md, flag the gap and recommend `/design-consultation`. +Flag any new component — does it fit the existing vocabulary? +**STOP.** AskUserQuestion once per issue. 
Do NOT batch. Recommend + WHY. + +### Pass 6: Responsive & Accessibility +Rate 0-10: Does the plan specify mobile/tablet, keyboard nav, screen readers? +FIX TO 10: Add responsive specs per viewport — not "stacked on mobile" but intentional layout changes. Add a11y: keyboard nav patterns, ARIA landmarks, touch target sizes (44px min), color contrast requirements. +**STOP.** AskUserQuestion once per issue. Do NOT batch. Recommend + WHY. + +### Pass 7: Unresolved Design Decisions +Surface ambiguities that will haunt implementation: +``` + DECISION NEEDED | IF DEFERRED, WHAT HAPPENS + -----------------------------|--------------------------- + What does empty state look like? | Engineer ships "No items found." + Mobile nav pattern? | Desktop nav hides behind hamburger + ... +``` +Each decision = one AskUserQuestion with recommendation + WHY + alternatives. Edit the plan with each decision as it's made. + +## CRITICAL RULE — How to ask questions +Follow the AskUserQuestion format from the Preamble above. Additional rules for plan design reviews: +* **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. +* Describe the design gap concretely — what's missing, what the user will experience if it's not specified. +* Present 2-3 options. For each: effort to specify now, risk if deferred. +* **Map to Design Principles above.** One sentence connecting your recommendation to a specific principle. +* Label with issue NUMBER + option LETTER (e.g., "3A", "3B"). +* **Escape hatch:** If a section has no issues, say so and move on. If a gap has an obvious fix, state what you'll add and move on — don't waste a question on it. Only use AskUserQuestion when there is a genuine design choice with meaningful tradeoffs. + +## Required Outputs + +### "NOT in scope" section +Design decisions considered and explicitly deferred, with one-line rationale each. 
+ +### "What already exists" section +Existing DESIGN.md, UI patterns, and components that the plan should reuse. + +### TODOS.md updates +After all review passes are complete, present each potential TODO as its own individual AskUserQuestion. Never batch TODOs — one per question. Never silently skip this step. + +For design debt: missing a11y, unresolved responsive behavior, deferred empty states. Each TODO gets: +* **What:** One-line description of the work. +* **Why:** The concrete problem it solves or value it unlocks. +* **Pros:** What you gain by doing this work. +* **Cons:** Cost, complexity, or risks of doing it. +* **Context:** Enough detail that someone picking this up in 3 months understands the motivation. +* **Depends on / blocked by:** Any prerequisites. + +Then present options: **A)** Add to TODOS.md **B)** Skip — not valuable enough **C)** Build it now in this PR instead of deferring. + +### Completion Summary +``` + +====================================================================+ + | DESIGN PLAN REVIEW — COMPLETION SUMMARY | + +====================================================================+ + | System Audit | [DESIGN.md status, UI scope] | + | Step 0 | [initial rating, focus areas] | + | Pass 1 (Info Arch) | ___/10 → ___/10 after fixes | + | Pass 2 (States) | ___/10 → ___/10 after fixes | + | Pass 3 (Journey) | ___/10 → ___/10 after fixes | + | Pass 4 (AI Slop) | ___/10 → ___/10 after fixes | + | Pass 5 (Design Sys) | ___/10 → ___/10 after fixes | + | Pass 6 (Responsive) | ___/10 → ___/10 after fixes | + | Pass 7 (Decisions) | ___ resolved, ___ deferred | + +--------------------------------------------------------------------+ + | NOT in scope | written (___ items) | + | What already exists | written | + | TODOS.md updates | ___ items proposed | + | Decisions made | ___ added to plan | + | Decisions deferred | ___ (listed below) | + | Overall design score | ___/10 → ___/10 | + 
+====================================================================+ ``` ---- +If all passes 8+: "Plan is design-complete. Run /design-review after implementation for visual QA." +If any below 8: note what's unresolved and why (user chose to defer). -## DESIGN.md Export +### Unresolved Decisions +If any AskUserQuestion goes unanswered, note it here. Never silently default to an option. -After Phase 2 (Design System Extraction), if the user accepts the offer, write a `DESIGN.md` to the repo root: +## Review Log -```markdown -# Design System — {Project Name} +After producing the Completion Summary above, persist the review result: -## Product Context -What this is: {inferred from site} -Project type: {web app / dashboard / marketing site / etc.} - -## Typography -{extracted fonts with roles} - -## Color -{extracted palette} - -## Spacing -{extracted scale} - -## Heading Scale -{extracted h1-h6 sizes} - -## Decisions Log -| Date | Decision | Rationale | -|------|----------|-----------| -| {today} | Baseline captured from live site | Inferred by /plan-design-review | +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"plan-design-review","timestamp":"TIMESTAMP","status":"STATUS","overall_score":N,"unresolved":N,"decisions_made":N}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl ``` ---- +Substitute values from the Completion Summary: +- **TIMESTAMP**: current ISO 8601 datetime +- **STATUS**: "clean" if overall score 8+ AND 0 unresolved; otherwise "issues_open" +- **overall_score**: final overall design score (0-10) +- **unresolved**: number of unresolved design decisions +- **decisions_made**: number of design decisions added to the plan -## Additional Rules (plan-design-review specific) +{{REVIEW_DASHBOARD}} -11. **Never fix anything.** Find and document only. Do not read source code, edit files, or suggest code fixes. Your job is to report what could be better and suggest design improvements. 
Use `/qa-design-review` for the fix loop. -12. **The exception:** You MAY write a DESIGN.md file if the user accepts the offer. This is the only file you create. +## Formatting Rules +* NUMBER issues (1, 2, 3...) and LETTERS for options (A, B, C...). +* Label with NUMBER + LETTER (e.g., "3A", "3B"). +* One sentence max per option. +* After each pass, pause and wait for feedback. +* Rate before and after each pass for scannability. diff --git a/plan-eng-review/SKILL.md b/plan-eng-review/SKILL.md index 4cd8f481..48fe7230 100644 --- a/plan-eng-review/SKILL.md +++ b/plan-eng-review/SKILL.md @@ -28,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. 
**Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. 
ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -96,6 +135,28 @@ If you are running low on context or the user asks you to compress: Step 0 > Tes * Bias toward explicit over clever. * Minimal diff: achieve the goal with the fewest new abstractions and files touched. 
+## Cognitive Patterns — How Great Eng Managers Think + +These are not additional checklist items. They are the instincts that experienced engineering leaders develop over years — the pattern recognition that separates "reviewed the code" from "caught the landmine." Apply them throughout your review. + +1. **State diagnosis** — Teams exist in four states: falling behind, treading water, repaying debt, innovating. Each demands a different intervention (Larson, An Elegant Puzzle). +2. **Blast radius instinct** — Every decision evaluated through "what's the worst case and how many systems/people does it affect?" +3. **Boring by default** — "Every company gets about three innovation tokens." Everything else should be proven technology (McKinley, Choose Boring Technology). +4. **Incremental over revolutionary** — Strangler fig, not big bang. Canary, not global rollout. Refactor, not rewrite (Fowler). +5. **Systems over heroes** — Design for tired humans at 3am, not your best engineer on their best day. +6. **Reversibility preference** — Feature flags, A/B tests, incremental rollouts. Make the cost of being wrong low. +7. **Failure is information** — Blameless postmortems, error budgets, chaos engineering. Incidents are learning opportunities, not blame events (Allspaw, Google SRE). +8. **Org structure IS architecture** — Conway's Law in practice. Design both intentionally (Skelton/Pais, Team Topologies). +9. **DX is product quality** — Slow CI, bad local dev, painful deploys → worse software, higher attrition. Developer experience is a leading indicator. +10. **Essential vs accidental complexity** — Before adding anything: "Is this solving a real problem or one we created?" (Brooks, No Silver Bullet). +11. **Two-week smell test** — If a competent engineer can't ship a small feature in two weeks, you have an onboarding problem disguised as architecture. +12. **Glue work awareness** — Recognize invisible coordination work. 
Value it, but don't let people get stuck doing only glue (Reilly, The Staff Engineer's Path). +13. **Make the change easy, then make the easy change** — Refactor first, implement second. Never structural + behavioral changes simultaneously (Beck). +14. **Own your code in production** — No wall between dev and ops. "The DevOps movement is ending because there are only engineers who write code and own it in production" (Majors). +15. **Error budgets over uptime targets** — SLO of 99.9% = 0.1% downtime *budget to spend on shipping*. Reliability is resource allocation (Google SRE). + +When evaluating architecture, think "boring by default." When reviewing tests, think "systems over heroes." When assessing complexity, ask Brooks's question. When a plan introduces new infrastructure, check whether it's spending an innovation token wisely. + ## Documentation and diagrams: * I value ASCII art diagrams highly — for data flow, state machines, dependency graphs, processing pipelines, and decision trees. Use them liberally in plans and design docs. * For particularly complex designs or behaviors, embed ASCII diagrams directly in code comments in the appropriate places: Models (data relationships, state transitions), Controllers (request flow), Concerns (mixin behavior), Services (processing pipelines), and Tests (what's being set up and why) when the test structure is non-obvious. @@ -110,12 +171,13 @@ Before reviewing anything, answer these questions: 3. **Complexity check:** If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. 4. **TODOS cross-reference:** Read `TODOS.md` if it exists. Are any deferred items blocking this plan? Can any deferred items be bundled into this PR without expanding scope? Does this plan create new work that should be captured as a TODO? -Then ask if I want one of three options: -1. 
**SCOPE REDUCTION:** The plan is overbuilt. Propose a minimal version that achieves the core goal, then review that. -2. **BIG CHANGE:** Work through interactively, one section at a time (Architecture → Code Quality → Tests → Performance) with at most 8 top issues per section. -3. **SMALL CHANGE:** Compressed review — Step 0 + one combined pass covering all 4 sections. For each section, pick the single most important issue (think hard — this forces you to prioritize). Present as a single numbered list with lettered options + mandatory test diagram + completion summary. One AskUserQuestion round at the end. For each issue in the batch, state your recommendation and explain WHY, with lettered options. +5. **Completeness check:** Is the plan doing the complete version or a shortcut? With AI-assisted coding, the cost of completeness (100% test coverage, full edge case handling, complete error paths) is 10-100x cheaper than with a human team. If the plan proposes a shortcut that saves human-hours but only saves minutes with CC+gstack, recommend the complete version. Boil the lake. -**Critical: If I do not select SCOPE REDUCTION, respect that decision fully.** Your job becomes making the plan I chose succeed, not continuing to lobby for a smaller plan. Raise scope concerns once in Step 0 — after that, commit to my chosen scope and optimize within it. Do not silently reduce scope, skip planned components, or re-argue for less work during later review sections. +If the complexity check triggers (more than 8 files or more than 2 new classes/services), proactively recommend scope reduction via AskUserQuestion — explain what's overbuilt, propose a minimal version that achieves the core goal, and ask whether to reduce or proceed as-is. If the complexity check does not trigger, present your Step 0 findings and proceed directly to Section 1.
+ +Always work through the full interactive review: one section at a time (Architecture → Code Quality → Tests → Performance) with at most 8 top issues per section. + +**Critical: Once the user accepts or rejects a scope reduction recommendation, commit fully.** Do not re-argue for smaller scope during later review sections. Do not silently reduce scope or skip planned components. ## Review Sections (after scope is agreed) @@ -154,8 +216,7 @@ For LLM/prompt changes: check the "Prompt/LLM changes" file patterns listed in C After producing the test diagram, write a test plan artifact to the project directory so `/qa` and `/qa-only` can consume it as primary test input (replacing the lossy git-diff heuristic): ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') -BRANCH=$(git rev-parse --abbrev-ref HEAD) +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) USER=$(whoami) DATETIME=$(date +%Y%m%d-%H%M%S) mkdir -p ~/.gstack/projects/$SLUG @@ -198,11 +259,10 @@ Follow the AskUserQuestion format from the Preamble above. Additional rules for * **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. * Describe the problem concretely, with file and line references. * Present 2-3 options, including "do nothing" where that's reasonable. -* For each option, specify in one line: effort, risk, and maintenance burden. +* For each option, specify in one line: effort (human: ~X / CC: ~Y), risk, and maintenance burden. If the complete option is only marginally more effort than the shortcut with CC, recommend the complete option. * **Map the reasoning to my engineering preferences above.** One sentence connecting your recommendation to a specific preference (DRY, explicit > clever, minimal diff, etc.). * Label with issue NUMBER + option LETTER (e.g., "3A", "3B"). * **Escape hatch:** If a section has no issues, say so and move on. 
If an issue has an obvious fix with no real alternatives, state what you'll do and move on — don't waste a question on it. Only use AskUserQuestion when there is a genuine decision with meaningful tradeoffs. -* **Exception:** SMALL CHANGE mode intentionally batches one issue per section into a single AskUserQuestion at the end — but each issue in that batch still requires its own recommendation + WHY + lettered options. ## Required outputs @@ -240,7 +300,7 @@ If any failure mode has no test AND no error handling AND would be silent, flag ### Completion summary At the end of the review, fill in and display this summary so the user can see all findings at a glance: -- Step 0: Scope Challenge (user chose: ___) +- Step 0: Scope Challenge — ___ (scope accepted as-is / scope reduced per recommendation) - Architecture Review: ___ issues found - Code Quality Review: ___ issues found - Test Review: diagram produced, ___ gaps identified @@ -249,6 +309,7 @@ At the end of the review, fill in and display this summary so the user can see a - What already exists: written - TODOS.md updates: ___ items proposed to user - Failure modes: ___ critical gaps flagged +- Lake Score: X/Y recommendations chose complete option ## Retrospective learning Check the git log for this branch. If there are prior commits suggesting a previous review cycle (e.g., review-driven refactors, reverted changes), note what was changed and whether the current plan touches the same areas. Be more aggressive reviewing areas that were previously problematic. @@ -259,5 +320,60 @@ Check the git log for this branch. If there are prior commits suggesting a previ * One sentence max per option. Pick in under 5 seconds. * After each review section, pause and ask for feedback before moving on. 
+## Review Log + +After producing the Completion Summary above, persist the review result: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"plan-eng-review","timestamp":"TIMESTAMP","status":"STATUS","unresolved":N,"critical_gaps":N,"mode":"MODE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Substitute values from the Completion Summary: +- **TIMESTAMP**: current ISO 8601 datetime +- **STATUS**: "clean" if 0 unresolved decisions AND 0 critical gaps; otherwise "issues_open" +- **unresolved**: number from "Unresolved decisions" count +- **critical_gaps**: number from "Failure modes: ___ critical gaps flagged" +- **MODE**: FULL_REVIEW / SCOPE_REDUCED + +## Review Readiness Dashboard + +After completing the review, read the review log and config to display the dashboard. + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" +echo "---CONFIG---" +~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" +``` + +Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. For Design Review, show whichever is more recent between `plan-design-review` (full visual audit) and `design-review-lite` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. 
Display: + +``` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 0 | — | — | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ +``` + +**Review tiers:** +- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with `gstack-config set skip_eng_review true` (the "don't bother me" setting). +- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. +- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. + +**Verdict logic:** +- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or `skip_eng_review` is `true`) +- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues +- CEO and Design reviews are shown for context but never block shipping +- If `skip_eng_review` config is `true`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED + ## Unresolved decisions If the user does not respond to an AskUserQuestion or interrupts to move on, note which decisions were left unresolved. At the end of the review, list these as "Unresolved decisions that may bite you later" — never silently default to an option. 
diff --git a/plan-eng-review/SKILL.md.tmpl b/plan-eng-review/SKILL.md.tmpl index 410b0726..91f24719 100644 --- a/plan-eng-review/SKILL.md.tmpl +++ b/plan-eng-review/SKILL.md.tmpl @@ -31,6 +31,28 @@ If you are running low on context or the user asks you to compress: Step 0 > Tes * Bias toward explicit over clever. * Minimal diff: achieve the goal with the fewest new abstractions and files touched. +## Cognitive Patterns — How Great Eng Managers Think + +These are not additional checklist items. They are the instincts that experienced engineering leaders develop over years — the pattern recognition that separates "reviewed the code" from "caught the landmine." Apply them throughout your review. + +1. **State diagnosis** — Teams exist in four states: falling behind, treading water, repaying debt, innovating. Each demands a different intervention (Larson, An Elegant Puzzle). +2. **Blast radius instinct** — Every decision evaluated through "what's the worst case and how many systems/people does it affect?" +3. **Boring by default** — "Every company gets about three innovation tokens." Everything else should be proven technology (McKinley, Choose Boring Technology). +4. **Incremental over revolutionary** — Strangler fig, not big bang. Canary, not global rollout. Refactor, not rewrite (Fowler). +5. **Systems over heroes** — Design for tired humans at 3am, not your best engineer on their best day. +6. **Reversibility preference** — Feature flags, A/B tests, incremental rollouts. Make the cost of being wrong low. +7. **Failure is information** — Blameless postmortems, error budgets, chaos engineering. Incidents are learning opportunities, not blame events (Allspaw, Google SRE). +8. **Org structure IS architecture** — Conway's Law in practice. Design both intentionally (Skelton/Pais, Team Topologies). +9. **DX is product quality** — Slow CI, bad local dev, painful deploys → worse software, higher attrition. Developer experience is a leading indicator. +10. 
**Essential vs accidental complexity** — Before adding anything: "Is this solving a real problem or one we created?" (Brooks, No Silver Bullet). +11. **Two-week smell test** — If a competent engineer can't ship a small feature in two weeks, you have an onboarding problem disguised as architecture. +12. **Glue work awareness** — Recognize invisible coordination work. Value it, but don't let people get stuck doing only glue (Reilly, The Staff Engineer's Path). +13. **Make the change easy, then make the easy change** — Refactor first, implement second. Never structural + behavioral changes simultaneously (Beck). +14. **Own your code in production** — No wall between dev and ops. "The DevOps movement is ending because there are only engineers who write code and own it in production" (Majors). +15. **Error budgets over uptime targets** — SLO of 99.9% = 0.1% downtime *budget to spend on shipping*. Reliability is resource allocation (Google SRE). + +When evaluating architecture, think "boring by default." When reviewing tests, think "systems over heroes." When assessing complexity, ask Brooks's question. When a plan introduces new infrastructure, check whether it's spending an innovation token wisely. + ## Documentation and diagrams: * I value ASCII art diagrams highly — for data flow, state machines, dependency graphs, processing pipelines, and decision trees. Use them liberally in plans and design docs. * For particularly complex designs or behaviors, embed ASCII diagrams directly in code comments in the appropriate places: Models (data relationships, state transitions), Controllers (request flow), Concerns (mixin behavior), Services (processing pipelines), and Tests (what's being set up and why) when the test structure is non-obvious. @@ -45,12 +67,13 @@ Before reviewing anything, answer these questions: 3. 
**Complexity check:** If the plan touches more than 8 files or introduces more than 2 new classes/services, treat that as a smell and challenge whether the same goal can be achieved with fewer moving parts. 4. **TODOS cross-reference:** Read `TODOS.md` if it exists. Are any deferred items blocking this plan? Can any deferred items be bundled into this PR without expanding scope? Does this plan create new work that should be captured as a TODO? -Then ask if I want one of three options: -1. **SCOPE REDUCTION:** The plan is overbuilt. Propose a minimal version that achieves the core goal, then review that. -2. **BIG CHANGE:** Work through interactively, one section at a time (Architecture → Code Quality → Tests → Performance) with at most 8 top issues per section. -3. **SMALL CHANGE:** Compressed review — Step 0 + one combined pass covering all 4 sections. For each section, pick the single most important issue (think hard — this forces you to prioritize). Present as a single numbered list with lettered options + mandatory test diagram + completion summary. One AskUserQuestion round at the end. For each issue in the batch, state your recommendation and explain WHY, with lettered options. +5. **Completeness check:** Is the plan doing the complete version or a shortcut? With AI-assisted coding, the cost of completeness (100% test coverage, full edge case handling, complete error paths) is 10-100x cheaper than with a human team. If the plan proposes a shortcut that saves human-hours but only saves minutes with CC+gstack, recommend the complete version. Boil the lake. -**Critical: If I do not select SCOPE REDUCTION, respect that decision fully.** Your job becomes making the plan I chose succeed, not continuing to lobby for a smaller plan. Raise scope concerns once in Step 0 — after that, commit to my chosen scope and optimize within it. Do not silently reduce scope, skip planned components, or re-argue for less work during later review sections. 
+If the complexity check triggers (more than 8 files or more than 2 new classes/services), proactively recommend scope reduction via AskUserQuestion — explain what's overbuilt, propose a minimal version that achieves the core goal, and ask whether to reduce or proceed as-is. If the complexity check does not trigger, present your Step 0 findings and proceed directly to Section 1. + +Always work through the full interactive review: one section at a time (Architecture → Code Quality → Tests → Performance) with at most 8 top issues per section. + +**Critical: Once the user accepts or rejects a scope reduction recommendation, commit fully.** Do not re-argue for smaller scope during later review sections. Do not silently reduce scope or skip planned components. @@ -89,8 +112,7 @@ For LLM/prompt changes: check the "Prompt/LLM changes" file patterns listed in C After producing the test diagram, write a test plan artifact to the project directory so `/qa` and `/qa-only` can consume it as primary test input (replacing the lossy git-diff heuristic): ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') -BRANCH=$(git rev-parse --abbrev-ref HEAD) +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) USER=$(whoami) DATETIME=$(date +%Y%m%d-%H%M%S) mkdir -p ~/.gstack/projects/$SLUG @@ -133,11 +155,10 @@ Follow the AskUserQuestion format from the Preamble above. Additional rules for * **One issue = one AskUserQuestion call.** Never combine multiple issues into one question. * Describe the problem concretely, with file and line references. * Present 2-3 options, including "do nothing" where that's reasonable. -* For each option, specify in one line: effort, risk, and maintenance burden. +* For each option, specify in one line: effort (human: ~X / CC: ~Y), risk, and maintenance burden.
If the complete option is only marginally more effort than the shortcut with CC, recommend the complete option. * **Map the reasoning to my engineering preferences above.** One sentence connecting your recommendation to a specific preference (DRY, explicit > clever, minimal diff, etc.). * Label with issue NUMBER + option LETTER (e.g., "3A", "3B"). * **Escape hatch:** If a section has no issues, say so and move on. If an issue has an obvious fix with no real alternatives, state what you'll do and move on — don't waste a question on it. Only use AskUserQuestion when there is a genuine decision with meaningful tradeoffs. -* **Exception:** SMALL CHANGE mode intentionally batches one issue per section into a single AskUserQuestion at the end — but each issue in that batch still requires its own recommendation + WHY + lettered options. ## Required outputs @@ -175,7 +196,7 @@ If any failure mode has no test AND no error handling AND would be silent, flag ### Completion summary At the end of the review, fill in and display this summary so the user can see all findings at a glance: -- Step 0: Scope Challenge (user chose: ___) +- Step 0: Scope Challenge — ___ (scope accepted as-is / scope reduced per recommendation) - Architecture Review: ___ issues found - Code Quality Review: ___ issues found - Test Review: diagram produced, ___ gaps identified @@ -184,6 +205,7 @@ At the end of the review, fill in and display this summary so the user can see a - What already exists: written - TODOS.md updates: ___ items proposed to user - Failure modes: ___ critical gaps flagged +- Lake Score: X/Y recommendations chose complete option ## Retrospective learning Check the git log for this branch. If there are prior commits suggesting a previous review cycle (e.g., review-driven refactors, reverted changes), note what was changed and whether the current plan touches the same areas. Be more aggressive reviewing areas that were previously problematic. 
@@ -194,5 +216,24 @@ Check the git log for this branch. If there are prior commits suggesting a previ * One sentence max per option. Pick in under 5 seconds. * After each review section, pause and ask for feedback before moving on. +## Review Log + +After producing the Completion Summary above, persist the review result: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"plan-eng-review","timestamp":"TIMESTAMP","status":"STATUS","unresolved":N,"critical_gaps":N,"mode":"MODE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Substitute values from the Completion Summary: +- **TIMESTAMP**: current ISO 8601 datetime +- **STATUS**: "clean" if 0 unresolved decisions AND 0 critical gaps; otherwise "issues_open" +- **unresolved**: number from "Unresolved decisions" count +- **critical_gaps**: number from "Failure modes: ___ critical gaps flagged" +- **MODE**: FULL_REVIEW / SCOPE_REDUCED + +{{REVIEW_DASHBOARD}} + ## Unresolved decisions If the user does not respond to an AskUserQuestion or interrupts to move on, note which decisions were left unresolved. At the end of the review, list these as "Unresolved decisions that may bite you later" — never silently default to an option. 
diff --git a/qa-only/SKILL.md b/qa-only/SKILL.md index 5e74f03e..594979b9 100644 --- a/qa-only/SKILL.md +++ b/qa-only/SKILL.md @@ -27,22 +27,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. 
**Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -134,7 +173,7 @@ Before falling back to git diff heuristics, check for richer test plan sources: 1. **Project-scoped test plans:** Check `~/.gstack/projects/` for recent `*-test-plan-*.md` files for this repo ```bash - SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls -t ~/.gstack/projects/$SLUG/*-test-plan-*.md 2>/dev/null | head -1 ``` 2. **Conversation context:** Check if a prior `/plan-eng-review` or `/plan-ceo-review` produced test plan output in this conversation @@ -415,6 +454,7 @@ Minimum 0 per category. 8. 
**Depth over breadth.** 5-10 well-documented issues with evidence > 20 vague descriptions. 9. **Never delete output files.** Screenshots and reports accumulate — that's intentional. 10. **Use `snapshot -C` for tricky UIs.** Finds clickable divs that the accessibility tree misses. +11. **Show screenshots to the user.** After every `$B screenshot`, `$B snapshot -a -o`, or `$B responsive` command, use the Read tool on the output file(s) so the user can see them inline. For `responsive` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user. --- @@ -426,7 +466,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** Write test outcome artifact for cross-session context: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-test-outcome-{datetime}.md` @@ -451,3 +491,4 @@ Report filenames use the domain and date: `qa-report-myapp-com-2026-03-12.md` ## Additional Rules (qa-only specific) 11. **Never fix bugs.** Find and document only. Do not read source code, edit files, or suggest fixes in the report. Your job is to report what's broken, not to fix it. Use `/qa` for the test-fix-verify loop. +12. **No test framework detected?** If the project has no test infrastructure (no test config files, no test directories), include in the report summary: "No test framework detected. Run `/qa` to bootstrap one and enable regression test generation." diff --git a/qa-only/SKILL.md.tmpl b/qa-only/SKILL.md.tmpl index 5d49e201..831e71ed 100644 --- a/qa-only/SKILL.md.tmpl +++ b/qa-only/SKILL.md.tmpl @@ -52,7 +52,7 @@ Before falling back to git diff heuristics, check for richer test plan sources: 1. 
**Project-scoped test plans:** Check `~/.gstack/projects/` for recent `*-test-plan-*.md` files for this repo ```bash - SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls -t ~/.gstack/projects/$SLUG/*-test-plan-*.md 2>/dev/null | head -1 ``` 2. **Conversation context:** Check if a prior `/plan-eng-review` or `/plan-ceo-review` produced test plan output in this conversation @@ -72,7 +72,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** Write test outcome artifact for cross-session context: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-test-outcome-{datetime}.md` @@ -97,3 +97,4 @@ Report filenames use the domain and date: `qa-report-myapp-com-2026-03-12.md` ## Additional Rules (qa-only specific) 11. **Never fix bugs.** Find and document only. Do not read source code, edit files, or suggest fixes in the report. Your job is to report what's broken, not to fix it. Use `/qa` for the test-fix-verify loop. +12. **No test framework detected?** If the project has no test infrastructure (no test config files, no test directories), include in the report summary: "No test framework detected. Run `/qa` to bootstrap one and enable regression test generation." 
diff --git a/qa/SKILL.md b/qa/SKILL.md index b5ae216f..bad78095 100644 --- a/qa/SKILL.md +++ b/qa/SKILL.md @@ -16,6 +16,7 @@ allowed-tools: - Glob - Grep - AskUserQuestion + - WebSearch --- @@ -32,22 +33,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. 
**Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -157,6 +197,161 @@ If `NEEDS_SETUP`: 2. Run: `cd && ./setup` 3. 
If `bun` is not installed: `curl -fsSL https://bun.sh/install | bash`
+
+**Check test framework (bootstrap if needed):**
+
+## Test Framework Bootstrap
+
+**Detect existing test framework and project runtime:**
+
+```bash
+# Detect project runtime
+[ -f Gemfile ] && echo "RUNTIME:ruby"
+[ -f package.json ] && echo "RUNTIME:node"
+{ [ -f requirements.txt ] || [ -f pyproject.toml ]; } && echo "RUNTIME:python"
+[ -f go.mod ] && echo "RUNTIME:go"
+[ -f Cargo.toml ] && echo "RUNTIME:rust"
+[ -f composer.json ] && echo "RUNTIME:php"
+[ -f mix.exs ] && echo "RUNTIME:elixir"
+# Detect sub-frameworks
+[ -f Gemfile ] && grep -q "rails" Gemfile 2>/dev/null && echo "FRAMEWORK:rails"
+[ -f package.json ] && grep -q '"next"' package.json 2>/dev/null && echo "FRAMEWORK:nextjs"
+# Check for existing test infrastructure
+ls jest.config.* vitest.config.* playwright.config.* .rspec pytest.ini phpunit.xml 2>/dev/null
+# pyproject.toml alone is not evidence of tests — only count it if it configures pytest
+grep -l '\[tool\.pytest' pyproject.toml 2>/dev/null
+ls -d test/ tests/ spec/ __tests__/ cypress/ e2e/ 2>/dev/null
+# Check opt-out marker
+[ -f .gstack/no-test-bootstrap ] && echo "BOOTSTRAP_DECLINED"
+```
+
+**If test framework detected** (config files or test directories found):
+Print "Test framework detected: {name} ({N} existing tests). Skipping bootstrap."
+Read 2-3 existing test files to learn conventions (naming, imports, assertion style, setup patterns).
+Store conventions as prose context for use in Phase 8e.5 or Step 3.4. **Skip the rest of bootstrap.**
+
+**If BOOTSTRAP_DECLINED** appears: Print "Test bootstrap previously declined — skipping." **Skip the rest of bootstrap.**
+
+**If NO runtime detected** (no config files found): Use AskUserQuestion:
+"I couldn't detect your project's language. What runtime are you using?"
+Options: A) Node.js/TypeScript B) Ruby/Rails C) Python D) Go E) Rust F) PHP G) Elixir H) This project doesn't need tests.
+If user picks H → write `.gstack/no-test-bootstrap` and continue without tests.
+
+**If runtime detected but no test framework — bootstrap:**
+
+### B2.
Research best practices + +Use WebSearch to find current best practices for the detected runtime: +- `"[runtime] best test framework 2025 2026"` +- `"[framework A] vs [framework B] comparison"` + +If WebSearch is unavailable, use this built-in knowledge table: + +| Runtime | Primary recommendation | Alternative | +|---------|----------------------|-------------| +| Ruby/Rails | minitest + fixtures + capybara | rspec + factory_bot + shoulda-matchers | +| Node.js | vitest + @testing-library | jest + @testing-library | +| Next.js | vitest + @testing-library/react + playwright | jest + cypress | +| Python | pytest + pytest-cov | unittest | +| Go | stdlib testing + testify | stdlib only | +| Rust | cargo test (built-in) + mockall | — | +| PHP | phpunit + mockery | pest | +| Elixir | ExUnit (built-in) + ex_machina | — | + +### B3. Framework selection + +Use AskUserQuestion: +"I detected this is a [Runtime/Framework] project with no test framework. I researched current best practices. Here are the options: +A) [Primary] — [rationale]. Includes: [packages]. Supports: unit, integration, smoke, e2e +B) [Alternative] — [rationale]. Includes: [packages] +C) Skip — don't set up testing right now +RECOMMENDATION: Choose A because [reason based on project context]" + +If user picks C → write `.gstack/no-test-bootstrap`. Tell user: "If you change your mind later, delete `.gstack/no-test-bootstrap` and re-run." Continue without tests. + +If multiple runtimes detected (monorepo) → ask which runtime to set up first, with option to do both sequentially. + +### B4. Install and configure + +1. Install the chosen packages (npm/bun/gem/pip/etc.) +2. Create minimal config file +3. Create directory structure (test/, spec/, etc.) +4. Create one example test matching the project's code to verify setup works + +If package installation fails → debug once. If still failing → revert with `git checkout -- package.json package-lock.json` (or equivalent for the runtime). 
Warn user and continue without tests. + +### B4.5. First real tests + +Generate 3-5 real tests for existing code: + +1. **Find recently changed files:** `git log --since=30.days --name-only --format="" | sort | uniq -c | sort -rn | head -10` +2. **Prioritize by risk:** Error handlers > business logic with conditionals > API endpoints > pure functions +3. **For each file:** Write one test that tests real behavior with meaningful assertions. Never `expect(x).toBeDefined()` — test what the code DOES. +4. Run each test. Passes → keep. Fails → fix once. Still fails → delete silently. +5. Generate at least 1 test, cap at 5. + +Never import secrets, API keys, or credentials in test files. Use environment variables or test fixtures. + +### B5. Verify + +```bash +# Run the full test suite to confirm everything works +{detected test command} +``` + +If tests fail → debug once. If still failing → revert all bootstrap changes and warn user. + +### B5.5. CI/CD pipeline + +```bash +# Check CI provider +ls -d .github/ 2>/dev/null && echo "CI:github" +ls .gitlab-ci.yml .circleci/ bitrise.yml 2>/dev/null +``` + +If `.github/` exists (or no CI detected — default to GitHub Actions): +Create `.github/workflows/test.yml` with: +- `runs-on: ubuntu-latest` +- Appropriate setup action for the runtime (setup-node, setup-ruby, setup-python, etc.) +- The same test command verified in B5 +- Trigger: push + pull_request + +If non-GitHub CI detected → skip CI generation with note: "Detected {provider} — CI pipeline generation supports GitHub Actions only. Add test step to your existing pipeline manually." + +### B6. Create TESTING.md + +First check: If TESTING.md already exists → read it and update/append rather than overwriting. Never destroy existing content. + +Write TESTING.md with: +- Philosophy: "100% test coverage is the key to great vibe coding. Tests let you move fast, trust your instincts, and ship with confidence — without them, vibe coding is just yolo coding. 
With tests, it's a superpower." +- Framework name and version +- How to run tests (the verified command from B5) +- Test layers: Unit tests (what, where, when), Integration tests, Smoke tests, E2E tests +- Conventions: file naming, assertion style, setup/teardown patterns + +### B7. Update CLAUDE.md + +First check: If CLAUDE.md already has a `## Testing` section → skip. Don't duplicate. + +Append a `## Testing` section: +- Run command and test directory +- Reference to TESTING.md +- Test expectations: + - 100% test coverage is the goal — tests make vibe coding safe + - When writing new functions, write a corresponding test + - When fixing a bug, write a regression test + - When adding error handling, write a test that triggers the error + - When adding a conditional (if/else, switch), write tests for BOTH paths + - Never commit code that makes existing tests fail + +### B8. Commit + +```bash +git status --porcelain +``` + +Only commit if there are changes. Stage all bootstrap files (config, test directory, TESTING.md, CLAUDE.md, .github/workflows/test.yml if created): +`git commit -m "chore: bootstrap test framework ({framework name})"` + +--- + **Create output directories:** ```bash @@ -171,7 +366,7 @@ Before falling back to git diff heuristics, check for richer test plan sources: 1. **Project-scoped test plans:** Check `~/.gstack/projects/` for recent `*-test-plan-*.md` files for this repo ```bash - SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls -t ~/.gstack/projects/$SLUG/*-test-plan-*.md 2>/dev/null | head -1 ``` 2. **Conversation context:** Check if a prior `/plan-eng-review` or `/plan-ceo-review` produced test plan output in this conversation @@ -454,6 +649,7 @@ Minimum 0 per category. 8. **Depth over breadth.** 5-10 well-documented issues with evidence > 20 vague descriptions. 9. 
**Never delete output files.** Screenshots and reports accumulate — that's intentional. 10. **Use `snapshot -C` for tricky UIs.** Finds clickable divs that the accessibility tree misses. +11. **Show screenshots to the user.** After every `$B screenshot`, `$B snapshot -a -o`, or `$B responsive` command, use the Read tool on the output file(s) so the user can see them inline. For `responsive` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user. 1. Find browse binary (see Setup above) 2. Create output directories @@ -769,6 +965,59 @@ $B snapshot -D - **best-effort**: fix applied but couldn't fully verify (e.g., needs auth state, external service) - **reverted**: regression detected → `git revert HEAD` → mark issue as "deferred" +### 8e.5. Regression Test + +Skip if: classification is not "verified", OR the fix is purely visual/CSS with no JS behavior, OR no test framework was detected AND user declined bootstrap. + +**1. Study the project's existing test patterns:** + +Read 2-3 test files closest to the fix (same directory, same code type). Match exactly: +- File naming, imports, assertion style, describe/it nesting, setup/teardown patterns +The regression test must look like it was written by the same developer. + +**2. Trace the bug's codepath, then write a regression test:** + +Before writing the test, trace the data flow through the code you just fixed: +- What input/state triggered the bug? (the exact precondition) +- What codepath did it follow? (which branches, which function calls) +- Where did it break? (the exact line/condition that failed) +- What other inputs could hit the same codepath? 
(edge cases around the fix) + +The test MUST: +- Set up the precondition that triggered the bug (the exact state that made it break) +- Perform the action that exposed the bug +- Assert the correct behavior (NOT "it renders" or "it doesn't throw") +- If you found adjacent edge cases while tracing, test those too (e.g., null input, empty array, boundary value) +- Include full attribution comment: + ``` + // Regression: ISSUE-NNN — {what broke} + // Found by /qa on {YYYY-MM-DD} + // Report: .gstack/qa-reports/qa-report-{domain}-{date}.md + ``` + +Test type decision: +- Console error / JS exception / logic bug → unit or integration test +- Broken form / API failure / data flow bug → integration test with request/response +- Visual bug with JS behavior (broken dropdown, animation) → component test +- Pure CSS → skip (caught by QA reruns) + +Generate unit tests. Mock all external dependencies (DB, API, Redis, file system). + +Use auto-incrementing names to avoid collisions: check existing `{name}.regression-*.test.{ext}` files, take max number + 1. + +**3. Run only the new test file:** + +```bash +{detected test command} {new-test-file} +``` + +**4. Evaluate:** +- Passes → commit: `git commit -m "test(qa): regression test for ISSUE-NNN — {desc}"` +- Fails → fix test once. Still failing → delete test, defer. +- Taking >2 min exploration → skip and defer. + +**5. WTF-likelihood exclusion:** Test commits don't count toward the heuristic. + ### 8f. 
Self-Regulation (STOP AND EVALUATE) Every 5 fixes (or after any revert), compute the WTF-likelihood: @@ -807,7 +1056,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** Write test outcome artifact for cross-session context: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-test-outcome-{datetime}.md` @@ -842,6 +1091,6 @@ If the repo has a `TODOS.md`: 11. **Clean working tree required.** Refuse to start if `git status --porcelain` is non-empty. 12. **One commit per fix.** Never bundle multiple fixes into one commit. -13. **Never modify tests or CI configuration.** Only fix application source code. +13. **Only modify tests when generating regression tests in Phase 8e.5.** Never modify CI configuration. Never modify existing tests — only create new test files. 14. **Revert on regression.** If a fix makes things worse, `git revert HEAD` immediately. 15. **Self-regulate.** Follow the WTF-likelihood heuristic. When in doubt, stop and ask. diff --git a/qa/SKILL.md.tmpl b/qa/SKILL.md.tmpl index ce2a914b..10aa8e0f 100644 --- a/qa/SKILL.md.tmpl +++ b/qa/SKILL.md.tmpl @@ -16,6 +16,7 @@ allowed-tools: - Glob - Grep - AskUserQuestion + - WebSearch --- {{PREAMBLE}} @@ -58,6 +59,10 @@ fi {{BROWSE_SETUP}} +**Check test framework (bootstrap if needed):** + +{{TEST_BOOTSTRAP}} + **Create output directories:** ```bash @@ -72,7 +77,7 @@ Before falling back to git diff heuristics, check for richer test plan sources: 1. 
**Project-scoped test plans:** Check `~/.gstack/projects/` for recent `*-test-plan-*.md` files for this repo ```bash - SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) ls -t ~/.gstack/projects/$SLUG/*-test-plan-*.md 2>/dev/null | head -1 ``` 2. **Conversation context:** Check if a prior `/plan-eng-review` or `/plan-ceo-review` produced test plan output in this conversation @@ -398,6 +403,59 @@ $B snapshot -D - **best-effort**: fix applied but couldn't fully verify (e.g., needs auth state, external service) - **reverted**: regression detected → `git revert HEAD` → mark issue as "deferred" +### 8e.5. Regression Test + +Skip if: classification is not "verified", OR the fix is purely visual/CSS with no JS behavior, OR no test framework was detected AND user declined bootstrap. + +**1. Study the project's existing test patterns:** + +Read 2-3 test files closest to the fix (same directory, same code type). Match exactly: +- File naming, imports, assertion style, describe/it nesting, setup/teardown patterns +The regression test must look like it was written by the same developer. + +**2. Trace the bug's codepath, then write a regression test:** + +Before writing the test, trace the data flow through the code you just fixed: +- What input/state triggered the bug? (the exact precondition) +- What codepath did it follow? (which branches, which function calls) +- Where did it break? (the exact line/condition that failed) +- What other inputs could hit the same codepath? 
(edge cases around the fix) + +The test MUST: +- Set up the precondition that triggered the bug (the exact state that made it break) +- Perform the action that exposed the bug +- Assert the correct behavior (NOT "it renders" or "it doesn't throw") +- If you found adjacent edge cases while tracing, test those too (e.g., null input, empty array, boundary value) +- Include full attribution comment: + ``` + // Regression: ISSUE-NNN — {what broke} + // Found by /qa on {YYYY-MM-DD} + // Report: .gstack/qa-reports/qa-report-{domain}-{date}.md + ``` + +Test type decision: +- Console error / JS exception / logic bug → unit or integration test +- Broken form / API failure / data flow bug → integration test with request/response +- Visual bug with JS behavior (broken dropdown, animation) → component test +- Pure CSS → skip (caught by QA reruns) + +Generate unit tests. Mock all external dependencies (DB, API, Redis, file system). + +Use auto-incrementing names to avoid collisions: check existing `{name}.regression-*.test.{ext}` files, take max number + 1. + +**3. Run only the new test file:** + +```bash +{detected test command} {new-test-file} +``` + +**4. Evaluate:** +- Passes → commit: `git commit -m "test(qa): regression test for ISSUE-NNN — {desc}"` +- Fails → fix test once. Still failing → delete test, defer. +- Taking >2 min exploration → skip and defer. + +**5. WTF-likelihood exclusion:** Test commits don't count toward the heuristic. + ### 8f. 
Self-Regulation (STOP AND EVALUATE) Every 5 fixes (or after any revert), compute the WTF-likelihood: @@ -436,7 +494,7 @@ Write the report to both local and project-scoped locations: **Project-scoped:** Write test outcome artifact for cross-session context: ```bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\([^/]*/[^/]*\)\.git$|\1|;s|.*[:/]\([^/]*/[^/]*\)$|\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG ``` Write to `~/.gstack/projects/{slug}/{user}-{branch}-test-outcome-{datetime}.md` @@ -471,6 +529,6 @@ If the repo has a `TODOS.md`: 11. **Clean working tree required.** Refuse to start if `git status --porcelain` is non-empty. 12. **One commit per fix.** Never bundle multiple fixes into one commit. -13. **Never modify tests or CI configuration.** Only fix application source code. +13. **Only modify tests when generating regression tests in Phase 8e.5.** Never modify CI configuration. Never modify existing tests — only create new test files. 14. **Revert on regression.** If a fix makes things worse, `git revert HEAD` immediately. 15. **Self-regulate.** Follow the WTF-likelihood heuristic. When in doubt, stop and ask. 
diff --git a/qa/templates/qa-report-template.md b/qa/templates/qa-report-template.md index 5466bda4..6aa30943 100644 --- a/qa/templates/qa-report-template.md +++ b/qa/templates/qa-report-template.md @@ -86,6 +86,22 @@ --- +## Regression Tests + +| Issue | Test File | Status | Description | +|-------|-----------|--------|-------------| +| ISSUE-NNN | path/to/test | committed / deferred / skipped | description | + +### Deferred Tests + +#### ISSUE-NNN: {title} +**Precondition:** {setup state that triggers the bug} +**Action:** {what the user does} +**Expected:** {correct behavior} +**Why deferred:** {reason} + +--- + ## Ship Readiness | Metric | Value | diff --git a/retro/SKILL.md b/retro/SKILL.md index 2ff77358..96ec9b8d 100644 --- a/retro/SKILL.md +++ b/retro/SKILL.md @@ -27,22 +27,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. 
Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. 
"Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -164,6 +203,15 @@ cat ~/.gstack/greptile-history.md 2>/dev/null || true # 9. TODOS.md backlog (if available) cat TODOS.md 2>/dev/null || true + +# 10. Test file count +find . 
-name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' 2>/dev/null | grep -v node_modules | wc -l + +# 11. Regression test commits in window +git log origin/ --since="" --oneline --grep="test(qa):" --grep="test(design):" --grep="test: coverage" + +# 12. Test files changed in window +git log origin/ --since="" --format="" --name-only | grep -E '\.(test|spec)\.' | sort -u | wc -l ``` ### Step 2: Compute Metrics @@ -185,6 +233,7 @@ Calculate and present these metrics in a summary table: | Detected sessions | N | | Avg LOC/session-hour | N | | Greptile signal | N% (Y catches, Z FPs) | +| Test Health | N total tests · M added this period · K regression tests | Then show a **per-author leaderboard** immediately below: @@ -408,7 +457,17 @@ Use the Write tool to save the JSON file with this schema: } ``` -**Note:** Only include the `greptile` field if `~/.gstack/greptile-history.md` exists and has entries within the time window. Only include the `backlog` field if `TODOS.md` exists. If either has no data, omit the field entirely. +**Note:** Only include the `greptile` field if `~/.gstack/greptile-history.md` exists and has entries within the time window. Only include the `backlog` field if `TODOS.md` exists. Only include the `test_health` field if test files were found (command 10 returns > 0). If any has no data, omit the field entirely. 
+ +Include test health data in the JSON when test files exist: +```json + "test_health": { + "total_test_files": 47, + "tests_added_this_period": 5, + "regression_test_commits": 3, + "test_files_changed": 8 + } +``` Include backlog data in the JSON when TODOS.md exists: ```json @@ -470,6 +529,13 @@ Narrative covering: - Any XL PRs that should have been split - Greptile signal ratio and trend (if history exists): "Greptile: X% signal (Y valid catches, Z false positives)" +### Test Health +- Total test files: N (from command 10) +- Tests added this period: M (from command 12 — test files changed) +- Regression test commits: list `test(qa):` and `test(design):` and `test: coverage` commits from command 11 +- If prior retro exists and has `test_health`: show delta "Test count: {last} → {now} (+{delta})" +- If test ratio < 20%: flag as growth area — "100% test coverage is the goal. Tests make vibe coding safe." + ### Focus & Highlights (from Step 8) - Focus score with interpretation diff --git a/retro/SKILL.md.tmpl b/retro/SKILL.md.tmpl index 7291a6ed..ed0a387a 100644 --- a/retro/SKILL.md.tmpl +++ b/retro/SKILL.md.tmpl @@ -99,6 +99,15 @@ cat ~/.gstack/greptile-history.md 2>/dev/null || true # 9. TODOS.md backlog (if available) cat TODOS.md 2>/dev/null || true + +# 10. Test file count +find . -name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' 2>/dev/null | grep -v node_modules | wc -l + +# 11. Regression test commits in window +git log origin/ --since="" --oneline --grep="test(qa):" --grep="test(design):" --grep="test: coverage" + +# 12. Test files changed in window +git log origin/ --since="" --format="" --name-only | grep -E '\.(test|spec)\.' 
| sort -u | wc -l ``` ### Step 2: Compute Metrics @@ -120,6 +129,7 @@ Calculate and present these metrics in a summary table: | Detected sessions | N | | Avg LOC/session-hour | N | | Greptile signal | N% (Y catches, Z FPs) | +| Test Health | N total tests · M added this period · K regression tests | Then show a **per-author leaderboard** immediately below: @@ -343,7 +353,17 @@ Use the Write tool to save the JSON file with this schema: } ``` -**Note:** Only include the `greptile` field if `~/.gstack/greptile-history.md` exists and has entries within the time window. Only include the `backlog` field if `TODOS.md` exists. If either has no data, omit the field entirely. +**Note:** Only include the `greptile` field if `~/.gstack/greptile-history.md` exists and has entries within the time window. Only include the `backlog` field if `TODOS.md` exists. Only include the `test_health` field if test files were found (command 10 returns > 0). If any has no data, omit the field entirely. + +Include test health data in the JSON when test files exist: +```json + "test_health": { + "total_test_files": 47, + "tests_added_this_period": 5, + "regression_test_commits": 3, + "test_files_changed": 8 + } +``` Include backlog data in the JSON when TODOS.md exists: ```json @@ -405,6 +425,13 @@ Narrative covering: - Any XL PRs that should have been split - Greptile signal ratio and trend (if history exists): "Greptile: X% signal (Y valid catches, Z false positives)" +### Test Health +- Total test files: N (from command 10) +- Tests added this period: M (from command 12 — test files changed) +- Regression test commits: list `test(qa):` and `test(design):` and `test: coverage` commits from command 11 +- If prior retro exists and has `test_health`: show delta "Test count: {last} → {now} (+{delta})" +- If test ratio < 20%: flag as growth area — "100% test coverage is the goal. Tests make vibe coding safe." 
+ ### Focus & Highlights (from Step 8) - Focus score with interpretation diff --git a/review/SKILL.md b/review/SKILL.md index 3ad683d6..3a14a9d3 100644 --- a/review/SKILL.md +++ b/review/SKILL.md @@ -28,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. 
**Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. 
The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -157,6 +196,47 @@ Follow the output format specified in the checklist. Respect the suppressions --- +## Step 4.5: Design Review (conditional) + +## Design Review (conditional, diff-scoped) + +Check if the diff touches frontend files using `gstack-diff-scope`: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null) +``` + +**If `SCOPE_FRONTEND=false`:** Skip design review silently. No output. + +**If `SCOPE_FRONTEND=true`:** + +1. **Check for DESIGN.md.** If `DESIGN.md` or `design-system.md` exists in the repo root, read it. All design findings are calibrated against it — patterns blessed in DESIGN.md are not flagged. If not found, use universal design principles. + +2. 
**Read `.claude/skills/review/design-checklist.md`.** If the file cannot be read, skip design review with a note: "Design checklist not found — skipping design review." + +3. **Read each changed frontend file** (full file, not just diff hunks). Frontend files are identified by the patterns listed in the checklist. + +4. **Apply the design checklist** against the changed files. For each item: + - **[HIGH] mechanical CSS fix** (`outline: none`, `!important`, `font-size < 16px`): classify as AUTO-FIX + - **[HIGH/MEDIUM] design judgment needed**: classify as ASK + - **[LOW] intent-based detection**: present as "Possible — verify visually or run /design-review" + +5. **Include findings** in the review output under a "Design Review" header, following the output format in the checklist. Design findings merge with code review findings into the same Fix-First flow. + +6. **Log the result** for the Review Readiness Dashboard: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"design-review-lite","timestamp":"TIMESTAMP","status":"STATUS","findings":N,"auto_fixed":M}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Substitute: TIMESTAMP = ISO 8601 datetime, STATUS = "clean" if 0 findings or "issues_found", N = total findings, M = auto-fixed count. + +Include any design findings alongside the findings from Step 4. They follow the same Fix-First flow in Step 5 — AUTO-FIX for mechanical CSS fixes, ASK for everything else. + +--- + ## Step 5: Fix-First Review **Every finding gets action — not just critical ones.** diff --git a/review/SKILL.md.tmpl b/review/SKILL.md.tmpl index c122ada1..c1d3fae6 100644 --- a/review/SKILL.md.tmpl +++ b/review/SKILL.md.tmpl @@ -75,6 +75,14 @@ Follow the output format specified in the checklist. Respect the suppressions --- +## Step 4.5: Design Review (conditional) + +{{DESIGN_REVIEW_LITE}} + +Include any design findings alongside the findings from Step 4. 
They follow the same Fix-First flow in Step 5 — AUTO-FIX for mechanical CSS fixes, ASK for everything else. + +--- + ## Step 5: Fix-First Review **Every finding gets action — not just critical ones.** diff --git a/review/checklist.md b/review/checklist.md index e2246050..282c9944 100644 --- a/review/checklist.md +++ b/review/checklist.md @@ -84,6 +84,12 @@ To do this: use Grep to find all references to the sibling values (e.g., grep fo - `.expects(:something).never` missing when a code path should explicitly NOT call an external service - Security enforcement features (blocking, rate limiting, auth) without integration tests verifying the enforcement path works end-to-end +#### Completeness Gaps +- Shortcut implementations where the complete version would cost <30 minutes CC time (e.g., partial enum handling, incomplete error paths, missing edge cases that are straightforward to add) +- Options presented with only human-team effort estimates — should show both human and CC+gstack time +- Test coverage gaps where adding the missing tests is a "lake" not an "ocean" (e.g., missing negative-path tests, missing edge case tests that mirror happy-path structure) +- Features implemented at 80-90% when 100% is achievable with modest additional code + #### Crypto & Entropy - Truncation of data instead of hashing (last N chars instead of SHA-256) — less entropy, easier collisions - `rand()` / `Random.rand` for security-sensitive values — use `SecureRandom` instead @@ -113,6 +119,7 @@ CRITICAL (highest severity): INFORMATIONAL (lower severity): ├─ LLM Output Trust Boundary ├─ Dead Code & Consistency └─ Enum & Value Completeness ├─ LLM Prompt Issues ├─ Test Gaps + ├─ Completeness Gaps ├─ Crypto & Entropy ├─ Time Window Safety ├─ Type Coercion at Boundaries diff --git a/review/design-checklist.md b/review/design-checklist.md new file mode 100644 index 00000000..90075165 --- /dev/null +++ b/review/design-checklist.md @@ -0,0 +1,132 @@ +# Design Review Checklist (Lite) + +> 
**Subset of DESIGN_METHODOLOGY** — when adding items here, also update `generateDesignMethodology()` in `scripts/gen-skill-docs.ts`, and vice versa. + +## Instructions + +This checklist applies to **source code in the diff** — not rendered output. Read each changed frontend file (full file, not just diff hunks) and flag anti-patterns. + +**Trigger:** Only run this checklist if the diff touches frontend files. Use `gstack-diff-scope` to detect: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null) +``` + +If `SCOPE_FRONTEND=false`, skip the entire design review silently. + +**DESIGN.md calibration:** If `DESIGN.md` or `design-system.md` exists in the repo root, read it first. All findings are calibrated against the project's stated design system. Patterns explicitly blessed in DESIGN.md are NOT flagged. If no DESIGN.md exists, use universal design principles. + +--- + +## Confidence Tiers + +Each item is tagged with a detection confidence level: + +- **[HIGH]** — Reliably detectable via grep/pattern match. Definitive findings. +- **[MEDIUM]** — Detectable via pattern aggregation or heuristic. Flag as findings but expect some noise. +- **[LOW]** — Requires understanding visual intent. Present as: "Possible issue — verify visually or run /design-review." + +--- + +## Classification + +**AUTO-FIX** (mechanical CSS fixes only — HIGH confidence, no design judgment needed): +- `outline: none` without replacement → add `outline: revert` or `&:focus-visible { outline: 2px solid currentColor; }` +- `!important` in new CSS → remove and fix specificity +- `font-size` < 16px on body text → bump to 16px + +**ASK** (everything else — requires design judgment): +- All AI slop findings, typography structure, spacing choices, interaction state gaps, DESIGN.md violations + +**LOW confidence items** → present as "Possible: [description]. Verify visually or run /design-review." Never AUTO-FIX. 
+ +--- + +## Output Format + +``` +Design Review: N issues (X auto-fixable, Y need input, Z possible) + +**AUTO-FIXED:** +- [file:line] Problem → fix applied + +**NEEDS INPUT:** +- [file:line] Problem description + Recommended fix: suggested fix + +**POSSIBLE (verify visually):** +- [file:line] Possible issue — verify with /design-review +``` + +If no issues found: `Design Review: No issues found.` + +If no frontend files changed: skip silently, no output. + +--- + +## Categories + +### 1. AI Slop Detection (6 items) — highest priority + +These are the telltale signs of AI-generated UI that no designer at a respected studio would ship. + +- **[MEDIUM]** Purple/violet/indigo gradient backgrounds or blue-to-purple color schemes. Look for `linear-gradient` with values in the `#6366f1`–`#8b5cf6` range, or CSS custom properties resolving to purple/violet. + +- **[LOW]** The 3-column feature grid: icon-in-colored-circle + bold title + 2-line description, repeated 3x symmetrically. Look for a grid/flex container with exactly 3 children that each contain a circular element + heading + paragraph. + +- **[LOW]** Icons in colored circles as section decoration. Look for elements with `border-radius: 50%` + a background color used as decorative containers for icons. + +- **[HIGH]** Centered everything: `text-align: center` on all headings, descriptions, and cards. Grep for `text-align: center` density — if >60% of text containers use center alignment, flag it. + +- **[MEDIUM]** Uniform bubbly border-radius on every element: same large radius (16px+) applied to cards, buttons, inputs, containers uniformly. Aggregate `border-radius` values — if >80% use the same value ≥16px, flag it. + +- **[MEDIUM]** Generic hero copy: "Welcome to [X]", "Unlock the power of...", "Your all-in-one solution for...", "Revolutionize your...", "Streamline your workflow". Grep HTML/JSX content for these patterns. + +### 2. Typography (4 items) + +- **[HIGH]** Body text `font-size` < 16px. 
Grep for `font-size` declarations on `body`, `p`, `.text`, or base styles. Values below 16px (or 1rem when base is 16px) are flagged. + +- **[HIGH]** More than 3 font families introduced in the diff. Count distinct `font-family` declarations. Flag if >3 unique families appear across changed files. + +- **[HIGH]** Heading hierarchy skipping levels: `h1` followed by `h3` without an `h2` in the same file/component. Check HTML/JSX for heading tags. + +- **[HIGH]** Blacklisted fonts: Papyrus, Comic Sans, Lobster, Impact, Jokerman. Grep `font-family` for these names. + +### 3. Spacing & Layout (4 items) + +- **[MEDIUM]** Arbitrary spacing values not on a 4px or 8px scale, when DESIGN.md specifies a spacing scale. Check `margin`, `padding`, `gap` values against the stated scale. Only flag when DESIGN.md defines a scale. + +- **[MEDIUM]** Fixed widths without responsive handling: `width: NNNpx` on containers without `max-width` or `@media` breakpoints. Risk of horizontal scroll on mobile. + +- **[MEDIUM]** Missing `max-width` on text containers: body text or paragraph containers with no `max-width` set, allowing lines >75 characters. Check for `max-width` on text wrappers. + +- **[HIGH]** `!important` in new CSS rules. Grep for `!important` in added lines. Almost always a specificity escape hatch that should be fixed properly. + +### 4. Interaction States (3 items) + +- **[MEDIUM]** Interactive elements (buttons, links, inputs) missing hover/focus states. Check if `:hover` and `:focus` or `:focus-visible` pseudo-classes exist for new interactive element styles. + +- **[HIGH]** `outline: none` or `outline: 0` without a replacement focus indicator. Grep for `outline:\s*none` or `outline:\s*0`. This removes keyboard accessibility. + +- **[LOW]** Touch targets < 44px on interactive elements. Check `min-height`/`min-width`/`padding` on buttons and links. Requires computing effective size from multiple properties — low confidence from code alone. + +### 5. 
DESIGN.md Violations (3 items, conditional) + +Only apply if `DESIGN.md` or `design-system.md` exists: + +- **[MEDIUM]** Colors not in the stated palette. Compare color values in changed CSS against the palette defined in DESIGN.md. + +- **[MEDIUM]** Fonts not in the stated typography section. Compare `font-family` values against DESIGN.md's font list. + +- **[MEDIUM]** Spacing values outside the stated scale. Compare `margin`/`padding`/`gap` values against DESIGN.md's spacing scale. + +--- + +## Suppressions + +Do NOT flag: +- Patterns explicitly documented in DESIGN.md as intentional choices +- Third-party/vendor CSS files (node_modules, vendor directories) +- CSS resets or normalize stylesheets +- Test fixture files +- Generated/minified CSS diff --git a/scripts/eval-select.ts b/scripts/eval-select.ts new file mode 100644 index 00000000..cdbdcc84 --- /dev/null +++ b/scripts/eval-select.ts @@ -0,0 +1,86 @@ +#!/usr/bin/env bun +/** + * Show which E2E and LLM-judge tests would run based on the current git diff. + * + * Usage: + * bun run eval:select # human-readable output + * bun run eval:select --json # machine-readable JSON + * bun run eval:select --base main # override base branch + */ + +import * as path from 'path'; +import { + selectTests, + detectBaseBranch, + getChangedFiles, + E2E_TOUCHFILES, + LLM_JUDGE_TOUCHFILES, + GLOBAL_TOUCHFILES, +} from '../test/helpers/touchfiles'; + +const ROOT = path.resolve(import.meta.dir, '..'); +const args = process.argv.slice(2); +const jsonMode = args.includes('--json'); +const baseIdx = args.indexOf('--base'); +const baseOverride = baseIdx >= 0 ? 
args[baseIdx + 1] : undefined; + +// Detect base branch +const baseBranch = baseOverride || detectBaseBranch(ROOT) || 'main'; +const changedFiles = getChangedFiles(baseBranch, ROOT); + +if (changedFiles.length === 0) { + if (jsonMode) { + console.log(JSON.stringify({ base: baseBranch, changed_files: 0, e2e: 'all', llm_judge: 'all', reason: 'no diff — would run all tests' })); + } else { + console.log(`Base: ${baseBranch}`); + console.log('No changed files detected — all tests would run.'); + } + process.exit(0); +} + +const e2eSelection = selectTests(changedFiles, E2E_TOUCHFILES, GLOBAL_TOUCHFILES); +const llmSelection = selectTests(changedFiles, LLM_JUDGE_TOUCHFILES, GLOBAL_TOUCHFILES); + +if (jsonMode) { + console.log(JSON.stringify({ + base: baseBranch, + changed_files: changedFiles, + e2e: { + selected: e2eSelection.selected, + skipped: e2eSelection.skipped, + reason: e2eSelection.reason, + count: `${e2eSelection.selected.length}/${Object.keys(E2E_TOUCHFILES).length}`, + }, + llm_judge: { + selected: llmSelection.selected, + skipped: llmSelection.skipped, + reason: llmSelection.reason, + count: `${llmSelection.selected.length}/${Object.keys(LLM_JUDGE_TOUCHFILES).length}`, + }, + }, null, 2)); +} else { + console.log(`Base: ${baseBranch}`); + console.log(`Changed files: ${changedFiles.length}`); + console.log(); + + console.log(`E2E (${e2eSelection.reason}): ${e2eSelection.selected.length}/${Object.keys(E2E_TOUCHFILES).length} tests`); + if (e2eSelection.selected.length > 0 && e2eSelection.selected.length < Object.keys(E2E_TOUCHFILES).length) { + console.log(` Selected: ${e2eSelection.selected.join(', ')}`); + console.log(` Skipped: ${e2eSelection.skipped.join(', ')}`); + } else if (e2eSelection.selected.length === 0) { + console.log(' No E2E tests affected.'); + } else { + console.log(' All E2E tests selected.'); + } + console.log(); + + console.log(`LLM-judge (${llmSelection.reason}): ${llmSelection.selected.length}/${Object.keys(LLM_JUDGE_TOUCHFILES).length} 
tests`); + if (llmSelection.selected.length > 0 && llmSelection.selected.length < Object.keys(LLM_JUDGE_TOUCHFILES).length) { + console.log(` Selected: ${llmSelection.selected.join(', ')}`); + console.log(` Skipped: ${llmSelection.skipped.join(', ')}`); + } else if (llmSelection.selected.length === 0) { + console.log(' No LLM-judge tests affected.'); + } else { + console.log(' All LLM-judge tests selected.'); + } +} diff --git a/scripts/gen-skill-docs.ts b/scripts/gen-skill-docs.ts index 52f1145c..1a5d5ee2 100644 --- a/scripts/gen-skill-docs.ts +++ b/scripts/gen-skill-docs.ts @@ -107,22 +107,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" \`\`\` If output shows \`UPGRADE_AVAILABLE \`: read \`~/.claude/skills/gstack/gstack-upgrade/SKILL.md\` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If \`JUST_UPGRADED \`: tell user "Running gstack v{to} (just updated!)" and continue. +If \`LAKE_INTRO\` is \`no\`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +\`\`\`bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +\`\`\` + +Only run \`open\` if the user says yes. Always run \`touch\` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. 
**Re-ground:** State the project, the current branch (use the \`_BRANCH\` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** \`RECOMMENDATION: Choose [X] because [one-line reason]\` -4. **Options:** Lettered options: \`A) ... B) ... C) ...\` +3. **Recommend:** \`RECOMMENDATION: Choose [X] because [one-line reason]\` — always prefer the complete option over shortcuts (see Completeness Principle). Include \`Completeness: X/10\` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: \`A) ... B) ... C) ...\` — when an option involves effort, show both scales: \`(human: ~X / CC: ~Y)\` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. 
ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If \`_CONTRIB\` is \`true\`: you are in **contributor mode**. You're a gstack user who also helps make it better. @@ -476,9 +515,49 @@ Minimum 0 per category. 7. **Test like a user.** Use realistic data. Walk through complete workflows end-to-end. 8. **Depth over breadth.** 5-10 well-documented issues with evidence > 20 vague descriptions. 9. 
**Never delete output files.** Screenshots and reports accumulate — that's intentional. -10. **Use \`snapshot -C\` for tricky UIs.** Finds clickable divs that the accessibility tree misses.`; +10. **Use \`snapshot -C\` for tricky UIs.** Finds clickable divs that the accessibility tree misses. +11. **Show screenshots to the user.** After every \`$B screenshot\`, \`$B snapshot -a -o\`, or \`$B responsive\` command, use the Read tool on the output file(s) so the user can see them inline. For \`responsive\` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user.`; } +function generateDesignReviewLite(): string { + return `## Design Review (conditional, diff-scoped) + +Check if the diff touches frontend files using \`gstack-diff-scope\`: + +\`\`\`bash +eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null) +\`\`\` + +**If \`SCOPE_FRONTEND=false\`:** Skip design review silently. No output. + +**If \`SCOPE_FRONTEND=true\`:** + +1. **Check for DESIGN.md.** If \`DESIGN.md\` or \`design-system.md\` exists in the repo root, read it. All design findings are calibrated against it — patterns blessed in DESIGN.md are not flagged. If not found, use universal design principles. + +2. **Read \`.claude/skills/review/design-checklist.md\`.** If the file cannot be read, skip design review with a note: "Design checklist not found — skipping design review." + +3. **Read each changed frontend file** (full file, not just diff hunks). Frontend files are identified by the patterns listed in the checklist. + +4. **Apply the design checklist** against the changed files. For each item: + - **[HIGH] mechanical CSS fix** (\`outline: none\`, \`!important\`, \`font-size < 16px\`): classify as AUTO-FIX + - **[HIGH/MEDIUM] design judgment needed**: classify as ASK + - **[LOW] intent-based detection**: present as "Possible — verify visually or run /design-review" + +5. 
**Include findings** in the review output under a "Design Review" header, following the output format in the checklist. Design findings merge with code review findings into the same Fix-First flow. + +6. **Log the result** for the Review Readiness Dashboard: + +\`\`\`bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"design-review-lite","timestamp":"TIMESTAMP","status":"STATUS","findings":N,"auto_fixed":M}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +\`\`\` + +Substitute: TIMESTAMP = ISO 8601 datetime, STATUS = "clean" if 0 findings or "issues_found", N = total findings, M = auto-fixed count.`; +} + +// NOTE: design-checklist.md is a subset of this methodology for code-level detection. +// When adding items here, also update review/design-checklist.md, and vice versa. function generateDesignMethodology(): string { return `## Modes @@ -729,7 +808,7 @@ Compare screenshots and observations across pages for: **Project-scoped:** \`\`\`bash -SLUG=$(git remote get-url origin 2>/dev/null | sed 's|.*[:/]\\([^/]*/[^/]*\\)\\.git$|\\1|;s|.*[:/]\\([^/]*/[^/]*\\)$|\\1|' | tr '/' '-') +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) mkdir -p ~/.gstack/projects/$SLUG \`\`\` Write to: \`~/.gstack/projects/{slug}/{user}-{branch}-design-audit-{datetime}.md\` @@ -809,7 +888,203 @@ Tie everything to user goals and product objectives. Always suggest specific imp 7. **Use \`snapshot -C\` for tricky UIs.** Finds clickable divs that the accessibility tree misses. 8. **Responsive is design, not just "not broken."** A stacked desktop layout on mobile is not responsive design — it's lazy. Evaluate whether the mobile layout makes *design* sense. 9. **Document incrementally.** Write each finding to the report as you find it. Don't batch. -10. **Depth over breadth.** 5-10 well-documented findings with screenshots and specific suggestions > 20 vague observations.`; +10. 
**Depth over breadth.** 5-10 well-documented findings with screenshots and specific suggestions > 20 vague observations. +11. **Show screenshots to the user.** After every \`$B screenshot\`, \`$B snapshot -a -o\`, or \`$B responsive\` command, use the Read tool on the output file(s) so the user can see them inline. For \`responsive\` (3 files), Read all three. This is critical — without it, screenshots are invisible to the user.`; +} + +function generateReviewDashboard(): string { + return `## Review Readiness Dashboard + +After completing the review, read the review log and config to display the dashboard. + +\`\`\`bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" +echo "---CONFIG---" +~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" +\`\`\` + +Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. For Design Review, show whichever is more recent between \`plan-design-review\` (full visual audit) and \`design-review-lite\` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. 
Display: + +\`\`\` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 0 | — | — | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ +\`\`\` + +**Review tiers:** +- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with \\\`gstack-config set skip_eng_review true\\\` (the "don't bother me" setting). +- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. +- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. 
+ +**Verdict logic:** +- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or \\\`skip_eng_review\\\` is \\\`true\\\`) +- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues +- CEO and Design reviews are shown for context but never block shipping +- If \\\`skip_eng_review\\\` config is \\\`true\\\`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED`; +} + +function generateTestBootstrap(): string { + return `## Test Framework Bootstrap + +**Detect existing test framework and project runtime:** + +\`\`\`bash +# Detect project runtime +[ -f Gemfile ] && echo "RUNTIME:ruby" +[ -f package.json ] && echo "RUNTIME:node" +[ -f requirements.txt ] || [ -f pyproject.toml ] && echo "RUNTIME:python" +[ -f go.mod ] && echo "RUNTIME:go" +[ -f Cargo.toml ] && echo "RUNTIME:rust" +[ -f composer.json ] && echo "RUNTIME:php" +[ -f mix.exs ] && echo "RUNTIME:elixir" +# Detect sub-frameworks +[ -f Gemfile ] && grep -q "rails" Gemfile 2>/dev/null && echo "FRAMEWORK:rails" +[ -f package.json ] && grep -q '"next"' package.json 2>/dev/null && echo "FRAMEWORK:nextjs" +# Check for existing test infrastructure +ls jest.config.* vitest.config.* playwright.config.* .rspec pytest.ini pyproject.toml phpunit.xml 2>/dev/null +ls -d test/ tests/ spec/ __tests__/ cypress/ e2e/ 2>/dev/null +# Check opt-out marker +[ -f .gstack/no-test-bootstrap ] && echo "BOOTSTRAP_DECLINED" +\`\`\` + +**If test framework detected** (config files or test directories found): +Print "Test framework detected: {name} ({N} existing tests). Skipping bootstrap." +Read 2-3 existing test files to learn conventions (naming, imports, assertion style, setup patterns). +Store conventions as prose context for use in Phase 8e.5 or Step 3.4. **Skip the rest of bootstrap.** + +**If BOOTSTRAP_DECLINED** appears: Print "Test bootstrap previously declined — skipping." 
**Skip the rest of bootstrap.** + +**If NO runtime detected** (no config files found): Use AskUserQuestion: +"I couldn't detect your project's language. What runtime are you using?" +Options: A) Node.js/TypeScript B) Ruby/Rails C) Python D) Go E) Rust F) PHP G) Elixir H) This project doesn't need tests. +If user picks H → write \`.gstack/no-test-bootstrap\` and continue without tests. + +**If runtime detected but no test framework — bootstrap:** + +### B2. Research best practices + +Use WebSearch to find current best practices for the detected runtime: +- \`"[runtime] best test framework 2025 2026"\` +- \`"[framework A] vs [framework B] comparison"\` + +If WebSearch is unavailable, use this built-in knowledge table: + +| Runtime | Primary recommendation | Alternative | +|---------|----------------------|-------------| +| Ruby/Rails | minitest + fixtures + capybara | rspec + factory_bot + shoulda-matchers | +| Node.js | vitest + @testing-library | jest + @testing-library | +| Next.js | vitest + @testing-library/react + playwright | jest + cypress | +| Python | pytest + pytest-cov | unittest | +| Go | stdlib testing + testify | stdlib only | +| Rust | cargo test (built-in) + mockall | — | +| PHP | phpunit + mockery | pest | +| Elixir | ExUnit (built-in) + ex_machina | — | + +### B3. Framework selection + +Use AskUserQuestion: +"I detected this is a [Runtime/Framework] project with no test framework. I researched current best practices. Here are the options: +A) [Primary] — [rationale]. Includes: [packages]. Supports: unit, integration, smoke, e2e +B) [Alternative] — [rationale]. Includes: [packages] +C) Skip — don't set up testing right now +RECOMMENDATION: Choose A because [reason based on project context]" + +If user picks C → write \`.gstack/no-test-bootstrap\`. Tell user: "If you change your mind later, delete \`.gstack/no-test-bootstrap\` and re-run." Continue without tests. 
+ +If multiple runtimes detected (monorepo) → ask which runtime to set up first, with option to do both sequentially. + +### B4. Install and configure + +1. Install the chosen packages (npm/bun/gem/pip/etc.) +2. Create minimal config file +3. Create directory structure (test/, spec/, etc.) +4. Create one example test matching the project's code to verify setup works + +If package installation fails → debug once. If still failing → revert with \`git checkout -- package.json package-lock.json\` (or equivalent for the runtime). Warn user and continue without tests. + +### B4.5. First real tests + +Generate 3-5 real tests for existing code: + +1. **Find recently changed files:** \`git log --since=30.days --name-only --format="" | sort | uniq -c | sort -rn | head -10\` +2. **Prioritize by risk:** Error handlers > business logic with conditionals > API endpoints > pure functions +3. **For each file:** Write one test that tests real behavior with meaningful assertions. Never \`expect(x).toBeDefined()\` — test what the code DOES. +4. Run each test. Passes → keep. Fails → fix once. Still fails → delete silently. +5. Generate at least 1 test, cap at 5. + +Never import secrets, API keys, or credentials in test files. Use environment variables or test fixtures. + +### B5. Verify + +\`\`\`bash +# Run the full test suite to confirm everything works +{detected test command} +\`\`\` + +If tests fail → debug once. If still failing → revert all bootstrap changes and warn user. + +### B5.5. CI/CD pipeline + +\`\`\`bash +# Check CI provider +ls -d .github/ 2>/dev/null && echo "CI:github" +ls .gitlab-ci.yml .circleci/ bitrise.yml 2>/dev/null +\`\`\` + +If \`.github/\` exists (or no CI detected — default to GitHub Actions): +Create \`.github/workflows/test.yml\` with: +- \`runs-on: ubuntu-latest\` +- Appropriate setup action for the runtime (setup-node, setup-ruby, setup-python, etc.) 
+- The same test command verified in B5 +- Trigger: push + pull_request + +If non-GitHub CI detected → skip CI generation with note: "Detected {provider} — CI pipeline generation supports GitHub Actions only. Add test step to your existing pipeline manually." + +### B6. Create TESTING.md + +First check: If TESTING.md already exists → read it and update/append rather than overwriting. Never destroy existing content. + +Write TESTING.md with: +- Philosophy: "100% test coverage is the key to great vibe coding. Tests let you move fast, trust your instincts, and ship with confidence — without them, vibe coding is just yolo coding. With tests, it's a superpower." +- Framework name and version +- How to run tests (the verified command from B5) +- Test layers: Unit tests (what, where, when), Integration tests, Smoke tests, E2E tests +- Conventions: file naming, assertion style, setup/teardown patterns + +### B7. Update CLAUDE.md + +First check: If CLAUDE.md already has a \`## Testing\` section → skip. Don't duplicate. + +Append a \`## Testing\` section: +- Run command and test directory +- Reference to TESTING.md +- Test expectations: + - 100% test coverage is the goal — tests make vibe coding safe + - When writing new functions, write a corresponding test + - When fixing a bug, write a regression test + - When adding error handling, write a test that triggers the error + - When adding a conditional (if/else, switch), write tests for BOTH paths + - Never commit code that makes existing tests fail + +### B8. Commit + +\`\`\`bash +git status --porcelain +\`\`\` + +Only commit if there are changes. 
Stage all bootstrap files (config, test directory, TESTING.md, CLAUDE.md, .github/workflows/test.yml if created): +\`git commit -m "chore: bootstrap test framework ({framework name})"\` + +---`; } const RESOLVERS: Record string> = { @@ -820,6 +1095,9 @@ const RESOLVERS: Record string> = { BASE_BRANCH_DETECT: generateBaseBranchDetect, QA_METHODOLOGY: generateQAMethodology, DESIGN_METHODOLOGY: generateDesignMethodology, + DESIGN_REVIEW_LITE: generateDesignReviewLite, + REVIEW_DASHBOARD: generateReviewDashboard, + TEST_BOOTSTRAP: generateTestBootstrap, }; // ─── Template Processing ──────────────────────────────────── @@ -875,7 +1153,7 @@ function findTemplates(): string[] { path.join(ROOT, 'gstack-upgrade', 'SKILL.md.tmpl'), path.join(ROOT, 'setup-team-sync', 'SKILL.md.tmpl'), path.join(ROOT, 'plan-design-review', 'SKILL.md.tmpl'), - path.join(ROOT, 'qa-design-review', 'SKILL.md.tmpl'), + path.join(ROOT, 'design-review', 'SKILL.md.tmpl'), path.join(ROOT, 'design-consultation', 'SKILL.md.tmpl'), path.join(ROOT, 'document-release', 'SKILL.md.tmpl'), ]; diff --git a/scripts/skill-check.ts b/scripts/skill-check.ts index 97c417ef..3be0245c 100644 --- a/scripts/skill-check.ts +++ b/scripts/skill-check.ts @@ -28,7 +28,7 @@ const SKILL_FILES = [ 'plan-eng-review/SKILL.md', 'setup-browser-cookies/SKILL.md', 'plan-design-review/SKILL.md', - 'qa-design-review/SKILL.md', + 'design-review/SKILL.md', 'gstack-upgrade/SKILL.md', 'document-release/SKILL.md', ].filter(f => fs.existsSync(path.join(ROOT, f))); diff --git a/setup-browser-cookies/SKILL.md b/setup-browser-cookies/SKILL.md index faebe520..da64dec4 100644 --- a/setup-browser-cookies/SKILL.md +++ b/setup-browser-cookies/SKILL.md @@ -25,22 +25,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f 
~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. 
If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. 
Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. You're a gstack user who also helps make it better. diff --git a/ship/SKILL.md b/ship/SKILL.md index 89a50475..0db81d8d 100644 --- a/ship/SKILL.md +++ b/ship/SKILL.md @@ -11,6 +11,7 @@ allowed-tools: - Grep - Glob - AskUserQuestion + - WebSearch --- @@ -27,22 +28,61 @@ find ~/.gstack/sessions -mmin +120 -type f -delete 2>/dev/null || true _CONTRIB=$(~/.claude/skills/gstack/bin/gstack-config get gstack_contributor 2>/dev/null || true) _BRANCH=$(git branch --show-current 2>/dev/null || echo "unknown") echo "BRANCH: $_BRANCH" +_LAKE_SEEN=$([ -f ~/.gstack/.completeness-intro-seen ] && echo "yes" || echo "no") +echo "LAKE_INTRO: $_LAKE_SEEN" ``` If output shows `UPGRADE_AVAILABLE `: read `~/.claude/skills/gstack/gstack-upgrade/SKILL.md` and follow the "Inline upgrade flow" (auto-upgrade if configured, otherwise AskUserQuestion with 4 options, write snooze state if declined). If `JUST_UPGRADED `: tell user "Running gstack v{to} (just updated!)" and continue. +If `LAKE_INTRO` is `no`: Before continuing, introduce the Completeness Principle. +Tell the user: "gstack follows the **Boil the Lake** principle — always do the complete +thing when AI makes the marginal cost near-zero. 
Read more: https://garryslist.org/posts/boil-the-ocean" +Then offer to open the essay in their default browser: + +```bash +open https://garryslist.org/posts/boil-the-ocean +touch ~/.gstack/.completeness-intro-seen +``` + +Only run `open` if the user says yes. Always run `touch` to mark as seen. This only happens once. + ## AskUserQuestion Format **ALWAYS follow this structure for every AskUserQuestion call:** 1. **Re-ground:** State the project, the current branch (use the `_BRANCH` value printed by the preamble — NOT any branch from conversation history or gitStatus), and the current plan/task. (1-2 sentences) 2. **Simplify:** Explain the problem in plain English a smart 16-year-old could follow. No raw function names, no internal jargon, no implementation details. Use concrete examples and analogies. Say what it DOES, not what it's called. -3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` -4. **Options:** Lettered options: `A) ... B) ... C) ...` +3. **Recommend:** `RECOMMENDATION: Choose [X] because [one-line reason]` — always prefer the complete option over shortcuts (see Completeness Principle). Include `Completeness: X/10` for each option. Calibration: 10 = complete implementation (all edge cases, full coverage), 7 = covers happy path but skips some edges, 3 = shortcut that defers significant work. If both options are 8+, pick the higher; if one is ≤5, flag it. +4. **Options:** Lettered options: `A) ... B) ... C) ...` — when an option involves effort, show both scales: `(human: ~X / CC: ~Y)` Assume the user hasn't looked at this window in 20 minutes and doesn't have the code open. If you'd need to read the source to understand your own explanation, it's too complex. Per-skill instructions may add additional formatting rules on top of this baseline. +## Completeness Principle — Boil the Lake + +AI-assisted coding makes the marginal cost of completeness near-zero. 
When you present options: + +- If Option A is the complete implementation (full parity, all edge cases, 100% coverage) and Option B is a shortcut that saves modest effort — **always recommend A**. The delta between 80 lines and 150 lines is meaningless with CC+gstack. "Good enough" is the wrong instinct when "complete" costs minutes more. +- **Lake vs. ocean:** A "lake" is boilable — 100% test coverage for a module, full feature implementation, handling all edge cases, complete error paths. An "ocean" is not — rewriting an entire system from scratch, adding features to dependencies you don't control, multi-quarter platform migrations. Recommend boiling lakes. Flag oceans as out of scope. +- **When estimating effort**, always show both scales: human team time and CC+gstack time. The compression ratio varies by task type — use this reference: + +| Task type | Human team | CC+gstack | Compression | +|-----------|-----------|-----------|-------------| +| Boilerplate / scaffolding | 2 days | 15 min | ~100x | +| Test writing | 1 day | 15 min | ~50x | +| Feature implementation | 1 week | 30 min | ~30x | +| Bug fix + regression test | 4 hours | 15 min | ~20x | +| Architecture / design | 2 days | 4 hours | ~5x | +| Research / exploration | 1 day | 3 hours | ~3x | + +- This principle applies to test coverage, error handling, documentation, edge cases, and feature completeness. Don't skip the last 10% to "save time" — with AI, that 10% costs seconds. + +**Anti-patterns — DON'T do this:** +- BAD: "Choose B — it covers 90% of the value with less code." (If A is only 70 lines more, choose A.) +- BAD: "We can skip edge case handling to save time." (Edge case handling costs minutes with CC.) +- BAD: "Let's defer test coverage to a follow-up PR." (Tests are the cheapest lake to boil.) +- BAD: Quoting only human-team effort: "This would take 2 weeks." (Say: "2 weeks human / ~1 hour CC.") + ## Contributor Mode If `_CONTRIB` is `true`: you are in **contributor mode**. 
You're a gstack user who also helps make it better. @@ -121,6 +161,7 @@ You are running the `/ship` workflow. This is a **non-interactive, fully automat - Multi-file changesets (auto-split into bisectable commits) - TODOS.md completed-item detection (auto-mark) - Auto-fixable review findings (dead code, N+1, stale comments — fixed automatically) +- Test coverage gaps (auto-generate and commit, or flag in PR body) --- @@ -132,6 +173,69 @@ You are running the `/ship` workflow. This is a **non-interactive, fully automat 3. Run `git diff ...HEAD --stat` and `git log ..HEAD --oneline` to understand what's being shipped. +4. Check review readiness: + +## Review Readiness Dashboard + +After completing the review, read the review log and config to display the dashboard. + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +cat ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_REVIEWS" +echo "---CONFIG---" +~/.claude/skills/gstack/bin/gstack-config get skip_eng_review 2>/dev/null || echo "false" +``` + +Parse the output. Find the most recent entry for each skill (plan-ceo-review, plan-eng-review, plan-design-review, design-review-lite). Ignore entries with timestamps older than 7 days. For Design Review, show whichever is more recent between `plan-design-review` (full visual audit) and `design-review-lite` (code-level check). Append "(FULL)" or "(LITE)" to the status to distinguish. 
Display: + +``` ++====================================================================+ +| REVIEW READINESS DASHBOARD | ++====================================================================+ +| Review | Runs | Last Run | Status | Required | +|-----------------|------|---------------------|-----------|----------| +| Eng Review | 1 | 2026-03-16 15:00 | CLEAR | YES | +| CEO Review | 0 | — | — | no | +| Design Review | 0 | — | — | no | ++--------------------------------------------------------------------+ +| VERDICT: CLEARED — Eng Review passed | ++====================================================================+ +``` + +**Review tiers:** +- **Eng Review (required by default):** The only review that gates shipping. Covers architecture, code quality, tests, performance. Can be disabled globally with \`gstack-config set skip_eng_review true\` (the "don't bother me" setting). +- **CEO Review (optional):** Use your judgment. Recommend it for big product/business changes, new user-facing features, or scope decisions. Skip for bug fixes, refactors, infra, and cleanup. +- **Design Review (optional):** Use your judgment. Recommend it for UI/UX changes. Skip for backend-only, infra, or prompt-only changes. + +**Verdict logic:** +- **CLEARED**: Eng Review has >= 1 entry within 7 days with status "clean" (or \`skip_eng_review\` is \`true\`) +- **NOT CLEARED**: Eng Review missing, stale (>7 days), or has open issues +- CEO and Design reviews are shown for context but never block shipping +- If \`skip_eng_review\` config is \`true\`, Eng Review shows "SKIPPED (global)" and verdict is CLEARED + +If the Eng Review is NOT "CLEAR": + +1. 
**Check for a prior override on this branch:** + ```bash + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) + grep '"skill":"ship-review-override"' ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_OVERRIDE" + ``` + If an override exists, display the dashboard and note "Review gate previously accepted — continuing." Do NOT ask again. + +2. **If no override exists,** use AskUserQuestion: + - Show that Eng Review is missing or has open issues + - RECOMMENDATION: Choose C if the change is obviously trivial (< 20 lines, typo fix, config-only); Choose B for larger changes + - Options: A) Ship anyway B) Abort — run /plan-eng-review first C) Change is too small to need eng review + - If CEO Review is missing, mention as informational ("CEO Review not run — recommended for product changes") but do NOT block + - For Design Review: run `eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null)`. If `SCOPE_FRONTEND=true` and no design review (plan-design-review or design-review-lite) exists in the dashboard, mention: "Design Review not run — this PR changes frontend code. The lite design check will run automatically in Step 3.5, but consider running /design-review for a full visual audit post-implementation." Still never block. + +3. **If the user chooses A or C,** persist the decision so future `/ship` runs on this branch skip the gate: + ```bash + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) + echo '{"skill":"ship-review-override","timestamp":"'"$(date -u +%Y-%m-%dT%H:%M:%SZ)"'","decision":"USER_CHOICE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl + ``` + Substitute USER_CHOICE with "ship_anyway" or "not_relevant". 
+
 
 ---
 
 ## Step 2: Merge the base branch (BEFORE tests)
 
@@ -148,6 +252,163 @@ git fetch origin && git merge origin/ --no-edit
 
 ---
 
+## Step 2.5: Test Framework Bootstrap
+
+
+
+**Detect existing test framework and project runtime:**
+
+```bash
+# Detect project runtime
+[ -f Gemfile ] && echo "RUNTIME:ruby"
+[ -f package.json ] && echo "RUNTIME:node"
+[ -f requirements.txt ] || [ -f pyproject.toml ] && echo "RUNTIME:python"
+[ -f go.mod ] && echo "RUNTIME:go"
+[ -f Cargo.toml ] && echo "RUNTIME:rust"
+[ -f composer.json ] && echo "RUNTIME:php"
+[ -f mix.exs ] && echo "RUNTIME:elixir"
+# Detect sub-frameworks
+[ -f Gemfile ] && grep -q "rails" Gemfile 2>/dev/null && echo "FRAMEWORK:rails"
+[ -f package.json ] && grep -q '"next"' package.json 2>/dev/null && echo "FRAMEWORK:nextjs"
+# Check for existing test infrastructure
+ls jest.config.* vitest.config.* playwright.config.* .rspec pytest.ini pyproject.toml phpunit.xml 2>/dev/null
+ls -d test/ tests/ spec/ __tests__/ cypress/ e2e/ 2>/dev/null
+# Check opt-out marker
+[ -f .gstack/no-test-bootstrap ] && echo "BOOTSTRAP_DECLINED"
+```
+
+**If test framework detected** (config files or test directories found):
+Print "Test framework detected: {name} ({N} existing tests). Skipping bootstrap."
+Read 2-3 existing test files to learn conventions (naming, imports, assertion style, setup patterns).
+Store conventions as prose context for use in Phase 8e.5 or Step 3.4. **Skip the rest of bootstrap.**
+
+**If BOOTSTRAP_DECLINED** appears: Print "Test bootstrap previously declined — skipping." **Skip the rest of bootstrap.**
+
+**If NO runtime detected** (no config files found): Use AskUserQuestion:
+"I couldn't detect your project's language. What runtime are you using?"
+Options: A) Node.js/TypeScript B) Ruby/Rails C) Python D) Go E) Rust F) PHP G) Elixir H) This project doesn't need tests.
+If user picks H → write `.gstack/no-test-bootstrap` and continue without tests.
+ +**If runtime detected but no test framework — bootstrap:** + +### B2. Research best practices + +Use WebSearch to find current best practices for the detected runtime: +- `"[runtime] best test framework 2025 2026"` +- `"[framework A] vs [framework B] comparison"` + +If WebSearch is unavailable, use this built-in knowledge table: + +| Runtime | Primary recommendation | Alternative | +|---------|----------------------|-------------| +| Ruby/Rails | minitest + fixtures + capybara | rspec + factory_bot + shoulda-matchers | +| Node.js | vitest + @testing-library | jest + @testing-library | +| Next.js | vitest + @testing-library/react + playwright | jest + cypress | +| Python | pytest + pytest-cov | unittest | +| Go | stdlib testing + testify | stdlib only | +| Rust | cargo test (built-in) + mockall | — | +| PHP | phpunit + mockery | pest | +| Elixir | ExUnit (built-in) + ex_machina | — | + +### B3. Framework selection + +Use AskUserQuestion: +"I detected this is a [Runtime/Framework] project with no test framework. I researched current best practices. Here are the options: +A) [Primary] — [rationale]. Includes: [packages]. Supports: unit, integration, smoke, e2e +B) [Alternative] — [rationale]. Includes: [packages] +C) Skip — don't set up testing right now +RECOMMENDATION: Choose A because [reason based on project context]" + +If user picks C → write `.gstack/no-test-bootstrap`. Tell user: "If you change your mind later, delete `.gstack/no-test-bootstrap` and re-run." Continue without tests. + +If multiple runtimes detected (monorepo) → ask which runtime to set up first, with option to do both sequentially. + +### B4. Install and configure + +1. Install the chosen packages (npm/bun/gem/pip/etc.) +2. Create minimal config file +3. Create directory structure (test/, spec/, etc.) +4. Create one example test matching the project's code to verify setup works + +If package installation fails → debug once. 
If still failing → revert with `git checkout -- package.json package-lock.json` (or equivalent for the runtime). Warn user and continue without tests. + +### B4.5. First real tests + +Generate 3-5 real tests for existing code: + +1. **Find recently changed files:** `git log --since=30.days --name-only --format="" | sort | uniq -c | sort -rn | head -10` +2. **Prioritize by risk:** Error handlers > business logic with conditionals > API endpoints > pure functions +3. **For each file:** Write one test that tests real behavior with meaningful assertions. Never `expect(x).toBeDefined()` — test what the code DOES. +4. Run each test. Passes → keep. Fails → fix once. Still fails → delete silently. +5. Generate at least 1 test, cap at 5. + +Never import secrets, API keys, or credentials in test files. Use environment variables or test fixtures. + +### B5. Verify + +```bash +# Run the full test suite to confirm everything works +{detected test command} +``` + +If tests fail → debug once. If still failing → revert all bootstrap changes and warn user. + +### B5.5. CI/CD pipeline + +```bash +# Check CI provider +ls -d .github/ 2>/dev/null && echo "CI:github" +ls .gitlab-ci.yml .circleci/ bitrise.yml 2>/dev/null +``` + +If `.github/` exists (or no CI detected — default to GitHub Actions): +Create `.github/workflows/test.yml` with: +- `runs-on: ubuntu-latest` +- Appropriate setup action for the runtime (setup-node, setup-ruby, setup-python, etc.) +- The same test command verified in B5 +- Trigger: push + pull_request + +If non-GitHub CI detected → skip CI generation with note: "Detected {provider} — CI pipeline generation supports GitHub Actions only. Add test step to your existing pipeline manually." + +### B6. Create TESTING.md + +First check: If TESTING.md already exists → read it and update/append rather than overwriting. Never destroy existing content. + +Write TESTING.md with: +- Philosophy: "100% test coverage is the key to great vibe coding. 
Tests let you move fast, trust your instincts, and ship with confidence — without them, vibe coding is just yolo coding. With tests, it's a superpower."
+- Framework name and version
+- How to run tests (the verified command from B5)
+- Test layers: Unit tests (what, where, when), Integration tests, Smoke tests, E2E tests
+- Conventions: file naming, assertion style, setup/teardown patterns
+
+### B7. Update CLAUDE.md
+
+First check: If CLAUDE.md already has a `## Testing` section → skip. Don't duplicate.
+
+Append a `## Testing` section:
+- Run command and test directory
+- Reference to TESTING.md
+- Test expectations:
+  - 100% test coverage is the goal — tests make vibe coding safe
+  - When writing new functions, write a corresponding test
+  - When fixing a bug, write a regression test
+  - When adding error handling, write a test that triggers the error
+  - When adding a conditional (if/else, switch), write tests for BOTH paths
+  - Never commit code that makes existing tests fail
+
+### B8. Commit
+
+```bash
+git status --porcelain
+```
+
+Only commit if there are changes. Stage all bootstrap files (config, test directory, TESTING.md, CLAUDE.md, .github/workflows/test.yml if created):
+`git commit -m "chore: bootstrap test framework ({framework name})"`
+
+
+
+---
+
 ## Step 3: Run tests (on merged code)
 
 **Do NOT run `RAILS_ENV=test bin/rails db:migrate`** — `bin/test-lane` already calls
@@ -232,6 +493,144 @@ If multiple suites need to run, run them sequentially (each needs a test lane).
 
 ---
 
+## Step 3.4: Test Coverage Audit
+
+100% coverage is the goal — every untested path is a path where bugs hide and vibe coding becomes yolo coding. Evaluate what was ACTUALLY coded (from the diff), not what was planned.
+
+**0. Before/after test count:**
+
+```bash
+# Count test files before any generation
+find . -name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' | grep -v node_modules | wc -l
+```
+
+Store this number for the PR body.
+
+**1. 
Trace every codepath changed** using `git diff origin/...HEAD`: + +Read every changed file. For each one, trace how data flows through the code — don't just list functions, actually follow the execution: + +1. **Read the diff.** For each changed file, read the full file (not just the diff hunk) to understand context. +2. **Trace data flow.** Starting from each entry point (route handler, exported function, event listener, component render), follow the data through every branch: + - Where does input come from? (request params, props, database, API call) + - What transforms it? (validation, mapping, computation) + - Where does it go? (database write, API response, rendered output, side effect) + - What can go wrong at each step? (null/undefined, invalid input, network failure, empty collection) +3. **Diagram the execution.** For each changed file, draw an ASCII diagram showing: + - Every function/method that was added or modified + - Every conditional branch (if/else, switch, ternary, guard clause, early return) + - Every error path (try/catch, rescue, error boundary, fallback) + - Every call to another function (trace into it — does IT have untested branches?) + - Every edge: what happens with null input? Empty array? Invalid type? + +This is the critical step — you're building a map of every line of code that can execute differently based on input. Every branch in this diagram needs a test. + +**2. Map user flows, interactions, and error states:** + +Code coverage isn't enough — you need to cover how real users interact with the changed code. For each changed feature, think through: + +- **User flows:** What sequence of actions does a user take that touches this code? Map the full journey (e.g., "user clicks 'Pay' → form validates → API call → success/failure screen"). Each step in the journey needs a test. +- **Interaction edge cases:** What happens when the user does something unexpected? 
+ - Double-click/rapid resubmit + - Navigate away mid-operation (back button, close tab, click another link) + - Submit with stale data (page sat open for 30 minutes, session expired) + - Slow connection (API takes 10 seconds — what does the user see?) + - Concurrent actions (two tabs, same form) +- **Error states the user can see:** For every error the code handles, what does the user actually experience? + - Is there a clear error message or a silent failure? + - Can the user recover (retry, go back, fix input) or are they stuck? + - What happens with no network? With a 500 from the API? With invalid data from the server? +- **Empty/zero/boundary states:** What does the UI show with zero results? With 10,000 results? With a single character input? With maximum-length input? + +Add these to your diagram alongside the code branches. A user flow with no test is just as much a gap as an untested if/else. + +**3. Check each branch against existing tests:** + +Go through your diagram branch by branch — both code paths AND user flows. For each one, search for a test that exercises it: +- Function `processPayment()` → look for `billing.test.ts`, `billing.spec.ts`, `test/billing_test.rb` +- An if/else → look for tests covering BOTH the true AND false path +- An error handler → look for a test that triggers that specific error condition +- A call to `helperFn()` that has its own branches → those branches need tests too +- A user flow → look for an integration or E2E test that walks through the journey +- An interaction edge case → look for a test that simulates the unexpected action + +Quality scoring rubric: +- ★★★ Tests behavior with edge cases AND error paths +- ★★ Tests correct behavior, happy path only +- ★ Smoke test / existence check / trivial assertion (e.g., "it renders", "it doesn't throw") + +**4. 
Output ASCII coverage diagram:** + +Include BOTH code paths and user flows in the same diagram: + +``` +CODE PATH COVERAGE +=========================== +[+] src/services/billing.ts + │ + ├── processPayment() + │ ├── [★★★ TESTED] Happy path + card declined + timeout — billing.test.ts:42 + │ ├── [GAP] Network timeout — NO TEST + │ └── [GAP] Invalid currency — NO TEST + │ + └── refundPayment() + ├── [★★ TESTED] Full refund — billing.test.ts:89 + └── [★ TESTED] Partial refund (checks non-throw only) — billing.test.ts:101 + +USER FLOW COVERAGE +=========================== +[+] Payment checkout flow + │ + ├── [★★★ TESTED] Complete purchase — checkout.e2e.ts:15 + ├── [GAP] Double-click submit — NO TEST + ├── [GAP] Navigate away during payment — NO TEST + └── [★ TESTED] Form validation errors (checks render only) — checkout.test.ts:40 + +[+] Error states + │ + ├── [★★ TESTED] Card declined message — billing.test.ts:58 + ├── [GAP] Network timeout UX (what does user see?) — NO TEST + └── [GAP] Empty cart submission — NO TEST + +───────────────────────────────── +COVERAGE: 5/12 paths tested (42%) + Code paths: 3/5 (60%) + User flows: 2/7 (29%) +QUALITY: ★★★: 2 ★★: 2 ★: 1 +GAPS: 7 paths need tests +───────────────────────────────── +``` + +**Fast path:** All paths covered → "Step 3.4: All new code paths have test coverage ✓" Continue. + +**5. Generate tests for uncovered paths:** + +If test framework detected (or bootstrapped in Step 2.5): +- Prioritize error handlers and edge cases first (happy paths are more likely already tested) +- Read 2-3 existing test files to match conventions exactly +- Generate unit tests. Mock all external dependencies (DB, API, Redis). +- Write tests that exercise the specific uncovered path with real assertions +- Run each test. Passes → commit as `test: coverage for {feature}` +- Fails → fix once. Still fails → revert, note gap in diagram. 
+ +Caps: 30 code paths max, 20 tests generated max (code + user flow combined), 2-min per-test exploration cap. + +If no test framework AND user declined bootstrap → diagram only, no generation. Note: "Test generation skipped — no test framework configured." + +**Diff is test-only changes:** Skip Step 3.4 entirely: "No new application code paths to audit." + +**6. After-count and coverage summary:** + +```bash +# Count test files after generation +find . -name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' | grep -v node_modules | wc -l +``` + +For PR body: `Tests: {before} → {after} (+{delta} new)` +Coverage line: `Test Coverage Audit: N new code paths. M covered (X%). K tests generated, J committed.` + +--- + ## Step 3.5: Pre-Landing Review Review the diff for structural issues that tests don't catch. @@ -244,6 +643,43 @@ Review the diff for structural issues that tests don't catch. - **Pass 1 (CRITICAL):** SQL & Data Safety, LLM Output Trust Boundary - **Pass 2 (INFORMATIONAL):** All remaining categories +## Design Review (conditional, diff-scoped) + +Check if the diff touches frontend files using `gstack-diff-scope`: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null) +``` + +**If `SCOPE_FRONTEND=false`:** Skip design review silently. No output. + +**If `SCOPE_FRONTEND=true`:** + +1. **Check for DESIGN.md.** If `DESIGN.md` or `design-system.md` exists in the repo root, read it. All design findings are calibrated against it — patterns blessed in DESIGN.md are not flagged. If not found, use universal design principles. + +2. **Read `.claude/skills/review/design-checklist.md`.** If the file cannot be read, skip design review with a note: "Design checklist not found — skipping design review." + +3. **Read each changed frontend file** (full file, not just diff hunks). Frontend files are identified by the patterns listed in the checklist. + +4. **Apply the design checklist** against the changed files. 
For each item: + - **[HIGH] mechanical CSS fix** (`outline: none`, `!important`, `font-size < 16px`): classify as AUTO-FIX + - **[HIGH/MEDIUM] design judgment needed**: classify as ASK + - **[LOW] intent-based detection**: present as "Possible — verify visually or run /design-review" + +5. **Include findings** in the review output under a "Design Review" header, following the output format in the checklist. Design findings merge with code review findings into the same Fix-First flow. + +6. **Log the result** for the Review Readiness Dashboard: + +```bash +eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) +mkdir -p ~/.gstack/projects/$SLUG +echo '{"skill":"design-review-lite","timestamp":"TIMESTAMP","status":"STATUS","findings":N,"auto_fixed":M}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl +``` + +Substitute: TIMESTAMP = ISO 8601 datetime, STATUS = "clean" if 0 findings or "issues_found", N = total findings, M = auto-fixed count. + + Include any design findings alongside the code review findings. They follow the same Fix-First flow below. + 4. **Classify each finding as AUTO-FIX or ASK** per the Fix-First Heuristic in checklist.md. Critical findings lean toward ASK; informational lean toward AUTO-FIX. @@ -460,8 +896,16 @@ gh pr create --base --title ": " --body "$(cat <<'EOF' ## Summary +## Test Coverage + + + ## Pre-Landing Review - + + +## Design Review + + ## Eval Results @@ -535,4 +979,5 @@ If leaderboard data is available, print the table. If sync is not configured or - **Split commits for bisectability** — each commit = one logical change. - **TODOS.md completion detection must be conservative.** Only mark items as completed when the diff clearly shows the work is done. - **Use Greptile reply templates from greptile-triage.md.** Every reply includes evidence (inline diff, code references, re-rank suggestion). Never post vague replies. +- **Step 3.4 generates coverage tests.** They must pass before committing. Never commit failing tests. 
- **The goal is: user says `/ship`, next thing they see is the review + PR URL.** diff --git a/ship/SKILL.md.tmpl b/ship/SKILL.md.tmpl index 9a38866f..a6083792 100644 --- a/ship/SKILL.md.tmpl +++ b/ship/SKILL.md.tmpl @@ -11,6 +11,7 @@ allowed-tools: - Grep - Glob - AskUserQuestion + - WebSearch --- {{PREAMBLE}} @@ -39,6 +40,7 @@ You are running the `/ship` workflow. This is a **non-interactive, fully automat - Multi-file changesets (auto-split into bisectable commits) - TODOS.md completed-item detection (auto-mark) - Auto-fixable review findings (dead code, N+1, stale comments — fixed automatically) +- Test coverage gaps (auto-generate and commit, or flag in PR body) --- @@ -50,6 +52,33 @@ You are running the `/ship` workflow. This is a **non-interactive, fully automat 3. Run `git diff ...HEAD --stat` and `git log ..HEAD --oneline` to understand what's being shipped. +4. Check review readiness: + +{{REVIEW_DASHBOARD}} + +If the Eng Review is NOT "CLEAR": + +1. **Check for a prior override on this branch:** + ```bash + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) + grep '"skill":"ship-review-override"' ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl 2>/dev/null || echo "NO_OVERRIDE" + ``` + If an override exists, display the dashboard and note "Review gate previously accepted — continuing." Do NOT ask again. + +2. **If no override exists,** use AskUserQuestion: + - Show that Eng Review is missing or has open issues + - RECOMMENDATION: Choose C if the change is obviously trivial (< 20 lines, typo fix, config-only); Choose B for larger changes + - Options: A) Ship anyway B) Abort — run /plan-eng-review first C) Change is too small to need eng review + - If CEO Review is missing, mention as informational ("CEO Review not run — recommended for product changes") but do NOT block + - For Design Review: run `eval $(~/.claude/skills/gstack/bin/gstack-diff-scope 2>/dev/null)`. 
If `SCOPE_FRONTEND=true` and no design review (plan-design-review or design-review-lite) exists in the dashboard, mention: "Design Review not run — this PR changes frontend code. The lite design check will run automatically in Step 3.5, but consider running /design-review for a full visual audit post-implementation." Still never block. + +3. **If the user chooses A or C,** persist the decision so future `/ship` runs on this branch skip the gate: + ```bash + eval $(~/.claude/skills/gstack/bin/gstack-slug 2>/dev/null) + echo '{"skill":"ship-review-override","timestamp":"'"$(date -u +%Y-%m-%dT%H:%M:%SZ)"'","decision":"USER_CHOICE"}' >> ~/.gstack/projects/$SLUG/$BRANCH-reviews.jsonl + ``` + Substitute USER_CHOICE with "ship_anyway" or "not_relevant". + --- ## Step 2: Merge the base branch (BEFORE tests) @@ -66,6 +95,12 @@ git fetch origin && git merge origin/ --no-edit --- +## Step 2.5: Test Framework Bootstrap + +{{TEST_BOOTSTRAP}} + +--- + ## Step 3: Run tests (on merged code) **Do NOT run `RAILS_ENV=test bin/rails db:migrate`** — `bin/test-lane` already calls @@ -150,6 +185,144 @@ If multiple suites need to run, run them sequentially (each needs a test lane). --- +## Step 3.4: Test Coverage Audit + +100% coverage is the goal — every untested path is a path where bugs hide and vibe coding becomes yolo coding. Evaluate what was ACTUALLY coded (from the diff), not what was planned. + +**0. Before/after test count:** + +```bash +# Count test files before any generation +find . -name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' | grep -v node_modules | wc -l +``` + +Store this number for the PR body. + +**1. Trace every codepath changed** using `git diff origin/...HEAD`: + +Read every changed file. For each one, trace how data flows through the code — don't just list functions, actually follow the execution: + +1. **Read the diff.** For each changed file, read the full file (not just the diff hunk) to understand context. +2. 
**Trace data flow.** Starting from each entry point (route handler, exported function, event listener, component render), follow the data through every branch: + - Where does input come from? (request params, props, database, API call) + - What transforms it? (validation, mapping, computation) + - Where does it go? (database write, API response, rendered output, side effect) + - What can go wrong at each step? (null/undefined, invalid input, network failure, empty collection) +3. **Diagram the execution.** For each changed file, draw an ASCII diagram showing: + - Every function/method that was added or modified + - Every conditional branch (if/else, switch, ternary, guard clause, early return) + - Every error path (try/catch, rescue, error boundary, fallback) + - Every call to another function (trace into it — does IT have untested branches?) + - Every edge: what happens with null input? Empty array? Invalid type? + +This is the critical step — you're building a map of every line of code that can execute differently based on input. Every branch in this diagram needs a test. + +**2. Map user flows, interactions, and error states:** + +Code coverage isn't enough — you need to cover how real users interact with the changed code. For each changed feature, think through: + +- **User flows:** What sequence of actions does a user take that touches this code? Map the full journey (e.g., "user clicks 'Pay' → form validates → API call → success/failure screen"). Each step in the journey needs a test. +- **Interaction edge cases:** What happens when the user does something unexpected? + - Double-click/rapid resubmit + - Navigate away mid-operation (back button, close tab, click another link) + - Submit with stale data (page sat open for 30 minutes, session expired) + - Slow connection (API takes 10 seconds — what does the user see?) 
+ - Concurrent actions (two tabs, same form) +- **Error states the user can see:** For every error the code handles, what does the user actually experience? + - Is there a clear error message or a silent failure? + - Can the user recover (retry, go back, fix input) or are they stuck? + - What happens with no network? With a 500 from the API? With invalid data from the server? +- **Empty/zero/boundary states:** What does the UI show with zero results? With 10,000 results? With a single character input? With maximum-length input? + +Add these to your diagram alongside the code branches. A user flow with no test is just as much a gap as an untested if/else. + +**3. Check each branch against existing tests:** + +Go through your diagram branch by branch — both code paths AND user flows. For each one, search for a test that exercises it: +- Function `processPayment()` → look for `billing.test.ts`, `billing.spec.ts`, `test/billing_test.rb` +- An if/else → look for tests covering BOTH the true AND false path +- An error handler → look for a test that triggers that specific error condition +- A call to `helperFn()` that has its own branches → those branches need tests too +- A user flow → look for an integration or E2E test that walks through the journey +- An interaction edge case → look for a test that simulates the unexpected action + +Quality scoring rubric: +- ★★★ Tests behavior with edge cases AND error paths +- ★★ Tests correct behavior, happy path only +- ★ Smoke test / existence check / trivial assertion (e.g., "it renders", "it doesn't throw") + +**4. 
Output ASCII coverage diagram:** + +Include BOTH code paths and user flows in the same diagram: + +``` +CODE PATH COVERAGE +=========================== +[+] src/services/billing.ts + │ + ├── processPayment() + │ ├── [★★★ TESTED] Happy path + card declined + timeout — billing.test.ts:42 + │ ├── [GAP] Network timeout — NO TEST + │ └── [GAP] Invalid currency — NO TEST + │ + └── refundPayment() + ├── [★★ TESTED] Full refund — billing.test.ts:89 + └── [★ TESTED] Partial refund (checks non-throw only) — billing.test.ts:101 + +USER FLOW COVERAGE +=========================== +[+] Payment checkout flow + │ + ├── [★★★ TESTED] Complete purchase — checkout.e2e.ts:15 + ├── [GAP] Double-click submit — NO TEST + ├── [GAP] Navigate away during payment — NO TEST + └── [★ TESTED] Form validation errors (checks render only) — checkout.test.ts:40 + +[+] Error states + │ + ├── [★★ TESTED] Card declined message — billing.test.ts:58 + ├── [GAP] Network timeout UX (what does user see?) — NO TEST + └── [GAP] Empty cart submission — NO TEST + +───────────────────────────────── +COVERAGE: 5/12 paths tested (42%) + Code paths: 3/5 (60%) + User flows: 2/7 (29%) +QUALITY: ★★★: 2 ★★: 2 ★: 1 +GAPS: 7 paths need tests +───────────────────────────────── +``` + +**Fast path:** All paths covered → "Step 3.4: All new code paths have test coverage ✓" Continue. + +**5. Generate tests for uncovered paths:** + +If test framework detected (or bootstrapped in Step 2.5): +- Prioritize error handlers and edge cases first (happy paths are more likely already tested) +- Read 2-3 existing test files to match conventions exactly +- Generate unit tests. Mock all external dependencies (DB, API, Redis). +- Write tests that exercise the specific uncovered path with real assertions +- Run each test. Passes → commit as `test: coverage for {feature}` +- Fails → fix once. Still fails → revert, note gap in diagram. 
+ +Caps: 30 code paths max, 20 tests generated max (code + user flow combined), 2-min per-test exploration cap. + +If no test framework AND user declined bootstrap → diagram only, no generation. Note: "Test generation skipped — no test framework configured." + +**Diff is test-only changes:** Skip Step 3.4 entirely: "No new application code paths to audit." + +**6. After-count and coverage summary:** + +```bash +# Count test files after generation +find . -name '*.test.*' -o -name '*.spec.*' -o -name '*_test.*' -o -name '*_spec.*' | grep -v node_modules | wc -l +``` + +For PR body: `Tests: {before} → {after} (+{delta} new)` +Coverage line: `Test Coverage Audit: N new code paths. M covered (X%). K tests generated, J committed.` + +--- + ## Step 3.5: Pre-Landing Review Review the diff for structural issues that tests don't catch. @@ -162,6 +335,10 @@ Review the diff for structural issues that tests don't catch. - **Pass 1 (CRITICAL):** SQL & Data Safety, LLM Output Trust Boundary - **Pass 2 (INFORMATIONAL):** All remaining categories +{{DESIGN_REVIEW_LITE}} + + Include any design findings alongside the code review findings. They follow the same Fix-First flow below. + 4. **Classify each finding as AUTO-FIX or ASK** per the Fix-First Heuristic in checklist.md. Critical findings lean toward ASK; informational lean toward AUTO-FIX. @@ -378,8 +555,16 @@ gh pr create --base --title ": " --body "$(cat <<'EOF' ## Summary +## Test Coverage + + + ## Pre-Landing Review - + + +## Design Review + + ## Eval Results @@ -453,4 +638,5 @@ If leaderboard data is available, print the table. If sync is not configured or - **Split commits for bisectability** — each commit = one logical change. - **TODOS.md completion detection must be conservative.** Only mark items as completed when the diff clearly shows the work is done. - **Use Greptile reply templates from greptile-triage.md.** Every reply includes evidence (inline diff, code references, re-rank suggestion). Never post vague replies. 
+- **Step 3.4 generates coverage tests.** They must pass before committing. Never commit failing tests. - **The goal is: user says `/ship`, next thing they see is the review + PR URL.** diff --git a/test/fixtures/review-eval-design-slop.css b/test/fixtures/review-eval-design-slop.css new file mode 100644 index 00000000..40e055fb --- /dev/null +++ b/test/fixtures/review-eval-design-slop.css @@ -0,0 +1,86 @@ +/* Planted design anti-patterns for E2E eval — 7 issues */ + +/* Issue 1: [HIGH] Blacklisted font (Papyrus) */ +/* Issue 2: [HIGH] Body text < 16px (14px) */ +body { + font-family: 'Papyrus', sans-serif; + font-size: 14px; + margin: 0; + padding: 0; +} + +/* Issue 5: [MEDIUM] Purple/violet gradient background */ +.hero { + background: linear-gradient(135deg, #6366f1, #8b5cf6); + text-align: center; + padding: 80px 20px; + color: white; +} + +.hero h1 { + text-align: center; + font-size: 48px; +} + +.hero p { + text-align: center; + font-size: 20px; +} + +/* Issue 7: [LOW] 3-column feature grid with icon circles */ +.features { + display: grid; + grid-template-columns: repeat(3, 1fr); + gap: 24px; + padding: 60px 40px; + text-align: center; +} + +.feature-card { + border-radius: 24px; + padding: 32px; + text-align: center; + background: #f9fafb; +} + +/* Icon in colored circle — AI slop pattern */ +.icon-circle { + width: 60px; + height: 60px; + border-radius: 50%; + background: #ede9fe; + display: flex; + align-items: center; + justify-content: center; + margin: 0 auto 16px; + font-size: 24px; +} + +/* Issue 3: [HIGH] outline: none without replacement */ +button { + outline: none; + background: #6366f1; + color: white; + border: none; + padding: 12px 24px; + border-radius: 24px; + cursor: pointer; +} + +.small-link { + font-size: 11px; + padding: 4px 8px; +} + +/* Issue 4: [HIGH] !important usage */ +.override { + color: red !important; + margin-left: 10px !important; +} + +.footer { + text-align: center; + padding: 40px; + background: #1e1b4b; + color: white; 
+} diff --git a/test/fixtures/review-eval-design-slop.html b/test/fixtures/review-eval-design-slop.html new file mode 100644 index 00000000..f05affd1 --- /dev/null +++ b/test/fixtures/review-eval-design-slop.html @@ -0,0 +1,41 @@ + + + + + + + Our Platform + + + +
+

Welcome to Our Platform

+

Your all-in-one solution for everything you need

+ +
+ + +
+
+
+

Feature One

+

A short description of this amazing feature that will change your life.

+
+
+
+

Feature Two

+

Another incredible capability that sets us apart from the competition.

+
+
+
+

Feature Three

+

Yet another powerful tool to streamline your workflow effortlessly.

+
+
+ + + + diff --git a/test/gen-skill-docs.test.ts b/test/gen-skill-docs.test.ts index def042c9..9dfd1a1c 100644 --- a/test/gen-skill-docs.test.ts +++ b/test/gen-skill-docs.test.ts @@ -70,7 +70,7 @@ describe('gen-skill-docs', () => { { dir: 'setup-browser-cookies', name: 'setup-browser-cookies' }, { dir: 'gstack-upgrade', name: 'gstack-upgrade' }, { dir: 'plan-design-review', name: 'plan-design-review' }, - { dir: 'qa-design-review', name: 'qa-design-review' }, + { dir: 'design-review', name: 'design-review' }, { dir: 'design-consultation', name: 'design-consultation' }, ]; @@ -322,3 +322,31 @@ describe('description quality evals', () => { expect(tipsSection).not.toContain('->'); }); }); + +describe('REVIEW_DASHBOARD resolver', () => { + const REVIEW_SKILLS = ['plan-ceo-review', 'plan-eng-review', 'plan-design-review']; + + for (const skill of REVIEW_SKILLS) { + test(`review dashboard appears in ${skill} generated file`, () => { + const content = fs.readFileSync(path.join(ROOT, skill, 'SKILL.md'), 'utf-8'); + expect(content).toContain('reviews.jsonl'); + expect(content).toContain('REVIEW READINESS DASHBOARD'); + }); + } + + test('review dashboard appears in ship generated file', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('reviews.jsonl'); + expect(content).toContain('REVIEW READINESS DASHBOARD'); + }); + + test('resolver output contains key dashboard elements', () => { + const content = fs.readFileSync(path.join(ROOT, 'plan-ceo-review', 'SKILL.md'), 'utf-8'); + expect(content).toContain('VERDICT'); + expect(content).toContain('CLEARED'); + expect(content).toContain('Eng Review'); + expect(content).toContain('7 days'); + expect(content).toContain('Design Review'); + expect(content).toContain('skip_eng_review'); + }); +}); diff --git a/test/helpers/touchfiles.ts b/test/helpers/touchfiles.ts new file mode 100644 index 00000000..995648a1 --- /dev/null +++ b/test/helpers/touchfiles.ts @@ -0,0 +1,200 
@@ +/** + * Diff-based test selection for E2E and LLM-judge evals. + * + * Each test declares which source files it depends on ("touchfiles"). + * The test runner checks `git diff` and only runs tests whose + * dependencies were modified. Override with EVALS_ALL=1 to run everything. + */ + +import { spawnSync } from 'child_process'; + +// --- Glob matching --- + +/** + * Match a file path against a glob pattern. + * Supports: + * ** — match any number of path segments + * * — match within a single segment (no /) + */ +export function matchGlob(file: string, pattern: string): boolean { + const regexStr = pattern + .replace(/\./g, '\\.') + .replace(/\*\*/g, '{{GLOBSTAR}}') + .replace(/\*/g, '[^/]*') + .replace(/\{\{GLOBSTAR\}\}/g, '.*'); + return new RegExp(`^${regexStr}$`).test(file); +} + +// --- Touchfile maps --- + +/** + * E2E test touchfiles — keyed by testName (the string passed to runSkillTest). + * Each test lists the file patterns that, if changed, require the test to run. + */ +export const E2E_TOUCHFILES: Record = { + // Browse core + 'browse-basic': ['browse/src/**'], + 'browse-snapshot': ['browse/src/**'], + + // SKILL.md setup + preamble (depend on ROOT SKILL.md only) + 'skillmd-setup-discovery': ['SKILL.md', 'SKILL.md.tmpl'], + 'skillmd-no-local-binary': ['SKILL.md', 'SKILL.md.tmpl'], + 'skillmd-outside-git': ['SKILL.md', 'SKILL.md.tmpl'], + 'contributor-mode': ['SKILL.md', 'SKILL.md.tmpl'], + 'session-awareness': ['SKILL.md', 'SKILL.md.tmpl'], + + // QA + 'qa-quick': ['qa/**', 'browse/src/**'], + 'qa-b6-static': ['qa/**', 'browse/src/**', 'browse/test/fixtures/qa-eval.html', 'test/fixtures/qa-eval-ground-truth.json'], + 'qa-b7-spa': ['qa/**', 'browse/src/**', 'browse/test/fixtures/qa-eval-spa.html', 'test/fixtures/qa-eval-spa-ground-truth.json'], + 'qa-b8-checkout': ['qa/**', 'browse/src/**', 'browse/test/fixtures/qa-eval-checkout.html', 'test/fixtures/qa-eval-checkout-ground-truth.json'], + 'qa-only-no-fix': ['qa-only/**', 'qa/templates/**'], + 
'qa-fix-loop': ['qa/**', 'browse/src/**'], + + // Review + 'review-sql-injection': ['review/**', 'test/fixtures/review-eval-vuln.rb'], + 'review-enum-completeness': ['review/**', 'test/fixtures/review-eval-enum*.rb'], + 'review-base-branch': ['review/**'], + 'review-design-lite': ['review/**', 'test/fixtures/review-eval-design-slop.*'], + + // Plan reviews + 'plan-ceo-review': ['plan-ceo-review/**'], + 'plan-ceo-review-selective': ['plan-ceo-review/**'], + 'plan-eng-review': ['plan-eng-review/**'], + 'plan-eng-review-artifact': ['plan-eng-review/**'], + + // Ship + 'ship-base-branch': ['ship/**'], + + // Retro + 'retro': ['retro/**'], + 'retro-base-branch': ['retro/**'], + + // Document-release + 'document-release': ['document-release/**'], + + // QA bootstrap + 'qa-bootstrap': ['qa/**', 'browse/src/**', 'ship/**'], + + // Ship coverage audit + 'ship-coverage-audit': ['ship/**'], + + // Design + 'design-consultation-core': ['design-consultation/**'], + 'design-consultation-research': ['design-consultation/**'], + 'design-consultation-existing': ['design-consultation/**'], + 'design-consultation-preview': ['design-consultation/**'], + 'plan-design-review-plan-mode': ['plan-design-review/**'], + 'plan-design-review-no-ui-scope': ['plan-design-review/**'], + 'design-review-fix': ['design-review/**', 'browse/src/**'], + + // gstack-upgrade + 'gstack-upgrade-happy-path': ['gstack-upgrade/**'], +}; + +/** + * LLM-judge test touchfiles — keyed by test description string. 
+ */ +export const LLM_JUDGE_TOUCHFILES: Record = { + 'command reference table': ['SKILL.md', 'SKILL.md.tmpl', 'browse/src/commands.ts'], + 'snapshot flags reference': ['SKILL.md', 'SKILL.md.tmpl', 'browse/src/snapshot.ts'], + 'browse/SKILL.md reference': ['browse/SKILL.md', 'browse/SKILL.md.tmpl', 'browse/src/**'], + 'setup block': ['SKILL.md', 'SKILL.md.tmpl'], + 'regression vs baseline': ['SKILL.md', 'SKILL.md.tmpl', 'browse/src/commands.ts', 'test/fixtures/eval-baselines.json'], + 'qa/SKILL.md workflow': ['qa/SKILL.md', 'qa/SKILL.md.tmpl'], + 'qa/SKILL.md health rubric': ['qa/SKILL.md', 'qa/SKILL.md.tmpl'], + 'cross-skill greptile consistency': ['review/SKILL.md', 'review/SKILL.md.tmpl', 'ship/SKILL.md', 'ship/SKILL.md.tmpl', 'review/greptile-triage.md', 'retro/SKILL.md', 'retro/SKILL.md.tmpl'], + 'baseline score pinning': ['SKILL.md', 'SKILL.md.tmpl', 'test/fixtures/eval-baselines.json'], + + // Ship & Release + 'ship/SKILL.md workflow': ['ship/SKILL.md', 'ship/SKILL.md.tmpl'], + 'document-release/SKILL.md workflow': ['document-release/SKILL.md', 'document-release/SKILL.md.tmpl'], + + // Plan Reviews + 'plan-ceo-review/SKILL.md modes': ['plan-ceo-review/SKILL.md', 'plan-ceo-review/SKILL.md.tmpl'], + 'plan-eng-review/SKILL.md sections': ['plan-eng-review/SKILL.md', 'plan-eng-review/SKILL.md.tmpl'], + 'plan-design-review/SKILL.md passes': ['plan-design-review/SKILL.md', 'plan-design-review/SKILL.md.tmpl'], + + // Design skills + 'design-review/SKILL.md fix loop': ['design-review/SKILL.md', 'design-review/SKILL.md.tmpl'], + 'design-consultation/SKILL.md research': ['design-consultation/SKILL.md', 'design-consultation/SKILL.md.tmpl'], + + // Other skills + 'retro/SKILL.md instructions': ['retro/SKILL.md', 'retro/SKILL.md.tmpl'], + 'qa-only/SKILL.md workflow': ['qa-only/SKILL.md', 'qa-only/SKILL.md.tmpl'], + 'gstack-upgrade/SKILL.md upgrade flow': ['gstack-upgrade/SKILL.md', 'gstack-upgrade/SKILL.md.tmpl'], +}; + +/** + * Changes to any of these files trigger ALL 
tests (both E2E and LLM-judge). + */ +export const GLOBAL_TOUCHFILES = [ + 'test/helpers/session-runner.ts', + 'test/helpers/eval-store.ts', + 'test/helpers/llm-judge.ts', + 'scripts/gen-skill-docs.ts', + 'test/helpers/touchfiles.ts', + 'browse/test/test-server.ts', +]; + +// --- Base branch detection --- + +/** + * Detect the base branch by trying refs in order. + * Returns the first valid ref, or null if none found. + */ +export function detectBaseBranch(cwd: string): string | null { + for (const ref of ['origin/main', 'origin/master', 'main', 'master']) { + const result = spawnSync('git', ['rev-parse', '--verify', ref], { + cwd, stdio: 'pipe', timeout: 3000, + }); + if (result.status === 0) return ref; + } + return null; +} + +/** + * Get list of files changed between base branch and HEAD. + */ +export function getChangedFiles(baseBranch: string, cwd: string): string[] { + const result = spawnSync('git', ['diff', '--name-only', `${baseBranch}...HEAD`], { + cwd, stdio: 'pipe', timeout: 5000, + }); + if (result.status !== 0) return []; + return result.stdout.toString().trim().split('\n').filter(Boolean); +} + +// --- Test selection --- + +/** + * Select tests to run based on changed files. + * + * Algorithm: + * 1. If any changed file matches a global touchfile → run ALL tests + * 2. Otherwise, for each test, check if any changed file matches its patterns + * 3. 
Return selected + skipped lists with reason + */ +export function selectTests( + changedFiles: string[], + touchfiles: Record, + globalTouchfiles: string[] = GLOBAL_TOUCHFILES, +): { selected: string[]; skipped: string[]; reason: string } { + const allTestNames = Object.keys(touchfiles); + + // Global touchfile hit → run all + for (const file of changedFiles) { + if (globalTouchfiles.some(g => matchGlob(file, g))) { + return { selected: allTestNames, skipped: [], reason: `global: ${file}` }; + } + } + + // Per-test matching + const selected: string[] = []; + const skipped: string[] = []; + for (const [testName, patterns] of Object.entries(touchfiles)) { + const hit = changedFiles.some(f => patterns.some(p => matchGlob(f, p))); + (hit ? selected : skipped).push(testName); + } + + return { selected, skipped, reason: 'diff' }; +} diff --git a/test/skill-e2e.test.ts b/test/skill-e2e.test.ts index cd5ad0f2..9a9af222 100644 --- a/test/skill-e2e.test.ts +++ b/test/skill-e2e.test.ts @@ -1,10 +1,11 @@ import { describe, test, expect, beforeAll, afterAll } from 'bun:test'; import { runSkillTest } from './helpers/session-runner'; import type { SkillTestResult } from './helpers/session-runner'; -import { outcomeJudge } from './helpers/llm-judge'; +import { outcomeJudge, callJudge } from './helpers/llm-judge'; import { EvalCollector, judgePassed } from './helpers/eval-store'; import type { EvalTestEntry } from './helpers/eval-store'; import { startTestServer } from '../browse/test/test-server'; +import { selectTests, detectBaseBranch, getChangedFiles, E2E_TOUCHFILES, GLOBAL_TOUCHFILES } from './helpers/touchfiles'; import { spawnSync } from 'child_process'; import * as fs from 'fs'; import * as path from 'path'; @@ -21,6 +22,41 @@ const ROOT = path.resolve(import.meta.dir, '..'); const evalsEnabled = !!process.env.EVALS; const describeE2E = evalsEnabled ? 
describe : describe.skip; +// --- Diff-based test selection --- +// When EVALS_ALL is not set, only run tests whose touchfiles were modified. +// Set EVALS_ALL=1 to force all tests. Set EVALS_BASE to override base branch. +let selectedTests: string[] | null = null; // null = run all + +if (evalsEnabled && !process.env.EVALS_ALL) { + const baseBranch = process.env.EVALS_BASE + || detectBaseBranch(ROOT) + || 'main'; + const changedFiles = getChangedFiles(baseBranch, ROOT); + + if (changedFiles.length > 0) { + const selection = selectTests(changedFiles, E2E_TOUCHFILES, GLOBAL_TOUCHFILES); + selectedTests = selection.selected; + process.stderr.write(`\nE2E selection (${selection.reason}): ${selection.selected.length}/${Object.keys(E2E_TOUCHFILES).length} tests\n`); + if (selection.skipped.length > 0) { + process.stderr.write(` Skipped: ${selection.skipped.join(', ')}\n`); + } + process.stderr.write('\n'); + } + // If changedFiles is empty (e.g., on main branch), selectedTests stays null → run all +} + +/** Wrap a describe block to skip entirely if none of its tests are selected. */ +function describeIfSelected(name: string, testNames: string[], fn: () => void) { + const anySelected = selectedTests === null || testNames.some(t => selectedTests!.includes(t)); + (anySelected ? describeE2E : describe.skip)(name, fn); +} + +/** Skip an individual test if not selected (for multi-test describe blocks). */ +function testIfSelected(testName: string, fn: () => Promise, timeout: number) { + const shouldRun = selectedTests === null || selectedTests.includes(testName); + (shouldRun ? test : test.skip)(testName, fn, timeout); +} + // Eval result collector — accumulates test results, writes to ~/.gstack-dev/evals/ on finalize const evalCollector = evalsEnabled ? 
new EvalCollector('e2e') : null; @@ -134,7 +170,10 @@ if (evalsEnabled) { } } -describeE2E('Skill E2E tests', () => { +describeIfSelected('Skill E2E tests', [ + 'browse-basic', 'browse-snapshot', 'skillmd-setup-discovery', + 'skillmd-no-local-binary', 'skillmd-outside-git', 'contributor-mode', 'session-awareness', +], () => { beforeAll(() => { testServer = startTestServer(); tmpDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-')); @@ -146,7 +185,7 @@ describeE2E('Skill E2E tests', () => { try { fs.rmSync(tmpDir, { recursive: true, force: true }); } catch {} }); - test('browse basic commands work without errors', async () => { + testIfSelected('browse-basic', async () => { const result = await runSkillTest({ prompt: `You have a browse binary at ${browseBin}. Assign it to B variable and run these commands in sequence: 1. $B goto ${testServer.url} @@ -167,7 +206,7 @@ Report the results of each command.`, expect(result.exitReason).toBe('success'); }, 90_000); - test('browse snapshot flags all work', async () => { + testIfSelected('browse-snapshot', async () => { const result = await runSkillTest({ prompt: `You have a browse binary at ${browseBin}. Assign it to B variable and run: 1. 
$B goto ${testServer.url} @@ -192,7 +231,7 @@ Report what each command returned.`, expect(result.exitReason).toBe('success'); }, 90_000); - test('agent discovers browse binary via SKILL.md setup block', async () => { + testIfSelected('skillmd-setup-discovery', async () => { const skillMd = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); const setupStart = skillMd.indexOf('## SETUP'); const setupEnd = skillMd.indexOf('## IMPORTANT'); @@ -221,7 +260,7 @@ Report whether it worked.`, expect(result.exitReason).toBe('success'); }, 90_000); - test('SKILL.md setup block handles missing local binary gracefully', async () => { + testIfSelected('skillmd-no-local-binary', async () => { // Create a tmpdir with no browse binary — no local .claude/skills/gstack/browse/dist/browse const emptyDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-empty-')); @@ -256,7 +295,7 @@ Report the exact output. Do NOT try to fix or install anything — just report w try { fs.rmSync(emptyDir, { recursive: true, force: true }); } catch {} }, 60_000); - test('SKILL.md setup block works outside git repo', async () => { + testIfSelected('skillmd-outside-git', async () => { // Create a tmpdir outside any git repo const nonGitDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-nogit-')); @@ -287,7 +326,7 @@ Report the exact output — either "READY: " or "NEEDS_SETUP".`, try { fs.rmSync(nonGitDir, { recursive: true, force: true }); } catch {} }, 60_000); - test('contributor mode files a report on gstack error', async () => { + testIfSelected('contributor-mode', async () => { const contribDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-contrib-')); const logsDir = path.join(contribDir, 'contributor-logs'); fs.mkdirSync(logsDir, { recursive: true }); @@ -343,7 +382,7 @@ File a contributor report about this issue. 
Then tell me what you filed.`, try { fs.rmSync(contribDir, { recursive: true, force: true }); } catch {} }, 90_000); - test('session awareness adds ELI16 context when _SESSIONS >= 3', async () => { + testIfSelected('session-awareness', async () => { const sessionDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-session-')); // Set up a git repo so there's project/branch context to reference @@ -414,7 +453,7 @@ Remember: _SESSIONS=4, so ELI16 mode is active. The user is juggling multiple wi // --- B4: QA skill E2E --- -describeE2E('QA skill E2E', () => { +describeIfSelected('QA skill E2E', ['qa-quick'], () => { let qaDir: string; beforeAll(() => { @@ -469,7 +508,7 @@ Write your report to ${qaDir}/qa-reports/qa-report.md`, // --- B5: Review skill E2E --- -describeE2E('Review skill E2E', () => { +describeIfSelected('Review skill E2E', ['review-sql-injection'], () => { let reviewDir: string; beforeAll(() => { @@ -528,7 +567,7 @@ Write your review findings to ${reviewDir}/review-output.md`, // --- Review: Enum completeness E2E --- -describeE2E('Review enum completeness E2E', () => { +describeIfSelected('Review enum completeness E2E', ['review-enum-completeness'], () => { let enumDir: string; beforeAll(() => { @@ -598,13 +637,107 @@ The diff adds a new "returned" status to the Order model. Your job is to check i }, 120_000); }); +// --- Review: Design review lite E2E --- + +describeE2E('Review design lite E2E', () => { + let designDir: string; + + beforeAll(() => { + designDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-design-lite-')); + + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: designDir, stdio: 'pipe', timeout: 5000 }); + + run('git', ['init']); + run('git', ['config', 'user.email', 'test@test.com']); + run('git', ['config', 'user.name', 'Test']); + + // Commit clean base on main + fs.writeFileSync(path.join(designDir, 'index.html'), '

Clean

\n'); + fs.writeFileSync(path.join(designDir, 'styles.css'), 'body { font-size: 16px; }\n'); + run('git', ['add', '.']); + run('git', ['commit', '-m', 'initial']); + + // Feature branch adds AI slop CSS + HTML + run('git', ['checkout', '-b', 'feature/add-landing-page']); + const slopCss = fs.readFileSync(path.join(ROOT, 'test', 'fixtures', 'review-eval-design-slop.css'), 'utf-8'); + const slopHtml = fs.readFileSync(path.join(ROOT, 'test', 'fixtures', 'review-eval-design-slop.html'), 'utf-8'); + fs.writeFileSync(path.join(designDir, 'styles.css'), slopCss); + fs.writeFileSync(path.join(designDir, 'landing.html'), slopHtml); + run('git', ['add', '.']); + run('git', ['commit', '-m', 'add landing page']); + + // Copy review skill files + fs.copyFileSync(path.join(ROOT, 'review', 'SKILL.md'), path.join(designDir, 'review-SKILL.md')); + fs.copyFileSync(path.join(ROOT, 'review', 'checklist.md'), path.join(designDir, 'review-checklist.md')); + fs.copyFileSync(path.join(ROOT, 'review', 'design-checklist.md'), path.join(designDir, 'review-design-checklist.md')); + fs.copyFileSync(path.join(ROOT, 'review', 'greptile-triage.md'), path.join(designDir, 'review-greptile-triage.md')); + }); + + afterAll(() => { + try { fs.rmSync(designDir, { recursive: true, force: true }); } catch {} + }); + + test('/review catches design anti-patterns in CSS/HTML diff', async () => { + const result = await runSkillTest({ + prompt: `You are in a git repo on branch feature/add-landing-page with changes against main. +Read review-SKILL.md for the review workflow instructions. +Read review-checklist.md for the code review checklist. +Read review-design-checklist.md for the design review checklist. +Run /review on the current diff (git diff main...HEAD). + +The diff adds a landing page with CSS and HTML. Check for both code issues AND design anti-patterns. 
+Write your review findings to ${designDir}/review-output.md + +Important: The design checklist should catch issues like blacklisted fonts, small font sizes, outline:none, !important, AI slop patterns (purple gradients, generic hero copy, 3-column feature grid), etc.`, + workingDirectory: designDir, + maxTurns: 15, + timeout: 120_000, + testName: 'review-design-lite', + runId, + }); + + logCost('/review design lite', result); + recordE2E('/review design lite', 'Review design lite E2E', result); + expect(result.exitReason).toBe('success'); + + // Verify the review caught at least 4 of 7 planted design issues + const reviewPath = path.join(designDir, 'review-output.md'); + if (fs.existsSync(reviewPath)) { + const review = fs.readFileSync(reviewPath, 'utf-8').toLowerCase(); + let detected = 0; + + // Issue 1: Blacklisted font (Papyrus) — HIGH + if (review.includes('papyrus') || review.includes('blacklisted font') || review.includes('font family')) detected++; + // Issue 2: Body text < 16px — HIGH + if (review.includes('14px') || review.includes('font-size') || review.includes('font size') || review.includes('body text')) detected++; + // Issue 3: outline: none — HIGH + if (review.includes('outline') || review.includes('focus')) detected++; + // Issue 4: !important — HIGH + if (review.includes('!important') || review.includes('important')) detected++; + // Issue 5: Purple gradient — MEDIUM + if (review.includes('gradient') || review.includes('purple') || review.includes('violet') || review.includes('#6366f1') || review.includes('#8b5cf6')) detected++; + // Issue 6: Generic hero copy — MEDIUM + if (review.includes('welcome to') || review.includes('all-in-one') || review.includes('generic') || review.includes('hero copy') || review.includes('ai slop')) detected++; + // Issue 7: 3-column feature grid — LOW + if (review.includes('3-column') || review.includes('three-column') || review.includes('feature grid') || review.includes('icon') || review.includes('circle')) 
detected++; + + console.log(`Design review detected ${detected}/7 planted issues`); + expect(detected).toBeGreaterThanOrEqual(4); + } + }, 150_000); +}); + // --- B6/B7/B8: Planted-bug outcome evals --- // Outcome evals also need ANTHROPIC_API_KEY for the LLM judge const hasApiKey = !!process.env.ANTHROPIC_API_KEY; const describeOutcome = (evalsEnabled && hasApiKey) ? describe : describe.skip; -describeOutcome('Planted-bug outcome evals', () => { +// Wrap describeOutcome with selection — skip if no planted-bug tests are selected +const outcomeTestNames = ['qa-b6-static', 'qa-b7-spa', 'qa-b8-checkout']; +const anyOutcomeSelected = selectedTests === null || outcomeTestNames.some(t => selectedTests!.includes(t)); +(anyOutcomeSelected ? describeOutcome : describe.skip)('Planted-bug outcome evals', () => { let outcomeDir: string; beforeAll(() => { @@ -768,7 +901,7 @@ CRITICAL RULES: // --- Plan CEO Review E2E --- -describeE2E('Plan CEO Review E2E', () => { +describeIfSelected('Plan CEO Review E2E', ['plan-ceo-review'], () => { let planDir: string; beforeAll(() => { @@ -853,9 +986,92 @@ Focus on reviewing the plan content: architecture, error handling, security, and }, 420_000); }); +// --- Plan CEO Review (SELECTIVE EXPANSION) E2E --- + +describeIfSelected('Plan CEO Review SELECTIVE EXPANSION E2E', ['plan-ceo-review-selective'], () => { + let planDir: string; + + beforeAll(() => { + planDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-plan-ceo-sel-')); + const { spawnSync } = require('child_process'); + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: planDir, stdio: 'pipe', timeout: 5000 }); + + run('git', ['init']); + run('git', ['config', 'user.email', 'test@test.com']); + run('git', ['config', 'user.name', 'Test']); + + fs.writeFileSync(path.join(planDir, 'plan.md'), `# Plan: Add User Dashboard + +## Context +We're building a new user dashboard that shows recent activity, notifications, and quick actions. + +## Changes +1. 
New React component \`UserDashboard\` in \`src/components/\` +2. REST API endpoint \`GET /api/dashboard\` returning user stats +3. PostgreSQL query for activity aggregation +4. Redis cache layer for dashboard data (5min TTL) + +## Architecture +- Frontend: React + TailwindCSS +- Backend: Express.js REST API +- Database: PostgreSQL with existing user/activity tables +- Cache: Redis for dashboard aggregates + +## Open questions +- Should we use WebSocket for real-time updates? +- How do we handle users with 100k+ activity records? +`); + + run('git', ['add', '.']); + run('git', ['commit', '-m', 'add plan']); + + fs.mkdirSync(path.join(planDir, 'plan-ceo-review'), { recursive: true }); + fs.copyFileSync( + path.join(ROOT, 'plan-ceo-review', 'SKILL.md'), + path.join(planDir, 'plan-ceo-review', 'SKILL.md'), + ); + }); + + afterAll(() => { + try { fs.rmSync(planDir, { recursive: true, force: true }); } catch {} + }); + + test('/plan-ceo-review SELECTIVE EXPANSION produces structured review output', async () => { + const result = await runSkillTest({ + prompt: `Read plan-ceo-review/SKILL.md for the review workflow. + +Read plan.md — that's the plan to review. This is a standalone plan document, not a codebase — skip any codebase exploration or system audit steps. + +Choose SELECTIVE EXPANSION mode. Skip any AskUserQuestion calls — this is non-interactive. +For the cherry-pick ceremony, accept all expansion proposals automatically. 
+Write your complete review directly to ${planDir}/review-output-selective.md + +Focus on reviewing the plan content: architecture, error handling, security, and performance.`, + workingDirectory: planDir, + maxTurns: 15, + timeout: 360_000, + testName: 'plan-ceo-review-selective', + runId, + }); + + logCost('/plan-ceo-review (SELECTIVE)', result); + recordE2E('/plan-ceo-review-selective', 'Plan CEO Review SELECTIVE EXPANSION E2E', result, { + passed: ['success', 'error_max_turns'].includes(result.exitReason), + }); + expect(['success', 'error_max_turns']).toContain(result.exitReason); + + const reviewPath = path.join(planDir, 'review-output-selective.md'); + if (fs.existsSync(reviewPath)) { + const review = fs.readFileSync(reviewPath, 'utf-8'); + expect(review.length).toBeGreaterThan(200); + } + }, 420_000); +}); + // --- Plan Eng Review E2E --- -describeE2E('Plan Eng Review E2E', () => { +describeIfSelected('Plan Eng Review E2E', ['plan-eng-review'], () => { let planDir: string; beforeAll(() => { @@ -921,7 +1137,7 @@ Replace session-cookie auth with JWT tokens. Currently using express-session + R Read plan.md — that's the plan to review. This is a standalone plan document, not a codebase — skip any codebase exploration steps. -Choose SMALL CHANGE mode. Skip any AskUserQuestion calls — this is non-interactive. +Proceed directly to the full review. Skip any AskUserQuestion calls — this is non-interactive. 
Write your complete review directly to ${planDir}/review-output.md Focus on architecture, code quality, tests, and performance sections.`, @@ -949,7 +1165,7 @@ Focus on architecture, code quality, tests, and performance sections.`, // --- Retro E2E --- -describeE2E('Retro E2E', () => { +describeIfSelected('Retro E2E', ['retro'], () => { let retroDir: string; beforeAll(() => { @@ -1035,7 +1251,7 @@ Analyze the git history and produce the narrative report as described in the SKI // --- QA-Only E2E (report-only, no fixes) --- -describeE2E('QA-Only skill E2E', () => { +describeIfSelected('QA-Only skill E2E', ['qa-only-no-fix'], () => { let qaOnlyDir: string; beforeAll(() => { @@ -1121,7 +1337,7 @@ Write your report to ${qaOnlyDir}/qa-reports/qa-only-report.md`, // --- QA Fix Loop E2E --- -describeE2E('QA Fix Loop E2E', () => { +describeIfSelected('QA Fix Loop E2E', ['qa-fix-loop'], () => { let qaFixDir: string; let qaFixServer: ReturnType | null = null; @@ -1235,7 +1451,7 @@ This is a test+fix loop: find bugs, fix them in the source code, commit each fix // --- Plan-Eng-Review Test-Plan Artifact E2E --- -describeE2E('Plan-Eng-Review Test-Plan Artifact E2E', () => { +describeIfSelected('Plan-Eng-Review Test-Plan Artifact E2E', ['plan-eng-review-artifact'], () => { let planDir: string; let projectDir: string; @@ -1322,7 +1538,7 @@ export function main() { return Dashboard(); } Read plan.md — that's the plan to review. This is a standalone plan with source code in app.ts and dashboard.ts. -Choose SMALL CHANGE mode. Skip any AskUserQuestion calls — this is non-interactive. +Proceed directly to the full review. Skip any AskUserQuestion calls — this is non-interactive. IMPORTANT: After your review, you MUST write the test-plan artifact as described in the "Test Plan Artifact" section of SKILL.md. The remote-slug shim is at ${planDir}/browse/bin/remote-slug. 
@@ -1362,7 +1578,7 @@ Write your review to ${planDir}/review-output.md`, // --- Base branch detection smoke tests --- -describeE2E('Base branch detection', () => { +describeIfSelected('Base branch detection', ['review-base-branch', 'ship-base-branch', 'retro-base-branch'], () => { let baseBranchDir: string; const run = (cmd: string, args: string[], cwd: string) => spawnSync(cmd, args, { cwd, stdio: 'pipe', timeout: 5000 }); @@ -1375,7 +1591,7 @@ describeE2E('Base branch detection', () => { try { fs.rmSync(baseBranchDir, { recursive: true, force: true }); } catch {} }); - test('/review detects base branch and diffs against it', async () => { + testIfSelected('review-base-branch', async () => { const dir = path.join(baseBranchDir, 'review-base'); fs.mkdirSync(dir, { recursive: true }); @@ -1428,7 +1644,7 @@ Write your findings to ${dir}/review-output.md`, expect(usedGitDiff).toBe(true); }, 120_000); - test('/ship Step 0-1 detects base branch without destructive actions', async () => { + testIfSelected('ship-base-branch', async () => { const dir = path.join(baseBranchDir, 'ship-base'); fs.mkdirSync(dir, { recursive: true }); @@ -1490,7 +1706,7 @@ Write a summary of what you detected to ${dir}/ship-preflight.md including: expect(destructiveTools).toHaveLength(0); }, 90_000); - test('/retro detects default branch for git queries', async () => { + testIfSelected('retro-base-branch', async () => { const dir = path.join(baseBranchDir, 'retro-base'); fs.mkdirSync(dir, { recursive: true }); @@ -1549,7 +1765,7 @@ Write your retrospective to ${dir}/retro-output.md`, // --- Document-Release skill E2E --- -describeE2E('Document-Release skill E2E', () => { +describeIfSelected('Document-Release skill E2E', ['document-release'], () => { let docReleaseDir: string; beforeAll(() => { @@ -1653,6 +1869,7 @@ IMPORTANT: // --- Deferred skill E2E tests (destructive or require interactive UI) --- +// Deferred tests — only test.todo entries, no selection needed describeE2E('Deferred skill 
E2E', () => { // Ship is destructive: pushes to remote, creates PRs, modifies VERSION/CHANGELOG test.todo('/ship completes full workflow'); @@ -1660,8 +1877,120 @@ describeE2E('Deferred skill E2E', () => { // Setup-browser-cookies requires interactive browser picker UI test.todo('/setup-browser-cookies imports cookies'); - // Gstack-upgrade is destructive: modifies skill installation directory - test.todo('/gstack-upgrade completes upgrade flow'); +}); + +// --- gstack-upgrade E2E --- + +describeIfSelected('gstack-upgrade E2E', ['gstack-upgrade-happy-path'], () => { + let upgradeDir: string; + let remoteDir: string; + + beforeAll(() => { + upgradeDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-upgrade-')); + remoteDir = fs.mkdtempSync(path.join(os.tmpdir(), 'gstack-remote-')); + + const run = (cmd: string, args: string[], cwd: string) => + spawnSync(cmd, args, { cwd, stdio: 'pipe', timeout: 5000 }); + + // Init the "project" repo + run('git', ['init'], upgradeDir); + run('git', ['config', 'user.email', 'test@test.com'], upgradeDir); + run('git', ['config', 'user.name', 'Test'], upgradeDir); + + // Create mock gstack install directory (local-git type) + const mockGstack = path.join(upgradeDir, '.claude', 'skills', 'gstack'); + fs.mkdirSync(mockGstack, { recursive: true }); + + // Init as a git repo + run('git', ['init'], mockGstack); + run('git', ['config', 'user.email', 'test@test.com'], mockGstack); + run('git', ['config', 'user.name', 'Test'], mockGstack); + + // Create bare remote + run('git', ['init', '--bare'], remoteDir); + run('git', ['remote', 'add', 'origin', remoteDir], mockGstack); + + // Write old version files + fs.writeFileSync(path.join(mockGstack, 'VERSION'), '0.5.0\n'); + fs.writeFileSync(path.join(mockGstack, 'CHANGELOG.md'), + '# Changelog\n\n## 0.5.0 — 2026-03-01\n\n- Initial release\n'); + fs.writeFileSync(path.join(mockGstack, 'setup'), + '#!/bin/bash\necho "Setup completed"\n', { mode: 0o755 }); + + // Initial commit + push + 
run('git', ['add', '.'], mockGstack); + run('git', ['commit', '-m', 'initial'], mockGstack); + run('git', ['push', '-u', 'origin', 'HEAD:main'], mockGstack); + + // Create new version (simulate upstream release) + fs.writeFileSync(path.join(mockGstack, 'VERSION'), '0.6.0\n'); + fs.writeFileSync(path.join(mockGstack, 'CHANGELOG.md'), + '# Changelog\n\n## 0.6.0 — 2026-03-15\n\n- New feature: interactive design review\n- Fix: snapshot flag validation\n\n## 0.5.0 — 2026-03-01\n\n- Initial release\n'); + run('git', ['add', '.'], mockGstack); + run('git', ['commit', '-m', 'release 0.6.0'], mockGstack); + run('git', ['push', 'origin', 'HEAD:main'], mockGstack); + + // Reset working copy back to old version + run('git', ['reset', '--hard', 'HEAD~1'], mockGstack); + + // Copy gstack-upgrade skill + fs.mkdirSync(path.join(upgradeDir, 'gstack-upgrade'), { recursive: true }); + fs.copyFileSync( + path.join(ROOT, 'gstack-upgrade', 'SKILL.md'), + path.join(upgradeDir, 'gstack-upgrade', 'SKILL.md'), + ); + + // Commit so git repo is clean + run('git', ['add', '.'], upgradeDir); + run('git', ['commit', '-m', 'initial project'], upgradeDir); + }); + + afterAll(() => { + try { fs.rmSync(upgradeDir, { recursive: true, force: true }); } catch {} + try { fs.rmSync(remoteDir, { recursive: true, force: true }); } catch {} + }); + + testIfSelected('gstack-upgrade-happy-path', async () => { + const mockGstack = path.join(upgradeDir, '.claude', 'skills', 'gstack'); + const result = await runSkillTest({ + prompt: `Read gstack-upgrade/SKILL.md for the upgrade workflow. + +You are running /gstack-upgrade standalone. The gstack installation is at ./.claude/skills/gstack (local-git type — it has a .git directory with an origin remote). + +Current version: 0.5.0. A new version 0.6.0 is available on origin/main. + +Follow the standalone upgrade flow: +1. Detect install type (local-git) +2. Run git fetch origin && git reset --hard origin/main in the install directory +3. Run the setup script +4. 
Show what's new from CHANGELOG + +Skip any AskUserQuestion calls — auto-approve the upgrade. Write a summary of what you did to stdout. + +IMPORTANT: The install directory is at ./.claude/skills/gstack — use that exact path.`, + workingDirectory: upgradeDir, + maxTurns: 20, + timeout: 180_000, + testName: 'gstack-upgrade-happy-path', + runId, + }); + + logCost('/gstack-upgrade happy path', result); + + // Check that the version was updated + const versionAfter = fs.readFileSync(path.join(mockGstack, 'VERSION'), 'utf-8').trim(); + const output = result.output || ''; + const mentionsUpgrade = output.toLowerCase().includes('0.6.0') || + output.toLowerCase().includes('upgrade') || + output.toLowerCase().includes('updated'); + + recordE2E('/gstack-upgrade happy path', 'gstack-upgrade E2E', result, { + passed: versionAfter === '0.6.0' && ['success', 'error_max_turns'].includes(result.exitReason), + }); + + expect(['success', 'error_max_turns']).toContain(result.exitReason); + expect(versionAfter).toBe('0.6.0'); + }, 240_000); }); // --- Design Consultation E2E --- @@ -1690,7 +2019,10 @@ ${designMd} Return JSON: { "passed": true/false, "reasoning": "one paragraph explaining your evaluation" }`); } -describeE2E('Design Consultation E2E', () => { +describeIfSelected('Design Consultation E2E', [ + 'design-consultation-core', 'design-consultation-research', + 'design-consultation-existing', 'design-consultation-preview', +], () => { let designDir: string; beforeAll(() => { @@ -1734,7 +2066,7 @@ A civic tech data platform for government employees to access, visualize, and sh try { fs.rmSync(designDir, { recursive: true, force: true }); } catch {} }); - test('Test 1: core flow produces valid DESIGN.md + CLAUDE.md', async () => { + testIfSelected('design-consultation-core', async () => { const result = await runSkillTest({ prompt: `Read design-consultation/SKILL.md for the design consultation workflow. 
@@ -1794,7 +2126,7 @@ Write DESIGN.md and CLAUDE.md (or update it) in the working directory.`, } }, 420_000); - test('Test 2: research integration uses WebSearch', async () => { + testIfSelected('design-consultation-research', async () => { // Clean up from previous test try { fs.unlinkSync(path.join(designDir, 'DESIGN.md')); } catch {} try { fs.unlinkSync(path.join(designDir, 'CLAUDE.md')); } catch {} @@ -1804,7 +2136,7 @@ Write DESIGN.md and CLAUDE.md (or update it) in the working directory.`, This is a civic tech data platform called CivicPulse. Read the README.md. -DO research competitors before proposing — search for civic tech and government data platform designs. Skip the font preview page. Skip any AskUserQuestion calls — this is non-interactive. +DO research what's out there before proposing — search for civic tech and government data platform designs. Skip the font preview page. Skip any AskUserQuestion calls — this is non-interactive. Write DESIGN.md to the working directory.`, workingDirectory: designDir, @@ -1851,7 +2183,7 @@ Write DESIGN.md to the working directory.`, expect(designExists).toBe(true); }, 420_000); - test('Test 3: handles existing DESIGN.md', async () => { + testIfSelected('design-consultation-existing', async () => { // Pre-create a minimal DESIGN.md fs.writeFileSync(path.join(designDir, 'DESIGN.md'), `# Design System — CivicPulse @@ -1897,7 +2229,7 @@ Skip research. Skip font preview. Skip any AskUserQuestion calls — this is non } }, 420_000); - test('Test 4: generates font + color preview HTML', async () => { + testIfSelected('design-consultation-preview', async () => { // Clean up try { fs.unlinkSync(path.join(designDir, 'DESIGN.md')); } catch {} @@ -1959,15 +2291,13 @@ Skip research. Skip any AskUserQuestion calls — this is non-interactive. 
Gener }, 420_000); }); -// --- Plan Design Review E2E --- +// --- Plan Design Review E2E (plan-mode) --- -describeE2E('Plan Design Review E2E', () => { +describeIfSelected('Plan Design Review E2E', ['plan-design-review-plan-mode', 'plan-design-review-no-ui-scope'], () => { let reviewDir: string; beforeAll(() => { - testServer = testServer || startTestServer(); reviewDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-plan-design-')); - setupBrowseShims(reviewDir); const { spawnSync } = require('child_process'); const run = (cmd: string, args: string[]) => @@ -1976,9 +2306,6 @@ describeE2E('Plan Design Review E2E', () => { run('git', ['init']); run('git', ['config', 'user.email', 'test@test.com']); run('git', ['config', 'user.name', 'Test']); - fs.writeFileSync(path.join(reviewDir, 'index.html'), '

Test

\n'); - run('git', ['add', '.']); - run('git', ['commit', '-m', 'initial']); // Copy plan-design-review skill fs.mkdirSync(path.join(reviewDir, 'plan-design-review'), { recursive: true }); @@ -1986,100 +2313,136 @@ describeE2E('Plan Design Review E2E', () => { path.join(ROOT, 'plan-design-review', 'SKILL.md'), path.join(reviewDir, 'plan-design-review', 'SKILL.md'), ); + + // Create a plan file with intentional design gaps + fs.writeFileSync(path.join(reviewDir, 'plan.md'), `# Plan: User Dashboard + +## Context +Build a user dashboard that shows account stats, recent activity, and settings. + +## Implementation +1. Create a dashboard page at /dashboard +2. Show user stats (posts, followers, engagement rate) +3. Add a recent activity feed +4. Add a settings panel +5. Use a clean, modern UI with cards and icons +6. Add a hero section at the top with a gradient background + +## Technical Details +- React components with Tailwind CSS +- API endpoint: GET /api/dashboard +- WebSocket for real-time activity updates +`); + + run('git', ['add', '.']); + run('git', ['commit', '-m', 'initial plan']); }); afterAll(() => { try { fs.rmSync(reviewDir, { recursive: true, force: true }); } catch {} }); - test('Test 5: /plan-design-review produces audit report', async () => { + testIfSelected('plan-design-review-plan-mode', async () => { const result = await runSkillTest({ - prompt: `IMPORTANT: The browse binary is already assigned below as B. Do NOT search for it or run the SKILL.md setup block — just use $B directly. + prompt: `Read plan-design-review/SKILL.md for the design review workflow. -B="${browseBin}" +Review the plan in ./plan.md. This plan has several design gaps — it uses vague language like "clean, modern UI" and "cards and icons", mentions a "hero section with gradient" (AI slop), and doesn't specify empty states, error states, loading states, responsive behavior, or accessibility. -Read plan-design-review/SKILL.md for the design review workflow. 
+Skip the preamble bash block. Skip any AskUserQuestion calls — this is non-interactive. Rate each design dimension 0-10 and explain what would make it a 10. Then EDIT plan.md to add the missing design decisions (interaction state table, empty states, responsive behavior, etc.). -Review the site at ${testServer.url}. Use --quick mode (homepage + 2 pages). Skip any AskUserQuestion calls — this is non-interactive. Write your audit report to ./design-audit.md. Do not offer to create DESIGN.md.`, +IMPORTANT: Do NOT try to browse any URLs or use a browse binary. This is a plan review, not a live site audit. Just read the plan file, review it, and edit it to fix the gaps.`, workingDirectory: reviewDir, - maxTurns: 20, - timeout: 360_000, - testName: 'plan-design-review-audit', + maxTurns: 15, + timeout: 300_000, + testName: 'plan-design-review-plan-mode', runId, }); - logCost('/plan-design-review audit', result); + logCost('/plan-design-review plan-mode', result); - const reportPath = path.join(reviewDir, 'design-audit.md'); - const reportExists = fs.existsSync(reportPath); - let reportContent = ''; - if (reportExists) { - reportContent = fs.readFileSync(reportPath, 'utf-8'); - } + // Check that the agent produced design ratings (0-10 scale) + const output = result.output || ''; + const hasRatings = /\d+\/10/.test(output); + const hasDesignContent = output.toLowerCase().includes('information architecture') || + output.toLowerCase().includes('interaction state') || + output.toLowerCase().includes('ai slop') || + output.toLowerCase().includes('hierarchy'); - const hasFirstImpression = reportContent.toLowerCase().includes('first impression') || - reportContent.toLowerCase().includes('impression'); + // Check that the plan file was edited (the core new behavior) + const planAfter = fs.readFileSync(path.join(reviewDir, 'plan.md'), 'utf-8'); + const planOriginal = `# Plan: User Dashboard`; + const planWasEdited = planAfter.length > 300; // Original is ~450 chars, edited should 
be much longer + const planHasDesignAdditions = planAfter.toLowerCase().includes('empty') || + planAfter.toLowerCase().includes('loading') || + planAfter.toLowerCase().includes('error') || + planAfter.toLowerCase().includes('state') || + planAfter.toLowerCase().includes('responsive') || + planAfter.toLowerCase().includes('accessibility'); - recordE2E('/plan-design-review audit', 'Plan Design Review E2E', result, { - passed: reportExists && ['success', 'error_max_turns'].includes(result.exitReason), + recordE2E('/plan-design-review plan-mode', 'Plan Design Review E2E', result, { + passed: hasDesignContent && planWasEdited && ['success', 'error_max_turns'].includes(result.exitReason), }); expect(['success', 'error_max_turns']).toContain(result.exitReason); - expect(reportExists).toBe(true); - if (reportExists) { - expect(reportContent.length).toBeGreaterThan(200); - } - }, 420_000); + // Agent should produce design-relevant output about the plan + expect(hasDesignContent).toBe(true); + // Agent should have edited the plan file to add missing design decisions + expect(planWasEdited).toBe(true); + expect(planHasDesignAdditions).toBe(true); + }, 360_000); - test('Test 6: /plan-design-review exports DESIGN.md', async () => { - // Clean up previous test artifacts - try { fs.unlinkSync(path.join(reviewDir, 'design-audit.md')); } catch {} + testIfSelected('plan-design-review-no-ui-scope', async () => { + // Write a backend-only plan + fs.writeFileSync(path.join(reviewDir, 'backend-plan.md'), `# Plan: Database Migration + +## Context +Migrate user records from PostgreSQL to a new schema with better indexing. + +## Implementation +1. Create migration to add new columns to users table +2. Backfill data from legacy columns +3. Add database indexes for common query patterns +4. Update ActiveRecord models +5. Run migration in staging first, then production +`); const result = await runSkillTest({ - prompt: `IMPORTANT: The browse binary is already assigned below as B. 
Do NOT search for it or run the SKILL.md setup block — just use $B directly. + prompt: `Read plan-design-review/SKILL.md for the design review workflow. -B="${browseBin}" +Review the plan in ./backend-plan.md. This is a pure backend database migration plan with no UI changes. -Read plan-design-review/SKILL.md for the design review workflow. +Skip the preamble bash block. Skip any AskUserQuestion calls — this is non-interactive. Write your findings directly to stdout. -Review ${testServer.url} with --quick mode. Skip any AskUserQuestion calls — this is non-interactive. After Phase 2 (Design System Extraction), write a DESIGN.md to the working directory. Also write the audit report to ./design-audit.md.`, +IMPORTANT: Do NOT try to browse any URLs or use a browse binary. This is a plan review, not a live site audit.`, workingDirectory: reviewDir, - maxTurns: 25, - timeout: 360_000, - testName: 'plan-design-review-export', + maxTurns: 10, + timeout: 180_000, + testName: 'plan-design-review-no-ui-scope', runId, }); - logCost('/plan-design-review export', result); + logCost('/plan-design-review no-ui-scope', result); - const designPath = path.join(reviewDir, 'DESIGN.md'); - const reportPath = path.join(reviewDir, 'design-audit.md'); - const designExists = fs.existsSync(designPath); - const reportExists = fs.existsSync(reportPath); + // Agent should detect no UI scope and exit early + const output = result.output || ''; + const detectsNoUI = output.toLowerCase().includes('no ui') || + output.toLowerCase().includes('no frontend') || + output.toLowerCase().includes('no design') || + output.toLowerCase().includes('not applicable') || + output.toLowerCase().includes('backend'); - let designContent = ''; - if (designExists) { - designContent = fs.readFileSync(designPath, 'utf-8'); - } - - const hasTypography = designContent.toLowerCase().includes('typography') || designContent.toLowerCase().includes('font'); - const hasColor = designContent.toLowerCase().includes('color'); - - 
recordE2E('/plan-design-review export', 'Plan Design Review E2E', result, { - passed: designExists && ['success', 'error_max_turns'].includes(result.exitReason), + recordE2E('/plan-design-review no-ui-scope', 'Plan Design Review E2E', result, { + passed: detectsNoUI && ['success', 'error_max_turns'].includes(result.exitReason), }); expect(['success', 'error_max_turns']).toContain(result.exitReason); - // DESIGN.md export is best-effort — agent may not always produce it - if (designExists) { - expect(hasTypography || hasColor).toBe(true); - } - }, 420_000); + expect(detectsNoUI).toBe(true); + }, 240_000); }); -// --- QA Design Review E2E --- +// --- Design Review E2E (live-site audit + fix) --- -describeE2E('QA Design Review E2E', () => { +describeIfSelected('Design Review E2E', ['design-review-fix'], () => { let qaDesignDir: string; let qaDesignServer: ReturnType | null = null; @@ -2155,11 +2518,11 @@ describeE2E('QA Design Review E2E', () => { }, }); - // Copy qa-design-review skill - fs.mkdirSync(path.join(qaDesignDir, 'qa-design-review'), { recursive: true }); + // Copy design-review skill + fs.mkdirSync(path.join(qaDesignDir, 'design-review'), { recursive: true }); fs.copyFileSync( - path.join(ROOT, 'qa-design-review', 'SKILL.md'), - path.join(qaDesignDir, 'qa-design-review', 'SKILL.md'), + path.join(ROOT, 'design-review', 'SKILL.md'), + path.join(qaDesignDir, 'design-review', 'SKILL.md'), ); }); @@ -2168,7 +2531,7 @@ describeE2E('QA Design Review E2E', () => { try { fs.rmSync(qaDesignDir, { recursive: true, force: true }); } catch {} }); - test('Test 7: /qa-design-review audits and fixes design issues', async () => { + test('Test 7: /design-review audits and fixes design issues', async () => { const serverUrl = `http://localhost:${(qaDesignServer as any)?.port}`; const result = await runSkillTest({ @@ -2176,17 +2539,17 @@ describeE2E('QA Design Review E2E', () => { B="${browseBin}" -Read qa-design-review/SKILL.md for the design review + fix workflow. 
+Read design-review/SKILL.md for the design review + fix workflow. Review the site at ${serverUrl}. Use --quick mode. Skip any AskUserQuestion calls — this is non-interactive. Fix up to 3 issues max. Write your report to ./design-audit.md.`, workingDirectory: qaDesignDir, maxTurns: 30, timeout: 360_000, - testName: 'qa-design-review-fix', + testName: 'design-review-fix', runId, }); - logCost('/qa-design-review fix', result); + logCost('/design-review fix', result); const reportPath = path.join(qaDesignDir, 'design-audit.md'); const reportExists = fs.existsSync(reportPath); @@ -2198,7 +2561,7 @@ Review the site at ${serverUrl}. Use --quick mode. Skip any AskUserQuestion call const commits = gitLog.stdout.toString().trim().split('\n'); const designFixCommits = commits.filter((c: string) => c.includes('style(design)')); - recordE2E('/qa-design-review fix', 'QA Design Review E2E', result, { + recordE2E('/design-review fix', 'Design Review E2E', result, { passed: ['success', 'error_max_turns'].includes(result.exitReason), }); @@ -2216,6 +2579,269 @@ Review the site at ${serverUrl}. Use --quick mode. 
Skip any AskUserQuestion call }, 420_000); }); +// --- Test Bootstrap E2E --- + +describeIfSelected('Test Bootstrap E2E', ['qa-bootstrap'], () => { + let bootstrapDir: string; + let bootstrapServer: ReturnType; + + beforeAll(() => { + bootstrapDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-bootstrap-')); + setupBrowseShims(bootstrapDir); + + // Copy qa skill files + copyDirSync(path.join(ROOT, 'qa'), path.join(bootstrapDir, 'qa')); + + // Create a minimal Node.js project with NO test framework + fs.writeFileSync(path.join(bootstrapDir, 'package.json'), JSON.stringify({ + name: 'test-bootstrap-app', + version: '1.0.0', + type: 'module', + }, null, 2)); + + // Create a simple app file with a bug + fs.writeFileSync(path.join(bootstrapDir, 'app.js'), ` +export function add(a, b) { return a + b; } +export function subtract(a, b) { return a - b; } +export function divide(a, b) { return a / b; } // BUG: no zero check +`); + + // Create a simple HTML page with a bug + fs.writeFileSync(path.join(bootstrapDir, 'index.html'), ` + +Bootstrap Test + +

Test App

+ Broken Link + + + +`); + + // Init git repo + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: bootstrapDir, stdio: 'pipe', timeout: 5000 }); + run('git', ['init']); + run('git', ['config', 'user.email', 'test@test.com']); + run('git', ['config', 'user.name', 'Test']); + run('git', ['add', '.']); + run('git', ['commit', '-m', 'initial commit']); + + // Serve from working directory + bootstrapServer = Bun.serve({ + port: 0, + hostname: '127.0.0.1', + fetch(req) { + const url = new URL(req.url); + let filePath = url.pathname === '/' ? '/index.html' : url.pathname; + filePath = filePath.replace(/^\//, ''); + const fullPath = path.join(bootstrapDir, filePath); + if (!fs.existsSync(fullPath)) { + return new Response('Not Found', { status: 404 }); + } + const content = fs.readFileSync(fullPath, 'utf-8'); + return new Response(content, { + headers: { 'Content-Type': 'text/html' }, + }); + }, + }); + }); + + afterAll(() => { + bootstrapServer?.stop(); + try { fs.rmSync(bootstrapDir, { recursive: true, force: true }); } catch {} + }); + + test('/qa bootstrap + regression test on zero-test project', async () => { + const serverUrl = `http://127.0.0.1:${bootstrapServer!.port}`; + + const result = await runSkillTest({ + prompt: `You have a browse binary at ${browseBin}. Assign it to B variable like: B="${browseBin}" + +Read the file qa/SKILL.md for the QA workflow instructions. + +Run a Quick-tier QA test on ${serverUrl} +The source code for this page is at ${bootstrapDir}/index.html — you can fix bugs there. +Do NOT use AskUserQuestion — for any AskUserQuestion prompts, choose the RECOMMENDED option automatically. +Write your report to ${bootstrapDir}/qa-reports/qa-report.md + +This project has NO test framework. When the bootstrap asks, pick vitest (option A). 
+This is a test+fix loop: find bugs, fix them, write regression tests, commit each fix.`, + workingDirectory: bootstrapDir, + maxTurns: 50, + allowedTools: ['Bash', 'Read', 'Write', 'Edit', 'Glob', 'Grep'], + timeout: 420_000, + testName: 'qa-bootstrap', + runId, + }); + + logCost('/qa bootstrap', result); + recordE2E('/qa bootstrap + regression test', 'Test Bootstrap E2E', result, { + passed: ['success', 'error_max_turns'].includes(result.exitReason), + }); + + expect(['success', 'error_max_turns']).toContain(result.exitReason); + + // Verify bootstrap created test infrastructure + const hasTestConfig = fs.existsSync(path.join(bootstrapDir, 'vitest.config.ts')) + || fs.existsSync(path.join(bootstrapDir, 'vitest.config.js')) + || fs.existsSync(path.join(bootstrapDir, 'jest.config.js')) + || fs.existsSync(path.join(bootstrapDir, 'jest.config.ts')); + console.log(`Test config created: ${hasTestConfig}`); + + const hasTestingMd = fs.existsSync(path.join(bootstrapDir, 'TESTING.md')); + console.log(`TESTING.md created: ${hasTestingMd}`); + + // Check for bootstrap commit + const gitLog = spawnSync('git', ['log', '--oneline', '--grep=bootstrap'], { + cwd: bootstrapDir, stdio: 'pipe', + }); + const bootstrapCommits = gitLog.stdout.toString().trim(); + console.log(`Bootstrap commits: ${bootstrapCommits || 'none'}`); + + // Check for regression test commits + const regressionLog = spawnSync('git', ['log', '--oneline', '--grep=test(qa)'], { + cwd: bootstrapDir, stdio: 'pipe', + }); + const regressionCommits = regressionLog.stdout.toString().trim(); + console.log(`Regression test commits: ${regressionCommits || 'none'}`); + + // Verify at least the bootstrap happened (fix commits are bonus) + const allCommits = spawnSync('git', ['log', '--oneline'], { + cwd: bootstrapDir, stdio: 'pipe', + }); + const totalCommits = allCommits.stdout.toString().trim().split('\n').length; + console.log(`Total commits: ${totalCommits}`); + expect(totalCommits).toBeGreaterThan(1); // At least 
initial + bootstrap + }, 420_000); +}); + +// --- Test Coverage Audit E2E --- + +describeIfSelected('Test Coverage Audit E2E', ['ship-coverage-audit'], () => { + let coverageDir: string; + + beforeAll(() => { + coverageDir = fs.mkdtempSync(path.join(os.tmpdir(), 'skill-e2e-coverage-')); + + // Copy ship skill files + copyDirSync(path.join(ROOT, 'ship'), path.join(coverageDir, 'ship')); + copyDirSync(path.join(ROOT, 'review'), path.join(coverageDir, 'review')); + + // Create a Node.js project WITH test framework but coverage gaps + fs.writeFileSync(path.join(coverageDir, 'package.json'), JSON.stringify({ + name: 'test-coverage-app', + version: '1.0.0', + type: 'module', + scripts: { test: 'echo "no tests yet"' }, + devDependencies: { vitest: '^1.0.0' }, + }, null, 2)); + + // Create vitest config + fs.writeFileSync(path.join(coverageDir, 'vitest.config.ts'), + `import { defineConfig } from 'vitest/config';\nexport default defineConfig({ test: {} });\n`); + + fs.writeFileSync(path.join(coverageDir, 'VERSION'), '0.1.0.0\n'); + fs.writeFileSync(path.join(coverageDir, 'CHANGELOG.md'), '# Changelog\n'); + + // Create source file with multiple code paths + fs.mkdirSync(path.join(coverageDir, 'src'), { recursive: true }); + fs.writeFileSync(path.join(coverageDir, 'src', 'billing.ts'), ` +export function processPayment(amount: number, currency: string) { + if (amount <= 0) throw new Error('Invalid amount'); + if (currency !== 'USD' && currency !== 'EUR') throw new Error('Unsupported currency'); + return { status: 'success', amount, currency }; +} + +export function refundPayment(paymentId: string, reason: string) { + if (!paymentId) throw new Error('Payment ID required'); + if (!reason) throw new Error('Reason required'); + return { status: 'refunded', paymentId, reason }; +} +`); + + // Create a test directory with ONE test (partial coverage) + fs.mkdirSync(path.join(coverageDir, 'test'), { recursive: true }); + fs.writeFileSync(path.join(coverageDir, 'test', 
'billing.test.ts'), ` +import { describe, test, expect } from 'vitest'; +import { processPayment } from '../src/billing'; + +describe('processPayment', () => { + test('processes valid payment', () => { + const result = processPayment(100, 'USD'); + expect(result.status).toBe('success'); + }); + // GAP: no test for invalid amount + // GAP: no test for unsupported currency + // GAP: refundPayment not tested at all +}); +`); + + // Init git repo with main branch + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: coverageDir, stdio: 'pipe', timeout: 5000 }); + run('git', ['init', '-b', 'main']); + run('git', ['config', 'user.email', 'test@test.com']); + run('git', ['config', 'user.name', 'Test']); + run('git', ['add', '.']); + run('git', ['commit', '-m', 'initial commit']); + + // Create feature branch + run('git', ['checkout', '-b', 'feature/billing']); + }); + + afterAll(() => { + try { fs.rmSync(coverageDir, { recursive: true, force: true }); } catch {} + }); + + test('/ship Step 3.4 produces coverage diagram', async () => { + const result = await runSkillTest({ + prompt: `Read the file ship/SKILL.md for the ship workflow instructions. + +You are on the feature/billing branch. The base branch is main. +This is a test project — there is no remote, no PR to create. + +ONLY run Step 3.4 (Test Coverage Audit) from the ship workflow. +Skip all other steps (tests, evals, review, version, changelog, commit, push, PR). + +The source code is in ${coverageDir}/src/billing.ts. +Existing tests are in ${coverageDir}/test/billing.test.ts. +The test command is: echo "tests pass" (mocked — just pretend tests pass). + +Produce the ASCII coverage diagram showing which code paths are tested and which have gaps. +Do NOT generate new tests — just produce the diagram and coverage summary. 
+Output the diagram directly.`, + workingDirectory: coverageDir, + maxTurns: 15, + allowedTools: ['Bash', 'Read', 'Write', 'Edit', 'Glob', 'Grep'], + timeout: 120_000, + testName: 'ship-coverage-audit', + runId, + }); + + logCost('/ship coverage audit', result); + recordE2E('/ship Step 3.4 coverage audit', 'Test Coverage Audit E2E', result, { + passed: result.exitReason === 'success', + }); + + expect(result.exitReason).toBe('success'); + + // Check output contains coverage diagram elements + const output = result.output || ''; + const hasGap = output.includes('GAP') || output.includes('gap') || output.includes('NO TEST'); + const hasTested = output.includes('TESTED') || output.includes('tested') || output.includes('✓'); + const hasCoverage = output.includes('COVERAGE') || output.includes('coverage') || output.includes('paths tested'); + + console.log(`Output has GAP markers: ${hasGap}`); + console.log(`Output has TESTED markers: ${hasTested}`); + console.log(`Output has coverage summary: ${hasCoverage}`); + + // At minimum, the agent should have read the source and test files + const readCalls = result.toolCalls.filter(tc => tc.tool === 'Read'); + expect(readCalls.length).toBeGreaterThan(0); + }, 180_000); +}); + // Module-level afterAll — finalize eval collector after all tests complete afterAll(async () => { if (evalCollector) { diff --git a/test/skill-llm-eval.test.ts b/test/skill-llm-eval.test.ts index 2889538c..f7d7c861 100644 --- a/test/skill-llm-eval.test.ts +++ b/test/skill-llm-eval.test.ts @@ -19,6 +19,7 @@ import { callJudge, judge } from './helpers/llm-judge'; import type { JudgeMeta } from './helpers/llm-judge'; import { EvalCollector } from './helpers/eval-store'; import { MODEL_PRICING } from '../lib/eval-cost'; +import { selectTests, detectBaseBranch, getChangedFiles, LLM_JUDGE_TOUCHFILES, GLOBAL_TOUCHFILES } from './helpers/touchfiles'; const ROOT = path.resolve(import.meta.dir, '..'); // Run when EVALS=1 is set (requires ANTHROPIC_API_KEY in env) 
@@ -44,8 +45,43 @@ function judgeCosts(meta: JudgeMeta) { }]; } -describeEval('LLM-as-judge quality evals', () => { - test('command reference table scores >= 4 on all dimensions', async () => { +// --- Diff-based test selection --- +let selectedTests: string[] | null = null; + +if (evalsEnabled && !process.env.EVALS_ALL) { + const baseBranch = process.env.EVALS_BASE + || detectBaseBranch(ROOT) + || 'main'; + const changedFiles = getChangedFiles(baseBranch, ROOT); + + if (changedFiles.length > 0) { + const selection = selectTests(changedFiles, LLM_JUDGE_TOUCHFILES, GLOBAL_TOUCHFILES); + selectedTests = selection.selected; + process.stderr.write(`\nLLM-judge selection (${selection.reason}): ${selection.selected.length}/${Object.keys(LLM_JUDGE_TOUCHFILES).length} tests\n`); + if (selection.skipped.length > 0) { + process.stderr.write(` Skipped: ${selection.skipped.join(', ')}\n`); + } + process.stderr.write('\n'); + } +} + +/** Wrap a describe block to skip if none of its tests are selected. */ +function describeIfSelected(name: string, testNames: string[], fn: () => void) { + const anySelected = selectedTests === null || testNames.some(t => selectedTests!.includes(t)); + (anySelected ? describeEval : describe.skip)(name, fn); +} + +/** Skip an individual test if not selected (for multi-test describe blocks). */ +function testIfSelected(testName: string, fn: () => Promise<void>, timeout: number) { + const shouldRun = selectedTests === null || selectedTests.includes(testName); + (shouldRun ? 
test : test.skip)(testName, fn, timeout); +} + +describeIfSelected('LLM-as-judge quality evals', [ + 'command reference table', 'snapshot flags reference', + 'browse/SKILL.md reference', 'setup block', 'regression vs baseline', +], () => { + testIfSelected('command reference table', async () => { const t0 = Date.now(); const content = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); const start = content.indexOf('## Command Reference'); @@ -72,7 +108,7 @@ describeEval('LLM-as-judge quality evals', () => { expect(scores.actionability).toBeGreaterThanOrEqual(4); }, 30_000); - test('snapshot flags section scores >= 4 on all dimensions', async () => { + testIfSelected('snapshot flags reference', async () => { const t0 = Date.now(); const content = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); const start = content.indexOf('## Snapshot System'); @@ -99,7 +135,7 @@ describeEval('LLM-as-judge quality evals', () => { expect(scores.actionability).toBeGreaterThanOrEqual(4); }, 30_000); - test('browse/SKILL.md overall scores >= 4', async () => { + testIfSelected('browse/SKILL.md reference', async () => { const t0 = Date.now(); const content = fs.readFileSync(path.join(ROOT, 'browse', 'SKILL.md'), 'utf-8'); const start = content.indexOf('## Snapshot Flags'); @@ -125,7 +161,7 @@ describeEval('LLM-as-judge quality evals', () => { expect(scores.actionability).toBeGreaterThanOrEqual(4); }, 30_000); - test('setup block scores >= 3 on actionability and clarity', async () => { + testIfSelected('setup block', async () => { const t0 = Date.now(); const content = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); const setupStart = content.indexOf('## SETUP'); @@ -153,7 +189,7 @@ describeEval('LLM-as-judge quality evals', () => { expect(scores.clarity).toBeGreaterThanOrEqual(3); }, 30_000); - test('regression check: compare branch vs baseline quality', async () => { + testIfSelected('regression vs baseline', async () => { const t0 = Date.now(); const generated = 
fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); const genStart = generated.indexOf('## Command Reference'); @@ -231,10 +267,10 @@ Scores are 1-5 overall quality.`); // --- Part 7: QA skill quality evals (C6) --- -describeEval('QA skill quality evals', () => { +describeIfSelected('QA skill quality evals', ['qa/SKILL.md workflow', 'qa/SKILL.md health rubric'], () => { const qaContent = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); - test('qa/SKILL.md workflow quality scores >= 4', async () => { + testIfSelected('qa/SKILL.md workflow', async () => { const t0 = Date.now(); const start = qaContent.indexOf('## Workflow'); const end = qaContent.indexOf('## Health Score Rubric'); @@ -278,7 +314,7 @@ ${section}`); expect(scores.actionability).toBeGreaterThanOrEqual(4); }, 30_000); - test('qa/SKILL.md health score rubric is unambiguous', async () => { + testIfSelected('qa/SKILL.md health rubric', async () => { const t0 = Date.now(); const start = qaContent.indexOf('## Health Score Rubric'); const section = qaContent.slice(start); @@ -323,8 +359,8 @@ ${section}`); // --- Part 7: Cross-skill consistency judge (C7) --- -describeEval('Cross-skill consistency evals', () => { - test('greptile-history patterns are consistent across all skills', async () => { +describeIfSelected('Cross-skill consistency evals', ['cross-skill greptile consistency'], () => { + testIfSelected('cross-skill greptile consistency', async () => { const t0 = Date.now(); const reviewContent = fs.readFileSync(path.join(ROOT, 'review', 'SKILL.md'), 'utf-8'); const shipContent = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); @@ -389,10 +425,10 @@ score (1-5): 5 = perfectly consistent, 1 = contradictory`); // --- Part 7: Baseline score pinning (C9) --- -describeEval('Baseline score pinning', () => { +describeIfSelected('Baseline score pinning', ['baseline score pinning'], () => { const baselinesPath = path.join(ROOT, 'test', 'fixtures', 'eval-baselines.json'); - test('LLM 
eval scores do not regress below baselines', async () => { + testIfSelected('baseline score pinning', async () => { const t0 = Date.now(); if (!fs.existsSync(baselinesPath)) { console.log('No baseline file found — skipping pinning check'); @@ -443,6 +479,210 @@ describeEval('Baseline score pinning', () => { }, 60_000); }); +// --- Workflow SKILL.md quality evals (10 new tests for 100% coverage) --- + +/** + * DRY helper for workflow SKILL.md judge tests. + * Extracts a section from a SKILL.md file and judges its quality as an agent workflow. + */ +async function runWorkflowJudge(opts: { + testName: string; + suite: string; + skillPath: string; + startMarker: string; + endMarker: string | null; + judgeContext: string; + judgeGoal: string; + thresholds?: { clarity: number; completeness: number; actionability: number }; +}) { + const t0 = Date.now(); + const defaults = { clarity: 4, completeness: 3, actionability: 4 }; + const thresholds = { ...defaults, ...opts.thresholds }; + + const content = fs.readFileSync(path.join(ROOT, opts.skillPath), 'utf-8'); + const startIdx = content.indexOf(opts.startMarker); + if (startIdx === -1) throw new Error(`Start marker not found in ${opts.skillPath}: "${opts.startMarker}"`); + + let section: string; + if (opts.endMarker) { + const endIdx = content.indexOf(opts.endMarker, startIdx); + if (endIdx === -1) throw new Error(`End marker not found in ${opts.skillPath}: "${opts.endMarker}"`); + section = content.slice(startIdx, endIdx); + } else { + section = content.slice(startIdx); + } + + const scores = await callJudge(`You are evaluating the quality of ${opts.judgeContext} for an AI coding agent. + +The agent reads this document to learn ${opts.judgeGoal}. It references external tools and files +that are documented separately — do NOT penalize for missing external definitions. + +Rate on three dimensions (1-5 scale): +- **clarity** (1-5): Can an agent follow the instructions without ambiguity? 
+- **completeness** (1-5): Are all steps, decision points, and outputs well-defined? +- **actionability** (1-5): Can an agent execute this workflow and produce the expected deliverables? + +Respond with ONLY valid JSON: +{"clarity": N, "completeness": N, "actionability": N, "reasoning": "brief explanation"} + +Here is the document to evaluate: + +${section}`); + + console.log(`${opts.testName} scores:`, JSON.stringify(scores, null, 2)); + + evalCollector?.addTest({ + name: opts.testName, + suite: opts.suite, + tier: 'llm-judge', + passed: scores.clarity >= thresholds.clarity && scores.completeness >= thresholds.completeness && scores.actionability >= thresholds.actionability, + duration_ms: Date.now() - t0, + cost_usd: 0.02, + judge_scores: { clarity: scores.clarity, completeness: scores.completeness, actionability: scores.actionability }, + judge_reasoning: scores.reasoning, + }); + + expect(scores.clarity).toBeGreaterThanOrEqual(thresholds.clarity); + expect(scores.completeness).toBeGreaterThanOrEqual(thresholds.completeness); + expect(scores.actionability).toBeGreaterThanOrEqual(thresholds.actionability); +} + +// Block 1: Ship & Release skills +describeIfSelected('Ship & Release skill evals', ['ship/SKILL.md workflow', 'document-release/SKILL.md workflow'], () => { + testIfSelected('ship/SKILL.md workflow', async () => { + await runWorkflowJudge({ + testName: 'ship/SKILL.md workflow', + suite: 'Ship & Release skill evals', + skillPath: 'ship/SKILL.md', + startMarker: '# Ship:', + endMarker: '## Important Rules', + judgeContext: 'a ship/release workflow document', + judgeGoal: 'how to create a PR: merge base branch, run tests, review diff, bump version, update changelog, push, and open PR', + }); + }, 30_000); + + testIfSelected('document-release/SKILL.md workflow', async () => { + await runWorkflowJudge({ + testName: 'document-release/SKILL.md workflow', + suite: 'Ship & Release skill evals', + skillPath: 'document-release/SKILL.md', + startMarker: '# Document 
Release:', + endMarker: '## Important Rules', + judgeContext: 'a post-ship documentation update workflow', + judgeGoal: 'how to audit and update project documentation after code ships: README, ARCHITECTURE, CONTRIBUTING, CLAUDE.md, CHANGELOG, TODOS', + }); + }, 30_000); +}); + +// Block 2: Plan Review skills +describeIfSelected('Plan Review skill evals', [ + 'plan-ceo-review/SKILL.md modes', 'plan-eng-review/SKILL.md sections', 'plan-design-review/SKILL.md passes', +], () => { + testIfSelected('plan-ceo-review/SKILL.md modes', async () => { + await runWorkflowJudge({ + testName: 'plan-ceo-review/SKILL.md modes', + suite: 'Plan Review skill evals', + skillPath: 'plan-ceo-review/SKILL.md', + startMarker: '## Step 0: Nuclear Scope Challenge', + endMarker: '## Review Sections', + judgeContext: 'a CEO/founder plan review framework with 4 scope modes', + judgeGoal: 'how to conduct a CEO-perspective plan review: challenge scope, select a mode (Expansion, Selective Expansion, Hold Scope, Reduction), then review sections interactively', + }); + }, 30_000); + + testIfSelected('plan-eng-review/SKILL.md sections', async () => { + await runWorkflowJudge({ + testName: 'plan-eng-review/SKILL.md sections', + suite: 'Plan Review skill evals', + skillPath: 'plan-eng-review/SKILL.md', + startMarker: '## BEFORE YOU START:', + endMarker: '## CRITICAL RULE', + judgeContext: 'an engineering plan review framework with 4 review sections', + judgeGoal: 'how to review a plan for architecture quality, code quality, test coverage, and performance — walking through each section interactively with AskUserQuestion', + }); + }, 30_000); + + testIfSelected('plan-design-review/SKILL.md passes', async () => { + await runWorkflowJudge({ + testName: 'plan-design-review/SKILL.md passes', + suite: 'Plan Review skill evals', + skillPath: 'plan-design-review/SKILL.md', + startMarker: '## Review Sections', + endMarker: '## CRITICAL RULE', + judgeContext: 'a design plan review framework with 7 review 
passes', + judgeGoal: 'how to review a plan for design quality using a 0-10 rating method: rate each dimension, explain what a 10 looks like, edit the plan to fix gaps, then re-rate', + }); + }, 30_000); +}); + +// Block 3: Design skills +describeIfSelected('Design skill evals', ['design-review/SKILL.md fix loop', 'design-consultation/SKILL.md research'], () => { + testIfSelected('design-review/SKILL.md fix loop', async () => { + await runWorkflowJudge({ + testName: 'design-review/SKILL.md fix loop', + suite: 'Design skill evals', + skillPath: 'design-review/SKILL.md', + startMarker: '## Phase 7:', + endMarker: '## Additional Rules', + judgeContext: 'a design audit triage and fix loop workflow', + judgeGoal: 'how to triage design issues by severity, fix them atomically in source code, commit each fix, and re-verify with before/after screenshots', + }); + }, 30_000); + + testIfSelected('design-consultation/SKILL.md research', async () => { + await runWorkflowJudge({ + testName: 'design-consultation/SKILL.md research', + suite: 'Design skill evals', + skillPath: 'design-consultation/SKILL.md', + startMarker: '## Phase 1:', + endMarker: '## Phase 4:', + judgeContext: 'a design consultation research and proposal workflow', + judgeGoal: 'how to gather product context, research the competitive landscape, and produce a complete design system proposal with typography, color, spacing, and motion specifications', + }); + }, 30_000); +}); + +// Block 4: Other skills +describeIfSelected('Other skill evals', [ + 'retro/SKILL.md instructions', 'qa-only/SKILL.md workflow', 'gstack-upgrade/SKILL.md upgrade flow', +], () => { + testIfSelected('retro/SKILL.md instructions', async () => { + await runWorkflowJudge({ + testName: 'retro/SKILL.md instructions', + suite: 'Other skill evals', + skillPath: 'retro/SKILL.md', + startMarker: '## Instructions', + endMarker: '## Compare Mode', + judgeContext: 'an engineering retrospective data gathering and analysis workflow', + judgeGoal: 'how 
to gather git metrics (commit history, test counts, work patterns), analyze them, produce a structured retro report with praise, growth areas, and trend tracking', + }); + }, 30_000); + + testIfSelected('qa-only/SKILL.md workflow', async () => { + await runWorkflowJudge({ + testName: 'qa-only/SKILL.md workflow', + suite: 'Other skill evals', + skillPath: 'qa-only/SKILL.md', + startMarker: '## Workflow', + endMarker: '## Important Rules', + judgeContext: 'a report-only QA testing workflow', + judgeGoal: 'how to systematically QA test a web application and produce a structured report with health score, screenshots, and repro steps — without fixing anything', + }); + }, 30_000); + + testIfSelected('gstack-upgrade/SKILL.md upgrade flow', async () => { + await runWorkflowJudge({ + testName: 'gstack-upgrade/SKILL.md upgrade flow', + suite: 'Other skill evals', + skillPath: 'gstack-upgrade/SKILL.md', + startMarker: '## Inline upgrade flow', + endMarker: '## Standalone usage', + judgeContext: 'a version upgrade detection and execution workflow', + judgeGoal: 'how to detect install type, compare versions, back up current install, upgrade via git or fresh clone, run setup, and show what changed', + }); + }, 30_000); +}); + // Module-level afterAll — finalize eval collector after all tests complete afterAll(async () => { if (evalCollector) { diff --git a/test/skill-validation.test.ts b/test/skill-validation.test.ts index 83cdca6e..bd0e205b 100644 --- a/test/skill-validation.test.ts +++ b/test/skill-validation.test.ts @@ -72,15 +72,29 @@ describe('SKILL.md command validation', () => { expect(result.snapshotFlagErrors).toHaveLength(0); }); - test('all $B commands in qa-design-review/SKILL.md are valid browse commands', () => { - const skill = path.join(ROOT, 'qa-design-review', 'SKILL.md'); + test('all $B commands in design-review/SKILL.md are valid browse commands', () => { + const skill = path.join(ROOT, 'design-review', 'SKILL.md'); if (!fs.existsSync(skill)) return; const 
result = validateSkill(skill); expect(result.invalid).toHaveLength(0); }); - test('all snapshot flags in qa-design-review/SKILL.md are valid', () => { - const skill = path.join(ROOT, 'qa-design-review', 'SKILL.md'); + test('all snapshot flags in design-review/SKILL.md are valid', () => { + const skill = path.join(ROOT, 'design-review', 'SKILL.md'); + if (!fs.existsSync(skill)) return; + const result = validateSkill(skill); + expect(result.snapshotFlagErrors).toHaveLength(0); + }); + + test('all $B commands in design-consultation/SKILL.md are valid browse commands', () => { + const skill = path.join(ROOT, 'design-consultation', 'SKILL.md'); + if (!fs.existsSync(skill)) return; + const result = validateSkill(skill); + expect(result.invalid).toHaveLength(0); + }); + + test('all snapshot flags in design-consultation/SKILL.md are valid', () => { + const skill = path.join(ROOT, 'design-consultation', 'SKILL.md'); if (!fs.existsSync(skill)) return; const result = validateSkill(skill); expect(result.snapshotFlagErrors).toHaveLength(0); @@ -205,7 +219,7 @@ describe('Update check preamble', () => { 'plan-ceo-review/SKILL.md', 'plan-eng-review/SKILL.md', 'retro/SKILL.md', 'plan-design-review/SKILL.md', - 'qa-design-review/SKILL.md', + 'design-review/SKILL.md', 'design-consultation/SKILL.md', 'document-release/SKILL.md', ]; @@ -430,6 +444,8 @@ describe('No hardcoded branch names in SKILL templates', () => { 'plan-ceo-review/SKILL.md.tmpl', 'retro/SKILL.md.tmpl', 'document-release/SKILL.md.tmpl', + 'plan-eng-review/SKILL.md.tmpl', + 'plan-design-review/SKILL.md.tmpl', ]; // Patterns that indicate hardcoded 'main' in git commands @@ -513,7 +529,7 @@ describe('v0.4.1 preamble features', () => { 'plan-ceo-review/SKILL.md', 'plan-eng-review/SKILL.md', 'retro/SKILL.md', 'plan-design-review/SKILL.md', - 'qa-design-review/SKILL.md', + 'design-review/SKILL.md', 'design-consultation/SKILL.md', 'document-release/SKILL.md', ]; @@ -543,6 +559,10 @@ describe('Contributor mode preamble 
structure', () => { 'ship/SKILL.md', 'review/SKILL.md', 'plan-ceo-review/SKILL.md', 'plan-eng-review/SKILL.md', 'retro/SKILL.md', + 'plan-design-review/SKILL.md', + 'design-review/SKILL.md', + 'design-consultation/SKILL.md', + 'document-release/SKILL.md', ]; for (const skill of skillsWithPreamble) { @@ -617,6 +637,43 @@ describe('Enum & Value Completeness in review checklist', () => { }); }); +// --- Completeness Principle spot-check --- + +describe('Completeness Principle in generated SKILL.md files', () => { + const skillsWithPreamble = [ + 'SKILL.md', 'browse/SKILL.md', 'qa/SKILL.md', + 'qa-only/SKILL.md', + 'setup-browser-cookies/SKILL.md', + 'ship/SKILL.md', 'review/SKILL.md', + 'plan-ceo-review/SKILL.md', 'plan-eng-review/SKILL.md', + 'retro/SKILL.md', + 'plan-design-review/SKILL.md', + 'design-review/SKILL.md', + 'design-consultation/SKILL.md', + 'document-release/SKILL.md', + ]; + + for (const skill of skillsWithPreamble) { + test(`${skill} contains Completeness Principle section`, () => { + const content = fs.readFileSync(path.join(ROOT, skill), 'utf-8'); + expect(content).toContain('Completeness Principle'); + expect(content).toContain('Boil the Lake'); + }); + } + + test('Completeness Principle includes compression table', () => { + const content = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); + expect(content).toContain('CC+gstack'); + expect(content).toContain('Compression'); + }); + + test('Completeness Principle includes anti-patterns', () => { + const content = fs.readFileSync(path.join(ROOT, 'SKILL.md'), 'utf-8'); + expect(content).toContain('BAD:'); + expect(content).toContain('Anti-patterns'); + }); +}); + // --- Part 7: Planted-bug fixture validation (A4) --- describe('Planted-bug fixture validation', () => { @@ -665,3 +722,297 @@ describe('Planted-bug fixture validation', () => { expect(content).toContain('update_column'); }); }); + +// --- CEO review mode validation --- + +describe('CEO review mode validation', () => { + const content 
= fs.readFileSync(path.join(ROOT, 'plan-ceo-review', 'SKILL.md'), 'utf-8'); + + test('has all four CEO review modes defined', () => { + const modes = ['SCOPE EXPANSION', 'SELECTIVE EXPANSION', 'HOLD SCOPE', 'SCOPE REDUCTION']; + for (const mode of modes) { + expect(content).toContain(mode); + } + }); + + test('has CEO plan persistence step', () => { + expect(content).toContain('ceo-plans'); + expect(content).toContain('status: ACTIVE'); + }); + + test('has docs/designs promotion section', () => { + expect(content).toContain('docs/designs'); + expect(content).toContain('PROMOTED'); + }); + + test('mode quick reference has four columns', () => { + expect(content).toContain('EXPANSION'); + expect(content).toContain('SELECTIVE'); + expect(content).toContain('HOLD SCOPE'); + expect(content).toContain('REDUCTION'); + }); +}); + +// --- gstack-slug helper --- + +describe('gstack-slug', () => { + const SLUG_BIN = path.join(ROOT, 'bin', 'gstack-slug'); + + test('binary exists and is executable', () => { + expect(fs.existsSync(SLUG_BIN)).toBe(true); + const stat = fs.statSync(SLUG_BIN); + expect(stat.mode & 0o111).toBeGreaterThan(0); + }); + + test('outputs SLUG and BRANCH lines in a git repo', () => { + const result = Bun.spawnSync([SLUG_BIN], { cwd: ROOT, stdout: 'pipe', stderr: 'pipe' }); + expect(result.exitCode).toBe(0); + const output = result.stdout.toString(); + expect(output).toContain('SLUG='); + expect(output).toContain('BRANCH='); + }); + + test('SLUG does not contain forward slashes', () => { + const result = Bun.spawnSync([SLUG_BIN], { cwd: ROOT, stdout: 'pipe', stderr: 'pipe' }); + const slug = result.stdout.toString().match(/SLUG=(.*)/)?.[1] ?? ''; + expect(slug).not.toContain('/'); + expect(slug.length).toBeGreaterThan(0); + }); + + test('BRANCH does not contain forward slashes', () => { + const result = Bun.spawnSync([SLUG_BIN], { cwd: ROOT, stdout: 'pipe', stderr: 'pipe' }); + const branch = result.stdout.toString().match(/BRANCH=(.*)/)?.[1] ?? 
''; + expect(branch).not.toContain('/'); + expect(branch.length).toBeGreaterThan(0); + }); + + test('output is eval-compatible (KEY=VALUE format)', () => { + const result = Bun.spawnSync([SLUG_BIN], { cwd: ROOT, stdout: 'pipe', stderr: 'pipe' }); + const lines = result.stdout.toString().trim().split('\n'); + expect(lines.length).toBe(2); + expect(lines[0]).toMatch(/^SLUG=.+/); + expect(lines[1]).toMatch(/^BRANCH=.+/); + }); +}); + +// --- Test Bootstrap validation --- + +describe('Test Bootstrap ({{TEST_BOOTSTRAP}}) integration', () => { + test('TEST_BOOTSTRAP resolver produces valid content', () => { + const qaContent = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(qaContent).toContain('Test Framework Bootstrap'); + expect(qaContent).toContain('RUNTIME:ruby'); + expect(qaContent).toContain('RUNTIME:node'); + expect(qaContent).toContain('RUNTIME:python'); + expect(qaContent).toContain('no-test-bootstrap'); + expect(qaContent).toContain('BOOTSTRAP_DECLINED'); + }); + + test('TEST_BOOTSTRAP appears in qa/SKILL.md', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Test Framework Bootstrap'); + expect(content).toContain('TESTING.md'); + expect(content).toContain('CLAUDE.md'); + }); + + test('TEST_BOOTSTRAP appears in ship/SKILL.md', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Test Framework Bootstrap'); + expect(content).toContain('Step 2.5'); + }); + + test('TEST_BOOTSTRAP appears in design-review/SKILL.md', () => { + const content = fs.readFileSync(path.join(ROOT, 'design-review', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Test Framework Bootstrap'); + }); + + test('TEST_BOOTSTRAP does NOT appear in qa-only/SKILL.md', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa-only', 'SKILL.md'), 'utf-8'); + expect(content).not.toContain('Test Framework Bootstrap'); + // But should have the 
recommendation note + expect(content).toContain('No test framework detected'); + expect(content).toContain('Run `/qa` to bootstrap'); + }); + + test('bootstrap includes framework knowledge table', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('vitest'); + expect(content).toContain('minitest'); + expect(content).toContain('pytest'); + expect(content).toContain('cargo test'); + expect(content).toContain('phpunit'); + expect(content).toContain('ExUnit'); + }); + + test('bootstrap includes CI/CD pipeline generation', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('.github/workflows/test.yml'); + expect(content).toContain('GitHub Actions'); + }); + + test('bootstrap includes first real tests step', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('First real tests'); + expect(content).toContain('git log --since=30.days'); + expect(content).toContain('Prioritize by risk'); + }); + + test('bootstrap includes vibe coding philosophy', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('vibe coding'); + expect(content).toContain('100% test coverage'); + }); + + test('WebSearch is in allowed-tools for qa, ship, design-review', () => { + const qa = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + const ship = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + const qaDesign = fs.readFileSync(path.join(ROOT, 'design-review', 'SKILL.md'), 'utf-8'); + expect(qa).toContain('WebSearch'); + expect(ship).toContain('WebSearch'); + expect(qaDesign).toContain('WebSearch'); + }); +}); + +// --- Phase 8e.5 regression test validation --- + +describe('Phase 8e.5 regression test generation', () => { + test('qa/SKILL.md contains Phase 8e.5', () => { + const content = fs.readFileSync(path.join(ROOT, 
'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('8e.5. Regression Test'); + expect(content).toContain('test(qa): regression test'); + expect(content).toContain('WTF-likelihood exclusion'); + }); + + test('qa/SKILL.md Rule 13 is amended for regression tests', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Only modify tests when generating regression tests in Phase 8e.5'); + expect(content).not.toContain('Never modify tests or CI configuration'); + }); + + test('design-review has CSS-aware Phase 8e.5 variant', () => { + const content = fs.readFileSync(path.join(ROOT, 'design-review', 'SKILL.md'), 'utf-8'); + expect(content).toContain('8e.5. Regression Test (design-review variant)'); + expect(content).toContain('CSS-only'); + expect(content).toContain('test(design): regression test'); + }); + + test('regression test includes full attribution comment format', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('// Regression: ISSUE-NNN'); + expect(content).toContain('// Found by /qa on'); + expect(content).toContain('// Report: .gstack/qa-reports/'); + }); + + test('regression test uses auto-incrementing names', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'SKILL.md'), 'utf-8'); + expect(content).toContain('auto-incrementing'); + expect(content).toContain('max number + 1'); + }); +}); + +// --- Step 3.4 coverage audit validation --- + +describe('Step 3.4 test coverage audit', () => { + test('ship/SKILL.md contains Step 3.4', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Step 3.4: Test Coverage Audit'); + expect(content).toContain('CODE PATH COVERAGE'); + }); + + test('Step 3.4 includes quality scoring rubric', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('★★★'); + 
expect(content).toContain('★★'); + expect(content).toContain('edge cases AND error paths'); + expect(content).toContain('happy path only'); + }); + + test('Step 3.4 includes before/after test count', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Count test files before'); + expect(content).toContain('Count test files after'); + }); + + test('ship PR body includes Test Coverage section', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('## Test Coverage'); + }); + + test('ship rules include test generation rule', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Step 3.4 generates coverage tests'); + expect(content).toContain('Never commit failing tests'); + }); + + test('Step 3.4 includes vibe coding philosophy', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('vibe coding becomes yolo coding'); + }); + + test('Step 3.4 traces actual codepaths, not just syntax', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Trace every codepath'); + expect(content).toContain('Trace data flow'); + expect(content).toContain('Diagram the execution'); + }); + + test('Step 3.4 maps user flows and interaction edge cases', () => { + const content = fs.readFileSync(path.join(ROOT, 'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Map user flows'); + expect(content).toContain('Interaction edge cases'); + expect(content).toContain('Double-click'); + expect(content).toContain('Navigate away'); + expect(content).toContain('Error states the user can see'); + expect(content).toContain('Empty/zero/boundary states'); + }); + + test('Step 3.4 diagram includes USER FLOW COVERAGE section', () => { + const content = fs.readFileSync(path.join(ROOT, 
'ship', 'SKILL.md'), 'utf-8'); + expect(content).toContain('USER FLOW COVERAGE'); + expect(content).toContain('Code paths:'); + expect(content).toContain('User flows:'); + }); +}); + +// --- Retro test health validation --- + +describe('Retro test health tracking', () => { + test('retro/SKILL.md has test health data gathering commands', () => { + const content = fs.readFileSync(path.join(ROOT, 'retro', 'SKILL.md'), 'utf-8'); + expect(content).toContain('# 10. Test file count'); + expect(content).toContain('# 11. Regression test commits'); + expect(content).toContain('# 12. Test files changed'); + }); + + test('retro/SKILL.md has Test Health metrics row', () => { + const content = fs.readFileSync(path.join(ROOT, 'retro', 'SKILL.md'), 'utf-8'); + expect(content).toContain('Test Health'); + expect(content).toContain('regression tests'); + }); + + test('retro/SKILL.md has Test Health narrative section', () => { + const content = fs.readFileSync(path.join(ROOT, 'retro', 'SKILL.md'), 'utf-8'); + expect(content).toContain('### Test Health'); + expect(content).toContain('Total test files'); + expect(content).toContain('vibe coding safe'); + }); + + test('retro JSON schema includes test_health field', () => { + const content = fs.readFileSync(path.join(ROOT, 'retro', 'SKILL.md'), 'utf-8'); + expect(content).toContain('test_health'); + expect(content).toContain('total_test_files'); + expect(content).toContain('regression_test_commits'); + }); +}); + +// --- QA report template regression tests section --- + +describe('QA report template', () => { + test('qa-report-template.md has Regression Tests section', () => { + const content = fs.readFileSync(path.join(ROOT, 'qa', 'templates', 'qa-report-template.md'), 'utf-8'); + expect(content).toContain('## Regression Tests'); + expect(content).toContain('committed / deferred / skipped'); + expect(content).toContain('### Deferred Tests'); + expect(content).toContain('**Precondition:**'); + }); +}); diff --git a/test/touchfiles.test.ts 
b/test/touchfiles.test.ts new file mode 100644 index 00000000..48613d64 --- /dev/null +++ b/test/touchfiles.test.ts @@ -0,0 +1,253 @@ +/** + * Unit tests for diff-based test selection. + * Free (no API calls), runs with `bun test`. + */ + +import { describe, test, expect } from 'bun:test'; +import { spawnSync } from 'child_process'; +import * as fs from 'fs'; +import * as path from 'path'; +import * as os from 'os'; +import { + matchGlob, + selectTests, + detectBaseBranch, + E2E_TOUCHFILES, + LLM_JUDGE_TOUCHFILES, + GLOBAL_TOUCHFILES, +} from './helpers/touchfiles'; + +const ROOT = path.resolve(import.meta.dir, '..'); + +// --- matchGlob --- + +describe('matchGlob', () => { + test('** matches any depth of path segments', () => { + expect(matchGlob('browse/src/commands.ts', 'browse/src/**')).toBe(true); + expect(matchGlob('browse/src/deep/nested/file.ts', 'browse/src/**')).toBe(true); + expect(matchGlob('browse/src/cli.ts', 'browse/src/**')).toBe(true); + }); + + test('** does not match unrelated paths', () => { + expect(matchGlob('browse/src/commands.ts', 'qa/**')).toBe(false); + expect(matchGlob('review/SKILL.md', 'qa/**')).toBe(false); + }); + + test('exact match works', () => { + expect(matchGlob('SKILL.md', 'SKILL.md')).toBe(true); + expect(matchGlob('SKILL.md.tmpl', 'SKILL.md')).toBe(false); + expect(matchGlob('qa/SKILL.md', 'SKILL.md')).toBe(false); + }); + + test('* matches within a single segment', () => { + expect(matchGlob('test/fixtures/review-eval-enum.rb', 'test/fixtures/review-eval-enum*.rb')).toBe(true); + expect(matchGlob('test/fixtures/review-eval-enum-diff.rb', 'test/fixtures/review-eval-enum*.rb')).toBe(true); + expect(matchGlob('test/fixtures/review-eval-vuln.rb', 'test/fixtures/review-eval-enum*.rb')).toBe(false); + }); + + test('dots in patterns are escaped correctly', () => { + expect(matchGlob('SKILL.md', 'SKILL.md')).toBe(true); + expect(matchGlob('SKILLxmd', 'SKILL.md')).toBe(false); + }); + + test('** at end matches files in the 
directory', () => { + expect(matchGlob('qa/SKILL.md', 'qa/**')).toBe(true); + expect(matchGlob('qa/SKILL.md.tmpl', 'qa/**')).toBe(true); + expect(matchGlob('qa/templates/report.md', 'qa/**')).toBe(true); + }); +}); + +// --- selectTests --- + +describe('selectTests', () => { + test('browse/src change selects browse and qa tests', () => { + const result = selectTests(['browse/src/commands.ts'], E2E_TOUCHFILES); + expect(result.selected).toContain('browse-basic'); + expect(result.selected).toContain('browse-snapshot'); + expect(result.selected).toContain('qa-quick'); + expect(result.selected).toContain('qa-fix-loop'); + expect(result.selected).toContain('design-review-fix'); + expect(result.reason).toBe('diff'); + // Should NOT include unrelated tests + expect(result.selected).not.toContain('plan-ceo-review'); + expect(result.selected).not.toContain('retro'); + expect(result.selected).not.toContain('document-release'); + }); + + test('skill-specific change selects only that skill and related tests', () => { + const result = selectTests(['plan-ceo-review/SKILL.md'], E2E_TOUCHFILES); + expect(result.selected).toContain('plan-ceo-review'); + expect(result.selected).toContain('plan-ceo-review-selective'); + expect(result.selected.length).toBe(2); + expect(result.skipped.length).toBe(Object.keys(E2E_TOUCHFILES).length - 2); + }); + + test('global touchfile triggers ALL tests', () => { + const result = selectTests(['test/helpers/session-runner.ts'], E2E_TOUCHFILES); + expect(result.selected.length).toBe(Object.keys(E2E_TOUCHFILES).length); + expect(result.skipped.length).toBe(0); + expect(result.reason).toContain('global'); + }); + + test('gen-skill-docs.ts is a global touchfile', () => { + const result = selectTests(['scripts/gen-skill-docs.ts'], E2E_TOUCHFILES); + expect(result.selected.length).toBe(Object.keys(E2E_TOUCHFILES).length); + expect(result.reason).toContain('global'); + }); + + test('unrelated file selects nothing', () => { + const result = 
selectTests(['README.md'], E2E_TOUCHFILES); + expect(result.selected).toEqual([]); + expect(result.skipped.length).toBe(Object.keys(E2E_TOUCHFILES).length); + }); + + test('empty changed files selects nothing', () => { + const result = selectTests([], E2E_TOUCHFILES); + expect(result.selected).toEqual([]); + }); + + test('multiple changed files union their selections', () => { + const result = selectTests( + ['plan-ceo-review/SKILL.md', 'retro/SKILL.md.tmpl'], + E2E_TOUCHFILES, + ); + expect(result.selected).toContain('plan-ceo-review'); + expect(result.selected).toContain('plan-ceo-review-selective'); + expect(result.selected).toContain('retro'); + expect(result.selected).toContain('retro-base-branch'); + expect(result.selected.length).toBe(4); + }); + + test('works with LLM_JUDGE_TOUCHFILES', () => { + const result = selectTests(['qa/SKILL.md'], LLM_JUDGE_TOUCHFILES); + expect(result.selected).toContain('qa/SKILL.md workflow'); + expect(result.selected).toContain('qa/SKILL.md health rubric'); + expect(result.selected.length).toBe(2); + }); + + test('SKILL.md.tmpl root template only selects root-dependent tests', () => { + const result = selectTests(['SKILL.md.tmpl'], E2E_TOUCHFILES); + // Should select the 7 tests that depend on root SKILL.md + expect(result.selected).toContain('skillmd-setup-discovery'); + expect(result.selected).toContain('contributor-mode'); + expect(result.selected).toContain('session-awareness'); + // Should NOT select unrelated tests + expect(result.selected).not.toContain('plan-ceo-review'); + expect(result.selected).not.toContain('retro'); + }); + + test('global touchfiles work for LLM-judge tests too', () => { + const result = selectTests(['scripts/gen-skill-docs.ts'], LLM_JUDGE_TOUCHFILES); + expect(result.selected.length).toBe(Object.keys(LLM_JUDGE_TOUCHFILES).length); + }); +}); + +// --- detectBaseBranch --- + +describe('detectBaseBranch', () => { + test('detects local main branch', () => { + const dir = 
fs.mkdtempSync(path.join(os.tmpdir(), 'touchfiles-test-')); + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: dir, stdio: 'pipe', timeout: 5000 }); + + run('git', ['init']); + run('git', ['config', 'user.email', 'test@test.com']); + run('git', ['config', 'user.name', 'Test']); + fs.writeFileSync(path.join(dir, 'test.txt'), 'hello\n'); + run('git', ['add', '.']); + run('git', ['commit', '-m', 'init']); + + const result = detectBaseBranch(dir); + // Should find 'main' (or 'master' depending on git default) + expect(result).toMatch(/^(main|master)$/); + + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} + }); + + test('returns null for empty repo with no branches', () => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'touchfiles-test-')); + const run = (cmd: string, args: string[]) => + spawnSync(cmd, args, { cwd: dir, stdio: 'pipe', timeout: 5000 }); + + run('git', ['init']); + // No commits = no branches + const result = detectBaseBranch(dir); + expect(result).toBeNull(); + + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} + }); + + test('returns null for non-git directory', () => { + const dir = fs.mkdtempSync(path.join(os.tmpdir(), 'touchfiles-test-')); + const result = detectBaseBranch(dir); + expect(result).toBeNull(); + + try { fs.rmSync(dir, { recursive: true, force: true }); } catch {} + }); +}); + +// --- Completeness: every testName in skill-e2e.test.ts has a TOUCHFILES entry --- + +describe('TOUCHFILES completeness', () => { + test('every E2E testName has a TOUCHFILES entry', () => { + const e2eContent = fs.readFileSync( + path.join(ROOT, 'test', 'skill-e2e.test.ts'), + 'utf-8', + ); + + // Extract all testName: 'value' entries + const testNameRegex = /testName:\s*['"`]([^'"`]+)['"`]/g; + const testNames: string[] = []; + let match; + while ((match = testNameRegex.exec(e2eContent)) !== null) { + let name = match[1]; + // Handle template literals like `qa-${label}` — these expand to + // 
qa-b6-static, qa-b7-spa, qa-b8-checkout + if (name.includes('${')) continue; // skip template literals, check expanded forms below + testNames.push(name); + } + + // Add the template-expanded testNames from runPlantedBugEval calls + const plantedBugRegex = /runPlantedBugEval\([^,]+,\s*[^,]+,\s*['"`]([^'"`]+)['"`]\)/g; + while ((match = plantedBugRegex.exec(e2eContent)) !== null) { + testNames.push(`qa-${match[1]}`); + } + + expect(testNames.length).toBeGreaterThan(0); + + const missing = testNames.filter(name => !(name in E2E_TOUCHFILES)); + if (missing.length > 0) { + throw new Error( + `E2E tests missing TOUCHFILES entries: ${missing.join(', ')}\n` + + `Add these to E2E_TOUCHFILES in test/helpers/touchfiles.ts`, + ); + } + }); + + test('every LLM-judge test has a TOUCHFILES entry', () => { + const llmContent = fs.readFileSync( + path.join(ROOT, 'test', 'skill-llm-eval.test.ts'), + 'utf-8', + ); + + // Extract test names from addTest({ name: '...' }) calls + const nameRegex = /name:\s*['"`]([^'"`]+)['"`]/g; + const testNames: string[] = []; + let match; + while ((match = nameRegex.exec(llmContent)) !== null) { + testNames.push(match[1]); + } + + // Deduplicate (some tests call addTest with the same name) + const unique = [...new Set(testNames)]; + expect(unique.length).toBeGreaterThan(0); + + const missing = unique.filter(name => !(name in LLM_JUDGE_TOUCHFILES)); + if (missing.length > 0) { + throw new Error( + `LLM-judge tests missing TOUCHFILES entries: ${missing.join(', ')}\n` + + `Add these to LLM_JUDGE_TOUCHFILES in test/helpers/touchfiles.ts`, + ); + } + }); +});