mirror of
https://github.com/garrytan/gstack.git
synced 2026-05-08 06:26:45 +02:00
merge: resolve conflicts with origin/main
Accept main's generated SKILL.md files (will be regenerated by bun run build). Resolve gen-skill-docs.ts: keep community tier 3-option prompt from branch, keep error context fields from branch, add PLAN MODE EXCEPTION from main. Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,239 @@
|
||||
/**
|
||||
* Shared helpers for E2E test files.
|
||||
*
|
||||
* Extracted from the monolithic skill-e2e.test.ts to support splitting
|
||||
* tests across multiple files by category.
|
||||
*/
|
||||
|
||||
import { describe, test, afterAll } from 'bun:test';
|
||||
import type { SkillTestResult } from './session-runner';
|
||||
import { EvalCollector, judgePassed } from './eval-store';
|
||||
import type { EvalTestEntry } from './eval-store';
|
||||
import { selectTests, detectBaseBranch, getChangedFiles, E2E_TOUCHFILES, GLOBAL_TOUCHFILES } from './touchfiles';
|
||||
import { spawnSync } from 'child_process';
|
||||
import * as fs from 'fs';
|
||||
import * as path from 'path';
|
||||
import * as os from 'os';
|
||||
|
||||
// Repo root — two directory levels up from this helper file (test/helpers/).
export const ROOT = path.resolve(import.meta.dir, '..', '..');

// Skip unless EVALS=1. Session runner strips CLAUDE* env vars to avoid nested session issues.
//
// BLAME PROTOCOL: When an eval fails, do NOT claim "pre-existing" or "not related
// to our changes" without proof. Run the same eval on main to verify. These tests
// have invisible couplings — preamble text, SKILL.md content, and timing all affect
// agent behavior. See CLAUDE.md "E2E eval failure blame protocol" for details.
export const evalsEnabled = !!process.env.EVALS;

// --- Diff-based test selection ---
// When EVALS_ALL is not set, only run tests whose touchfiles were modified.
// Set EVALS_ALL=1 to force all tests. Set EVALS_BASE to override base branch.
// Mutable on purpose: the module-init blocks below narrow it at import time.
export let selectedTests: string[] | null = null; // null = run all

// EVALS_FAST: skip the 8 slowest tests (all Opus quality tests) for quick feedback
const FAST_EXCLUDED_TESTS = [
  'plan-ceo-review-selective', 'plan-ceo-review', 'retro', 'retro-base-branch',
  'design-consultation-core', 'design-consultation-existing',
  'qa-fix-loop', 'design-review-fix',
];
|
||||
|
||||
// --- Module-init: diff-based selection ---
// Runs once at import time. Populates `selectedTests` from the git diff against
// the base branch, unless EVALS_ALL forces the full suite.
if (evalsEnabled && !process.env.EVALS_ALL) {
  // Base branch priority: explicit EVALS_BASE > auto-detected > 'main'.
  const baseBranch = process.env.EVALS_BASE
    || detectBaseBranch(ROOT)
    || 'main';
  const changedFiles = getChangedFiles(baseBranch, ROOT);

  if (changedFiles.length > 0) {
    const selection = selectTests(changedFiles, E2E_TOUCHFILES, GLOBAL_TOUCHFILES);
    selectedTests = selection.selected;
    process.stderr.write(`\nE2E selection (${selection.reason}): ${selection.selected.length}/${Object.keys(E2E_TOUCHFILES).length} tests\n`);
    if (selection.skipped.length > 0) {
      process.stderr.write(` Skipped: ${selection.skipped.join(', ')}\n`);
    }
    process.stderr.write('\n');
  }
  // If changedFiles is empty (e.g., on main branch), selectedTests stays null → run all
}

// Apply EVALS_FAST filter after diff-based selection
if (evalsEnabled && process.env.EVALS_FAST) {
  if (selectedTests === null) {
    // Run all minus excluded
    selectedTests = Object.keys(E2E_TOUCHFILES).filter(t => !FAST_EXCLUDED_TESTS.includes(t));
  } else {
    selectedTests = selectedTests.filter(t => !FAST_EXCLUDED_TESTS.includes(t));
  }
  process.stderr.write(`EVALS_FAST: excluded ${FAST_EXCLUDED_TESTS.length} slow tests, running ${selectedTests.length}\n\n`);
}

// Real describe when evals are on, describe.skip otherwise — test files use
// this instead of bun's describe directly.
export const describeE2E = evalsEnabled ? describe : describe.skip;
|
||||
|
||||
/** Wrap a describe block to skip entirely if none of its tests are selected. */
|
||||
export function describeIfSelected(name: string, testNames: string[], fn: () => void) {
|
||||
const anySelected = selectedTests === null || testNames.some(t => selectedTests!.includes(t));
|
||||
(anySelected ? describeE2E : describe.skip)(name, fn);
|
||||
}
|
||||
|
||||
// Unique run ID for this E2E session — used for heartbeat + per-run log directory.
// Derived from the ISO timestamp with ':' and '.' stripped, 'T' → '-', truncated
// to 15 chars (YYYYMMDD-HHMMSS).
export const runId = new Date().toISOString().replace(/[:.]/g, '').replace('T', '-').slice(0, 15);

// Path to the compiled browse binary under the repo root.
export const browseBin = path.resolve(ROOT, 'browse', 'dist', 'browse');

// Check if Anthropic API key is available (needed for outcome evals)
export const hasApiKey = !!process.env.ANTHROPIC_API_KEY;
|
||||
|
||||
/**
|
||||
* Copy a directory tree recursively (files only, follows structure).
|
||||
*/
|
||||
export function copyDirSync(src: string, dest: string) {
|
||||
fs.mkdirSync(dest, { recursive: true });
|
||||
for (const entry of fs.readdirSync(src, { withFileTypes: true })) {
|
||||
const srcPath = path.join(src, entry.name);
|
||||
const destPath = path.join(dest, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
copyDirSync(srcPath, destPath);
|
||||
} else {
|
||||
fs.copyFileSync(srcPath, destPath);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Set up browse shims (binary symlink, find-browse, remote-slug) in a tmpDir.
|
||||
*/
|
||||
export function setupBrowseShims(dir: string) {
|
||||
// Symlink browse binary
|
||||
const binDir = path.join(dir, 'browse', 'dist');
|
||||
fs.mkdirSync(binDir, { recursive: true });
|
||||
if (fs.existsSync(browseBin)) {
|
||||
fs.symlinkSync(browseBin, path.join(binDir, 'browse'));
|
||||
}
|
||||
|
||||
// find-browse shim
|
||||
const findBrowseDir = path.join(dir, 'browse', 'bin');
|
||||
fs.mkdirSync(findBrowseDir, { recursive: true });
|
||||
fs.writeFileSync(
|
||||
path.join(findBrowseDir, 'find-browse'),
|
||||
`#!/bin/bash\necho "${browseBin}"\n`,
|
||||
{ mode: 0o755 },
|
||||
);
|
||||
|
||||
// remote-slug shim (returns test-project)
|
||||
fs.writeFileSync(
|
||||
path.join(findBrowseDir, 'remote-slug'),
|
||||
`#!/bin/bash\necho "test-project"\n`,
|
||||
{ mode: 0o755 },
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Print cost summary after an E2E test.
|
||||
*/
|
||||
export function logCost(label: string, result: { costEstimate: { turnsUsed: number; estimatedTokens: number; estimatedCost: number }; duration: number }) {
|
||||
const { turnsUsed, estimatedTokens, estimatedCost } = result.costEstimate;
|
||||
const durationSec = Math.round(result.duration / 1000);
|
||||
console.log(`${label}: $${estimatedCost.toFixed(2)} (${turnsUsed} turns, ${(estimatedTokens / 1000).toFixed(1)}k tokens, ${durationSec}s)`);
|
||||
}
|
||||
|
||||
/**
|
||||
* Dump diagnostic info on planted-bug outcome failure (decision 1C).
|
||||
*/
|
||||
export function dumpOutcomeDiagnostic(dir: string, label: string, report: string, judgeResult: any) {
|
||||
try {
|
||||
const transcriptDir = path.join(dir, '.gstack', 'test-transcripts');
|
||||
fs.mkdirSync(transcriptDir, { recursive: true });
|
||||
const timestamp = new Date().toISOString().replace(/[:.]/g, '-');
|
||||
fs.writeFileSync(
|
||||
path.join(transcriptDir, `${label}-outcome-${timestamp}.json`),
|
||||
JSON.stringify({ label, report, judgeResult }, null, 2),
|
||||
);
|
||||
} catch { /* non-fatal */ }
|
||||
}
|
||||
|
||||
/**
|
||||
* Create an EvalCollector for a specific suite. Returns null if evals are not enabled.
|
||||
*/
|
||||
export function createEvalCollector(suite: string): EvalCollector | null {
|
||||
return evalsEnabled ? new EvalCollector(suite) : null;
|
||||
}
|
||||
|
||||
/** DRY helper to record an E2E test result into the eval collector. */
|
||||
export function recordE2E(
|
||||
evalCollector: EvalCollector | null,
|
||||
name: string,
|
||||
suite: string,
|
||||
result: SkillTestResult,
|
||||
extra?: Partial<EvalTestEntry>,
|
||||
) {
|
||||
// Derive last tool call from transcript for machine-readable diagnostics
|
||||
const lastTool = result.toolCalls.length > 0
|
||||
? `${result.toolCalls[result.toolCalls.length - 1].tool}(${JSON.stringify(result.toolCalls[result.toolCalls.length - 1].input).slice(0, 60)})`
|
||||
: undefined;
|
||||
|
||||
evalCollector?.addTest({
|
||||
name, suite, tier: 'e2e',
|
||||
passed: result.exitReason === 'success' && result.browseErrors.length === 0,
|
||||
duration_ms: result.duration,
|
||||
cost_usd: result.costEstimate.estimatedCost,
|
||||
transcript: result.transcript,
|
||||
output: result.output?.slice(0, 2000),
|
||||
turns_used: result.costEstimate.turnsUsed,
|
||||
browse_errors: result.browseErrors,
|
||||
exit_reason: result.exitReason,
|
||||
timeout_at_turn: result.exitReason === 'timeout' ? result.costEstimate.turnsUsed : undefined,
|
||||
last_tool_call: lastTool,
|
||||
model: result.model,
|
||||
first_response_ms: result.firstResponseMs,
|
||||
max_inter_turn_ms: result.maxInterTurnMs,
|
||||
...extra,
|
||||
});
|
||||
}
|
||||
|
||||
/** Finalize an eval collector (write results). */
|
||||
export async function finalizeEvalCollector(evalCollector: EvalCollector | null) {
|
||||
if (evalCollector) {
|
||||
try {
|
||||
await evalCollector.finalize();
|
||||
} catch (err) {
|
||||
console.error('Failed to save eval results:', err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Pre-seed preamble state files so E2E tests don't waste turns on lake intro + telemetry prompts.
// These are one-time interactive prompts that burn 3-7 turns per test if not pre-seeded.
if (evalsEnabled) {
  const gstackDir = path.join(os.homedir(), '.gstack');
  fs.mkdirSync(gstackDir, { recursive: true });
  // Empty marker files suffice — presumably only their existence is checked
  // by the prompts being suppressed (TODO confirm against the skill code).
  for (const f of ['.completeness-intro-seen', '.telemetry-prompted']) {
    const p = path.join(gstackDir, f);
    if (!fs.existsSync(p)) fs.writeFileSync(p, '');
  }
}

// Fail fast if Anthropic API is unreachable — don't burn through tests getting ConnectionRefused
if (evalsEnabled) {
  // One cheap 1-turn claude invocation, capped at 30s so a hang can't stall suite startup.
  const check = spawnSync('sh', ['-c', 'echo "ping" | claude -p --max-turns 1 --output-format stream-json --verbose --dangerously-skip-permissions'], {
    stdio: 'pipe', timeout: 30_000,
  });
  // stdout may be undefined if spawn itself failed — treat as empty.
  const output = check.stdout?.toString() || '';
  if (output.includes('ConnectionRefused') || output.includes('Unable to connect')) {
    throw new Error('Anthropic API unreachable — aborting E2E suite. Fix connectivity and retry.');
  }
}
|
||||
|
||||
/** Skip an individual test if not selected (for multi-test describe blocks). */
|
||||
export function testIfSelected(testName: string, fn: () => Promise<void>, timeout: number) {
|
||||
const shouldRun = selectedTests === null || selectedTests.includes(testName);
|
||||
(shouldRun ? test : test.skip)(testName, fn, timeout);
|
||||
}
|
||||
|
||||
/** Concurrent version — runs in parallel with other concurrent tests within the same describe block. */
|
||||
export function testConcurrentIfSelected(testName: string, fn: () => Promise<void>, timeout: number) {
|
||||
const shouldRun = selectedTests === null || selectedTests.includes(testName);
|
||||
(shouldRun ? test.concurrent : test.skip)(testName, fn, timeout);
|
||||
}
|
||||
|
||||
export { judgePassed } from './eval-store';
|
||||
export { EvalCollector } from './eval-store';
|
||||
export type { EvalTestEntry } from './eval-store';
|
||||
@@ -42,6 +42,11 @@ export interface EvalTestEntry {
|
||||
timeout_at_turn?: number; // which turn was active when timeout hit
|
||||
last_tool_call?: string; // e.g. "Write(review-output.md)"
|
||||
|
||||
// Model + timing diagnostics (added for Sonnet/Opus split)
|
||||
model?: string; // e.g. 'claude-sonnet-4-6' or 'claude-opus-4-6'
|
||||
first_response_ms?: number; // time from spawn to first NDJSON line
|
||||
max_inter_turn_ms?: number; // peak latency between consecutive tool calls
|
||||
|
||||
// Outcome eval
|
||||
detection_rate?: number;
|
||||
false_positives?: number;
|
||||
@@ -65,6 +70,7 @@ export interface EvalResult {
|
||||
failed: number;
|
||||
total_cost_usd: number;
|
||||
total_duration_ms: number;
|
||||
wall_clock_ms?: number; // wall-clock from collector creation to finalization (shows parallelism)
|
||||
tests: EvalTestEntry[];
|
||||
_partial?: boolean; // true for incremental saves, absent in final
|
||||
}
|
||||
@@ -546,6 +552,7 @@ export class EvalCollector {
|
||||
private tests: EvalTestEntry[] = [];
|
||||
private finalized = false;
|
||||
private evalDir: string;
|
||||
private createdAt = Date.now();
|
||||
|
||||
constructor(tier: 'e2e' | 'llm-judge', evalDir?: string) {
|
||||
this.tier = tier;
|
||||
@@ -615,6 +622,7 @@ export class EvalCollector {
|
||||
failed: this.tests.length - passed,
|
||||
total_cost_usd: Math.round(totalCost * 100) / 100,
|
||||
total_duration_ms: totalDuration,
|
||||
wall_clock_ms: Date.now() - this.createdAt,
|
||||
tests: this.tests,
|
||||
};
|
||||
|
||||
|
||||
@@ -0,0 +1,104 @@
|
||||
import { describe, test, expect } from 'bun:test';
|
||||
import { parseGeminiJSONL } from './gemini-session-runner';
|
||||
|
||||
// Fixture: actual Gemini CLI stream-json output with tool use.
// One NDJSON event per line: init → user msg → assistant delta → tool_use →
// tool_result → assistant delta → result (with token stats).
const FIXTURE_LINES = [
  '{"type":"init","timestamp":"2026-03-20T15:14:46.455Z","session_id":"test-session-123","model":"auto-gemini-3"}',
  '{"type":"message","timestamp":"2026-03-20T15:14:46.456Z","role":"user","content":"list the files"}',
  '{"type":"message","timestamp":"2026-03-20T15:14:49.650Z","role":"assistant","content":"I will list the files.","delta":true}',
  '{"type":"tool_use","timestamp":"2026-03-20T15:14:49.690Z","tool_name":"run_shell_command","tool_id":"cmd_1","parameters":{"command":"ls"}}',
  '{"type":"tool_result","timestamp":"2026-03-20T15:14:49.931Z","tool_id":"cmd_1","status":"success","output":"file1.ts\\nfile2.ts"}',
  '{"type":"message","timestamp":"2026-03-20T15:14:51.945Z","role":"assistant","content":"Here are the files.","delta":true}',
  '{"type":"result","timestamp":"2026-03-20T15:14:52.030Z","status":"success","stats":{"total_tokens":27147,"input_tokens":26928,"output_tokens":87,"cached":0,"duration_ms":5575,"tool_calls":1}}',
];
|
||||
|
||||
// Unit tests for the pure JSONL parser — no subprocess spawning, so these run
// everywhere (no gemini binary or network required).
describe('parseGeminiJSONL', () => {
  test('extracts session ID from init event', () => {
    const parsed = parseGeminiJSONL(FIXTURE_LINES);
    expect(parsed.sessionId).toBe('test-session-123');
  });

  test('concatenates assistant message deltas into output', () => {
    const parsed = parseGeminiJSONL(FIXTURE_LINES);
    expect(parsed.output).toBe('I will list the files.Here are the files.');
  });

  test('ignores user messages', () => {
    const lines = [
      '{"type":"message","role":"user","content":"this should be ignored"}',
      '{"type":"message","role":"assistant","content":"this should be kept","delta":true}',
    ];
    const parsed = parseGeminiJSONL(lines);
    expect(parsed.output).toBe('this should be kept');
  });

  test('extracts tool names from tool_use events', () => {
    const parsed = parseGeminiJSONL(FIXTURE_LINES);
    expect(parsed.toolCalls).toHaveLength(1);
    expect(parsed.toolCalls[0]).toBe('run_shell_command');
  });

  test('extracts total tokens from result stats', () => {
    const parsed = parseGeminiJSONL(FIXTURE_LINES);
    expect(parsed.tokens).toBe(27147);
  });

  test('skips malformed lines without throwing', () => {
    const lines = [
      '{"type":"init","session_id":"ok"}',
      'this is not json',
      '{"type":"message","role":"assistant","content":"hello","delta":true}',
      '{incomplete json',
      '{"type":"result","status":"success","stats":{"total_tokens":100}}',
    ];
    const parsed = parseGeminiJSONL(lines);
    expect(parsed.sessionId).toBe('ok');
    expect(parsed.output).toBe('hello');
    expect(parsed.tokens).toBe(100);
  });

  test('skips empty and whitespace-only lines', () => {
    const lines = [
      '',
      ' ',
      '{"type":"init","session_id":"s1"}',
      '\t',
      '{"type":"result","status":"success","stats":{"total_tokens":50}}',
    ];
    const parsed = parseGeminiJSONL(lines);
    expect(parsed.sessionId).toBe('s1');
    expect(parsed.tokens).toBe(50);
  });

  test('handles empty input', () => {
    const parsed = parseGeminiJSONL([]);
    expect(parsed.output).toBe('');
    expect(parsed.toolCalls).toHaveLength(0);
    expect(parsed.tokens).toBe(0);
    expect(parsed.sessionId).toBeNull();
  });

  test('handles missing fields gracefully', () => {
    const lines = [
      '{"type":"init"}', // no session_id
      '{"type":"message","role":"assistant"}', // no content
      '{"type":"tool_use"}', // no tool_name
      '{"type":"result","status":"success"}', // no stats
    ];
    const parsed = parseGeminiJSONL(lines);
    expect(parsed.sessionId).toBeNull();
    expect(parsed.output).toBe('');
    expect(parsed.toolCalls).toHaveLength(0);
    expect(parsed.tokens).toBe(0);
  });

  test('handles multiple tool_use events', () => {
    const lines = [
      '{"type":"tool_use","tool_name":"run_shell_command","tool_id":"cmd_1","parameters":{"command":"ls"}}',
      '{"type":"tool_use","tool_name":"read_file","tool_id":"cmd_2","parameters":{"path":"foo.ts"}}',
      '{"type":"tool_use","tool_name":"run_shell_command","tool_id":"cmd_3","parameters":{"command":"cat bar.ts"}}',
    ];
    const parsed = parseGeminiJSONL(lines);
    expect(parsed.toolCalls).toEqual(['run_shell_command', 'read_file', 'run_shell_command']);
  });
});
|
||||
@@ -0,0 +1,201 @@
|
||||
/**
|
||||
* Gemini CLI subprocess runner for skill E2E testing.
|
||||
*
|
||||
* Spawns `gemini -p` as an independent process, parses its stream-json
|
||||
* output, and returns structured results. Follows the same pattern as
|
||||
* codex-session-runner.ts but adapted for the Gemini CLI.
|
||||
*
|
||||
* Key differences from Codex session-runner:
|
||||
* - Uses `gemini -p` instead of `codex exec`
|
||||
* - Output is NDJSON with event types: init, message, tool_use, tool_result, result
|
||||
* - Uses `--output-format stream-json --yolo` instead of `--json -s read-only`
|
||||
* - No temp HOME needed — Gemini discovers skills from `.agents/skills/` in cwd
|
||||
* - Message events are streamed with `delta: true` — must concatenate
|
||||
*/
|
||||
|
||||
import * as path from 'path';
|
||||
|
||||
// --- Interfaces ---

/** Structured result of one `gemini -p` invocation (see runGeminiSkill). */
export interface GeminiResult {
  output: string; // Full assistant message text (concatenated deltas)
  toolCalls: string[]; // Tool names from tool_use events
  tokens: number; // Total tokens used
  exitCode: number; // Process exit code (-1 = gemini binary missing, 124 = timed out)
  durationMs: number; // Wall clock time
  sessionId: string | null; // Session ID from init event
  rawLines: string[]; // Raw JSONL lines for debugging
}
|
||||
|
||||
// --- JSONL parser ---
|
||||
|
||||
export interface ParsedGeminiJSONL {
|
||||
output: string;
|
||||
toolCalls: string[];
|
||||
tokens: number;
|
||||
sessionId: string | null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse an array of JSONL lines from `gemini -p --output-format stream-json`.
|
||||
* Pure function — no I/O, no side effects.
|
||||
*
|
||||
* Handles these Gemini event types:
|
||||
* - init → extract session_id
|
||||
* - message (role=assistant, delta=true) → concatenate content into output
|
||||
* - tool_use → extract tool_name
|
||||
* - tool_result → logged but not extracted
|
||||
* - result → extract token usage from stats
|
||||
*/
|
||||
export function parseGeminiJSONL(lines: string[]): ParsedGeminiJSONL {
|
||||
const outputParts: string[] = [];
|
||||
const toolCalls: string[] = [];
|
||||
let tokens = 0;
|
||||
let sessionId: string | null = null;
|
||||
|
||||
for (const line of lines) {
|
||||
if (!line.trim()) continue;
|
||||
try {
|
||||
const obj = JSON.parse(line);
|
||||
const t = obj.type || '';
|
||||
|
||||
if (t === 'init') {
|
||||
const sid = obj.session_id || '';
|
||||
if (sid) sessionId = sid;
|
||||
} else if (t === 'message') {
|
||||
if (obj.role === 'assistant' && obj.content) {
|
||||
outputParts.push(obj.content);
|
||||
}
|
||||
} else if (t === 'tool_use') {
|
||||
const name = obj.tool_name || '';
|
||||
if (name) toolCalls.push(name);
|
||||
} else if (t === 'result') {
|
||||
const stats = obj.stats || {};
|
||||
tokens = (stats.total_tokens || 0);
|
||||
}
|
||||
} catch { /* skip malformed lines */ }
|
||||
}
|
||||
|
||||
return {
|
||||
output: outputParts.join(''),
|
||||
toolCalls,
|
||||
tokens,
|
||||
sessionId,
|
||||
};
|
||||
}
|
||||
|
||||
// --- Main runner ---

/**
 * Run a prompt via `gemini -p` and return structured results.
 *
 * Spawns gemini with stream-json output, parses JSONL events,
 * and returns a GeminiResult. Skips gracefully if gemini binary is not found
 * (exitCode -1, output "SKIP: …"). A timeout kills the process and reports
 * exitCode 124 (mirrors the shell `timeout` convention).
 */
export async function runGeminiSkill(opts: {
  prompt: string; // What to ask Gemini
  timeoutMs?: number; // Default 300000 (5 min)
  cwd?: string; // Working directory (where .agents/skills/ lives)
}): Promise<GeminiResult> {
  const {
    prompt,
    timeoutMs = 300_000,
    cwd,
  } = opts;

  const startTime = Date.now();

  // Check if gemini binary exists — resolved via `which` on PATH.
  const whichResult = Bun.spawnSync(['which', 'gemini']);
  if (whichResult.exitCode !== 0) {
    // Sentinel result: callers can treat a missing binary as a soft skip.
    return {
      output: 'SKIP: gemini binary not found',
      toolCalls: [],
      tokens: 0,
      exitCode: -1,
      durationMs: Date.now() - startTime,
      sessionId: null,
      rawLines: [],
    };
  }

  // Build gemini command
  const args = ['-p', prompt, '--output-format', 'stream-json', '--yolo'];

  // Spawn gemini — uses real HOME for auth, cwd for skill discovery
  const proc = Bun.spawn(['gemini', ...args], {
    cwd: cwd || process.cwd(),
    stdout: 'pipe',
    stderr: 'pipe',
  });

  // Race against timeout: on expiry, flag it and kill the process; the read
  // loop below then terminates and proc.exited resolves.
  let timedOut = false;
  const timeoutId = setTimeout(() => {
    timedOut = true;
    proc.kill();
  }, timeoutMs);

  // Stream and collect JSONL from stdout
  const collectedLines: string[] = [];
  // Drain stderr concurrently so the child can't block on a full stderr pipe.
  const stderrPromise = new Response(proc.stderr).text();

  const reader = proc.stdout.getReader();
  const decoder = new TextDecoder();
  let buf = ''; // carries a partial line across chunk boundaries

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buf += decoder.decode(value, { stream: true });
      const lines = buf.split('\n');
      // Last element is a (possibly empty) partial line — keep it in buf.
      buf = lines.pop() || '';
      for (const line of lines) {
        if (!line.trim()) continue;
        collectedLines.push(line);

        // Real-time progress to stderr
        try {
          const event = JSON.parse(line);
          if (event.type === 'tool_use' && event.tool_name) {
            const elapsed = Math.round((Date.now() - startTime) / 1000);
            process.stderr.write(` [gemini ${elapsed}s] tool: ${event.tool_name}\n`);
          } else if (event.type === 'message' && event.role === 'assistant' && event.content) {
            const elapsed = Math.round((Date.now() - startTime) / 1000);
            process.stderr.write(` [gemini ${elapsed}s] message: ${event.content.slice(0, 100)}\n`);
          }
        } catch { /* skip — parseGeminiJSONL will handle it later */ }
      }
    }
  } catch { /* stream read error — fall through to exit code handling */ }

  // Flush remaining buffer (final line may lack a trailing newline)
  if (buf.trim()) {
    collectedLines.push(buf);
  }

  const stderr = await stderrPromise;
  const exitCode = await proc.exited;
  clearTimeout(timeoutId);

  const durationMs = Date.now() - startTime;

  // Parse all collected JSONL lines
  const parsed = parseGeminiJSONL(collectedLines);

  // Log stderr if non-empty (may contain auth errors, etc.)
  if (stderr.trim()) {
    process.stderr.write(` [gemini stderr] ${stderr.trim().slice(0, 200)}\n`);
  }

  return {
    output: parsed.output,
    toolCalls: parsed.toolCalls,
    tokens: parsed.tokens,
    exitCode: timedOut ? 124 : exitCode,
    durationMs,
    sessionId: parsed.sessionId,
    rawLines: collectedLines,
  };
}
|
||||
@@ -41,6 +41,12 @@ export interface SkillTestResult {
|
||||
output: string;
|
||||
costEstimate: CostEstimate;
|
||||
transcript: any[];
|
||||
/** Which model was used for this test (added for Sonnet/Opus split diagnostics) */
|
||||
model: string;
|
||||
/** Time from spawn to first NDJSON line, in ms (added for rate-limit diagnostics) */
|
||||
firstResponseMs: number;
|
||||
/** Peak latency between consecutive tool calls, in ms */
|
||||
maxInterTurnMs: number;
|
||||
}
|
||||
|
||||
const BROWSE_ERROR_PATTERNS = [
|
||||
@@ -116,6 +122,8 @@ export async function runSkillTest(options: {
|
||||
timeout?: number;
|
||||
testName?: string;
|
||||
runId?: string;
|
||||
/** Model to use. Defaults to claude-sonnet-4-6 (overridable via EVALS_MODEL env). */
|
||||
model?: string;
|
||||
}): Promise<SkillTestResult> {
|
||||
const {
|
||||
prompt,
|
||||
@@ -126,6 +134,7 @@ export async function runSkillTest(options: {
|
||||
testName,
|
||||
runId,
|
||||
} = options;
|
||||
const model = options.model ?? process.env.EVALS_MODEL ?? 'claude-sonnet-4-6';
|
||||
|
||||
const startTime = Date.now();
|
||||
const startedAt = new Date().toISOString();
|
||||
@@ -144,6 +153,7 @@ export async function runSkillTest(options: {
|
||||
// avoid shell escaping issues. --verbose is required for stream-json mode.
|
||||
const args = [
|
||||
'-p',
|
||||
'--model', model,
|
||||
'--output-format', 'stream-json',
|
||||
'--verbose',
|
||||
'--dangerously-skip-permissions',
|
||||
@@ -151,8 +161,10 @@ export async function runSkillTest(options: {
|
||||
'--allowed-tools', ...allowedTools,
|
||||
];
|
||||
|
||||
// Write prompt to a temp file and pipe it via shell to avoid stdin buffering issues
|
||||
const promptFile = path.join(workingDirectory, '.prompt-tmp');
|
||||
// Write prompt to a temp file OUTSIDE workingDirectory to avoid race conditions
|
||||
// where afterAll cleanup deletes the dir before cat reads the file (especially
|
||||
// with --concurrent --retry). Using os.tmpdir() + unique suffix keeps it stable.
|
||||
const promptFile = path.join(os.tmpdir(), `.prompt-${process.pid}-${Date.now()}-${Math.random().toString(36).slice(2)}`);
|
||||
fs.writeFileSync(promptFile, prompt);
|
||||
|
||||
// Isolate telemetry: E2E tests use a temp state dir so they don't pollute
|
||||
@@ -181,6 +193,9 @@ export async function runSkillTest(options: {
|
||||
const collectedLines: string[] = [];
|
||||
let liveTurnCount = 0;
|
||||
let liveToolCount = 0;
|
||||
let firstResponseMs = 0;
|
||||
let lastToolTime = 0;
|
||||
let maxInterTurnMs = 0;
|
||||
const stderrPromise = new Response(proc.stderr).text();
|
||||
|
||||
const reader = proc.stdout.getReader();
|
||||
@@ -207,7 +222,15 @@ export async function runSkillTest(options: {
|
||||
for (const item of content) {
|
||||
if (item.type === 'tool_use') {
|
||||
liveToolCount++;
|
||||
const elapsed = Math.round((Date.now() - startTime) / 1000);
|
||||
const now = Date.now();
|
||||
const elapsed = Math.round((now - startTime) / 1000);
|
||||
// Track timing telemetry
|
||||
if (firstResponseMs === 0) firstResponseMs = now - startTime;
|
||||
if (lastToolTime > 0) {
|
||||
const interTurn = now - lastToolTime;
|
||||
if (interTurn > maxInterTurnMs) maxInterTurnMs = interTurn;
|
||||
}
|
||||
lastToolTime = now;
|
||||
const progressLine = ` [${elapsed}s] turn ${liveTurnCount} tool #${liveToolCount}: ${item.name}(${truncate(JSON.stringify(item.input || {}), 80)})\n`;
|
||||
process.stderr.write(progressLine);
|
||||
|
||||
@@ -336,5 +359,5 @@ export async function runSkillTest(options: {
|
||||
turnsUsed,
|
||||
};
|
||||
|
||||
return { toolCalls, browseErrors, exitReason, duration, output: resultLine?.result || '', costEstimate, transcript };
|
||||
return { toolCalls, browseErrors, exitReason, duration, output: resultLine?.result || '', costEstimate, transcript, model, firstResponseMs, maxInterTurnMs };
|
||||
}
|
||||
|
||||
@@ -40,7 +40,8 @@ export const E2E_TOUCHFILES: Record<string, string[]> = {
|
||||
'skillmd-setup-discovery': ['SKILL.md', 'SKILL.md.tmpl'],
|
||||
'skillmd-no-local-binary': ['SKILL.md', 'SKILL.md.tmpl'],
|
||||
'skillmd-outside-git': ['SKILL.md', 'SKILL.md.tmpl'],
|
||||
'contributor-mode': ['SKILL.md', 'SKILL.md.tmpl'],
|
||||
|
||||
'contributor-mode': ['SKILL.md.tmpl', 'scripts/gen-skill-docs.ts'],
|
||||
'session-awareness': ['SKILL.md', 'SKILL.md.tmpl'],
|
||||
|
||||
// QA
|
||||
@@ -50,6 +51,7 @@ export const E2E_TOUCHFILES: Record<string, string[]> = {
|
||||
'qa-b8-checkout': ['qa/**', 'browse/src/**', 'browse/test/fixtures/qa-eval-checkout.html', 'test/fixtures/qa-eval-checkout-ground-truth.json'],
|
||||
'qa-only-no-fix': ['qa-only/**', 'qa/templates/**'],
|
||||
'qa-fix-loop': ['qa/**', 'browse/src/**'],
|
||||
'qa-bootstrap': ['qa/**', 'ship/**'],
|
||||
|
||||
// Review
|
||||
'review-sql-injection': ['review/**', 'test/fixtures/review-eval-vuln.rb'],
|
||||
@@ -57,14 +59,22 @@ export const E2E_TOUCHFILES: Record<string, string[]> = {
|
||||
'review-base-branch': ['review/**'],
|
||||
'review-design-lite': ['review/**', 'test/fixtures/review-eval-design-slop.*'],
|
||||
|
||||
// Office Hours
|
||||
'office-hours-spec-review': ['office-hours/**', 'scripts/gen-skill-docs.ts'],
|
||||
|
||||
// Plan reviews
|
||||
'plan-ceo-review': ['plan-ceo-review/**'],
|
||||
'plan-ceo-review-selective': ['plan-ceo-review/**'],
|
||||
'plan-ceo-review-benefits': ['plan-ceo-review/**', 'scripts/gen-skill-docs.ts'],
|
||||
'plan-eng-review': ['plan-eng-review/**'],
|
||||
'plan-eng-review-artifact': ['plan-eng-review/**'],
|
||||
|
||||
// Ship
|
||||
'ship-base-branch': ['ship/**'],
|
||||
'ship-base-branch': ['ship/**'],
|
||||
'ship-local-workflow': ['ship/**', 'scripts/gen-skill-docs.ts'],
|
||||
|
||||
// Setup browser cookies
|
||||
'setup-cookies-detect': ['setup-browser-cookies/**'],
|
||||
|
||||
// Retro
|
||||
'retro': ['retro/**'],
|
||||
@@ -80,17 +90,19 @@ export const E2E_TOUCHFILES: Record<string, string[]> = {
|
||||
'codex-discover-skill': ['codex/**', '.agents/skills/**', 'test/helpers/codex-session-runner.ts'],
|
||||
'codex-review-findings': ['review/**', '.agents/skills/gstack-review/**', 'codex/**', 'test/helpers/codex-session-runner.ts'],
|
||||
|
||||
// QA bootstrap
|
||||
'qa-bootstrap': ['qa/**', 'browse/src/**', 'ship/**'],
|
||||
// Gemini E2E (tests skills via Gemini CLI)
|
||||
'gemini-discover-skill': ['.agents/skills/**', 'test/helpers/gemini-session-runner.ts'],
|
||||
'gemini-review-findings': ['review/**', '.agents/skills/gstack-review/**', 'test/helpers/gemini-session-runner.ts'],
|
||||
|
||||
|
||||
// Ship coverage audit
|
||||
'ship-coverage-audit': ['ship/**'],
|
||||
|
||||
// Design
|
||||
'design-consultation-core': ['design-consultation/**'],
|
||||
'design-consultation-research': ['design-consultation/**'],
|
||||
'design-consultation-existing': ['design-consultation/**'],
|
||||
'design-consultation-preview': ['design-consultation/**'],
|
||||
'design-consultation-core': ['design-consultation/**'],
|
||||
'design-consultation-existing': ['design-consultation/**'],
|
||||
'design-consultation-research': ['design-consultation/**'],
|
||||
'design-consultation-preview': ['design-consultation/**'],
|
||||
'plan-design-review-plan-mode': ['plan-design-review/**'],
|
||||
'plan-design-review-no-ui-scope': ['plan-design-review/**'],
|
||||
'design-review-fix': ['design-review/**', 'browse/src/**'],
|
||||
@@ -98,6 +110,12 @@ export const E2E_TOUCHFILES: Record<string, string[]> = {
|
||||
// gstack-upgrade
|
||||
'gstack-upgrade-happy-path': ['gstack-upgrade/**'],
|
||||
|
||||
// Deploy skills
|
||||
'land-and-deploy-workflow': ['land-and-deploy/**', 'scripts/gen-skill-docs.ts'],
|
||||
'canary-workflow': ['canary/**', 'browse/src/**'],
|
||||
'benchmark-workflow': ['benchmark/**', 'browse/src/**'],
|
||||
'setup-deploy-workflow': ['setup-deploy/**', 'scripts/gen-skill-docs.ts'],
|
||||
|
||||
// Skill routing — journey-stage tests (depend on ALL skill descriptions)
|
||||
'journey-ideation': ['*/SKILL.md.tmpl', 'SKILL.md.tmpl', 'scripts/gen-skill-docs.ts'],
|
||||
'journey-plan-eng': ['*/SKILL.md.tmpl', 'SKILL.md.tmpl', 'scripts/gen-skill-docs.ts'],
|
||||
@@ -140,6 +158,16 @@ export const LLM_JUDGE_TOUCHFILES: Record<string, string[]> = {
|
||||
'design-review/SKILL.md fix loop': ['design-review/SKILL.md', 'design-review/SKILL.md.tmpl'],
|
||||
'design-consultation/SKILL.md research': ['design-consultation/SKILL.md', 'design-consultation/SKILL.md.tmpl'],
|
||||
|
||||
// Office Hours
|
||||
'office-hours/SKILL.md spec review': ['office-hours/SKILL.md', 'office-hours/SKILL.md.tmpl', 'scripts/gen-skill-docs.ts'],
|
||||
'office-hours/SKILL.md design sketch': ['office-hours/SKILL.md', 'office-hours/SKILL.md.tmpl', 'scripts/gen-skill-docs.ts'],
|
||||
|
||||
// Deploy skills
|
||||
'land-and-deploy/SKILL.md workflow': ['land-and-deploy/SKILL.md', 'land-and-deploy/SKILL.md.tmpl'],
|
||||
'canary/SKILL.md monitoring loop': ['canary/SKILL.md', 'canary/SKILL.md.tmpl'],
|
||||
'benchmark/SKILL.md perf collection': ['benchmark/SKILL.md', 'benchmark/SKILL.md.tmpl'],
|
||||
'setup-deploy/SKILL.md platform setup': ['setup-deploy/SKILL.md', 'setup-deploy/SKILL.md.tmpl'],
|
||||
|
||||
// Other skills
|
||||
'retro/SKILL.md instructions': ['retro/SKILL.md', 'retro/SKILL.md.tmpl'],
|
||||
'qa-only/SKILL.md workflow': ['qa-only/SKILL.md', 'qa-only/SKILL.md.tmpl'],
|
||||
@@ -152,6 +180,7 @@ export const LLM_JUDGE_TOUCHFILES: Record<string, string[]> = {
|
||||
export const GLOBAL_TOUCHFILES = [
|
||||
'test/helpers/session-runner.ts',
|
||||
'test/helpers/codex-session-runner.ts',
|
||||
'test/helpers/gemini-session-runner.ts',
|
||||
'test/helpers/eval-store.ts',
|
||||
'test/helpers/llm-judge.ts',
|
||||
'scripts/gen-skill-docs.ts',
|
||||
|
||||
Reference in New Issue
Block a user