mirror of
https://github.com/garrytan/gstack.git
synced 2026-05-06 21:46:40 +02:00
fix(security): IPv6 ULA blocking, cookie redaction, per-tab cancel, targeted token (#664)
Community PR #664 by @mr-k-man (security audit round 1, new parts only). - IPv6 ULA prefix blocking (fc00::/7) in url-validation.ts with false-positive guard for hostnames like fd.example.com - Cookie value redaction for tokens, API keys, JWTs in browse cookies command - Per-tab cancel files in killAgent() replacing broken global kill-signal - design/serve.ts: realpathSync upgrade prevents symlink bypass in /api/reload - extension: targeted getToken handler replaces token-in-health-broadcast - Supabase migration 003: column-level GRANT restricts anon UPDATE scope - Telemetry sync: upsert error logging - 10 new tests for IPv6, cookie redaction, DNS rebinding, path traversal Co-Authored-By: mr-k-man <mr-k-man@users.noreply.github.com> Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -13,6 +13,10 @@ import * as path from 'path';
|
||||
import { TEMP_DIR, isPathWithin } from './platform';
|
||||
import { inspectElement, formatInspectorResult, getModificationHistory } from './cdp-inspector';
|
||||
|
||||
// Redaction patterns for sensitive cookie/storage values — exported for test coverage
|
||||
// Cookie NAMES that likely hold secrets: a sensitive word bounded by
// start/end-of-string or a separator char (_ . -), plus "api key" spellings
// (apikey, api_key, api-key — note the unescaped "." also matches any single char).
export const SENSITIVE_COOKIE_NAME = /(^|[_.-])(token|secret|key|password|credential|auth|jwt|session|csrf|sid)($|[_.-])|api.?key/i;
|
||||
// Cookie VALUES whose prefix marks a well-known secret format: "eyJ" (base64 JWT
// header), sk-/sk_live_/sk_test_/pk_live_/pk_test_/rk_live_ (Stripe/OpenAI-style
// keys), sk-ant- (Anthropic), ghp_/gho_/github_pat_ (GitHub), xoxb-/xoxp-/xoxs-/xoxa-
// (Slack), AKIA + 16 chars (AWS access key id), AIza (Google API key), SG. (SendGrid),
// "Bearer " auth values, sbp_ (Supabase). Anchored at start — prefix match only.
export const SENSITIVE_COOKIE_VALUE = /^(eyJ|sk-|sk_live_|sk_test_|pk_live_|pk_test_|rk_live_|sk-ant-|ghp_|gho_|github_pat_|xox[bpsa]-|AKIA[A-Z0-9]{16}|AIza|SG\.|Bearer\s|sbp_)/;
|
||||
|
||||
/** Detect await keyword, ignoring comments. Accepted risk: await in string literals triggers wrapping (harmless). */
|
||||
function hasAwait(code: string): boolean {
|
||||
const stripped = code.replace(/\/\/.*$/gm, '').replace(/\/\*[\s\S]*?\*\//g, '');
|
||||
@@ -300,7 +304,14 @@ export async function handleReadCommand(
|
||||
|
||||
case 'cookies': {
  const cookies = await page.context().cookies();
  // Redact cookie values that look like secrets (consistent with storage redaction).
  // NOTE: the previous early `return JSON.stringify(cookies, ...)` made this whole
  // redaction path unreachable dead code — cookies were returned unredacted.
  const redacted = cookies.map(c => {
    if (SENSITIVE_COOKIE_NAME.test(c.name) || SENSITIVE_COOKIE_VALUE.test(c.value)) {
      // Keep the length so callers can still reason about the value's size.
      return { ...c, value: `[REDACTED — ${c.value.length} chars]` };
    }
    return c;
  });
  return JSON.stringify(redacted, null, 2);
}
|
||||
|
||||
case 'storage': {
|
||||
|
||||
+13
-10
@@ -572,7 +572,7 @@ function spawnClaude(userMessage: string, extensionUrl?: string | null, forTabId
|
||||
// Agent status transitions happen when we receive agent_done/agent_error events.
|
||||
}
|
||||
|
||||
function killAgent(): void {
|
||||
function killAgent(targetTabId?: number | null): void {
|
||||
if (agentProcess) {
|
||||
try { agentProcess.kill('SIGTERM'); } catch (err: any) {
|
||||
console.warn('[browse] Failed to SIGTERM agent:', err.message);
|
||||
@@ -581,17 +581,18 @@ function killAgent(): void {
|
||||
console.warn('[browse] Failed to SIGKILL agent:', err.message);
|
||||
} }, 3000);
|
||||
}
|
||||
// Signal the sidebar-agent worker to cancel via a per-tab cancel file.
|
||||
// Using per-tab files prevents race conditions where one agent's cancel
|
||||
// signal is consumed by a different tab's agent in concurrent mode.
|
||||
// When targetTabId is provided, only that tab's agent is cancelled.
|
||||
const cancelDir = path.join(process.env.HOME || '/tmp', '.gstack');
|
||||
const tabId = targetTabId ?? agentTabId ?? 0;
|
||||
const cancelFile = path.join(cancelDir, `sidebar-agent-cancel-${tabId}`);
|
||||
try { fs.writeFileSync(cancelFile, Date.now().toString()); } catch {}
|
||||
agentProcess = null;
|
||||
agentStartTime = null;
|
||||
currentMessage = null;
|
||||
agentStatus = 'idle';
|
||||
|
||||
// Signal sidebar-agent.ts to kill its active claude subprocess.
|
||||
// sidebar-agent runs in a separate non-compiled Bun process (posix_spawn
|
||||
// limitation). It polls the kill-signal file and terminates on any write.
|
||||
const agentQueue = process.env.SIDEBAR_QUEUE_PATH || path.join(process.env.HOME || '/tmp', '.gstack', 'sidebar-agent-queue.jsonl');
|
||||
const killFile = path.join(path.dirname(agentQueue), 'sidebar-agent-kill');
|
||||
try { fs.writeFileSync(killFile, String(Date.now())); } catch {}
|
||||
}
|
||||
|
||||
// Agent health check — detect hung processes
|
||||
@@ -1371,7 +1372,8 @@ async function start() {
|
||||
if (!validateAuth(req)) {
|
||||
return new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401, headers: { 'Content-Type': 'application/json' } });
|
||||
}
|
||||
killAgent();
|
||||
const killBody = await req.json().catch(() => ({}));
|
||||
killAgent(killBody.tabId ?? null);
|
||||
addChatEntry({ ts: new Date().toISOString(), role: 'agent', type: 'agent_error', error: 'Killed by user' });
|
||||
// Process next in queue
|
||||
if (messageQueue.length > 0) {
|
||||
@@ -1386,7 +1388,8 @@ async function start() {
|
||||
if (!validateAuth(req)) {
|
||||
return new Response(JSON.stringify({ error: 'Unauthorized' }), { status: 401, headers: { 'Content-Type': 'application/json' } });
|
||||
}
|
||||
killAgent();
|
||||
const stopBody = await req.json().catch(() => ({}));
|
||||
killAgent(stopBody.tabId ?? null);
|
||||
addChatEntry({ ts: new Date().toISOString(), role: 'agent', type: 'agent_error', error: 'Stopped by user' });
|
||||
return new Response(JSON.stringify({ ok: true, queuedMessages: messageQueue.length }), {
|
||||
status: 200, headers: { 'Content-Type': 'application/json' },
|
||||
|
||||
@@ -20,12 +20,18 @@ const SERVER_URL = `http://127.0.0.1:${SERVER_PORT}`;
|
||||
const POLL_MS = 200; // 200ms poll — keeps time-to-first-token low
|
||||
const B = process.env.BROWSE_BIN || path.resolve(__dirname, '../../.claude/skills/gstack/browse/dist/browse');
|
||||
|
||||
const CANCEL_DIR = path.join(process.env.HOME || '/tmp', '.gstack');
|
||||
function cancelFileForTab(tabId: number): string {
|
||||
return path.join(CANCEL_DIR, `sidebar-agent-cancel-${tabId}`);
|
||||
}
|
||||
|
||||
let lastLine = 0;
|
||||
let authToken: string | null = null;
|
||||
// Per-tab processing — each tab can run its own agent concurrently
|
||||
const processingTabs = new Set<number>();
|
||||
// Active claude subprocesses — keyed by tabId for targeted kill
|
||||
const activeProcs = new Map<number, ReturnType<typeof spawn>>();
|
||||
let activeProc: ReturnType<typeof spawn> | null = null;
|
||||
// Kill-file timestamp last seen — avoids double-kill on same write
|
||||
let lastKillTs = 0;
|
||||
|
||||
@@ -250,6 +256,10 @@ async function askClaude(queueEntry: any): Promise<void> {
|
||||
effectiveCwd = process.cwd();
|
||||
}
|
||||
|
||||
// Clear any stale cancel signal for this tab before starting
|
||||
const cancelFile = cancelFileForTab(tid);
|
||||
try { fs.unlinkSync(cancelFile); } catch {}
|
||||
|
||||
const proc = spawn('claude', claudeArgs, {
|
||||
stdio: ['pipe', 'pipe', 'pipe'],
|
||||
cwd: effectiveCwd,
|
||||
@@ -270,9 +280,23 @@ async function askClaude(queueEntry: any): Promise<void> {
|
||||
|
||||
// Track active procs so kill-file polling can terminate them
|
||||
activeProcs.set(tid, proc);
|
||||
activeProc = proc;
|
||||
|
||||
proc.stdin.end();
|
||||
|
||||
// Poll for per-tab cancel signal from server's killAgent()
|
||||
const cancelCheck = setInterval(() => {
|
||||
try {
|
||||
if (fs.existsSync(cancelFile)) {
|
||||
console.log(`[sidebar-agent] Cancel signal received for tab ${tid} — killing claude subprocess`);
|
||||
try { proc.kill('SIGTERM'); } catch {}
|
||||
setTimeout(() => { try { proc.kill('SIGKILL'); } catch {} }, 3000);
|
||||
fs.unlinkSync(cancelFile);
|
||||
clearInterval(cancelCheck);
|
||||
}
|
||||
} catch {}
|
||||
}, 500);
|
||||
|
||||
let buffer = '';
|
||||
|
||||
proc.stdout.on('data', (data: Buffer) => {
|
||||
@@ -293,6 +317,8 @@ async function askClaude(queueEntry: any): Promise<void> {
|
||||
});
|
||||
|
||||
proc.on('close', (code) => {
|
||||
clearInterval(cancelCheck);
|
||||
activeProc = null;
|
||||
activeProcs.delete(tid);
|
||||
if (buffer.trim()) {
|
||||
try { handleStreamEvent(JSON.parse(buffer), tid); } catch (err: any) {
|
||||
@@ -310,6 +336,8 @@ async function askClaude(queueEntry: any): Promise<void> {
|
||||
});
|
||||
|
||||
proc.on('error', (err) => {
|
||||
clearInterval(cancelCheck);
|
||||
activeProc = null;
|
||||
const errorMsg = stderrBuffer.trim()
|
||||
? `${err.message}\nstderr: ${stderrBuffer.trim().slice(-500)}`
|
||||
: err.message;
|
||||
|
||||
@@ -3,15 +3,34 @@
|
||||
* Localhost and private IPs are allowed (primary use case: QA testing local dev servers).
|
||||
*/
|
||||
|
||||
const BLOCKED_METADATA_HOSTS = new Set([
|
||||
'169.254.169.254', // AWS/GCP/Azure instance metadata (IPv4 link-local)
|
||||
export const BLOCKED_METADATA_HOSTS = new Set([
|
||||
'169.254.169.254', // AWS/GCP/Azure instance metadata
|
||||
'fe80::1', // IPv6 link-local — common metadata endpoint alias
|
||||
'fd00::', // IPv6 unique local (metadata in some cloud setups)
|
||||
'::ffff:169.254.169.254', // IPv4-mapped IPv6 form of the metadata IP
|
||||
'metadata.google.internal', // GCP metadata
|
||||
'metadata.azure.internal', // Azure IMDS
|
||||
]);
|
||||
|
||||
/**
|
||||
* IPv6 prefixes to block (CIDR-style). Any address starting with these
|
||||
* hex prefixes is rejected. Covers the full ULA range (fc00::/7 = fc00:: and fd00::).
|
||||
*/
|
||||
const BLOCKED_IPV6_PREFIXES = ['fc', 'fd'];
|
||||
|
||||
/**
|
||||
* Check if an IPv6 address falls within a blocked prefix range.
|
||||
* Handles the full ULA range (fc00::/7), not just the exact literal fd00::.
|
||||
* Only matches actual IPv6 addresses (must contain ':'), not hostnames
|
||||
* like fd.example.com or fcustomer.com.
|
||||
*/
|
||||
function isBlockedIpv6(addr: string): boolean {
|
||||
const normalized = addr.toLowerCase().replace(/^\[|\]$/g, '');
|
||||
// Must contain a colon to be an IPv6 address — avoids false positives on
|
||||
// hostnames like fd.example.com or fcustomer.com
|
||||
if (!normalized.includes(':')) return false;
|
||||
return BLOCKED_IPV6_PREFIXES.some(prefix => normalized.startsWith(prefix));
|
||||
}
|
||||
|
||||
/**
|
||||
* Normalize hostname for blocklist comparison:
|
||||
* - Strip trailing dot (DNS fully-qualified notation)
|
||||
@@ -37,7 +56,7 @@ function isMetadataIp(hostname: string): boolean {
|
||||
try {
|
||||
const probe = new URL(`http://${hostname}`);
|
||||
const normalized = probe.hostname;
|
||||
if (BLOCKED_METADATA_HOSTS.has(normalized)) return true;
|
||||
if (BLOCKED_METADATA_HOSTS.has(normalized) || isBlockedIpv6(normalized)) return true;
|
||||
// Also check after stripping trailing dot
|
||||
if (normalized.endsWith('.') && BLOCKED_METADATA_HOSTS.has(normalized.slice(0, -1))) return true;
|
||||
} catch {
|
||||
@@ -69,7 +88,7 @@ async function resolvesToBlockedIp(hostname: string): Promise<boolean> {
|
||||
const v6Check = resolve6(hostname).then(
|
||||
(addresses) => addresses.some(addr => {
|
||||
const normalized = addr.toLowerCase();
|
||||
return BLOCKED_METADATA_HOSTS.has(normalized) ||
|
||||
return BLOCKED_METADATA_HOSTS.has(normalized) || isBlockedIpv6(normalized) ||
|
||||
// fe80::/10 is link-local — always block (covers all fe80:: addresses)
|
||||
normalized.startsWith('fe80:');
|
||||
}),
|
||||
@@ -100,7 +119,7 @@ export async function validateNavigationUrl(url: string): Promise<void> {
|
||||
|
||||
const hostname = normalizeHostname(parsed.hostname.toLowerCase());
|
||||
|
||||
if (BLOCKED_METADATA_HOSTS.has(hostname) || isMetadataIp(hostname)) {
|
||||
if (BLOCKED_METADATA_HOSTS.has(hostname) || isMetadataIp(hostname) || isBlockedIpv6(hostname)) {
|
||||
throw new Error(
|
||||
`Blocked: ${parsed.hostname} is a cloud metadata endpoint. Access is denied for security.`
|
||||
);
|
||||
|
||||
Reference in New Issue
Block a user