mirror of
https://github.com/KeygraphHQ/shannon.git
synced 2026-05-16 22:33:32 +02:00
51e621d0d5
* refactor: modularize claude-executor and extract shared utilities
- Extract message handling into src/ai/message-handlers.ts with pure functions
- Extract output formatting into src/ai/output-formatters.ts
- Extract progress management into src/ai/progress-manager.ts
- Add audit-logger.ts with Null Object pattern for optional logging
- Add shared utilities: formatting.ts, file-io.ts, functional.ts
- Consolidate getPromptNameForAgent into src/types/agents.ts
* feat: add Claude Code custom commands for debug and review
* feat: add Temporal integration foundation (phase 1-2)
- Add Temporal SDK dependencies (@temporalio/client, worker, workflow, activity)
- Add shared types for pipeline state, metrics, and progress queries
- Add classifyErrorForTemporal() for retry behavior classification
- Add docker-compose for Temporal server with SQLite persistence
* feat: add Temporal activities for agent execution (phase 3)
- Add activities.ts with heartbeat loop, git checkpoint/rollback, and error classification
- Export runClaudePrompt, validateAgentOutput, ClaudePromptResult for Temporal use
- Track attempt number via Temporal Context for accurate audit logging
- Rollback git workspace before retry to ensure clean state
* feat: add Temporal workflow for 5-phase pipeline orchestration (phase 4)
* feat: add Temporal worker, client, and query tools (phase 5)
- Add worker.ts with workflow bundling and graceful shutdown
- Add client.ts CLI to start pipelines with progress polling
- Add query.ts CLI to inspect running workflow state
- Fix buffer overflow by truncating error messages and stack traces
- Skip git operations gracefully on non-git repositories
- Add kill.sh/start.sh dev scripts and Dockerfile.worker
* feat: fix Docker worker container setup
- Install uv instead of deprecated uvx package
- Add mcp-server and configs directories to container
- Mount target repo dynamically via TARGET_REPO env variable
* fix: add report assembly step to Temporal workflow
- Add assembleReportActivity to concatenate exploitation evidence files before report agent runs
- Call assembleFinalReport in workflow Phase 5 before runReportAgent
- Ensure deliverables directory exists before writing final report
- Simplify pipeline-testing report prompt to just prepend header
* refactor: consolidate Docker setup to root docker-compose.yml
* feat: improve Temporal client UX and env handling
- Change default to fire-and-forget (--wait flag to opt-in)
- Add splash screen and improve console output formatting
- Add .env to gitignore, remove from dockerignore for container access
- Add Taskfile for common development commands
* refactor: simplify session ID handling and improve Taskfile options
- Include hostname in workflow ID for better audit log organization
- Extract sanitizeHostname utility to audit/utils.ts for reuse
- Remove unused generateSessionLogPath and buildLogFilePath functions
- Simplify Taskfile with CONFIG/OUTPUT/CLEAN named parameters
* chore: add .env.example and simplify .gitignore
* docs: update README and CLAUDE.md for Temporal workflow usage
- Replace Docker CLI instructions with Task-based commands
- Add monitoring/stopping sections and workflow examples
- Document Temporal orchestration layer and troubleshooting
- Simplify file structure to key files overview
* refactor: replace Taskfile with bash CLI script
- Add shannon bash script with start/logs/query/stop/help commands
- Remove Taskfile.yml dependency (no longer requires Task installation)
- Update README.md and CLAUDE.md to use ./shannon commands
- Update client.ts output to show ./shannon commands
* docs: fix deliverable filename in README
* refactor: remove direct CLI and .shannon-store.json in favor of Temporal
- Delete src/shannon.ts direct CLI entry point (Temporal is now the only mode)
- Remove .shannon-store.json session lock (Temporal handles workflow deduplication)
- Remove broken scripts/export-metrics.js (imported non-existent function)
- Update package.json to remove main, start script, and bin entry
- Clean up CLAUDE.md and debug.md to remove obsolete references
* chore: remove licensing comments from prompt files to prevent leaking into actual prompts
* fix: resolve parallel workflow race conditions and retry logic bugs
- Fix save_deliverable race condition using closure pattern instead of global variable
- Fix error classification order so OutputValidationError matches before generic validation
- Fix ApplicationFailure re-classification bug by checking instanceof before re-throwing
- Add per-error-type retry limits (3 for output validation, 50 for billing)
- Add fast retry intervals for pipeline testing mode (10s vs 5min)
- Increase worker concurrent activities to 25 for parallel workflows
* refactor: pipeline vuln→exploit workflow for parallel execution
- Replace sync barrier between vuln/exploit phases with independent pipelines
- Each vuln type runs: vuln agent → queue check → conditional exploit
- Add checkExploitationQueue activity to skip exploits when no vulns found
- Use Promise.allSettled for graceful failure handling across pipelines
- Add PipelineSummary type for aggregated cost/duration/turns metrics
* fix: re-throw retryable errors in checkExploitationQueue
* fix: detect and retry on Claude Code spending cap errors
- Add spending cap pattern detection in detectApiError() with retryable error
- Add matching patterns to classifyErrorForTemporal() for proper Temporal retry
- Add defense-in-depth safeguard in runClaudePrompt() for $0 cost / low turn detection
- Add final sanity check in activities before declaring success
* fix: increase heartbeat timeout to prevent false worker-dead detection
Original 30s timeout was from POC spec assuming <5min activities. With
hour-long activities and multiple concurrent workflows sharing one worker,
resource contention causes event loop stalls exceeding 30s, triggering
false heartbeat timeouts. Increased to 10min (prod) and 5min (testing).
* fix: temporal db init
* fix: persist home dir
* feat: add per-workflow unified logging with ./shannon logs ID=<workflow-id>
- Add WorkflowLogger class for human-readable, per-workflow log files
- Create workflow.log in audit-logs/{workflowId}/ with phase, agent, tool, and LLM events
- Update ./shannon logs to require ID param and tail specific workflow log
- Add phase transition logging at workflow boundaries
- Include workflow completion summary with agent breakdown (duration, cost)
- Mount audit-logs volume in docker-compose for host access
---------
Co-authored-by: ezl-keygraph <ezhil@keygraph.io>
326 lines
9.4 KiB
TypeScript
326 lines
9.4 KiB
TypeScript
// Copyright (C) 2025 Keygraph, Inc.
|
|
//
|
|
// This program is free software: you can redistribute it and/or modify
|
|
// it under the terms of the GNU Affero General Public License version 3
|
|
// as published by the Free Software Foundation.
|
|
|
|
import { fs, path } from 'zx';
|
|
import { PentestError } from './error-handling.js';
|
|
import { asyncPipe } from './utils/functional.js';
|
|
|
|
/** Vulnerability categories the analysis → exploitation pipeline operates on. */
export type VulnType = 'injection' | 'xss' | 'auth' | 'ssrf' | 'authz';

/** File names the analysis agent is expected to produce for one vuln type. */
interface VulnTypeConfigItem {
  // Markdown analysis deliverable file name (joined under deliverables/).
  deliverable: string;
  // JSON exploitation queue file name (joined under deliverables/).
  queue: string;
}

/** Lookup table mapping every VulnType to its expected output files. */
type VulnTypeConfig = Record<VulnType, VulnTypeConfigItem>;

/** A rule's message: either a fixed string or derived from file existence. */
type ErrorMessageResolver = string | ((existence: FileExistence) => string);

/** One existence-validation rule evaluated against the deliverable/queue pair. */
interface ValidationRule {
  // Returns true when the rule is satisfied (no error).
  predicate: (existence: FileExistence) => boolean;
  // Message (or message factory) used when the predicate fails.
  errorMessage: ErrorMessageResolver;
  // Whether a failure of this rule should be retried upstream.
  retryable: boolean;
}

/** Existence flags for the two files a vuln-type analysis must produce. */
interface FileExistence {
  deliverableExists: boolean;
  queueExists: boolean;
}

/** Resolved absolute/joined paths for a vuln type's expected files. */
interface PathsBase {
  vulnType: VulnType;
  deliverable: string;
  queue: string;
  sourceDir: string;
}

/** PathsBase enriched with the result of the on-disk existence check. */
interface PathsWithExistence extends PathsBase {
  existence: FileExistence;
}

/** PathsWithExistence enriched with the parsed, validated queue contents. */
interface PathsWithQueue extends PathsWithExistence {
  queueData: QueueData;
}

/**
 * Error carrier threaded through the pipeline: each stage passes it along
 * unchanged so the final stage can throw it.
 */
interface PathsWithError {
  error: PentestError;
}

/** Minimal shape of an exploitation queue file: a vulnerabilities array plus arbitrary extra keys. */
interface QueueData {
  vulnerabilities: unknown[];
  [key: string]: unknown;
}

/** Outcome of structurally validating a queue file's JSON content. */
interface QueueValidationResult {
  // True when content parsed and has a vulnerabilities array.
  valid: boolean;
  // Parsed queue when valid, otherwise null.
  data: QueueData | null;
  // JSON parse error message, or null when parsing succeeded.
  error: string | null;
}

/** Final verdict for one vuln type: whether to run the exploit agent. */
export interface ExploitationDecision {
  // True when the queue lists at least one vulnerability.
  shouldExploit: boolean;
  shouldRetry: boolean;
  vulnerabilityCount: number;
  vulnType: VulnType;
}

/** Result-object wrapper used by safeValidateQueueAndDeliverable instead of throwing. */
export interface SafeValidationResult {
  success: boolean;
  // Present when success is true.
  data?: ExploitationDecision;
  // Present when success is false.
  error?: PentestError;
}
|
|
|
|
// Vulnerability type configuration as immutable data
|
|
const VULN_TYPE_CONFIG: VulnTypeConfig = Object.freeze({
|
|
injection: Object.freeze({
|
|
deliverable: 'injection_analysis_deliverable.md',
|
|
queue: 'injection_exploitation_queue.json',
|
|
}),
|
|
xss: Object.freeze({
|
|
deliverable: 'xss_analysis_deliverable.md',
|
|
queue: 'xss_exploitation_queue.json',
|
|
}),
|
|
auth: Object.freeze({
|
|
deliverable: 'auth_analysis_deliverable.md',
|
|
queue: 'auth_exploitation_queue.json',
|
|
}),
|
|
ssrf: Object.freeze({
|
|
deliverable: 'ssrf_analysis_deliverable.md',
|
|
queue: 'ssrf_exploitation_queue.json',
|
|
}),
|
|
authz: Object.freeze({
|
|
deliverable: 'authz_analysis_deliverable.md',
|
|
queue: 'authz_exploitation_queue.json',
|
|
}),
|
|
}) as VulnTypeConfig;
|
|
|
|
// Pure function to create validation rule
|
|
function createValidationRule(
|
|
predicate: (existence: FileExistence) => boolean,
|
|
errorMessage: ErrorMessageResolver,
|
|
retryable: boolean = true
|
|
): ValidationRule {
|
|
return Object.freeze({ predicate, errorMessage, retryable });
|
|
}
|
|
|
|
// Symmetric deliverable rules: queue and deliverable must exist together (prevents partial analysis from triggering exploitation)
|
|
const fileExistenceRules: readonly ValidationRule[] = Object.freeze([
|
|
createValidationRule(
|
|
({ deliverableExists, queueExists }) => deliverableExists && queueExists,
|
|
getExistenceErrorMessage
|
|
),
|
|
]);
|
|
|
|
// Generate appropriate error message based on which files are missing
|
|
function getExistenceErrorMessage(existence: FileExistence): string {
|
|
const { deliverableExists, queueExists } = existence;
|
|
|
|
if (!deliverableExists && !queueExists) {
|
|
return 'Analysis failed: Neither deliverable nor queue file exists. Analysis agent must create both files.';
|
|
}
|
|
if (!queueExists) {
|
|
return 'Analysis incomplete: Deliverable exists but queue file missing. Analysis agent must create both files.';
|
|
}
|
|
return 'Analysis incomplete: Queue exists but deliverable file missing. Analysis agent must create both files.';
|
|
}
|
|
|
|
// Pure function to create file paths
|
|
const createPaths = (
|
|
vulnType: VulnType,
|
|
sourceDir: string
|
|
): PathsBase | PathsWithError => {
|
|
const config = VULN_TYPE_CONFIG[vulnType];
|
|
if (!config) {
|
|
return {
|
|
error: new PentestError(
|
|
`Unknown vulnerability type: ${vulnType}`,
|
|
'validation',
|
|
false,
|
|
{ vulnType }
|
|
),
|
|
};
|
|
}
|
|
|
|
return Object.freeze({
|
|
vulnType,
|
|
deliverable: path.join(sourceDir, 'deliverables', config.deliverable),
|
|
queue: path.join(sourceDir, 'deliverables', config.queue),
|
|
sourceDir,
|
|
});
|
|
};
|
|
|
|
// Pure function to check file existence
|
|
const checkFileExistence = async (
|
|
paths: PathsBase | PathsWithError
|
|
): Promise<PathsWithExistence | PathsWithError> => {
|
|
if ('error' in paths) return paths;
|
|
|
|
const [deliverableExists, queueExists] = await Promise.all([
|
|
fs.pathExists(paths.deliverable),
|
|
fs.pathExists(paths.queue),
|
|
]);
|
|
|
|
return Object.freeze({
|
|
...paths,
|
|
existence: Object.freeze({ deliverableExists, queueExists }),
|
|
});
|
|
};
|
|
|
|
// Validates deliverable/queue symmetry - both must exist or neither
|
|
const validateExistenceRules = (
|
|
pathsWithExistence: PathsWithExistence | PathsWithError
|
|
): PathsWithExistence | PathsWithError => {
|
|
if ('error' in pathsWithExistence) return pathsWithExistence;
|
|
|
|
const { existence, vulnType } = pathsWithExistence;
|
|
|
|
// Find the first rule that fails
|
|
const failedRule = fileExistenceRules.find((rule) => !rule.predicate(existence));
|
|
|
|
if (failedRule) {
|
|
const message =
|
|
typeof failedRule.errorMessage === 'function'
|
|
? failedRule.errorMessage(existence)
|
|
: failedRule.errorMessage;
|
|
|
|
return {
|
|
error: new PentestError(
|
|
`${message} (${vulnType})`,
|
|
'validation',
|
|
failedRule.retryable,
|
|
{
|
|
vulnType,
|
|
deliverablePath: pathsWithExistence.deliverable,
|
|
queuePath: pathsWithExistence.queue,
|
|
existence,
|
|
}
|
|
),
|
|
};
|
|
}
|
|
|
|
return pathsWithExistence;
|
|
};
|
|
|
|
// Pure function to validate queue structure
|
|
const validateQueueStructure = (content: string): QueueValidationResult => {
|
|
try {
|
|
const parsed = JSON.parse(content) as unknown;
|
|
const isValid =
|
|
typeof parsed === 'object' &&
|
|
parsed !== null &&
|
|
'vulnerabilities' in parsed &&
|
|
Array.isArray((parsed as QueueData).vulnerabilities);
|
|
|
|
return Object.freeze({
|
|
valid: isValid,
|
|
data: isValid ? (parsed as QueueData) : null,
|
|
error: null,
|
|
});
|
|
} catch (parseError) {
|
|
return Object.freeze({
|
|
valid: false,
|
|
data: null,
|
|
error: parseError instanceof Error ? parseError.message : String(parseError),
|
|
});
|
|
}
|
|
};
|
|
|
|
// Queue parse failures are retryable - agent can fix malformed JSON on retry
|
|
const validateQueueContent = async (
|
|
pathsWithExistence: PathsWithExistence | PathsWithError
|
|
): Promise<PathsWithQueue | PathsWithError> => {
|
|
if ('error' in pathsWithExistence) return pathsWithExistence;
|
|
|
|
try {
|
|
const queueContent = await fs.readFile(pathsWithExistence.queue, 'utf8');
|
|
const queueValidation = validateQueueStructure(queueContent);
|
|
|
|
if (!queueValidation.valid) {
|
|
// Rule 6: Both exist, queue invalid
|
|
return {
|
|
error: new PentestError(
|
|
queueValidation.error
|
|
? `Queue validation failed for ${pathsWithExistence.vulnType}: Invalid JSON structure. Analysis agent must fix queue format.`
|
|
: `Queue validation failed for ${pathsWithExistence.vulnType}: Missing or invalid 'vulnerabilities' array. Analysis agent must fix queue structure.`,
|
|
'validation',
|
|
true, // retryable
|
|
{
|
|
vulnType: pathsWithExistence.vulnType,
|
|
queuePath: pathsWithExistence.queue,
|
|
originalError: queueValidation.error,
|
|
queueStructure: queueValidation.data ? Object.keys(queueValidation.data) : [],
|
|
}
|
|
),
|
|
};
|
|
}
|
|
|
|
return Object.freeze({
|
|
...pathsWithExistence,
|
|
queueData: queueValidation.data!,
|
|
});
|
|
} catch (readError) {
|
|
return {
|
|
error: new PentestError(
|
|
`Failed to read queue file for ${pathsWithExistence.vulnType}: ${readError instanceof Error ? readError.message : String(readError)}`,
|
|
'filesystem',
|
|
false,
|
|
{
|
|
vulnType: pathsWithExistence.vulnType,
|
|
queuePath: pathsWithExistence.queue,
|
|
originalError: readError instanceof Error ? readError.message : String(readError),
|
|
}
|
|
),
|
|
};
|
|
}
|
|
};
|
|
|
|
// Final decision: skip if queue says no vulns, proceed if vulns found, error otherwise
|
|
const determineExploitationDecision = (
|
|
validatedData: PathsWithQueue | PathsWithError
|
|
): ExploitationDecision => {
|
|
if ('error' in validatedData) {
|
|
throw validatedData.error;
|
|
}
|
|
|
|
const hasVulnerabilities = validatedData.queueData.vulnerabilities.length > 0;
|
|
|
|
// Rule 4: Both exist, queue valid and populated
|
|
// Rule 5: Both exist, queue valid but empty
|
|
return Object.freeze({
|
|
shouldExploit: hasVulnerabilities,
|
|
shouldRetry: false,
|
|
vulnerabilityCount: validatedData.queueData.vulnerabilities.length,
|
|
vulnType: validatedData.vulnType,
|
|
});
|
|
};
|
|
|
|
// Main functional validation pipeline.
//
// Runs the full validation chain for one vuln type:
//   path resolution -> file existence -> symmetry rules -> queue content
//   -> final exploitation decision.
// Each stage passes a PathsWithError through unchanged on failure, and
// determineExploitationDecision re-throws it, so callers see a
// PentestError thrown (rejected promise) on any failure.
export async function validateQueueAndDeliverable(
  vulnType: VulnType,
  sourceDir: string
): Promise<ExploitationDecision> {
  // asyncPipe threads the seed value through each stage in order
  // (presumably awaiting promise-returning stages — see
  // utils/functional.js for its exact contract).
  return asyncPipe<ExploitationDecision>(
    createPaths(vulnType, sourceDir),
    checkFileExistence,
    validateExistenceRules,
    validateQueueContent,
    determineExploitationDecision
  );
}
|
|
|
|
// Pure function to safely validate (returns result instead of throwing)
|
|
export const safeValidateQueueAndDeliverable = async (
|
|
vulnType: VulnType,
|
|
sourceDir: string
|
|
): Promise<SafeValidationResult> => {
|
|
try {
|
|
const result = await validateQueueAndDeliverable(vulnType, sourceDir);
|
|
return { success: true, data: result };
|
|
} catch (error) {
|
|
return { success: false, error: error as PentestError };
|
|
}
|
|
};
|