mirror of
https://github.com/FuzzingLabs/fuzzforge_ai.git
synced 2026-05-16 18:53:26 +02:00
Initial commit
This commit is contained in:
@@ -0,0 +1,19 @@
|
||||
"""
|
||||
FuzzForge CLI - Command-line interface for FuzzForge security testing platform.
|
||||
|
||||
A comprehensive CLI for managing workflows, runs, findings, and real-time monitoring
|
||||
with local project management and persistent storage.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
__version__ = "0.6.0"
|
||||
@@ -0,0 +1,311 @@
|
||||
"""
|
||||
API response validation and graceful degradation utilities.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import logging
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
from pydantic import BaseModel, ValidationError as PydanticValidationError
|
||||
|
||||
from .exceptions import ValidationError, APIConnectionError
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WorkflowMetadata(BaseModel):
    """Expected workflow metadata structure.

    Pydantic model used to validate workflow payloads returned by the
    FuzzForge API before the CLI consumes them.
    """
    # Required identity fields — validation fails if the API omits them.
    name: str
    version: str
    # Optional descriptive metadata.
    author: Optional[str] = None
    description: Optional[str] = None
    # JSON-schema-style parameter definitions. Pydantic deep-copies mutable
    # field defaults per instance, so these shared literals are safe here.
    parameters: Dict[str, Any] = {}
    supported_volume_modes: List[str] = ["ro", "rw"]
|
||||
|
||||
|
||||
class RunStatus(BaseModel):
    """Expected run status structure returned by the API."""
    run_id: str
    workflow: str
    status: str
    created_at: str
    updated_at: str

    def _status_in(self, states) -> bool:
        """True when the lower-cased status is one of *states*."""
        return self.status.lower() in states

    @property
    def is_completed(self) -> bool:
        """Check if run is in a completed state"""
        return self._status_in({"completed", "success", "finished"})

    @property
    def is_running(self) -> bool:
        """Check if run is currently running"""
        return self._status_in({"running", "in_progress", "active"})

    @property
    def is_failed(self) -> bool:
        """Check if run has failed"""
        return self._status_in({"failed", "error", "cancelled"})
|
||||
|
||||
|
||||
class FindingsResponse(BaseModel):
    """Expected findings response structure"""
    run_id: str
    sarif: Dict[str, Any]  # Full SARIF document as returned by the API.
    total_issues: Optional[int] = None  # Server-side count, when provided.

    def model_post_init(self, __context: Any) -> None:
        """Validate SARIF structure after initialization.

        Graceful degradation: a malformed SARIF payload is only logged as a
        warning here, never rejected.
        """
        if not self.sarif.get("runs"):
            logger.warning(f"SARIF data for run {self.run_id} missing 'runs' section")
        elif not isinstance(self.sarif["runs"], list):
            logger.warning(f"SARIF 'runs' section is not a list for run {self.run_id}")
|
||||
|
||||
|
||||
def validate_api_response(response_data: Any, expected_model: type[BaseModel],
                          operation: str = "API operation") -> BaseModel:
    """
    Validate API response against expected Pydantic model.

    Args:
        response_data: Raw response data from API
        expected_model: Pydantic model class to validate against
        operation: Description of the operation for error messages

    Returns:
        Validated model instance

    Raises:
        ValidationError: If validation fails
    """
    expected = f"valid {expected_model.__name__} format"
    try:
        return expected_model.model_validate(response_data)
    except PydanticValidationError as e:
        logger.error(f"API response validation failed for {operation}: {e}")
        # Truncate huge payloads so the error message stays readable.
        text = str(response_data)
        preview = text[:200] + "..." if len(text) > 200 else text
        raise ValidationError(
            f"API response for {operation}",
            preview,
            expected
        ) from e
    except Exception as e:
        logger.error(f"Unexpected error validating API response for {operation}: {e}")
        raise ValidationError(
            f"API response for {operation}",
            "invalid data",
            expected
        ) from e
|
||||
|
||||
|
||||
def validate_sarif_structure(sarif_data: Dict[str, Any]) -> Dict[str, str]:
    """
    Validate basic SARIF structure and return validation issues.

    Args:
        sarif_data: SARIF data dictionary

    Returns:
        Dictionary of validation issues found (empty when the basic
        structure looks sane)
    """
    # Non-dict input: nothing further can be inspected.
    if not isinstance(sarif_data, dict):
        return {"structure": "SARIF data is not a dictionary"}

    issues: Dict[str, str] = {}

    # At most one runs-level problem is reported (guard-clause form of the
    # original if/elif chain).
    if "runs" not in sarif_data:
        issues["runs"] = "Missing 'runs' section in SARIF data"
        return issues
    runs = sarif_data["runs"]
    if not isinstance(runs, list):
        issues["runs_type"] = "'runs' section is not a list"
        return issues
    if not runs:
        issues["runs_empty"] = "'runs' section is empty"
        return issues

    # Only the first run is inspected.
    first_run = runs[0]
    if not isinstance(first_run, dict):
        issues["run_structure"] = "First run is not a dictionary"
        return issues

    # 'results' and 'tool' are checked independently of each other.
    if "results" not in first_run:
        issues["results"] = "Missing 'results' section in run"
    elif not isinstance(first_run["results"], list):
        issues["results_type"] = "'results' section is not a list"

    if "tool" not in first_run:
        issues["tool"] = "Missing 'tool' section in run"
    elif not isinstance(first_run["tool"], dict):
        issues["tool_type"] = "'tool' section is not a dictionary"

    return issues
|
||||
|
||||
|
||||
def safe_extract_sarif_summary(sarif_data: Dict[str, Any]) -> Dict[str, Any]:
    """
    Safely extract summary information from SARIF data with fallbacks.

    Args:
        sarif_data: SARIF data dictionary

    Returns:
        Summary dictionary with safe defaults
    """
    summary: Dict[str, Any] = {
        "total_issues": 0,
        "by_severity": {},
        "by_rule": {},
        "tools": [],
        "validation_issues": [],
    }

    # Surface (but do not act on) structural problems.
    validation_issues = validate_sarif_structure(sarif_data)
    if validation_issues:
        summary["validation_issues"] = list(validation_issues.values())
        logger.warning(f"SARIF validation issues: {validation_issues}")

    try:
        runs = sarif_data.get("runs", [])
        if not runs:
            return summary

        # Only the first run is summarized, matching validate_sarif_structure.
        first = runs[0]
        results = first.get("results", [])
        summary["total_issues"] = len(results)

        by_severity = summary["by_severity"]
        by_rule = summary["by_rule"]
        for result in results:
            try:
                level = result.get("level", "note")
                rule_id = result.get("ruleId", "unknown")
                by_severity[level] = by_severity.get(level, 0) + 1
                by_rule[rule_id] = by_rule.get(rule_id, 0) + 1
            except Exception as e:
                # A single malformed result must not abort the summary.
                logger.warning(f"Failed to process result: {e}")
                continue

        try:
            driver = first.get("tool", {}).get("driver", {})
            if driver.get("name"):
                summary["tools"].append({
                    "name": driver.get("name", "unknown"),
                    "version": driver.get("version", "unknown"),
                    "rules": len(driver.get("rules", [])),
                })
        except Exception as e:
            logger.warning(f"Failed to extract tool information: {e}")

    except Exception as e:
        logger.error(f"Failed to extract SARIF summary: {e}")
        summary["validation_issues"].append(f"Summary extraction failed: {e}")

    return summary
|
||||
|
||||
|
||||
def validate_workflow_parameters(parameters: Dict[str, Any],
                                 workflow_schema: Dict[str, Any]) -> List[str]:
    """
    Validate workflow parameters against schema with detailed error messages.

    Args:
        parameters: Parameters to validate
        workflow_schema: JSON schema for the workflow

    Returns:
        List of validation error messages (empty when everything validates)
    """
    errors = []

    try:
        properties = workflow_schema.get("properties", {})
        required = set(workflow_schema.get("required", []))

        # Check required parameters
        missing_required = required - set(parameters.keys())
        if missing_required:
            errors.append(f"Missing required parameters: {', '.join(missing_required)}")

        # Validate individual parameters
        for param_name, param_value in parameters.items():
            if param_name not in properties:
                errors.append(f"Unknown parameter: {param_name}")
                continue

            param_schema = properties[param_name]
            param_type = param_schema.get("type", "string")

            # Bug fix: bool is a subclass of int in Python, so True/False
            # previously passed the "integer"/"number" checks. JSON-schema
            # semantics treat booleans as distinct from numeric types.
            is_bool = isinstance(param_value, bool)

            # Type validation
            if param_type == "integer" and (is_bool or not isinstance(param_value, int)):
                errors.append(f"Parameter '{param_name}' must be an integer")
            elif param_type == "number" and (is_bool or not isinstance(param_value, (int, float))):
                errors.append(f"Parameter '{param_name}' must be a number")
            elif param_type == "boolean" and not is_bool:
                errors.append(f"Parameter '{param_name}' must be a boolean")
            elif param_type == "array" and not isinstance(param_value, list):
                errors.append(f"Parameter '{param_name}' must be an array")

            # Range validation for numbers (booleans excluded, see above)
            if param_type in ["integer", "number"] and not is_bool and isinstance(param_value, (int, float)):
                minimum = param_schema.get("minimum")
                maximum = param_schema.get("maximum")

                if minimum is not None and param_value < minimum:
                    errors.append(f"Parameter '{param_name}' must be >= {minimum}")
                if maximum is not None and param_value > maximum:
                    errors.append(f"Parameter '{param_name}' must be <= {maximum}")

    except Exception as e:
        # Graceful degradation: report the failure as a validation error
        # instead of crashing the CLI.
        logger.error(f"Parameter validation failed: {e}")
        errors.append(f"Parameter validation error: {e}")

    return errors
|
||||
|
||||
|
||||
def create_fallback_response(response_type: str, **kwargs) -> Dict[str, Any]:
    """
    Create fallback responses when API calls fail.

    Args:
        response_type: Type of response to create
        **kwargs: Additional data for the fallback

    Returns:
        Fallback response dictionary
    """
    # Build only the fallback that was asked for.
    if response_type == "workflow_list":
        fallback: Dict[str, Any] = {
            "workflows": [],
            "message": "Unable to fetch workflows from API"
        }
    elif response_type == "run_status":
        fallback = {
            "run_id": kwargs.get("run_id", "unknown"),
            "workflow": kwargs.get("workflow", "unknown"),
            "status": "unknown",
            "created_at": kwargs.get("created_at", "unknown"),
            "updated_at": kwargs.get("updated_at", "unknown"),
            "message": "Unable to fetch run status from API"
        }
    elif response_type == "findings":
        fallback = {
            "run_id": kwargs.get("run_id", "unknown"),
            "sarif": {
                "version": "2.1.0",
                "runs": []
            },
            "message": "Unable to fetch findings from API"
        }
    else:
        fallback = {"message": f"No fallback available for {response_type}"}

    logger.info(f"Using fallback response for {response_type}: {fallback.get('message', 'Unknown fallback')}")

    return fallback
|
||||
@@ -0,0 +1,14 @@
|
||||
"""
|
||||
Command modules for FuzzForge CLI.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
@@ -0,0 +1,133 @@
|
||||
"""AI integration commands for the FuzzForge CLI."""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from datetime import datetime
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
|
||||
from ..config import ProjectConfigManager
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer(name="ai", help="Interact with the FuzzForge AI system")
|
||||
|
||||
|
||||
@app.command("agent")
def ai_agent() -> None:
    """Launch the full AI agent CLI with A2A orchestration."""
    console.print("[cyan]🤖 Opening Project FuzzForge AI Agent session[/cyan]\n")

    try:
        # Imported lazily so the base CLI works without AI extras installed.
        from fuzzforge_ai.cli import FuzzForgeCLI

        agent_cli = FuzzForgeCLI()
        asyncio.run(agent_cli.run())
    except ImportError as exc:
        console.print(f"[red]Failed to import AI CLI:[/red] {exc}")
        console.print("[dim]Ensure AI dependencies are installed (pip install -e .)[/dim]")
        raise typer.Exit(1) from exc
    except Exception as exc:  # pragma: no cover - runtime safety
        console.print(f"[red]Failed to launch AI agent:[/red] {exc}")
        console.print("[dim]Check that .env contains LITELLM_MODEL and API keys[/dim]")
        raise typer.Exit(1) from exc
|
||||
|
||||
|
||||
# Memory + health commands
|
||||
# Memory + health commands
@app.command("status")
def ai_status() -> None:
    """Show AI system health and configuration."""
    # NOTE(review): get_ai_status_async is not defined or imported anywhere
    # visible in this module — confirm it exists elsewhere, otherwise this
    # always reports "Failed to get AI status: name '...' is not defined".
    try:
        status = asyncio.run(get_ai_status_async())
    except Exception as exc:  # pragma: no cover
        console.print(f"[red]Failed to get AI status:[/red] {exc}")
        raise typer.Exit(1) from exc

    console.print("[bold cyan]🤖 FuzzForge AI System Status[/bold cyan]\n")

    # Configuration table: one row per setting, with a configured/missing icon.
    config_table = Table(title="Configuration", show_header=True, header_style="bold magenta")
    config_table.add_column("Setting", style="bold")
    config_table.add_column("Value", style="cyan")
    config_table.add_column("Status", style="green")

    # Each entry is expected to carry "configured" and "value" keys
    # (as indexed below).
    for key, info in status["config"].items():
        status_icon = "✅" if info["configured"] else "❌"
        display_value = info["value"] if info["value"] else "-"
        config_table.add_row(key, display_value, f"{status_icon}")

    console.print(config_table)
    console.print()

    # Component availability table.
    components_table = Table(title="AI Components", show_header=True, header_style="bold magenta")
    components_table.add_column("Component", style="bold")
    components_table.add_column("Status", style="green")
    components_table.add_column("Details", style="dim")

    for component, info in status["components"].items():
        status_icon = "🟢" if info["available"] else "🔴"
        components_table.add_row(component, status_icon, info["details"])

    console.print(components_table)

    # Registered-agent count is only shown when at least one agent exists.
    if status["agents"]:
        console.print()
        console.print(f"[bold green]✓[/bold green] {len(status['agents'])} agents registered")
|
||||
|
||||
|
||||
@app.command("server")
def ai_server(
    port: int = typer.Option(10100, "--port", "-p", help="Server port (default: 10100)"),
) -> None:
    """Start AI system as an A2A server."""
    console.print(f"[cyan]🚀 Starting FuzzForge AI Server on port {port}[/cyan]")
    console.print("[dim]Other agents can register this instance at the A2A endpoint[/dim]\n")

    try:
        # The server entry point reads its port from the environment.
        os.environ["FUZZFORGE_PORT"] = str(port)

        from fuzzforge_ai.__main__ import main as run_server

        run_server()
    except Exception as exc:  # pragma: no cover
        console.print(f"[red]Failed to start AI server:[/red] {exc}")
        raise typer.Exit(1) from exc
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Helper functions (largely adapted from the OSS implementation)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
@app.callback(invoke_without_command=True)
def ai_callback(ctx: typer.Context):
    """
    🤖 AI integration features
    """
    # A subcommand was given — let it run.
    if ctx.invoked_subcommand is not None:
        return

    # Invoked bare: point the user at the available subcommands.
    console.print("🚧 [yellow]AI command is not fully implemented yet.[/yellow]")
    console.print("Please use specific subcommands:")
    for cmd, desc in (
        ("agent", "Launch the full AI agent CLI"),
        ("status", "Show AI system health and configuration"),
        ("server", "Start AI system as an A2A server"),
    ):
        console.print(f"  • [cyan]ff ai {cmd}[/cyan] - {desc}")
|
||||
|
||||
|
||||
@@ -0,0 +1,384 @@
|
||||
"""
|
||||
Configuration management commands.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import typer
|
||||
from pathlib import Path
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich import box
|
||||
from typing import Optional
|
||||
|
||||
from ..config import (
|
||||
get_project_config,
|
||||
ensure_project_config,
|
||||
get_global_config,
|
||||
save_global_config,
|
||||
FuzzForgeConfig
|
||||
)
|
||||
from ..exceptions import require_project, ValidationError, handle_error
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer()
|
||||
|
||||
|
||||
@app.command("show")
def show_config(
    global_config: bool = typer.Option(
        False, "--global", "-g",
        help="Show global configuration instead of project config"
    )
):
    """
    📋 Display current configuration settings
    """
    # Select the config source and remember where it lives for the footer.
    if global_config:
        config = get_global_config()
        config_type = "Global"
        config_path = Path.home() / ".config" / "fuzzforge" / "config.yaml"
    else:
        try:
            require_project()
            config = get_project_config()
            if not config:
                raise ValidationError("project configuration", "missing", "initialized project")
        except Exception as e:
            # handle_error reports and exits the process.
            handle_error(e, "loading project configuration")
            return  # Unreachable, but makes static analysis happy
        config_type = "Project"
        config_path = Path.cwd() / ".fuzzforge" / "config.yaml"

    console.print(f"\n⚙️ [bold]{config_type} Configuration[/bold]\n")

    # Project settings
    project_table = Table(show_header=False, box=box.SIMPLE)
    project_table.add_column("Setting", style="bold cyan")
    project_table.add_column("Value")

    project_table.add_row("Project Name", config.project.name)
    project_table.add_row("API URL", config.project.api_url)
    project_table.add_row("Default Timeout", f"{config.project.default_timeout}s")
    # Default workflow is optional; omit the row entirely when unset.
    if config.project.default_workflow:
        project_table.add_row("Default Workflow", config.project.default_workflow)

    console.print(
        Panel.fit(
            project_table,
            title="📁 Project Settings",
            box=box.ROUNDED
        )
    )

    # Retention settings
    retention_table = Table(show_header=False, box=box.SIMPLE)
    retention_table.add_column("Setting", style="bold cyan")
    retention_table.add_column("Value")

    retention_table.add_row("Max Runs", str(config.retention.max_runs))
    retention_table.add_row("Keep Findings (days)", str(config.retention.keep_findings_days))

    console.print(
        Panel.fit(
            retention_table,
            title="🗄️ Data Retention",
            box=box.ROUNDED
        )
    )

    # Preferences
    prefs_table = Table(show_header=False, box=box.SIMPLE)
    prefs_table.add_column("Setting", style="bold cyan")
    prefs_table.add_column("Value")

    prefs_table.add_row("Auto Save Findings", "✅ Yes" if config.preferences.auto_save_findings else "❌ No")
    prefs_table.add_row("Show Progress Bars", "✅ Yes" if config.preferences.show_progress_bars else "❌ No")
    prefs_table.add_row("Table Style", config.preferences.table_style)
    prefs_table.add_row("Color Output", "✅ Yes" if config.preferences.color_output else "❌ No")

    console.print(
        Panel.fit(
            prefs_table,
            title="🎨 Preferences",
            box=box.ROUNDED
        )
    )

    console.print(f"\n📍 Config file: [dim]{config_path}[/dim]")
|
||||
|
||||
|
||||
@app.command("set")
def set_config(
    key: str = typer.Argument(..., help="Configuration key to set (e.g., 'project.name', 'project.api_url')"),
    value: str = typer.Argument(..., help="Value to set"),
    global_config: bool = typer.Option(
        False, "--global", "-g",
        help="Set in global configuration instead of project config"
    )
):
    """
    ⚙️ Set a configuration value
    """
    # Pick the config store to modify.
    if global_config:
        config = get_global_config()
        config_type = "global"
    else:
        config = get_project_config()
        if not config:
            console.print("❌ No project configuration found. Run 'ff init' first.", style="red")
            raise typer.Exit(1)
        config_type = "project"

    # Parse the key path
    key_parts = key.split('.')
    if len(key_parts) != 2:
        console.print("❌ Key must be in format 'section.setting' (e.g., 'project.name')", style="red")
        raise typer.Exit(1)

    section, setting = key_parts

    try:
        # Update configuration. Integers are parsed with int() (may raise
        # ValueError); booleans accept true/yes/1/on, case-insensitively.
        if section == "project":
            if setting == "name":
                config.project.name = value
            elif setting == "api_url":
                config.project.api_url = value
            elif setting == "default_timeout":
                config.project.default_timeout = int(value)
            elif setting == "default_workflow":
                # The literal "none" clears the default workflow.
                config.project.default_workflow = value if value.lower() != "none" else None
            else:
                console.print(f"❌ Unknown project setting: {setting}", style="red")
                raise typer.Exit(1)

        elif section == "retention":
            if setting == "max_runs":
                config.retention.max_runs = int(value)
            elif setting == "keep_findings_days":
                config.retention.keep_findings_days = int(value)
            else:
                console.print(f"❌ Unknown retention setting: {setting}", style="red")
                raise typer.Exit(1)

        elif section == "preferences":
            if setting == "auto_save_findings":
                config.preferences.auto_save_findings = value.lower() in ("true", "yes", "1", "on")
            elif setting == "show_progress_bars":
                config.preferences.show_progress_bars = value.lower() in ("true", "yes", "1", "on")
            elif setting == "table_style":
                config.preferences.table_style = value
            elif setting == "color_output":
                config.preferences.color_output = value.lower() in ("true", "yes", "1", "on")
            else:
                console.print(f"❌ Unknown preferences setting: {setting}", style="red")
                raise typer.Exit(1)

        else:
            console.print(f"❌ Unknown configuration section: {section}", style="red")
            console.print("Valid sections: project, retention, preferences", style="dim")
            raise typer.Exit(1)

        # Save configuration
        if global_config:
            save_global_config(config)
        else:
            config_path = Path.cwd() / ".fuzzforge" / "config.yaml"
            config.save_to_file(config_path)

        console.print(f"✅ Set {config_type} configuration: [bold cyan]{key}[/bold cyan] = [bold]{value}[/bold]", style="green")

    except typer.Exit:
        # Bug fix: typer.Exit subclasses Exception, so the Exit(1) raised in
        # the unknown-key branches above was previously caught by the generic
        # handler and reported a second, bogus "Failed to set configuration: 1"
        # message. Re-raise it untouched.
        raise
    except ValueError as e:
        console.print(f"❌ Invalid value for {key}: {e}", style="red")
        raise typer.Exit(1) from e
    except Exception as e:
        console.print(f"❌ Failed to set configuration: {e}", style="red")
        raise typer.Exit(1) from e
|
||||
|
||||
|
||||
@app.command("get")
def get_config(
    key: str = typer.Argument(..., help="Configuration key to get (e.g., 'project.name')"),
    global_config: bool = typer.Option(
        False, "--global", "-g",
        help="Get from global configuration instead of project config"
    )
):
    """
    📖 Get a specific configuration value
    """
    if global_config:
        config = get_global_config()
    else:
        config = get_project_config()
        if not config:
            console.print("❌ No project configuration found. Run 'ff init' first.", style="red")
            raise typer.Exit(1)

    # Parse the key path
    key_parts = key.split('.')
    if len(key_parts) != 2:
        console.print("❌ Key must be in format 'section.setting' (e.g., 'project.name')", style="red")
        raise typer.Exit(1)

    section, setting = key_parts

    try:
        # Get configuration value
        if section == "project":
            if setting == "name":
                value = config.project.name
            elif setting == "api_url":
                value = config.project.api_url
            elif setting == "default_timeout":
                value = config.project.default_timeout
            elif setting == "default_workflow":
                # Mirror `set`: an unset default workflow is shown as "none".
                value = config.project.default_workflow or "none"
            else:
                console.print(f"❌ Unknown project setting: {setting}", style="red")
                raise typer.Exit(1)

        elif section == "retention":
            if setting == "max_runs":
                value = config.retention.max_runs
            elif setting == "keep_findings_days":
                value = config.retention.keep_findings_days
            else:
                console.print(f"❌ Unknown retention setting: {setting}", style="red")
                raise typer.Exit(1)

        elif section == "preferences":
            if setting == "auto_save_findings":
                value = config.preferences.auto_save_findings
            elif setting == "show_progress_bars":
                value = config.preferences.show_progress_bars
            elif setting == "table_style":
                value = config.preferences.table_style
            elif setting == "color_output":
                value = config.preferences.color_output
            else:
                console.print(f"❌ Unknown preferences setting: {setting}", style="red")
                raise typer.Exit(1)

        else:
            console.print(f"❌ Unknown configuration section: {section}", style="red")
            raise typer.Exit(1)

        console.print(f"{key}: [bold cyan]{value}[/bold cyan]")

    except typer.Exit:
        # Bug fix: typer.Exit subclasses Exception, so the Exit(1) raised in
        # the unknown-key branches above was previously caught below and
        # reported as "Failed to get configuration: 1". Re-raise it untouched.
        raise
    except Exception as e:
        console.print(f"❌ Failed to get configuration: {e}", style="red")
        raise typer.Exit(1) from e
|
||||
|
||||
|
||||
@app.command("reset")
def reset_config(
    global_config: bool = typer.Option(
        False, "--global", "-g",
        help="Reset global configuration instead of project config"
    ),
    force: bool = typer.Option(
        False, "--force", "-f",
        help="Skip confirmation prompt"
    )
):
    """
    🔄 Reset configuration to defaults
    """
    config_type = "global" if global_config else "project"

    # Destructive operation: confirm unless --force was given.
    if not force:
        if not Confirm.ask(f"Reset {config_type} configuration to defaults?", default=False, console=console):
            console.print("❌ Reset cancelled", style="yellow")
            raise typer.Exit(0)

    try:
        # Create new default configuration
        new_config = FuzzForgeConfig()

        if global_config:
            save_global_config(new_config)
        else:
            if not Path.cwd().joinpath(".fuzzforge").exists():
                console.print("❌ No project configuration found. Run 'ff init' first.", style="red")
                raise typer.Exit(1)

            config_path = Path.cwd() / ".fuzzforge" / "config.yaml"
            new_config.save_to_file(config_path)

        console.print(f"✅ {config_type.title()} configuration reset to defaults", style="green")

    except typer.Exit:
        # Bug fix: typer.Exit subclasses Exception, so the Exit(1) above was
        # previously caught by the generic handler and reported as
        # "Failed to reset configuration: 1". Re-raise it untouched.
        raise
    except Exception as e:
        console.print(f"❌ Failed to reset configuration: {e}", style="red")
        raise typer.Exit(1) from e
|
||||
|
||||
|
||||
@app.command("edit")
def edit_config(
    global_config: bool = typer.Option(
        False, "--global", "-g",
        help="Edit global configuration instead of project config"
    )
):
    """
    📝 Open configuration file in default editor
    """
    import os
    import subprocess

    if global_config:
        config_path = Path.home() / ".config" / "fuzzforge" / "config.yaml"
        config_type = "global"
    else:
        config_path = Path.cwd() / ".fuzzforge" / "config.yaml"
        config_type = "project"

    if not config_path.exists():
        # Bug fix: the old message always claimed a *project* config was
        # missing, even when --global was used.
        if global_config:
            console.print(f"❌ No global configuration found at {config_path}", style="red")
        else:
            console.print("❌ No project configuration found. Run 'ff init' first.", style="red")
        raise typer.Exit(1)

    # Prefer the user's configured editor, then fall back to common ones.
    candidates = [e for e in (os.environ.get("VISUAL"), os.environ.get("EDITOR")) if e]
    candidates += ["code", "vim", "nano", "notepad"]

    editor = None
    for candidate in candidates:
        try:
            # Probe with --version to confirm the binary exists and runs.
            subprocess.run([candidate, "--version"], capture_output=True, check=True)
            editor = candidate
            break
        except (subprocess.CalledProcessError, FileNotFoundError):
            continue

    if not editor:
        console.print(f"📍 Configuration file: [bold cyan]{config_path}[/bold cyan]")
        console.print("❌ No suitable editor found. Please edit the file manually.", style="red")
        raise typer.Exit(1)

    try:
        console.print(f"📝 Opening {config_type} configuration in {editor}...")
        # Blocks until the editor process exits.
        subprocess.run([editor, str(config_path)], check=True)
        console.print("✅ Configuration file edited", style="green")

    except subprocess.CalledProcessError as e:
        console.print(f"❌ Failed to open editor: {e}", style="red")
        raise typer.Exit(1) from e
|
||||
|
||||
|
||||
@app.callback()
def config_callback():
    """
    ⚙️ Manage configuration settings
    """
    # Group-level callback: all behavior lives in the subcommands.
    pass
|
||||
@@ -0,0 +1,940 @@
|
||||
"""
|
||||
Findings and security results management commands.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import csv
import html
import json
from datetime import datetime
from pathlib import Path
from typing import Optional, Dict, Any, List

import typer
from rich import box
from rich.console import Console
from rich.panel import Panel
from rich.syntax import Syntax
from rich.table import Table, Column
from rich.text import Text
from rich.tree import Tree

from fuzzforge_sdk import FuzzForgeClient

from ..config import get_project_config, FuzzForgeConfig
from ..database import get_project_db, ensure_project_db, FindingRecord
from ..exceptions import (
    handle_error, retry_on_network_error, validate_run_id,
    require_project, ValidationError, DatabaseError
)
|
||||
|
||||
# Shared Rich console used for all terminal output in this module.
console = Console()
# Typer sub-application carrying the findings commands registered below
# (get / history / export / all) plus the group callback.
app = typer.Typer()
|
||||
|
||||
|
||||
@retry_on_network_error(max_retries=3, delay=1.0)
def get_client() -> FuzzForgeClient:
    """Build a FuzzForgeClient from the project configuration.

    Falls back to a default ``FuzzForgeConfig`` when no project config is
    available; the decorator retries transparently on network errors.
    """
    active_config = get_project_config() or FuzzForgeConfig()
    return FuzzForgeClient(
        base_url=active_config.get_api_url(),
        timeout=active_config.get_timeout(),
    )
|
||||
|
||||
|
||||
def severity_style(severity: str) -> str:
    """Map a SARIF severity level (case-insensitive) to a Rich style string.

    Unknown levels fall back to plain "white".
    """
    styles = {
        "error": "bold red",
        "warning": "bold yellow",
        "note": "bold blue",
        "info": "bold cyan",
    }
    return styles.get(severity.lower(), "white")
|
||||
|
||||
|
||||
@app.command("get")
def get_findings(
    run_id: str = typer.Argument(..., help="Run ID to get findings for"),
    save: bool = typer.Option(
        True, "--save/--no-save",
        help="Save findings to local database"
    ),
    format: str = typer.Option(
        "table", "--format", "-f",
        help="Output format: table, json, sarif"
    )
):
    """
    🔍 Retrieve and display security findings for a run

    Fetches the SARIF report for ``run_id`` from the API, optionally caches
    it (with a computed summary) in the local project database, then renders
    it as a table, raw JSON, or raw SARIF. Exits with code 1 on any failure.
    """
    try:
        # Both helpers raise on failure; the generic handler below turns
        # that into a user-facing error message and a non-zero exit.
        require_project()
        validate_run_id(run_id)

        if format not in ["table", "json", "sarif"]:
            # Caught by the generic except below and reported to the user.
            raise ValidationError("format", format, "one of: table, json, sarif")
        with get_client() as client:
            console.print(f"🔍 Fetching findings for run: {run_id}")
            findings = client.get_run_findings(run_id)

            # Save to database if requested
            if save:
                # Database persistence is best-effort: a failure here only
                # warns and never blocks displaying the findings.
                try:
                    db = ensure_project_db()

                    # Extract summary from SARIF (only the first run is
                    # summarised; SARIF documents may contain several).
                    sarif_data = findings.sarif
                    runs_data = sarif_data.get("runs", [])
                    summary = {}

                    if runs_data:
                        results = runs_data[0].get("results", [])
                        summary = {
                            "total_issues": len(results),
                            "by_severity": {},
                            "by_rule": {},
                            "tools": []
                        }

                        # Tally issue counts per severity level and per rule.
                        for result in results:
                            level = result.get("level", "note")
                            rule_id = result.get("ruleId", "unknown")

                            summary["by_severity"][level] = summary["by_severity"].get(level, 0) + 1
                            summary["by_rule"][rule_id] = summary["by_rule"].get(rule_id, 0) + 1

                        # Extract tool info (name/version/rule count) from the
                        # SARIF driver section, when present.
                        tool = runs_data[0].get("tool", {})
                        driver = tool.get("driver", {})
                        if driver.get("name"):
                            summary["tools"].append({
                                "name": driver.get("name"),
                                "version": driver.get("version"),
                                "rules": len(driver.get("rules", []))
                            })

                    finding_record = FindingRecord(
                        run_id=run_id,
                        sarif_data=sarif_data,
                        summary=summary,
                        created_at=datetime.now()
                    )
                    db.save_findings(finding_record)
                    console.print("✅ Findings saved to local database", style="green")
                except Exception as e:
                    console.print(f"⚠️ Failed to save findings to database: {e}", style="yellow")

            # Display findings in the requested representation.
            if format == "json":
                # Pretty-printed with Rich syntax highlighting.
                findings_json = json.dumps(findings.sarif, indent=2)
                console.print(Syntax(findings_json, "json", theme="monokai"))

            elif format == "sarif":
                # Raw SARIF output, suitable for piping to other tools.
                sarif_json = json.dumps(findings.sarif, indent=2)
                console.print(sarif_json)

            else:  # table format
                display_findings_table(findings.sarif)

    except Exception as e:
        console.print(f"❌ Failed to get findings: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def display_findings_table(sarif_data: Dict[str, Any]):
    """Display SARIF findings in a rich table format.

    Renders, in order: the tool banner, a per-severity summary panel, and a
    detail table capped at the first 50 results. Only the first SARIF run is
    displayed. Prints a short notice and returns early when the document has
    no runs or no results.
    """
    runs = sarif_data.get("runs", [])
    if not runs:
        console.print("ℹ️ No findings data available", style="dim")
        return

    run_data = runs[0]
    results = run_data.get("results", [])
    tool = run_data.get("tool", {})
    driver = tool.get("driver", {})

    # Tool information
    console.print(f"\n🔍 [bold]Security Analysis Results[/bold]")
    if driver.get("name"):
        console.print(f"Tool: {driver.get('name')} v{driver.get('version', 'unknown')}")

    if not results:
        console.print("✅ No security issues found!", style="green")
        return

    # Summary statistics: issue count per severity level.
    summary_by_level = {}
    for result in results:
        level = result.get("level", "note")
        summary_by_level[level] = summary_by_level.get(level, 0) + 1

    summary_table = Table(show_header=False, box=box.SIMPLE)
    summary_table.add_column("Severity", width=15, justify="left", style="bold")
    summary_table.add_column("Count", width=8, justify="right", style="bold")

    for level, count in sorted(summary_by_level.items()):
        # Rich Text objects carry the per-severity color styling.
        level_text = level.upper()
        severity_text = Text(level_text, style=severity_style(level))
        count_text = Text(str(count))

        summary_table.add_row(severity_text, count_text)

    console.print(
        Panel.fit(
            summary_table,
            title=f"📊 Summary ({len(results)} total issues)",
            box=box.ROUNDED
        )
    )

    # Detailed results - fixed-width, no-wrap columns keep emoji-containing
    # rows aligned; overly long cells are truncated with an ellipsis.
    results_table = Table(box=box.ROUNDED)
    results_table.add_column("Severity", width=12, justify="left", no_wrap=True)
    results_table.add_column("Rule", width=25, justify="left", style="bold cyan", no_wrap=True)
    results_table.add_column("Message", width=55, justify="left", no_wrap=True)
    results_table.add_column("Location", width=20, justify="left", style="dim", no_wrap=True)

    for result in results[:50]:  # Limit to first 50 results
        level = result.get("level", "note")
        rule_id = result.get("ruleId", "unknown")
        message = result.get("message", {}).get("text", "No message")

        # Extract location information from the first reported location:
        # basename of the file plus optional :line[:column] suffix.
        locations = result.get("locations", [])
        location_str = ""
        if locations:
            physical_location = locations[0].get("physicalLocation", {})
            artifact_location = physical_location.get("artifactLocation", {})
            region = physical_location.get("region", {})

            file_path = artifact_location.get("uri", "")
            if file_path:
                location_str = Path(file_path).name
                if region.get("startLine"):
                    location_str += f":{region['startLine']}"
                if region.get("startColumn"):
                    location_str += f":{region['startColumn']}"

        # Create Rich Text objects with color styling, truncated to the
        # column widths declared above.
        severity_text = Text(level.upper(), style=severity_style(level))
        severity_text.truncate(12, overflow="ellipsis")

        rule_text = Text(rule_id)
        rule_text.truncate(25, overflow="ellipsis")

        message_text = Text(message)
        message_text.truncate(55, overflow="ellipsis")

        location_text = Text(location_str)
        location_text.truncate(20, overflow="ellipsis")

        results_table.add_row(
            severity_text,
            rule_text,
            message_text,
            location_text
        )

    console.print(f"\n📋 [bold]Detailed Results[/bold]")
    if len(results) > 50:
        console.print(f"Showing first 50 of {len(results)} results")
    console.print()
    console.print(results_table)
|
||||
|
||||
|
||||
@app.command("history")
def findings_history(
    limit: int = typer.Option(20, "--limit", "-l", help="Maximum number of findings to show")
):
    """
    📚 Show findings history from local database

    Lists the most recent cached findings records (one row per run) with
    per-severity counts taken from each record's precomputed summary.
    Requires an initialized project; exits with code 1 otherwise.
    """
    db = get_project_db()
    if not db:
        console.print("❌ No FuzzForge project found. Run 'ff init' first.", style="red")
        raise typer.Exit(1)

    try:
        findings = db.list_findings(limit=limit)

        if not findings:
            console.print("❌ No findings found in database", style="red")
            return

        table = Table(box=box.ROUNDED)
        table.add_column("Run ID", style="bold cyan", width=36)  # Full UUID width
        table.add_column("Date", justify="center")
        table.add_column("Total Issues", justify="center", style="bold")
        table.add_column("Errors", justify="center", style="red")
        table.add_column("Warnings", justify="center", style="yellow")
        table.add_column("Notes", justify="center", style="blue")
        table.add_column("Tools", style="dim")

        for finding in findings:
            # Counts come from the summary stored at save time, not from
            # re-parsing the SARIF payload.
            summary = finding.summary
            total_issues = summary.get("total_issues", 0)
            by_severity = summary.get("by_severity", {})
            tools = summary.get("tools", [])

            tool_names = ", ".join([tool.get("name", "Unknown") for tool in tools])

            table.add_row(
                finding.run_id,  # Show full Run ID
                finding.created_at.strftime("%m-%d %H:%M"),
                str(total_issues),
                str(by_severity.get("error", 0)),
                str(by_severity.get("warning", 0)),
                str(by_severity.get("note", 0)),
                # Truncate long tool lists so the row stays readable.
                tool_names[:30] + "..." if len(tool_names) > 30 else tool_names
            )

        console.print(f"\n📚 [bold]Findings History ({len(findings)})[/bold]\n")
        console.print(table)

        console.print(f"\n💡 Use [bold cyan]fuzzforge finding <run-id>[/bold cyan] to view detailed findings")

    except Exception as e:
        console.print(f"❌ Failed to get findings history: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
@app.command("export")
def export_findings(
    run_id: str = typer.Argument(..., help="Run ID to export findings for"),
    format: str = typer.Option(
        "json", "--format", "-f",
        help="Export format: json, csv, html, sarif"
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o",
        help="Output file path (defaults to findings-<run-id>.<format>)"
    )
):
    """
    📤 Export security findings in various formats

    Loads the SARIF report for ``run_id`` from the local database when
    cached, falling back to the API, then writes it to ``output`` in the
    requested format. Exits with code 1 on unsupported formats or any
    fetch/write failure.
    """
    db = get_project_db()
    if not db:
        console.print("❌ No FuzzForge project found. Run 'ff init' first.", style="red")
        raise typer.Exit(1)

    try:
        # Get findings from database first, fallback to API
        findings_data = db.get_findings(run_id)
        if not findings_data:
            console.print(f"📡 Fetching findings from API for run: {run_id}")
            with get_client() as client:
                findings = client.get_run_findings(run_id)
                sarif_data = findings.sarif
        else:
            sarif_data = findings_data.sarif_data

        # Generate output filename (short run-id prefix keeps it readable)
        if not output:
            output = f"findings-{run_id[:8]}.{format}"

        output_path = Path(output)

        # Export based on format
        if format == "sarif":
            # Raw SARIF document, unmodified.
            with open(output_path, 'w') as f:
                json.dump(sarif_data, f, indent=2)

        elif format == "json":
            # Simplified JSON format (flattened findings + summary).
            simplified_data = extract_simplified_findings(sarif_data)
            with open(output_path, 'w') as f:
                json.dump(simplified_data, f, indent=2)

        elif format == "csv":
            export_to_csv(sarif_data, output_path)

        elif format == "html":
            export_to_html(sarif_data, output_path, run_id)

        else:
            console.print(f"❌ Unsupported format: {format}", style="red")
            raise typer.Exit(1)

        console.print(f"✅ Findings exported to: [bold cyan]{output_path}[/bold cyan]")

    except typer.Exit:
        # BUGFIX: typer.Exit subclasses Exception (via click's Exit /
        # RuntimeError), so without this clause the intentional exit for an
        # unsupported format was swallowed by the generic handler below and
        # misreported as "Failed to export findings: 1".
        raise
    except Exception as e:
        console.print(f"❌ Failed to export findings: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def extract_simplified_findings(sarif_data: Dict[str, Any]) -> Dict[str, Any]:
    """Flatten a SARIF document into a compact findings structure.

    Only the first SARIF run is inspected. The returned dict carries the
    tool identity, per-severity counts, and one simplified entry per result
    (rule id, severity, message, first reported location).
    """
    sarif_runs = sarif_data.get("runs", [])
    if not sarif_runs:
        # Keep the shape callers expect even for empty documents.
        return {"findings": [], "summary": {}}

    first_run = sarif_runs[0]
    raw_results = first_run.get("results", [])
    driver_info = first_run.get("tool", {}).get("driver", {})

    severity_counts: Dict[str, int] = {}
    simplified_findings = []

    for raw in raw_results:
        sev = raw.get("level", "note")
        severity_counts[sev] = severity_counts.get(sev, 0) + 1

        # Pull file/line/column from the first reported location, if any.
        loc: Dict[str, Any] = {}
        raw_locations = raw.get("locations", [])
        if raw_locations:
            physical = raw_locations[0].get("physicalLocation", {})
            loc = {
                "file": physical.get("artifactLocation", {}).get("uri", ""),
                "line": physical.get("region", {}).get("startLine"),
                "column": physical.get("region", {}).get("startColumn"),
            }

        simplified_findings.append({
            "rule_id": raw.get("ruleId", "unknown"),
            "severity": sev,
            "message": raw.get("message", {}).get("text", ""),
            "location": loc,
        })

    return {
        "tool": {
            "name": driver_info.get("name", "Unknown"),
            "version": driver_info.get("version", "Unknown"),
        },
        "summary": {
            "total_issues": len(raw_results),
            "by_severity": severity_counts,
        },
        "findings": simplified_findings,
    }
|
||||
|
||||
|
||||
def export_to_csv(sarif_data: Dict[str, Any], output_path: Path):
    """Write the first SARIF run's results to *output_path* as CSV.

    Columns: rule_id, severity, message, file, line, column. Returns without
    creating a file when the document contains no runs.
    """
    sarif_runs = sarif_data.get("runs", [])
    if not sarif_runs:
        return

    columns = ['rule_id', 'severity', 'message', 'file', 'line', 'column']
    # newline='' is required by the csv module; utf-8 keeps messages intact.
    with open(output_path, 'w', newline='', encoding='utf-8') as handle:
        writer = csv.DictWriter(handle, fieldnames=columns)
        writer.writeheader()

        for entry in sarif_runs[0].get("results", []):
            row = {
                "rule_id": entry.get("ruleId", ""),
                "severity": entry.get("level", "note"),
                "message": entry.get("message", {}).get("text", ""),
                "file": "",
                "line": "",
                "column": "",
            }
            # Fill in the first reported location, when present.
            reported = entry.get("locations", [])
            if reported:
                physical = reported[0].get("physicalLocation", {})
                region = physical.get("region", {})
                row["file"] = physical.get("artifactLocation", {}).get("uri", "")
                row["line"] = region.get("startLine", "")
                row["column"] = region.get("startColumn", "")

            writer.writerow(row)
|
||||
|
||||
|
||||
def export_to_html(sarif_data: Dict[str, Any], output_path: Path, run_id: str):
    """Export findings to a standalone HTML report.

    SECURITY FIX: every value taken from the SARIF document (rule id,
    severity, message, file path, tool name/version) and the run id is now
    passed through html.escape() before interpolation, so scanner-controlled
    text can no longer inject markup or script into the report.

    Only the first SARIF run is rendered. Returns without creating a file
    when the document contains no runs.
    """
    runs = sarif_data.get("runs", [])
    if not runs:
        return

    run_data = runs[0]
    results = run_data.get("results", [])
    tool = run_data.get("tool", {}).get("driver", {})

    safe_run_id = html.escape(str(run_id))
    safe_tool_name = html.escape(str(tool.get('name', 'Unknown')))
    safe_tool_version = html.escape(str(tool.get('version', 'Unknown')))

    # Simple HTML template (doubled braces are literal CSS braces)
    html_content = f"""<!DOCTYPE html>
<html>
<head>
    <title>Security Findings - {safe_run_id}</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 40px; }}
        .header {{ background: #f4f4f4; padding: 20px; border-radius: 5px; }}
        .summary {{ margin: 20px 0; }}
        .findings {{ margin: 20px 0; }}
        table {{ width: 100%; border-collapse: collapse; }}
        th, td {{ padding: 10px; text-align: left; border-bottom: 1px solid #ddd; }}
        th {{ background-color: #f2f2f2; }}
        .error {{ color: #d32f2f; }}
        .warning {{ color: #f57c00; }}
        .note {{ color: #1976d2; }}
        .info {{ color: #388e3c; }}
    </style>
</head>
<body>
    <div class="header">
        <h1>Security Findings Report</h1>
        <p><strong>Run ID:</strong> {safe_run_id}</p>
        <p><strong>Tool:</strong> {safe_tool_name} v{safe_tool_version}</p>
        <p><strong>Generated:</strong> {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}</p>
    </div>

    <div class="summary">
        <h2>Summary</h2>
        <p><strong>Total Issues:</strong> {len(results)}</p>
    </div>

    <div class="findings">
        <h2>Detailed Findings</h2>
        <table>
            <thead>
                <tr>
                    <th>Rule ID</th>
                    <th>Severity</th>
                    <th>Message</th>
                    <th>Location</th>
                </tr>
            </thead>
            <tbody>
"""

    for result in results:
        level = result.get("level", "note")
        rule_id = result.get("ruleId", "unknown")
        message = result.get("message", {}).get("text", "")

        # Extract location: file path plus optional :line suffix.
        location_str = ""
        locations = result.get("locations", [])
        if locations:
            physical_location = locations[0].get("physicalLocation", {})
            artifact_location = physical_location.get("artifactLocation", {})
            region = physical_location.get("region", {})

            file_path = artifact_location.get("uri", "")
            if file_path:
                location_str = file_path
                if region.get("startLine"):
                    location_str += f":{region['startLine']}"

        # level doubles as a CSS class name; escaping is a no-op for the
        # expected values (error/warning/note/info) and defuses anything else.
        safe_level = html.escape(str(level))
        html_content += f"""
                <tr>
                    <td>{html.escape(str(rule_id))}</td>
                    <td class="{safe_level}">{safe_level}</td>
                    <td>{html.escape(str(message))}</td>
                    <td>{html.escape(location_str)}</td>
                </tr>
"""

    html_content += """
            </tbody>
        </table>
    </div>
</body>
</html>
"""

    with open(output_path, 'w', encoding='utf-8') as f:
        f.write(html_content)
|
||||
|
||||
|
||||
@app.command("all")
def all_findings(
    workflow: Optional[str] = typer.Option(
        None, "--workflow", "-w",
        help="Filter by workflow name"
    ),
    severity: Optional[str] = typer.Option(
        None, "--severity", "-s",
        help="Filter by severity levels (comma-separated: error,warning,note,info)"
    ),
    since: Optional[str] = typer.Option(
        None, "--since",
        help="Show findings since date (YYYY-MM-DD)"
    ),
    limit: Optional[int] = typer.Option(
        None, "--limit", "-l",
        help="Maximum number of findings to show"
    ),
    export_format: Optional[str] = typer.Option(
        None, "--export", "-e",
        help="Export format: json, csv, html"
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o",
        help="Output file for export"
    ),
    stats_only: bool = typer.Option(
        False, "--stats",
        help="Show statistics only"
    ),
    show_findings: bool = typer.Option(
        False, "--show-findings", "-f",
        help="Show actual findings content, not just summary"
    ),
    max_findings: int = typer.Option(
        50, "--max-findings",
        help="Maximum number of individual findings to display"
    )
):
    """
    📊 Show all findings for the entire project

    Displays aggregated statistics, an optional per-run table of every
    cached findings record (filterable by workflow/severity/date), and can
    export the filtered set or show the individual findings inline.
    Exits with code 1 on invalid input or any database failure.
    """
    db = get_project_db()
    if not db:
        console.print("❌ No FuzzForge project found. Run 'ff init' first.", style="red")
        raise typer.Exit(1)

    try:
        # Parse filters
        severity_list = None
        if severity:
            severity_list = [s.strip().lower() for s in severity.split(",")]

        since_date = None
        if since:
            try:
                since_date = datetime.strptime(since, "%Y-%m-%d")
            except ValueError:
                console.print(f"❌ Invalid date format: {since}. Use YYYY-MM-DD", style="red")
                raise typer.Exit(1)

        # Get aggregated stats
        stats = db.get_aggregated_stats()

        # Show statistics (always, unless this is a pure export invocation)
        if stats_only or not export_format:
            summary_text = f"""[bold]📊 Project Security Summary[/bold]

[cyan]Total Findings Records:[/cyan] {stats['total_findings_records']}
[cyan]Total Runs Analyzed:[/cyan] {stats['total_runs']}
[cyan]Total Security Issues:[/cyan] {stats['total_issues']}
[cyan]Recent Findings (7 days):[/cyan] {stats['recent_findings']}

[bold]Severity Distribution:[/bold]
  🔴 Errors: {stats['severity_distribution'].get('error', 0)}
  🟡 Warnings: {stats['severity_distribution'].get('warning', 0)}
  🔵 Notes: {stats['severity_distribution'].get('note', 0)}
  ℹ️ Info: {stats['severity_distribution'].get('info', 0)}

[bold]By Workflow:[/bold]"""

            for wf_name, count in stats['workflows'].items():
                summary_text += f"\n • {wf_name}: {count} findings"

            console.print(Panel(summary_text, box=box.ROUNDED, title="FuzzForge Project Analysis", border_style="cyan"))

            if stats_only:
                return

        # Get all findings with filters
        findings = db.get_all_findings(
            workflow=workflow,
            severity=severity_list,
            since_date=since_date,
            limit=limit
        )

        if not findings:
            console.print("ℹ️ No findings match the specified filters", style="dim")
            return

        # Export if requested
        if export_format:
            if not output:
                timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                output = f"all_findings_{timestamp}.{export_format}"

            export_all_findings(findings, export_format, output)
            console.print(f"✅ Exported {len(findings)} findings to: {output}", style="green")
            return

        # Display findings table
        table = Table(box=box.ROUNDED, title=f"All Project Findings ({len(findings)} records)")
        table.add_column("Run ID", style="bold cyan", width=36)  # Full UUID width
        table.add_column("Workflow", style="dim", width=20)
        table.add_column("Date", justify="center")
        table.add_column("Issues", justify="center", style="bold")
        table.add_column("Errors", justify="center", style="red")
        table.add_column("Warnings", justify="center", style="yellow")
        table.add_column("Notes", justify="center", style="blue")

        # Resolve each run's workflow once, even when it appears in
        # several findings records.
        runs_info = {}
        for finding in findings:
            run_id = finding.run_id
            if run_id not in runs_info:
                run_info = db.get_run(run_id)
                runs_info[run_id] = run_info

        for finding in findings:
            run_id = finding.run_id
            run_info = runs_info.get(run_id)
            workflow_name = run_info.workflow if run_info else "unknown"

            summary = finding.summary
            total_issues = summary.get("total_issues", 0)
            by_severity = summary.get("by_severity", {})

            # Count issues from SARIF data if summary is incomplete
            if total_issues == 0 and "runs" in finding.sarif_data:
                for run in finding.sarif_data["runs"]:
                    total_issues += len(run.get("results", []))

            table.add_row(
                run_id,  # Show full Run ID
                workflow_name[:17] + "..." if len(workflow_name) > 20 else workflow_name,
                finding.created_at.strftime("%Y-%m-%d %H:%M"),
                str(total_issues),
                str(by_severity.get("error", 0)),
                str(by_severity.get("warning", 0)),
                str(by_severity.get("note", 0))
            )

        console.print(table)

        # Show actual findings if requested
        if show_findings:
            display_detailed_findings(findings, max_findings)

        console.print(f"\n💡 Use filters to refine results: --workflow, --severity, --since")
        console.print(f"💡 Show findings content: --show-findings")
        console.print(f"💡 Export findings: --export json --output report.json")
        console.print(f"💡 View specific findings: [bold cyan]fuzzforge finding <run-id>[/bold cyan]")

    except typer.Exit:
        # BUGFIX: typer.Exit subclasses Exception, so the intentional exit
        # raised above for an invalid --since date was previously swallowed
        # by the generic handler below and misreported as a database error.
        raise
    except Exception as e:
        console.print(f"❌ Failed to get all findings: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def display_detailed_findings(findings: List[FindingRecord], max_findings: int):
    """Render up to *max_findings* individual SARIF results as Rich panels.

    Walks every run of every findings record, printing one colored panel per
    result (rule, location, tool, originating run, description, and code
    snippet when the SARIF region carries one).
    """
    console.print(f"\n📋 [bold]Detailed Findings Content[/bold] (showing up to {max_findings} findings)\n")

    findings_count = 0

    for finding_record in findings:
        if findings_count >= max_findings:
            # NOTE(review): this count is approximate — it includes every
            # result from this record onward, even if results of an earlier,
            # partially-displayed record were already shown.
            remaining = sum(len(run.get("results", []))
                            for f in findings[findings.index(finding_record):]
                            for run in f.sarif_data.get("runs", []))
            if remaining > 0:
                console.print(f"\n... and {remaining} more findings (use --max-findings to show more)")
            break

        sarif_data = finding_record.sarif_data
        if not sarif_data or "runs" not in sarif_data:
            continue

        for run in sarif_data["runs"]:
            tool = run.get("tool", {})
            driver = tool.get("driver", {})
            tool_name = driver.get("name", "Unknown Tool")

            results = run.get("results", [])
            if not results:
                continue

            for result in results:
                if findings_count >= max_findings:
                    break

                findings_count += 1

                # Extract key information
                rule_id = result.get("ruleId", "unknown")
                level = result.get("level", "note").upper()
                message_text = result.get("message", {}).get("text", "No description")

                # Get location information (file plus optional :line suffix)
                locations = result.get("locations", [])
                location_str = "Unknown location"
                if locations:
                    physical = locations[0].get("physicalLocation", {})
                    artifact = physical.get("artifactLocation", {})
                    region = physical.get("region", {})

                    file_path = artifact.get("uri", "")
                    line_number = region.get("startLine", "")

                    if file_path:
                        location_str = f"{file_path}"
                        if line_number:
                            location_str += f":{line_number}"

                # BUGFIX: a local dict previously shadowed the module-level
                # severity_style() helper and duplicated its mapping. Reuse
                # the helper instead — it lowercases its argument, so passing
                # the upper-cased level yields the identical style (including
                # the "white" fallback).
                level_style = severity_style(level)

                # Create finding panel
                finding_content = f"""[bold]Rule:[/bold] {rule_id}
[bold]Location:[/bold] {location_str}
[bold]Tool:[/bold] {tool_name}
[bold]Run:[/bold] {finding_record.run_id[:12]}...

[bold]Description:[/bold]
{message_text}"""

                # Add code context if available
                region = locations[0].get("physicalLocation", {}).get("region", {}) if locations else {}
                if region.get("snippet", {}).get("text"):
                    code_snippet = region["snippet"]["text"].strip()
                    finding_content += f"\n\n[bold]Code:[/bold]\n[dim]{code_snippet}[/dim]"

                console.print(Panel(
                    finding_content,
                    title=f"[{level_style}]{level}[/{level_style}] Finding #{findings_count}",
                    # Borders accept a single color, so drop the "bold" prefix.
                    border_style=level_style.split()[-1] if " " in level_style else level_style,
                    box=box.ROUNDED
                ))

                console.print()  # Add spacing between findings
|
||||
|
||||
|
||||
def export_all_findings(findings: List[FindingRecord], format: str, output_path: str):
    """Export aggregated findings records to *output_path*.

    Supported formats: "json" (flat result list tagged with run metadata),
    "csv", and "html".

    Fixes over the previous version: all output files are written as UTF-8
    (matching export_to_csv, so non-ASCII scanner messages survive on every
    platform), HTML output escapes scanner-controlled text to prevent markup
    injection, and an unsupported format now raises ValueError instead of
    silently writing nothing while the caller reports success.

    Raises:
        ValueError: if *format* is not one of json/csv/html.
    """
    output_file = Path(output_path)

    if format == "json":
        # Flatten every SARIF result, tagging each with its run of origin.
        all_results = []
        for finding in findings:
            if "runs" in finding.sarif_data:
                for run in finding.sarif_data["runs"]:
                    for result in run.get("results", []):
                        result_entry = {
                            "run_id": finding.run_id,
                            "created_at": finding.created_at.isoformat(),
                            **result
                        }
                        all_results.append(result_entry)

        with open(output_file, 'w', encoding='utf-8') as f:
            json.dump({
                "total_findings": len(findings),
                "export_date": datetime.now().isoformat(),
                "results": all_results
            }, f, indent=2)

    elif format == "csv":
        # newline='' is required by the csv module.
        with open(output_file, 'w', newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow(["Run ID", "Date", "Severity", "Rule ID", "Message", "File", "Line"])

            for finding in findings:
                if "runs" in finding.sarif_data:
                    for run in finding.sarif_data["runs"]:
                        for result in run.get("results", []):
                            locations = result.get("locations", [])
                            location_info = locations[0] if locations else {}
                            physical = location_info.get("physicalLocation", {})
                            artifact = physical.get("artifactLocation", {})
                            region = physical.get("region", {})

                            writer.writerow([
                                finding.run_id[:12],
                                finding.created_at.strftime("%Y-%m-%d %H:%M"),
                                result.get("level", "note"),
                                result.get("ruleId", ""),
                                result.get("message", {}).get("text", ""),
                                artifact.get("uri", ""),
                                region.get("startLine", "")
                            ])

    elif format == "html":
        # Generate HTML report (doubled braces are literal CSS braces).
        html_content = f"""<!DOCTYPE html>
<html>
<head>
    <title>FuzzForge Security Findings Report</title>
    <style>
        body {{ font-family: Arial, sans-serif; margin: 20px; }}
        h1 {{ color: #333; }}
        .stats {{ background: #f5f5f5; padding: 15px; border-radius: 5px; margin: 20px 0; }}
        table {{ width: 100%; border-collapse: collapse; }}
        th, td {{ padding: 10px; text-align: left; border-bottom: 1px solid #ddd; }}
        th {{ background: #4CAF50; color: white; }}
        .error {{ color: red; font-weight: bold; }}
        .warning {{ color: orange; font-weight: bold; }}
        .note {{ color: blue; }}
        .info {{ color: gray; }}
    </style>
</head>
<body>
    <h1>FuzzForge Security Findings Report</h1>
    <div class="stats">
        <p><strong>Generated:</strong> {datetime.now().strftime("%Y-%m-%d %H:%M:%S")}</p>
        <p><strong>Total Findings:</strong> {len(findings)}</p>
    </div>
    <table>
        <tr>
            <th>Run ID</th>
            <th>Date</th>
            <th>Severity</th>
            <th>Rule</th>
            <th>Message</th>
            <th>Location</th>
        </tr>"""

        for finding in findings:
            if "runs" in finding.sarif_data:
                for run in finding.sarif_data["runs"]:
                    for result in run.get("results", []):
                        level = result.get("level", "note")
                        locations = result.get("locations", [])
                        location_info = locations[0] if locations else {}
                        physical = location_info.get("physicalLocation", {})
                        artifact = physical.get("artifactLocation", {})
                        region = physical.get("region", {})

                        # SECURITY FIX: escape scanner-controlled text before
                        # interpolating it into HTML. Escaping the level is a
                        # no-op for the expected CSS class names.
                        safe_level = html.escape(str(level))
                        html_content += f"""
        <tr>
            <td>{html.escape(finding.run_id[:12])}</td>
            <td>{finding.created_at.strftime("%Y-%m-%d %H:%M")}</td>
            <td class="{safe_level}">{safe_level.upper()}</td>
            <td>{html.escape(str(result.get("ruleId", "")))}</td>
            <td>{html.escape(str(result.get("message", {}).get("text", "")))}</td>
            <td>{html.escape(str(artifact.get("uri", "")))} : {html.escape(str(region.get("startLine", "")))}</td>
        </tr>"""

        html_content += """
    </table>
</body>
</html>"""

        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(html_content)

    else:
        # Previously an unknown format fell through silently and the caller
        # still printed a success message; fail loudly instead.
        raise ValueError(f"Unsupported export format: {format}")
|
||||
|
||||
|
||||
@app.callback(invoke_without_command=True)
def findings_callback(ctx: typer.Context):
    """
    🔍 View and export security findings
    """
    if ctx.invoked_subcommand is None:
        # Bare invocation with no subcommand: fall back to the recent
        # findings-history view with its default limit.
        findings_history(limit=20)
|
||||
@@ -0,0 +1,251 @@
|
||||
"""Cognee ingestion commands for FuzzForge CLI."""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import List, Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.prompt import Confirm
|
||||
|
||||
from ..config import ProjectConfigManager
|
||||
from ..ingest_utils import collect_ingest_files
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer(
|
||||
name="ingest",
|
||||
help="Ingest files or directories into the Cognee knowledge graph for the current project",
|
||||
invoke_without_command=True,
|
||||
)
|
||||
|
||||
|
||||
@app.callback()
def ingest_callback(
    ctx: typer.Context,
    path: Optional[Path] = typer.Argument(
        None,
        exists=True,
        file_okay=True,
        dir_okay=True,
        readable=True,
        resolve_path=True,
        help="File or directory to ingest (defaults to current directory)",
    ),
    recursive: bool = typer.Option(
        False,
        "--recursive",
        "-r",
        help="Recursively ingest directories",
    ),
    file_types: Optional[List[str]] = typer.Option(
        None,
        "--file-types",
        "-t",
        help="File extensions to include (e.g. --file-types .py --file-types .js)",
    ),
    exclude: Optional[List[str]] = typer.Option(
        None,
        "--exclude",
        "-e",
        help="Glob patterns to exclude",
    ),
    dataset: Optional[str] = typer.Option(
        None,
        "--dataset",
        "-d",
        help="Dataset name to ingest into",
    ),
    force: bool = typer.Option(
        False,
        "--force",
        "-f",
        help="Force re-ingestion and skip confirmation",
    ),
):
    """Entry point for `fuzzforge ingest` when no subcommand is provided.

    Validates the project configuration and the Cognee installation, asks for
    confirmation (unless --force), then runs the async ingestion pipeline via
    asyncio.run. Exits with code 1 on any validation or ingestion failure.
    """
    # A subcommand was given (e.g. `ff ingest something`): defer to it.
    if ctx.invoked_subcommand:
        return

    # ProjectConfigManager raises FileNotFoundError when no project config exists.
    try:
        config = ProjectConfigManager()
    except FileNotFoundError as exc:
        console.print(f"[red]Error:[/red] {exc}")
        raise typer.Exit(1) from exc

    if not config.is_initialized():
        console.print("[red]Error: FuzzForge project not initialized. Run 'ff init' first.[/red]")
        raise typer.Exit(1)

    # Point Cognee's env vars (data/system roots, user id) at this project.
    config.setup_cognee_environment()
    if os.getenv("FUZZFORGE_DEBUG", "0") == "1":
        console.print(
            "[dim]Cognee directories:\n"
            f" DATA: {os.getenv('COGNEE_DATA_ROOT', 'unset')}\n"
            f" SYSTEM: {os.getenv('COGNEE_SYSTEM_ROOT', 'unset')}\n"
            f" USER: {os.getenv('COGNEE_USER_ID', 'unset')}\n",
        )
    project_context = config.get_project_context()

    # Defaults: current directory, and a per-project "<name>_codebase" dataset.
    target_path = path or Path.cwd()
    dataset_name = dataset or f"{project_context['project_name']}_codebase"

    try:
        import cognee  # noqa: F401 # Just to validate installation
    except ImportError as exc:
        console.print("[red]Cognee is not installed.[/red]")
        console.print("Install with: pip install 'cognee[all]' litellm")
        raise typer.Exit(1) from exc

    console.print(f"[bold]🔍 Ingesting {target_path} into Cognee knowledge graph[/bold]")
    console.print(
        f"Project: [cyan]{project_context['project_name']}[/cyan] "
        f"(ID: [dim]{project_context['project_id']}[/dim])"
    )
    console.print(f"Dataset: [cyan]{dataset_name}[/cyan]")
    console.print(f"Tenant: [dim]{project_context['tenant_id']}[/dim]")

    # Interactive confirmation unless --force was passed.
    if not force:
        confirm_message = f"Ingest {target_path} into knowledge graph for this project?"
        if not Confirm.ask(confirm_message, console=console):
            console.print("[yellow]Ingestion cancelled[/yellow]")
            raise typer.Exit(0)

    # Run the async pipeline; Ctrl+C and any other failure both exit(1).
    try:
        asyncio.run(
            _run_ingestion(
                config=config,
                path=target_path.resolve(),
                recursive=recursive,
                file_types=file_types,
                exclude=exclude,
                dataset=dataset_name,
                force=force,
            )
        )
    except KeyboardInterrupt:
        console.print("\n[yellow]Ingestion cancelled by user[/yellow]")
        raise typer.Exit(1)
    except Exception as exc:  # pragma: no cover - rich reporting
        console.print(f"[red]Failed to ingest:[/red] {exc}")
        raise typer.Exit(1) from exc
|
||||
|
||||
|
||||
async def _run_ingestion(
    *,
    config: ProjectConfigManager,
    path: Path,
    recursive: bool,
    file_types: Optional[List[str]],
    exclude: Optional[List[str]],
    dataset: str,
    force: bool,
) -> None:
    """Perform the actual ingestion work.

    Collects candidate files under ``path``, filters out unreadable/binary
    ones, hands them to CogneeService for ingestion into ``dataset``, and then
    prints a best-effort sample of insights/chunks. All failures are reported
    on the console; the function returns rather than raising.
    """
    # Imported lazily so the CLI loads even when the AI package is absent.
    from fuzzforge_ai.cognee_service import CogneeService

    cognee_service = CogneeService(config)
    await cognee_service.initialize()

    # Always skip internal bookkeeping directories
    exclude_patterns = list(exclude or [])
    default_excludes = {
        ".fuzzforge/**",
        ".git/**",
    }
    added_defaults = []
    for pattern in default_excludes:
        if pattern not in exclude_patterns:
            exclude_patterns.append(pattern)
            added_defaults.append(pattern)

    if added_defaults and os.getenv("FUZZFORGE_DEBUG", "0") == "1":
        console.print(
            "[dim]Auto-excluding paths: {patterns}[/dim]".format(
                patterns=", ".join(added_defaults)
            )
        )

    # File discovery happens in ingest_utils; any error aborts quietly.
    try:
        files_to_ingest = collect_ingest_files(path, recursive, file_types, exclude_patterns)
    except Exception as exc:
        console.print(f"[red]Failed to collect files:[/red] {exc}")
        return

    if not files_to_ingest:
        console.print("[yellow]No files found to ingest[/yellow]")
        return

    console.print(f"Found [green]{len(files_to_ingest)}[/green] files to ingest")

    # --force also wipes previously ingested data for this project (best effort).
    if force:
        console.print("Cleaning existing data for this project...")
        try:
            await cognee_service.clear_data(confirm=True)
        except Exception as exc:
            console.print(f"[yellow]Warning:[/yellow] Could not clean existing data: {exc}")

    console.print("Adding files to Cognee...")
    # Probe each file with a 1-byte UTF-8 read to weed out binary or
    # unreadable files before handing the list to Cognee.
    valid_file_paths = []
    for file_path in files_to_ingest:
        try:
            with open(file_path, "r", encoding="utf-8") as fh:
                fh.read(1)
            valid_file_paths.append(file_path)
            console.print(f" ✓ {file_path}")
        except (UnicodeDecodeError, PermissionError) as exc:
            console.print(f"[yellow]Skipping {file_path}: {exc}[/yellow]")

    if not valid_file_paths:
        console.print("[yellow]No readable files found to ingest[/yellow]")
        return

    results = await cognee_service.ingest_files(valid_file_paths, dataset)

    console.print(
        f"[green]✅ Successfully ingested {results['success']} files into knowledge graph[/green]"
    )
    if results["failed"]:
        console.print(
            f"[yellow]⚠️ Skipped {results['failed']} files due to errors[/yellow]"
        )

    # Post-ingestion preview: show up to 3 insights and 2 content chunks.
    try:
        insights = await cognee_service.search_insights(
            query=f"What insights can you provide about the {dataset} dataset?",
            dataset=dataset,
        )
        if insights:
            console.print(f"\n[bold]📊 Generated {len(insights)} insights:[/bold]")
            for index, insight in enumerate(insights[:3], 1):
                console.print(f" {index}. {insight}")
            if len(insights) > 3:
                console.print(f" ... and {len(insights) - 3} more")

        chunks = await cognee_service.search_chunks(
            query=f"functions classes methods in {dataset}",
            dataset=dataset,
        )
        if chunks:
            console.print(
                f"\n[bold]🔍 Sample searchable content ({len(chunks)} chunks found):[/bold]"
            )
            for index, chunk in enumerate(chunks[:2], 1):
                # Truncate long chunks to a 100-char preview.
                preview = chunk[:100] + "..." if len(chunk) > 100 else chunk
                console.print(f" {index}. {preview}")
    except Exception:
        # Best-effort stats — ignore failures here
        pass
|
||||
@@ -0,0 +1,282 @@
|
||||
"""Project initialization commands."""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
import os
|
||||
from textwrap import dedent
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.prompt import Confirm, Prompt
|
||||
|
||||
from ..config import ensure_project_config
|
||||
from ..database import ensure_project_db
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer()
|
||||
|
||||
|
||||
@app.command()
def project(
    name: Optional[str] = typer.Option(
        None, "--name", "-n",
        help="Project name (defaults to current directory name)"
    ),
    api_url: Optional[str] = typer.Option(
        None, "--api-url", "-u",
        help="FuzzForge API URL (defaults to http://localhost:8000)"
    ),
    force: bool = typer.Option(
        False, "--force", "-f",
        help="Force initialization even if project already exists"
    )
):
    """
    📁 Initialize a new FuzzForge project in the current directory.

    This creates a .fuzzforge directory with:
    • SQLite database for storing runs, findings, and crashes
    • Configuration file with project settings
    • Default ignore patterns and preferences
    """
    current_dir = Path.cwd()
    fuzzforge_dir = current_dir / ".fuzzforge"

    # Check if project already exists
    # A non-empty .fuzzforge directory means the project was set up before;
    # refuse to proceed unless --force was given.
    if fuzzforge_dir.exists() and not force:
        if fuzzforge_dir.is_dir() and any(fuzzforge_dir.iterdir()):
            console.print("❌ FuzzForge project already exists in this directory", style="red")
            console.print("Use --force to reinitialize", style="dim")
            raise typer.Exit(1)

    # Get project name (prompt interactively when not supplied via --name)
    if not name:
        name = Prompt.ask(
            "Project name",
            default=current_dir.name,
            console=console
        )

    # Get API URL (prompt interactively when not supplied via --api-url)
    if not api_url:
        api_url = Prompt.ask(
            "FuzzForge API URL",
            default="http://localhost:8000",
            console=console
        )

    # Confirm initialization
    console.print(f"\n📁 Initializing FuzzForge project: [bold cyan]{name}[/bold cyan]")
    console.print(f"📍 Location: [dim]{current_dir}[/dim]")
    console.print(f"🔗 API URL: [dim]{api_url}[/dim]")

    if not Confirm.ask("\nProceed with initialization?", default=True, console=console):
        console.print("❌ Initialization cancelled", style="yellow")
        raise typer.Exit(0)

    try:
        # Create .fuzzforge directory
        console.print("\n🔨 Creating project structure...")
        fuzzforge_dir.mkdir(exist_ok=True)

        # Initialize configuration
        console.print("⚙️ Setting up configuration...")
        ensure_project_config(
            project_dir=current_dir,
            project_name=name,
            api_url=api_url,
        )

        # Initialize database
        console.print("🗄️ Initializing database...")
        ensure_project_db(current_dir)

        # Seed the AI .env file and the remote-agents registry.
        _ensure_env_file(fuzzforge_dir, force)
        _ensure_agents_registry(fuzzforge_dir, force)

        # Create .gitignore if needed
        gitignore_path = current_dir / ".gitignore"
        gitignore_entries = [
            "# FuzzForge CLI",
            ".fuzzforge/findings.db-*",  # SQLite temp files
            ".fuzzforge/cache/",
            ".fuzzforge/temp/",
        ]

        if gitignore_path.exists():
            with open(gitignore_path, 'r') as f:
                existing_content = f.read()

            # The "# FuzzForge CLI" marker doubles as an idempotency check.
            if "# FuzzForge CLI" not in existing_content:
                with open(gitignore_path, 'a') as f:
                    f.write(f"\n{chr(10).join(gitignore_entries)}\n")
                console.print("📝 Updated .gitignore with FuzzForge entries")
        else:
            with open(gitignore_path, 'w') as f:
                f.write(f"{chr(10).join(gitignore_entries)}\n")
            console.print("📝 Created .gitignore")

        # Create README if it doesn't exist
        readme_path = current_dir / "README.md"
        if not readme_path.exists():
            readme_content = f"""# {name}

FuzzForge security testing project.

## Quick Start

```bash
# List available workflows
fuzzforge workflows

# Submit a workflow for analysis
fuzzforge workflow <workflow-name> /path/to/target

# Monitor run progress
fuzzforge monitor live <run-id>

# View findings
fuzzforge finding <run-id>
```

## Project Structure

- `.fuzzforge/` - Project data and configuration
- `.fuzzforge/config.yaml` - Project configuration
- `.fuzzforge/findings.db` - Local database for runs and findings
"""

            with open(readme_path, 'w') as f:
                f.write(readme_content)
            console.print("📚 Created README.md")

        console.print("\n✅ FuzzForge project initialized successfully!", style="green")
        console.print(f"\n🎯 Next steps:")
        console.print(" • ff workflows - See available workflows")
        console.print(" • ff status - Check API connectivity")
        console.print(" • ff workflow <workflow> <path> - Start your first analysis")
        console.print(" • edit .fuzzforge/.env with API keys & provider settings")

    except Exception as e:
        console.print(f"\n❌ Initialization failed: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
@app.callback()
def init_callback():
    """
    📁 Initialize FuzzForge projects and components
    """
    # Group-level callback: intentionally empty, it only attaches the help
    # text above to the `init` command group.
|
||||
|
||||
|
||||
def _ensure_env_file(fuzzforge_dir: Path, force: bool) -> None:
    """Create or update the .fuzzforge/.env file with AI defaults.

    Writes both `.env` (with the key the user typed, if any) and a
    `.env.template` copy with the API key fields blanked out. Existing files
    are left untouched unless ``force`` is True.
    """

    env_path = fuzzforge_dir / ".env"
    if env_path.exists() and not force:
        console.print("🧪 Using existing .fuzzforge/.env (use --force to regenerate)")
        return

    console.print("🧠 Configuring AI environment...")
    console.print(" • Default LLM provider: openai")
    console.print(" • Default LLM model: gpt-5-mini")
    console.print(" • To customise provider/model later, edit .fuzzforge/.env")

    # Hard-coded defaults; users edit .fuzzforge/.env to change them later.
    llm_provider = "openai"
    llm_model = "gpt-5-mini"

    # Blank is accepted so the user can fill the key in manually afterwards.
    api_key = Prompt.ask(
        "OpenAI API key (leave blank to fill manually)",
        default="",
        show_default=False,
        console=console,
    )

    # NOTE(review): these two locals are never used below — candidates for
    # removal, or for wiring into the Cognee-related env lines.
    enable_cognee = False
    cognee_url = ""

    # Session DB path is stored relative to the project root (.fuzzforge/..).
    session_db_path = fuzzforge_dir / "fuzzforge_sessions.db"
    session_db_rel = session_db_path.relative_to(fuzzforge_dir.parent)

    env_lines = [
        "# FuzzForge AI configuration",
        "# Populate the API key(s) that match your LLM provider",
        "",
        f"LLM_PROVIDER={llm_provider}",
        f"LLM_MODEL={llm_model}",
        f"LITELLM_MODEL={llm_model}",
        f"OPENAI_API_KEY={api_key}",
        f"FUZZFORGE_MCP_URL={os.getenv('FUZZFORGE_MCP_URL', 'http://localhost:8010/mcp')}",
        "",
        "# Cognee configuration mirrors the primary LLM by default",
        f"LLM_COGNEE_PROVIDER={llm_provider}",
        f"LLM_COGNEE_MODEL={llm_model}",
        f"LLM_COGNEE_API_KEY={api_key}",
        "LLM_COGNEE_ENDPOINT=",
        "COGNEE_MCP_URL=",
        "",
        "# Session persistence options: inmemory | sqlite",
        "SESSION_PERSISTENCE=sqlite",
        f"SESSION_DB_PATH={session_db_rel}",
        "",
        "# Optional integrations",
        "AGENTOPS_API_KEY=",
        "FUZZFORGE_DEBUG=0",
        "",
    ]

    env_path.write_text("\n".join(env_lines), encoding="utf-8")
    console.print(f"📝 Created {env_path.relative_to(fuzzforge_dir.parent)}")

    # Mirror .env into .env.template with the secret fields blanked out.
    template_path = fuzzforge_dir / ".env.template"
    if not template_path.exists() or force:
        template_lines = []
        for line in env_lines:
            if line.startswith("OPENAI_API_KEY="):
                template_lines.append("OPENAI_API_KEY=")
            elif line.startswith("LLM_COGNEE_API_KEY="):
                template_lines.append("LLM_COGNEE_API_KEY=")
            else:
                template_lines.append(line)
        template_path.write_text("\n".join(template_lines), encoding="utf-8")
        console.print(f"📝 Created {template_path.relative_to(fuzzforge_dir.parent)}")

    # SQLite session DB will be created automatically when first used by the AI agent
|
||||
|
||||
|
||||
def _ensure_agents_registry(fuzzforge_dir: Path, force: bool) -> None:
    """Create a starter agents.yaml registry if needed.

    Skips the write when the file already exists, unless ``force`` is True.
    """

    agents_path = fuzzforge_dir / "agents.yaml"
    if agents_path.exists() and not force:
        return

    # NOTE(review): `.strip()` runs *before* dedent here. If the literal is
    # indented to match the surrounding code, stripping the first line's
    # indent defeats textwrap.dedent (no common prefix remains) and the
    # written YAML keeps the source indentation — confirm the intended order
    # is dedent(...) followed by .strip().
    template = dedent(
        """\
# FuzzForge Registered Agents
# Populate this list to auto-register remote agents when the AI CLI starts
registered_agents: []

# Example:
# registered_agents:
#   - name: Calculator
#     url: http://localhost:10201
#     description: Sample math agent
""".strip()
    )

    agents_path.write_text(template + "\n", encoding="utf-8")
    console.print(f"📝 Created {agents_path.relative_to(fuzzforge_dir.parent)}")
|
||||
@@ -0,0 +1,436 @@
|
||||
"""
|
||||
Real-time monitoring and statistics commands.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import asyncio
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Optional
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.live import Live
|
||||
from rich.layout import Layout
|
||||
from rich.progress import Progress, BarColumn, TextColumn, SpinnerColumn
|
||||
from rich.align import Align
|
||||
from rich import box
|
||||
|
||||
from ..config import get_project_config, FuzzForgeConfig
|
||||
from ..database import get_project_db, ensure_project_db, CrashRecord
|
||||
from fuzzforge_sdk import FuzzForgeClient
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer()
|
||||
|
||||
|
||||
def get_client() -> FuzzForgeClient:
    """Build a FuzzForgeClient from the project config, or library defaults."""
    cfg = get_project_config() or FuzzForgeConfig()
    base_url = cfg.get_api_url()
    timeout = cfg.get_timeout()
    return FuzzForgeClient(base_url=base_url, timeout=timeout)
|
||||
|
||||
|
||||
def format_duration(seconds: int) -> str:
    """Render a second count as a compact human-readable duration string."""
    # Guard clauses, smallest unit first.
    if seconds < 60:
        return f"{seconds}s"
    if seconds < 3600:
        minutes, secs = divmod(seconds, 60)
        return f"{minutes}m {secs}s"
    hours, remainder = divmod(seconds, 3600)
    return f"{hours}h {remainder // 60}m"
|
||||
|
||||
|
||||
def format_number(num: int) -> str:
    """Abbreviate large counts with K/M suffixes (one decimal place)."""
    for threshold, suffix in ((1000000, "M"), (1000, "K")):
        if num >= threshold:
            return f"{num / threshold:.1f}{suffix}"
    return str(num)
|
||||
|
||||
|
||||
@app.command("stats")
def fuzzing_stats(
    run_id: str = typer.Argument(..., help="Run ID to get statistics for"),
    refresh: int = typer.Option(
        5, "--refresh", "-r",
        help="Refresh interval in seconds"
    ),
    once: bool = typer.Option(
        False, "--once",
        help="Show stats once and exit"
    )
):
    """
    📊 Show current fuzzing statistics for a run

    With --once, prints a single snapshot; otherwise re-renders a live table
    every ``refresh`` seconds until interrupted with Ctrl+C.
    """
    try:
        with get_client() as client:
            if once:
                # Show stats once
                stats = client.get_fuzzing_stats(run_id)
                display_stats_table(stats)
            else:
                # Live updating stats
                console.print(f"📊 [bold]Live Fuzzing Statistics[/bold] (Run: {run_id[:12]}...)")
                console.print(f"Refreshing every {refresh}s. Press Ctrl+C to stop.\n")

                with Live(auto_refresh=False, console=console) as live:
                    while True:
                        try:
                            stats = client.get_fuzzing_stats(run_id)
                            table = create_stats_table(stats)
                            live.update(table, refresh=True)
                            # Ctrl+C during the sleep lands in the handler below.
                            time.sleep(refresh)
                        except KeyboardInterrupt:
                            console.print("\n📊 Monitoring stopped", style="yellow")
                            break

    except Exception as e:
        # Any API/transport failure becomes a non-zero exit.
        console.print(f"❌ Failed to get fuzzing stats: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def display_stats_table(stats):
    """Render the fuzzing statistics panel once to the shared console."""
    console.print(create_stats_table(stats))
|
||||
|
||||
|
||||
def create_stats_table(stats) -> Panel:
    """Create a rich table for fuzzing statistics.

    Builds a two-column (metric/value) table from the stats object and wraps
    it in a rounded Panel titled with the workflow name and run id prefix.
    Coverage and last-crash rows are included only when those fields are set.
    """
    # Create main stats table
    stats_table = Table(show_header=False, box=box.SIMPLE)
    stats_table.add_column("Metric", style="bold cyan")
    stats_table.add_column("Value", justify="right", style="bold white")

    stats_table.add_row("Total Executions", format_number(stats.executions))
    stats_table.add_row("Executions/sec", f"{stats.executions_per_sec:.1f}")
    stats_table.add_row("Total Crashes", format_number(stats.crashes))
    stats_table.add_row("Unique Crashes", format_number(stats.unique_crashes))

    # Coverage is optional (None when the workflow does not report it).
    if stats.coverage is not None:
        stats_table.add_row("Code Coverage", f"{stats.coverage:.1f}%")

    stats_table.add_row("Corpus Size", format_number(stats.corpus_size))
    stats_table.add_row("Elapsed Time", format_duration(stats.elapsed_time))

    # NOTE(review): naive datetime.now() is mixed with stats.last_crash_time —
    # assumes the server timestamp is local/naive; confirm timezone handling.
    if stats.last_crash_time:
        time_since_crash = datetime.now() - stats.last_crash_time
        stats_table.add_row("Last Crash", f"{format_duration(int(time_since_crash.total_seconds()))} ago")

    return Panel.fit(
        stats_table,
        title=f"📊 Fuzzing Statistics - {stats.workflow}",
        subtitle=f"Run: {stats.run_id[:12]}...",
        box=box.ROUNDED
    )
|
||||
|
||||
|
||||
@app.command("crashes")
def crash_reports(
    run_id: str = typer.Argument(..., help="Run ID to get crash reports for"),
    save: bool = typer.Option(
        True, "--save/--no-save",
        help="Save crashes to local database"
    ),
    limit: int = typer.Option(
        50, "--limit", "-l",
        help="Maximum number of crashes to show"
    )
):
    """
    🐛 Display crash reports for a fuzzing run

    Fetches crashes from the API, optionally persists them to the project's
    local database, then prints a summary panel plus a detail table capped at
    ``limit`` rows.
    """
    try:
        with get_client() as client:
            console.print(f"🐛 Fetching crash reports for run: {run_id}")
            crashes = client.get_crash_reports(run_id)

            if not crashes:
                console.print("✅ No crashes found!", style="green")
                return

            # Save to database if requested (--save is the default)
            if save:
                db = ensure_project_db()
                for crash in crashes:
                    crash_record = CrashRecord(
                        run_id=run_id,
                        crash_id=crash.crash_id,
                        signal=crash.signal,
                        stack_trace=crash.stack_trace,
                        input_file=crash.input_file,
                        severity=crash.severity,
                        timestamp=crash.timestamp
                    )
                    db.save_crash(crash_record)
                console.print("✅ Crashes saved to local database")

            # Display crashes
            crashes_to_show = crashes[:limit]

            # Summary: tally per-severity and per-signal counts over ALL crashes
            severity_counts = {}
            signal_counts = {}
            for crash in crashes:
                severity_counts[crash.severity] = severity_counts.get(crash.severity, 0) + 1
                if crash.signal:
                    signal_counts[crash.signal] = signal_counts.get(crash.signal, 0) + 1

            summary_table = Table(show_header=False, box=box.SIMPLE)
            summary_table.add_column("Metric", style="bold cyan")
            summary_table.add_column("Value", justify="right")

            summary_table.add_row("Total Crashes", str(len(crashes)))
            summary_table.add_row("Unique Signals", str(len(signal_counts)))

            for severity, count in sorted(severity_counts.items()):
                summary_table.add_row(f"{severity.title()} Severity", str(count))

            console.print(
                Panel.fit(
                    summary_table,
                    title=f"🐛 Crash Summary",
                    box=box.ROUNDED
                )
            )

            # Detailed crash table (first `limit` crashes only)
            if crashes_to_show:
                crashes_table = Table(box=box.ROUNDED)
                crashes_table.add_column("Crash ID", style="bold cyan")
                crashes_table.add_column("Signal", justify="center")
                crashes_table.add_column("Severity", justify="center")
                crashes_table.add_column("Timestamp", justify="center")
                crashes_table.add_column("Input File", style="dim")

                for crash in crashes_to_show:
                    # Emoji per common crash signal; generic bug otherwise.
                    signal_emoji = {
                        "SIGSEGV": "💥",
                        "SIGABRT": "🛑",
                        "SIGFPE": "🧮",
                        "SIGILL": "⚠️"
                    }.get(crash.signal or "", "🐛")

                    severity_style = {
                        "high": "red",
                        "medium": "yellow",
                        "low": "green"
                    }.get(crash.severity.lower(), "white")

                    input_display = ""
                    if crash.input_file:
                        input_display = crash.input_file.split("/")[-1]  # Show just filename

                    crashes_table.add_row(
                        crash.crash_id[:12] + "..." if len(crash.crash_id) > 15 else crash.crash_id,
                        f"{signal_emoji} {crash.signal or 'Unknown'}",
                        f"[{severity_style}]{crash.severity}[/{severity_style}]",
                        crash.timestamp.strftime("%H:%M:%S"),
                        input_display
                    )

                console.print(f"\n🐛 [bold]Crash Details[/bold]")
                if len(crashes) > limit:
                    console.print(f"Showing first {limit} of {len(crashes)} crashes")
                console.print()
                console.print(crashes_table)

            console.print(f"\n💡 Use [bold cyan]fuzzforge finding {run_id}[/bold cyan] for detailed analysis")

    except Exception as e:
        console.print(f"❌ Failed to get crash reports: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def _live_monitor(run_id: str, refresh: int):
    """Drive the full-screen live monitoring dashboard for one run.

    Polls run status and fuzzing statistics every ``refresh`` seconds and
    renders them in a Rich layout until the run completes, fails, or errors
    out repeatedly. KeyboardInterrupt is re-raised for the caller to handle.
    """

    class FallbackStats:
        """Zeroed stats stand-in used whenever real stats cannot be fetched.

        Bug fix: this class was previously defined only inside the
        initial-fetch `except` branch, so the polling loop's fallback
        (`stats = FallbackStats(run_id)`) raised NameError whenever the
        initial fetch succeeded but a later stats request failed. Hoisting it
        to function scope makes the fallback reachable from both paths.
        """

        def __init__(self, run_id):
            self.run_id = run_id
            self.workflow = "unknown"
            self.executions = 0
            self.executions_per_sec = 0.0
            self.crashes = 0
            self.unique_crashes = 0
            self.coverage = None
            self.corpus_size = 0
            self.elapsed_time = 0
            self.last_crash_time = None

    with get_client() as client:
        start_time = time.time()

        def render_layout(run_status, stats):
            """Compose the header / stats / progress / footer layout."""
            layout = Layout()
            layout.split_column(
                Layout(name="header", size=3),
                Layout(name="main", ratio=1),
                Layout(name="footer", size=3)
            )
            layout["main"].split_row(
                Layout(name="stats", ratio=1),
                Layout(name="progress", ratio=1)
            )
            header = Panel(
                f"[bold]FuzzForge Live Monitor[/bold]\n"
                f"Run: {run_id[:12]}... | Status: {run_status.status} | "
                f"Uptime: {format_duration(int(time.time() - start_time))}",
                box=box.ROUNDED,
                style="cyan"
            )
            layout["header"].update(header)
            layout["stats"].update(create_stats_table(stats))

            progress_table = Table(show_header=False, box=box.SIMPLE)
            progress_table.add_column("Metric", style="bold")
            progress_table.add_column("Progress")
            if stats.executions > 0:
                # 1000 exec/s renders as a full bar.
                exec_rate_percent = min(100, (stats.executions_per_sec / 1000) * 100)
                progress_table.add_row("Exec Rate", create_progress_bar(exec_rate_percent, "green"))
                # Crashes per 100k executions, x10 so 10/100k fills the bar.
                crash_rate = (stats.crashes / stats.executions) * 100000
                crash_rate_percent = min(100, crash_rate * 10)
                progress_table.add_row("Crash Rate", create_progress_bar(crash_rate_percent, "red"))
            if stats.coverage is not None:
                progress_table.add_row("Coverage", create_progress_bar(stats.coverage, "blue"))
            layout["progress"].update(Panel.fit(progress_table, title="📊 Progress Indicators", box=box.ROUNDED))

            footer = Panel(
                f"Last updated: {datetime.now().strftime('%H:%M:%S')} | "
                f"Refresh interval: {refresh}s | Press Ctrl+C to exit",
                box=box.ROUNDED,
                style="dim"
            )
            layout["footer"].update(footer)
            return layout

        with Live(auto_refresh=False, console=console, screen=True) as live:
            # Initial fetch; fall back to placeholder objects so the dashboard
            # renders even before the API responds successfully.
            try:
                run_status = client.get_run_status(run_id)
                stats = client.get_fuzzing_stats(run_id)
            except Exception:
                stats = FallbackStats(run_id)
                run_status = type("RS", (), {"status": "Unknown", "is_completed": False, "is_failed": False})()

            live.update(render_layout(run_status, stats), refresh=True)

            # Simple polling approach; give up after too many consecutive
            # status-fetch failures.
            consecutive_errors = 0
            max_errors = 5

            while True:
                try:
                    # Poll for the run status first.
                    try:
                        run_status = client.get_run_status(run_id)
                        consecutive_errors = 0
                    except Exception as e:
                        consecutive_errors += 1
                        if consecutive_errors >= max_errors:
                            console.print(f"❌ Too many errors getting run status: {e}", style="red")
                            break
                        time.sleep(refresh)
                        continue

                    # Fuzzing stats are best-effort: substitute zeros when the
                    # endpoint is unavailable for this run.
                    try:
                        stats = client.get_fuzzing_stats(run_id)
                    except Exception:
                        stats = FallbackStats(run_id)

                    # Update display
                    live.update(render_layout(run_status, stats), refresh=True)

                    # Stop polling once the run reaches a terminal state.
                    if getattr(run_status, 'is_completed', False) or getattr(run_status, 'is_failed', False):
                        # Show final state for a few seconds
                        console.print("\n🏁 Run completed. Showing final state for 10 seconds...")
                        time.sleep(10)
                        break

                    # Wait before next poll
                    time.sleep(refresh)

                except KeyboardInterrupt:
                    raise
                except Exception as e:
                    console.print(f"⚠️ Monitoring error: {e}", style="yellow")
                    time.sleep(refresh)

            # Final banner summarising the run outcome.
            final_message = (
                f"[bold]FuzzForge Live Monitor - COMPLETED[/bold]\n"
                f"Run: {run_id[:12]}... | Status: {run_status.status} | "
                f"Total runtime: {format_duration(int(time.time() - start_time))}"
            )
            style = "green" if getattr(run_status, 'is_completed', False) else "red"
            live.update(Panel(final_message, box=box.ROUNDED, style=style), refresh=True)
|
||||
|
||||
|
||||
@app.command("live")
def live_monitor(
    run_id: str = typer.Argument(..., help="Run ID to monitor live"),
    refresh: int = typer.Option(
        2, "--refresh", "-r",
        help="Refresh interval in seconds (fallback when streaming unavailable)"
    )
):
    """
    📺 Real-time monitoring dashboard with live updates (WebSocket/SSE with REST fallback)
    """
    # Print a short banner before the dashboard takes over the terminal.
    console.print("📺 [bold]Live Monitoring Dashboard[/bold]")
    console.print(f"Run: {run_id}")
    console.print("Press Ctrl+C to stop monitoring\n")

    try:
        _live_monitor(run_id, refresh)
    except KeyboardInterrupt:
        # Ctrl+C is the normal way to leave the dashboard — not an error.
        console.print("\n📊 Monitoring stopped by user.", style="yellow")
    except Exception as exc:
        console.print(f"❌ Failed to start live monitoring: {exc}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
def create_progress_bar(percentage: float, color: str = "green") -> str:
    """Create a simple fixed-width text progress bar in Rich markup.

    Args:
        percentage: Completion percentage. Values outside 0-100 are clamped
            for the bar rendering so the bar never over- or under-flows its
            fixed width (previously a negative value produced an oversized
            "░" run and >100 produced an oversized "█" run). The numeric
            label still shows the raw value.
        color: Rich color name used for the bar markup.

    Returns:
        A string like ``[green]██████░░░░░░░░░░░░░░[/green] 30.0%``.
    """
    width = 20
    # Clamp only for the bar geometry; keep the raw value in the label.
    clamped = max(0.0, min(100.0, percentage))
    filled = int((clamped / 100) * width)
    bar = "█" * filled + "░" * (width - filled)
    return f"[{color}]{bar}[/{color}] {percentage:.1f}%"
|
||||
|
||||
|
||||
@app.callback(invoke_without_command=True)
def monitor_callback(ctx: typer.Context):
    """
    📊 Real-time monitoring and statistics
    """
    # When a subcommand was given, defer to it entirely.
    if ctx.invoked_subcommand is not None:
        return

    # Bare "monitor" invocation: the default command is not implemented yet,
    # so point the user at the working subcommands instead.
    from rich.console import Console

    out = Console()
    out.print("🚧 [yellow]Monitor command is not fully implemented yet.[/yellow]")
    out.print("Please use specific subcommands:")
    out.print(" • [cyan]ff monitor stats <run-id>[/cyan] - Show execution statistics")
    out.print(" • [cyan]ff monitor crashes <run-id>[/cyan] - Show crash reports")
    out.print(" • [cyan]ff monitor live <run-id>[/cyan] - Live monitoring dashboard")
|
||||
@@ -0,0 +1,165 @@
|
||||
"""
|
||||
Status command for showing project and API information.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from pathlib import Path
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich import box
|
||||
|
||||
from ..config import get_project_config, FuzzForgeConfig
|
||||
from ..database import get_project_db
|
||||
from fuzzforge_sdk import FuzzForgeClient
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
def show_status():
    """Show comprehensive project and API status.

    Prints, in order: a project-presence check (returns early if no
    ``.fuzzforge`` directory exists), project configuration, local database
    statistics (if a database is available), and live API connectivity
    including the first workflows reported by the server.
    """
    current_dir = Path.cwd()
    fuzzforge_dir = current_dir / ".fuzzforge"

    # Project status header
    console.print("\n📊 [bold]FuzzForge Project Status[/bold]\n")

    # No project marker directory: tell the user how to create one and stop.
    if not fuzzforge_dir.exists():
        console.print(
            Panel.fit(
                "❌ No FuzzForge project found in current directory\n\n"
                "Run [bold cyan]ff init[/bold cyan] to initialize a project",
                title="Project Status",
                box=box.ROUNDED
            )
        )
        return

    # Load project configuration; fall back to library defaults when the
    # project has no (readable) config.
    config = get_project_config()
    if not config:
        config = FuzzForgeConfig()

    # Project info table
    project_table = Table(show_header=False, box=box.SIMPLE)
    project_table.add_column("Property", style="bold cyan")
    project_table.add_column("Value")

    project_table.add_row("Project Name", config.project.name)
    project_table.add_row("Location", str(current_dir))
    project_table.add_row("API URL", config.project.api_url)
    project_table.add_row("Default Timeout", f"{config.project.default_timeout}s")

    console.print(
        Panel.fit(
            project_table,
            title="✅ Project Information",
            box=box.ROUNDED
        )
    )

    # Database status — best effort: any error is reported as a warning and
    # the remaining sections still render.
    db = get_project_db()
    if db:
        try:
            stats = db.get_stats()
            db_table = Table(show_header=False, box=box.SIMPLE)
            db_table.add_column("Metric", style="bold cyan")
            db_table.add_column("Count", justify="right")

            db_table.add_row("Total Runs", str(stats["total_runs"]))
            db_table.add_row("Total Findings", str(stats["total_findings"]))
            db_table.add_row("Total Crashes", str(stats["total_crashes"]))
            db_table.add_row("Runs (Last 7 days)", str(stats["runs_last_7_days"]))

            # Per-status breakdown, each line prefixed with a status emoji.
            if stats["runs_by_status"]:
                db_table.add_row("", "")  # Spacer
                for status, count in stats["runs_by_status"].items():
                    # NOTE: local name shadows nothing here — this module does
                    # not import a status_emoji helper.
                    status_emoji = {
                        "completed": "✅",
                        "running": "🔄",
                        "failed": "❌",
                        "queued": "⏳",
                        "cancelled": "⏹️"
                    }.get(status, "📋")
                    db_table.add_row(f"{status_emoji} {status.title()}", str(count))

            console.print(
                Panel.fit(
                    db_table,
                    title="🗄️ Database Statistics",
                    box=box.ROUNDED
                )
            )
        except Exception as e:
            console.print(f"⚠️ Database error: {e}", style="yellow")

    # API status — uses a short fixed timeout so a down server fails fast.
    console.print("\n🔗 [bold]API Connectivity[/bold]")
    try:
        with FuzzForgeClient(base_url=config.get_api_url(), timeout=10.0) as client:
            api_status = client.get_api_status()
            workflows = client.list_workflows()

            api_table = Table(show_header=False, box=box.SIMPLE)
            api_table.add_column("Property", style="bold cyan")
            api_table.add_column("Value")

            api_table.add_row("Status", f"✅ Connected")
            api_table.add_row("Service", f"{api_status.name} v{api_status.version}")
            api_table.add_row("Workflows", str(len(workflows)))

            console.print(
                Panel.fit(
                    api_table,
                    title="✅ API Status",
                    box=box.ROUNDED
                )
            )

            # Show available workflows (descriptions truncated to keep rows short)
            if workflows:
                workflow_table = Table(box=box.SIMPLE_HEAD)
                workflow_table.add_column("Name", style="bold")
                workflow_table.add_column("Version", justify="center")
                workflow_table.add_column("Description")

                for workflow in workflows[:10]:  # Limit to first 10
                    workflow_table.add_row(
                        workflow.name,
                        workflow.version,
                        workflow.description[:60] + "..." if len(workflow.description) > 60 else workflow.description
                    )

                if len(workflows) > 10:
                    workflow_table.add_row("...", "...", f"and {len(workflows) - 10} more workflows")

                console.print(
                    Panel.fit(
                        workflow_table,
                        title=f"🔧 Available Workflows ({len(workflows)})",
                        box=box.ROUNDED
                    )
                )

    except Exception as e:
        # Any failure while talking to the API is summarized in one panel;
        # rendering errors inside the `with` block land here too.
        console.print(
            Panel.fit(
                f"❌ Failed to connect to API\n\n"
                f"Error: {str(e)}\n\n"
                f"API URL: {config.get_api_url()}\n\n"
                "Check that the FuzzForge API is running and accessible.",
                title="❌ API Connection Failed",
                box=box.ROUNDED
            )
        )
|
||||
@@ -0,0 +1,591 @@
|
||||
"""
|
||||
Workflow execution and management commands.
|
||||
Replaces the old 'runs' terminology with cleaner workflow-centric commands.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import json
|
||||
import time
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Optional, Dict, Any, List
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.progress import Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich.live import Live
|
||||
from rich import box
|
||||
|
||||
from ..config import get_project_config, FuzzForgeConfig
|
||||
from ..database import get_project_db, ensure_project_db, RunRecord
|
||||
from ..exceptions import (
|
||||
handle_error, retry_on_network_error, safe_json_load, require_project,
|
||||
APIConnectionError, ValidationError, DatabaseError, FileOperationError
|
||||
)
|
||||
from ..validation import (
|
||||
validate_run_id, validate_workflow_name, validate_target_path,
|
||||
validate_volume_mode, validate_parameters, validate_timeout
|
||||
)
|
||||
from ..progress import progress_manager, spinner, step_progress
|
||||
from ..completion import WorkflowNameComplete, TargetPathComplete, VolumeModetComplete
|
||||
from ..constants import (
|
||||
STATUS_EMOJIS, MAX_RUN_ID_DISPLAY_LENGTH, DEFAULT_VOLUME_MODE,
|
||||
PROGRESS_STEP_DELAYS, MAX_RETRIES, RETRY_DELAY, POLL_INTERVAL
|
||||
)
|
||||
from fuzzforge_sdk import FuzzForgeClient, WorkflowSubmission
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer()
|
||||
|
||||
|
||||
@retry_on_network_error(max_retries=MAX_RETRIES, delay=RETRY_DELAY)
def get_client() -> FuzzForgeClient:
    """Build a FuzzForge API client from the project configuration,
    falling back to default settings, retrying on transient network errors."""
    cfg = get_project_config()
    if not cfg:
        cfg = FuzzForgeConfig()
    return FuzzForgeClient(base_url=cfg.get_api_url(), timeout=cfg.get_timeout())
|
||||
|
||||
|
||||
def status_emoji(status: str) -> str:
    """Return the emoji for an execution status (case-insensitive lookup)."""
    key = status.lower()
    fallback = STATUS_EMOJIS["unknown"]
    return STATUS_EMOJIS.get(key, fallback)
|
||||
|
||||
|
||||
def parse_inline_parameters(params: List[str]) -> Dict[str, Any]:
    """Turn a list of ``key=value`` strings into a parameter dict.

    All parsing and validation is delegated to ``validate_parameters``.
    """
    parsed = validate_parameters(params)
    return parsed
|
||||
|
||||
|
||||
def execute_workflow_submission(
    client: FuzzForgeClient,
    workflow: str,
    target_path: str,
    parameters: Dict[str, Any],
    volume_mode: str,
    timeout: Optional[int],
    interactive: bool
) -> Any:
    """Handle the workflow submission process.

    Fetches workflow metadata, interactively prompts for any missing required
    parameters (when ``interactive`` is true), validates the volume mode,
    builds a :class:`WorkflowSubmission`, asks for confirmation (interactive
    mode only), then submits the workflow while driving a step progress UI.

    Args:
        client: Connected FuzzForge API client.
        workflow: Workflow name to execute.
        target_path: Absolute path of the analysis target.
        parameters: Pre-collected parameters; mutated in place when the user
            is prompted for missing required values.
        volume_mode: Volume mount mode requested by the caller ("ro"/"rw").
        timeout: Optional execution timeout in seconds.
        interactive: Whether to prompt for missing parameters and confirm.

    Returns:
        The submission response from ``client.submit_workflow`` (exposes at
        least ``run_id`` and ``status``).

    Raises:
        ValidationError: If ``volume_mode`` is not supported by the workflow.
        typer.Exit: If the user declines the confirmation prompt.
    """
    # Get workflow metadata for parameter validation
    console.print(f"🔧 Getting workflow information for: {workflow}")
    workflow_meta = client.get_workflow_metadata(workflow)
    param_response = client.get_workflow_parameters(workflow)

    # Interactive parameter input — only when the schema declares properties.
    if interactive and workflow_meta.parameters.get("properties"):
        properties = workflow_meta.parameters.get("properties", {})
        required_params = set(workflow_meta.parameters.get("required", []))
        # NOTE(review): `defaults` is currently unused — presumably intended
        # for pre-filling prompts; confirm before removing.
        defaults = param_response.defaults

        missing_required = required_params - set(parameters.keys())

        if missing_required:
            console.print(f"\n📝 [bold]Missing required parameters:[/bold] {', '.join(missing_required)}")
            console.print("Please provide values:\n")

            for param_name in missing_required:
                param_schema = properties.get(param_name, {})
                description = param_schema.get("description", "")
                param_type = param_schema.get("type", "string")

                # Build a prompt like: name (description) [type]
                prompt_text = f"{param_name}"
                if description:
                    prompt_text += f" ({description})"
                prompt_text += f" [{param_type}]"

                # Re-prompt until the input coerces cleanly to the schema type.
                while True:
                    user_input = Prompt.ask(prompt_text, console=console)

                    try:
                        if param_type == "integer":
                            parameters[param_name] = int(user_input)
                        elif param_type == "number":
                            parameters[param_name] = float(user_input)
                        elif param_type == "boolean":
                            parameters[param_name] = user_input.lower() in ("true", "yes", "1", "on")
                        elif param_type == "array":
                            # Comma-separated list; empty segments are dropped.
                            parameters[param_name] = [item.strip() for item in user_input.split(",") if item.strip()]
                        else:
                            parameters[param_name] = user_input
                        break
                    except ValueError as e:
                        console.print(f"❌ Invalid {param_type}: {e}", style="red")

    # Validate volume mode — first against general rules, then against what
    # this specific workflow supports.
    validate_volume_mode(volume_mode)
    if volume_mode not in workflow_meta.supported_volume_modes:
        raise ValidationError(
            "volume mode", volume_mode,
            f"one of: {', '.join(workflow_meta.supported_volume_modes)}"
        )

    # Create submission
    submission = WorkflowSubmission(
        target_path=target_path,
        volume_mode=volume_mode,
        parameters=parameters,
        timeout=timeout
    )

    # Show submission summary
    console.print(f"\n🎯 [bold]Executing workflow:[/bold]")
    console.print(f" Workflow: {workflow}")
    console.print(f" Target: {target_path}")
    console.print(f" Volume Mode: {volume_mode}")
    if parameters:
        console.print(f" Parameters: {len(parameters)} provided")
    if timeout:
        console.print(f" Timeout: {timeout}s")

    # Only ask for confirmation in interactive mode
    if interactive:
        if not Confirm.ask("\nExecute workflow?", default=True, console=console):
            console.print("❌ Execution cancelled", style="yellow")
            raise typer.Exit(0)
    else:
        console.print("\n🚀 Executing workflow...")

    # Submit the workflow with enhanced progress
    console.print(f"\n🚀 Executing workflow: [bold yellow]{workflow}[/bold yellow]")

    steps = [
        "Validating workflow configuration",
        "Connecting to FuzzForge API",
        "Uploading parameters and settings",
        "Creating workflow deployment",
        "Initializing execution environment"
    ]

    # Cosmetic step progress; the sleeps pace the UI, only the submit_workflow
    # call under the "Uploading" step does real work.
    with step_progress(steps, f"Executing {workflow}") as progress:
        progress.next_step()  # Validating
        time.sleep(PROGRESS_STEP_DELAYS["validating"])

        progress.next_step()  # Connecting
        time.sleep(PROGRESS_STEP_DELAYS["connecting"])

        progress.next_step()  # Uploading
        response = client.submit_workflow(workflow, submission)
        time.sleep(PROGRESS_STEP_DELAYS["uploading"])

        progress.next_step()  # Creating deployment
        time.sleep(PROGRESS_STEP_DELAYS["creating"])

        progress.next_step()  # Initializing
        time.sleep(PROGRESS_STEP_DELAYS["initializing"])

        progress.complete(f"Workflow started successfully!")

    return response
|
||||
|
||||
|
||||
# Main workflow execution command (replaces 'runs submit')
|
||||
@app.command(name="exec", hidden=True)  # Hidden because it will be called from main workflow command
def execute_workflow(
    workflow: str = typer.Argument(..., help="Workflow name to execute"),
    target_path: str = typer.Argument(..., help="Path to analyze"),
    params: List[str] = typer.Argument(default=None, help="Parameters as key=value pairs"),
    param_file: Optional[str] = typer.Option(
        None, "--param-file", "-f",
        help="JSON file containing workflow parameters"
    ),
    volume_mode: str = typer.Option(
        DEFAULT_VOLUME_MODE, "--volume-mode", "-v",
        help="Volume mount mode: ro (read-only) or rw (read-write)"
    ),
    timeout: Optional[int] = typer.Option(
        None, "--timeout", "-t",
        help="Execution timeout in seconds"
    ),
    interactive: bool = typer.Option(
        True, "--interactive/--no-interactive", "-i/-n",
        help="Interactive parameter input for missing required parameters"
    ),
    wait: bool = typer.Option(
        False, "--wait", "-w",
        help="Wait for execution to complete"
    ),
    live: bool = typer.Option(
        False, "--live", "-l",
        help="Start live monitoring after execution (useful for fuzzing workflows)"
    )
):
    """
    🚀 Execute a workflow on a target

    Use --live for fuzzing workflows to see real-time progress.
    Use --wait to wait for completion without live dashboard.
    """
    # --- Input validation (fails fast via handle_error) -------------------
    try:
        validate_workflow_name(workflow)
        target_path_obj = validate_target_path(target_path, must_exist=True)
        target_path = str(target_path_obj.absolute())
        validate_timeout(timeout)

        # Ensure we're in a project directory
        require_project()
    except Exception as e:
        handle_error(e, "validating inputs")

    # --- Parameter collection: file first, inline overrides second --------
    parameters = {}

    if param_file:
        try:
            file_params = safe_json_load(param_file)
            if isinstance(file_params, dict):
                parameters.update(file_params)
            else:
                raise ValidationError("parameter file", param_file, "a JSON object")
        except Exception as e:
            handle_error(e, "loading parameter file")

    if params:
        try:
            inline_params = parse_inline_parameters(params)
            parameters.update(inline_params)
        except Exception as e:
            handle_error(e, "parsing parameters")

    try:
        with get_client() as client:
            response = execute_workflow_submission(
                client, workflow, target_path, parameters,
                volume_mode, timeout, interactive
            )

            console.print(f"✅ Workflow execution started!", style="green")
            console.print(f" Execution ID: [bold cyan]{response.run_id}[/bold cyan]")
            console.print(f" Status: {status_emoji(response.status)} {response.status}")

            # Save to database (best effort — execution already started).
            # FIX: `db` was previously only bound inside this try, and the
            # --wait branch below referenced it, relying on an accidentally
            # caught NameError when ensure_project_db() failed. Initialize it
            # up front and guard its later use.
            db = None
            try:
                db = ensure_project_db()
                run_record = RunRecord(
                    run_id=response.run_id,
                    workflow=workflow,
                    status=response.status,
                    target_path=target_path,
                    parameters=parameters,
                    created_at=datetime.now()
                )
                db.save_run(run_record)
            except Exception as e:
                # Don't fail the whole operation if database save fails
                console.print(f"⚠️ Failed to save execution to database: {e}", style="yellow")

            console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor {response.run_id}[/bold cyan]")
            console.print(f"💡 Check status: [bold cyan]fuzzforge workflow status {response.run_id}[/bold cyan]")

            # Suggest --live for fuzzing workflows
            if not live and not wait and "fuzzing" in workflow.lower():
                console.print(f"💡 Next time try: [bold cyan]fuzzforge workflow {workflow} {target_path} --live[/bold cyan] for real-time fuzzing dashboard", style="dim")

            if live:
                # Live dashboard; messaging differs for fuzzing workflows.
                is_fuzzing = "fuzzing" in workflow.lower()
                if is_fuzzing:
                    console.print(f"\n📺 Starting live fuzzing dashboard...")
                    console.print("💡 You'll see real-time crash discovery, execution stats, and coverage data.")
                else:
                    console.print(f"\n📺 Starting live monitoring dashboard...")

                console.print("Press Ctrl+C to stop monitoring (execution continues in background).\n")

                try:
                    # Imported lazily to avoid a circular import at module load.
                    from ..commands.monitor import live_monitor
                    live_monitor(response.run_id, refresh=3)
                except KeyboardInterrupt:
                    console.print(f"\n⏹️ Live monitoring stopped (execution continues in background)", style="yellow")
                except Exception as e:
                    console.print(f"⚠️ Failed to start live monitoring: {e}", style="yellow")
                    console.print(f"💡 You can still monitor manually: [bold cyan]fuzzforge monitor {response.run_id}[/bold cyan]")

            elif wait:
                # Block until the run reaches a terminal state.
                console.print(f"\n⏳ Waiting for execution to complete...")
                try:
                    final_status = client.wait_for_completion(response.run_id, poll_interval=POLL_INTERVAL)

                    # Update database (skip when the DB was unavailable above).
                    try:
                        if db:
                            db.update_run_status(
                                response.run_id,
                                final_status.status,
                                completed_at=datetime.now() if final_status.is_completed else None
                            )
                    except Exception as e:
                        console.print(f"⚠️ Failed to update database: {e}", style="yellow")

                    console.print(f"🏁 Execution completed with status: {status_emoji(final_status.status)} {final_status.status}")

                    if final_status.is_completed:
                        console.print(f"💡 View findings: [bold cyan]fuzzforge findings {response.run_id}[/bold cyan]")

                except KeyboardInterrupt:
                    console.print(f"\n⏹️ Monitoring cancelled (execution continues in background)", style="yellow")
                except Exception as e:
                    handle_error(e, "waiting for completion")

    except Exception as e:
        handle_error(e, "executing workflow")
|
||||
|
||||
|
||||
@app.command("status")
def workflow_status(
    execution_id: Optional[str] = typer.Argument(None, help="Execution ID to check (defaults to most recent)")
):
    """
    📊 Check the status of a workflow execution

    Resolves the execution ID (falling back to the most recent run in the
    project database), fetches the live status from the API, syncs it back
    into the local database, and renders a status panel with next-step hints.
    """
    try:
        require_project()

        db = get_project_db()
        if not db:
            raise DatabaseError("get project database", Exception("No database found"))

        # Resolve the execution ID.
        # FIX: the explicit-ID path previously called validate_run_id twice
        # (once before the DB lookup and again here) — validate exactly once.
        if not execution_id:
            recent_runs = db.list_runs(limit=1)
            if not recent_runs:
                console.print("⚠️ No executions found in project database", style="yellow")
                raise typer.Exit(0)
            execution_id = recent_runs[0].run_id
            console.print(f"🔍 Using most recent execution: {execution_id}")
        else:
            validate_run_id(execution_id)

        # Get status from API
        with get_client() as client:
            status = client.get_run_status(execution_id)

        # Sync the fresh status into the local database (best effort).
        try:
            db.update_run_status(
                execution_id,
                status.status,
                completed_at=status.updated_at if status.is_completed else None
            )
        except Exception as e:
            console.print(f"⚠️ Failed to update database: {e}", style="yellow")

        # Display status
        console.print(f"\n📊 [bold]Execution Status: {execution_id}[/bold]\n")

        status_table = Table(show_header=False, box=box.SIMPLE)
        status_table.add_column("Property", style="bold cyan")
        status_table.add_column("Value")

        status_table.add_row("Execution ID", execution_id)
        status_table.add_row("Workflow", status.workflow)
        status_table.add_row("Status", f"{status_emoji(status.status)} {status.status}")
        status_table.add_row("Created", status.created_at.strftime("%Y-%m-%d %H:%M:%S"))
        status_table.add_row("Updated", status.updated_at.strftime("%Y-%m-%d %H:%M:%S"))

        if status.is_completed:
            duration = status.updated_at - status.created_at
            status_table.add_row("Duration", str(duration).split('.')[0])  # Remove microseconds

        console.print(
            Panel.fit(
                status_table,
                title=f"📊 Status Information",
                box=box.ROUNDED
            )
        )

        # Show next steps.
        # FIX: the completed hint said "fuzzforge finding"; the actual command
        # (used everywhere else in this module) is "fuzzforge findings".
        if status.is_running:
            console.print(f"\n💡 Monitor live: [bold cyan]fuzzforge monitor {execution_id}[/bold cyan]")
        elif status.is_completed:
            console.print(f"💡 View findings: [bold cyan]fuzzforge findings {execution_id}[/bold cyan]")
        elif status.is_failed:
            console.print(f"💡 Check logs: [bold cyan]fuzzforge workflow logs {execution_id}[/bold cyan]")

    except Exception as e:
        handle_error(e, "getting execution status")
|
||||
|
||||
|
||||
@app.command("history")
def workflow_history(
    workflow: Optional[str] = typer.Option(None, "--workflow", "-w", help="Filter by workflow name"),
    status: Optional[str] = typer.Option(None, "--status", "-s", help="Filter by status"),
    limit: int = typer.Option(20, "--limit", "-l", help="Maximum number of executions to show")
):
    """
    📋 Show workflow execution history

    Lists runs from the local project database, optionally filtered by
    workflow name and/or status, newest first up to ``limit`` entries.
    """
    try:
        require_project()

        if limit <= 0:
            raise ValidationError("limit", limit, "a positive integer")

        db = get_project_db()
        if not db:
            raise DatabaseError("get project database", Exception("No database found"))
        runs = db.list_runs(workflow=workflow, status=status, limit=limit)

        if not runs:
            console.print("⚠️ No executions found matching criteria", style="yellow")
            return

        table = Table(box=box.ROUNDED)
        table.add_column("Execution ID", style="bold cyan")
        table.add_column("Workflow", style="bold")
        table.add_column("Status", justify="center")
        table.add_column("Target", style="dim")
        table.add_column("Created", justify="center")
        table.add_column("Parameters", justify="center", style="dim")

        for run in runs:
            param_count = len(run.parameters) if run.parameters else 0
            param_str = f"{param_count} params" if param_count > 0 else "-"

            # FIX: the truncation condition used MAX_RUN_ID_DISPLAY_LENGTH but
            # the slice was hard-coded to [:12]; use the constant consistently.
            table.add_row(
                run.run_id[:MAX_RUN_ID_DISPLAY_LENGTH] + "..." if len(run.run_id) > MAX_RUN_ID_DISPLAY_LENGTH else run.run_id,
                run.workflow,
                f"{status_emoji(run.status)} {run.status}",
                Path(run.target_path).name,
                run.created_at.strftime("%m-%d %H:%M"),
                param_str
            )

        console.print(f"\n📋 [bold]Workflow Execution History ({len(runs)})[/bold]")
        if workflow:
            console.print(f" Filtered by workflow: {workflow}")
        if status:
            console.print(f" Filtered by status: {status}")
        console.print()
        console.print(table)

        console.print(f"\n💡 Use [bold cyan]fuzzforge workflow status <execution-id>[/bold cyan] for detailed status")

    except Exception as e:
        handle_error(e, "listing execution history")
|
||||
|
||||
|
||||
@app.command("retry")
def retry_workflow(
    execution_id: Optional[str] = typer.Argument(None, help="Execution ID to retry (defaults to most recent)"),
    modify_params: bool = typer.Option(
        False, "--modify-params", "-m",
        help="Interactively modify parameters before retrying"
    )
):
    """
    🔄 Retry a workflow execution with the same or modified parameters

    Looks up the original run in the project database (defaulting to the
    most recent one), optionally lets the user edit each parameter while
    preserving its original type, submits a new execution of the same
    workflow against the same target, and records the new run with a
    ``retry_of`` marker in its metadata.
    """
    try:
        require_project()

        db = get_project_db()
        if not db:
            raise DatabaseError("get project database", Exception("No database found"))

        # Get execution ID if not provided
        if not execution_id:
            recent_runs = db.list_runs(limit=1)
            if not recent_runs:
                console.print("⚠️ No executions found to retry", style="yellow")
                raise typer.Exit(0)
            execution_id = recent_runs[0].run_id
            console.print(f"🔄 Retrying most recent execution: {execution_id}")
        else:
            validate_run_id(execution_id)

        # Get original execution record from the local database
        original_run = db.get_run(execution_id)
        if not original_run:
            raise ValidationError("execution_id", execution_id, "an existing execution ID in the database")

        console.print(f"🔄 [bold]Retrying workflow:[/bold] {original_run.workflow}")
        console.print(f" Original Execution ID: {execution_id}")
        console.print(f" Target: {original_run.target_path}")

        # Copy so edits below never mutate the stored record's dict.
        parameters = original_run.parameters.copy()

        # Modify parameters if requested — one prompt per existing parameter,
        # pre-filled with the current value.
        if modify_params and parameters:
            console.print(f"\n📝 [bold]Current parameters:[/bold]")
            for key, value in parameters.items():
                new_value = Prompt.ask(
                    f"{key}",
                    default=str(value),
                    console=console
                )
                if new_value != str(value):
                    # Try to maintain the original value's type; fall back to
                    # the raw string if coercion fails.
                    try:
                        if isinstance(value, bool):
                            # bool check must precede int (bool is an int subclass)
                            parameters[key] = new_value.lower() in ("true", "yes", "1", "on")
                        elif isinstance(value, int):
                            parameters[key] = int(new_value)
                        elif isinstance(value, float):
                            parameters[key] = float(new_value)
                        elif isinstance(value, list):
                            parameters[key] = [item.strip() for item in new_value.split(",") if item.strip()]
                        else:
                            parameters[key] = new_value
                    except ValueError:
                        parameters[key] = new_value

        # Submit new execution.
        # NOTE(review): the retry submission does not carry over the original
        # run's volume_mode or timeout (they are not stored on RunRecord) —
        # the server-side defaults apply; confirm this is intended.
        with get_client() as client:
            submission = WorkflowSubmission(
                target_path=original_run.target_path,
                parameters=parameters
            )

            response = client.submit_workflow(original_run.workflow, submission)

            console.print(f"\n✅ Retry submitted successfully!", style="green")
            console.print(f" New Execution ID: [bold cyan]{response.run_id}[/bold cyan]")
            console.print(f" Status: {status_emoji(response.status)} {response.status}")

            # Save to database (best effort — the retry already started).
            try:
                run_record = RunRecord(
                    run_id=response.run_id,
                    workflow=original_run.workflow,
                    status=response.status,
                    target_path=original_run.target_path,
                    parameters=parameters,
                    created_at=datetime.now(),
                    metadata={"retry_of": execution_id}
                )
                db.save_run(run_record)
            except Exception as e:
                console.print(f"⚠️ Failed to save execution to database: {e}", style="yellow")

            console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor {response.run_id}[/bold cyan]")

    except Exception as e:
        handle_error(e, "retrying workflow")
|
||||
|
||||
|
||||
@app.callback()
def workflow_exec_callback():
    """
    🚀 Workflow execution management
    """
    # Intentionally empty: this callback only supplies the help text for the
    # command group; all behavior lives in the subcommands.
|
||||
@@ -0,0 +1,305 @@
|
||||
"""
|
||||
Workflow management commands.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import json
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from rich.panel import Panel
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich.syntax import Syntax
|
||||
from rich import box
|
||||
from typing import Optional, Dict, Any
|
||||
|
||||
from ..config import get_project_config, FuzzForgeConfig
|
||||
from ..fuzzy import enhanced_workflow_not_found_handler
|
||||
from fuzzforge_sdk import FuzzForgeClient
|
||||
|
||||
console = Console()
|
||||
app = typer.Typer()
|
||||
|
||||
|
||||
def get_client() -> FuzzForgeClient:
    """Build a FuzzForge API client from the project configuration,
    falling back to default settings when no project config exists."""
    cfg = get_project_config()
    if not cfg:
        cfg = FuzzForgeConfig()
    return FuzzForgeClient(base_url=cfg.get_api_url(), timeout=cfg.get_timeout())
|
||||
|
||||
|
||||
@app.command("list")
def list_workflows():
    """
    📋 List all available security testing workflows
    """
    try:
        with get_client() as client:
            available = client.list_workflows()

            # Nothing registered server-side: report and bail out.
            if not available:
                console.print("❌ No workflows available", style="red")
                return

            listing = Table(box=box.ROUNDED)
            listing.add_column("Name", style="bold cyan")
            listing.add_column("Version", justify="center")
            listing.add_column("Description")
            listing.add_column("Tags", style="dim")

            for wf in available:
                joined_tags = ", ".join(wf.tags) if wf.tags else ""
                listing.add_row(wf.name, wf.version, wf.description, joined_tags)

            console.print(f"\n🔧 [bold]Available Workflows ({len(available)})[/bold]\n")
            console.print(listing)

            console.print(f"\n💡 Use [bold cyan]fuzzforge workflows info <name>[/bold cyan] for detailed information")

    except Exception as exc:
        console.print(f"❌ Failed to fetch workflows: {exc}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
@app.command("info")
|
||||
def workflow_info(
|
||||
name: str = typer.Argument(..., help="Workflow name to get information about")
|
||||
):
|
||||
"""
|
||||
📋 Show detailed information about a specific workflow
|
||||
"""
|
||||
try:
|
||||
with get_client() as client:
|
||||
workflow = client.get_workflow_metadata(name)
|
||||
|
||||
console.print(f"\n🔧 [bold]Workflow: {workflow.name}[/bold]\n")
|
||||
|
||||
# Basic information
|
||||
info_table = Table(show_header=False, box=box.SIMPLE)
|
||||
info_table.add_column("Property", style="bold cyan")
|
||||
info_table.add_column("Value")
|
||||
|
||||
info_table.add_row("Name", workflow.name)
|
||||
info_table.add_row("Version", workflow.version)
|
||||
info_table.add_row("Description", workflow.description)
|
||||
if workflow.author:
|
||||
info_table.add_row("Author", workflow.author)
|
||||
if workflow.tags:
|
||||
info_table.add_row("Tags", ", ".join(workflow.tags))
|
||||
info_table.add_row("Volume Modes", ", ".join(workflow.supported_volume_modes))
|
||||
info_table.add_row("Custom Docker", "✅ Yes" if workflow.has_custom_docker else "❌ No")
|
||||
|
||||
console.print(
|
||||
Panel.fit(
|
||||
info_table,
|
||||
title="ℹ️ Basic Information",
|
||||
box=box.ROUNDED
|
||||
)
|
||||
)
|
||||
|
||||
# Parameters
|
||||
if workflow.parameters:
|
||||
console.print("\n📝 [bold]Parameters Schema[/bold]")
|
||||
|
||||
param_table = Table(box=box.ROUNDED)
|
||||
param_table.add_column("Parameter", style="bold")
|
||||
param_table.add_column("Type", style="cyan")
|
||||
param_table.add_column("Required", justify="center")
|
||||
param_table.add_column("Default")
|
||||
param_table.add_column("Description", style="dim")
|
||||
|
||||
# Extract parameter information from JSON schema
|
||||
properties = workflow.parameters.get("properties", {})
|
||||
required_params = set(workflow.parameters.get("required", []))
|
||||
defaults = workflow.default_parameters
|
||||
|
||||
for param_name, param_schema in properties.items():
|
||||
param_type = param_schema.get("type", "unknown")
|
||||
is_required = "✅" if param_name in required_params else "❌"
|
||||
default_val = str(defaults.get(param_name, "")) if param_name in defaults else ""
|
||||
description = param_schema.get("description", "")
|
||||
|
||||
# Handle array types
|
||||
if param_type == "array":
|
||||
items_type = param_schema.get("items", {}).get("type", "unknown")
|
||||
param_type = f"array[{items_type}]"
|
||||
|
||||
param_table.add_row(
|
||||
param_name,
|
||||
param_type,
|
||||
is_required,
|
||||
default_val[:30] + "..." if len(default_val) > 30 else default_val,
|
||||
description[:50] + "..." if len(description) > 50 else description
|
||||
)
|
||||
|
||||
console.print(param_table)
|
||||
|
||||
# Required modules
|
||||
if workflow.required_modules:
|
||||
console.print(f"\n🔧 [bold]Required Modules:[/bold] {', '.join(workflow.required_modules)}")
|
||||
|
||||
console.print(f"\n💡 Use [bold cyan]fuzzforge workflows parameters {name}[/bold cyan] for interactive parameter builder")
|
||||
|
||||
except Exception as e:
|
||||
error_message = str(e)
|
||||
if "not found" in error_message.lower() or "404" in error_message:
|
||||
# Try fuzzy matching for workflow name
|
||||
enhanced_workflow_not_found_handler(name)
|
||||
else:
|
||||
console.print(f"❌ Failed to get workflow info: {e}", style="red")
|
||||
raise typer.Exit(1)
|
||||
|
||||
|
||||
@app.command("parameters")
|
||||
def workflow_parameters(
|
||||
name: str = typer.Argument(..., help="Workflow name"),
|
||||
output_file: Optional[str] = typer.Option(
|
||||
None, "--output", "-o",
|
||||
help="Save parameters to JSON file"
|
||||
),
|
||||
interactive: bool = typer.Option(
|
||||
True, "--interactive/--no-interactive", "-i/-n",
|
||||
help="Interactive parameter builder"
|
||||
)
|
||||
):
|
||||
"""
|
||||
📝 Interactive parameter builder for workflows
|
||||
"""
|
||||
try:
|
||||
with get_client() as client:
|
||||
workflow = client.get_workflow_metadata(name)
|
||||
param_response = client.get_workflow_parameters(name)
|
||||
|
||||
console.print(f"\n📝 [bold]Parameter Builder: {name}[/bold]\n")
|
||||
|
||||
if not workflow.parameters.get("properties"):
|
||||
console.print("ℹ️ This workflow has no configurable parameters")
|
||||
return
|
||||
|
||||
parameters = {}
|
||||
properties = workflow.parameters.get("properties", {})
|
||||
required_params = set(workflow.parameters.get("required", []))
|
||||
defaults = param_response.defaults
|
||||
|
||||
if interactive:
|
||||
console.print("🔧 Enter parameter values (press Enter for default):\n")
|
||||
|
||||
for param_name, param_schema in properties.items():
|
||||
param_type = param_schema.get("type", "string")
|
||||
description = param_schema.get("description", "")
|
||||
is_required = param_name in required_params
|
||||
default_value = defaults.get(param_name)
|
||||
|
||||
# Build prompt
|
||||
prompt_text = f"{param_name}"
|
||||
if description:
|
||||
prompt_text += f" ({description})"
|
||||
if param_type:
|
||||
prompt_text += f" [{param_type}]"
|
||||
if is_required:
|
||||
prompt_text += " [bold red]*required*[/bold red]"
|
||||
|
||||
# Get user input
|
||||
while True:
|
||||
if default_value is not None:
|
||||
user_input = Prompt.ask(
|
||||
prompt_text,
|
||||
default=str(default_value),
|
||||
console=console
|
||||
)
|
||||
else:
|
||||
user_input = Prompt.ask(
|
||||
prompt_text,
|
||||
console=console
|
||||
)
|
||||
|
||||
# Validate and convert input
|
||||
if user_input.strip() == "" and not is_required:
|
||||
break
|
||||
|
||||
if user_input.strip() == "" and is_required:
|
||||
console.print("❌ This parameter is required", style="red")
|
||||
continue
|
||||
|
||||
try:
|
||||
# Type conversion
|
||||
if param_type == "integer":
|
||||
parameters[param_name] = int(user_input)
|
||||
elif param_type == "number":
|
||||
parameters[param_name] = float(user_input)
|
||||
elif param_type == "boolean":
|
||||
parameters[param_name] = user_input.lower() in ("true", "yes", "1", "on")
|
||||
elif param_type == "array":
|
||||
# Simple comma-separated array
|
||||
parameters[param_name] = [item.strip() for item in user_input.split(",") if item.strip()]
|
||||
else:
|
||||
parameters[param_name] = user_input
|
||||
|
||||
break
|
||||
|
||||
except ValueError as e:
|
||||
console.print(f"❌ Invalid {param_type}: {e}", style="red")
|
||||
|
||||
# Show summary
|
||||
console.print("\n📋 [bold]Parameter Summary:[/bold]")
|
||||
summary_table = Table(show_header=False, box=box.SIMPLE)
|
||||
summary_table.add_column("Parameter", style="cyan")
|
||||
summary_table.add_column("Value", style="white")
|
||||
|
||||
for key, value in parameters.items():
|
||||
summary_table.add_row(key, str(value))
|
||||
|
||||
console.print(summary_table)
|
||||
|
||||
else:
|
||||
# Non-interactive mode - show schema
|
||||
console.print("📋 Parameter Schema:")
|
||||
schema_json = json.dumps(workflow.parameters, indent=2)
|
||||
console.print(Syntax(schema_json, "json", theme="monokai"))
|
||||
|
||||
if defaults:
|
||||
console.print("\n📋 Default Values:")
|
||||
defaults_json = json.dumps(defaults, indent=2)
|
||||
console.print(Syntax(defaults_json, "json", theme="monokai"))
|
||||
|
||||
# Save to file if requested
|
||||
if output_file:
|
||||
if parameters or not interactive:
|
||||
data_to_save = parameters if interactive else {"schema": workflow.parameters, "defaults": defaults}
|
||||
with open(output_file, 'w') as f:
|
||||
json.dump(data_to_save, f, indent=2)
|
||||
console.print(f"\n💾 Parameters saved to: {output_file}")
|
||||
else:
|
||||
console.print("\n❌ No parameters to save", style="red")
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"❌ Failed to build parameters: {e}", style="red")
|
||||
raise typer.Exit(1)
|
||||
|
||||
|
||||
@app.callback(invoke_without_command=True)
def workflows_callback(ctx: typer.Context):
    """
    🔧 Manage security testing workflows
    """
    if ctx.invoked_subcommand is None:
        # No subcommand given: default to listing all workflows.
        list_workflows()
|
||||
@@ -0,0 +1,190 @@
|
||||
"""
|
||||
Shell auto-completion support for FuzzForge CLI.
|
||||
|
||||
Provides intelligent tab completion for commands, workflows, run IDs, and parameters.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import typer
|
||||
from typing import List, Optional
|
||||
from pathlib import Path
|
||||
|
||||
from .config import get_project_config, FuzzForgeConfig
|
||||
from .database import get_project_db
|
||||
from fuzzforge_sdk import FuzzForgeClient
|
||||
|
||||
|
||||
def complete_workflow_names(incomplete: str) -> List[str]:
    """Auto-complete workflow names from the API."""
    try:
        config = get_project_config() or FuzzForgeConfig()
        with FuzzForgeClient(base_url=config.get_api_url(), timeout=5.0) as client:
            candidates = [w.name for w in client.list_workflows()]
    except Exception:
        # API unreachable: fall back to a static list of common workflows.
        candidates = [
            "security_assessment",
            "language_fuzzing",
            "infrastructure_scan",
            "static_analysis_scan",
            "penetration_testing_scan",
            "secret_detection_scan",
        ]
    return [name for name in candidates if name.startswith(incomplete)]
|
||||
|
||||
|
||||
def complete_run_ids(incomplete: str) -> List[str]:
    """Auto-complete run IDs from local database."""
    try:
        db = get_project_db()
        if db:
            # Only the most recent runs are offered as completions.
            recent = db.get_recent_runs(limit=50)
            return [run.run_id for run in recent if run.run_id.startswith(incomplete)]
    except Exception:
        # Completion must never raise; silently degrade to no suggestions.
        pass
    return []
|
||||
|
||||
|
||||
def _with_dir_suffix(entry: Path) -> str:
    """Render a path entry as text, appending '/' to directories."""
    text = str(entry)
    return text + "/" if entry.is_dir() else text


def complete_target_paths(incomplete: str) -> List[str]:
    """Auto-complete file/directory paths."""
    try:
        target = Path(incomplete) if incomplete else Path.cwd()

        if target.is_dir():
            # Completing inside an existing directory: offer everything in it.
            try:
                return [_with_dir_suffix(child) for child in target.iterdir()]
            except PermissionError:
                return []

        # Partial name: match siblings in the parent directory by prefix.
        prefix = target.name
        try:
            return [
                _with_dir_suffix(child)
                for child in target.parent.iterdir()
                if child.name.startswith(prefix)
            ]
        except (PermissionError, FileNotFoundError):
            return []
    except Exception:
        # Completion must never raise.
        return []
|
||||
|
||||
|
||||
def complete_volume_modes(incomplete: str) -> List[str]:
    """Auto-complete volume mount modes (read-only / read-write)."""
    return [mode for mode in ("ro", "rw") if mode.startswith(incomplete)]
|
||||
|
||||
|
||||
def complete_export_formats(incomplete: str) -> List[str]:
    """Auto-complete export formats."""
    known = ("json", "csv", "html", "sarif")
    return [fmt for fmt in known if fmt.startswith(incomplete)]
|
||||
|
||||
|
||||
def complete_severity_levels(incomplete: str) -> List[str]:
    """Auto-complete finding severity levels, highest first."""
    known = ("critical", "high", "medium", "low", "info")
    return [level for level in known if level.startswith(incomplete)]
|
||||
|
||||
|
||||
def complete_workflow_tags(incomplete: str) -> List[str]:
    """Auto-complete workflow tags."""
    try:
        config = get_project_config() or FuzzForgeConfig()
        with FuzzForgeClient(base_url=config.get_api_url(), timeout=5.0) as client:
            tags = set()
            for workflow in client.list_workflows():
                tags.update(workflow.tags or ())
            return [tag for tag in sorted(tags) if tag.startswith(incomplete)]
    except Exception:
        # API unreachable: offer a static set of common tags instead.
        fallback = (
            "security", "fuzzing", "static-analysis", "infrastructure",
            "secrets", "containers", "vulnerabilities", "pentest",
        )
        return [tag for tag in fallback if tag.startswith(incomplete)]
|
||||
|
||||
|
||||
def complete_config_keys(incomplete: str) -> List[str]:
    """Auto-complete configuration keys."""
    known_keys = (
        "api_url",
        "api_timeout",
        "default_workflow",
        "default_volume_mode",
        "project_name",
        "data_retention_days",
        "auto_save_findings",
        "notification_webhook",
    )
    return [key for key in known_keys if key.startswith(incomplete)]
|
||||
|
||||
|
||||
# Completion callbacks for Typer
#
# Pre-built parameter objects bundling an `autocompletion` callback with a
# help string, so commands can reuse them. NOTE: `VolumeModetComplete`
# carries a historical typo; prefer the `VolumeModeComplete` alias below.
WorkflowNameComplete = typer.Option(
    autocompletion=complete_workflow_names,
    help="Workflow name (tab completion available)"
)

RunIdComplete = typer.Option(
    autocompletion=complete_run_ids,
    help="Run ID (tab completion available)"
)

TargetPathComplete = typer.Argument(
    autocompletion=complete_target_paths,
    help="Target path (tab completion available)"
)

VolumeModetComplete = typer.Option(
    autocompletion=complete_volume_modes,
    help="Volume mode: ro or rw (tab completion available)"
)

# Correctly-spelled alias for the typo'd name above; the original name is
# kept so existing imports keep working.
VolumeModeComplete = VolumeModetComplete

ExportFormatComplete = typer.Option(
    autocompletion=complete_export_formats,
    help="Export format (tab completion available)"
)

SeverityComplete = typer.Option(
    autocompletion=complete_severity_levels,
    help="Severity level (tab completion available)"
)

WorkflowTagComplete = typer.Option(
    autocompletion=complete_workflow_tags,
    help="Workflow tag (tab completion available)"
)

ConfigKeyComplete = typer.Option(
    autocompletion=complete_config_keys,
    help="Configuration key (tab completion available)"
)
|
||||
@@ -0,0 +1,420 @@
|
||||
"""
|
||||
Configuration management for FuzzForge CLI.
|
||||
|
||||
Extends project configuration with Cognee integration metadata
|
||||
and provides helpers for AI modules.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, Optional
|
||||
|
||||
try: # Optional dependency; fall back if not installed
|
||||
from dotenv import load_dotenv
|
||||
except ImportError: # pragma: no cover - optional dependency
|
||||
load_dotenv = None
|
||||
|
||||
import yaml
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
def _generate_project_id(project_dir: Path, project_name: str) -> str:
|
||||
"""Generate a deterministic project identifier based on path and name."""
|
||||
resolved_path = str(project_dir.resolve())
|
||||
hash_input = f"{resolved_path}:{project_name}".encode()
|
||||
return hashlib.sha256(hash_input).hexdigest()[:16]
|
||||
|
||||
|
||||
class ProjectConfig(BaseModel):
    """Project configuration model."""

    # Human-readable project name; also feeds the deterministic project id.
    name: str = "fuzzforge-project"
    # Base URL of the FuzzForge backend API (overridable via FUZZFORGE_API_URL).
    api_url: str = "http://localhost:8000"
    # Default request timeout passed to the SDK client — presumably seconds;
    # confirm against the SDK's timeout semantics.
    default_timeout: int = 3600
    # Workflow used when none is specified on the command line.
    default_workflow: Optional[str] = None
    # Deterministic id derived from project path+name; backfilled lazily by
    # FuzzForgeConfig.ensure_project_metadata().
    id: Optional[str] = None
    # Tenant identifier ("fuzzforge_project_<id>"); backfilled lazily.
    tenant_id: Optional[str] = None
|
||||
|
||||
|
||||
class RetentionConfig(BaseModel):
    """Data retention configuration.

    NOTE(review): field meanings are inferred from their names — confirm
    against the database cleanup code that consumes them.
    """

    # Presumably the maximum number of run records kept before pruning.
    max_runs: int = 100
    # Presumably how long (in days) finding records are retained.
    keep_findings_days: int = 90
|
||||
|
||||
|
||||
class PreferencesConfig(BaseModel):
    """User preferences.

    NOTE(review): these flags are consumed by the CLI presentation layer;
    descriptions are inferred from the field names.
    """

    # Automatically persist findings to the local database.
    auto_save_findings: bool = True
    # Show progress bars during long operations.
    show_progress_bars: bool = True
    # Table rendering style name; "rich" by default.
    table_style: str = "rich"
    # Enable colored terminal output.
    color_output: bool = True
|
||||
|
||||
|
||||
class CogneeConfig(BaseModel):
    """Cognee integration metadata."""

    # Master switch for the Cognee integration.
    enabled: bool = True
    # Graph database backend; defaults to the embedded "kuzu" engine.
    graph_database_provider: str = "kuzu"
    # Per-project data/system directories; backfilled by
    # FuzzForgeConfig.ensure_cognee_defaults().
    data_directory: Optional[str] = None
    system_directory: Optional[str] = None
    # Exported as ENABLE_BACKEND_ACCESS_CONTROL by
    # ProjectConfigManager.setup_cognee_environment().
    backend_access_control: bool = True
    # Mirrors of the project id/tenant, backfilled from ProjectConfig.
    project_id: Optional[str] = None
    tenant_id: Optional[str] = None
|
||||
|
||||
|
||||
class FuzzForgeConfig(BaseModel):
    """Complete FuzzForge CLI configuration.

    Aggregates the project, retention, preference, and Cognee sections that
    are persisted together as one YAML document.
    """

    project: ProjectConfig = Field(default_factory=ProjectConfig)
    retention: RetentionConfig = Field(default_factory=RetentionConfig)
    preferences: PreferencesConfig = Field(default_factory=PreferencesConfig)
    cognee: CogneeConfig = Field(default_factory=CogneeConfig)

    @classmethod
    def from_file(cls, config_path: Path) -> "FuzzForgeConfig":
        """Load configuration from YAML file.

        Returns a default-initialized config when the file is missing or
        unreadable (a warning is printed in the unreadable case).
        """
        if not config_path.exists():
            return cls()

        try:
            with open(config_path, "r", encoding="utf-8") as fh:
                data = yaml.safe_load(fh) or {}
                return cls(**data)
        except Exception as exc:  # pragma: no cover - defensive fallback
            print(f"Warning: Failed to load config from {config_path}: {exc}")
            return cls()

    def save_to_file(self, config_path: Path) -> None:
        """Save configuration to YAML file, creating parent dirs as needed."""
        config_path.parent.mkdir(parents=True, exist_ok=True)
        with open(config_path, "w", encoding="utf-8") as fh:
            yaml.dump(
                self.model_dump(),
                fh,
                default_flow_style=False,
                sort_keys=False,
            )

    # ------------------------------------------------------------------
    # Convenience helpers used by CLI and AI modules
    # ------------------------------------------------------------------
    def ensure_project_metadata(self, project_dir: Path) -> bool:
        """Ensure project id/tenant metadata is populated.

        Returns True if any field was backfilled (caller should persist).
        """
        changed = False
        project = self.project
        if not project.id:
            project.id = _generate_project_id(project_dir, project.name)
            changed = True
        if not project.tenant_id:
            project.tenant_id = f"fuzzforge_project_{project.id}"
            changed = True
        return changed

    def ensure_cognee_defaults(self, project_dir: Path) -> bool:
        """Ensure Cognee configuration and directories exist.

        Creates the per-project Cognee tree under
        .fuzzforge/cognee/project_<id>/ and mirrors the project id/tenant
        into the cognee section. Returns True if any config value changed
        (caller should persist).
        """
        self.ensure_project_metadata(project_dir)
        changed = False

        cognee = self.cognee
        if not cognee.project_id:
            cognee.project_id = self.project.id
            changed = True
        if not cognee.tenant_id:
            cognee.tenant_id = self.project.tenant_id
            changed = True

        base_dir = project_dir / ".fuzzforge" / "cognee" / f"project_{self.project.id}"
        data_dir = base_dir / "data"
        system_dir = base_dir / "system"

        # Pre-create the kuzu_db/lancedb store directories so downstream
        # Cognee components find them ready.
        for path in (
            base_dir,
            data_dir,
            system_dir,
            system_dir / "kuzu_db",
            system_dir / "lancedb",
        ):
            if not path.exists():
                path.mkdir(parents=True, exist_ok=True)

        if cognee.data_directory != str(data_dir):
            cognee.data_directory = str(data_dir)
            changed = True
        if cognee.system_directory != str(system_dir):
            cognee.system_directory = str(system_dir)
            changed = True

        return changed

    def get_api_url(self) -> str:
        """Get API URL, with FUZZFORGE_API_URL environment override."""
        return os.getenv("FUZZFORGE_API_URL", self.project.api_url)

    def get_timeout(self) -> int:
        """Get timeout, with FUZZFORGE_TIMEOUT environment override.

        Non-numeric override values are ignored.
        """
        env_timeout = os.getenv("FUZZFORGE_TIMEOUT")
        if env_timeout and env_timeout.isdigit():
            return int(env_timeout)
        return self.project.default_timeout

    def get_project_context(self, project_dir: Path) -> Dict[str, str]:
        """Return project metadata for AI integrations."""
        self.ensure_cognee_defaults(project_dir)
        return {
            "project_id": self.project.id or "unknown_project",
            "project_name": self.project.name,
            "tenant_id": self.project.tenant_id or "fuzzforge_tenant",
            "data_directory": self.cognee.data_directory,
            "system_directory": self.cognee.system_directory,
        }

    def get_cognee_config(self, project_dir: Path) -> Dict[str, Any]:
        """Expose Cognee configuration as a plain dictionary."""
        self.ensure_cognee_defaults(project_dir)
        return self.cognee.model_dump()
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Project-level helpers used across the CLI
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
def _get_project_paths(project_dir: Path) -> Dict[str, Path]:
|
||||
config_dir = project_dir / ".fuzzforge"
|
||||
return {
|
||||
"config_dir": config_dir,
|
||||
"config_path": config_dir / "config.yaml",
|
||||
}
|
||||
|
||||
|
||||
def get_project_config(project_dir: Optional[Path] = None) -> Optional[FuzzForgeConfig]:
    """Get configuration for the current project, or None if uninitialized."""
    root = Path(project_dir or Path.cwd())
    config_path = _get_project_paths(root)["config_path"]

    if not config_path.exists():
        return None

    config = FuzzForgeConfig.from_file(config_path)
    # Persist any metadata/directories that had to be backfilled on load.
    if config.ensure_cognee_defaults(root):
        config.save_to_file(config_path)
    return config
|
||||
|
||||
|
||||
def ensure_project_config(
    project_dir: Optional[Path] = None,
    project_name: Optional[str] = None,
    api_url: Optional[str] = None,
) -> FuzzForgeConfig:
    """Ensure project configuration exists, creating defaults if needed.

    Loads the existing config (or builds a default one), applies the
    optional name/url overrides, backfills Cognee defaults, and persists
    the result.

    Args:
        project_dir: Project root (defaults to CWD).
        project_name: Optional override for the project name.
        api_url: Optional override for the backend API URL.

    Returns:
        The persisted FuzzForgeConfig.
    """
    project_dir = Path(project_dir or Path.cwd())
    paths = _get_project_paths(project_dir)
    paths["config_dir"].mkdir(parents=True, exist_ok=True)
    config_path = paths["config_path"]

    config = (
        FuzzForgeConfig.from_file(config_path)
        if config_path.exists()
        else FuzzForgeConfig()
    )

    if project_name:
        config.project.name = project_name
    if api_url:
        config.project.api_url = api_url

    # Backfill metadata, then save unconditionally so updated name/url are
    # persisted even when nothing else changed. (The original branched on
    # ensure_cognee_defaults() but saved identically in both arms.)
    config.ensure_cognee_defaults(project_dir)
    config.save_to_file(config_path)

    return config
|
||||
|
||||
|
||||
def get_global_config() -> FuzzForgeConfig:
    """Get global user configuration (~/.config/fuzzforge/config.yaml)."""
    global_config_path = Path.home() / ".config" / "fuzzforge" / "config.yaml"
    if global_config_path.exists():
        return FuzzForgeConfig.from_file(global_config_path)
    # No global config on disk: return built-in defaults.
    return FuzzForgeConfig()
|
||||
|
||||
|
||||
def save_global_config(config: FuzzForgeConfig) -> None:
    """Save global user configuration to ~/.config/fuzzforge/config.yaml."""
    destination = Path.home() / ".config" / "fuzzforge" / "config.yaml"
    config.save_to_file(destination)
|
||||
|
||||
|
||||
# ----------------------------------------------------------------------
|
||||
# Compatibility layer for AI modules
|
||||
# ----------------------------------------------------------------------
|
||||
|
||||
class ProjectConfigManager:
    """Lightweight wrapper mimicking the legacy Config class used by the AI module."""

    def __init__(self, project_dir: Optional[Path] = None):
        # Bind to the project at project_dir (default: CWD). Raises
        # FileNotFoundError when the directory has no .fuzzforge config.
        self.project_dir = Path(project_dir or Path.cwd())
        paths = _get_project_paths(self.project_dir)
        # NOTE: legacy naming — config_path is the DIRECTORY, file_path the YAML file.
        self.config_path = paths["config_dir"]
        self.file_path = paths["config_path"]
        self._config = get_project_config(self.project_dir)
        if self._config is None:
            raise FileNotFoundError(
                f"FuzzForge project not initialized in {self.project_dir}. Run 'ff init'."
            )

    # Legacy API ------------------------------------------------------
    def is_initialized(self) -> bool:
        # True once the project config file exists on disk.
        return self.file_path.exists()

    def get_project_context(self) -> Dict[str, str]:
        # Delegate to the underlying config, bound to this project dir.
        return self._config.get_project_context(self.project_dir)

    def get_cognee_config(self) -> Dict[str, Any]:
        return self._config.get_cognee_config(self.project_dir)

    def setup_cognee_environment(self) -> None:
        # Export Cognee/LLM settings into os.environ for downstream modules.
        # No-op when the integration is disabled.
        cognee = self.get_cognee_config()
        if not cognee.get("enabled", True):
            return

        # Load project-specific environment overrides from .fuzzforge/.env if available
        env_file = self.project_dir / ".fuzzforge" / ".env"
        if env_file.exists():
            if load_dotenv:
                load_dotenv(env_file, override=False)
            else:
                # Minimal .env parser fallback when python-dotenv is absent;
                # pre-existing environment variables win (setdefault).
                try:
                    for line in env_file.read_text(encoding="utf-8").splitlines():
                        stripped = line.strip()
                        if not stripped or stripped.startswith("#"):
                            continue
                        if "=" not in stripped:
                            continue
                        key, value = stripped.split("=", 1)
                        os.environ.setdefault(key.strip(), value.strip())
                except Exception:  # pragma: no cover - best effort fallback
                    pass

        backend_access = "true" if cognee.get("backend_access_control", True) else "false"
        os.environ["ENABLE_BACKEND_ACCESS_CONTROL"] = backend_access
        os.environ["GRAPH_DATABASE_PROVIDER"] = cognee.get("graph_database_provider", "kuzu")

        data_dir = cognee.get("data_directory")
        system_dir = cognee.get("system_directory")
        tenant_id = cognee.get("tenant_id", "fuzzforge_tenant")

        if data_dir:
            os.environ["COGNEE_DATA_ROOT"] = data_dir
        if system_dir:
            os.environ["COGNEE_SYSTEM_ROOT"] = system_dir
        os.environ["COGNEE_USER_ID"] = tenant_id
        os.environ["COGNEE_TENANT_ID"] = tenant_id

        # Configure LLM provider defaults for Cognee. Values prefixed with COGNEE_
        # take precedence so users can segregate credentials.
        def _env(*names: str, default: str | None = None) -> str | None:
            # Return the first non-empty environment variable among *names*.
            for name in names:
                value = os.getenv(name)
                if value:
                    return value
            return default

        provider = _env(
            "LLM_COGNEE_PROVIDER",
            "COGNEE_LLM_PROVIDER",
            "LLM_PROVIDER",
            default="openai",
        )
        model = _env(
            "LLM_COGNEE_MODEL",
            "COGNEE_LLM_MODEL",
            "LLM_MODEL",
            "LITELLM_MODEL",
            default="gpt-4o-mini",
        )
        api_key = _env(
            "LLM_COGNEE_API_KEY",
            "COGNEE_LLM_API_KEY",
            "LLM_API_KEY",
            "OPENAI_API_KEY",
        )
        endpoint = _env("LLM_COGNEE_ENDPOINT", "COGNEE_LLM_ENDPOINT", "LLM_ENDPOINT")
        api_version = _env(
            "LLM_COGNEE_API_VERSION",
            "COGNEE_LLM_API_VERSION",
            "LLM_API_VERSION",
        )
        max_tokens = _env(
            "LLM_COGNEE_MAX_TOKENS",
            "COGNEE_LLM_MAX_TOKENS",
            "LLM_MAX_TOKENS",
        )

        if provider:
            os.environ["LLM_PROVIDER"] = provider
        if model:
            os.environ["LLM_MODEL"] = model
            # Maintain backwards compatibility with components expecting LITELLM_MODEL
            os.environ.setdefault("LITELLM_MODEL", model)
        if api_key:
            os.environ["LLM_API_KEY"] = api_key
            # Provide OPENAI_API_KEY fallback when using OpenAI-compatible providers
            if provider and provider.lower() in {"openai", "azure_openai", "custom"}:
                os.environ.setdefault("OPENAI_API_KEY", api_key)
        if endpoint:
            os.environ["LLM_ENDPOINT"] = endpoint
        if api_version:
            os.environ["LLM_API_VERSION"] = api_version
        if max_tokens:
            os.environ["LLM_MAX_TOKENS"] = str(max_tokens)

        # Provide a default MCP endpoint for local FuzzForge backend access when unset
        if not os.getenv("FUZZFORGE_MCP_URL"):
            os.environ["FUZZFORGE_MCP_URL"] = os.getenv(
                "FUZZFORGE_DEFAULT_MCP_URL",
                "http://localhost:8010/mcp",
            )

    def refresh(self) -> None:
        """Reload configuration from disk."""
        self._config = get_project_config(self.project_dir)
        if self._config is None:
            raise FileNotFoundError(
                f"FuzzForge project not initialized in {self.project_dir}. Run 'ff init'."
            )

    # Convenience accessors ------------------------------------------
    @property
    def fuzzforge_dir(self) -> Path:
        # The project's .fuzzforge directory.
        return self.config_path

    def get_api_url(self) -> str:
        return self._config.get_api_url()

    def get_timeout(self) -> int:
        return self._config.get_timeout()
|
||||
@@ -0,0 +1,73 @@
|
||||
"""
|
||||
Constants for FuzzForge CLI.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
# Database constants
|
||||
DEFAULT_DB_TIMEOUT = 30.0
|
||||
DEFAULT_CLEANUP_DAYS = 90
|
||||
STATS_SAMPLE_SIZE = 100
|
||||
|
||||
# Network constants
|
||||
DEFAULT_API_TIMEOUT = 30.0
|
||||
MAX_RETRIES = 3
|
||||
RETRY_DELAY = 1.0
|
||||
POLL_INTERVAL = 5.0
|
||||
|
||||
# Display constants
|
||||
MAX_RUN_ID_DISPLAY_LENGTH = 15
|
||||
MAX_DESCRIPTION_LENGTH = 50
|
||||
MAX_DEFAULT_VALUE_LENGTH = 30
|
||||
|
||||
# Progress constants
|
||||
PROGRESS_STEP_DELAYS = {
|
||||
"validating": 0.3,
|
||||
"connecting": 0.2,
|
||||
"uploading": 0.2,
|
||||
"creating": 0.3,
|
||||
"initializing": 0.2
|
||||
}
|
||||
|
||||
# Status emojis
|
||||
STATUS_EMOJIS = {
|
||||
"completed": "✅",
|
||||
"running": "🔄",
|
||||
"failed": "❌",
|
||||
"queued": "⏳",
|
||||
"cancelled": "⏹️",
|
||||
"pending": "📋",
|
||||
"unknown": "❓"
|
||||
}
|
||||
|
||||
# Severity styles for Rich
|
||||
SEVERITY_STYLES = {
|
||||
"error": "bold red",
|
||||
"warning": "bold yellow",
|
||||
"note": "bold blue",
|
||||
"info": "bold cyan"
|
||||
}
|
||||
|
||||
# Default volume modes
|
||||
DEFAULT_VOLUME_MODE = "ro"
|
||||
SUPPORTED_VOLUME_MODES = ["ro", "rw"]
|
||||
|
||||
# Default export formats
|
||||
DEFAULT_EXPORT_FORMAT = "sarif"
|
||||
SUPPORTED_EXPORT_FORMATS = ["sarif", "json", "csv"]
|
||||
|
||||
# Default configuration
|
||||
DEFAULT_CONFIG = {
|
||||
"api_url": "http://localhost:8000",
|
||||
"timeout": DEFAULT_API_TIMEOUT,
|
||||
"max_retries": MAX_RETRIES,
|
||||
}
|
||||
@@ -0,0 +1,661 @@
|
||||
"""
|
||||
Database module for FuzzForge CLI.
|
||||
|
||||
Handles SQLite database operations for local project management,
|
||||
including runs, findings, and crash storage.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import sqlite3
|
||||
import json
|
||||
import logging
|
||||
from datetime import datetime
|
||||
from pathlib import Path
|
||||
from typing import Dict, Any, List, Optional, Union
|
||||
from contextlib import contextmanager
|
||||
|
||||
from pydantic import BaseModel
|
||||
from .constants import DEFAULT_DB_TIMEOUT, DEFAULT_CLEANUP_DAYS, STATS_SAMPLE_SIZE
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RunRecord(BaseModel):
    """Database record for a single workflow run (mirrors the ``runs`` table)."""
    run_id: str                              # Primary key
    workflow: str                            # Name of the executed workflow
    status: str                              # Run status string (e.g. stored by update_run_status)
    target_path: str                         # Path that was submitted for analysis
    parameters: Dict[str, Any] = {}          # Workflow parameters; stored as JSON text
    created_at: datetime                     # When the run was created
    completed_at: Optional[datetime] = None  # None while the run has not finished
    metadata: Dict[str, Any] = {}            # Free-form extra data; stored as JSON text
|
||||
|
||||
|
||||
class FindingRecord(BaseModel):
    """Database record for findings (mirrors the ``findings`` table)."""
    id: Optional[int] = None        # Autoincrement ID; None before first insert
    run_id: str                     # Run that produced these findings
    sarif_data: Dict[str, Any]      # Full SARIF document; stored as JSON text
    summary: Dict[str, Any] = {}    # Precomputed summary; stored as JSON text
    created_at: datetime            # When the findings were recorded
|
||||
|
||||
|
||||
class CrashRecord(BaseModel):
    """Database record for crash reports (mirrors the ``crashes`` table)."""
    id: Optional[int] = None          # Autoincrement ID; None before first insert
    run_id: str                       # Run during which the crash occurred
    crash_id: str                     # Identifier of the individual crash
    signal: Optional[str] = None      # Signal name/number if one was captured
    stack_trace: Optional[str] = None # Stack trace text if available
    input_file: Optional[str] = None  # Path to the crashing input if saved
    severity: str = "medium"          # Defaults to 'medium', matching the table default
    timestamp: datetime               # When the crash was observed
|
||||
|
||||
|
||||
class FuzzForgeDatabase:
    """SQLite-backed store for FuzzForge CLI project data.

    Persists workflow runs, SARIF findings, and crash reports in a single
    database file. Each operation opens a short-lived connection via
    :meth:`connection`, which enables WAL journaling for better concurrency.
    """

    SCHEMA = """
    CREATE TABLE IF NOT EXISTS runs (
        run_id TEXT PRIMARY KEY,
        workflow TEXT NOT NULL,
        status TEXT NOT NULL,
        target_path TEXT NOT NULL,
        parameters TEXT DEFAULT '{}',
        created_at TIMESTAMP NOT NULL,
        completed_at TIMESTAMP,
        metadata TEXT DEFAULT '{}'
    );

    CREATE TABLE IF NOT EXISTS findings (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        run_id TEXT NOT NULL,
        sarif_data TEXT NOT NULL,
        summary TEXT DEFAULT '{}',
        created_at TIMESTAMP NOT NULL,
        FOREIGN KEY (run_id) REFERENCES runs (run_id)
    );

    CREATE TABLE IF NOT EXISTS crashes (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        run_id TEXT NOT NULL,
        crash_id TEXT NOT NULL,
        signal TEXT,
        stack_trace TEXT,
        input_file TEXT,
        severity TEXT DEFAULT 'medium',
        timestamp TIMESTAMP NOT NULL,
        FOREIGN KEY (run_id) REFERENCES runs (run_id)
    );

    CREATE INDEX IF NOT EXISTS idx_runs_status ON runs (status);
    CREATE INDEX IF NOT EXISTS idx_runs_workflow ON runs (workflow);
    CREATE INDEX IF NOT EXISTS idx_runs_created_at ON runs (created_at);
    CREATE INDEX IF NOT EXISTS idx_findings_run_id ON findings (run_id);
    CREATE INDEX IF NOT EXISTS idx_crashes_run_id ON crashes (run_id);
    """

    def __init__(self, db_path: Union[str, Path]):
        """Open the database at *db_path*, creating parent directories and schema."""
        self.db_path = Path(db_path)
        self.db_path.parent.mkdir(parents=True, exist_ok=True)
        self._initialize_db()

    def _initialize_db(self) -> None:
        """Apply the schema, transparently recovering from a corrupted file."""
        try:
            with self.connection() as conn:
                # Surface corruption before touching the schema.
                conn.execute("PRAGMA integrity_check").fetchone()
                conn.executescript(self.SCHEMA)
        except sqlite3.DatabaseError as e:
            logger.warning(f"Database corruption detected: {e}")
            # Keep the corrupted file for post-mortem analysis instead of deleting it.
            backup_path = self.db_path.with_suffix('.db.corrupted')
            if self.db_path.exists():
                self.db_path.rename(backup_path)
                logger.info(f"Corrupted database backed up to: {backup_path}")

            # Start over with a fresh, empty database.
            with self.connection() as conn:
                conn.executescript(self.SCHEMA)
            logger.info("Created fresh database after corruption")

    @contextmanager
    def connection(self):
        """Context manager yielding a configured sqlite3 connection.

        Commits on success, rolls back on error, and always closes the
        connection. Common SQLite failures (locked file, malformed image)
        are re-raised with actionable messages.
        """
        conn = None
        try:
            conn = sqlite3.connect(
                self.db_path,
                detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES,
                timeout=DEFAULT_DB_TIMEOUT
            )
            conn.row_factory = sqlite3.Row
            # WAL mode allows concurrent readers alongside a single writer.
            conn.execute("PRAGMA journal_mode=WAL")
            conn.execute("PRAGMA optimize")
            yield conn
            conn.commit()
        except sqlite3.OperationalError as e:
            if conn:
                try:
                    conn.rollback()
                except Exception:
                    pass  # Connection might be broken
            if "database is locked" in str(e).lower():
                raise sqlite3.OperationalError(
                    "Database is locked. Another FuzzForge process may be running."
                ) from e
            elif "database disk image is malformed" in str(e).lower():
                raise sqlite3.DatabaseError(
                    "Database is corrupted. Use 'ff init --force' to reset."
                ) from e
            raise
        except Exception:
            if conn:
                try:
                    conn.rollback()
                except Exception:
                    pass  # Connection might be broken
            raise
        finally:
            if conn:
                try:
                    conn.close()
                except Exception:
                    pass  # Ensure cleanup even if close fails

    # Row deserialization helpers ------------------------------------

    @staticmethod
    def _run_from_row(row: sqlite3.Row) -> RunRecord:
        """Build a RunRecord from a row; raises if a JSON column is corrupt."""
        return RunRecord(
            run_id=row["run_id"],
            workflow=row["workflow"],
            status=row["status"],
            target_path=row["target_path"],
            parameters=json.loads(row["parameters"] or "{}"),
            created_at=row["created_at"],
            completed_at=row["completed_at"],
            metadata=json.loads(row["metadata"] or "{}")
        )

    @staticmethod
    def _finding_from_row(row: sqlite3.Row) -> FindingRecord:
        """Build a FindingRecord from a row; raises if a JSON column is corrupt."""
        return FindingRecord(
            id=row["id"],
            run_id=row["run_id"],
            sarif_data=json.loads(row["sarif_data"]),
            summary=json.loads(row["summary"]),
            created_at=row["created_at"]
        )

    # Run management methods ------------------------------------------

    def save_run(self, run: RunRecord) -> None:
        """Insert or replace a run record, validating JSON serialization first.

        Raises:
            ValueError: If parameters or metadata cannot be JSON-encoded.
        """
        try:
            # Serialize before opening the connection so a bad payload never
            # leaves a half-finished transaction behind.
            parameters_json = json.dumps(run.parameters)
            metadata_json = json.dumps(run.metadata)

            with self.connection() as conn:
                conn.execute("""
                    INSERT OR REPLACE INTO runs
                    (run_id, workflow, status, target_path, parameters, created_at, completed_at, metadata)
                    VALUES (?, ?, ?, ?, ?, ?, ?, ?)
                """, (
                    run.run_id,
                    run.workflow,
                    run.status,
                    run.target_path,
                    parameters_json,
                    run.created_at,
                    run.completed_at,
                    metadata_json
                ))
        except (TypeError, ValueError) as e:
            raise ValueError(f"Failed to serialize run data: {e}") from e

    def get_run(self, run_id: str) -> Optional[RunRecord]:
        """Return the run with *run_id*, or None if it does not exist.

        Corrupted JSON columns are tolerated: the record is returned with
        empty parameters/metadata instead of failing.
        """
        with self.connection() as conn:
            row = conn.execute(
                "SELECT * FROM runs WHERE run_id = ?",
                (run_id,)
            ).fetchone()

        if row is None:
            return None

        try:
            return self._run_from_row(row)
        except (json.JSONDecodeError, TypeError) as e:
            logger.warning(f"Failed to deserialize run {run_id}: {e}")
            # Degrade gracefully: keep the scalar columns, drop the corrupt JSON.
            return RunRecord(
                run_id=row["run_id"],
                workflow=row["workflow"],
                status=row["status"],
                target_path=row["target_path"],
                parameters={},
                created_at=row["created_at"],
                completed_at=row["completed_at"],
                metadata={}
            )

    def list_runs(
        self,
        workflow: Optional[str] = None,
        status: Optional[str] = None,
        limit: int = 50
    ) -> List[RunRecord]:
        """List runs, newest first, optionally filtered by workflow and status."""
        query = "SELECT * FROM runs WHERE 1=1"
        params: List[Any] = []

        if workflow:
            query += " AND workflow = ?"
            params.append(workflow)

        if status:
            query += " AND status = ?"
            params.append(status)

        query += " ORDER BY created_at DESC LIMIT ?"
        params.append(limit)

        with self.connection() as conn:
            rows = conn.execute(query, params).fetchall()

        runs = []
        for row in rows:
            try:
                runs.append(self._run_from_row(row))
            except (json.JSONDecodeError, TypeError) as e:
                # Skip corrupted records instead of failing the whole listing.
                logger.warning(f"Skipping corrupted run {row['run_id']}: {e}")
        return runs

    def update_run_status(self, run_id: str, status: str, completed_at: Optional[datetime] = None):
        """Set the status (and optional completion time) of a run."""
        with self.connection() as conn:
            conn.execute(
                "UPDATE runs SET status = ?, completed_at = ? WHERE run_id = ?",
                (status, completed_at, run_id)
            )

    # Findings management methods -------------------------------------

    def save_findings(self, finding: FindingRecord) -> int:
        """Insert a findings record and return its autoincrement ID."""
        with self.connection() as conn:
            cursor = conn.execute("""
                INSERT INTO findings (run_id, sarif_data, summary, created_at)
                VALUES (?, ?, ?, ?)
            """, (
                finding.run_id,
                json.dumps(finding.sarif_data),
                json.dumps(finding.summary),
                finding.created_at
            ))
            return cursor.lastrowid

    def get_findings(self, run_id: str) -> Optional[FindingRecord]:
        """Return the most recent findings record for a run, or None."""
        with self.connection() as conn:
            row = conn.execute(
                "SELECT * FROM findings WHERE run_id = ? ORDER BY created_at DESC LIMIT 1",
                (run_id,)
            ).fetchone()

        return self._finding_from_row(row) if row else None

    def list_findings(self, limit: int = 50) -> List[FindingRecord]:
        """Return the most recent findings records, newest first."""
        with self.connection() as conn:
            rows = conn.execute("""
                SELECT * FROM findings
                ORDER BY created_at DESC
                LIMIT ?
            """, (limit,)).fetchall()

        return [self._finding_from_row(row) for row in rows]

    def get_all_findings(self,
                         workflow: Optional[str] = None,
                         severity: Optional[List[str]] = None,
                         since_date: Optional[datetime] = None,
                         limit: Optional[int] = None) -> List[FindingRecord]:
        """Return findings with optional workflow/severity/date filters.

        Severity filtering inspects each SARIF result's ``level``; a finding
        is kept when any of its results matches a requested severity.
        Malformed records are skipped with a warning.
        """
        query = """
            SELECT f.*, r.workflow
            FROM findings f
            JOIN runs r ON f.run_id = r.run_id
            WHERE 1=1
        """
        params: List[Any] = []

        if workflow:
            query += " AND r.workflow = ?"
            params.append(workflow)

        if since_date:
            query += " AND f.created_at >= ?"
            params.append(since_date)

        query += " ORDER BY f.created_at DESC"

        if limit:
            query += " LIMIT ?"
            params.append(limit)

        with self.connection() as conn:
            rows = conn.execute(query, params).fetchall()

        findings = []
        for row in rows:
            try:
                finding = self._finding_from_row(row)
            except (json.JSONDecodeError, KeyError) as e:
                logger.warning(f"Skipping malformed finding {row['id']}: {e}")
                continue

            if severity and not self._matches_severity(finding, severity):
                continue
            findings.append(finding)

        return findings

    @staticmethod
    def _matches_severity(finding: FindingRecord, severity: List[str]) -> bool:
        """True when any SARIF result level in *finding* matches a requested severity."""
        levels = set()
        for run in finding.sarif_data.get("runs", []):
            for result in run.get("results", []):
                levels.add(result.get("level", "note").lower())
        return any(sev.lower() in levels for sev in severity)

    def get_findings_by_workflow(self, workflow: str) -> List[FindingRecord]:
        """Return all findings produced by a specific workflow."""
        return self.get_all_findings(workflow=workflow)

    def get_aggregated_stats(self) -> Dict[str, Any]:
        """Return aggregate statistics across all findings.

        Uses SQL aggregation where possible. The severity distribution is
        estimated from a sample of STATS_SAMPLE_SIZE findings and then
        extrapolated, so it is approximate on large databases.
        """
        with self.connection() as conn:
            total_findings = conn.execute("SELECT COUNT(*) FROM findings").fetchone()[0]
            total_runs = conn.execute("SELECT COUNT(DISTINCT run_id) FROM findings").fetchone()[0]

            workflow_stats = conn.execute("""
                SELECT r.workflow, COUNT(f.id) as count
                FROM findings f
                JOIN runs r ON f.run_id = r.run_id
                GROUP BY r.workflow
                ORDER BY count DESC
            """).fetchall()

            recent_findings = conn.execute("""
                SELECT COUNT(*) FROM findings
                WHERE created_at > datetime('now', '-7 days')
            """).fetchone()[0]

            # SQLite JSON functions avoid loading every SARIF blob into Python.
            severity_stats = conn.execute("""
                SELECT
                    SUM(json_array_length(json_extract(sarif_data, '$.runs[0].results'))) as total_issues,
                    COUNT(*) as finding_count
                FROM findings
                WHERE json_extract(sarif_data, '$.runs[0].results') IS NOT NULL
            """).fetchone()

            total_issues = severity_stats["total_issues"] or 0

            # Sample a bounded number of findings for the severity histogram.
            severity_counts = {"error": 0, "warning": 0, "note": 0, "info": 0}
            sample_findings = conn.execute("""
                SELECT sarif_data
                FROM findings
                LIMIT ?
            """, (STATS_SAMPLE_SIZE,)).fetchall()

        for row in sample_findings:
            try:
                data = json.loads(row["sarif_data"])
            except (json.JSONDecodeError, KeyError):
                continue
            for run in data.get("runs", []):
                for result in run.get("results", []):
                    level = result.get("level", "note").lower()
                    severity_counts[level] = severity_counts.get(level, 0) + 1

        # Extrapolate the sampled counts to the full population size.
        if total_findings > STATS_SAMPLE_SIZE:
            multiplier = total_findings / STATS_SAMPLE_SIZE
            for key in severity_counts:
                severity_counts[key] = int(severity_counts[key] * multiplier)

        return {
            "total_findings_records": total_findings,
            "total_runs": total_runs,
            "total_issues": total_issues,
            "severity_distribution": severity_counts,
            "workflows": {row["workflow"]: row["count"] for row in workflow_stats},
            "recent_findings": recent_findings,
            "last_updated": datetime.now()
        }

    # Crash management methods ----------------------------------------

    def save_crash(self, crash: CrashRecord) -> int:
        """Insert a crash report and return its autoincrement ID."""
        with self.connection() as conn:
            cursor = conn.execute("""
                INSERT INTO crashes
                (run_id, crash_id, signal, stack_trace, input_file, severity, timestamp)
                VALUES (?, ?, ?, ?, ?, ?, ?)
            """, (
                crash.run_id,
                crash.crash_id,
                crash.signal,
                crash.stack_trace,
                crash.input_file,
                crash.severity,
                crash.timestamp
            ))
            return cursor.lastrowid

    def get_crashes(self, run_id: str) -> List[CrashRecord]:
        """Return all crash reports for a run, newest first."""
        with self.connection() as conn:
            rows = conn.execute(
                "SELECT * FROM crashes WHERE run_id = ? ORDER BY timestamp DESC",
                (run_id,)
            ).fetchall()

        return [
            CrashRecord(
                id=row["id"],
                run_id=row["run_id"],
                crash_id=row["crash_id"],
                signal=row["signal"],
                stack_trace=row["stack_trace"],
                input_file=row["input_file"],
                severity=row["severity"],
                timestamp=row["timestamp"]
            )
            for row in rows
        ]

    # Utility methods -------------------------------------------------

    def cleanup_old_runs(self, keep_days: int = DEFAULT_CLEANUP_DAYS) -> int:
        """Delete runs older than *keep_days* days plus their findings/crashes.

        Returns:
            The number of runs removed.
        """
        # BUG FIX: the module imports only the `datetime` class, so the
        # original `datetime.timedelta(...)` raised AttributeError.
        from datetime import timedelta

        cutoff_date = datetime.now().replace(
            hour=0, minute=0, second=0, microsecond=0
        ) - timedelta(days=keep_days)

        with self.connection() as conn:
            old_runs = conn.execute(
                "SELECT run_id FROM runs WHERE created_at < ?",
                (cutoff_date,)
            ).fetchall()

            if not old_runs:
                return 0

            run_ids = [row["run_id"] for row in old_runs]
            placeholders = ",".join("?" * len(run_ids))

            # Delete dependent rows first so no orphaned records are left behind.
            conn.execute(f"DELETE FROM findings WHERE run_id IN ({placeholders})", run_ids)
            conn.execute(f"DELETE FROM crashes WHERE run_id IN ({placeholders})", run_ids)
            conn.execute(f"DELETE FROM runs WHERE run_id IN ({placeholders})", run_ids)

            return len(run_ids)

    def get_stats(self) -> Dict[str, Any]:
        """Return basic table counts and recent-activity statistics."""
        with self.connection() as conn:
            stats: Dict[str, Any] = {}

            run_stats = conn.execute("""
                SELECT status, COUNT(*) as count
                FROM runs
                GROUP BY status
            """).fetchall()
            stats["runs_by_status"] = {row["status"]: row["count"] for row in run_stats}

            stats["total_runs"] = conn.execute("SELECT COUNT(*) FROM runs").fetchone()[0]
            stats["total_findings"] = conn.execute("SELECT COUNT(*) FROM findings").fetchone()[0]
            stats["total_crashes"] = conn.execute("SELECT COUNT(*) FROM crashes").fetchone()[0]

            stats["runs_last_7_days"] = conn.execute("""
                SELECT COUNT(*) FROM runs
                WHERE created_at > datetime('now', '-7 days')
            """).fetchone()[0]

            return stats

    def health_check(self) -> Dict[str, Any]:
        """Check database integrity and hygiene.

        Returns:
            Dict with "healthy" flag, "issues" list, and "recommendations" list.
        """
        health: Dict[str, Any] = {
            "healthy": True,
            "issues": [],
            "recommendations": []
        }

        try:
            with self.connection() as conn:
                integrity_result = conn.execute("PRAGMA integrity_check").fetchone()
                if integrity_result[0] != "ok":
                    health["healthy"] = False
                    health["issues"].append(f"Database integrity check failed: {integrity_result[0]}")

                # Findings/crashes whose run row has been deleted.
                orphaned_findings = conn.execute("""
                    SELECT COUNT(*) FROM findings
                    WHERE run_id NOT IN (SELECT run_id FROM runs)
                """).fetchone()[0]

                if orphaned_findings > 0:
                    health["issues"].append(f"Found {orphaned_findings} orphaned findings")
                    health["recommendations"].append("Run database cleanup to remove orphaned records")

                orphaned_crashes = conn.execute("""
                    SELECT COUNT(*) FROM crashes
                    WHERE run_id NOT IN (SELECT run_id FROM runs)
                """).fetchone()[0]

                if orphaned_crashes > 0:
                    health["issues"].append(f"Found {orphaned_crashes} orphaned crashes")

                db_size = self.db_path.stat().st_size if self.db_path.exists() else 0
                if db_size > 100 * 1024 * 1024:  # 100MB
                    health["recommendations"].append("Database is large (>100MB). Consider cleanup.")

        except Exception as e:
            health["healthy"] = False
            health["issues"].append(f"Health check failed: {e}")

        return health
|
||||
|
||||
|
||||
def get_project_db(project_dir: Optional[Path] = None) -> Optional[FuzzForgeDatabase]:
    """Open the findings database of a FuzzForge project.

    Returns None when *project_dir* (or the CWD) has no ``.fuzzforge``
    directory; raises ``sqlite3.DatabaseError`` if the database exists but
    cannot be opened.
    """
    base = Path.cwd() if project_dir is None else project_dir
    marker = base / ".fuzzforge"
    if not marker.exists():
        return None

    try:
        return FuzzForgeDatabase(marker / "findings.db")
    except Exception as exc:
        logger.error(f"Failed to open project database: {exc}")
        raise sqlite3.DatabaseError(f"Failed to open project database: {exc}") from exc
|
||||
|
||||
|
||||
def ensure_project_db(project_dir: Optional[Path] = None) -> FuzzForgeDatabase:
    """Create (if needed) and open the project findings database.

    Raises:
        PermissionError: If the ``.fuzzforge`` directory cannot be created.
        sqlite3.DatabaseError: If the database cannot be created or opened.
    """
    base = Path.cwd() if project_dir is None else project_dir
    marker = base / ".fuzzforge"

    try:
        marker.mkdir(exist_ok=True)
    except PermissionError as exc:
        raise PermissionError(f"Cannot create .fuzzforge directory: {exc}") from exc

    try:
        return FuzzForgeDatabase(marker / "findings.db")
    except Exception as exc:
        logger.error(f"Failed to create/open project database: {exc}")
        raise sqlite3.DatabaseError(f"Failed to create project database: {exc}") from exc
|
||||
@@ -0,0 +1,487 @@
|
||||
"""
|
||||
Enhanced exception handling and error utilities for FuzzForge CLI with rich context display.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import time
|
||||
import functools
|
||||
from typing import Any, Callable, Optional, Type, Union, List
|
||||
from pathlib import Path
|
||||
|
||||
import typer
|
||||
import httpx
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
from rich.table import Table
|
||||
from rich.columns import Columns
|
||||
from rich.syntax import Syntax
|
||||
from rich.markdown import Markdown
|
||||
|
||||
# Import SDK exceptions for rich handling
|
||||
from fuzzforge_sdk.exceptions import (
|
||||
FuzzForgeError as SDKFuzzForgeError,
|
||||
FuzzForgeHTTPError,
|
||||
DeploymentError,
|
||||
WorkflowExecutionError,
|
||||
ContainerError,
|
||||
VolumeError,
|
||||
ValidationError as SDKValidationError,
|
||||
ConnectionError as SDKConnectionError
|
||||
)
|
||||
|
||||
console = Console()  # Shared Rich console used by the error-display helpers in this module.
|
||||
|
||||
|
||||
class FuzzForgeError(Exception):
    """Base class for legacy CLI-specific FuzzForge errors.

    Carries an optional remediation hint and the process exit code the CLI
    should use when the error reaches the top level (defaults to 1).
    """

    def __init__(self, message: str, hint: Optional[str] = None, exit_code: int = 1):
        super().__init__(message)
        self.message = message
        self.hint = hint
        self.exit_code = exit_code
|
||||
|
||||
|
||||
class ProjectNotFoundError(FuzzForgeError):
    """Raised when no FuzzForge project is found in the current directory."""

    def __init__(self) -> None:
        message = "No FuzzForge project found in current directory"
        remedy = "Run 'ff init' to initialize a new project"
        super().__init__(message, remedy)
|
||||
|
||||
|
||||
class APIConnectionError(FuzzForgeError):
    """Legacy API connection error for backward compatibility.

    Maps low-level httpx transport errors onto user-facing messages with
    remediation hints.

    NOTE(review): the isinstance chain below looks order-sensitive —
    httpx.ConnectTimeout appears to be a subclass of httpx.TimeoutException,
    so the more specific checks must stay first; confirm against httpx's
    exception hierarchy before reordering.
    """

    def __init__(self, url: str, original_error: Exception):
        self.url = url
        self.original_error = original_error

        # Translate the transport-level exception into an actionable message.
        if isinstance(original_error, httpx.ConnectTimeout):
            message = f"Connection timeout to FuzzForge API at {url}"
            hint = "Check if the API server is running and the URL is correct"
        elif isinstance(original_error, httpx.ConnectError):
            message = f"Failed to connect to FuzzForge API at {url}"
            hint = "Verify the API URL is correct and the server is accessible"
        elif isinstance(original_error, httpx.TimeoutException):
            message = f"Request timeout to FuzzForge API at {url}"
            hint = "The API server may be overloaded. Try again later"
        else:
            message = f"API connection error: {str(original_error)}"
            hint = "Check your network connection and API configuration"

        super().__init__(message, hint)
|
||||
|
||||
|
||||
class DatabaseError(FuzzForgeError):
    """Raised when a local database operation fails."""

    def __init__(self, operation: str, original_error: Exception):
        self.operation = operation
        self.original_error = original_error
        super().__init__(
            f"Database error during {operation}: {str(original_error)}",
            "The database may be corrupted. Try 'ff init --force' to reset",
        )
|
||||
|
||||
|
||||
class ValidationError(FuzzForgeError):
    """Legacy validation error for CLI-specific validation failures."""

    def __init__(self, field: str, value: Any, expected: str):
        self.field = field
        self.value = value
        self.expected = expected
        super().__init__(f"Invalid {field}: {value}", f"Expected {expected}")
|
||||
|
||||
|
||||
class FileOperationError(FuzzForgeError):
    """Raised when a filesystem operation fails."""

    def __init__(self, operation: str, path: Union[str, Path], original_error: Exception):
        self.operation = operation
        self.path = Path(path)
        self.original_error = original_error

        # FileNotFoundError and PermissionError are disjoint, so the order
        # of these checks does not affect behavior.
        if isinstance(original_error, FileNotFoundError):
            message = f"File not found: {path}"
            hint = "Check the path exists and you have permission to access it"
        elif isinstance(original_error, PermissionError):
            message = f"Permission denied: {path}"
            hint = "Check file permissions or run with appropriate privileges"
        else:
            message = f"File operation failed ({operation}): {str(original_error)}"
            hint = "Check the file path and permissions"

        super().__init__(message, hint)
|
||||
|
||||
|
||||
def display_container_logs(diagnostics, title: str = "Container Logs"):
    """Render the tail (last 20 entries) of container logs in a Rich panel."""
    if not diagnostics or not diagnostics.logs:
        return

    # Slicing with [-20:] already handles lists shorter than 20 entries,
    # and returns the same entries as the original length check.
    recent = diagnostics.logs[-20:] if len(diagnostics.logs) > 20 else diagnostics.logs

    level_styles = {
        'ERROR': 'red',
        'WARNING': 'yellow',
        'INFO': 'blue',
        'DEBUG': 'dim white'
    }

    lines = []
    for entry in recent:
        stamp = entry.timestamp.strftime("%H:%M:%S")
        style = level_styles.get(entry.level, 'white')
        lines.append(
            f"[dim]{stamp}[/dim] [{style}]{entry.level}[/{style}] {entry.message}"
        )

    if lines:
        panel = Panel(
            "\n".join(lines),
            title=title,
            title_align="left",
            border_style="dim",
            expand=False
        )
        console.print(panel)
|
||||
|
||||
|
||||
def display_container_diagnostics(diagnostics):
    """Print container status, resource usage, and volume mounts with Rich."""
    if not diagnostics:
        return

    table = Table(title="Container Status", show_header=False, box=None)
    table.add_column("Property", style="bold")
    table.add_column("Value")

    palette = {
        'running': 'green',
        'exited': 'red',
        'failed': 'red',
        'created': 'yellow',
        'unknown': 'dim'
    }
    color = palette.get(diagnostics.status.lower(), 'white')
    table.add_row("Status", f"[{color}]{diagnostics.status}[/{color}]")

    if diagnostics.exit_code is not None:
        exit_style = 'green' if diagnostics.exit_code == 0 else 'red'
        table.add_row("Exit Code", f"[{exit_style}]{diagnostics.exit_code}[/{exit_style}]")

    if diagnostics.error:
        table.add_row("Error", f"[red]{diagnostics.error}[/red]")

    # Resource usage (only the memory limit is surfaced here).
    if diagnostics.resource_usage:
        limit = diagnostics.resource_usage.get('memory_limit', 0)
        if limit > 0:
            table.add_row("Memory Limit", f"{limit // (1024 * 1024)} MB")

    console.print(table)

    if diagnostics.volume_mounts:
        console.print("\n[bold]Volume Mounts:[/bold]")
        for mount in diagnostics.volume_mounts:
            console.print(
                f"  {mount['source']} → {mount['destination']} ([dim]{mount['mode']}[/dim])"
            )
|
||||
|
||||
|
||||
def display_error_patterns(error_patterns):
    """Print detected error patterns grouped by type, truncating long message lists."""
    if not error_patterns:
        return

    console.print("\n[bold red]🔍 Detected Issues:[/bold red]")

    for error_type, messages in error_patterns.items():
        # Turn snake_case pattern keys into a human-friendly heading.
        heading = error_type.replace('_', ' ').title()
        console.print(f"\n[bold yellow]• {heading}:[/bold yellow]")

        # Cap output at three example messages per pattern.
        for message in messages[:3]:
            console.print(f" [dim]▸[/dim] {message}")

        hidden = len(messages) - 3
        if hidden > 0:
            console.print(f" [dim]▸ ... and {hidden} more similar messages[/dim]")
|
||||
|
||||
|
||||
def display_suggestions(suggestions: List[str]):
    """Print up to six numbered, actionable fix suggestions."""
    if not suggestions:
        return

    console.print("\n[bold green]💡 Suggested Fixes:[/bold green]")

    # Cap the list so the terminal output stays digestible.
    for number, suggestion in enumerate(suggestions[:6], start=1):
        console.print(f" [bold green]{number}.[/bold green] {suggestion}")
|
||||
|
||||
|
||||
def handle_error(error: Exception, context: str = "") -> None:
    """
    Display comprehensive error messages with rich context and exit appropriately.

    Dispatches on the exception type, most specific first:
      1. SDKFuzzForgeError -> rich panel plus optional container diagnostics,
         error patterns, API context, and suggested fixes (exit code 1)
      2. FuzzForgeError    -> legacy plain message with optional hint
         (exit code taken from the error itself)
      3. KeyboardInterrupt -> cancellation notice (exit code 130)
      4. anything else     -> generic "unexpected error" panel (exit code 1)

    This function never returns normally: every branch raises typer.Exit.

    Args:
        error: The exception that occurred
        context: Additional context about where the error occurred
    """
    # Handle SDK errors with rich context
    if isinstance(error, SDKFuzzForgeError):
        console.print()  # Add some spacing

        # Main error message
        error_title = f"❌ {error.__class__.__name__}"
        if context:
            error_title += f" during {context}"

        console.print(Panel(
            error.get_summary(),
            title=error_title,
            title_align="left",
            border_style="red",
            expand=False
        ))

        # Show detailed context if available
        if hasattr(error, 'context') and error.context:
            ctx = error.context

            # Container diagnostics (table + captured logs)
            if ctx.container_diagnostics:
                console.print("\n[bold]Container Diagnostics:[/bold]")
                display_container_diagnostics(ctx.container_diagnostics)
                display_container_logs(ctx.container_diagnostics)

            # Error patterns detected in the container output
            if ctx.error_patterns:
                display_error_patterns(ctx.error_patterns)

            # API context: request URL and parsed response body.
            # The 'raw' key marks an unparseable body, which we deliberately
            # do not dump to the terminal.
            if ctx.url:
                console.print(f"\n[dim]Request URL: {ctx.url}[/dim]")

            if ctx.response_data and isinstance(ctx.response_data, dict) and 'raw' not in ctx.response_data:
                console.print(f"[dim]API Response: {ctx.response_data}[/dim]")

            # Actionable suggestions
            if ctx.suggested_fixes:
                display_suggestions(ctx.suggested_fixes)

        console.print()  # Add spacing before exit
        raise typer.Exit(1)

    # Handle legacy CLI errors
    elif isinstance(error, FuzzForgeError):
        error_text = Text()
        error_text.append("❌ ", style="red")
        error_text.append(error.message, style="red")

        if context:
            error_text.append(f" ({context})", style="dim red")

        console.print(error_text)

        if error.hint:
            hint_text = Text()
            hint_text.append("💡 ", style="yellow")
            hint_text.append(error.hint, style="yellow")
            console.print(hint_text)

        # Legacy errors carry their own process exit code.
        raise typer.Exit(error.exit_code)

    elif isinstance(error, KeyboardInterrupt):
        console.print("\n⏹️ Operation cancelled by user", style="yellow")
        raise typer.Exit(130)  # Standard exit code for SIGINT

    else:
        # Unexpected errors - show minimal info to user, log details
        console.print()

        error_panel = Panel(
            f"An unexpected error occurred: {str(error)}",
            title="❌ Unexpected Error",
            title_align="left",
            border_style="red",
            expand=False
        )

        if context:
            # NOTE(review): mutates Panel.title after construction; this works
            # because rich reads the title at render time — confirm against the
            # pinned rich version.
            error_panel.title += f" during {context}"

        console.print(error_panel)

        # Show error details for debugging
        console.print(f"\n[dim yellow]Error type: {type(error).__name__}[/dim yellow]")
        console.print(f"[dim yellow]Please report this issue if it persists[/dim yellow]")
        console.print()

        raise typer.Exit(1)
|
||||
|
||||
|
||||
def retry_on_network_error(max_retries: int = 3, delay: float = 1.0, backoff_multiplier: float = 2.0):
    """
    Decorator to retry network operations with exponential backoff.

    The wrapped callable is retried on httpx connect/timeout/network errors;
    the wait time is multiplied by ``backoff_multiplier`` after each failure.

    Args:
        max_retries: Maximum number of retry attempts
        delay: Initial delay between retries in seconds
        backoff_multiplier: Multiplier for exponential backoff
    """
    def decorator(func: Callable) -> Callable:
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wait = delay
            failure = None

            attempt = 0
            while attempt <= max_retries:
                try:
                    return func(*args, **kwargs)
                except (httpx.ConnectError, httpx.TimeoutException, httpx.NetworkError) as exc:
                    failure = exc

                    if attempt >= max_retries:
                        # Out of retries: surface a CLI-friendly error that
                        # carries the API base URL when we can find one on the
                        # first positional argument (typically a client object).
                        api_url = getattr(args[0], 'base_url', 'unknown') if args else 'unknown'
                        raise APIConnectionError(str(api_url), exc)

                    console.print(
                        f"🔄 Network error, retrying in {wait:.1f}s... "
                        f"(attempt {attempt + 1}/{max_retries})",
                        style="yellow"
                    )
                    time.sleep(wait)
                    wait *= backoff_multiplier
                attempt += 1

            # Defensive fallback; the loop always returns or raises above.
            if failure:
                raise failure

        return wrapper
    return decorator
|
||||
|
||||
|
||||
def validate_path(path: Union[str, Path], must_be_file: bool = False,
                  must_be_dir: bool = False, *, must_exist: bool = True) -> Path:
    """
    Validate file/directory paths with user-friendly error messages.

    Args:
        path: Path to validate
        must_exist: Whether the path must exist
        must_be_file: Whether the path must be a file
        must_be_dir: Whether the path must be a directory

    Returns:
        Validated Path object

    Raises:
        ValidationError: If validation fails
    """
    candidate = Path(path)
    exists = candidate.exists()

    # Each check applies only when relevant: the file/dir checks are skipped
    # for paths that do not exist (existence is must_exist's concern).
    if must_exist and not exists:
        raise ValidationError("path", str(path), "an existing path")

    if must_be_file and exists and not candidate.is_file():
        raise ValidationError("path", str(path), "a file")

    if must_be_dir and exists and not candidate.is_dir():
        raise ValidationError("path", str(path), "a directory")

    return candidate
|
||||
|
||||
|
||||
def validate_run_id(run_id: str) -> str:
    """
    Validate run ID format.

    A valid run ID is at least 8 characters and contains only alphanumeric
    characters and hyphens.

    Args:
        run_id: Run ID to validate

    Returns:
        Validated run ID

    Raises:
        ValidationError: If run ID format is invalid
    """
    if not run_id or len(run_id) < 8:
        raise ValidationError("run_id", run_id, "at least 8 characters")

    # Strip hyphens first so isalnum() checks only the remaining characters.
    stripped = run_id.replace('-', '')
    if not stripped.isalnum():
        raise ValidationError("run_id", run_id, "alphanumeric characters and hyphens only")

    return run_id
|
||||
|
||||
|
||||
def safe_json_load(file_path: Union[str, Path]) -> dict:
    """
    Safely load JSON file with proper error handling.

    Args:
        file_path: Path to JSON file

    Returns:
        Parsed JSON data

    Raises:
        FileOperationError: If file operation fails
        ValidationError: If JSON is invalid
    """
    # BUG FIX: the import used to live inside the `with` block, so when open()
    # raised anything other than FileNotFoundError/PermissionError (e.g.
    # IsADirectoryError), evaluating `except json.JSONDecodeError` raised a
    # NameError that masked the real failure. Import before the try instead.
    import json

    path_obj = Path(file_path)

    try:
        with open(path_obj, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, PermissionError) as e:
        raise FileOperationError("read", path_obj, e)
    except json.JSONDecodeError as e:
        raise ValidationError("JSON file", str(path_obj), f"valid JSON format (error: {e})")
    except Exception as e:
        # Catch-all for other OS-level failures (IsADirectoryError,
        # UnicodeDecodeError, ...), reported as a read failure.
        raise FileOperationError("read", path_obj, e)
|
||||
|
||||
|
||||
def require_project() -> Path:
    """
    Ensure we're in a FuzzForge project directory.

    Returns:
        Path to project root

    Raises:
        ProjectNotFoundError: If not in a project directory
    """
    cwd = Path.cwd()

    # Walk from the current directory up to the filesystem root, looking
    # for the .fuzzforge marker directory that identifies a project root.
    for candidate in (cwd, *cwd.parents):
        if (candidate / ".fuzzforge").is_dir():
            return candidate

    raise ProjectNotFoundError()
|
||||
@@ -0,0 +1,309 @@
|
||||
"""
|
||||
Fuzzy matching and smart suggestions for FuzzForge CLI.
|
||||
|
||||
Provides "Did you mean...?" functionality and intelligent command/parameter suggestions.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import difflib
|
||||
from typing import List, Optional, Dict, Any, Tuple
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.text import Text
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
class FuzzyMatcher:
    """Fuzzy matching engine for CLI commands and parameters.

    Holds static catalogs of known commands, workflow names, parameter names
    and common parameter values, and offers difflib-based "did you mean"
    lookups against each catalog.
    """

    def __init__(self):
        # Known commands and subcommands
        self.commands = {
            "init": ["project"],
            "workflows": ["list", "info"],
            "runs": ["submit", "status", "list", "rerun"],
            "findings": ["get", "list", "export", "all"],
            "monitor": ["stats", "crashes", "live"],
            "config": ["set", "get", "list", "init"],
            "ai": ["ask", "summarize", "explain"],
            "ingest": ["project", "findings"]
        }

        # Common workflow names
        self.workflow_names = [
            "security_assessment",
            "language_fuzzing",
            "infrastructure_scan",
            "static_analysis_scan",
            "penetration_testing_scan",
            "secret_detection_scan"
        ]

        # Common parameter names
        self.parameter_names = [
            "target_path",
            "volume_mode",
            "timeout",
            "workflow",
            "param",
            "param-file",
            "interactive",
            "wait",
            "live",
            "format",
            "output",
            "severity",
            "since",
            "limit",
            "stats",
            "export"
        ]

        # Common values
        self.common_values = {
            "volume_mode": ["ro", "rw"],
            "format": ["json", "csv", "html", "sarif"],
            "severity": ["critical", "high", "medium", "low", "info"]
        }

    @staticmethod
    def _best_match(user_input: str, candidates: List[str]) -> Optional[Tuple[str, float]]:
        """Return (closest candidate, similarity ratio) or None.

        Shared implementation for all find_closest_* methods: a single
        difflib lookup with a 0.6 cutoff, plus an exact similarity ratio
        for the winner.
        """
        matches = difflib.get_close_matches(user_input, candidates, n=1, cutoff=0.6)
        if not matches:
            return None
        match = matches[0]
        ratio = difflib.SequenceMatcher(None, user_input, match).ratio()
        return match, ratio

    def find_closest_command(self, user_input: str, command_group: Optional[str] = None) -> Optional[Tuple[str, float]]:
        """Find the closest matching command."""
        if command_group and command_group in self.commands:
            # Search within a specific command group
            candidates = self.commands[command_group]
        else:
            # Search all main commands
            candidates = list(self.commands.keys())
        return self._best_match(user_input, candidates)

    def find_closest_workflow(self, user_input: str) -> Optional[Tuple[str, float]]:
        """Find the closest matching workflow name."""
        return self._best_match(user_input, self.workflow_names)

    def find_closest_parameter(self, user_input: str) -> Optional[Tuple[str, float]]:
        """Find the closest matching parameter name."""
        # Remove leading dashes so "--formt" matches "format".
        clean_input = user_input.lstrip('-')
        return self._best_match(clean_input, self.parameter_names)

    def suggest_parameter_values(self, parameter: str, user_input: str) -> List[str]:
        """Suggest parameter values based on known options."""
        if parameter in self.common_values:
            values = self.common_values[parameter]
            if user_input:
                # Filter values that start with user input
                return [v for v in values if v.startswith(user_input.lower())]
            else:
                return values

        return []

    def get_command_suggestions(self, user_command: List[str]) -> Optional[Dict[str, Any]]:
        """Get suggestions for a user command that may have typos.

        Returns a dict with keys "type" ("main_command" or "subcommand"),
        "original", and "suggestions" (list of {text, confidence, type}),
        or None when nothing close enough exists.
        """
        if not user_command:
            return None

        suggestions = {"type": None, "original": user_command, "suggestions": []}

        # Check main command
        main_cmd = user_command[0]
        if main_cmd not in self.commands:
            closest = self.find_closest_command(main_cmd)
            if closest:
                match, confidence = closest
                suggestions["type"] = "main_command"
                suggestions["suggestions"].append({
                    "text": match,
                    "confidence": confidence,
                    "type": "command"
                })

        # Check subcommand if present (main_cmd is known here, so the
        # previous redundant `main_cmd in self.commands` re-check is gone).
        elif len(user_command) > 1:
            sub_cmd = user_command[1]
            if sub_cmd not in self.commands[main_cmd]:
                closest = self.find_closest_command(sub_cmd, main_cmd)
                if closest:
                    match, confidence = closest
                    suggestions["type"] = "subcommand"
                    suggestions["suggestions"].append({
                        "text": f"{main_cmd} {match}",
                        "confidence": confidence,
                        "type": "subcommand"
                    })

        return suggestions if suggestions["suggestions"] else None

    def suggest_workflow_fix(self, user_workflow: str) -> Optional[str]:
        """Suggest a workflow name correction."""
        closest = self.find_closest_workflow(user_workflow)
        if closest:
            match, confidence = closest
            if confidence > 0.6:  # Only suggest if reasonably confident
                return match
        return None
|
||||
|
||||
|
||||
def display_command_suggestion(suggestions: Dict[str, Any]):
    """Render a "did you mean" panel for a mistyped command."""
    if not suggestions or not suggestions["suggestions"]:
        return

    typed = " ".join(suggestions["original"])
    kind = suggestions["type"]

    body = Text()
    body.append("❓ Command not found: ", style="red")
    body.append(f"'{typed}'", style="bold red")
    body.append("\n\n")
    body.append("💡 Did you mean:\n", style="yellow")

    # One numbered line per candidate, with its match percentage.
    for rank, candidate in enumerate(suggestions["suggestions"], 1):
        percent = int(candidate["confidence"] * 100)
        body.append(f" {rank}. ", style="bold cyan")
        body.append(f"{candidate['text']}", style="bold white")
        body.append(f" ({percent}% match)", style="dim")
        body.append("\n")

    # Point the user at the relevant --help output for their mistake level.
    if kind == "main_command":
        body.append("\n💡 Use 'fuzzforge --help' to see all available commands", style="dim")
    elif kind == "subcommand":
        root = suggestions["original"][0]
        body.append(f"\n💡 Use 'fuzzforge {root} --help' to see available subcommands", style="dim")

    console.print(Panel(
        body,
        title="🤔 Command Suggestion",
        border_style="yellow",
        expand=False
    ))
|
||||
|
||||
|
||||
def display_workflow_suggestion(original: str, suggestion: str):
    """Render a "did you mean" panel for a mistyped workflow name."""
    body = Text()
    body.append("❓ Workflow not found: ", style="red")
    body.append(f"'{original}'", style="bold red")
    body.append("\n\n")
    body.append("💡 Did you mean: ", style="yellow")
    body.append(f"'{suggestion}'", style="bold green")
    body.append("?\n\n")
    body.append("💡 Use 'fuzzforge workflows' to see all available workflows", style="dim")

    panel = Panel(
        body,
        title="🔧 Workflow Suggestion",
        border_style="yellow",
        expand=False
    )
    console.print(panel)
|
||||
|
||||
|
||||
def display_parameter_suggestion(original: str, suggestion: str):
    """Render a "did you mean" panel for an unknown CLI parameter."""
    body = Text()
    body.append("❓ Unknown parameter: ", style="red")
    body.append(f"'{original}'", style="bold red")
    body.append("\n\n")
    body.append("💡 Did you mean: ", style="yellow")
    # The catalog stores bare names, so re-add the long-option dashes here.
    body.append(f"'--{suggestion}'", style="bold green")
    body.append("?\n\n")
    body.append("💡 Use '--help' to see all available parameters", style="dim")

    panel = Panel(
        body,
        title="⚙️ Parameter Suggestion",
        border_style="yellow",
        expand=False
    )
    console.print(panel)
|
||||
|
||||
|
||||
def enhanced_command_not_found_handler(command_parts: List[str]):
    """Handle command not found with fuzzy matching suggestions."""
    suggestions = FuzzyMatcher().get_command_suggestions(command_parts)

    if not suggestions:
        # No close match — fall back to pointing at the global help.
        console.print("❌ [red]Command not found[/red]")
        console.print("💡 Use 'fuzzforge --help' to see available commands")
        return

    display_command_suggestion(suggestions)
|
||||
|
||||
|
||||
def enhanced_workflow_not_found_handler(workflow_name: str):
    """Handle workflow not found with suggestions."""
    suggestion = FuzzyMatcher().suggest_workflow_fix(workflow_name)

    if not suggestion:
        # Nothing close enough — show the generic pointer to the catalog.
        console.print(f"❌ [red]Workflow '{workflow_name}' not found[/red]")
        console.print("💡 Use 'fuzzforge workflows' to see available workflows")
        return

    display_workflow_suggestion(workflow_name, suggestion)
|
||||
|
||||
|
||||
def enhanced_parameter_not_found_handler(parameter_name: str):
    """Handle unknown parameter with suggestions."""
    result = FuzzyMatcher().find_closest_parameter(parameter_name)

    if result is not None:
        match, confidence = result
        # Only surface a suggestion when the match is reasonably confident.
        if confidence > 0.6:
            display_parameter_suggestion(parameter_name, match)
            return

    console.print(f"❌ [red]Unknown parameter: '{parameter_name}'[/red]")
    console.print("💡 Use '--help' to see available parameters")
|
||||
|
||||
|
||||
# Global fuzzy matcher instance
# NOTE(review): the enhanced_*_handler functions above construct their own
# FuzzyMatcher per call; this shared instance appears intended for importers
# of this module — confirm which callers actually use it.
fuzzy_matcher = FuzzyMatcher()
|
||||
@@ -0,0 +1,105 @@
|
||||
"""Utilities for collecting files to ingest into Cognee."""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import fnmatch
|
||||
from pathlib import Path
|
||||
from typing import Iterable, List, Optional
|
||||
|
||||
# Default extensions and exclusions mirrored from the OSS implementation
|
||||
# File extensions treated as ingestible source/documentation by default.
_DEFAULT_FILE_TYPES = [
    ".py",
    ".js",
    ".ts",
    ".java",
    ".cpp",
    ".c",
    ".h",
    ".rs",
    ".go",
    ".rb",
    ".php",
    ".cs",
    ".swift",
    ".kt",
    ".scala",
    ".clj",
    ".hs",
    ".md",
    ".txt",
    ".yaml",
    ".yml",
    ".json",
    ".toml",
    ".cfg",
    ".ini",
]

# Glob-style patterns always excluded by collect_ingest_files (matched
# against both the full path and the basename): VCS metadata, virtualenvs,
# dependency trees, build output, caches, and transient files.
_DEFAULT_EXCLUDE = [
    "*.pyc",
    "__pycache__",
    ".git",
    ".svn",
    ".hg",
    "node_modules",
    ".venv",
    "venv",
    ".env",
    "dist",
    "build",
    ".pytest_cache",
    ".mypy_cache",
    ".tox",
    "coverage",
    "*.log",
    "*.tmp",
]
|
||||
|
||||
|
||||
def collect_ingest_files(
    path: Path,
    recursive: bool = True,
    file_types: Optional[Iterable[str]] = None,
    exclude: Optional[Iterable[str]] = None,
) -> List[Path]:
    """Return a list of files eligible for ingestion.

    Args:
        path: File or directory to scan (resolved to an absolute path first).
        recursive: When True and ``path`` is a directory, descend into
            subdirectories.
        file_types: Accepted filename suffixes; defaults to
            ``_DEFAULT_FILE_TYPES``.
        exclude: Extra exclusion patterns; ``_DEFAULT_EXCLUDE`` always applies.

    Returns:
        Matching file paths in filesystem traversal order.
    """
    path = path.resolve()

    # str.endswith accepts a tuple, so hoist the suffix set out of the loop
    # instead of scanning `any(endswith(ext) for ext ...)` per file.
    suffixes = tuple(file_types) if file_types else tuple(_DEFAULT_FILE_TYPES)

    exclusions = list(exclude) if exclude else []
    exclusions.extend(_DEFAULT_EXCLUDE)

    def should_exclude(file_path: Path) -> bool:
        # A pattern excludes a file when it appears anywhere in the full path
        # (e.g. "node_modules" inside a parent dir) or matches the basename
        # exactly (e.g. "*.pyc"), using fnmatch glob semantics.
        file_str = str(file_path)
        for pattern in exclusions:
            if fnmatch.fnmatch(file_str, f"*{pattern}*") or fnmatch.fnmatch(file_path.name, pattern):
                return True
        return False

    # Single-file input: either it qualifies or the result is empty.
    if path.is_file():
        if not should_exclude(path) and str(path).endswith(suffixes):
            return [path]
        return []

    files: List[Path] = []
    pattern = "**/*" if recursive else "*"
    for file_path in path.glob(pattern):
        if file_path.is_file() and not should_exclude(file_path) and str(file_path).endswith(suffixes):
            files.append(file_path)

    return files
|
||||
|
||||
|
||||
__all__ = ["collect_ingest_files"]
|
||||
@@ -0,0 +1,486 @@
|
||||
"""
|
||||
Main CLI application with improved command structure.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import typer
|
||||
from rich.console import Console
|
||||
from rich.traceback import install
|
||||
from typing import Optional, List
|
||||
import sys
|
||||
|
||||
from .commands import (
|
||||
init,
|
||||
workflows,
|
||||
workflow_exec,
|
||||
findings,
|
||||
monitor,
|
||||
config as config_cmd,
|
||||
ai,
|
||||
ingest,
|
||||
)
|
||||
from .fuzzy import enhanced_command_not_found_handler
|
||||
|
||||
# Install rich traceback handler
|
||||
install(show_locals=True)
|
||||
|
||||
# Create console for rich output
|
||||
console = Console()
|
||||
|
||||
# Create the main Typer app
|
||||
app = typer.Typer(
|
||||
name="fuzzforge",
|
||||
help=(
|
||||
"\b\n"
|
||||
"[cyan]███████╗██╗ ██╗███████╗███████╗███████╗ ██████╗ ██████╗ ██████╗ ███████╗\n"
|
||||
"██╔════╝██║ ██║╚══███╔╝╚══███╔╝██╔════╝██╔═══██╗██╔══██╗██╔════╝ ██╔════╝\n"
|
||||
"█████╗ ██║ ██║ ███╔╝ ███╔╝ █████╗ ██║ ██║██████╔╝██║ ███╗█████╗ \n"
|
||||
"██╔══╝ ██║ ██║ ███╔╝ ███╔╝ ██╔══╝ ██║ ██║██╔══██╗██║ ██║██╔══╝ \n"
|
||||
"██║ ╚██████╔╝███████╗███████╗██║ ╚██████╔╝██║ ██║╚██████╔╝███████╗\n"
|
||||
"╚═╝ ╚═════╝ ╚══════╝╚══════╝╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══════╝[/cyan]\n\n"
|
||||
"🛡️ Security testing workflow orchestration platform"
|
||||
),
|
||||
rich_markup_mode="rich",
|
||||
no_args_is_help=True,
|
||||
context_settings={
|
||||
# Prevent help text from wrapping so ASCII art stays aligned
|
||||
"max_content_width": 200,
|
||||
# Keep common help flags
|
||||
"help_option_names": ["--help", "-h"],
|
||||
},
|
||||
)
|
||||
|
||||
# Create workflow singular command group
|
||||
workflow_app = typer.Typer(
|
||||
name="workflow",
|
||||
help="🚀 Execute and manage individual workflows",
|
||||
no_args_is_help=False, # Allow direct execution
|
||||
)
|
||||
|
||||
# Create finding singular command group
|
||||
finding_app = typer.Typer(
|
||||
name="finding",
|
||||
help="🔍 View and analyze individual findings",
|
||||
no_args_is_help=False,
|
||||
)
|
||||
|
||||
|
||||
# === Top-level commands ===
|
||||
|
||||
@app.command()
def init(
    name: Optional[str] = typer.Option(
        None, "--name", "-n",
        help="Project name (defaults to current directory name)"
    ),
    api_url: Optional[str] = typer.Option(
        None, "--api-url", "-u",
        help="FuzzForge API URL (defaults to http://localhost:8000)"
    ),
    force: bool = typer.Option(
        False, "--force", "-f",
        help="Force initialization even if project already exists"
    )
):
    """
    📁 Initialize a new FuzzForge project

    Thin wrapper: the import is deferred to call time (pattern used for all
    commands in this module) and all options are forwarded unchanged.
    """
    # NOTE(review): this function shadows the `init` module imported from
    # .commands at the top of the file; the explicit `.commands.init` import
    # below keeps it working, but consider renaming to avoid confusion.
    from .commands.init import project
    project(name=name, api_url=api_url, force=force)
|
||||
|
||||
|
||||
@app.command()
def status():
    """
    📊 Show project and latest execution status
    """
    # Deferred import keeps CLI startup light (pattern used throughout
    # this module); all logic lives in the status command module.
    from .commands.status import show_status
    show_status()
|
||||
|
||||
|
||||
@app.command()
def config(
    key: Optional[str] = typer.Argument(None, help="Configuration key"),
    value: Optional[str] = typer.Argument(None, help="Configuration value to set")
):
    """
    ⚙️ Manage configuration (show all, get, or set values)

    Dispatch is by argument count: no args shows everything, one arg reads
    a single key, two args writes a key/value pair. All operations target
    the project-local (non-global) configuration.
    """
    from .commands import config as config_cmd

    if key is not None and value is not None:
        # Key and value: set value
        config_cmd.set_config(key=key, value=value, global_config=False)
    elif key is not None:
        # Key only: get specific value
        config_cmd.get_config(key=key, global_config=False)
    else:
        # No arguments: show all config
        config_cmd.show_config(global_config=False)
|
||||
|
||||
|
||||
@app.command()
def clean(
    days: int = typer.Option(
        90, "--days", "-d",
        help="Remove data older than this many days"
    ),
    dry_run: bool = typer.Option(
        False, "--dry-run",
        help="Show what would be deleted without actually deleting"
    )
):
    """
    🧹 Clean old execution data and findings

    Removes runs older than --days from the project database. With
    --dry-run, only reports intent and leaves the database untouched.
    """
    from .database import get_project_db
    from .exceptions import require_project

    try:
        require_project()
        db = get_project_db()
        if not db:
            console.print("❌ No project database found", style="red")
            raise typer.Exit(1)

        if dry_run:
            # BUG FIX: the previous implementation called cleanup_old_runs()
            # even in dry-run mode, which actually deleted the data it was
            # only supposed to preview. Do not touch the database here.
            console.print(f"🔍 [bold]Dry run:[/bold] Would clean data older than {days} days")
            console.print("No data was deleted (dry run)", style="yellow")
            return

        deleted = db.cleanup_old_runs(keep_days=days)
        console.print(f"✅ Cleaned {deleted} old executions", style="green")
    except typer.Exit:
        # BUG FIX: typer.Exit raised above was previously swallowed by the
        # generic handler and re-reported as "Failed to clean data: 1".
        raise
    except Exception as e:
        console.print(f"❌ Failed to clean data: {e}", style="red")
        raise typer.Exit(1)
|
||||
|
||||
|
||||
# === Workflow commands (singular) ===
|
||||
|
||||
# Add workflow subcommands first (before callback)
# Execution-related subcommands come from workflow_exec; the metadata
# subcommands (info/params) are re-exported from the plural `workflows`
# module so both spellings share one implementation.
workflow_app.command("status")(workflow_exec.workflow_status)
workflow_app.command("history")(workflow_exec.workflow_history)
workflow_app.command("retry")(workflow_exec.retry_workflow)
workflow_app.command("info")(workflows.workflow_info)
workflow_app.command("params")(workflows.workflow_parameters)
|
||||
|
||||
@workflow_app.command("run")
def run_workflow(
    workflow: str = typer.Argument(help="Workflow name"),
    target: str = typer.Argument(help="Target path"),
):
    """
    🚀 Execute a security testing workflow
    """
    from .commands.workflow_exec import execute_workflow

    # Delegate to the full executor with interactive defaults: no explicit
    # parameters, read-only volume, no timeout, prompt interactively, and
    # return immediately instead of waiting or streaming live output.
    defaults = dict(
        params=[],
        param_file=None,
        volume_mode='ro',
        timeout=None,
        interactive=True,
        wait=False,
        live=False,
    )
    execute_workflow(workflow=workflow, target_path=target, **defaults)
|
||||
|
||||
@workflow_app.callback()
def workflow_main():
    """
    Execute workflows and manage workflow executions

    Examples:
        fuzzforge workflow security_assessment ./target  # Execute workflow
        fuzzforge workflow status                        # Check latest status
        fuzzforge workflow history                       # Show execution history
    """
    # No-op callback: exists solely to attach help text to the "workflow"
    # command group; the registered subcommands do the actual work.
    pass
|
||||
|
||||
|
||||
# === Finding commands (singular) ===
|
||||
|
||||
@finding_app.command("export")
def export_finding(
    execution_id: Optional[str] = typer.Argument(None, help="Execution ID (defaults to latest)"),
    format: str = typer.Option(
        "sarif", "--format", "-f",
        help="Export format: sarif, json, csv"
    ),
    output: Optional[str] = typer.Option(
        None, "--output", "-o",
        help="Output file (defaults to stdout)"
    )
):
    """
    📤 Export findings to file

    When no execution ID is given, the most recent run recorded in the
    project database is exported.
    """
    from .commands.findings import export_findings
    from .database import get_project_db
    from .exceptions import require_project

    try:
        require_project()

        if not execution_id:
            # Resolve the ID from the project database, newest run first.
            db = get_project_db()
            if not db:
                console.print("❌ No project database found", style="red")
                return
            recent_runs = db.list_runs(limit=1)
            if not recent_runs:
                console.print("⚠️ No findings found in project database", style="yellow")
                return
            execution_id = recent_runs[0].run_id
            console.print(f"🔍 Using most recent execution: {execution_id}")

        export_findings(run_id=execution_id, format=format, output=output)
    except Exception as e:
        console.print(f"❌ Failed to export findings: {e}", style="red")
|
||||
|
||||
|
||||
@finding_app.command("analyze")
def analyze_finding(
    finding_id: Optional[str] = typer.Argument(None, help="Finding ID to analyze")
):
    """
    🤖 AI analysis of a finding

    Thin wrapper over the ai command module; finding_id is forwarded
    unchanged (it may be None when no argument is given).
    """
    from .commands.ai import analyze_finding as ai_analyze
    ai_analyze(finding_id)
|
||||
|
||||
@finding_app.callback(invoke_without_command=True)
def finding_main(
    ctx: typer.Context,
):
    """
    View and analyze individual findings

    Examples:
        fuzzforge finding          # Show latest finding
        fuzzforge finding <id>     # Show specific finding
        fuzzforge finding export   # Export latest findings
    """
    # Check if a subcommand is being invoked
    if ctx.invoked_subcommand is not None:
        # Let the subcommand handle it
        return

    # Get remaining arguments for direct viewing
    # NOTE(review): ctx.args is only populated when the group allows extra
    # args (allow_extra_args / ignore_unknown_options); verify that
    # `fuzzforge finding <id>` actually reaches this path rather than
    # failing argument parsing.
    args = ctx.args if hasattr(ctx, 'args') else []
    finding_id = args[0] if args else None

    # Direct viewing: fuzzforge finding [id]
    from .commands.findings import get_findings
    from .database import get_project_db
    from .exceptions import require_project

    try:
        require_project()

        # If no ID provided, get the latest
        if not finding_id:
            db = get_project_db()
            if db:
                recent_runs = db.list_runs(limit=1)
                if recent_runs:
                    # Despite the name, finding_id holds a run ID here; it is
                    # forwarded to get_findings(run_id=...) below.
                    finding_id = recent_runs[0].run_id
                    console.print(f"🔍 Using most recent execution: {finding_id}")
                else:
                    console.print("⚠️ No findings found in project database", style="yellow")
                    return
            else:
                console.print("❌ No project database found", style="red")
                return

        get_findings(run_id=finding_id, save=True, format="table")
    except Exception as e:
        console.print(f"❌ Failed to get findings: {e}", style="red")
|
||||
|
||||
|
||||
# === Add command groups ===
# NOTE: registration order here is the order Typer lists the groups in
# `ff --help`, so keep related groups adjacent.

# Plural commands (for browsing/listing)
app.add_typer(workflows.app, name="workflows", help="📋 Browse available workflows")
app.add_typer(findings.app, name="findings", help="📋 Browse all findings")

# Singular commands (for actions)
app.add_typer(workflow_app, name="workflow", help="🚀 Execute and manage workflows")
app.add_typer(finding_app, name="finding", help="🔍 View and analyze findings")

# Other command groups
app.add_typer(monitor.app, name="monitor", help="📊 Real-time monitoring")
app.add_typer(ai.app, name="ai", help="🤖 AI integration features")
app.add_typer(ingest.app, name="ingest", help="🧠 Ingest knowledge into AI")
|
||||
|
||||
# Help and utility commands
|
||||
@app.command()
def examples():
    """
    📚 Show usage examples
    """
    # One pre-formatted Rich-markup block, printed verbatim.
    console.print("""
[bold cyan]FuzzForge CLI Examples[/bold cyan]

[bold]Getting Started:[/bold]
  ff init                                    # Initialize a project
  ff workflows                               # List available workflows
  ff workflow info afl-fuzzing               # Get workflow details

[bold]Execute Workflows:[/bold]
  ff workflow afl-fuzzing ./target           # Run fuzzing on target
  ff workflow afl-fuzzing . --live           # Run with live monitoring
  ff workflow scan-c ./src timeout=300 threads=4  # With parameters

[bold]Monitor Execution:[/bold]
  ff status                                  # Check latest execution
  ff workflow status                         # Same as above
  ff monitor                                 # Live monitoring dashboard
  ff workflow history                        # Show past executions

[bold]Review Findings:[/bold]
  ff findings                                # List all findings
  ff finding                                 # Show latest finding
  ff finding export --format sarif           # Export findings

[bold]AI Features:[/bold]
  ff ai chat                                 # Interactive AI chat
  ff ai suggest ./src                        # Get workflow suggestions
  ff finding analyze                         # AI analysis of latest finding
""")
|
||||
|
||||
|
||||
@app.command()
def version():
    """
    📦 Show version information
    """
    # Imported lazily so `--help` and other commands do not pay for it.
    from . import __version__

    console.print(f"FuzzForge CLI v{__version__}")
    # Plain string: the previous f-string had no placeholders (lint F541).
    console.print("Short command: ff")
|
||||
|
||||
|
||||
@app.callback()
def main_callback(
    ctx: typer.Context,
    version: Optional[bool] = typer.Option(
        None, "--version", "-v",
        help="Show version information"
    ),
):
    """
    🛡️ FuzzForge CLI - Security testing workflow orchestration platform

    Quick start:
    • ff init - Initialize a new project
    • ff workflows - See available workflows
    • ff workflow <name> <target> - Execute a workflow
    • ff examples - Show usage examples
    """
    # Guard clause: only the --version flag needs handling here.
    if not version:
        return
    from . import __version__
    console.print(f"FuzzForge CLI v{__version__}")
    raise typer.Exit()
|
||||
|
||||
|
||||
def main():
    """Main entry point with smart command routing and error handling.

    Intercepts ``sys.argv`` before Typer parses it so two positional
    shortcut forms work:

      * ``ff workflow <name> <target> [...]`` — run a workflow directly
      * ``ff finding <id>``                   — display a finding directly

    Everything else falls through to the regular Typer app, with a custom
    "command not found" handler layered on top of Typer's SystemExit.
    """
    # Smart command routing BEFORE Typer processes arguments
    if len(sys.argv) > 1:
        args = sys.argv[1:]

        # Handle workflow command with pattern recognition
        if len(args) >= 3 and args[0] == 'workflow':
            # NOTE(review): keep in sync with the subcommands registered on
            # workflow_app — an unlisted subcommand is mis-read as a name.
            workflow_subcommands = ['run', 'status', 'history', 'retry', 'info', 'params']
            # Skip custom dispatching if help flags are present
            if not any(arg in ['--help', '-h', '--version', '-v'] for arg in args):
                if args[1] not in workflow_subcommands:
                    # Direct workflow execution: ff workflow <name> <target>
                    from .commands.workflow_exec import execute_workflow

                    workflow_name = args[1]
                    target_path = args[2]
                    # Everything after the target is passed through as params.
                    remaining_params = args[3:] if len(args) > 3 else []

                    console.print(f"🚀 Executing workflow: {workflow_name} on {target_path}")

                    try:
                        execute_workflow(
                            workflow=workflow_name,
                            target_path=target_path,
                            params=remaining_params,
                            param_file=None,
                            volume_mode='ro',
                            timeout=None,
                            interactive=True,
                            wait=False,
                            live=False
                        )
                        return
                    except Exception as e:
                        console.print(f"❌ Failed to execute workflow: {e}", style="red")
                        sys.exit(1)

        # Handle finding command with pattern recognition
        if len(args) >= 2 and args[0] == 'finding':
            finding_subcommands = ['export', 'analyze']
            # Skip custom dispatching if help flags are present
            if not any(arg in ['--help', '-h', '--version', '-v'] for arg in args):
                if args[1] not in finding_subcommands:
                    # Direct finding display: ff finding <id>
                    from .commands.findings import get_findings

                    finding_id = args[1]
                    console.print(f"🔍 Displaying finding: {finding_id}")

                    try:
                        get_findings(run_id=finding_id, save=True, format="table")
                        return
                    except Exception as e:
                        console.print(f"❌ Failed to get finding: {e}", style="red")
                        sys.exit(1)

    # Default Typer app handling
    try:
        app()
    except SystemExit as e:
        # Enhanced error handling for command not found: only when Typer
        # exited non-zero AND the first non-flag token is not a known
        # command do we show suggestions; otherwise re-raise untouched so
        # normal exit codes propagate.
        if hasattr(e, 'code') and e.code != 0 and len(sys.argv) > 1:
            command_parts = sys.argv[1:]
            clean_parts = [part for part in command_parts if not part.startswith('-')]

            if clean_parts:
                main_cmd = clean_parts[0]
                # NOTE(review): keep this list in sync with the add_typer /
                # @app.command registrations above.
                valid_commands = [
                    'init', 'status', 'config', 'clean',
                    'workflows', 'workflow',
                    'findings', 'finding',
                    'monitor', 'ai', 'ingest',
                    'examples', 'version'
                ]

                if main_cmd not in valid_commands:
                    enhanced_command_not_found_handler(clean_parts)
                    sys.exit(1)
        raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Allow direct execution (python -m / script) in addition to the
    # installed console entry point.
    main()
|
||||
@@ -0,0 +1,371 @@
|
||||
"""
|
||||
Enhanced progress indicators and loading animations for FuzzForge CLI.
|
||||
|
||||
Provides rich progress bars, spinners, and status displays for all long-running operations.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import time
|
||||
import asyncio
|
||||
from contextlib import contextmanager
|
||||
from typing import Optional, Callable, Any, Dict, List
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
from rich.console import Console
|
||||
from rich.progress import (
|
||||
Progress, SpinnerColumn, TextColumn, BarColumn, TaskProgressColumn,
|
||||
TimeElapsedColumn, TimeRemainingColumn, MofNCompleteColumn
|
||||
)
|
||||
from rich.panel import Panel
|
||||
from rich.live import Live
|
||||
from rich.table import Table
|
||||
from rich.text import Text
|
||||
from rich import box
|
||||
|
||||
console = Console()
|
||||
|
||||
|
||||
class ProgressManager:
    """Enhanced progress manager with multiple progress types.

    Bundles factory and context-manager helpers around ``rich.progress``
    so CLI commands share one consistent look for long-running work.
    """

    def __init__(self):
        # Placeholders for a shared Progress/Live instance; the context
        # managers below currently build their own per invocation.
        self.progress = None
        self.live = None

    def create_progress(self, show_speed: bool = False, show_eta: bool = False) -> Progress:
        """Create a rich progress instance with customizable columns.

        Args:
            show_speed: Add a "{task.fields[speed]}/s" column.
                NOTE(review): tasks must then be created with a ``speed``
                field or Rich fails at render time — confirm at call sites.
            show_eta: Add a time-remaining column.

        Returns:
            A ``Progress`` bound to the module-level console.
        """
        columns = [
            SpinnerColumn(),
            TextColumn("[bold blue]{task.description}"),
            BarColumn(bar_width=40),
            TaskProgressColumn(),
        ]

        if show_speed:
            columns.append(TextColumn("[cyan]{task.fields[speed]}/s"))

        columns.append(TimeElapsedColumn())

        if show_eta:
            columns.append(TimeRemainingColumn())

        return Progress(*columns, console=console)

    @contextmanager
    def workflow_submission(self, workflow_name: str, target_path: str):
        """Progress context for workflow submission.

        Yields ``(progress, task)`` after the validation step; the
        remaining steps (connect, submit, done) render when the caller's
        ``with`` body finishes, since they follow the ``yield``.
        """
        with self.create_progress() as progress:
            task = progress.add_task(
                f"🚀 Submitting workflow: [yellow]{workflow_name}[/yellow]",
                total=4
            )

            # Step 1: Validation
            progress.update(task, description="🔍 Validating parameters...", advance=1)
            yield progress, task

            # Step 2: API Connection
            progress.update(task, description="🌐 Connecting to API...", advance=1)
            time.sleep(0.5)  # Brief pause for visual feedback

            # Step 3: Submission
            progress.update(task, description="📤 Submitting workflow...", advance=1)
            time.sleep(0.3)

            # Step 4: Complete
            progress.update(task, description="✅ Workflow submitted successfully!", advance=1)

    @contextmanager
    def data_export(self, format_type: str, record_count: int):
        """Progress context for data export operations.

        The task total equals ``record_count``; callers advance it as
        records are written.
        """
        with self.create_progress(show_eta=True) as progress:
            task = progress.add_task(
                f"📊 Exporting {record_count} records as [yellow]{format_type.upper()}[/yellow]",
                total=record_count
            )
            yield progress, task

    @contextmanager
    def file_operations(self, operation: str, file_count: int):
        """Progress context for file operations (one tick per file)."""
        with self.create_progress(show_eta=True) as progress:
            task = progress.add_task(
                f"📁 {operation} {file_count} files...",
                total=file_count
            )
            yield progress, task

    @contextmanager
    def api_requests(self, operation: str, request_count: Optional[int] = None):
        """Progress context for API requests.

        A falsy ``request_count`` (None or 0) yields an indeterminate bar
        (``total=None``); the two previously duplicated branches differed
        only in ``total`` and were collapsed into one.
        """
        with self.create_progress() as progress:
            task = progress.add_task(
                f"🌐 {operation}...",
                total=request_count or None
            )
            yield progress, task

    def create_live_stats_display(self) -> Dict[str, Any]:
        """Create a live statistics display layout.

        Currently a placeholder: returns an empty skeleton for callers
        to populate before wiring it into a ``Live`` view.
        """
        return {
            "layout": None,
            "stats_table": None,
            "progress_bars": None
        }
|
||||
|
||||
|
||||
@contextmanager
def spinner(text: str, success_text: Optional[str] = None):
    """Simple spinner context manager for quick operations."""
    live_progress = Progress(
        SpinnerColumn(),
        TextColumn("[bold blue]{task.description}"),
        console=console
    )
    with live_progress:
        task = live_progress.add_task(text, total=None)
        try:
            yield live_progress
            if success_text:
                live_progress.update(task, description=f"✅ {success_text}")
                time.sleep(0.5)  # Brief pause to show success
        except Exception as e:
            # Surface the failure in the spinner line before propagating.
            live_progress.update(task, description=f"❌ Failed: {str(e)}")
            time.sleep(0.5)
            raise
|
||||
|
||||
|
||||
@contextmanager
def step_progress(steps: List[str], title: str = "Processing"):
    """Multi-step progress with predefined steps."""
    bar_columns = (
        SpinnerColumn(),
        TextColumn("[bold blue]{task.description}"),
        BarColumn(bar_width=30),
        MofNCompleteColumn(),
    )
    with Progress(*bar_columns, console=console) as progress:
        task = progress.add_task(f"🔄 {title}", total=len(steps))

        class StepProgressController:
            """Hands the caller explicit control over step advancement."""

            def __init__(self, progress_instance, task_id):
                self.progress = progress_instance
                self.task = task_id
                self.current_step = 0

            def next_step(self):
                # Calls beyond the declared number of steps are ignored.
                if self.current_step >= len(steps):
                    return
                label = steps[self.current_step]
                self.progress.update(
                    self.task,
                    description=f"🔄 {label}",
                    advance=1
                )
                self.current_step += 1

            def complete(self, success_text: str = "Completed"):
                # Force the bar to full regardless of steps taken.
                self.progress.update(
                    self.task,
                    description=f"✅ {success_text}",
                    completed=len(steps)
                )

        yield StepProgressController(progress, task)
|
||||
|
||||
|
||||
def create_workflow_monitoring_display(run_id: str, workflow_name: str) -> Panel:
    """Create a monitoring display for workflow execution.

    Returns:
        A fitted ``Panel`` wrapping the stats table. (The previous
        ``-> Table`` annotation was wrong: the function returns
        ``Panel.fit(...)``.)
    """
    table = Table(show_header=False, box=box.ROUNDED)
    table.add_column("Metric", style="bold cyan")
    table.add_column("Value", justify="right")

    table.add_row("Run ID", f"[dim]{run_id[:12]}...[/dim]")
    table.add_row("Workflow", f"[yellow]{workflow_name}[/yellow]")
    # "orange" is not a valid Rich color name and raises a markup error at
    # render time; "orange1" is the closest palette name.
    table.add_row("Status", "[orange1]Running[/orange1]")
    table.add_row("Started", datetime.now().strftime("%H:%M:%S"))

    return Panel.fit(
        table,
        title="🔄 Workflow Monitoring",
        border_style="blue"
    )
|
||||
|
||||
|
||||
def create_fuzzing_progress_display(stats: Dict[str, Any]) -> Panel:
    """Create a rich display for fuzzing progress."""
    executions = stats.get('executions', 0)
    exec_rate = stats.get('executions_per_sec', 0)
    crashes = stats.get('crashes', 0)
    coverage = stats.get('coverage', 0)

    # Left column: raw counters.
    stats_table = Table(show_header=False, box=box.SIMPLE)
    stats_table.add_column("Metric", style="bold")
    stats_table.add_column("Value", justify="right", style="bold white")
    stats_table.add_row("Executions", f"{executions:,}")
    stats_table.add_row("Exec/sec", f"{exec_rate:.1f}")
    stats_table.add_row("Crashes", f"[red]{crashes:,}[/red]")
    stats_table.add_row("Coverage", f"{coverage:.1f}%")

    # Right column: visual progress bars.
    progress_table = Table(show_header=False, box=box.SIMPLE)
    progress_table.add_column("Metric", style="bold")
    progress_table.add_column("Progress", min_width=25)

    # Execution rate as a percentage of a 1000 exec/sec target, capped at 100.
    target_rate = 1000
    exec_progress = min(100, (exec_rate / target_rate) * 100)
    progress_table.add_row(
        "Exec Rate",
        create_progress_bar(exec_progress, color="green")
    )
    progress_table.add_row(
        "Coverage",
        create_progress_bar(coverage, color="blue")
    )

    # Side-by-side layout of the two tables.
    combined = Table(show_header=False, box=None)
    combined.add_column("Stats", ratio=1)
    combined.add_column("Progress", ratio=1)
    combined.add_row(stats_table, progress_table)

    return Panel(
        combined,
        title="🎯 Fuzzing Progress",
        border_style="green"
    )
|
||||
|
||||
|
||||
def create_progress_bar(percentage: float, color: str = "green", width: int = 20) -> Text:
    """Create a visual progress bar using Rich Text."""
    filled_cells = int((percentage / 100) * width)
    empty_cells = width - filled_cells
    bar_text = Text("█" * filled_cells + "░" * empty_cells, style=color)
    bar_text.append(f" {percentage:.1f}%", style="dim")
    return bar_text
|
||||
|
||||
|
||||
def create_loading_animation(text: str) -> Live:
    """Create a loading animation with rotating spinner.

    The previous implementation rendered one spinner frame into a static
    Panel, so the "animation" never advanced. Passing ``get_renderable``
    to ``Live`` makes Rich rebuild the panel on every refresh, advancing
    the frame each time.

    Returns:
        A ``Live`` instance; the caller enters it as a context manager.
    """
    frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]
    frame_index = 0

    def render_frame() -> Panel:
        # Advance the frame on each refresh (10 per second).
        nonlocal frame_index
        frame = frames[frame_index]
        frame_index = (frame_index + 1) % len(frames)
        return Panel(
            f"{frame} [bold blue]{text}[/bold blue]",
            box=box.ROUNDED,
            border_style="cyan"
        )

    return Live(get_renderable=render_frame, auto_refresh=True, refresh_per_second=10)
|
||||
|
||||
|
||||
class WorkflowProgressTracker:
    """Advanced progress tracker for workflow execution.

    Tracks named phases, each a dict with keys ``name``, ``description``,
    ``estimated_duration``, ``start_time``, ``end_time`` and ``status``
    (one of pending/running/completed/failed).
    """

    def __init__(self, workflow_name: str, run_id: str):
        self.workflow_name = workflow_name
        self.run_id = run_id
        self.start_time = datetime.now()
        self.phases = []
        # Name of the currently running phase, or None.
        self.current_phase = None

    def _find_phase(self, phase_name: str):
        """Return the first phase dict with the given name, or None."""
        for phase in self.phases:
            if phase["name"] == phase_name:
                return phase
        return None

    def add_phase(self, name: str, description: str, estimated_duration: Optional[int] = None):
        """Add a phase to the workflow progress.

        Args:
            name: Internal identifier used by start/complete.
            description: Human-readable label shown in the display.
            estimated_duration: Optional estimate in seconds (unused in
                the display for now, kept for future ETA rendering).
        """
        self.phases.append({
            "name": name,
            "description": description,
            "estimated_duration": estimated_duration,
            "start_time": None,
            "end_time": None,
            "status": "pending"
        })

    def start_phase(self, phase_name: str):
        """Mark a phase as running; unknown names are ignored."""
        phase = self._find_phase(phase_name)
        if phase is not None:
            phase["start_time"] = datetime.now()
            phase["status"] = "running"
            self.current_phase = phase_name

    def complete_phase(self, phase_name: str, success: bool = True):
        """Mark a phase completed (or failed); unknown names are ignored."""
        phase = self._find_phase(phase_name)
        if phase is not None:
            phase["end_time"] = datetime.now()
            phase["status"] = "completed" if success else "failed"
            self.current_phase = None

    def get_progress_display(self) -> Panel:
        """Get the current progress display as a Rich Panel."""
        table = Table(show_header=True, box=box.ROUNDED)
        table.add_column("Phase", style="bold")
        table.add_column("Status", justify="center")
        table.add_column("Duration")

        status_emoji = {
            "pending": "⏳",
            "running": "🔄",
            "completed": "✅",
            "failed": "❌"
        }

        for phase in self.phases:
            status_text = f"{status_emoji.get(phase['status'], '❓')} {phase['status'].title()}"

            # Duration of a running phase is measured up to "now".
            # total_seconds() is used deliberately: timedelta.seconds wraps
            # at 24 hours and under-reported long runs.
            if phase["start_time"]:
                end_time = phase["end_time"] or datetime.now()
                duration = end_time - phase["start_time"]
                duration_text = f"{int(duration.total_seconds())}s"
            else:
                duration_text = "-"

            table.add_row(
                phase["description"],
                status_text,
                duration_text
            )

        total_duration = datetime.now() - self.start_time
        title = f"🔄 {self.workflow_name} Progress (Run: {self.run_id[:8]}..., {int(total_duration.total_seconds())}s)"

        return Panel(
            table,
            title=title,
            border_style="blue"
        )
|
||||
|
||||
|
||||
# Global progress manager instance
# Module-level singleton: import and reuse this rather than constructing
# a new ProgressManager at each call site.
progress_manager = ProgressManager()
|
||||
@@ -0,0 +1,180 @@
|
||||
"""
|
||||
Input validation utilities for FuzzForge CLI.
|
||||
"""
|
||||
# Copyright (c) 2025 FuzzingLabs
|
||||
#
|
||||
# Licensed under the Business Source License 1.1 (BSL). See the LICENSE file
|
||||
# at the root of this repository for details.
|
||||
#
|
||||
# After the Change Date (four years from publication), this version of the
|
||||
# Licensed Work will be made available under the Apache License, Version 2.0.
|
||||
# See the LICENSE-APACHE file or http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Additional attribution and requirements are provided in the NOTICE file.
|
||||
|
||||
|
||||
import re
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict, List, Optional, Union
|
||||
|
||||
from .constants import SUPPORTED_VOLUME_MODES, SUPPORTED_EXPORT_FORMATS
|
||||
from .exceptions import ValidationError
|
||||
|
||||
|
||||
def validate_run_id(run_id: str) -> None:
    """Validate a run/execution ID format"""
    if not isinstance(run_id, str) or not run_id:
        raise ValidationError("run_id", run_id, "a non-empty string")

    # UUIDs are typically 36 chars; allow a generous range around that.
    if not (8 <= len(run_id) <= 128):
        raise ValidationError("run_id", run_id, "between 8 and 128 characters")

    # Only alphanumerics, hyphens and underscores are accepted.
    if re.match(r'^[a-zA-Z0-9_-]+$', run_id) is None:
        raise ValidationError("run_id", run_id, "alphanumeric characters, hyphens, and underscores only")
|
||||
|
||||
|
||||
def validate_workflow_name(workflow: str) -> None:
    """Validate workflow name format"""
    if not isinstance(workflow, str) or not workflow:
        raise ValidationError("workflow_name", workflow, "a non-empty string")

    # Short enough to display, long enough to be meaningful.
    if not (2 <= len(workflow) <= 64):
        raise ValidationError("workflow_name", workflow, "between 2 and 64 characters")

    # Only alphanumerics, hyphens and underscores are accepted.
    if re.match(r'^[a-zA-Z0-9_-]+$', workflow) is None:
        raise ValidationError("workflow_name", workflow, "alphanumeric characters, hyphens, and underscores only")
|
||||
|
||||
|
||||
def validate_target_path(target_path: str, must_exist: bool = True) -> Path:
    """Validate and normalize a target path"""
    if not isinstance(target_path, str) or not target_path:
        raise ValidationError("target_path", target_path, "a non-empty string")

    try:
        resolved = Path(target_path).resolve()
    except Exception as e:
        # Path construction/resolution can fail on malformed input.
        raise ValidationError("target_path", target_path, f"a valid path: {e}")

    if must_exist and not resolved.exists():
        raise ValidationError("target_path", target_path, "an existing path")

    return resolved
|
||||
|
||||
|
||||
def validate_volume_mode(volume_mode: str) -> None:
    """Validate volume mode"""
    if volume_mode in SUPPORTED_VOLUME_MODES:
        return
    supported = ', '.join(SUPPORTED_VOLUME_MODES)
    raise ValidationError(
        "volume_mode", volume_mode,
        f"one of: {supported}"
    )
|
||||
|
||||
|
||||
def validate_export_format(export_format: str) -> None:
    """Validate export format"""
    if export_format in SUPPORTED_EXPORT_FORMATS:
        return
    supported = ', '.join(SUPPORTED_EXPORT_FORMATS)
    raise ValidationError(
        "export_format", export_format,
        f"one of: {supported}"
    )
|
||||
|
||||
|
||||
def validate_parameter_value(key: str, value: str, param_type: str) -> Any:
    """Validate and convert a parameter value based on its type"""
    if param_type == "integer":
        try:
            return int(value)
        except ValueError:
            raise ValidationError(f"parameter '{key}'", value, "an integer")

    if param_type == "number":
        try:
            return float(value)
        except ValueError:
            raise ValidationError(f"parameter '{key}'", value, "a number")

    if param_type == "boolean":
        normalized = value.lower()
        if normalized in ("true", "yes", "1", "on"):
            return True
        if normalized in ("false", "no", "0", "off"):
            return False
        raise ValidationError(f"parameter '{key}'", value, "a boolean (true/false, yes/no, 1/0, on/off)")

    if param_type == "array":
        # Comma-separated list; blank entries are dropped.
        items = [piece.strip() for piece in value.split(",") if piece.strip()]
        if not items:
            raise ValidationError(f"parameter '{key}'", value, "a non-empty comma-separated list")
        return items

    # Any other declared type is treated as a plain string.
    if not value:
        raise ValidationError(f"parameter '{key}'", value, "a non-empty string")
    return value
|
||||
|
||||
|
||||
def validate_parameters(params: List[str]) -> Dict[str, Any]:
    """Validate and parse parameter list"""
    parsed: Dict[str, Any] = {}

    for raw in params:
        if "=" not in raw:
            raise ValidationError("parameter format", raw, "key=value format")

        # Split only on the first '=' so values may contain '='.
        key, _, value = raw.partition("=")
        key = key.strip()
        value = value.strip()

        if not key:
            raise ValidationError("parameter key", raw, "a non-empty key")
        if not value:
            raise ValidationError(f"parameter '{key}'", raw, "a non-empty value")

        # Auto-detect type: bool, int, float, else string.
        try:
            lowered = value.lower()
            if lowered in ("true", "false"):
                parsed[key] = lowered == "true"
            elif value.isdigit():
                parsed[key] = int(value)
            elif re.match(r'^\d+\.\d+$', value):
                parsed[key] = float(value)
            else:
                parsed[key] = value
        except ValueError:
            # e.g. unicode digits that pass isdigit() but fail int().
            parsed[key] = value

    return parsed
|
||||
|
||||
|
||||
def validate_config_key(key: str) -> None:
    """Validate configuration key format"""
    if not isinstance(key, str) or not key:
        raise ValidationError("config_key", key, "a non-empty string")

    # Dotted keys like "api.url" are allowed, plus hyphens/underscores.
    if re.match(r'^[a-zA-Z0-9._-]+$', key) is None:
        raise ValidationError("config_key", key, "alphanumeric characters, dots, hyphens, and underscores only")
|
||||
|
||||
|
||||
def validate_positive_integer(value: int, name: str) -> None:
    """Validate that a value is a strictly positive integer.

    ``bool`` is a subclass of ``int``, so ``True`` previously slipped
    through this check as 1; booleans are now rejected explicitly.
    """
    if isinstance(value, bool) or not isinstance(value, int) or value <= 0:
        raise ValidationError(name, value, "a positive integer")
|
||||
|
||||
|
||||
def validate_timeout(timeout: Optional[int]) -> None:
    """Validate an optional timeout in seconds.

    ``None`` means "no timeout" and is accepted. ``bool`` is rejected
    explicitly since it is an ``int`` subclass and ``True`` would
    otherwise be accepted as a 1-second timeout.
    """
    if timeout is None:
        return

    if isinstance(timeout, bool) or not isinstance(timeout, int) or timeout <= 0:
        raise ValidationError("timeout", timeout, "a positive integer (seconds)")

    # Cap at 24 hours to catch obviously wrong values.
    if timeout > 86400:
        raise ValidationError("timeout", timeout, "less than 24 hours (86400 seconds)")
|
||||
Reference in New Issue
Block a user