mirror of
https://github.com/FuzzingLabs/fuzzforge_ai.git
synced 2026-03-25 14:50:21 +01:00
Compare commits
2 Commits
v0.8.0
...
fix/cross-
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2921c92732 | ||
|
|
ac6e8a8a34 |
86
.github/workflows/ci.yml
vendored
86
.github/workflows/ci.yml
vendored
@@ -1,86 +0,0 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, dev, feature/*]
|
||||
pull_request:
|
||||
branches: [main, dev]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
lint-and-typecheck:
|
||||
name: Lint & Type Check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "latest"
|
||||
|
||||
- name: Set up Python
|
||||
run: uv python install 3.14
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync
|
||||
|
||||
- name: Ruff check (fuzzforge-cli)
|
||||
run: |
|
||||
cd fuzzforge-cli
|
||||
uv run --extra lints ruff check src/
|
||||
|
||||
- name: Ruff check (fuzzforge-mcp)
|
||||
run: |
|
||||
cd fuzzforge-mcp
|
||||
uv run --extra lints ruff check src/
|
||||
|
||||
- name: Ruff check (fuzzforge-common)
|
||||
run: |
|
||||
cd fuzzforge-common
|
||||
uv run --extra lints ruff check src/
|
||||
|
||||
- name: Mypy type check (fuzzforge-cli)
|
||||
run: |
|
||||
cd fuzzforge-cli
|
||||
uv run --extra lints mypy src/
|
||||
|
||||
- name: Mypy type check (fuzzforge-mcp)
|
||||
run: |
|
||||
cd fuzzforge-mcp
|
||||
uv run --extra lints mypy src/
|
||||
|
||||
# NOTE: Mypy check for fuzzforge-common temporarily disabled
|
||||
# due to 37 pre-existing type errors in legacy code.
|
||||
# TODO: Fix type errors and re-enable strict checking
|
||||
#- name: Mypy type check (fuzzforge-common)
|
||||
# run: |
|
||||
# cd fuzzforge-common
|
||||
# uv run --extra lints mypy src/
|
||||
|
||||
test:
|
||||
name: Tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "latest"
|
||||
|
||||
- name: Set up Python
|
||||
run: uv python install 3.14
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-extras
|
||||
|
||||
- name: Run MCP tests
|
||||
run: |
|
||||
cd fuzzforge-mcp
|
||||
uv run --extra tests pytest -v
|
||||
|
||||
- name: Run common tests
|
||||
run: |
|
||||
cd fuzzforge-common
|
||||
uv run --extra tests pytest -v
|
||||
49
.github/workflows/mcp-server.yml
vendored
49
.github/workflows/mcp-server.yml
vendored
@@ -1,49 +0,0 @@
|
||||
name: MCP Server Smoke Test
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main, dev]
|
||||
pull_request:
|
||||
branches: [main, dev]
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
mcp-server:
|
||||
name: MCP Server Test
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install uv
|
||||
uses: astral-sh/setup-uv@v5
|
||||
with:
|
||||
version: "latest"
|
||||
|
||||
- name: Set up Python
|
||||
run: uv python install 3.14
|
||||
|
||||
- name: Install dependencies
|
||||
run: uv sync --all-extras
|
||||
|
||||
- name: Start MCP server in background
|
||||
run: |
|
||||
cd fuzzforge-mcp
|
||||
nohup uv run python -m fuzzforge_mcp.server > server.log 2>&1 &
|
||||
echo $! > server.pid
|
||||
sleep 3
|
||||
|
||||
- name: Run MCP tool tests
|
||||
run: |
|
||||
cd fuzzforge-mcp
|
||||
uv run --extra tests pytest tests/test_resources.py -v
|
||||
|
||||
- name: Stop MCP server
|
||||
if: always()
|
||||
run: |
|
||||
if [ -f fuzzforge-mcp/server.pid ]; then
|
||||
kill $(cat fuzzforge-mcp/server.pid) || true
|
||||
fi
|
||||
|
||||
- name: Show server logs
|
||||
if: failure()
|
||||
run: cat fuzzforge-mcp/server.log || true
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -10,6 +10,3 @@ __pycache__
|
||||
|
||||
# Podman/Docker container storage artifacts
|
||||
~/.fuzzforge/
|
||||
|
||||
# User-specific hub config (generated at runtime)
|
||||
hub-config.json
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Contributing to FuzzForge AI
|
||||
# Contributing to FuzzForge OSS
|
||||
|
||||
Thank you for your interest in contributing to FuzzForge AI! We welcome contributions from the community and are excited to collaborate with you.
|
||||
Thank you for your interest in contributing to FuzzForge OSS! We welcome contributions from the community and are excited to collaborate with you.
|
||||
|
||||
**Our Vision**: FuzzForge aims to be a **universal platform for security research** across all cybersecurity domains. Through our modular architecture, any security tool—from fuzzing engines to cloud scanners, from mobile app analyzers to IoT security tools—can be integrated as a containerized module and controlled via AI agents.
|
||||
|
||||
@@ -360,8 +360,8 @@ Beyond modules, you can contribute to FuzzForge's core components.
|
||||
|
||||
1. **Clone and Install**
|
||||
```bash
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge_ai.git
|
||||
cd fuzzforge_ai
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge-oss.git
|
||||
cd fuzzforge-oss
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
@@ -538,7 +538,7 @@ Before submitting a new module:
|
||||
|
||||
## License
|
||||
|
||||
By contributing to FuzzForge AI, you agree that your contributions will be licensed under the same license as the project (see [LICENSE](LICENSE)).
|
||||
By contributing to FuzzForge OSS, you agree that your contributions will be licensed under the same license as the project (see [LICENSE](LICENSE)).
|
||||
|
||||
For module contributions:
|
||||
- Modules you create remain under the project license
|
||||
|
||||
39
Makefile
39
Makefile
@@ -1,10 +1,10 @@
|
||||
.PHONY: help install sync format lint typecheck test build-hub-images clean
|
||||
.PHONY: help install sync format lint typecheck test build-modules clean
|
||||
|
||||
SHELL := /bin/bash
|
||||
|
||||
# Default target
|
||||
help:
|
||||
@echo "FuzzForge AI Development Commands"
|
||||
@echo "FuzzForge OSS Development Commands"
|
||||
@echo ""
|
||||
@echo " make install - Install all dependencies"
|
||||
@echo " make sync - Sync shared packages from upstream"
|
||||
@@ -12,8 +12,8 @@ help:
|
||||
@echo " make lint - Lint code with ruff"
|
||||
@echo " make typecheck - Type check with mypy"
|
||||
@echo " make test - Run all tests"
|
||||
@echo " make build-hub-images - Build all mcp-security-hub images"
|
||||
@echo " make clean - Clean build artifacts"
|
||||
@echo " make build-modules - Build all module container images"
|
||||
@echo " make clean - Clean build artifacts"
|
||||
@echo ""
|
||||
|
||||
# Install all dependencies
|
||||
@@ -64,9 +64,34 @@ test:
|
||||
fi \
|
||||
done
|
||||
|
||||
# Build all mcp-security-hub images for the firmware analysis pipeline
|
||||
build-hub-images:
|
||||
@bash scripts/build-hub-images.sh
|
||||
# Build all module container images
|
||||
# Uses Docker by default, or Podman if FUZZFORGE_ENGINE=podman
|
||||
build-modules:
|
||||
@echo "Building FuzzForge module images..."
|
||||
@if [ "$$FUZZFORGE_ENGINE" = "podman" ]; then \
|
||||
if [ -n "$$SNAP" ]; then \
|
||||
echo "Using Podman with isolated storage (Snap detected)"; \
|
||||
CONTAINER_CMD="podman --root ~/.fuzzforge/containers/storage --runroot ~/.fuzzforge/containers/run"; \
|
||||
else \
|
||||
echo "Using Podman"; \
|
||||
CONTAINER_CMD="podman"; \
|
||||
fi; \
|
||||
else \
|
||||
echo "Using Docker"; \
|
||||
CONTAINER_CMD="docker"; \
|
||||
fi; \
|
||||
for module in fuzzforge-modules/*/; do \
|
||||
if [ -f "$$module/Dockerfile" ] && \
|
||||
[ "$$module" != "fuzzforge-modules/fuzzforge-modules-sdk/" ] && \
|
||||
[ "$$module" != "fuzzforge-modules/fuzzforge-module-template/" ]; then \
|
||||
name=$$(basename $$module); \
|
||||
version=$$(grep 'version' "$$module/pyproject.toml" 2>/dev/null | head -1 | sed 's/.*"\(.*\\)".*/\\1/' || echo "0.1.0"); \
|
||||
echo "Building $$name:$$version..."; \
|
||||
$$CONTAINER_CMD build -t "fuzzforge-$$name:$$version" "$$module" || exit 1; \
|
||||
fi \
|
||||
done
|
||||
@echo ""
|
||||
@echo "✓ All modules built successfully!"
|
||||
|
||||
# Clean build artifacts
|
||||
clean:
|
||||
|
||||
245
README.md
245
README.md
@@ -1,9 +1,9 @@
|
||||
<h1 align="center"> FuzzForge AI</h1>
|
||||
<h1 align="center"> FuzzForge OSS</h1>
|
||||
<h3 align="center">AI-Powered Security Research Orchestration via MCP</h3>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://discord.gg/8XEX33UUwZ"><img src="https://img.shields.io/discord/1420767905255133267?logo=discord&label=Discord" alt="Discord"></a>
|
||||
<a href="LICENSE"><img src="https://img.shields.io/badge/license-BSL%201.1-blue" alt="License: BSL 1.1"></a>
|
||||
<a href="LICENSE"><img src="https://img.shields.io/badge/license-Apache%202.0-blue" alt="License: Apache 2.0"></a>
|
||||
<a href="https://www.python.org/downloads/"><img src="https://img.shields.io/badge/python-3.12%2B-blue" alt="Python 3.12+"/></a>
|
||||
<a href="https://modelcontextprotocol.io"><img src="https://img.shields.io/badge/MCP-compatible-green" alt="MCP Compatible"/></a>
|
||||
<a href="https://fuzzforge.ai"><img src="https://img.shields.io/badge/Website-fuzzforge.ai-purple" alt="Website"/></a>
|
||||
@@ -17,59 +17,54 @@
|
||||
<sub>
|
||||
<a href="#-overview"><b>Overview</b></a> •
|
||||
<a href="#-features"><b>Features</b></a> •
|
||||
<a href="#-mcp-security-hub"><b>Security Hub</b></a> •
|
||||
<a href="#-installation"><b>Installation</b></a> •
|
||||
<a href="USAGE.md"><b>Usage Guide</b></a> •
|
||||
<a href="#-modules"><b>Modules</b></a> •
|
||||
<a href="#-contributing"><b>Contributing</b></a>
|
||||
</sub>
|
||||
</p>
|
||||
|
||||
---
|
||||
|
||||
> 🚧 **FuzzForge AI is under active development.** Expect breaking changes and new features!
|
||||
> 🚧 **FuzzForge OSS is under active development.** Expect breaking changes and new features!
|
||||
|
||||
---
|
||||
|
||||
## 🚀 Overview
|
||||
|
||||
**FuzzForge AI** is an open-source MCP server that enables AI agents (GitHub Copilot, Claude, etc.) to orchestrate security research workflows through the **Model Context Protocol (MCP)**.
|
||||
**FuzzForge OSS** is an open-source runtime that enables AI agents (GitHub Copilot, Claude, etc.) to orchestrate security research workflows through the **Model Context Protocol (MCP)**.
|
||||
|
||||
FuzzForge connects your AI assistant to **MCP tool hubs** — collections of containerized security tools that the agent can discover, chain, and execute autonomously. Instead of manually running security tools, describe what you want and let your AI assistant handle it.
|
||||
### The Core: Modules
|
||||
|
||||
### The Core: Hub Architecture
|
||||
At the heart of FuzzForge are **modules** - containerized security tools that AI agents can discover, configure, and orchestrate. Each module encapsulates a specific security capability (static analysis, fuzzing, crash analysis, etc.) and runs in an isolated container.
|
||||
|
||||
FuzzForge acts as a **meta-MCP server** — a single MCP endpoint that gives your AI agent access to tools from multiple MCP hub servers. Each hub server is a containerized security tool (Binwalk, YARA, Radare2, Nmap, etc.) that the agent can discover at runtime.
|
||||
- **🔌 Plug & Play**: Modules are self-contained - just pull and run
|
||||
- **🤖 AI-Native**: Designed for AI agent orchestration via MCP
|
||||
- **🔗 Composable**: Chain modules together into automated workflows
|
||||
- **📦 Extensible**: Build custom modules with the Python SDK
|
||||
|
||||
- **🔍 Discovery**: The agent lists available hub servers and discovers their tools
|
||||
- **🤖 AI-Native**: Hub tools provide agent context — usage tips, workflow guidance, and domain knowledge
|
||||
- **🔗 Composable**: Chain tools from different hubs into automated pipelines
|
||||
- **📦 Extensible**: Add your own MCP servers to the hub registry
|
||||
The OSS runtime handles module discovery, execution, and result collection. Security modules (developed separately) provide the actual security tooling - from static analyzers to fuzzers to crash triagers.
|
||||
|
||||
### 🎬 Use Case: Firmware Vulnerability Research
|
||||
|
||||
> **Scenario**: Analyze a firmware image to find security vulnerabilities — fully automated by an AI agent.
|
||||
|
||||
```
|
||||
User: "Search for vulnerabilities in firmware.bin"
|
||||
|
||||
Agent → Binwalk: Extract filesystem from firmware image
|
||||
Agent → YARA: Scan extracted files for vulnerability patterns
|
||||
Agent → Radare2: Trace dangerous function calls in prioritized binaries
|
||||
Agent → Report: 8 vulnerabilities found (2 critical, 4 high, 2 medium)
|
||||
```
|
||||
Instead of manually running security tools, describe what you want and let your AI assistant handle it.
|
||||
|
||||
### 🎬 Use Case: Rust Fuzzing Pipeline
|
||||
|
||||
> **Scenario**: Fuzz a Rust crate to discover vulnerabilities using AI-assisted harness generation and parallel fuzzing.
|
||||
|
||||
```
|
||||
User: "Fuzz the blurhash crate for vulnerabilities"
|
||||
|
||||
Agent → Rust Analyzer: Identify fuzzable functions and attack surface
|
||||
Agent → Harness Gen: Generate and validate fuzzing harnesses
|
||||
Agent → Cargo Fuzzer: Run parallel coverage-guided fuzzing sessions
|
||||
Agent → Crash Analysis: Deduplicate and triage discovered crashes
|
||||
```
|
||||
<table align="center">
|
||||
<tr>
|
||||
<th>1️⃣ Analyze, Generate & Validate Harnesses</th>
|
||||
<th>2️⃣ Run Parallel Continuous Fuzzing</th>
|
||||
</tr>
|
||||
<tr>
|
||||
<td><img src="assets/demopart2.gif" alt="FuzzForge Demo - Analysis Pipeline" width="100%"></td>
|
||||
<td><img src="assets/demopart1.gif" alt="FuzzForge Demo - Parallel Fuzzing" width="100%"></td>
|
||||
</tr>
|
||||
<tr>
|
||||
<td align="center"><sub>AI agent analyzes code, generates harnesses, and validates they compile</sub></td>
|
||||
<td align="center"><sub>Multiple fuzzing sessions run in parallel with live metrics</sub></td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
---
|
||||
|
||||
@@ -87,13 +82,13 @@ If you find FuzzForge useful, please **star the repo** to support development!
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| 🤖 **AI-Native** | Built for MCP — works with GitHub Copilot, Claude, and any MCP-compatible agent |
|
||||
| 🔌 **Hub System** | Connect to MCP tool hubs — each hub brings dozens of containerized security tools |
|
||||
| 🔍 **Tool Discovery** | Agents discover available tools at runtime with built-in usage guidance |
|
||||
| 🔗 **Pipelines** | Chain tools from different hubs into automated multi-step workflows |
|
||||
| 🔄 **Persistent Sessions** | Long-running tools (Radare2, fuzzers) with stateful container sessions |
|
||||
| 🏠 **Local First** | All execution happens on your machine — no cloud required |
|
||||
| 🔒 **Sandboxed** | Every tool runs in an isolated container via Docker or Podman |
|
||||
| 🤖 **AI-Native** | Built for MCP - works with GitHub Copilot, Claude, and any MCP-compatible agent |
|
||||
| 📦 **Containerized** | Each module runs in isolation via Docker or Podman |
|
||||
| 🔄 **Continuous Mode** | Long-running tasks (fuzzing) with real-time metrics streaming |
|
||||
| 🔗 **Workflows** | Chain multiple modules together in automated pipelines |
|
||||
| 🛠️ **Extensible** | Create custom modules with the Python SDK |
|
||||
| 🏠 **Local First** | All execution happens on your machine - no cloud required |
|
||||
| 🔒 **Secure** | Sandboxed containers with no network access by default |
|
||||
|
||||
---
|
||||
|
||||
@@ -107,57 +102,27 @@ If you find FuzzForge useful, please **star the repo** to support development!
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ FuzzForge MCP Server │
|
||||
│ │
|
||||
│ Projects Hub Discovery Hub Execution │
|
||||
│ ┌──────────────┐ ┌──────────────────┐ ┌───────────────────┐ │
|
||||
│ │init_project │ │list_hub_servers │ │execute_hub_tool │ │
|
||||
│ │set_assets │ │discover_hub_tools│ │start_hub_server │ │
|
||||
│ │list_results │ │get_tool_schema │ │stop_hub_server │ │
|
||||
│ └──────────────┘ └──────────────────┘ └───────────────────┘ │
|
||||
│ ┌─────────────┐ ┌──────────────┐ ┌────────────────────────┐ │
|
||||
│ │list_modules │ │execute_module│ │start_continuous_module │ │
|
||||
│ └─────────────┘ └──────────────┘ └────────────────────────┘ │
|
||||
└───────────────────────────┬─────────────────────────────────────┘
|
||||
│ Docker/Podman
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────────┐
|
||||
│ MCP Hub Servers │
|
||||
│ │
|
||||
│ ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌───────────┐ │
|
||||
│ │ Binwalk │ │ YARA │ │ Radare2 │ │ Nmap │ │
|
||||
│ │ 6 tools │ │ 5 tools │ │ 32 tools │ │ 8 tools │ │
|
||||
│ └───────────┘ └───────────┘ └───────────┘ └───────────┘ │
|
||||
│ ┌───────────┐ ┌───────────┐ ┌───────────┐ ┌───────────┐ │
|
||||
│ │ Nuclei │ │ SQLMap │ │ Trivy │ │ ... │ │
|
||||
│ │ 7 tools │ │ 8 tools │ │ 7 tools │ │ 36 hubs │ │
|
||||
│ └───────────┘ └───────────┘ └───────────┘ └───────────┘ │
|
||||
└─────────────────────────────────────────────────────────────────┘
|
||||
│ FuzzForge Runner │
|
||||
│ Container Engine (Docker/Podman) │
|
||||
└───────────────────────────┬─────────────────────────────────────┘
|
||||
│
|
||||
┌───────────────────┼───────────────────┐
|
||||
▼ ▼ ▼
|
||||
┌───────────────┐ ┌───────────────┐ ┌───────────────┐
|
||||
│ Module A │ │ Module B │ │ Module C │
|
||||
│ (Container) │ │ (Container) │ │ (Container) │
|
||||
└───────────────┘ └───────────────┘ └───────────────┘
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🔧 MCP Security Hub
|
||||
|
||||
FuzzForge ships with built-in support for the **[MCP Security Hub](https://github.com/FuzzingLabs/mcp-security-hub)** — a collection of 36 production-ready, Dockerized MCP servers covering offensive security:
|
||||
|
||||
| Category | Servers | Examples |
|
||||
|----------|---------|----------|
|
||||
| 🔍 **Reconnaissance** | 8 | Nmap, Masscan, Shodan, WhatWeb |
|
||||
| 🌐 **Web Security** | 6 | Nuclei, SQLMap, ffuf, Nikto |
|
||||
| 🔬 **Binary Analysis** | 6 | Radare2, Binwalk, YARA, Capa, Ghidra |
|
||||
| ⛓️ **Blockchain** | 3 | Medusa, Solazy, DAML Viewer |
|
||||
| ☁️ **Cloud Security** | 3 | Trivy, Prowler, RoadRecon |
|
||||
| 💻 **Code Security** | 1 | Semgrep |
|
||||
| 🔑 **Secrets Detection** | 1 | Gitleaks |
|
||||
| 💥 **Exploitation** | 1 | SearchSploit |
|
||||
| 🎯 **Fuzzing** | 2 | Boofuzz, Dharma |
|
||||
| 🕵️ **OSINT** | 2 | Maigret, DNSTwist |
|
||||
| 🛡️ **Threat Intel** | 2 | VirusTotal, AlienVault OTX |
|
||||
| 🏰 **Active Directory** | 1 | BloodHound |
|
||||
|
||||
> 185+ individual tools accessible through a single MCP connection.
|
||||
|
||||
The hub is open source and can be extended with your own MCP servers. See the [mcp-security-hub repository](https://github.com/FuzzingLabs/mcp-security-hub) for details.
|
||||
|
||||
---
|
||||
|
||||
## 📦 Installation
|
||||
|
||||
### Prerequisites
|
||||
@@ -175,20 +140,11 @@ cd fuzzforge_ai
|
||||
|
||||
# Install dependencies
|
||||
uv sync
|
||||
|
||||
# Build module images
|
||||
make build-modules
|
||||
```
|
||||
|
||||
### Link the Security Hub
|
||||
|
||||
```bash
|
||||
# Clone the MCP Security Hub
|
||||
git clone https://github.com/FuzzingLabs/mcp-security-hub.git ~/.fuzzforge/hubs/mcp-security-hub
|
||||
|
||||
# Build the Docker images for the hub tools
|
||||
./scripts/build-hub-images.sh
|
||||
```
|
||||
|
||||
Or use the terminal UI (`uv run fuzzforge ui`) to link hubs interactively.
|
||||
|
||||
### Configure MCP for Your AI Agent
|
||||
|
||||
```bash
|
||||
@@ -209,20 +165,81 @@ uv run fuzzforge mcp status
|
||||
|
||||
---
|
||||
|
||||
## 🧑💻 Usage
|
||||
## 📦 Modules
|
||||
|
||||
Once installed, just talk to your AI agent:
|
||||
FuzzForge modules are containerized security tools that AI agents can orchestrate. The module ecosystem is designed around a simple principle: **the OSS runtime orchestrates, enterprise modules execute**.
|
||||
|
||||
```
|
||||
"What security tools are available?"
|
||||
"Scan this firmware image for vulnerabilities"
|
||||
"Analyze this binary with radare2"
|
||||
"Run nuclei against https://example.com"
|
||||
### Module Ecosystem
|
||||
|
||||
| | FuzzForge OSS | FuzzForge Enterprise Modules |
|
||||
|---|---|---|
|
||||
| **What** | Runtime & MCP server | Security research modules |
|
||||
| **License** | Apache 2.0 | BSL 1.1 (Business Source License) |
|
||||
| **Compatibility** | ✅ Runs any compatible module | ✅ Works with OSS runtime |
|
||||
|
||||
**Enterprise modules** are developed separately and provide production-ready security tooling:
|
||||
|
||||
| Category | Modules | Description |
|
||||
|----------|---------|-------------|
|
||||
| 🔍 **Static Analysis** | Rust Analyzer, Solidity Analyzer, Cairo Analyzer | Code analysis and fuzzable function detection |
|
||||
| 🎯 **Fuzzing** | Cargo Fuzzer, Honggfuzz, AFL++ | Coverage-guided fuzz testing |
|
||||
| 💥 **Crash Analysis** | Crash Triager, Root Cause Analyzer | Automated crash deduplication and analysis |
|
||||
| 🔐 **Vulnerability Detection** | Pattern Matcher, Taint Analyzer | Security vulnerability scanning |
|
||||
| 📝 **Reporting** | Report Generator, SARIF Exporter | Automated security report generation |
|
||||
|
||||
> 💡 **Build your own modules!** The FuzzForge SDK allows you to create custom modules that integrate seamlessly with the OSS runtime. See [Creating Custom Modules](#-creating-custom-modules).
|
||||
|
||||
### Execution Modes
|
||||
|
||||
Modules run in two execution modes:
|
||||
|
||||
#### One-shot Execution
|
||||
|
||||
Run a module once and get results:
|
||||
|
||||
```python
|
||||
result = execute_module("my-analyzer", assets_path="/path/to/project")
|
||||
```
|
||||
|
||||
The agent will use FuzzForge to discover the right hub tools, chain them into a pipeline, and return results — all without you touching a terminal.
|
||||
#### Continuous Execution
|
||||
|
||||
See the [Usage Guide](USAGE.md) for detailed setup and advanced workflows.
|
||||
For long-running tasks like fuzzing, with real-time metrics:
|
||||
|
||||
```python
|
||||
# Start continuous execution
|
||||
session = start_continuous_module("my-fuzzer",
|
||||
assets_path="/path/to/project",
|
||||
configuration={"target": "my_target"})
|
||||
|
||||
# Check status with live metrics
|
||||
status = get_continuous_status(session["session_id"])
|
||||
|
||||
# Stop and collect results
|
||||
stop_continuous_module(session["session_id"])
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## 🛠️ Creating Custom Modules
|
||||
|
||||
Build your own security modules with the FuzzForge SDK:
|
||||
|
||||
```python
|
||||
from fuzzforge_modules_sdk import FuzzForgeModule, FuzzForgeModuleResults
|
||||
|
||||
class MySecurityModule(FuzzForgeModule):
|
||||
def _run(self, resources):
|
||||
self.emit_event("started", target=resources[0].path)
|
||||
|
||||
# Your analysis logic here
|
||||
results = self.analyze(resources)
|
||||
|
||||
self.emit_progress(100, status="completed",
|
||||
message=f"Analysis complete")
|
||||
return FuzzForgeModuleResults.SUCCESS
|
||||
```
|
||||
|
||||
📖 See the [Module SDK Guide](fuzzforge-modules/fuzzforge-modules-sdk/README.md) for details.
|
||||
|
||||
---
|
||||
|
||||
@@ -230,13 +247,14 @@ See the [Usage Guide](USAGE.md) for detailed setup and advanced workflows.
|
||||
|
||||
```
|
||||
fuzzforge_ai/
|
||||
├── fuzzforge-mcp/ # MCP server — the core of FuzzForge
|
||||
├── fuzzforge-cli/ # Command-line interface & terminal UI
|
||||
├── fuzzforge-cli/ # Command-line interface
|
||||
├── fuzzforge-common/ # Shared abstractions (containers, storage)
|
||||
├── fuzzforge-runner/ # Container execution engine (Docker/Podman)
|
||||
├── fuzzforge-tests/ # Integration tests
|
||||
├── mcp-security-hub/ # Default hub: 36 offensive security MCP servers
|
||||
└── scripts/ # Hub image build scripts
|
||||
├── fuzzforge-mcp/ # MCP server for AI agents
|
||||
├── fuzzforge-modules/ # Security modules
|
||||
│ └── fuzzforge-modules-sdk/ # Module development SDK
|
||||
├── fuzzforge-runner/ # Local execution engine
|
||||
├── fuzzforge-types/ # Type definitions & schemas
|
||||
└── demo/ # Demo projects for testing
|
||||
```
|
||||
|
||||
---
|
||||
@@ -248,7 +266,7 @@ We welcome contributions from the community!
|
||||
- 🐛 Report bugs via [GitHub Issues](../../issues)
|
||||
- 💡 Suggest features or improvements
|
||||
- 🔧 Submit pull requests
|
||||
- 🔌 Add new MCP servers to the [Security Hub](https://github.com/FuzzingLabs/mcp-security-hub)
|
||||
- 📦 Share your custom modules
|
||||
|
||||
See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
@@ -256,11 +274,10 @@ See [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines.
|
||||
|
||||
## 📄 License
|
||||
|
||||
BSL 1.1 - See [LICENSE](LICENSE) for details.
|
||||
Apache 2.0 - See [LICENSE](LICENSE) for details.
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
<strong>Maintained by <a href="https://fuzzinglabs.com">FuzzingLabs</a></strong>
|
||||
<br>
|
||||
<strong>Built with ❤️ by <a href="https://fuzzinglabs.com">FuzzingLabs</a></strong>
|
||||
</p>
|
||||
125
ROADMAP.md
125
ROADMAP.md
@@ -1,125 +0,0 @@
|
||||
# FuzzForge AI Roadmap
|
||||
|
||||
This document outlines the planned features and development direction for FuzzForge AI.
|
||||
|
||||
---
|
||||
|
||||
## 🎯 Upcoming Features
|
||||
|
||||
### 1. MCP Security Hub Integration
|
||||
|
||||
**Status:** 🔄 Planned
|
||||
|
||||
Integrate [mcp-security-hub](https://github.com/FuzzingLabs/mcp-security-hub) tools into FuzzForge, giving AI agents access to 28 MCP servers and 163+ security tools through a unified interface.
|
||||
|
||||
#### How It Works
|
||||
|
||||
Unlike native FuzzForge modules (built with the SDK), mcp-security-hub tools are **standalone MCP servers**. The integration will bridge these tools so they can be:
|
||||
|
||||
- Discovered via `list_modules` alongside native modules
|
||||
- Executed through FuzzForge's orchestration layer
|
||||
- Chained with native modules in workflows
|
||||
|
||||
| Aspect | Native Modules | MCP Hub Tools |
|
||||
|--------|----------------|---------------|
|
||||
| **Runtime** | FuzzForge SDK container | Standalone MCP server container |
|
||||
| **Protocol** | Direct execution | MCP-to-MCP bridge |
|
||||
| **Configuration** | Module config | Tool-specific args |
|
||||
| **Output** | FuzzForge results format | Tool-native format (normalized) |
|
||||
|
||||
#### Goals
|
||||
|
||||
- Unified discovery of all available tools (native + hub)
|
||||
- Orchestrate hub tools through FuzzForge's workflow engine
|
||||
- Normalize outputs for consistent result handling
|
||||
- No modification required to mcp-security-hub tools
|
||||
|
||||
#### Planned Tool Categories
|
||||
|
||||
| Category | Tools | Example Use Cases |
|
||||
|----------|-------|-------------------|
|
||||
| **Reconnaissance** | nmap, masscan, whatweb, shodan | Network scanning, service discovery |
|
||||
| **Web Security** | nuclei, sqlmap, ffuf, nikto | Vulnerability scanning, fuzzing |
|
||||
| **Binary Analysis** | radare2, binwalk, yara, capa, ghidra | Reverse engineering, malware analysis |
|
||||
| **Cloud Security** | trivy, prowler | Container scanning, cloud auditing |
|
||||
| **Secrets Detection** | gitleaks | Credential scanning |
|
||||
| **OSINT** | maigret, dnstwist | Username tracking, typosquatting |
|
||||
| **Threat Intel** | virustotal, otx | Malware analysis, IOC lookup |
|
||||
|
||||
#### Example Workflow
|
||||
|
||||
```
|
||||
You: "Scan example.com for vulnerabilities and analyze any suspicious binaries"
|
||||
|
||||
AI Agent:
|
||||
1. Uses nmap module for port discovery
|
||||
2. Uses nuclei module for vulnerability scanning
|
||||
3. Uses binwalk module to extract firmware
|
||||
4. Uses yara module for malware detection
|
||||
5. Generates consolidated report
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. User Interface
|
||||
|
||||
**Status:** 🔄 Planned
|
||||
|
||||
A graphical interface to manage FuzzForge without the command line.
|
||||
|
||||
#### Goals
|
||||
|
||||
- Provide an alternative to CLI for users who prefer visual tools
|
||||
- Make configuration and monitoring more accessible
|
||||
- Complement (not replace) the CLI experience
|
||||
|
||||
#### Planned Capabilities
|
||||
|
||||
| Capability | Description |
|
||||
|------------|-------------|
|
||||
| **Configuration** | Change MCP server settings, engine options, paths |
|
||||
| **Module Management** | Browse, configure, and launch modules |
|
||||
| **Execution Monitoring** | View running tasks, logs, progress, metrics |
|
||||
| **Project Overview** | Manage projects and browse execution results |
|
||||
| **Workflow Management** | Create and run multi-module workflows |
|
||||
|
||||
---
|
||||
|
||||
## 📋 Backlog
|
||||
|
||||
Features under consideration for future releases:
|
||||
|
||||
| Feature | Description |
|
||||
|---------|-------------|
|
||||
| **Module Marketplace** | Browse and install community modules |
|
||||
| **Scheduled Executions** | Run modules on a schedule (cron-style) |
|
||||
| **Team Collaboration** | Share projects, results, and workflows |
|
||||
| **Reporting Engine** | Generate PDF/HTML security reports |
|
||||
| **Notifications** | Slack, Discord, email alerts for findings |
|
||||
|
||||
---
|
||||
|
||||
## ✅ Completed
|
||||
|
||||
| Feature | Version | Date |
|
||||
|---------|---------|------|
|
||||
| Docker as default engine | 0.1.0 | Jan 2026 |
|
||||
| MCP server for AI agents | 0.1.0 | Jan 2026 |
|
||||
| CLI for project management | 0.1.0 | Jan 2026 |
|
||||
| Continuous execution mode | 0.1.0 | Jan 2026 |
|
||||
| Workflow orchestration | 0.1.0 | Jan 2026 |
|
||||
|
||||
---
|
||||
|
||||
## 💬 Feedback
|
||||
|
||||
Have suggestions for the roadmap?
|
||||
|
||||
- Open an issue on [GitHub](https://github.com/FuzzingLabs/fuzzforge_ai/issues)
|
||||
- Join our [Discord](https://discord.gg/8XEX33UUwZ)
|
||||
|
||||
---
|
||||
|
||||
<p align="center">
|
||||
<strong>Built with ❤️ by <a href="https://fuzzinglabs.com">FuzzingLabs</a></strong>
|
||||
</p>
|
||||
537
USAGE.md
537
USAGE.md
@@ -1,9 +1,8 @@
|
||||
# FuzzForge AI Usage Guide
|
||||
# FuzzForge OSS Usage Guide
|
||||
|
||||
This guide covers everything you need to know to get started with FuzzForge AI — from installation to linking your first MCP hub and running security research workflows with AI.
|
||||
This guide covers everything you need to know to get started with FuzzForge OSS - from installation to running your first security research workflow with AI.
|
||||
|
||||
> **FuzzForge is designed to be used with AI agents** (GitHub Copilot, Claude, etc.) via MCP.
|
||||
> A terminal UI (`fuzzforge ui`) is provided for managing agents and hubs.
|
||||
> The CLI is available for advanced users but the primary experience is through natural language interaction with your AI assistant.
|
||||
|
||||
---
|
||||
@@ -13,17 +12,8 @@ This guide covers everything you need to know to get started with FuzzForge AI
|
||||
- [Quick Start](#quick-start)
|
||||
- [Prerequisites](#prerequisites)
|
||||
- [Installation](#installation)
|
||||
- [Terminal UI](#terminal-ui)
|
||||
- [Launching the UI](#launching-the-ui)
|
||||
- [Dashboard](#dashboard)
|
||||
- [Agent Setup](#agent-setup)
|
||||
- [Hub Manager](#hub-manager)
|
||||
- [MCP Hub System](#mcp-hub-system)
|
||||
- [What is an MCP Hub?](#what-is-an-mcp-hub)
|
||||
- [FuzzingLabs Security Hub](#fuzzinglabs-security-hub)
|
||||
- [Linking a Custom Hub](#linking-a-custom-hub)
|
||||
- [Building Hub Images](#building-hub-images)
|
||||
- [MCP Server Configuration (CLI)](#mcp-server-configuration-cli)
|
||||
- [Building Modules](#building-modules)
|
||||
- [MCP Server Configuration](#mcp-server-configuration)
|
||||
- [GitHub Copilot](#github-copilot)
|
||||
- [Claude Code (CLI)](#claude-code-cli)
|
||||
- [Claude Desktop](#claude-desktop)
|
||||
@@ -37,43 +27,37 @@ This guide covers everything you need to know to get started with FuzzForge AI
|
||||
## Quick Start
|
||||
|
||||
> **Prerequisites:** You need [uv](https://docs.astral.sh/uv/) and [Docker](https://docs.docker.com/get-docker/) installed.
|
||||
> See the [Prerequisites](#prerequisites) section for details.
|
||||
> See the [Prerequisites](#prerequisites) section for installation instructions.
|
||||
|
||||
```bash
|
||||
# 1. Clone and install
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge_ai.git
|
||||
cd fuzzforge_ai
|
||||
uv sync
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge-oss.git
|
||||
cd fuzzforge-oss
|
||||
uv sync --all-extras
|
||||
|
||||
# 2. Launch the terminal UI
|
||||
uv run fuzzforge ui
|
||||
# 2. Build the SDK and module images (one-time setup)
|
||||
# First, build the SDK base image and wheel
|
||||
cd fuzzforge-modules/fuzzforge-modules-sdk
|
||||
uv build
|
||||
mkdir -p .wheels
|
||||
cp ../../dist/fuzzforge_modules_sdk-*.whl .wheels/
|
||||
cd ../..
|
||||
docker build -t localhost/fuzzforge-modules-sdk:0.1.0 fuzzforge-modules/fuzzforge-modules-sdk/
|
||||
|
||||
# 3. Press 'h' → "FuzzingLabs Hub" to clone & link the default security hub
|
||||
# 4. Select an agent row and press Enter to install the MCP server for your agent
|
||||
# 5. Build the Docker images for the hub tools (required before tools can run)
|
||||
./scripts/build-hub-images.sh
|
||||
# Then build all modules
|
||||
make build-modules
|
||||
|
||||
# 6. Restart your AI agent and start talking:
|
||||
# "What security tools are available?"
|
||||
# "Scan this binary with binwalk and yara"
|
||||
# "Analyze this Rust crate for fuzzable functions"
|
||||
```
|
||||
|
||||
Or do it entirely from the command line:
|
||||
|
||||
```bash
|
||||
# Install MCP for your AI agent
|
||||
uv run fuzzforge mcp install copilot # For VS Code + GitHub Copilot
|
||||
# 3. Install MCP for your AI agent
|
||||
uv run fuzzforge mcp install copilot # For VS Code + GitHub Copilot
|
||||
# OR
|
||||
uv run fuzzforge mcp install claude-code # For Claude Code CLI
|
||||
uv run fuzzforge mcp install claude-code # For Claude Code CLI
|
||||
|
||||
# Clone and link the default security hub
|
||||
git clone git@github.com:FuzzingLabs/mcp-security-hub.git ~/.fuzzforge/hubs/mcp-security-hub
|
||||
# 4. Restart your AI agent (VS Code, Claude, etc.)
|
||||
|
||||
# Build hub tool images (required — tools only run once their image is built)
|
||||
./scripts/build-hub-images.sh
|
||||
|
||||
# Restart your AI agent — done!
|
||||
# 5. Start talking to your AI:
|
||||
# "List available FuzzForge modules"
|
||||
# "Analyze this Rust crate for fuzzable functions"
|
||||
# "Start fuzzing the parse_input function"
|
||||
```
|
||||
|
||||
> **Note:** FuzzForge uses Docker by default. Podman is also supported via `--engine podman`.
|
||||
@@ -82,12 +66,11 @@ git clone git@github.com:FuzzingLabs/mcp-security-hub.git ~/.fuzzforge/hubs/mcp-
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before installing FuzzForge AI, ensure you have:
|
||||
Before installing FuzzForge OSS, ensure you have:
|
||||
|
||||
- **Python 3.12+** — [Download Python](https://www.python.org/downloads/)
|
||||
- **uv** package manager — [Install uv](https://docs.astral.sh/uv/)
|
||||
- **Docker** — Container runtime ([Install Docker](https://docs.docker.com/get-docker/))
|
||||
- **Git** — For cloning hub repositories
|
||||
- **Python 3.12+** - [Download Python](https://www.python.org/downloads/)
|
||||
- **uv** package manager - [Install uv](https://docs.astral.sh/uv/)
|
||||
- **Docker** - Container runtime ([Install Docker](https://docs.docker.com/get-docker/))
|
||||
|
||||
### Installing uv
|
||||
|
||||
@@ -121,17 +104,22 @@ sudo usermod -aG docker $USER
|
||||
### 1. Clone the Repository
|
||||
|
||||
```bash
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge_ai.git
|
||||
cd fuzzforge_ai
|
||||
git clone https://github.com/FuzzingLabs/fuzzforge-oss.git
|
||||
cd fuzzforge-oss
|
||||
```
|
||||
|
||||
### 2. Install Dependencies
|
||||
|
||||
```bash
|
||||
uv sync
|
||||
# Install all workspace dependencies including the CLI
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
This installs all FuzzForge components in a virtual environment.
|
||||
This installs all FuzzForge components in a virtual environment, including:
|
||||
- `fuzzforge-cli` - Command-line interface
|
||||
- `fuzzforge-mcp` - MCP server
|
||||
- `fuzzforge-runner` - Module execution engine
|
||||
- All supporting libraries
|
||||
|
||||
### 3. Verify Installation
|
||||
|
||||
@@ -141,164 +129,110 @@ uv run fuzzforge --help
|
||||
|
||||
---
|
||||
|
||||
## Terminal UI
|
||||
## Building Modules
|
||||
|
||||
FuzzForge ships with a terminal user interface (TUI) built on [Textual](https://textual.textualize.io/) for managing AI agents and MCP hub servers from a single dashboard.
|
||||
FuzzForge modules are containerized security tools. After cloning, you need to build them once.
|
||||
|
||||
### Launching the UI
|
||||
> **Important:** The modules depend on a base SDK image that must be built first.
|
||||
|
||||
### Build the SDK Base Image (Required First)
|
||||
|
||||
```bash
|
||||
uv run fuzzforge ui
|
||||
# 1. Build the SDK Python package wheel
|
||||
cd fuzzforge-modules/fuzzforge-modules-sdk
|
||||
uv build
|
||||
|
||||
# 2. Copy wheel to the .wheels directory
|
||||
mkdir -p .wheels
|
||||
cp ../../dist/fuzzforge_modules_sdk-*.whl .wheels/
|
||||
|
||||
# 3. Build the SDK Docker image
|
||||
cd ../..
|
||||
docker build -t localhost/fuzzforge-modules-sdk:0.1.0 fuzzforge-modules/fuzzforge-modules-sdk/
|
||||
```
|
||||
|
||||
### Dashboard
|
||||
### Build All Modules
|
||||
|
||||
The main screen is split into two panels:
|
||||
Once the SDK is built, build all modules:
|
||||
|
||||
| Panel | Content |
|
||||
|-------|---------|
|
||||
| **AI Agents** (left) | Shows GitHub Copilot, Claude Desktop, and Claude Code with live link status and config file path |
|
||||
| **Hub Servers** (right) | Shows all configured MCP hub tools with Docker image name, source hub, and build status (✓ Ready / ✗ Not built) |
|
||||
```bash
|
||||
# From the fuzzforge-oss directory
|
||||
make build-modules
|
||||
```
|
||||
|
||||
### Keyboard Shortcuts
|
||||
This builds all available modules:
|
||||
- `fuzzforge-rust-analyzer` - Analyzes Rust code for fuzzable functions
|
||||
- `fuzzforge-cargo-fuzzer` - Runs cargo-fuzz on Rust crates
|
||||
- `fuzzforge-harness-validator` - Validates generated fuzzing harnesses
|
||||
- `fuzzforge-crash-analyzer` - Analyzes crash inputs
|
||||
|
||||
| Key | Action |
|
||||
|-----|--------|
|
||||
| `Enter` | **Select** — Act on the selected row (setup/unlink an agent) |
|
||||
| `h` | **Hub Manager** — Open the hub management screen |
|
||||
| `r` | **Refresh** — Re-check all agent and hub statuses |
|
||||
| `q` | **Quit** |
|
||||
> **Note:** The first build will take several minutes as it downloads Rust toolchains and dependencies.
|
||||
|
||||
### Agent Setup
|
||||
### Build a Single Module
|
||||
|
||||
Select an agent row in the AI Agents table and press `Enter`:
|
||||
```bash
|
||||
# Build a specific module (after SDK is built)
|
||||
cd fuzzforge-modules/rust-analyzer
|
||||
docker build -t fuzzforge-rust-analyzer:0.1.0 .
|
||||
```
|
||||
|
||||
- **If the agent is not linked** → a setup dialog opens asking for your container engine (Docker or Podman), then installs the FuzzForge MCP configuration
|
||||
- **If the agent is already linked** → a confirmation dialog offers to unlink it (removes the `fuzzforge` entry without touching other MCP servers)
|
||||
### Verify Modules are Built
|
||||
|
||||
The setup auto-detects:
|
||||
- FuzzForge installation root
|
||||
- Docker/Podman socket path
|
||||
- Hub configuration from `hub-config.json`
|
||||
```bash
|
||||
# List built module images
|
||||
docker images | grep fuzzforge
|
||||
```
|
||||
|
||||
### Hub Manager
|
||||
You should see at least 5 images:
|
||||
```
|
||||
localhost/fuzzforge-modules-sdk 0.1.0 abc123def456 5 minutes ago 465 MB
|
||||
fuzzforge-rust-analyzer 0.1.0 def789ghi012 2 minutes ago 2.0 GB
|
||||
fuzzforge-cargo-fuzzer 0.1.0 ghi012jkl345 2 minutes ago 1.9 GB
|
||||
fuzzforge-harness-validator 0.1.0 jkl345mno678 2 minutes ago 1.9 GB
|
||||
fuzzforge-crash-analyzer 0.1.0 mno678pqr901 2 minutes ago 517 MB
|
||||
```
|
||||
|
||||
Press `h` to open the hub manager. This is where you manage your MCP hub repositories:
|
||||
### Verify CLI Installation
|
||||
|
||||
| Button | Action |
|
||||
|--------|--------|
|
||||
| **FuzzingLabs Hub** | One-click clone of the official [mcp-security-hub](https://github.com/FuzzingLabs/mcp-security-hub) repository — clones to `~/.fuzzforge/hubs/mcp-security-hub`, scans for tools, and registers them in `hub-config.json` |
|
||||
| **Link Path** | Link any local directory as a hub — enter a name and path, FuzzForge scans it for `category/tool-name/Dockerfile` patterns |
|
||||
| **Clone URL** | Clone any git repository and link it as a hub |
|
||||
| **Remove** | Unlink the selected hub and remove its servers from the configuration |
|
||||
```bash
|
||||
# Test the CLI
|
||||
uv run fuzzforge --help
|
||||
|
||||
The hub table shows:
|
||||
- **Name** — Hub name (★ prefix for the default hub)
|
||||
- **Path** — Local directory path
|
||||
- **Servers** — Number of MCP tools discovered
|
||||
- **Source** — Git URL or "local"
|
||||
# List modules (with environment variable for modules path)
|
||||
FUZZFORGE_MODULES_PATH=/path/to/fuzzforge-modules uv run fuzzforge modules list
|
||||
```
|
||||
|
||||
You should see 4 available modules listed.
|
||||
|
||||
---
|
||||
|
||||
## MCP Hub System
|
||||
## MCP Server Configuration
|
||||
|
||||
### What is an MCP Hub?
|
||||
|
||||
An MCP hub is a directory containing one or more containerized MCP tools, organized by category:
|
||||
|
||||
```
|
||||
my-hub/
|
||||
├── category-a/
|
||||
│ ├── tool-1/
|
||||
│ │ └── Dockerfile
|
||||
│ └── tool-2/
|
||||
│ └── Dockerfile
|
||||
├── category-b/
|
||||
│ └── tool-3/
|
||||
│ └── Dockerfile
|
||||
└── ...
|
||||
```
|
||||
|
||||
FuzzForge scans for the pattern `category/tool-name/Dockerfile` and auto-generates server configuration entries for each discovered tool.
|
||||
|
||||
### FuzzingLabs Security Hub
|
||||
|
||||
The default MCP hub is [mcp-security-hub](https://github.com/FuzzingLabs/mcp-security-hub), maintained by FuzzingLabs. It includes **40+ security tools** across categories:
|
||||
|
||||
| Category | Tools |
|
||||
|----------|-------|
|
||||
| **Reconnaissance** | nmap, masscan, shodan, zoomeye, whatweb, pd-tools, externalattacker, networksdb |
|
||||
| **Binary Analysis** | binwalk, yara, capa, radare2, ghidra, ida |
|
||||
| **Code Security** | semgrep, rust-analyzer, harness-tester, cargo-fuzzer, crash-analyzer |
|
||||
| **Web Security** | nuclei, nikto, sqlmap, ffuf, burp, waybackurls |
|
||||
| **Fuzzing** | boofuzz, dharma |
|
||||
| **Exploitation** | searchsploit |
|
||||
| **Secrets** | gitleaks |
|
||||
| **Cloud Security** | trivy, prowler, roadrecon |
|
||||
| **OSINT** | maigret, dnstwist |
|
||||
| **Threat Intel** | virustotal, otx |
|
||||
| **Password Cracking** | hashcat |
|
||||
| **Blockchain** | medusa, solazy, daml-viewer |
|
||||
|
||||
**Clone it via the UI:**
|
||||
|
||||
1. `uv run fuzzforge ui`
|
||||
2. Press `h` → click **FuzzingLabs Hub**
|
||||
3. Wait for the clone to finish — servers are auto-registered
|
||||
|
||||
**Or clone manually:**
|
||||
|
||||
```bash
|
||||
git clone git@github.com:FuzzingLabs/mcp-security-hub.git ~/.fuzzforge/hubs/mcp-security-hub
|
||||
```
|
||||
|
||||
### Linking a Custom Hub
|
||||
|
||||
You can link any directory that follows the `category/tool-name/Dockerfile` layout:
|
||||
|
||||
**Via the UI:**
|
||||
|
||||
1. Press `h` → **Link Path**
|
||||
2. Enter a name and the directory path
|
||||
|
||||
**Via the CLI (planned):** Not yet available — use the UI.
|
||||
|
||||
### Building Hub Images
|
||||
|
||||
After linking a hub, you need to build the Docker images before the tools can be used:
|
||||
|
||||
```bash
|
||||
# Build all images from the default security hub
|
||||
./scripts/build-hub-images.sh
|
||||
|
||||
# Or build a single tool image
|
||||
docker build -t semgrep-mcp:latest mcp-security-hub/code-security/semgrep-mcp/
|
||||
```
|
||||
|
||||
The dashboard hub table shows ✓ Ready for built images and ✗ Not built for missing ones.
|
||||
|
||||
---
|
||||
|
||||
## MCP Server Configuration (CLI)
|
||||
|
||||
If you prefer the command line over the TUI, you can configure agents directly:
|
||||
FuzzForge integrates with AI agents through the Model Context Protocol (MCP). Configure your preferred AI agent to use FuzzForge tools.
|
||||
|
||||
### GitHub Copilot
|
||||
|
||||
```bash
|
||||
# That's it! Just run this command:
|
||||
uv run fuzzforge mcp install copilot
|
||||
```
|
||||
|
||||
The command auto-detects:
|
||||
- **FuzzForge root** — Where FuzzForge is installed
|
||||
- **Docker socket** — Auto-detects `/var/run/docker.sock`
|
||||
The command auto-detects everything:
|
||||
- **FuzzForge root** - Where FuzzForge is installed
|
||||
- **Modules path** - Defaults to `fuzzforge-oss/fuzzforge-modules`
|
||||
- **Docker socket** - Auto-detects `/var/run/docker.sock`
|
||||
|
||||
**Optional overrides:**
|
||||
**Optional overrides** (usually not needed):
|
||||
```bash
|
||||
uv run fuzzforge mcp install copilot --engine podman
|
||||
uv run fuzzforge mcp install copilot \
|
||||
--modules /path/to/modules \
|
||||
--engine podman # if using Podman instead of Docker
|
||||
```
|
||||
|
||||
**After installation:** Restart VS Code. FuzzForge tools appear in GitHub Copilot Chat.
|
||||
**After installation:**
|
||||
1. Restart VS Code
|
||||
2. Open GitHub Copilot Chat
|
||||
3. FuzzForge tools are now available!
|
||||
|
||||
### Claude Code (CLI)
|
||||
|
||||
@@ -306,89 +240,158 @@ uv run fuzzforge mcp install copilot --engine podman
|
||||
uv run fuzzforge mcp install claude-code
|
||||
```
|
||||
|
||||
Installs to `~/.claude.json`. FuzzForge tools are available from any directory after restarting Claude.
|
||||
Installs to `~/.claude.json` so FuzzForge tools are available from any directory.
|
||||
|
||||
**After installation:**
|
||||
1. Run `claude` from any directory
|
||||
2. FuzzForge tools are now available!
|
||||
|
||||
### Claude Desktop
|
||||
|
||||
```bash
|
||||
# Automatic installation
|
||||
uv run fuzzforge mcp install claude-desktop
|
||||
|
||||
# Verify
|
||||
uv run fuzzforge mcp status
|
||||
```
|
||||
|
||||
**After installation:** Restart Claude Desktop.
|
||||
**After installation:**
|
||||
1. Restart Claude Desktop
|
||||
2. FuzzForge tools are now available!
|
||||
|
||||
### Check Status
|
||||
### Check MCP Status
|
||||
|
||||
```bash
|
||||
uv run fuzzforge mcp status
|
||||
```
|
||||
|
||||
### Remove Configuration
|
||||
Shows configuration status for all supported AI agents:
|
||||
|
||||
```
|
||||
┏━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━┓
|
||||
┃ Agent ┃ Config Path ┃ Status ┃ FuzzForge Configured ┃
|
||||
┡━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━┩
|
||||
│ GitHub Copilot │ ~/.config/Code/User/mcp.json │ ✓ Exists │ ✓ Yes │
|
||||
│ Claude Desktop │ ~/.config/Claude/claude_desktop_config... │ Not found │ - │
|
||||
│ Claude Code │ ~/.claude.json │ ✓ Exists │ ✓ Yes │
|
||||
└──────────────────────┴───────────────────────────────────────────┴──────────────┴─────────────────────────┘
|
||||
```
|
||||
|
||||
### Generate Config Without Installing
|
||||
|
||||
```bash
|
||||
# Preview the configuration that would be installed
|
||||
uv run fuzzforge mcp generate copilot
|
||||
uv run fuzzforge mcp generate claude-desktop
|
||||
uv run fuzzforge mcp generate claude-code
|
||||
```
|
||||
|
||||
### Remove MCP Configuration
|
||||
|
||||
```bash
|
||||
uv run fuzzforge mcp uninstall copilot
|
||||
uv run fuzzforge mcp uninstall claude-code
|
||||
uv run fuzzforge mcp uninstall claude-desktop
|
||||
uv run fuzzforge mcp uninstall claude-code
|
||||
```
|
||||
|
||||
### Test MCP Server
|
||||
|
||||
After installation, verify the MCP server is working:
|
||||
|
||||
```bash
|
||||
# Check if MCP server process is running (in VS Code)
|
||||
ps aux | grep fuzzforge_mcp
|
||||
```
|
||||
|
||||
You can also test the MCP integration directly in your AI agent:
|
||||
- **GitHub Copilot**: Ask "List available FuzzForge modules"
|
||||
- **Claude**: Ask "What FuzzForge modules are available?"
|
||||
|
||||
The AI should respond with a list of 4 modules (rust-analyzer, cargo-fuzzer, harness-validator, crash-analyzer).
|
||||
|
||||
---
|
||||
|
||||
## Using FuzzForge with AI
|
||||
|
||||
Once MCP is configured and hub images are built, interact with FuzzForge through natural language with your AI assistant.
|
||||
Once MCP is configured, you interact with FuzzForge through natural language with your AI assistant.
|
||||
|
||||
### Example Conversations
|
||||
|
||||
**Discover available tools:**
|
||||
```
|
||||
You: "What security tools are available in FuzzForge?"
|
||||
AI: Queries hub tools → "I found 15 tools across categories: nmap for
|
||||
port scanning, binwalk for firmware analysis, semgrep for code
|
||||
scanning, cargo-fuzzer for Rust fuzzing..."
|
||||
You: "What FuzzForge modules are available?"
|
||||
AI: Uses list_modules → "I found 4 modules: rust-analyzer, cargo-fuzzer,
|
||||
harness-validator, and crash-analyzer..."
|
||||
```
|
||||
|
||||
**Analyze a binary:**
|
||||
```
|
||||
You: "Extract and analyze this firmware image"
|
||||
AI: Uses binwalk to extract → yara for pattern matching → capa for
|
||||
capability detection → "Found 3 embedded filesystems, 2 YARA
|
||||
matches for known vulnerabilities..."
|
||||
```
|
||||
|
||||
**Fuzz Rust code:**
|
||||
**Analyze code for fuzzing targets:**
|
||||
```
|
||||
You: "Analyze this Rust crate for functions I should fuzz"
|
||||
AI: Uses rust-analyzer → "Found 3 fuzzable entry points..."
|
||||
AI: Uses execute_module("rust-analyzer") → "I found 3 good fuzzing candidates:
|
||||
- parse_input() in src/parser.rs - handles untrusted input
|
||||
- decode_message() in src/codec.rs - complex parsing logic
|
||||
..."
|
||||
```
|
||||
|
||||
**Generate and validate harnesses:**
|
||||
```
|
||||
You: "Generate a fuzzing harness for the parse_input function"
|
||||
AI: Creates harness code, then uses execute_module("harness-validator")
|
||||
→ "Here's a harness that compiles successfully..."
|
||||
```
|
||||
|
||||
**Run continuous fuzzing:**
|
||||
```
|
||||
You: "Start fuzzing parse_input for 10 minutes"
|
||||
AI: Uses cargo-fuzzer → "Fuzzing session started. 2 crashes found..."
|
||||
AI: Uses start_continuous_module("cargo-fuzzer") → "Started fuzzing session abc123"
|
||||
|
||||
You: "How's the fuzzing going?"
|
||||
AI: Uses get_continuous_status("abc123") → "Running for 5 minutes:
|
||||
- 150,000 executions
|
||||
- 2 crashes found
|
||||
- 45% edge coverage"
|
||||
|
||||
You: "Stop and show me the crashes"
|
||||
AI: Uses stop_continuous_module("abc123") → "Found 2 unique crashes..."
|
||||
```
|
||||
|
||||
**Scan for vulnerabilities:**
|
||||
```
|
||||
You: "Scan this codebase with semgrep for security issues"
|
||||
AI: Uses semgrep-mcp → "Found 5 findings: 2 high severity SQL injection
|
||||
patterns, 3 medium severity hardcoded secrets..."
|
||||
```
|
||||
### Available MCP Tools
|
||||
|
||||
| Tool | Description |
|
||||
|------|-------------|
|
||||
| `list_modules` | List all available security modules |
|
||||
| `execute_module` | Run a module once and get results |
|
||||
| `start_continuous_module` | Start a long-running module (e.g., fuzzing) |
|
||||
| `get_continuous_status` | Check status of a continuous session |
|
||||
| `stop_continuous_module` | Stop a continuous session |
|
||||
| `list_continuous_sessions` | List all active sessions |
|
||||
| `get_execution_results` | Retrieve results from an execution |
|
||||
| `execute_workflow` | Run a multi-step workflow |
|
||||
|
||||
---
|
||||
|
||||
## CLI Reference
|
||||
|
||||
### UI Command
|
||||
|
||||
```bash
|
||||
uv run fuzzforge ui # Launch the terminal dashboard
|
||||
```
|
||||
> **Note:** The CLI is for advanced users. Most users should interact with FuzzForge through their AI assistant.
|
||||
|
||||
### MCP Commands
|
||||
|
||||
```bash
|
||||
uv run fuzzforge mcp status # Check agent configuration status
|
||||
uv run fuzzforge mcp install <agent> # Install MCP config (copilot|claude-code|claude-desktop)
|
||||
uv run fuzzforge mcp status # Check configuration status
|
||||
uv run fuzzforge mcp install <agent> # Install MCP config
|
||||
uv run fuzzforge mcp uninstall <agent> # Remove MCP config
|
||||
uv run fuzzforge mcp generate <agent> # Preview config without installing
|
||||
```
|
||||
|
||||
### Module Commands
|
||||
|
||||
```bash
|
||||
uv run fuzzforge modules list # List available modules
|
||||
uv run fuzzforge modules info <module> # Show module details
|
||||
uv run fuzzforge modules run <module> --assets . # Run a module
|
||||
```
|
||||
|
||||
### Project Commands
|
||||
|
||||
```bash
|
||||
@@ -405,20 +408,14 @@ uv run fuzzforge project results <id> # Get execution results
|
||||
Configure FuzzForge using environment variables:
|
||||
|
||||
```bash
|
||||
# Override the FuzzForge installation root (auto-detected from cwd by default)
|
||||
export FUZZFORGE_ROOT=/path/to/fuzzforge_ai
|
||||
|
||||
# Override the user-global data directory (default: ~/.fuzzforge)
|
||||
# Useful for isolated testing without touching your real installation
|
||||
export FUZZFORGE_USER_DIR=/tmp/my-fuzzforge-test
|
||||
|
||||
# Storage path for projects and execution results (default: <workspace>/.fuzzforge/storage)
|
||||
export FUZZFORGE_STORAGE__PATH=/path/to/storage
|
||||
# Project paths
|
||||
export FUZZFORGE_MODULES_PATH=/path/to/modules
|
||||
export FUZZFORGE_STORAGE_PATH=/path/to/storage
|
||||
|
||||
# Container engine (Docker is default)
|
||||
export FUZZFORGE_ENGINE__TYPE=docker # or podman
|
||||
|
||||
# Podman-specific container storage paths
|
||||
# Podman-specific settings (only needed if using Podman under Snap)
|
||||
export FUZZFORGE_ENGINE__GRAPHROOT=~/.fuzzforge/containers/storage
|
||||
export FUZZFORGE_ENGINE__RUNROOT=~/.fuzzforge/containers/run
|
||||
```
|
||||
@@ -452,62 +449,112 @@ Error: Permission denied connecting to Docker socket
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
# Add your user to the docker group
|
||||
sudo usermod -aG docker $USER
|
||||
# Log out and back in, then verify:
|
||||
|
||||
# Log out and back in for changes to take effect
|
||||
# Then verify:
|
||||
docker run --rm hello-world
|
||||
```
|
||||
|
||||
### Hub Images Not Built
|
||||
### Module Build Fails: "fuzzforge-modules-sdk not found"
|
||||
|
||||
The dashboard shows ✗ Not built for tools:
|
||||
|
||||
```bash
|
||||
# Build all hub images
|
||||
./scripts/build-hub-images.sh
|
||||
|
||||
# Or build a single tool
|
||||
docker build -t <tool-name>:latest mcp-security-hub/<category>/<tool-name>/
|
||||
```
|
||||
ERROR: failed to solve: localhost/fuzzforge-modules-sdk:0.1.0: not found
|
||||
```
|
||||
|
||||
**Solution:** You need to build the SDK base image first:
|
||||
```bash
|
||||
# 1. Build SDK wheel
|
||||
cd fuzzforge-modules/fuzzforge-modules-sdk
|
||||
uv build
|
||||
mkdir -p .wheels
|
||||
cp ../../dist/fuzzforge_modules_sdk-*.whl .wheels/
|
||||
|
||||
# 2. Build SDK Docker image
|
||||
cd ../..
|
||||
docker build -t localhost/fuzzforge-modules-sdk:0.1.0 fuzzforge-modules/fuzzforge-modules-sdk/
|
||||
|
||||
# 3. Now build modules
|
||||
make build-modules
|
||||
```
|
||||
|
||||
### fuzzforge Command Not Found
|
||||
|
||||
```
|
||||
error: Failed to spawn: `fuzzforge`
|
||||
```
|
||||
|
||||
**Solution:** Install with `--all-extras` to include the CLI:
|
||||
```bash
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
### No Modules Found
|
||||
|
||||
```
|
||||
No modules found.
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
1. Build the SDK first (see above)
|
||||
2. Build the modules: `make build-modules`
|
||||
3. Check the modules path with environment variable:
|
||||
```bash
|
||||
FUZZFORGE_MODULES_PATH=/path/to/fuzzforge-modules uv run fuzzforge modules list
|
||||
```
|
||||
4. Verify images exist: `docker images | grep fuzzforge`
|
||||
|
||||
### MCP Server Not Starting
|
||||
|
||||
Check the MCP configuration:
|
||||
```bash
|
||||
# Check agent configuration
|
||||
uv run fuzzforge mcp status
|
||||
```
|
||||
|
||||
# Verify the config file path exists and contains valid JSON
|
||||
cat ~/.config/Code/User/mcp.json # Copilot
|
||||
cat ~/.claude.json # Claude Code
|
||||
Verify the configuration file path exists and contains valid JSON.
|
||||
|
||||
If the server process isn't running:
|
||||
```bash
|
||||
# Check if MCP server is running
|
||||
ps aux | grep fuzzforge_mcp
|
||||
|
||||
# Test the MCP server manually
|
||||
uv run python -m fuzzforge_mcp
|
||||
```
|
||||
|
||||
### Module Container Fails to Build
|
||||
|
||||
```bash
|
||||
# Build module container manually to see errors
|
||||
cd fuzzforge-modules/<module-name>
|
||||
docker build -t <module-name> .
|
||||
```
|
||||
|
||||
### Using Podman Instead of Docker
|
||||
|
||||
If you prefer Podman:
|
||||
```bash
|
||||
# Install with Podman engine
|
||||
# Use --engine podman with CLI
|
||||
uv run fuzzforge mcp install copilot --engine podman
|
||||
|
||||
# Or set environment variable
|
||||
export FUZZFORGE_ENGINE=podman
|
||||
```
|
||||
|
||||
### Hub Registry
|
||||
|
||||
FuzzForge stores linked hub information in `~/.fuzzforge/hubs.json`. If something goes wrong:
|
||||
### Check Logs
|
||||
|
||||
FuzzForge stores execution logs in the storage directory:
|
||||
```bash
|
||||
# View registry
|
||||
cat ~/.fuzzforge/hubs.json
|
||||
|
||||
# Reset registry
|
||||
rm ~/.fuzzforge/hubs.json
|
||||
ls -la ~/.fuzzforge/storage/<project-id>/<execution-id>/
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Next Steps
|
||||
|
||||
- 🖥️ Launch `uv run fuzzforge ui` and explore the dashboard
|
||||
- 🔒 Clone the [mcp-security-hub](https://github.com/FuzzingLabs/mcp-security-hub) for 40+ security tools
|
||||
- 📖 Read the [Module SDK Guide](fuzzforge-modules/fuzzforge-modules-sdk/README.md) to create custom modules
|
||||
- 🎬 Check the demos in the [README](README.md)
|
||||
- 💬 Join our [Discord](https://discord.gg/8XEX33UUwZ) for support
|
||||
|
||||
---
|
||||
|
||||
BIN
assets/demopart1.gif
Normal file
BIN
assets/demopart1.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 360 KiB |
BIN
assets/demopart2.gif
Normal file
BIN
assets/demopart2.gif
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 2.1 MiB |
@@ -1,14 +1,14 @@
|
||||
[project]
|
||||
name = "fuzzforge-cli"
|
||||
version = "0.0.1"
|
||||
description = "FuzzForge CLI - Command-line interface for FuzzForge AI."
|
||||
description = "FuzzForge CLI - Command-line interface for FuzzForge OSS."
|
||||
authors = []
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"fuzzforge-mcp==0.0.1",
|
||||
"fuzzforge-runner==0.0.1",
|
||||
"fuzzforge-types==0.0.1",
|
||||
"rich>=14.0.0",
|
||||
"textual>=1.0.0",
|
||||
"typer==0.20.1",
|
||||
]
|
||||
|
||||
@@ -26,4 +26,5 @@ tests = [
|
||||
fuzzforge = "fuzzforge_cli.__main__:main"
|
||||
|
||||
[tool.uv.sources]
|
||||
fuzzforge-mcp = { workspace = true }
|
||||
fuzzforge-runner = { workspace = true }
|
||||
fuzzforge-types = { workspace = true }
|
||||
|
||||
@@ -13,49 +13,3 @@ ignore = [
|
||||
"PLR2004", # allowing comparisons using unamed numerical constants in tests
|
||||
"S101", # allowing 'assert' statements in tests
|
||||
]
|
||||
"src/fuzzforge_cli/tui/**" = [
|
||||
"ARG002", # unused method argument: callback signature
|
||||
"BLE001", # blind exception: broad error handling in UI
|
||||
"C901", # complexity: UI logic
|
||||
"D107", # missing docstring in __init__: simple dataclasses
|
||||
"FBT001", # boolean positional arg
|
||||
"FBT002", # boolean default arg
|
||||
"PLC0415", # import outside top-level: lazy loading
|
||||
"PLR0911", # too many return statements
|
||||
"PLR0912", # too many branches
|
||||
"PLR2004", # magic value comparison
|
||||
"RUF012", # mutable class default: Textual pattern
|
||||
"S603", # subprocess: validated inputs
|
||||
"S607", # subprocess: PATH lookup
|
||||
"SIM108", # ternary: readability preference
|
||||
"TC001", # TYPE_CHECKING: runtime type needs
|
||||
"TC002", # TYPE_CHECKING: runtime type needs
|
||||
"TC003", # TYPE_CHECKING: runtime type needs
|
||||
"TRY300", # try-else: existing pattern
|
||||
]
|
||||
"tui/*.py" = [
|
||||
"D107", # missing docstring in __init__: simple dataclasses
|
||||
"TC001", # TYPE_CHECKING: runtime type needs
|
||||
"TC002", # TYPE_CHECKING: runtime type needs
|
||||
"TC003", # TYPE_CHECKING: runtime type needs
|
||||
]
|
||||
"src/fuzzforge_cli/commands/mcp.py" = [
|
||||
"ARG001", # unused argument: callback signature
|
||||
"B904", # raise from: existing pattern
|
||||
"F841", # unused variable: legacy code
|
||||
"FBT002", # boolean default arg
|
||||
"PLR0912", # too many branches
|
||||
"PLR0915", # too many statements
|
||||
"SIM108", # ternary: readability preference
|
||||
]
|
||||
"src/fuzzforge_cli/application.py" = [
|
||||
"B008", # function call in default: Path.cwd()
|
||||
"PLC0415", # import outside top-level: lazy loading
|
||||
]
|
||||
"src/fuzzforge_cli/commands/projects.py" = [
|
||||
"TC003", # TYPE_CHECKING: runtime type needs
|
||||
]
|
||||
"src/fuzzforge_cli/context.py" = [
|
||||
"TC002", # TYPE_CHECKING: runtime type needs
|
||||
"TC003", # TYPE_CHECKING: runtime type needs
|
||||
]
|
||||
|
||||
@@ -3,16 +3,16 @@
|
||||
from pathlib import Path
|
||||
from typing import Annotated
|
||||
|
||||
from fuzzforge_mcp.storage import LocalStorage # type: ignore[import-untyped]
|
||||
from fuzzforge_runner import Runner, Settings
|
||||
from typer import Context as TyperContext
|
||||
from typer import Option, Typer
|
||||
|
||||
from fuzzforge_cli.commands import mcp, projects
|
||||
from fuzzforge_cli.commands import mcp, modules, projects
|
||||
from fuzzforge_cli.context import Context
|
||||
|
||||
application: Typer = Typer(
|
||||
name="fuzzforge",
|
||||
help="FuzzForge AI - Security research orchestration platform.",
|
||||
help="FuzzForge OSS - Security research orchestration platform.",
|
||||
)
|
||||
|
||||
|
||||
@@ -27,6 +27,15 @@ def main(
|
||||
help="Path to the FuzzForge project directory.",
|
||||
),
|
||||
] = Path.cwd(),
|
||||
modules_path: Annotated[
|
||||
Path,
|
||||
Option(
|
||||
"--modules",
|
||||
"-m",
|
||||
envvar="FUZZFORGE_MODULES_PATH",
|
||||
help="Path to the modules directory.",
|
||||
),
|
||||
] = Path.home() / ".fuzzforge" / "modules",
|
||||
storage_path: Annotated[
|
||||
Path,
|
||||
Option(
|
||||
@@ -34,37 +43,54 @@ def main(
|
||||
envvar="FUZZFORGE_STORAGE__PATH",
|
||||
help="Path to the storage directory.",
|
||||
),
|
||||
] = Path.cwd() / ".fuzzforge" / "storage",
|
||||
] = Path.home() / ".fuzzforge" / "storage",
|
||||
engine_type: Annotated[
|
||||
str,
|
||||
Option(
|
||||
"--engine",
|
||||
envvar="FUZZFORGE_ENGINE__TYPE",
|
||||
help="Container engine type (docker or podman).",
|
||||
),
|
||||
] = "docker",
|
||||
engine_socket: Annotated[
|
||||
str,
|
||||
Option(
|
||||
"--socket",
|
||||
envvar="FUZZFORGE_ENGINE__SOCKET",
|
||||
help="Container engine socket path.",
|
||||
),
|
||||
] = "",
|
||||
context: TyperContext = None, # type: ignore[assignment]
|
||||
) -> None:
|
||||
"""FuzzForge AI - Security research orchestration platform.
|
||||
"""FuzzForge OSS - Security research orchestration platform.
|
||||
|
||||
Discover and execute MCP hub tools for security research.
|
||||
Execute security research modules in isolated containers.
|
||||
|
||||
"""
|
||||
storage = LocalStorage(base_path=storage_path)
|
||||
from fuzzforge_runner.settings import EngineSettings, ProjectSettings, StorageSettings
|
||||
|
||||
settings = Settings(
|
||||
engine=EngineSettings(
|
||||
type=engine_type, # type: ignore[arg-type]
|
||||
socket=engine_socket,
|
||||
),
|
||||
storage=StorageSettings(
|
||||
path=storage_path,
|
||||
),
|
||||
project=ProjectSettings(
|
||||
default_path=project_path,
|
||||
modules_path=modules_path,
|
||||
),
|
||||
)
|
||||
|
||||
runner = Runner(settings)
|
||||
|
||||
context.obj = Context(
|
||||
storage=storage,
|
||||
runner=runner,
|
||||
project_path=project_path,
|
||||
)
|
||||
|
||||
|
||||
application.add_typer(mcp.application)
|
||||
application.add_typer(modules.application)
|
||||
application.add_typer(projects.application)
|
||||
|
||||
|
||||
@application.command(
|
||||
name="ui",
|
||||
help="Launch the FuzzForge terminal interface.",
|
||||
)
|
||||
def launch_ui() -> None:
|
||||
"""Launch the interactive FuzzForge TUI dashboard.
|
||||
|
||||
Provides a visual dashboard showing AI agent connection status
|
||||
and hub server availability, with wizards for setup and configuration.
|
||||
|
||||
"""
|
||||
from fuzzforge_cli.tui.app import FuzzForgeApp
|
||||
|
||||
FuzzForgeApp().run()
|
||||
|
||||
@@ -12,7 +12,7 @@ import os
|
||||
import sys
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
from typing import Annotated, Any
|
||||
from typing import Annotated
|
||||
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
@@ -44,10 +44,10 @@ def _get_copilot_mcp_path() -> Path:
|
||||
"""
|
||||
if sys.platform == "darwin":
|
||||
return Path.home() / "Library" / "Application Support" / "Code" / "User" / "mcp.json"
|
||||
if sys.platform == "win32":
|
||||
elif sys.platform == "win32":
|
||||
return Path(os.environ.get("APPDATA", "")) / "Code" / "User" / "mcp.json"
|
||||
# Linux
|
||||
return Path.home() / ".config" / "Code" / "User" / "mcp.json"
|
||||
else: # Linux
|
||||
return Path.home() / ".config" / "Code" / "User" / "mcp.json"
|
||||
|
||||
|
||||
def _get_claude_desktop_mcp_path() -> Path:
|
||||
@@ -58,10 +58,10 @@ def _get_claude_desktop_mcp_path() -> Path:
|
||||
"""
|
||||
if sys.platform == "darwin":
|
||||
return Path.home() / "Library" / "Application Support" / "Claude" / "claude_desktop_config.json"
|
||||
if sys.platform == "win32":
|
||||
elif sys.platform == "win32":
|
||||
return Path(os.environ.get("APPDATA", "")) / "Claude" / "claude_desktop_config.json"
|
||||
# Linux
|
||||
return Path.home() / ".config" / "Claude" / "claude_desktop_config.json"
|
||||
else: # Linux
|
||||
return Path.home() / ".config" / "Claude" / "claude_desktop_config.json"
|
||||
|
||||
|
||||
def _get_claude_code_mcp_path(project_path: Path | None = None) -> Path:
|
||||
@@ -114,13 +114,13 @@ def _detect_docker_socket() -> str:
|
||||
:returns: Path to the Docker socket.
|
||||
|
||||
"""
|
||||
socket_paths: list[Path] = [
|
||||
Path("/var/run/docker.sock"),
|
||||
socket_paths = [
|
||||
"/var/run/docker.sock",
|
||||
Path.home() / ".docker" / "run" / "docker.sock",
|
||||
]
|
||||
|
||||
for path in socket_paths:
|
||||
if path.exists():
|
||||
if Path(path).exists():
|
||||
return str(path)
|
||||
|
||||
return "/var/run/docker.sock"
|
||||
@@ -132,33 +132,28 @@ def _find_fuzzforge_root() -> Path:
|
||||
:returns: Path to fuzzforge-oss directory.
|
||||
|
||||
"""
|
||||
# Check environment variable override first
|
||||
env_root = os.environ.get("FUZZFORGE_ROOT")
|
||||
if env_root:
|
||||
return Path(env_root).resolve()
|
||||
|
||||
# Walk up from cwd to find a fuzzforge root (hub-config.json is the marker)
|
||||
for parent in [Path.cwd(), *Path.cwd().parents]:
|
||||
if (parent / "hub-config.json").is_file():
|
||||
return parent
|
||||
|
||||
# Fall back to __file__-based search (dev install inside fuzzforge-oss)
|
||||
# Try to find from current file location
|
||||
current = Path(__file__).resolve()
|
||||
|
||||
# Walk up to find fuzzforge-oss root
|
||||
for parent in current.parents:
|
||||
if (parent / "fuzzforge-mcp").is_dir():
|
||||
if (parent / "fuzzforge-mcp").is_dir() and (parent / "fuzzforge-runner").is_dir():
|
||||
return parent
|
||||
|
||||
# Fall back to cwd
|
||||
return Path.cwd()
|
||||
|
||||
|
||||
def _generate_mcp_config(
|
||||
fuzzforge_root: Path,
|
||||
modules_path: Path,
|
||||
engine_type: str,
|
||||
engine_socket: str,
|
||||
) -> dict[str, Any]:
|
||||
) -> dict:
|
||||
"""Generate MCP server configuration.
|
||||
|
||||
:param fuzzforge_root: Path to fuzzforge-oss installation.
|
||||
:param modules_path: Path to the modules directory.
|
||||
:param engine_type: Container engine type (podman or docker).
|
||||
:param engine_socket: Container engine socket path.
|
||||
:returns: MCP configuration dictionary.
|
||||
@@ -174,12 +169,9 @@ def _generate_mcp_config(
|
||||
command = "uv"
|
||||
args = ["--directory", str(fuzzforge_root), "run", "fuzzforge-mcp"]
|
||||
|
||||
# User-global storage paths for FuzzForge containers.
|
||||
# Kept under ~/.fuzzforge so images are built once and shared across
|
||||
# all workspaces — regardless of where `fuzzforge mcp install` is run.
|
||||
# Override with FUZZFORGE_USER_DIR for isolated testing.
|
||||
user_dir_env = os.environ.get("FUZZFORGE_USER_DIR")
|
||||
fuzzforge_home = Path(user_dir_env).resolve() if user_dir_env else Path.home() / ".fuzzforge"
|
||||
# Self-contained storage paths for FuzzForge containers
|
||||
# This isolates FuzzForge from system Podman and avoids snap issues
|
||||
fuzzforge_home = Path.home() / ".fuzzforge"
|
||||
graphroot = fuzzforge_home / "containers" / "storage"
|
||||
runroot = fuzzforge_home / "containers" / "run"
|
||||
|
||||
@@ -189,11 +181,10 @@ def _generate_mcp_config(
|
||||
"args": args,
|
||||
"cwd": str(fuzzforge_root),
|
||||
"env": {
|
||||
"FUZZFORGE_MODULES_PATH": str(modules_path),
|
||||
"FUZZFORGE_ENGINE__TYPE": engine_type,
|
||||
"FUZZFORGE_ENGINE__GRAPHROOT": str(graphroot),
|
||||
"FUZZFORGE_ENGINE__RUNROOT": str(runroot),
|
||||
"FUZZFORGE_HUB__ENABLED": "true",
|
||||
"FUZZFORGE_HUB__CONFIG_PATH": str(fuzzforge_root / "hub-config.json"),
|
||||
},
|
||||
}
|
||||
|
||||
@@ -273,6 +264,14 @@ def generate(
|
||||
help="AI agent to generate config for (copilot, claude-desktop, or claude-code).",
|
||||
),
|
||||
],
|
||||
modules_path: Annotated[
|
||||
Path | None,
|
||||
Option(
|
||||
"--modules",
|
||||
"-m",
|
||||
help="Path to the modules directory.",
|
||||
),
|
||||
] = None,
|
||||
engine: Annotated[
|
||||
str,
|
||||
Option(
|
||||
@@ -286,12 +285,16 @@ def generate(
|
||||
|
||||
:param context: Typer context.
|
||||
:param agent: Target AI agent.
|
||||
:param modules_path: Override modules path.
|
||||
:param engine: Container engine type.
|
||||
|
||||
"""
|
||||
console = Console()
|
||||
fuzzforge_root = _find_fuzzforge_root()
|
||||
|
||||
# Use defaults if not specified
|
||||
resolved_modules = modules_path or (fuzzforge_root / "fuzzforge-modules")
|
||||
|
||||
# Detect socket
|
||||
if engine == "podman":
|
||||
socket = _detect_podman_socket()
|
||||
@@ -301,6 +304,7 @@ def generate(
|
||||
# Generate config
|
||||
server_config = _generate_mcp_config(
|
||||
fuzzforge_root=fuzzforge_root,
|
||||
modules_path=resolved_modules,
|
||||
engine_type=engine,
|
||||
engine_socket=socket,
|
||||
)
|
||||
@@ -344,6 +348,14 @@ def install(
|
||||
help="AI agent to install config for (copilot, claude-desktop, or claude-code).",
|
||||
),
|
||||
],
|
||||
modules_path: Annotated[
|
||||
Path | None,
|
||||
Option(
|
||||
"--modules",
|
||||
"-m",
|
||||
help="Path to the modules directory.",
|
||||
),
|
||||
] = None,
|
||||
engine: Annotated[
|
||||
str,
|
||||
Option(
|
||||
@@ -368,6 +380,7 @@ def install(
|
||||
|
||||
:param context: Typer context.
|
||||
:param agent: Target AI agent.
|
||||
:param modules_path: Override modules path.
|
||||
:param engine: Container engine type.
|
||||
:param force: Overwrite existing configuration.
|
||||
|
||||
@@ -386,6 +399,9 @@ def install(
|
||||
config_path = _get_claude_desktop_mcp_path()
|
||||
servers_key = "mcpServers"
|
||||
|
||||
# Use defaults if not specified
|
||||
resolved_modules = modules_path or (fuzzforge_root / "fuzzforge-modules")
|
||||
|
||||
# Detect socket
|
||||
if engine == "podman":
|
||||
socket = _detect_podman_socket()
|
||||
@@ -395,6 +411,7 @@ def install(
|
||||
# Generate server config
|
||||
server_config = _generate_mcp_config(
|
||||
fuzzforge_root=fuzzforge_root,
|
||||
modules_path=resolved_modules,
|
||||
engine_type=engine,
|
||||
engine_socket=socket,
|
||||
)
|
||||
@@ -434,9 +451,9 @@ def install(
|
||||
console.print(f"[bold]Configuration file:[/bold] {config_path}")
|
||||
console.print()
|
||||
console.print("[bold]Settings:[/bold]")
|
||||
console.print(f" Modules Path: {resolved_modules}")
|
||||
console.print(f" Engine: {engine}")
|
||||
console.print(f" Socket: {socket}")
|
||||
console.print(f" Hub Config: {fuzzforge_root / 'hub-config.json'}")
|
||||
console.print()
|
||||
|
||||
console.print("[bold]Next steps:[/bold]")
|
||||
|
||||
166
fuzzforge-cli/src/fuzzforge_cli/commands/modules.py
Normal file
166
fuzzforge-cli/src/fuzzforge_cli/commands/modules.py
Normal file
@@ -0,0 +1,166 @@
|
||||
"""Module management commands for FuzzForge CLI."""
|
||||
|
||||
import asyncio
|
||||
from pathlib import Path
|
||||
from typing import Annotated, Any
|
||||
|
||||
from rich.console import Console
|
||||
from rich.table import Table
|
||||
from typer import Argument, Context, Option, Typer
|
||||
|
||||
from fuzzforge_cli.context import get_project_path, get_runner
|
||||
|
||||
application: Typer = Typer(
|
||||
name="modules",
|
||||
help="Module management commands.",
|
||||
)
|
||||
|
||||
|
||||
@application.command(
|
||||
help="List available modules.",
|
||||
name="list",
|
||||
)
|
||||
def list_modules(
|
||||
context: Context,
|
||||
) -> None:
|
||||
"""List all available modules.
|
||||
|
||||
:param context: Typer context.
|
||||
|
||||
"""
|
||||
runner = get_runner(context)
|
||||
modules = runner.list_modules()
|
||||
|
||||
console = Console()
|
||||
|
||||
if not modules:
|
||||
console.print("[yellow]No modules found.[/yellow]")
|
||||
console.print(f" Modules directory: {runner.settings.modules_path}")
|
||||
return
|
||||
|
||||
table = Table(title="Available Modules")
|
||||
table.add_column("Identifier", style="cyan")
|
||||
table.add_column("Available")
|
||||
table.add_column("Description")
|
||||
|
||||
for module in modules:
|
||||
table.add_row(
|
||||
module.identifier,
|
||||
"✓" if module.available else "✗",
|
||||
module.description or "-",
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
|
||||
@application.command(
|
||||
help="Execute a module.",
|
||||
name="run",
|
||||
)
|
||||
def run_module(
|
||||
context: Context,
|
||||
module_identifier: Annotated[
|
||||
str,
|
||||
Argument(
|
||||
help="Identifier of the module to execute.",
|
||||
),
|
||||
],
|
||||
assets_path: Annotated[
|
||||
Path | None,
|
||||
Option(
|
||||
"--assets",
|
||||
"-a",
|
||||
help="Path to input assets.",
|
||||
),
|
||||
] = None,
|
||||
config: Annotated[
|
||||
str | None,
|
||||
Option(
|
||||
"--config",
|
||||
"-c",
|
||||
help="Module configuration as JSON string.",
|
||||
),
|
||||
] = None,
|
||||
) -> None:
|
||||
"""Execute a module.
|
||||
|
||||
:param context: Typer context.
|
||||
:param module_identifier: Module to execute.
|
||||
:param assets_path: Optional path to input assets.
|
||||
:param config: Optional JSON configuration.
|
||||
|
||||
"""
|
||||
import json
|
||||
|
||||
runner = get_runner(context)
|
||||
project_path = get_project_path(context)
|
||||
|
||||
configuration: dict[str, Any] | None = None
|
||||
if config:
|
||||
try:
|
||||
configuration = json.loads(config)
|
||||
except json.JSONDecodeError as e:
|
||||
console = Console()
|
||||
console.print(f"[red]✗[/red] Invalid JSON configuration: {e}")
|
||||
return
|
||||
|
||||
console = Console()
|
||||
console.print(f"[blue]→[/blue] Executing module: {module_identifier}")
|
||||
|
||||
async def execute() -> None:
|
||||
result = await runner.execute_module(
|
||||
module_identifier=module_identifier,
|
||||
project_path=project_path,
|
||||
configuration=configuration,
|
||||
assets_path=assets_path,
|
||||
)
|
||||
|
||||
if result.success:
|
||||
console.print(f"[green]✓[/green] Module execution completed")
|
||||
console.print(f" Execution ID: {result.execution_id}")
|
||||
console.print(f" Results: {result.results_path}")
|
||||
else:
|
||||
console.print(f"[red]✗[/red] Module execution failed")
|
||||
console.print(f" Error: {result.error}")
|
||||
|
||||
asyncio.run(execute())
|
||||
|
||||
|
||||
@application.command(
|
||||
help="Show module information.",
|
||||
name="info",
|
||||
)
|
||||
def module_info(
|
||||
context: Context,
|
||||
module_identifier: Annotated[
|
||||
str,
|
||||
Argument(
|
||||
help="Identifier of the module.",
|
||||
),
|
||||
],
|
||||
) -> None:
|
||||
"""Show information about a specific module.
|
||||
|
||||
:param context: Typer context.
|
||||
:param module_identifier: Module to get info for.
|
||||
|
||||
"""
|
||||
runner = get_runner(context)
|
||||
module = runner.get_module_info(module_identifier)
|
||||
|
||||
console = Console()
|
||||
|
||||
if module is None:
|
||||
console.print(f"[red]✗[/red] Module not found: {module_identifier}")
|
||||
return
|
||||
|
||||
table = Table(title=f"Module: {module.identifier}")
|
||||
table.add_column("Property", style="cyan")
|
||||
table.add_column("Value")
|
||||
|
||||
table.add_row("Identifier", module.identifier)
|
||||
table.add_row("Available", "Yes" if module.available else "No")
|
||||
table.add_row("Description", module.description or "-")
|
||||
table.add_row("Version", module.version or "-")
|
||||
|
||||
console.print(table)
|
||||
@@ -7,7 +7,7 @@ from rich.console import Console
|
||||
from rich.table import Table
|
||||
from typer import Argument, Context, Option, Typer
|
||||
|
||||
from fuzzforge_cli.context import get_project_path, get_storage
|
||||
from fuzzforge_cli.context import get_project_path, get_runner
|
||||
|
||||
application: Typer = Typer(
|
||||
name="project",
|
||||
@@ -36,10 +36,10 @@ def init_project(
|
||||
:param path: Path to initialize (defaults to current directory).
|
||||
|
||||
"""
|
||||
storage = get_storage(context)
|
||||
runner = get_runner(context)
|
||||
project_path = path or get_project_path(context)
|
||||
|
||||
storage_path = storage.init_project(project_path)
|
||||
storage_path = runner.init_project(project_path)
|
||||
|
||||
console = Console()
|
||||
console.print(f"[green]✓[/green] Project initialized at {project_path}")
|
||||
@@ -65,10 +65,10 @@ def set_assets(
|
||||
:param assets_path: Path to assets.
|
||||
|
||||
"""
|
||||
storage = get_storage(context)
|
||||
runner = get_runner(context)
|
||||
project_path = get_project_path(context)
|
||||
|
||||
stored_path = storage.set_project_assets(project_path, assets_path)
|
||||
stored_path = runner.set_project_assets(project_path, assets_path)
|
||||
|
||||
console = Console()
|
||||
console.print(f"[green]✓[/green] Assets stored from {assets_path}")
|
||||
@@ -87,11 +87,11 @@ def show_info(
|
||||
:param context: Typer context.
|
||||
|
||||
"""
|
||||
storage = get_storage(context)
|
||||
runner = get_runner(context)
|
||||
project_path = get_project_path(context)
|
||||
|
||||
executions = storage.list_executions(project_path)
|
||||
assets_path = storage.get_project_assets_path(project_path)
|
||||
executions = runner.list_executions(project_path)
|
||||
assets_path = runner.storage.get_project_assets_path(project_path)
|
||||
|
||||
console = Console()
|
||||
table = Table(title=f"Project: {project_path.name}")
|
||||
@@ -118,10 +118,10 @@ def list_executions(
|
||||
:param context: Typer context.
|
||||
|
||||
"""
|
||||
storage = get_storage(context)
|
||||
runner = get_runner(context)
|
||||
project_path = get_project_path(context)
|
||||
|
||||
executions = storage.list_executions(project_path)
|
||||
executions = runner.list_executions(project_path)
|
||||
|
||||
console = Console()
|
||||
|
||||
@@ -134,7 +134,7 @@ def list_executions(
|
||||
table.add_column("Has Results")
|
||||
|
||||
for exec_id in executions:
|
||||
has_results = storage.get_execution_results(project_path, exec_id) is not None
|
||||
has_results = runner.get_execution_results(project_path, exec_id) is not None
|
||||
table.add_row(exec_id, "✓" if has_results else "-")
|
||||
|
||||
console.print(table)
|
||||
@@ -168,10 +168,10 @@ def get_results(
|
||||
:param extract_to: Optional directory to extract to.
|
||||
|
||||
"""
|
||||
storage = get_storage(context)
|
||||
runner = get_runner(context)
|
||||
project_path = get_project_path(context)
|
||||
|
||||
results_path = storage.get_execution_results(project_path, execution_id)
|
||||
results_path = runner.get_execution_results(project_path, execution_id)
|
||||
|
||||
console = Console()
|
||||
|
||||
@@ -182,5 +182,5 @@ def get_results(
|
||||
console.print(f"[green]✓[/green] Results: {results_path}")
|
||||
|
||||
if extract_to:
|
||||
extracted = storage.extract_results(results_path, extract_to)
|
||||
extracted = runner.extract_results(results_path, extract_to)
|
||||
console.print(f" Extracted to: {extracted}")
|
||||
|
||||
@@ -5,35 +5,35 @@ from __future__ import annotations
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
from fuzzforge_mcp.storage import LocalStorage # type: ignore[import-untyped]
|
||||
from fuzzforge_runner import Runner, Settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from typer import Context as TyperContext
|
||||
|
||||
|
||||
class Context:
|
||||
"""CLI context holding the storage instance and settings."""
|
||||
"""CLI context holding the runner instance and settings."""
|
||||
|
||||
_storage: LocalStorage
|
||||
_runner: Runner
|
||||
_project_path: Path
|
||||
|
||||
def __init__(self, storage: LocalStorage, project_path: Path) -> None:
|
||||
def __init__(self, runner: Runner, project_path: Path) -> None:
|
||||
"""Initialize an instance of the class.
|
||||
|
||||
:param storage: FuzzForge local storage instance.
|
||||
:param runner: FuzzForge runner instance.
|
||||
:param project_path: Path to the current project.
|
||||
|
||||
"""
|
||||
self._storage = storage
|
||||
self._runner = runner
|
||||
self._project_path = project_path
|
||||
|
||||
def get_storage(self) -> LocalStorage:
|
||||
"""Get the storage instance.
|
||||
def get_runner(self) -> Runner:
|
||||
"""Get the runner instance.
|
||||
|
||||
:return: LocalStorage instance.
|
||||
:return: Runner instance.
|
||||
|
||||
"""
|
||||
return self._storage
|
||||
return self._runner
|
||||
|
||||
def get_project_path(self) -> Path:
|
||||
"""Get the current project path.
|
||||
@@ -44,14 +44,14 @@ class Context:
|
||||
return self._project_path
|
||||
|
||||
|
||||
def get_storage(context: TyperContext) -> LocalStorage:
|
||||
"""Get storage from Typer context.
|
||||
def get_runner(context: TyperContext) -> Runner:
|
||||
"""Get runner from Typer context.
|
||||
|
||||
:param context: Typer context.
|
||||
:return: LocalStorage instance.
|
||||
:return: Runner instance.
|
||||
|
||||
"""
|
||||
return cast("Context", context.obj).get_storage()
|
||||
return cast("Context", context.obj).get_runner()
|
||||
|
||||
|
||||
def get_project_path(context: TyperContext) -> Path:
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
"""FuzzForge terminal user interface."""
|
||||
@@ -1,562 +0,0 @@
|
||||
"""FuzzForge TUI application.
|
||||
|
||||
Main terminal user interface for FuzzForge, providing a dashboard
|
||||
with AI agent connection status, hub server availability, and
|
||||
hub management capabilities.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import defaultdict
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from rich.text import Text
|
||||
from textual import events, work
|
||||
from textual.app import App, ComposeResult
|
||||
from textual.binding import Binding
|
||||
from textual.containers import Horizontal, Vertical, VerticalScroll
|
||||
from textual.message import Message
|
||||
from textual.widgets import Button, DataTable, Footer, Header
|
||||
|
||||
from fuzzforge_cli.tui.helpers import (
|
||||
check_agent_status,
|
||||
check_hub_image,
|
||||
find_fuzzforge_root,
|
||||
get_agent_configs,
|
||||
load_hub_config,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_cli.commands.mcp import AIAgent
|
||||
|
||||
# Agent config entries stored alongside their linked status for row mapping
|
||||
_AgentRow = tuple[str, "AIAgent", Path, str, bool]
|
||||
|
||||
|
||||
class SingleClickDataTable(DataTable[Any]):
|
||||
"""DataTable subclass that also fires ``RowClicked`` on a single mouse click.
|
||||
|
||||
Textual's built-in ``RowSelected`` only fires on Enter or on a second click
|
||||
of an already-highlighted row. ``RowClicked`` fires on every first click,
|
||||
enabling single-click-to-act UX without requiring Enter.
|
||||
"""
|
||||
|
||||
class RowClicked(Message):
|
||||
"""Fired on every single mouse click on a data row."""
|
||||
|
||||
def __init__(self, data_table: SingleClickDataTable, cursor_row: int) -> None:
|
||||
self.data_table = data_table
|
||||
self.cursor_row = cursor_row
|
||||
super().__init__()
|
||||
|
||||
@property
|
||||
def control(self) -> SingleClickDataTable:
|
||||
"""Return the data table that fired this event."""
|
||||
return self.data_table
|
||||
|
||||
async def _on_click(self, event: events.Click) -> None:
|
||||
"""Forward to parent, then post RowClicked on every mouse click.
|
||||
|
||||
The hub table is handled exclusively via RowClicked. RowSelected is
|
||||
intentionally NOT used for the hub table to avoid double-dispatch.
|
||||
"""
|
||||
await super()._on_click(event)
|
||||
meta = event.style.meta
|
||||
if meta and "row" in meta and self.cursor_type == "row":
|
||||
row_index: int = int(meta["row"])
|
||||
if row_index >= 0:
|
||||
self.post_message(SingleClickDataTable.RowClicked(self, row_index))
|
||||
|
||||
|
||||
class FuzzForgeApp(App[None]):
|
||||
"""FuzzForge AI terminal user interface."""
|
||||
|
||||
TITLE = "FuzzForge AI"
|
||||
SUB_TITLE = "Security Research Orchestration"
|
||||
|
||||
CSS = """
|
||||
Screen {
|
||||
background: $surface;
|
||||
}
|
||||
|
||||
#main {
|
||||
height: 1fr;
|
||||
margin: 1 2;
|
||||
}
|
||||
|
||||
.panel {
|
||||
width: 1fr;
|
||||
border: round #4699fc;
|
||||
padding: 1 2;
|
||||
margin: 0 0 1 0;
|
||||
}
|
||||
|
||||
#hub-panel {
|
||||
height: 12;
|
||||
}
|
||||
|
||||
#hub-table {
|
||||
height: 1fr;
|
||||
}
|
||||
|
||||
#agents-panel {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
.panel-title {
|
||||
text-style: bold;
|
||||
color: #4699fc;
|
||||
text-align: left;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
#hub-title-bar {
|
||||
height: auto;
|
||||
align: center middle;
|
||||
margin: 0 0 1 0;
|
||||
}
|
||||
|
||||
#btn-hub-manager {
|
||||
min-width: 40;
|
||||
margin-right: 2;
|
||||
}
|
||||
|
||||
#btn-fuzzinglabs-hub {
|
||||
min-width: 30;
|
||||
}
|
||||
|
||||
#agents-table {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Modal screens */
|
||||
AgentSetupScreen, AgentUnlinkScreen,
|
||||
HubManagerScreen, LinkHubScreen, CloneHubScreen,
|
||||
BuildImageScreen, BuildLogScreen {
|
||||
align: center middle;
|
||||
}
|
||||
|
||||
#setup-dialog, #unlink-dialog {
|
||||
width: 56;
|
||||
height: auto;
|
||||
max-height: 80%;
|
||||
border: thick #4699fc;
|
||||
background: $surface;
|
||||
padding: 2 3;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
#hub-manager-dialog {
|
||||
width: 100;
|
||||
height: auto;
|
||||
max-height: 85%;
|
||||
border: thick #4699fc;
|
||||
background: $surface;
|
||||
padding: 2 3;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
#link-dialog, #clone-dialog {
|
||||
width: 72;
|
||||
height: auto;
|
||||
max-height: 80%;
|
||||
border: thick #4699fc;
|
||||
background: $surface;
|
||||
padding: 2 3;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
#build-dialog {
|
||||
width: 72;
|
||||
height: auto;
|
||||
max-height: 80%;
|
||||
border: thick #4699fc;
|
||||
background: $surface;
|
||||
padding: 2 3;
|
||||
}
|
||||
|
||||
#confirm-text {
|
||||
margin: 1 0 2 0;
|
||||
}
|
||||
|
||||
#build-log {
|
||||
height: 30;
|
||||
border: round $panel;
|
||||
margin: 1 0;
|
||||
}
|
||||
|
||||
#build-subtitle {
|
||||
color: $text-muted;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
#build-status {
|
||||
height: 1;
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
.dialog-title {
|
||||
text-style: bold;
|
||||
text-align: center;
|
||||
color: #4699fc;
|
||||
margin-bottom: 1;
|
||||
}
|
||||
|
||||
.field-label {
|
||||
margin-top: 1;
|
||||
text-style: bold;
|
||||
}
|
||||
|
||||
RadioSet {
|
||||
height: auto;
|
||||
margin: 0 0 1 2;
|
||||
}
|
||||
|
||||
Input {
|
||||
margin: 0 0 1 0;
|
||||
}
|
||||
|
||||
.dialog-buttons {
|
||||
layout: horizontal;
|
||||
height: 3;
|
||||
align: center middle;
|
||||
margin-top: 1;
|
||||
}
|
||||
|
||||
.dialog-buttons Button {
|
||||
margin: 0 1;
|
||||
min-width: 14;
|
||||
}
|
||||
"""
|
||||
|
||||
BINDINGS = [
|
||||
Binding("q", "quit", "Quit"),
|
||||
Binding("h", "manage_hubs", "Hub Manager"),
|
||||
Binding("r", "refresh", "Refresh"),
|
||||
Binding("enter", "select_row", "Select", show=False),
|
||||
]
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Compose the dashboard layout."""
|
||||
yield Header()
|
||||
with VerticalScroll(id="main"):
|
||||
with Vertical(id="hub-panel", classes="panel"):
|
||||
yield SingleClickDataTable(id="hub-table")
|
||||
with Horizontal(id="hub-title-bar"):
|
||||
yield Button(
|
||||
"Hub Manager (h)",
|
||||
variant="primary",
|
||||
id="btn-hub-manager",
|
||||
)
|
||||
yield Button(
|
||||
"FuzzingLabs Hub",
|
||||
variant="primary",
|
||||
id="btn-fuzzinglabs-hub",
|
||||
)
|
||||
with Vertical(id="agents-panel", classes="panel"):
|
||||
yield DataTable(id="agents-table")
|
||||
yield Footer()
|
||||
|
||||
def on_mount(self) -> None:
|
||||
"""Populate tables on startup."""
|
||||
self._agent_rows: list[_AgentRow] = []
|
||||
self._hub_rows: list[tuple[str, str, str, bool] | None] = []
|
||||
# Background build tracking
|
||||
self._active_builds: dict[str, object] = {} # image -> Popen
|
||||
self._build_logs: dict[str, list[str]] = {} # image -> log lines
|
||||
self._build_results: dict[str, bool] = {} # image -> success
|
||||
self.query_one("#hub-panel").border_title = "Hub Servers [dim](click ✗ Not built to build)[/dim]"
|
||||
self.query_one("#agents-panel").border_title = "AI Agents"
|
||||
self._refresh_agents()
|
||||
self._refresh_hub()
|
||||
|
||||
def _refresh_agents(self) -> None:
|
||||
"""Refresh the AI agents status table."""
|
||||
table = self.query_one("#agents-table", DataTable)
|
||||
table.clear(columns=True)
|
||||
table.add_columns("Agent", "Status", "Config Path")
|
||||
table.cursor_type = "row"
|
||||
|
||||
self._agent_rows = []
|
||||
for display_name, agent, config_path, servers_key in get_agent_configs():
|
||||
is_linked, status_text = check_agent_status(config_path, servers_key)
|
||||
if is_linked:
|
||||
status_cell = Text(f"✓ {status_text}", style="green")
|
||||
else:
|
||||
status_cell = Text(f"✗ {status_text}", style="red")
|
||||
table.add_row(display_name, status_cell, str(config_path))
|
||||
self._agent_rows.append(
|
||||
(display_name, agent, config_path, servers_key, is_linked)
|
||||
)
|
||||
|
||||
def _refresh_hub(self) -> None:
|
||||
"""Refresh the hub servers table, grouped by source hub."""
|
||||
self._hub_rows = []
|
||||
table = self.query_one("#hub-table", SingleClickDataTable)
|
||||
table.clear(columns=True)
|
||||
table.add_columns("Server", "Image", "Hub", "Status")
|
||||
table.cursor_type = "row"
|
||||
|
||||
try:
|
||||
fuzzforge_root = find_fuzzforge_root()
|
||||
hub_config = load_hub_config(fuzzforge_root)
|
||||
except Exception:
|
||||
table.add_row(
|
||||
Text("Error loading config", style="red"), "", "", ""
|
||||
)
|
||||
return
|
||||
|
||||
servers = hub_config.get("servers", [])
|
||||
if not servers:
|
||||
table.add_row(
|
||||
Text("No servers — press h", style="dim"), "", "", ""
|
||||
)
|
||||
return
|
||||
|
||||
# Group servers by source hub
|
||||
groups: dict[str, list[dict[str, Any]]] = defaultdict(list)
|
||||
for server in servers:
|
||||
source = server.get("source_hub", "manual")
|
||||
groups[source].append(server)
|
||||
|
||||
for hub_name, hub_servers in groups.items():
|
||||
ready_count = 0
|
||||
total = len(hub_servers)
|
||||
|
||||
statuses: list[tuple[dict[str, Any], bool, str]] = []
|
||||
for server in hub_servers:
|
||||
enabled = server.get("enabled", True)
|
||||
if not enabled:
|
||||
statuses.append((server, False, "Disabled"))
|
||||
else:
|
||||
is_ready, status_text = check_hub_image(
|
||||
server.get("image", "")
|
||||
)
|
||||
if is_ready:
|
||||
ready_count += 1
|
||||
statuses.append((server, is_ready, status_text))
|
||||
|
||||
# Group header row
|
||||
if hub_name == "manual":
|
||||
header = Text(
|
||||
f"▼ 📦 Local config ({ready_count}/{total} ready)",
|
||||
style="bold",
|
||||
)
|
||||
else:
|
||||
header = Text(
|
||||
f"▼ 🔗 {hub_name} ({ready_count}/{total} ready)",
|
||||
style="bold",
|
||||
)
|
||||
table.add_row(header, "", "", "")
|
||||
self._hub_rows.append(None) # group header — not selectable
|
||||
|
||||
# Tool rows
|
||||
for server, is_ready, status_text in statuses:
|
||||
name = server.get("name", "unknown")
|
||||
image = server.get("image", "unknown")
|
||||
enabled = server.get("enabled", True)
|
||||
|
||||
if image in getattr(self, "_active_builds", {}):
|
||||
status_cell = Text("⏳ Building…", style="yellow")
|
||||
elif not enabled:
|
||||
status_cell = Text("Disabled", style="dim")
|
||||
elif is_ready:
|
||||
status_cell = Text("✓ Ready", style="green")
|
||||
else:
|
||||
status_cell = Text(f"✗ {status_text}", style="red dim")
|
||||
|
||||
table.add_row(
|
||||
f" {name}",
|
||||
Text(image, style="dim"),
|
||||
hub_name,
|
||||
status_cell,
|
||||
)
|
||||
self._hub_rows.append((name, image, hub_name, is_ready))
|
||||
|
||||
def on_data_table_row_selected(self, event: DataTable.RowSelected) -> None:
|
||||
"""Handle Enter-key row selection (agents table only).
|
||||
|
||||
Hub table uses RowClicked exclusively — wiring it to RowSelected too
|
||||
would cause a double push on every click since Textual 8 fires
|
||||
RowSelected on ALL clicks, not just second-click-on-same-row.
|
||||
"""
|
||||
if event.data_table.id == "agents-table":
|
||||
self._handle_agent_row(event.cursor_row)
|
||||
|
||||
def on_single_click_data_table_row_clicked(
|
||||
self, event: SingleClickDataTable.RowClicked
|
||||
) -> None:
|
||||
"""Handle single mouse-click on a hub table row."""
|
||||
if event.data_table.id == "hub-table":
|
||||
self._handle_hub_row(event.cursor_row)
|
||||
|
||||
def _handle_agent_row(self, idx: int) -> None:
|
||||
"""Open agent setup/unlink for the selected agent row."""
|
||||
if idx < 0 or idx >= len(self._agent_rows):
|
||||
return
|
||||
|
||||
display_name, agent, _config_path, _servers_key, is_linked = self._agent_rows[idx]
|
||||
|
||||
if is_linked:
|
||||
from fuzzforge_cli.tui.screens.agent_setup import AgentUnlinkScreen
|
||||
|
||||
self.push_screen(
|
||||
AgentUnlinkScreen(agent, display_name),
|
||||
callback=self._on_agent_changed,
|
||||
)
|
||||
else:
|
||||
from fuzzforge_cli.tui.screens.agent_setup import AgentSetupScreen
|
||||
|
||||
self.push_screen(
|
||||
AgentSetupScreen(agent, display_name),
|
||||
callback=self._on_agent_changed,
|
||||
)
|
||||
|
||||
def _handle_hub_row(self, idx: int) -> None:
    """React to a click on a hub table row.

    Depending on row state this opens the live build log, shows a
    notification, or pushes the build-confirmation dialog.

    :param idx: Cursor row index into ``self._hub_rows``.
    """
    # Double-click protection: never stack two build dialogs.
    if getattr(self, "_build_dialog_open", False):
        return
    if not (0 <= idx < len(self._hub_rows)):
        return

    row = self._hub_rows[idx]
    if row is None:
        return  # group header row — nothing to open

    server_name, image, hub_name, is_ready = row

    # Build already in flight — show its live log instead.
    if image in self._active_builds:
        from fuzzforge_cli.tui.screens.build_log import BuildLogScreen

        self._build_dialog_open = True
        self.push_screen(
            BuildLogScreen(image),
            callback=lambda _: setattr(self, "_build_dialog_open", False),
        )
        return

    if is_ready:
        self.notify(f"{image} is already built ✓", severity="information")
        return

    if hub_name == "manual":
        self.notify("Manual servers must be built outside FuzzForge")
        return

    from fuzzforge_cli.tui.screens.build_image import BuildImageScreen

    self._build_dialog_open = True

    def _dialog_closed(result: bool | None) -> None:
        # Re-arm the guard, then start the build if confirmed.
        self._build_dialog_open = False
        if result is not None:
            self._on_build_confirmed(result, server_name, image, hub_name)

    self.push_screen(
        BuildImageScreen(server_name, image, hub_name),
        callback=_dialog_closed,
    )
|
||||
|
||||
def _on_build_confirmed(self, confirmed: bool, server_name: str, image: str, hub_name: str) -> None:
    """Kick off a background build once the user has confirmed.

    :param confirmed: Dialog result; ``False`` aborts silently.
    :param server_name: Hub tool name.
    :param image: Image tag to build.
    :param hub_name: Hub the tool belongs to.
    """
    if not confirmed:
        return
    # Reset per-image state, mark the build pending so the table shows
    # "Building…" right away, then hand off to the worker thread.
    self._build_logs[image] = []
    self._build_results.pop(image, None)
    self._active_builds[image] = True
    self._refresh_hub()
    self._run_build(server_name, image, hub_name)
|
||||
|
||||
@work(thread=True)
def _run_build(self, server_name: str, image: str, hub_name: str) -> None:
    """Build a Docker/Podman image in a background thread.

    Streams build output into ``self._build_logs[image]`` and reports
    completion back to the main thread via ``call_from_thread``.

    :param server_name: Hub tool name (used to locate the Dockerfile).
    :param image: Image tag to build.
    :param hub_name: Hub the tool belongs to.
    """
    from fuzzforge_cli.tui.helpers import build_image, find_dockerfile_for_server

    logs = self._build_logs.setdefault(image, [])

    def _fail(message: str) -> None:
        # Record the error, clear the pending marker, notify the UI.
        logs.append(message)
        self._build_results[image] = False
        self._active_builds.pop(image, None)
        self.call_from_thread(self._on_build_done, image, success=False)

    dockerfile = find_dockerfile_for_server(server_name, hub_name)
    if dockerfile is None:
        _fail(f"ERROR: Dockerfile not found for '{server_name}' in hub '{hub_name}'")
        return

    logs.append(f"Building {image} from {dockerfile.parent}")
    logs.append("")

    try:
        proc = build_image(image, dockerfile)
    except FileNotFoundError as exc:  # engine binary missing
        _fail(f"ERROR: {exc}")
        return

    self._active_builds[image] = proc  # replace pending marker with actual process
    self.call_from_thread(self._refresh_hub)  # show ⏳ in table

    # Stream merged stdout/stderr into the log buffer.
    # BUGFIX: the previous version returned early when proc.stdout was
    # None, leaving the build marked active forever and never reporting
    # completion; now we always wait, clean up and notify.
    if proc.stdout is not None:
        for line in proc.stdout:
            logs.append(line.rstrip())

    proc.wait()
    self._active_builds.pop(image, None)
    success = proc.returncode == 0
    self._build_results[image] = success
    self.call_from_thread(self._on_build_done, image, success=success)
|
||||
|
||||
def _on_build_done(self, image: str, *, success: bool) -> None:
    """Main-thread completion handler for a background build.

    :param image: Image tag whose build finished.
    :param success: Whether the build exited cleanly.
    """
    self._refresh_hub()
    if success:
        message, severity = f"✓ {image} built successfully", "information"
    else:
        message, severity = f"✗ {image} build failed — click row for log", "error"
    self.notify(message, severity=severity)
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
    """Dispatch dashboard button presses to their actions."""
    handlers = {
        "btn-hub-manager": self.action_manage_hubs,
        "btn-fuzzinglabs-hub": self.action_add_fuzzinglabs_hub,
    }
    handler = handlers.get(event.button.id)
    if handler is not None:
        handler()
|
||||
|
||||
def action_add_fuzzinglabs_hub(self) -> None:
    """Open the clone dialog pre-filled with the FuzzingLabs hub URL."""
    from fuzzforge_cli.tui.screens.hub_manager import CloneHubScreen

    dialog = CloneHubScreen(
        default_url="https://github.com/FuzzingLabs/mcp-security-hub",
        default_name="mcp-security-hub",
        is_default=True,
    )
    self.push_screen(dialog, callback=self._on_hub_changed)
|
||||
|
||||
def action_manage_hubs(self) -> None:
    """Show the hub manager; the hub table is refreshed on close."""
    from fuzzforge_cli.tui.screens.hub_manager import HubManagerScreen

    manager = HubManagerScreen()
    self.push_screen(manager, callback=self._on_hub_changed)
|
||||
|
||||
def _on_agent_changed(self, result: str | None) -> None:
    """Handle completion of an agent setup or unlink dialog.

    :param result: Status message to surface, or ``None``/empty to skip.
    """
    message = result or None
    if message is not None:
        self.notify(message)
    self._refresh_agents()
|
||||
|
||||
def _on_hub_changed(self, result: str | None) -> None:
    """Re-scan hub state after the hub manager or clone dialog closes.

    :param result: Ignored — the refresh happens unconditionally.
    """
    del result  # unused; kept for the push_screen callback signature
    self._refresh_hub()
|
||||
|
||||
def action_refresh(self) -> None:
    """Re-query every status panel and tell the user it happened."""
    for refresh in (self._refresh_agents, self._refresh_hub):
        refresh()
    self.notify("Status refreshed")
|
||||
@@ -1,687 +0,0 @@
|
||||
"""Shared helpers for FuzzForge TUI and CLI.
|
||||
|
||||
Provides utility functions for checking AI agent configuration status,
|
||||
hub server image availability, installing/removing MCP configurations,
|
||||
and managing linked MCP hub repositories.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from fuzzforge_cli.commands.mcp import (
|
||||
AIAgent,
|
||||
_detect_docker_socket,
|
||||
_detect_podman_socket,
|
||||
_find_fuzzforge_root,
|
||||
_generate_mcp_config,
|
||||
_get_claude_code_user_mcp_path,
|
||||
_get_claude_desktop_mcp_path,
|
||||
_get_copilot_mcp_path,
|
||||
)
|
||||
|
||||
# --- Hub Management Constants ---
|
||||
|
||||
FUZZFORGE_DEFAULT_HUB_URL = "git@github.com:FuzzingLabs/mcp-security-hub.git"
|
||||
FUZZFORGE_DEFAULT_HUB_NAME = "mcp-security-hub"
|
||||
|
||||
|
||||
def get_fuzzforge_user_dir() -> Path:
    """Return the user-global ``~/.fuzzforge/`` directory.

    Holds data shared by every workspace: cloned hub repositories, the
    hub registry, container storage (graphroot/runroot) and the hub
    workspace volume.

    Set ``FUZZFORGE_USER_DIR`` to redirect all user-global data to a
    custom path — handy for exercising a fresh install without touching
    the real ``~/.fuzzforge/``.

    :return: ``$FUZZFORGE_USER_DIR`` (resolved) when set and non-empty,
        otherwise ``Path.home() / ".fuzzforge"``
    """
    override = os.environ.get("FUZZFORGE_USER_DIR")
    return Path(override).resolve() if override else Path.home() / ".fuzzforge"
|
||||
|
||||
|
||||
def get_fuzzforge_dir() -> Path:
    """Return the project-local ``.fuzzforge/`` directory.

    Scopes workspace-specific data (fuzzing results, project artifacts)
    to the current directory, much like ``.git/`` scopes version-control
    data to one project.

    :return: ``<cwd>/.fuzzforge``
    """
    return Path.cwd() / ".fuzzforge"
|
||||
|
||||
# Categories that typically need NET_RAW capability for network access
|
||||
_NET_RAW_CATEGORIES = {"reconnaissance", "web-security"}
|
||||
|
||||
# Directories to skip when scanning a hub for MCP tool Dockerfiles
|
||||
_SCAN_SKIP_DIRS = {
|
||||
".git",
|
||||
".github",
|
||||
"scripts",
|
||||
"tests",
|
||||
"examples",
|
||||
"meta",
|
||||
"__pycache__",
|
||||
"node_modules",
|
||||
".venv",
|
||||
}
|
||||
|
||||
|
||||
def get_agent_configs() -> list[tuple[str, AIAgent, Path, str]]:
    """Build the list of supported agents with resolved config paths.

    Tuple layout: ``(display name, AIAgent value, config path, servers
    JSON key)``.

    :return: One entry per supported AI agent.
    """
    entries: list[tuple[str, AIAgent, Path, str]] = [
        ("GitHub Copilot", AIAgent.COPILOT, _get_copilot_mcp_path(), "servers"),
        ("Claude Desktop", AIAgent.CLAUDE_DESKTOP, _get_claude_desktop_mcp_path(), "mcpServers"),
        ("Claude Code", AIAgent.CLAUDE_CODE, _get_claude_code_user_mcp_path(), "mcpServers"),
    ]
    return entries
|
||||
|
||||
|
||||
def check_agent_status(config_path: Path, servers_key: str) -> tuple[bool, str]:
    """Check whether an AI agent has FuzzForge configured.

    :param config_path: Path to the agent's MCP config file.
    :param servers_key: JSON key for the servers dict (e.g. "servers" or "mcpServers").
    :return: Tuple of (is_linked, status_description).
    """
    if not config_path.exists():
        return False, "Not configured"
    try:
        config = json.loads(config_path.read_text())
    except json.JSONDecodeError:
        return False, "Invalid config file"
    except OSError:
        # BUGFIX: read_text() can fail (permissions, file removed between
        # exists() and read) — report it instead of crashing the caller.
        return False, "Config file unreadable"
    servers = config.get(servers_key, {})
    if "fuzzforge" in servers:
        return True, "Linked"
    return False, "Config exists, not linked"
|
||||
|
||||
|
||||
def check_hub_image(image: str) -> tuple[bool, str]:
    """Check whether a container image exists locally.

    Honours ``FUZZFORGE_ENGINE__TYPE`` so Podman users get a correct
    build status instead of a permanent "Not built".

    :param image: Image name (e.g. "semgrep-mcp:latest").
    :return: Tuple of (is_ready, status_description).
    """
    engine = os.environ.get("FUZZFORGE_ENGINE__TYPE", "docker").lower()
    cmd = "podman" if engine == "podman" else "docker"
    try:
        inspect = subprocess.run(
            [cmd, "image", "inspect", image],
            check=False,
            capture_output=True,
            text=True,
            timeout=5,
        )
    except subprocess.TimeoutExpired:
        return False, "Timeout"
    except FileNotFoundError:
        return False, f"{cmd} not found"
    return (True, "Ready") if inspect.returncode == 0 else (False, "Not built")
|
||||
|
||||
|
||||
def load_hub_config(fuzzforge_root: Path) -> dict[str, Any]:
    """Load ``hub-config.json`` from the FuzzForge root.

    :param fuzzforge_root: Path to the fuzzforge-oss directory.
    :return: Parsed hub configuration dict; empty dict if missing or
        unreadable.
    """
    config_path = fuzzforge_root / "hub-config.json"
    if not config_path.exists():
        return {}
    try:
        data: dict[str, Any] = json.loads(config_path.read_text())
    except (json.JSONDecodeError, OSError):
        # OSError added for consistency with load_hubs_registry(): an
        # unreadable file degrades to "no config" instead of crashing.
        return {}
    return data
|
||||
|
||||
|
||||
def find_fuzzforge_root() -> Path:
    """Find the FuzzForge installation root directory.

    Thin public wrapper over the CLI-internal ``_find_fuzzforge_root``
    so TUI code does not import a private name directly.

    :return: Path to the fuzzforge-oss directory.

    """
    return _find_fuzzforge_root()
|
||||
|
||||
|
||||
def install_agent_config(agent: AIAgent, engine: str, force: bool = False) -> str:
    """Install FuzzForge MCP configuration for an AI agent.

    Merges a "fuzzforge" server entry into the agent's MCP config file,
    creating the file (and parent directories) when it does not exist.

    :param agent: Target AI agent.
    :param engine: Container engine type ("docker" or "podman").
    :param force: Overwrite an existing "fuzzforge" entry.
    :return: Result message string.
    """
    fuzzforge_root = _find_fuzzforge_root()

    # Resolve the per-agent config location and the JSON key its
    # servers live under.
    if agent == AIAgent.COPILOT:
        config_path, servers_key = _get_copilot_mcp_path(), "servers"
    elif agent == AIAgent.CLAUDE_CODE:
        config_path, servers_key = _get_claude_code_user_mcp_path(), "mcpServers"
    else:
        config_path, servers_key = _get_claude_desktop_mcp_path(), "mcpServers"

    if engine == "docker":
        socket = _detect_docker_socket()
    else:
        socket = _detect_podman_socket()

    server_config = _generate_mcp_config(
        fuzzforge_root=fuzzforge_root,
        engine_type=engine,
        engine_socket=socket,
    )

    if config_path.exists():
        # Merge into the existing file, preserving other servers.
        try:
            full_config = json.loads(config_path.read_text())
        except json.JSONDecodeError:
            return f"Error: Invalid JSON in {config_path}"

        if "fuzzforge" in full_config.get(servers_key, {}) and not force:
            return "Already configured (use force to overwrite)"

        full_config.setdefault(servers_key, {})["fuzzforge"] = server_config
    else:
        config_path.parent.mkdir(parents=True, exist_ok=True)
        full_config = {servers_key: {"fuzzforge": server_config}}

    config_path.write_text(json.dumps(full_config, indent=4))
    return f"Installed FuzzForge for {agent.value}"
|
||||
|
||||
|
||||
def uninstall_agent_config(agent: AIAgent) -> str:
    """Remove the FuzzForge MCP entry from an AI agent's config.

    :param agent: Target AI agent.
    :return: Result message string.
    """
    if agent == AIAgent.COPILOT:
        config_path, servers_key = _get_copilot_mcp_path(), "servers"
    elif agent == AIAgent.CLAUDE_CODE:
        config_path, servers_key = _get_claude_code_user_mcp_path(), "mcpServers"
    else:
        config_path, servers_key = _get_claude_desktop_mcp_path(), "mcpServers"

    if not config_path.exists():
        return "Configuration file not found"

    try:
        config = json.loads(config_path.read_text())
    except json.JSONDecodeError:
        return "Error: Invalid JSON in config file"

    servers = config.get(servers_key, {})
    if "fuzzforge" not in servers:
        return "FuzzForge is not configured for this agent"

    servers.pop("fuzzforge")
    config_path.write_text(json.dumps(config, indent=4))
    return f"Removed FuzzForge from {agent.value}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hub Management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def get_hubs_registry_path() -> Path:
    """Return path to the hubs registry file (``~/.fuzzforge/hubs.json``).

    Lives in the user-global directory so the registry is shared across
    all workspaces (honours ``FUZZFORGE_USER_DIR`` via
    ``get_fuzzforge_user_dir``).

    :return: Path to the registry JSON file.

    """
    return get_fuzzforge_user_dir() / "hubs.json"
|
||||
|
||||
|
||||
def get_default_hubs_dir() -> Path:
    """Return default directory for cloned hubs (``~/.fuzzforge/hubs/``).

    Lives in the user-global directory so hubs are cloned once and then
    reused by every workspace (honours ``FUZZFORGE_USER_DIR`` via
    ``get_fuzzforge_user_dir``).

    :return: Path to the default hubs directory.

    """
    return get_fuzzforge_user_dir() / "hubs"
|
||||
|
||||
|
||||
def _discover_hub_dirs() -> list[Path]:
    """Find cloned hub repositories in all known hub locations.

    Searches the current global location (``~/.fuzzforge/hubs/``) and
    the legacy workspace-local one (``<cwd>/.fuzzforge/hubs/``) so hubs
    cloned before the global-dir migration are still picked up.

    :return: Direct child directories that contain a ``.git``
        sub-directory.
    """
    found: list[Path] = []
    for base in (get_fuzzforge_user_dir() / "hubs", get_fuzzforge_dir() / "hubs"):
        if not base.is_dir():
            continue
        for entry in base.iterdir():
            if entry.is_dir() and (entry / ".git").is_dir():
                found.append(entry)
    return found
|
||||
|
||||
|
||||
def load_hubs_registry() -> dict[str, Any]:
    """Load the hubs registry, rebuilding it if the file is missing.

    When ``~/.fuzzforge/hubs.json`` is absent or unreadable, scans known
    hub directories and reconstructs one entry per discovered hub. This
    covers the migration from the old workspace-local registry path and
    any case where the registry was lost. The rebuilt registry is
    persisted (best-effort) so the scan is not repeated on every load.

    :return: Registry dict with a ``hubs`` key listing hub entries.
    """
    path = get_hubs_registry_path()
    if path.exists():
        try:
            data: dict[str, Any] = json.loads(path.read_text())
            return data
        except (json.JSONDecodeError, OSError):
            pass  # fall through to the rebuild path

    discovered = _discover_hub_dirs()
    if not discovered:
        return {"hubs": []}

    hubs: list[dict[str, Any]] = [
        {
            "name": hub_dir.name,
            "path": str(hub_dir),
            "git_url": _read_git_remote_url(hub_dir),
            "is_default": hub_dir.name == FUZZFORGE_DEFAULT_HUB_NAME,
        }
        for hub_dir in discovered
    ]

    registry: dict[str, Any] = {"hubs": hubs}
    # Persist so we don't re-scan on every load.
    with contextlib.suppress(OSError):
        save_hubs_registry(registry)
    return registry


def _read_git_remote_url(hub_dir: Path) -> str:
    """Best-effort read of a repo's ``origin`` remote URL ("" on failure).

    Uses the module-level ``subprocess`` import — fixes the redundant
    function-local ``import subprocess as _sp`` the old code carried.
    """
    try:
        result = subprocess.run(
            ["git", "-C", str(hub_dir), "remote", "get-url", "origin"],
            check=False, capture_output=True, text=True, timeout=5,
        )
    except Exception:  # noqa: S110 - git URL is optional metadata
        return ""
    return result.stdout.strip() if result.returncode == 0 else ""
|
||||
|
||||
|
||||
def save_hubs_registry(registry: dict[str, Any]) -> None:
    """Persist the hubs registry, creating parent dirs as needed.

    :param registry: Registry dict to write.
    """
    target = get_hubs_registry_path()
    target.parent.mkdir(parents=True, exist_ok=True)
    target.write_text(json.dumps(registry, indent=2))
|
||||
|
||||
|
||||
def scan_hub_for_servers(hub_path: Path) -> list[dict[str, Any]]:
    """Scan a hub directory for MCP tool Dockerfiles.

    Matches the ``category/tool-name/Dockerfile`` layout and emits one
    server-configuration dict per discovered tool.

    :param hub_path: Root directory of the hub repository.
    :return: Sorted list of server configuration dicts.
    """
    if not hub_path.is_dir():
        return []

    discovered: list[dict[str, Any]] = []
    for dockerfile in sorted(hub_path.rglob("Dockerfile")):
        parts = dockerfile.relative_to(hub_path).parts

        # Only the exact category/tool-name/Dockerfile layout counts.
        if len(parts) != 3:
            continue
        category, tool_name = parts[0], parts[1]
        if category in _SCAN_SKIP_DIRS:
            continue

        # Networking-heavy categories get NET_RAW by default.
        caps: list[str] = ["NET_RAW"] if category in _NET_RAW_CATEGORIES else []

        discovered.append(
            {
                "name": tool_name,
                "description": f"{tool_name} — {category}",
                "type": "docker",
                "image": f"{tool_name}:latest",
                "category": category,
                "capabilities": caps,
                "volumes": [f"{get_fuzzforge_user_dir()}/hub/workspace:/data"],
                "enabled": True,
            }
        )

    return discovered
|
||||
|
||||
|
||||
def link_hub(
    name: str,
    path: str | Path,
    git_url: str | None = None,
    is_default: bool = False,
) -> str:
    """Register a hub directory and merge its servers into hub-config.json.

    :param name: Display name for the hub.
    :param path: Local directory path containing the hub.
    :param git_url: Optional git remote URL (for tracking).
    :param is_default: Whether this is the default FuzzingLabs hub.
    :return: Result message string.
    """
    hub_path = Path(path).resolve()
    if not hub_path.is_dir():
        return f"Error: directory not found: {hub_path}"

    # Replace any same-named registry entry with the new one.
    registry = load_hubs_registry()
    entries = [h for h in registry.get("hubs", []) if h.get("name") != name]
    entries.append(
        {
            "name": name,
            "path": str(hub_path),
            "git_url": git_url,
            "is_default": is_default,
        }
    )
    registry["hubs"] = entries
    save_hubs_registry(registry)

    # Scan the hub and merge any discovered servers into hub-config.json.
    scanned = scan_hub_for_servers(hub_path)
    if not scanned:
        return f"Linked '{name}' (0 servers found)"

    try:
        added = _merge_servers_into_hub_config(name, scanned)
    except Exception as exc:  # config update is best-effort
        return f"Linked '{name}' but config update failed: {exc}"

    return f"Linked '{name}' — {added} new servers added ({len(scanned)} scanned)"
|
||||
|
||||
|
||||
def unlink_hub(name: str) -> str:
    """Deregister a hub and drop its servers from hub-config.json.

    :param name: Name of the hub to unlink.
    :return: Result message string.
    """
    registry = load_hubs_registry()
    entries = registry.get("hubs", [])

    if all(h.get("name") != name for h in entries):
        return f"Hub '{name}' is not linked"

    registry["hubs"] = [h for h in entries if h.get("name") != name]
    save_hubs_registry(registry)

    try:
        removed = _remove_hub_servers_from_config(name)
    except Exception:  # best-effort — registry removal already succeeded
        removed = 0

    return f"Unlinked '{name}' — {removed} server(s) removed"
|
||||
|
||||
|
||||
def clone_hub(
|
||||
git_url: str,
|
||||
dest: Path | None = None,
|
||||
name: str | None = None,
|
||||
) -> tuple[bool, str, Path | None]:
|
||||
"""Clone a git hub repository.
|
||||
|
||||
If the destination already exists and is a git repo, pulls instead.
|
||||
|
||||
:param git_url: Git remote URL to clone.
|
||||
:param dest: Destination directory (auto-derived from URL if *None*).
|
||||
:param name: Hub name (auto-derived from URL if *None*).
|
||||
:return: Tuple of ``(success, message, clone_path)``.
|
||||
|
||||
"""
|
||||
if name is None:
|
||||
name = git_url.rstrip("/").split("/")[-1]
|
||||
name = name.removesuffix(".git")
|
||||
|
||||
if dest is None:
|
||||
dest = get_default_hubs_dir() / name
|
||||
|
||||
if dest.exists():
|
||||
if (dest / ".git").is_dir():
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "-C", str(dest), "pull"],
|
||||
check=False, capture_output=True,
|
||||
text=True,
|
||||
timeout=120,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return True, f"Updated existing clone at {dest}", dest
|
||||
return False, f"Git pull failed: {result.stderr.strip()}", None
|
||||
except subprocess.TimeoutExpired:
|
||||
return False, "Git pull timed out", None
|
||||
except FileNotFoundError:
|
||||
return False, "Git not found", None
|
||||
return False, f"Directory already exists (not a git repo): {dest}", None
|
||||
|
||||
dest.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
["git", "clone", git_url, str(dest)],
|
||||
check=False, capture_output=True,
|
||||
text=True,
|
||||
timeout=300,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return True, f"Cloned to {dest}", dest
|
||||
return False, f"Git clone failed: {result.stderr.strip()}", None
|
||||
except subprocess.TimeoutExpired:
|
||||
return False, "Git clone timed out (5 min limit)", None
|
||||
except FileNotFoundError:
|
||||
return False, "Git not found on PATH", None
|
||||
|
||||
|
||||
def _merge_servers_into_hub_config(
    hub_name: str,
    servers: list[dict[str, Any]],
) -> int:
    """Merge scanned servers into hub-config.json, skipping duplicates.

    New entries are tagged with ``source_hub`` so they can be stripped
    again when the hub is unlinked.

    :param hub_name: Name of the source hub (used for tagging).
    :param servers: Server dicts from :func:`scan_hub_for_servers`.
    :return: Number of newly added servers.
    """
    config_path = find_fuzzforge_root() / "hub-config.json"
    fallback: dict[str, Any] = {
        "servers": [],
        "default_timeout": 300,
        "cache_tools": True,
    }

    config = fallback
    if config_path.exists():
        try:
            config = json.loads(config_path.read_text())
        except json.JSONDecodeError:
            config = fallback

    existing = config.get("servers", [])
    known = {s.get("name") for s in existing}

    added = 0
    for server in servers:
        if server["name"] in known:
            continue
        server["source_hub"] = hub_name
        existing.append(server)
        known.add(server["name"])
        added += 1

    config["servers"] = existing
    config_path.write_text(json.dumps(config, indent=2))
    return added
|
||||
|
||||
|
||||
def _remove_hub_servers_from_config(hub_name: str) -> int:
    """Strip a hub's tagged servers out of hub-config.json.

    Only entries whose ``source_hub`` matches are removed; manually
    added servers (without a tag) are left untouched.

    :param hub_name: Name of the hub whose servers should be removed.
    :return: Number of servers removed.
    """
    config_path = find_fuzzforge_root() / "hub-config.json"
    if not config_path.exists():
        return 0

    try:
        config = json.loads(config_path.read_text())
    except json.JSONDecodeError:
        return 0

    servers = config.get("servers", [])
    kept = [s for s in servers if s.get("source_hub") != hub_name]
    config["servers"] = kept

    config_path.write_text(json.dumps(config, indent=2))
    return len(servers) - len(kept)
|
||||
|
||||
|
||||
def find_dockerfile_for_server(server_name: str, hub_name: str) -> Path | None:
    """Find the Dockerfile for a hub server tool.

    Looks up the hub path from the registry, then searches for the
    ``category/<server_name>/Dockerfile`` layout.

    :param server_name: Tool name (e.g. ``"nmap-mcp"``).
    :param hub_name: Hub name as stored in the registry.
    :return: Absolute path to the Dockerfile, or ``None`` if not found.
    """
    registry = load_hubs_registry()
    hub_entry = next(
        (h for h in registry.get("hubs", []) if h.get("name") == hub_name),
        None,
    )
    if not hub_entry:
        return None

    hub_path = Path(hub_entry["path"])
    for dockerfile in hub_path.rglob("Dockerfile"):
        parts = dockerfile.relative_to(hub_path).parts
        if len(parts) != 3 or parts[1] != server_name:
            continue
        # Consistency fix: scan_hub_for_servers() ignores non-tool
        # directories (tests/, examples/, .git/, …) — skip them here too
        # so a fixture Dockerfile can never shadow the real one.
        if parts[0] in _SCAN_SKIP_DIRS:
            continue
        return dockerfile

    return None
|
||||
|
||||
|
||||
def build_image(
    image: str,
    dockerfile: Path,
    *,
    engine: str | None = None,
) -> subprocess.Popen[str]:
    """Launch a non-blocking ``docker``/``podman`` build subprocess.

    The caller owns the returned :class:`subprocess.Popen` object and
    streams its merged stdout/stderr lines incrementally.

    :param image: Image tag (e.g. ``"nmap-mcp:latest"``).
    :param dockerfile: Path to the ``Dockerfile``.
    :param engine: ``"docker"`` or ``"podman"`` (auto-detected if ``None``).
    :return: Running subprocess with merged stdout+stderr.
    """
    if engine is None:
        engine = os.environ.get("FUZZFORGE_ENGINE__TYPE", "docker").lower()
    binary = "podman" if engine == "podman" else "docker"

    # The build context is the directory holding the Dockerfile.
    return subprocess.Popen(
        [binary, "build", "-t", image, str(dockerfile.parent)],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        text=True,
    )
|
||||
@@ -1 +0,0 @@
|
||||
"""TUI screens for FuzzForge."""
|
||||
@@ -1,96 +0,0 @@
|
||||
"""Agent setup and unlink modal screens for FuzzForge TUI.
|
||||
|
||||
Provides context-aware modals that receive the target agent directly
|
||||
from the dashboard row selection — no redundant agent picker needed.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Label, RadioButton, RadioSet
|
||||
|
||||
from fuzzforge_cli.commands.mcp import AIAgent
|
||||
from fuzzforge_cli.tui.helpers import install_agent_config, uninstall_agent_config
|
||||
|
||||
|
||||
class AgentSetupScreen(ModalScreen[str | None]):
    """Modal that links one specific agent — only the engine is asked.

    The target agent comes straight from the dashboard row selection,
    so no redundant agent picker is shown.
    """

    BINDINGS = [("escape", "cancel", "Cancel")]

    def __init__(self, agent: AIAgent, display_name: str) -> None:
        super().__init__()
        self._agent = agent
        self._display_name = display_name

    def compose(self) -> ComposeResult:
        """Lay out the setup dialog."""
        with Vertical(id="setup-dialog"):
            yield Label(f"Setup {self._display_name}", classes="dialog-title")

            yield Label("Container Engine:", classes="field-label")
            yield RadioSet(
                RadioButton("Docker", value=True),
                RadioButton("Podman"),
                id="engine-select",
            )

            with Horizontal(classes="dialog-buttons"):
                yield Button("Install", variant="primary", id="btn-install")
                yield Button("Cancel", variant="default", id="btn-cancel")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Route Install/Cancel button clicks."""
        button_id = event.button.id
        if button_id == "btn-install":
            self._do_install()
        elif button_id == "btn-cancel":
            self.dismiss(None)

    def action_cancel(self) -> None:
        """Close the dialog without installing."""
        self.dismiss(None)

    def _do_install(self) -> None:
        """Install the MCP config for the chosen engine and report back."""
        selector = self.query_one("#engine-select", RadioSet)
        # Index 0 (or nothing pressed yet) means Docker.
        chosen = "docker" if selector.pressed_index <= 0 else "podman"
        self.dismiss(install_agent_config(self._agent, chosen, force=True))
|
||||
|
||||
|
||||
class AgentUnlinkScreen(ModalScreen[str | None]):
    """Confirmation modal for unlinking one specific agent."""

    BINDINGS = [("escape", "cancel", "Cancel")]

    def __init__(self, agent: AIAgent, display_name: str) -> None:
        super().__init__()
        self._agent = agent
        self._display_name = display_name

    def compose(self) -> ComposeResult:
        """Lay out the unlink confirmation dialog."""
        with Vertical(id="unlink-dialog"):
            yield Label(f"Unlink {self._display_name}?", classes="dialog-title")
            yield Label(
                f"This will remove the FuzzForge MCP configuration from {self._display_name}.",
            )

            with Horizontal(classes="dialog-buttons"):
                yield Button("Unlink", variant="warning", id="btn-unlink")
                yield Button("Cancel", variant="default", id="btn-cancel")

    def on_button_pressed(self, event: Button.Pressed) -> None:
        """Route Unlink/Cancel button clicks."""
        button_id = event.button.id
        if button_id == "btn-unlink":
            self.dismiss(uninstall_agent_config(self._agent))
        elif button_id == "btn-cancel":
            self.dismiss(None)

    def action_cancel(self) -> None:
        """Close the dialog without unlinking."""
        self.dismiss(None)
|
||||
@@ -1,58 +0,0 @@
|
||||
"""Build-image confirm dialog for FuzzForge TUI.
|
||||
|
||||
Simple modal that asks the user to confirm before starting a background
|
||||
build. The actual build is managed by the app so the user is never
|
||||
locked on this screen.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Label
|
||||
|
||||
|
||||
class _NoFocusButton(Button):
|
||||
can_focus = False
|
||||
|
||||
|
||||
class BuildImageScreen(ModalScreen[bool]):
|
||||
"""Quick confirmation before starting a background Docker/Podman build."""
|
||||
|
||||
BINDINGS = [("escape", "cancel", "Cancel")]
|
||||
|
||||
def __init__(self, server_name: str, image: str, hub_name: str) -> None:
|
||||
super().__init__()
|
||||
self._server_name = server_name
|
||||
self._image = image
|
||||
self._hub_name = hub_name
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Build the confirmation dialog UI."""
|
||||
with Vertical(id="build-dialog"):
|
||||
yield Label(f"Build {self._image}", classes="dialog-title")
|
||||
yield Label(
|
||||
f"Hub: {self._hub_name} • Tool: {self._server_name}",
|
||||
id="build-subtitle",
|
||||
)
|
||||
yield Label(
|
||||
"The image will be built in the background.\n"
|
||||
"You'll receive a notification when it's done.",
|
||||
id="confirm-text",
|
||||
)
|
||||
with Horizontal(classes="dialog-buttons"):
|
||||
yield _NoFocusButton("Build", variant="primary", id="btn-build")
|
||||
yield _NoFocusButton("Cancel", variant="default", id="btn-cancel")
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
"""Handle Build or Cancel button clicks."""
|
||||
if event.button.id == "btn-build":
|
||||
self.dismiss(result=True)
|
||||
elif event.button.id == "btn-cancel":
|
||||
self.dismiss(result=False)
|
||||
|
||||
def action_cancel(self) -> None:
|
||||
"""Dismiss the dialog when Escape is pressed."""
|
||||
self.dismiss(result=False)
|
||||
@@ -1,80 +0,0 @@
|
||||
"""Build-log viewer screen for FuzzForge TUI.
|
||||
|
||||
Shows live output of a background build started by the app. Polls the
|
||||
app's ``_build_logs`` buffer every 500 ms so the user can pop this screen
|
||||
open at any time while the build is running and see up-to-date output.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, Label, Log
|
||||
|
||||
|
||||
class _NoFocusButton(Button):
|
||||
can_focus = False
|
||||
|
||||
|
||||
class BuildLogScreen(ModalScreen[None]):
|
||||
"""Live log viewer for a background build job managed by the app."""
|
||||
|
||||
BINDINGS = [("escape", "close", "Close")]
|
||||
|
||||
def __init__(self, image: str) -> None:
|
||||
super().__init__()
|
||||
self._image = image
|
||||
self._last_line: int = 0
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Build the log viewer UI."""
|
||||
with Vertical(id="build-dialog"):
|
||||
yield Label(f"Build log — {self._image}", classes="dialog-title")
|
||||
yield Label("", id="build-status")
|
||||
yield Log(id="build-log", auto_scroll=True)
|
||||
with Horizontal(classes="dialog-buttons"):
|
||||
yield _NoFocusButton("Close", variant="default", id="btn-close")
|
||||
|
||||
def on_mount(self) -> None:
|
||||
"""Initialize log polling when the screen is mounted."""
|
||||
self._flush_log()
|
||||
self.set_interval(0.5, self._poll_log)
|
||||
|
||||
def _flush_log(self) -> None:
|
||||
"""Write any new lines since the last flush."""
|
||||
logs: list[str] = getattr(self.app, "_build_logs", {}).get(self._image, [])
|
||||
log_widget = self.query_one("#build-log", Log)
|
||||
new_lines = logs[self._last_line :]
|
||||
for line in new_lines:
|
||||
log_widget.write_line(line)
|
||||
self._last_line += len(new_lines)
|
||||
|
||||
active: dict[str, Any] = getattr(self.app, "_active_builds", {})
|
||||
status = self.query_one("#build-status", Label)
|
||||
if self._image in active:
|
||||
status.update("[yellow]⏳ Building…[/yellow]")
|
||||
else:
|
||||
# Build is done — check if we have a result stored
|
||||
results: dict[str, Any] = getattr(self.app, "_build_results", {})
|
||||
if self._image in results:
|
||||
if results[self._image]:
|
||||
status.update(f"[green]✓ {self._image} built successfully[/green]")
|
||||
else:
|
||||
status.update(f"[red]✗ {self._image} build failed[/red]")
|
||||
|
||||
def _poll_log(self) -> None:
|
||||
"""Poll for new log lines periodically."""
|
||||
self._flush_log()
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
"""Handle Close button click."""
|
||||
if event.button.id == "btn-close":
|
||||
self.dismiss(None)
|
||||
|
||||
def action_close(self) -> None:
|
||||
"""Dismiss the dialog when Escape is pressed."""
|
||||
self.dismiss(None)
|
||||
@@ -1,301 +0,0 @@
|
||||
"""Hub management screens for FuzzForge TUI.
|
||||
|
||||
Provides modal dialogs for managing linked MCP hub repositories:
|
||||
- HubManagerScreen: list, add, remove linked hubs
|
||||
- LinkHubScreen: link a local directory as a hub
|
||||
- CloneHubScreen: clone a git repo and link it (defaults to FuzzingLabs hub)
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
|
||||
from rich.text import Text
|
||||
from textual import work
|
||||
from textual.app import ComposeResult
|
||||
from textual.containers import Horizontal, Vertical
|
||||
from textual.screen import ModalScreen
|
||||
from textual.widgets import Button, DataTable, Input, Label, Static
|
||||
|
||||
from fuzzforge_cli.tui.helpers import (
|
||||
FUZZFORGE_DEFAULT_HUB_NAME,
|
||||
FUZZFORGE_DEFAULT_HUB_URL,
|
||||
clone_hub,
|
||||
link_hub,
|
||||
load_hubs_registry,
|
||||
scan_hub_for_servers,
|
||||
unlink_hub,
|
||||
)
|
||||
|
||||
|
||||
class HubManagerScreen(ModalScreen[str | None]):
|
||||
"""Modal screen for managing linked MCP hubs."""
|
||||
|
||||
BINDINGS = [("escape", "cancel", "Close")]
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Compose the hub manager layout."""
|
||||
with Vertical(id="hub-manager-dialog"):
|
||||
yield Label("Hub Manager", classes="dialog-title")
|
||||
yield DataTable(id="hubs-table")
|
||||
yield Label("", id="hub-status")
|
||||
with Horizontal(classes="dialog-buttons"):
|
||||
yield Button(
|
||||
"FuzzingLabs Hub",
|
||||
variant="primary",
|
||||
id="btn-clone-default",
|
||||
)
|
||||
yield Button("Link Path", variant="default", id="btn-link")
|
||||
yield Button("Clone URL", variant="default", id="btn-clone")
|
||||
yield Button("Remove", variant="primary", id="btn-remove")
|
||||
yield Button("Close", variant="default", id="btn-close")
|
||||
|
||||
def on_mount(self) -> None:
|
||||
"""Populate the hubs table on startup."""
|
||||
self._refresh_hubs()
|
||||
|
||||
def _refresh_hubs(self) -> None:
|
||||
"""Refresh the linked hubs table."""
|
||||
table = self.query_one("#hubs-table", DataTable)
|
||||
table.clear(columns=True)
|
||||
table.add_columns("Name", "Path", "Servers", "Source")
|
||||
table.cursor_type = "row"
|
||||
|
||||
registry = load_hubs_registry()
|
||||
hubs = registry.get("hubs", [])
|
||||
|
||||
if not hubs:
|
||||
table.add_row(
|
||||
Text("No hubs linked", style="dim"),
|
||||
Text("Press 'FuzzingLabs Hub' to get started", style="dim"),
|
||||
"",
|
||||
"",
|
||||
)
|
||||
return
|
||||
|
||||
for hub in hubs:
|
||||
name = hub.get("name", "unknown")
|
||||
path = hub.get("path", "")
|
||||
git_url = hub.get("git_url", "")
|
||||
is_default = hub.get("is_default", False)
|
||||
|
||||
hub_path = Path(path)
|
||||
count: str | Text
|
||||
if hub_path.is_dir():
|
||||
servers = scan_hub_for_servers(hub_path)
|
||||
count = str(len(servers))
|
||||
else:
|
||||
count = Text("dir missing", style="yellow")
|
||||
|
||||
source = git_url or "local"
|
||||
name_cell: str | Text
|
||||
if is_default:
|
||||
name_cell = Text(f"★ {name}", style="bold")
|
||||
else:
|
||||
name_cell = name
|
||||
|
||||
table.add_row(name_cell, path, count, source)
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
"""Route button actions."""
|
||||
if event.button.id == "btn-close":
|
||||
self.dismiss("refreshed")
|
||||
elif event.button.id == "btn-clone-default":
|
||||
self.app.push_screen(
|
||||
CloneHubScreen(
|
||||
FUZZFORGE_DEFAULT_HUB_URL,
|
||||
FUZZFORGE_DEFAULT_HUB_NAME,
|
||||
is_default=True,
|
||||
),
|
||||
callback=self._on_hub_action,
|
||||
)
|
||||
elif event.button.id == "btn-link":
|
||||
self.app.push_screen(
|
||||
LinkHubScreen(),
|
||||
callback=self._on_hub_action,
|
||||
)
|
||||
elif event.button.id == "btn-clone":
|
||||
self.app.push_screen(
|
||||
CloneHubScreen(),
|
||||
callback=self._on_hub_action,
|
||||
)
|
||||
elif event.button.id == "btn-remove":
|
||||
self._remove_selected()
|
||||
|
||||
def _on_hub_action(self, result: str | None) -> None:
|
||||
"""Handle result from a sub-screen."""
|
||||
if result:
|
||||
self.query_one("#hub-status", Label).update(result)
|
||||
self.app.notify(result)
|
||||
self._refresh_hubs()
|
||||
|
||||
def _remove_selected(self) -> None:
|
||||
"""Remove the currently selected hub."""
|
||||
table = self.query_one("#hubs-table", DataTable)
|
||||
registry = load_hubs_registry()
|
||||
hubs = registry.get("hubs", [])
|
||||
|
||||
if not hubs:
|
||||
self.app.notify("No hubs to remove", severity="warning")
|
||||
return
|
||||
|
||||
idx = table.cursor_row
|
||||
if idx is None or idx < 0 or idx >= len(hubs):
|
||||
self.app.notify("Select a hub to remove", severity="warning")
|
||||
return
|
||||
|
||||
name = hubs[idx].get("name", "")
|
||||
result = unlink_hub(name)
|
||||
self.query_one("#hub-status", Label).update(result)
|
||||
self._refresh_hubs()
|
||||
self.app.notify(result)
|
||||
|
||||
def action_cancel(self) -> None:
|
||||
"""Close the hub manager."""
|
||||
self.dismiss("refreshed")
|
||||
|
||||
|
||||
class LinkHubScreen(ModalScreen[str | None]):
|
||||
"""Modal for linking a local directory as an MCP hub."""
|
||||
|
||||
BINDINGS = [("escape", "cancel", "Cancel")]
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Compose the link dialog layout."""
|
||||
with Vertical(id="link-dialog"):
|
||||
yield Label("Link Local Hub", classes="dialog-title")
|
||||
|
||||
yield Label("Hub Name:", classes="field-label")
|
||||
yield Input(placeholder="my-hub", id="name-input")
|
||||
|
||||
yield Label("Directory Path:", classes="field-label")
|
||||
yield Input(placeholder="/path/to/hub-directory", id="path-input")
|
||||
|
||||
yield Label("", id="link-status")
|
||||
with Horizontal(classes="dialog-buttons"):
|
||||
yield Button("Link", variant="primary", id="btn-link")
|
||||
yield Button("Cancel", variant="default", id="btn-cancel")
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
"""Handle button clicks."""
|
||||
if event.button.id == "btn-cancel":
|
||||
self.dismiss(None)
|
||||
elif event.button.id == "btn-link":
|
||||
self._do_link()
|
||||
|
||||
def _do_link(self) -> None:
|
||||
"""Execute the link operation."""
|
||||
name = self.query_one("#name-input", Input).value.strip()
|
||||
path = self.query_one("#path-input", Input).value.strip()
|
||||
|
||||
if not name:
|
||||
self.app.notify("Please enter a hub name", severity="warning")
|
||||
return
|
||||
if not path:
|
||||
self.app.notify("Please enter a directory path", severity="warning")
|
||||
return
|
||||
|
||||
result = link_hub(name, path)
|
||||
self.dismiss(result)
|
||||
|
||||
def action_cancel(self) -> None:
|
||||
"""Dismiss without action."""
|
||||
self.dismiss(None)
|
||||
|
||||
|
||||
class CloneHubScreen(ModalScreen[str | None]):
|
||||
"""Modal for cloning a git hub repository and linking it.
|
||||
|
||||
When instantiated with *is_default=True* and FuzzingLabs URL,
|
||||
provides a one-click setup for the standard security hub.
|
||||
|
||||
"""
|
||||
|
||||
BINDINGS = [("escape", "cancel", "Cancel")]
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
default_url: str = "",
|
||||
default_name: str = "",
|
||||
is_default: bool = False,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self._default_url = default_url
|
||||
self._default_name = default_name
|
||||
self._is_default = is_default
|
||||
|
||||
def compose(self) -> ComposeResult:
|
||||
"""Compose the clone dialog layout."""
|
||||
title = "Clone FuzzingLabs Hub" if self._is_default else "Clone Git Hub"
|
||||
with Vertical(id="clone-dialog"):
|
||||
yield Label(title, classes="dialog-title")
|
||||
|
||||
yield Label("Git URL:", classes="field-label")
|
||||
yield Input(
|
||||
value=self._default_url,
|
||||
placeholder="git@github.com:org/repo.git",
|
||||
id="url-input",
|
||||
)
|
||||
|
||||
yield Label("Hub Name (optional):", classes="field-label")
|
||||
yield Input(
|
||||
value=self._default_name,
|
||||
placeholder="auto-detect from URL",
|
||||
id="name-input",
|
||||
)
|
||||
|
||||
yield Static("", id="clone-status")
|
||||
with Horizontal(classes="dialog-buttons"):
|
||||
yield Button(
|
||||
"Clone & Link",
|
||||
variant="primary",
|
||||
id="btn-clone",
|
||||
)
|
||||
yield Button("Cancel", variant="default", id="btn-cancel")
|
||||
|
||||
def on_button_pressed(self, event: Button.Pressed) -> None:
|
||||
"""Handle button clicks."""
|
||||
if event.button.id == "btn-cancel":
|
||||
self.dismiss(None)
|
||||
elif event.button.id == "btn-clone":
|
||||
self._start_clone()
|
||||
|
||||
def _start_clone(self) -> None:
|
||||
"""Validate input and start the async clone operation."""
|
||||
url = self.query_one("#url-input", Input).value.strip()
|
||||
if not url:
|
||||
self.app.notify("Please enter a git URL", severity="warning")
|
||||
return
|
||||
|
||||
self.query_one("#btn-clone", Button).disabled = True
|
||||
self.query_one("#clone-status", Static).update("⏳ Cloning repository...")
|
||||
self._do_clone(url)
|
||||
|
||||
@work(thread=True)
|
||||
def _do_clone(self, url: str) -> None:
|
||||
"""Clone the repo in a background thread."""
|
||||
name_input = self.query_one("#name-input", Input).value.strip()
|
||||
name = name_input or None
|
||||
|
||||
success, msg, path = clone_hub(url, name=name)
|
||||
if success and path:
|
||||
hub_name = name or path.name
|
||||
link_result = link_hub(
|
||||
hub_name,
|
||||
path,
|
||||
git_url=url,
|
||||
is_default=self._is_default,
|
||||
)
|
||||
self.app.call_from_thread(self.dismiss, f"✓ {link_result}")
|
||||
else:
|
||||
self.app.call_from_thread(self._on_clone_failed, msg)
|
||||
|
||||
def _on_clone_failed(self, msg: str) -> None:
|
||||
"""Handle a failed clone — re-enable the button and show the error."""
|
||||
self.query_one("#clone-status", Static).update(f"✗ {msg}")
|
||||
self.query_one("#btn-clone", Button).disabled = False
|
||||
|
||||
def action_cancel(self) -> None:
|
||||
"""Dismiss without action."""
|
||||
self.dismiss(None)
|
||||
@@ -6,6 +6,7 @@ authors = []
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"fuzzforge-types==0.0.1",
|
||||
"podman==5.6.0",
|
||||
"pydantic==2.12.4",
|
||||
"structlog>=24.0.0",
|
||||
@@ -21,4 +22,5 @@ tests = [
|
||||
"pytest==9.0.2",
|
||||
]
|
||||
|
||||
|
||||
[tool.uv.sources]
|
||||
fuzzforge-types = { workspace = true }
|
||||
|
||||
@@ -18,32 +18,3 @@ ignore = [
|
||||
"PLR2004", # allowing comparisons using unamed numerical constants in tests
|
||||
"S101", # allowing 'assert' statements in tests
|
||||
]
|
||||
"src/**" = [
|
||||
"ANN201", # missing return type: legacy code
|
||||
"ARG002", # unused argument: callback pattern
|
||||
"ASYNC109", # async with timeout param: intentional pattern
|
||||
"BLE001", # blind exception: broad error handling needed
|
||||
"C901", # complexity: legacy code
|
||||
"EM102", # f-string in exception: existing pattern
|
||||
"F401", # unused import: re-export pattern
|
||||
"FBT001", # boolean positional arg
|
||||
"FBT002", # boolean default arg
|
||||
"FIX002", # TODO comments: documented tech debt
|
||||
"N806", # variable naming: intentional constants
|
||||
"PERF401", # list comprehension: readability over perf
|
||||
"PLW0603", # global statement: intentional for shared state
|
||||
"PTH111", # os.path usage: legacy code
|
||||
"RUF005", # collection literal: legacy style
|
||||
"S110", # try-except-pass: intentional suppression
|
||||
"S603", # subprocess: validated inputs
|
||||
"SIM108", # ternary: readability preference
|
||||
"TC001", # TYPE_CHECKING: causes circular imports
|
||||
"TC003", # TYPE_CHECKING: causes circular imports
|
||||
"TRY003", # message in exception: existing pattern
|
||||
"TRY300", # try-else: existing pattern
|
||||
"TRY400", # logging.error vs exception: existing pattern
|
||||
"UP017", # datetime.UTC: Python 3.11+ only
|
||||
"UP041", # TimeoutError alias: compatibility
|
||||
"UP043", # unnecessary type args: compatibility
|
||||
"W293", # blank line whitespace: formatting
|
||||
]
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
|
||||
This package provides:
|
||||
- Sandbox engine abstractions (Podman, Docker)
|
||||
- Storage abstractions (S3) - requires 'storage' extra
|
||||
- Common exceptions
|
||||
|
||||
Example usage:
|
||||
@@ -11,6 +12,9 @@ Example usage:
|
||||
Podman,
|
||||
PodmanConfiguration,
|
||||
)
|
||||
|
||||
# For storage (requires boto3):
|
||||
from fuzzforge_common.storage import Storage
|
||||
"""
|
||||
|
||||
from fuzzforge_common.exceptions import FuzzForgeError
|
||||
@@ -25,6 +29,14 @@ from fuzzforge_common.sandboxes import (
|
||||
PodmanConfiguration,
|
||||
)
|
||||
|
||||
# Storage exceptions are always available (no boto3 required)
|
||||
from fuzzforge_common.storage.exceptions import (
|
||||
FuzzForgeStorageError,
|
||||
StorageConnectionError,
|
||||
StorageDownloadError,
|
||||
StorageUploadError,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AbstractFuzzForgeEngineConfiguration",
|
||||
"AbstractFuzzForgeSandboxEngine",
|
||||
@@ -32,7 +44,11 @@ __all__ = [
|
||||
"DockerConfiguration",
|
||||
"FuzzForgeError",
|
||||
"FuzzForgeSandboxEngines",
|
||||
"FuzzForgeStorageError",
|
||||
"ImageInfo",
|
||||
"Podman",
|
||||
"PodmanConfiguration",
|
||||
"StorageConnectionError",
|
||||
"StorageDownloadError",
|
||||
"StorageUploadError",
|
||||
]
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
"""FuzzForge Hub - Generic MCP server bridge.
|
||||
|
||||
This module provides a generic bridge to connect FuzzForge with any MCP server.
|
||||
It allows AI agents to discover and execute tools from external MCP servers
|
||||
(like mcp-security-hub) through the same interface as native FuzzForge modules.
|
||||
|
||||
The hub is server-agnostic: it doesn't hardcode any specific tools or servers.
|
||||
Instead, it dynamically discovers tools by connecting to configured MCP servers
|
||||
and calling their `list_tools()` method.
|
||||
|
||||
Supported transport types:
|
||||
- docker: Run MCP server as a Docker container with stdio transport
|
||||
- command: Run MCP server as a local process with stdio transport
|
||||
- sse: Connect to a remote MCP server via Server-Sent Events
|
||||
|
||||
"""
|
||||
|
||||
from fuzzforge_common.hub.client import HubClient, HubClientError, PersistentSession
|
||||
from fuzzforge_common.hub.executor import HubExecutionResult, HubExecutor
|
||||
from fuzzforge_common.hub.models import (
|
||||
HubConfig,
|
||||
HubServer,
|
||||
HubServerConfig,
|
||||
HubServerType,
|
||||
HubTool,
|
||||
HubToolParameter,
|
||||
)
|
||||
from fuzzforge_common.hub.registry import HubRegistry
|
||||
|
||||
__all__ = [
|
||||
"HubClient",
|
||||
"HubClientError",
|
||||
"HubConfig",
|
||||
"HubExecutionResult",
|
||||
"HubExecutor",
|
||||
"HubRegistry",
|
||||
"HubServer",
|
||||
"HubServerConfig",
|
||||
"HubServerType",
|
||||
"HubTool",
|
||||
"HubToolParameter",
|
||||
"PersistentSession",
|
||||
]
|
||||
@@ -1,753 +0,0 @@
|
||||
"""MCP client for communicating with hub servers.
|
||||
|
||||
This module provides a generic MCP client that can connect to any MCP server
|
||||
via stdio (docker/command) or SSE transport. It handles:
|
||||
- Starting containers/processes for stdio transport
|
||||
- Connecting to SSE endpoints
|
||||
- Discovering tools via list_tools()
|
||||
- Executing tools via call_tool()
|
||||
- Persistent container sessions for stateful interactions
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
from contextlib import asynccontextmanager
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime, timezone
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from fuzzforge_common.hub.models import (
|
||||
HubServer,
|
||||
HubServerConfig,
|
||||
HubServerType,
|
||||
HubTool,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from asyncio.subprocess import Process
|
||||
from collections.abc import AsyncGenerator
|
||||
|
||||
from structlog.stdlib import BoundLogger
|
||||
|
||||
|
||||
def get_logger() -> BoundLogger:
|
||||
"""Get structlog logger instance.
|
||||
|
||||
:returns: Configured structlog logger.
|
||||
|
||||
"""
|
||||
from structlog import get_logger # noqa: PLC0415
|
||||
|
||||
return cast("BoundLogger", get_logger())
|
||||
|
||||
|
||||
class HubClientError(Exception):
|
||||
"""Error in hub client operations."""
|
||||
|
||||
|
||||
@dataclass
|
||||
class PersistentSession:
|
||||
"""A persistent container session with an active MCP connection.
|
||||
|
||||
Keeps a Docker container running between tool calls to allow
|
||||
stateful interactions (e.g., radare2 analysis, long-running fuzzing).
|
||||
|
||||
"""
|
||||
|
||||
#: Server name this session belongs to.
|
||||
server_name: str
|
||||
|
||||
#: Docker container name.
|
||||
container_name: str
|
||||
|
||||
#: Underlying process (docker run).
|
||||
process: Process
|
||||
|
||||
#: Stream reader (process stdout).
|
||||
reader: asyncio.StreamReader
|
||||
|
||||
#: Stream writer (process stdin).
|
||||
writer: asyncio.StreamWriter
|
||||
|
||||
#: Whether the MCP session has been initialized.
|
||||
initialized: bool = False
|
||||
|
||||
#: Lock to serialise concurrent requests on the same session.
|
||||
lock: asyncio.Lock = field(default_factory=asyncio.Lock)
|
||||
|
||||
#: When the session was started.
|
||||
started_at: datetime = field(default_factory=lambda: datetime.now(tz=timezone.utc))
|
||||
|
||||
#: Monotonic counter for JSON-RPC request IDs.
|
||||
request_id: int = 0
|
||||
|
||||
@property
|
||||
def alive(self) -> bool:
|
||||
"""Check if the underlying process is still running."""
|
||||
return self.process.returncode is None
|
||||
|
||||
|
||||
class HubClient:
|
||||
"""Client for communicating with MCP hub servers.
|
||||
|
||||
Supports stdio (via docker/command) and SSE transports.
|
||||
Uses the MCP protocol for tool discovery and execution.
|
||||
|
||||
"""
|
||||
|
||||
#: Default timeout for operations.
|
||||
DEFAULT_TIMEOUT: int = 30
|
||||
|
||||
def __init__(self, timeout: int = DEFAULT_TIMEOUT) -> None:
|
||||
"""Initialize the hub client.
|
||||
|
||||
:param timeout: Default timeout for operations in seconds.
|
||||
|
||||
"""
|
||||
self._timeout = timeout
|
||||
self._persistent_sessions: dict[str, PersistentSession] = {}
|
||||
self._request_id: int = 0
|
||||
|
||||
async def discover_tools(self, server: HubServer) -> list[HubTool]:
|
||||
"""Discover tools from a hub server.
|
||||
|
||||
Connects to the server, calls list_tools(), and returns
|
||||
parsed HubTool instances.
|
||||
|
||||
:param server: Hub server to discover tools from.
|
||||
:returns: List of discovered tools.
|
||||
:raises HubClientError: If discovery fails.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
config = server.config
|
||||
|
||||
logger.info("Discovering tools", server=config.name, type=config.type.value)
|
||||
|
||||
try:
|
||||
async with self._connect(config) as (reader, writer):
|
||||
# Initialise MCP session (skip for persistent — already done)
|
||||
if not self._persistent_sessions.get(config.name):
|
||||
await self._initialize_session(reader, writer, config.name)
|
||||
|
||||
# List tools
|
||||
tools_data = await self._call_method(
|
||||
reader,
|
||||
writer,
|
||||
"tools/list",
|
||||
{},
|
||||
)
|
||||
|
||||
# Parse tools
|
||||
tools = []
|
||||
for tool_data in tools_data.get("tools", []):
|
||||
tool = HubTool.from_mcp_tool(
|
||||
server_name=config.name,
|
||||
name=tool_data["name"],
|
||||
description=tool_data.get("description"),
|
||||
input_schema=tool_data.get("inputSchema", {}),
|
||||
)
|
||||
tools.append(tool)
|
||||
|
||||
logger.info(
|
||||
"Discovered tools",
|
||||
server=config.name,
|
||||
count=len(tools),
|
||||
)
|
||||
return tools
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Tool discovery failed",
|
||||
server=config.name,
|
||||
error=str(e),
|
||||
)
|
||||
raise HubClientError(f"Discovery failed for {config.name}: {e}") from e
|
||||
|
||||
async def execute_tool(
|
||||
self,
|
||||
server: HubServer,
|
||||
tool_name: str,
|
||||
arguments: dict[str, Any],
|
||||
*,
|
||||
timeout: int | None = None,
|
||||
extra_volumes: list[str] | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Execute a tool on a hub server.
|
||||
|
||||
:param server: Hub server to execute on.
|
||||
:param tool_name: Name of the tool to execute.
|
||||
:param arguments: Tool arguments.
|
||||
:param timeout: Execution timeout (uses default if None).
|
||||
:param extra_volumes: Additional Docker volume mounts to inject.
|
||||
:returns: Tool execution result.
|
||||
:raises HubClientError: If execution fails.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
config = server.config
|
||||
exec_timeout = timeout or config.timeout or self._timeout
|
||||
|
||||
logger.info(
|
||||
"Executing hub tool",
|
||||
server=config.name,
|
||||
tool=tool_name,
|
||||
timeout=exec_timeout,
|
||||
)
|
||||
|
||||
try:
|
||||
async with self._connect(config, extra_volumes=extra_volumes) as (reader, writer):
|
||||
# Initialise MCP session (skip for persistent — already done)
|
||||
if not self._persistent_sessions.get(config.name):
|
||||
await self._initialize_session(reader, writer, config.name)
|
||||
|
||||
# Call tool
|
||||
result = await asyncio.wait_for(
|
||||
self._call_method(
|
||||
reader,
|
||||
writer,
|
||||
"tools/call",
|
||||
{"name": tool_name, "arguments": arguments},
|
||||
),
|
||||
timeout=exec_timeout,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Tool execution completed",
|
||||
server=config.name,
|
||||
tool=tool_name,
|
||||
)
|
||||
return result
|
||||
|
||||
except asyncio.TimeoutError as e:
|
||||
logger.error(
|
||||
"Tool execution timed out",
|
||||
server=config.name,
|
||||
tool=tool_name,
|
||||
timeout=exec_timeout,
|
||||
)
|
||||
raise HubClientError(
|
||||
f"Execution timed out for {config.name}:{tool_name}"
|
||||
) from e
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Tool execution failed",
|
||||
server=config.name,
|
||||
tool=tool_name,
|
||||
error=str(e),
|
||||
)
|
||||
raise HubClientError(
|
||||
f"Execution failed for {config.name}:{tool_name}: {e}"
|
||||
) from e
|
||||
|
||||
@asynccontextmanager
|
||||
async def _connect(
|
||||
self,
|
||||
config: HubServerConfig,
|
||||
extra_volumes: list[str] | None = None,
|
||||
) -> AsyncGenerator[tuple[asyncio.StreamReader, asyncio.StreamWriter], None]:
|
||||
"""Connect to an MCP server.
|
||||
|
||||
If a persistent session exists for this server, reuse it (with a lock
|
||||
to serialise concurrent requests). Otherwise, fall through to the
|
||||
ephemeral per-call connection logic.
|
||||
|
||||
:param config: Server configuration.
|
||||
:param extra_volumes: Additional Docker volume mounts to inject.
|
||||
:yields: Tuple of (reader, writer) for communication.
|
||||
|
||||
"""
|
||||
# Check for active persistent session
|
||||
session = self._persistent_sessions.get(config.name)
|
||||
if session and session.initialized and session.alive:
|
||||
async with session.lock:
|
||||
yield session.reader, session.writer # type: ignore[misc]
|
||||
return
|
||||
|
||||
# Ephemeral connection (original behaviour)
|
||||
if config.type == HubServerType.DOCKER:
|
||||
async with self._connect_docker(config, extra_volumes=extra_volumes) as streams:
|
||||
yield streams
|
||||
elif config.type == HubServerType.COMMAND:
|
||||
async with self._connect_command(config) as streams:
|
||||
yield streams
|
||||
elif config.type == HubServerType.SSE:
|
||||
async with self._connect_sse(config) as streams:
|
||||
yield streams
|
||||
else:
|
||||
msg = f"Unsupported server type: {config.type}"
|
||||
raise HubClientError(msg)
|
||||
|
||||
@asynccontextmanager
|
||||
async def _connect_docker(
|
||||
self,
|
||||
config: HubServerConfig,
|
||||
extra_volumes: list[str] | None = None,
|
||||
) -> AsyncGenerator[tuple[asyncio.StreamReader, asyncio.StreamWriter], None]:
|
||||
"""Connect to a Docker-based MCP server.
|
||||
|
||||
:param config: Server configuration with image name.
|
||||
:param extra_volumes: Additional volume mounts to inject (e.g. project assets).
|
||||
:yields: Tuple of (reader, writer) for stdio communication.
|
||||
|
||||
"""
|
||||
if not config.image:
|
||||
msg = f"Docker image not specified for server '{config.name}'"
|
||||
raise HubClientError(msg)
|
||||
|
||||
# Build docker command
|
||||
cmd = ["docker", "run", "-i", "--rm"]
|
||||
|
||||
# Add capabilities
|
||||
for cap in config.capabilities:
|
||||
cmd.extend(["--cap-add", cap])
|
||||
|
||||
# Add volumes from server config
|
||||
for volume in config.volumes:
|
||||
cmd.extend(["-v", os.path.expanduser(volume)])
|
||||
|
||||
# Add extra volumes (e.g. project assets injected at runtime)
|
||||
for volume in (extra_volumes or []):
|
||||
cmd.extend(["-v", os.path.expanduser(volume)])
|
||||
|
||||
# Add environment variables
|
||||
for key, value in config.environment.items():
|
||||
cmd.extend(["-e", f"{key}={value}"])
|
||||
|
||||
cmd.append(config.image)
|
||||
|
||||
# Use 4 MB buffer to handle large tool responses (YARA rulesets, trivy output, etc.)
|
||||
_STREAM_LIMIT = 4 * 1024 * 1024
|
||||
|
||||
process: Process = await asyncio.create_subprocess_exec(
|
||||
*cmd,
|
||||
stdin=subprocess.PIPE,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.PIPE,
|
||||
limit=_STREAM_LIMIT,
|
||||
)
|
||||
|
||||
try:
|
||||
if process.stdin is None or process.stdout is None:
|
||||
msg = "Failed to get process streams"
|
||||
raise HubClientError(msg)
|
||||
|
||||
# Create asyncio streams from process pipes
|
||||
reader = process.stdout
|
||||
writer = process.stdin
|
||||
|
||||
yield reader, writer # type: ignore[misc]
|
||||
|
||||
finally:
|
||||
process.terminate()
|
||||
try:
|
||||
await asyncio.wait_for(process.wait(), timeout=5)
|
||||
except asyncio.TimeoutError:
|
||||
process.kill()
|
||||
|
||||
@asynccontextmanager
async def _connect_command(
    self,
    config: HubServerConfig,
) -> AsyncGenerator[tuple[asyncio.StreamReader, asyncio.StreamWriter], None]:
    """Connect to a command-based MCP server.

    Spawns ``config.command`` as a subprocess and exposes its stdio pipes
    as asyncio streams. On exit the process is terminated and, if it does
    not die within 5 seconds, killed.

    :param config: Server configuration with command.
    :yields: Tuple of (reader, writer) for stdio communication.
    :raises HubClientError: If no command is configured or the process
        pipes are unavailable.

    """
    if not config.command:
        msg = f"Command not specified for server '{config.name}'"
        raise HubClientError(msg)

    # Merge server-specific variables over the current environment instead
    # of replacing it wholesale: passing only config.environment would strip
    # PATH/HOME from the child, and on POSIX CPython even resolves the
    # executable using the supplied env's PATH, so the spawn itself could
    # fail. Server values still override inherited ones.
    env = {**os.environ, **config.environment} if config.environment else None

    # Use 4 MB buffer to handle large tool responses
    _STREAM_LIMIT = 4 * 1024 * 1024

    process: Process = await asyncio.create_subprocess_exec(
        *config.command,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        env=env,
        limit=_STREAM_LIMIT,
    )

    try:
        if process.stdin is None or process.stdout is None:
            msg = "Failed to get process streams"
            raise HubClientError(msg)

        reader = process.stdout
        writer = process.stdin

        yield reader, writer  # type: ignore[misc]

    finally:
        # Graceful stop first; escalate to SIGKILL after a 5 s grace period.
        process.terminate()
        try:
            await asyncio.wait_for(process.wait(), timeout=5)
        except asyncio.TimeoutError:
            process.kill()
|
||||
|
||||
@asynccontextmanager
async def _connect_sse(
    self,
    config: HubServerConfig,
) -> AsyncGenerator[tuple[asyncio.StreamReader, asyncio.StreamWriter], None]:
    """Connect to an SSE-based MCP server.

    Placeholder: SSE transport needs an HTTP client dependency that is not
    wired in yet, so entering this context manager always fails.

    :param config: Server configuration with URL.
    :yields: Tuple of (reader, writer) for SSE communication.
    :raises NotImplementedError: Always, until SSE support is implemented.

    """
    raise NotImplementedError("SSE transport not yet implemented")
|
||||
|
||||
async def _initialize_session(
    self,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    server_name: str,
) -> dict[str, Any]:
    """Initialize MCP session with the server.

    Runs the MCP handshake: an ``initialize`` request followed by the
    ``notifications/initialized`` notification.

    :param reader: Stream reader.
    :param writer: Stream writer.
    :param server_name: Server name for logging.
    :returns: Server capabilities.

    """
    init_params = {
        "protocolVersion": "2024-11-05",
        "capabilities": {},
        "clientInfo": {
            "name": "fuzzforge-hub",
            "version": "0.1.0",
        },
    }

    # Step 1: initialize request; the response carries the capabilities.
    capabilities = await self._call_method(reader, writer, "initialize", init_params)

    # Step 2: acknowledge the handshake with the initialized notification.
    await self._send_notification(reader, writer, "notifications/initialized", {})

    return capabilities
|
||||
|
||||
async def _call_method(
    self,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    method: str,
    params: dict[str, Any],
) -> dict[str, Any]:
    """Call an MCP method over newline-delimited JSON-RPC.

    Sends one JSON-RPC 2.0 request line and waits (bounded by
    ``self._timeout``) for exactly one response line.

    :param reader: Stream reader.
    :param writer: Stream writer.
    :param method: Method name.
    :param params: Method parameters.
    :returns: Method result.
    :raises HubClientError: On empty response, JSON-RPC error, or a
        tool-level error flagged in the result content.

    """
    # Every request carries a fresh, monotonically increasing ID.
    self._request_id += 1
    payload = {
        "jsonrpc": "2.0",
        "id": self._request_id,
        "method": method,
        "params": params,
    }

    # Newline-delimited JSON framing: one request per line.
    writer.write((json.dumps(payload) + "\n").encode())
    await writer.drain()

    # Block on a single response line, bounded by the client timeout.
    raw = await asyncio.wait_for(reader.readline(), timeout=self._timeout)
    if not raw:
        raise HubClientError("Empty response from server")

    response = json.loads(raw.decode())

    # Protocol-level failure reported by the server.
    if "error" in response:
        detail = response["error"].get("message", "Unknown error")
        raise HubClientError(f"MCP error: {detail}")

    result = response.get("result", {})

    # Tool-level failure: content items may carry an isError flag.
    for item in result.get("content", []):
        if item.get("isError", False):
            error_text = item.get("text", "unknown error")
            raise HubClientError(f"Tool returned error: {error_text}")

    return result
|
||||
|
||||
async def _send_notification(
    self,
    reader: asyncio.StreamReader,
    writer: asyncio.StreamWriter,
    method: str,
    params: dict[str, Any],
) -> None:
    """Send an MCP notification (no response expected).

    A notification is a JSON-RPC message without an ``id`` field; the
    server must not reply, so nothing is read back.

    :param reader: Stream reader (unused but kept for consistency).
    :param writer: Stream writer.
    :param method: Notification method name.
    :param params: Notification parameters.

    """
    message = {"jsonrpc": "2.0", "method": method, "params": params}
    writer.write((json.dumps(message) + "\n").encode())
    await writer.drain()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Persistent session management
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def start_persistent_session(
    self,
    config: HubServerConfig,
    extra_volumes: list[str] | None = None,
) -> PersistentSession:
    """Start a persistent Docker container and initialise MCP session.

    The container stays running until :meth:`stop_persistent_session` is
    called, allowing multiple tool calls on the same session.

    :param config: Server configuration (must be Docker type).
    :param extra_volumes: Additional host:container volume mounts to inject.
    :returns: The created persistent session.
    :raises HubClientError: If the container cannot be started.

    """
    logger = get_logger()

    # Reuse a live session for this server if one is already tracked.
    if config.name in self._persistent_sessions:
        session = self._persistent_sessions[config.name]
        if session.alive:
            logger.info("Persistent session already running", server=config.name)
            return session
        # Dead session — clean up and restart
        await self._cleanup_session(config.name)

    if config.type != HubServerType.DOCKER:
        msg = f"Persistent mode only supports Docker servers (got {config.type.value})"
        raise HubClientError(msg)

    if not config.image:
        msg = f"Docker image not specified for server '{config.name}'"
        raise HubClientError(msg)

    container_name = f"fuzzforge-{config.name}"

    # Remove stale container with same name if it exists. Best effort:
    # "docker rm -f" failing (e.g. no such container) is deliberately ignored.
    try:
        rm_proc = await asyncio.create_subprocess_exec(
            "docker", "rm", "-f", container_name,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        await rm_proc.wait()
    except Exception:
        pass

    # Build docker run command (no --rm, with --name): the named container
    # must outlive individual tool calls, unlike the one-shot connect path.
    cmd = ["docker", "run", "-i", "--name", container_name]

    for cap in config.capabilities:
        cmd.extend(["--cap-add", cap])

    for volume in config.volumes:
        # Configured mounts may use "~"; expand on the host side.
        cmd.extend(["-v", os.path.expanduser(volume)])

    for extra_vol in (extra_volumes or []):
        # NOTE(review): extra volumes are NOT expanduser()-ed here, unlike
        # config.volumes above and the one-shot docker path — confirm
        # whether that asymmetry is intentional.
        cmd.extend(["-v", extra_vol])

    for key, value in config.environment.items():
        cmd.extend(["-e", f"{key}={value}"])

    cmd.append(config.image)

    # 4 MB stream buffer so large tool responses do not overflow readline.
    _STREAM_LIMIT = 4 * 1024 * 1024

    logger.info(
        "Starting persistent container",
        server=config.name,
        container=container_name,
        image=config.image,
    )

    process: Process = await asyncio.create_subprocess_exec(
        *cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        limit=_STREAM_LIMIT,
    )

    if process.stdin is None or process.stdout is None:
        process.terminate()
        msg = "Failed to get process streams"
        raise HubClientError(msg)

    session = PersistentSession(
        server_name=config.name,
        container_name=container_name,
        process=process,
        reader=process.stdout,
        writer=process.stdin,
    )

    # Initialise MCP session; on failure the container is torn down so a
    # half-initialised session is never registered.
    try:
        await self._initialize_session(
            session.reader,  # type: ignore[arg-type]
            session.writer,  # type: ignore[arg-type]
            config.name,
        )
        session.initialized = True
    except Exception as e:
        process.terminate()
        try:
            await asyncio.wait_for(process.wait(), timeout=5)
        except asyncio.TimeoutError:
            process.kill()
        msg = f"Failed to initialise MCP session for {config.name}: {e}"
        raise HubClientError(msg) from e

    # Only a fully initialised session is tracked.
    self._persistent_sessions[config.name] = session

    logger.info(
        "Persistent session started",
        server=config.name,
        container=container_name,
    )
    return session
|
||||
|
||||
async def stop_persistent_session(self, server_name: str) -> bool:
    """Stop a persistent container session.

    Delegates to the shared cleanup path, which terminates the process and
    force-removes the Docker container.

    :param server_name: Name of the server whose session to stop.
    :returns: True if a session was stopped, False if none found.

    """
    stopped = await self._cleanup_session(server_name)
    return stopped
|
||||
|
||||
def get_persistent_session(self, server_name: str) -> PersistentSession | None:
    """Get a persistent session by server name.

    :param server_name: Server name.
    :returns: The session if running, None otherwise.

    """
    session = self._persistent_sessions.get(server_name)
    if session is None or session.alive:
        # Either nothing tracked, or a healthy session: return as-is.
        return session
    # Dead session — report None but keep the entry; removing it here could
    # race with async cleanup.
    return None
|
||||
|
||||
def list_persistent_sessions(self) -> list[dict[str, Any]]:
    """List all persistent sessions with their status.

    :returns: List of session info dictionaries.

    """
    return [
        {
            "server_name": name,
            "container_name": sess.container_name,
            "alive": sess.alive,
            "initialized": sess.initialized,
            "started_at": sess.started_at.isoformat(),
            # Uptime measured against UTC now, truncated to whole seconds.
            "uptime_seconds": int(
                (datetime.now(tz=timezone.utc) - sess.started_at).total_seconds()
            ),
        }
        for name, sess in self._persistent_sessions.items()
    ]
|
||||
|
||||
async def stop_all_persistent_sessions(self) -> int:
    """Stop all persistent sessions.

    :returns: Number of sessions stopped.

    """
    # Snapshot the keys first: _cleanup_session mutates the dict.
    stopped = 0
    for server_name in list(self._persistent_sessions):
        if await self._cleanup_session(server_name):
            stopped += 1
    return stopped
|
||||
|
||||
async def _cleanup_session(self, server_name: str) -> bool:
    """Clean up a persistent session (terminate process, remove container).

    :param server_name: Server name.
    :returns: True if cleaned up, False if not found.

    """
    logger = get_logger()
    # pop() both fetches and unregisters the session in one step, so a
    # second concurrent cleanup of the same server becomes a no-op.
    session = self._persistent_sessions.pop(server_name, None)
    if session is None:
        return False

    logger.info("Stopping persistent session", server=server_name)

    # Terminate process: SIGTERM first, escalate to SIGKILL after 10 s.
    if session.alive:
        session.process.terminate()
        try:
            await asyncio.wait_for(session.process.wait(), timeout=10)
        except asyncio.TimeoutError:
            session.process.kill()
            await session.process.wait()

    # Remove Docker container. Best effort: the container was started
    # without --rm, so it must be removed explicitly; failures are ignored.
    try:
        rm_proc = await asyncio.create_subprocess_exec(
            "docker", "rm", "-f", session.container_name,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
        )
        await rm_proc.wait()
    except Exception:
        pass

    logger.info(
        "Persistent session stopped",
        server=server_name,
        container=session.container_name,
    )
    return True
|
||||
@@ -1,627 +0,0 @@
|
||||
"""Hub executor for managing MCP server lifecycle and tool execution.
|
||||
|
||||
This module provides a high-level interface for:
|
||||
- Discovering tools from all registered hub servers
|
||||
- Executing tools with proper error handling
|
||||
- Managing the lifecycle of hub operations
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from fuzzforge_common.hub.client import HubClient, HubClientError, PersistentSession
|
||||
from fuzzforge_common.hub.models import HubServer, HubServerConfig, HubTool
|
||||
from fuzzforge_common.hub.registry import HubRegistry
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from structlog.stdlib import BoundLogger
|
||||
|
||||
|
||||
def get_logger() -> BoundLogger:
    """Get structlog logger instance.

    :returns: Configured structlog logger.

    """
    # Imported lazily so importing this module does not require structlog
    # to be configured up front.
    import structlog  # noqa: PLC0415

    return cast("BoundLogger", structlog.get_logger())
|
||||
|
||||
|
||||
class HubExecutionResult:
|
||||
"""Result of a hub tool execution."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
success: bool,
|
||||
server_name: str,
|
||||
tool_name: str,
|
||||
result: dict[str, Any] | None = None,
|
||||
error: str | None = None,
|
||||
) -> None:
|
||||
"""Initialize execution result.
|
||||
|
||||
:param success: Whether execution succeeded.
|
||||
:param server_name: Name of the hub server.
|
||||
:param tool_name: Name of the executed tool.
|
||||
:param result: Tool execution result data.
|
||||
:param error: Error message if execution failed.
|
||||
|
||||
"""
|
||||
self.success = success
|
||||
self.server_name = server_name
|
||||
self.tool_name = tool_name
|
||||
self.result = result or {}
|
||||
self.error = error
|
||||
|
||||
@property
|
||||
def identifier(self) -> str:
|
||||
"""Get full tool identifier."""
|
||||
return f"hub:{self.server_name}:{self.tool_name}"
|
||||
|
||||
def to_dict(self) -> dict[str, Any]:
|
||||
"""Convert to dictionary.
|
||||
|
||||
:returns: Dictionary representation.
|
||||
|
||||
"""
|
||||
return {
|
||||
"success": self.success,
|
||||
"identifier": self.identifier,
|
||||
"server": self.server_name,
|
||||
"tool": self.tool_name,
|
||||
"result": self.result,
|
||||
"error": self.error,
|
||||
}
|
||||
|
||||
|
||||
class HubExecutor:
    """Executor for hub server operations.

    Provides high-level methods for discovering and executing
    tools from hub servers.

    """

    #: Hub registry instance.
    _registry: HubRegistry

    #: MCP client instance.
    _client: HubClient

    def __init__(
        self,
        config_path: Path | None = None,
        timeout: int = 300,
    ) -> None:
        """Initialize the hub executor.

        :param config_path: Path to hub-servers.json config file.
        :param timeout: Default timeout for tool execution.

        """
        self._registry = HubRegistry(config_path)
        self._client = HubClient(timeout=timeout)
        # Tracks long-running "continuous" tool sessions keyed by
        # session_id; each value holds server name, derived status/stop
        # tool names, and status metadata.
        self._continuous_sessions: dict[str, dict[str, Any]] = {}
|
||||
|
||||
@property
def registry(self) -> HubRegistry:
    """Get the hub registry.

    Read-only accessor for the underlying :class:`HubRegistry`.

    :returns: Hub registry instance.

    """
    return self._registry
|
||||
|
||||
def add_server(self, config: HubServerConfig) -> HubServer:
    """Add a server to the registry.

    Thin delegate to :meth:`HubRegistry.add_server`; no tool discovery
    happens here.

    :param config: Server configuration.
    :returns: Created HubServer instance.

    """
    return self._registry.add_server(config)
|
||||
|
||||
async def discover_all_tools(self) -> dict[str, list[HubTool]]:
    """Discover tools from all enabled servers.

    Failures are logged and recorded on the server entry rather than
    raised, so one broken server does not abort discovery for the rest.

    :returns: Dict mapping server names to lists of discovered tools.

    """
    logger = get_logger()
    discovered: dict[str, list[HubTool]] = {}

    for server in self._registry.enabled_servers:
        try:
            server_tools = await self._client.discover_tools(server)
        except HubClientError as exc:
            logger.warning(
                "Failed to discover tools",
                server=server.name,
                error=str(exc),
            )
            # Record the failure on the registry entry and report no tools.
            self._registry.update_server_tools(server.name, [], error=str(exc))
            discovered[server.name] = []
        else:
            self._registry.update_server_tools(server.name, server_tools)
            discovered[server.name] = server_tools

    return discovered
|
||||
|
||||
async def discover_server_tools(self, server_name: str) -> list[HubTool]:
    """Discover tools from a specific server.

    :param server_name: Name of the server.
    :returns: List of discovered tools.
    :raises ValueError: If server not found.
    :raises HubClientError: If discovery fails (error is recorded first).

    """
    server = self._registry.get_server(server_name)
    if server is None:
        raise ValueError(f"Server '{server_name}' not found")

    try:
        tools = await self._client.discover_tools(server)
    except HubClientError as exc:
        # Record the failure so listings can surface it, then re-raise.
        self._registry.update_server_tools(server_name, [], error=str(exc))
        raise

    self._registry.update_server_tools(server_name, tools)
    return tools
|
||||
|
||||
async def execute_tool(
    self,
    identifier: str,
    arguments: dict[str, Any] | None = None,
    *,
    timeout: int | None = None,
    extra_volumes: list[str] | None = None,
) -> HubExecutionResult:
    """Execute a hub tool.

    Resolution order: first look the tool up in the registry; on a miss,
    parse the identifier as ``server:tool`` (optionally prefixed with
    ``hub:``), attempt on-demand discovery, and execute optimistically
    even if the tool was never discovered — the server may still implement
    it. All failures are returned as unsuccessful results, not raised.

    :param identifier: Tool identifier (hub:server:tool or server:tool).
    :param arguments: Tool arguments.
    :param timeout: Execution timeout.
    :param extra_volumes: Additional Docker volume mounts to inject.
    :returns: Execution result.

    """
    logger = get_logger()
    arguments = arguments or {}

    # Fast path: tool already known to the registry.
    server, tool = self._registry.find_tool(identifier)

    if not server or not tool:
        # Fallback path: parse as server:tool and discover on demand.
        # NOTE(review): this branch duplicates the execution logic of the
        # fast path below — a candidate for consolidation.
        parts = identifier.replace("hub:", "").split(":")
        if len(parts) == 2:  # noqa: PLR2004
            server_name, tool_name = parts
            server = self._registry.get_server(server_name)

            if server and not server.discovered:
                # Try to discover tools first (best effort).
                try:
                    await self.discover_server_tools(server_name)
                    tool = server.get_tool(tool_name)
                except HubClientError:
                    pass

            if server and not tool:
                # Tool not found, but server exists - try to execute anyway
                # The server might have the tool even if discovery failed.
                tool_name_to_use = tool_name
            else:
                tool_name_to_use = tool.name if tool else ""

            if not server:
                return HubExecutionResult(
                    success=False,
                    server_name=server_name,
                    tool_name=tool_name,
                    error=f"Server '{server_name}' not found",
                )

            # Execute even if tool wasn't discovered (server might still have it)
            try:
                result = await self._client.execute_tool(
                    server,
                    tool_name_to_use or tool_name,
                    arguments,
                    timeout=timeout,
                    extra_volumes=extra_volumes,
                )
                return HubExecutionResult(
                    success=True,
                    server_name=server.name,
                    tool_name=tool_name_to_use or tool_name,
                    result=result,
                )
            except HubClientError as e:
                return HubExecutionResult(
                    success=False,
                    server_name=server.name,
                    tool_name=tool_name_to_use or tool_name,
                    error=str(e),
                )
        else:
            # Identifier did not match any recognised form.
            return HubExecutionResult(
                success=False,
                server_name="unknown",
                tool_name=identifier,
                error=f"Invalid tool identifier: {identifier}",
            )

    # Registry hit: execute the discovered tool.
    logger.info(
        "Executing hub tool",
        server=server.name,
        tool=tool.name,
        arguments=arguments,
    )

    try:
        result = await self._client.execute_tool(
            server,
            tool.name,
            arguments,
            timeout=timeout,
            extra_volumes=extra_volumes,
        )
        return HubExecutionResult(
            success=True,
            server_name=server.name,
            tool_name=tool.name,
            result=result,
        )

    except HubClientError as e:
        return HubExecutionResult(
            success=False,
            server_name=server.name,
            tool_name=tool.name,
            error=str(e),
        )
|
||||
|
||||
def list_servers(self) -> list[dict[str, Any]]:
    """List all registered servers with their status.

    :returns: List of server info dicts.

    """
    def _describe(server: HubServer) -> dict[str, Any]:
        # A server counts as having an active persistent session only when
        # a tracked session exists and its process is still alive.
        session = self._client.get_persistent_session(server.name)
        return {
            "name": server.name,
            "identifier": server.identifier,
            "type": server.config.type.value,
            "enabled": server.config.enabled,
            "category": server.config.category,
            "description": server.config.description,
            "persistent": server.config.persistent,
            "persistent_session_active": session is not None and session.alive,
            "discovered": server.discovered,
            "tool_count": len(server.tools),
            "error": server.discovery_error,
        }

    return [_describe(server) for server in self._registry.servers]
|
||||
|
||||
def list_tools(self) -> list[dict[str, Any]]:
|
||||
"""List all discovered tools.
|
||||
|
||||
:returns: List of tool info dicts.
|
||||
|
||||
"""
|
||||
tools = []
|
||||
for tool in self._registry.get_all_tools():
|
||||
tools.append({
|
||||
"identifier": tool.identifier,
|
||||
"name": tool.name,
|
||||
"server": tool.server_name,
|
||||
"description": tool.description,
|
||||
"parameters": [p.model_dump() for p in tool.parameters],
|
||||
})
|
||||
return tools
|
||||
|
||||
def get_tool_schema(self, identifier: str) -> dict[str, Any] | None:
|
||||
"""Get the JSON Schema for a tool's input.
|
||||
|
||||
:param identifier: Tool identifier.
|
||||
:returns: JSON Schema dict or None if not found.
|
||||
|
||||
"""
|
||||
_, tool = self._registry.find_tool(identifier)
|
||||
if tool:
|
||||
return tool.input_schema
|
||||
return None
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Persistent session management
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def start_persistent_server(self, server_name: str, extra_volumes: list[str] | None = None) -> dict[str, Any]:
    """Start a persistent container session for a server.

    The container stays running between tool calls, allowing stateful
    interactions (e.g., radare2 sessions, long-running fuzzing).

    :param server_name: Name of the hub server to start.
    :param extra_volumes: Additional host:container volume mounts to inject.
    :returns: Session status dictionary.
    :raises ValueError: If server not found.

    """
    logger = get_logger()
    server = self._registry.get_server(server_name)
    if not server:
        msg = f"Server '{server_name}' not found"
        raise ValueError(msg)

    session = await self._client.start_persistent_session(server.config, extra_volumes=extra_volumes)

    # Auto-discover tools on the new session. Best effort: a discovery
    # failure leaves the session running with an empty tool list.
    try:
        tools = await self._client.discover_tools(server)
        self._registry.update_server_tools(server_name, tools)
    except HubClientError as e:
        logger.warning(
            "Tool discovery failed on persistent session",
            server=server_name,
            error=str(e),
        )

    # Include discovered tools in the result so agent knows what's available
    discovered_tools = []
    server_obj = self._registry.get_server(server_name)
    if server_obj:
        for tool in server_obj.tools:
            discovered_tools.append({
                "identifier": tool.identifier,
                "name": tool.name,
                "description": tool.description,
            })

    return {
        "server_name": session.server_name,
        "container_name": session.container_name,
        "alive": session.alive,
        "initialized": session.initialized,
        "started_at": session.started_at.isoformat(),
        "tools": discovered_tools,
        "tool_count": len(discovered_tools),
    }
|
||||
|
||||
async def stop_persistent_server(self, server_name: str) -> bool:
    """Stop a persistent container session.

    Thin delegate to :meth:`HubClient.stop_persistent_session`.

    :param server_name: Server name.
    :returns: True if a session was stopped.

    """
    return await self._client.stop_persistent_session(server_name)
|
||||
|
||||
def get_persistent_status(self, server_name: str) -> dict[str, Any] | None:
|
||||
"""Get status of a persistent session.
|
||||
|
||||
:param server_name: Server name.
|
||||
:returns: Status dict or None if no session.
|
||||
|
||||
"""
|
||||
session = self._client.get_persistent_session(server_name)
|
||||
if not session:
|
||||
return None
|
||||
|
||||
from datetime import datetime, timezone # noqa: PLC0415
|
||||
|
||||
return {
|
||||
"server_name": session.server_name,
|
||||
"container_name": session.container_name,
|
||||
"alive": session.alive,
|
||||
"initialized": session.initialized,
|
||||
"started_at": session.started_at.isoformat(),
|
||||
"uptime_seconds": int(
|
||||
(datetime.now(tz=timezone.utc) - session.started_at).total_seconds()
|
||||
),
|
||||
}
|
||||
|
||||
def list_persistent_sessions(self) -> list[dict[str, Any]]:
    """List all persistent sessions.

    Thin delegate to :meth:`HubClient.list_persistent_sessions`.

    :returns: List of session status dicts.

    """
    return self._client.list_persistent_sessions()
|
||||
|
||||
async def stop_all_persistent_servers(self) -> int:
    """Stop all persistent sessions.

    Thin delegate to :meth:`HubClient.stop_all_persistent_sessions`.

    :returns: Number of sessions stopped.

    """
    return await self._client.stop_all_persistent_sessions()
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Continuous session management
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def start_continuous_tool(
    self,
    server_name: str,
    start_tool: str,
    arguments: dict[str, Any],
) -> dict[str, Any]:
    """Start a continuous hub tool session.

    Ensures a persistent container is running, then calls the start tool
    (e.g., ``cargo_fuzz_start``) which returns a session_id. Tracks the
    session for subsequent status/stop calls.

    :param server_name: Hub server name.
    :param start_tool: Name of the start tool on the server.
    :param arguments: Arguments for the start tool.
    :returns: Start result including session_id.
    :raises ValueError: If server not found.

    """
    logger = get_logger()

    server = self._registry.get_server(server_name)
    if not server:
        msg = f"Server '{server_name}' not found"
        raise ValueError(msg)

    # Ensure persistent session is running; auto-start one if needed.
    persistent = self._client.get_persistent_session(server_name)
    if not persistent or not persistent.alive:
        logger.info(
            "Auto-starting persistent session for continuous tool",
            server=server_name,
        )
        await self._client.start_persistent_session(server.config)
        # Discover tools on the new session (best effort).
        try:
            tools = await self._client.discover_tools(server)
            self._registry.update_server_tools(server_name, tools)
        except HubClientError as e:
            logger.warning(
                "Tool discovery failed on persistent session",
                server=server_name,
                error=str(e),
            )

    # Call the start tool
    result = await self._client.execute_tool(
        server, start_tool, arguments,
    )

    # Extract session_id from result: the payload arrives as the first
    # "text" content item and is expected to be JSON.
    content_text = ""
    for item in result.get("content", []):
        if item.get("type") == "text":
            content_text = item.get("text", "")
            break

    import json  # noqa: PLC0415

    try:
        start_result = json.loads(content_text) if content_text else result
    except json.JSONDecodeError:
        # Non-JSON payload: fall back to the raw MCP result.
        start_result = result

    session_id = start_result.get("session_id", "")

    if session_id:
        from datetime import datetime, timezone  # noqa: PLC0415

        # Derive status/stop tool names from the start tool by naming
        # convention (foo_start -> foo_status / foo_stop).
        self._continuous_sessions[session_id] = {
            "session_id": session_id,
            "server_name": server_name,
            "start_tool": start_tool,
            "status_tool": start_tool.replace("_start", "_status"),
            "stop_tool": start_tool.replace("_start", "_stop"),
            "started_at": datetime.now(tz=timezone.utc).isoformat(),
            "status": "running",
        }

    return start_result
|
||||
|
||||
async def get_continuous_tool_status(
    self,
    session_id: str,
) -> dict[str, Any]:
    """Get status of a continuous hub tool session.

    :param session_id: Session ID from start_continuous_tool.
    :returns: Status dict from the hub server's status tool.
    :raises ValueError: If session not found.

    """
    session_info = self._continuous_sessions.get(session_id)
    if session_info is None:
        raise ValueError(f"Unknown continuous session: {session_id}")

    server = self._registry.get_server(session_info["server_name"])
    if server is None:
        raise ValueError(f"Server '{session_info['server_name']}' not found")

    result = await self._client.execute_tool(
        server,
        session_info["status_tool"],
        {"session_id": session_id},
    )

    # The MCP result wraps the payload as text content; take the first
    # text item and fall back to the raw result if it is not valid JSON.
    content_text = next(
        (
            item.get("text", "")
            for item in result.get("content", [])
            if item.get("type") == "text"
        ),
        "",
    )

    import json  # noqa: PLC0415

    if not content_text:
        return result
    try:
        return json.loads(content_text)
    except json.JSONDecodeError:
        return result
|
||||
|
||||
async def stop_continuous_tool(
    self,
    session_id: str,
) -> dict[str, Any]:
    """Stop a continuous hub tool session.

    :param session_id: Session ID to stop.
    :returns: Final results from the hub server's stop tool.
    :raises ValueError: If session not found.

    """
    session_info = self._continuous_sessions.get(session_id)
    if session_info is None:
        raise ValueError(f"Unknown continuous session: {session_id}")

    server = self._registry.get_server(session_info["server_name"])
    if server is None:
        raise ValueError(f"Server '{session_info['server_name']}' not found")

    result = await self._client.execute_tool(
        server,
        session_info["stop_tool"],
        {"session_id": session_id},
    )

    # Pull the first text content item (the JSON payload, if any).
    content_text = next(
        (
            item.get("text", "")
            for item in result.get("content", [])
            if item.get("type") == "text"
        ),
        "",
    )

    import json  # noqa: PLC0415

    if content_text:
        try:
            stop_result = json.loads(content_text)
        except json.JSONDecodeError:
            stop_result = result
    else:
        stop_result = result

    # Mark the tracked session as stopped; the entry is kept for listing.
    session_info["status"] = "stopped"

    return stop_result
|
||||
|
||||
def list_continuous_sessions(self) -> list[dict[str, Any]]:
|
||||
"""List all tracked continuous sessions.
|
||||
|
||||
:returns: List of continuous session info dicts.
|
||||
|
||||
"""
|
||||
return list(self._continuous_sessions.values())
|
||||
@@ -1,296 +0,0 @@
|
||||
"""Data models for FuzzForge Hub.
|
||||
|
||||
This module defines the Pydantic models used to represent MCP servers
|
||||
and their tools in the hub registry.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class HubServerType(str, Enum):
|
||||
"""Type of MCP server connection."""
|
||||
|
||||
#: Run as Docker container with stdio transport.
|
||||
DOCKER = "docker"
|
||||
#: Run as local command/process with stdio transport.
|
||||
COMMAND = "command"
|
||||
#: Connect via Server-Sent Events (HTTP).
|
||||
SSE = "sse"
|
||||
|
||||
|
||||
class HubServerConfig(BaseModel):
|
||||
"""Configuration for an MCP server in the hub.
|
||||
|
||||
This defines how to connect to an MCP server, not what tools it provides.
|
||||
Tools are discovered dynamically at runtime.
|
||||
|
||||
"""
|
||||
|
||||
#: Unique identifier for this server (e.g., "nmap", "nuclei").
|
||||
name: str = Field(description="Unique server identifier")
|
||||
|
||||
#: Human-readable description of the server.
|
||||
description: str | None = Field(
|
||||
default=None,
|
||||
description="Human-readable description",
|
||||
)
|
||||
|
||||
#: Type of connection to use.
|
||||
type: HubServerType = Field(description="Connection type")
|
||||
|
||||
#: Docker image name (for type=docker).
|
||||
image: str | None = Field(
|
||||
default=None,
|
||||
description="Docker image name (for docker type)",
|
||||
)
|
||||
|
||||
#: Command to run (for type=command).
|
||||
command: list[str] | None = Field(
|
||||
default=None,
|
||||
description="Command and args (for command type)",
|
||||
)
|
||||
|
||||
#: URL endpoint (for type=sse).
|
||||
url: str | None = Field(
|
||||
default=None,
|
||||
description="SSE endpoint URL (for sse type)",
|
||||
)
|
||||
|
||||
#: Environment variables to pass to the server.
|
||||
environment: dict[str, str] = Field(
|
||||
default_factory=dict,
|
||||
description="Environment variables",
|
||||
)
|
||||
|
||||
#: Docker capabilities to add (e.g., ["NET_RAW"] for nmap).
|
||||
capabilities: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Docker capabilities to add",
|
||||
)
|
||||
|
||||
#: Volume mounts for Docker (e.g., ["/host/path:/container/path:ro"]).
|
||||
volumes: list[str] = Field(
|
||||
default_factory=list,
|
||||
description="Docker volume mounts",
|
||||
)
|
||||
|
||||
#: Whether this server is enabled.
|
||||
enabled: bool = Field(
|
||||
default=True,
|
||||
description="Whether server is enabled",
|
||||
)
|
||||
|
||||
#: Category for grouping (e.g., "reconnaissance", "web-security").
|
||||
category: str | None = Field(
|
||||
default=None,
|
||||
description="Category for grouping servers",
|
||||
)
|
||||
|
||||
#: Per-server timeout override in seconds (None = use default_timeout).
|
||||
timeout: int | None = Field(
|
||||
default=None,
|
||||
description="Per-server execution timeout override in seconds",
|
||||
)
|
||||
|
||||
#: Whether to use persistent container mode (keep container running between calls).
|
||||
persistent: bool = Field(
|
||||
default=False,
|
||||
description="Keep container running between tool calls for stateful interactions",
|
||||
)
|
||||
|
||||
|
||||
class HubToolParameter(BaseModel):
|
||||
"""A parameter for an MCP tool.
|
||||
|
||||
Parsed from the tool's JSON Schema inputSchema.
|
||||
|
||||
"""
|
||||
|
||||
#: Parameter name.
|
||||
name: str
|
||||
|
||||
#: Parameter type (string, integer, boolean, array, object).
|
||||
type: str
|
||||
|
||||
#: Human-readable description.
|
||||
description: str | None = None
|
||||
|
||||
#: Whether this parameter is required.
|
||||
required: bool = False
|
||||
|
||||
#: Default value if any.
|
||||
default: Any = None
|
||||
|
||||
#: Enum values if constrained.
|
||||
enum: list[Any] | None = None
|
||||
|
||||
|
||||
class HubTool(BaseModel):
|
||||
"""An MCP tool discovered from a hub server.
|
||||
|
||||
This is populated by calling `list_tools()` on the MCP server.
|
||||
|
||||
"""
|
||||
|
||||
#: Tool name as defined by the MCP server.
|
||||
name: str = Field(description="Tool name from MCP server")
|
||||
|
||||
#: Human-readable description.
|
||||
description: str | None = Field(
|
||||
default=None,
|
||||
description="Tool description",
|
||||
)
|
||||
|
||||
#: Name of the hub server this tool belongs to.
|
||||
server_name: str = Field(description="Parent server name")
|
||||
|
||||
#: Parsed parameters from inputSchema.
|
||||
parameters: list[HubToolParameter] = Field(
|
||||
default_factory=list,
|
||||
description="Tool parameters",
|
||||
)
|
||||
|
||||
#: Raw JSON Schema for the tool input.
|
||||
input_schema: dict[str, Any] = Field(
|
||||
default_factory=dict,
|
||||
description="Raw JSON Schema from MCP",
|
||||
)
|
||||
|
||||
@property
|
||||
def identifier(self) -> str:
|
||||
"""Get the full tool identifier (hub:server:tool)."""
|
||||
return f"hub:{self.server_name}:{self.name}"
|
||||
|
||||
@classmethod
|
||||
def from_mcp_tool(
|
||||
cls,
|
||||
server_name: str,
|
||||
name: str,
|
||||
description: str | None,
|
||||
input_schema: dict[str, Any],
|
||||
) -> HubTool:
|
||||
"""Create a HubTool from MCP tool metadata.
|
||||
|
||||
:param server_name: Name of the parent hub server.
|
||||
:param name: Tool name.
|
||||
:param description: Tool description.
|
||||
:param input_schema: JSON Schema for tool input.
|
||||
:returns: HubTool instance.
|
||||
|
||||
"""
|
||||
parameters = cls._parse_parameters(input_schema)
|
||||
return cls(
|
||||
name=name,
|
||||
description=description,
|
||||
server_name=server_name,
|
||||
parameters=parameters,
|
||||
input_schema=input_schema,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _parse_parameters(schema: dict[str, Any]) -> list[HubToolParameter]:
|
||||
"""Parse parameters from JSON Schema.
|
||||
|
||||
:param schema: JSON Schema dict.
|
||||
:returns: List of parsed parameters.
|
||||
|
||||
"""
|
||||
parameters: list[HubToolParameter] = []
|
||||
properties = schema.get("properties", {})
|
||||
required_params = set(schema.get("required", []))
|
||||
|
||||
for name, prop in properties.items():
|
||||
param = HubToolParameter(
|
||||
name=name,
|
||||
type=prop.get("type", "string"),
|
||||
description=prop.get("description"),
|
||||
required=name in required_params,
|
||||
default=prop.get("default"),
|
||||
enum=prop.get("enum"),
|
||||
)
|
||||
parameters.append(param)
|
||||
|
||||
return parameters
|
||||
|
||||
|
||||
class HubServer(BaseModel):
|
||||
"""A hub server with its discovered tools.
|
||||
|
||||
Combines configuration with dynamically discovered tools.
|
||||
|
||||
"""
|
||||
|
||||
#: Server configuration.
|
||||
config: HubServerConfig
|
||||
|
||||
#: Tools discovered from the server (populated at runtime).
|
||||
tools: list[HubTool] = Field(
|
||||
default_factory=list,
|
||||
description="Discovered tools",
|
||||
)
|
||||
|
||||
#: Whether tools have been discovered.
|
||||
discovered: bool = Field(
|
||||
default=False,
|
||||
description="Whether tools have been discovered",
|
||||
)
|
||||
|
||||
#: Error message if discovery failed.
|
||||
discovery_error: str | None = Field(
|
||||
default=None,
|
||||
description="Error message if discovery failed",
|
||||
)
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
"""Get server name."""
|
||||
return self.config.name
|
||||
|
||||
@property
|
||||
def identifier(self) -> str:
|
||||
"""Get server identifier for module listing."""
|
||||
return f"hub:{self.config.name}"
|
||||
|
||||
def get_tool(self, tool_name: str) -> HubTool | None:
|
||||
"""Get a tool by name.
|
||||
|
||||
:param tool_name: Name of the tool.
|
||||
:returns: HubTool if found, None otherwise.
|
||||
|
||||
"""
|
||||
for tool in self.tools:
|
||||
if tool.name == tool_name:
|
||||
return tool
|
||||
return None
|
||||
|
||||
|
||||
class HubConfig(BaseModel):
|
||||
"""Configuration for the entire hub.
|
||||
|
||||
Loaded from hub-servers.json or similar config file.
|
||||
|
||||
"""
|
||||
|
||||
#: List of configured servers.
|
||||
servers: list[HubServerConfig] = Field(
|
||||
default_factory=list,
|
||||
description="Configured MCP servers",
|
||||
)
|
||||
|
||||
#: Default timeout for tool execution (seconds).
|
||||
default_timeout: int = Field(
|
||||
default=300,
|
||||
description="Default execution timeout",
|
||||
)
|
||||
|
||||
#: Whether to cache discovered tools.
|
||||
cache_tools: bool = Field(
|
||||
default=True,
|
||||
description="Cache discovered tools",
|
||||
)
|
||||
@@ -1,258 +0,0 @@
|
||||
"""Hub registry for managing MCP server configurations.
|
||||
|
||||
The registry loads server configurations from a JSON file and provides
|
||||
methods to access and manage them. It does not hardcode any specific
|
||||
servers or tools - everything is configured by the user.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
from fuzzforge_common.hub.models import (
|
||||
HubConfig,
|
||||
HubServer,
|
||||
HubServerConfig,
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from structlog.stdlib import BoundLogger
|
||||
|
||||
|
||||
def get_logger() -> BoundLogger:
|
||||
"""Get structlog logger instance.
|
||||
|
||||
:returns: Configured structlog logger.
|
||||
|
||||
"""
|
||||
from structlog import get_logger # noqa: PLC0415
|
||||
|
||||
return cast("BoundLogger", get_logger())
|
||||
|
||||
|
||||
class HubRegistry:
|
||||
"""Registry for MCP hub servers.
|
||||
|
||||
Manages the configuration and state of hub servers.
|
||||
Configurations are loaded from a JSON file.
|
||||
|
||||
"""
|
||||
|
||||
#: Loaded hub configuration.
|
||||
_config: HubConfig
|
||||
|
||||
#: Server instances with discovered tools.
|
||||
_servers: dict[str, HubServer]
|
||||
|
||||
#: Path to the configuration file.
|
||||
_config_path: Path | None
|
||||
|
||||
def __init__(self, config_path: Path | str | None = None) -> None:
|
||||
"""Initialize the hub registry.
|
||||
|
||||
:param config_path: Path to hub-servers.json config file.
|
||||
If None, starts with empty configuration.
|
||||
|
||||
"""
|
||||
if config_path is not None:
|
||||
self._config_path = Path(config_path)
|
||||
else:
|
||||
self._config_path = None
|
||||
self._servers = {}
|
||||
self._config = HubConfig()
|
||||
|
||||
if self._config_path and self._config_path.exists():
|
||||
self._load_config(self._config_path)
|
||||
|
||||
def _load_config(self, config_path: Path) -> None:
|
||||
"""Load configuration from JSON file.
|
||||
|
||||
:param config_path: Path to config file.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
try:
|
||||
with config_path.open() as f:
|
||||
data = json.load(f)
|
||||
|
||||
self._config = HubConfig.model_validate(data)
|
||||
|
||||
# Create server instances from config
|
||||
for server_config in self._config.servers:
|
||||
if server_config.enabled:
|
||||
self._servers[server_config.name] = HubServer(
|
||||
config=server_config,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Loaded hub configuration",
|
||||
path=str(config_path),
|
||||
servers=len(self._servers),
|
||||
)
|
||||
|
||||
except Exception as e:
|
||||
logger.error(
|
||||
"Failed to load hub configuration",
|
||||
path=str(config_path),
|
||||
error=str(e),
|
||||
)
|
||||
raise
|
||||
|
||||
def reload(self) -> None:
|
||||
"""Reload configuration from file."""
|
||||
if self._config_path and self._config_path.exists():
|
||||
self._servers.clear()
|
||||
self._load_config(self._config_path)
|
||||
|
||||
@property
|
||||
def servers(self) -> list[HubServer]:
|
||||
"""Get all registered servers.
|
||||
|
||||
:returns: List of hub servers.
|
||||
|
||||
"""
|
||||
return list(self._servers.values())
|
||||
|
||||
@property
|
||||
def enabled_servers(self) -> list[HubServer]:
|
||||
"""Get all enabled servers.
|
||||
|
||||
:returns: List of enabled hub servers.
|
||||
|
||||
"""
|
||||
return [s for s in self._servers.values() if s.config.enabled]
|
||||
|
||||
def get_server(self, name: str) -> HubServer | None:
|
||||
"""Get a server by name.
|
||||
|
||||
:param name: Server name.
|
||||
:returns: HubServer if found, None otherwise.
|
||||
|
||||
"""
|
||||
return self._servers.get(name)
|
||||
|
||||
def add_server(self, config: HubServerConfig) -> HubServer:
|
||||
"""Add a server to the registry.
|
||||
|
||||
:param config: Server configuration.
|
||||
:returns: Created HubServer instance.
|
||||
:raises ValueError: If server with same name exists.
|
||||
|
||||
"""
|
||||
if config.name in self._servers:
|
||||
msg = f"Server '{config.name}' already exists"
|
||||
raise ValueError(msg)
|
||||
|
||||
server = HubServer(config=config)
|
||||
self._servers[config.name] = server
|
||||
self._config.servers.append(config)
|
||||
|
||||
get_logger().info("Added hub server", name=config.name, type=config.type)
|
||||
return server
|
||||
|
||||
def remove_server(self, name: str) -> bool:
|
||||
"""Remove a server from the registry.
|
||||
|
||||
:param name: Server name.
|
||||
:returns: True if removed, False if not found.
|
||||
|
||||
"""
|
||||
if name not in self._servers:
|
||||
return False
|
||||
|
||||
del self._servers[name]
|
||||
self._config.servers = [s for s in self._config.servers if s.name != name]
|
||||
|
||||
get_logger().info("Removed hub server", name=name)
|
||||
return True
|
||||
|
||||
def save_config(self, path: Path | None = None) -> None:
|
||||
"""Save current configuration to file.
|
||||
|
||||
:param path: Path to save to. Uses original path if None.
|
||||
|
||||
"""
|
||||
save_path = path or self._config_path
|
||||
if not save_path:
|
||||
msg = "No config path specified"
|
||||
raise ValueError(msg)
|
||||
|
||||
with save_path.open("w") as f:
|
||||
json.dump(
|
||||
self._config.model_dump(mode="json"),
|
||||
f,
|
||||
indent=2,
|
||||
)
|
||||
|
||||
get_logger().info("Saved hub configuration", path=str(save_path))
|
||||
|
||||
def update_server_tools(
|
||||
self,
|
||||
server_name: str,
|
||||
tools: list,
|
||||
*,
|
||||
error: str | None = None,
|
||||
) -> None:
|
||||
"""Update discovered tools for a server.
|
||||
|
||||
Called by the hub client after tool discovery.
|
||||
|
||||
:param server_name: Server name.
|
||||
:param tools: List of HubTool instances.
|
||||
:param error: Error message if discovery failed.
|
||||
|
||||
"""
|
||||
server = self._servers.get(server_name)
|
||||
if not server:
|
||||
return
|
||||
|
||||
if error:
|
||||
server.discovered = False
|
||||
server.discovery_error = error
|
||||
server.tools = []
|
||||
else:
|
||||
server.discovered = True
|
||||
server.discovery_error = None
|
||||
server.tools = tools
|
||||
|
||||
def get_all_tools(self) -> list:
|
||||
"""Get all discovered tools from all servers.
|
||||
|
||||
:returns: Flat list of all HubTool instances.
|
||||
|
||||
"""
|
||||
tools = []
|
||||
for server in self._servers.values():
|
||||
if server.discovered:
|
||||
tools.extend(server.tools)
|
||||
return tools
|
||||
|
||||
def find_tool(self, identifier: str):
|
||||
"""Find a tool by its full identifier.
|
||||
|
||||
:param identifier: Full identifier (hub:server:tool or server:tool).
|
||||
:returns: Tuple of (HubServer, HubTool) if found, (None, None) otherwise.
|
||||
|
||||
"""
|
||||
# Parse identifier
|
||||
parts = identifier.split(":")
|
||||
if len(parts) == 3 and parts[0] == "hub": # noqa: PLR2004
|
||||
# hub:server:tool format
|
||||
server_name = parts[1]
|
||||
tool_name = parts[2]
|
||||
elif len(parts) == 2: # noqa: PLR2004
|
||||
# server:tool format
|
||||
server_name = parts[0]
|
||||
tool_name = parts[1]
|
||||
else:
|
||||
return None, None
|
||||
|
||||
server = self._servers.get(server_name)
|
||||
if not server:
|
||||
return None, None
|
||||
|
||||
tool = server.get_tool(tool_name)
|
||||
return server, tool
|
||||
@@ -4,7 +4,7 @@ from typing import TYPE_CHECKING
|
||||
from pydantic import BaseModel
|
||||
|
||||
from fuzzforge_common.sandboxes.engines.enumeration import (
|
||||
FuzzForgeSandboxEngines,
|
||||
FuzzForgeSandboxEngines, # noqa: TC001 (required by 'pydantic' at runtime)
|
||||
)
|
||||
|
||||
if TYPE_CHECKING:
|
||||
|
||||
@@ -25,9 +25,6 @@ class ImageInfo:
|
||||
#: Image size in bytes.
|
||||
size: int | None = None
|
||||
|
||||
#: Image labels/metadata.
|
||||
labels: dict[str, str] | None = None
|
||||
|
||||
|
||||
class AbstractFuzzForgeSandboxEngine(ABC):
|
||||
"""Abstract class used as a base for all FuzzForge sandbox engine classes."""
|
||||
@@ -272,23 +269,6 @@ class AbstractFuzzForgeSandboxEngine(ABC):
|
||||
message: str = f"method 'read_file_from_container' is not implemented for class '{self.__class__.__name__}'"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
@abstractmethod
|
||||
def tail_file_from_container(self, identifier: str, path: str, start_line: int = 1) -> str:
|
||||
"""Read a file from a running container starting at a given line number.
|
||||
|
||||
Uses ``tail -n +{start_line}`` to avoid re-reading the entire file on
|
||||
every poll. This is the preferred method for incremental reads of
|
||||
append-only files such as ``stream.jsonl``.
|
||||
|
||||
:param identifier: Container identifier.
|
||||
:param path: Path to file inside container.
|
||||
:param start_line: 1-based line number to start reading from.
|
||||
:returns: File contents from *start_line* onwards (may be empty).
|
||||
|
||||
"""
|
||||
message: str = f"method 'tail_file_from_container' is not implemented for class '{self.__class__.__name__}'"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
@abstractmethod
|
||||
def list_containers(self, all_containers: bool = True) -> list[dict]:
|
||||
"""List containers.
|
||||
@@ -299,17 +279,3 @@ class AbstractFuzzForgeSandboxEngine(ABC):
|
||||
"""
|
||||
message: str = f"method 'list_containers' is not implemented for class '{self.__class__.__name__}'"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
@abstractmethod
|
||||
def read_file_from_image(self, image: str, path: str) -> str:
|
||||
"""Read a file from inside an image without starting a container.
|
||||
|
||||
Creates a temporary container, copies the file, and removes the container.
|
||||
|
||||
:param image: Image reference (e.g., "fuzzforge-rust-analyzer:latest").
|
||||
:param path: Path to file inside image.
|
||||
:returns: File contents as string.
|
||||
|
||||
"""
|
||||
message: str = f"method 'read_file_from_image' is not implemented for class '{self.__class__.__name__}'"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
@@ -99,17 +99,6 @@ class DockerCLI(AbstractFuzzForgeSandboxEngine):
|
||||
if filter_prefix and filter_prefix not in reference:
|
||||
continue
|
||||
|
||||
# Try to get labels from image inspect
|
||||
labels = {}
|
||||
try:
|
||||
inspect_result = self._run(["image", "inspect", reference], check=False)
|
||||
if inspect_result.returncode == 0:
|
||||
inspect_data = json.loads(inspect_result.stdout)
|
||||
if inspect_data and len(inspect_data) > 0:
|
||||
labels = inspect_data[0].get("Config", {}).get("Labels") or {}
|
||||
except (json.JSONDecodeError, IndexError):
|
||||
pass
|
||||
|
||||
images.append(
|
||||
ImageInfo(
|
||||
reference=reference,
|
||||
@@ -117,7 +106,6 @@ class DockerCLI(AbstractFuzzForgeSandboxEngine):
|
||||
tag=tag,
|
||||
image_id=image.get("ID", "")[:12],
|
||||
size=image.get("Size"),
|
||||
labels=labels,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -389,24 +377,6 @@ class DockerCLI(AbstractFuzzForgeSandboxEngine):
|
||||
return ""
|
||||
return result.stdout
|
||||
|
||||
def tail_file_from_container(self, identifier: str, path: str, start_line: int = 1) -> str:
|
||||
"""Read a file from a container starting at a given line number.
|
||||
|
||||
:param identifier: Container identifier.
|
||||
:param path: Path to file in container.
|
||||
:param start_line: 1-based line number to start reading from.
|
||||
:returns: File contents from *start_line* onwards.
|
||||
|
||||
"""
|
||||
result = self._run(
|
||||
["exec", identifier, "tail", "-n", f"+{start_line}", path],
|
||||
check=False,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
get_logger().debug("failed to tail file from container", path=path, start_line=start_line)
|
||||
return ""
|
||||
return result.stdout
|
||||
|
||||
def list_containers(self, all_containers: bool = True) -> list[dict]:
|
||||
"""List containers.
|
||||
|
||||
@@ -434,27 +404,3 @@ class DockerCLI(AbstractFuzzForgeSandboxEngine):
|
||||
]
|
||||
except json.JSONDecodeError:
|
||||
return []
|
||||
|
||||
def read_file_from_image(self, image: str, path: str) -> str:
|
||||
"""Read a file from inside an image without starting a long-running container.
|
||||
|
||||
Uses docker run with --entrypoint override to read the file via cat.
|
||||
|
||||
:param image: Image reference (e.g., "fuzzforge-rust-analyzer:latest").
|
||||
:param path: Path to file inside image.
|
||||
:returns: File contents as string.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
|
||||
# Use docker run with --entrypoint to override any container entrypoint
|
||||
result = self._run(
|
||||
["run", "--rm", "--entrypoint", "cat", image, path],
|
||||
check=False,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.debug("failed to read file from image", image=image, path=path, stderr=result.stderr)
|
||||
return ""
|
||||
|
||||
return result.stdout
|
||||
|
||||
@@ -168,17 +168,7 @@ class Docker(AbstractFuzzForgeSandboxEngine):
|
||||
message: str = "Docker engine read_file_from_container is not yet implemented"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
def tail_file_from_container(self, identifier: str, path: str, start_line: int = 1) -> str:
|
||||
"""Read a file from a container starting at a given line number."""
|
||||
message: str = "Docker engine tail_file_from_container is not yet implemented"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
def list_containers(self, all_containers: bool = True) -> list[dict]:
|
||||
"""List containers."""
|
||||
message: str = "Docker engine list_containers is not yet implemented"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
def read_file_from_image(self, image: str, path: str) -> str:
|
||||
"""Read a file from inside an image without starting a long-running container."""
|
||||
message: str = "Docker engine read_file_from_image is not yet implemented"
|
||||
raise NotImplementedError(message)
|
||||
|
||||
@@ -166,9 +166,6 @@ class PodmanCLI(AbstractFuzzForgeSandboxEngine):
|
||||
repo = name
|
||||
tag = "latest"
|
||||
|
||||
# Get labels if available
|
||||
labels = image.get("Labels") or {}
|
||||
|
||||
images.append(
|
||||
ImageInfo(
|
||||
reference=name,
|
||||
@@ -176,7 +173,6 @@ class PodmanCLI(AbstractFuzzForgeSandboxEngine):
|
||||
tag=tag,
|
||||
image_id=image.get("Id", "")[:12],
|
||||
size=image.get("Size"),
|
||||
labels=labels,
|
||||
)
|
||||
)
|
||||
|
||||
@@ -449,24 +445,6 @@ class PodmanCLI(AbstractFuzzForgeSandboxEngine):
|
||||
return ""
|
||||
return result.stdout
|
||||
|
||||
def tail_file_from_container(self, identifier: str, path: str, start_line: int = 1) -> str:
|
||||
"""Read a file from a container starting at a given line number.
|
||||
|
||||
:param identifier: Container identifier.
|
||||
:param path: Path to file in container.
|
||||
:param start_line: 1-based line number to start reading from.
|
||||
:returns: File contents from *start_line* onwards.
|
||||
|
||||
"""
|
||||
result = self._run(
|
||||
["exec", identifier, "tail", "-n", f"+{start_line}", path],
|
||||
check=False,
|
||||
)
|
||||
if result.returncode != 0:
|
||||
get_logger().debug("failed to tail file from container", path=path, start_line=start_line)
|
||||
return ""
|
||||
return result.stdout
|
||||
|
||||
def list_containers(self, all_containers: bool = True) -> list[dict]:
|
||||
"""List containers.
|
||||
|
||||
@@ -496,30 +474,6 @@ class PodmanCLI(AbstractFuzzForgeSandboxEngine):
|
||||
except json.JSONDecodeError:
|
||||
return []
|
||||
|
||||
def read_file_from_image(self, image: str, path: str) -> str:
|
||||
"""Read a file from inside an image without starting a long-running container.
|
||||
|
||||
Uses podman run with --entrypoint override to read the file via cat.
|
||||
|
||||
:param image: Image reference (e.g., "fuzzforge-rust-analyzer:latest").
|
||||
:param path: Path to file inside image.
|
||||
:returns: File contents as string.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
|
||||
# Use podman run with --entrypoint to override any container entrypoint
|
||||
result = self._run(
|
||||
["run", "--rm", "--entrypoint", "cat", image, path],
|
||||
check=False,
|
||||
)
|
||||
|
||||
if result.returncode != 0:
|
||||
logger.debug("failed to read file from image", image=image, path=path, stderr=result.stderr)
|
||||
return ""
|
||||
|
||||
return result.stdout
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Utility Methods
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
@@ -475,30 +475,6 @@ class Podman(AbstractFuzzForgeSandboxEngine):
|
||||
return ""
|
||||
return stdout.decode("utf-8", errors="replace") if stdout else ""
|
||||
|
||||
def tail_file_from_container(self, identifier: str, path: str, start_line: int = 1) -> str:
|
||||
"""Read a file from a container starting at a given line number.
|
||||
|
||||
:param identifier: Container identifier.
|
||||
:param path: Path to file inside container.
|
||||
:param start_line: 1-based line number to start reading from.
|
||||
:returns: File contents from *start_line* onwards.
|
||||
|
||||
"""
|
||||
client: PodmanClient = self.get_client()
|
||||
with client:
|
||||
container: Container = client.containers.get(key=identifier)
|
||||
(status, (stdout, stderr)) = container.exec_run(
|
||||
cmd=["tail", "-n", f"+{start_line}", path],
|
||||
demux=True,
|
||||
)
|
||||
if status != 0:
|
||||
error_msg = stderr.decode("utf-8", errors="replace") if stderr else "File not found"
|
||||
get_logger().debug(
|
||||
"failed to tail file from container", path=path, start_line=start_line, error=error_msg,
|
||||
)
|
||||
return ""
|
||||
return stdout.decode("utf-8", errors="replace") if stdout else ""
|
||||
|
||||
def list_containers(self, all_containers: bool = True) -> list[dict]:
|
||||
"""List containers.
|
||||
|
||||
@@ -518,40 +494,3 @@ class Podman(AbstractFuzzForgeSandboxEngine):
|
||||
}
|
||||
for c in containers
|
||||
]
|
||||
|
||||
def read_file_from_image(self, image: str, path: str) -> str:
|
||||
"""Read a file from inside an image without starting a long-running container.
|
||||
|
||||
Creates a temporary container, reads the file, and removes the container.
|
||||
|
||||
:param image: Image reference (e.g., "fuzzforge-rust-analyzer:latest").
|
||||
:param path: Path to file inside image.
|
||||
:returns: File contents as string.
|
||||
|
||||
"""
|
||||
logger = get_logger()
|
||||
client: PodmanClient = self.get_client()
|
||||
|
||||
with client:
|
||||
try:
|
||||
# Create a container that just runs cat on the file
|
||||
container = client.containers.create(
|
||||
image=image,
|
||||
command=["cat", path],
|
||||
remove=True,
|
||||
)
|
||||
|
||||
# Start it and wait for completion
|
||||
container.start()
|
||||
container.wait()
|
||||
|
||||
# Get the logs (which contain stdout)
|
||||
output = container.logs(stdout=True, stderr=False)
|
||||
|
||||
if isinstance(output, bytes):
|
||||
return output.decode("utf-8", errors="replace")
|
||||
return str(output)
|
||||
|
||||
except Exception as exc:
|
||||
logger.debug("failed to read file from image", image=image, path=path, error=str(exc))
|
||||
return ""
|
||||
|
||||
19
fuzzforge-common/src/fuzzforge_common/storage/__init__.py
Normal file
19
fuzzforge-common/src/fuzzforge_common/storage/__init__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
"""FuzzForge storage abstractions.
|
||||
|
||||
Storage class requires boto3. Import it explicitly:
|
||||
from fuzzforge_common.storage.s3 import Storage
|
||||
"""
|
||||
|
||||
from fuzzforge_common.storage.exceptions import (
|
||||
FuzzForgeStorageError,
|
||||
StorageConnectionError,
|
||||
StorageDownloadError,
|
||||
StorageUploadError,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"FuzzForgeStorageError",
|
||||
"StorageConnectionError",
|
||||
"StorageDownloadError",
|
||||
"StorageUploadError",
|
||||
]
|
||||
@@ -0,0 +1,20 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
from fuzzforge_common.storage.s3 import Storage
|
||||
|
||||
|
||||
class StorageConfiguration(BaseModel):
|
||||
"""TODO."""
|
||||
|
||||
#: S3 endpoint URL (e.g., "http://localhost:9000" for MinIO).
|
||||
endpoint: str
|
||||
|
||||
#: S3 access key ID for authentication.
|
||||
access_key: str
|
||||
|
||||
#: S3 secret access key for authentication.
|
||||
secret_key: str
|
||||
|
||||
def into_storage(self) -> Storage:
|
||||
"""TODO."""
|
||||
return Storage(endpoint=self.endpoint, access_key=self.access_key, secret_key=self.secret_key)
|
||||
108
fuzzforge-common/src/fuzzforge_common/storage/exceptions.py
Normal file
108
fuzzforge-common/src/fuzzforge_common/storage/exceptions.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from fuzzforge_common.exceptions import FuzzForgeError
|
||||
|
||||
|
||||
class FuzzForgeStorageError(FuzzForgeError):
|
||||
"""Base exception for all storage-related errors.
|
||||
|
||||
Raised when storage operations (upload, download, connection) fail
|
||||
during workflow execution.
|
||||
|
||||
"""
|
||||
|
||||
|
||||
class StorageConnectionError(FuzzForgeStorageError):
|
||||
"""Failed to connect to storage service.
|
||||
|
||||
:param endpoint: The storage endpoint that failed to connect.
|
||||
:param reason: The underlying exception message.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, endpoint: str, reason: str) -> None:
|
||||
"""Initialize storage connection error.
|
||||
|
||||
:param endpoint: The storage endpoint that failed to connect.
|
||||
:param reason: The underlying exception message.
|
||||
|
||||
"""
|
||||
FuzzForgeStorageError.__init__(
|
||||
self,
|
||||
f"Failed to connect to storage at {endpoint}: {reason}",
|
||||
)
|
||||
self.endpoint = endpoint
|
||||
self.reason = reason
|
||||
|
||||
|
||||
class StorageUploadError(FuzzForgeStorageError):
|
||||
"""Failed to upload object to storage.
|
||||
|
||||
:param bucket: The target bucket name.
|
||||
:param object_key: The target object key.
|
||||
:param reason: The underlying exception message.
|
||||
|
||||
"""
|
||||
|
||||
def __init__(self, bucket: str, object_key: str, reason: str) -> None:
|
||||
"""Initialize storage upload error.
|
||||
|
||||
:param bucket: The target bucket name.
|
||||
:param object_key: The target object key.
|
||||
:param reason: The underlying exception message.
|
||||
|
||||
"""
|
||||
FuzzForgeStorageError.__init__(
|
||||
self,
|
||||
f"Failed to upload to {bucket}/{object_key}: {reason}",
|
||||
)
|
||||
self.bucket = bucket
|
||||
self.object_key = object_key
|
||||
self.reason = reason
|
||||
|
||||
|
||||
class StorageDownloadError(FuzzForgeStorageError):
    """Failed to download object from storage.

    :param bucket: The source bucket name.
    :param object_key: The source object key.
    :param reason: The underlying exception message.
    """

    def __init__(self, bucket: str, object_key: str, reason: str) -> None:
        """Initialize storage download error.

        :param bucket: The source bucket name.
        :param object_key: The source object key.
        :param reason: The underlying exception message.
        """
        # Cooperative super() call instead of an explicit base-class
        # __init__ — idiomatic and consistent with modern Python style.
        super().__init__(f"Failed to download from {bucket}/{object_key}: {reason}")
        self.bucket = bucket
        self.object_key = object_key
        self.reason = reason
|
||||
|
||||
|
||||
class StorageDeletionError(FuzzForgeStorageError):
    """Failed to delete bucket from storage.

    :param bucket: The bucket name that failed to delete.
    :param reason: The underlying exception message.
    """

    def __init__(self, bucket: str, reason: str) -> None:
        """Initialize storage deletion error.

        :param bucket: The bucket name that failed to delete.
        :param reason: The underlying exception message.
        """
        # Cooperative super() call instead of an explicit base-class
        # __init__ — idiomatic and consistent with modern Python style.
        super().__init__(f"Failed to delete bucket {bucket}: {reason}")
        self.bucket = bucket
        self.reason = reason
|
||||
351
fuzzforge-common/src/fuzzforge_common/storage/s3.py
Normal file
351
fuzzforge-common/src/fuzzforge_common/storage/s3.py
Normal file
@@ -0,0 +1,351 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path, PurePath
|
||||
from tarfile import TarInfo
|
||||
from tarfile import open as Archive # noqa: N812
|
||||
from tempfile import NamedTemporaryFile
|
||||
from typing import TYPE_CHECKING, Any, cast
|
||||
|
||||
from botocore.exceptions import ClientError
|
||||
|
||||
from fuzzforge_common.storage.exceptions import StorageDeletionError, StorageDownloadError, StorageUploadError
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from botocore.client import BaseClient
|
||||
from structlog.stdlib import BoundLogger
|
||||
|
||||
|
||||
def get_logger() -> BoundLogger:
    """Return a configured structlog logger for this module.

    The structlog import lives inside the function body because Temporal
    requires deferred imports for serialization. It is imported under an
    alias so it does not shadow this function's own name.

    :returns: Configured structlog logger.
    """
    from structlog import get_logger as _structlog_get_logger  # noqa: PLC0415 (required by temporal)

    return cast("BoundLogger", _structlog_get_logger())
|
||||
|
||||
|
||||
class Storage:
    """S3-compatible storage backend implementation using boto3.

    Supports MinIO, AWS S3, and other S3-compatible storage services.
    Uses error-driven approach (EAFP) to handle bucket creation and
    avoid race conditions.
    """

    #: S3 endpoint URL (e.g., "http://localhost:9000" for MinIO).
    __endpoint: str

    #: S3 access key ID for authentication.
    __access_key: str

    #: S3 secret access key for authentication.
    __secret_key: str

    def __init__(self, endpoint: str, access_key: str, secret_key: str) -> None:
        """Initialize the storage backend with connection credentials.

        :param endpoint: S3 endpoint URL (e.g., "http://localhost:9000" for MinIO).
        :param access_key: S3 access key ID for authentication.
        :param secret_key: S3 secret access key for authentication.
        """
        self.__endpoint = endpoint
        self.__access_key = access_key
        self.__secret_key = secret_key

    def _get_client(self) -> BaseClient:
        """Create boto3 S3 client with configured credentials.

        Uses deferred import pattern required by Temporal for serialization.

        :returns: Configured boto3 S3 client.
        """
        import boto3  # noqa: PLC0415 (required by temporal)

        return boto3.client(
            "s3",
            endpoint_url=self.__endpoint,
            aws_access_key_id=self.__access_key,
            aws_secret_access_key=self.__secret_key,
        )

    def create_bucket(self, bucket: str) -> None:
        """Create the S3 bucket if it does not already exist.

        Idempotent operation - succeeds if bucket already exists and is owned by you.
        Fails if bucket exists but is owned by another account.

        :param bucket: The name of the bucket to create.
        :raise ClientError: If bucket creation fails (permissions, name conflicts, etc.).
        """
        logger = get_logger()
        client = self._get_client()

        logger.debug("creating_bucket", bucket=bucket)

        try:
            client.create_bucket(Bucket=bucket)
            logger.info("bucket_created", bucket=bucket)

        except ClientError as e:
            error_code = e.response.get("Error", {}).get("Code")

            # Bucket already exists and we own it - this is fine
            if error_code in ("BucketAlreadyOwnedByYou", "BucketAlreadyExists"):
                logger.debug(
                    "bucket_already_exists",
                    bucket=bucket,
                    error_code=error_code,
                )
                return

            # Other errors are actual failures
            logger.exception(
                "bucket_creation_failed",
                bucket=bucket,
                error_code=error_code,
            )
            raise

    def delete_bucket(self, bucket: str) -> None:
        """Delete an S3 bucket and all its contents.

        Idempotent operation - succeeds if bucket doesn't exist.
        Handles pagination for buckets with many objects.

        :param bucket: The name of the bucket to delete.
        :raises StorageDeletionError: If bucket deletion fails.
        """
        logger = get_logger()
        client = self._get_client()

        logger.debug("deleting_bucket", bucket=bucket)

        try:
            # S3 requires bucket to be empty before deletion.
            # Delete all objects first with pagination support.
            continuation_token = None

            while True:
                # List objects (up to 1000 per request)
                list_params: dict[str, Any] = {"Bucket": bucket}
                if continuation_token:
                    list_params["ContinuationToken"] = continuation_token

                response = client.list_objects_v2(**list_params)

                # Delete objects if any exist (max 1000 per delete_objects call)
                if "Contents" in response:
                    objects = [{"Key": obj["Key"]} for obj in response["Contents"]]
                    client.delete_objects(Bucket=bucket, Delete={"Objects": objects})
                    logger.debug("deleted_objects", bucket=bucket, count=len(objects))

                # Check if more objects exist
                if not response.get("IsTruncated", False):
                    break

                continuation_token = response.get("NextContinuationToken")

            # Now delete the empty bucket
            client.delete_bucket(Bucket=bucket)
            logger.info("bucket_deleted", bucket=bucket)

        except ClientError as error:
            error_code = error.response.get("Error", {}).get("Code")

            # Idempotent - bucket already doesn't exist
            if error_code == "NoSuchBucket":
                logger.debug("bucket_does_not_exist", bucket=bucket)
                return

            # Other errors are actual failures
            logger.exception(
                "bucket_deletion_failed",
                bucket=bucket,
                error_code=error_code,
            )
            raise StorageDeletionError(bucket=bucket, reason=str(error)) from error

    def upload_file(
        self,
        bucket: str,
        file: Path,
        key: str,
    ) -> None:
        """Upload archive file to S3 storage at specified object key.

        Creates the bucket on demand: if the first upload fails with
        ``NoSuchBucket``, the bucket is created and the upload retried once.

        :param bucket: The target S3 bucket name.
        :param file: Local path to the archive file to upload.
        :param key: Object key (path) in S3 where file should be uploaded.
        :raise StorageUploadError: If upload operation fails.
        """
        from boto3.exceptions import S3UploadFailedError  # noqa: PLC0415 (required by 'temporal' at runtime)

        logger = get_logger()
        client = self._get_client()

        logger.debug(
            "uploading_archive_to_storage",
            bucket=bucket,
            object_key=key,
            archive_path=str(file),
        )

        try:
            client.upload_file(
                Filename=str(file),
                Bucket=bucket,
                Key=key,
            )
            logger.info(
                "archive_uploaded_successfully",
                bucket=bucket,
                object_key=key,
            )

        except S3UploadFailedError as e:
            # Guard clause: anything other than a missing bucket is a
            # genuine failure — report it immediately.
            if "NoSuchBucket" not in str(e):
                logger.exception(
                    "upload_failed",
                    bucket=bucket,
                    object_key=key,
                )
                raise StorageUploadError(
                    bucket=bucket,
                    object_key=key,
                    reason=str(e),
                ) from e

            # Missing bucket: create it, then retry the upload exactly once.
            logger.info(
                "bucket_does_not_exist_creating",
                bucket=bucket,
            )
            self.create_bucket(bucket=bucket)
            try:
                client.upload_file(
                    Filename=str(file),
                    Bucket=bucket,
                    Key=key,
                )
                logger.info(
                    "archive_uploaded_successfully_after_bucket_creation",
                    bucket=bucket,
                    object_key=key,
                )
            except S3UploadFailedError as retry_error:
                logger.exception(
                    "upload_failed_after_bucket_creation",
                    bucket=bucket,
                    object_key=key,
                )
                raise StorageUploadError(
                    bucket=bucket,
                    object_key=key,
                    reason=str(retry_error),
                ) from retry_error

    def download_file(self, bucket: str, key: PurePath) -> Path:
        """Download a single file from S3 storage.

        Downloads the file to a temporary location and returns the path.
        The caller is responsible for deleting the returned file.

        :param bucket: S3 bucket name.
        :param key: Object key (path) in S3 to download.
        :returns: Path to the downloaded file.
        :raise StorageDownloadError: If download operation fails.
        """
        logger = get_logger()
        client = self._get_client()

        logger.debug(
            "downloading_file_from_storage",
            bucket=bucket,
            object_key=str(key),
        )

        # Reserve a temporary file name; the handle is closed before boto3
        # writes to the path so the download does not race an open handle.
        with NamedTemporaryFile(delete=False, suffix=".tar.gz") as temp_file:
            temp_path = Path(temp_file.name)

        try:
            # Download object to temp file
            client.download_file(
                Bucket=bucket,
                Key=str(key),
                Filename=str(temp_path),
            )

            logger.info(
                "file_downloaded_successfully",
                bucket=bucket,
                object_key=str(key),
                local_path=str(temp_path),
            )

            return temp_path

        except ClientError as error:
            # BUG FIX: clean up the reserved temp file on failure — it was
            # previously leaked because delete=False with no error cleanup.
            temp_path.unlink(missing_ok=True)

            error_code = error.response.get("Error", {}).get("Code")
            logger.exception(
                "download_failed",
                bucket=bucket,
                object_key=str(key),
                error_code=error_code,
            )
            raise StorageDownloadError(
                bucket=bucket,
                object_key=str(key),
                reason=f"{error_code}: {error!s}",
            ) from error

    def download_directory(self, bucket: str, directory: PurePath) -> Path:
        """Download every object under a key prefix into one gzipped tar archive.

        Lists all objects whose key starts with ``directory`` (paginated) and
        streams each object body into a ``.tar.gz`` archive written to a
        temporary file. The caller is responsible for deleting the archive.

        :param bucket: S3 bucket name.
        :param directory: Key prefix whose objects should be archived.
        :returns: Path to the temporary ``.tar.gz`` archive.
        :raise StorageDownloadError: If listing or fetching any object fails.
        """
        with NamedTemporaryFile(delete=False) as file:
            path: Path = Path(file.name)
        # end-with
        client: Any = self._get_client()
        with Archive(name=str(path), mode="w:gz") as archive:
            paginator = client.get_paginator("list_objects_v2")
            try:
                pages = paginator.paginate(Bucket=bucket, Prefix=str(directory))
            except ClientError as exception:
                raise StorageDownloadError(
                    bucket=bucket,
                    object_key=str(directory),
                    reason=exception.response["Error"]["Code"],
                ) from exception
            for page in pages:
                for entry in page.get("Contents", []):
                    key: str = entry["Key"]
                    try:
                        response: dict[str, Any] = client.get_object(Bucket=bucket, Key=key)
                    except ClientError as exception:
                        raise StorageDownloadError(
                            bucket=bucket,
                            object_key=key,
                            reason=exception.response["Error"]["Code"],
                        ) from exception
                    # BUG FIX: TarInfo defaults to size=0, which made
                    # addfile() write empty archive members. Set the real
                    # object size (from the list_objects_v2 entry) so the
                    # streamed body is actually stored.
                    member = TarInfo(name=key)
                    member.size = int(entry["Size"])
                    archive.addfile(member, fileobj=response["Body"])
                # end-for
            # end-for
        # end-with
        return path
|
||||
8
fuzzforge-common/src/fuzzforge_common/temporal/queues.py
Normal file
8
fuzzforge-common/src/fuzzforge_common/temporal/queues.py
Normal file
@@ -0,0 +1,8 @@
|
||||
from enum import StrEnum
|
||||
|
||||
|
||||
class TemporalQueues(StrEnum):
|
||||
"""Enumeration of available `Temporal Task Queues`."""
|
||||
|
||||
#: The default task queue.
|
||||
DEFAULT = "default-task-queue"
|
||||
@@ -0,0 +1,46 @@
|
||||
from enum import StrEnum
|
||||
from typing import Literal
|
||||
|
||||
from fuzzforge_types import FuzzForgeWorkflowIdentifier # noqa: TC002 (required by 'pydantic' at runtime)
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class Base(BaseModel):
    """Shared pydantic base class for workflow definition models."""
|
||||
|
||||
|
||||
class FuzzForgeWorkflowSteps(StrEnum):
|
||||
"""Workflow step types."""
|
||||
|
||||
#: Execute a FuzzForge module
|
||||
RUN_FUZZFORGE_MODULE = "run-fuzzforge-module"
|
||||
|
||||
|
||||
class FuzzForgeWorkflowStep(Base):
    """Base model for a single step in a FuzzForge workflow.

    Concrete step types (e.g. ``RunFuzzForgeModule``) subclass this model
    and pin ``kind`` to their own ``FuzzForgeWorkflowSteps`` value.
    """

    #: The type of the workflow's step.
    kind: FuzzForgeWorkflowSteps
|
||||
|
||||
|
||||
class RunFuzzForgeModule(FuzzForgeWorkflowStep):
    """Execute a FuzzForge module."""

    #: The step kind, pinned to RUN_FUZZFORGE_MODULE for this step type.
    kind: Literal[FuzzForgeWorkflowSteps.RUN_FUZZFORGE_MODULE] = FuzzForgeWorkflowSteps.RUN_FUZZFORGE_MODULE
    #: The name of the module.
    module: str
    #: The container of the module — presumably a container image
    #: reference; TODO confirm against the module runner.
    container: str
|
||||
|
||||
|
||||
class FuzzForgeWorkflowDefinition(Base):
    """The definition of a FuzzForge workflow."""

    #: The author of the workflow.
    author: str
    #: The identifier of the workflow.
    identifier: FuzzForgeWorkflowIdentifier
    #: The name of the workflow.
    name: str
    #: The collection of steps that compose the workflow.
    #: NOTE: currently typed as RunFuzzForgeModule only — the sole step
    #: kind defined so far.
    steps: list[RunFuzzForgeModule]
|
||||
@@ -0,0 +1,24 @@
|
||||
from pydantic import BaseModel
|
||||
|
||||
from fuzzforge_common.sandboxes.engines.docker.configuration import (
|
||||
DockerConfiguration, # noqa: TC001 (required by pydantic at runtime)
|
||||
)
|
||||
from fuzzforge_common.sandboxes.engines.podman.configuration import (
|
||||
PodmanConfiguration, # noqa: TC001 (required by pydantic at runtime)
|
||||
)
|
||||
from fuzzforge_common.storage.configuration import StorageConfiguration # noqa: TC001 (required by pydantic at runtime)
|
||||
|
||||
|
||||
class TemporalWorkflowParameters(BaseModel):
    """Base parameters for Temporal workflows.

    Provides common configuration shared across all workflow types,
    including sandbox engine and storage backend instances.
    """

    #: Configuration for the sandbox engine (Docker or Podman) that
    #: performs container operations.
    engine_configuration: PodmanConfiguration | DockerConfiguration

    #: Configuration for the storage backend used to upload/download
    #: execution artifacts.
    storage_configuration: StorageConfiguration
|
||||
108
fuzzforge-common/src/fuzzforge_common/workflows/bridge_utils.py
Normal file
108
fuzzforge-common/src/fuzzforge_common/workflows/bridge_utils.py
Normal file
@@ -0,0 +1,108 @@
|
||||
"""Helper utilities for working with bridge transformations."""
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def load_transform_from_file(file_path: str | Path) -> str:
|
||||
"""Load bridge transformation code from a Python file.
|
||||
|
||||
This reads the transformation function from a .py file and extracts
|
||||
the code as a string suitable for the bridge module.
|
||||
|
||||
Args:
|
||||
file_path: Path to Python file containing transform() function
|
||||
|
||||
Returns:
|
||||
Python code as a string
|
||||
|
||||
Example:
|
||||
>>> code = load_transform_from_file("transformations/add_line_numbers.py")
|
||||
>>> # code contains the transform() function as a string
|
||||
|
||||
"""
|
||||
path = Path(file_path)
|
||||
|
||||
if not path.exists():
|
||||
raise FileNotFoundError(f"Transformation file not found: {file_path}")
|
||||
|
||||
if path.suffix != ".py":
|
||||
raise ValueError(f"Transformation file must be .py file, got: {path.suffix}")
|
||||
|
||||
# Read the entire file
|
||||
code = path.read_text()
|
||||
|
||||
return code
|
||||
|
||||
|
||||
def create_bridge_input(
    transform_file: str | Path,
    input_filename: str | None = None,
    output_filename: str | None = None,
) -> dict[str, Any]:
    """Build the bridge module's input configuration from a transform file.

    Args:
        transform_file: Path to Python file with transform() function
        input_filename: Optional specific input file to transform
        output_filename: Optional specific output filename

    Returns:
        Dictionary suitable for bridge module's input.json

    Example:
        >>> config = create_bridge_input("transformations/add_line_numbers.py")
        >>> import json
        >>> json.dump(config, open("input.json", "w"))

    """
    return {
        "code": load_transform_from_file(transform_file),
        "input_filename": input_filename,
        "output_filename": output_filename,
    }
|
||||
|
||||
|
||||
def validate_transform_function(file_path: str | Path) -> bool:
    """Validate that a Python file contains a valid transform() function.

    Performs four checks in order: the text mentions ``def transform(``,
    the code compiles, the code executes, and the resulting ``transform``
    name exists and is callable.

    NOTE(review): this executes arbitrary code from ``file_path`` via
    ``exec`` — only call it on trusted transformation files.

    Args:
        file_path: Path to Python file to validate

    Returns:
        True if valid, raises exception otherwise

    Raises:
        ValueError: If transform() function is not found or invalid

    """
    code = load_transform_from_file(file_path)

    # Check if transform function is defined (cheap textual pre-check
    # before compiling/executing)
    if "def transform(" not in code:
        raise ValueError(
            f"File {file_path} must contain a 'def transform(data)' function"
        )

    # Try to compile the code
    try:
        compile(code, str(file_path), "exec")
    except SyntaxError as e:
        raise ValueError(f"Syntax error in {file_path}: {e}") from e

    # Try to execute and verify transform exists; builtins are provided so
    # module-level code in the file can use them
    namespace: dict[str, Any] = {"__builtins__": __builtins__}
    try:
        exec(code, namespace)
    except Exception as e:
        raise ValueError(f"Failed to execute {file_path}: {e}") from e

    if "transform" not in namespace:
        raise ValueError(f"No 'transform' function found in {file_path}")

    if not callable(namespace["transform"]):
        raise ValueError(f"'transform' in {file_path} is not callable")

    return True
|
||||
27
fuzzforge-common/src/fuzzforge_common/workflows/default.py
Normal file
27
fuzzforge-common/src/fuzzforge_common/workflows/default.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from fuzzforge_types import (
|
||||
FuzzForgeExecutionIdentifier, # noqa: TC002 (required by pydantic at runtime)
|
||||
FuzzForgeProjectIdentifier, # noqa: TC002 (required by pydantic at runtime)
|
||||
)
|
||||
|
||||
from fuzzforge_common.workflows.base.definitions import (
|
||||
FuzzForgeWorkflowDefinition, # noqa: TC001 (required by pydantic at runtime)
|
||||
)
|
||||
from fuzzforge_common.workflows.base.parameters import TemporalWorkflowParameters
|
||||
|
||||
|
||||
class ExecuteFuzzForgeWorkflowParameters(TemporalWorkflowParameters):
    """Parameters for the default FuzzForge workflow orchestration.

    Contains workflow definition and execution tracking identifiers
    for coordinating multi-module workflows. Inherits engine and storage
    configuration from ``TemporalWorkflowParameters``.
    """

    #: UUID7 identifier of this specific workflow execution.
    execution_identifier: FuzzForgeExecutionIdentifier

    #: UUID7 identifier of the project this execution belongs to.
    project_identifier: FuzzForgeProjectIdentifier

    #: The definition of the FuzzForge workflow to run.
    workflow_definition: FuzzForgeWorkflowDefinition
|
||||
80
fuzzforge-common/src/fuzzforge_common/workflows/modules.py
Normal file
80
fuzzforge-common/src/fuzzforge_common/workflows/modules.py
Normal file
@@ -0,0 +1,80 @@
|
||||
from typing import Any, Literal
|
||||
|
||||
from fuzzforge_types import (
|
||||
FuzzForgeExecutionIdentifier, # noqa: TC002 (required by pydantic at runtime)
|
||||
FuzzForgeProjectIdentifier, # noqa: TC002 (required by pydantic at runtime)
|
||||
)
|
||||
|
||||
from fuzzforge_common.workflows.base.parameters import TemporalWorkflowParameters
|
||||
|
||||
|
||||
class ExecuteFuzzForgeModuleParameters(TemporalWorkflowParameters):
    """Parameters for executing a single FuzzForge module workflow.

    Contains module execution configuration including container image,
    project context, and execution tracking identifiers.

    Supports workflow chaining where modules can be executed in sequence,
    with each module's output becoming the next module's input.
    """

    #: The identifier of this module execution.
    execution_identifier: FuzzForgeExecutionIdentifier

    #: The identifier/name of the module to execute.
    #: FIXME: Currently accepts both UUID (for registry lookups) and container names (e.g., "text-generator:0.0.1").
    #: This should be split into module_identifier (UUID) and container_image (string) in the future.
    module_identifier: str

    #: The identifier of the project this module execution belongs to.
    project_identifier: FuzzForgeProjectIdentifier

    #: Optional configuration dictionary for the module.
    #: Will be written to /data/input/config.json in the sandbox.
    module_configuration: dict[str, Any] | None = None

    # Workflow chaining fields

    #: The identifier of the parent workflow execution (if part of a multi-module workflow).
    #: For standalone module executions, this equals execution_identifier.
    workflow_execution_identifier: FuzzForgeExecutionIdentifier | None = None

    #: Position of this module in the workflow (0-based).
    #: 0 = first module (reads from project assets)
    #: N > 0 = subsequent module (reads from previous module's output)
    step_index: int = 0

    #: Execution identifier of the previous module in the workflow chain.
    #: None for first module (step_index=0).
    #: Used to locate previous module's output in storage.
    previous_step_execution_identifier: FuzzForgeExecutionIdentifier | None = None
|
||||
|
||||
|
||||
class WorkflowStep(TemporalWorkflowParameters):
    """A step in a workflow - a module execution.

    Steps are executed sequentially in a workflow. Each step runs a
    containerized module. Inherits engine and storage configuration from
    ``TemporalWorkflowParameters``.

    Examples:
        # Module step
        WorkflowStep(
            step_index=0,
            step_type="module",
            module_identifier="text-generator:0.0.1"
        )

    """

    #: Position of this step in the workflow (0-based)
    step_index: int

    #: Type of step: "module" (bridges are also modules now) — the only
    #: value the Literal currently allows
    step_type: Literal["module"]

    #: Module identifier (container image name like "text-generator:0.0.1")
    #: Required if step_type="module"
    module_identifier: str | None = None

    #: Optional module configuration
    module_configuration: dict[str, Any] | None = None
|
||||
0
fuzzforge-common/tests/unit/storage/__init__.py
Normal file
0
fuzzforge-common/tests/unit/storage/__init__.py
Normal file
42
fuzzforge-common/tests/unit/storage/test_storage.py
Normal file
42
fuzzforge-common/tests/unit/storage/test_storage.py
Normal file
@@ -0,0 +1,42 @@
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_common.storage.configuration import StorageConfiguration
|
||||
|
||||
|
||||
def test_download_directory(
    storage_configuration: StorageConfiguration,
    boto3_client: Any,
    random_bucket: str,
    tmp_path: Path,
) -> None:
    """Upload a nested directory tree and download it back as one archive.

    Uploads three empty files under the ``assets/`` key prefix, then checks
    that ``Storage.download_directory`` returns a path to a regular file
    (the gzipped tar archive).
    """
    bucket = random_bucket
    storage = storage_configuration.into_storage()

    d1 = tmp_path.joinpath("d1")
    f1 = d1.joinpath("f1")
    d2 = tmp_path.joinpath("d2")
    f2 = d2.joinpath("f2")
    d3 = d2.joinpath("d3")
    # BUG FIX: the file inside d3 was accidentally named "d3" (copy-paste
    # typo); name it "f3" to match its sibling fixtures f1/f2.
    f3 = d3.joinpath("f3")

    d1.mkdir()
    d2.mkdir()
    d3.mkdir()
    f1.touch()
    f2.touch()
    f3.touch()

    for path in [f1, f2, f3]:
        key: Path = Path("assets", path.relative_to(other=tmp_path))
        boto3_client.upload_file(
            Bucket=bucket,
            Filename=str(path),
            Key=str(key),
        )

    path = storage.download_directory(bucket=bucket, directory="assets")

    assert path.is_file()
|
||||
@@ -45,11 +45,11 @@ For custom setups, you can manually configure the MCP server.
|
||||
{
|
||||
"mcpServers": {
|
||||
"fuzzforge": {
|
||||
"command": "/path/to/fuzzforge_ai/.venv/bin/python",
|
||||
"command": "/path/to/fuzzforge-oss/.venv/bin/python",
|
||||
"args": ["-m", "fuzzforge_mcp"],
|
||||
"cwd": "/path/to/fuzzforge_ai",
|
||||
"cwd": "/path/to/fuzzforge-oss",
|
||||
"env": {
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge_ai/fuzzforge-modules",
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge-oss/fuzzforge-modules",
|
||||
"FUZZFORGE_ENGINE__TYPE": "docker"
|
||||
}
|
||||
}
|
||||
@@ -64,11 +64,11 @@ For custom setups, you can manually configure the MCP server.
|
||||
"servers": {
|
||||
"fuzzforge": {
|
||||
"type": "stdio",
|
||||
"command": "/path/to/fuzzforge_ai/.venv/bin/python",
|
||||
"command": "/path/to/fuzzforge-oss/.venv/bin/python",
|
||||
"args": ["-m", "fuzzforge_mcp"],
|
||||
"cwd": "/path/to/fuzzforge_ai",
|
||||
"cwd": "/path/to/fuzzforge-oss",
|
||||
"env": {
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge_ai/fuzzforge-modules",
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge-oss/fuzzforge-modules",
|
||||
"FUZZFORGE_ENGINE__TYPE": "docker"
|
||||
}
|
||||
}
|
||||
@@ -83,11 +83,11 @@ For custom setups, you can manually configure the MCP server.
|
||||
"mcpServers": {
|
||||
"fuzzforge": {
|
||||
"type": "stdio",
|
||||
"command": "/path/to/fuzzforge_ai/.venv/bin/python",
|
||||
"command": "/path/to/fuzzforge-oss/.venv/bin/python",
|
||||
"args": ["-m", "fuzzforge_mcp"],
|
||||
"cwd": "/path/to/fuzzforge_ai",
|
||||
"cwd": "/path/to/fuzzforge-oss",
|
||||
"env": {
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge_ai/fuzzforge-modules",
|
||||
"FUZZFORGE_MODULES_PATH": "/path/to/fuzzforge-oss/fuzzforge-modules",
|
||||
"FUZZFORGE_ENGINE__TYPE": "docker"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,14 @@
|
||||
[project]
|
||||
name = "fuzzforge-mcp"
|
||||
version = "0.0.1"
|
||||
description = "FuzzForge MCP Server - AI agent gateway for FuzzForge AI."
|
||||
description = "FuzzForge MCP Server - AI agent gateway for FuzzForge OSS."
|
||||
authors = []
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"fastmcp==2.14.1",
|
||||
"fuzzforge-common==0.0.1",
|
||||
"fuzzforge-runner==0.0.1",
|
||||
"fuzzforge-types==0.0.1",
|
||||
"pydantic==2.12.4",
|
||||
"pydantic-settings==2.12.0",
|
||||
"structlog==25.5.0",
|
||||
@@ -23,12 +24,11 @@ lints = [
|
||||
"ruff==0.14.4",
|
||||
]
|
||||
tests = [
|
||||
"fuzzforge-tests==0.0.1",
|
||||
"pytest==9.0.2",
|
||||
"pytest-asyncio==1.3.0",
|
||||
"pytest-httpx==0.36.0",
|
||||
]
|
||||
|
||||
[tool.uv.sources]
|
||||
fuzzforge-common = { workspace = true }
|
||||
fuzzforge-tests = { workspace = true }
|
||||
fuzzforge-runner = { workspace = true }
|
||||
fuzzforge-types = { workspace = true }
|
||||
|
||||
@@ -14,18 +14,3 @@ ignore = [
|
||||
"PLR2004", # allowing comparisons using unamed numerical constants in tests
|
||||
"S101", # allowing 'assert' statements in tests
|
||||
]
|
||||
"src/**" = [
|
||||
"ASYNC109", # async with timeout param: intentional pattern
|
||||
"EM102", # f-string in exception: existing pattern
|
||||
"PERF401", # list comprehension: readability over perf
|
||||
"PLR0913", # too many arguments: API compatibility
|
||||
"PLW0602", # global variable: intentional for shared state
|
||||
"PLW0603", # global statement: intentional for shared state
|
||||
"RET504", # unnecessary assignment: readability
|
||||
"RET505", # unnecessary elif after return: readability
|
||||
"TC001", # TYPE_CHECKING: causes circular imports
|
||||
"TC003", # TYPE_CHECKING: causes circular imports
|
||||
"TRY300", # try-else: existing pattern
|
||||
"TRY301", # abstract raise: existing pattern
|
||||
"TRY003", # message in exception: existing pattern
|
||||
]
|
||||
|
||||
@@ -1,8 +1,7 @@
|
||||
"""FuzzForge MCP Server Application.
|
||||
|
||||
This is the main entry point for the FuzzForge MCP server, providing
|
||||
AI agents with tools to discover and execute MCP hub tools for
|
||||
security research.
|
||||
AI agents with tools to execute security research modules.
|
||||
|
||||
"""
|
||||
|
||||
@@ -13,7 +12,7 @@ from fastmcp import FastMCP
|
||||
from fastmcp.server.middleware.error_handling import ErrorHandlingMiddleware
|
||||
|
||||
from fuzzforge_mcp import resources, tools
|
||||
from fuzzforge_mcp.settings import Settings
|
||||
from fuzzforge_runner import Settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from collections.abc import AsyncGenerator
|
||||
@@ -39,54 +38,18 @@ mcp: FastMCP = FastMCP(
|
||||
instructions="""
|
||||
FuzzForge is a security research orchestration platform. Use these tools to:
|
||||
|
||||
1. **List hub servers**: Discover registered MCP tool servers
|
||||
2. **Discover tools**: Find available tools from hub servers
|
||||
3. **Execute hub tools**: Run security tools in isolated containers
|
||||
1. **List modules**: Discover available security research modules
|
||||
2. **Execute modules**: Run modules in isolated containers
|
||||
3. **Execute workflows**: Chain multiple modules together
|
||||
4. **Manage projects**: Initialize and configure projects
|
||||
5. **Get results**: Retrieve execution results
|
||||
|
||||
Typical workflow:
|
||||
1. Initialize a project with `init_project`
|
||||
2. Set project assets with `set_project_assets` — path to the directory containing
|
||||
target files (firmware images, binaries, source code, etc.)
|
||||
3. List available hub servers with `list_hub_servers`
|
||||
4. Discover tools from servers with `discover_hub_tools`
|
||||
5. Execute hub tools with `execute_hub_tool`
|
||||
|
||||
Agent context convention:
|
||||
When you call `discover_hub_tools`, some servers return an `agent_context` field
|
||||
with usage tips, known issues, rule templates, and workflow guidance. Always read
|
||||
this context before using the server's tools.
|
||||
|
||||
File access in containers:
|
||||
- Assets set via `set_project_assets` are mounted read-only at `/app/uploads/` and `/app/samples/`
|
||||
- A writable output directory is mounted at `/app/output/` — use it for extraction results, reports, etc.
|
||||
- Always use container paths (e.g. `/app/uploads/file`) when passing file arguments to hub tools
|
||||
|
||||
Stateful tools:
|
||||
- Some tools (e.g. radare2-mcp) require multi-step sessions. Use `start_hub_server` to launch
|
||||
a persistent container, then `execute_hub_tool` calls reuse that container. Stop with `stop_hub_server`.
|
||||
|
||||
Firmware analysis pipeline (when analyzing firmware images):
|
||||
1. **binwalk-mcp** (`binwalk_scan` + `binwalk_extract`) — identify and extract filesystem from firmware
|
||||
2. **yara-mcp** (`yara_scan_with_rules`) — scan extracted files with vulnerability rules to prioritize targets
|
||||
3. **radare2-mcp** (persistent session) — confirm dangerous code paths
|
||||
4. **searchsploit-mcp** (`search_exploitdb`) — query version strings from radare2 against ExploitDB
|
||||
Run steps 3 and 4 outputs feed into a final triage summary.
|
||||
|
||||
radare2-mcp agent context (upstream tool — no embedded context):
|
||||
- Start a persistent session with `start_hub_server("radare2-mcp")` before any calls.
|
||||
- IMPORTANT: the `open_file` tool requires the parameter name `file_path` (with underscore),
|
||||
not `filepath`. Example: `execute_hub_tool("hub:radare2-mcp:open_file", {"file_path": "/app/output/..."})`
|
||||
- Workflow: `open_file` → `analyze` → `list_imports` → `xrefs_to` → `run_command` with `pdf @ <addr>`.
|
||||
- Static binary fallback: firmware binaries are often statically linked. When `list_imports`
|
||||
returns an empty result, fall back to `list_symbols` and search for dangerous function names
|
||||
(system, strcpy, gets, popen, sprintf) in the output. Then use `xrefs_to` on their addresses.
|
||||
- For string extraction, use `run_command` with `iz` (data section strings).
|
||||
The `list_all_strings` tool may return garbled output for large binaries.
|
||||
- For decompilation, use `run_command` with `pdc @ <addr>` (pseudo-C) or `pdf @ <addr>`
|
||||
(annotated disassembly). The `decompile` tool may fail with "not available in current mode".
|
||||
- Stop the session with `stop_hub_server("radare2-mcp")` when done.
|
||||
2. Set project assets with `set_project_assets` (optional)
|
||||
3. List available modules with `list_modules`
|
||||
4. Execute a module with `execute_module`
|
||||
5. Get results with `get_execution_results`
|
||||
""",
|
||||
lifespan=lifespan,
|
||||
)
|
||||
|
||||
@@ -6,34 +6,14 @@ from pathlib import Path
|
||||
from typing import TYPE_CHECKING, cast
|
||||
|
||||
from fastmcp.server.dependencies import get_context
|
||||
from fuzzforge_runner import Runner, Settings
|
||||
|
||||
from fuzzforge_mcp.exceptions import FuzzForgeMCPError
|
||||
from fuzzforge_mcp.settings import Settings
|
||||
from fuzzforge_mcp.storage import LocalStorage
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fastmcp import Context
|
||||
|
||||
|
||||
# Track the current active project path (set by init_project)
|
||||
_current_project_path: Path | None = None
|
||||
|
||||
# Singleton storage instance
|
||||
_storage: LocalStorage | None = None
|
||||
|
||||
|
||||
def set_current_project_path(project_path: Path) -> None:
|
||||
"""Set the current project path.
|
||||
|
||||
Called by init_project to track which project is active.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
|
||||
"""
|
||||
global _current_project_path
|
||||
_current_project_path = project_path
|
||||
|
||||
|
||||
def get_settings() -> Settings:
|
||||
"""Get MCP server settings from context.
|
||||
|
||||
@@ -51,27 +31,18 @@ def get_settings() -> Settings:
|
||||
def get_project_path() -> Path:
|
||||
"""Get the current project path.
|
||||
|
||||
Returns the project path set by init_project, or falls back to
|
||||
the current working directory if no project has been initialized.
|
||||
|
||||
:return: Path to the current project.
|
||||
|
||||
"""
|
||||
global _current_project_path
|
||||
if _current_project_path is not None:
|
||||
return _current_project_path
|
||||
# Fall back to current working directory (where the AI agent is working)
|
||||
return Path.cwd()
|
||||
settings: Settings = get_settings()
|
||||
return Path(settings.project.default_path)
|
||||
|
||||
|
||||
def get_storage() -> LocalStorage:
|
||||
"""Get the storage backend instance.
|
||||
def get_runner() -> Runner:
|
||||
"""Get a configured Runner instance.
|
||||
|
||||
:return: LocalStorage instance.
|
||||
:return: Runner instance configured from MCP settings.
|
||||
|
||||
"""
|
||||
global _storage
|
||||
if _storage is None:
|
||||
settings = get_settings()
|
||||
_storage = LocalStorage(settings.storage.path)
|
||||
return _storage
|
||||
settings: Settings = get_settings()
|
||||
return Runner(settings)
|
||||
|
||||
@@ -2,12 +2,14 @@
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from fuzzforge_mcp.resources import executions, project
|
||||
from fuzzforge_mcp.resources import executions, modules, project, workflows
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
mcp.mount(executions.mcp)
|
||||
mcp.mount(modules.mcp)
|
||||
mcp.mount(project.mcp)
|
||||
mcp.mount(workflows.mcp)
|
||||
|
||||
__all__ = [
|
||||
"mcp",
|
||||
|
||||
@@ -3,12 +3,16 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ResourceError
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_storage
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_runner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
@@ -22,18 +26,18 @@ async def list_executions() -> list[dict[str, Any]]:
|
||||
:return: List of execution information dictionaries.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
execution_ids = storage.list_executions(project_path)
|
||||
execution_ids = runner.list_executions(project_path)
|
||||
|
||||
return [
|
||||
{
|
||||
"execution_id": entry["execution_id"],
|
||||
"has_results": storage.get_execution_results(project_path, entry["execution_id"]) is not None,
|
||||
"execution_id": exec_id,
|
||||
"has_results": runner.get_execution_results(project_path, exec_id) is not None,
|
||||
}
|
||||
for entry in execution_ids
|
||||
for exec_id in execution_ids
|
||||
]
|
||||
|
||||
except Exception as exception:
|
||||
@@ -49,11 +53,11 @@ async def get_execution(execution_id: str) -> dict[str, Any]:
|
||||
:return: Execution information dictionary.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
results_path = storage.get_execution_results(project_path, execution_id)
|
||||
results_path = runner.get_execution_results(project_path, execution_id)
|
||||
|
||||
if results_path is None:
|
||||
raise ResourceError(f"Execution not found: {execution_id}")
|
||||
|
||||
78
fuzzforge-mcp/src/fuzzforge_mcp/resources/modules.py
Normal file
78
fuzzforge-mcp/src/fuzzforge_mcp/resources/modules.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""Module resources for FuzzForge MCP."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ResourceError
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_runner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
from fuzzforge_runner.runner import ModuleInfo
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
|
||||
@mcp.resource("fuzzforge://modules/")
|
||||
async def list_modules() -> list[dict[str, Any]]:
|
||||
"""List all available FuzzForge modules.
|
||||
|
||||
Returns information about modules that can be executed,
|
||||
including their identifiers and availability status.
|
||||
|
||||
:return: List of module information dictionaries.
|
||||
|
||||
"""
|
||||
runner: Runner = get_runner()
|
||||
|
||||
try:
|
||||
modules: list[ModuleInfo] = runner.list_modules()
|
||||
|
||||
return [
|
||||
{
|
||||
"identifier": module.identifier,
|
||||
"description": module.description,
|
||||
"version": module.version,
|
||||
"available": module.available,
|
||||
}
|
||||
for module in modules
|
||||
]
|
||||
|
||||
except Exception as exception:
|
||||
message: str = f"Failed to list modules: {exception}"
|
||||
raise ResourceError(message) from exception
|
||||
|
||||
|
||||
@mcp.resource("fuzzforge://modules/{module_identifier}")
|
||||
async def get_module(module_identifier: str) -> dict[str, Any]:
|
||||
"""Get information about a specific module.
|
||||
|
||||
:param module_identifier: The identifier of the module to retrieve.
|
||||
:return: Module information dictionary.
|
||||
|
||||
"""
|
||||
runner: Runner = get_runner()
|
||||
|
||||
try:
|
||||
module: ModuleInfo | None = runner.get_module_info(module_identifier)
|
||||
|
||||
if module is None:
|
||||
raise ResourceError(f"Module not found: {module_identifier}")
|
||||
|
||||
return {
|
||||
"identifier": module.identifier,
|
||||
"description": module.description,
|
||||
"version": module.version,
|
||||
"available": module.available,
|
||||
}
|
||||
|
||||
except ResourceError:
|
||||
raise
|
||||
except Exception as exception:
|
||||
message: str = f"Failed to get module: {exception}"
|
||||
raise ResourceError(message) from exception
|
||||
|
||||
@@ -3,12 +3,16 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ResourceError
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_settings, get_storage
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_runner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
@@ -23,12 +27,12 @@ async def get_project() -> dict[str, Any]:
|
||||
:return: Project information dictionary.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
executions = storage.list_executions(project_path)
|
||||
assets_path = storage.get_project_assets_path(project_path)
|
||||
executions = runner.list_executions(project_path)
|
||||
assets_path = runner.storage.get_project_assets_path(project_path)
|
||||
|
||||
return {
|
||||
"path": str(project_path),
|
||||
@@ -36,7 +40,7 @@ async def get_project() -> dict[str, Any]:
|
||||
"has_assets": assets_path is not None,
|
||||
"assets_path": str(assets_path) if assets_path else None,
|
||||
"execution_count": len(executions),
|
||||
"recent_executions": executions[:10],
|
||||
"recent_executions": executions[:10], # Last 10 executions
|
||||
}
|
||||
|
||||
except Exception as exception:
|
||||
@@ -49,11 +53,13 @@ async def get_project_settings() -> dict[str, Any]:
|
||||
"""Get current FuzzForge settings.
|
||||
|
||||
Returns the active configuration for the MCP server including
|
||||
engine, storage, and hub settings.
|
||||
engine, storage, and project settings.
|
||||
|
||||
:return: Settings dictionary.
|
||||
|
||||
"""
|
||||
from fuzzforge_mcp.dependencies import get_settings
|
||||
|
||||
try:
|
||||
settings = get_settings()
|
||||
|
||||
@@ -65,10 +71,9 @@ async def get_project_settings() -> dict[str, Any]:
|
||||
"storage": {
|
||||
"path": str(settings.storage.path),
|
||||
},
|
||||
"hub": {
|
||||
"enabled": settings.hub.enabled,
|
||||
"config_path": str(settings.hub.config_path),
|
||||
"timeout": settings.hub.timeout,
|
||||
"project": {
|
||||
"path": str(settings.project.path),
|
||||
"modules_path": str(settings.modules_path),
|
||||
},
|
||||
"debug": settings.debug,
|
||||
}
|
||||
|
||||
53
fuzzforge-mcp/src/fuzzforge_mcp/resources/workflows.py
Normal file
53
fuzzforge-mcp/src/fuzzforge_mcp/resources/workflows.py
Normal file
@@ -0,0 +1,53 @@
|
||||
"""Workflow resources for FuzzForge MCP.
|
||||
|
||||
Note: In FuzzForge OSS, workflows are defined at runtime rather than
|
||||
stored. This resource provides documentation about workflow capabilities.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
|
||||
@mcp.resource("fuzzforge://workflows/help")
|
||||
async def get_workflow_help() -> dict[str, Any]:
|
||||
"""Get help information about creating workflows.
|
||||
|
||||
Workflows in FuzzForge OSS are defined at execution time rather
|
||||
than stored. Use the execute_workflow tool with step definitions.
|
||||
|
||||
:return: Workflow documentation.
|
||||
|
||||
"""
|
||||
return {
|
||||
"description": "Workflows chain multiple modules together",
|
||||
"usage": "Use the execute_workflow tool with step definitions",
|
||||
"example": {
|
||||
"workflow_name": "security-audit",
|
||||
"steps": [
|
||||
{
|
||||
"module": "compile-contracts",
|
||||
"configuration": {"solc_version": "0.8.0"},
|
||||
},
|
||||
{
|
||||
"module": "slither",
|
||||
"configuration": {},
|
||||
},
|
||||
{
|
||||
"module": "echidna",
|
||||
"configuration": {"test_limit": 10000},
|
||||
},
|
||||
],
|
||||
},
|
||||
"step_format": {
|
||||
"module": "Module identifier (required)",
|
||||
"configuration": "Module-specific configuration (optional)",
|
||||
"name": "Step name for logging (optional)",
|
||||
},
|
||||
}
|
||||
@@ -1,113 +0,0 @@
|
||||
"""FuzzForge MCP Server settings.
|
||||
|
||||
Standalone settings for the MCP server. Replaces the previous dependency
|
||||
on fuzzforge-runner Settings now that the module system has been removed
|
||||
and FuzzForge operates exclusively through MCP hub tools.
|
||||
|
||||
All settings can be configured via environment variables with the prefix
|
||||
``FUZZFORGE_``. Nested settings use double-underscore as delimiter.
|
||||
|
||||
Example:
|
||||
``FUZZFORGE_ENGINE__TYPE=docker``
|
||||
``FUZZFORGE_STORAGE__PATH=/data/fuzzforge``
|
||||
``FUZZFORGE_HUB__CONFIG_PATH=/path/to/hub-config.json``
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import StrEnum
|
||||
from pathlib import Path
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from pydantic_settings import BaseSettings, SettingsConfigDict
|
||||
|
||||
|
||||
class EngineType(StrEnum):
|
||||
"""Supported container engine types."""
|
||||
|
||||
DOCKER = "docker"
|
||||
PODMAN = "podman"
|
||||
|
||||
|
||||
class EngineSettings(BaseModel):
|
||||
"""Container engine configuration."""
|
||||
|
||||
#: Type of container engine to use.
|
||||
type: EngineType = EngineType.DOCKER
|
||||
|
||||
#: Path to the container engine socket.
|
||||
socket: str = Field(default="")
|
||||
|
||||
#: Custom graph root for Podman storage.
|
||||
graphroot: Path = Field(default=Path.home() / ".fuzzforge" / "containers" / "storage")
|
||||
|
||||
#: Custom run root for Podman runtime state.
|
||||
runroot: Path = Field(default=Path.home() / ".fuzzforge" / "containers" / "run")
|
||||
|
||||
|
||||
class StorageSettings(BaseModel):
|
||||
"""Storage configuration for local filesystem storage."""
|
||||
|
||||
#: Base path for local storage.
|
||||
path: Path = Field(default=Path.home() / ".fuzzforge" / "storage")
|
||||
|
||||
|
||||
class ProjectSettings(BaseModel):
|
||||
"""Project configuration."""
|
||||
|
||||
#: Default path for FuzzForge projects.
|
||||
default_path: Path = Field(default=Path.home() / ".fuzzforge" / "projects")
|
||||
|
||||
|
||||
class HubSettings(BaseModel):
|
||||
"""MCP Hub configuration for external tool servers.
|
||||
|
||||
Controls the hub that bridges FuzzForge with external MCP servers
|
||||
(e.g., mcp-security-hub). AI agents discover and execute tools
|
||||
from registered MCP servers.
|
||||
|
||||
Configure via environment variables:
|
||||
``FUZZFORGE_HUB__ENABLED=true``
|
||||
``FUZZFORGE_HUB__CONFIG_PATH=/path/to/hub-config.json``
|
||||
``FUZZFORGE_HUB__TIMEOUT=300``
|
||||
"""
|
||||
|
||||
#: Whether the MCP hub is enabled.
|
||||
enabled: bool = Field(default=True)
|
||||
|
||||
#: Path to the hub configuration JSON file.
|
||||
config_path: Path = Field(default=Path.home() / ".fuzzforge" / "hub-config.json")
|
||||
|
||||
#: Default timeout in seconds for hub tool execution.
|
||||
timeout: int = Field(default=300)
|
||||
|
||||
|
||||
class Settings(BaseSettings):
|
||||
"""FuzzForge MCP Server settings.
|
||||
|
||||
Settings can be configured via environment variables with the prefix
|
||||
``FUZZFORGE_``. Nested settings use double-underscore as delimiter.
|
||||
|
||||
"""
|
||||
|
||||
model_config = SettingsConfigDict(
|
||||
case_sensitive=False,
|
||||
env_nested_delimiter="__",
|
||||
env_prefix="FUZZFORGE_",
|
||||
)
|
||||
|
||||
#: Container engine settings.
|
||||
engine: EngineSettings = Field(default_factory=EngineSettings)
|
||||
|
||||
#: Storage settings.
|
||||
storage: StorageSettings = Field(default_factory=StorageSettings)
|
||||
|
||||
#: Project settings.
|
||||
project: ProjectSettings = Field(default_factory=ProjectSettings)
|
||||
|
||||
#: MCP Hub settings.
|
||||
hub: HubSettings = Field(default_factory=HubSettings)
|
||||
|
||||
#: Enable debug logging.
|
||||
debug: bool = False
|
||||
@@ -1,275 +0,0 @@
|
||||
"""FuzzForge MCP Server - Local project storage.
|
||||
|
||||
Lightweight project storage for managing `.fuzzforge/` directories,
|
||||
execution results, and project configuration. Extracted from the
|
||||
former fuzzforge-runner storage module.
|
||||
|
||||
Storage is placed directly in the project directory as `.fuzzforge/`
|
||||
for maximum visibility and ease of debugging.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from tarfile import open as Archive # noqa: N812
|
||||
from typing import Any
|
||||
from uuid import uuid4
|
||||
|
||||
logger = logging.getLogger("fuzzforge-mcp")
|
||||
|
||||
#: Name of the FuzzForge storage directory within projects.
|
||||
FUZZFORGE_DIR_NAME: str = ".fuzzforge"
|
||||
|
||||
#: Standard results archive filename.
|
||||
RESULTS_ARCHIVE_FILENAME: str = "results.tar.gz"
|
||||
|
||||
|
||||
class StorageError(Exception):
|
||||
"""Raised when a storage operation fails."""
|
||||
|
||||
|
||||
class LocalStorage:
|
||||
"""Local filesystem storage backend for FuzzForge.
|
||||
|
||||
Provides lightweight storage for project configuration and
|
||||
execution results tracking.
|
||||
|
||||
Directory structure (inside project directory)::
|
||||
|
||||
{project_path}/.fuzzforge/
|
||||
config.json # Project config (source path reference)
|
||||
runs/ # Execution results
|
||||
{execution_id}/
|
||||
results.tar.gz
|
||||
|
||||
"""
|
||||
|
||||
_base_path: Path
|
||||
|
||||
def __init__(self, base_path: Path) -> None:
|
||||
"""Initialize storage backend.
|
||||
|
||||
:param base_path: Root directory for global storage (fallback).
|
||||
|
||||
"""
|
||||
self._base_path = base_path
|
||||
self._base_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
def _get_project_path(self, project_path: Path) -> Path:
|
||||
"""Get the .fuzzforge storage path for a project.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:returns: Storage path (.fuzzforge inside project).
|
||||
|
||||
"""
|
||||
return project_path / FUZZFORGE_DIR_NAME
|
||||
|
||||
def init_project(self, project_path: Path) -> Path:
|
||||
"""Initialize storage for a new project.
|
||||
|
||||
Creates a .fuzzforge/ directory inside the project for storing
|
||||
configuration and execution results.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:returns: Path to the project storage directory.
|
||||
|
||||
"""
|
||||
storage_path = self._get_project_path(project_path)
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
(storage_path / "runs").mkdir(parents=True, exist_ok=True)
|
||||
(storage_path / "output").mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Create .gitignore to avoid committing large files
|
||||
gitignore_path = storage_path / ".gitignore"
|
||||
if not gitignore_path.exists():
|
||||
gitignore_path.write_text(
|
||||
"# FuzzForge storage - ignore large/temporary files\n"
|
||||
"runs/\n"
|
||||
"output/\n"
|
||||
"!config.json\n"
|
||||
)
|
||||
|
||||
logger.info("Initialized project storage: %s", storage_path)
|
||||
return storage_path
|
||||
|
||||
def get_project_assets_path(self, project_path: Path) -> Path | None:
|
||||
"""Get the configured source path for a project.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:returns: Path to source directory, or None if not configured.
|
||||
|
||||
"""
|
||||
storage_path = self._get_project_path(project_path)
|
||||
config_path = storage_path / "config.json"
|
||||
|
||||
if config_path.exists():
|
||||
config = json.loads(config_path.read_text())
|
||||
source_path = config.get("source_path")
|
||||
if source_path:
|
||||
path = Path(source_path)
|
||||
if path.exists():
|
||||
return path
|
||||
|
||||
return None
|
||||
|
||||
def set_project_assets(self, project_path: Path, assets_path: Path) -> Path:
|
||||
"""Set the source path for a project (reference only, no copying).
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:param assets_path: Path to source directory.
|
||||
:returns: The assets path (unchanged).
|
||||
:raises StorageError: If path doesn't exist.
|
||||
|
||||
"""
|
||||
if not assets_path.exists():
|
||||
msg = f"Assets path does not exist: {assets_path}"
|
||||
raise StorageError(msg)
|
||||
|
||||
assets_path = assets_path.resolve()
|
||||
|
||||
storage_path = self._get_project_path(project_path)
|
||||
storage_path.mkdir(parents=True, exist_ok=True)
|
||||
config_path = storage_path / "config.json"
|
||||
|
||||
config: dict[str, Any] = {}
|
||||
if config_path.exists():
|
||||
config = json.loads(config_path.read_text())
|
||||
|
||||
config["source_path"] = str(assets_path)
|
||||
config_path.write_text(json.dumps(config, indent=2))
|
||||
|
||||
logger.info("Set project assets: %s -> %s", project_path.name, assets_path)
|
||||
return assets_path
|
||||
|
||||
def get_project_output_path(self, project_path: Path) -> Path | None:
|
||||
"""Get the output directory path for a project.
|
||||
|
||||
Returns the path to the writable output directory that is mounted
|
||||
into hub tool containers at /app/output.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:returns: Path to output directory, or None if project not initialized.
|
||||
|
||||
"""
|
||||
output_path = self._get_project_path(project_path) / "output"
|
||||
if output_path.exists():
|
||||
return output_path
|
||||
return None
|
||||
|
||||
def record_execution(
|
||||
self,
|
||||
project_path: Path,
|
||||
server_name: str,
|
||||
tool_name: str,
|
||||
arguments: dict[str, Any],
|
||||
result: dict[str, Any],
|
||||
) -> str:
|
||||
"""Record an execution result to the project's runs directory.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:param server_name: Hub server name.
|
||||
:param tool_name: Tool name that was executed.
|
||||
:param arguments: Arguments passed to the tool.
|
||||
:param result: Execution result dictionary.
|
||||
:returns: Execution ID.
|
||||
|
||||
"""
|
||||
execution_id = f"{datetime.now(tz=UTC).strftime('%Y%m%dT%H%M%SZ')}_{uuid4().hex[:8]}"
|
||||
run_dir = self._get_project_path(project_path) / "runs" / execution_id
|
||||
run_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
metadata = {
|
||||
"execution_id": execution_id,
|
||||
"timestamp": datetime.now(tz=UTC).isoformat(),
|
||||
"server": server_name,
|
||||
"tool": tool_name,
|
||||
"arguments": arguments,
|
||||
"success": result.get("success", False),
|
||||
"result": result,
|
||||
}
|
||||
(run_dir / "metadata.json").write_text(json.dumps(metadata, indent=2, default=str))
|
||||
|
||||
logger.info("Recorded execution %s: %s:%s", execution_id, server_name, tool_name)
|
||||
return execution_id
|
||||
|
||||
def list_executions(self, project_path: Path) -> list[dict[str, Any]]:
|
||||
"""List all executions for a project with summary metadata.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:returns: List of execution summaries (id, timestamp, server, tool, success).
|
||||
|
||||
"""
|
||||
runs_dir = self._get_project_path(project_path) / "runs"
|
||||
if not runs_dir.exists():
|
||||
return []
|
||||
|
||||
executions: list[dict[str, Any]] = []
|
||||
for run_dir in sorted(runs_dir.iterdir(), reverse=True):
|
||||
if not run_dir.is_dir():
|
||||
continue
|
||||
meta_path = run_dir / "metadata.json"
|
||||
if meta_path.exists():
|
||||
meta = json.loads(meta_path.read_text())
|
||||
executions.append({
|
||||
"execution_id": meta.get("execution_id", run_dir.name),
|
||||
"timestamp": meta.get("timestamp"),
|
||||
"server": meta.get("server"),
|
||||
"tool": meta.get("tool"),
|
||||
"success": meta.get("success"),
|
||||
})
|
||||
else:
|
||||
executions.append({"execution_id": run_dir.name})
|
||||
return executions
|
||||
|
||||
def get_execution_results(
|
||||
self,
|
||||
project_path: Path,
|
||||
execution_id: str,
|
||||
) -> Path | None:
|
||||
"""Retrieve execution results path.
|
||||
|
||||
:param project_path: Path to the project directory.
|
||||
:param execution_id: Execution ID.
|
||||
:returns: Path to results archive, or None if not found.
|
||||
|
||||
"""
|
||||
storage_path = self._get_project_path(project_path)
|
||||
|
||||
# Try direct path
|
||||
results_path = storage_path / "runs" / execution_id / RESULTS_ARCHIVE_FILENAME
|
||||
if results_path.exists():
|
||||
return results_path
|
||||
|
||||
# Search in all run directories
|
||||
runs_dir = storage_path / "runs"
|
||||
if runs_dir.exists():
|
||||
for run_dir in runs_dir.iterdir():
|
||||
if run_dir.is_dir() and execution_id in run_dir.name:
|
||||
candidate = run_dir / RESULTS_ARCHIVE_FILENAME
|
||||
if candidate.exists():
|
||||
return candidate
|
||||
|
||||
return None
|
||||
|
||||
def extract_results(self, results_path: Path, destination: Path) -> Path:
|
||||
"""Extract a results archive to a destination directory.
|
||||
|
||||
:param results_path: Path to the results archive.
|
||||
:param destination: Directory to extract to.
|
||||
:returns: Path to extracted directory.
|
||||
:raises StorageError: If extraction fails.
|
||||
|
||||
"""
|
||||
try:
|
||||
destination.mkdir(parents=True, exist_ok=True)
|
||||
with Archive(results_path, "r:gz") as tar:
|
||||
tar.extractall(path=destination) # noqa: S202
|
||||
logger.info("Extracted results: %s -> %s", results_path, destination)
|
||||
return destination
|
||||
except Exception as exc:
|
||||
msg = f"Failed to extract results: {exc}"
|
||||
raise StorageError(msg) from exc
|
||||
@@ -2,12 +2,13 @@
|
||||
|
||||
from fastmcp import FastMCP
|
||||
|
||||
from fuzzforge_mcp.tools import hub, projects
|
||||
from fuzzforge_mcp.tools import modules, projects, workflows
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
mcp.mount(modules.mcp)
|
||||
mcp.mount(projects.mcp)
|
||||
mcp.mount(hub.mcp)
|
||||
mcp.mount(workflows.mcp)
|
||||
|
||||
__all__ = [
|
||||
"mcp",
|
||||
|
||||
@@ -1,683 +0,0 @@
|
||||
"""MCP Hub tools for FuzzForge MCP server.
|
||||
|
||||
This module provides tools for interacting with external MCP servers
|
||||
through the FuzzForge hub. AI agents can:
|
||||
- List available hub servers and their tools
|
||||
- Discover tools from hub servers
|
||||
- Execute hub tools
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ToolError
|
||||
from fuzzforge_common.hub import HubExecutor, HubServerConfig, HubServerType
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_settings, get_storage
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
# Name of the convention tool that hub servers can implement to provide
|
||||
# rich usage context for AI agents (known issues, workflow tips, rules, etc.).
|
||||
_AGENT_CONTEXT_TOOL = "get_agent_context"
|
||||
|
||||
# Global hub executor instance (lazy initialization)
|
||||
_hub_executor: HubExecutor | None = None
|
||||
|
||||
|
||||
async def _fetch_agent_context(
|
||||
executor: HubExecutor,
|
||||
server_name: str,
|
||||
tools: list[Any],
|
||||
) -> str | None:
|
||||
"""Call get_agent_context if the server provides it.
|
||||
|
||||
Returns the context string, or None if the server doesn't implement
|
||||
the convention or the call fails.
|
||||
"""
|
||||
if not any(t.name == _AGENT_CONTEXT_TOOL for t in tools):
|
||||
return None
|
||||
try:
|
||||
result = await executor.execute_tool(
|
||||
identifier=f"hub:{server_name}:{_AGENT_CONTEXT_TOOL}",
|
||||
arguments={},
|
||||
)
|
||||
if result.success and result.result:
|
||||
content = result.result.get("content", [])
|
||||
if content and isinstance(content, list):
|
||||
text: str = content[0].get("text", "")
|
||||
return text
|
||||
except Exception: # noqa: BLE001, S110 - best-effort context fetch
|
||||
pass
|
||||
return None
|
||||
|
||||
|
||||
def _get_hub_executor() -> HubExecutor:
|
||||
"""Get or create the hub executor instance.
|
||||
|
||||
:returns: Hub executor instance.
|
||||
:raises ToolError: If hub is disabled.
|
||||
|
||||
"""
|
||||
global _hub_executor
|
||||
|
||||
settings = get_settings()
|
||||
|
||||
if not settings.hub.enabled:
|
||||
msg = "MCP Hub is disabled. Enable it via FUZZFORGE_HUB__ENABLED=true"
|
||||
raise ToolError(msg)
|
||||
|
||||
if _hub_executor is None:
|
||||
config_path = settings.hub.config_path
|
||||
_hub_executor = HubExecutor(
|
||||
config_path=config_path,
|
||||
timeout=settings.hub.timeout,
|
||||
)
|
||||
|
||||
return _hub_executor
|
||||
|
||||
|
||||
@mcp.tool
|
||||
async def list_hub_servers(category: str | None = None) -> dict[str, Any]:
|
||||
"""List all registered MCP hub servers.
|
||||
|
||||
Returns information about configured hub servers, including
|
||||
their connection type, status, and discovered tool count.
|
||||
|
||||
:param category: Optional category to filter by (e.g. "binary-analysis",
|
||||
"web-security", "reconnaissance"). Only servers in this category
|
||||
are returned.
|
||||
:return: Dictionary with list of hub servers.
|
||||
|
||||
"""
|
||||
try:
|
||||
executor = _get_hub_executor()
|
||||
servers = executor.list_servers()
|
||||
|
||||
if category:
|
||||
servers = [s for s in servers if s.get("category") == category]
|
||||
|
||||
return {
|
||||
"servers": servers,
|
||||
"count": len(servers),
|
||||
"enabled_count": len([s for s in servers if s["enabled"]]),
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
if isinstance(e, ToolError):
|
||||
raise
|
||||
msg = f"Failed to list hub servers: {e}"
|
||||
raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
|
||||
async def discover_hub_tools(server_name: str | None = None) -> dict[str, Any]:
|
||||
"""Discover tools from hub servers.
|
||||
|
||||
Connects to hub servers and retrieves their available tools.
|
||||
If server_name is provided, only discovers from that server.
|
||||
Otherwise discovers from all enabled servers.
|
||||
|
||||
:param server_name: Optional specific server to discover from.
|
||||
:return: Dictionary with discovered tools.
|
||||
|
||||
"""
|
||||
try:
|
||||
executor = _get_hub_executor()
|
||||
|
||||
if server_name:
|
||||
tools = await executor.discover_server_tools(server_name)
|
||||
|
||||
# Convention: auto-fetch agent context if server provides it.
|
||||
agent_context = await _fetch_agent_context(executor, server_name, tools)
|
||||
|
||||
# Hide the convention tool from the agent's tool list.
|
||||
visible_tools = [t for t in tools if t.name != "get_agent_context"]
|
||||
|
||||
result: dict[str, Any] = {
|
||||
"server": server_name,
|
||||
"tools": [
|
||||
{
|
||||
"identifier": t.identifier,
|
||||
"name": t.name,
|
||||
"description": t.description,
|
||||
"parameters": [p.model_dump() for p in t.parameters],
|
||||
}
|
||||
for t in visible_tools
|
||||
],
|
||||
"count": len(visible_tools),
|
||||
}
|
||||
if agent_context:
|
||||
result["agent_context"] = agent_context
|
||||
return result
|
||||
else:
|
||||
results = await executor.discover_all_tools()
|
||||
all_tools = []
|
||||
contexts: dict[str, str] = {}
|
||||
for server, tools in results.items():
|
||||
ctx = await _fetch_agent_context(executor, server, tools)
|
||||
if ctx:
|
||||
contexts[server] = ctx
|
||||
for tool in tools:
|
||||
if tool.name == "get_agent_context":
|
||||
continue
|
||||
all_tools.append({
|
||||
"identifier": tool.identifier,
|
||||
"name": tool.name,
|
||||
"server": server,
|
||||
"description": tool.description,
|
||||
"parameters": [p.model_dump() for p in tool.parameters],
|
||||
})
|
||||
|
||||
result = {
|
||||
"servers_discovered": len(results),
|
||||
"tools": all_tools,
|
||||
"count": len(all_tools),
|
||||
}
|
||||
if contexts:
|
||||
result["agent_contexts"] = contexts
|
||||
return result
|
||||
|
||||
except Exception as e:
|
||||
if isinstance(e, ToolError):
|
||||
raise
|
||||
msg = f"Failed to discover hub tools: {e}"
|
||||
raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
|
||||
async def list_hub_tools() -> dict[str, Any]:
|
||||
"""List all discovered hub tools.
|
||||
|
||||
Returns tools that have been previously discovered from hub servers.
|
||||
Run discover_hub_tools first if no tools are listed.
|
||||
|
||||
:return: Dictionary with list of discovered tools.
|
||||
|
||||
"""
|
||||
try:
|
||||
executor = _get_hub_executor()
|
||||
tools = executor.list_tools()
|
||||
|
||||
return {
|
||||
"tools": tools,
|
||||
"count": len(tools),
|
||||
}
|
||||
|
||||
except Exception as e:
|
||||
if isinstance(e, ToolError):
|
||||
raise
|
||||
msg = f"Failed to list hub tools: {e}"
|
||||
raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def execute_hub_tool(
    identifier: str,
    arguments: dict[str, Any] | None = None,
    timeout: int | None = None,
) -> dict[str, Any]:
    """Execute a tool from a hub server.

    :param identifier: Tool identifier (format: hub:server:tool or server:tool).
    :param arguments: Tool arguments matching the tool's input schema.
    :param timeout: Optional execution timeout in seconds.
    :return: Tool execution result.

    Example identifiers:
    - "hub:binwalk-mcp:binwalk_scan"
    - "hub:yara-mcp:yara_scan_with_rules"
    - "hub:nmap:nmap_scan"

    FILE ACCESS — if set_project_assets was called, the assets directory is
    mounted read-only inside the container at two standard paths:
    - /app/uploads/ (used by binwalk, and tools with UPLOAD_DIR)
    - /app/samples/ (used by yara, capa, and tools with SAMPLES_DIR)
    Always use /app/uploads/<filename> or /app/samples/<filename> when
    passing file paths to hub tools — do NOT use the host path.

    Tool outputs are persisted to a writable shared volume:
    - /app/output/ (writable — extraction results, reports, etc.)
    Files written here survive container destruction and are available
    to subsequent tool calls. The host path is .fuzzforge/output/.

    """
    try:
        executor = _get_hub_executor()

        # Inject project assets as Docker volume mounts if configured.
        # Mounts the assets directory at the standard paths used by hub tools:
        #   /app/uploads — binwalk, and other tools that use UPLOAD_DIR
        #   /app/samples — yara, capa, and other tools that use SAMPLES_DIR
        #   /app/output  — writable volume for tool outputs (persists across calls)
        # This whole step is best-effort: any failure falls through to an
        # empty mount list rather than blocking execution.
        extra_volumes: list[str] = []
        try:
            storage = get_storage()
            project_path = get_project_path()
            assets_path = storage.get_project_assets_path(project_path)
            if assets_path:
                assets_str = str(assets_path)
                # Same host directory mounted at both conventional guest paths.
                extra_volumes = [
                    f"{assets_str}:/app/uploads:ro",
                    f"{assets_str}:/app/samples:ro",
                ]
            output_path = storage.get_project_output_path(project_path)
            if output_path:
                extra_volumes.append(f"{output_path!s}:/app/output:rw")
        except Exception:  # noqa: BLE001 - never block tool execution due to asset injection failure
            extra_volumes = []

        # extra_volumes=None (not []) signals "no extra mounts" to the executor.
        result = await executor.execute_tool(
            identifier=identifier,
            arguments=arguments or {},
            timeout=timeout,
            extra_volumes=extra_volumes or None,
        )

        # Record execution history for list_executions / get_execution_results.
        # NOTE(review): `result` is assumed to expose server_name / tool_name /
        # to_dict() — confirm against the executor's result type.
        try:
            storage = get_storage()
            project_path = get_project_path()
            storage.record_execution(
                project_path=project_path,
                server_name=result.server_name,
                tool_name=result.tool_name,
                arguments=arguments or {},
                result=result.to_dict(),
            )
        except Exception:  # noqa: BLE001, S110 - never fail the tool call due to recording issues
            pass

        return result.to_dict()

    except Exception as e:
        # ToolError raised anywhere above passes through unchanged; everything
        # else is wrapped so the caller always sees a ToolError.
        if isinstance(e, ToolError):
            raise
        msg = f"Hub tool execution failed: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def get_hub_tool_schema(identifier: str) -> dict[str, Any]:
    """Get the input schema for a hub tool.

    Returns the JSON Schema that describes the tool's expected arguments.

    :param identifier: Tool identifier (format: hub:server:tool or server:tool).
    :raises ToolError: If the tool is unknown or the lookup fails.
    :return: JSON Schema for the tool's input.

    """
    try:
        executor = _get_hub_executor()
        # Returns None (rather than raising) when the tool was never discovered.
        schema = executor.get_tool_schema(identifier)

        if schema is None:
            msg = f"Tool '{identifier}' not found. Run discover_hub_tools first."
            raise ToolError(msg)

        return {
            "identifier": identifier,
            "schema": schema,
        }

    # A dedicated clause replaces the previous `isinstance(e, ToolError)` check
    # inside the generic handler — identical behavior, clearer control flow.
    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to get tool schema: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def add_hub_server(
    name: str,
    server_type: str,
    image: str | None = None,
    command: list[str] | None = None,
    url: str | None = None,
    category: str | None = None,
    description: str | None = None,
    capabilities: list[str] | None = None,
    environment: dict[str, str] | None = None,
) -> dict[str, Any]:
    """Add a new MCP server to the hub.

    Register a new external MCP server that can be used for tool discovery
    and execution. Servers can be Docker images, local commands, or SSE endpoints.

    :param name: Unique name for the server (e.g., "nmap", "nuclei").
    :param server_type: Connection type ("docker", "command", or "sse").
    :param image: Docker image name (for docker type).
    :param command: Command and args (for command type).
    :param url: SSE endpoint URL (for sse type).
    :param category: Category for grouping (e.g., "reconnaissance").
    :param description: Human-readable description.
    :param capabilities: Docker capabilities to add (e.g., ["NET_RAW"]).
    :param environment: Environment variables to pass.
    :return: Information about the added server.

    Examples:
    - Docker: add_hub_server("nmap", "docker", image="nmap-mcp:latest", capabilities=["NET_RAW"])
    - Command: add_hub_server("custom", "command", command=["python", "server.py"])

    """
    try:
        executor = _get_hub_executor()

        # Parse server type; `from None` suppresses the raw ValueError context
        # so the user only sees the friendly message.
        try:
            stype = HubServerType(server_type)
        except ValueError:
            msg = f"Invalid server type: {server_type}. Use 'docker', 'command', or 'sse'."
            raise ToolError(msg) from None

        # Validate required fields based on type — each connection type has
        # exactly one mandatory locator (image / command / url).
        if stype == HubServerType.DOCKER and not image:
            msg = "Docker image required for docker type"
            raise ToolError(msg)
        if stype == HubServerType.COMMAND and not command:
            msg = "Command required for command type"
            raise ToolError(msg)
        if stype == HubServerType.SSE and not url:
            msg = "URL required for sse type"
            raise ToolError(msg)

        # Optional collections are normalized to empty containers so the
        # config never carries None where an iterable is expected.
        config = HubServerConfig(
            name=name,
            type=stype,
            image=image,
            command=command,
            url=url,
            category=category,
            description=description,
            capabilities=capabilities or [],
            environment=environment or {},
        )

        server = executor.add_server(config)

        return {
            "success": True,
            "server": {
                "name": server.name,
                "identifier": server.identifier,
                "type": server.config.type.value,
                "enabled": server.config.enabled,
            },
            "message": f"Server '{name}' added. Use discover_hub_tools('{name}') to discover its tools.",
        }

    # NOTE(review): add_server presumably raises ValueError on duplicate or
    # invalid names — confirm; that is what this clause translates.
    except ValueError as e:
        msg = f"Failed to add server: {e}"
        raise ToolError(msg) from e
    except Exception as e:
        if isinstance(e, ToolError):
            raise
        msg = f"Failed to add hub server: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def start_hub_server(server_name: str) -> dict[str, Any]:
    """Start a persistent container session for a hub server.

    Starts a Docker container that stays running between tool calls,
    allowing stateful interactions. Tools are auto-discovered on start.

    Use this for servers like radare2 or ghidra where you want to
    keep an analysis session open across multiple tool calls.

    After starting, use execute_hub_tool as normal - calls will be
    routed to the persistent container automatically.

    :param server_name: Name of the hub server to start (e.g., "radare2-mcp").
    :return: Session status with container name and start time.

    """
    try:
        executor = _get_hub_executor()

        # Inject project assets as Docker volume mounts (same logic as execute_hub_tool).
        # Best-effort: a failure here falls through to an empty mount list.
        extra_volumes: list[str] = []
        try:
            storage = get_storage()
            project_path = get_project_path()
            assets_path = storage.get_project_assets_path(project_path)
            if assets_path:
                assets_str = str(assets_path)
                # Same host directory at both conventional read-only guest paths.
                extra_volumes = [
                    f"{assets_str}:/app/uploads:ro",
                    f"{assets_str}:/app/samples:ro",
                ]
            output_path = storage.get_project_output_path(project_path)
            if output_path:
                extra_volumes.append(f"{output_path!s}:/app/output:rw")
        except Exception:  # noqa: BLE001 - never block server start due to asset injection failure
            extra_volumes = []

        # Starting the container also triggers tool discovery; the result dict
        # is expected to carry "tools" and "tool_count" keys.
        result = await executor.start_persistent_server(server_name, extra_volumes=extra_volumes or None)

        return {
            "success": True,
            "session": result,
            "tools": result.get("tools", []),
            "tool_count": result.get("tool_count", 0),
            "message": (
                f"Persistent session started for '{server_name}'. "
                f"Discovered {result.get('tool_count', 0)} tools. "
                "Use execute_hub_tool to call them — they will reuse this container. "
                f"Stop with stop_hub_server('{server_name}') when done."
            ),
        }

    # NOTE(review): start_persistent_server presumably raises ValueError for an
    # unknown server name — confirm; that is what this clause translates.
    except ValueError as e:
        msg = f"Server not found: {e}"
        raise ToolError(msg) from e
    except Exception as e:
        if isinstance(e, ToolError):
            raise
        msg = f"Failed to start persistent server: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def stop_hub_server(server_name: str) -> dict[str, Any]:
    """Stop a persistent container session for a hub server.

    Terminates the running Docker container and cleans up resources.
    After stopping, tool calls will fall back to ephemeral mode
    (a new container per call).

    :param server_name: Name of the hub server to stop.
    :return: Result indicating if the session was stopped.

    """
    try:
        hub = _get_hub_executor()
        was_stopped = await hub.stop_persistent_server(server_name)

        # Build the message for the matching outcome; "success" mirrors
        # whether an active session was actually found and removed.
        if was_stopped:
            message = f"Persistent session for '{server_name}' stopped and container removed."
        else:
            message = f"No active persistent session found for '{server_name}'."
        return {"success": was_stopped, "message": message}

    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to stop persistent server: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def hub_server_status(server_name: str | None = None) -> dict[str, Any]:
    """Get status of persistent hub server sessions.

    If server_name is provided, returns status for that specific server.
    Otherwise returns status for all active persistent sessions.

    :param server_name: Optional specific server to check.
    :return: Session status information.

    """
    try:
        hub = _get_hub_executor()

        # No (or empty) server name: report every active persistent session.
        if not server_name:
            sessions = hub.list_persistent_sessions()
            return {"active_sessions": sessions, "count": len(sessions)}

        # Specific server: truthy status means an active session exists.
        status = hub.get_persistent_status(server_name)
        if status:
            return {"active": True, "session": status}
        return {
            "active": False,
            "message": f"No active persistent session for '{server_name}'.",
        }

    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to get server status: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Continuous mode tools
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
|
||||
@mcp.tool
async def start_continuous_hub_tool(
    server_name: str,
    start_tool: str,
    arguments: dict[str, Any] | None = None,
) -> dict[str, Any]:
    """Start a continuous/background tool on a hub server.

    Automatically starts a persistent container if not already running,
    then calls the server's start tool (e.g., cargo_fuzz_start) which
    launches a background process and returns a session_id.

    The tool runs indefinitely until stopped with stop_continuous_hub_tool.
    Use get_continuous_hub_status to monitor progress.

    Example workflow for continuous cargo fuzzing:
    1. start_continuous_hub_tool("cargo-fuzzer-mcp", "cargo_fuzz_start", {"project_path": "/data/myproject"})
    2. get_continuous_hub_status(session_id) -- poll every 10-30s
    3. stop_continuous_hub_tool(session_id) -- when done

    :param server_name: Hub server name (e.g., "cargo-fuzzer-mcp").
    :param start_tool: Name of the start tool on the server.
    :param arguments: Arguments for the start tool.
    :return: Start result including session_id for monitoring.

    """
    try:
        hub = _get_hub_executor()
        # The server's response already carries session_id, status, targets,
        # and a message — return it verbatim.
        return await hub.start_continuous_tool(
            server_name=server_name,
            start_tool=start_tool,
            arguments=arguments or {},
        )
    except ValueError as e:
        msg = f"Server not found: {e}"
        raise ToolError(msg) from e
    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to start continuous tool: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def get_continuous_hub_status(session_id: str) -> dict[str, Any]:
    """Get live status of a continuous hub tool session.

    Returns current metrics, progress, and recent output from the
    running tool. Call periodically (every 10-30 seconds) to monitor.

    :param session_id: Session ID returned by start_continuous_hub_tool.
    :return: Current status with metrics (executions, coverage, crashes, etc.).

    """
    try:
        hub = _get_hub_executor()
        status = await hub.get_continuous_tool_status(session_id)
        return status
    except ValueError as e:
        # Unknown session IDs surface as ValueError; pass the message through.
        msg = str(e)
        raise ToolError(msg) from e
    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to get continuous status: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def stop_continuous_hub_tool(session_id: str) -> dict[str, Any]:
    """Stop a running continuous hub tool session.

    Gracefully stops the background process and returns final results
    including total metrics and any artifacts (crash files, etc.).

    :param session_id: Session ID of the session to stop.
    :return: Final metrics and results summary.

    """
    try:
        hub = _get_hub_executor()
        final = await hub.stop_continuous_tool(session_id)
        return final
    except ValueError as e:
        # Unknown session IDs surface as ValueError; pass the message through.
        msg = str(e)
        raise ToolError(msg) from e
    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to stop continuous tool: {e}"
        raise ToolError(msg) from e
|
||||
|
||||
|
||||
@mcp.tool
async def list_continuous_hub_sessions() -> dict[str, Any]:
    """List all active and recent continuous hub tool sessions.

    :return: List of sessions with their status and server info.

    """
    try:
        active = _get_hub_executor().list_continuous_sessions()
        return {"sessions": active, "count": len(active)}
    except ToolError:
        raise
    except Exception as e:
        msg = f"Failed to list continuous sessions: {e}"
        raise ToolError(msg) from e
|
||||
348
fuzzforge-mcp/src/fuzzforge_mcp/tools/modules.py
Normal file
348
fuzzforge-mcp/src/fuzzforge_mcp/tools/modules.py
Normal file
@@ -0,0 +1,348 @@
|
||||
"""Module tools for FuzzForge MCP."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ToolError
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_runner, get_settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
from fuzzforge_runner.orchestrator import StepResult
|
||||
|
||||
|
||||
# Sub-application on which the module tools below are registered via
# @mcp.tool; expected to be mounted into the main FastMCP server.
mcp: FastMCP = FastMCP()

# Track running background executions: session_id -> execution metadata
# (module, configuration, container_id, input_dir, status, timestamps).
# In-process only — this state is lost when the MCP server restarts.
_background_executions: dict[str, dict[str, Any]] = {}
|
||||
|
||||
|
||||
@mcp.tool
async def list_modules() -> dict[str, Any]:
    """List all available FuzzForge modules.

    Returns information about modules that can be executed,
    including their identifiers and availability status.

    :return: Dictionary with list of available modules and their details.

    """
    try:
        runner: Runner = get_runner()
        settings = get_settings()

        # Use the engine abstraction to list images; the default filter
        # matches locally-built fuzzforge-* module images.
        discovered = runner.list_module_images(filter_prefix="fuzzforge-")

        entries: list[dict[str, Any]] = []
        for module in discovered:
            tag = module.version or "latest"
            entries.append({
                "identifier": module.identifier,
                "image": f"{module.identifier}:{tag}",
                "available": module.available,
            })

        return {
            "modules": entries,
            "count": len(entries),
            "container_engine": settings.engine.type,
            "registry_url": settings.registry.url,
            "registry_tag": settings.registry.default_tag,
        }

    except Exception as exception:
        message: str = f"Failed to list modules: {exception}"
        raise ToolError(message) from exception
|
||||
|
||||
|
||||
@mcp.tool
async def execute_module(
    module_identifier: str,
    configuration: dict[str, Any] | None = None,
    assets_path: str | None = None,
) -> dict[str, Any]:
    """Execute a FuzzForge module in an isolated container.

    This tool runs a module in a sandboxed environment.
    The module receives input assets and produces output results.

    :param module_identifier: The identifier of the module to execute.
    :param configuration: Optional configuration dict to pass to the module.
    :param assets_path: Optional path to input assets. If not provided, uses project assets.
    :return: Execution result including status and results path.

    """
    runner: Runner = get_runner()
    project_path: Path = get_project_path()

    try:
        assets = Path(assets_path) if assets_path else None
        outcome: StepResult = await runner.execute_module(
            module_identifier=module_identifier,
            project_path=project_path,
            configuration=configuration,
            assets_path=assets,
        )

        return {
            "success": outcome.success,
            "execution_id": outcome.execution_id,
            "module": outcome.module_identifier,
            "results_path": str(outcome.results_path) if outcome.results_path else None,
            "started_at": outcome.started_at.isoformat(),
            "completed_at": outcome.completed_at.isoformat(),
            "error": outcome.error,
        }

    except Exception as exception:
        message: str = f"Module execution failed: {exception}"
        raise ToolError(message) from exception
|
||||
|
||||
|
||||
@mcp.tool
async def start_continuous_module(
    module_identifier: str,
    configuration: dict[str, Any] | None = None,
    assets_path: str | None = None,
) -> dict[str, Any]:
    """Start a module in continuous/background mode.

    The module will run indefinitely until stopped with stop_continuous_module().
    Use get_continuous_status() to check progress and metrics.

    This is useful for long-running modules that should run until
    the user decides to stop them.

    :param module_identifier: The module to run.
    :param configuration: Optional configuration. Set max_duration to 0 for infinite.
    :param assets_path: Optional path to input assets.
    :raises ToolError: If the module cannot be started.
    :return: Execution info including session_id for monitoring.

    """
    runner: Runner = get_runner()
    project_path: Path = get_project_path()
    session_id = str(uuid.uuid4())[:8]

    # Copy before injecting defaults so the CALLER's dict is never mutated
    # (the original code wrote "max_duration" into the caller-supplied dict).
    configuration = dict(configuration) if configuration else {}
    configuration.setdefault("max_duration", 0)  # 0 = infinite

    try:
        # Determine assets path: explicit argument wins, otherwise fall back
        # to the project's stored assets.
        if assets_path:
            actual_assets_path = Path(assets_path)
        else:
            storage = runner.storage
            # NOTE(review): may be None if set_project_assets was never
            # called — the executor is assumed to handle that; confirm.
            actual_assets_path = storage.get_project_assets_path(project_path)

        # Non-blocking executor method: returns immediately with the
        # container_id of the detached container.
        executor = runner._executor
        result = executor.start_module_continuous(
            module_identifier=module_identifier,
            assets_path=actual_assets_path,
            configuration=configuration,
        )

        # Store execution info so get_continuous_status / stop_continuous_module
        # can find the container later.
        _background_executions[session_id] = {
            "session_id": session_id,
            "module": module_identifier,
            "configuration": configuration,
            "started_at": datetime.now(timezone.utc).isoformat(),
            "status": "running",
            "container_id": result["container_id"],
            "input_dir": result["input_dir"],
        }

        return {
            "success": True,
            "session_id": session_id,
            "module": module_identifier,
            "container_id": result["container_id"],
            "status": "running",
            "message": f"Continuous module started. Use get_continuous_status('{session_id}') to monitor progress.",
        }

    except Exception as exception:
        message: str = f"Failed to start continuous module: {exception}"
        raise ToolError(message) from exception
|
||||
|
||||
|
||||
def _get_continuous_status_impl(session_id: str) -> dict[str, Any]:
    """Internal helper to get continuous session status (non-tool version).

    Looks up the session in ``_background_executions``, refreshes the
    container status, and parses the module's ``stream.jsonl`` event log
    into a metrics snapshot.

    :param session_id: Session ID returned by start_continuous_module().
    :raises ToolError: If the session ID is unknown.
    :return: Status dict with session info, elapsed time, and metrics.
    """
    if session_id not in _background_executions:
        raise ToolError(f"Unknown session: {session_id}. Use list_continuous_sessions() to see active sessions.")

    execution = _background_executions[session_id]
    container_id = execution.get("container_id")

    # Metrics default to zero/empty when the container is gone or has not
    # emitted any events yet.
    metrics: dict[str, Any] = {
        "total_executions": 0,
        "total_crashes": 0,
        "exec_per_sec": 0,
        "coverage": 0,
        "current_target": "",
        "latest_events": [],
    }

    # Read stream.jsonl from inside the running container.
    if container_id:
        try:
            runner: Runner = get_runner()
            executor = runner._executor

            # Refresh the cached status when the container stops running.
            container_status = executor.get_module_status(container_id)
            if container_status != "running":
                execution["status"] = "stopped" if container_status == "exited" else container_status

            stream_content = executor.read_module_output(container_id, "/data/output/stream.jsonl")

            if stream_content:
                events: list[dict[str, Any]] = []
                crash_count = 0

                # FIX: scan the FULL stream so total_crashes is a true total.
                # Previously only the last 20 lines were inspected, which
                # undercounted crashes (and could miss the latest "metrics"
                # event) on long-running sessions.
                for line in stream_content.strip().split("\n"):
                    try:
                        event = json.loads(line)
                    except json.JSONDecodeError:
                        # Skip partially-written or corrupt lines.
                        continue

                    events.append(event)

                    # The most recent "metrics" event wins for the counters.
                    if event.get("event") == "metrics":
                        metrics["total_executions"] = event.get("executions", 0)
                        metrics["current_target"] = event.get("target", "")
                        metrics["exec_per_sec"] = event.get("exec_per_sec", 0)
                        metrics["coverage"] = event.get("coverage", 0)

                    if event.get("event") == "crash_detected":
                        crash_count += 1

                # Only surface the 20 most recent events to keep payloads small.
                metrics["latest_events"] = events[-20:]
                metrics["total_crashes"] = crash_count

        except Exception as e:
            # Status reads are best-effort: report the problem in the payload
            # rather than failing the whole status call.
            metrics["error"] = str(e)

    # Calculate elapsed time since the session was started.
    started_at = execution.get("started_at", "")
    elapsed_seconds = 0
    if started_at:
        try:
            # started_at was produced by datetime.now(timezone.utc).isoformat(),
            # so fromisoformat() yields an aware datetime and subtraction is valid.
            start_time = datetime.fromisoformat(started_at)
            elapsed_seconds = int((datetime.now(timezone.utc) - start_time).total_seconds())
        except Exception:
            pass

    return {
        "session_id": session_id,
        "module": execution.get("module"),
        "status": execution.get("status"),
        "container_id": container_id,
        "started_at": started_at,
        "elapsed_seconds": elapsed_seconds,
        "elapsed_human": f"{elapsed_seconds // 60}m {elapsed_seconds % 60}s",
        "metrics": metrics,
    }
|
||||
|
||||
|
||||
@mcp.tool
async def get_continuous_status(session_id: str) -> dict[str, Any]:
    """Get the current status and metrics of a running continuous session.

    Call this periodically (e.g., every 30 seconds) to get live updates
    on progress and metrics.

    :param session_id: The session ID returned by start_continuous_module().
    :return: Current status, metrics, and any events found.

    """
    # Delegate to the shared helper so sibling tools (e.g. stop_continuous_module)
    # can reuse the same logic without invoking the MCP tool wrapper.
    snapshot = _get_continuous_status_impl(session_id)
    return snapshot
|
||||
|
||||
|
||||
@mcp.tool
async def stop_continuous_module(session_id: str) -> dict[str, Any]:
    """Stop a running continuous session.

    This will gracefully stop the module and collect any results.

    :param session_id: The session ID of the session to stop.
    :return: Final status and summary of the session.

    """
    if session_id not in _background_executions:
        raise ToolError(f"Unknown session: {session_id}")

    execution = _background_executions[session_id]
    container_id = execution.get("container_id")
    input_dir = execution.get("input_dir")

    try:
        # Get final metrics before stopping (use helper, not the tool) —
        # the stream is no longer readable once the container is removed.
        final_metrics = _get_continuous_status_impl(session_id)

        # Stop the container and collect results
        results_path = None
        if container_id:
            runner: Runner = get_runner()
            executor = runner._executor

            try:
                results_path = executor.stop_module_continuous(container_id, input_dir)
            except Exception:
                # Container may have already stopped
                pass

        # Mark the session stopped even when the container was already gone,
        # so list_continuous_sessions reflects the final state.
        execution["status"] = "stopped"
        execution["stopped_at"] = datetime.now(timezone.utc).isoformat()

        return {
            "success": True,
            "session_id": session_id,
            "message": "Continuous session stopped",
            "results_path": str(results_path) if results_path else None,
            "final_metrics": final_metrics.get("metrics", {}),
            "elapsed": final_metrics.get("elapsed_human", ""),
        }

    except Exception as exception:
        message: str = f"Failed to stop continuous module: {exception}"
        raise ToolError(message) from exception
|
||||
|
||||
|
||||
@mcp.tool
async def list_continuous_sessions() -> dict[str, Any]:
    """List all active and recent continuous sessions.

    :return: List of continuous sessions with their status.

    """
    # Summarize each tracked execution; full details (container_id, metrics)
    # are available via get_continuous_status().
    entries = [
        {
            "session_id": sid,
            "module": info.get("module"),
            "status": info.get("status"),
            "started_at": info.get("started_at"),
        }
        for sid, info in _background_executions.items()
    ]

    return {
        "sessions": entries,
        "count": len(entries),
    }
|
||||
|
||||
@@ -3,44 +3,42 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ToolError
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_storage, set_current_project_path
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_runner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
|
||||
@mcp.tool
|
||||
async def init_project(project_path: str | None = None) -> dict[str, Any]:
|
||||
"""Initialize a new FuzzForge project workspace.
|
||||
"""Initialize a new FuzzForge project.
|
||||
|
||||
Creates a `.fuzzforge/` directory for storing configuration and execution results.
|
||||
Call this once before using hub tools. The project path is a working directory
|
||||
for FuzzForge state — it does not need to contain the files you want to analyze.
|
||||
Use `set_project_assets` separately to specify the target files.
|
||||
Creates the necessary storage directories for a project. This should
|
||||
be called before executing modules or workflows.
|
||||
|
||||
:param project_path: Working directory for FuzzForge state. Defaults to current directory.
|
||||
:param project_path: Path to the project directory. If not provided, uses current directory.
|
||||
:return: Project initialization result.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
|
||||
try:
|
||||
path = Path(project_path) if project_path else get_project_path()
|
||||
|
||||
# Track this as the current active project
|
||||
set_current_project_path(path)
|
||||
|
||||
storage_path = storage.init_project(path)
|
||||
storage_path = runner.init_project(path)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"project_path": str(path),
|
||||
"storage_path": str(storage_path),
|
||||
"message": f"Project initialized. Storage at {path}/.fuzzforge/",
|
||||
"message": f"Project initialized at {path}",
|
||||
}
|
||||
|
||||
except Exception as exception:
|
||||
@@ -50,21 +48,20 @@ async def init_project(project_path: str | None = None) -> dict[str, Any]:
|
||||
|
||||
@mcp.tool
|
||||
async def set_project_assets(assets_path: str) -> dict[str, Any]:
|
||||
"""Set the directory containing target files to analyze.
|
||||
"""Set the initial assets for a project.
|
||||
|
||||
Points FuzzForge to the directory with your analysis targets
|
||||
(firmware images, binaries, source code, etc.). This directory
|
||||
is mounted read-only into hub tool containers.
|
||||
Assets are input files that will be provided to modules during execution.
|
||||
This could be source code, contracts, binaries, etc.
|
||||
|
||||
:param assets_path: Path to the directory containing files to analyze.
|
||||
:param assets_path: Path to assets file (archive) or directory.
|
||||
:return: Result including stored assets path.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
stored_path = storage.set_project_assets(
|
||||
stored_path = runner.set_project_assets(
|
||||
project_path=project_path,
|
||||
assets_path=Path(assets_path),
|
||||
)
|
||||
@@ -85,16 +82,16 @@ async def set_project_assets(assets_path: str) -> dict[str, Any]:
|
||||
async def list_executions() -> dict[str, Any]:
|
||||
"""List all executions for the current project.
|
||||
|
||||
Returns execution summaries including server, tool, timestamp, and success status.
|
||||
Returns a list of execution IDs that can be used to retrieve results.
|
||||
|
||||
:return: List of execution summaries.
|
||||
:return: List of execution IDs.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
executions = storage.list_executions(project_path)
|
||||
executions = runner.list_executions(project_path)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
@@ -117,11 +114,11 @@ async def get_execution_results(execution_id: str, extract_to: str | None = None
|
||||
:return: Result including path to results archive.
|
||||
|
||||
"""
|
||||
storage = get_storage()
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
results_path = storage.get_execution_results(project_path, execution_id)
|
||||
results_path = runner.get_execution_results(project_path, execution_id)
|
||||
|
||||
if results_path is None:
|
||||
return {
|
||||
@@ -130,7 +127,7 @@ async def get_execution_results(execution_id: str, extract_to: str | None = None
|
||||
"error": "Execution results not found",
|
||||
}
|
||||
|
||||
result: dict[str, Any] = {
|
||||
result = {
|
||||
"success": True,
|
||||
"execution_id": execution_id,
|
||||
"results_path": str(results_path),
|
||||
@@ -138,7 +135,7 @@ async def get_execution_results(execution_id: str, extract_to: str | None = None
|
||||
|
||||
# Extract if requested
|
||||
if extract_to:
|
||||
extracted_path = storage.extract_results(results_path, Path(extract_to))
|
||||
extracted_path = runner.extract_results(results_path, Path(extract_to))
|
||||
result["extracted_path"] = str(extracted_path)
|
||||
|
||||
return result
|
||||
|
||||
92
fuzzforge-mcp/src/fuzzforge_mcp/tools/workflows.py
Normal file
92
fuzzforge-mcp/src/fuzzforge_mcp/tools/workflows.py
Normal file
@@ -0,0 +1,92 @@
|
||||
"""Workflow tools for FuzzForge MCP."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING, Any
|
||||
|
||||
from fastmcp import FastMCP
|
||||
from fastmcp.exceptions import ToolError
|
||||
from fuzzforge_runner.orchestrator import WorkflowDefinition, WorkflowStep
|
||||
|
||||
from fuzzforge_mcp.dependencies import get_project_path, get_runner
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_runner import Runner
|
||||
from fuzzforge_runner.orchestrator import WorkflowResult
|
||||
|
||||
|
||||
mcp: FastMCP = FastMCP()
|
||||
|
||||
|
||||
@mcp.tool
|
||||
async def execute_workflow(
|
||||
workflow_name: str,
|
||||
steps: list[dict[str, Any]],
|
||||
initial_assets_path: str | None = None,
|
||||
) -> dict[str, Any]:
|
||||
"""Execute a workflow consisting of multiple module steps.
|
||||
|
||||
A workflow chains multiple modules together, passing the output of each
|
||||
module as input to the next. This enables complex pipelines.
|
||||
|
||||
:param workflow_name: Name for this workflow execution.
|
||||
:param steps: List of step definitions, each with "module" and optional "configuration".
|
||||
:param initial_assets_path: Optional path to initial assets for the first step.
|
||||
:return: Workflow execution result including status of each step.
|
||||
|
||||
Example steps format:
|
||||
[
|
||||
{"module": "module-a", "configuration": {"key": "value"}},
|
||||
{"module": "module-b", "configuration": {}},
|
||||
{"module": "module-c"}
|
||||
]
|
||||
|
||||
"""
|
||||
runner: Runner = get_runner()
|
||||
project_path: Path = get_project_path()
|
||||
|
||||
try:
|
||||
# Convert step dicts to WorkflowStep objects
|
||||
workflow_steps = [
|
||||
WorkflowStep(
|
||||
module_identifier=step["module"],
|
||||
configuration=step.get("configuration"),
|
||||
name=step.get("name", f"step-{i}"),
|
||||
)
|
||||
for i, step in enumerate(steps)
|
||||
]
|
||||
|
||||
workflow = WorkflowDefinition(
|
||||
name=workflow_name,
|
||||
steps=workflow_steps,
|
||||
)
|
||||
|
||||
result: WorkflowResult = await runner.execute_workflow(
|
||||
workflow=workflow,
|
||||
project_path=project_path,
|
||||
initial_assets_path=Path(initial_assets_path) if initial_assets_path else None,
|
||||
)
|
||||
|
||||
return {
|
||||
"success": result.success,
|
||||
"execution_id": result.execution_id,
|
||||
"workflow_name": result.name,
|
||||
"final_results_path": str(result.final_results_path) if result.final_results_path else None,
|
||||
"steps": [
|
||||
{
|
||||
"step_index": step.step_index,
|
||||
"module": step.module_identifier,
|
||||
"success": step.success,
|
||||
"execution_id": step.execution_id,
|
||||
"results_path": str(step.results_path) if step.results_path else None,
|
||||
"error": step.error,
|
||||
}
|
||||
for step in result.steps
|
||||
],
|
||||
}
|
||||
|
||||
except Exception as exception:
|
||||
message: str = f"Workflow execution failed: {exception}"
|
||||
raise ToolError(message) from exception
|
||||
|
||||
@@ -11,7 +11,7 @@ if TYPE_CHECKING:
|
||||
from collections.abc import AsyncGenerator, Callable
|
||||
|
||||
from fastmcp.client import FastMCPTransport
|
||||
from fuzzforge_tests.fixtures import FuzzForgeProjectIdentifier
|
||||
from fuzzforge_types import FuzzForgeProjectIdentifier
|
||||
|
||||
pytest_plugins = ["fuzzforge_tests.fixtures"]
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
"""MCP tool tests for FuzzForge AI.
|
||||
"""MCP tool tests for FuzzForge OSS.
|
||||
|
||||
Tests the MCP tools that are available in FuzzForge AI.
|
||||
Tests the MCP tools that are available in the OSS version.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
@@ -11,6 +11,16 @@ if TYPE_CHECKING:
|
||||
from fastmcp.client import FastMCPTransport
|
||||
|
||||
|
||||
async def test_list_modules_tool_exists(
|
||||
mcp_client: "Client[FastMCPTransport]",
|
||||
) -> None:
|
||||
"""Test that the list_modules tool is available."""
|
||||
tools = await mcp_client.list_tools()
|
||||
tool_names = [tool.name for tool in tools]
|
||||
|
||||
assert "list_modules" in tool_names
|
||||
|
||||
|
||||
async def test_init_project_tool_exists(
|
||||
mcp_client: "Client[FastMCPTransport]",
|
||||
) -> None:
|
||||
@@ -21,11 +31,31 @@ async def test_init_project_tool_exists(
|
||||
assert "init_project" in tool_names
|
||||
|
||||
|
||||
async def test_execute_module_tool_exists(
|
||||
mcp_client: "Client[FastMCPTransport]",
|
||||
) -> None:
|
||||
"""Test that the execute_module tool is available."""
|
||||
tools = await mcp_client.list_tools()
|
||||
tool_names = [tool.name for tool in tools]
|
||||
|
||||
assert "execute_module" in tool_names
|
||||
|
||||
|
||||
async def test_execute_workflow_tool_exists(
|
||||
mcp_client: "Client[FastMCPTransport]",
|
||||
) -> None:
|
||||
"""Test that the execute_workflow tool is available."""
|
||||
tools = await mcp_client.list_tools()
|
||||
tool_names = [tool.name for tool in tools]
|
||||
|
||||
assert "execute_workflow" in tool_names
|
||||
|
||||
|
||||
async def test_mcp_has_expected_tool_count(
|
||||
mcp_client: "Client[FastMCPTransport]",
|
||||
) -> None:
|
||||
"""Test that MCP has the expected number of tools."""
|
||||
tools = await mcp_client.list_tools()
|
||||
|
||||
# Should have project tools + hub tools
|
||||
assert len(tools) >= 2
|
||||
# Should have at least 4 core tools
|
||||
assert len(tools) >= 4
|
||||
|
||||
24
fuzzforge-modules/cargo-fuzzer/Dockerfile
Normal file
24
fuzzforge-modules/cargo-fuzzer/Dockerfile
Normal file
@@ -0,0 +1,24 @@
|
||||
FROM localhost/fuzzforge-modules-sdk:0.1.0
|
||||
|
||||
# Install system dependencies for Rust compilation
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl \
|
||||
build-essential \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install Rust toolchain with nightly (required for cargo-fuzz)
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --default-toolchain nightly
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Install cargo-fuzz
|
||||
RUN cargo install cargo-fuzz --locked || true
|
||||
|
||||
COPY ./src /app/src
|
||||
COPY ./pyproject.toml /app/pyproject.toml
|
||||
|
||||
# Remove workspace reference since we're using wheels
|
||||
RUN sed -i '/\[tool\.uv\.sources\]/,/^$/d' /app/pyproject.toml
|
||||
|
||||
RUN uv sync --find-links /wheels
|
||||
45
fuzzforge-modules/cargo-fuzzer/Makefile
Normal file
45
fuzzforge-modules/cargo-fuzzer/Makefile
Normal file
@@ -0,0 +1,45 @@
|
||||
PACKAGE=$(word 1, $(shell uv version))
|
||||
VERSION=$(word 2, $(shell uv version))
|
||||
|
||||
PODMAN?=/usr/bin/podman
|
||||
|
||||
SOURCES=./src
|
||||
TESTS=./tests
|
||||
|
||||
.PHONY: bandit build clean format mypy pytest ruff version
|
||||
|
||||
bandit:
|
||||
uv run bandit --recursive $(SOURCES)
|
||||
|
||||
build:
|
||||
$(PODMAN) build --file ./Dockerfile --no-cache --tag $(PACKAGE):$(VERSION)
|
||||
|
||||
save: build
|
||||
$(PODMAN) save --format oci-archive --output /tmp/$(PACKAGE)-$(VERSION).oci $(PACKAGE):$(VERSION)
|
||||
|
||||
clean:
|
||||
@find . -type d \( \
|
||||
-name '*.egg-info' \
|
||||
-o -name '.mypy_cache' \
|
||||
-o -name '.pytest_cache' \
|
||||
-o -name '.ruff_cache' \
|
||||
-o -name '__pycache__' \
|
||||
\) -printf 'removing directory %p\n' -exec rm -rf {} +
|
||||
|
||||
cloc:
|
||||
cloc $(SOURCES)
|
||||
|
||||
format:
|
||||
uv run ruff format $(SOURCES) $(TESTS)
|
||||
|
||||
mypy:
|
||||
uv run mypy $(SOURCES)
|
||||
|
||||
pytest:
|
||||
uv run pytest $(TESTS)
|
||||
|
||||
ruff:
|
||||
uv run ruff check --fix $(SOURCES) $(TESTS)
|
||||
|
||||
version:
|
||||
@echo '$(PACKAGE)@$(VERSION)'
|
||||
46
fuzzforge-modules/cargo-fuzzer/README.md
Normal file
46
fuzzforge-modules/cargo-fuzzer/README.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# FuzzForge Modules - FIXME
|
||||
|
||||
## Installation
|
||||
|
||||
### Python
|
||||
|
||||
```shell
|
||||
# install the package (users)
|
||||
uv sync
|
||||
# install the package and all development dependencies (developers)
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
### Container
|
||||
|
||||
```shell
|
||||
# build the image
|
||||
make build
|
||||
# run the container
|
||||
mkdir -p "${PWD}/data" "${PWD}/data/input" "${PWD}/data/output"
|
||||
echo '{"settings":{},"resources":[]}' > "${PWD}/data/input/input.json"
|
||||
podman run --rm \
|
||||
--volume "${PWD}/data:/data" \
|
||||
'<name>:<version>' 'uv run module'
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
uv run module
|
||||
```
|
||||
|
||||
## Development tools
|
||||
|
||||
```shell
|
||||
# run ruff (formatter)
|
||||
make format
|
||||
# run mypy (type checker)
|
||||
make mypy
|
||||
# run tests (pytest)
|
||||
make pytest
|
||||
# run ruff (linter)
|
||||
make ruff
|
||||
```
|
||||
|
||||
See the file `Makefile` at the root of this directory for more tools.
|
||||
6
fuzzforge-modules/cargo-fuzzer/mypy.ini
Normal file
6
fuzzforge-modules/cargo-fuzzer/mypy.ini
Normal file
@@ -0,0 +1,6 @@
|
||||
[mypy]
|
||||
plugins = pydantic.mypy
|
||||
strict = True
|
||||
warn_unused_ignores = True
|
||||
warn_redundant_casts = True
|
||||
warn_return_any = True
|
||||
31
fuzzforge-modules/cargo-fuzzer/pyproject.toml
Normal file
31
fuzzforge-modules/cargo-fuzzer/pyproject.toml
Normal file
@@ -0,0 +1,31 @@
|
||||
[project]
|
||||
name = "cargo-fuzzer"
|
||||
version = "0.1.0"
|
||||
description = "FuzzForge module that runs cargo-fuzz with libFuzzer on Rust targets"
|
||||
authors = []
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"fuzzforge-modules-sdk==0.0.1",
|
||||
"pydantic==2.12.4",
|
||||
"structlog==25.5.0",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
lints = [
|
||||
"bandit==1.8.6",
|
||||
"mypy==1.18.2",
|
||||
"ruff==0.14.4",
|
||||
]
|
||||
tests = [
|
||||
"pytest==9.0.2",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
module = "module.__main__:main"
|
||||
|
||||
[tool.uv.sources]
|
||||
fuzzforge-modules-sdk = { workspace = true }
|
||||
|
||||
[tool.uv]
|
||||
package = true
|
||||
19
fuzzforge-modules/cargo-fuzzer/ruff.toml
Normal file
19
fuzzforge-modules/cargo-fuzzer/ruff.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
line-length = 120
|
||||
|
||||
[lint]
|
||||
select = [ "ALL" ]
|
||||
ignore = [
|
||||
"COM812", # conflicts with the formatter
|
||||
"D100", # ignoring missing docstrings in public modules
|
||||
"D104", # ignoring missing docstrings in public packages
|
||||
"D203", # conflicts with 'D211'
|
||||
"D213", # conflicts with 'D212'
|
||||
"TD002", # ignoring missing author in 'TODO' statements
|
||||
"TD003", # ignoring missing issue link in 'TODO' statements
|
||||
]
|
||||
|
||||
[lint.per-file-ignores]
|
||||
"tests/*" = [
|
||||
"PLR2004", # allowing comparisons using unamed numerical constants in tests
|
||||
"S101", # allowing 'assert' statements in tests
|
||||
]
|
||||
19
fuzzforge-modules/cargo-fuzzer/src/module/__main__.py
Normal file
19
fuzzforge-modules/cargo-fuzzer/src/module/__main__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from fuzzforge_modules_sdk.api import logs
|
||||
|
||||
from module.mod import Module
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_modules_sdk.api.modules.base import FuzzForgeModule
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""TODO."""
|
||||
logs.configure()
|
||||
module: FuzzForgeModule = Module()
|
||||
module.main()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
516
fuzzforge-modules/cargo-fuzzer/src/module/mod.py
Normal file
516
fuzzforge-modules/cargo-fuzzer/src/module/mod.py
Normal file
@@ -0,0 +1,516 @@
|
||||
"""Cargo Fuzzer module for FuzzForge.
|
||||
|
||||
This module runs cargo-fuzz (libFuzzer) on validated Rust fuzz targets.
|
||||
It takes a fuzz project with compiled harnesses and runs fuzzing for a
|
||||
configurable duration, collecting crashes and statistics.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import subprocess
|
||||
import signal
|
||||
import time
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
from fuzzforge_modules_sdk.api.constants import PATH_TO_INPUTS, PATH_TO_OUTPUTS
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleResults
|
||||
from fuzzforge_modules_sdk.api.modules.base import FuzzForgeModule
|
||||
|
||||
from module.models import Input, Output, CrashInfo, FuzzingStats, TargetResult
|
||||
from module.settings import Settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleResource
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class Module(FuzzForgeModule):
|
||||
"""Cargo Fuzzer module - runs cargo-fuzz with libFuzzer on Rust targets."""
|
||||
|
||||
_settings: Settings | None
|
||||
_fuzz_project_path: Path | None
|
||||
_target_results: list[TargetResult]
|
||||
_crashes_path: Path | None
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize an instance of the class."""
|
||||
name: str = "cargo-fuzzer"
|
||||
version: str = "0.1.0"
|
||||
FuzzForgeModule.__init__(self, name=name, version=version)
|
||||
self._settings = None
|
||||
self._fuzz_project_path = None
|
||||
self._target_results = []
|
||||
self._crashes_path = None
|
||||
|
||||
@classmethod
|
||||
def _get_input_type(cls) -> type[Input]:
|
||||
"""Return the input type."""
|
||||
return Input
|
||||
|
||||
@classmethod
|
||||
def _get_output_type(cls) -> type[Output]:
|
||||
"""Return the output type."""
|
||||
return Output
|
||||
|
||||
def _prepare(self, settings: Settings) -> None: # type: ignore[override]
|
||||
"""Prepare the module with settings.
|
||||
|
||||
:param settings: Module settings.
|
||||
|
||||
"""
|
||||
self._settings = settings
|
||||
logger.info("cargo-fuzzer preparing", settings=settings.model_dump() if settings else {})
|
||||
|
||||
def _run(self, resources: list[FuzzForgeModuleResource]) -> FuzzForgeModuleResults:
|
||||
"""Run the fuzzer.
|
||||
|
||||
:param resources: Input resources (fuzz project + source).
|
||||
:returns: Module execution result.
|
||||
|
||||
"""
|
||||
logger.info("cargo-fuzzer starting", resource_count=len(resources))
|
||||
|
||||
# Emit initial progress
|
||||
self.emit_progress(0, status="initializing", message="Setting up fuzzing environment")
|
||||
self.emit_event("module_started", resource_count=len(resources))
|
||||
|
||||
# Setup the fuzzing environment
|
||||
if not self._setup_environment(resources):
|
||||
self.emit_progress(100, status="failed", message="Failed to setup environment")
|
||||
return FuzzForgeModuleResults.FAILURE
|
||||
|
||||
# Get list of fuzz targets
|
||||
targets = self._get_fuzz_targets()
|
||||
if not targets:
|
||||
logger.error("no fuzz targets found")
|
||||
self.emit_progress(100, status="failed", message="No fuzz targets found")
|
||||
return FuzzForgeModuleResults.FAILURE
|
||||
|
||||
# Filter targets if specific ones were requested
|
||||
if self._settings and self._settings.targets:
|
||||
requested = set(self._settings.targets)
|
||||
targets = [t for t in targets if t in requested]
|
||||
if not targets:
|
||||
logger.error("none of the requested targets found", requested=list(requested))
|
||||
self.emit_progress(100, status="failed", message="Requested targets not found")
|
||||
return FuzzForgeModuleResults.FAILURE
|
||||
|
||||
logger.info("found fuzz targets", targets=targets)
|
||||
self.emit_event("targets_found", targets=targets, count=len(targets))
|
||||
|
||||
# Setup output directories
|
||||
self._crashes_path = PATH_TO_OUTPUTS / "crashes"
|
||||
self._crashes_path.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Run fuzzing on each target
|
||||
# max_duration=0 means infinite/continuous mode
|
||||
max_duration = self._settings.max_duration if self._settings else 60
|
||||
is_continuous = max_duration == 0
|
||||
|
||||
if is_continuous:
|
||||
# Continuous mode: cycle through targets indefinitely
|
||||
# Each target runs for 60 seconds before moving to next
|
||||
duration_per_target = 60
|
||||
else:
|
||||
duration_per_target = max_duration // max(len(targets), 1)
|
||||
total_crashes = 0
|
||||
|
||||
# In continuous mode, loop forever; otherwise loop once
|
||||
round_num = 0
|
||||
while True:
|
||||
round_num += 1
|
||||
|
||||
for i, target in enumerate(targets):
|
||||
if is_continuous:
|
||||
progress_msg = f"Round {round_num}: Fuzzing {target}"
|
||||
else:
|
||||
progress_msg = f"Fuzzing target {i+1}/{len(targets)}"
|
||||
|
||||
progress = int((i / len(targets)) * 100) if not is_continuous else 50
|
||||
self.emit_progress(
|
||||
progress,
|
||||
status="running",
|
||||
message=progress_msg,
|
||||
current_task=target,
|
||||
metrics={
|
||||
"targets_completed": i,
|
||||
"total_targets": len(targets),
|
||||
"crashes_found": total_crashes,
|
||||
"round": round_num if is_continuous else 1,
|
||||
}
|
||||
)
|
||||
self.emit_event("target_started", target=target, index=i, total=len(targets), round=round_num)
|
||||
|
||||
result = self._fuzz_target(target, duration_per_target)
|
||||
self._target_results.append(result)
|
||||
total_crashes += len(result.crashes)
|
||||
|
||||
# Emit target completion
|
||||
self.emit_event(
|
||||
"target_completed",
|
||||
target=target,
|
||||
crashes=len(result.crashes),
|
||||
executions=result.stats.total_executions if result.stats else 0,
|
||||
coverage=result.stats.coverage_edges if result.stats else 0,
|
||||
)
|
||||
|
||||
logger.info("target completed",
|
||||
target=target,
|
||||
crashes=len(result.crashes),
|
||||
execs=result.stats.total_executions if result.stats else 0)
|
||||
|
||||
# Exit loop if not continuous mode
|
||||
if not is_continuous:
|
||||
break
|
||||
|
||||
# Write output
|
||||
self._write_output()
|
||||
|
||||
# Emit final progress
|
||||
self.emit_progress(
|
||||
100,
|
||||
status="completed",
|
||||
message=f"Fuzzing completed. Found {total_crashes} crashes.",
|
||||
metrics={
|
||||
"targets_fuzzed": len(self._target_results),
|
||||
"total_crashes": total_crashes,
|
||||
"total_executions": sum(r.stats.total_executions for r in self._target_results if r.stats),
|
||||
}
|
||||
)
|
||||
self.emit_event("module_completed", total_crashes=total_crashes, targets_fuzzed=len(targets))
|
||||
|
||||
logger.info("cargo-fuzzer completed",
|
||||
targets=len(self._target_results),
|
||||
total_crashes=total_crashes)
|
||||
|
||||
return FuzzForgeModuleResults.SUCCESS
|
||||
|
||||
def _cleanup(self, settings: Settings) -> None: # type: ignore[override]
|
||||
"""Clean up after execution.
|
||||
|
||||
:param settings: Module settings.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def _setup_environment(self, resources: list[FuzzForgeModuleResource]) -> bool:
|
||||
"""Setup the fuzzing environment.
|
||||
|
||||
:param resources: Input resources.
|
||||
:returns: True if setup successful.
|
||||
|
||||
"""
|
||||
import shutil
|
||||
|
||||
# Find fuzz project in resources
|
||||
source_fuzz_project = None
|
||||
source_project_root = None
|
||||
|
||||
for resource in resources:
|
||||
path = Path(resource.path)
|
||||
if path.is_dir():
|
||||
# Check for fuzz subdirectory
|
||||
fuzz_dir = path / "fuzz"
|
||||
if fuzz_dir.is_dir() and (fuzz_dir / "Cargo.toml").exists():
|
||||
source_fuzz_project = fuzz_dir
|
||||
source_project_root = path
|
||||
break
|
||||
# Or direct fuzz project
|
||||
if (path / "Cargo.toml").exists() and (path / "fuzz_targets").is_dir():
|
||||
source_fuzz_project = path
|
||||
source_project_root = path.parent
|
||||
break
|
||||
|
||||
if source_fuzz_project is None:
|
||||
logger.error("no fuzz project found in resources")
|
||||
return False
|
||||
|
||||
# Copy project to writable location since /data/input is read-only
|
||||
# and cargo-fuzz needs to write corpus, artifacts, and build cache
|
||||
work_dir = Path("/tmp/fuzz-work")
|
||||
if work_dir.exists():
|
||||
shutil.rmtree(work_dir)
|
||||
|
||||
# Copy the entire project root
|
||||
work_project = work_dir / source_project_root.name
|
||||
shutil.copytree(source_project_root, work_project, dirs_exist_ok=True)
|
||||
|
||||
# Update fuzz_project_path to point to the copied location
|
||||
relative_fuzz = source_fuzz_project.relative_to(source_project_root)
|
||||
self._fuzz_project_path = work_project / relative_fuzz
|
||||
|
||||
logger.info("using fuzz project", path=str(self._fuzz_project_path))
|
||||
return True
|
||||
|
||||
def _get_fuzz_targets(self) -> list[str]:
|
||||
"""Get list of fuzz target names.
|
||||
|
||||
:returns: List of target names.
|
||||
|
||||
"""
|
||||
if self._fuzz_project_path is None:
|
||||
return []
|
||||
|
||||
targets = []
|
||||
fuzz_targets_dir = self._fuzz_project_path / "fuzz_targets"
|
||||
|
||||
if fuzz_targets_dir.is_dir():
|
||||
for rs_file in fuzz_targets_dir.glob("*.rs"):
|
||||
targets.append(rs_file.stem)
|
||||
|
||||
return targets
|
||||
|
||||
def _fuzz_target(self, target: str, duration: int) -> TargetResult:
|
||||
"""Run fuzzing on a single target.
|
||||
|
||||
:param target: Name of the fuzz target.
|
||||
:param duration: Maximum duration in seconds.
|
||||
:returns: Fuzzing result for this target.
|
||||
|
||||
"""
|
||||
logger.info("fuzzing target", target=target, duration=duration)
|
||||
|
||||
crashes: list[CrashInfo] = []
|
||||
stats = FuzzingStats()
|
||||
|
||||
if self._fuzz_project_path is None:
|
||||
return TargetResult(target=target, crashes=crashes, stats=stats)
|
||||
|
||||
# Create corpus directory for this target
|
||||
corpus_dir = self._fuzz_project_path / "corpus" / target
|
||||
corpus_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Build the command
|
||||
cmd = [
|
||||
"cargo", "+nightly", "fuzz", "run",
|
||||
target,
|
||||
"--",
|
||||
]
|
||||
|
||||
# Add time limit
|
||||
if duration > 0:
|
||||
cmd.append(f"-max_total_time={duration}")
|
||||
|
||||
# Use fork mode to continue after crashes
|
||||
# This makes libFuzzer restart worker after crash instead of exiting
|
||||
cmd.append("-fork=1")
|
||||
cmd.append("-ignore_crashes=1")
|
||||
cmd.append("-print_final_stats=1")
|
||||
|
||||
# Add jobs if specified
|
||||
if self._settings and self._settings.jobs > 1:
|
||||
cmd.extend([f"-jobs={self._settings.jobs}"])
|
||||
|
||||
try:
|
||||
env = os.environ.copy()
|
||||
env["CARGO_INCREMENTAL"] = "0"
|
||||
|
||||
process = subprocess.Popen(
|
||||
cmd,
|
||||
cwd=self._fuzz_project_path,
|
||||
stdout=subprocess.PIPE,
|
||||
stderr=subprocess.STDOUT,
|
||||
text=True,
|
||||
env=env,
|
||||
)
|
||||
|
||||
output_lines = []
|
||||
start_time = time.time()
|
||||
last_metrics_emit = 0.0
|
||||
current_execs = 0
|
||||
current_cov = 0
|
||||
current_exec_s = 0
|
||||
crash_count = 0
|
||||
|
||||
# Read output with timeout (skip timeout check in infinite mode)
|
||||
while True:
|
||||
if process.poll() is not None:
|
||||
break
|
||||
|
||||
elapsed = time.time() - start_time
|
||||
# Only enforce timeout if duration > 0 (not infinite mode)
|
||||
if duration > 0 and elapsed > duration + 30: # Grace period
|
||||
logger.warning("fuzzer timeout, terminating", target=target)
|
||||
process.terminate()
|
||||
try:
|
||||
process.wait(timeout=10)
|
||||
except subprocess.TimeoutExpired:
|
||||
process.kill()
|
||||
break
|
||||
|
||||
try:
|
||||
if process.stdout:
|
||||
line = process.stdout.readline()
|
||||
if line:
|
||||
output_lines.append(line)
|
||||
|
||||
# Parse real-time metrics from libFuzzer output
|
||||
# Example: "#12345 NEW cov: 100 ft: 50 corp: 25/1Kb exec/s: 1000"
|
||||
exec_match = re.search(r"#(\d+)", line)
|
||||
if exec_match:
|
||||
current_execs = int(exec_match.group(1))
|
||||
|
||||
cov_match = re.search(r"cov:\s*(\d+)", line)
|
||||
if cov_match:
|
||||
current_cov = int(cov_match.group(1))
|
||||
|
||||
exec_s_match = re.search(r"exec/s:\s*(\d+)", line)
|
||||
if exec_s_match:
|
||||
current_exec_s = int(exec_s_match.group(1))
|
||||
|
||||
# Check for crash indicators
|
||||
if "SUMMARY:" in line or "ERROR:" in line or "crash-" in line.lower():
|
||||
crash_count += 1
|
||||
self.emit_event(
|
||||
"crash_detected",
|
||||
target=target,
|
||||
crash_number=crash_count,
|
||||
line=line.strip(),
|
||||
)
|
||||
logger.debug("fuzzer output", line=line.strip())
|
||||
|
||||
# Emit metrics periodically (every 2 seconds)
|
||||
if elapsed - last_metrics_emit >= 2.0:
|
||||
last_metrics_emit = elapsed
|
||||
self.emit_event(
|
||||
"metrics",
|
||||
target=target,
|
||||
executions=current_execs,
|
||||
coverage=current_cov,
|
||||
exec_per_sec=current_exec_s,
|
||||
crashes=crash_count,
|
||||
elapsed_seconds=int(elapsed),
|
||||
remaining_seconds=max(0, duration - int(elapsed)),
|
||||
)
|
||||
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Parse statistics from output
|
||||
stats = self._parse_fuzzer_stats(output_lines)
|
||||
|
||||
# Collect crashes
|
||||
crashes = self._collect_crashes(target)
|
||||
|
||||
# Emit final event for this target if crashes were found
|
||||
if crashes:
|
||||
self.emit_event(
|
||||
"crashes_collected",
|
||||
target=target,
|
||||
count=len(crashes),
|
||||
paths=[c.file_path for c in crashes],
|
||||
)
|
||||
|
||||
except FileNotFoundError:
|
||||
logger.error("cargo-fuzz not found, please install with: cargo install cargo-fuzz")
|
||||
stats.error = "cargo-fuzz not installed"
|
||||
self.emit_event("error", target=target, message="cargo-fuzz not installed")
|
||||
except Exception as e:
|
||||
logger.exception("fuzzing error", target=target, error=str(e))
|
||||
stats.error = str(e)
|
||||
self.emit_event("error", target=target, message=str(e))
|
||||
|
||||
return TargetResult(target=target, crashes=crashes, stats=stats)
|
||||
|
||||
def _parse_fuzzer_stats(self, output_lines: list[str]) -> FuzzingStats:
|
||||
"""Parse fuzzer output for statistics.
|
||||
|
||||
:param output_lines: Lines of fuzzer output.
|
||||
:returns: Parsed statistics.
|
||||
|
||||
"""
|
||||
stats = FuzzingStats()
|
||||
full_output = "".join(output_lines)
|
||||
|
||||
# Parse libFuzzer stats
|
||||
# Example: "#12345 DONE cov: 100 ft: 50 corp: 25/1Kb exec/s: 1000"
|
||||
exec_match = re.search(r"#(\d+)", full_output)
|
||||
if exec_match:
|
||||
stats.total_executions = int(exec_match.group(1))
|
||||
|
||||
cov_match = re.search(r"cov:\s*(\d+)", full_output)
|
||||
if cov_match:
|
||||
stats.coverage_edges = int(cov_match.group(1))
|
||||
|
||||
corp_match = re.search(r"corp:\s*(\d+)", full_output)
|
||||
if corp_match:
|
||||
stats.corpus_size = int(corp_match.group(1))
|
||||
|
||||
exec_s_match = re.search(r"exec/s:\s*(\d+)", full_output)
|
||||
if exec_s_match:
|
||||
stats.executions_per_second = int(exec_s_match.group(1))
|
||||
|
||||
return stats
|
||||
|
||||
def _collect_crashes(self, target: str) -> list[CrashInfo]:
|
||||
"""Collect crash files from fuzzer output.
|
||||
|
||||
:param target: Name of the fuzz target.
|
||||
:returns: List of crash info.
|
||||
|
||||
"""
|
||||
crashes: list[CrashInfo] = []
|
||||
|
||||
if self._fuzz_project_path is None or self._crashes_path is None:
|
||||
return crashes
|
||||
|
||||
# Check for crashes in the artifacts directory
|
||||
artifacts_dir = self._fuzz_project_path / "artifacts" / target
|
||||
|
||||
if artifacts_dir.is_dir():
|
||||
for crash_file in artifacts_dir.glob("crash-*"):
|
||||
if crash_file.is_file():
|
||||
# Copy crash to output
|
||||
output_crash = self._crashes_path / target
|
||||
output_crash.mkdir(parents=True, exist_ok=True)
|
||||
dest = output_crash / crash_file.name
|
||||
shutil.copy2(crash_file, dest)
|
||||
|
||||
# Read crash input
|
||||
crash_data = crash_file.read_bytes()
|
||||
|
||||
crash_info = CrashInfo(
|
||||
file_path=str(dest),
|
||||
input_hash=crash_file.name,
|
||||
input_size=len(crash_data),
|
||||
)
|
||||
crashes.append(crash_info)
|
||||
|
||||
logger.info("found crash", target=target, file=crash_file.name)
|
||||
|
||||
return crashes
|
||||
|
||||
def _write_output(self) -> None:
|
||||
"""Write the fuzzing results to output."""
|
||||
output_path = PATH_TO_OUTPUTS / "fuzzing_results.json"
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
total_crashes = sum(len(r.crashes) for r in self._target_results)
|
||||
total_execs = sum(r.stats.total_executions for r in self._target_results if r.stats)
|
||||
|
||||
output_data = {
|
||||
"fuzz_project": str(self._fuzz_project_path),
|
||||
"targets_fuzzed": len(self._target_results),
|
||||
"total_crashes": total_crashes,
|
||||
"total_executions": total_execs,
|
||||
"crashes_path": str(self._crashes_path),
|
||||
"results": [
|
||||
{
|
||||
"target": r.target,
|
||||
"crashes": [c.model_dump() for c in r.crashes],
|
||||
"stats": r.stats.model_dump() if r.stats else None,
|
||||
}
|
||||
for r in self._target_results
|
||||
],
|
||||
}
|
||||
|
||||
output_path.write_text(json.dumps(output_data, indent=2))
|
||||
logger.info("wrote fuzzing results", path=str(output_path))
|
||||
88
fuzzforge-modules/cargo-fuzzer/src/module/models.py
Normal file
88
fuzzforge-modules/cargo-fuzzer/src/module/models.py
Normal file
@@ -0,0 +1,88 @@
|
||||
"""Models for the cargo-fuzzer module."""
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleInputBase, FuzzForgeModuleOutputBase
|
||||
|
||||
from module.settings import Settings
|
||||
|
||||
|
||||
class FuzzingStats(BaseModel):
|
||||
"""Statistics from a fuzzing run."""
|
||||
|
||||
#: Total number of test case executions
|
||||
total_executions: int = 0
|
||||
|
||||
#: Executions per second
|
||||
executions_per_second: int = 0
|
||||
|
||||
#: Number of coverage edges discovered
|
||||
coverage_edges: int = 0
|
||||
|
||||
#: Size of the corpus
|
||||
corpus_size: int = 0
|
||||
|
||||
#: Any error message
|
||||
error: str = ""
|
||||
|
||||
|
||||
class CrashInfo(BaseModel):
|
||||
"""Information about a discovered crash."""
|
||||
|
||||
#: Path to the crash input file
|
||||
file_path: str
|
||||
|
||||
#: Hash/name of the crash input
|
||||
input_hash: str
|
||||
|
||||
#: Size of the crash input in bytes
|
||||
input_size: int = 0
|
||||
|
||||
#: Crash type (if identified)
|
||||
crash_type: str = ""
|
||||
|
||||
#: Stack trace (if available)
|
||||
stack_trace: str = ""
|
||||
|
||||
|
||||
class TargetResult(BaseModel):
|
||||
"""Result of fuzzing a single target."""
|
||||
|
||||
#: Name of the fuzz target
|
||||
target: str
|
||||
|
||||
#: List of crashes found
|
||||
crashes: list[CrashInfo] = Field(default_factory=list)
|
||||
|
||||
#: Fuzzing statistics
|
||||
stats: FuzzingStats = Field(default_factory=FuzzingStats)
|
||||
|
||||
|
||||
class Input(FuzzForgeModuleInputBase[Settings]):
|
||||
"""Input for the cargo-fuzzer module.
|
||||
|
||||
Expects:
|
||||
- A fuzz project directory with validated harnesses
|
||||
- Optionally the source crate to link against
|
||||
"""
|
||||
|
||||
|
||||
class Output(FuzzForgeModuleOutputBase):
|
||||
"""Output from the cargo-fuzzer module."""
|
||||
|
||||
#: Path to the fuzz project
|
||||
fuzz_project: str = ""
|
||||
|
||||
#: Number of targets fuzzed
|
||||
targets_fuzzed: int = 0
|
||||
|
||||
#: Total crashes found across all targets
|
||||
total_crashes: int = 0
|
||||
|
||||
#: Total executions across all targets
|
||||
total_executions: int = 0
|
||||
|
||||
#: Path to collected crash files
|
||||
crashes_path: str = ""
|
||||
|
||||
#: Results per target
|
||||
results: list[TargetResult] = Field(default_factory=list)
|
||||
35
fuzzforge-modules/cargo-fuzzer/src/module/settings.py
Normal file
35
fuzzforge-modules/cargo-fuzzer/src/module/settings.py
Normal file
@@ -0,0 +1,35 @@
|
||||
"""Settings for the cargo-fuzzer module."""
|
||||
|
||||
from typing import Optional
|
||||
from pydantic import model_validator
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModulesSettingsBase
|
||||
|
||||
|
||||
class Settings(FuzzForgeModulesSettingsBase):
|
||||
"""Settings for the cargo-fuzzer module."""
|
||||
|
||||
#: Maximum fuzzing duration in seconds (total across all targets)
|
||||
#: Set to 0 for infinite/continuous mode
|
||||
max_duration: int = 60
|
||||
|
||||
#: Number of parallel fuzzing jobs
|
||||
jobs: int = 1
|
||||
|
||||
#: Maximum length of generated inputs
|
||||
max_len: int = 4096
|
||||
|
||||
#: Whether to use AddressSanitizer
|
||||
use_asan: bool = True
|
||||
|
||||
#: Specific targets to fuzz (empty = all targets)
|
||||
targets: list[str] = []
|
||||
|
||||
#: Single target to fuzz (convenience alias for targets)
|
||||
target: Optional[str] = None
|
||||
|
||||
@model_validator(mode="after")
|
||||
def handle_single_target(self) -> "Settings":
|
||||
"""Convert single target to targets list if provided."""
|
||||
if self.target and self.target not in self.targets:
|
||||
self.targets.append(self.target)
|
||||
return self
|
||||
0
fuzzforge-modules/cargo-fuzzer/tests/.gitkeep
Normal file
0
fuzzforge-modules/cargo-fuzzer/tests/.gitkeep
Normal file
9
fuzzforge-modules/crash-analyzer/Dockerfile
Normal file
9
fuzzforge-modules/crash-analyzer/Dockerfile
Normal file
@@ -0,0 +1,9 @@
|
||||
FROM localhost/fuzzforge-modules-sdk:0.1.0
|
||||
|
||||
COPY ./src /app/src
|
||||
COPY ./pyproject.toml /app/pyproject.toml
|
||||
|
||||
# Remove workspace reference since we're using wheels
|
||||
RUN sed -i '/\[tool\.uv\.sources\]/,/^$/d' /app/pyproject.toml
|
||||
|
||||
RUN uv sync --find-links /wheels
|
||||
45
fuzzforge-modules/crash-analyzer/Makefile
Normal file
45
fuzzforge-modules/crash-analyzer/Makefile
Normal file
@@ -0,0 +1,45 @@
|
||||
PACKAGE=$(word 1, $(shell uv version))
|
||||
VERSION=$(word 2, $(shell uv version))
|
||||
|
||||
PODMAN?=/usr/bin/podman
|
||||
|
||||
SOURCES=./src
|
||||
TESTS=./tests
|
||||
|
||||
.PHONY: bandit build clean format mypy pytest ruff version
|
||||
|
||||
bandit:
|
||||
uv run bandit --recursive $(SOURCES)
|
||||
|
||||
build:
|
||||
$(PODMAN) build --file ./Dockerfile --no-cache --tag $(PACKAGE):$(VERSION)
|
||||
|
||||
save: build
|
||||
$(PODMAN) save --format oci-archive --output /tmp/$(PACKAGE)-$(VERSION).oci $(PACKAGE):$(VERSION)
|
||||
|
||||
clean:
|
||||
@find . -type d \( \
|
||||
-name '*.egg-info' \
|
||||
-o -name '.mypy_cache' \
|
||||
-o -name '.pytest_cache' \
|
||||
-o -name '.ruff_cache' \
|
||||
-o -name '__pycache__' \
|
||||
\) -printf 'removing directory %p\n' -exec rm -rf {} +
|
||||
|
||||
cloc:
|
||||
cloc $(SOURCES)
|
||||
|
||||
format:
|
||||
uv run ruff format $(SOURCES) $(TESTS)
|
||||
|
||||
mypy:
|
||||
uv run mypy $(SOURCES)
|
||||
|
||||
pytest:
|
||||
uv run pytest $(TESTS)
|
||||
|
||||
ruff:
|
||||
uv run ruff check --fix $(SOURCES) $(TESTS)
|
||||
|
||||
version:
|
||||
@echo '$(PACKAGE)@$(VERSION)'
|
||||
46
fuzzforge-modules/crash-analyzer/README.md
Normal file
46
fuzzforge-modules/crash-analyzer/README.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# FuzzForge Modules - FIXME
|
||||
|
||||
## Installation
|
||||
|
||||
### Python
|
||||
|
||||
```shell
|
||||
# install the package (users)
|
||||
uv sync
|
||||
# install the package and all development dependencies (developers)
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
### Container
|
||||
|
||||
```shell
|
||||
# build the image
|
||||
make build
|
||||
# run the container
|
||||
mkdir -p "${PWD}/data" "${PWD}/data/input" "${PWD}/data/output"
|
||||
echo '{"settings":{},"resources":[]}' > "${PWD}/data/input/input.json"
|
||||
podman run --rm \
|
||||
--volume "${PWD}/data:/data" \
|
||||
'<name>:<version>' 'uv run module'
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
uv run module
|
||||
```
|
||||
|
||||
## Development tools
|
||||
|
||||
```shell
|
||||
# run ruff (formatter)
|
||||
make format
|
||||
# run mypy (type checker)
|
||||
make mypy
|
||||
# run tests (pytest)
|
||||
make pytest
|
||||
# run ruff (linter)
|
||||
make ruff
|
||||
```
|
||||
|
||||
See the file `Makefile` at the root of this directory for more tools.
|
||||
6
fuzzforge-modules/crash-analyzer/mypy.ini
Normal file
6
fuzzforge-modules/crash-analyzer/mypy.ini
Normal file
@@ -0,0 +1,6 @@
|
||||
[mypy]
|
||||
plugins = pydantic.mypy
|
||||
strict = True
|
||||
warn_unused_ignores = True
|
||||
warn_redundant_casts = True
|
||||
warn_return_any = True
|
||||
32
fuzzforge-modules/crash-analyzer/pyproject.toml
Normal file
32
fuzzforge-modules/crash-analyzer/pyproject.toml
Normal file
@@ -0,0 +1,32 @@
|
||||
[project]
|
||||
name = "crash-analyzer"
|
||||
version = "0.1.0"
|
||||
description = "FuzzForge module that analyzes fuzzing crashes and generates security reports"
|
||||
authors = []
|
||||
readme = "README.md"
|
||||
requires-python = ">=3.14"
|
||||
dependencies = [
|
||||
"fuzzforge-modules-sdk==0.0.1",
|
||||
"pydantic==2.12.4",
|
||||
"structlog==25.5.0",
|
||||
"jinja2==3.1.6",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
lints = [
|
||||
"bandit==1.8.6",
|
||||
"mypy==1.18.2",
|
||||
"ruff==0.14.4",
|
||||
]
|
||||
tests = [
|
||||
"pytest==9.0.2",
|
||||
]
|
||||
|
||||
[project.scripts]
|
||||
module = "module.__main__:main"
|
||||
|
||||
[tool.uv.sources]
|
||||
fuzzforge-modules-sdk = { workspace = true }
|
||||
|
||||
[tool.uv]
|
||||
package = true
|
||||
19
fuzzforge-modules/crash-analyzer/ruff.toml
Normal file
19
fuzzforge-modules/crash-analyzer/ruff.toml
Normal file
@@ -0,0 +1,19 @@
|
||||
line-length = 120
|
||||
|
||||
[lint]
|
||||
select = [ "ALL" ]
|
||||
ignore = [
|
||||
"COM812", # conflicts with the formatter
|
||||
"D100", # ignoring missing docstrings in public modules
|
||||
"D104", # ignoring missing docstrings in public packages
|
||||
"D203", # conflicts with 'D211'
|
||||
"D213", # conflicts with 'D212'
|
||||
"TD002", # ignoring missing author in 'TODO' statements
|
||||
"TD003", # ignoring missing issue link in 'TODO' statements
|
||||
]
|
||||
|
||||
[lint.per-file-ignores]
|
||||
"tests/*" = [
|
||||
"PLR2004", # allowing comparisons using unamed numerical constants in tests
|
||||
"S101", # allowing 'assert' statements in tests
|
||||
]
|
||||
19
fuzzforge-modules/crash-analyzer/src/module/__main__.py
Normal file
19
fuzzforge-modules/crash-analyzer/src/module/__main__.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from fuzzforge_modules_sdk.api import logs
|
||||
|
||||
from module.mod import Module
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_modules_sdk.api.modules.base import FuzzForgeModule
|
||||
|
||||
|
||||
def main() -> None:
|
||||
"""TODO."""
|
||||
logs.configure()
|
||||
module: FuzzForgeModule = Module()
|
||||
module.main()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
340
fuzzforge-modules/crash-analyzer/src/module/mod.py
Normal file
340
fuzzforge-modules/crash-analyzer/src/module/mod.py
Normal file
@@ -0,0 +1,340 @@
|
||||
"""Crash Analyzer module for FuzzForge.
|
||||
|
||||
This module analyzes crashes from cargo-fuzz, deduplicates them,
|
||||
extracts stack traces, and triages them by severity.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import hashlib
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
from pathlib import Path
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import structlog
|
||||
|
||||
from fuzzforge_modules_sdk.api.constants import PATH_TO_INPUTS, PATH_TO_OUTPUTS
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleResults
|
||||
from fuzzforge_modules_sdk.api.modules.base import FuzzForgeModule
|
||||
|
||||
from module.models import Input, Output, CrashAnalysis, Severity
|
||||
from module.settings import Settings
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleResource
|
||||
|
||||
logger = structlog.get_logger()
|
||||
|
||||
|
||||
class Module(FuzzForgeModule):
|
||||
"""Crash Analyzer module - analyzes and triages fuzzer crashes."""
|
||||
|
||||
_settings: Settings | None
|
||||
_analyses: list[CrashAnalysis]
|
||||
_fuzz_project_path: Path | None
|
||||
|
||||
def __init__(self) -> None:
|
||||
"""Initialize an instance of the class."""
|
||||
name: str = "crash-analyzer"
|
||||
version: str = "0.1.0"
|
||||
FuzzForgeModule.__init__(self, name=name, version=version)
|
||||
self._settings = None
|
||||
self._analyses = []
|
||||
self._fuzz_project_path = None
|
||||
|
||||
@classmethod
|
||||
def _get_input_type(cls) -> type[Input]:
|
||||
"""Return the input type."""
|
||||
return Input
|
||||
|
||||
@classmethod
|
||||
def _get_output_type(cls) -> type[Output]:
|
||||
"""Return the output type."""
|
||||
return Output
|
||||
|
||||
def _prepare(self, settings: Settings) -> None: # type: ignore[override]
|
||||
"""Prepare the module.
|
||||
|
||||
:param settings: Module settings.
|
||||
|
||||
"""
|
||||
self._settings = settings
|
||||
logger.info("crash-analyzer preparing", settings=settings.model_dump() if settings else {})
|
||||
|
||||
def _run(self, resources: list[FuzzForgeModuleResource]) -> FuzzForgeModuleResults:
|
||||
"""Run the crash analyzer.
|
||||
|
||||
:param resources: Input resources (fuzzing results + crashes).
|
||||
:returns: Module execution result.
|
||||
|
||||
"""
|
||||
logger.info("crash-analyzer starting", resource_count=len(resources))
|
||||
|
||||
# Find crashes directory and fuzz project
|
||||
crashes_path = None
|
||||
for resource in resources:
|
||||
path = Path(resource.path)
|
||||
if path.is_dir():
|
||||
if path.name == "crashes" or (path / "crashes").is_dir():
|
||||
crashes_path = path if path.name == "crashes" else path / "crashes"
|
||||
if (path / "fuzz_targets").is_dir():
|
||||
self._fuzz_project_path = path
|
||||
if (path / "fuzz" / "fuzz_targets").is_dir():
|
||||
self._fuzz_project_path = path / "fuzz"
|
||||
|
||||
if crashes_path is None:
|
||||
# Try to find crashes in fuzzing_results.json
|
||||
for resource in resources:
|
||||
path = Path(resource.path)
|
||||
if path.name == "fuzzing_results.json" and path.exists():
|
||||
with open(path) as f:
|
||||
data = json.load(f)
|
||||
if "crashes_path" in data:
|
||||
crashes_path = Path(data["crashes_path"])
|
||||
break
|
||||
|
||||
if crashes_path is None or not crashes_path.exists():
|
||||
logger.warning("no crashes found to analyze")
|
||||
self._write_output()
|
||||
return FuzzForgeModuleResults.SUCCESS
|
||||
|
||||
logger.info("analyzing crashes", path=str(crashes_path))
|
||||
|
||||
# Analyze crashes per target
|
||||
for target_dir in crashes_path.iterdir():
|
||||
if target_dir.is_dir():
|
||||
target = target_dir.name
|
||||
for crash_file in target_dir.glob("crash-*"):
|
||||
if crash_file.is_file():
|
||||
analysis = self._analyze_crash(target, crash_file)
|
||||
self._analyses.append(analysis)
|
||||
|
||||
# Deduplicate crashes
|
||||
self._deduplicate_crashes()
|
||||
|
||||
# Write output
|
||||
self._write_output()
|
||||
|
||||
unique_count = sum(1 for a in self._analyses if not a.is_duplicate)
|
||||
logger.info("crash-analyzer completed",
|
||||
total=len(self._analyses),
|
||||
unique=unique_count)
|
||||
|
||||
return FuzzForgeModuleResults.SUCCESS
|
||||
|
||||
def _cleanup(self, settings: Settings) -> None: # type: ignore[override]
|
||||
"""Clean up after execution.
|
||||
|
||||
:param settings: Module settings.
|
||||
|
||||
"""
|
||||
pass
|
||||
|
||||
def _analyze_crash(self, target: str, crash_file: Path) -> CrashAnalysis:
|
||||
"""Analyze a single crash.
|
||||
|
||||
:param target: Name of the fuzz target.
|
||||
:param crash_file: Path to the crash input file.
|
||||
:returns: Crash analysis result.
|
||||
|
||||
"""
|
||||
logger.debug("analyzing crash", target=target, file=crash_file.name)
|
||||
|
||||
# Read crash input
|
||||
crash_data = crash_file.read_bytes()
|
||||
input_hash = hashlib.sha256(crash_data).hexdigest()[:16]
|
||||
|
||||
# Try to reproduce and get stack trace
|
||||
stack_trace = ""
|
||||
crash_type = "unknown"
|
||||
severity = Severity.UNKNOWN
|
||||
|
||||
if self._fuzz_project_path:
|
||||
stack_trace, crash_type = self._reproduce_crash(target, crash_file)
|
||||
severity = self._determine_severity(crash_type, stack_trace)
|
||||
|
||||
return CrashAnalysis(
|
||||
target=target,
|
||||
input_file=str(crash_file),
|
||||
input_hash=input_hash,
|
||||
input_size=len(crash_data),
|
||||
crash_type=crash_type,
|
||||
severity=severity,
|
||||
stack_trace=stack_trace,
|
||||
is_duplicate=False,
|
||||
)
|
||||
|
||||
def _reproduce_crash(self, target: str, crash_file: Path) -> tuple[str, str]:
|
||||
"""Reproduce a crash to get stack trace.
|
||||
|
||||
:param target: Name of the fuzz target.
|
||||
:param crash_file: Path to the crash input file.
|
||||
:returns: Tuple of (stack_trace, crash_type).
|
||||
|
||||
"""
|
||||
if self._fuzz_project_path is None:
|
||||
return "", "unknown"
|
||||
|
||||
try:
|
||||
env = os.environ.copy()
|
||||
env["RUST_BACKTRACE"] = "1"
|
||||
|
||||
result = subprocess.run(
|
||||
[
|
||||
"cargo", "+nightly", "fuzz", "run",
|
||||
target,
|
||||
str(crash_file),
|
||||
"--",
|
||||
"-runs=1",
|
||||
],
|
||||
cwd=self._fuzz_project_path,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=30,
|
||||
env=env,
|
||||
)
|
||||
|
||||
output = result.stdout + result.stderr
|
||||
|
||||
# Extract crash type
|
||||
crash_type = "unknown"
|
||||
if "heap-buffer-overflow" in output.lower():
|
||||
crash_type = "heap-buffer-overflow"
|
||||
elif "stack-buffer-overflow" in output.lower():
|
||||
crash_type = "stack-buffer-overflow"
|
||||
elif "heap-use-after-free" in output.lower():
|
||||
crash_type = "use-after-free"
|
||||
elif "null" in output.lower() and "deref" in output.lower():
|
||||
crash_type = "null-pointer-dereference"
|
||||
elif "panic" in output.lower():
|
||||
crash_type = "panic"
|
||||
elif "assertion" in output.lower():
|
||||
crash_type = "assertion-failure"
|
||||
elif "timeout" in output.lower():
|
||||
crash_type = "timeout"
|
||||
elif "out of memory" in output.lower() or "oom" in output.lower():
|
||||
crash_type = "out-of-memory"
|
||||
|
||||
# Extract stack trace
|
||||
stack_lines = []
|
||||
in_stack = False
|
||||
for line in output.splitlines():
|
||||
if "SUMMARY:" in line or "ERROR:" in line:
|
||||
in_stack = True
|
||||
if in_stack:
|
||||
stack_lines.append(line)
|
||||
if len(stack_lines) > 50: # Limit stack trace length
|
||||
break
|
||||
|
||||
return "\n".join(stack_lines), crash_type
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
return "", "timeout"
|
||||
except Exception as e:
|
||||
logger.warning("failed to reproduce crash", error=str(e))
|
||||
return "", "unknown"
|
||||
|
||||
def _determine_severity(self, crash_type: str, stack_trace: str) -> Severity:
|
||||
"""Determine crash severity based on type and stack trace.
|
||||
|
||||
:param crash_type: Type of the crash.
|
||||
:param stack_trace: Stack trace string.
|
||||
:returns: Severity level.
|
||||
|
||||
"""
|
||||
high_severity = [
|
||||
"heap-buffer-overflow",
|
||||
"stack-buffer-overflow",
|
||||
"use-after-free",
|
||||
"double-free",
|
||||
]
|
||||
|
||||
medium_severity = [
|
||||
"null-pointer-dereference",
|
||||
"out-of-memory",
|
||||
"integer-overflow",
|
||||
]
|
||||
|
||||
low_severity = [
|
||||
"panic",
|
||||
"assertion-failure",
|
||||
"timeout",
|
||||
]
|
||||
|
||||
if crash_type in high_severity:
|
||||
return Severity.HIGH
|
||||
elif crash_type in medium_severity:
|
||||
return Severity.MEDIUM
|
||||
elif crash_type in low_severity:
|
||||
return Severity.LOW
|
||||
else:
|
||||
return Severity.UNKNOWN
|
||||
|
||||
def _deduplicate_crashes(self) -> None:
|
||||
"""Mark duplicate crashes based on stack trace similarity."""
|
||||
seen_signatures: set[str] = set()
|
||||
|
||||
for analysis in self._analyses:
|
||||
# Create a signature from crash type and key stack frames
|
||||
signature = self._create_signature(analysis)
|
||||
|
||||
if signature in seen_signatures:
|
||||
analysis.is_duplicate = True
|
||||
else:
|
||||
seen_signatures.add(signature)
|
||||
|
||||
def _create_signature(self, analysis: CrashAnalysis) -> str:
|
||||
"""Create a unique signature for a crash.
|
||||
|
||||
:param analysis: Crash analysis.
|
||||
:returns: Signature string.
|
||||
|
||||
"""
|
||||
# Use crash type + first few significant stack frames
|
||||
parts = [analysis.target, analysis.crash_type]
|
||||
|
||||
# Extract function names from stack trace
|
||||
func_pattern = re.compile(r"in (\S+)")
|
||||
funcs = func_pattern.findall(analysis.stack_trace)
|
||||
|
||||
# Use first 3 unique functions
|
||||
seen = set()
|
||||
for func in funcs:
|
||||
if func not in seen and not func.startswith("std::"):
|
||||
parts.append(func)
|
||||
seen.add(func)
|
||||
if len(seen) >= 3:
|
||||
break
|
||||
|
||||
return "|".join(parts)
|
||||
|
||||
def _write_output(self) -> None:
|
||||
"""Write the analysis results to output."""
|
||||
output_path = PATH_TO_OUTPUTS / "crash_analysis.json"
|
||||
output_path.parent.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
unique = [a for a in self._analyses if not a.is_duplicate]
|
||||
duplicates = [a for a in self._analyses if a.is_duplicate]
|
||||
|
||||
# Group by severity
|
||||
by_severity = {
|
||||
"high": [a for a in unique if a.severity == Severity.HIGH],
|
||||
"medium": [a for a in unique if a.severity == Severity.MEDIUM],
|
||||
"low": [a for a in unique if a.severity == Severity.LOW],
|
||||
"unknown": [a for a in unique if a.severity == Severity.UNKNOWN],
|
||||
}
|
||||
|
||||
output_data = {
|
||||
"total_crashes": len(self._analyses),
|
||||
"unique_crashes": len(unique),
|
||||
"duplicate_crashes": len(duplicates),
|
||||
"severity_summary": {k: len(v) for k, v in by_severity.items()},
|
||||
"unique_analyses": [a.model_dump() for a in unique],
|
||||
"duplicate_analyses": [a.model_dump() for a in duplicates],
|
||||
}
|
||||
|
||||
output_path.write_text(json.dumps(output_data, indent=2, default=str))
|
||||
logger.info("wrote crash analysis", path=str(output_path))
|
||||
79
fuzzforge-modules/crash-analyzer/src/module/models.py
Normal file
79
fuzzforge-modules/crash-analyzer/src/module/models.py
Normal file
@@ -0,0 +1,79 @@
|
||||
"""Models for the crash-analyzer module."""
|
||||
|
||||
from enum import Enum
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModuleInputBase, FuzzForgeModuleOutputBase
|
||||
|
||||
from module.settings import Settings
|
||||
|
||||
|
||||
class Severity(str, Enum):
|
||||
"""Severity level of a crash."""
|
||||
|
||||
HIGH = "high"
|
||||
MEDIUM = "medium"
|
||||
LOW = "low"
|
||||
UNKNOWN = "unknown"
|
||||
|
||||
|
||||
class CrashAnalysis(BaseModel):
|
||||
"""Analysis of a single crash."""
|
||||
|
||||
#: Name of the fuzz target
|
||||
target: str
|
||||
|
||||
#: Path to the input file that caused the crash
|
||||
input_file: str
|
||||
|
||||
#: Hash of the input for identification
|
||||
input_hash: str
|
||||
|
||||
#: Size of the input in bytes
|
||||
input_size: int = 0
|
||||
|
||||
#: Type of crash (e.g., "heap-buffer-overflow", "panic")
|
||||
crash_type: str = "unknown"
|
||||
|
||||
#: Severity level
|
||||
severity: Severity = Severity.UNKNOWN
|
||||
|
||||
#: Stack trace from reproducing the crash
|
||||
stack_trace: str = ""
|
||||
|
||||
#: Whether this crash is a duplicate of another
|
||||
is_duplicate: bool = False
|
||||
|
||||
#: Signature for deduplication
|
||||
signature: str = ""
|
||||
|
||||
|
||||
class Input(FuzzForgeModuleInputBase[Settings]):
|
||||
"""Input for the crash-analyzer module.
|
||||
|
||||
Expects:
|
||||
- Crashes directory from cargo-fuzzer
|
||||
- Optionally the fuzz project for reproduction
|
||||
"""
|
||||
|
||||
|
||||
class Output(FuzzForgeModuleOutputBase):
|
||||
"""Output from the crash-analyzer module."""
|
||||
|
||||
#: Total number of crashes analyzed
|
||||
total_crashes: int = 0
|
||||
|
||||
#: Number of unique crashes (after deduplication)
|
||||
unique_crashes: int = 0
|
||||
|
||||
#: Number of duplicate crashes
|
||||
duplicate_crashes: int = 0
|
||||
|
||||
#: Summary by severity
|
||||
severity_summary: dict[str, int] = Field(default_factory=dict)
|
||||
|
||||
#: Unique crash analyses
|
||||
unique_analyses: list[CrashAnalysis] = Field(default_factory=list)
|
||||
|
||||
#: Duplicate crash analyses
|
||||
duplicate_analyses: list[CrashAnalysis] = Field(default_factory=list)
|
||||
16
fuzzforge-modules/crash-analyzer/src/module/settings.py
Normal file
16
fuzzforge-modules/crash-analyzer/src/module/settings.py
Normal file
@@ -0,0 +1,16 @@
|
||||
"""Settings for the crash-analyzer module."""
|
||||
|
||||
from fuzzforge_modules_sdk.api.models import FuzzForgeModulesSettingsBase
|
||||
|
||||
|
||||
class Settings(FuzzForgeModulesSettingsBase):
|
||||
"""Settings for the crash-analyzer module."""
|
||||
|
||||
#: Whether to reproduce crashes for stack traces
|
||||
reproduce_crashes: bool = True
|
||||
|
||||
#: Timeout for reproducing each crash (seconds)
|
||||
reproduce_timeout: int = 30
|
||||
|
||||
#: Whether to deduplicate crashes
|
||||
deduplicate: bool = True
|
||||
0
fuzzforge-modules/crash-analyzer/tests/.gitkeep
Normal file
0
fuzzforge-modules/crash-analyzer/tests/.gitkeep
Normal file
9
fuzzforge-modules/fuzzforge-module-template/Dockerfile
Normal file
9
fuzzforge-modules/fuzzforge-module-template/Dockerfile
Normal file
@@ -0,0 +1,9 @@
|
||||
FROM localhost/fuzzforge-modules-sdk:0.0.1
|
||||
|
||||
COPY ./src /app/src
|
||||
COPY ./pyproject.toml /app/pyproject.toml
|
||||
|
||||
# Remove workspace reference since we're using wheels
|
||||
RUN sed -i '/\[tool\.uv\.sources\]/,/^$/d' /app/pyproject.toml
|
||||
|
||||
RUN uv sync --find-links /wheels
|
||||
45
fuzzforge-modules/fuzzforge-module-template/Makefile
Normal file
45
fuzzforge-modules/fuzzforge-module-template/Makefile
Normal file
@@ -0,0 +1,45 @@
|
||||
PACKAGE=$(word 1, $(shell uv version))
|
||||
VERSION=$(word 2, $(shell uv version))
|
||||
|
||||
PODMAN?=/usr/bin/podman
|
||||
|
||||
SOURCES=./src
|
||||
TESTS=./tests
|
||||
|
||||
.PHONY: bandit build clean format mypy pytest ruff version
|
||||
|
||||
bandit:
|
||||
uv run bandit --recursive $(SOURCES)
|
||||
|
||||
build:
|
||||
$(PODMAN) build --file ./Dockerfile --no-cache --tag $(PACKAGE):$(VERSION)
|
||||
|
||||
save: build
|
||||
$(PODMAN) save --format oci-archive --output /tmp/$(PACKAGE)-$(VERSION).oci $(PACKAGE):$(VERSION)
|
||||
|
||||
clean:
|
||||
@find . -type d \( \
|
||||
-name '*.egg-info' \
|
||||
-o -name '.mypy_cache' \
|
||||
-o -name '.pytest_cache' \
|
||||
-o -name '.ruff_cache' \
|
||||
-o -name '__pycache__' \
|
||||
\) -printf 'removing directory %p\n' -exec rm -rf {} +
|
||||
|
||||
cloc:
|
||||
cloc $(SOURCES)
|
||||
|
||||
format:
|
||||
uv run ruff format $(SOURCES) $(TESTS)
|
||||
|
||||
mypy:
|
||||
uv run mypy $(SOURCES)
|
||||
|
||||
pytest:
|
||||
uv run pytest $(TESTS)
|
||||
|
||||
ruff:
|
||||
uv run ruff check --fix $(SOURCES) $(TESTS)
|
||||
|
||||
version:
|
||||
@echo '$(PACKAGE)@$(VERSION)'
|
||||
46
fuzzforge-modules/fuzzforge-module-template/README.md
Normal file
46
fuzzforge-modules/fuzzforge-module-template/README.md
Normal file
@@ -0,0 +1,46 @@
|
||||
# FuzzForge Modules - FIXME
|
||||
|
||||
## Installation
|
||||
|
||||
### Python
|
||||
|
||||
```shell
|
||||
# install the package (users)
|
||||
uv sync
|
||||
# install the package and all development dependencies (developers)
|
||||
uv sync --all-extras
|
||||
```
|
||||
|
||||
### Container
|
||||
|
||||
```shell
|
||||
# build the image
|
||||
make build
|
||||
# run the container
|
||||
mkdir -p "${PWD}/data" "${PWD}/data/input" "${PWD}/data/output"
|
||||
echo '{"settings":{},"resources":[]}' > "${PWD}/data/input/input.json"
|
||||
podman run --rm \
|
||||
--volume "${PWD}/data:/data" \
|
||||
'<name>:<version>' 'uv run module'
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```shell
|
||||
uv run module
|
||||
```
|
||||
|
||||
## Development tools
|
||||
|
||||
```shell
|
||||
# run ruff (formatter)
|
||||
make format
|
||||
# run mypy (type checker)
|
||||
make mypy
|
||||
# run tests (pytest)
|
||||
make pytest
|
||||
# run ruff (linter)
|
||||
make ruff
|
||||
```
|
||||
|
||||
See the file `Makefile` at the root of this directory for more tools.
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user