Mirror of https://github.com/FuzzingLabs/fuzzforge_ai.git (synced 2026-04-21 22:26:25 +02:00)

Compare commits

8 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 574e0cbce7 |  |
|  | f6cdb1ae2e |  |
|  | 731927667d |  |
|  | 75df59ddef |  |
|  | 4e14b4207d |  |
|  | 4cf4a1e5e8 |  |
|  | 076ec71482 |  |
|  | fe58b39abf |  |

@@ -240,6 +240,10 @@ yarn-error.log*
 !**/secret_detection_benchmark_GROUND_TRUTH.json
 !**/secret_detection/results/
 
+# Exception: Allow workers/secrets/ directory (secrets detection worker)
+!workers/secrets/
+!workers/secrets/**
+
 secret*
 secrets/
 credentials*

@@ -26,27 +26,50 @@ class RemoteAgentConnection:
         """Initialize connection to a remote agent"""
         self.url = url.rstrip('/')
         self.agent_card = None
-        self.client = httpx.AsyncClient(timeout=120.0)
+        self.client = httpx.AsyncClient(timeout=120.0, follow_redirects=True)
         self.context_id = None
 
     async def get_agent_card(self) -> Optional[Dict[str, Any]]:
         """Get the agent card from the remote agent"""
-        try:
-            # Try new path first (A2A 0.3.0+)
-            response = await self.client.get(f"{self.url}/.well-known/agent-card.json")
-            response.raise_for_status()
-            self.agent_card = response.json()
-            return self.agent_card
-        except Exception:
-            # Try old path for compatibility
+        # If URL already points to a .json file, fetch it directly
+        if self.url.endswith('.json'):
             try:
-                response = await self.client.get(f"{self.url}/.well-known/agent.json")
+                response = await self.client.get(self.url)
                 response.raise_for_status()
                 self.agent_card = response.json()
 
+                # Use canonical URL from agent card if provided
+                if isinstance(self.agent_card, dict) and "url" in self.agent_card:
+                    self.url = self.agent_card["url"].rstrip('/')
+
                 return self.agent_card
             except Exception as e:
                 print(f"Failed to get agent card from {self.url}: {e}")
                 return None
 
+        # Try both agent-card.json (A2A 0.3.0+) and agent.json (legacy)
+        well_known_paths = [
+            "/.well-known/agent-card.json",
+            "/.well-known/agent.json",
+        ]
+
+        for path in well_known_paths:
+            try:
+                response = await self.client.get(f"{self.url}{path}")
+                response.raise_for_status()
+                self.agent_card = response.json()
+
+                # Use canonical URL from agent card if provided
+                if isinstance(self.agent_card, dict) and "url" in self.agent_card:
+                    self.url = self.agent_card["url"].rstrip('/')
+
+                return self.agent_card
+            except Exception:
+                continue
+
+        print(f"Failed to get agent card from {self.url}")
+        print("Tip: If agent is at /a2a/something, use full URL: /register http://host:port/a2a/something")
+        return None
+
     async def send_message(self, message: str | Dict[str, Any] | List[Dict[str, Any]]) -> str:
         """Send a message to the remote agent using A2A protocol"""
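
In effect, the new lookup short-circuits on explicit card URLs and otherwise walks the well-known paths in order. A minimal standalone sketch of that resolution order (the helper name is illustrative, not from the repo):

```python
import httpx

async def resolve_agent_card(url: str, client: httpx.AsyncClient) -> dict | None:
    """Illustrative helper mirroring the lookup order in the hunk above."""
    if url.endswith('.json'):
        candidates = [url]  # explicit card URL, fetched as-is
    else:
        base = url.rstrip('/')
        candidates = [
            f"{base}/.well-known/agent-card.json",  # A2A 0.3.0+
            f"{base}/.well-known/agent.json",       # legacy fallback
        ]
    for candidate in candidates:
        try:
            response = await client.get(candidate)
            response.raise_for_status()
            return response.json()
        except Exception:
            continue
    return None
```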

@@ -93,7 +116,7 @@ class RemoteAgentConnection:
         payload["params"]["contextId"] = self.context_id
 
         # Send to root endpoint per A2A protocol
-        response = await self.client.post(f"{self.url}/", json=payload)
+        response = await self.client.post(self.url, json=payload)
         response.raise_for_status()
         result = response.json()
 
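
For orientation, the payload posted above is a JSON-RPC 2.0 envelope per the A2A protocol. A rough sketch of its likely shape (field values are illustrative assumptions, not taken from this diff):

```python
# Hypothetical A2A message/send envelope; only contextId is confirmed by the hunk above.
payload = {
    "jsonrpc": "2.0",
    "id": "1",
    "method": "message/send",
    "params": {
        "message": {
            "role": "user",
            "parts": [{"kind": "text", "text": "Scan this repository"}],
            "messageId": "msg-001",
        },
        # contextId (attached in the hunk above) keeps multi-turn state on the remote agent
        "contextId": "ctx-001",
    },
}
```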

@@ -55,9 +55,12 @@ async def get_run_status(
     is_failed = workflow_status == "FAILED"
     is_running = workflow_status == "RUNNING"
 
+    # Extract workflow name from run_id (format: workflow_name-unique_id)
+    workflow_name = run_id.rsplit('-', 1)[0] if '-' in run_id else "unknown"
+
     return WorkflowStatus(
         run_id=run_id,
-        workflow="unknown",  # Temporal doesn't track workflow name in status
+        workflow=workflow_name,
         status=workflow_status,
         is_completed=is_completed,
         is_failed=is_failed,
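
Because `rsplit('-', 1)` splits only at the last dash, workflow names that themselves contain dashes survive the extraction. A quick sketch with hypothetical run IDs:

```python
def extract_workflow_name(run_id: str) -> str:
    # Same expression as the hunk above
    return run_id.rsplit('-', 1)[0] if '-' in run_id else "unknown"

print(extract_workflow_name("security_assessment-1a2b3c"))  # security_assessment
print(extract_workflow_name("secret-detection-1a2b3c"))     # secret-detection
print(extract_workflow_name("noseparator"))                 # unknown
```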

@@ -123,6 +126,9 @@ async def get_run_findings(
     else:
         sarif = {}
 
+    # Extract workflow name from run_id (format: workflow_name-unique_id)
+    workflow_name = run_id.rsplit('-', 1)[0] if '-' in run_id else "unknown"
+
     # Metadata
     metadata = {
         "completion_time": status.get("close_time"),

@@ -130,7 +136,7 @@ async def get_run_findings(
     }
 
     return WorkflowFindings(
-        workflow="unknown",
+        workflow=workflow_name,
         run_id=run_id,
         sarif=sarif,
         metadata=metadata

@@ -566,7 +566,6 @@ async def get_workflow_worker_info(
     return {
         "workflow": workflow_name,
         "vertical": vertical,
-        "worker_container": f"fuzzforge-worker-{vertical}",
         "worker_service": f"worker-{vertical}",
         "task_queue": f"{vertical}-queue",
         "required": True

@@ -17,22 +17,22 @@ parameters:
   agent_url:
     type: string
     default: "http://fuzzforge-task-agent:8000/a2a/litellm_agent"
 
   llm_model:
     type: string
-    default: "gpt-4o-mini"
+    default: "gpt-5-mini"
 
   llm_provider:
     type: string
     default: "openai"
 
   max_files:
     type: integer
     default: 20
 
 default_parameters:
   agent_url: "http://fuzzforge-task-agent:8000/a2a/litellm_agent"
-  llm_model: "gpt-4o-mini"
+  llm_model: "gpt-5-mini"
   llm_provider: "openai"
   max_files: 20
 

+6 -6

@@ -78,7 +78,7 @@ fuzzforge workflows list
 fuzzforge workflows info security_assessment
 
 # Submit a workflow for analysis
-fuzzforge workflow security_assessment /path/to/your/code
+fuzzforge workflow run security_assessment /path/to/your/code
 
 
 # View findings when complete

@@ -150,24 +150,24 @@ fuzzforge workflows parameters security_assessment --no-interactive
 
 ### Workflow Execution
 
-#### `fuzzforge workflow <workflow> <target-path>`
+#### `fuzzforge workflow run <workflow> <target-path>`
 Execute a security testing workflow with **automatic file upload**.
 
 ```bash
 # Basic execution - CLI automatically detects local files and uploads them
-fuzzforge workflow security_assessment /path/to/code
+fuzzforge workflow run security_assessment /path/to/code
 
 # With parameters
-fuzzforge workflow security_assessment /path/to/binary \
+fuzzforge workflow run security_assessment /path/to/binary \
     --param timeout=3600 \
     --param iterations=10000
 
 # With parameter file
-fuzzforge workflow security_assessment /path/to/code \
+fuzzforge workflow run security_assessment /path/to/code \
     --param-file my-params.json
 
 # Wait for completion
-fuzzforge workflow security_assessment /path/to/code --wait
+fuzzforge workflow run security_assessment /path/to/code --wait
 ```
 
 **Automatic File Upload Behavior:**

@@ -140,11 +140,145 @@ def get_findings(
         else:  # table format
             display_findings_table(findings.sarif)
 
+        # Suggest export command and show command
+        console.print(f"\n💡 View full details of a finding: [bold cyan]ff finding show {run_id} --rule <rule-id>[/bold cyan]")
+        console.print(f"💡 Export these findings: [bold cyan]ff findings export {run_id} --format sarif[/bold cyan]")
+        console.print(" Supported formats: [cyan]sarif[/cyan] (standard), [cyan]json[/cyan], [cyan]csv[/cyan], [cyan]html[/cyan]")
+
     except Exception as e:
         console.print(f"❌ Failed to get findings: {e}", style="red")
         raise typer.Exit(1)
 
+
+def show_finding(
+    run_id: str = typer.Argument(..., help="Run ID to get finding from"),
+    rule_id: str = typer.Option(..., "--rule", "-r", help="Rule ID of the specific finding to show")
+):
+    """
+    🔍 Show detailed information about a specific finding
+
+    This function is registered as a command in main.py under the finding (singular) command group.
+    """
+    try:
+        require_project()
+        validate_run_id(run_id)
+
+        # Try to get from database first, fallback to API
+        db = get_project_db()
+        findings_data = None
+        if db:
+            findings_data = db.get_findings(run_id)
+
+        if not findings_data:
+            with get_client() as client:
+                console.print(f"🔍 Fetching findings for run: {run_id}")
+                findings = client.get_run_findings(run_id)
+                sarif_data = findings.sarif
+        else:
+            sarif_data = findings_data.sarif_data
+
+        # Find the specific finding by rule_id
+        runs = sarif_data.get("runs", [])
+        if not runs:
+            console.print("❌ No findings data available", style="red")
+            raise typer.Exit(1)
+
+        run_data = runs[0]
+        results = run_data.get("results", [])
+        tool = run_data.get("tool", {}).get("driver", {})
+
+        # Search for matching finding
+        matching_finding = None
+        for result in results:
+            if result.get("ruleId") == rule_id:
+                matching_finding = result
+                break
+
+        if not matching_finding:
+            console.print(f"❌ No finding found with rule ID: {rule_id}", style="red")
+            console.print(f"💡 Use [bold cyan]ff findings get {run_id}[/bold cyan] to see all findings", style="dim")
+            raise typer.Exit(1)
+
+        # Display detailed finding
+        display_finding_detail(matching_finding, tool, run_id)
+
+    except Exception as e:
+        console.print(f"❌ Failed to get finding: {e}", style="red")
+        raise typer.Exit(1)
+
+
+def display_finding_detail(finding: Dict[str, Any], tool: Dict[str, Any], run_id: str):
+    """Display detailed information about a single finding"""
+    rule_id = finding.get("ruleId", "unknown")
+    level = finding.get("level", "note")
+    message = finding.get("message", {})
+    message_text = message.get("text", "No summary available")
+    message_markdown = message.get("markdown", message_text)
+
+    # Get location
+    locations = finding.get("locations", [])
+    location_str = "Unknown location"
+    code_snippet = None
+
+    if locations:
+        physical_location = locations[0].get("physicalLocation", {})
+        artifact_location = physical_location.get("artifactLocation", {})
+        region = physical_location.get("region", {})
+
+        file_path = artifact_location.get("uri", "")
+        if file_path:
+            location_str = file_path
+            if region.get("startLine"):
+                location_str += f":{region['startLine']}"
+            if region.get("startColumn"):
+                location_str += f":{region['startColumn']}"
+
+        # Get code snippet if available
+        if region.get("snippet", {}).get("text"):
+            code_snippet = region["snippet"]["text"].strip()
+
+    # Get severity style
+    severity_color = {
+        "error": "red",
+        "warning": "yellow",
+        "note": "blue",
+        "info": "cyan"
+    }.get(level.lower(), "white")
+
+    # Build detailed content
+    content_lines = []
+    content_lines.append(f"[bold]Rule ID:[/bold] {rule_id}")
+    content_lines.append(f"[bold]Severity:[/bold] [{severity_color}]{level.upper()}[/{severity_color}]")
+    content_lines.append(f"[bold]Location:[/bold] {location_str}")
+    content_lines.append(f"[bold]Tool:[/bold] {tool.get('name', 'Unknown')} v{tool.get('version', 'unknown')}")
+    content_lines.append(f"[bold]Run ID:[/bold] {run_id}")
+    content_lines.append("")
+    content_lines.append(f"[bold]Summary:[/bold]")
+    content_lines.append(message_text)
+    content_lines.append("")
+    content_lines.append(f"[bold]Description:[/bold]")
+    content_lines.append(message_markdown)
+
+    if code_snippet:
+        content_lines.append("")
+        content_lines.append(f"[bold]Code Snippet:[/bold]")
+        content_lines.append(f"[dim]{code_snippet}[/dim]")
+
+    content = "\n".join(content_lines)
+
+    # Display in panel
+    console.print()
+    console.print(Panel(
+        content,
+        title=f"🔍 Finding Detail",
+        border_style=severity_color,
+        box=box.ROUNDED,
+        padding=(1, 2)
+    ))
+    console.print()
+    console.print(f"💡 Export this run: [bold cyan]ff findings export {run_id} --format sarif[/bold cyan]")
+
+
 def display_findings_table(sarif_data: Dict[str, Any]):
     """Display SARIF findings in a rich table format"""
     runs = sarif_data.get("runs", [])
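
The lookup in `show_finding` walks the standard SARIF 2.1.0 layout (`runs[0].results[].ruleId` plus `tool.driver`). A minimal example of the structure it expects, with illustrative values:

```python
# Minimal SARIF 2.1.0 shape that show_finding/display_finding_detail traverse
sarif_data = {
    "runs": [{
        "tool": {"driver": {"name": "gitleaks", "version": "8.18.0"}},
        "results": [{
            "ruleId": "generic-api-key",
            "level": "warning",
            "message": {"text": "Possible hardcoded secret"},
            "locations": [{
                "physicalLocation": {
                    "artifactLocation": {"uri": "src/config.py"},
                    "region": {"startLine": 12, "snippet": {"text": "API_KEY = '...'"}},
                }
            }],
        }],
    }],
}
```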

@@ -195,8 +329,8 @@ def display_findings_table(sarif_data: Dict[str, Any]):
     # Detailed results - Rich Text-based table with proper emoji alignment
     results_table = Table(box=box.ROUNDED)
     results_table.add_column("Severity", width=12, justify="left", no_wrap=True)
-    results_table.add_column("Rule", width=25, justify="left", style="bold cyan", no_wrap=True)
-    results_table.add_column("Message", width=55, justify="left", no_wrap=True)
+    results_table.add_column("Rule", justify="left", style="bold cyan", no_wrap=True)
+    results_table.add_column("Message", width=45, justify="left", no_wrap=True)
     results_table.add_column("Location", width=20, justify="left", style="dim", no_wrap=True)
 
     for result in results[:50]:  # Limit to first 50 results

@@ -224,18 +358,16 @@ def display_findings_table(sarif_data: Dict[str, Any]):
         severity_text = Text(level.upper(), style=severity_style(level))
         severity_text.truncate(12, overflow="ellipsis")
 
-        rule_text = Text(rule_id)
-        rule_text.truncate(25, overflow="ellipsis")
+        # Show full rule ID without truncation
 
         message_text = Text(message)
-        message_text.truncate(55, overflow="ellipsis")
+        message_text.truncate(45, overflow="ellipsis")
 
         location_text = Text(location_str)
         location_text.truncate(20, overflow="ellipsis")
 
         results_table.add_row(
             severity_text,
-            rule_text,
+            rule_id,  # Pass string directly to show full UUID
             message_text,
             location_text
         )

@@ -307,16 +439,20 @@ def findings_history(
 def export_findings(
     run_id: str = typer.Argument(..., help="Run ID to export findings for"),
     format: str = typer.Option(
-        "json", "--format", "-f",
-        help="Export format: json, csv, html, sarif"
+        "sarif", "--format", "-f",
+        help="Export format: sarif (standard), json, csv, html"
     ),
     output: Optional[str] = typer.Option(
         None, "--output", "-o",
-        help="Output file path (defaults to findings-<run-id>.<format>)"
+        help="Output file path (defaults to findings-<run-id>-<timestamp>.<format>)"
     )
 ):
     """
     📤 Export security findings in various formats
 
+    SARIF is the standard format for security findings and is recommended
+    for interoperability with other security tools. Filenames are automatically
+    made unique with timestamps to prevent overwriting previous exports.
     """
     db = get_project_db()
     if not db:

@@ -334,9 +470,10 @@ def export_findings(
     else:
         sarif_data = findings_data.sarif_data
 
-    # Generate output filename
+    # Generate output filename with timestamp for uniqueness
     if not output:
-        output = f"findings-{run_id[:8]}.{format}"
+        timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
+        output = f"findings-{run_id[:8]}-{timestamp}.{format}"
 
     output_path = Path(output)
 
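
The default export filename now embeds a second-resolution timestamp, so repeated exports of the same run no longer overwrite each other. For example (hypothetical run ID and clock):

```python
from datetime import datetime

run_id = "1a2b3c4d5e6f"
timestamp = datetime.now().strftime("%Y%m%d-%H%M%S")
output = f"findings-{run_id[:8]}-{timestamp}.sarif"
print(output)  # e.g. findings-1a2b3c4d-20260421-142605.sarif
```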

@@ -141,7 +141,7 @@ FuzzForge security testing project.
 fuzzforge workflows
 
 # Submit a workflow for analysis
-fuzzforge workflow <workflow-name> /path/to/target
+fuzzforge workflow run <workflow-name> /path/to/target
 
 # View findings
 fuzzforge finding <run-id>

@@ -59,66 +59,6 @@ def format_number(num: int) -> str:
     return str(num)
 
 
-@app.command("stats")
-def fuzzing_stats(
-    run_id: str = typer.Argument(..., help="Run ID to get statistics for"),
-    refresh: int = typer.Option(
-        5, "--refresh", "-r",
-        help="Refresh interval in seconds"
-    ),
-    once: bool = typer.Option(
-        False, "--once",
-        help="Show stats once and exit"
-    )
-):
-    """
-    📊 Show current fuzzing statistics for a run
-    """
-    try:
-        with get_client() as client:
-            if once:
-                # Show stats once
-                stats = client.get_fuzzing_stats(run_id)
-                display_stats_table(stats)
-            else:
-                # Live updating stats
-                console.print(f"📊 [bold]Live Fuzzing Statistics[/bold] (Run: {run_id[:12]}...)")
-                console.print(f"Refreshing every {refresh}s. Press Ctrl+C to stop.\n")
-
-                with Live(auto_refresh=False, console=console) as live:
-                    while True:
-                        try:
-                            # Check workflow status
-                            run_status = client.get_run_status(run_id)
-                            stats = client.get_fuzzing_stats(run_id)
-                            table = create_stats_table(stats)
-                            live.update(table, refresh=True)
-
-                            # Exit if workflow completed or failed
-                            if getattr(run_status, 'is_completed', False) or getattr(run_status, 'is_failed', False):
-                                final_status = getattr(run_status, 'status', 'Unknown')
-                                if getattr(run_status, 'is_completed', False):
-                                    console.print("\n✅ [bold green]Workflow completed[/bold green]", style="green")
-                                else:
-                                    console.print(f"\n⚠️ [bold yellow]Workflow ended[/bold yellow] | Status: {final_status}", style="yellow")
-                                break
-
-                            time.sleep(refresh)
-                        except KeyboardInterrupt:
-                            console.print("\n📊 Monitoring stopped", style="yellow")
-                            break
-
-    except Exception as e:
-        console.print(f"❌ Failed to get fuzzing stats: {e}", style="red")
-        raise typer.Exit(1)
-
-
-def display_stats_table(stats):
-    """Display stats in a simple table"""
-    table = create_stats_table(stats)
-    console.print(table)
-
-
 def create_stats_table(stats) -> Panel:
     """Create a rich table for fuzzing statistics"""
     # Create main stats table

@@ -266,8 +206,8 @@ def crash_reports(
         raise typer.Exit(1)
 
 
-def _live_monitor(run_id: str, refresh: int):
-    """Helper for live monitoring with inline real-time display"""
+def _live_monitor(run_id: str, refresh: int, once: bool = False, style: str = "inline"):
+    """Helper for live monitoring with inline real-time display or table display"""
     with get_client() as client:
         start_time = time.time()
 

@@ -319,16 +259,29 @@ def _live_monitor(run_id: str, refresh: int):
                 self.elapsed_time = 0
                 self.last_crash_time = None
 
-        with Live(auto_refresh=False, console=console) as live:
-            # Initial fetch
-            try:
-                run_status = client.get_run_status(run_id)
-                stats = client.get_fuzzing_stats(run_id)
-            except Exception:
-                stats = FallbackStats(run_id)
-                run_status = type("RS", (), {"status":"Unknown","is_completed":False,"is_failed":False})()
+        # Initial fetch
+        try:
+            run_status = client.get_run_status(run_id)
+            stats = client.get_fuzzing_stats(run_id)
+        except Exception:
+            stats = FallbackStats(run_id)
+            run_status = type("RS", (), {"status":"Unknown","is_completed":False,"is_failed":False})()
 
-            live.update(render_inline_stats(run_status, stats), refresh=True)
+        # Handle --once mode: show stats once and exit
+        if once:
+            if style == "table":
+                console.print(create_stats_table(stats))
+            else:
+                console.print(render_inline_stats(run_status, stats))
+            return
+
+        # Live monitoring mode
+        with Live(auto_refresh=False, console=console) as live:
+            # Render based on style
+            if style == "table":
+                live.update(create_stats_table(stats), refresh=True)
+            else:
+                live.update(render_inline_stats(run_status, stats), refresh=True)
 
             # Polling loop
             consecutive_errors = 0

@@ -354,8 +307,11 @@ def _live_monitor(run_id: str, refresh: int):
                 except Exception:
                     stats = FallbackStats(run_id)
 
-                # Update display
-                live.update(render_inline_stats(run_status, stats), refresh=True)
+                # Update display based on style
+                if style == "table":
+                    live.update(create_stats_table(stats), refresh=True)
+                else:
+                    live.update(render_inline_stats(run_status, stats), refresh=True)
 
                 # Check if completed
                 if getattr(run_status, 'is_completed', False) or getattr(run_status, 'is_failed', False):

@@ -386,17 +342,36 @@ def live_monitor(
     refresh: int = typer.Option(
         2, "--refresh", "-r",
         help="Refresh interval in seconds"
+    ),
+    once: bool = typer.Option(
+        False, "--once",
+        help="Show stats once and exit"
+    ),
+    style: str = typer.Option(
+        "inline", "--style",
+        help="Display style: 'inline' (default) or 'table'"
     )
 ):
     """
-    📺 Real-time inline monitoring with live statistics updates
+    📺 Real-time monitoring with live statistics updates
+
+    Display styles:
+    - inline: Visual inline display with emojis (default)
+    - table: Clean table-based display
+
+    Use --once to show stats once without continuous monitoring (useful for scripts)
     """
     try:
-        _live_monitor(run_id, refresh)
+        # Validate style
+        if style not in ["inline", "table"]:
+            console.print(f"❌ Invalid style: {style}. Must be 'inline' or 'table'", style="red")
+            raise typer.Exit(1)
+
+        _live_monitor(run_id, refresh, once, style)
     except KeyboardInterrupt:
         console.print("\n\n📊 Monitoring stopped by user.", style="yellow")
     except Exception as e:
-        console.print(f"\n❌ Failed to start live monitoring: {e}", style="red")
+        console.print(f"\n❌ Failed to start monitoring: {e}", style="red")
         raise typer.Exit(1)
 
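
Taken together with the removal of `ff monitor stats` above, the one-shot table view now lives behind the same command: `ff monitor live <run-id> --once --style table` should reproduce the old behavior, while the default remains the inline live display.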

@@ -423,6 +398,5 @@ def monitor_callback(ctx: typer.Context):
     console = Console()
     console.print("📊 [bold cyan]Monitor Command[/bold cyan]")
     console.print("\nAvailable subcommands:")
-    console.print("  • [cyan]ff monitor stats <run-id>[/cyan] - Show execution statistics")
+    console.print("  • [cyan]ff monitor live <run-id>[/cyan] - Monitor with live updates (supports --once, --style)")
     console.print("  • [cyan]ff monitor crashes <run-id>[/cyan] - Show crash reports")
-    console.print("  • [cyan]ff monitor live <run-id>[/cyan] - Real-time inline monitoring")

@@ -365,7 +365,7 @@ def execute_workflow(
     should_auto_start = auto_start if auto_start is not None else config.workers.auto_start_workers
     should_auto_stop = auto_stop if auto_stop is not None else config.workers.auto_stop_workers
 
-    worker_container = None  # Track for cleanup
+    worker_service = None  # Track for cleanup
     worker_mgr = None
     wait_completed = False  # Track if wait completed successfully
 

@@ -384,7 +384,6 @@ def execute_workflow(
         )
 
         # Ensure worker is running
-        worker_container = worker_info["worker_container"]
         worker_service = worker_info.get("worker_service", f"worker-{worker_info['vertical']}")
         if not worker_mgr.ensure_worker_running(worker_info, auto_start=should_auto_start):
             console.print(

@@ -434,12 +433,12 @@ def execute_workflow(
             # Don't fail the whole operation if database save fails
             console.print(f"⚠️ Failed to save execution to database: {e}", style="yellow")
 
-        console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor stats {response.run_id}[/bold cyan]")
+        console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor live {response.run_id}[/bold cyan]")
         console.print(f"💡 Check status: [bold cyan]fuzzforge workflow status {response.run_id}[/bold cyan]")
 
         # Suggest --live for fuzzing workflows
         if not live and not wait and "fuzzing" in workflow.lower():
-            console.print(f"💡 Next time try: [bold cyan]fuzzforge workflow {workflow} {target_path} --live[/bold cyan] for real-time monitoring", style="dim")
+            console.print(f"💡 Next time try: [bold cyan]fuzzforge workflow run {workflow} {target_path} --live[/bold cyan] for real-time monitoring", style="dim")
 
         # Start live monitoring if requested
         if live:

@@ -461,7 +460,7 @@ def execute_workflow(
             console.print("\n⏹️ Live monitoring stopped (execution continues in background)", style="yellow")
         except Exception as e:
             console.print(f"⚠️ Failed to start live monitoring: {e}", style="yellow")
-            console.print(f"💡 You can still monitor manually: [bold cyan]fuzzforge monitor {response.run_id}[/bold cyan]")
+            console.print(f"💡 You can still monitor manually: [bold cyan]fuzzforge monitor live {response.run_id}[/bold cyan]")
 
         # Wait for completion if requested
         elif wait:

@@ -527,11 +526,11 @@ def execute_workflow(
         handle_error(e, "executing workflow")
     finally:
         # Stop worker if auto-stop is enabled and wait completed
-        if should_auto_stop and worker_container and worker_mgr and wait_completed:
+        if should_auto_stop and worker_service and worker_mgr and wait_completed:
             try:
                 console.print("\n🛑 Stopping worker (auto-stop enabled)...")
-                if worker_mgr.stop_worker(worker_container):
-                    console.print(f"✅ Worker stopped: {worker_container}")
+                if worker_mgr.stop_worker(worker_service):
+                    console.print(f"✅ Worker stopped: {worker_service}")
             except Exception as e:
                 console.print(
                     f"⚠️ Failed to stop worker: {e}",

@@ -608,7 +607,7 @@ def workflow_status(
 
     # Show next steps
     if status.is_running:
-        console.print(f"\n💡 Monitor live: [bold cyan]fuzzforge monitor {execution_id}[/bold cyan]")
+        console.print(f"\n💡 Monitor live: [bold cyan]fuzzforge monitor live {execution_id}[/bold cyan]")
     elif status.is_completed:
         console.print(f"💡 View findings: [bold cyan]fuzzforge finding {execution_id}[/bold cyan]")
     elif status.is_failed:

@@ -770,7 +769,7 @@ def retry_workflow(
         except Exception as e:
             console.print(f"⚠️ Failed to save execution to database: {e}", style="yellow")
 
-        console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor stats {response.run_id}[/bold cyan]")
+        console.print(f"\n💡 Monitor progress: [bold cyan]fuzzforge monitor live {response.run_id}[/bold cyan]")
 
     except Exception as e:
         handle_error(e, "retrying workflow")

@@ -251,7 +251,7 @@ def workflow_main():
     Execute workflows and manage workflow executions
 
     Examples:
-        fuzzforge workflow security_assessment ./target  # Execute workflow
+        fuzzforge workflow run security_assessment ./target  # Execute workflow
         fuzzforge workflow status  # Check latest status
         fuzzforge workflow history  # Show execution history
     """

@@ -260,57 +260,17 @@ def workflow_main():
 
 # === Finding commands (singular) ===
 
-@finding_app.command("export")
-def export_finding(
-    execution_id: Optional[str] = typer.Argument(None, help="Execution ID (defaults to latest)"),
-    format: str = typer.Option(
-        "sarif", "--format", "-f",
-        help="Export format: sarif, json, csv"
-    ),
-    output: Optional[str] = typer.Option(
-        None, "--output", "-o",
-        help="Output file (defaults to stdout)"
-    )
+@finding_app.command("show")
+def show_finding_detail(
+    run_id: str = typer.Argument(..., help="Run ID to get finding from"),
+    rule_id: str = typer.Option(..., "--rule", "-r", help="Rule ID of the specific finding to show")
 ):
     """
-    📤 Export findings to file
+    🔍 Show detailed information about a specific finding
     """
-    from .commands.findings import export_findings
-    from .database import get_project_db
-    from .exceptions import require_project
-
-    try:
-        require_project()
-
-        # If no ID provided, get the latest
-        if not execution_id:
-            db = get_project_db()
-            if db:
-                recent_runs = db.list_runs(limit=1)
-                if recent_runs:
-                    execution_id = recent_runs[0].run_id
-                    console.print(f"🔍 Using most recent execution: {execution_id}")
-                else:
-                    console.print("⚠️ No findings found in project database", style="yellow")
-                    return
-            else:
-                console.print("❌ No project database found", style="red")
-                return
-
-        export_findings(run_id=execution_id, format=format, output=output)
-    except Exception as e:
-        console.print(f"❌ Failed to export findings: {e}", style="red")
-
-
-@finding_app.command("analyze")
-def analyze_finding(
-    finding_id: Optional[str] = typer.Argument(None, help="Finding ID to analyze")
-):
-    """
-    🤖 AI analysis of a finding
-    """
-    from .commands.ai import analyze_finding as ai_analyze
-    ai_analyze(finding_id)
+    from .commands.findings import show_finding
+    show_finding(run_id=run_id, rule_id=rule_id)
 
 @finding_app.callback(invoke_without_command=True)
 def finding_main(

@@ -320,9 +280,9 @@ def finding_main(
     View and analyze individual findings
 
     Examples:
         fuzzforge finding  # Show latest finding
         fuzzforge finding <id>  # Show specific finding
-        fuzzforge finding export  # Export latest findings
+        fuzzforge finding show <run-id> --rule <id>  # Show specific finding detail
     """
     # Check if a subcommand is being invoked
     if ctx.invoked_subcommand is not None:

@@ -418,7 +378,7 @@ def main():
 
     # Handle finding command with pattern recognition
     if len(args) >= 2 and args[0] == 'finding':
-        finding_subcommands = ['export', 'analyze']
+        finding_subcommands = ['show']
         # Skip custom dispatching if help flags are present
         if not any(arg in ['--help', '-h', '--version', '-v'] for arg in args):
             if args[1] not in finding_subcommands:

@@ -95,17 +95,30 @@ class WorkerManager:
             check=True
         )
 
-    def is_worker_running(self, container_name: str) -> bool:
+    def _service_to_container_name(self, service_name: str) -> str:
         """
-        Check if a worker container is running.
+        Convert service name to container name based on docker-compose naming convention.
 
         Args:
-            container_name: Name of the Docker container (e.g., "fuzzforge-worker-ossfuzz")
+            service_name: Docker Compose service name (e.g., "worker-python")
 
+        Returns:
+            Container name (e.g., "fuzzforge-worker-python")
+        """
+        return f"fuzzforge-{service_name}"
+
+    def is_worker_running(self, service_name: str) -> bool:
+        """
+        Check if a worker service is running.
+
+        Args:
+            service_name: Name of the Docker Compose service (e.g., "worker-ossfuzz")
+
         Returns:
             True if container is running, False otherwise
         """
        try:
+            container_name = self._service_to_container_name(service_name)
             result = subprocess.run(
                 ["docker", "inspect", "-f", "{{.State.Running}}", container_name],
                 capture_output=True,
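
The `fuzzforge-{service_name}` mapping presumably relies on explicit `container_name:` entries in the compose file, since plain Docker Compose would otherwise name containers `<project>-<service>-<index>`. Illustrative pairs under the convention above:

```python
def service_to_container(service_name: str) -> str:
    # Same convention as _service_to_container_name above
    return f"fuzzforge-{service_name}"

print(service_to_container("worker-python"))   # fuzzforge-worker-python
print(service_to_container("worker-secrets"))  # fuzzforge-worker-secrets
```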

@@ -120,46 +133,42 @@ class WorkerManager:
             logger.debug(f"Failed to check worker status: {e}")
             return False
 
-    def start_worker(self, container_name: str) -> bool:
+    def start_worker(self, service_name: str) -> bool:
         """
-        Start a worker container using docker.
+        Start a worker service using docker-compose.
 
         Args:
-            container_name: Name of the Docker container to start
+            service_name: Name of the Docker Compose service to start (e.g., "worker-python")
 
         Returns:
             True if started successfully, False otherwise
         """
         try:
-            console.print(f"🚀 Starting worker: {container_name}")
+            console.print(f"🚀 Starting worker: {service_name}")
 
-            # Use docker start directly (works with container name)
-            subprocess.run(
-                ["docker", "start", container_name],
-                capture_output=True,
-                text=True,
-                check=True
-            )
+            # Use docker-compose up to create and start the service
+            result = self._run_docker_compose("up", "-d", service_name)
 
-            logger.info(f"Worker {container_name} started")
+            logger.info(f"Worker {service_name} started")
             return True
 
         except subprocess.CalledProcessError as e:
-            logger.error(f"Failed to start worker {container_name}: {e.stderr}")
+            logger.error(f"Failed to start worker {service_name}: {e.stderr}")
             console.print(f"❌ Failed to start worker: {e.stderr}", style="red")
+            console.print(f"💡 Start the worker manually: docker compose up -d {service_name}", style="yellow")
             return False
 
         except Exception as e:
-            logger.error(f"Unexpected error starting worker {container_name}: {e}")
+            logger.error(f"Unexpected error starting worker {service_name}: {e}")
             console.print(f"❌ Unexpected error: {e}", style="red")
             return False
 
-    def wait_for_worker_ready(self, container_name: str, timeout: Optional[int] = None) -> bool:
+    def wait_for_worker_ready(self, service_name: str, timeout: Optional[int] = None) -> bool:
         """
         Wait for a worker to be healthy and ready to process tasks.
 
         Args:
-            container_name: Name of the Docker container
+            service_name: Name of the Docker Compose service
             timeout: Maximum seconds to wait (uses instance default if not specified)
 
         Returns:

@@ -170,13 +179,14 @@ class WorkerManager:
         """
         timeout = timeout or self.startup_timeout
         start_time = time.time()
+        container_name = self._service_to_container_name(service_name)
 
         console.print("⏳ Waiting for worker to be ready...")
 
         while time.time() - start_time < timeout:
             # Check if container is running
-            if not self.is_worker_running(container_name):
-                logger.debug(f"Worker {container_name} not running yet")
+            if not self.is_worker_running(service_name):
+                logger.debug(f"Worker {service_name} not running yet")
                 time.sleep(self.health_check_interval)
                 continue
 

@@ -193,16 +203,16 @@ class WorkerManager:
 
                 # If no health check is defined, assume healthy after running
                 if health_status == "<no value>" or health_status == "":
-                    logger.info(f"Worker {container_name} is running (no health check)")
-                    console.print(f"✅ Worker ready: {container_name}")
+                    logger.info(f"Worker {service_name} is running (no health check)")
+                    console.print(f"✅ Worker ready: {service_name}")
                     return True
 
                 if health_status == "healthy":
-                    logger.info(f"Worker {container_name} is healthy")
-                    console.print(f"✅ Worker ready: {container_name}")
+                    logger.info(f"Worker {service_name} is healthy")
+                    console.print(f"✅ Worker ready: {service_name}")
                     return True
 
-                logger.debug(f"Worker {container_name} health: {health_status}")
+                logger.debug(f"Worker {service_name} health: {health_status}")
 
             except Exception as e:
                 logger.debug(f"Failed to check health: {e}")

@@ -210,41 +220,36 @@ class WorkerManager:
             time.sleep(self.health_check_interval)
 
         elapsed = time.time() - start_time
-        logger.warning(f"Worker {container_name} did not become ready within {elapsed:.1f}s")
+        logger.warning(f"Worker {service_name} did not become ready within {elapsed:.1f}s")
         console.print(f"⚠️ Worker startup timeout after {elapsed:.1f}s", style="yellow")
         return False
 
-    def stop_worker(self, container_name: str) -> bool:
+    def stop_worker(self, service_name: str) -> bool:
         """
-        Stop a worker container using docker.
+        Stop a worker service using docker-compose.
 
         Args:
-            container_name: Name of the Docker container to stop
+            service_name: Name of the Docker Compose service to stop
 
         Returns:
             True if stopped successfully, False otherwise
         """
         try:
-            console.print(f"🛑 Stopping worker: {container_name}")
+            console.print(f"🛑 Stopping worker: {service_name}")
 
-            # Use docker stop directly (works with container name)
-            subprocess.run(
-                ["docker", "stop", container_name],
-                capture_output=True,
-                text=True,
-                check=True
-            )
+            # Use docker-compose down to stop and remove the service
+            result = self._run_docker_compose("stop", service_name)
 
-            logger.info(f"Worker {container_name} stopped")
+            logger.info(f"Worker {service_name} stopped")
             return True
 
         except subprocess.CalledProcessError as e:
-            logger.error(f"Failed to stop worker {container_name}: {e.stderr}")
+            logger.error(f"Failed to stop worker {service_name}: {e.stderr}")
             console.print(f"❌ Failed to stop worker: {e.stderr}", style="red")
             return False
 
         except Exception as e:
-            logger.error(f"Unexpected error stopping worker {container_name}: {e}")
+            logger.error(f"Unexpected error stopping worker {service_name}: {e}")
             console.print(f"❌ Unexpected error: {e}", style="red")
             return False
 

@@ -257,17 +262,18 @@ class WorkerManager:
         Ensure a worker is running, starting it if necessary.
 
         Args:
-            worker_info: Worker information dict from API (contains worker_container, etc.)
+            worker_info: Worker information dict from API (contains worker_service, etc.)
             auto_start: Whether to automatically start the worker if not running
 
         Returns:
             True if worker is running, False otherwise
         """
-        container_name = worker_info["worker_container"]
+        # Get worker_service (docker-compose service name)
+        service_name = worker_info.get("worker_service", f"worker-{worker_info['vertical']}")
         vertical = worker_info["vertical"]
 
         # Check if already running
-        if self.is_worker_running(container_name):
+        if self.is_worker_running(service_name):
             console.print(f"✓ Worker already running: {vertical}")
             return True
 

@@ -279,8 +285,8 @@ class WorkerManager:
             return False
 
         # Start the worker
-        if not self.start_worker(container_name):
+        if not self.start_worker(service_name):
             return False
 
         # Wait for it to be ready
-        return self.wait_for_worker_ready(container_name)
+        return self.wait_for_worker_ready(service_name)

Generated +3 -3

@@ -1257,7 +1257,7 @@ wheels = [
 
 [[package]]
 name = "fuzzforge-ai"
-version = "0.6.0"
+version = "0.7.0"
 source = { editable = "../ai" }
 dependencies = [
     { name = "a2a-sdk" },

@@ -1303,7 +1303,7 @@ dev = [
 
 [[package]]
 name = "fuzzforge-cli"
-version = "0.6.0"
+version = "0.7.0"
 source = { editable = "." }
 dependencies = [
     { name = "fuzzforge-ai" },

@@ -1347,7 +1347,7 @@ provides-extras = ["dev"]
 
 [[package]]
 name = "fuzzforge-sdk"
-version = "0.6.0"
+version = "0.7.0"
 source = { editable = "../sdk" }
 dependencies = [
     { name = "httpx" },

@@ -9,8 +9,6 @@
 # Development: docker-compose -f docker-compose.temporal.yaml up
 # Production: docker-compose -f docker-compose.temporal.yaml -f docker-compose.temporal.prod.yaml up
 
-version: '3.8'
-
 services:
   # ============================================================================
   # Temporal Server - Workflow Orchestration
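
Removing the top-level `version:` key is safe: the Compose specification marks it obsolete, and Docker Compose V2 ignores it (newer releases print a warning when it is present).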

@@ -9,7 +9,7 @@ FuzzForge security testing project.
 fuzzforge workflows
 
 # Submit a workflow for analysis
-fuzzforge workflow <workflow-name> /path/to/target
+fuzzforge workflow run <workflow-name> /path/to/target
 
 # View findings
 fuzzforge finding <run-id>

@@ -0,0 +1,61 @@
+# FuzzForge Vertical Worker: Secret Detection
+#
+# Pre-installed tools for secret detection:
+# - Gitleaks v8.18.0
+# - TruffleHog v3.63.2
+# - Temporal worker
+
+FROM python:3.11-slim
+
+# Set working directory
+WORKDIR /app
+
+# Install system dependencies
+RUN apt-get update && apt-get install -y \
+    # Build essentials
+    build-essential \
+    # Development tools
+    git \
+    curl \
+    wget \
+    # Cleanup
+    && rm -rf /var/lib/apt/lists/*
+
+# Install Gitleaks v8.18.0
+RUN wget -q https://github.com/gitleaks/gitleaks/releases/download/v8.18.0/gitleaks_8.18.0_linux_x64.tar.gz && \
+    tar -xzf gitleaks_8.18.0_linux_x64.tar.gz && \
+    mv gitleaks /usr/local/bin/ && \
+    chmod +x /usr/local/bin/gitleaks && \
+    rm gitleaks_8.18.0_linux_x64.tar.gz
+
+# Install TruffleHog v3.63.2
+RUN wget -q https://github.com/trufflesecurity/trufflehog/releases/download/v3.63.2/trufflehog_3.63.2_linux_amd64.tar.gz && \
+    tar -xzf trufflehog_3.63.2_linux_amd64.tar.gz && \
+    mv trufflehog /usr/local/bin/ && \
+    chmod +x /usr/local/bin/trufflehog && \
+    rm trufflehog_3.63.2_linux_amd64.tar.gz
+
+# Verify installations
+RUN gitleaks version && trufflehog --version
+
+# Install Python dependencies for Temporal worker
+COPY requirements.txt /tmp/requirements.txt
+RUN pip3 install --no-cache-dir -r /tmp/requirements.txt && \
+    rm /tmp/requirements.txt
+
+# Create cache directory for downloaded targets
+RUN mkdir -p /cache && chmod 755 /cache
+
+# Copy worker entrypoint
+COPY worker.py /app/worker.py
+
+# Add toolbox and AI module to Python path (mounted at runtime)
+ENV PYTHONPATH="/app:/app/toolbox:/app/ai_src:${PYTHONPATH}"
+ENV PYTHONUNBUFFERED=1
+
+# Healthcheck
+HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \
+    CMD python3 -c "import sys; sys.exit(0)"
+
+# Run worker
+CMD ["python3", "/app/worker.py"]
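
Note that this HEALTHCHECK only confirms the container's Python interpreter starts (`sys.exit(0)` always succeeds); it does not probe Temporal connectivity or the worker loop itself.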

@@ -0,0 +1,15 @@
+# Temporal worker dependencies
+temporalio>=1.5.0
+pydantic>=2.0.0
+
+# Storage (MinIO/S3)
+boto3>=1.34.0
+
+# Configuration
+pyyaml>=6.0.0
+
+# HTTP Client (for real-time stats reporting)
+httpx>=0.27.0
+
+# A2A Agent Communication (for LLM-based secret detection)
+a2a-sdk[all]>=0.1.0
@@ -0,0 +1,309 @@
"""
FuzzForge Vertical Worker: Secret Detection

This worker:
1. Discovers workflows for the 'secrets' vertical from mounted toolbox
2. Dynamically imports and registers workflow classes
3. Connects to Temporal and processes tasks
4. Handles activities for target download/upload from MinIO
"""

import asyncio
import importlib
import inspect
import logging
import os
import sys
from pathlib import Path
from typing import List, Any

import yaml
from temporalio.client import Client
from temporalio.worker import Worker

# Add toolbox to path for workflow and activity imports
sys.path.insert(0, '/app/toolbox')

# Import common storage activities
from toolbox.common.storage_activities import (
    get_target_activity,
    cleanup_cache_activity,
    upload_results_activity
)

# Configure logging
logging.basicConfig(
    level=os.getenv('LOG_LEVEL', 'INFO'),
    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)


async def discover_workflows(vertical: str) -> List[Any]:
    """
    Discover workflows for this vertical from mounted toolbox.

    Args:
        vertical: The vertical name (e.g., 'secrets', 'python', 'web')

    Returns:
        List of workflow classes decorated with @workflow.defn
    """
    workflows = []
    toolbox_path = Path("/app/toolbox/workflows")

    if not toolbox_path.exists():
        logger.warning(f"Toolbox path does not exist: {toolbox_path}")
        return workflows

    logger.info(f"Scanning for workflows in: {toolbox_path}")

    for workflow_dir in toolbox_path.iterdir():
        if not workflow_dir.is_dir():
            continue

        # Skip special directories
        if workflow_dir.name.startswith('.') or workflow_dir.name == '__pycache__':
            continue

        metadata_file = workflow_dir / "metadata.yaml"
        if not metadata_file.exists():
            logger.debug(f"No metadata.yaml in {workflow_dir.name}, skipping")
            continue

        try:
            # Parse metadata
            with open(metadata_file) as f:
                metadata = yaml.safe_load(f)

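            # Illustrative metadata.yaml for a workflow in this vertical;
            # only the 'vertical' key is read below, the other fields are
            # assumptions rather than a documented schema:
            #
            #   name: secret_detection_scan
            #   vertical: secrets
            #   description: Scan a target for committed credentials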
            # Check if workflow is for this vertical
            workflow_vertical = metadata.get("vertical")
            if workflow_vertical != vertical:
                logger.debug(
                    f"Workflow {workflow_dir.name} is for vertical '{workflow_vertical}', "
                    f"not '{vertical}', skipping"
                )
                continue

            # Check if workflow.py exists
            workflow_file = workflow_dir / "workflow.py"
            if not workflow_file.exists():
                logger.warning(
                    f"Workflow {workflow_dir.name} has metadata but no workflow.py, skipping"
                )
                continue

            # Dynamically import workflow module
            module_name = f"toolbox.workflows.{workflow_dir.name}.workflow"
            logger.info(f"Importing workflow module: {module_name}")

            try:
                module = importlib.import_module(module_name)
            except Exception as e:
                logger.error(
                    f"Failed to import workflow module {module_name}: {e}",
                    exc_info=True
                )
                continue

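            # Note: '__temporal_workflow_definition' is an attribute that
            # temporalio's @workflow.defn decorator sets internally. Probing
            # it with hasattr() avoids hard-coding class names, but it is not
            # a public API and could change between SDK releases.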
            # Find @workflow.defn decorated classes
            found_workflows = False
            for name, obj in inspect.getmembers(module, inspect.isclass):
                # Check if class has Temporal workflow definition
                if hasattr(obj, '__temporal_workflow_definition'):
                    workflows.append(obj)
                    found_workflows = True
                    logger.info(
                        f"✓ Discovered workflow: {name} from {workflow_dir.name} "
                        f"(vertical: {vertical})"
                    )

            if not found_workflows:
                logger.warning(
                    f"Workflow {workflow_dir.name} has no @workflow.defn decorated classes"
                )

        except Exception as e:
            logger.error(
                f"Error processing workflow {workflow_dir.name}: {e}",
                exc_info=True
            )
            continue

    logger.info(f"Discovered {len(workflows)} workflows for vertical '{vertical}'")
    return workflows


async def discover_activities(workflows_dir: Path) -> List[Any]:
    """
    Discover activities from workflow directories.

    Looks for activities.py files alongside workflow.py in each workflow directory.

    Args:
        workflows_dir: Path to workflows directory

    Returns:
        List of activity functions decorated with @activity.defn
    """
    activities = []

    if not workflows_dir.exists():
        logger.warning(f"Workflows directory does not exist: {workflows_dir}")
        return activities

    logger.info(f"Scanning for workflow activities in: {workflows_dir}")

    for workflow_dir in workflows_dir.iterdir():
        if not workflow_dir.is_dir():
            continue

        # Skip special directories
        if workflow_dir.name.startswith('.') or workflow_dir.name == '__pycache__':
            continue

        # Check if activities.py exists
        activities_file = workflow_dir / "activities.py"
        if not activities_file.exists():
            logger.debug(f"No activities.py in {workflow_dir.name}, skipping")
            continue

        try:
            # Dynamically import activities module
            module_name = f"toolbox.workflows.{workflow_dir.name}.activities"
            logger.info(f"Importing activities module: {module_name}")

            try:
                module = importlib.import_module(module_name)
            except Exception as e:
                logger.error(
                    f"Failed to import activities module {module_name}: {e}",
                    exc_info=True
                )
                continue

            # Find @activity.defn decorated functions
            found_activities = False
            for name, obj in inspect.getmembers(module, inspect.isfunction):
                # Check if function has Temporal activity definition
                if hasattr(obj, '__temporal_activity_definition'):
                    activities.append(obj)
                    found_activities = True
                    logger.info(
                        f"✓ Discovered activity: {name} from {workflow_dir.name}"
                    )

            if not found_activities:
                logger.warning(
                    f"Workflow {workflow_dir.name} has activities.py but no @activity.defn decorated functions"
                )

        except Exception as e:
            logger.error(
                f"Error processing activities from {workflow_dir.name}: {e}",
                exc_info=True
            )
            continue

    logger.info(f"Discovered {len(activities)} workflow-specific activities")
    return activities


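# An activities.py module that discover_activities() would register
# (illustrative sketch; the path and function name are assumptions):
#
#   # toolbox/workflows/example_secrets_scan/activities.py
#   from temporalio import activity
#
#   @activity.defn
#   async def run_gitleaks_activity(target_dir: str) -> list:
#       # A real implementation would invoke the gitleaks binary baked into
#       # the image and parse its JSON report into findings.
#       ...
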
async def main():
    """Main worker entry point"""
    # Get configuration from environment
    vertical = os.getenv("WORKER_VERTICAL", "secrets")
    temporal_address = os.getenv("TEMPORAL_ADDRESS", "localhost:7233")
    temporal_namespace = os.getenv("TEMPORAL_NAMESPACE", "default")
    task_queue = os.getenv("WORKER_TASK_QUEUE", f"{vertical}-queue")
    max_concurrent_activities = int(os.getenv("MAX_CONCURRENT_ACTIVITIES", "5"))

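    # Typical deployment values (illustrative; the hostname and limits are
    # deployment-specific assumptions, the variable names match the reads
    # above):
    #   WORKER_VERTICAL=secrets
    #   TEMPORAL_ADDRESS=temporal:7233
    #   TEMPORAL_NAMESPACE=default
    #   WORKER_TASK_QUEUE=secrets-queue
    #   MAX_CONCURRENT_ACTIVITIES=5
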
    logger.info("=" * 60)
    logger.info(f"FuzzForge Vertical Worker: {vertical}")
    logger.info("=" * 60)
    logger.info(f"Temporal Address: {temporal_address}")
    logger.info(f"Temporal Namespace: {temporal_namespace}")
    logger.info(f"Task Queue: {task_queue}")
    logger.info(f"Max Concurrent Activities: {max_concurrent_activities}")
    logger.info("=" * 60)

    # Discover workflows for this vertical
    logger.info(f"Discovering workflows for vertical: {vertical}")
    workflows = await discover_workflows(vertical)

    if not workflows:
        logger.error(f"No workflows found for vertical: {vertical}")
        logger.error("Worker cannot start without workflows. Exiting...")
        sys.exit(1)

    # Discover activities from workflow directories
    logger.info("Discovering workflow-specific activities...")
    workflows_dir = Path("/app/toolbox/workflows")
    workflow_activities = await discover_activities(workflows_dir)

    # Combine common storage activities with workflow-specific activities
    activities = [
        get_target_activity,
        cleanup_cache_activity,
        upload_results_activity
    ] + workflow_activities

    logger.info(
        f"Total activities registered: {len(activities)} "
        f"(3 common + {len(workflow_activities)} workflow-specific)"
    )

    # Connect to Temporal
    logger.info(f"Connecting to Temporal at {temporal_address}...")
    try:
        client = await Client.connect(
            temporal_address,
            namespace=temporal_namespace
        )
        logger.info("✓ Connected to Temporal successfully")
    except Exception as e:
        logger.error(f"Failed to connect to Temporal: {e}", exc_info=True)
        sys.exit(1)

    # Create worker with discovered workflows and activities
    logger.info(f"Creating worker on task queue: {task_queue}")

    try:
        worker = Worker(
            client,
            task_queue=task_queue,
            workflows=workflows,
            activities=activities,
            max_concurrent_activities=max_concurrent_activities
        )
        logger.info("✓ Worker created successfully")
    except Exception as e:
        logger.error(f"Failed to create worker: {e}", exc_info=True)
        sys.exit(1)

    # Start worker
    logger.info("=" * 60)
    logger.info(f"🚀 Worker started for vertical '{vertical}'")
    logger.info(f"📦 Registered {len(workflows)} workflows")
    logger.info(f"⚙️ Registered {len(activities)} activities")
    logger.info(f"📨 Listening on task queue: {task_queue}")
    logger.info("=" * 60)
    logger.info("Worker is ready to process tasks...")

    try:
        await worker.run()
    except KeyboardInterrupt:
        logger.info("Shutting down worker (keyboard interrupt)...")
    except Exception as e:
        logger.error(f"Worker error: {e}", exc_info=True)
        raise


if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        logger.info("Worker stopped")
    except Exception as e:
        logger.error(f"Fatal error: {e}", exc_info=True)
        sys.exit(1)
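
A minimal workflow module that this discovery pass would register (an
illustrative sketch; the directory and class names are assumptions, while the
toolbox/workflows/<name>/ layout, the metadata.yaml 'vertical' key, and the
@workflow.defn requirement come from the loader above):

    # toolbox/workflows/example_secrets_scan/workflow.py
    from temporalio import workflow

    @workflow.defn
    class ExampleSecretsScanWorkflow:
        @workflow.run
        async def run(self, target_id: str) -> dict:
            # Orchestration only: actual scanning runs in activities so the
            # workflow stays deterministic.
            return {"target": target_id, "findings": []}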