Mirror of https://github.com/FuzzingLabs/fuzzforge_ai.git (synced 2026-02-12 20:32:46 +00:00)
Merge pull request #24 from FuzzingLabs/fix/cleanup-and-bugs
fix: resolve live monitoring bug, remove deprecated parameters, and auto-start Python worker
.github/workflows/test.yml (vendored, 65 changed lines)
@@ -26,7 +26,7 @@ jobs:
        with:
          fetch-depth: 0  # Fetch all history for proper diff

      - name: Check if workers were modified
      - name: Check which workers were modified
        id: check-workers
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ]; then
@@ -34,22 +34,54 @@ jobs:
            CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}...HEAD)
            echo "Changed files:"
            echo "$CHANGED_FILES"

            if echo "$CHANGED_FILES" | grep -q "^workers/\|^docker-compose.yml"; then
              echo "workers_modified=true" >> $GITHUB_OUTPUT
              echo "✅ Workers or docker-compose.yml modified - will build"
            else
              echo "workers_modified=false" >> $GITHUB_OUTPUT
              echo "⏭️ No worker changes detected - skipping build"
            fi
          else
            # For direct pushes, check last commit
            CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD)
            if echo "$CHANGED_FILES" | grep -q "^workers/\|^docker-compose.yml"; then
              echo "workers_modified=true" >> $GITHUB_OUTPUT
            else
              echo "workers_modified=false" >> $GITHUB_OUTPUT
            fi
          fi

          # Check if docker-compose.yml changed (build all workers)
          if echo "$CHANGED_FILES" | grep -q "^docker-compose.yml"; then
            echo "workers_to_build=worker-python worker-secrets worker-rust worker-android worker-ossfuzz" >> $GITHUB_OUTPUT
            echo "workers_modified=true" >> $GITHUB_OUTPUT
            echo "✅ docker-compose.yml modified - building all workers"
            exit 0
          fi

          # Detect which specific workers changed
          WORKERS_TO_BUILD=""

          if echo "$CHANGED_FILES" | grep -q "^workers/python/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-python"
            echo "✅ Python worker modified"
          fi

          if echo "$CHANGED_FILES" | grep -q "^workers/secrets/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-secrets"
            echo "✅ Secrets worker modified"
          fi

          if echo "$CHANGED_FILES" | grep -q "^workers/rust/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-rust"
            echo "✅ Rust worker modified"
          fi

          if echo "$CHANGED_FILES" | grep -q "^workers/android/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-android"
            echo "✅ Android worker modified"
          fi

          if echo "$CHANGED_FILES" | grep -q "^workers/ossfuzz/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-ossfuzz"
            echo "✅ OSS-Fuzz worker modified"
          fi

          if [ -z "$WORKERS_TO_BUILD" ]; then
            echo "workers_modified=false" >> $GITHUB_OUTPUT
            echo "⏭️ No worker changes detected - skipping build"
          else
            echo "workers_to_build=$WORKERS_TO_BUILD" >> $GITHUB_OUTPUT
            echo "workers_modified=true" >> $GITHUB_OUTPUT
            echo "Building workers:$WORKERS_TO_BUILD"
          fi

      - name: Set up Docker Buildx
@@ -59,8 +91,9 @@ jobs:
      - name: Build worker images
        if: steps.check-workers.outputs.workers_modified == 'true'
        run: |
          echo "Building worker Docker images..."
          docker compose build worker-python worker-secrets worker-rust worker-android worker-ossfuzz --no-cache
          WORKERS="${{ steps.check-workers.outputs.workers_to_build }}"
          echo "Building worker Docker images: $WORKERS"
          docker compose build $WORKERS --no-cache
        continue-on-error: false

    lint:
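For reviewers: the shell logic above reduces to a prefix match per worker plus a docker-compose.yml catch-all, with an empty result meaning "skip the build job". A minimal Python sketch of the same decision, for reference only (these names are illustrative and not part of the repository):

    # Sketch: the workflow step's detection logic as a pure, testable function.
    from typing import List

    # Hypothetical mapping; mirrors the path prefixes grep'd for in the step.
    WORKER_PREFIXES = {
        "workers/python/": "worker-python",
        "workers/secrets/": "worker-secrets",
        "workers/rust/": "worker-rust",
        "workers/android/": "worker-android",
        "workers/ossfuzz/": "worker-ossfuzz",
    }
    ALL_WORKERS = list(WORKER_PREFIXES.values())

    def workers_to_build(changed_files: List[str]) -> List[str]:
        """Return the worker services to rebuild for a list of changed paths."""
        # docker-compose.yml affects every worker, so rebuild them all.
        if any(path.startswith("docker-compose.yml") for path in changed_files):
            return ALL_WORKERS
        selected = []
        for prefix, worker in WORKER_PREFIXES.items():
            if any(path.startswith(prefix) for path in changed_files):
                selected.append(worker)
        return selected  # empty list means "skip the build job"

    assert workers_to_build(["docker-compose.yml"]) == ALL_WORKERS
    assert workers_to_build(["workers/rust/Dockerfile"]) == ["worker-rust"]
    assert workers_to_build(["README.md"]) == []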
@@ -40,7 +40,6 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0

#### Documentation
- Updated README for Temporal + MinIO architecture
- Removed obsolete `volume_mode` references across all documentation
- Added `.env` configuration guide for AI agent API keys
- Fixed worker startup instructions with correct service names
- Updated docker compose commands to modern syntax
@@ -831,20 +831,9 @@ class FuzzForgeExecutor:
async def submit_security_scan_mcp(
    workflow_name: str,
    target_path: str = "",
    volume_mode: str = "",
    parameters: Dict[str, Any] | None = None,
    tool_context: ToolContext | None = None,
) -> Any:
    # Normalise volume mode to supported values
    normalised_mode = (volume_mode or "ro").strip().lower().replace("-", "_")
    if normalised_mode in {"read_only", "readonly", "ro"}:
        normalised_mode = "ro"
    elif normalised_mode in {"read_write", "readwrite", "rw"}:
        normalised_mode = "rw"
    else:
        # Fall back to read-only if we can't recognise the input
        normalised_mode = "ro"

    # Resolve the target path to an absolute path for validation
    resolved_path = target_path or "."
    try:
@@ -883,7 +872,6 @@ class FuzzForgeExecutor:
    payload = {
        "workflow_name": workflow_name,
        "target_path": resolved_path,
        "volume_mode": normalised_mode,
        "parameters": cleaned_parameters,
    }
    result = await _call_fuzzforge_mcp("submit_security_scan_mcp", payload)
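With volume_mode gone from both the signature and the payload, callers only pass the workflow name, a target path, and parameters. A hedged sketch of the post-change call shape (the payload keys and the helper name come from the diff; the stub transport and the workflow name are illustrative):

    import asyncio
    from typing import Any, Dict

    async def _call_fuzzforge_mcp(tool: str, payload: Dict[str, Any]) -> Any:
        """Stand-in for the MCP transport helper referenced in the diff."""
        return {"tool": tool, "payload": payload}

    async def submit_security_scan(workflow_name: str, target_path: str = "",
                                   parameters: Dict[str, Any] | None = None) -> Any:
        """Post-change call shape: no volume_mode in the signature or the payload."""
        payload = {
            "workflow_name": workflow_name,
            "target_path": target_path or ".",
            "parameters": parameters or {},
        }
        return await _call_fuzzforge_mcp("submit_security_scan_mcp", payload)

    # Example workflow name is made up for illustration.
    print(asyncio.run(submit_security_scan("static_analysis_scan", ".")))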
@@ -22,7 +22,6 @@
    "parameters": {
        "workflow_name": "string",
        "target_path": "string",
        "volume_mode": "string (ro|rw)",
        "parameters": "object"
    }
},
@@ -212,14 +212,6 @@ def _lookup_workflow(workflow_name: str):
    metadata = info.metadata
    defaults = metadata.get("default_parameters", {})
    default_target_path = metadata.get("default_target_path") or defaults.get("target_path")
    supported_modes = metadata.get("supported_volume_modes") or ["ro", "rw"]
    if not isinstance(supported_modes, list) or not supported_modes:
        supported_modes = ["ro", "rw"]
    default_volume_mode = (
        metadata.get("default_volume_mode")
        or defaults.get("volume_mode")
        or supported_modes[0]
    )
    return {
        "name": workflow_name,
        "version": metadata.get("version", "0.6.0"),
@@ -229,9 +221,7 @@ def _lookup_workflow(workflow_name: str):
        "parameters": metadata.get("parameters", {}),
        "default_parameters": metadata.get("default_parameters", {}),
        "required_modules": metadata.get("required_modules", []),
        "supported_volume_modes": supported_modes,
        "default_target_path": default_target_path,
        "default_volume_mode": default_volume_mode
        "default_target_path": default_target_path
    }

@@ -256,10 +246,6 @@ async def list_workflows_mcp() -> Dict[str, Any]:
        "description": metadata.get("description", ""),
        "author": metadata.get("author"),
        "tags": metadata.get("tags", []),
        "supported_volume_modes": metadata.get("supported_volume_modes", ["ro", "rw"]),
        "default_volume_mode": metadata.get("default_volume_mode")
        or defaults.get("volume_mode")
        or "ro",
        "default_target_path": metadata.get("default_target_path")
        or defaults.get("target_path")
    })
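After this cleanup, _lookup_workflow and list_workflows_mcp no longer surface any volume-mode fields, so a client consuming the listing only deals with the keys that remain. A rough sketch of the reduced entry shape (field names taken from the diff, values purely illustrative):

    # Illustrative only: a per-workflow listing entry once volume-mode fields are gone.
    workflow_entry = {
        "name": "secret_detection",
        "description": "Scan a project for leaked credentials",
        "author": "FuzzingLabs",
        "tags": ["secrets", "static"],
        "default_target_path": ".",
    }

    def summarize(entry: dict) -> str:
        """Tiny consumer: no branching on volume modes is needed any more."""
        target = entry.get("default_target_path", ".")
        return f"{entry['name']}: {entry['description']} (target: {target})"

    print(summarize(workflow_entry))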
@@ -14,7 +14,7 @@ Models for workflow findings and submissions
# Additional attribution and requirements are provided in the NOTICE file.

from pydantic import BaseModel, Field
from typing import Dict, Any, Optional, Literal, List
from typing import Dict, Any, Optional, List
from datetime import datetime

@@ -73,10 +73,6 @@ class WorkflowMetadata(BaseModel):
        default_factory=list,
        description="Required module names"
    )
    supported_volume_modes: List[Literal["ro", "rw"]] = Field(
        default=["ro", "rw"],
        description="Supported volume mount modes"
    )


class WorkflowListItem(BaseModel):
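With supported_volume_modes dropped (and Literal no longer imported), the metadata model keeps only storage-agnostic fields. A hedged sketch of the surviving shape, limited to the fields visible in this diff (the real model defines more):

    from typing import List
    from pydantic import BaseModel, Field

    class WorkflowMetadataSketch(BaseModel):
        """Illustrative subset of WorkflowMetadata after the cleanup (not the full model)."""
        required_modules: List[str] = Field(
            default_factory=list,
            description="Required module names",
        )
        # supported_volume_modes is intentionally absent: with Temporal + MinIO
        # storage there is no host volume mount whose ro/rw mode needs declaring.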
@@ -16,11 +16,6 @@ tags:
#   - "copy-on-write": Download once, copy for each run (balances performance and isolation)
workspace_isolation: "isolated"

default_parameters:
  target_file: null
  max_iterations: 1000000
  timeout_seconds: 1800

parameters:
  type: object
  properties:
@@ -16,12 +16,6 @@ tags:
#   - "copy-on-write": Download once, copy for each run (balances performance and isolation)
workspace_isolation: "isolated"

default_parameters:
  target_name: null
  max_iterations: 1000000
  timeout_seconds: 1800
  sanitizer: "address"

parameters:
  type: object
  properties:
@@ -30,13 +30,5 @@ parameters:
      default: false
      description: "Scan files without Git context"

default_parameters:
  scan_mode: "detect"
  redact: true
  no_git: false

required_modules:
  - "gitleaks"

supported_volume_modes:
  - "ro"
@@ -13,12 +13,6 @@ tags:
# Workspace isolation mode
workspace_isolation: "shared"

default_parameters:
  agent_url: "http://fuzzforge-task-agent:8000/a2a/litellm_agent"
  llm_model: "gpt-5-mini"
  llm_provider: "openai"
  max_files: 5

parameters:
  type: object
  properties:
@@ -30,14 +30,5 @@ parameters:
      type: integer
      default: 20

default_parameters:
  agent_url: "http://fuzzforge-task-agent:8000/a2a/litellm_agent"
  llm_model: "gpt-5-mini"
  llm_provider: "openai"
  max_files: 20

required_modules:
  - "llm_secret_detector"

supported_volume_modes:
  - "ro"
@@ -16,13 +16,6 @@ tags:
# OSS-Fuzz campaigns use isolated mode for safe concurrent campaigns
workspace_isolation: "isolated"

default_parameters:
  project_name: null
  campaign_duration_hours: 1
  override_engine: null
  override_sanitizer: null
  max_iterations: null

parameters:
  type: object
  required:
@@ -18,12 +18,6 @@ tags:
# Using "shared" mode for read-only SAST analysis (no file modifications)
workspace_isolation: "shared"

default_parameters:
  dependency_config: {}
  bandit_config: {}
  mypy_config: {}
  reporter_config: {}

parameters:
  type: object
  properties:
@@ -18,11 +18,6 @@ tags:
# Using "shared" mode for read-only security analysis (no file modifications)
workspace_isolation: "shared"

default_parameters:
  scanner_config: {}
  analyzer_config: {}
  reporter_config: {}

parameters:
  type: object
  properties:
@@ -23,12 +23,5 @@ parameters:
      default: 10
      description: "Maximum directory depth to scan"

default_parameters:
  verify: true
  max_depth: 10

required_modules:
  - "trufflehog"

supported_volume_modes:
  - "ro"
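All of these workflow metadata files lose their redundant default_parameters: blocks (and, where present, supported_volume_modes:); the per-parameter default: values that remain in the parameters: schema, visible in several hunks above, presumably take over as the single source of defaults. A hedged sketch of how such defaults could be resolved from the schema (the repository's actual resolution code is not shown in this diff):

    from typing import Any, Dict

    def defaults_from_schema(parameters_schema: Dict[str, Any]) -> Dict[str, Any]:
        """Collect per-parameter `default:` values from a JSON-schema-style block."""
        properties = parameters_schema.get("properties", {})
        return {
            name: spec["default"]
            for name, spec in properties.items()
            if isinstance(spec, dict) and "default" in spec
        }

    # Example shaped like the trufflehog metadata above (values from the diff).
    schema = {
        "type": "object",
        "properties": {
            "verify": {"type": "boolean", "default": True},
            "max_depth": {"type": "integer", "default": 10,
                          "description": "Maximum directory depth to scan"},
        },
    }
    print(defaults_from_schema(schema))   # {'verify': True, 'max_depth': 10}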
@@ -253,15 +253,15 @@ def display_finding_detail(finding: Dict[str, Any], tool: Dict[str, Any], run_id
    content_lines.append(f"[bold]Tool:[/bold] {tool.get('name', 'Unknown')} v{tool.get('version', 'unknown')}")
    content_lines.append(f"[bold]Run ID:[/bold] {run_id}")
    content_lines.append("")
    content_lines.append(f"[bold]Summary:[/bold]")
    content_lines.append("[bold]Summary:[/bold]")
    content_lines.append(message_text)
    content_lines.append("")
    content_lines.append(f"[bold]Description:[/bold]")
    content_lines.append("[bold]Description:[/bold]")
    content_lines.append(message_markdown)

    if code_snippet:
        content_lines.append("")
        content_lines.append(f"[bold]Code Snippet:[/bold]")
        content_lines.append("[bold]Code Snippet:[/bold]")
        content_lines.append(f"[dim]{code_snippet}[/dim]")

    content = "\n".join(content_lines)
@@ -270,7 +270,7 @@ def display_finding_detail(finding: Dict[str, Any], tool: Dict[str, Any], run_id
    console.print()
    console.print(Panel(
        content,
        title=f"🔍 Finding Detail",
        title="🔍 Finding Detail",
        border_style=severity_color,
        box=box.ROUNDED,
        padding=(1, 2)
@@ -39,7 +39,7 @@ from ..validation import (
)
from ..progress import step_progress
from ..constants import (
    STATUS_EMOJIS, MAX_RUN_ID_DISPLAY_LENGTH, DEFAULT_VOLUME_MODE,
    STATUS_EMOJIS, MAX_RUN_ID_DISPLAY_LENGTH,
    PROGRESS_STEP_DELAYS, MAX_RETRIES, RETRY_DELAY, POLL_INTERVAL
)
from ..worker_manager import WorkerManager
@@ -112,7 +112,6 @@ def execute_workflow_submission(
    workflow: str,
    target_path: str,
    parameters: Dict[str, Any],
    volume_mode: str,
    timeout: Optional[int],
    interactive: bool
) -> Any:
@@ -160,13 +159,10 @@ def execute_workflow_submission(
        except ValueError as e:
            console.print(f"❌ Invalid {param_type}: {e}", style="red")

    # Note: volume_mode is no longer used (Temporal uses MinIO storage)

    # Show submission summary
    console.print("\n🎯 [bold]Executing workflow:[/bold]")
    console.print(f" Workflow: {workflow}")
    console.print(f" Target: {target_path}")
    console.print(f" Volume Mode: {volume_mode}")
    if parameters:
        console.print(f" Parameters: {len(parameters)} provided")
    if timeout:
@@ -252,8 +248,6 @@ def execute_workflow_submission(

        progress.next_step()  # Submitting
        submission = WorkflowSubmission(
            target_path=target_path,
            volume_mode=volume_mode,
            parameters=parameters,
            timeout=timeout
        )
@@ -281,10 +275,6 @@ def execute_workflow(
        None, "--param-file", "-f",
        help="JSON file containing workflow parameters"
    ),
    volume_mode: str = typer.Option(
        DEFAULT_VOLUME_MODE, "--volume-mode", "-v",
        help="Volume mount mode: ro (read-only) or rw (read-write)"
    ),
    timeout: Optional[int] = typer.Option(
        None, "--timeout", "-t",
        help="Execution timeout in seconds"
@@ -410,7 +400,7 @@ def execute_workflow(

    response = execute_workflow_submission(
        client, workflow, target_path, parameters,
        volume_mode, timeout, interactive
        timeout, interactive
    )

    console.print("✅ Workflow execution started!", style="green")
@@ -453,9 +443,9 @@ def execute_workflow(
        console.print("Press Ctrl+C to stop monitoring (execution continues in background).\n")

        try:
            from ..commands.monitor import live_monitor
            # Import monitor command and run it
            live_monitor(response.run_id, refresh=3)
            from ..commands.monitor import _live_monitor
            # Call helper function directly with proper parameters
            _live_monitor(response.run_id, refresh=3, once=False, style="inline")
        except KeyboardInterrupt:
            console.print("\n⏹️ Live monitoring stopped (execution continues in background)", style="yellow")
        except Exception as e:
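This hunk is the live-monitoring bug fix from the PR title: the code used to call the Typer command live_monitor as a plain function, and now calls an internal helper _live_monitor with explicit arguments. The monitor module itself is not shown here, but the usual failure mode with Typer is that unfilled typer.Option defaults arrive as OptionInfo objects rather than values. A minimal, assumed illustration of that pitfall and of the helper pattern used as the fix:

    # Assumed reconstruction of the bug class, not the repository's actual monitor code.
    import typer

    app = typer.Typer()

    @app.command()
    def live_monitor(run_id: str, refresh: int = typer.Option(3),
                     once: bool = typer.Option(False)):
        # Via the CLI, refresh/once are real ints/bools; called directly as
        # live_monitor("run-123"), they are typer OptionInfo objects instead.
        print(type(refresh).__name__)

    def _live_monitor(run_id: str, refresh: int = 3, once: bool = False,
                      style: str = "inline") -> None:
        """Plain helper with real defaults: safe to call from other commands."""
        print(run_id, refresh, once, style)

    live_monitor("run-123")                                           # prints "OptionInfo"
    _live_monitor("run-123", refresh=3, once=False, style="inline")   # the fixed call shape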
@@ -95,12 +95,6 @@ def complete_target_paths(incomplete: str) -> List[str]:
    return []


def complete_volume_modes(incomplete: str) -> List[str]:
    """Auto-complete volume mount modes."""
    modes = ["ro", "rw"]
    return [mode for mode in modes if mode.startswith(incomplete)]


def complete_export_formats(incomplete: str) -> List[str]:
    """Auto-complete export formats."""
    formats = ["json", "csv", "html", "sarif"]
@@ -139,7 +133,6 @@ def complete_config_keys(incomplete: str) -> List[str]:
    "api_url",
    "api_timeout",
    "default_workflow",
    "default_volume_mode",
    "project_name",
    "data_retention_days",
    "auto_save_findings",
@@ -164,11 +157,6 @@ TargetPathComplete = typer.Argument(
    help="Target path (tab completion available)"
)

VolumeModetComplete = typer.Option(
    autocompletion=complete_volume_modes,
    help="Volume mode: ro or rw (tab completion available)"
)

ExportFormatComplete = typer.Option(
    autocompletion=complete_export_formats,
    help="Export format (tab completion available)"
@@ -57,10 +57,6 @@ SEVERITY_STYLES = {
    "info": "bold cyan"
}

# Default volume modes
DEFAULT_VOLUME_MODE = "ro"
SUPPORTED_VOLUME_MODES = ["ro", "rw"]

# Default export formats
DEFAULT_EXPORT_FORMAT = "sarif"
SUPPORTED_EXPORT_FORMATS = ["sarif", "json", "csv"]
@@ -52,7 +52,6 @@ class FuzzyMatcher:
    # Common parameter names
    self.parameter_names = [
        "target_path",
        "volume_mode",
        "timeout",
        "workflow",
        "param",
@@ -70,7 +69,6 @@ class FuzzyMatcher:

    # Common values
    self.common_values = {
        "volume_mode": ["ro", "rw"],
        "format": ["json", "csv", "html", "sarif"],
        "severity": ["critical", "high", "medium", "low", "info"],
    }
@@ -28,7 +28,6 @@ from .commands import (
    ai,
    ingest,
)
from .constants import DEFAULT_VOLUME_MODE
from .fuzzy import enhanced_command_not_found_handler

# Install rich traceback handler
@@ -184,10 +183,6 @@ def run_workflow(
        None, "--param-file", "-f",
        help="JSON file containing workflow parameters"
    ),
    volume_mode: str = typer.Option(
        DEFAULT_VOLUME_MODE, "--volume-mode", "-v",
        help="Volume mount mode: ro (read-only) or rw (read-write)"
    ),
    timeout: Optional[int] = typer.Option(
        None, "--timeout", "-t",
        help="Execution timeout in seconds"
@@ -234,7 +229,6 @@ def run_workflow(
        target_path=target,
        params=params,
        param_file=param_file,
        volume_mode=volume_mode,
        timeout=timeout,
        interactive=interactive,
        wait=wait,
@@ -17,7 +17,7 @@ import re
from pathlib import Path
from typing import Any, Dict, List, Optional

from .constants import SUPPORTED_VOLUME_MODES, SUPPORTED_EXPORT_FORMATS
from .constants import SUPPORTED_EXPORT_FORMATS
from .exceptions import ValidationError

@@ -65,15 +65,6 @@ def validate_target_path(target_path: str, must_exist: bool = True) -> Path:
    return path


def validate_volume_mode(volume_mode: str) -> None:
    """Validate volume mode"""
    if volume_mode not in SUPPORTED_VOLUME_MODES:
        raise ValidationError(
            "volume_mode", volume_mode,
            f"one of: {', '.join(SUPPORTED_VOLUME_MODES)}"
        )


def validate_export_format(export_format: str) -> None:
    """Validate export format"""
    if export_format not in SUPPORTED_EXPORT_FORMATS:
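Only the export-format validator keeps this choice-validation pattern. For reference, a generic version of the same pattern (a sketch only; the repository keeps separate per-field validators, and the ValidationError below is a stand-in with the (field, value, expectation) shape visible in the removed code):

    from typing import Sequence

    class ValidationError(Exception):
        """Stand-in mirroring the call shape seen in the diff."""
        def __init__(self, field: str, value: str, expected: str):
            super().__init__(f"Invalid {field} '{value}', expected {expected}")

    def validate_choice(field: str, value: str, allowed: Sequence[str]) -> None:
        """Generic form of the remaining validators: reject values outside `allowed`."""
        if value not in allowed:
            raise ValidationError(field, value, f"one of: {', '.join(allowed)}")

    validate_choice("format", "sarif", ["sarif", "json", "csv"])   # passes silently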
@@ -217,9 +217,6 @@ services:
      context: ./workers/python
      dockerfile: Dockerfile
    container_name: fuzzforge-worker-python
    profiles:
      - workers
      - python
    depends_on:
      postgresql:
        condition: service_healthy
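Dropping the profiles: block is what makes the Python worker auto-start: Compose only starts profiled services when their profile is activated, so without the block a plain docker compose up now brings the worker up alongside the core services. A small, assumption-laden check script one could use to confirm the behaviour locally (assumes Docker Compose v2 and the container name from the diff):

    # Sketch: confirm the Python worker starts without any --profile flag.
    import json
    import subprocess

    subprocess.run(["docker", "compose", "up", "-d"], check=True)
    out = subprocess.run(
        ["docker", "compose", "ps", "--format", "json"],
        check=True, capture_output=True, text=True,
    ).stdout

    # Recent Compose v2 releases emit one JSON object per line for this command.
    names = [json.loads(line).get("Name", "") for line in out.splitlines() if line.strip()]
    print("worker up:", any("fuzzforge-worker-python" in name for name in names))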
@@ -64,7 +64,6 @@ def main():
    print("📝 Workflow metadata:")
    print(f" Author: {metadata.author}")
    print(f" Required modules: {metadata.required_modules}")
    print(f" Supported volume modes: {metadata.supported_volume_modes}")
    print()

    # Prepare target path (use current directory as example)
@@ -74,7 +73,6 @@ def main():
    # Create workflow submission
    submission = create_workflow_submission(
        target_path=target_path,
        volume_mode="ro",
        timeout=300,  # 5 minutes
    )
@@ -234,7 +232,6 @@ async def async_main():
    target_path = Path.cwd().absolute()
    submission = create_workflow_submission(
        target_path=target_path,
        volume_mode="ro",
        timeout=300,
    )

@@ -135,23 +135,18 @@ class BatchAnalyzer:
            # Determine appropriate timeout based on workflow type
            if "fuzzing" in metadata.tags:
                timeout = 1800  # 30 minutes for fuzzing
                volume_mode = "rw"
            elif "dynamic" in metadata.tags:
                timeout = 900  # 15 minutes for dynamic analysis
                volume_mode = "rw"
            else:
                timeout = 300  # 5 minutes for static analysis
                volume_mode = "ro"

        except Exception:
            # Fallback settings
            timeout = 600
            volume_mode = "ro"

        # Create submission
        submission = create_workflow_submission(
            target_path=project_path,
            volume_mode=volume_mode,
            timeout=timeout
        )

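In the batch-analysis example the tag check now only has to pick a timeout; the volume-mode bookkeeping disappears entirely. A compact sketch of the post-change selection (timeout values taken from the diff; the helper name is illustrative):

    from typing import Sequence

    def pick_timeout(tags: Sequence[str]) -> int:
        """Timeout heuristic from the example: fuzzing > dynamic > static."""
        if "fuzzing" in tags:
            return 1800   # 30 minutes for fuzzing
        if "dynamic" in tags:
            return 900    # 15 minutes for dynamic analysis
        return 300        # 5 minutes for static analysis

    # A 600-second fallback still applies when metadata cannot be read at all.
    print(pick_timeout(["fuzzing"]), pick_timeout(["static"]))   # 1800 300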
@@ -193,7 +193,6 @@ async def main():

    submission = create_workflow_submission(
        target_path=target_path,
        volume_mode="rw",  # Fuzzing may need to write files
        timeout=3600,  # 1 hour timeout
        resource_limits=resource_limits,
        parameters={
@@ -33,7 +33,6 @@ def main():
    workflow_name = workflows[0].name
    submission = create_workflow_submission(
        target_path=Path.cwd().absolute(),
        volume_mode="ro",
        timeout=300
    )

@@ -440,7 +440,6 @@ class FuzzForgeClient:
        workflow_name: str,
        target_path: Union[str, Path],
        parameters: Optional[Dict[str, Any]] = None,
        volume_mode: str = "ro",
        timeout: Optional[int] = None,
        progress_callback: Optional[Callable[[int, int], None]] = None
    ) -> RunSubmissionResponse:
@@ -454,7 +453,6 @@ class FuzzForgeClient:
            workflow_name: Name of the workflow to execute
            target_path: Local path to file or directory to analyze
            parameters: Workflow-specific parameters
            volume_mode: Volume mount mode ("ro" or "rw")
            timeout: Timeout in seconds
            progress_callback: Optional callback(bytes_uploaded, total_bytes) for progress

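For SDK users the visible change is one fewer keyword argument. A hedged usage sketch follows: the method name is not shown in this hunk, so submit_workflow below is a placeholder, and the constructor argument and workflow values are made up; only the remaining keyword arguments match the signature above.

    from pathlib import Path

    # FuzzForgeClient comes from the diff; the URL, method name, workflow name
    # and parameter values below are illustrative placeholders only.
    client = FuzzForgeClient("http://localhost:8000")

    response = client.submit_workflow(            # hypothetical method name
        workflow_name="secret_detection",
        target_path=Path("./my_project"),
        parameters={"max_depth": 10},
        timeout=300,                               # seconds
        # volume_mode="ro"  <- no longer accepted after this change
    )
    print(response.run_id)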
@@ -193,8 +193,6 @@ class WorkflowTester:

    # Create workflow submission
    submission = create_workflow_submission(
        target_path=str(test_path),
        volume_mode="ro",
        **workflow_params
    )