chore: remove old fuzzforge_ai files

This commit is contained in:
AFredefon
2026-01-30 10:06:21 +01:00
parent 9a97cc0f31
commit 1186f57a5c
374 changed files with 0 additions and 87514 deletions

View File

@@ -1,165 +0,0 @@
name: Benchmarks

on:
  # Disabled automatic runs - benchmarks not ready for CI/CD yet
  # schedule:
  #   - cron: '0 2 * * *'  # 2 AM UTC every day
  # Allow manual trigger for testing
  workflow_dispatch:
    inputs:
      compare_with:
        description: 'Baseline commit to compare against (optional)'
        required: false
        default: ''
  # pull_request:
  #   paths:
  #     - 'backend/benchmarks/**'
  #     - 'backend/toolbox/modules/**'
  #     - '.github/workflows/benchmark.yml'

jobs:
  benchmark:
    name: Run Benchmarks
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Fetch all history for comparison

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential

      - name: Install Python dependencies
        working-directory: ./backend
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"
          pip install pytest pytest-asyncio pytest-benchmark pytest-benchmark[histogram]
          pip install -e ../sdk  # Install SDK for benchmarks

      - name: Run benchmarks
        working-directory: ./backend
        run: |
          pytest benchmarks/ \
            -v \
            --benchmark-only \
            --benchmark-json=benchmark-results.json \
            --benchmark-histogram=benchmark-histogram

      - name: Store benchmark results
        uses: actions/upload-artifact@v4
        with:
          name: benchmark-results-${{ github.run_number }}
          path: |
            backend/benchmark-results.json
            backend/benchmark-histogram.svg

      - name: Download baseline benchmarks
        if: github.event_name == 'pull_request'
        uses: dawidd6/action-download-artifact@v3
        continue-on-error: true
        with:
          workflow: benchmark.yml
          branch: ${{ github.base_ref }}
          # FIX: uploaded artifacts embed the run number, so the name must be
          # matched as a pattern; without name_is_regexp the action treats
          # 'benchmark-results-*' as a literal name and never finds a baseline.
          name: benchmark-results-.*
          name_is_regexp: true
          path: ./baseline
          search_artifacts: true

      - name: Compare with baseline
        if: github.event_name == 'pull_request' && hashFiles('baseline/benchmark-results.json') != ''
        run: |
          python -c "
          import json
          import sys

          with open('backend/benchmark-results.json') as f:
              current = json.load(f)
          with open('baseline/benchmark-results.json') as f:
              baseline = json.load(f)

          print('\\n## Benchmark Comparison\\n')
          print('| Benchmark | Current | Baseline | Change |')
          print('|-----------|---------|----------|--------|')

          regressions = []
          for bench in current['benchmarks']:
              name = bench['name']
              current_time = bench['stats']['mean']
              # Find matching baseline
              baseline_bench = next((b for b in baseline['benchmarks'] if b['name'] == name), None)
              if baseline_bench:
                  baseline_time = baseline_bench['stats']['mean']
                  change = ((current_time - baseline_time) / baseline_time) * 100
                  print(f'| {name} | {current_time:.4f}s | {baseline_time:.4f}s | {change:+.2f}% |')
                  # Flag regressions > 10%
                  if change > 10:
                      regressions.append((name, change))
              else:
                  print(f'| {name} | {current_time:.4f}s | N/A | NEW |')

          if regressions:
              print('\\n⚠ **Performance Regressions Detected:**')
              for name, change in regressions:
                  print(f'- {name}: +{change:.2f}%')
              sys.exit(1)
          else:
              print('\\n✅ No significant performance regressions detected')
          "

      - name: Comment PR with results
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v7
        with:
          # NOTE(review): the '\\n' escapes below produce a literal backslash-n
          # in the JavaScript strings (github-script is not shell-expanded) —
          # confirm the posted comment renders newlines as intended.
          script: |
            const fs = require('fs');
            const results = JSON.parse(fs.readFileSync('backend/benchmark-results.json', 'utf8'));
            let body = '## Benchmark Results\\n\\n';
            body += '| Category | Benchmark | Mean Time | Std Dev |\\n';
            body += '|----------|-----------|-----------|---------|\\n';
            for (const bench of results.benchmarks) {
              const group = bench.group || 'ungrouped';
              const name = bench.name.split('::').pop();
              const mean = bench.stats.mean.toFixed(4);
              const stddev = bench.stats.stddev.toFixed(4);
              body += `| ${group} | ${name} | ${mean}s | ${stddev}s |\\n`;
            }
            body += '\\n📊 Full benchmark results available in artifacts.';
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: body
            });

  benchmark-summary:
    name: Benchmark Summary
    runs-on: ubuntu-latest
    needs: benchmark
    if: always()
    steps:
      - name: Check results
        run: |
          if [ "${{ needs.benchmark.result }}" != "success" ]; then
            echo "Benchmarks failed or detected regressions"
            exit 1
          fi
          echo "Benchmarks completed successfully!"

View File

@@ -1,70 +0,0 @@
name: Python CI

# This is a dumb CI to ensure that the python client and backend builds correctly
# It could be optimized to run faster, building, testing and linting only changed code
# but for now it is good enough. It runs on every push and PR to any branch.
# It also runs on demand.
on:
  workflow_dispatch:
  push:
    paths:
      - "ai/**"
      - "backend/**"
      - "cli/**"
      - "sdk/**"
      - "src/**"
  pull_request:
    paths:
      - "ai/**"
      - "backend/**"
      - "cli/**"
      - "sdk/**"
      - "src/**"

jobs:
  ci:
    name: ci
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v5

      - name: Setup uv
        uses: astral-sh/setup-uv@v6
        with:
          enable-cache: true

      - name: Set up Python
        run: uv python install

      # Validate no obvious issues
      # Quick hack because CLI returns non-zero exit code when no args are provided
      - name: Run base command
        run: |
          set +e
          uv run ff
          # FIX: capture the exit status immediately — the original re-read $?
          # inside the echo, where it reports the status of the '[' test
          # instead of 'uv run ff'.
          status=$?
          set -e
          if [ "$status" -ne 2 ]; then
            echo "Expected exit code 2 from 'uv run ff', got $status"
            exit 1
          fi

      - name: Build fuzzforge_ai package
        run: uv build

      - name: Build ai package
        working-directory: ai
        run: uv build

      - name: Build cli package
        working-directory: cli
        run: uv build

      - name: Build sdk package
        working-directory: sdk
        run: uv build

      - name: Build backend package
        working-directory: backend
        run: uv build

View File

@@ -1,57 +0,0 @@
name: Deploy Docusaurus to GitHub Pages

on:
  workflow_dispatch:
  push:
    branches:
      - master
    paths:
      - "docs/**"

jobs:
  # Build the static site from the docs/ directory.
  build:
    name: Build Docusaurus
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./docs
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-node@v4
        with:
          node-version: 24
          cache: npm
          cache-dependency-path: "**/package-lock.json"
      - name: Install dependencies
        run: npm ci
      - name: Build website
        run: npm run build
      - name: Upload Build Artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: ./docs/build

  # Publish the artifact produced by the build job.
  deploy:
    name: Deploy to GitHub Pages
    needs: build
    # Grant GITHUB_TOKEN the permissions required to make a Pages deployment
    permissions:
      pages: write # to deploy to Pages
      id-token: write # to verify the deployment originates from an appropriate source
    # Deploy to the github-pages environment
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

View File

@@ -1,33 +0,0 @@
name: Docusaurus test deployment

on:
  workflow_dispatch:
  push:
    paths:
      - "docs/**"
  pull_request:
    paths:
      - "docs/**"

jobs:
  # Verify the docs site builds cleanly without publishing anything.
  test-deploy:
    name: Test deployment
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: ./docs
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - uses: actions/setup-node@v4
        with:
          node-version: 24
          cache: npm
          cache-dependency-path: "**/package-lock.json"
      - name: Install dependencies
        run: npm ci
      - name: Test build website
        run: npm run build

View File

@@ -1,152 +0,0 @@
# FuzzForge CI/CD Example - Security Scanning
#
# This workflow demonstrates how to integrate FuzzForge into your CI/CD pipeline
# for automated security testing on pull requests and pushes.
#
# Features:
# - Runs entirely in GitHub Actions (no external infrastructure needed)
# - Auto-starts FuzzForge services on-demand
# - Fails builds on error-level SARIF findings
# - Uploads SARIF results to GitHub Security tab
# - Exports findings as artifacts
#
# Prerequisites:
# - Ubuntu runner with Docker support
# - At least 4GB RAM available
# - ~90 seconds startup time

name: Security Scan Example

on:
  pull_request:
    branches: [main, develop]
  push:
    branches: [main]

jobs:
  security-scan:
    name: Security Assessment
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Start FuzzForge
        run: |
          bash scripts/ci-start.sh

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'

      - name: Install FuzzForge CLI
        run: |
          pip install ./cli

      - name: Initialize FuzzForge
        run: |
          ff init --api-url http://localhost:8000 --name "GitHub Actions Security Scan"

      - name: Run Security Assessment
        run: |
          ff workflow run security_assessment . \
            --wait \
            --fail-on error \
            --export-sarif results.sarif

      - name: Upload SARIF to GitHub Security
        if: always()
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif

      - name: Upload findings as artifact
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: security-findings
          path: results.sarif
          retention-days: 30

      - name: Stop FuzzForge
        if: always()
        run: |
          bash scripts/ci-stop.sh

  secret-scan:
    name: Secret Detection
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/checkout@v4

      - name: Start FuzzForge
        run: bash scripts/ci-start.sh

      - name: Install CLI
        run: |
          pip install ./cli

      - name: Initialize & Scan
        run: |
          ff init --api-url http://localhost:8000 --name "Secret Detection"
          ff workflow run secret_detection . \
            --wait \
            --fail-on all \
            --export-sarif secrets.sarif

      - name: Upload results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: secret-scan-results
          path: secrets.sarif
          retention-days: 30

      - name: Cleanup
        if: always()
        run: bash scripts/ci-stop.sh

  # Example: Nightly fuzzing campaign (long-running)
  nightly-fuzzing:
    name: Nightly Fuzzing
    runs-on: ubuntu-latest
    timeout-minutes: 120
    # Only run on schedule
    # NOTE(review): this workflow declares no 'schedule' trigger in 'on:', so
    # this condition is never true and the job never runs — confirm whether a
    # cron schedule was meant to be added.
    if: github.event_name == 'schedule'
    steps:
      - uses: actions/checkout@v4

      - name: Start FuzzForge
        run: bash scripts/ci-start.sh

      - name: Install CLI
        run: pip install ./cli

      - name: Run Fuzzing Campaign
        run: |
          ff init --api-url http://localhost:8000
          ff workflow run atheris_fuzzing . \
            max_iterations=100000000 \
            timeout_seconds=7200 \
            --wait \
            --export-sarif fuzzing-results.sarif
        # Don't fail on fuzzing findings, just report
        continue-on-error: true

      - name: Upload fuzzing results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: fuzzing-results
          path: fuzzing-results.sarif
          retention-days: 90

      - name: Cleanup
        if: always()
        run: bash scripts/ci-stop.sh

View File

@@ -1,248 +0,0 @@
name: Tests

on:
  push:
    branches: [ main, master, dev, develop, feature/** ]
  pull_request:
    branches: [ main, master, dev, develop ]

jobs:
  validate-workers:
    name: Validate Workers
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Run worker validation
        run: |
          chmod +x .github/scripts/validate-workers.sh
          .github/scripts/validate-workers.sh

  build-workers:
    name: Build Worker Docker Images
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Fetch all history for proper diff

      - name: Check which workers were modified
        id: check-workers
        run: |
          if [ "${{ github.event_name }}" == "pull_request" ]; then
            # For PRs, check changed files
            CHANGED_FILES=$(git diff --name-only origin/${{ github.base_ref }}...HEAD)
            echo "Changed files:"
            echo "$CHANGED_FILES"
          else
            # For direct pushes, check last commit
            # NOTE(review): HEAD~1 fails on the repository's initial commit and
            # only inspects the newest commit of a multi-commit push — confirm
            # this is acceptable.
            CHANGED_FILES=$(git diff --name-only HEAD~1 HEAD)
          fi

          # Check if docker-compose.yml changed (build all workers)
          if echo "$CHANGED_FILES" | grep -q "^docker-compose.yml"; then
            echo "workers_to_build=worker-python worker-secrets worker-rust worker-android worker-ossfuzz" >> $GITHUB_OUTPUT
            echo "workers_modified=true" >> $GITHUB_OUTPUT
            echo "✅ docker-compose.yml modified - building all workers"
            exit 0
          fi

          # Detect which specific workers changed
          WORKERS_TO_BUILD=""
          if echo "$CHANGED_FILES" | grep -q "^workers/python/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-python"
            echo "✅ Python worker modified"
          fi
          if echo "$CHANGED_FILES" | grep -q "^workers/secrets/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-secrets"
            echo "✅ Secrets worker modified"
          fi
          if echo "$CHANGED_FILES" | grep -q "^workers/rust/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-rust"
            echo "✅ Rust worker modified"
          fi
          if echo "$CHANGED_FILES" | grep -q "^workers/android/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-android"
            echo "✅ Android worker modified"
          fi
          if echo "$CHANGED_FILES" | grep -q "^workers/ossfuzz/"; then
            WORKERS_TO_BUILD="$WORKERS_TO_BUILD worker-ossfuzz"
            echo "✅ OSS-Fuzz worker modified"
          fi

          if [ -z "$WORKERS_TO_BUILD" ]; then
            echo "workers_modified=false" >> $GITHUB_OUTPUT
            echo "⏭️ No worker changes detected - skipping build"
          else
            echo "workers_to_build=$WORKERS_TO_BUILD" >> $GITHUB_OUTPUT
            echo "workers_modified=true" >> $GITHUB_OUTPUT
            echo "Building workers:$WORKERS_TO_BUILD"
          fi

      - name: Set up Docker Buildx
        if: steps.check-workers.outputs.workers_modified == 'true'
        uses: docker/setup-buildx-action@v3

      - name: Build worker images
        if: steps.check-workers.outputs.workers_modified == 'true'
        run: |
          WORKERS="${{ steps.check-workers.outputs.workers_to_build }}"
          echo "Building worker Docker images: $WORKERS"
          docker compose build $WORKERS --no-cache
        continue-on-error: false

  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11'
      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install ruff mypy
      - name: Run ruff
        run: ruff check backend/src backend/toolbox backend/tests backend/benchmarks --output-format=github
      - name: Run mypy (continue on error)
        run: mypy backend/src backend/toolbox || true
        continue-on-error: true

  unit-tests:
    name: Unit Tests
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.11', '3.12']
    steps:
      - uses: actions/checkout@v4
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
      - name: Install system dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y build-essential
      - name: Install Python dependencies
        working-directory: ./backend
        run: |
          python -m pip install --upgrade pip
          pip install -e ".[dev]"
          pip install pytest pytest-asyncio pytest-cov pytest-xdist
      - name: Run unit tests
        working-directory: ./backend
        run: |
          pytest tests/unit/ \
            -v \
            --cov=toolbox/modules \
            --cov=src \
            --cov-report=xml \
            --cov-report=term \
            --cov-report=html \
            -n auto
      - name: Upload coverage to Codecov
        if: matrix.python-version == '3.11'
        uses: codecov/codecov-action@v4
        with:
          file: ./backend/coverage.xml
          flags: unittests
          name: codecov-backend
      - name: Upload coverage HTML
        if: matrix.python-version == '3.11'
        uses: actions/upload-artifact@v4
        with:
          name: coverage-report
          path: ./backend/htmlcov/

  # integration-tests:
  #   name: Integration Tests
  #   runs-on: ubuntu-latest
  #   needs: unit-tests
  #
  #   services:
  #     postgres:
  #       image: postgres:15
  #       env:
  #         POSTGRES_USER: postgres
  #         POSTGRES_PASSWORD: postgres
  #         POSTGRES_DB: fuzzforge_test
  #       options: >-
  #         --health-cmd pg_isready
  #         --health-interval 10s
  #         --health-timeout 5s
  #         --health-retries 5
  #       ports:
  #         - 5432:5432
  #
  #   steps:
  #     - uses: actions/checkout@v4
  #
  #     - name: Set up Python
  #       uses: actions/setup-python@v5
  #       with:
  #         python-version: '3.11'
  #
  #     - name: Set up Docker Buildx
  #       uses: docker/setup-buildx-action@v3
  #
  #     - name: Install Python dependencies
  #       working-directory: ./backend
  #       run: |
  #         python -m pip install --upgrade pip
  #         pip install -e ".[dev]"
  #         pip install pytest pytest-asyncio
  #
  #     - name: Start services (Temporal, MinIO)
  #       run: |
  #         docker-compose -f docker-compose.yml up -d temporal minio
  #         sleep 30
  #
  #     - name: Run integration tests
  #       working-directory: ./backend
  #       run: |
  #         pytest tests/integration/ -v --tb=short
  #       env:
  #         DATABASE_URL: postgresql://postgres:postgres@localhost:5432/fuzzforge_test
  #         TEMPORAL_ADDRESS: localhost:7233
  #         MINIO_ENDPOINT: localhost:9000
  #
  #     - name: Shutdown services
  #       if: always()
  #       run: docker-compose down

  test-summary:
    name: Test Summary
    runs-on: ubuntu-latest
    needs: [validate-workers, lint, unit-tests]
    if: always()
    steps:
      - name: Check test results
        run: |
          if [ "${{ needs.validate-workers.result }}" != "success" ]; then
            echo "Worker validation failed"
            exit 1
          fi
          # FIX: lint is listed in 'needs' but its result was never checked,
          # so a failed lint job silently passed the summary gate.
          if [ "${{ needs.lint.result }}" != "success" ]; then
            echo "Lint failed"
            exit 1
          fi
          if [ "${{ needs.unit-tests.result }}" != "success" ]; then
            echo "Unit tests failed"
            exit 1
          fi
          echo "All tests passed!"