refactor: Update all modules to use new create_finding signature

Updated 10 modules to use the new create_finding() signature, which now requires the rule_id and found_by parameters:

- llm_analyzer.py: Added FoundBy and LLMContext for AI-detected findings
- bandit_analyzer.py: Added tool attribution and moved CWE/confidence to proper fields
- security_analyzer.py: Updated all three finding types (secrets, SQL injection, dangerous functions)
- mypy_analyzer.py: Added tool attribution and moved column info to column_start
- mobsf_scanner.py: Updated all 6 finding creation sites (permissions; manifest issues; code analysis and behavior, each with and without per-file info) with proper line number handling
- opengrep_android.py: Added tool attribution, proper CWE/OWASP formatting, and confidence mapping
- dependency_scanner.py: Added pip-audit attribution for CVE findings
- file_scanner.py: Updated both sensitive file and enumeration findings
- cargo_fuzzer.py: Added fuzzer type attribution for crash findings
- atheris_fuzzer.py: Added fuzzer type attribution for Python crash findings

All modules now properly track:
- Finding source (module, tool name, version, type)
- Confidence levels (high/medium/low)
- CWE and OWASP mappings where applicable
- LLM context for AI-detected issues
This commit is contained in:
tduhamel42
2025-11-02 15:36:30 +01:00
parent 99cee284b6
commit fccd8f32ab
10 changed files with 275 additions and 46 deletions

View File

@@ -21,12 +21,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -237,12 +237,34 @@ class BanditAnalyzer(BaseModule):
except (ValueError, TypeError):
rel_path = Path(filename).name
# Extract confidence and CWE
confidence = issue.get("issue_confidence", "LOW").lower()
cwe_info = issue.get("issue_cwe", {})
cwe_id = f"CWE-{cwe_info.get('id')}" if cwe_info and cwe_info.get("id") else None
# Create FoundBy attribution
# Try to get Bandit version from metrics, fall back to unknown
bandit_version = "unknown"
if "metrics" in bandit_result:
bandit_version = bandit_result["metrics"].get("_version", "unknown")
found_by = FoundBy(
module="bandit_analyzer",
tool_name="Bandit",
tool_version=bandit_version,
type="tool"
)
# Create finding
finding = self.create_finding(
rule_id=test_id,
title=f"{test_name} ({test_id})",
description=issue_text,
severity=severity,
category="security-issue",
found_by=found_by,
confidence=confidence,
cwe=cwe_id,
file_path=str(rel_path),
line_start=line_number,
line_end=line_number,
@@ -251,8 +273,6 @@ class BanditAnalyzer(BaseModule):
metadata={
"test_id": test_id,
"test_name": test_name,
"confidence": issue.get("issue_confidence", "LOW").lower(),
"cwe": issue.get("issue_cwe", {}).get("id") if issue.get("issue_cwe") else None,
"more_info": issue.get("more_info", "")
}
)

View File

@@ -18,12 +18,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy, LLMContext
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult
from modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy, LLMContext
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy, LLMContext
logger = logging.getLogger(__name__)
@@ -270,10 +270,14 @@ class LLMAnalyzer(BaseModule):
return []
# Parse LLM response into findings
full_prompt = f"{system_prompt}\n\n{user_message}"
findings = self._parse_llm_response(
llm_response=llm_response,
file_path=file_path,
workspace=workspace
workspace=workspace,
llm_model=llm_model,
llm_provider=llm_provider,
prompt=full_prompt
)
return findings
@@ -282,7 +286,10 @@ class LLMAnalyzer(BaseModule):
self,
llm_response: str,
file_path: Path,
workspace: Path
workspace: Path,
llm_model: str,
llm_provider: str,
prompt: str
) -> List:
"""Parse LLM response into structured findings"""
@@ -302,7 +309,9 @@ class LLMAnalyzer(BaseModule):
if line.startswith("ISSUE:"):
# Save previous issue if exists
if current_issue:
findings.append(self._create_module_finding(current_issue, relative_path))
findings.append(self._create_module_finding(
current_issue, relative_path, llm_model, llm_provider, prompt
))
current_issue = {"title": line.replace("ISSUE:", "").strip()}
elif line.startswith("SEVERITY:"):
@@ -320,11 +329,20 @@ class LLMAnalyzer(BaseModule):
# Save last issue
if current_issue:
findings.append(self._create_module_finding(current_issue, relative_path))
findings.append(self._create_module_finding(
current_issue, relative_path, llm_model, llm_provider, prompt
))
return findings
def _create_module_finding(self, issue: Dict[str, Any], file_path: str):
def _create_module_finding(
self,
issue: Dict[str, Any],
file_path: str,
llm_model: str,
llm_provider: str,
prompt: str
):
"""Create a ModuleFinding from parsed issue"""
severity_map = {
@@ -334,12 +352,39 @@ class LLMAnalyzer(BaseModule):
"info": "low"
}
# Determine confidence based on severity (LLM is more confident on critical issues)
confidence_map = {
"error": "high",
"warning": "medium",
"note": "medium",
"info": "low"
}
# Create FoundBy attribution
found_by = FoundBy(
module="llm_analyzer",
tool_name=f"{llm_provider}/{llm_model}",
tool_version="1.0.0",
type="llm"
)
# Create LLM context
llm_context = LLMContext(
model=llm_model,
prompt=prompt,
temperature=None # Not exposed in current config
)
# Use base class helper to create proper ModuleFinding
return self.create_finding(
rule_id=f"llm_security_{issue.get('severity', 'warning')}",
title=issue.get("title", "Security issue detected"),
description=issue.get("description", ""),
severity=severity_map.get(issue.get("severity", "warning"), "medium"),
category="security",
found_by=found_by,
confidence=confidence_map.get(issue.get("severity", "warning"), "medium"),
llm_context=llm_context,
file_path=file_path,
line_start=issue.get("line"),
metadata={

View File

@@ -21,12 +21,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -189,18 +189,29 @@ class MypyAnalyzer(BaseModule):
title = f"Type error: {error_code or 'type-issue'}"
description = message
# Create FoundBy attribution
found_by = FoundBy(
module="mypy_analyzer",
tool_name="Mypy",
tool_version="unknown", # Mypy doesn't include version in output
type="tool"
)
finding = self.create_finding(
rule_id=error_code or "type-issue",
title=title,
description=description,
severity=severity,
category="type-error",
found_by=found_by,
confidence="high", # Mypy is highly confident in its type checking
file_path=str(rel_path),
line_start=int(line_num),
line_end=int(line_num),
column_start=int(column) if column else None,
recommendation="Review and fix the type inconsistency or add appropriate type annotations",
metadata={
"error_code": error_code or "unknown",
"column": int(column) if column else None,
"level": level
}
)

View File

@@ -19,12 +19,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -217,11 +217,22 @@ class SecurityAnalyzer(BaseModule):
if self._is_false_positive_secret(match.group(0)):
continue
# Create FoundBy attribution
found_by = FoundBy(
module="security_analyzer",
tool_name="Security Analyzer",
tool_version="1.0.0",
type="tool"
)
findings.append(self.create_finding(
rule_id=f"hardcoded_{secret_type.lower().replace(' ', '_')}",
title=f"Hardcoded {secret_type} detected",
description=f"Found potential hardcoded {secret_type} in {file_path}",
severity="high" if "key" in secret_type.lower() else "medium",
category="hardcoded_secret",
found_by=found_by,
confidence="medium",
file_path=str(file_path),
line_start=line_num,
code_snippet=line_content.strip()[:100],
@@ -261,11 +272,23 @@ class SecurityAnalyzer(BaseModule):
line_num = content[:match.start()].count('\n') + 1
line_content = lines[line_num - 1] if line_num <= len(lines) else ""
# Create FoundBy attribution
found_by = FoundBy(
module="security_analyzer",
tool_name="Security Analyzer",
tool_version="1.0.0",
type="tool"
)
findings.append(self.create_finding(
rule_id=f"sql_injection_{vuln_type.lower().replace(' ', '_')}",
title=f"Potential SQL Injection: {vuln_type}",
description=f"Detected potential SQL injection vulnerability via {vuln_type}",
severity="high",
category="sql_injection",
found_by=found_by,
confidence="medium",
cwe="CWE-89",
file_path=str(file_path),
line_start=line_num,
code_snippet=line_content.strip()[:100],
@@ -323,11 +346,22 @@ class SecurityAnalyzer(BaseModule):
line_num = content[:match.start()].count('\n') + 1
line_content = lines[line_num - 1] if line_num <= len(lines) else ""
# Create FoundBy attribution
found_by = FoundBy(
module="security_analyzer",
tool_name="Security Analyzer",
tool_version="1.0.0",
type="tool"
)
findings.append(self.create_finding(
rule_id=f"dangerous_function_{func_name.replace('()', '').replace('.', '_')}",
title=f"Dangerous function: {func_name}",
description=f"Use of potentially dangerous function {func_name}: {risk_type}",
severity="medium",
category="dangerous_function",
found_by=found_by,
confidence="medium",
file_path=str(file_path),
line_start=line_num,
code_snippet=line_content.strip()[:100],

View File

@@ -24,12 +24,12 @@ from typing import Dict, Any, List
import aiohttp
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
logger = logging.getLogger(__name__)
@@ -278,6 +278,14 @@ class MobSFScanner(BaseModule):
"""Parse MobSF JSON results into standardized findings"""
findings = []
# Create FoundBy attribution for all MobSF findings
found_by = FoundBy(
module="mobsf_scanner",
tool_name="MobSF",
tool_version="3.9.7",
type="tool"
)
# Parse permissions
if 'permissions' in scan_data:
for perm_name, perm_attrs in scan_data['permissions'].items():
@@ -287,10 +295,13 @@ class MobSFScanner(BaseModule):
)
finding = self.create_finding(
rule_id=f"android_permission_{perm_name.replace('.', '_')}",
title=f"Android Permission: {perm_name}",
description=perm_attrs.get('description', 'No description'),
severity=severity,
category="android-permission",
found_by=found_by,
confidence="high",
metadata={
'permission': perm_name,
'status': perm_attrs.get('status'),
@@ -307,13 +318,19 @@ class MobSFScanner(BaseModule):
if isinstance(item, dict):
severity = self.SEVERITY_MAP.get(item.get('severity', '').lower(), 'medium')
title = item.get('title') or item.get('name') or "Manifest Issue"
rule = item.get('rule') or "manifest_issue"
finding = self.create_finding(
title=item.get('title') or item.get('name') or "Manifest Issue",
rule_id=f"android_manifest_{rule.replace(' ', '_').replace('-', '_')}",
title=title,
description=item.get('description', 'No description'),
severity=severity,
category="android-manifest",
found_by=found_by,
confidence="high",
metadata={
'rule': item.get('rule'),
'rule': rule,
'tool': 'mobsf',
}
)
@@ -335,16 +352,32 @@ class MobSFScanner(BaseModule):
# Create a finding for each affected file
if isinstance(files_dict, dict) and files_dict:
for file_path, line_numbers in files_dict.items():
# Extract first line number if available
line_start = None
if line_numbers:
try:
# Can be string like "28" or "65,81"
line_start = int(str(line_numbers).split(',')[0])
except (ValueError, AttributeError):
pass
# Extract CWE from metadata
cwe_value = metadata_dict.get('cwe')
cwe_id = f"CWE-{cwe_value}" if cwe_value else None
finding = self.create_finding(
rule_id=finding_name.replace(' ', '_').replace('-', '_'),
title=finding_name,
description=metadata_dict.get('description', 'No description'),
severity=severity,
category="android-code-analysis",
found_by=found_by,
confidence="medium",
cwe=cwe_id,
owasp=metadata_dict.get('owasp'),
file_path=file_path,
line_number=line_numbers, # Can be string like "28" or "65,81"
line_start=line_start,
metadata={
'cwe': metadata_dict.get('cwe'),
'owasp': metadata_dict.get('owasp'),
'masvs': metadata_dict.get('masvs'),
'cvss': metadata_dict.get('cvss'),
'ref': metadata_dict.get('ref'),
@@ -355,14 +388,21 @@ class MobSFScanner(BaseModule):
findings.append(finding)
else:
# Fallback: create one finding without file info
# Extract CWE from metadata
cwe_value = metadata_dict.get('cwe')
cwe_id = f"CWE-{cwe_value}" if cwe_value else None
finding = self.create_finding(
rule_id=finding_name.replace(' ', '_').replace('-', '_'),
title=finding_name,
description=metadata_dict.get('description', 'No description'),
severity=severity,
category="android-code-analysis",
found_by=found_by,
confidence="medium",
cwe=cwe_id,
owasp=metadata_dict.get('owasp'),
metadata={
'cwe': metadata_dict.get('cwe'),
'owasp': metadata_dict.get('owasp'),
'masvs': metadata_dict.get('masvs'),
'cvss': metadata_dict.get('cvss'),
'ref': metadata_dict.get('ref'),
@@ -389,13 +429,25 @@ class MobSFScanner(BaseModule):
# Create a finding for each affected file
if isinstance(files_dict, dict) and files_dict:
for file_path, line_numbers in files_dict.items():
# Extract first line number if available
line_start = None
if line_numbers:
try:
# Can be string like "28" or "65,81"
line_start = int(str(line_numbers).split(',')[0])
except (ValueError, AttributeError):
pass
finding = self.create_finding(
rule_id=f"android_behavior_{key.replace(' ', '_').replace('-', '_')}",
title=f"Behavior: {label}",
description=metadata_dict.get('description', 'No description'),
severity=severity,
category="android-behavior",
found_by=found_by,
confidence="medium",
file_path=file_path,
line_number=line_numbers,
line_start=line_start,
metadata={
'line_numbers': line_numbers,
'behavior_key': key,
@@ -406,10 +458,13 @@ class MobSFScanner(BaseModule):
else:
# Fallback: create one finding without file info
finding = self.create_finding(
rule_id=f"android_behavior_{key.replace(' ', '_').replace('-', '_')}",
title=f"Behavior: {label}",
description=metadata_dict.get('description', 'No description'),
severity=severity,
category="android-behavior",
found_by=found_by,
confidence="medium",
metadata={
'behavior_key': key,
'tool': 'mobsf',

View File

@@ -23,12 +23,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleFinding, ModuleResult, FoundBy
logger = logging.getLogger(__name__)
@@ -302,23 +302,40 @@ class OpenGrepAndroid(BaseModule):
# Map severity to our standard levels
finding_severity = self._map_severity(severity)
# Map confidence
confidence_map = {"HIGH": "high", "MEDIUM": "medium", "LOW": "low"}
finding_confidence = confidence_map.get(confidence, "medium")
# Format CWE and OWASP
cwe_id = f"CWE-{cwe[0]}" if cwe and isinstance(cwe, list) and cwe else None
owasp_str = owasp[0] if owasp and isinstance(owasp, list) and owasp else None
# Create FoundBy attribution
found_by = FoundBy(
module="opengrep_android",
tool_name="OpenGrep",
tool_version="1.45.0",
type="tool"
)
# Create finding
finding = self.create_finding(
rule_id=rule_id,
title=f"Android Security: {rule_id}",
description=message or f"OpenGrep rule {rule_id} triggered",
severity=finding_severity,
category=self._get_category(rule_id, extra),
found_by=found_by,
confidence=finding_confidence,
cwe=cwe_id,
owasp=owasp_str,
file_path=path_info if path_info else None,
line_start=start_line if start_line > 0 else None,
line_end=end_line if end_line > 0 and end_line != start_line else None,
code_snippet=lines.strip() if lines else None,
recommendation=self._get_recommendation(rule_id, extra),
metadata={
"rule_id": rule_id,
"opengrep_severity": severity,
"confidence": confidence,
"cwe": cwe,
"owasp": owasp,
"fix": extra.get("fix", ""),
"impact": extra.get("impact", ""),
"likelihood": extra.get("likelihood", ""),

View File

@@ -19,7 +19,7 @@ from typing import Dict, Any, List, Optional, Callable
import uuid
import httpx
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -556,7 +556,16 @@ class AtherisFuzzer(BaseModule):
# Encode crash input for storage
crash_input_b64 = base64.b64encode(crash["input"]).decode()
# Create FoundBy attribution
found_by = FoundBy(
module="atheris_fuzzer",
tool_name="Atheris",
tool_version="unknown",
type="fuzzer"
)
finding = self.create_finding(
rule_id=f"fuzzer_crash_{crash['exception_type'].lower().replace(' ', '_')}",
title=f"Crash: {crash['exception_type']}",
description=(
f"Atheris found crash during fuzzing:\n"
@@ -566,6 +575,8 @@ class AtherisFuzzer(BaseModule):
),
severity="critical",
category="crash",
found_by=found_by,
confidence="high", # Fuzzer-found crashes are highly reliable
file_path=str(target_path),
metadata={
"crash_input_base64": crash_input_b64,

View File

@@ -13,7 +13,7 @@ import time
from pathlib import Path
from typing import Dict, Any, List, Optional, Callable
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -426,14 +426,25 @@ class CargoFuzzer(BaseModule):
else:
severity = "high"
# Create FoundBy attribution
found_by = FoundBy(
module="cargo_fuzz",
tool_name="cargo-fuzz",
tool_version="0.11.2",
type="fuzzer"
)
# Create finding
finding = self.create_finding(
rule_id=f"fuzzer_crash_{error_type.lower().replace(' ', '_')}",
title=f"Crash: {error_type} in {target_name}",
description=f"Cargo-fuzz discovered a crash in target '{target_name}'. "
f"Error type: {error_type}. "
f"Input size: {len(crash_input)} bytes.",
severity=severity,
category="crash",
found_by=found_by,
confidence="high", # Fuzzer-found crashes are highly reliable
file_path=f"fuzz/fuzz_targets/{target_name}.rs",
code_snippet=stack_trace[:500],
recommendation="Review the crash details and fix the underlying bug. "

View File

@@ -21,12 +21,12 @@ from pathlib import Path
from typing import Dict, Any, List
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, ModuleFinding, FoundBy
logger = logging.getLogger(__name__)
@@ -201,11 +201,22 @@ class DependencyScanner(BaseModule):
recommendation = f"Upgrade {package_name} to a fixed version: {', '.join(fix_versions)}" if fix_versions else f"Check for updates to {package_name}"
# Create FoundBy attribution
found_by = FoundBy(
module="dependency_scanner",
tool_name="pip-audit",
tool_version="unknown",
type="tool"
)
finding = self.create_finding(
rule_id=f"vulnerable_dependency_{package_name}",
title=f"Vulnerable dependency: {package_name} ({vuln_id})",
description=f"{description}\n\nAffected package: {package_name} {package_version}",
severity=severity,
category="vulnerable-dependency",
found_by=found_by,
confidence="high", # pip-audit uses official CVE database
file_path=str(rel_path),
recommendation=recommendation,
metadata={

View File

@@ -20,12 +20,12 @@ from typing import Dict, Any
import hashlib
try:
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult
from toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy
except ImportError:
try:
from modules.base import BaseModule, ModuleMetadata, ModuleResult
from modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy
except ImportError:
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult
from src.toolbox.modules.base import BaseModule, ModuleMetadata, ModuleResult, FoundBy
logger = logging.getLogger(__name__)
@@ -122,6 +122,14 @@ class FileScanner(BaseModule):
logger.info(f"Scanning workspace with patterns: {patterns}")
# Create FoundBy attribution for all findings
found_by = FoundBy(
module="file_scanner",
tool_name="File Scanner",
tool_version="1.0.0",
type="tool"
)
try:
# Scan for each pattern
for pattern in patterns:
@@ -152,10 +160,13 @@ class FileScanner(BaseModule):
# Check for sensitive files
if check_sensitive and self._is_sensitive_file(file_path):
findings.append(self.create_finding(
rule_id="sensitive_file",
title=f"Potentially sensitive file: {relative_path.name}",
description=f"Found potentially sensitive file at {relative_path}",
severity="medium",
category="sensitive_file",
found_by=found_by,
confidence="medium",
file_path=str(relative_path),
metadata={
"file_size": file_size,
@@ -170,10 +181,13 @@ class FileScanner(BaseModule):
# Create informational finding for each file
findings.append(self.create_finding(
rule_id="file_enumeration",
title=f"File discovered: {relative_path.name}",
description=f"File: {relative_path}",
severity="info",
category="file_enumeration",
found_by=found_by,
confidence="high",
file_path=str(relative_path),
metadata={
"file_size": file_size,