Files
ai-llm-red-team-handbook/scripts/prompt_injection/inject_plugin.py
T
shiva108 b3d3bac51f Add practical scripts directory with 400+ tools
- Extracted all code examples from handbook chapters
- Organized into 15 attack categories
- Created shared utilities (api_client, validators, logging, constants)
- Added workflow orchestration scripts
- Implemented install.sh for easy setup
- Renamed all scripts to descriptive functional names
- Added comprehensive README and documentation
- Included pytest test suite and configuration
2026-01-07 11:39:46 +01:00

46 lines
1.2 KiB
Python

#!/usr/bin/env python3
"""
3. Sandboxing and Least Privilege for Plugins
Source: Chapter_14_Prompt_Injection
Category: prompt_injection
"""
import argparse
import sys
class SandboxedPluginExecutor:
    """Run plugin tools under least privilege inside a resource-limited sandbox.

    NOTE(review): relies on ``PluginSandbox``, ``SandboxViolation``,
    ``log_security_incident`` and ``get_allowed_tools_for_user`` being
    defined elsewhere in the project — confirm they are in scope at runtime.
    """

    def execute_tool(self, tool_name, arguments, user_context):
        """Execute *tool_name* with *arguments* on behalf of *user_context*.

        Raises:
            PermissionError: if the tool is not on the user's allow-list.
            SandboxViolation: re-raised after logging a security incident.
        """
        # Principle of least privilege: the tool must appear on the
        # per-user allow-list before anything else happens.
        permitted = self.get_allowed_tools_for_user(user_context)
        if tool_name not in permitted:
            raise PermissionError(f"Tool {tool_name} not allowed for user")

        # Confine execution: no network, read-only filesystem, bounded
        # memory and wall-clock time.
        jail = PluginSandbox(
            network_access=False,
            file_system_access='read_only',
            memory_limit='100MB',
            timeout=5,  # seconds
        )
        try:
            outcome = jail.execute(tool_name, arguments)
            return self.validate_result(outcome)
        except SandboxViolation as violation:
            # Record the attempted escape before propagating it upward.
            log_security_incident(tool_name, arguments, violation)
            raise
def main():
    """Command-line interface entry point.

    Currently a stub: it defines and parses the CLI arguments (so the
    script exits with usage help on bad input) but performs no further
    work yet. Returns None.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--verbose", "-v", action="store_true", help="Verbose output")
    args = parser.parse_args()
    # TODO: Add main execution logic; ``args.verbose`` is parsed but not
    # yet consumed. (Removed a dead ``pass`` that followed this TODO.)


if __name__ == "__main__":
    main()