Skip to main content

Permissions

The athena.core.permissions module provides a comprehensive permission system with three core concepts:
  1. Capability Tokens - Control which operations a caller can perform
  2. Sensitivity Labels - Classify data flowing through the system
  3. Secret Mode - Restrict all output to public-only data

Quick Start

from athena.core.permissions import (
    get_permissions,
    Permission,
    Sensitivity,
    Action
)

perms = get_permissions()
perms.check("smart_search")  # Raises PermissionDenied if blocked
perms.label("session_log content")  # Returns Sensitivity.INTERNAL
perms.set_secret_mode(True)  # Activates demo/external mode

Permission Levels

Permission Enum

class Permission(str, Enum):
    """Capability levels a caller may hold, from weakest to strongest."""

    # Can query / read data.
    READ = "read"
    # Can modify session logs and checkpoints.
    WRITE = "write"
    # Can modify config, clear caches, manage sessions.
    ADMIN = "admin"
    # Can delete data and run shell commands.
    DANGEROUS = "dangerous"
Hierarchy:
DANGEROUS > ADMIN > WRITE > READ
Level Comparison:
# Numeric rank for each capability level, used for ">=" comparisons between
# a caller's level and a tool's required level. Higher number = stronger
# capability (DANGEROUS > ADMIN > WRITE > READ).
_PERMISSION_LEVEL = {
    Permission.READ: 0,
    Permission.WRITE: 1,
    Permission.ADMIN: 2,
    Permission.DANGEROUS: 3,
}

Sensitivity Classification

Sensitivity Enum

class Sensitivity(str, Enum):
    """Data-classification labels attached to tool output and content."""

    # Safe for external sharing, demos, GitHub.
    PUBLIC = "public"
    # Normal operational data, session logs.
    INTERNAL = "internal"
    # API keys, credentials, personal finances, trading.
    SECRET = "secret"

Secret Patterns

Content is auto-classified as SECRET if it matches:
# Substring patterns that force SECRET classification in label() and that
# redact() replaces with "[REDACTED]".
# NOTE(review): the list mixes lower-case and upper-case entries ("password"
# vs "SUPABASE_KEY"), which suggests matching is case-sensitive — confirm
# against the label()/redact() implementation.
SECRET_PATTERNS = [
    "api_key", "api-key", "apikey",
    "secret_key", "secret-key",
    "password", "passwd",
    "SUPABASE_KEY", "GOOGLE_API_KEY", "ANTHROPIC_API_KEY",
    "private_key", "access_token", "bearer",
    "trading", "eurusd", "forex",
    "P&L", "profit_loss",
    "bank_account", "credit_card",
    ".env",
]

Internal Patterns

Content is auto-classified as INTERNAL if it matches:
# Substring patterns that classify content as INTERNAL in label().
# Presumably checked only when no SECRET pattern matched (SECRET is the
# stronger label) — confirm against the label() implementation.
INTERNAL_PATTERNS = [
    "session_log",
    "checkpoint",
    "canonical",
    "memory_bank",
    "decision_ledger",
    "user_profile",
    "userContext",
]

Tool Registry

Tools are registered with their required permission level and data sensitivity:
# Registry of tools known to the permission engine. Each entry maps a tool
# name to the minimum permission level required to call it ("permission"),
# the classification of the data it exposes ("sensitivity"), and a
# human-readable description. check() and check_sensitivity() consult this
# table to decide whether a call is allowed.
TOOL_REGISTRY: dict[str, dict[str, Any]] = {
    "smart_search": {
        "permission": Permission.READ,
        "sensitivity": Sensitivity.INTERNAL,
        "description": "Search knowledge base",
    },
    "quicksave": {
        "permission": Permission.WRITE,
        "sensitivity": Sensitivity.INTERNAL,
        "description": "Save checkpoint to session log",
    },
    "health_check": {
        "permission": Permission.READ,
        "sensitivity": Sensitivity.PUBLIC,
        "description": "System health audit",
    },
    "recall_session": {
        "permission": Permission.READ,
        "sensitivity": Sensitivity.INTERNAL,
        "description": "Read session log content",
    },
    "clear_cache": {
        "permission": Permission.ADMIN,
        "sensitivity": Sensitivity.INTERNAL,
        "description": "Clear search cache",
    },
    "update_canonical": {
        "permission": Permission.ADMIN,
        "sensitivity": Sensitivity.SECRET,
        "description": "Modify canonical memory",
    },
}

Permission Engine

get_permissions()

Returns the singleton PermissionEngine instance.
from athena.core.permissions import get_permissions

perms = get_permissions()

PermissionEngine Class

@dataclass
class PermissionEngine:
    """Singleton engine enforcing capability, sensitivity, and granular rules."""

    # Current caller's maximum permission level (defaults to WRITE).
    caller_level: Permission = Permission.WRITE

    # Secret mode — when True, blocks access to INTERNAL and SECRET data,
    # leaving only PUBLIC tools accessible (demo/external mode).
    secret_mode: bool = False

    # Audit log of permission checks; rotated at 1000 entries, trimmed to 500.
    audit_log: list[dict] = field(default_factory=list)

    # State file for persistence (.agent/state/permissions.json).
    _state_path: Path | None = None

    # Glob-based allow/ask/deny engine consulted by gate().
    _granular: GranularPermissionEngine | None = None

Core API

check(tool_name)

Check if the caller has permission to execute a tool.
def check(self, tool_name: str) -> bool:
    """
    Check if the current caller has permission to execute a tool.

    Compares the tool's required permission (from TOOL_REGISTRY)
    against ``caller_level``.

    Args:
        tool_name: Registry key of the tool being invoked.

    Returns:
        True if allowed.

    Raises:
        PermissionDenied: If the caller's level is below the tool's
            required permission.
    """
Example:
from athena.core.permissions import get_permissions, PermissionDenied

perms = get_permissions()
try:
    perms.check("smart_search")  # OK if caller_level >= READ
    perms.check("clear_cache")   # Raises if caller_level < ADMIN
except PermissionDenied as e:
    print(f"Blocked: {e}")

check_sensitivity(tool_name)

Check if tool output is allowed under current sensitivity mode.
def check_sensitivity(self, tool_name: str) -> bool:
    """
    Check if tool output is allowed under current sensitivity mode.

    In secret_mode, only PUBLIC tools are allowed; INTERNAL and SECRET
    tools are blocked.

    Args:
        tool_name: Registry key of the tool being invoked.

    Raises:
        SecretModeViolation: If the tool's data sensitivity is blocked
            under the current mode.
    """
Example:
perms.set_secret_mode(True)

try:
    perms.check_sensitivity("health_check")  # OK (PUBLIC)
    perms.check_sensitivity("smart_search")  # Raises (INTERNAL)
except SecretModeViolation as e:
    print(f"Blocked in demo mode: {e}")

gate(tool_name, input_str)

Combined gate checking permission, sensitivity, AND granular rules.
def gate(self, tool_name: str, input_str: str = "*") -> bool:
    """
    Combined gate — checks permission, sensitivity, AND granular rules.

    This is the main entry point for the MCP middleware.

    Args:
        tool_name: Tool being invoked.
        input_str: Raw tool input matched against the granular glob
            rules (e.g. a shell command or file path). Defaults to "*".
    """
Example:
perms.gate("bash", "git status")  # OK
perms.gate("bash", "rm -rf /")    # Raises PermissionDenied
perms.gate("read", ".env")       # Raises PermissionDenied

label(content)

Auto-classify content sensitivity based on pattern matching.
def label(self, content: str) -> Sensitivity:
    """Auto-classify content sensitivity based on pattern matching.

    Content matching SECRET_PATTERNS is labeled SECRET, content matching
    INTERNAL_PATTERNS is labeled INTERNAL, and everything else PUBLIC.
    SECRET presumably takes precedence over INTERNAL — confirm against
    the implementation.
    """
Example:
perms.label("ANTHROPIC_API_KEY=sk-...")  # Sensitivity.SECRET
perms.label("session_log content")       # Sensitivity.INTERNAL
perms.label("public documentation")      # Sensitivity.PUBLIC

redact(content)

Redact secret patterns from content.
def redact(self, content: str) -> str:
    """
    Redact secret patterns from content.

    Used when secret_mode is active but data must still flow.
    Occurrences of SECRET_PATTERNS entries are replaced with
    "[REDACTED]"; the rest of the content passes through unchanged.
    """
Example:
perms.set_secret_mode(True)
text = "API key: GOOGLE_API_KEY=abc123"
redacted = perms.redact(text)
print(redacted)  # "API key: [REDACTED]=abc123"

Mode Control

set_secret_mode(enabled)

Toggle secret/demo mode.
def set_secret_mode(self, enabled: bool) -> dict:
    """Toggle secret/demo mode.

    Returns a dict with the new "secret_mode" flag, a short "effect"
    description, and the "blocked_tools" list of non-PUBLIC tools.
    """
Example:
result = perms.set_secret_mode(True)
print(result)
Output:
{
    "secret_mode": True,
    "effect": "Only PUBLIC tools accessible",
    "blocked_tools": [
        "smart_search",
        "quicksave",
        "recall_session",
        "governance_status",
        "clear_cache",
        "update_canonical",
        "run_evaluator"
    ]
}

set_caller_level(level)

Set the caller’s permission level.
def set_caller_level(self, level: Permission) -> dict:
    """Set the caller's permission level.

    Returns a dict with the new "caller_level" value and the resulting
    "accessible_tools" list.
    """
Example:
from athena.core.permissions import Permission

result = perms.set_caller_level(Permission.ADMIN)
print(result)
Output:
{
    "caller_level": "admin",
    "accessible_tools": [
        "smart_search",
        "quicksave",
        "health_check",
        "recall_session",
        "governance_status",
        "list_memory_paths",
        "clear_cache",
        "run_evaluator"
    ]
}

Granular Permissions

Action Enum

class Action(str, Enum):
    """What a matching granular rule does with a tool call."""

    # Run without approval.
    ALLOW = "allow"
    # Prompt the user for approval before running.
    ASK = "ask"
    # Block the action outright.
    DENY = "deny"

GranularPermissionEngine

Glob-based permission engine with allow/ask/deny per tool. Origin: OpenCode (anomalyco/opencode, 109K stars)
Athena Integration: Feb 2026
class GranularPermissionEngine:
    """
    Glob-based permission engine with allow/ask/deny per tool.

    Rules are evaluated in order; last matching rule wins — so broad
    defaults come first and more specific overrides follow (see
    DEFAULT_GRANULAR_RULES).
    """

Default Rules

# Default granular rules. Order matters: rules are evaluated in order and
# the LAST matching rule wins, so the blanket allow-all comes first and
# specific deny/ask overrides follow. Note the later "*.env.example" allow
# deliberately re-permits a path the earlier "*.env.*" rule denies.
DEFAULT_GRANULAR_RULES = [
    {"tool": "*", "pattern": "*", "action": "allow"},
    {"tool": "read", "pattern": "*.env", "action": "deny"},
    {"tool": "read", "pattern": "*.env.*", "action": "deny"},
    {"tool": "read", "pattern": "*.env.example", "action": "allow"},
    {"tool": "bash", "pattern": "rm *", "action": "deny"},
    {"tool": "bash", "pattern": "git *", "action": "allow"},
    {"tool": "doom_loop", "pattern": "*", "action": "ask"},
    {"tool": "external_directory", "pattern": "*", "action": "ask"},
]

GranularRule Matching

@dataclass
class GranularRule:
    """One allow/ask/deny rule: a tool selector plus an input glob."""

    tool: str      # Tool name, or "*" to match every tool
    pattern: str   # Glob pattern applied to the tool's input string
    action: Action # allow, ask, or deny

    def matches(self, tool_name: str, input_str: str) -> bool:
        """Return True when both the tool name and the input match this rule."""
        # The tool selector must match first; "*" is an explicit wildcard.
        if self.tool != "*" and not fnmatch.fnmatch(tool_name, self.tool):
            return False
        # Then the rule's glob pattern must match the raw tool input.
        return fnmatch.fnmatch(input_str, self.pattern)
Example:
rule = GranularRule(
    tool="bash",
    pattern="git *",
    action=Action.ALLOW
)

rule.matches("bash", "git status")  # True
rule.matches("bash", "rm file")     # False
rule.matches("read", "git status")  # False

Adding Custom Rules

perms = get_permissions()
perms._granular.add_rule(
    tool="bash",
    pattern="docker *",
    action=Action.ASK
)

Exceptions

PermissionDenied

class PermissionDenied(Exception):
    """Raised when a tool call lacks required capability.

    Attributes:
        tool: Name of the tool that was blocked.
        required: Minimum permission the tool demands.
        granted: Permission level the caller actually holds.
    """

    def __init__(self, tool: str, required: Permission, granted: Permission):
        # Pass a human-readable message to Exception so str(e) is useful —
        # the documented usage prints the exception directly (f"Blocked: {e}").
        super().__init__(
            f"Permission denied for '{tool}': requires {required}, caller has {granted}"
        )
        self.tool = tool
        self.required = required
        self.granted = granted
Example:
try:
    perms.check("clear_cache")
except PermissionDenied as e:
    print(f"Tool: {e.tool}")
    print(f"Required: {e.required}")
    print(f"Granted: {e.granted}")

SecretModeViolation

class SecretModeViolation(Exception):
    """Raised when secret data is accessed in public/demo mode.

    Attributes:
        tool: Name of the tool that was blocked.
        data_sensitivity: Sensitivity label of the tool's output.
    """

    def __init__(self, tool: str, data_sensitivity: Sensitivity):
        # Pass a human-readable message to Exception so str(e) is useful —
        # the documented usage prints the exception directly.
        super().__init__(
            f"Secret mode active: tool '{tool}' exposes {data_sensitivity} data; "
            f"only PUBLIC output is allowed"
        )
        self.tool = tool
        self.data_sensitivity = data_sensitivity

Introspection

get_status()

Return current permission state.
status = perms.get_status()
print(status)
Output:
{
    "caller_level": "write",
    "secret_mode": False,
    "registered_tools": 18,
    "accessible_tools": [
        "smart_search",
        "quicksave",
        "health_check",
        "recall_session",
        "governance_status",
        "list_memory_paths"
    ],
    "blocked_tools": [
        "clear_cache",
        "update_canonical",
        "run_evaluator"
    ],
    "audit_entries": 42,
    "granular_rules": [
        {"tool": "*", "pattern": "*", "action": "allow"},
        {"tool": "read", "pattern": "*.env", "action": "deny"}
    ]
}

get_tool_manifest()

Return the full tool permission manifest.
manifest = perms.get_tool_manifest()
for tool in manifest:
    print(f"{tool['tool']}: {tool['permission']} / {tool['sensitivity']}")
Output:
[
    {
        "tool": "smart_search",
        "permission": "read",
        "sensitivity": "internal",
        "description": "Search knowledge base",
        "accessible": True
    },
    ...
]

Persistence

Permission state is persisted to disk:
_state_path = PROJECT_ROOT / ".agent" / "state" / "permissions.json"
State File Format:
{
  "secret_mode": false,
  "caller_level": "write",
  "last_updated": "2026-03-03T14:32:15.123456"
}
Granular Rules:
rules_path = PROJECT_ROOT / ".agent" / "state" / "permission_rules.json"

Audit Log

All permission checks are audited:
perms.audit_log
# [
#   {
#     "timestamp": "2026-03-03T14:32:15.123456",
#     "action": "check",
#     "target": "smart_search",
#     "required": "read",
#     "granted": "write",
#     "allowed": True
#   },
#   ...
# ]
Log Rotation:
  • Max entries: 1000
  • Trimmed to: 500 (when limit exceeded)

See Also

Build docs developers (and LLMs) love