Skip to content

Verification

Formal and functional verification utilities for SNN designs.

  • Temporal property checking: verify that SNN outputs satisfy temporal logic specifications
  • Equivalence checking: verify Python simulation matches Verilog RTL bit-for-bit
  • Coverage metrics: track which neuron states and transitions have been exercised

7 SymbiYosys formal verification scripts + 67 properties in hdl/formal/.

from sc_neurocore.verification import TemporalPropertyChecker

sc_neurocore.verification

sc_neurocore.verification -- Tier: research (experimental; APIs may change).

FormalVerifier

Interval arithmetic checker for stochastic probability bounds and energy safety constraints. Not an SMT solver.

Source code in src/sc_neurocore/verification/formal_proofs.py
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
class FormalVerifier:
    """
    Interval arithmetic checker for stochastic probability bounds and
    energy safety constraints. Not an SMT solver.
    """

    @staticmethod
    def verify_probability_bounds(input_interval: Interval, weight_interval: Interval) -> bool:
        """
        Prove that Output Probability is always in [0, 1].
        Logic: Out = Input * Weight (AND gate)
        """
        # AND-gate semantics: P(A & B) = P(A) * P(B), assuming independence.
        product = input_interval * weight_interval

        # The property holds iff the whole interval sits inside [0, 1].
        within_unit = 0.0 <= product.min_val and product.max_val <= 1.0
        logger.info(
            "Verification: Input %s * Weight %s -> Output %s",
            input_interval,
            weight_interval,
            product,
        )
        logger.info("Property (0 <= p <= 1): %s", "HELD" if within_unit else "VIOLATED")
        return within_unit

    @staticmethod
    def verify_energy_safety(energy: float, cost: float) -> bool:
        """
        Prove that operation will not consume more energy than available.
        """
        # Precondition Energy >= Cost implies postcondition NewEnergy >= 0.
        if energy >= cost:
            remaining = energy - cost
            logger.info("Verification: %s - %s = %s >= 0. HELD.", energy, cost, remaining)
            return True
        logger.warning("Verification: %s < %s. VIOLATED (Halt).", energy, cost)
        return False

verify_probability_bounds(input_interval, weight_interval) staticmethod

Prove that Output Probability is always in [0, 1]. Logic: Out = Input * Weight (AND gate)

Source code in src/sc_neurocore/verification/formal_proofs.py
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
@staticmethod
def verify_probability_bounds(input_interval: Interval, weight_interval: Interval) -> bool:
    """
    Prove that Output Probability is always in [0, 1].
    Logic: Out = Input * Weight (AND gate)
    """
    # AND-gate semantics: P(A & B) = P(A) * P(B), assuming independence.
    product = input_interval * weight_interval

    # The property holds iff the whole interval sits inside [0, 1].
    within_unit = 0.0 <= product.min_val and product.max_val <= 1.0
    logger.info(
        "Verification: Input %s * Weight %s -> Output %s",
        input_interval,
        weight_interval,
        product,
    )
    logger.info("Property (0 <= p <= 1): %s", "HELD" if within_unit else "VIOLATED")
    return within_unit

verify_energy_safety(energy, cost) staticmethod

Prove that operation will not consume more energy than available.

Source code in src/sc_neurocore/verification/formal_proofs.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
@staticmethod
def verify_energy_safety(energy: float, cost: float) -> bool:
    """
    Prove that operation will not consume more energy than available.
    """
    # Precondition Energy >= Cost implies postcondition NewEnergy >= 0.
    if energy >= cost:
        remaining = energy - cost
        logger.info("Verification: %s - %s = %s >= 0. HELD.", energy, cost, remaining)
        return True
    logger.warning("Verification: %s < %s. VIOLATED (Halt).", energy, cost)
    return False

CodeSafetyVerifier dataclass

AST blocklist screen for auto-generated code.

Walks the AST and rejects code containing known-dangerous patterns: filesystem mutation, process spawning, network access, code execution, and unrestricted imports.

Limitations: this is a static blocklist, not a sandbox. It catches common dangerous patterns but cannot prevent all malicious code. Obfuscated calls (getattr chains, importlib indirection) may bypass it. Do not use as a security boundary without additional sandboxing.

Source code in src/sc_neurocore/verification/safety.py
 15
 16
 17
 18
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
@dataclass
class CodeSafetyVerifier:
    """AST blocklist screen for auto-generated code.

    Walks the AST and rejects code containing known-dangerous patterns:
    filesystem mutation, process spawning, network access, code execution,
    and unrestricted imports.

    Limitations: this is a static blocklist, not a sandbox. It catches
    common dangerous patterns but cannot prevent all malicious code.
    Obfuscated calls (getattr chains, importlib indirection) may bypass it.
    Do not use as a security boundary without additional sandboxing.
    """

    # NOTE(review): all attributes below are plain class-level constants
    # (no dataclass fields), so @dataclass only generates a trivial
    # __init__/__repr__/__eq__ here.

    # Attribute names rejected when used as a call target (e.g. os.system()).
    _BLOCKED_ATTRS = frozenset(
        {
            # Process
            "system",
            "popen",
            "spawn",
            "spawnl",
            "spawnle",
            "kill",
            "fork",
            # Filesystem mutation
            "rmtree",
            "unlink",
            "remove",
            "rmdir",
            "rename",
            "truncate",
            "makedirs",
            # Network
            "urlopen",
            "urlretrieve",
            # Subprocess
            "Popen",
            "call",
            "check_call",
            "check_output",
            "run",
            # Reflection (write)
            "setattr",
            "delattr",
        }
    )

    # Bare builtin names rejected when called directly (e.g. eval(...)).
    _BLOCKED_BUILTINS = frozenset(
        {
            "exec",
            "eval",
            "compile",
            "__import__",
            "breakpoint",
        }
    )

    # Top-level module names rejected in import statements.
    _BLOCKED_IMPORTS = frozenset(
        {
            "subprocess",
            "shutil",
            "socket",
            "http",
            "urllib",
            "requests",
            "importlib",
            "ctypes",
            "signal",
            "multiprocessing",
        }
    )

    def verify_code_safety(self, source_code: str) -> bool:
        """Static analysis of source code for dangerous patterns.

        Parses ``source_code`` and walks every AST node, rejecting blocked
        attribute calls, blocked builtin calls, and blocked imports.
        Unparseable code is rejected outright.

        Returns True if no blocked patterns found, False otherwise.
        """
        try:
            tree = ast.parse(source_code)
        except SyntaxError:
            logger.error("Safety Violation: Syntax Error in generated code.")
            return False

        for node in ast.walk(tree):
            if isinstance(node, ast.Call):
                if isinstance(node.func, ast.Attribute):
                    # Matches any x.<attr>(...) call, regardless of what x is.
                    if node.func.attr in self._BLOCKED_ATTRS:
                        logger.warning(
                            "Safety Violation: blocked call '%s'.",
                            node.func.attr,
                        )
                        return False
                elif isinstance(node.func, ast.Name):
                    if node.func.id in self._BLOCKED_BUILTINS:
                        logger.warning(
                            "Safety Violation: blocked builtin '%s'.",
                            node.func.id,
                        )
                        return False

            if isinstance(node, (ast.Import, ast.ImportFrom)):
                names = []
                if isinstance(node, ast.Import):
                    # Only the root package matters (e.g. "urllib.request" -> "urllib").
                    names = [alias.name.split(".")[0] for alias in node.names]
                elif node.module:
                    # node.module is None for relative imports like "from . import x".
                    names = [node.module.split(".")[0]]
                for name in names:
                    if name in self._BLOCKED_IMPORTS:
                        logger.warning(
                            "Safety Violation: blocked import '%s'.",
                            name,
                        )
                        return False

        return True

    def verify_logic_invariant(self, func, input_sample, expected_condition) -> bool:
        """Dynamic verification: run func and check output against condition.

        Calls ``func(input_sample)`` and evaluates ``expected_condition`` on
        the result. Returns True when the condition holds; returns False
        (after logging) when it fails or when either callable raises.
        """
        # Fixed: annotation said -> None but every path returns a bool.
        try:
            res = func(input_sample)
            # expected_condition stays inside the try so exceptions it raises
            # are also treated as violations rather than propagating.
            if expected_condition(res):
                return True
            else:
                logger.error(
                    "Safety Violation: Logic invariant failed. Output %s invalid.",
                    res,
                )
                return False
        except Exception as e:
            logger.error("Safety Violation: Runtime Error - %s", e)
            return False

verify_code_safety(source_code)

Static analysis of source code for dangerous patterns.

Returns True if no blocked patterns found, False otherwise.

Source code in src/sc_neurocore/verification/safety.py
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
def verify_code_safety(self, source_code: str) -> bool:
    """Static analysis of source code for dangerous patterns.

    Returns True if no blocked patterns found, False otherwise.
    """
    try:
        tree = ast.parse(source_code)
    except SyntaxError:
        logger.error("Safety Violation: Syntax Error in generated code.")
        return False

    for node in ast.walk(tree):
        if isinstance(node, ast.Call):
            target = node.func
            # x.<attr>(...) style calls, regardless of what x is.
            if isinstance(target, ast.Attribute) and target.attr in self._BLOCKED_ATTRS:
                logger.warning(
                    "Safety Violation: blocked call '%s'.",
                    target.attr,
                )
                return False
            # Bare-name calls such as eval(...).
            if isinstance(target, ast.Name) and target.id in self._BLOCKED_BUILTINS:
                logger.warning(
                    "Safety Violation: blocked builtin '%s'.",
                    target.id,
                )
                return False

        if isinstance(node, (ast.Import, ast.ImportFrom)):
            if isinstance(node, ast.Import):
                # Only the root package matters ("urllib.request" -> "urllib").
                roots = [alias.name.split(".")[0] for alias in node.names]
            elif node.module:
                roots = [node.module.split(".")[0]]
            else:
                # Relative "from . import x" has node.module is None.
                roots = []
            for root in roots:
                if root in self._BLOCKED_IMPORTS:
                    logger.warning(
                        "Safety Violation: blocked import '%s'.",
                        root,
                    )
                    return False

    return True

verify_logic_invariant(func, input_sample, expected_condition)

Dynamic verification: run func and check output against condition.

Source code in src/sc_neurocore/verification/safety.py
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
def verify_logic_invariant(self, func, input_sample, expected_condition) -> bool:
    """Dynamic verification: run func and check output against condition.

    Calls ``func(input_sample)`` and evaluates ``expected_condition`` on the
    result. Returns True when the condition holds; returns False (after
    logging) when it fails or when either callable raises.
    """
    # Fixed: annotation said -> None but every path returns a bool.
    try:
        res = func(input_sample)
        # expected_condition stays inside the try so exceptions it raises
        # are also treated as violations rather than propagating.
        if expected_condition(res):
            return True
        else:
            logger.error(
                "Safety Violation: Logic invariant failed. Output %s invalid.",
                res,
            )
            return False
    except Exception as e:
        logger.error("Safety Violation: Runtime Error - %s", e)
        return False