Skip to content

Neuro-Symbolic (Predictive Coding)

Predictive coding primitives with hyperdimensional symbol binding. Formally verifiable inference over SC bitstreams.

Quick Start

Python
from sc_neurocore.neuro_symbolic import (
    NeuroSymbolicPredictiveAgent,
    PredictiveAgentConfig,
)

agent = NeuroSymbolicPredictiveAgent(
    PredictiveAgentConfig(
        input_dim=4,
        hidden_dim=2,
        symbols=("left", "right", "rest"),
    )
)
result = agent.observe([0.25, -0.2, 0.1, -0.1], top_k=2)
print(result.signature.popcount)

High-Level Agent

sc_neurocore.neuro_symbolic.agent

High-level neuro-symbolic predictive-coding agent API.

PredictiveAgentConfig dataclass

Configuration for a hybrid symbolic-spiking predictive agent.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
@dataclass(frozen=True)
class PredictiveAgentConfig:
    """Configuration for a hybrid symbolic-spiking predictive agent.

    Dimension, learning-rate, and precision fields must be strictly
    positive; construction fails otherwise.
    """

    input_dim: int
    hidden_dim: int
    symbols: tuple[str, ...] = ()
    lr: float = 0.01
    precision: float = 1.0
    seed: int = 0
    symbol_seed: int = 42

    def __post_init__(self) -> None:
        # Validate in declaration order so the first offending field is the
        # one reported, matching the documented error messages.
        for name in ("input_dim", "hidden_dim", "lr", "precision"):
            if getattr(self, name) <= 0:
                raise ValueError(f"{name} must be positive")

SCErrorSignature dataclass

SC-domain prediction-error signature.

xor_bits is the stochastic-computing error carrier. popcount is the integer error magnitude used by hardware-friendly decision logic.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
@dataclass(frozen=True)
class SCErrorSignature:
    """SC-domain prediction-error signature.

    ``xor_bits`` carries the stochastic-computing error bitstream, and
    ``popcount`` is its integer magnitude, used by hardware-friendly
    decision logic.
    """

    xor_bits: tuple[int, ...]
    popcount: int
    normalised_popcount: float
    mean_abs_error: float

    def to_dict(self) -> dict[str, Any]:
        """Serialise the signature into JSON-compatible builtins."""

        payload: dict[str, Any] = dict(
            xor_bits=list(self.xor_bits),
            popcount=self.popcount,
            normalised_popcount=self.normalised_popcount,
            mean_abs_error=self.mean_abs_error,
        )
        return payload

to_dict()

Return a JSON-compatible representation.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
63
64
65
66
67
68
69
70
71
def to_dict(self) -> dict[str, Any]:
    """Return a JSON-compatible representation of the signature."""

    # Tuples become lists so the payload round-trips through json cleanly.
    return dict(
        xor_bits=list(self.xor_bits),
        popcount=self.popcount,
        normalised_popcount=self.normalised_popcount,
        mean_abs_error=self.mean_abs_error,
    )

HybridInferenceResult dataclass

Result of one high-level neuro-symbolic predictive pass.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
@dataclass(frozen=True)
class HybridInferenceResult:
    """Result of one high-level neuro-symbolic predictive pass."""

    prediction: np.ndarray[Any, Any]
    error: np.ndarray[Any, Any]
    signature: SCErrorSignature
    symbol_scores: tuple[tuple[str, float], ...]
    trace: ReasoningTrace
    learned_error: float | None = None

    def to_dict(self) -> dict[str, Any]:
        """Return a compact JSON-compatible summary."""

        # Key order is deliberate: downstream digests hash this payload.
        scores = [{"symbol": label, "score": value} for label, value in self.symbol_scores]
        summary: dict[str, Any] = {
            "error": self.error.tolist(),
            "learned_error": self.learned_error,
            "prediction": self.prediction.tolist(),
            "signature": self.signature.to_dict(),
            "symbol_scores": scores,
            "trace": self.trace.to_dict(),
        }
        return summary

to_dict()

Return a compact JSON-compatible summary.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
85
86
87
88
89
90
91
92
93
94
95
96
97
def to_dict(self) -> dict[str, Any]:
    """Return a compact JSON-compatible summary of the result."""

    # Built key by key; the ordering matters to digest consumers.
    summary: dict[str, Any] = {"error": self.error.tolist()}
    summary["learned_error"] = self.learned_error
    summary["prediction"] = self.prediction.tolist()
    summary["signature"] = self.signature.to_dict()
    summary["symbol_scores"] = [
        {"symbol": name, "score": value} for name, value in self.symbol_scores
    ]
    summary["trace"] = self.trace.to_dict()
    return summary

NeuroSymbolicPredictiveAgent

Hybrid predictive-coding agent for symbolic-spiking workflows.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
class NeuroSymbolicPredictiveAgent:
    """Hybrid predictive-coding agent for symbolic-spiking workflows."""

    def __init__(self, config: PredictiveAgentConfig):
        self.config = config
        self.encoder = SymbolEncoder(base_seed=config.symbol_seed)
        # The predictive-coding layer is the generative model; the encoder
        # supplies hyperdimensional symbol representations on top of it.
        self.layer = PredictiveCodingLayer(
            input_dim=config.input_dim,
            hidden_dim=config.hidden_dim,
            lr=config.lr,
            precision=config.precision,
            seed=config.seed,
        )
        self.inference = VerifiableInference(self.encoder, self.layer)
        self.inference.register_symbols(config.symbols)

    @property
    def num_symbols(self) -> int:
        """Number of registered symbolic labels."""

        return self.inference.num_symbols

    def register_symbols(self, symbols: Sequence[str]) -> None:
        """Register additional symbolic labels."""

        frozen = tuple(symbols)
        self.inference.register_symbols(frozen)

    def observe(
        self,
        observation: np.ndarray[Any, Any] | Sequence[float],
        *,
        top_k: int = 1,
        learn: bool = False,
    ) -> HybridInferenceResult:
        """Run one predictive-symbolic observation pass."""

        if top_k <= 0:
            raise ValueError("top_k must be positive")
        obs = self._validate_observation(observation)
        prediction = self.layer.predict()
        # Precision-weighted prediction error drives downstream decisions.
        weighted_error = self.config.precision * (obs - prediction)
        sc_signature = build_sc_error_signature(obs, prediction)
        scores, trace = self.inference.infer(obs, top_k=top_k)
        # The optional learning step runs only after inference, so the
        # returned prediction reflects the pre-update model.
        update_error = None
        if learn:
            update_error = self.layer.update(obs)
        return HybridInferenceResult(
            prediction=prediction,
            error=weighted_error,
            signature=sc_signature,
            symbol_scores=tuple((label, float(value)) for label, value in scores),
            trace=trace,
            learned_error=update_error,
        )

    def _validate_observation(
        self, observation: np.ndarray[Any, Any] | Sequence[float]
    ) -> np.ndarray[Any, Any]:
        # Coerce to the layer's float32 working dtype before checking shape.
        vector = np.asarray(observation, dtype=np.float32)
        if vector.ndim != 1:
            raise ValueError("observation must be one-dimensional")
        expected = self.config.input_dim
        if vector.shape[0] != expected:
            raise ValueError(
                f"observation has {vector.shape[0]} elements, expected {expected}"
            )
        return vector

num_symbols property

Number of registered symbolic labels.

register_symbols(symbols)

Register additional symbolic labels.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
122
123
124
125
def register_symbols(self, symbols: Sequence[str]) -> None:
    """Register additional symbolic labels with the inference engine."""

    # Freeze to a tuple so the engine receives an immutable label set.
    frozen = tuple(symbols)
    self.inference.register_symbols(frozen)

observe(observation, *, top_k=1, learn=False)

Run one predictive-symbolic observation pass.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
def observe(
    self,
    observation: np.ndarray[Any, Any] | Sequence[float],
    *,
    top_k: int = 1,
    learn: bool = False,
) -> HybridInferenceResult:
    """Run one predictive-symbolic observation pass."""

    if top_k <= 0:
        raise ValueError("top_k must be positive")
    obs = self._validate_observation(observation)
    prediction = self.layer.predict()
    # Precision-weighted prediction error drives downstream decisions.
    weighted_error = self.config.precision * (obs - prediction)
    sc_signature = build_sc_error_signature(obs, prediction)
    scores, trace = self.inference.infer(obs, top_k=top_k)
    # The optional learning step runs only after inference, so the returned
    # prediction reflects the pre-update model.
    update_error = None
    if learn:
        update_error = self.layer.update(obs)
    return HybridInferenceResult(
        prediction=prediction,
        error=weighted_error,
        signature=sc_signature,
        symbol_scores=tuple((label, float(value)) for label, value in scores),
        trace=trace,
        learned_error=update_error,
    )

build_sc_error_signature(observation, prediction)

Build an XOR/popcount error signature from observation and prediction.

Source code in src/sc_neurocore/neuro_symbolic/agent.py
Python
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
def build_sc_error_signature(
    observation: np.ndarray[Any, Any] | Sequence[float],
    prediction: np.ndarray[Any, Any] | Sequence[float],
) -> SCErrorSignature:
    """Build an XOR/popcount error signature from observation and prediction."""

    obs_vec = np.asarray(observation, dtype=np.float32)
    pred_vec = np.asarray(prediction, dtype=np.float32)
    if obs_vec.shape != pred_vec.shape:
        raise ValueError("observation and prediction must have matching shapes")
    if obs_vec.ndim != 1:
        raise ValueError("observation and prediction must be one-dimensional")

    # Sign bits serve as the SC bitstream carriers; a set XOR bit marks a
    # sign disagreement between observation and prediction.
    disagreement = np.logical_xor(obs_vec >= 0.0, pred_vec >= 0.0).astype(np.uint8)
    magnitude = int(disagreement.sum())
    n_bits = int(disagreement.shape[0])
    mean_abs = float(np.mean(np.abs(obs_vec - pred_vec))) if n_bits else 0.0
    return SCErrorSignature(
        xor_bits=tuple(int(bit) for bit in disagreement.tolist()),
        popcount=magnitude,
        # max() guards the empty-vector case against division by zero.
        normalised_popcount=magnitude / max(n_bits, 1),
        mean_abs_error=mean_abs,
    )

The high-level agent keeps the existing predictive-coding and hyperdimensional-symbol implementation as the underlying engine. Its SC-facing contract is explicit:

  • prediction error is encoded as xor_bits;
  • integer error magnitude is popcount;
  • normalised_popcount is the hardware-friendly magnitude proxy;
  • optional learn=True applies one predictive-coding update after inference.

Self-Verification Trace

The self-verification layer turns a neuro-symbolic inference result into checked obligations rather than a narrative explanation:

Python
from sc_neurocore.neuro_symbolic import build_self_verification_trace

observation = [0.25, -0.2, 0.1, -0.1]
result = agent.observe(observation, top_k=2)
verification = build_self_verification_trace(result, observation=observation)
assert verification.passed
print(verification.result_digest)

The trace checks prediction/error consistency, SC XOR/popcount consistency, reasoning-trace bounds, confidence/similarity ranges, sorted symbolic scores, and emits a stable SHA-256 digest for audit logs.

sc_neurocore.neuro_symbolic.self_verification

Checked self-verification traces for neuro-symbolic inference results.

VerificationStatus

Bases: Enum

Status of one self-verification obligation.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
28
29
30
31
32
33
class VerificationStatus(Enum):
    """Outcome of a single self-verification obligation."""

    # Plain outcome labels, not credentials.
    PASS = "pass"  # nosec B105
    FAIL = "fail"

VerificationObligation dataclass

One checked condition in a self-verification trace.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
@dataclass(frozen=True)
class VerificationObligation:
    """One checked condition in a self-verification trace."""

    name: str
    status: VerificationStatus
    evidence: dict[str, Any]

    def to_dict(self) -> dict[str, Any]:
        """Serialise the obligation for JSON output."""
        # The enum collapses to its string value for the wire format.
        payload = dict(name=self.name, status=self.status.value, evidence=self.evidence)
        return payload

to_dict()

Return a JSON-ready obligation.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
44
45
46
47
48
49
50
def to_dict(self) -> dict[str, Any]:
    """Serialise the obligation for JSON output."""
    # The status enum collapses to its string value for the wire format.
    return dict(name=self.name, status=self.status.value, evidence=self.evidence)

NeuroSymbolicSelfVerificationTrace dataclass

Machine-checkable summary of a neuro-symbolic inference result.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
@dataclass(frozen=True)
class NeuroSymbolicSelfVerificationTrace:
    """Machine-checkable summary of a neuro-symbolic inference result."""

    schema_version: str
    result_digest: str
    obligations: tuple[VerificationObligation, ...]
    reasoning_steps: int
    top_symbols: tuple[str, ...]
    sc_popcount: int
    sc_normalised_popcount: float

    @property
    def passed(self) -> bool:
        """Whether every obligation passed (vacuously true when empty)."""
        for item in self.obligations:
            if item.status != VerificationStatus.PASS:
                return False
        return True

    @property
    def failed_obligations(self) -> tuple[str, ...]:
        """Names of failed obligations."""
        failures = [
            item.name for item in self.obligations if item.status == VerificationStatus.FAIL
        ]
        return tuple(failures)

    def to_dict(self) -> dict[str, Any]:
        """Serialise the trace for audit logs; key order is deliberate."""
        payload: dict[str, Any] = {
            "schema_version": self.schema_version,
            "result_digest": self.result_digest,
            "passed": self.passed,
            "failed_obligations": list(self.failed_obligations),
            "reasoning_steps": self.reasoning_steps,
            "top_symbols": list(self.top_symbols),
            "sc_popcount": self.sc_popcount,
            "sc_normalised_popcount": self.sc_normalised_popcount,
            "obligations": [item.to_dict() for item in self.obligations],
        }
        return payload

passed property

Whether every obligation passed.

failed_obligations property

Names of failed obligations.

to_dict()

Return a JSON-ready trace.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
77
78
79
80
81
82
83
84
85
86
87
88
89
def to_dict(self) -> dict[str, Any]:
    """Serialise the trace for audit logs; key order is deliberate."""
    payload: dict[str, Any] = {
        "schema_version": self.schema_version,
        "result_digest": self.result_digest,
        "passed": self.passed,
        "failed_obligations": list(self.failed_obligations),
    }
    payload["reasoning_steps"] = self.reasoning_steps
    payload["top_symbols"] = list(self.top_symbols)
    payload["sc_popcount"] = self.sc_popcount
    payload["sc_normalised_popcount"] = self.sc_normalised_popcount
    payload["obligations"] = [item.to_dict() for item in self.obligations]
    return payload

NeuroSymbolicSelfVerifier

Build checked self-verification traces for inference outputs.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
class NeuroSymbolicSelfVerifier:
    """Build checked self-verification traces for inference outputs.

    Each ``_check_*`` helper re-derives its expected value from the raw
    inputs and returns a VerificationObligation whose evidence dict records
    both sides of the comparison, so a trace certifies the result rather
    than merely describing it.
    """

    def verify_result(
        self,
        result: HybridInferenceResult,
        *,
        observation: np.ndarray[Any, Any] | Sequence[float],
    ) -> NeuroSymbolicSelfVerificationTrace:
        """Verify a high-level hybrid inference result against its observation.

        Raises:
            ValueError: if the observation, prediction, or error vector is
                not one-dimensional or contains non-finite values.
        """
        obs = self._validate_vector(observation, "observation")
        prediction = self._validate_vector(result.prediction, "prediction")
        error = self._validate_vector(result.error, "error")
        # Obligation order is fixed; it defines the trace layout.
        obligations = (
            self._check_shape("prediction_shape", prediction, obs),
            self._check_shape("error_shape", error, obs),
            self._check_prediction_error(obs, prediction, error),
            self._check_signature(obs, prediction, result.signature),
            self._check_reasoning_trace(result.trace),
            self._check_symbol_scores(result.symbol_scores),
        )
        return self._build_trace(result, obligations)

    def verify_trace_only(
        self,
        trace: ReasoningTrace,
        *,
        symbol_scores: Sequence[tuple[str, float]] = (),
        signature: SCErrorSignature | None = None,
    ) -> NeuroSymbolicSelfVerificationTrace:
        """Verify a trace when only symbolic evidence is available.

        Only the reasoning-trace and symbol-score obligations are checked;
        without the full result no numeric consistency can be re-derived.
        """
        obligations = (
            self._check_reasoning_trace(trace),
            self._check_symbol_scores(tuple(symbol_scores)),
        )
        # NOTE(review): this digest hashes (name, score) pairs, whereas
        # verify_result digests result.to_dict() (dict-per-score) — the two
        # digest schemas are intentionally separate; confirm downstream
        # consumers do not compare them.
        payload = {
            "trace": trace.to_dict(),
            "symbol_scores": [(name, float(score)) for name, score in symbol_scores],
            "signature": signature.to_dict() if signature is not None else None,
        }
        digest = _stable_digest(payload)
        return NeuroSymbolicSelfVerificationTrace(
            schema_version=SCHEMA_VERSION,
            result_digest=digest,
            obligations=obligations,
            reasoning_steps=trace.length,
            top_symbols=tuple(name for name, _score in symbol_scores),
            # Zero stands in for "no SC evidence supplied".
            sc_popcount=signature.popcount if signature is not None else 0,
            sc_normalised_popcount=signature.normalised_popcount if signature is not None else 0.0,
        )

    @staticmethod
    def _validate_vector(values: np.ndarray[Any, Any] | Sequence[float], name: str) -> np.ndarray:
        """Coerce *values* to a finite 1-D float64 array or raise ValueError."""
        arr = np.asarray(values, dtype=np.float64)
        if arr.ndim != 1:
            raise ValueError(f"{name} must be one-dimensional")
        if not np.all(np.isfinite(arr)):
            raise ValueError(f"{name} must contain only finite values")
        return arr

    @staticmethod
    def _check_shape(name: str, lhs: np.ndarray, rhs: np.ndarray) -> VerificationObligation:
        """Obligation that two vectors share the same shape."""
        status = VerificationStatus.PASS if lhs.shape == rhs.shape else VerificationStatus.FAIL
        return VerificationObligation(
            name=name,
            status=status,
            evidence={"lhs_shape": list(lhs.shape), "rhs_shape": list(rhs.shape)},
        )

    @staticmethod
    def _check_prediction_error(
        observation: np.ndarray,
        prediction: np.ndarray,
        error: np.ndarray,
    ) -> VerificationObligation:
        """Obligation that the reported error equals observation - prediction."""
        # NOTE(review): this expects the raw residual, but the high-level
        # agent stores precision * (observation - prediction), so this
        # obligation only passes when precision == 1.0 (the default) —
        # confirm that is intended.
        expected = observation - prediction
        # Guard empty vectors: np.max on an empty array would raise.
        residual = float(np.max(np.abs(expected - error))) if expected.size else 0.0
        status = VerificationStatus.PASS if residual <= 1e-6 else VerificationStatus.FAIL
        return VerificationObligation(
            name="prediction_error_consistency",
            status=status,
            evidence={"max_abs_residual": residual, "tolerance": 1e-6},
        )

    @staticmethod
    def _check_signature(
        observation: np.ndarray,
        prediction: np.ndarray,
        signature: SCErrorSignature,
    ) -> VerificationObligation:
        """Recompute the XOR/popcount signature and compare field by field."""
        # Mirrors build_sc_error_signature: sign bits of each vector are
        # XORed and the popcount is normalised by the bit length.
        expected_bits = np.logical_xor(observation >= 0.0, prediction >= 0.0).astype(np.uint8)
        expected_tuple = tuple(int(bit) for bit in expected_bits.tolist())
        expected_popcount = int(expected_bits.sum())
        expected_normalised = expected_popcount / max(int(expected_bits.shape[0]), 1)
        # All four conditions must hold; 1e-12 absorbs float-division noise.
        valid = (
            signature.xor_bits == expected_tuple
            and signature.popcount == expected_popcount
            and abs(signature.normalised_popcount - expected_normalised) <= 1e-12
            and 0.0 <= signature.normalised_popcount <= 1.0
        )
        return VerificationObligation(
            name="sc_signature_consistency",
            status=VerificationStatus.PASS if valid else VerificationStatus.FAIL,
            evidence={
                "expected_popcount": expected_popcount,
                "actual_popcount": signature.popcount,
                "expected_xor_bits": list(expected_tuple),
                "actual_xor_bits": list(signature.xor_bits),
                "actual_normalised_popcount": signature.normalised_popcount,
            },
        )

    @staticmethod
    def _check_reasoning_trace(trace: ReasoningTrace) -> VerificationObligation:
        """Obligation that the trace is complete and its metrics in range."""
        # Confidence is a probability-like [0, 1] score; similarity is a
        # cosine-like [-1, 1] score.
        confidence_ok = all(0.0 <= step.confidence <= 1.0 for step in trace.steps)
        similarity_ok = all(-1.0 <= step.similarity <= 1.0 for step in trace.steps)
        timestamps_ok = all(step.timestamp_ns >= trace.start_ns for step in trace.steps)
        complete = trace.is_complete
        status = (
            VerificationStatus.PASS
            if confidence_ok and similarity_ok and timestamps_ok and complete
            else VerificationStatus.FAIL
        )
        return VerificationObligation(
            name="reasoning_trace_bounds",
            status=status,
            evidence={
                "complete": complete,
                "length": trace.length,
                "confidence_bounds": confidence_ok,
                "similarity_bounds": similarity_ok,
                "timestamps_after_start": timestamps_ok,
                "mean_confidence": trace.mean_confidence,
            },
        )

    @staticmethod
    def _check_symbol_scores(
        symbol_scores: Sequence[tuple[str, float]],
    ) -> VerificationObligation:
        """Obligation that scores are finite, bounded, sorted, and unique."""
        scores = [float(score) for _name, score in symbol_scores]
        symbols = [name for name, _score in symbol_scores]
        finite = all(np.isfinite(score) for score in scores)
        # Bounded to [-1, 1], matching the similarity range.
        bounded = all(-1.0 <= score <= 1.0 for score in scores)
        # Non-strict descending order: ties are permitted.
        sorted_desc = all(scores[idx] >= scores[idx + 1] for idx in range(len(scores) - 1))
        unique_symbols = len(symbols) == len(set(symbols))
        status = (
            VerificationStatus.PASS
            if finite and bounded and sorted_desc and unique_symbols
            else VerificationStatus.FAIL
        )
        return VerificationObligation(
            name="symbol_score_ordering",
            status=status,
            evidence={
                "scores": scores,
                "finite": finite,
                "bounded": bounded,
                "sorted_descending": sorted_desc,
                "unique_symbols": unique_symbols,
            },
        )

    @staticmethod
    def _build_trace(
        result: HybridInferenceResult,
        obligations: tuple[VerificationObligation, ...],
    ) -> NeuroSymbolicSelfVerificationTrace:
        """Assemble the final trace, digesting the full result payload."""
        # The digest covers result.to_dict() so any field change is audit-visible.
        payload = result.to_dict()
        digest = _stable_digest(payload)
        return NeuroSymbolicSelfVerificationTrace(
            schema_version=SCHEMA_VERSION,
            result_digest=digest,
            obligations=obligations,
            reasoning_steps=result.trace.length,
            top_symbols=tuple(name for name, _score in result.symbol_scores),
            sc_popcount=result.signature.popcount,
            sc_normalised_popcount=result.signature.normalised_popcount,
        )

verify_result(result, *, observation)

Verify a high-level hybrid inference result against its observation.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
def verify_result(
    self,
    result: HybridInferenceResult,
    *,
    observation: np.ndarray[Any, Any] | Sequence[float],
) -> NeuroSymbolicSelfVerificationTrace:
    """Verify a high-level hybrid inference result against its observation."""
    # Validate every vector up front so a malformed input raises before any
    # obligation is recorded.
    obs = self._validate_vector(observation, "observation")
    pred = self._validate_vector(result.prediction, "prediction")
    err = self._validate_vector(result.error, "error")
    # Obligation order is fixed; it defines the trace layout.
    checks = (
        self._check_shape("prediction_shape", pred, obs),
        self._check_shape("error_shape", err, obs),
        self._check_prediction_error(obs, pred, err),
        self._check_signature(obs, pred, result.signature),
        self._check_reasoning_trace(result.trace),
        self._check_symbol_scores(result.symbol_scores),
    )
    return self._build_trace(result, checks)

verify_trace_only(trace, *, symbol_scores=(), signature=None)

Verify a trace when only symbolic evidence is available.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
def verify_trace_only(
    self,
    trace: ReasoningTrace,
    *,
    symbol_scores: Sequence[tuple[str, float]] = (),
    signature: SCErrorSignature | None = None,
) -> NeuroSymbolicSelfVerificationTrace:
    """Verify a trace when only symbolic evidence is available."""
    # Only the trace and score obligations can be checked without the
    # full inference result.
    checks = (
        self._check_reasoning_trace(trace),
        self._check_symbol_scores(tuple(symbol_scores)),
    )
    # Digest exactly the evidence the caller supplied so the trace stays
    # auditable even without a HybridInferenceResult.
    digest = _stable_digest(
        {
            "trace": trace.to_dict(),
            "symbol_scores": [(label, float(value)) for label, value in symbol_scores],
            "signature": None if signature is None else signature.to_dict(),
        }
    )
    popcount = 0 if signature is None else signature.popcount
    normalised = 0.0 if signature is None else signature.normalised_popcount
    return NeuroSymbolicSelfVerificationTrace(
        schema_version=SCHEMA_VERSION,
        result_digest=digest,
        obligations=checks,
        reasoning_steps=trace.length,
        top_symbols=tuple(label for label, _value in symbol_scores),
        sc_popcount=popcount,
        sc_normalised_popcount=normalised,
    )

build_self_verification_trace(result, *, observation)

Convenience wrapper for high-level neuro-symbolic inference results.

Source code in src/sc_neurocore/neuro_symbolic/self_verification.py
Python
273
274
275
276
277
278
279
def build_self_verification_trace(
    result: HybridInferenceResult,
    *,
    observation: np.ndarray[Any, Any] | Sequence[float],
) -> NeuroSymbolicSelfVerificationTrace:
    """Convenience wrapper for high-level neuro-symbolic inference results."""
    # A fresh verifier per call: the verifier itself is stateless.
    verifier = NeuroSymbolicSelfVerifier()
    return verifier.verify_result(result, observation=observation)

Low-Level Primitives

sc_neurocore.neuro_symbolic.predictive_coding

Neuro-symbolic predictive coding primitives for SC-domain inference.

Implements a hierarchical predictive coding architecture where each layer maintains a generative model: top-down predictions are compared against bottom-up observations, and the resulting prediction errors drive learning and symbolic reasoning traces.

The HDC/VSA operations mirror the Rust neuro_symbolic crate's Hypervector type (XOR bind, cyclic permute, majority-vote bundle, normalised Hamming distance), enabling a pure-Python fallback when the FFI shared library is unavailable.

References

  • Rao & Ballard, "Predictive coding in the visual cortex", Nature Neuroscience 2(1), 1999.
  • Kanerva, "Hyperdimensional Computing", Cognitive Computation 1(2), 2009.

BindOp

Bases: Enum

Supported HDC binding operations.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
43
44
45
46
47
class BindOp(Enum):
    """HDC binding operations supported by this module."""

    XOR = "xor"
    MULTIPLY = "multiply"

ReasoningStep dataclass

Single step in a symbolic reasoning trace.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
50
51
52
53
54
55
56
57
58
@dataclass
class ReasoningStep:
    """Single step in a symbolic reasoning trace."""

    # Symbol label queried at this step.
    symbol: str
    # Name of the operation applied at this step.
    operation: str
    # Similarity score to the best match.
    similarity: float
    # Confidence derived from the margin between best and second-best candidates.
    confidence: float
    # Capture time from time.perf_counter_ns(); 0 means "not yet stamped".
    timestamp_ns: int = 0

ReasoningTrace dataclass

Captures a symbolic reasoning chain for audit and formal verification.

Each step records the symbol query, the operation applied, the similarity score to the best match, and a confidence metric derived from the Hamming margin between the best and second-best candidates.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
@dataclass
class ReasoningTrace:
    """Captures a symbolic reasoning chain for audit and formal verification.

    Each step records the symbol query, the operation applied, the
    similarity score to the best match, and a confidence metric derived
    from the Hamming margin between the best and second-best candidates.
    """

    steps: List[ReasoningStep] = field(default_factory=list)
    start_ns: int = 0
    end_ns: int = 0

    def add(
        self,
        symbol: str,
        operation: str,
        similarity: float,
        confidence: float,
    ) -> None:
        # Timestamp each step as it is appended so ordering is auditable.
        step = ReasoningStep(
            symbol=symbol,
            operation=operation,
            similarity=similarity,
            confidence=confidence,
            timestamp_ns=time.perf_counter_ns(),
        )
        self.steps.append(step)

    @property
    def length(self) -> int:
        """Number of recorded steps."""
        return len(self.steps)

    @property
    def mean_confidence(self) -> float:
        """Average step confidence, or 0.0 for an empty trace."""
        if not self.steps:
            return 0.0
        return float(np.mean([step.confidence for step in self.steps]))

    @property
    def is_complete(self) -> bool:
        """True once finalize() has run and at least one step was recorded."""
        return self.end_ns > 0 and self.length > 0

    def finalize(self) -> None:
        """Stamp the end of the reasoning chain."""
        self.end_ns = time.perf_counter_ns()

    def to_dict(self) -> Dict[str, Any]:
        """Return a JSON-compatible summary (timestamps deliberately omitted)."""
        serialised_steps = [
            {
                "symbol": step.symbol,
                "operation": step.operation,
                "similarity": step.similarity,
                "confidence": step.confidence,
            }
            for step in self.steps
        ]
        return {
            "steps": serialised_steps,
            "length": self.length,
            "mean_confidence": self.mean_confidence,
            "complete": self.is_complete,
        }

Hypervector

Packed binary hypervector (pure-Python mirror of the Rust Hypervector).

Uses np.uint64 packed bitstream layout compatible with the neuro_symbolic crate's Vec<u64> representation.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
class Hypervector:
    """Packed binary hypervector (pure-Python mirror of the Rust Hypervector).

    Uses ``np.uint64`` packed bitstream layout compatible with the
    ``neuro_symbolic`` crate's ``Vec<u64>`` representation.
    """

    __slots__ = ("data", "length")

    def __init__(self, data: np.ndarray, length: int):
        self.data = data      # packed uint64 words
        self.length = length  # logical bit length

    @classmethod
    def zeros(cls, dim: int = HYPERVECTOR_DIM) -> Hypervector:
        """All-zero hypervector of *dim* bits."""
        n_words = math.ceil(dim / 64)
        return cls(np.zeros(n_words, dtype=np.uint64), dim)

    @classmethod
    def random(cls, seed: int, dim: int = HYPERVECTOR_DIM) -> Hypervector:
        """Seeded pseudo-random hypervector; bits beyond *dim* are masked off."""
        n_words = math.ceil(dim / 64)
        rng = np.random.default_rng(seed)
        words = rng.integers(0, np.iinfo(np.uint64).max, size=n_words, dtype=np.uint64)
        spare = dim % 64
        if spare > 0:
            # Clear the unused high bits of the final word.
            words[-1] &= np.uint64((1 << spare) - 1)
        return cls(words, dim)

    def bind(self, other: Hypervector) -> Hypervector:
        """XOR binding (self-inverse, dimension-preserving)."""
        bound = np.bitwise_xor(self.data, other.data)
        return Hypervector(bound, self.length)

    def permute(self, shift: int) -> Hypervector:
        """Cyclic right rotation by *shift* bits."""
        if self.length == 0 or shift % self.length == 0:
            return Hypervector(self.data.copy(), self.length)
        rotated = np.roll(_unpack(self), shift % self.length)
        return _pack(rotated, self.length)

    def hamming_distance(self, other: Hypervector) -> float:
        """Normalised Hamming distance (0.0 = identical, 1.0 = opposite)."""
        differing = np.bitwise_xor(self.data, other.data)
        set_bits = 0
        for word in differing:
            set_bits += bin(int(word)).count("1")
        return set_bits / self.length

    def similarity(self, other: Hypervector) -> float:
        """Cosine-like similarity: 1 − 2·hamming."""
        return 1.0 - 2.0 * self.hamming_distance(other)

    def popcount(self) -> int:
        """Total number of set bits across all words."""
        return sum(bin(int(word)).count("1") for word in self.data)

    def density(self) -> float:
        """Fraction of set bits; 0.0 for a zero-length vector."""
        if not self.length:
            return 0.0
        return self.popcount() / self.length

    @staticmethod
    def threshold_bundle(vectors: Sequence[Hypervector]) -> Hypervector:
        """Majority-vote bundle across N vectors."""
        if not vectors:
            raise ValueError("cannot bundle zero vectors")
        if len(vectors) == 1:
            only = vectors[0]
            return Hypervector(only.data.copy(), only.length)
        length = vectors[0].length
        counts = np.zeros(length, dtype=np.int32)
        for vec in vectors:
            counts += _unpack(vec)
        # Strict majority: ties (even N, exactly half set) resolve to 0.
        majority = (counts > len(vectors) // 2).astype(np.uint8)
        return _pack(majority, length)

bind(other)

XOR binding (self-inverse, dimension-preserving).

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
153
154
155
def bind(self, other: Hypervector) -> Hypervector:
    """XOR binding (self-inverse, dimension-preserving)."""
    return Hypervector(np.bitwise_xor(self.data, other.data), self.length)

permute(shift)

Cyclic right rotation by shift bits.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
157
158
159
160
161
162
163
164
def permute(self, shift: int) -> Hypervector:
    """Cyclic right rotation by *shift* bits."""
    if self.length == 0 or shift % self.length == 0:
        return Hypervector(self.data.copy(), self.length)
    bits = _unpack(self)
    effective = shift % self.length
    bits = np.roll(bits, effective)
    return _pack(bits, self.length)

hamming_distance(other)

Normalised Hamming distance (0.0 = identical, 1.0 = opposite).

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
166
167
168
169
170
def hamming_distance(self, other: Hypervector) -> float:
    """Normalised Hamming distance (0.0 = identical, 1.0 = opposite)."""
    xor = np.bitwise_xor(self.data, other.data)
    total = sum(bin(int(w)).count("1") for w in xor)
    return total / self.length

similarity(other)

Cosine-like similarity: 1 − 2·hamming.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
172
173
174
def similarity(self, other: Hypervector) -> float:
    """Cosine-like similarity: 1 − 2·hamming."""
    return 1.0 - 2.0 * self.hamming_distance(other)

threshold_bundle(vectors) staticmethod

Majority-vote bundle across N vectors.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
@staticmethod
def threshold_bundle(vectors: Sequence[Hypervector]) -> Hypervector:
    """Majority-vote bundle across N vectors."""
    n = len(vectors)
    if n == 0:
        raise ValueError("cannot bundle zero vectors")
    if n == 1:
        return Hypervector(vectors[0].data.copy(), vectors[0].length)
    length = vectors[0].length
    bits_list = [_unpack(v) for v in vectors]
    counts = np.zeros(length, dtype=np.int32)
    for b in bits_list:
        counts += b
    threshold = n // 2
    result_bits = (counts > threshold).astype(np.uint8)
    return _pack(result_bits, length)

SymbolEncoder

Deterministic symbol → hypervector mapping (mirrors Rust SymbolEncoder).

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
class SymbolEncoder:
    """Deterministic symbol → hypervector mapping (mirrors Rust SymbolEncoder)."""

    def __init__(self, base_seed: int = 42):
        self._base_seed = base_seed
        self._cache: Dict[str, Hypervector] = {}  # symbol → encoded hypervector

    def encode(self, symbol: str) -> Hypervector:
        """Return the hypervector for *symbol*, encoding and caching on first use."""
        cached = self._cache.get(symbol)
        if cached is None:
            cached = Hypervector.random(self._symbol_seed(symbol))
            self._cache[symbol] = cached
        return cached

    def encode_sequence(self, symbols: Sequence[str]) -> Hypervector:
        """Bind positionally-permuted symbol vectors into one sequence vector.

        The last symbol is unpermuted; each earlier symbol is rotated by
        its distance from the end before XOR-binding.
        """
        if not symbols:
            raise ValueError("cannot encode empty sequence")
        last = self.encode(symbols[-1])
        result = Hypervector(last.data.copy(), last.length)
        for shift, sym in enumerate(reversed(symbols[:-1]), start=1):
            result = result.bind(self.encode(sym).permute(shift))
        return result

    @property
    def vocabulary_size(self) -> int:
        """Number of distinct symbols encoded so far."""
        return len(self._cache)

    def _symbol_seed(self, symbol: str) -> int:
        """Derive a 64-bit seed from SHA-256 of the symbol, mixed with the base seed."""
        digest = hashlib.sha256(symbol.encode()).digest()
        return int.from_bytes(digest[:8], "little") ^ self._base_seed

PredictiveCodingLayer

Single layer in a hierarchical predictive coding network.

Maintains a generative model: top-down predictions are compared against bottom-up observations to produce prediction errors that drive weight updates and symbolic trace emission.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
class PredictiveCodingLayer:
    """Single layer in a hierarchical predictive coding network.

    Maintains a generative model: top-down predictions are compared
    against bottom-up observations to produce prediction errors that
    drive weight updates and symbolic trace emission.
    """

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        lr: float = 0.01,
        precision: float = 1.0,
        seed: int = 0,
    ):
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.lr = lr
        self.precision = precision

        # Draw W_td then W_bu so the RNG stream matches the reference layout.
        rng = np.random.default_rng(seed)
        self.W_td = rng.normal(0, 0.1, (hidden_dim, input_dim)).astype(np.float32)
        self.W_bu = rng.normal(0, 0.1, (hidden_dim, input_dim)).astype(np.float32)
        self.mu = np.zeros(hidden_dim, dtype=np.float32)  # hidden-state estimate
        self._error_history: List[float] = []  # per-call mean absolute errors

    def predict(self, hidden: Optional[np.ndarray] = None) -> np.ndarray:
        """Generate a top-down prediction from the hidden state."""
        state = self.mu if hidden is None else hidden
        return np.tanh(self.W_td.T @ state)

    def compute_error(
        self,
        observation: np.ndarray,
        hidden: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Bottom-up prediction error: precision-weighted residual.

        Side effect: appends the mean absolute error to the history.
        """
        residual = observation - self.predict(hidden)
        error = self.precision * residual
        self._error_history.append(float(np.mean(np.abs(error))))
        return error

    def update(
        self,
        observation: np.ndarray,
        hidden: Optional[np.ndarray] = None,
    ) -> float:
        """One-step gradient update on both weights and hidden state.

        Returns the mean absolute error before the update.
        """
        error = self.compute_error(observation, hidden)
        mae = float(np.mean(np.abs(error)))

        state = self.mu if hidden is None else hidden
        # Outer-product weight update, sliced defensively to the weight shape.
        self.W_td += self.lr * np.outer(state, error)[: self.hidden_dim, : self.input_dim]
        self.mu += self.lr * (self.W_bu @ error)
        return mae

    @property
    def mean_recent_error(self) -> float:
        """Mean of the last 50 recorded errors; 0.0 when no history exists."""
        window = self._error_history[-50:]
        return float(np.mean(window)) if window else 0.0

    @property
    def converged(self) -> bool:
        """True when the last 10 errors have standard deviation below 1e-3."""
        history = self._error_history
        if len(history) < 10:
            return False
        return float(np.std(history[-10:])) < 0.001

predict(hidden=None)

Generate a top-down prediction from the hidden state.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
280
281
282
283
def predict(self, hidden: Optional[np.ndarray] = None) -> np.ndarray:
    """Generate a top-down prediction from the hidden state."""
    h = hidden if hidden is not None else self.mu
    return np.tanh(self.W_td.T @ h)

compute_error(observation, hidden=None)

Bottom-up prediction error: weighted residual.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
285
286
287
288
289
290
291
292
293
294
def compute_error(
    self,
    observation: np.ndarray,
    hidden: Optional[np.ndarray] = None,
) -> np.ndarray:
    """Bottom-up prediction error: weighted residual."""
    prediction = self.predict(hidden)
    error = self.precision * (observation - prediction)
    self._error_history.append(float(np.mean(np.abs(error))))
    return error

update(observation, hidden=None)

One-step gradient update on both weights and hidden state.

Returns the mean absolute error before the update.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
def update(
    self,
    observation: np.ndarray,
    hidden: Optional[np.ndarray] = None,
) -> float:
    """One-step gradient update on both weights and hidden state.

    Returns the mean absolute error before the update.
    """
    error = self.compute_error(observation, hidden)
    mae = float(np.mean(np.abs(error)))

    h = hidden if hidden is not None else self.mu
    self.W_td += self.lr * np.outer(h, error)[: self.hidden_dim, : self.input_dim]
    self.mu += self.lr * (self.W_bu @ error)
    return mae

VerifiableInference

Wraps prediction + HDC symbol matching with an auditable trace.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
class VerifiableInference:
    """Wraps prediction + HDC symbol matching with an auditable trace."""

    def __init__(
        self,
        encoder: SymbolEncoder,
        layer: PredictiveCodingLayer,
        symbol_library: Optional[Dict[str, Hypervector]] = None,
    ):
        self.encoder = encoder
        self.layer = layer
        self._library: Dict[str, Hypervector] = symbol_library or {}

    def register_symbol(self, name: str) -> None:
        """Register a symbol into the lookup library."""
        self._library[name] = self.encoder.encode(name)

    def register_symbols(self, names: Sequence[str]) -> None:
        """Register every symbol in *names*."""
        for symbol in names:
            self.register_symbol(symbol)

    @property
    def num_symbols(self) -> int:
        """Number of symbols currently registered."""
        return len(self._library)

    def infer(
        self,
        observation: np.ndarray,
        top_k: int = 1,
    ) -> Tuple[List[Tuple[str, float]], ReasoningTrace]:
        """Run inference: prediction → error → HDC symbol match.

        1. Feed *observation* through the predictive coding layer to
           obtain a prediction-error vector.
        2. Derive a probe hypervector from the error. NOTE(review): the
           probe is seeded by a hash of the scaled error sum — i.e. a
           pseudo-random vector keyed on error magnitude, not a true
           population code of the error pattern; confirm intent.
        3. Match the probe against the symbol library by similarity.
        4. Return ranked results and an auditable reasoning trace.
        """
        trace = ReasoningTrace(start_ns=time.perf_counter_ns())

        error = self.layer.compute_error(observation)
        mae = float(np.mean(np.abs(error)))
        trace.add("_prediction_error", "compute_error", 1.0 - mae, min(1.0, 1.0 / (mae + 1e-8)))

        # Deterministic seed from the scaled error magnitude, folded to 63 bits.
        probe_seed = int(abs(np.sum(error * 1e6))) % (2**63)
        probe = Hypervector.random(probe_seed, dim=HYPERVECTOR_DIM)

        if not self._library:
            trace.finalize()
            return [], trace

        scored: List[Tuple[str, float]] = [
            (name, probe.similarity(hv)) for name, hv in self._library.items()
        ]
        # Stable descending sort: ties keep registration order.
        scored.sort(key=lambda item: item[1], reverse=True)
        results = scored[:top_k]

        for rank, (name, sim) in enumerate(results):
            # Confidence scales with the margin over the next-best candidate.
            if rank + 1 < len(scored):
                margin = sim - scored[rank + 1][1]
            else:
                margin = 0.0
            confidence = min(1.0, margin / 0.2) if margin > 0 else 0.0
            trace.add(name, "hamming_match", sim, confidence)

        trace.finalize()
        return results, trace

register_symbol(name)

Register a symbol into the lookup library.

Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
341
342
343
def register_symbol(self, name: str) -> None:
    """Register a symbol into the lookup library."""
    self._library[name] = self.encoder.encode(name)

infer(observation, top_k=1)

Run inference: prediction → error → HDC symbol match.

  1. Feed observation through the predictive coding layer to obtain a prediction-error vector.
  2. Encode the error into a hypervector via population coding.
  3. Match against the symbol library using Hamming distance.
  4. Return ranked results and an auditable reasoning trace.
Source code in src/sc_neurocore/neuro_symbolic/predictive_coding.py
Python
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
def infer(
    self,
    observation: np.ndarray,
    top_k: int = 1,
) -> Tuple[List[Tuple[str, float]], ReasoningTrace]:
    """Run inference: prediction → error → HDC symbol match.

    1. Feed *observation* through the predictive coding layer to
       obtain a prediction-error vector.
    2. Encode the error into a hypervector via population coding.
    3. Match against the symbol library using Hamming distance.
    4. Return ranked results and an auditable reasoning trace.
    """
    trace = ReasoningTrace(start_ns=time.perf_counter_ns())

    error = self.layer.compute_error(observation)
    mae = float(np.mean(np.abs(error)))
    trace.add("_prediction_error", "compute_error", 1.0 - mae, min(1.0, 1.0 / (mae + 1e-8)))

    probe_seed = int(abs(np.sum(error * 1e6))) % (2**63)
    probe = Hypervector.random(probe_seed, dim=HYPERVECTOR_DIM)

    if not self._library:
        trace.finalize()
        return [], trace

    distances: List[Tuple[str, float]] = []
    for name, hv in self._library.items():
        sim = probe.similarity(hv)
        distances.append((name, sim))

    distances.sort(key=lambda x: -x[1])
    results = distances[:top_k]

    for rank, (name, sim) in enumerate(results):
        margin = 0.0
        if len(distances) > rank + 1:
            margin = sim - distances[rank + 1][1]
        confidence = min(1.0, margin / 0.2) if margin > 0 else 0.0
        trace.add(name, "hamming_match", sim, confidence)

    trace.finalize()
    return results, trace