Skip to content

Layers

Pre-built layer compositions combining neurons, synapses, encoders, and recorders into reusable building blocks.

Class Architecture Backend
SCDenseLayer Fully-connected LIF NumPy (loop-based)
VectorizedSCLayer Fully-connected, packed bitwise NumPy / CuPy GPU
SCConv2DLayer 2D convolution NumPy
SCRecurrentLayer Echo state / reservoir NumPy
SCLearningLayer Dense + online STDP NumPy
SCFusionLayer Multi-modal MUX fusion NumPy
StochasticAttention SC attention mechanism NumPy
MemristiveDenseLayer Memristive device model NumPy
JaxSCDenseLayer Fully-connected LIF JAX (JIT, GPU/TPU)
HardwareAwareSCLayer Dense + memristive defects NumPy
PredictiveCodingSCLayer XOR error, zero-multiplication NumPy
RallDendrite Compartmental dendritic tree NumPy
LateralInhibition Gaussian surround suppression NumPy
WinnerTakeAll k-WTA competitive layer NumPy

Dense Layer

sc_neurocore.layers.sc_dense_layer.SCDenseLayer dataclass

Stochastic-computing dense layer of LIF neurons.

Each neuron receives shared SC dot-product input current and produces independent spike trains. Software-only but fully SC-driven at the input/synapse level.

Example

layer = SCDenseLayer( ... n_neurons=4, x_inputs=[0.5, 0.3], weight_values=[0.8, 0.6], ... x_min=0.0, x_max=1.0, w_min=0.0, w_max=1.0, length=256, ... ) layer.run(T=100) trains = layer.get_spike_trains() trains.shape (4, 100)

Source code in src/sc_neurocore/layers/sc_dense_layer.py
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
@dataclass
class SCDenseLayer:
    """
    Stochastic-computing dense layer of LIF neurons.

    Each neuron receives shared SC dot-product input current and
    produces independent spike trains. Software-only but fully
    SC-driven at the input/synapse level.

    Example
    -------
    >>> layer = SCDenseLayer(
    ...     n_neurons=4, x_inputs=[0.5, 0.3], weight_values=[0.8, 0.6],
    ...     x_min=0.0, x_max=1.0, w_min=0.0, w_max=1.0, length=256,
    ... )
    >>> layer.run(T=100)
    >>> trains = layer.get_spike_trains()
    >>> trains.shape
    (4, 100)
    """

    # Number of LIF neurons driven by the shared SC current source.
    n_neurons: int
    # Input values and per-input weights fed to the SC dot-product
    # (must have the same length; validated in __post_init__).
    x_inputs: Sequence[float]
    weight_values: Sequence[float]
    # Encoding ranges for inputs (x_*) and weights (w_*).
    x_min: float
    x_max: float
    w_min: float
    w_max: float
    # Bitstream length per SC dot-product evaluation.
    length: int = DENSE_LAYER_LENGTH
    # Output (current) range of the SC source.
    y_min: float = DENSE_Y_MIN
    y_max: float = DENSE_Y_MAX
    # Simulation step in milliseconds, shared by neurons and recorders.
    dt_ms: float = LIF_DT
    # Optional per-key overrides for the LIF neuron constructor.
    neuron_params: Optional[Dict[str, Any]] = None
    # Base RNG seed; per-neuron seeds are derived from it deterministically.
    base_seed: Optional[int] = None

    def __post_init__(self) -> None:
        """Validate inputs and build the shared source, neurons, and recorders."""
        if len(self.x_inputs) != len(self.weight_values):
            raise ValueError("x_inputs and weight_values must have same length.")

        # Shared SC current source for now (can be extended to per-neuron later)
        self.source = BitstreamCurrentSource(
            x_inputs=self.x_inputs,
            x_min=self.x_min,
            x_max=self.x_max,
            weight_values=self.weight_values,
            w_min=self.w_min,
            w_max=self.w_max,
            length=self.length,
            y_min=self.y_min,
            y_max=self.y_max,
            seed=self.base_seed,
        )

        # Build neurons
        if self.neuron_params is None:
            self.neuron_params = {}

        self.neurons: List[StochasticLIFNeuron] = []
        self.recorders: List[BitstreamSpikeRecorder] = []
        for i in range(self.n_neurons):
            # Give each neuron its own seed so they don't behave identically
            # (explicit `is not None` check keeps base_seed=0 deterministic).
            seed = None
            if self.base_seed is not None:
                seed = self.base_seed + NEURON_SEED_OFFSET + i

            # Fall back to library defaults for any parameter not overridden.
            neuron = StochasticLIFNeuron(
                v_rest=self.neuron_params.get("v_rest", LIF_V_REST),
                v_reset=self.neuron_params.get("v_reset", LIF_V_RESET),
                v_threshold=self.neuron_params.get("v_threshold", LIF_V_THRESHOLD),
                tau_mem=self.neuron_params.get("tau_mem", LIF_TAU_MEM),
                dt=self.dt_ms,
                noise_std=self.neuron_params.get("noise_std", LIF_LAYER_NOISE_STD),
                resistance=self.neuron_params.get("resistance", LIF_RESISTANCE),
                seed=seed,
            )
            self.neurons.append(neuron)
            self.recorders.append(BitstreamSpikeRecorder(dt_ms=self.dt_ms))

    def reset(self) -> None:
        """Reset the SC source, all neuron states, and all spike recorders."""
        self.source.reset()
        for neuron, rec in zip(self.neurons, self.recorders):
            neuron.reset_state()
            rec.reset()

    def run(self, T: int) -> None:
        """
        Run the layer for T time steps, updating all neurons.

        The current I_t is shared across all neurons (common input
        processed through SC dot-product). Neurons differ by their
        internal noise and parameters.
        """
        for _ in range(T):
            I_t = self.source.step()
            for neuron, rec in zip(self.neurons, self.recorders):
                spike = neuron.step(I_t)
                rec.record(spike)

    def get_spike_trains(self) -> np.ndarray[Any, Any]:
        """
        Return spike matrix of shape (n_neurons, T).

        T is taken from the first recorder; all recorders are assumed to
        hold the same number of steps since run() drives them in lockstep.
        """
        if not self.recorders:
            return np.zeros((0, 0), dtype=np.uint8)

        T = len(self.recorders[0].spikes)
        spikes = np.zeros((self.n_neurons, T), dtype=np.uint8)
        for i, rec in enumerate(self.recorders):
            spikes[i] = rec.as_array()
        return spikes

    def summary(self) -> Dict[str, Any]:
        """
        Return firing statistics for each neuron.

        Keys: "n_neurons", per-neuron "stats" (total spikes and rate),
        and the mean firing rate across the layer (0.0 when empty).
        """
        stats = []
        for i, rec in enumerate(self.recorders):
            stats.append(
                {
                    "neuron": i,
                    "total_spikes": rec.total_spikes(),
                    "firing_rate_hz": rec.firing_rate_hz(),
                }
            )
        return {
            "n_neurons": self.n_neurons,
            "stats": stats,
            "avg_firing_rate_hz": float(
                np.mean([s["firing_rate_hz"] for s in stats]) if stats else 0.0
            ),
        }

run(T)

Run the layer for T time steps, updating all neurons.

The current I_t is shared across all neurons (common input processed through SC dot-product). Neurons differ by their internal noise and parameters.

Source code in src/sc_neurocore/layers/sc_dense_layer.py
117
118
119
120
121
122
123
124
125
126
127
128
129
def run(self, T: int) -> None:
    """
    Advance the layer by T simulation steps.

    A single SC-generated current drives every neuron at each step
    (shared dot-product input); the neurons diverge only through
    their private noise and parameters.
    """
    for _step in range(T):
        drive = self.source.step()
        for cell, recorder in zip(self.neurons, self.recorders):
            recorder.record(cell.step(drive))

get_spike_trains()

Return spike matrix of shape (n_neurons, T).

Source code in src/sc_neurocore/layers/sc_dense_layer.py
131
132
133
134
135
136
137
138
139
140
141
142
def get_spike_trains(self) -> np.ndarray[Any, Any]:
    """
    Return the recorded spikes as a (n_neurons, T) uint8 matrix.

    T is read off the first recorder; an empty recorder list yields
    a (0, 0) matrix.
    """
    if not self.recorders:
        return np.zeros((0, 0), dtype=np.uint8)

    n_steps = len(self.recorders[0].spikes)
    spike_matrix = np.zeros((self.n_neurons, n_steps), dtype=np.uint8)
    for row_idx, recorder in enumerate(self.recorders):
        spike_matrix[row_idx] = recorder.as_array()
    return spike_matrix

summary()

Return firing statistics for each neuron.

Source code in src/sc_neurocore/layers/sc_dense_layer.py
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
def summary(self) -> Dict[str, Any]:
    """
    Aggregate per-neuron spike statistics into a report dictionary.

    Returns "n_neurons", a "stats" list (index, spike count, rate per
    neuron), and the layer-wide mean rate (0.0 for an empty layer).
    """
    per_neuron = [
        {
            "neuron": idx,
            "total_spikes": recorder.total_spikes(),
            "firing_rate_hz": recorder.firing_rate_hz(),
        }
        for idx, recorder in enumerate(self.recorders)
    ]
    if per_neuron:
        avg_rate = float(np.mean([entry["firing_rate_hz"] for entry in per_neuron]))
    else:
        avg_rate = 0.0
    return {
        "n_neurons": self.n_neurons,
        "stats": per_neuron,
        "avg_firing_rate_hz": avg_rate,
    }

Vectorized Layer

sc_neurocore.layers.vectorized_layer.VectorizedSCLayer dataclass

High-performance SC layer using packed bitwise operations.

Uses GPU (CuPy) when available, otherwise pure NumPy. Optional sparse connectivity via scipy.sparse.

Example

import numpy as np layer = VectorizedSCLayer(n_inputs=8, n_neurons=4, length=512) out = layer.forward(np.random.rand(8)) out.shape (4,) (out >= 0).all() and (out <= 1).all() True

Source code in src/sc_neurocore/layers/vectorized_layer.py
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
@dataclass
class VectorizedSCLayer:
    """
    High-performance SC layer using packed bitwise operations.

    Uses GPU (CuPy) when available, otherwise pure NumPy.
    Optional sparse connectivity via ``scipy.sparse``.

    Example
    -------
    >>> import numpy as np
    >>> layer = VectorizedSCLayer(n_inputs=8, n_neurons=4, length=512)
    >>> out = layer.forward(np.random.rand(8))
    >>> out.shape
    (4,)
    >>> (out >= 0).all() and (out <= 1).all()
    True
    """

    # Layer dimensions.
    n_inputs: int
    n_neurons: int
    # Bitstream length per forward pass (longer = lower SC variance).
    length: int = LAYER_DEFAULT_LENGTH
    # Use CuPy when available; silently falls back to NumPy otherwise.
    use_gpu: bool = True
    # Sparse connectivity (requires scipy); `connectivity` is the kept fraction.
    sparse: bool = False
    connectivity: float = 1.0

    def __post_init__(self) -> None:
        """Validate configuration and build dense or sparse packed weights."""
        if self.n_inputs < 1:
            raise ValueError(f"n_inputs must be >= 1, got {self.n_inputs}")
        if self.n_neurons < 1:
            raise ValueError(f"n_neurons must be >= 1, got {self.n_neurons}")
        if self.length < 1:
            raise ValueError(f"length must be >= 1, got {self.length}")
        if self.sparse and not _has_scipy_sparse():
            raise ImportError("scipy is required for sparse=True")
        if not 0.0 < self.connectivity <= 1.0:
            raise ValueError(f"connectivity must be in (0, 1], got {self.connectivity}")

        self._on_gpu = self.use_gpu and HAS_CUPY

        if self.sparse:
            self._init_sparse()
        else:
            # Weight probabilities in [0, 1]; bitstream samples are derived
            # from them in _refresh_packed_weights.
            self.weights = np.random.uniform(0.0, 1.0, (self.n_neurons, self.n_inputs))
            self.packed_weights = None
            self._refresh_packed_weights()

    # -- Dense path (unchanged) ------------------------------------------------

    def _refresh_packed_weights(self) -> None:
        """
        Resample Bernoulli weight bitstreams from `self.weights` and pack them.

        Produces packed_weights of shape (n_neurons, n_inputs, n_words) via
        pack_bitstream, uploaded to the GPU when the CuPy path is active.
        """
        w_probs = self.weights
        bits = (
            np.random.random((self.n_neurons, self.n_inputs, self.length)) < w_probs[:, :, None]
        ).astype(np.uint8)

        flat = bits.reshape(-1, self.length)
        packed_flat = pack_bitstream(flat)
        pw = packed_flat.reshape(self.n_neurons, self.n_inputs, -1)

        if self._on_gpu:  # pragma: no cover
            self.packed_weights = to_device(pw)
        else:
            self.packed_weights = pw

    # -- Sparse path -----------------------------------------------------------

    def _init_sparse(self) -> None:
        """Sample a random connectivity mask and CSR weights, then pack bitstreams."""
        sp = _get_scipy_sparse()
        n_total = self.n_neurons * self.n_inputs
        # At least one synapse survives even for tiny connectivity values.
        n_nonzero = max(1, int(round(n_total * self.connectivity)))
        indices = np.random.choice(n_total, size=n_nonzero, replace=False)
        # Flat index -> (neuron row, input column).
        rows, cols = np.divmod(indices, self.n_inputs)
        weight_vals = np.random.uniform(0.0, 1.0, n_nonzero)

        self.mask_csr = sp.csr_matrix(
            (np.ones(n_nonzero, dtype=np.float32), (rows, cols)),
            shape=(self.n_neurons, self.n_inputs),
        )
        self.weights_csr = sp.csr_matrix(
            (weight_vals, (rows, cols)),
            shape=(self.n_neurons, self.n_inputs),
        )
        self._pack_sparse_weights()

    def _pack_sparse_weights(self) -> None:
        """Pack bitstreams only for non-zero synapses, stored in a flat array."""
        csr = self.weights_csr
        # 64 bits per packed word.
        n_words = (self.length + 63) // 64
        self._sparse_packed = np.empty((csr.nnz, n_words), dtype=np.uint64)
        for k in range(csr.nnz):
            w = csr.data[k]
            bits = (np.random.random(self.length) < w).astype(np.uint8)
            self._sparse_packed[k] = pack_bitstream(bits)

    # -- Forward ---------------------------------------------------------------

    def forward(self, input_values: Sequence[float]) -> np.ndarray[Any, Any]:
        """Compute output firing rates for the layer."""
        in_probs = np.asarray(input_values, dtype=np.float64)
        # Inputs must be a 1-D probability vector matching n_inputs.
        if in_probs.ndim != 1 or in_probs.shape[0] != self.n_inputs:
            raise ValueError(
                f"Expected 1-D input of length {self.n_inputs}, got shape {in_probs.shape}"
            )
        if not np.all(np.isfinite(in_probs)):
            raise ValueError("Input contains NaN or Inf")
        if np.any(in_probs < 0.0) or np.any(in_probs > 1.0):
            raise ValueError("Input probabilities must be in [0, 1]")

        if self.sparse:
            return self._forward_sparse(in_probs)
        return self._forward_dense(in_probs)

    def _forward_dense(self, in_probs: np.ndarray) -> np.ndarray:
        """Dense SC MAC: AND packed inputs with packed weights, then popcount."""
        # Fresh Bernoulli input bitstreams each call (stochastic forward pass).
        input_bits = (np.random.random((self.n_inputs, self.length)) < in_probs[:, None]).astype(
            np.uint8
        )
        packed_inputs = pack_bitstream(input_bits)

        if self._on_gpu:  # pragma: no cover
            packed_inputs_dev = to_device(packed_inputs)
            counts = gpu_vec_mac(self.packed_weights, packed_inputs_dev)
            outputs = to_host(counts).astype(np.float64)
        else:
            # Broadcast inputs over the neuron axis; popcount sums set bits
            # across all inputs and words per neuron.
            products = vec_and(self.packed_weights, packed_inputs[None, :, :])
            flat_products = products.reshape(self.n_neurons, -1)
            outputs = _popcount_rows(flat_products)

        # Normalize bit counts to firing rates in [0, 1]-ish range.
        return outputs / self.length

    def _forward_sparse(self, in_probs: np.ndarray) -> np.ndarray:
        """Sparse SC MAC over non-zero synapses only, scatter-added per neuron."""
        input_bits = (np.random.random((self.n_inputs, self.length)) < in_probs[:, None]).astype(
            np.uint8
        )
        packed_inputs = pack_bitstream(input_bits)

        csr = self.weights_csr
        if csr.nnz == 0:  # pragma: no cover
            return np.zeros(self.n_neurons, dtype=np.float64)

        if self._on_gpu:  # pragma: no cover
            return self._forward_sparse_gpu(packed_inputs)

        # Gather the packed input stream for each non-zero synapse's column.
        gathered_inputs = packed_inputs[csr.indices]
        products = vec_and(self._sparse_packed, gathered_inputs)
        counts = _popcount_rows(products)

        # Scatter-add counts to their owning neuron; np.diff(indptr) gives the
        # number of non-zeros per CSR row.
        outputs = np.zeros(self.n_neurons, dtype=np.float64)
        np.add.at(outputs, np.repeat(np.arange(self.n_neurons), np.diff(csr.indptr)), counts)

        return outputs / self.length

    def _forward_sparse_gpu(self, packed_inputs: np.ndarray) -> np.ndarray:  # pragma: no cover
        """CuPy CSR matmul path for sparse connectivity on GPU.

        NOTE(review): this path recovers empirical input probabilities via
        popcount and does a float CSR matmul with the real-valued weights —
        it is not a bitwise SC computation, so its statistics differ from
        the CPU sparse path. Confirm this approximation is intended.
        """
        import cupy
        import cupyx.scipy.sparse as cusp

        csr = self.weights_csr
        w_gpu = cusp.csr_matrix(
            (
                cupy.asarray(csr.data.astype(np.float32)),
                cupy.asarray(csr.indices),
                cupy.asarray(csr.indptr),
            ),
            shape=csr.shape,
        )
        in_probs_flat = _popcount_rows(packed_inputs).astype(np.float32) / self.length
        in_gpu = cupy.asarray(in_probs_flat)
        out_gpu = w_gpu @ in_gpu
        return cupy.asnumpy(out_gpu).astype(np.float64)

forward(input_values)

Compute output firing rates for the layer.

Source code in src/sc_neurocore/layers/vectorized_layer.py
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
def forward(self, input_values: Sequence[float]) -> np.ndarray[Any, Any]:
    """
    Validate the input probability vector and dispatch to the dense or
    sparse forward pass, returning per-neuron output firing rates.
    """
    probs = np.asarray(input_values, dtype=np.float64)

    # Shape gate: exactly one axis of length n_inputs.
    if probs.ndim != 1 or probs.shape[0] != self.n_inputs:
        raise ValueError(
            f"Expected 1-D input of length {self.n_inputs}, got shape {probs.shape}"
        )
    # Reject non-finite entries before the range check.
    if not np.all(np.isfinite(probs)):
        raise ValueError("Input contains NaN or Inf")
    # Unipolar SC encoding only accepts probabilities.
    if np.any((probs < 0.0) | (probs > 1.0)):
        raise ValueError("Input probabilities must be in [0, 1]")

    handler = self._forward_sparse if self.sparse else self._forward_dense
    return handler(probs)

Convolutional Layer

sc_neurocore.layers.sc_conv_layer.SCConv2DLayer dataclass

SC 2D convolutional layer using unipolar probability multiplication.

Example

import numpy as np conv = SCConv2DLayer(in_channels=1, out_channels=2, kernel_size=3, padding=1) img = np.random.rand(1, 8, 8) out = conv.forward(img) out.shape (2, 8, 8)

Source code in src/sc_neurocore/layers/sc_conv_layer.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
@dataclass
class SCConv2DLayer:
    """
    SC 2D convolutional layer using unipolar probability multiplication.

    Example
    -------
    >>> import numpy as np
    >>> conv = SCConv2DLayer(in_channels=1, out_channels=2, kernel_size=3, padding=1)
    >>> img = np.random.rand(1, 8, 8)
    >>> out = conv.forward(img)
    >>> out.shape
    (2, 8, 8)
    """

    # Channel counts and square kernel size.
    in_channels: int
    out_channels: int
    kernel_size: int
    # Standard convolution hyper-parameters.
    stride: int = 1
    padding: int = 0
    # SC bitstream length (declared for API symmetry; not used in this
    # analytic probability-product forward pass).
    length: int = LAYER_CONV_LENGTH

    def __post_init__(self) -> None:
        """Initialize kernels uniformly in [0, 1] (valid SC probabilities)."""
        # Kernels: (out_channels, in_channels, k, k)
        self.kernels = np.random.uniform(
            0.0, 1.0, (self.out_channels, self.in_channels, self.kernel_size, self.kernel_size)
        )

    def forward(self, input_image: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        input_image: (in_channels, H, W)
        Returns: (out_channels, H_out, W_out) as probabilities (or firing rates).

        NOTE(review): the accumulated sum over C_in*k*k probability products
        is not clipped, so outputs can exceed 1 — confirm whether downstream
        consumers expect normalization.
        """
        C_in, H, W = input_image.shape
        # NOTE(review): IndexError is unconventional for a dimension mismatch
        # (ValueError is typical) but kept as-is for caller compatibility.
        if C_in != self.in_channels:
            raise IndexError(f"Expected {self.in_channels} input channels, got {C_in}")
        k = self.kernel_size
        H_out = (H + 2 * self.padding - k) // self.stride + 1
        W_out = (W + 2 * self.padding - k) // self.stride + 1

        if self.padding > 0:
            input_image = np.pad(
                input_image, ((0, 0), (self.padding, self.padding), (self.padding, self.padding))
            )

        # im2col: extract all patches → (H_out*W_out, C_in*k*k)
        col = np.empty((H_out * W_out, C_in * k * k), dtype=input_image.dtype)
        idx = 0
        for i in range(H_out):
            for j in range(W_out):
                hs = i * self.stride
                ws = j * self.stride
                col[idx] = input_image[:, hs : hs + k, ws : ws + k].ravel()
                idx += 1

        # SC multiply-accumulate: P(A&B) = P(A)*P(B) for unipolar [0,1]
        filters = self.kernels.reshape(self.out_channels, -1)  # (out, C_in*k*k)
        output = filters @ col.T  # (out, H_out*W_out)

        return output.reshape(self.out_channels, H_out, W_out)

forward(input_image)

input_image: (in_channels, H, W) Returns: (out_channels, H_out, W_out) as probabilities (or firing rates).

Source code in src/sc_neurocore/layers/sc_conv_layer.py
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def forward(self, input_image: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
    """
    Convolve a (in_channels, H, W) image with the layer kernels.

    Returns a (out_channels, H_out, W_out) map of accumulated unipolar
    probability products (SC MAC: P(A&B) = P(A)*P(B)).
    """
    channels, height, width = input_image.shape
    if channels != self.in_channels:
        raise IndexError(f"Expected {self.in_channels} input channels, got {channels}")

    ksz = self.kernel_size
    out_h = (height + 2 * self.padding - ksz) // self.stride + 1
    out_w = (width + 2 * self.padding - ksz) // self.stride + 1

    if self.padding > 0:
        pad = self.padding
        input_image = np.pad(input_image, ((0, 0), (pad, pad), (pad, pad)))

    # im2col: one flattened patch per output position, row-major over (i, j).
    patches = np.empty((out_h * out_w, channels * ksz * ksz), dtype=input_image.dtype)
    row = 0
    for i in range(out_h):
        top = i * self.stride
        for j in range(out_w):
            left = j * self.stride
            patches[row] = input_image[:, top : top + ksz, left : left + ksz].ravel()
            row += 1

    # Single matmul performs the multiply-accumulate for every filter.
    weights = self.kernels.reshape(self.out_channels, -1)
    return (weights @ patches.T).reshape(self.out_channels, out_h, out_w)

Recurrent / Reservoir Layer

sc_neurocore.layers.recurrent.SCRecurrentLayer dataclass

SC recurrent / reservoir layer (echo state network).

Spectral radius bound follows Jaeger, GMD Report 148, 2001.

Example

import numpy as np res = SCRecurrentLayer(n_inputs=3, n_neurons=10, seed=0) state = res.step(np.array([0.5, 0.3, 0.8])) state.shape (10,)

Source code in src/sc_neurocore/layers/recurrent.py
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
@dataclass
class SCRecurrentLayer:
    """
    SC recurrent / reservoir layer (echo state network).

    Spectral radius bound follows Jaeger, GMD Report 148, 2001.

    Example
    -------
    >>> import numpy as np
    >>> res = SCRecurrentLayer(n_inputs=3, n_neurons=10, seed=0)
    >>> state = res.step(np.array([0.5, 0.3, 0.8]))
    >>> state.shape
    (10,)
    """

    # Layer dimensions.
    n_inputs: int
    n_neurons: int
    # NOTE(review): feedback_strength and spectral_radius are accepted but
    # never applied to W_rec below — confirm the intended Jaeger-style
    # spectral scaling before relying on these parameters.
    feedback_strength: float = RESERVOIR_FEEDBACK_STRENGTH
    input_strength: float = RESERVOIR_INPUT_STRENGTH
    spectral_radius: float = RESERVOIR_SPECTRAL_RADIUS
    # Bitstream length used by the feedback/input encoders.
    length: int = LAYER_DEFAULT_LENGTH
    # Master seed; per-neuron seeds are derived as seed + i.
    seed: Optional[int] = None

    def __post_init__(self) -> None:
        """Build weight matrices, neurons, state vector, and encoders."""
        rng = np.random.RandomState(self.seed)

        # Input and recurrent weight matrices (dense, non-negative).
        self.W_in = rng.uniform(0, 1, (self.n_neurons, self.n_inputs)) * self.input_strength
        self.W_rec = rng.uniform(0, 0.2, (self.n_neurons, self.n_neurons))

        # Neurons — use an explicit `is not None` check so seed=0 (as in the
        # docstring example) still yields deterministic per-neuron seeds
        # instead of silently falling back to unseeded neurons.
        self.neurons = [
            StochasticLIFNeuron(seed=self.seed + i if self.seed is not None else None)
            for i in range(self.n_neurons)
        ]

        # Previous State (Firing Rate / Probability)
        self.state = np.zeros(self.n_neurons)

        # Encoder for state feedback
        self.state_encoders = [
            BitstreamEncoder(x_min=0, x_max=1, length=self.length) for _ in range(self.n_neurons)
        ]
        self.input_encoders = [
            BitstreamEncoder(x_min=0, x_max=1, length=self.length) for _ in range(self.n_inputs)
        ]

    def step(self, input_vector: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        Process one time step (e.g., one frame of audio).
        Input: (n_inputs,)
        Output: (n_neurons,) - New State
        """
        # Rate-based reservoir update: W_in @ u + W_rec @ x, clipped to [0, 1].
        currents = np.dot(self.W_in, input_vector) + np.dot(self.W_rec, self.state)
        new_rates = np.clip(currents, 0.0, 1.0)

        self.state = new_rates
        return self.state

    def reset(self) -> None:
        """Zero the reservoir state."""
        self.state = np.zeros(self.n_neurons)

step(input_vector)

Process one time step (e.g., one frame of audio). Input: (n_inputs,) Output: (n_neurons,) - New State

Source code in src/sc_neurocore/layers/recurrent.py
70
71
72
73
74
75
76
77
78
79
80
def step(self, input_vector: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
    """
    Advance the reservoir by one time step (e.g., one frame of audio).

    Input: (n_inputs,) vector; output: (n_neurons,) new state, which is
    the input drive plus recurrent drive clipped into [0, 1].
    """
    drive = np.dot(self.W_in, input_vector)
    recurrence = np.dot(self.W_rec, self.state)
    self.state = np.clip(drive + recurrence, 0.0, 1.0)
    return self.state

Learning Layer

sc_neurocore.layers.sc_learning_layer.SCLearningLayer dataclass

SC dense layer with integrated STDP learning.

Each neuron has per-input STDP synapses. Plasticity follows Bi & Poo 1998 asymmetry convention.

Source code in src/sc_neurocore/layers/sc_learning_layer.py
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
@dataclass
class SCLearningLayer:
    """
    SC dense layer with integrated STDP learning.

    Each neuron has per-input STDP synapses. Plasticity follows
    Bi & Poo 1998 asymmetry convention.
    """

    # Layer dimensions.
    n_inputs: int
    n_neurons: int
    # Weight bounds for every synapse.
    w_min: float = 0.0
    w_max: float = 1.0
    # LTP rate; LTD probability is learning_rate * ltd_ratio.
    learning_rate: float = STDP_LEARNING_RATE
    ltd_ratio: float = STDP_LTD_RATIO
    # Bitstream length of one epoch.
    length: int = LAYER_DEFAULT_LENGTH
    # Master seed; encoder/neuron/synapse seeds are derived with fixed offsets.
    base_seed: Optional[int] = None

    def __post_init__(self) -> None:
        """Build input encoders, neurons, recorders, and per-input STDP synapses."""
        self.neurons: List[StochasticLIFNeuron] = []
        # synapses[neuron_idx][input_idx]
        self.synapses: List[List[StochasticSTDPSynapse]] = []
        self.recorders: List[BitstreamSpikeRecorder] = []

        # Explicit `is not None` checks (not truthiness) so base_seed=0 still
        # produces deterministic derived seeds instead of unseeded components.
        self.input_encoders = [
            BitstreamEncoder(
                x_min=0,
                x_max=1,
                length=self.length,
                seed=self.base_seed + i if self.base_seed is not None else None,
            )
            for i in range(self.n_inputs)
        ]

        for i in range(self.n_neurons):
            neuron_seed = self.base_seed + 1000 + i if self.base_seed is not None else None
            self.neurons.append(StochasticLIFNeuron(seed=neuron_seed))
            self.recorders.append(BitstreamSpikeRecorder())

            neuron_syns = []
            for j in range(self.n_inputs):
                syn_seed = (
                    self.base_seed + 2000 + i * self.n_inputs + j
                    if self.base_seed is not None
                    else None
                )
                # NOTE(review): initial weights are drawn from the unseeded
                # global RNG, so runs are not fully reproducible even with
                # base_seed set — confirm whether this should use a seeded RNG.
                initial_w = np.random.uniform(self.w_min, self.w_max)
                neuron_syns.append(
                    StochasticSTDPSynapse(
                        w_min=self.w_min,
                        w_max=self.w_max,
                        w=initial_w,
                        learning_rate=self.learning_rate,
                        length=self.length,
                        seed=syn_seed,
                    )
                )
            self.synapses.append(neuron_syns)

    def run_epoch(self, input_values: Sequence[float]) -> np.ndarray[Any, Any]:
        """
        Run one bitstream epoch (length 'length').

        Returns
        -------
        np.ndarray
            uint8 spike matrix of shape (n_neurons, length).
        """
        # 1. Encode inputs into bitstreams of `length` bits.
        input_bitstreams = [
            self.input_encoders[i].encode(input_values[i]) for i in range(self.n_inputs)
        ]

        # 2. Process time steps
        epoch_spikes = np.zeros((self.n_neurons, self.length), dtype=np.uint8)

        for t in range(self.length):
            for i in range(self.n_neurons):
                neuron = self.neurons[i]
                neuron_syns = self.synapses[i]

                # Stochastic MAC: AND each input bit with a Bernoulli weight
                # bit sampled from the synapse's effective weight probability.
                current_sum = 0.0
                for j in range(self.n_inputs):
                    pre_bit = input_bitstreams[j][t]
                    w_prob = neuron_syns[j].effective_weight_probability()
                    w_bit = 1 if np.random.random() < w_prob else 0
                    current_sum += pre_bit & w_bit

                # Step neuron and record its spike.
                post_spike = neuron.step(current_sum)
                epoch_spikes[i, t] = post_spike
                self.recorders[i].record(post_spike)

                # 3. STDP: potentiate on pre+post coincidence (LTP), depress
                # on pre without post (LTD), each applied stochastically.
                for j in range(self.n_inputs):
                    pre_bit = input_bitstreams[j][t]
                    if pre_bit == 1 and post_spike == 1:
                        if np.random.random() < self.learning_rate:
                            neuron_syns[j]._potentiate()  # type: ignore
                    elif pre_bit == 1 and post_spike == 0:
                        if np.random.random() < self.learning_rate * self.ltd_ratio:
                            neuron_syns[j]._depress()  # type: ignore

        return epoch_spikes

    def get_weights(self) -> np.ndarray[Any, Any]:
        """Return current synaptic weights as an (n_neurons, n_inputs) array."""
        weights = np.zeros((self.n_neurons, self.n_inputs))
        for i in range(self.n_neurons):
            for j in range(self.n_inputs):
                weights[i, j] = self.synapses[i][j].w
        return weights

run_epoch(input_values)

Run one bitstream epoch (length 'length').

Source code in src/sc_neurocore/layers/sc_learning_layer.py
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
def run_epoch(self, input_values: Sequence[float]) -> np.ndarray[Any, Any]:
    """
    Run one bitstream epoch (length 'length'), returning the uint8 spike
    matrix of shape (n_neurons, length) and applying online STDP updates.
    """
    # Encode every input value into its bitstream once up front.
    streams = [
        self.input_encoders[idx].encode(input_values[idx]) for idx in range(self.n_inputs)
    ]

    spikes_out = np.zeros((self.n_neurons, self.length), dtype=np.uint8)

    for t in range(self.length):
        for n_idx in range(self.n_neurons):
            cell = self.neurons[n_idx]
            syn_row = self.synapses[n_idx]

            # Stochastic MAC: AND each input bit with a weight bit sampled
            # from the synapse's effective weight probability.
            drive = 0.0
            for s_idx in range(self.n_inputs):
                in_bit = streams[s_idx][t]
                w_prob = syn_row[s_idx].effective_weight_probability()
                sampled_bit = 1 if np.random.random() < w_prob else 0
                drive += in_bit & sampled_bit

            fired = cell.step(drive)
            spikes_out[n_idx, t] = fired
            self.recorders[n_idx].record(fired)

            # STDP: pre+post coincidence potentiates; pre without post depresses.
            for s_idx in range(self.n_inputs):
                if streams[s_idx][t] != 1:
                    continue
                if fired == 1:
                    if np.random.random() < self.learning_rate:
                        syn_row[s_idx]._potentiate()  # type: ignore
                elif fired == 0:
                    if np.random.random() < self.learning_rate * self.ltd_ratio:
                        syn_row[s_idx]._depress()  # type: ignore

    return spikes_out

Fusion Layer

sc_neurocore.layers.fusion.SCFusionLayer dataclass

Fuses multiple data modalities using stochastic multiplexing (MUX).

Example

import numpy as np layer = SCFusionLayer( ... input_dims={"audio": 4, "visual": 4}, ... fusion_weights={"audio": 0.7, "visual": 0.3}, ... ) out = layer.forward({"audio": np.ones(4), "visual": np.zeros(4)}) out.shape (4,)

Source code in src/sc_neurocore/layers/fusion.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
@dataclass
class SCFusionLayer:
    """
    Fuses multiple data modalities using stochastic multiplexing (MUX).

    Example
    -------
    >>> import numpy as np
    >>> layer = SCFusionLayer(
    ...     input_dims={"audio": 4, "visual": 4},
    ...     fusion_weights={"audio": 0.7, "visual": 0.3},
    ... )
    >>> out = layer.forward({"audio": np.ones(4), "visual": np.zeros(4)})
    >>> out.shape
    (4,)
    """

    input_dims: Dict[str, int]
    fusion_weights: Dict[str, float]
    length: int = LAYER_DEFAULT_LENGTH

    def __post_init__(self) -> None:
        # Normalise fusion weights so they sum to 1 — valid MUX select
        # probabilities regardless of the raw weight scale.
        total = sum(self.fusion_weights.values())
        if total <= 0:
            # Guard: a zero total would raise ZeroDivisionError below and a
            # negative total would silently flip weight signs.
            raise ValueError("fusion_weights must have a positive sum")
        self.norm_weights = {k: v / total for k, v in self.fusion_weights.items()}

    def forward(self, inputs: Dict[str, np.ndarray[Any, Any]]) -> np.ndarray[Any, Any]:
        """
        Fuse modality vectors into a single output vector.

        Parameters
        ----------
        inputs : dict
            Maps modality name -> 1-D feature array. All arrays are assumed
            to share the same length; modalities without a fusion weight are
            ignored.

        Returns
        -------
        np.ndarray
            Weighted sum of the modality vectors, shape (n_features,).
        """
        if not inputs:
            raise ValueError("forward() requires at least one modality input")

        # All modality vectors are assumed to share the same feature count.
        n_features = next(iter(inputs.values())).shape[0]
        fused_output = np.zeros(n_features)

        # SC MUX fusion in expectation: P(out) = sum_i P(in_i) * P(w_i).
        # Float math here is the expected value of the bitstream MUX; a real
        # hardware path would encode data/weights as bitstreams and MUX them.
        for modality, data in inputs.items():
            weight = self.norm_weights.get(modality)
            if weight is None:
                continue
            fused_output += data * weight

        return fused_output

forward(inputs)

inputs: {'modality': np.array([values])}

Source code in src/sc_neurocore/layers/fusion.py
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
def forward(self, inputs: Dict[str, np.ndarray[Any, Any]]) -> np.ndarray[Any, Any]:
    """
    inputs: {'modality': np.array([values])}

    Weighted-sum fusion of modality vectors; modalities without a
    normalised weight are skipped.
    """
    # All modality vectors are assumed to share one feature length.
    vectors = list(inputs.values())
    n_features = vectors[0].shape[0]
    fused = np.zeros(n_features)

    # Expected value of SC MUX fusion: P(out) = sum_i P(in_i) * P(w_i).
    # (A bitstream implementation would encode and MUX; float math is the
    # simulation shortcut with the same expectation.)
    for name, vector in inputs.items():
        weight = self.norm_weights.get(name)
        if weight is None:
            continue
        fused += vector * weight

    return fused

Attention Layer

sc_neurocore.layers.attention.StochasticAttention dataclass

Stochastic Computing Attention Block.

Two modes:

  • forward() — row-sum normalised (SC-native, no exp). Matches Rust engine forward().
  • forward_softmax() — proper softmax with temperature scaling.

Example

Q = np.random.default_rng(0).uniform(0, 1, (4, 8)) K = np.random.default_rng(1).uniform(0, 1, (6, 8)) V = np.random.default_rng(2).uniform(0, 1, (6, 5)) attn = StochasticAttention(dim_k=8) attn.forward(Q, K, V).shape (4, 5) attn.forward_softmax(Q, K, V).shape (4, 5)

Source code in src/sc_neurocore/layers/attention.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
@dataclass
class StochasticAttention:
    """
    Stochastic Computing Attention Block.

    Two modes:

    - ``forward()`` — row-sum normalised (SC-native, no exp). Matches Rust engine ``forward()``.
    - ``forward_softmax()`` — proper softmax with temperature scaling.

    Example
    -------
    >>> Q = np.random.default_rng(0).uniform(0, 1, (4, 8))
    >>> K = np.random.default_rng(1).uniform(0, 1, (6, 8))
    >>> V = np.random.default_rng(2).uniform(0, 1, (6, 5))
    >>> attn = StochasticAttention(dim_k=8)
    >>> attn.forward(Q, K, V).shape
    (4, 5)
    >>> attn.forward_softmax(Q, K, V).shape
    (4, 5)
    """

    dim_k: int
    temperature: float = 1.0

    def _ensure_2d(
        self,
        Q: np.ndarray[Any, Any],
        K: np.ndarray[Any, Any],
        V: np.ndarray[Any, Any],
    ) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
        """Promote any 1-D input to a single-row matrix."""
        if Q.ndim == 1:
            Q = Q[None, :]
        if K.ndim == 1:
            K = K[None, :]
        if V.ndim == 1:
            V = V[None, :]
        return Q, K, V

    def forward(
        self, Q: np.ndarray[Any, Any], K: np.ndarray[Any, Any], V: np.ndarray[Any, Any]
    ) -> np.ndarray[Any, Any]:
        """
        Row-sum normalised attention (SC-native, no exp).

        Parameters
        ----------
        Q : (N, dim_k)
        K : (M, dim_k)
        V : (M, dim_v)

        Returns
        -------
        (N, dim_v)
        """
        Q, K, V = self._ensure_2d(Q, K, V)
        scores = np.dot(Q, K.T)
        row_sums = np.sum(scores, axis=1, keepdims=True)
        # All-zero rows would divide by zero; force the denominator to 1 so
        # such rows yield a zero output instead of NaN.
        row_sums[row_sums == 0] = 1.0
        attn_weights = scores / row_sums
        return np.dot(attn_weights, V)

    def forward_softmax(
        self, Q: np.ndarray[Any, Any], K: np.ndarray[Any, Any], V: np.ndarray[Any, Any]
    ) -> np.ndarray[Any, Any]:
        """
        Proper softmax attention with temperature scaling.

        softmax(Q @ K^T / temperature) @ V

        Numerically stable via max-subtraction before exp.

        Parameters
        ----------
        Q : (N, dim_k)
        K : (M, dim_k)
        V : (M, dim_v)

        Returns
        -------
        (N, dim_v)
        """
        Q, K, V = self._ensure_2d(Q, K, V)
        scores = np.dot(Q, K.T) / self.temperature
        # Subtracting the row max leaves softmax unchanged but keeps exp()
        # from overflowing.
        scores -= scores.max(axis=1, keepdims=True)
        exp_scores = np.exp(scores)
        attn_weights = exp_scores / exp_scores.sum(axis=1, keepdims=True)
        return np.dot(attn_weights, V)

    def forward_bitstream(
        self,
        Q: np.ndarray[Any, Any],
        K: np.ndarray[Any, Any],
        V: np.ndarray[Any, Any],
        length: int = 1024,
        use_sobol: bool = False,
    ) -> np.ndarray[Any, Any]:
        """SC-native attention via bitstream AND gates.

        Each element is encoded as a bitstream, inner products computed
        via AND (bit-level multiply), results decoded by popcount.

        When use_sobol=True, Sobol low-discrepancy sequences replace
        Bernoulli random streams, reducing variance from O(1/√L) to O(1/L).

        Parameters
        ----------
        Q : (N, dim_k) — query probabilities in [0, 1]
        K : (M, dim_k) — key probabilities in [0, 1]
        V : (M, dim_v) — value probabilities in [0, 1]
        length : int — bitstream length
        use_sobol : bool — use Sobol sequences for variance reduction

        Returns
        -------
        (N, dim_v) — attention output probabilities
        """
        Q, K, V = self._ensure_2d(Q, K, V)
        N, dk = Q.shape
        M = V.shape[0]

        gen = generate_sobol_bitstream if use_sobol else generate_bernoulli_bitstream

        # Encode Q, K as bitstreams: (N, dk, L) and (M, dk, L).
        Q_bits = np.array(
            [[gen(float(np.clip(Q[i, d], 0, 1)), length) for d in range(dk)] for i in range(N)]
        )
        K_bits = np.array(
            [[gen(float(np.clip(K[j, d], 0, 1)), length) for d in range(dk)] for j in range(M)]
        )

        # Attention scores via AND (SC multiply) + popcount. The AND and sum
        # over all dim_k streams is vectorized in one call instead of a
        # Python-level per-dimension loop.
        scores = np.zeros((N, M))
        for i in range(N):
            for j in range(M):
                scores[i, j] = np.bitwise_and(Q_bits[i], K_bits[j]).sum() / (dk * length)

        # Row-sum normalization (SC-native, no exp); guard all-zero rows.
        row_sums = scores.sum(axis=1, keepdims=True)
        row_sums[row_sums == 0] = 1.0
        attn_weights = scores / row_sums

        # Weighted sum over V (clipped to valid probabilities).
        return np.dot(attn_weights, np.clip(V, 0, 1))

forward(Q, K, V)

Row-sum normalised attention (SC-native, no exp).

Parameters

Q : (N, dim_k) K : (M, dim_k) V : (M, dim_v)

Returns

(N, dim_v)

Source code in src/sc_neurocore/layers/attention.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
def forward(
    self, Q: np.ndarray[Any, Any], K: np.ndarray[Any, Any], V: np.ndarray[Any, Any]
) -> np.ndarray[Any, Any]:
    """
    Attention with row-sum normalisation instead of softmax (no exp).

    Q : (N, dim_k), K : (M, dim_k), V : (M, dim_v) -> (N, dim_v)
    """
    Q, K, V = self._ensure_2d(Q, K, V)
    similarity = Q @ K.T
    totals = similarity.sum(axis=1, keepdims=True)
    # Guard all-zero rows so the division below stays well defined.
    totals[totals == 0] = 1.0
    return (similarity / totals) @ V

forward_softmax(Q, K, V)

Proper softmax attention with temperature scaling.

softmax(Q @ K^T / temperature) @ V

Numerically stable via max-subtraction before exp.

Parameters

Q : (N, dim_k) K : (M, dim_k) V : (M, dim_v)

Returns

(N, dim_v)

Source code in src/sc_neurocore/layers/attention.py
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
def forward_softmax(
    self, Q: np.ndarray[Any, Any], K: np.ndarray[Any, Any], V: np.ndarray[Any, Any]
) -> np.ndarray[Any, Any]:
    """
    Temperature-scaled softmax attention: softmax(Q @ K^T / T) @ V.

    Stable under large scores thanks to row-max subtraction before exp.

    Q : (N, dim_k), K : (M, dim_k), V : (M, dim_v) -> (N, dim_v)
    """
    Q, K, V = self._ensure_2d(Q, K, V)
    logits = (Q @ K.T) / self.temperature
    # Shifting by the row max leaves softmax unchanged but avoids overflow.
    shifted = logits - logits.max(axis=1, keepdims=True)
    weights = np.exp(shifted)
    weights = weights / weights.sum(axis=1, keepdims=True)
    return weights @ V

forward_bitstream(Q, K, V, length=1024, use_sobol=False)

SC-native attention via bitstream AND gates.

Each element is encoded as a bitstream, inner products computed via AND (bit-level multiply), results decoded by popcount.

When use_sobol=True, Sobol low-discrepancy sequences replace Bernoulli random streams, reducing variance from O(1/√L) to O(1/L).

Parameters

Q : (N, dim_k) — query probabilities in [0, 1] K : (M, dim_k) — key probabilities in [0, 1] V : (M, dim_v) — value probabilities in [0, 1] length : int — bitstream length use_sobol : bool — use Sobol sequences for variance reduction

Returns

(N, dim_v) — attention output probabilities

Source code in src/sc_neurocore/layers/attention.py
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
def forward_bitstream(
    self,
    Q: np.ndarray[Any, Any],
    K: np.ndarray[Any, Any],
    V: np.ndarray[Any, Any],
    length: int = 1024,
    use_sobol: bool = False,
) -> np.ndarray[Any, Any]:
    """SC-native attention computed on explicit bitstreams.

    Every Q/K entry becomes a bitstream; multiplication is an AND gate and
    decoding is a popcount. With use_sobol=True the streams come from Sobol
    low-discrepancy sequences (variance O(1/L) instead of O(1/√L)).

    Q : (N, dim_k), K : (M, dim_k), V : (M, dim_v) — probabilities in [0, 1].
    Returns (N, dim_v) attention output probabilities.
    """
    Q, K, V = self._ensure_2d(Q, K, V)
    n_queries, dk = Q.shape
    n_keys = V.shape[0]

    make_stream = generate_sobol_bitstream if use_sobol else generate_bernoulli_bitstream

    # Bitstream encodings of Q and K: (N, dk, L) and (M, dk, L).
    Q_bits = np.array(
        [
            [make_stream(float(np.clip(Q[i, d], 0, 1)), length) for d in range(dk)]
            for i in range(n_queries)
        ]
    )
    K_bits = np.array(
        [
            [make_stream(float(np.clip(K[j, d], 0, 1)), length) for d in range(dk)]
            for j in range(n_keys)
        ]
    )

    # Score = popcount of AND across all dim_k streams, normalised by the
    # total number of bits involved.
    scores = np.zeros((n_queries, n_keys))
    for i in range(n_queries):
        for j in range(n_keys):
            scores[i, j] = np.bitwise_and(Q_bits[i], K_bits[j]).sum() / (dk * length)

    # Row-sum normalisation (SC-native, avoids exp); guard zero rows.
    denom = scores.sum(axis=1, keepdims=True)
    denom[denom == 0] = 1.0

    return np.dot(scores / denom, np.clip(V, 0, 1))

Memristive Layer

sc_neurocore.layers.memristive.MemristiveDenseLayer dataclass

Bases: VectorizedSCLayer

Dense layer mapped to a memristor crossbar with hardware non-idealities.

Defect parameters from Prezioso et al., Nature 521:61-64, 2015.

Source code in src/sc_neurocore/layers/memristive.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
@dataclass
class MemristiveDenseLayer(VectorizedSCLayer):
    """
    Dense SC layer whose weights live on a defective memristor crossbar.

    Non-ideality magnitudes follow Prezioso et al., Nature 521:61-64, 2015.
    """

    stuck_rate: float = MEMRISTIVE_STUCK_RATE
    variability: float = MEMRISTIVE_VARIABILITY

    def __post_init__(self):
        super().__post_init__()
        self.apply_hardware_defects()

    def apply_hardware_defects(self):
        """
        Inject write noise and stuck-at faults into the weight matrix.
        """
        # Write noise: Gaussian perturbation of every weight, clipped to [0, 1].
        perturbed = self.weights + np.random.normal(0, self.variability, self.weights.shape)
        self.weights = np.clip(perturbed, 0, 1)

        # Stuck-at faults: a random subset of cells is frozen at 0 or 1.
        faulty = np.random.random(self.weights.shape) < self.stuck_rate
        frozen = np.random.randint(0, 2, self.weights.shape)
        self.weights[faulty] = frozen[faulty]

        # Keep the bit-packed weight copy in sync with the float weights.
        self._refresh_packed_weights()

apply_hardware_defects()

Corrupt weights based on physical properties.

Source code in src/sc_neurocore/layers/memristive.py
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
def apply_hardware_defects(self):
    """
    Corrupt weights based on physical properties.
    """
    # 1. Variability (Write Noise)
    noise = np.random.normal(0, self.variability, self.weights.shape)
    self.weights = np.clip(self.weights + noise, 0, 1)

    # 2. Stuck-At Faults
    mask = np.random.random(self.weights.shape) < self.stuck_rate
    stuck_vals = np.random.randint(0, 2, self.weights.shape)  # 0 or 1
    self.weights[mask] = stuck_vals[mask]

    # Refresh packed representation
    self._refresh_packed_weights()

JAX Dense Layer

sc_neurocore.layers.jax_dense_layer.JaxSCDenseLayer dataclass

JAX-accelerated stochastic dense layer of LIF neurons.

Example

layer = JaxSCDenseLayer(n_neurons=10, n_inputs=5, seed=0) # doctest: +SKIP import jax.numpy as jnp # doctest: +SKIP spikes = layer.step(jnp.ones(10) * 0.5) # doctest: +SKIP spikes.shape # doctest: +SKIP (10,)

Source code in src/sc_neurocore/layers/jax_dense_layer.py
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
@dataclass
class JaxSCDenseLayer:
    """
    JAX-accelerated stochastic dense layer of LIF neurons.

    Example
    -------
    >>> layer = JaxSCDenseLayer(n_neurons=10, n_inputs=5, seed=0)  # doctest: +SKIP
    >>> import jax.numpy as jnp  # doctest: +SKIP
    >>> spikes = layer.step(jnp.ones(10) * 0.5)  # doctest: +SKIP
    >>> spikes.shape  # doctest: +SKIP
    (10,)
    """

    n_neurons: int
    n_inputs: int
    bitstream_length: int = LAYER_DEFAULT_LENGTH
    dt_ms: float = LIF_DT
    neuron_params: Optional[Dict[str, Any]] = None
    seed: Optional[int] = None

    def __post_init__(self) -> None:
        if not HAS_JAX:
            from sc_neurocore.exceptions import SCDependencyError

            raise SCDependencyError("JAX is required for JaxSCDenseLayer.")

        if self.neuron_params is None:
            self.neuron_params = {}
        params = self.neuron_params

        # Scalar LIF parameters, falling back to library defaults.
        self.v_rest = float(params.get("v_rest", LIF_V_REST))
        self.v_reset = float(params.get("v_reset", LIF_V_RESET))
        self.v_threshold = float(params.get("v_threshold", LIF_V_THRESHOLD))
        self.tau_mem = float(params.get("tau_mem", LIF_TAU_MEM))
        self.resistance = float(params.get("resistance", LIF_RESISTANCE))
        self.noise_std = float(params.get("noise_std", LIF_LAYER_NOISE_STD))
        # Euler integration factor dt / tau.
        self.alpha = float(self.dt_ms / self.tau_mem)

        # Membrane potentials start at rest.
        self.v = jnp.full((self.n_neurons,), self.v_rest)

        # Functional JAX RNG state; split on every step.
        self.rng_key = jax.random.PRNGKey(self.seed or 42)

    def step(self, I_t: jax.Array) -> jax.Array:
        """
        Advance every neuron by one time step.

        I_t: (n_neurons,) input current for each neuron.
        Returns:
        spikes: (n_neurons,) uint8 array.
        """
        # Draw fresh per-neuron membrane noise from a split key.
        self.rng_key, noise_key = jax.random.split(self.rng_key)
        membrane_noise = jax.random.normal(noise_key, (self.n_neurons,)) * self.noise_std

        # Vectorized LIF update over the whole layer.
        self.v, fired = jax_lif_step(
            self.v,
            I_t,
            self.v_rest,
            self.v_reset,
            self.v_threshold,
            self.alpha,
            self.resistance,
            membrane_noise,
        )
        spikes_out: jax.Array = fired
        return spikes_out

    def run(self, currents: jax.Array) -> jax.Array:
        """
        Run for multiple steps.

        currents: (T, n_neurons)
        Returns:
        spikes: (T, n_neurons)
        """
        # A jax.lax.scan would JIT better; the Python loop keeps this readable.
        spike_history = [self.step(currents[t]) for t in range(currents.shape[0])]
        return jnp.stack(spike_history)

    def reset(self) -> None:
        """Return every membrane potential to the resting value."""
        self.v = jnp.full((self.n_neurons,), self.v_rest)

step(I_t)

Advance the entire layer by one time step.

I_t: (n_neurons,) input current for each neuron. Returns: spikes: (n_neurons,) uint8 array.

Source code in src/sc_neurocore/layers/jax_dense_layer.py
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def step(self, I_t: jax.Array) -> jax.Array:
    """
    Advance the entire layer by one time step.

    I_t: (n_neurons,) input current for each neuron.
    Returns:
    spikes: (n_neurons,) uint8 array.
    """
    # Draw fresh per-neuron membrane noise from a split key.
    self.rng_key, noise_key = jax.random.split(self.rng_key)
    membrane_noise = jax.random.normal(noise_key, (self.n_neurons,)) * self.noise_std

    # Vectorized LIF update over the whole layer.
    self.v, fired = jax_lif_step(
        self.v,
        I_t,
        self.v_rest,
        self.v_reset,
        self.v_threshold,
        self.alpha,
        self.resistance,
        membrane_noise,
    )
    spikes_out: jax.Array = fired
    return spikes_out

run(currents)

Run for multiple steps.

currents: (T, n_neurons) Returns: spikes: (T, n_neurons)

Source code in src/sc_neurocore/layers/jax_dense_layer.py
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
def run(self, currents: jax.Array) -> jax.Array:
    """
    Run for multiple steps.

    currents: (T, n_neurons)
    Returns:
    spikes: (T, n_neurons)
    """
    # A jax.lax.scan would JIT better; the Python loop keeps this readable.
    spike_history = [self.step(currents[t]) for t in range(currents.shape[0])]
    return jnp.stack(spike_history)

Hardware-Aware SC Layer

Trains around memristive defects (stuck-at faults) by masking gradients on defective synapses.

sc_neurocore.layers.hardware_aware.HardwareAwareSCLayer dataclass

SC layer with memristive hardware defect injection.

Parameters

n_inputs : int Number of input channels. n_neurons : int Number of output neurons. length : int Bitstream length. stuck_rate : float Fraction of synapses with stuck-at faults (0 or 1). Default 0.05. variability : float Additive weight noise std. Default 0.02. seed : int Random seed for defect generation.

Source code in src/sc_neurocore/layers/hardware_aware.py
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
@dataclass
class HardwareAwareSCLayer:
    """SC layer that injects memristive hardware defects into its weights.

    Parameters
    ----------
    n_inputs : int
        Number of input channels.
    n_neurons : int
        Number of output neurons.
    length : int
        Bitstream length.
    stuck_rate : float
        Fraction of synapses with stuck-at faults (0 or 1). Default 0.05.
    variability : float
        Additive weight noise std. Default 0.02.
    seed : int
        Random seed for defect generation.
    """

    n_inputs: int
    n_neurons: int
    length: int = 1024
    stuck_rate: float = 0.05
    variability: float = 0.02
    seed: int = 42

    def __post_init__(self):
        self._layer = VectorizedSCLayer(
            n_inputs=self.n_inputs,
            n_neurons=self.n_neurons,
            length=self.length,
            use_gpu=False,
        )
        defect_rng = np.random.RandomState(self.seed)
        w_shape = (self.n_neurons, self.n_inputs)

        # True wherever a synapse is permanently frozen; frozen value is 0 or 1.
        self.stuck_mask = defect_rng.random(w_shape) < self.stuck_rate
        self.stuck_values = defect_rng.choice([0.0, 1.0], size=w_shape)

        # Corrupt the freshly initialised weights.
        self._apply_defects()

    def _apply_defects(self):
        # Force stuck synapses to their frozen values.
        self._layer.weights[self.stuck_mask] = self.stuck_values[self.stuck_mask]
        if self.variability > 0:
            # Deterministic write noise on the healthy synapses only.
            noise = np.random.RandomState(self.seed + 1).normal(
                0, self.variability, self._layer.weights.shape
            )
            healthy = ~self.stuck_mask
            self._layer.weights[healthy] = np.clip(
                self._layer.weights[healthy] + noise[healthy], 0.0, 1.0
            )
        self._layer._refresh_packed_weights()

    def forward(self, input_values) -> np.ndarray:
        """Run the underlying vectorized SC layer."""
        return self._layer.forward(input_values)

    def update_weights(self, gradient: np.ndarray, lr: float = 0.01):
        """Apply a gradient step while leaving stuck synapses untouched.

        Zeroing the gradient on defective cells lets training route
        around the hardware faults.
        """
        effective = np.where(self.stuck_mask, 0.0, gradient)
        self._layer.weights -= lr * effective
        self._layer.weights = np.clip(self._layer.weights, 0.0, 1.0)
        self._apply_defects()

    @property
    def weights(self) -> np.ndarray:
        return self._layer.weights

    @property
    def n_stuck(self) -> int:
        return int(self.stuck_mask.sum())

    @property
    def stuck_fraction(self) -> float:
        return float(self.stuck_mask.mean())

update_weights(gradient, lr=0.01)

Update weights with gradient, respecting stuck-at mask.

Stuck synapses receive zero gradient — the network learns around the defects.

Source code in src/sc_neurocore/layers/hardware_aware.py
85
86
87
88
89
90
91
92
93
94
95
def update_weights(self, gradient: np.ndarray, lr: float = 0.01):
    """Apply a gradient step while leaving stuck synapses untouched.

    Zeroing the gradient on defective cells lets training route around
    the hardware faults.
    """
    effective = np.where(self.stuck_mask, 0.0, gradient)
    self._layer.weights -= lr * effective
    self._layer.weights = np.clip(self._layer.weights, 0.0, 1.0)
    self._apply_defects()

Predictive Coding SC Layer (Conjecture C9)

Zero-multiplication predictive coding: XOR = error, popcount = magnitude, STDP = precision. First SC implementation of Bayesian prediction error minimization.

sc_neurocore.layers.predictive_coding.PredictiveCodingSCLayer dataclass

Zero-multiplication predictive coding in SC.

Parameters

n_inputs : int Number of input channels. n_neurons : int Number of predictive neurons. length : int Bitstream length. lr : float STDP-like learning rate for prediction weights. seed : int or None Random seed.

Source code in src/sc_neurocore/layers/predictive_coding.py
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
@dataclass
class PredictiveCodingSCLayer:
    """Zero-multiplication predictive coding in SC.

    XOR of predicted and actual bitstreams yields the error stream;
    popcount gives its magnitude; an STDP-like rule nudges weights
    toward the decoded input probabilities.

    Parameters
    ----------
    n_inputs : int
        Number of input channels.
    n_neurons : int
        Number of predictive neurons.
    length : int
        Bitstream length.
    lr : float
        STDP-like learning rate for prediction weights.
    seed : int or None
        Random seed.
    """

    n_inputs: int
    n_neurons: int
    length: int = 256
    lr: float = 0.01
    seed: int | None = None

    def __post_init__(self):
        rng = np.random.RandomState(self.seed)
        # Prediction weights: each neuron predicts the next input.
        self.weights = rng.uniform(0.1, 0.9, (self.n_neurons, self.n_inputs))
        self._prev_input: np.ndarray | None = None

    def forward(self, inputs: list[float] | np.ndarray) -> dict[str, Any]:
        """Process one timestep.

        Parameters
        ----------
        inputs : array-like
            Input probabilities, shape (n_inputs,).

        Returns
        -------
        dict with keys:
            'prediction_error': float — mean Hamming distance across neurons
            'surprises': ndarray shape (n_neurons,) — per-neuron surprise
            'predictions': ndarray shape (n_neurons, n_inputs) — predicted probs
        """
        inputs = np.asarray(inputs, dtype=np.float64)

        # Generate actual input bitstreams, shape (n_inputs, length).
        actual_streams = np.array(
            [generate_bernoulli_bitstream(float(np.clip(p, 0, 1)), self.length) for p in inputs]
        )
        # Decoded probability per input stream — hoisted out of the neuron
        # loop since it does not depend on the neuron index.
        actual_probs = [bitstream_to_probability(s) for s in actual_streams]

        surprises = np.zeros(self.n_neurons)
        predictions = np.zeros((self.n_neurons, self.n_inputs))

        for j in range(self.n_neurons):
            neuron_error = 0.0
            for i in range(self.n_inputs):
                # Generate predicted bitstream from the current weight.
                pred_stream = generate_bernoulli_bitstream(
                    float(np.clip(self.weights[j, i], 0, 1)), self.length
                )
                # Record the pre-update weight as this step's prediction.
                predictions[j, i] = self.weights[j, i]

                # XOR = prediction error bitstream (zero multiplications).
                error_stream = np.bitwise_xor(pred_stream, actual_streams[i])

                # Popcount = normalized error magnitude.
                neuron_error += float(np.sum(error_stream)) / self.length

                # STDP-like precision update: move the weight toward the
                # decoded input probability.
                self.weights[j, i] += self.lr * (actual_probs[i] - self.weights[j, i])

            surprises[j] = neuron_error / self.n_inputs

        # Keep weights in valid probability range after the updates.
        np.clip(self.weights, 0.0, 1.0, out=self.weights)

        return {
            "prediction_error": float(np.mean(surprises)),
            "surprises": surprises,
            "predictions": predictions,
        }

    def reset(self):
        """Re-draw the initial weights from the seeded RNG and clear history."""
        rng = np.random.RandomState(self.seed)
        self.weights = rng.uniform(0.1, 0.9, (self.n_neurons, self.n_inputs))
        self._prev_input = None

forward(inputs)

Process one timestep.

Parameters

inputs : array-like Input probabilities, shape (n_inputs,).

Returns

dict with keys: 'prediction_error': float — mean Hamming distance across neurons 'surprises': ndarray shape (n_neurons,) — per-neuron surprise 'predictions': ndarray shape (n_neurons, n_inputs) — predicted probs

Source code in src/sc_neurocore/layers/predictive_coding.py
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
def forward(self, inputs: list[float] | np.ndarray) -> dict[str, Any]:
    """Process one timestep.

    Parameters
    ----------
    inputs : array-like
        Input probabilities, shape (n_inputs,).

    Returns
    -------
    dict with keys:
        'prediction_error': float — mean Hamming distance across neurons
        'surprises': ndarray shape (n_neurons,) — per-neuron surprise
        'predictions': ndarray shape (n_neurons, n_inputs) — predicted probs
    """
    inputs = np.asarray(inputs, dtype=np.float64)

    # Generate actual input bitstreams; shape: (n_inputs, length).
    actual_streams = np.array(
        [generate_bernoulli_bitstream(float(np.clip(p, 0, 1)), self.length) for p in inputs]
    )

    surprises = np.zeros(self.n_neurons)
    predictions = np.zeros((self.n_neurons, self.n_inputs))

    for j in range(self.n_neurons):
        neuron_error = 0.0
        for i in range(self.n_inputs):
            # Predicted bitstream sampled from the (clipped) weight probability.
            pred_stream = generate_bernoulli_bitstream(
                float(np.clip(self.weights[j, i], 0, 1)), self.length
            )
            # NOTE(review): records the raw weight as the prediction, while the
            # stream above samples the clipped value — intentional per original.
            predictions[j, i] = self.weights[j, i]

            # XOR = prediction error bitstream (zero multiplications)
            error_stream = np.bitwise_xor(pred_stream, actual_streams[i])

            # Popcount normalized by stream length = error magnitude in [0, 1].
            error_magnitude = float(np.sum(error_stream)) / self.length
            neuron_error += error_magnitude

            # STDP-like precision update: move the weight toward the observed
            # input probability to reduce future prediction error.
            actual_p = bitstream_to_probability(actual_streams[i])
            self.weights[j, i] += self.lr * (actual_p - self.weights[j, i])

        surprises[j] = neuron_error / self.n_inputs

    # Keep weights inside the valid probability range.
    np.clip(self.weights, 0.0, 1.0, out=self.weights)

    return {
        "prediction_error": float(np.mean(surprises)),
        "surprises": surprises,
        "predictions": predictions,
    }

Rall Branching Dendrite

Compartmental dendritic tree with Rall's 3/2 power rule for impedance matching. Distal-to-proximal propagation with inter-compartment coupling.

sc_neurocore.layers.rall_dendrite.RallDendrite dataclass

Dendritic tree with Rall branching and compartmental dynamics.

Parameters

- `n_branches` (int): Number of dendritic branches.
- `branch_length` (int): Number of compartments per branch.
- `tau` (float): Membrane time constant (ms).
- `coupling` (float): Inter-compartment coupling strength (0 to 1).
- `dt` (float): Timestep (ms).

Source code in src/sc_neurocore/layers/rall_dendrite.py
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
@dataclass
class RallDendrite:
    """Dendritic tree with Rall branching and compartmental dynamics.

    Parameters
    ----------
    n_branches : int
        Number of dendritic branches.
    branch_length : int
        Number of compartments per branch.
    tau : float
        Membrane time constant (ms).
    coupling : float
        Inter-compartment coupling strength (0 to 1).
    dt : float
        Timestep (ms).
    """

    n_branches: int = 4
    branch_length: int = 3
    tau: float = 10.0
    coupling: float = 0.5
    dt: float = 1.0

    def __post_init__(self):
        # Each branch has branch_length compartments
        # Compartment voltages: shape (n_branches, branch_length)
        self.v = np.zeros((self.n_branches, self.branch_length))
        self.soma_v = 0.0
        self._decay = np.exp(-self.dt / self.tau)
        # Rall 3/2 rule: branch diameters for impedance matching
        # Daughter diameters normalized so d_parent^1.5 = sum(d_i^1.5)
        self.diameters = np.ones(self.n_branches)
        parent_d = (self.n_branches) ** (2.0 / 3.0)
        self.attenuation = (self.diameters / parent_d) ** 1.5

    def step(self, branch_inputs: np.ndarray) -> float:
        """Advance one timestep.

        Parameters
        ----------
        branch_inputs : np.ndarray
            Shape (n_branches,) — synaptic current injected at distal tip of each branch.

        Returns
        -------
        float
            Somatic voltage.
        """
        branch_inputs = np.atleast_1d(np.asarray(branch_inputs, dtype=np.float64))

        # Decay all compartments
        self.v *= self._decay

        # Inject input at distal tip (last compartment)
        self.v[:, -1] += branch_inputs[: self.n_branches] * self.dt / self.tau

        # Propagate along branch: distal → proximal (toward soma)
        for k in range(self.branch_length - 1, 0, -1):
            flow = self.coupling * (self.v[:, k] - self.v[:, k - 1])
            self.v[:, k] -= flow
            self.v[:, k - 1] += flow

        # Sum proximal compartments at soma with Rall attenuation
        proximal = self.v[:, 0]
        soma_input = np.sum(proximal * self.attenuation)
        self.soma_v = self._decay * self.soma_v + soma_input * self.dt / self.tau

        return float(self.soma_v)

    @property
    def branch_voltages(self) -> np.ndarray:
        """Current compartment voltages, shape (n_branches, branch_length)."""
        return self.v.copy()

    def reset(self):
        self.v[:] = 0.0
        self.soma_v = 0.0

branch_voltages property

Current compartment voltages, shape (n_branches, branch_length).

step(branch_inputs)

Advance one timestep.

Parameters

branch_inputs : np.ndarray Shape (n_branches,) — synaptic current injected at distal tip of each branch.

Returns

float Somatic voltage.

Source code in src/sc_neurocore/layers/rall_dendrite.py
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
def step(self, branch_inputs: np.ndarray) -> float:
    """Advance one timestep.

    Parameters
    ----------
    branch_inputs : np.ndarray
        Shape (n_branches,) — synaptic current injected at distal tip of each branch.

    Returns
    -------
    float
        Somatic voltage.
    """
    branch_inputs = np.atleast_1d(np.asarray(branch_inputs, dtype=np.float64))

    # Decay all compartments
    self.v *= self._decay

    # Inject input at distal tip (last compartment)
    self.v[:, -1] += branch_inputs[: self.n_branches] * self.dt / self.tau

    # Propagate along branch: distal → proximal (toward soma)
    for k in range(self.branch_length - 1, 0, -1):
        flow = self.coupling * (self.v[:, k] - self.v[:, k - 1])
        self.v[:, k] -= flow
        self.v[:, k - 1] += flow

    # Sum proximal compartments at soma with Rall attenuation
    proximal = self.v[:, 0]
    soma_input = np.sum(proximal * self.attenuation)
    self.soma_v = self._decay * self.soma_v + soma_input * self.dt / self.tau

    return float(self.soma_v)

Lateral Inhibition

sc_neurocore.layers.circuit_primitives.LateralInhibition dataclass

Lateral inhibition: each neuron inhibits its neighbors.

Models the surround suppression found in retinal ganglion cells, cortical simple cells, and throughout sensory processing.

The inhibition kernel is a Gaussian centered on each neuron with width radius, producing a Mexican-hat (center-surround) response when combined with the neuron's own excitation.

Source code in src/sc_neurocore/layers/circuit_primitives.py
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
@dataclass
class LateralInhibition:
    """Lateral inhibition: each neuron inhibits its neighbors.

    Models the surround suppression found in retinal ganglion cells,
    cortical simple cells, and throughout sensory processing.

    The inhibition kernel is a Gaussian centered on each neuron with
    width `radius`, producing a Mexican-hat (center-surround) response
    when combined with the neuron's own excitation.
    """

    n_neurons: int
    inhibition_strength: float = 0.3
    radius: int = 2

    def __post_init__(self):
        # Precompute the circular Gaussian inhibition matrix once.
        weights = np.zeros((self.n_neurons, self.n_neurons))
        for i in range(self.n_neurons):
            for j in range(self.n_neurons):
                gap = abs(i - j)
                # Wrap-around (ring) distance between neurons i and j.
                ring_dist = min(gap, self.n_neurons - gap)
                if 0 < ring_dist <= self.radius:
                    weights[i, j] = self.inhibition_strength * np.exp(
                        -(ring_dist**2) / (2 * (self.radius / 2) ** 2)
                    )
        self._kernel = weights

    def apply(self, rates: np.ndarray) -> np.ndarray:
        """Apply lateral inhibition to firing rates.

        Parameters
        ----------
        rates : np.ndarray, shape (n_neurons,)
            Input firing rates or probabilities.

        Returns
        -------
        np.ndarray, shape (n_neurons,)
            Inhibited firing rates, clipped to [0, inf).
        """
        suppressed = rates - self._kernel @ rates
        return np.maximum(suppressed, 0.0)

apply(rates)

Apply lateral inhibition to firing rates.

Parameters

rates : np.ndarray, shape (n_neurons,) Input firing rates or probabilities.

Returns

np.ndarray, shape (n_neurons,) Inhibited firing rates, clipped to [0, inf).

Source code in src/sc_neurocore/layers/circuit_primitives.py
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
def apply(self, rates: np.ndarray) -> np.ndarray:
    """Apply lateral inhibition to firing rates.

    Parameters
    ----------
    rates : np.ndarray, shape (n_neurons,)
        Input firing rates or probabilities.

    Returns
    -------
    np.ndarray, shape (n_neurons,)
        Inhibited firing rates, clipped to [0, inf).
    """
    # Subtract kernel-weighted neighbor activity, then rectify at zero.
    net = rates - self._kernel @ rates
    return np.maximum(net, 0.0)

Winner-Take-All

sc_neurocore.layers.circuit_primitives.WinnerTakeAll dataclass

k-Winner-Take-All circuit.

Only the top-k neurons remain active; all others are suppressed to zero. Models competitive dynamics in cortical columns and basal ganglia action selection.

With k=1, this is a hard argmax over the population.

Source code in src/sc_neurocore/layers/circuit_primitives.py
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
@dataclass
class WinnerTakeAll:
    """k-Winner-Take-All circuit.

    Only the top-k neurons remain active; all others are suppressed to zero.
    Models competitive dynamics in cortical columns and basal ganglia
    action selection.

    With k=1, this is a hard argmax over the population.
    """

    n_neurons: int
    k: int = 1

    def apply(self, rates: np.ndarray) -> np.ndarray:
        """Apply k-WTA to firing rates.

        Parameters
        ----------
        rates : np.ndarray, shape (n_neurons,)
            Input firing rates.

        Returns
        -------
        np.ndarray, shape (n_neurons,)
            Only top-k values survive; rest are zero.
        """
        if self.k >= self.n_neurons:
            # Everyone wins: nothing to suppress.
            return rates.copy()
        winning = np.argsort(rates)[-self.k :]
        gated = np.zeros_like(rates)
        gated[winning] = rates[winning]
        return gated

    def winners(self, rates: np.ndarray) -> np.ndarray:
        """Return indices of the k winning neurons."""
        ranked = np.argsort(rates)[-self.k :]
        return ranked[::-1]

apply(rates)

Apply k-WTA to firing rates.

Parameters

rates : np.ndarray, shape (n_neurons,) Input firing rates.

Returns

np.ndarray, shape (n_neurons,) Only top-k values survive; rest are zero.

Source code in src/sc_neurocore/layers/circuit_primitives.py
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
def apply(self, rates: np.ndarray) -> np.ndarray:
    """Apply k-WTA to firing rates.

    Parameters
    ----------
    rates : np.ndarray, shape (n_neurons,)
        Input firing rates.

    Returns
    -------
    np.ndarray, shape (n_neurons,)
        Only top-k values survive; rest are zero.
    """
    if self.k >= self.n_neurons:
        # Everyone wins: nothing to suppress.
        return rates.copy()
    keep = np.argsort(rates)[-self.k :]
    masked = np.zeros_like(rates)
    masked[keep] = rates[keep]
    return masked

winners(rates)

Return indices of the k winning neurons.

Source code in src/sc_neurocore/layers/circuit_primitives.py
107
108
109
def winners(self, rates: np.ndarray) -> np.ndarray:
    """Return indices of the k winning neurons, highest rate first."""
    top = np.argsort(rates)[-self.k :]
    return top[::-1]