Skip to content

Sources

Input current sources that drive neurons via SC-encoded bitstreams.

Bitstream Current Source

Converts multiple scalar inputs through weighted SC synapses into a single current value per timestep.

sc_neurocore.sources.bitstream_current_source.BitstreamCurrentSource dataclass

Multi-channel bitstream current source.

  • Takes scalar inputs x_i in [x_min, x_max]
  • Encodes each into a bitstream via BitstreamEncoder
  • Passes them through BitstreamSynapses
  • Uses BitstreamDotProduct to compute a scalar current I(t) for the neuron.

For now we assume static inputs and weights over the full length, but you can extend this to time-varying later.

Source code in src/sc_neurocore/sources/bitstream_current_source.py
 19
 20
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
@dataclass
class BitstreamCurrentSource:
    """
    Current source that drives a neuron from multiple SC bitstream channels.

    Each scalar input x_i (within [x_min, x_max]) is encoded into a
    stochastic bitstream via BitstreamEncoder, weighted by a
    BitstreamSynapse, and combined by a BitstreamDotProduct into a scalar
    current I(t) for the neuron.

    Inputs and weights are assumed static over the whole stream length;
    time-varying signals are a possible future extension.
    """

    x_inputs: Sequence[float]
    x_min: float
    x_max: float
    weight_values: Sequence[float]
    w_min: float
    w_max: float
    length: int = 1024
    y_min: float = 0.0  # output current min
    y_max: float = 0.1  # output current max
    seed: Optional[int] = None

    def __post_init__(self) -> None:
        self.n_inputs = len(self.x_inputs)
        if len(self.weight_values) != self.n_inputs:
            raise ValueError("x_inputs and weight_values must have same length.")

        # One encoder per channel; per-channel seeds are derived from the
        # base seed so channels stay decorrelated yet reproducible.
        self._encoders: List[BitstreamEncoder] = [
            BitstreamEncoder(
                x_min=self.x_min,
                x_max=self.x_max,
                length=self.length,
                seed=None if self.seed is None else self.seed + k,
            )
            for k in range(self.n_inputs)
        ]

        # Pre-synaptic bitstreams: one row per input channel.
        self.pre_matrix = np.zeros((self.n_inputs, self.length), dtype=np.uint8)
        for row, (encoder, value) in enumerate(zip(self._encoders, self.x_inputs)):
            self.pre_matrix[row] = encoder.encode(value)

        # Weighted synapses; the +1000 seed offset keeps their randomness
        # independent of the encoder streams.
        self.synapses: List[BitstreamSynapse] = [
            BitstreamSynapse(
                w_min=self.w_min,
                w_max=self.w_max,
                length=self.length,
                w=weight,
                seed=None if self.seed is None else self.seed + 1000 + k,
            )
            for k, weight in enumerate(self.weight_values)
        ]

        # Dot-product engine combines all post-synaptic streams into one
        # set of output bitstreams plus a scalar current estimate.
        self.dot = BitstreamDotProduct(self.synapses)
        self.post_matrix, self.current_scalar = self.dot.apply(
            self.pre_matrix, y_min=self.y_min, y_max=self.y_max
        )

        # Time index into the bitstreams; one step() call consumes one bit.
        self._t = 0

    def reset(self) -> None:
        """Rewind the source to the first timestep."""
        self._t = 0

    def step(self) -> float:
        """
        Return the current I_t at the current time index and advance.

        We approximate I_t by reading the t-th bit of each post-synaptic
        stream, then mapping their sum to [y_min, y_max].
        """
        # Clamp at the final bit once the stream is exhausted (no wrap-around).
        idx = min(self._t, self.length - 1)

        # One bit per post-synaptic stream at this timestep.
        column = self.post_matrix[:, idx]
        # Fraction of streams emitting a 1 (guard against zero channels).
        fraction = int(column.sum()) / max(self.n_inputs, 1)

        self._t += 1
        # Linear map of the active fraction onto [y_min, y_max].
        return float(self.y_min + fraction * (self.y_max - self.y_min))

    def full_current_estimate(self) -> float:
        """
        Estimate the average current over the full bitstream duration,
        taken directly from the dot-product's scalar value.
        """
        return float(self.current_scalar)

step()

Return the current I_t at the current time index and advance.

We approximate I_t by reading the t-th bit of each post-synaptic stream, then mapping their sum to [y_min, y_max].

Source code in src/sc_neurocore/sources/bitstream_current_source.py
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
def step(self) -> float:
    """
    Return the current I_t at the current time index and advance.

    We approximate I_t by reading the t-th bit of each post-synaptic
    stream, then mapping their sum to [y_min, y_max].
    """
    # Clamp at the final bit once the stream is exhausted (no wrap-around).
    idx = min(self._t, self.length - 1)

    # One bit per post-synaptic stream at this timestep.
    column = self.post_matrix[:, idx]
    # Fraction of streams emitting a 1 (guard against zero channels).
    fraction = int(column.sum()) / max(self.n_inputs, 1)

    self._t += 1
    # Linear map of the active fraction onto [y_min, y_max].
    return float(self.y_min + fraction * (self.y_max - self.y_min))

full_current_estimate()

Estimate average current over full bitstream duration using the dot-product's scalar value.

Source code in src/sc_neurocore/sources/bitstream_current_source.py
119
120
121
122
123
124
def full_current_estimate(self) -> float:
    """
    Estimate the average current over the full bitstream duration,
    taken directly from the dot-product's scalar value.
    """
    scalar = self.current_scalar
    return float(scalar)

Quantum Entropy Source

Optional integration with quantum random number generators (Qiskit, PennyLane) for true randomness in SC encoding.

sc_neurocore.sources.quantum_entropy

QuantumEntropySource dataclass

Generates entropy based on simulated quantum measurement collapse. Used to inject "true" (simulated) quantum indeterminacy into neural models.

Physics:

  • Maintains a qubit state |psi>
  • Applies Hadamard (superposition) and phase rotations
  • Measures (collapse) to generate noise

Source code in src/sc_neurocore/sources/quantum_entropy.py
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
@dataclass
class QuantumEntropySource:
    """
    Entropy source driven by a simulated quantum measurement collapse,
    used to inject (simulated) quantum indeterminacy into neural models.

    Physics:
    - Holds a qubit register state |psi>
    - Applies Hadamard gates to create superposition
    - Measures via the Born rule, collapsing the state, to produce noise
    """

    n_qubits: int = 1
    seed: Optional[int] = None

    def __post_init__(self) -> None:
        # Classical RNG: used only to draw measurement outcomes and jitter.
        self._rng = np.random.RandomState(self.seed)
        # Start in the computational basis state |0...0>.
        self.state = np.zeros(2**self.n_qubits, dtype=np.complex128)
        self.state[0] = 1.0

    def _hadamard(self) -> None:
        """Apply Hadamard gate H = (1/√2)[[1,1],[1,-1]] to each qubit."""
        gate = np.array([[1, 1], [1, -1]], dtype=np.complex128) / np.sqrt(2)
        amps = self.state.copy()
        n = self.n_qubits
        dim = 2**n
        for qubit in range(n):
            updated = np.zeros(dim, dtype=np.complex128)
            # Butterfly span for this qubit: pairs of amplitudes `mid` apart.
            span = 2 ** (n - qubit)
            mid = span // 2
            for base in range(0, dim, span):
                for offset in range(mid):
                    lo = amps[base + offset]
                    hi = amps[base + mid + offset]
                    updated[base + offset] = gate[0, 0] * lo + gate[0, 1] * hi
                    updated[base + mid + offset] = gate[1, 0] * lo + gate[1, 1] * hi
            amps = updated
        self.state = amps

    def _measure(self) -> int:
        """Apply Hadamard, sample via the Born rule, collapse the state."""
        self._hadamard()
        probabilities = np.abs(self.state) ** 2
        outcome = self._rng.choice(len(probabilities), p=probabilities)
        # Projective collapse: all amplitude moves to the measured basis state.
        collapsed = np.zeros_like(self.state)
        collapsed[outcome] = 1.0
        self.state = collapsed
        return int(outcome)

    def sample_normal(self, mean: float = 0.0, std: float = 1.0) -> float:
        """
        Two independent measurements → Box-Muller → Gaussian sample.

        Discrete outcomes dithered with uniform jitter for continuous input.
        """
        dim = len(self.state)

        u1 = (self._measure() + self._rng.uniform()) / dim
        u1 = np.clip(u1, 1e-10, 1.0 - 1e-10)  # keep log() finite
        u2 = (self._measure() + self._rng.uniform()) / dim

        gauss = np.sqrt(-2.0 * np.log(u1)) * np.cos(2.0 * np.pi * u2)
        return float(mean + gauss * std)

    def sample(self) -> float:
        """Convenience alias for a standard-normal sample."""
        return self.sample_normal()

sample_normal(mean=0.0, std=1.0)

Two independent measurements → Box-Muller → Gaussian sample.

Discrete outcomes dithered with uniform jitter for continuous input.

Source code in src/sc_neurocore/sources/quantum_entropy.py
63
64
65
66
67
68
69
70
71
72
73
74
75
76
def sample_normal(self, mean: float = 0.0, std: float = 1.0) -> float:
    """
    Two independent measurements → Box-Muller → Gaussian sample.

    Discrete outcomes dithered with uniform jitter for continuous input.
    """
    dim = len(self.state)

    # Dither each discrete outcome with uniform noise so the Box-Muller
    # inputs are continuous on (0, 1).
    first = (self._measure() + self._rng.uniform()) / dim
    first = np.clip(first, 1e-10, 1.0 - 1e-10)  # keep log() finite
    second = (self._measure() + self._rng.uniform()) / dim

    gauss = np.sqrt(-2.0 * np.log(first)) * np.cos(2.0 * np.pi * second)
    return float(mean + gauss * std)