Skip to content

Synapses

Stochastic-computing synapses implement weighted connections between neurons using bitstream multiplication (AND gates).

Class Learning Use case
BitstreamSynapse None (static weight) Inference, fixed networks
StochasticSTDPSynapse Hebbian STDP Unsupervised learning
RewardModulatedSTDPSynapse Three-factor R-STDP Reinforcement learning
BitstreamDotProduct None Multi-input weighted sum
TripletSTDP Pfister-Gerstner 2006 Rate-dependent cortical plasticity
BCMSynapse Sliding threshold Metaplasticity, selectivity
ClopathSTDP Voltage-based Unifies rate + timing plasticity
TripartiteSynapse Astrocyte-modulated Neuron-glia-synapse coupling
GapJunction Electrical coupling Interneuron synchrony

Static Synapse

sc_neurocore.synapses.sc_synapse.BitstreamSynapse dataclass

Stochastic-computing synapse using bitstreams.

Each synapse has a weight w in [w_min, w_max]. SC multiplication via bitwise AND: P(out=1) ~ P(pre=1) * P(w=1).

Example

import numpy as np syn = BitstreamSynapse(w_min=0.0, w_max=1.0, w=0.5, length=1024, seed=42) pre = np.ones(1024, dtype=np.uint8) # all-ones input post = syn.apply(pre) abs(post.mean() - 0.5) < 0.1 # output ~50% ones True

Source code in src/sc_neurocore/synapses/sc_synapse.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
@dataclass
class BitstreamSynapse:
    """
    Weighted connection implemented with stochastic-computing bitstreams.

    The scalar weight w lives in [w_min, w_max] and is cached as a
    unipolar bitstream; multiplying pre-synaptic activity by the weight
    then reduces to a bitwise AND: P(out=1) ~ P(pre=1) * P(w=1).

    Example
    -------
    >>> import numpy as np
    >>> syn = BitstreamSynapse(w_min=0.0, w_max=1.0, w=0.5, length=1024, seed=42)
    >>> pre = np.ones(1024, dtype=np.uint8)  # all-ones input
    >>> post = syn.apply(pre)
    >>> abs(post.mean() - 0.5) < 0.1  # output ~50% ones
    True
    """

    w_min: float
    w_max: float
    length: int = SYNAPSE_DEFAULT_LENGTH
    w: float = SYNAPSE_DEFAULT_WEIGHT
    seed: Optional[int] = None

    def __post_init__(self) -> None:
        # A degenerate or inverted range cannot be encoded.
        if self.w_max <= self.w_min:
            raise ValueError("w_min must be < w_max.")
        self._rng = RNG(self.seed)
        self._weight_encoder = BitstreamEncoder(
            x_min=self.w_min,
            x_max=self.w_max,
            length=self.length,
            seed=self.seed,
        )
        self.weight_bits = self.encode_weight(self.w)

    def encode_weight(self, w: float) -> np.ndarray[Any, Any]:
        """Return the unipolar bitstream encoding of scalar weight w."""
        return self._weight_encoder.encode(w)

    def update_weight(self, new_w: float) -> None:
        """Set a new weight value and refresh the cached weight bitstream."""
        self.w = new_w
        self.weight_bits = self.encode_weight(new_w)

    def apply(self, pre_bits: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        Propagate a pre-synaptic bitstream through the synapse.

        Parameters
        ----------
        pre_bits : np.ndarray
            Bitstream of shape (length,) with values {0,1}.

        Returns
        -------
        np.ndarray
            Post-synaptic bitstream of shape (length,).
        """
        n_pre = pre_bits.shape[0]
        n_weight = self.weight_bits.shape[0]
        if n_pre != n_weight:
            raise ValueError(
                f"Bitstream length mismatch: pre={n_pre}, "
                f"weight={n_weight}"
            )
        # AND of the two streams realizes multiplication in the SC domain.
        return np.bitwise_and(pre_bits, self.weight_bits).astype(np.uint8)

    def effective_weight_probability(self) -> float:
        """Decoded probability P(weight_bit=1) of the cached weight stream."""
        return bitstream_to_probability(self.weight_bits)

encode_weight(w)

Encode scalar weight w into a unipolar bitstream.

Source code in src/sc_neurocore/synapses/sc_synapse.py
57
58
59
60
61
def encode_weight(self, w: float) -> np.ndarray[Any, Any]:
    """
    Encode scalar weight w into a unipolar bitstream.

    Parameters
    ----------
    w : float
        Weight value; expected within [w_min, w_max] of the encoder.

    Returns
    -------
    np.ndarray
        Bitstream representation of w.
    """
    # Delegates to the BitstreamEncoder configured in __post_init__.
    return self._weight_encoder.encode(w)

update_weight(new_w)

Change synaptic weight and recompute its bitstream.

Source code in src/sc_neurocore/synapses/sc_synapse.py
63
64
65
66
67
68
def update_weight(self, new_w: float) -> None:
    """
    Change synaptic weight and recompute its bitstream.

    Parameters
    ----------
    new_w : float
        New weight value. No clamping is performed here, so callers
        should keep it within [w_min, w_max].
    """
    self.w = new_w
    # Keep the cached bitstream in sync with the scalar weight.
    self.weight_bits = self.encode_weight(new_w)

apply(pre_bits)

Apply synapse to a pre-synaptic bitstream.

Parameters

pre_bits : np.ndarray Bitstream of shape (length,) with values {0,1}.

Returns

np.ndarray Post-synaptic bitstream of shape (length,).

Source code in src/sc_neurocore/synapses/sc_synapse.py
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
def apply(self, pre_bits: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
    """
    Apply synapse to a pre-synaptic bitstream.

    Parameters
    ----------
    pre_bits : np.ndarray
        Bitstream of shape (length,) with values {0,1}.

    Returns
    -------
    np.ndarray
        Post-synaptic bitstream of shape (length,).

    Raises
    ------
    ValueError
        If pre_bits and the cached weight bitstream differ in length.
    """
    if pre_bits.shape[0] != self.weight_bits.shape[0]:
        raise ValueError(
            f"Bitstream length mismatch: pre={pre_bits.shape[0]}, "
            f"weight={self.weight_bits.shape[0]}"
        )
    # Logical AND implements multiplication in SC domain
    return (pre_bits & self.weight_bits).astype(np.uint8)

effective_weight_probability()

Decode the weight bitstream's probability P(weight_bit=1). This is the effective unipolar probability representation.

Source code in src/sc_neurocore/synapses/sc_synapse.py
92
93
94
95
96
97
def effective_weight_probability(self) -> float:
    """
    Decode the weight bitstream's probability P(weight_bit=1).
    This is the effective unipolar probability representation.

    Returns
    -------
    float
        Decoded probability P(weight_bit=1) of the cached weight stream.
    """
    return bitstream_to_probability(self.weight_bits)

STDP Synapse

sc_neurocore.synapses.stochastic_stdp.StochasticSTDPSynapse dataclass

Bases: BitstreamSynapse

Stochastic synapse with spike-timing-dependent plasticity.

LTP on pre→post coincidence, LTD on pre-without-post. Asymmetry ratio from Bi & Poo, J. Neurosci. 18(24), 1998.

Example

syn = StochasticSTDPSynapse(w_min=0.0, w_max=1.0, w=0.5, length=64) for _ in range(100): ... syn.process_step(pre_bit=1, post_bit=1) # correlated activity → LTP syn.w >= 0.5 # weight increased or stayed True

Source code in src/sc_neurocore/synapses/stochastic_stdp.py
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
@dataclass
class StochasticSTDPSynapse(BitstreamSynapse):
    """
    Bitstream synapse with spike-timing-dependent plasticity.

    Coincident pre→post activity drives LTP; pre activity without a
    post spike drives LTD. The LTP/LTD asymmetry ratio follows
    Bi & Poo, J. Neurosci. 18(24), 1998.

    Example
    -------
    >>> syn = StochasticSTDPSynapse(w_min=0.0, w_max=1.0, w=0.5, length=64)
    >>> for _ in range(100):
    ...     syn.process_step(pre_bit=1, post_bit=1)  # correlated activity → LTP
    >>> syn.w >= 0.5  # weight increased or stayed
    True
    """

    learning_rate: float = STDP_LEARNING_RATE
    window_size: int = STDP_WINDOW_SIZE
    ltd_ratio: float = STDP_LTD_RATIO

    _pre_trace: np.ndarray[Any, Any] = field(init=False, repr=False)

    def __post_init__(self) -> None:
        super().__post_init__()
        # Sliding window of the most recent pre-synaptic bits.
        self._pre_trace = np.zeros(self.window_size, dtype=np.uint8)

    def process_step(self, pre_bit: int, post_bit: int) -> int:
        """One timestep: emit the output bit, record the pre bit, learn."""
        # Sample a stochastic weight bit from the decoded weight probability.
        sampled_w = 1 if self._rng.random() < self.effective_weight_probability() else 0
        out = pre_bit & sampled_w

        # Shift the window so the newest pre bit sits at index 0.
        self._pre_trace = np.roll(self._pre_trace, 1)
        self._pre_trace[0] = pre_bit

        # Trace-based STDP: post spike with recent pre activity → LTP;
        # pre spike without a post spike → LTD. At most one per timestep.
        had_recent_pre = bool(np.any(self._pre_trace[1:]))
        if post_bit == 1 and had_recent_pre:
            if self._rng.random() < self.learning_rate:
                self._potentiate()
        elif pre_bit == 1 and post_bit == 0:
            if self._rng.random() < self.learning_rate * self.ltd_ratio:
                self._depress()

        return out

    def _potentiate(self):
        # Additive step scaled to the weight range, clipped at w_max.
        step = self.learning_rate * (self.w_max - self.w_min)
        self.update_weight(min(self.w_max, self.w + step))

    def _depress(self):
        # Additive step scaled to the weight range, clipped at w_min.
        step = self.learning_rate * (self.w_max - self.w_min)
        self.update_weight(max(self.w_min, self.w - step))

process_step(pre_bit, post_bit)

Process one timestep: compute output, update trace, apply STDP.

Source code in src/sc_neurocore/synapses/stochastic_stdp.py
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def process_step(self, pre_bit: int, post_bit: int) -> int:
    """Process one timestep: compute output, update trace, apply STDP.

    Parameters
    ----------
    pre_bit, post_bit : int
        Pre-/post-synaptic bits for this timestep, in {0, 1}.

    Returns
    -------
    int
        Output bit: pre_bit AND a stochastically sampled weight bit.
    """
    # Sample a weight bit with probability P(weight_bit=1).
    weight_bit = 1 if self._rng.random() < self.effective_weight_probability() else 0
    output_bit = pre_bit & weight_bit

    # Shift the trace window; the newest pre bit sits at index 0.
    self._pre_trace = np.roll(self._pre_trace, 1)
    self._pre_trace[0] = pre_bit

    # Trace-based STDP: post spike + recent pre activity → LTP.
    # Pre spike without post → LTD. Mutually exclusive per timestep.
    if post_bit == 1 and np.any(self._pre_trace[1:]):
        if self._rng.random() < self.learning_rate:
            self._potentiate()
    elif pre_bit == 1 and post_bit == 0:
        if self._rng.random() < self.learning_rate * self.ltd_ratio:
            self._depress()

    return output_bit

Reward-Modulated STDP

sc_neurocore.synapses.r_stdp.RewardModulatedSTDPSynapse dataclass

Bases: StochasticSTDPSynapse

Reward-modulated STDP synapse (Izhikevich, Cerebral Cortex 17(10), 2007).

Eligibility trace accumulates Hebbian coincidences; weight update fires only when a global reward signal arrives.

Example

syn = RewardModulatedSTDPSynapse(w_min=0.0, w_max=1.0, w=0.5, length=64) for _ in range(20): ... syn.process_step(pre_bit=1, post_bit=1) syn.apply_reward(reward=1.0) # positive reward → potentiate syn.w >= 0.5 True

Source code in src/sc_neurocore/synapses/r_stdp.py
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
@dataclass
class RewardModulatedSTDPSynapse(StochasticSTDPSynapse):
    """
    Reward-modulated STDP synapse (Izhikevich, Cerebral Cortex 17(10), 2007).

    Hebbian coincidences are accumulated in an eligibility trace; the
    weight itself only moves when a global reward signal is delivered.

    Example
    -------
    >>> syn = RewardModulatedSTDPSynapse(w_min=0.0, w_max=1.0, w=0.5, length=64)
    >>> for _ in range(20):
    ...     syn.process_step(pre_bit=1, post_bit=1)
    >>> syn.apply_reward(reward=1.0)  # positive reward → potentiate
    >>> syn.w >= 0.5
    True
    """

    eligibility_trace: float = 0.0
    trace_decay: float = RSTDP_TRACE_DECAY
    anti_hebbian_scale: float = RSTDP_ANTI_HEBBIAN_SCALE

    def process_step(self, pre_bit: int, post_bit: int) -> int:
        # Output computation matches the base synapse: AND with a
        # stochastically sampled weight bit.
        sampled_bit = 1 if self._rng.random() < self.effective_weight_probability() else 0
        out = pre_bit & sampled_bit

        # Instead of touching the weight, accumulate eligibility:
        # pre AND post  → potentiation eligibility (+1);
        # pre without post → depression eligibility (scaled down).
        if pre_bit == 1:
            if post_bit == 1:
                self.eligibility_trace += 1.0
            else:
                self.eligibility_trace -= self.anti_hebbian_scale

        # Exponential decay of the trace every timestep.
        self.eligibility_trace *= self.trace_decay

        return out

    def apply_reward(self, reward: float) -> None:
        """
        Global reward signal triggers weight update.
        """
        # Delta W ~ reward * eligibility trace, scaled by the learning rate.
        delta = self.learning_rate * reward * self.eligibility_trace
        bounded = max(self.w_min, min(self.w_max, self.w + delta))
        self.update_weight(bounded)

apply_reward(reward)

Global reward signal triggers weight update.

Source code in src/sc_neurocore/synapses/r_stdp.py
60
61
62
63
64
65
66
67
68
69
70
71
def apply_reward(self, reward: float) -> None:
    """
    Global reward signal triggers weight update.

    Parameters
    ----------
    reward : float
        Global reward; its sign and magnitude scale the update
        together with the accumulated eligibility trace.
    """
    # Delta W ~ Reward * Trace
    update = self.learning_rate * reward * self.eligibility_trace

    new_w = self.w + update
    # Clip
    new_w = max(self.w_min, min(self.w_max, new_w))

    # NOTE(review): the eligibility trace is not reset here, so repeated
    # reward calls reuse the same trace — confirm this is intended.
    self.update_weight(new_w)

Dot Product

sc_neurocore.synapses.dot_product.BitstreamDotProduct dataclass

Bitstream-level dot product via SC synapses.

For each input i, applies synapse_i (AND gate), then sums decoded probabilities: y ~ sum_i w_i * x_i.

Example

import numpy as np from sc_neurocore import BitstreamSynapse syns = [BitstreamSynapse(w_min=0.0, w_max=1.0, w=0.5, length=256) ... for _ in range(3)] dp = BitstreamDotProduct(synapses=syns) pre = np.ones((3, 256), dtype=np.uint8) post_matrix, y_scalar = dp.apply(pre) post_matrix.shape (3, 256)

Source code in src/sc_neurocore/synapses/dot_product.py
 21
 22
 23
 24
 25
 26
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
@dataclass
class BitstreamDotProduct:
    """
    Dot product evaluated at the bitstream level with SC synapses.

    Each input bitstream i passes through synapse_i (an AND gate); the
    decoded output probabilities are then summed: y ~ sum_i w_i * x_i.

    Example
    -------
    >>> import numpy as np
    >>> from sc_neurocore import BitstreamSynapse
    >>> syns = [BitstreamSynapse(w_min=0.0, w_max=1.0, w=0.5, length=256)
    ...         for _ in range(3)]
    >>> dp = BitstreamDotProduct(synapses=syns)
    >>> pre = np.ones((3, 256), dtype=np.uint8)
    >>> post_matrix, y_scalar = dp.apply(pre)
    >>> post_matrix.shape
    (3, 256)
    """

    synapses: List[BitstreamSynapse]

    def __post_init__(self) -> None:
        if not self.synapses:
            raise ValueError("Need at least one synapse.")

    @property
    def n_inputs(self) -> int:
        return len(self.synapses)

    def apply(
        self,
        pre_matrix: np.ndarray[Any, Any],
        y_min: float = 0.0,
        y_max: float = 1.0,
    ) -> Tuple[np.ndarray[Any, Any], float]:
        """
        Run every synapse over its input bitstream and reduce to a scalar.

        Parameters
        ----------
        pre_matrix : np.ndarray
            Shape (n_inputs, length), entries {0,1}.
        y_min, y_max : float
            Range in which the final scalar output is interpreted
            (e.g., current range for the neuron).

        Returns
        -------
        post_matrix : np.ndarray
            Post-synaptic bitstreams of shape (n_inputs, length).
        y_scalar : float
            Scalar result representing sum_i P(post_i=1) mapped into [y_min, y_max].
        """
        if pre_matrix.shape[0] != self.n_inputs:
            raise ValueError(
                f"Expected {self.n_inputs} input bitstreams, got {pre_matrix.shape[0]}"
            )

        post_matrix = np.zeros_like(pre_matrix, dtype=np.uint8)
        decoded = []

        # Each synapse ANDs its weight stream with the matching input row.
        for row, synapse in enumerate(self.synapses):
            out_bits = synapse.apply(pre_matrix[row])
            post_matrix[row] = out_bits
            decoded.append(bitstream_to_probability(out_bits))

        # Dot product in probability space — weights are already folded
        # into the decoded probabilities by the AND gates above.
        raw_sum = float(sum(decoded))

        # Keep the plain sum but saturate it into the unipolar range [0, 1].
        clamped = min(max(raw_sum, 0.0), 1.0)

        # Finally map the unipolar probability into [y_min, y_max].
        y_scalar = unipolar_prob_to_value(clamped, y_min, y_max)

        return post_matrix, y_scalar

apply(pre_matrix, y_min=0.0, y_max=1.0)

Apply all synapses to the pre-synaptic bitstreams and compute a scalar 'dot-product-like' value.

Parameters

pre_matrix : np.ndarray Shape (n_inputs, length), entries {0,1}. y_min, y_max : float Range in which the final scalar output is interpreted (e.g., current range for the neuron).

Returns

post_matrix : np.ndarray Post-synaptic bitstreams of shape (n_inputs, length). y_scalar : float Scalar result representing sum_i P(post_i=1) mapped into [y_min, y_max].

Source code in src/sc_neurocore/synapses/dot_product.py
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
def apply(
    self,
    pre_matrix: np.ndarray[Any, Any],
    y_min: float = 0.0,
    y_max: float = 1.0,
) -> Tuple[np.ndarray[Any, Any], float]:
    """
    Apply all synapses to the pre-synaptic bitstreams and compute
    a scalar 'dot-product-like' value.

    Parameters
    ----------
    pre_matrix : np.ndarray
        Shape (n_inputs, length), entries {0,1}.
    y_min, y_max : float
        Range in which the final scalar output is interpreted
        (e.g., current range for the neuron).

    Returns
    -------
    post_matrix : np.ndarray
        Post-synaptic bitstreams of shape (n_inputs, length).
    y_scalar : float
        Scalar result representing sum_i P(post_i=1) mapped into [y_min, y_max].

    Raises
    ------
    ValueError
        If pre_matrix does not provide one bitstream per synapse.
    """
    if pre_matrix.shape[0] != self.n_inputs:
        raise ValueError(
            f"Expected {self.n_inputs} input bitstreams, got {pre_matrix.shape[0]}"
        )

    post_matrix = np.zeros_like(pre_matrix, dtype=np.uint8)
    probs = []

    # Each synapse ANDs its weight stream with the matching input row.
    for i, syn in enumerate(self.synapses):
        post_i = syn.apply(pre_matrix[i])
        post_matrix[i] = post_i
        probs.append(bitstream_to_probability(post_i))

    # Dot-product in probability space (weights already baked into probs)
    y_prob_sum = float(sum(probs))

    # Normalize by number of inputs if desired
    # Here we just keep the sum and clamp into [0, 1]
    # NOTE(review): sums above 1.0 saturate, so strong multi-input
    # activity is compressed — confirm this is the intended semantics.
    y_prob_clamped = max(min(y_prob_sum, 1.0), 0.0)

    # Map that into [y_min, y_max]
    y_scalar = unipolar_prob_to_value(y_prob_clamped, y_min, y_max)

    return post_matrix, y_scalar

Triplet STDP (Pfister-Gerstner 2006)

sc_neurocore.synapses.triplet_stdp.TripletSTDP dataclass

Triplet STDP synapse (Pfister-Gerstner 2006).

Parameters

tau_plus : float Pre-synaptic trace decay (ms). Default: 16.8 (visual cortex fit). tau_minus : float Post-synaptic trace decay (ms). Default: 33.7. tau_x : float Slow pre-synaptic trace decay (ms). Default: 101. tau_y : float Slow post-synaptic trace decay (ms). Default: 125. a2_plus : float Pair LTP amplitude. Default: 7.5e-10. a3_plus : float Triplet LTP amplitude. Default: 9.3e-3. a2_minus : float Pair LTD amplitude. Default: 7.0e-3. a3_minus : float Triplet LTD amplitude. Default: 2.3e-4. w_min : float Minimum weight. Default: 0.0. w_max : float Maximum weight. Default: 1.0.

Source code in src/sc_neurocore/synapses/triplet_stdp.py
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
@dataclass
class TripletSTDP:
    """Triplet-rule STDP synapse (Pfister & Gerstner 2006).

    Maintains two pre-synaptic traces (r1 fast, r2 slow) and two
    post-synaptic traces (o1 fast, o2 slow); pair terms use the fast
    traces, triplet terms the slow ones.

    Parameters
    ----------
    tau_plus : float
        Pre-synaptic trace decay (ms). Default: 16.8 (visual cortex fit).
    tau_minus : float
        Post-synaptic trace decay (ms). Default: 33.7.
    tau_x : float
        Slow pre-synaptic trace decay (ms). Default: 101.
    tau_y : float
        Slow post-synaptic trace decay (ms). Default: 125.
    a2_plus : float
        Pair LTP amplitude. Default: 7.5e-10.
    a3_plus : float
        Triplet LTP amplitude. Default: 9.3e-3.
    a2_minus : float
        Pair LTD amplitude. Default: 7.0e-3.
    a3_minus : float
        Triplet LTD amplitude. Default: 2.3e-4.
    w_min : float
        Minimum weight. Default: 0.0.
    w_max : float
        Maximum weight. Default: 1.0.
    """

    tau_plus: float = 16.8
    tau_minus: float = 33.7
    tau_x: float = 101.0
    tau_y: float = 125.0
    a2_plus: float = 7.5e-10
    a3_plus: float = 9.3e-3
    a2_minus: float = 7.0e-3
    a3_minus: float = 2.3e-4
    w_min: float = 0.0
    w_max: float = 1.0
    weight: float = 0.5

    def __post_init__(self):
        # Fast/slow pre-synaptic traces and fast/slow post-synaptic traces.
        self.r1 = 0.0
        self.r2 = 0.0
        self.o1 = 0.0
        self.o2 = 0.0

    def step(self, pre_spike: bool, post_spike: bool, dt: float = 1.0) -> float:
        """Advance one timestep and return the updated weight."""
        import math

        # Exponential decay of all four traces.
        self.r1 = self.r1 * math.exp(-dt / self.tau_plus)
        self.r2 = self.r2 * math.exp(-dt / self.tau_x)
        self.o1 = self.o1 * math.exp(-dt / self.tau_minus)
        self.o2 = self.o2 * math.exp(-dt / self.tau_y)

        # Spike-triggered weight changes.
        if post_spike:
            # LTP: pair term + pre-post-post triplet term.
            self.weight = self.weight + self.r1 * (self.a2_plus + self.a3_plus * self.o2)
        if pre_spike:
            # LTD: pair term + pre-pre-post triplet term.
            self.weight = self.weight - self.o1 * (self.a2_minus + self.a3_minus * self.r2)

        # Keep the weight inside its bounds.
        self.weight = min(self.w_max, max(self.w_min, self.weight))

        # Bump traces only after the weight update (Pfister 2006 Eq. 3-4).
        if pre_spike:
            self.r1 += 1.0
            self.r2 += 1.0
        if post_spike:
            self.o1 += 1.0
            self.o2 += 1.0

        return self.weight

    def reset(self):
        """Zero all four spike traces."""
        self.r1 = self.r2 = self.o1 = self.o2 = 0.0

step(pre_spike, post_spike, dt=1.0)

Advance one timestep.

Returns the current weight after update.

Source code in src/sc_neurocore/synapses/triplet_stdp.py
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
def step(self, pre_spike: bool, post_spike: bool, dt: float = 1.0) -> float:
    """Advance one timestep.

    Parameters
    ----------
    pre_spike, post_spike : bool
        Whether the pre-/post-synaptic neuron spiked this step.
    dt : float
        Timestep in ms.

    Returns
    -------
    float
        The current weight after update.
    """
    import math

    # Decay traces
    # r1/o1 are the fast (pair) traces, r2/o2 the slow (triplet) traces.
    self.r1 *= math.exp(-dt / self.tau_plus)
    self.r2 *= math.exp(-dt / self.tau_x)
    self.o1 *= math.exp(-dt / self.tau_minus)
    self.o2 *= math.exp(-dt / self.tau_y)

    # Weight updates on spikes
    if post_spike:
        # LTP: pair + triplet pre-post-post
        self.weight += self.r1 * (self.a2_plus + self.a3_plus * self.o2)
    if pre_spike:
        # LTD: pair + triplet pre-pre-post
        self.weight -= self.o1 * (self.a2_minus + self.a3_minus * self.r2)

    # Clamp
    self.weight = max(self.w_min, min(self.w_max, self.weight))

    # Update traces after weight change (order matters — Pfister 2006 Eq. 3-4)
    if pre_spike:
        self.r1 += 1.0
        self.r2 += 1.0
    if post_spike:
        self.o1 += 1.0
        self.o2 += 1.0

    return self.weight

BCM Metaplasticity (Bienenstock-Cooper-Munro 1982)

sc_neurocore.synapses.bcm.BCMSynapse dataclass

BCM synapse with sliding modification threshold.

Parameters

eta : float Learning rate. tau_theta : float Time constant for sliding threshold (ms). theta_init : float Initial threshold value. w_min, w_max : float Weight bounds.

Source code in src/sc_neurocore/synapses/bcm.py
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
@dataclass
class BCMSynapse:
    """BCM synapse with a sliding modification threshold.

    Post-synaptic activity above theta_M potentiates, activity below it
    depresses; theta_M itself tracks the recent mean-squared activity.

    Parameters
    ----------
    eta : float
        Learning rate.
    tau_theta : float
        Time constant for sliding threshold (ms).
    theta_init : float
        Initial threshold value.
    w_min, w_max : float
        Weight bounds.
    """

    eta: float = 0.01
    tau_theta: float = 1000.0
    theta_init: float = 0.1
    w_min: float = 0.0
    w_max: float = 1.0
    weight: float = 0.5

    def __post_init__(self):
        # Sliding modification threshold theta_M.
        self.theta_m = self.theta_init

    def step(self, pre_rate: float, post_rate: float, dt: float = 1.0) -> float:
        """Advance one timestep.

        Parameters
        ----------
        pre_rate : float
            Pre-synaptic firing rate (or spike indicator).
        post_rate : float
            Post-synaptic firing rate (or membrane proxy).
        dt : float
            Timestep in ms.

        Returns
        -------
        float
            Updated weight.
        """
        # BCM rule: dw = eta * y * (y - theta_M) * x
        phi = post_rate * (post_rate - self.theta_m)
        self.weight = self.weight + self.eta * phi * pre_rate * dt
        self.weight = max(self.w_min, min(self.w_max, self.weight))

        # Threshold slides toward y^2: d(theta)/dt = (y^2 - theta) / tau_theta
        self.theta_m = self.theta_m + (post_rate**2 - self.theta_m) * dt / self.tau_theta

        return self.weight

    def reset(self):
        """Restore the threshold to its initial value."""
        self.theta_m = self.theta_init

step(pre_rate, post_rate, dt=1.0)

Advance one timestep.

Parameters

pre_rate : float Pre-synaptic firing rate (or spike indicator). post_rate : float Post-synaptic firing rate (or membrane proxy). dt : float Timestep in ms.

Returns

float Updated weight.

Source code in src/sc_neurocore/synapses/bcm.py
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def step(self, pre_rate: float, post_rate: float, dt: float = 1.0) -> float:
    """Advance one timestep.

    Parameters
    ----------
    pre_rate : float
        Pre-synaptic firing rate (or spike indicator).
    post_rate : float
        Post-synaptic firing rate (or membrane proxy).
    dt : float
        Timestep in ms.

    Returns
    -------
    float
        Updated weight.
    """
    # BCM update: dw = eta * y * (y - theta_M) * x
    # Post activity above theta_M potentiates; below it depresses.
    dw = self.eta * post_rate * (post_rate - self.theta_m) * pre_rate * dt
    self.weight += dw
    self.weight = max(self.w_min, min(self.w_max, self.weight))

    # Sliding threshold: d(theta)/dt = (y^2 - theta) / tau_theta
    # Sustained high post activity raises the bar for future LTP.
    self.theta_m += (post_rate**2 - self.theta_m) * dt / self.tau_theta

    return self.weight

Voltage-Based STDP (Clopath et al. 2010)

sc_neurocore.synapses.clopath_stdp.ClopathSTDP dataclass

Voltage-based STDP (Clopath et al. 2010).

Parameters

a_ltd : float LTD amplitude. Default: 14e-5 (Clopath 2010, Table 1). a_ltp : float LTP amplitude. Default: 8e-5. tau_x : float Pre-synaptic trace decay (ms). Default: 15. tau_minus : float Slow voltage trace decay (ms). Default: 10. tau_plus : float Fast voltage trace decay (ms). Default: 7. theta_minus : float LTD voltage threshold (mV). Default: -70.6 (rest). theta_plus : float LTP voltage threshold (mV). Default: -45.3 (depolarization). w_min, w_max : float Weight bounds.

Source code in src/sc_neurocore/synapses/clopath_stdp.py
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
@dataclass
class ClopathSTDP:
    """Voltage-based STDP rule (Clopath et al. 2010).

    Plasticity is driven by the post-synaptic membrane voltage rather
    than post spikes: LTD by a pre spike meeting recent depolarization,
    LTP by sustained depolarization meeting the filtered pre trace.

    Parameters
    ----------
    a_ltd : float
        LTD amplitude. Default: 14e-5 (Clopath 2010, Table 1).
    a_ltp : float
        LTP amplitude. Default: 8e-5.
    tau_x : float
        Pre-synaptic trace decay (ms). Default: 15.
    tau_minus : float
        Slow voltage trace decay (ms). Default: 10.
    tau_plus : float
        Fast voltage trace decay (ms). Default: 7.
    theta_minus : float
        LTD voltage threshold (mV). Default: -70.6 (rest).
    theta_plus : float
        LTP voltage threshold (mV). Default: -45.3 (depolarization).
    w_min, w_max : float
        Weight bounds.
    """

    a_ltd: float = 14e-5
    a_ltp: float = 8e-5
    tau_x: float = 15.0
    tau_minus: float = 10.0
    tau_plus: float = 7.0
    theta_minus: float = -70.6
    theta_plus: float = -45.3
    w_min: float = 0.0
    w_max: float = 1.0
    weight: float = 0.5

    def __post_init__(self):
        self.x_bar = 0.0  # low-pass filtered pre-synaptic spike train
        self.u_bar_minus = 0.0  # slow low-pass of post voltage (LTD gate)
        self.u_bar_plus = 0.0  # fast low-pass of post voltage (LTP gate)

    def step(self, pre_spike: bool, u_post: float, dt: float = 1.0) -> float:
        """Advance one timestep.

        Parameters
        ----------
        pre_spike : bool
            Whether the pre-synaptic neuron spiked.
        u_post : float
            Post-synaptic membrane voltage (mV).
        dt : float
            Timestep in ms.

        Returns
        -------
        float
            Updated weight.
        """
        # Per-step exponential decay factors for the three traces.
        k_x = math.exp(-dt / self.tau_x)
        k_minus = math.exp(-dt / self.tau_minus)
        k_plus = math.exp(-dt / self.tau_plus)

        # LTD on a pre-synaptic spike, gated by recent post depolarization
        # above the resting threshold (Clopath 2010, Eq. 2).
        if pre_spike:
            self.weight -= self.a_ltd * self.x_bar * max(0.0, self.u_bar_minus - self.theta_minus)

        # LTP every timestep: instantaneous depolarization above theta_plus
        # AND filtered voltage above theta_minus, weighted by the pre trace
        # (Clopath 2010, Eq. 3).
        post_term = max(0.0, u_post - self.theta_plus)
        pre_gate = max(0.0, self.u_bar_plus - self.theta_minus)
        if post_term > 0 and pre_gate > 0:
            self.weight += self.a_ltp * self.x_bar * post_term * pre_gate

        self.weight = max(self.w_min, min(self.w_max, self.weight))

        # Trace updates come last: the plasticity terms above used the
        # values from before this timestep (exact exponential filters).
        self.x_bar = self.x_bar * k_x + (1.0 if pre_spike else 0.0)
        self.u_bar_minus = k_minus * self.u_bar_minus + (1 - k_minus) * u_post
        self.u_bar_plus = k_plus * self.u_bar_plus + (1 - k_plus) * u_post

        return self.weight

    def reset(self):
        """Clear the pre-synaptic and both voltage traces."""
        self.x_bar = 0.0
        self.u_bar_minus = 0.0
        self.u_bar_plus = 0.0

step(pre_spike, u_post, dt=1.0)

Advance one timestep.

Parameters

pre_spike : bool Whether the pre-synaptic neuron spiked. u_post : float Post-synaptic membrane voltage (mV). dt : float Timestep in ms.

Returns

float Updated weight.

Source code in src/sc_neurocore/synapses/clopath_stdp.py
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
def step(self, pre_spike: bool, u_post: float, dt: float = 1.0) -> float:
    """Advance the Clopath voltage-based plasticity rule by one timestep.

    Parameters
    ----------
    pre_spike : bool
        Whether the pre-synaptic neuron spiked.
    u_post : float
        Post-synaptic membrane voltage (mV).
    dt : float
        Timestep in ms.

    Returns
    -------
    float
        Updated weight, clipped to [w_min, w_max].
    """
    # Depression: requires a pre-synaptic spike AND the slow voltage
    # average above theta_minus (Clopath 2010, Eq. 2). Uses the traces
    # as they stood BEFORE this step's update.
    if pre_spike:
        depol = self.u_bar_minus - self.theta_minus
        if depol > 0.0:
            self.weight -= self.a_ltd * self.x_bar * depol

    # Potentiation: instantaneous voltage above theta_plus AND the fast
    # voltage average above theta_minus (Clopath 2010, Eq. 3); the pre
    # contribution enters through the x_bar spike trace.
    above_plus = u_post - self.theta_plus
    above_minus = self.u_bar_plus - self.theta_minus
    if above_plus > 0.0 and above_minus > 0.0:
        self.weight += self.a_ltp * self.x_bar * above_plus * above_minus

    # Hard bounds on the weight.
    if self.weight < self.w_min:
        self.weight = self.w_min
    elif self.weight > self.w_max:
        self.weight = self.w_max

    # Trace updates come AFTER the weight change: exact exponential
    # filters, so there is no double-decay.
    self.x_bar = self.x_bar * math.exp(-dt / self.tau_x) + (1.0 if pre_spike else 0.0)
    a_minus = math.exp(-dt / self.tau_minus)
    a_plus = math.exp(-dt / self.tau_plus)
    self.u_bar_minus = a_minus * self.u_bar_minus + (1.0 - a_minus) * u_post
    self.u_bar_plus = a_plus * self.u_bar_plus + (1.0 - a_plus) * u_post

    return self.weight

Tripartite Synapse (Astrocyte Coupling)

sc_neurocore.synapses.tripartite.TripartiteSynapse dataclass

Synapse with bidirectional astrocyte coupling.

Parameters

base_weight : float Baseline synaptic weight. glut_per_spike : float IP3 production rate per pre-synaptic spike (µM/s). ca_threshold : float Astrocyte Ca²⁺ threshold for gliotransmitter release (µM). facilitation : float Multiplicative gain when astrocyte is active (> 1 for facilitation). depression_rate : float Weight depression rate when astrocyte Ca²⁺ is below threshold. w_min, w_max : float Weight bounds.

Source code in src/sc_neurocore/synapses/tripartite.py
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
@dataclass
class TripartiteSynapse:
    """Synapse whose strength is modulated by an attached astrocyte.

    Pre-synaptic spikes release glutamate, which drives the astrocyte's
    IP3/Ca²⁺ cascade; supra-threshold astrocyte Ca²⁺ facilitates the
    synapse, while sub-threshold Ca²⁺ lets the weight relax back toward
    baseline.

    Parameters
    ----------
    base_weight : float
        Baseline synaptic weight.
    glut_per_spike : float
        IP3 production rate per pre-synaptic spike (µM/s).
    ca_threshold : float
        Astrocyte Ca²⁺ threshold for gliotransmitter release (µM).
    facilitation : float
        Multiplicative gain when astrocyte is active (> 1 for facilitation).
    depression_rate : float
        Weight depression rate when astrocyte Ca²⁺ is below threshold.
    w_min, w_max : float
        Weight bounds.
    """

    base_weight: float = 0.5
    glut_per_spike: float = 2.0
    ca_threshold: float = 0.3
    facilitation: float = 1.5
    depression_rate: float = 0.001
    w_min: float = 0.0
    w_max: float = 1.0

    def __post_init__(self):
        # Mutable runtime state lives outside the dataclass fields.
        self.weight = self.base_weight
        self.astrocyte = AstrocyteModel()
        self._glut_current = 0.0  # running glutamate signal seen by the astrocyte

    def step(self, pre_spike: bool, post_spike: bool, dt: float = 0.01) -> float:
        """Advance one timestep.

        Parameters
        ----------
        pre_spike : bool
            Pre-synaptic spike.
        post_spike : bool
            Post-synaptic spike (unused in basic model, reserved for Hebbian extension).
        dt : float
            Timestep in seconds.

        Returns
        -------
        float
            Effective synaptic weight after astrocyte modulation.
        """
        # A pre spike releases glutamate; the pool then decays with tau ~ 0.2 s.
        glut = self._glut_current
        if pre_spike:
            glut += self.glut_per_spike
        glut *= math.exp(-dt / 0.2)
        self._glut_current = glut

        # Glutamate drives the astrocyte's IP3/Ca²⁺ dynamics.
        self.astrocyte.dt = dt
        ca = self.astrocyte.step(glut)

        # Supra-threshold Ca²⁺ facilitates; otherwise relax toward baseline.
        excess = ca - self.ca_threshold
        if excess > 0:
            new_w = self.weight + self.facilitation * excess * dt
        else:
            new_w = self.weight + (self.base_weight - self.weight) * self.depression_rate
        self.weight = max(self.w_min, min(self.w_max, new_w))
        return self.weight

    @property
    def ca(self) -> float:
        """Current astrocyte Ca²⁺ concentration (µM)."""
        return self.astrocyte.ca

    @property
    def ip3(self) -> float:
        """Current astrocyte IP3 concentration (µM)."""
        return self.astrocyte.ip3

    def effective_weight(self) -> float:
        """Return the current effective synaptic weight."""
        return self.weight

    def reset(self):
        """Restore the baseline weight and clear astrocyte + glutamate state."""
        self.weight = self.base_weight
        self.astrocyte.reset()
        self._glut_current = 0.0

ca property

Current astrocyte Ca²⁺ concentration (µM).

ip3 property

Current astrocyte IP3 concentration (µM).

step(pre_spike, post_spike, dt=0.01)

Advance one timestep.

Parameters

pre_spike : bool Pre-synaptic spike. post_spike : bool Post-synaptic spike (unused in basic model, reserved for Hebbian extension). dt : float Timestep in seconds.

Returns

float Effective synaptic weight after astrocyte modulation, clipped to [w_min, w_max].

Source code in src/sc_neurocore/synapses/tripartite.py
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
def step(self, pre_spike: bool, post_spike: bool, dt: float = 0.01) -> float:
    """Advance the tripartite synapse by one timestep.

    Parameters
    ----------
    pre_spike : bool
        Pre-synaptic spike.
    post_spike : bool
        Post-synaptic spike (unused in basic model, reserved for Hebbian extension).
    dt : float
        Timestep in seconds.

    Returns
    -------
    float
        Effective synaptic weight after astrocyte modulation.
    """
    # A pre spike releases glutamate; the pool then decays with tau ~ 0.2 s.
    glut = self._glut_current
    if pre_spike:
        glut += self.glut_per_spike
    glut *= math.exp(-dt / 0.2)
    self._glut_current = glut

    # Glutamate drives the astrocyte's IP3/Ca²⁺ dynamics.
    self.astrocyte.dt = dt
    ca = self.astrocyte.step(glut)

    # Supra-threshold Ca²⁺ → gliotransmitter-driven facilitation;
    # otherwise the weight relaxes slowly back toward baseline.
    excess = ca - self.ca_threshold
    if excess > 0:
        new_w = self.weight + self.facilitation * excess * dt
    else:
        new_w = self.weight + (self.base_weight - self.weight) * self.depression_rate
    self.weight = max(self.w_min, min(self.w_max, new_w))
    return self.weight

effective_weight()

Current effective synaptic weight.

Source code in src/sc_neurocore/synapses/tripartite.py
116
117
118
def effective_weight(self) -> float:
    """Return the synapse's current (astrocyte-modulated) weight."""
    return self.weight

Gap Junction (Electrical Synapse)

sc_neurocore.synapses.gap_junction.GapJunction dataclass

Bidirectional electrical synapse.

Parameters

conductance : float Gap junction conductance g_c (nS). Typical: 0.01-1.0 nS. Bennett & Zukin, Neuron 2004. rectification : float Rectification factor in [0, 1]. 0 = fully bidirectional (ohmic), 1 = fully rectifying (current flows in one direction only). Default 0 (standard gap junction).

Source code in src/sc_neurocore/synapses/gap_junction.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
@dataclass
class GapJunction:
    """Bidirectional electrical synapse.

    Parameters
    ----------
    conductance : float
        Gap junction conductance g_c (nS). Typical: 0.01-1.0 nS.
        Bennett & Zukin, Neuron 2004.
    rectification : float
        Rectification factor in [0, 1]. 0 = fully bidirectional (ohmic),
        1 = fully rectifying (current flows in one direction only).
        Default 0 (standard gap junction).
    """

    conductance: float = 0.1
    rectification: float = 0.0

    def current(self, v_pre: float, v_post: float) -> float:
        """Compute gap junction current flowing INTO v_post.

        I_gap = g_c * (V_pre - V_post) * rectification_factor

        Positive current depolarizes post. The same junction produces
        equal and opposite current for the pre-synaptic neuron.
        """
        dv = v_pre - v_post
        if self.rectification > 0 and dv < 0:
            # Rectification attenuates current flowing in the negative
            # direction (pre hyperpolarized relative to post).
            return self.conductance * dv * (1.0 - self.rectification)
        return self.conductance * dv

    def current_matrix(self, voltages: np.ndarray, adjacency: np.ndarray) -> np.ndarray:
        """Compute gap junction currents for a population.

        Fix: rectification is now applied here as well, so the matrix
        form is consistent with `current()` (it was previously ignored,
        making the two methods disagree for rectification > 0).

        Parameters
        ----------
        voltages : np.ndarray, shape (N,)
            Membrane voltages of all neurons.
        adjacency : np.ndarray, shape (N, N)
            Binary or weighted adjacency matrix. A[i,j] = 1 means
            neurons i and j are connected by a gap junction.

        Returns
        -------
        np.ndarray, shape (N,)
            Net gap junction current for each neuron.
        """
        # dv[i, j] = V[j] - V[i]: driving force on neuron i from neighbor j.
        dv_matrix = voltages[np.newaxis, :] - voltages[:, np.newaxis]
        currents = self.conductance * dv_matrix * adjacency
        if self.rectification > 0:
            # Attenuate negative-direction entries, matching current().
            currents = np.where(dv_matrix < 0, currents * (1.0 - self.rectification), currents)
        return currents.sum(axis=1)

current(v_pre, v_post)

Compute gap junction current flowing INTO v_post.

I_gap = g_c * (V_pre - V_post) * rectification_factor

Positive current depolarizes post. The same junction produces equal and opposite current for the pre-synaptic neuron.

Source code in src/sc_neurocore/synapses/gap_junction.py
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def current(self, v_pre: float, v_post: float) -> float:
    """Compute gap junction current flowing INTO v_post.

    I_gap = g_c * (V_pre - V_post) * rectification_factor

    Positive current depolarizes post; the pre-synaptic neuron receives
    the equal and opposite current through the same junction.
    """
    dv = v_pre - v_post
    factor = 1.0
    if self.rectification > 0 and dv < 0:
        # Attenuate current flowing in the rectified (negative) direction.
        factor = 1.0 - self.rectification
    return self.conductance * dv * factor

current_matrix(voltages, adjacency)

Compute gap junction currents for a population.

Parameters

voltages : np.ndarray, shape (N,) Membrane voltages of all neurons. adjacency : np.ndarray, shape (N, N) Binary or weighted adjacency matrix. A[i,j] = 1 means neurons i and j are connected by a gap junction.

Returns

np.ndarray, shape (N,) Net gap junction current for each neuron.

Source code in src/sc_neurocore/synapses/gap_junction.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
def current_matrix(self, voltages: np.ndarray, adjacency: np.ndarray) -> np.ndarray:
    """Compute gap junction currents for a population.

    Fix: rectification is now applied here as well, so the matrix form
    is consistent with `current()` (it was previously ignored, making
    the two methods disagree for rectification > 0). The unused local
    `N` has also been removed.

    Parameters
    ----------
    voltages : np.ndarray, shape (N,)
        Membrane voltages of all neurons.
    adjacency : np.ndarray, shape (N, N)
        Binary or weighted adjacency matrix. A[i,j] = 1 means
        neurons i and j are connected by a gap junction.

    Returns
    -------
    np.ndarray, shape (N,)
        Net gap junction current for each neuron.
    """
    # dv[i, j] = V[j] - V[i]: driving force on neuron i from neighbor j.
    dv_matrix = voltages[np.newaxis, :] - voltages[:, np.newaxis]
    currents = self.conductance * dv_matrix * adjacency
    if self.rectification > 0:
        # Attenuate negative-direction entries, matching current().
        currents = np.where(dv_matrix < 0, currents * (1.0 - self.rectification), currents)
    return currents.sum(axis=1)