Skip to main content

sc_neurocore_engine/neurons/
rate.rs

1// SPDX-License-Identifier: AGPL-3.0-or-later
2// Commercial license available
3// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
4// © Code 2020–2026 Miroslav Šotek. All rights reserved.
5// ORCID: 0009-0009-3560-0851
6// Contact: www.anulum.li | protoscience@anulum.li
7// SC-NeuroCore — Rate-based models, synaptic plasticity neurons, and
8
9//! Rate-based models, synaptic plasticity neurons, and other special types.
10
11/// McCulloch-Pitts 1943 — binary threshold unit.
12#[derive(Clone, Debug)]
13pub struct McCullochPittsNeuron {
14    pub theta: f64,
15}
16
17impl McCullochPittsNeuron {
18    pub fn new(theta: f64) -> Self {
19        Self { theta }
20    }
21    pub fn step(&self, weighted_input: f64) -> i32 {
22        if weighted_input >= self.theta {
23            1
24        } else {
25            0
26        }
27    }
28}
29impl Default for McCullochPittsNeuron {
30    fn default() -> Self {
31        Self::new(1.0)
32    }
33}
34
35/// Sigmoid rate neuron — Wilson-Cowan-style single unit.
36#[derive(Clone, Debug)]
37pub struct SigmoidRateNeuron {
38    pub r: f64,
39    pub tau: f64,
40    pub beta: f64,
41    pub theta: f64,
42    pub dt: f64,
43}
44
45impl SigmoidRateNeuron {
46    pub fn new() -> Self {
47        Self {
48            r: 0.0,
49            tau: 10.0,
50            beta: 1.0,
51            theta: 0.0,
52            dt: 0.1,
53        }
54    }
55    pub fn step(&mut self, current: f64) -> f64 {
56        let sigma = 1.0 / (1.0 + (-self.beta * (current - self.theta)).exp());
57        self.r += (-self.r + sigma) / self.tau * self.dt;
58        self.r
59    }
60    pub fn reset(&mut self) {
61        self.r = 0.0;
62    }
63}
64impl Default for SigmoidRateNeuron {
65    fn default() -> Self {
66        Self::new()
67    }
68}
69
70/// Threshold-linear rate neuron — ReLU-like firing curve.
71#[derive(Clone, Debug)]
72pub struct ThresholdLinearRateNeuron {
73    pub r: f64,
74    pub theta: f64,
75    pub gain: f64,
76}
77
78impl ThresholdLinearRateNeuron {
79    pub fn new() -> Self {
80        Self {
81            r: 0.0,
82            theta: 0.0,
83            gain: 1.0,
84        }
85    }
86    pub fn step(&mut self, current: f64) -> f64 {
87        self.r = self.gain * (current - self.theta).max(0.0);
88        self.r
89    }
90    pub fn reset(&mut self) {
91        self.r = 0.0;
92    }
93}
94impl Default for ThresholdLinearRateNeuron {
95    fn default() -> Self {
96        Self::new()
97    }
98}
99
100/// Li-Rinzel IP3R astrocyte model — Ca²⁺ dynamics.
101#[derive(Clone, Debug)]
102pub struct AstrocyteModel {
103    pub ca: f64,
104    pub h: f64,
105    pub ip3: f64,
106    pub v_er: f64,
107    pub k_er: f64,
108    pub v_serca: f64,
109    pub d1: f64,
110    pub d2: f64,
111    pub d3: f64,
112    pub d5: f64,
113    pub c0: f64,
114    pub c1: f64,
115    pub dt: f64,
116}
117
118impl AstrocyteModel {
119    pub fn new() -> Self {
120        Self {
121            ca: 0.05,
122            h: 0.8,
123            ip3: 0.5,
124            v_er: 0.9,
125            k_er: 0.15,
126            v_serca: 0.4,
127            d1: 0.13,
128            d2: 1.049,
129            d3: 0.9434,
130            d5: 0.08234,
131            c0: 2.0,
132            c1: 0.185,
133            dt: 0.01,
134        }
135    }
136    pub fn step(&mut self, current: f64) -> f64 {
137        let ca_er = (self.c0 - self.ca) / self.c1;
138        let m_inf = self.ip3 / (self.ip3 + self.d1);
139        let n_inf = self.ca / (self.ca + self.d5);
140        let j_chan = self.v_er * (m_inf * n_inf * self.h).powi(3) * (ca_er - self.ca);
141        let j_leak = self.k_er * (ca_er - self.ca);
142        let j_pump = self.v_serca * self.ca.powi(2) / (self.ca.powi(2) + self.k_er.powi(2));
143        let q2 = self.d2 * (self.ip3 + self.d1) / (self.ip3 + self.d3);
144        let h_inf = q2 / (q2 + self.ca);
145        let tau_h = 1.0 / (0.2 * (q2 + self.ca));
146        self.ca += (j_chan + j_leak - j_pump + current) * self.dt;
147        self.ca = self.ca.max(0.0);
148        self.h += (h_inf - self.h) / tau_h * self.dt;
149        self.ca
150    }
151    pub fn reset(&mut self) {
152        self.ca = 0.05;
153        self.h = 0.8;
154        self.ip3 = 0.5;
155    }
156}
157impl Default for AstrocyteModel {
158    fn default() -> Self {
159        Self::new()
160    }
161}
162
163/// Tsodyks-Markram 1997 — LIF with short-term synaptic plasticity.
164#[derive(Clone, Debug)]
165pub struct TsodyksMarkramNeuron {
166    pub v: f64,
167    pub x: f64,
168    pub u: f64,
169    pub v_rest: f64,
170    pub v_reset: f64,
171    pub v_threshold: f64,
172    pub tau_m: f64,
173    pub tau_d: f64,
174    pub tau_f: f64,
175    pub u_se: f64,
176    pub a_se: f64,
177    pub r_m: f64,
178    pub dt: f64,
179}
180
181impl TsodyksMarkramNeuron {
182    pub fn new() -> Self {
183        Self {
184            v: -65.0,
185            x: 1.0,
186            u: 0.2,
187            v_rest: -65.0,
188            v_reset: -65.0,
189            v_threshold: -50.0,
190            tau_m: 20.0,
191            tau_d: 200.0,
192            tau_f: 600.0,
193            u_se: 0.2,
194            a_se: 50.0,
195            r_m: 1.0,
196            dt: 0.1,
197        }
198    }
199    pub fn step(&mut self, current: f64, presynaptic_spike: bool) -> i32 {
200        self.x += (1.0 - self.x) / self.tau_d * self.dt;
201        self.u += (self.u_se - self.u) / self.tau_f * self.dt;
202        let mut i_syn = 0.0;
203        if presynaptic_spike {
204            self.u += self.u_se * (1.0 - self.u);
205            i_syn = self.a_se * self.u * self.x;
206            self.x -= self.u * self.x;
207        }
208        self.v += (-(self.v - self.v_rest) + self.r_m * (i_syn + current)) / self.tau_m * self.dt;
209        if self.v >= self.v_threshold {
210            self.v = self.v_reset;
211            1
212        } else {
213            0
214        }
215    }
216    pub fn reset(&mut self) {
217        self.v = self.v_rest;
218        self.x = 1.0;
219        self.u = self.u_se;
220    }
221}
222impl Default for TsodyksMarkramNeuron {
223    fn default() -> Self {
224        Self::new()
225    }
226}
227
228/// Liquid Time-Constant neuron — input-dependent time constant. Hasani et al. 2021.
229#[derive(Clone, Debug)]
230pub struct LiquidTimeConstantNeuron {
231    pub x: f64,
232    pub tau_base: f64,
233    pub w_tau: f64,
234    pub w_x: f64,
235    pub w_in: f64,
236    pub bias: f64,
237    pub v_threshold: f64,
238    pub dt: f64,
239}
240
241impl LiquidTimeConstantNeuron {
242    pub fn new() -> Self {
243        Self {
244            x: 0.0,
245            tau_base: 10.0,
246            w_tau: -0.5,
247            w_x: 0.8,
248            w_in: 1.0,
249            bias: 0.0,
250            v_threshold: 1.0,
251            dt: 1.0,
252        }
253    }
254    pub fn step(&mut self, current: f64) -> i32 {
255        let sigma_tau = 1.0 / (1.0 + (-(self.w_tau * current + self.bias)).exp());
256        let tau = (self.tau_base * sigma_tau).max(0.1);
257        let f_target = (self.w_x * self.x + self.w_in * current).tanh();
258        self.x += self.dt / tau * (-self.x + f_target);
259        if self.x >= self.v_threshold {
260            self.x = 0.0;
261            1
262        } else {
263            0
264        }
265    }
266    pub fn reset(&mut self) {
267        self.x = 0.0;
268    }
269}
270impl Default for LiquidTimeConstantNeuron {
271    fn default() -> Self {
272        Self::new()
273    }
274}
275
276/// Compte WM — NMDA-based working-memory neuron. Compte et al. 2000.
277#[derive(Clone, Debug)]
278pub struct CompteWMNeuron {
279    pub v: f64,
280    pub s_ampa: f64,
281    pub s_nmda: f64,
282    pub x_nmda: f64,
283    pub s_gaba: f64,
284    pub g_l: f64,
285    pub g_ampa: f64,
286    pub g_nmda: f64,
287    pub g_gaba: f64,
288    pub e_l: f64,
289    pub e_exc: f64,
290    pub e_inh: f64,
291    pub c_m: f64,
292    pub mg: f64,
293    pub tau_ampa: f64,
294    pub tau_nmda: f64,
295    pub tau_x: f64,
296    pub alpha_nmda: f64,
297    pub v_threshold: f64,
298    pub v_reset: f64,
299    pub dt: f64,
300}
301
302impl CompteWMNeuron {
303    pub fn new() -> Self {
304        Self {
305            v: -70.0,
306            s_ampa: 0.0,
307            s_nmda: 0.0,
308            x_nmda: 0.0,
309            s_gaba: 0.0,
310            g_l: 0.025,
311            g_ampa: 0.005,
312            g_nmda: 0.165,
313            g_gaba: 0.013,
314            e_l: -70.0,
315            e_exc: 0.0,
316            e_inh: -70.0,
317            c_m: 0.5,
318            mg: 1.0,
319            tau_ampa: 2.0,
320            tau_nmda: 100.0,
321            tau_x: 2.0,
322            alpha_nmda: 0.5,
323            v_threshold: -50.0,
324            v_reset: -55.0,
325            dt: 0.1,
326        }
327    }
328    pub fn step(&mut self, current: f64, spike_in: bool) -> i32 {
329        if spike_in {
330            self.s_ampa += 1.0;
331            self.x_nmda += 1.0;
332        }
333        self.s_ampa *= (-self.dt / self.tau_ampa).exp();
334        self.s_nmda += (-self.s_nmda / self.tau_nmda
335            + self.alpha_nmda * self.x_nmda * (1.0 - self.s_nmda))
336            * self.dt;
337        self.x_nmda *= (-self.dt / self.tau_x).exp();
338        self.s_gaba *= (-self.dt / 5.0).exp();
339        let mg_block = 1.0 / (1.0 + self.mg / 3.57 * (-0.062 * self.v).exp());
340        let i_l = self.g_l * (self.v - self.e_l);
341        let i_ampa = self.g_ampa * self.s_ampa * (self.v - self.e_exc);
342        let i_nmda = self.g_nmda * mg_block * self.s_nmda * (self.v - self.e_exc);
343        let i_gaba = self.g_gaba * self.s_gaba * (self.v - self.e_inh);
344        self.v += (-i_l - i_ampa - i_nmda - i_gaba + current) / self.c_m * self.dt;
345        if self.v >= self.v_threshold {
346            self.v = self.v_reset;
347            self.s_gaba += 1.0;
348            1
349        } else {
350            0
351        }
352    }
353    pub fn reset(&mut self) {
354        self.v = self.e_l;
355        self.s_ampa = 0.0;
356        self.s_nmda = 0.0;
357        self.x_nmda = 0.0;
358        self.s_gaba = 0.0;
359    }
360}
361impl Default for CompteWMNeuron {
362    fn default() -> Self {
363        Self::new()
364    }
365}
366
367/// Parallel Spiking Neuron — convolution-based filter. Fang et al. 2023.
368#[derive(Clone, Debug)]
369pub struct ParallelSpikingNeuron {
370    pub kernel: Vec<f64>,
371    pub buffer: Vec<f64>,
372    pub v_threshold: f64,
373    ptr: usize,
374}
375
376impl ParallelSpikingNeuron {
377    pub fn new(kernel_size: usize, v_threshold: f64) -> Self {
378        let k = 1.0 / kernel_size as f64;
379        Self {
380            kernel: vec![k; kernel_size],
381            buffer: vec![0.0; kernel_size],
382            v_threshold,
383            ptr: 0,
384        }
385    }
386    pub fn step(&mut self, current: f64) -> i32 {
387        let ks = self.buffer.len();
388        self.buffer[self.ptr % ks] = current;
389        self.ptr += 1;
390        let n = self.ptr.min(ks);
391        let score: f64 = self.kernel[..n]
392            .iter()
393            .zip(self.buffer[..n].iter())
394            .map(|(&w, &b)| w * b)
395            .sum();
396        if score >= self.v_threshold {
397            self.buffer.fill(0.0);
398            1
399        } else {
400            0
401        }
402    }
403    pub fn reset(&mut self) {
404        self.buffer.fill(0.0);
405        self.ptr = 0;
406    }
407}
408
409/// Fractional-order LIF — Grünwald-Letnikov approximation. Teka et al. 2014.
410#[derive(Clone, Debug)]
411pub struct FractionalLIFNeuron {
412    pub v: f64,
413    pub v_rest: f64,
414    pub v_reset: f64,
415    pub v_threshold: f64,
416    pub alpha: f64,
417    pub resistance: f64,
418    pub dt: f64,
419    history: Vec<f64>,
420    gl_coeffs: Vec<f64>,
421    _max_hist: usize,
422}
423
424impl FractionalLIFNeuron {
425    pub fn new(alpha: f64, max_hist: usize) -> Self {
426        let mut coeffs = vec![0.0; max_hist + 1];
427        coeffs[0] = 1.0;
428        for j in 1..=max_hist {
429            coeffs[j] = coeffs[j - 1] * (1.0 - (alpha + 1.0) / j as f64);
430        }
431        Self {
432            v: 0.0,
433            v_rest: 0.0,
434            v_reset: 0.0,
435            v_threshold: 1.0,
436            alpha,
437            resistance: 1.0,
438            dt: 1.0,
439            history: vec![0.0; max_hist],
440            gl_coeffs: coeffs,
441            _max_hist: max_hist,
442        }
443    }
444    pub fn step(&mut self, current: f64) -> i32 {
445        // Grünwald-Letnikov: D^α v ≈ (1/dt^α) Σ_j c_j v(t-j·dt)
446        let mut gl_sum = 0.0;
447        let n = self.history.len().min(self.gl_coeffs.len() - 1);
448        for j in 0..n {
449            gl_sum += self.gl_coeffs[j + 1] * self.history[n - 1 - j];
450        }
451        let rhs = -(self.v - self.v_rest) + self.resistance * current;
452        self.v = rhs * self.dt.powf(self.alpha) - gl_sum;
453        // Shift history
454        let len = self.history.len();
455        if len > 0 {
456            for i in 0..len - 1 {
457                self.history[i] = self.history[i + 1];
458            }
459            self.history[len - 1] = self.v;
460        }
461        if self.v >= self.v_threshold {
462            self.v = self.v_reset;
463            1
464        } else {
465            0
466        }
467    }
468    pub fn reset(&mut self) {
469        self.v = self.v_rest;
470        self.history.fill(0.0);
471    }
472}
473
474/// Siegert transfer function — analytical stationary firing rate of a LIF neuron.
475#[derive(Clone, Debug)]
476pub struct SiegertTransferFunction {
477    pub tau_m: f64,
478    pub tau_rp: f64,
479    pub v_threshold: f64,
480    pub v_reset: f64,
481    pub v_rest: f64,
482}
483
484impl SiegertTransferFunction {
485    pub fn new() -> Self {
486        Self {
487            tau_m: 20.0,
488            tau_rp: 2.0,
489            v_threshold: -50.0,
490            v_reset: -70.0,
491            v_rest: -65.0,
492        }
493    }
494    pub fn step(&self, current: f64) -> f64 {
495        let mu = self.v_rest + current;
496        let sigma = current.abs().max(1e-6) * 0.1;
497        let upper = (self.v_threshold - mu) / sigma;
498        let lower = (self.v_reset - mu) / sigma;
499        // Gauss-Legendre 20-point quadrature for ∫ exp(u²)(1+erf(u)) du
500        let nodes = [
501            -0.993128599185095,
502            -0.963971927277914,
503            -0.912234428251326,
504            -0.839116971822219,
505            -0.746331906460151,
506            -0.636053680726515,
507            -0.510867001950827,
508            -0.373706088715420,
509            -0.227785851141645,
510            -0.076526521133497,
511            0.076526521133497,
512            0.227785851141645,
513            0.373706088715420,
514            0.510867001950827,
515            0.636053680726515,
516            0.746331906460151,
517            0.839116971822219,
518            0.912234428251326,
519            0.963971927277914,
520            0.993128599185095,
521        ];
522        let weights = [
523            0.017614007139152,
524            0.040601429800387,
525            0.062672048334109,
526            0.083276741576704,
527            0.101930119817240,
528            0.118194531961518,
529            0.131688638449177,
530            0.142096109318382,
531            0.149172986472604,
532            0.152753387130726,
533            0.152753387130726,
534            0.149172986472604,
535            0.142096109318382,
536            0.131688638449177,
537            0.118194531961518,
538            0.101930119817240,
539            0.083276741576704,
540            0.062672048334109,
541            0.040601429800387,
542            0.017614007139152,
543        ];
544        let half = (upper - lower) / 2.0;
545        let mid = (upper + lower) / 2.0;
546        let mut integral = 0.0;
547        for (&node, &w) in nodes.iter().zip(weights.iter()) {
548            let u = mid + half * node;
549            let eu2 = (u * u).min(50.0).exp();
550            let erf_u = Self::erf_approx(u);
551            integral += w * eu2 * (1.0 + erf_u);
552        }
553        integral *= half;
554        let t_isi = self.tau_rp + self.tau_m * std::f64::consts::PI.sqrt() * integral;
555        1000.0 / t_isi.max(0.01)
556    }
557    fn erf_approx(x: f64) -> f64 {
558        // Abramowitz-Stegun approximation
559        let t = 1.0 / (1.0 + 0.3275911 * x.abs());
560        let poly = t
561            * (0.254829592
562                + t * (-0.284496736 + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))));
563        let result = 1.0 - poly * (-x * x).exp();
564        if x >= 0.0 {
565            result
566        } else {
567            -result
568        }
569    }
570}
571impl Default for SiegertTransferFunction {
572    fn default() -> Self {
573        Self::new()
574    }
575}
576
577/// Amari neural field — continuous attractor with Mexican hat kernel.
578#[derive(Clone, Debug)]
579pub struct AmariNeuralField {
580    pub u: Vec<f64>,
581    pub n: usize,
582    pub tau: f64,
583    pub a_exc: f64,
584    pub a_width: f64,
585    pub b_inh: f64,
586    pub b_width: f64,
587    pub dx: f64,
588    pub dt: f64,
589    w: Vec<f64>,
590}
591
592impl AmariNeuralField {
593    pub fn new(n: usize) -> Self {
594        let dx = 0.5;
595        let mut w = vec![0.0; n];
596        let a_exc = 1.5;
597        let a_width = 1.0;
598        let b_inh = 0.75;
599        let b_width = 2.0;
600        // Exponential Mexican hat kernel, rolled to match Python's np.roll
601        for i in 0..n {
602            let x = ((i as isize - n as isize / 2).unsigned_abs() as f64) * dx;
603            w[i] = a_exc * (-a_width * x).exp() - b_inh * (-b_width * x).exp();
604        }
605        // Roll by -n/2 to match Python's np.roll(k, -n//2)
606        w.rotate_left(n / 2);
607        Self {
608            u: vec![0.0; n],
609            n,
610            tau: 10.0,
611            a_exc,
612            a_width,
613            b_inh,
614            b_width,
615            dx,
616            dt: 0.5,
617            w,
618        }
619    }
620    pub fn step(&mut self, input: &[f64]) -> f64 {
621        let n = self.n;
622        // f(u) = max(0, u)
623        let f_u: Vec<f64> = self.u.iter().map(|&v| v.max(0.0)).collect();
624        // Circular convolution (matching Python FFT-based conv)
625        let mut conv = vec![0.0; n];
626        for i in 0..n {
627            let mut s = 0.0;
628            for j in 0..n {
629                let idx = (i + n - j) % n;
630                s += self.w[idx] * f_u[j];
631            }
632            conv[i] = s * self.dx;
633        }
634        for i in 0..n {
635            let inp = if i < input.len() { input[i] } else { 0.0 };
636            self.u[i] += (-self.u[i] + conv[i] + inp) / self.tau * self.dt;
637        }
638        // Return mean of ReLU activations
639        self.u.iter().map(|&v| v.max(0.0)).sum::<f64>() / n as f64
640    }
641    pub fn reset(&mut self) {
642        self.u.fill(0.0);
643    }
644}
645
646/// Leaky Compete-and-Fire — winner-take-all with lateral inhibition. Oster et al. 2009.
647#[derive(Clone, Debug)]
648pub struct LeakyCompeteFireNeuron {
649    pub v: Vec<f64>,
650    pub n_units: usize,
651    pub tau: f64,
652    pub v_threshold: f64,
653    pub w_inh: f64,
654    pub dt: f64,
655}
656
657impl LeakyCompeteFireNeuron {
658    pub fn new(n_units: usize) -> Self {
659        Self {
660            v: vec![0.0; n_units],
661            n_units,
662            tau: 10.0,
663            v_threshold: 1.0,
664            w_inh: 0.5,
665            dt: 1.0,
666        }
667    }
668
669    pub fn step(&mut self, currents: &[f64]) -> Vec<i32> {
670        let n = self.n_units;
671        for i in 0..n {
672            let c = if i < currents.len() { currents[i] } else { 0.0 };
673            self.v[i] += (-self.v[i] + c) / self.tau * self.dt;
674        }
675        let mut spikes = vec![0i32; n];
676        for i in 0..n {
677            if self.v[i] >= self.v_threshold {
678                spikes[i] = 1;
679                self.v[i] = 0.0;
680                for j in 0..n {
681                    if j != i {
682                        self.v[j] = (self.v[j] - self.w_inh).max(0.0);
683                    }
684                }
685            }
686        }
687        spikes
688    }
689
690    pub fn reset(&mut self) {
691        self.v.fill(0.0);
692    }
693}
694
695#[cfg(test)]
696mod tests {
697    use super::*;
698
699    #[test]
700    fn mcp_threshold() {
701        let n = McCullochPittsNeuron::default();
702        assert_eq!(n.step(2.0), 1);
703        assert_eq!(n.step(0.5), 0);
704    }
705    #[test]
706    fn sigmoid_rate() {
707        let mut n = SigmoidRateNeuron::new();
708        for _ in 0..100 {
709            n.step(5.0);
710        }
711        assert!(n.r > 0.5);
712    }
713    #[test]
714    fn tl_rate() {
715        let mut n = ThresholdLinearRateNeuron::new();
716        assert!(n.step(5.0) > 0.0);
717        assert!(n.step(-1.0) == 0.0);
718    }
719    #[test]
720    fn astrocyte_ca() {
721        let mut n = AstrocyteModel::new();
722        let mut max_ca = 0.0_f64;
723        for _ in 0..5000 {
724            let c = n.step(0.1);
725            max_ca = max_ca.max(c);
726        }
727        assert!(max_ca > 0.05);
728    }
729    #[test]
730    fn tm_fires() {
731        let mut n = TsodyksMarkramNeuron::new();
732        let t: i32 = (0..500).map(|_| n.step(50.0, false)).sum();
733        assert!(t > 0);
734    }
735    #[test]
736    fn ltc_fires() {
737        let mut n = LiquidTimeConstantNeuron {
738            v_threshold: 0.9,
739            ..LiquidTimeConstantNeuron::new()
740        };
741        let t: i32 = (0..100).map(|_| n.step(5.0)).sum();
742        assert!(t > 0);
743    }
744    #[test]
745    fn compte_fires() {
746        let mut n = CompteWMNeuron::new();
747        let t: i32 = (0..500).map(|_| n.step(5.0, false)).sum();
748        assert!(t > 0);
749    }
750    #[test]
751    fn psn_fires() {
752        let mut n = ParallelSpikingNeuron::new(4, 0.5);
753        let t: i32 = (0..20).map(|_| n.step(1.0)).sum();
754        assert!(t > 0);
755    }
756    #[test]
757    fn frac_lif_fires() {
758        let mut n = FractionalLIFNeuron::new(0.8, 50);
759        let t: i32 = (0..200).map(|_| n.step(2.0)).sum();
760        assert!(t > 0);
761    }
762    #[test]
763    fn siegert_rate() {
764        let n = SiegertTransferFunction::new();
765        let r = n.step(20.0);
766        assert!(r > 0.0);
767    }
768    #[test]
769    fn amari_activates() {
770        let mut n = AmariNeuralField::new(32);
771        let inp = vec![0.5; 32];
772        for _ in 0..100 {
773            n.step(&inp);
774        }
775        assert!(n.u.iter().any(|&x| x.abs() > 0.01));
776    }
777
778    // ── Multi-angle tests for rate models ──
779
780    // -- McCullochPitts --
781    #[test]
782    fn mcp_below_threshold() {
783        let n = McCullochPittsNeuron::default();
784        assert_eq!(n.step(0.0), 0);
785    }
786    #[test]
787    fn mcp_nan_no_panic() {
788        McCullochPittsNeuron::default().step(f64::NAN);
789    }
790
791    // -- SigmoidRate --
792    #[test]
793    fn sigmoid_rate_reset() {
794        let mut n = SigmoidRateNeuron::new();
795        for _ in 0..100 {
796            n.step(5.0);
797        }
798        n.reset();
799        assert!((n.r - 0.0).abs() < 1e-10);
800    }
801    #[test]
802    fn sigmoid_rate_bounded() {
803        let mut n = SigmoidRateNeuron::new();
804        for _ in 0..1000 {
805            n.step(1e4);
806        }
807        assert!(n.r.is_finite());
808        assert!(
809            n.r >= 0.0 && n.r <= 1.0,
810            "sigmoid output bounded [0,1]: {}",
811            n.r
812        );
813    }
814    #[test]
815    fn sigmoid_rate_nan_no_panic() {
816        SigmoidRateNeuron::new().step(f64::NAN);
817    }
818
819    // -- ThresholdLinear --
820    #[test]
821    fn tl_rate_reset() {
822        let mut n = ThresholdLinearRateNeuron::new();
823        for _ in 0..100 {
824            n.step(5.0);
825        }
826        n.reset();
827        assert!((n.r - 0.0).abs() < 1e-10);
828    }
829    #[test]
830    fn tl_rate_nan_no_panic() {
831        ThresholdLinearRateNeuron::new().step(f64::NAN);
832    }
833    #[test]
834    fn tl_rate_below_threshold() {
835        let mut n = ThresholdLinearRateNeuron::new();
836        assert!(n.step(-5.0) == 0.0, "below threshold → zero rate");
837    }
838
839    // -- Astrocyte --
840    #[test]
841    fn astrocyte_reset() {
842        let mut n = AstrocyteModel::new();
843        for _ in 0..1000 {
844            n.step(0.1);
845        }
846        n.reset();
847        assert!((n.ca - 0.05).abs() < 1e-10);
848    }
849    #[test]
850    fn astrocyte_nan_no_panic() {
851        AstrocyteModel::new().step(f64::NAN);
852    }
853    #[test]
854    fn astrocyte_ca_nonneg() {
855        let mut n = AstrocyteModel::new();
856        for _ in 0..5000 {
857            n.step(0.1);
858        }
859        assert!(n.ca >= 0.0, "Ca²⁺ must be non-negative");
860    }
861
862    // -- TsodyksMarkram --
863    #[test]
864    fn tm_reset() {
865        let mut n = TsodyksMarkramNeuron::new();
866        for _ in 0..100 {
867            n.step(50.0, false);
868        }
869        n.reset();
870        assert!((n.v - n.v_rest).abs() < 1e-10);
871        assert!((n.x - 1.0).abs() < 1e-10);
872    }
873    #[test]
874    fn tm_bounded() {
875        let mut n = TsodyksMarkramNeuron::new();
876        for _ in 0..1000 {
877            n.step(1e4, false);
878        }
879        assert!(n.v.is_finite());
880    }
881    #[test]
882    fn tm_nan_no_panic() {
883        TsodyksMarkramNeuron::new().step(f64::NAN, false);
884    }
885    #[test]
886    fn tm_stp_depression() {
887        let mut n = TsodyksMarkramNeuron::new();
888        for _ in 0..500 {
889            n.step(50.0, true);
890        }
891        // With repeated presynaptic spikes, x (available fraction) should decrease
892        assert!(
893            n.x < 1.0,
894            "STP depression: x should be < 1.0 after spikes: {}",
895            n.x
896        );
897    }
898
899    // -- LiquidTimeConstant --
900    #[test]
901    fn ltc_reset() {
902        let mut n = LiquidTimeConstantNeuron {
903            v_threshold: 0.9,
904            ..LiquidTimeConstantNeuron::new()
905        };
906        for _ in 0..50 {
907            n.step(5.0);
908        }
909        n.reset();
910        assert!((n.x - 0.0).abs() < 1e-10);
911    }
912    #[test]
913    fn ltc_nan_no_panic() {
914        LiquidTimeConstantNeuron::new().step(f64::NAN);
915    }
916
917    // -- CompteWM --
918    #[test]
919    fn compte_reset() {
920        let mut n = CompteWMNeuron::new();
921        for _ in 0..100 {
922            n.step(5.0, false);
923        }
924        n.reset();
925        assert!((n.v - n.e_l).abs() < 1e-10);
926    }
927    #[test]
928    fn compte_nan_no_panic() {
929        CompteWMNeuron::new().step(f64::NAN, false);
930    }
931
932    // -- ParallelSpiking --
933    #[test]
934    fn psn_reset() {
935        let mut n = ParallelSpikingNeuron::new(4, 0.5);
936        for _ in 0..20 {
937            n.step(1.0);
938        }
939        n.reset();
940    }
941    #[test]
942    fn psn_nan_no_panic() {
943        ParallelSpikingNeuron::new(4, 0.5).step(f64::NAN);
944    }
945
946    // -- FractionalLIF --
947    #[test]
948    fn frac_lif_reset() {
949        let mut n = FractionalLIFNeuron::new(0.8, 50);
950        for _ in 0..100 {
951            n.step(2.0);
952        }
953        n.reset();
954        assert!((n.v - n.v_rest).abs() < 1e-10);
955    }
956    #[test]
957    fn frac_lif_nan_no_panic() {
958        FractionalLIFNeuron::new(0.8, 50).step(f64::NAN);
959    }
960
961    // -- Siegert --
962    #[test]
963    fn siegert_zero() {
964        let n = SiegertTransferFunction::new();
965        let r = n.step(0.0);
966        assert!(r >= 0.0);
967    }
968    #[test]
969    fn siegert_nan_no_panic() {
970        SiegertTransferFunction::new().step(f64::NAN);
971    }
972
973    // -- Amari --
974    #[test]
975    fn amari_reset() {
976        let mut n = AmariNeuralField::new(32);
977        let inp = vec![0.5; 32];
978        for _ in 0..100 {
979            n.step(&inp);
980        }
981        n.reset();
982        assert!(n.u.iter().all(|&x| x == 0.0));
983    }
984    #[test]
985    fn amari_bounded() {
986        let mut n = AmariNeuralField::new(16);
987        let inp = vec![1e3; 16];
988        for _ in 0..1000 {
989            n.step(&inp);
990        }
991        assert!(n.u.iter().all(|x| x.is_finite()));
992    }
993
994    // -- LeakyCompeteFireNeuron tests --
995
996    #[test]
997    fn lcf_fires_with_strong_input() {
998        let mut n = LeakyCompeteFireNeuron::new(3);
999        let inp = vec![5.0, 0.0, 0.0];
1000        let mut any_spike = false;
1001        for _ in 0..200 {
1002            let spikes = n.step(&inp);
1003            if spikes.contains(&1) {
1004                any_spike = true;
1005            }
1006        }
1007        assert!(any_spike, "LeakyCompeteFire should fire with strong input");
1008    }
1009
1010    #[test]
1011    fn lcf_silent_without_input() {
1012        let mut n = LeakyCompeteFireNeuron::new(4);
1013        let inp = vec![0.0; 4];
1014        for _ in 0..200 {
1015            let spikes = n.step(&inp);
1016            assert!(
1017                spikes.iter().all(|&s| s == 0),
1018                "should be silent at zero input"
1019            );
1020        }
1021    }
1022
1023    #[test]
1024    fn lcf_winner_take_all() {
1025        let mut n = LeakyCompeteFireNeuron::new(3);
1026        // Unit 0 receives strong input, others receive moderate
1027        let inp = vec![5.0, 2.0, 2.0];
1028        let mut spike_counts = [0i32; 3];
1029        for _ in 0..1000 {
1030            let spikes = n.step(&inp);
1031            for (i, &s) in spikes.iter().enumerate() {
1032                spike_counts[i] += s;
1033            }
1034        }
1035        // Winner (unit 0) should spike more than losers due to lateral inhibition
1036        assert!(
1037            spike_counts[0] > spike_counts[1],
1038            "unit 0 ({}) should spike more than unit 1 ({}) — winner-take-all",
1039            spike_counts[0],
1040            spike_counts[1]
1041        );
1042    }
1043
1044    #[test]
1045    fn lcf_lateral_inhibition_suppresses() {
1046        let mut n = LeakyCompeteFireNeuron::new(2);
1047        n.w_inh = 2.0; // Strong inhibition
1048        let inp = vec![3.0, 3.0];
1049        let mut spike_counts = [0i32; 2];
1050        for _ in 0..500 {
1051            let spikes = n.step(&inp);
1052            for (i, &s) in spikes.iter().enumerate() {
1053                spike_counts[i] += s;
1054            }
1055        }
1056        // With equal input + strong inhibition, total spikes should be less
1057        // than with no inhibition (competitive suppression)
1058        let mut n_no_inh = LeakyCompeteFireNeuron::new(2);
1059        n_no_inh.w_inh = 0.0;
1060        let mut total_no_inh = 0i32;
1061        for _ in 0..500 {
1062            let spikes = n_no_inh.step(&inp);
1063            total_no_inh += spikes.iter().sum::<i32>();
1064        }
1065        let total_inh: i32 = spike_counts.iter().sum();
1066        assert!(
1067            total_inh <= total_no_inh,
1068            "inhibition ({}) should reduce total spikes vs no inhibition ({})",
1069            total_inh,
1070            total_no_inh
1071        );
1072    }
1073
1074    #[test]
1075    fn lcf_reset_clears_state() {
1076        let mut n = LeakyCompeteFireNeuron::new(4);
1077        let inp = vec![3.0; 4];
1078        for _ in 0..100 {
1079            n.step(&inp);
1080        }
1081        n.reset();
1082        assert!(
1083            n.v.iter().all(|&x| x == 0.0),
1084            "reset must zero all voltages"
1085        );
1086    }
1087
1088    #[test]
1089    fn lcf_voltages_bounded() {
1090        let mut n = LeakyCompeteFireNeuron::new(3);
1091        let inp = vec![1e6, 1e6, 1e6];
1092        for _ in 0..1000 {
1093            n.step(&inp);
1094        }
1095        assert!(
1096            n.v.iter().all(|x| x.is_finite()),
1097            "voltages must stay finite under extreme input"
1098        );
1099    }
1100
1101    #[test]
1102    fn lcf_negative_input_no_crash() {
1103        let mut n = LeakyCompeteFireNeuron::new(3);
1104        let inp = vec![-10.0, -5.0, -1.0];
1105        for _ in 0..500 {
1106            n.step(&inp);
1107        }
1108        assert!(
1109            n.v.iter().all(|x| x.is_finite()),
1110            "must handle negative input"
1111        );
1112    }
1113
1114    #[test]
1115    fn lcf_output_length_matches_units() {
1116        let n_units = 7;
1117        let mut n = LeakyCompeteFireNeuron::new(n_units);
1118        let inp = vec![1.0; n_units];
1119        let spikes = n.step(&inp);
1120        assert_eq!(spikes.len(), n_units, "output length must match n_units");
1121    }
1122}