// SPDX-License-Identifier: AGPL-3.0-or-later
// Commercial license available
// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
// © Code 2020–2026 Miroslav Šotek. All rights reserved.
// ORCID: 0009-0009-3560-0851
// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — Stochastic generators, spike-response models, and neural
// mass/population models.

//! Stochastic generators, spike-response models, and neural mass/population models.

use rand::{Rng, SeedableRng};
use rand_xoshiro::Xoshiro256PlusPlus;
13
14/// Poisson spike generator — P(spike in dt) = λ·dt.
15#[derive(Clone, Debug)]
16pub struct PoissonNeuron {
17    pub rate_hz: f64,
18    pub dt_ms: f64,
19    rng: Xoshiro256PlusPlus,
20}
21
22impl PoissonNeuron {
23    pub fn new(rate_hz: f64, dt_ms: f64, seed: u64) -> Self {
24        Self {
25            rate_hz,
26            dt_ms,
27            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
28        }
29    }
30    pub fn step(&mut self, rate_override: f64) -> i32 {
31        let r = if rate_override < 0.0 {
32            self.rate_hz
33        } else {
34            rate_override
35        };
36        let p = r * self.dt_ms / 1000.0;
37        if self.rng.random::<f64>() < p {
38            1
39        } else {
40            0
41        }
42    }
43    pub fn reset(&mut self) {}
44}
45
46/// Inhomogeneous Poisson — rate passed per step.
47#[derive(Clone, Debug)]
48pub struct InhomogeneousPoissonNeuron {
49    pub dt_ms: f64,
50    rng: Xoshiro256PlusPlus,
51}
52
53impl InhomogeneousPoissonNeuron {
54    pub fn new(dt_ms: f64, seed: u64) -> Self {
55        Self {
56            dt_ms,
57            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
58        }
59    }
60    pub fn step(&mut self, rate_hz: f64) -> i32 {
61        let p = rate_hz.max(0.0) * self.dt_ms / 1000.0;
62        if self.rng.random::<f64>() < p {
63            1
64        } else {
65            0
66        }
67    }
68    pub fn reset(&mut self) {}
69}
70
/// Gamma-renewal process — order-k Poisson with refractory ISI structure.
///
/// Inter-spike intervals follow Gamma(k, μ) with μ = λk, which keeps the mean
/// rate at λ while giving more regular (refractory-like) spacing for k > 1.
/// Spikes are drawn per step by thinning with the time-dependent hazard
/// h(t) = f(t) / S(t).
#[derive(Clone, Debug)]
pub struct GammaRenewalNeuron {
    pub rate_hz: f64,
    pub shape_k: u32,
    pub dt_ms: f64,
    // Elapsed time since the last emitted spike (ms); drives the hazard.
    time_since_spike: f64,
    rng: Xoshiro256PlusPlus,
}

impl GammaRenewalNeuron {
    /// Build with mean firing rate `rate_hz` and Gamma shape `shape_k`;
    /// the timestep is fixed at 1 ms.
    pub fn new(rate_hz: f64, shape_k: u32, seed: u64) -> Self {
        Self {
            rate_hz,
            shape_k,
            dt_ms: 1.0,
            time_since_spike: 0.0,
            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
        }
    }
    fn log_gamma_int(k: u32) -> f64 {
        // ln((k-1)!) for integer k
        (1..k).map(|i| (i as f64).ln()).sum()
    }
    /// Advance one step; returns 1 on a spike, 0 otherwise.
    /// A negative `rate_override` selects the stored `rate_hz`.
    pub fn step(&mut self, rate_override: f64) -> i32 {
        self.time_since_spike += self.dt_ms;
        let r = if rate_override < 0.0 {
            self.rate_hz
        } else {
            rate_override
        };
        // Per-ms event rate; with μ = λk below the mean ISI stays 1/λ.
        let lam = r / 1000.0;
        let k = self.shape_k;
        let t = self.time_since_spike;
        // Gamma hazard approximation: h(t) ≈ (λk)^k * t^(k-1) / Γ(k) * exp(-λk*t) / S(t)
        // Simplified: use Poisson sub-events approach
        let mu = lam * (k as f64);
        // log f(t) for the Gamma(k, μ) pdf; .max(1e-30) guards ln(0) at t == 0 or r == 0.
        let log_pdf = (k as f64 - 1.0) * (mu * t).max(1e-30).ln() + mu.max(1e-30).ln()
            - mu * t
            - Self::log_gamma_int(k);
        // Survival S(t) = 1 - P(k, μt); when S ≈ 0 (deep in the tail) fall back
        // to the asymptotic hazard μ to avoid dividing by ~0.
        let surv = 1.0 - self.incomplete_gamma_ratio(k, mu * t);
        let hazard = if surv > 1e-15 {
            log_pdf.exp() / surv
        } else {
            mu
        };
        // NOTE(review): p = hazard·dt is not clamped to 1; for coarse dt or large k
        // the thinning probability can exceed 1 — confirm dt_ms stays small vs 1/hazard.
        let p = hazard * self.dt_ms;
        if self.rng.random::<f64>() < p {
            self.time_since_spike = 0.0;
            1
        } else {
            0
        }
    }
    fn incomplete_gamma_ratio(&self, k: u32, x: f64) -> f64 {
        // Regularized lower incomplete gamma P(k, x) via series for integer k
        // P(k, x) = 1 - e^{-x} · Σ_{n=0}^{k-1} x^n / n!  (exact for integer k).
        if x <= 0.0 {
            return 0.0;
        }
        let mut sum = 0.0_f64;
        let mut term = 1.0_f64;
        for n in 0..k {
            if n > 0 {
                term *= x / n as f64;
            }
            sum += term;
        }
        1.0 - (-x).exp() * sum
    }
    /// Clear the renewal clock.
    pub fn reset(&mut self) {
        self.time_since_spike = 0.0;
    }
}
144
145/// Stochastic IF — Ornstein-Uhlenbeck noise on LIF membrane.
146#[derive(Clone, Debug)]
147pub struct StochasticIFNeuron {
148    pub v: f64,
149    pub v_rest: f64,
150    pub v_reset: f64,
151    pub v_threshold: f64,
152    pub tau_m: f64,
153    pub sigma: f64,
154    pub dt: f64,
155    rng: Xoshiro256PlusPlus,
156}
157
158impl StochasticIFNeuron {
159    pub fn new(seed: u64) -> Self {
160        Self {
161            v: -70.0,
162            v_rest: -70.0,
163            v_reset: -70.0,
164            v_threshold: -50.0,
165            tau_m: 20.0,
166            sigma: 3.0,
167            dt: 1.0,
168            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
169        }
170    }
171    fn randn(&mut self) -> f64 {
172        // Box-Muller
173        let u1: f64 = self.rng.random::<f64>().max(1e-30);
174        let u2: f64 = self.rng.random::<f64>();
175        (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
176    }
177    pub fn step(&mut self, current: f64) -> i32 {
178        let noise = self.sigma * (self.dt / self.tau_m).sqrt() * self.randn();
179        self.v += (-(self.v - self.v_rest) + current) / self.tau_m * self.dt + noise;
180        if self.v >= self.v_threshold {
181            self.v = self.v_reset;
182            1
183        } else {
184            0
185        }
186    }
187    pub fn reset(&mut self) {
188        self.v = self.v_rest;
189    }
190}
191
192/// Galves-Löcherbach 2013 — stochastic point process with memory.
193#[derive(Clone, Debug)]
194pub struct GalvesLocherbachNeuron {
195    pub v: f64,
196    pub v_rest: f64,
197    pub decay: f64,
198    pub threshold_rate: f64,
199    pub steepness: f64,
200    pub dt: f64,
201    rng: Xoshiro256PlusPlus,
202}
203
204impl GalvesLocherbachNeuron {
205    pub fn new(seed: u64) -> Self {
206        Self {
207            v: 0.0,
208            v_rest: 0.0,
209            decay: 0.95,
210            threshold_rate: 0.5,
211            steepness: 5.0,
212            dt: 1.0,
213            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
214        }
215    }
216    pub fn step(&mut self, weighted_input: f64) -> i32 {
217        self.v = self.decay * self.v + weighted_input;
218        let p = 1.0 / (1.0 + (-(self.steepness * (self.v - self.threshold_rate))).exp());
219        if self.rng.random::<f64>() < p * self.dt {
220            self.v = self.v_rest;
221            1
222        } else {
223            0
224        }
225    }
226    pub fn reset(&mut self) {
227        self.v = self.v_rest;
228    }
229}
230
/// SRM0 — Spike Response Model (kernel-based). Gerstner 1995.
#[derive(Clone, Debug)]
pub struct SpikeResponseNeuron {
    pub v: f64,
    pub v_threshold: f64,
    pub tau_eta: f64,
    pub tau_kappa: f64,
    pub eta_reset: f64,
    pub time_since_spike: f64,
    pub dt: f64,
}

impl SpikeResponseNeuron {
    /// Default kernel parameters; `time_since_spike` starts large so the
    /// refractory kernel is effectively zero at t = 0.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            v_threshold: 1.0,
            tau_eta: 10.0,
            tau_kappa: 5.0,
            eta_reset: -5.0,
            time_since_spike: 1000.0,
            dt: 1.0,
        }
    }

    /// Advance one step: v = η(t - t_last) + κ·input, spike when v crosses
    /// threshold (which restarts the refractory clock).
    pub fn step(&mut self, weighted_input: f64) -> i32 {
        self.time_since_spike += self.dt;
        // Refractory (after-spike) kernel and a single-step input kernel.
        let refractory = self.eta_reset * (-self.time_since_spike / self.tau_eta).exp();
        let input_psp = weighted_input * (1.0 - (-self.dt / self.tau_kappa).exp());
        self.v = refractory + input_psp;
        if self.v >= self.v_threshold {
            self.time_since_spike = 0.0;
            return 1;
        }
        0
    }

    /// Restore the freshly-constructed state.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.time_since_spike = 1000.0;
    }
}
impl Default for SpikeResponseNeuron {
    fn default() -> Self {
        Self::new()
    }
}
277
278/// GLM neuron — point-process generalized linear model. Pillow et al. 2008.
279#[derive(Clone, Debug)]
280pub struct GLMNeuron {
281    pub mu: f64,
282    pub dt_ms: f64,
283    pub k: Vec<f64>,
284    pub h: Vec<f64>,
285    stim_buf: Vec<f64>,
286    spike_buf: Vec<f64>,
287    rng: Xoshiro256PlusPlus,
288}
289
290impl GLMNeuron {
291    pub fn new(n_k: usize, n_h: usize, seed: u64) -> Self {
292        Self {
293            mu: -3.0,
294            dt_ms: 1.0,
295            k: vec![0.1; n_k],
296            h: vec![-0.5; n_h],
297            stim_buf: vec![0.0; n_k],
298            spike_buf: vec![0.0; n_h],
299            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
300        }
301    }
302    pub fn step(&mut self, stimulus: f64) -> i32 {
303        // Shift buffers
304        let nk = self.stim_buf.len();
305        let nh = self.spike_buf.len();
306        for i in (1..nk).rev() {
307            self.stim_buf[i] = self.stim_buf[i - 1];
308        }
309        if nk > 0 {
310            self.stim_buf[0] = stimulus;
311        }
312        let dot_k: f64 = self
313            .k
314            .iter()
315            .zip(self.stim_buf.iter())
316            .map(|(a, b)| a * b)
317            .sum();
318        let dot_h: f64 = self
319            .h
320            .iter()
321            .zip(self.spike_buf.iter())
322            .map(|(a, b)| a * b)
323            .sum();
324        let lam = (dot_k + dot_h + self.mu).exp();
325        let p = lam * self.dt_ms / 1000.0;
326        let spike = if self.rng.random::<f64>() < p { 1 } else { 0 };
327        for i in (1..nh).rev() {
328            self.spike_buf[i] = self.spike_buf[i - 1];
329        }
330        if nh > 0 {
331            self.spike_buf[0] = spike as f64;
332        }
333        spike
334    }
335    pub fn reset(&mut self) {
336        self.stim_buf.fill(0.0);
337        self.spike_buf.fill(0.0);
338    }
339}
340
/// Wilson-Cowan 1972 — excitatory/inhibitory population rate model.
#[derive(Clone, Debug)]
pub struct WilsonCowanUnit {
    pub e: f64,
    pub i: f64,
    pub w_ee: f64,
    pub w_ei: f64,
    pub w_ie: f64,
    pub w_ii: f64,
    pub tau_e: f64,
    pub tau_i: f64,
    pub a: f64,
    pub theta: f64,
    pub dt: f64,
}

impl WilsonCowanUnit {
    /// Classic oscillatory parameter set.
    pub fn new() -> Self {
        Self {
            e: 0.1,
            i: 0.05,
            w_ee: 10.0,
            w_ei: 6.0,
            w_ie: 10.0,
            w_ii: 1.0,
            tau_e: 1.0,
            tau_i: 2.0,
            a: 1.2,
            theta: 4.0,
            dt: 0.1,
        }
    }

    /// Logistic response 1 / (1 + exp(-a·(x - θ))).
    fn sigmoid(&self, x: f64) -> f64 {
        1.0 / (1.0 + (-self.a * (x - self.theta)).exp())
    }

    /// One Euler step; both population drives use the pre-update (e, i) pair.
    /// Returns the excitatory rate.
    pub fn step(&mut self, ext_input: f64) -> f64 {
        let drive_e = self.w_ee * self.e - self.w_ei * self.i + ext_input;
        let drive_i = self.w_ie * self.e - self.w_ii * self.i;
        let target_e = self.sigmoid(drive_e);
        let target_i = self.sigmoid(drive_i);
        self.e += (-self.e + target_e) / self.tau_e * self.dt;
        self.i += (-self.i + target_i) / self.tau_i * self.dt;
        self.e
    }

    /// Restore the freshly-constructed activity levels.
    pub fn reset(&mut self) {
        self.e = 0.1;
        self.i = 0.05;
    }
}
impl Default for WilsonCowanUnit {
    fn default() -> Self {
        Self::new()
    }
}
393
/// Jansen-Rit 1995 — neural mass model for EEG generation.
///
/// State `y` holds three second-order synaptic blocks as (potential, velocity)
/// pairs: (y0, y3) pyramidal output, (y1, y4) excitatory feedback, (y2, y5)
/// inhibitory feedback. The EEG-like observable is y1 - y2.
#[derive(Clone, Debug)]
pub struct JansenRitUnit {
    pub y: [f64; 6],
    /// Excitatory synaptic gain A (mV).
    pub a_exc: f64,
    /// Inhibitory synaptic gain B (mV).
    pub b_exc: f64,
    /// Excitatory rate constant a (s⁻¹).
    pub a_rate: f64,
    /// Inhibitory rate constant b (s⁻¹).
    pub b_rate: f64,
    /// Connectivity constant C (C1 = C, C2 = 0.8C, C3 = C4 = 0.25C).
    pub c: f64,
    /// Half the maximum firing rate e0 (s⁻¹).
    pub e0: f64,
    /// Sigmoid midpoint v0 (mV).
    pub v0: f64,
    /// Sigmoid steepness r (mV⁻¹).
    pub r: f64,
    /// Integration step (s).
    pub dt: f64,
}

impl JansenRitUnit {
    /// Standard parameter set from Jansen & Rit (1995).
    pub fn new() -> Self {
        Self {
            y: [0.0; 6],
            a_exc: 3.25,
            b_exc: 22.0,
            a_rate: 100.0,
            b_rate: 50.0,
            c: 135.0,
            e0: 2.5,
            v0: 6.0,
            r: 0.56,
            dt: 0.001,
        }
    }
    /// Wave-to-pulse sigmoid: 2·e0 / (1 + exp(r·(v0 - v))).
    fn sigmoid(&self, x: f64) -> f64 {
        2.0 * self.e0 / (1.0 + (self.r * (self.v0 - x)).exp())
    }
    /// Advance one Euler step driven by external pulse density `p_ext`;
    /// returns the EEG-like output y1 - y2.
    pub fn step(&mut self, p_ext: f64) -> f64 {
        let s1 = self.sigmoid(self.y[1] - self.y[2]);
        // Fix: the excitatory-interneuron drive is C1·y0 with C1 = C in
        // Jansen & Rit (1995); previously C2 = 0.8C was used inside the sigmoid.
        let s0 = self.sigmoid(self.c * self.y[0]);
        let s2 = self.sigmoid(self.c * 0.25 * self.y[0]);
        let dy0 = self.y[3];
        let dy3 = self.a_exc * self.a_rate * s1
            - 2.0 * self.a_rate * self.y[3]
            - self.a_rate.powi(2) * self.y[0];
        let dy1 = self.y[4];
        let dy4 = self.a_exc * self.a_rate * (p_ext + self.c * 0.8 * s0)
            - 2.0 * self.a_rate * self.y[4]
            - self.a_rate.powi(2) * self.y[1];
        let dy2 = self.y[5];
        let dy5 = self.b_exc * self.b_rate * self.c * 0.25 * s2
            - 2.0 * self.b_rate * self.y[5]
            - self.b_rate.powi(2) * self.y[2];
        self.y[0] += dy0 * self.dt;
        self.y[3] += dy3 * self.dt;
        self.y[1] += dy1 * self.dt;
        self.y[4] += dy4 * self.dt;
        self.y[2] += dy2 * self.dt;
        self.y[5] += dy5 * self.dt;
        self.y[1] - self.y[2]
    }
    /// Zero the full state vector.
    pub fn reset(&mut self) {
        self.y = [0.0; 6];
    }
}
impl Default for JansenRitUnit {
    fn default() -> Self {
        Self::new()
    }
}
460
461/// Wong-Wang 2006 — attractor decision-making model.
462#[derive(Clone, Debug)]
463pub struct WongWangUnit {
464    pub s1: f64,
465    pub s2: f64,
466    pub tau_s: f64,
467    pub gamma: f64,
468    pub j_n: f64,
469    pub j_cross: f64,
470    pub i_0: f64,
471    pub sigma: f64,
472    pub dt: f64,
473    rng: Xoshiro256PlusPlus,
474}
475
476impl WongWangUnit {
477    pub fn new(seed: u64) -> Self {
478        Self {
479            s1: 0.1,
480            s2: 0.1,
481            tau_s: 0.1,
482            gamma: 0.641,
483            j_n: 0.2609,
484            j_cross: 0.0497,
485            i_0: 0.3255,
486            sigma: 0.02,
487            dt: 0.001,
488            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
489        }
490    }
491    fn phi(&self, i_total: f64) -> f64 {
492        let a = 270.0;
493        let b = 108.0;
494        let d = 0.154;
495        let x = a * i_total - b;
496        let denom = 1.0 - (-d * x).exp();
497        if denom.abs() < 1e-10 {
498            1.0 / d
499        } else {
500            x / denom
501        }
502    }
503    fn randn(&mut self) -> f64 {
504        let u1 = self.rng.random::<f64>().max(1e-30);
505        let u2 = self.rng.random::<f64>();
506        (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
507    }
508    pub fn step(&mut self, stim1: f64, stim2: f64) -> (f64, f64) {
509        let i1 = self.j_n * self.s1 - self.j_cross * self.s2
510            + self.i_0
511            + stim1
512            + self.sigma * self.randn();
513        let i2 = self.j_n * self.s2 - self.j_cross * self.s1
514            + self.i_0
515            + stim2
516            + self.sigma * self.randn();
517        let r1 = self.phi(i1);
518        let r2 = self.phi(i2);
519        self.s1 += (-self.s1 / self.tau_s + self.gamma * (1.0 - self.s1) * r1) * self.dt;
520        self.s2 += (-self.s2 / self.tau_s + self.gamma * (1.0 - self.s2) * r2) * self.dt;
521        (r1, r2)
522    }
523    pub fn reset(&mut self) {
524        self.s1 = 0.1;
525        self.s2 = 0.1;
526    }
527}
528
/// Ermentrout-Kopell / Montbrió theta-neuron mean field. Montbrió et al. 2015.
#[derive(Clone, Debug)]
pub struct ErmentroutKopellPopulation {
    pub r: f64,
    pub v: f64,
    pub tau: f64,
    pub delta: f64,
    pub eta_bar: f64,
    pub j: f64,
    pub dt: f64,
}

impl ErmentroutKopellPopulation {
    /// Default mean-field parameters (Lorentzian excitability spread Δ = 1).
    pub fn new() -> Self {
        Self {
            r: 0.1,
            v: -2.0,
            tau: 1.0,
            delta: 1.0,
            eta_bar: -5.0,
            j: 15.0,
            dt: 0.01,
        }
    }

    /// One Euler step of the exact firing-rate equations; the population rate
    /// is clamped non-negative and returned.
    pub fn step(&mut self, ext_input: f64) -> f64 {
        use std::f64::consts::PI;
        let drift_r = (self.delta / (PI * self.tau) + 2.0 * self.r * self.v) / self.tau;
        let drift_v = (self.v * self.v + self.eta_bar + ext_input + self.j * self.tau * self.r
            - PI.powi(2) * self.tau.powi(2) * self.r.powi(2))
            / self.tau;
        self.r += drift_r * self.dt;
        self.v += drift_v * self.dt;
        self.r = self.r.max(0.0);
        self.r
    }

    /// Restore the freshly-constructed mean-field state.
    pub fn reset(&mut self) {
        self.r = 0.1;
        self.v = -2.0;
    }
}
impl Default for ErmentroutKopellPopulation {
    fn default() -> Self {
        Self::new()
    }
}
574
/// Wendling et al. 2002 — extended Jansen-Rit with slow GABA_B.
///
/// NOTE(review): Wendling 2002 pairs B = 22 mV with b = 50 s⁻¹ (slow dendritic
/// inhibition) and G = 10 mV with g = 500 s⁻¹ (fast somatic inhibition). Here
/// the pairings are 22 mV ↔ 500 s⁻¹ and 10 mV ↔ 20 s⁻¹ — confirm against the
/// intended parameter set.
#[derive(Clone, Debug)]
pub struct WendlingNeuron {
    pub y: [f64; 10],
    pub a_exc: f64,
    pub b_fast: f64,
    pub g_slow: f64,
    pub a_rate: f64,
    pub b_rate: f64,
    pub g_rate: f64,
    pub c: f64,
    pub e0: f64,
    pub v0: f64,
    pub r: f64,
    pub dt: f64,
}

impl WendlingNeuron {
    /// Default parameter set (see the NOTE on the struct about rate pairings).
    pub fn new() -> Self {
        Self {
            y: [0.0; 10],
            a_exc: 3.25,
            b_fast: 22.0,
            g_slow: 10.0,
            a_rate: 100.0,
            b_rate: 500.0,
            g_rate: 20.0,
            c: 135.0,
            e0: 2.5,
            v0: 6.0,
            r: 0.56,
            dt: 0.001,
        }
    }

    /// Wave-to-pulse sigmoid: 2·e0 / (1 + exp(r·(v0 - v))).
    fn sigmoid(&self, x: f64) -> f64 {
        2.0 * self.e0 / (1.0 + (self.r * (self.v0 - x)).exp())
    }

    /// One Euler step of four second-order synaptic blocks; the summed
    /// membrane observable y1 - y2 - y3 is returned.
    pub fn step(&mut self, p_ext: f64) -> f64 {
        // Firing rates from the pre-update state.
        let fr_main = self.sigmoid(self.y[1] - self.y[2] - self.y[3]);
        let fr_exc = self.sigmoid(self.c * 0.8 * self.y[0]);
        let fr_fast = self.sigmoid(self.c * 0.25 * self.y[0]);
        let fr_slow = self.sigmoid(self.c * 0.1 * self.y[0]);
        let a = self.a_rate;
        let b = self.b_rate;
        let g = self.g_rate;
        // Velocities live in y[5..9]; each block is v' = G·k·input - 2k·v - k²·x.
        let dy0 = self.y[5];
        let dy5 = self.a_exc * a * fr_main - 2.0 * a * self.y[5] - a * a * self.y[0];
        let dy1 = self.y[6];
        let dy6 = self.a_exc * a * (p_ext + self.c * 0.8 * fr_exc)
            - 2.0 * a * self.y[6]
            - a * a * self.y[1];
        let dy2 = self.y[7];
        let dy7 =
            self.b_fast * b * self.c * 0.25 * fr_fast - 2.0 * b * self.y[7] - b * b * self.y[2];
        let dy3 = self.y[8];
        let dy8 =
            self.g_slow * g * self.c * 0.1 * fr_slow - 2.0 * g * self.y[8] - g * g * self.y[3];
        self.y[0] += dy0 * self.dt;
        self.y[5] += dy5 * self.dt;
        self.y[1] += dy1 * self.dt;
        self.y[6] += dy6 * self.dt;
        self.y[2] += dy2 * self.dt;
        self.y[7] += dy7 * self.dt;
        self.y[3] += dy3 * self.dt;
        self.y[8] += dy8 * self.dt;
        self.y[1] - self.y[2] - self.y[3]
    }

    /// Zero the full state vector.
    pub fn reset(&mut self) {
        self.y = [0.0; 10];
    }
}
impl Default for WendlingNeuron {
    fn default() -> Self {
        Self::new()
    }
}
651
/// Larter-Breakspear 2003 — neural mass with ion channels for whole-brain modelling.
#[derive(Clone, Debug)]
pub struct LarterBreakspearNeuron {
    pub v: f64,
    pub w: f64,
    pub z: f64,
    pub g_ca: f64,
    pub g_na: f64,
    pub g_k: f64,
    pub g_l: f64,
    pub v_ca: f64,
    pub v_na: f64,
    pub v_k: f64,
    pub v_l: f64,
    pub phi: f64,
    pub tau_k: f64,
    pub b: f64,
    pub a_ee: f64,
    pub i_ext: f64,
    pub dt: f64,
}

impl LarterBreakspearNeuron {
    /// Default conductance/reversal parameter set.
    pub fn new() -> Self {
        Self {
            v: -0.5,
            w: 0.0,
            z: 0.0,
            g_ca: 1.1,
            g_na: 6.7,
            g_k: 2.0,
            g_l: 0.5,
            v_ca: 1.0,
            v_na: 0.53,
            v_k: -0.7,
            v_l: -0.5,
            phi: 0.7,
            tau_k: 1.0,
            b: 0.1,
            a_ee: 0.36,
            i_ext: 0.3,
            dt: 0.01,
        }
    }

    /// One Euler step with network `coupling` added to the drive;
    /// returns the mean membrane potential `v`.
    pub fn step(&mut self, coupling: f64) -> f64 {
        // Voltage-dependent activation curves (tanh sigmoids).
        let act_ca = 0.5 * (1.0 + ((self.v + 0.01) / 0.15).tanh());
        let act_na = 0.5 * (1.0 + ((self.v - 0.12) / 0.15).tanh());
        let act_k = 0.5 * (1.0 + (self.v / 0.3).tanh());
        // Ionic currents (K gated by the slow variable w).
        let cur_ca = self.g_ca * act_ca * (self.v - self.v_ca);
        let cur_na = self.g_na * act_na * (self.v - self.v_na);
        let cur_k = self.g_k * self.w * (self.v - self.v_k);
        let cur_leak = self.g_l * (self.v - self.v_l);
        let dv = -cur_ca - cur_na - cur_k - cur_leak
            + self.i_ext
            + coupling
            + self.a_ee * self.v;
        let dw = self.phi * (act_k - self.w) / self.tau_k;
        let dz = self.b * (self.v + 0.5 - self.z);
        self.v += dv * self.dt;
        self.w += dw * self.dt;
        self.z += dz * self.dt;
        self.v
    }

    /// Restore the freshly-constructed state.
    pub fn reset(&mut self) {
        self.v = -0.5;
        self.w = 0.0;
        self.z = 0.0;
    }
}
impl Default for LarterBreakspearNeuron {
    fn default() -> Self {
        Self::new()
    }
}
723
#[cfg(test)]
mod tests {
    use super::*;

    // Smoke tests: each model emits activity under sustained drive.
    // RNG-backed models use fixed seeds, so all of these are deterministic.

    #[test]
    fn poisson_fires() {
        // step(-1.0): a negative override selects the stored 200 Hz rate.
        let mut n = PoissonNeuron::new(200.0, 1.0, 42);
        let t: i32 = (0..1000).map(|_| n.step(-1.0)).sum();
        assert!(t > 10);
    }
    #[test]
    fn inhom_poisson_fires() {
        let mut n = InhomogeneousPoissonNeuron::new(1.0, 42);
        let t: i32 = (0..1000).map(|_| n.step(200.0)).sum();
        assert!(t > 10);
    }
    #[test]
    fn gamma_renewal_fires() {
        let mut n = GammaRenewalNeuron::new(100.0, 3, 42);
        let t: i32 = (0..2000).map(|_| n.step(-1.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn stochastic_if_fires() {
        let mut n = StochasticIFNeuron::new(42);
        let t: i32 = (0..500).map(|_| n.step(30.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn gl_fires() {
        let mut n = GalvesLocherbachNeuron::new(42);
        let t: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn srm_fires() {
        let mut n = SpikeResponseNeuron::new();
        let t: i32 = (0..200).map(|_| n.step(10.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn glm_fires() {
        let mut n = GLMNeuron::new(5, 10, 42);
        let t: i32 = (0..5000).map(|_| n.step(20.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn wc_oscillates() {
        // Counts steps where E changed noticeably — verifies non-static dynamics.
        let mut n = WilsonCowanUnit::new();
        let mut last = n.e;
        let mut changes = 0;
        for _ in 0..500 {
            let e = n.step(5.0);
            if (e - last).abs() > 0.001 {
                changes += 1;
            }
            last = e;
        }
        assert!(changes > 0);
    }
    #[test]
    fn jr_produces_eeg() {
        let mut n = JansenRitUnit::new();
        let mut nz = 0;
        for _ in 0..5000 {
            let v = n.step(220.0);
            if v.abs() > 0.001 {
                nz += 1;
            }
        }
        assert!(nz > 0);
    }
    #[test]
    fn ww_diverges() {
        // Biased stimulus should split the attractor (winner-take-all).
        // NOTE(review): the 0.001 margin is seed-dependent; seed 42 is known-good.
        let mut n = WongWangUnit::new(42);
        for _ in 0..5000 {
            n.step(0.02, 0.0);
        }
        assert!((n.s1 - n.s2).abs() > 0.001);
    }
    #[test]
    fn ek_pop_fires() {
        let mut n = ErmentroutKopellPopulation::new();
        for _ in 0..1000 {
            n.step(0.0);
        }
        assert!(n.r > 0.0);
    }
    #[test]
    fn wendling_produces() {
        let mut n = WendlingNeuron::new();
        let mut nz = 0;
        for _ in 0..5000 {
            let v = n.step(220.0);
            if v.abs() > 0.001 {
                nz += 1;
            }
        }
        assert!(nz > 0);
    }
    #[test]
    fn lb_evolves() {
        let mut n = LarterBreakspearNeuron::new();
        let v0 = n.v;
        for _ in 0..1000 {
            n.step(0.0);
        }
        assert!((n.v - v0).abs() > 0.001);
    }

    // ── Multi-angle tests for special models ──
    // Each model is probed from several angles: reset semantics, NaN
    // robustness (no panic), boundedness under extreme drive, seed variation.

    // -- Poisson --
    #[test]
    fn poisson_reset_no_panic() {
        let mut n = PoissonNeuron::new(200.0, 1.0, 42);
        for _ in 0..100 {
            n.step(-1.0);
        }
        n.reset();
    }
    #[test]
    fn poisson_nan_no_panic() {
        // NaN override: NaN < 0.0 is false, so NaN propagates to p; the
        // comparison rng < NaN is false and the step silently emits 0.
        let mut n = PoissonNeuron::new(200.0, 1.0, 42);
        n.step(f64::NAN);
    }
    #[test]
    fn poisson_seed_varies() {
        let mut n1 = PoissonNeuron::new(200.0, 1.0, 1);
        let mut n2 = PoissonNeuron::new(200.0, 1.0, 999);
        let t1: i32 = (0..1000).map(|_| n1.step(-1.0)).sum();
        let t2: i32 = (0..1000).map(|_| n2.step(-1.0)).sum();
        assert!(t1 > 0 && t2 > 0);
    }

    // -- InhomogeneousPoisson --
    #[test]
    fn inhom_poisson_zero_rate() {
        let mut n = InhomogeneousPoissonNeuron::new(1.0, 42);
        let t: i32 = (0..1000).map(|_| n.step(0.0)).sum();
        assert_eq!(t, 0);
    }
    #[test]
    fn inhom_poisson_nan_no_panic() {
        let mut n = InhomogeneousPoissonNeuron::new(1.0, 42);
        n.step(f64::NAN);
    }

    // -- GammaRenewal --
    #[test]
    fn gamma_renewal_reset_clears() {
        let mut n = GammaRenewalNeuron::new(100.0, 3, 42);
        for _ in 0..100 {
            n.step(-1.0);
        }
        n.reset();
        assert!((n.time_since_spike - 0.0).abs() < 1e-10);
    }
    #[test]
    fn gamma_renewal_nan_no_panic() {
        let mut n = GammaRenewalNeuron::new(100.0, 3, 42);
        n.step(f64::NAN);
    }

    // -- StochasticIF --
    #[test]
    fn stochastic_if_reset_clears() {
        let mut n = StochasticIFNeuron::new(42);
        for _ in 0..100 {
            n.step(30.0);
        }
        n.reset();
        assert!((n.v - n.v_rest).abs() < 1e-10);
    }
    #[test]
    fn stochastic_if_bounded() {
        // Threshold reset keeps v finite even under extreme drive.
        let mut n = StochasticIFNeuron::new(42);
        for _ in 0..1000 {
            n.step(1e4);
        }
        assert!(n.v.is_finite());
    }
    #[test]
    fn stochastic_if_nan_no_panic() {
        StochasticIFNeuron::new(42).step(f64::NAN);
    }
    #[test]
    fn stochastic_if_negative_no_crash() {
        let mut n = StochasticIFNeuron::new(42);
        for _ in 0..500 {
            n.step(-10.0);
        }
        assert!(n.v.is_finite());
    }

    // -- GalvesLocherbach --
    #[test]
    fn gl_reset_clears() {
        let mut n = GalvesLocherbachNeuron::new(42);
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert!((n.v - n.v_rest).abs() < 1e-10);
    }
    #[test]
    fn gl_bounded() {
        // Post-spike reset to v_rest bounds the potential under huge input.
        let mut n = GalvesLocherbachNeuron::new(42);
        for _ in 0..1000 {
            n.step(1e4);
        }
        assert!(n.v.is_finite());
    }
    #[test]
    fn gl_nan_no_panic() {
        GalvesLocherbachNeuron::new(42).step(f64::NAN);
    }

    // -- SpikeResponse --
    #[test]
    fn srm_reset_clears() {
        let mut n = SpikeResponseNeuron::new();
        for _ in 0..100 {
            n.step(10.0);
        }
        n.reset();
        assert!((n.v - 0.0).abs() < 1e-10);
    }
    #[test]
    fn srm_bounded() {
        let mut n = SpikeResponseNeuron::new();
        for _ in 0..1000 {
            n.step(1e4);
        }
        assert!(n.v.is_finite());
    }
    #[test]
    fn srm_nan_no_panic() {
        SpikeResponseNeuron::new().step(f64::NAN);
    }
    #[test]
    fn srm_silent_without_input() {
        // With zero input only the (negative) refractory kernel remains,
        // so the threshold is never reached.
        let mut n = SpikeResponseNeuron::new();
        let t: i32 = (0..200).map(|_| n.step(0.0)).sum();
        assert_eq!(t, 0);
    }

    // -- GLM --
    #[test]
    fn glm_reset_clears() {
        let mut n = GLMNeuron::new(5, 10, 42);
        for _ in 0..100 {
            n.step(20.0);
        }
        n.reset();
    }
    #[test]
    fn glm_nan_no_panic() {
        GLMNeuron::new(5, 10, 42).step(f64::NAN);
    }

    // -- WilsonCowan --
    #[test]
    fn wc_reset_clears() {
        let mut n = WilsonCowanUnit::new();
        for _ in 0..200 {
            n.step(5.0);
        }
        n.reset();
        assert!((n.e - 0.1).abs() < 1e-10);
        assert!((n.i - 0.05).abs() < 1e-10);
    }
    #[test]
    fn wc_bounded() {
        // The sigmoid saturates in [0, 1], so rates cannot blow up.
        let mut n = WilsonCowanUnit::new();
        for _ in 0..5000 {
            n.step(1e3);
        }
        assert!(n.e.is_finite());
        assert!(n.i.is_finite());
    }
    #[test]
    fn wc_nan_no_panic() {
        WilsonCowanUnit::new().step(f64::NAN);
    }

    // -- JansenRit --
    #[test]
    fn jr_reset_clears() {
        let mut n = JansenRitUnit::new();
        for _ in 0..1000 {
            n.step(220.0);
        }
        n.reset();
        assert!(n.y.iter().all(|&x| x == 0.0));
    }
    #[test]
    fn jr_bounded() {
        let mut n = JansenRitUnit::new();
        for _ in 0..5000 {
            n.step(1e3);
        }
        assert!(n.y.iter().all(|x| x.is_finite()));
    }
    #[test]
    fn jr_nan_no_panic() {
        JansenRitUnit::new().step(f64::NAN);
    }

    // -- WongWang --
    #[test]
    fn ww_reset_clears() {
        let mut n = WongWangUnit::new(42);
        for _ in 0..1000 {
            n.step(0.02, 0.0);
        }
        n.reset();
        assert!((n.s1 - 0.1).abs() < 1e-10);
        assert!((n.s2 - 0.1).abs() < 1e-10);
    }
    #[test]
    fn ww_bounded() {
        let mut n = WongWangUnit::new(42);
        for _ in 0..5000 {
            n.step(1.0, 0.0);
        }
        assert!(n.s1.is_finite());
        assert!(n.s2.is_finite());
    }
    #[test]
    fn ww_nan_no_panic() {
        WongWangUnit::new(42).step(f64::NAN, 0.0);
    }

    // -- ErmentroutKopellPopulation --
    #[test]
    fn ek_pop_reset_clears() {
        let mut n = ErmentroutKopellPopulation::new();
        for _ in 0..500 {
            n.step(0.0);
        }
        n.reset();
        assert!((n.r - 0.1).abs() < 1e-10);
        assert!((n.v - (-2.0)).abs() < 1e-10);
    }
    #[test]
    fn ek_pop_moderate_stable() {
        let mut n = ErmentroutKopellPopulation::new();
        for _ in 0..5000 {
            n.step(1.0);
        }
        assert!(n.r.is_finite());
        assert!(n.v.is_finite());
    }
    #[test]
    fn ek_pop_nan_no_panic() {
        ErmentroutKopellPopulation::new().step(f64::NAN);
    }

    // -- Wendling --
    #[test]
    fn wendling_reset_clears() {
        let mut n = WendlingNeuron::new();
        for _ in 0..1000 {
            n.step(220.0);
        }
        n.reset();
        assert!(n.y.iter().all(|&x| x == 0.0));
    }
    #[test]
    fn wendling_bounded() {
        let mut n = WendlingNeuron::new();
        for _ in 0..5000 {
            n.step(1e3);
        }
        assert!(n.y.iter().all(|x| x.is_finite()));
    }
    #[test]
    fn wendling_nan_no_panic() {
        WendlingNeuron::new().step(f64::NAN);
    }

    // -- LarterBreakspear --
    #[test]
    fn lb_reset_clears() {
        let mut n = LarterBreakspearNeuron::new();
        for _ in 0..500 {
            n.step(0.0);
        }
        n.reset();
        assert!((n.v - (-0.5)).abs() < 1e-10);
        assert!((n.w - 0.0).abs() < 1e-10);
        assert!((n.z - 0.0).abs() < 1e-10);
    }
    #[test]
    fn lb_bounded() {
        let mut n = LarterBreakspearNeuron::new();
        for _ in 0..5000 {
            n.step(10.0);
        }
        assert!(n.v.is_finite());
    }
    #[test]
    fn lb_nan_no_panic() {
        LarterBreakspearNeuron::new().step(f64::NAN);
    }
}