// sc_neurocore_engine/neurons/special.rs
// SPDX-License-Identifier: AGPL-3.0-or-later | Commercial license available
// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
// © Code 2020–2026 Miroslav Šotek. All rights reserved.
// ORCID: 0009-0009-3560-0851
// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — Stochastic generators, spike-response models, and neural
// mass/population models.

//! Stochastic generators, spike-response models, and neural mass/population models.
use rand::{Rng, SeedableRng};
use rand_xoshiro::Xoshiro256PlusPlus;
12
13/// Poisson spike generator — P(spike in dt) = λ·dt.
14#[derive(Clone, Debug)]
15pub struct PoissonNeuron {
16    pub rate_hz: f64,
17    pub dt_ms: f64,
18    rng: Xoshiro256PlusPlus,
19}
20
21impl PoissonNeuron {
22    pub fn new(rate_hz: f64, dt_ms: f64, seed: u64) -> Self {
23        Self {
24            rate_hz,
25            dt_ms,
26            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
27        }
28    }
29    pub fn step(&mut self, rate_override: f64) -> i32 {
30        let r = if rate_override < 0.0 {
31            self.rate_hz
32        } else {
33            rate_override
34        };
35        let p = r * self.dt_ms / 1000.0;
36        if self.rng.random::<f64>() < p {
37            1
38        } else {
39            0
40        }
41    }
42    pub fn reset(&mut self) {}
43}
44
45/// Inhomogeneous Poisson — rate passed per step.
46#[derive(Clone, Debug)]
47pub struct InhomogeneousPoissonNeuron {
48    pub dt_ms: f64,
49    rng: Xoshiro256PlusPlus,
50}
51
52impl InhomogeneousPoissonNeuron {
53    pub fn new(dt_ms: f64, seed: u64) -> Self {
54        Self {
55            dt_ms,
56            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
57        }
58    }
59    pub fn step(&mut self, rate_hz: f64) -> i32 {
60        let p = rate_hz.max(0.0) * self.dt_ms / 1000.0;
61        if self.rng.random::<f64>() < p {
62            1
63        } else {
64            0
65        }
66    }
67    pub fn reset(&mut self) {}
68}
69
/// Gamma-renewal process — order-k Poisson with refractory ISI structure.
#[derive(Clone, Debug)]
pub struct GammaRenewalNeuron {
    pub rate_hz: f64,      // mean firing rate in Hz
    pub shape_k: u32,      // gamma shape k; k = 1 reduces to a Poisson process
    pub dt_ms: f64,        // step size in ms (fixed at 1.0 by `new`)
    time_since_spike: f64, // ms elapsed since the last emitted spike
    rng: Xoshiro256PlusPlus,
}

impl GammaRenewalNeuron {
    /// Creates a generator with a fixed 1 ms step and the given seed.
    pub fn new(rate_hz: f64, shape_k: u32, seed: u64) -> Self {
        Self {
            rate_hz,
            shape_k,
            dt_ms: 1.0,
            time_since_spike: 0.0,
            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
        }
    }
    /// ln Γ(k) = ln((k-1)!) for integer k; k = 0 and k = 1 both yield 0.
    fn log_gamma_int(k: u32) -> f64 {
        // ln((k-1)!) for integer k
        (1..k).map(|i| (i as f64).ln()).sum()
    }
    /// Advances one step and samples a spike from the gamma-ISI hazard.
    ///
    /// A negative `rate_override` uses the stored `rate_hz`. The per-step
    /// spike probability is the hazard h(t) = pdf(t) / survival(t) of a
    /// Gamma(k, k·λ) ISI distribution, so the mean ISI stays 1/λ for any k.
    pub fn step(&mut self, rate_override: f64) -> i32 {
        self.time_since_spike += self.dt_ms;
        let r = if rate_override < 0.0 {
            self.rate_hz
        } else {
            rate_override
        };
        let lam = r / 1000.0; // convert Hz to spikes per ms
        let k = self.shape_k;
        let t = self.time_since_spike;
        // Gamma rate k·λ keeps E[ISI] = 1/λ independent of the shape.
        let mu = lam * (k as f64);
        // log pdf of Gamma(shape k, rate mu) at t; ln arguments are clamped
        // so mu = 0 or t = 0 cannot produce -inf.
        let log_pdf = (k as f64 - 1.0) * (mu * t).max(1e-30).ln() + mu.max(1e-30).ln()
            - mu * t
            - Self::log_gamma_int(k);
        // Survival S(t) = 1 - P(k, mu·t), regularized lower incomplete gamma.
        let surv = 1.0 - self.incomplete_gamma_ratio(k, mu * t);
        // When S(t) underflows, fall back to the asymptotic hazard mu.
        let hazard = if surv > 1e-15 {
            log_pdf.exp() / surv
        } else {
            mu
        };
        let p = hazard * self.dt_ms;
        if self.rng.random::<f64>() < p {
            self.time_since_spike = 0.0;
            1
        } else {
            0
        }
    }
    /// Regularized lower incomplete gamma P(k, x) for integer k via the
    /// finite series P(k, x) = 1 − e^{−x} Σ_{n=0}^{k−1} x^n / n!.
    fn incomplete_gamma_ratio(&self, k: u32, x: f64) -> f64 {
        // Regularized lower incomplete gamma P(k, x) via series for integer k
        if x <= 0.0 {
            return 0.0;
        }
        let mut sum = 0.0_f64;
        let mut term = 1.0_f64;
        for n in 0..k {
            if n > 0 {
                term *= x / n as f64; // term = x^n / n!
            }
            sum += term;
        }
        1.0 - (-x).exp() * sum
    }
    /// Restarts the renewal clock.
    pub fn reset(&mut self) {
        self.time_since_spike = 0.0;
    }
}
143
144/// Stochastic IF — Ornstein-Uhlenbeck noise on LIF membrane.
145#[derive(Clone, Debug)]
146pub struct StochasticIFNeuron {
147    pub v: f64,
148    pub v_rest: f64,
149    pub v_reset: f64,
150    pub v_threshold: f64,
151    pub tau_m: f64,
152    pub sigma: f64,
153    pub dt: f64,
154    rng: Xoshiro256PlusPlus,
155}
156
157impl StochasticIFNeuron {
158    pub fn new(seed: u64) -> Self {
159        Self {
160            v: -70.0,
161            v_rest: -70.0,
162            v_reset: -70.0,
163            v_threshold: -50.0,
164            tau_m: 20.0,
165            sigma: 3.0,
166            dt: 1.0,
167            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
168        }
169    }
170    fn randn(&mut self) -> f64 {
171        // Box-Muller
172        let u1: f64 = self.rng.random::<f64>().max(1e-30);
173        let u2: f64 = self.rng.random::<f64>();
174        (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
175    }
176    pub fn step(&mut self, current: f64) -> i32 {
177        let noise = self.sigma * (self.dt / self.tau_m).sqrt() * self.randn();
178        self.v += (-(self.v - self.v_rest) + current) / self.tau_m * self.dt + noise;
179        if self.v >= self.v_threshold {
180            self.v = self.v_reset;
181            1
182        } else {
183            0
184        }
185    }
186    pub fn reset(&mut self) {
187        self.v = self.v_rest;
188    }
189}
190
191/// Galves-Löcherbach 2013 — stochastic point process with memory.
192#[derive(Clone, Debug)]
193pub struct GalvesLocherbachNeuron {
194    pub v: f64,
195    pub v_rest: f64,
196    pub decay: f64,
197    pub threshold_rate: f64,
198    pub steepness: f64,
199    pub dt: f64,
200    rng: Xoshiro256PlusPlus,
201}
202
203impl GalvesLocherbachNeuron {
204    pub fn new(seed: u64) -> Self {
205        Self {
206            v: 0.0,
207            v_rest: 0.0,
208            decay: 0.95,
209            threshold_rate: 0.5,
210            steepness: 5.0,
211            dt: 1.0,
212            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
213        }
214    }
215    pub fn step(&mut self, weighted_input: f64) -> i32 {
216        self.v = self.decay * self.v + weighted_input;
217        let p = 1.0 / (1.0 + (-(self.steepness * (self.v - self.threshold_rate))).exp());
218        if self.rng.random::<f64>() < p * self.dt {
219            self.v = self.v_rest;
220            1
221        } else {
222            0
223        }
224    }
225    pub fn reset(&mut self) {
226        self.v = self.v_rest;
227    }
228}
229
/// SRM0 — Spike Response Model (kernel-based). Gerstner 1995.
#[derive(Clone, Debug)]
pub struct SpikeResponseNeuron {
    pub v: f64,
    pub v_threshold: f64,
    pub tau_eta: f64,
    pub tau_kappa: f64,
    pub eta_reset: f64,
    pub time_since_spike: f64,
    pub dt: f64,
}

impl SpikeResponseNeuron {
    /// Builds a unit with unit threshold and the last spike pushed far into
    /// the past, so the refractory kernel starts effectively at zero.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            v_threshold: 1.0,
            tau_eta: 10.0,
            tau_kappa: 5.0,
            eta_reset: -5.0,
            time_since_spike: 1000.0,
            dt: 1.0,
        }
    }

    /// One SRM0 update: v = η(t − t_last) + κ·input. Returns 1 and restarts
    /// the refractory clock on a threshold crossing, 0 otherwise.
    pub fn step(&mut self, weighted_input: f64) -> i32 {
        self.time_since_spike += self.dt;
        // η kernel: exponentially decaying after-spike hyperpolarization.
        let refractory = self.eta_reset * (-self.time_since_spike / self.tau_eta).exp();
        // κ kernel: one-step exponential filtering of the input drive.
        let drive = weighted_input * (1.0 - (-self.dt / self.tau_kappa).exp());
        self.v = refractory + drive;
        if self.v < self.v_threshold {
            return 0;
        }
        self.time_since_spike = 0.0;
        1
    }

    /// Clears the potential and expires the refractory kernel.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.time_since_spike = 1000.0;
    }
}
impl Default for SpikeResponseNeuron {
    fn default() -> Self {
        Self::new()
    }
}
276
277/// GLM neuron — point-process generalized linear model. Pillow et al. 2008.
278#[derive(Clone, Debug)]
279pub struct GLMNeuron {
280    pub mu: f64,
281    pub dt_ms: f64,
282    pub k: Vec<f64>,
283    pub h: Vec<f64>,
284    stim_buf: Vec<f64>,
285    spike_buf: Vec<f64>,
286    rng: Xoshiro256PlusPlus,
287}
288
289impl GLMNeuron {
290    pub fn new(n_k: usize, n_h: usize, seed: u64) -> Self {
291        Self {
292            mu: -3.0,
293            dt_ms: 1.0,
294            k: vec![0.1; n_k],
295            h: vec![-0.5; n_h],
296            stim_buf: vec![0.0; n_k],
297            spike_buf: vec![0.0; n_h],
298            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
299        }
300    }
301    pub fn step(&mut self, stimulus: f64) -> i32 {
302        // Shift buffers
303        let nk = self.stim_buf.len();
304        let nh = self.spike_buf.len();
305        for i in (1..nk).rev() {
306            self.stim_buf[i] = self.stim_buf[i - 1];
307        }
308        if nk > 0 {
309            self.stim_buf[0] = stimulus;
310        }
311        let dot_k: f64 = self
312            .k
313            .iter()
314            .zip(self.stim_buf.iter())
315            .map(|(a, b)| a * b)
316            .sum();
317        let dot_h: f64 = self
318            .h
319            .iter()
320            .zip(self.spike_buf.iter())
321            .map(|(a, b)| a * b)
322            .sum();
323        let lam = (dot_k + dot_h + self.mu).exp();
324        let p = lam * self.dt_ms / 1000.0;
325        let spike = if self.rng.random::<f64>() < p { 1 } else { 0 };
326        for i in (1..nh).rev() {
327            self.spike_buf[i] = self.spike_buf[i - 1];
328        }
329        if nh > 0 {
330            self.spike_buf[0] = spike as f64;
331        }
332        spike
333    }
334    pub fn reset(&mut self) {
335        self.stim_buf.fill(0.0);
336        self.spike_buf.fill(0.0);
337    }
338}
339
/// Wilson-Cowan 1972 — excitatory/inhibitory population rate model.
#[derive(Clone, Debug)]
pub struct WilsonCowanUnit {
    pub e: f64,
    pub i: f64,
    pub w_ee: f64,
    pub w_ei: f64,
    pub w_ie: f64,
    pub w_ii: f64,
    pub tau_e: f64,
    pub tau_i: f64,
    pub a: f64,
    pub theta: f64,
    pub dt: f64,
}

impl WilsonCowanUnit {
    /// Builds a unit with a classic oscillatory parameter set.
    pub fn new() -> Self {
        Self {
            e: 0.1,
            i: 0.05,
            w_ee: 10.0,
            w_ei: 6.0,
            w_ie: 10.0,
            w_ii: 1.0,
            tau_e: 1.0,
            tau_i: 2.0,
            a: 1.2,
            theta: 4.0,
            dt: 0.1,
        }
    }

    /// Logistic response with gain `a` and threshold `theta`.
    fn sigmoid(&self, x: f64) -> f64 {
        1.0 / (1.0 + (-self.a * (x - self.theta)).exp())
    }

    /// Euler step of the coupled E/I rate equations; returns the new E rate.
    pub fn step(&mut self, ext_input: f64) -> f64 {
        // Both drives are evaluated on the pre-update state before either
        // population is advanced.
        let drive_e = self.sigmoid(self.w_ee * self.e - self.w_ei * self.i + ext_input);
        let drive_i = self.sigmoid(self.w_ie * self.e - self.w_ii * self.i);
        self.e += (-self.e + drive_e) / self.tau_e * self.dt;
        self.i += (-self.i + drive_i) / self.tau_i * self.dt;
        self.e
    }

    /// Restores the default initial rates.
    pub fn reset(&mut self) {
        self.e = 0.1;
        self.i = 0.05;
    }
}
impl Default for WilsonCowanUnit {
    fn default() -> Self {
        Self::new()
    }
}
392
/// Jansen-Rit 1995 — neural mass model for EEG generation.
#[derive(Clone, Debug)]
pub struct JansenRitUnit {
    pub y: [f64; 6],
    pub a_exc: f64,
    pub b_exc: f64,
    pub a_rate: f64,
    pub b_rate: f64,
    pub c: f64,
    pub e0: f64,
    pub v0: f64,
    pub r: f64,
    pub dt: f64,
}

impl JansenRitUnit {
    /// Standard Jansen-Rit parameter set (A = 3.25 mV, B = 22 mV,
    /// a = 100 s⁻¹, b = 50 s⁻¹, C = 135) with a 1 ms Euler step.
    pub fn new() -> Self {
        Self {
            y: [0.0; 6],
            a_exc: 3.25,
            b_exc: 22.0,
            a_rate: 100.0,
            b_rate: 50.0,
            c: 135.0,
            e0: 2.5,
            v0: 6.0,
            r: 0.56,
            dt: 0.001,
        }
    }

    /// Population sigmoid: 2·e0 / (1 + exp(r·(v0 − x))).
    fn sigmoid(&self, x: f64) -> f64 {
        2.0 * self.e0 / (1.0 + (self.r * (self.v0 - x)).exp())
    }

    /// Advances one Euler step driven by external pulse density `p_ext` and
    /// returns the EEG proxy y1 − y2 (net PSP on the pyramidal population).
    pub fn step(&mut self, p_ext: f64) -> f64 {
        let s_pyr = self.sigmoid(self.y[1] - self.y[2]);
        // BUGFIX: in Jansen & Rit 1995 the excitatory-interneuron sigmoid is
        // evaluated at C1·y0 with C1 = C; the previous code passed 0.8·C·y0,
        // i.e. it reused the C2 output gain inside the nonlinearity. C2 (0.8C)
        // correctly multiplies the sigmoid *output* in dy4 below.
        let s_exc = self.sigmoid(self.c * self.y[0]);
        // Inhibitory loop: C3 = C4 = 0.25·C (argument and gain match).
        let s_inh = self.sigmoid(self.c * 0.25 * self.y[0]);
        let dy0 = self.y[3];
        let dy3 = self.a_exc * self.a_rate * s_pyr
            - 2.0 * self.a_rate * self.y[3]
            - self.a_rate.powi(2) * self.y[0];
        let dy1 = self.y[4];
        let dy4 = self.a_exc * self.a_rate * (p_ext + self.c * 0.8 * s_exc)
            - 2.0 * self.a_rate * self.y[4]
            - self.a_rate.powi(2) * self.y[1];
        let dy2 = self.y[5];
        let dy5 = self.b_exc * self.b_rate * self.c * 0.25 * s_inh
            - 2.0 * self.b_rate * self.y[5]
            - self.b_rate.powi(2) * self.y[2];
        self.y[0] += dy0 * self.dt;
        self.y[3] += dy3 * self.dt;
        self.y[1] += dy1 * self.dt;
        self.y[4] += dy4 * self.dt;
        self.y[2] += dy2 * self.dt;
        self.y[5] += dy5 * self.dt;
        self.y[1] - self.y[2]
    }

    /// Zeroes the full state vector.
    pub fn reset(&mut self) {
        self.y = [0.0; 6];
    }
}
impl Default for JansenRitUnit {
    fn default() -> Self {
        Self::new()
    }
}
459
460/// Wong-Wang 2006 — attractor decision-making model.
461#[derive(Clone, Debug)]
462pub struct WongWangUnit {
463    pub s1: f64,
464    pub s2: f64,
465    pub tau_s: f64,
466    pub gamma: f64,
467    pub j_n: f64,
468    pub j_cross: f64,
469    pub i_0: f64,
470    pub sigma: f64,
471    pub dt: f64,
472    rng: Xoshiro256PlusPlus,
473}
474
475impl WongWangUnit {
476    pub fn new(seed: u64) -> Self {
477        Self {
478            s1: 0.1,
479            s2: 0.1,
480            tau_s: 0.1,
481            gamma: 0.641,
482            j_n: 0.2609,
483            j_cross: 0.0497,
484            i_0: 0.3255,
485            sigma: 0.02,
486            dt: 0.001,
487            rng: Xoshiro256PlusPlus::seed_from_u64(seed),
488        }
489    }
490    fn phi(&self, i_total: f64) -> f64 {
491        let a = 270.0;
492        let b = 108.0;
493        let d = 0.154;
494        let x = a * i_total - b;
495        let denom = 1.0 - (-d * x).exp();
496        if denom.abs() < 1e-10 {
497            1.0 / d
498        } else {
499            x / denom
500        }
501    }
502    fn randn(&mut self) -> f64 {
503        let u1 = self.rng.random::<f64>().max(1e-30);
504        let u2 = self.rng.random::<f64>();
505        (-2.0 * u1.ln()).sqrt() * (2.0 * std::f64::consts::PI * u2).cos()
506    }
507    pub fn step(&mut self, stim1: f64, stim2: f64) -> (f64, f64) {
508        let i1 = self.j_n * self.s1 - self.j_cross * self.s2
509            + self.i_0
510            + stim1
511            + self.sigma * self.randn();
512        let i2 = self.j_n * self.s2 - self.j_cross * self.s1
513            + self.i_0
514            + stim2
515            + self.sigma * self.randn();
516        let r1 = self.phi(i1);
517        let r2 = self.phi(i2);
518        self.s1 += (-self.s1 / self.tau_s + self.gamma * (1.0 - self.s1) * r1) * self.dt;
519        self.s2 += (-self.s2 / self.tau_s + self.gamma * (1.0 - self.s2) * r2) * self.dt;
520        (r1, r2)
521    }
522    pub fn reset(&mut self) {
523        self.s1 = 0.1;
524        self.s2 = 0.1;
525    }
526}
527
/// Ermentrout-Kopell / Montbrió theta-neuron mean field. Montbrió et al. 2015.
#[derive(Clone, Debug)]
pub struct ErmentroutKopellPopulation {
    pub r: f64,
    pub v: f64,
    pub tau: f64,
    pub delta: f64,
    pub eta_bar: f64,
    pub j: f64,
    pub dt: f64,
}

impl ErmentroutKopellPopulation {
    /// Builds a population with the default mean-field parameters.
    pub fn new() -> Self {
        Self {
            r: 0.1,
            v: -2.0,
            tau: 1.0,
            delta: 1.0,
            eta_bar: -5.0,
            j: 15.0,
            dt: 0.01,
        }
    }

    /// Euler step of the exact firing-rate equations; returns the population
    /// rate r, clamped at zero because a rate cannot be negative.
    pub fn step(&mut self, ext_input: f64) -> f64 {
        let pi = std::f64::consts::PI;
        let rate_slope = (self.delta / (pi * self.tau) + 2.0 * self.r * self.v) / self.tau;
        let volt_slope = (self.v * self.v + self.eta_bar + ext_input + self.j * self.tau * self.r
            - pi.powi(2) * self.tau.powi(2) * self.r.powi(2))
            / self.tau;
        self.r += rate_slope * self.dt;
        self.v += volt_slope * self.dt;
        self.r = self.r.max(0.0);
        self.r
    }

    /// Restores the default initial condition.
    pub fn reset(&mut self) {
        self.r = 0.1;
        self.v = -2.0;
    }
}
impl Default for ErmentroutKopellPopulation {
    fn default() -> Self {
        Self::new()
    }
}
573
/// Wendling et al. 2002 — extended Jansen-Rit with slow GABA_B.
///
/// NOTE(review): Wendling 2002 pairs gain B = 22 with rate b = 50 s⁻¹ (slow
/// dendritic inhibition) and gain G = 10 with rate g = 500 s⁻¹ (fast somatic
/// inhibition). Here the 22-gain loop runs at 500 s⁻¹ and the 10-gain loop at
/// 20 s⁻¹ — confirm this deviation from the published parameters is intended.
#[derive(Clone, Debug)]
pub struct WendlingNeuron {
    pub y: [f64; 10],  // state vector; indices 4 and 9 are never touched here
    pub a_exc: f64,    // excitatory synaptic gain A
    pub b_fast: f64,   // gain of the y[2] inhibitory loop
    pub g_slow: f64,   // gain of the y[3] inhibitory loop
    pub a_rate: f64,   // excitatory rate constant a (s⁻¹)
    pub b_rate: f64,   // rate constant of the y[2] loop (s⁻¹)
    pub g_rate: f64,   // rate constant of the y[3] loop (s⁻¹)
    pub c: f64,        // connectivity constant C
    pub e0: f64,       // half of the maximum firing rate
    pub v0: f64,       // sigmoid midpoint (mV)
    pub r: f64,        // sigmoid steepness
    pub dt: f64,       // Euler step in seconds
}

impl WendlingNeuron {
    /// Builds a unit with this module's default parameter set (see the
    /// struct-level note about the rate constants).
    pub fn new() -> Self {
        Self {
            y: [0.0; 10],
            a_exc: 3.25,
            b_fast: 22.0,
            g_slow: 10.0,
            a_rate: 100.0,
            b_rate: 500.0,
            g_rate: 20.0,
            c: 135.0,
            e0: 2.5,
            v0: 6.0,
            r: 0.56,
            dt: 0.001,
        }
    }
    /// Population sigmoid: 2·e0 / (1 + exp(r·(v0 − x))).
    fn sigmoid(&self, x: f64) -> f64 {
        2.0 * self.e0 / (1.0 + (self.r * (self.v0 - x)).exp())
    }
    /// Advances one Euler step driven by external pulse density `p_ext`;
    /// returns the EEG proxy y1 − y2 − y3 (net PSP on the pyramidal cells).
    pub fn step(&mut self, p_ext: f64) -> f64 {
        // Firing rates of the four populations, all from the pre-update state.
        let sig_main = self.sigmoid(self.y[1] - self.y[2] - self.y[3]);
        let sig_0 = self.sigmoid(self.c * 0.8 * self.y[0]);
        let sig_fast = self.sigmoid(self.c * 0.25 * self.y[0]);
        let sig_slow = self.sigmoid(self.c * 0.1 * self.y[0]);
        let a = self.a_rate;
        let b = self.b_rate;
        let g = self.g_rate;
        // Each loop is a critically damped second-order synaptic filter:
        // y'' = gain·rate·input − 2·rate·y' − rate²·y.
        let dy0 = self.y[5];
        let dy5 = self.a_exc * a * sig_main - 2.0 * a * self.y[5] - a * a * self.y[0];
        let dy1 = self.y[6];
        let dy6 = self.a_exc * a * (p_ext + self.c * 0.8 * sig_0)
            - 2.0 * a * self.y[6]
            - a * a * self.y[1];
        let dy2 = self.y[7];
        let dy7 =
            self.b_fast * b * self.c * 0.25 * sig_fast - 2.0 * b * self.y[7] - b * b * self.y[2];
        let dy3 = self.y[8];
        let dy8 =
            self.g_slow * g * self.c * 0.1 * sig_slow - 2.0 * g * self.y[8] - g * g * self.y[3];
        self.y[0] += dy0 * self.dt;
        self.y[5] += dy5 * self.dt;
        self.y[1] += dy1 * self.dt;
        self.y[6] += dy6 * self.dt;
        self.y[2] += dy2 * self.dt;
        self.y[7] += dy7 * self.dt;
        self.y[3] += dy3 * self.dt;
        self.y[8] += dy8 * self.dt;
        self.y[1] - self.y[2] - self.y[3]
    }
    /// Zeroes the full state vector.
    pub fn reset(&mut self) {
        self.y = [0.0; 10];
    }
}
impl Default for WendlingNeuron {
    fn default() -> Self {
        Self::new()
    }
}
650
/// Larter-Breakspear 2003 — neural mass with ion channels for whole-brain modelling.
#[derive(Clone, Debug)]
pub struct LarterBreakspearNeuron {
    pub v: f64,
    pub w: f64,
    pub z: f64,
    pub g_ca: f64,
    pub g_na: f64,
    pub g_k: f64,
    pub g_l: f64,
    pub v_ca: f64,
    pub v_na: f64,
    pub v_k: f64,
    pub v_l: f64,
    pub phi: f64,
    pub tau_k: f64,
    pub b: f64,
    pub a_ee: f64,
    pub i_ext: f64,
    pub dt: f64,
}

impl LarterBreakspearNeuron {
    /// Builds a unit with this module's default conductance set.
    pub fn new() -> Self {
        Self {
            v: -0.5,
            w: 0.0,
            z: 0.0,
            g_ca: 1.1,
            g_na: 6.7,
            g_k: 2.0,
            g_l: 0.5,
            v_ca: 1.0,
            v_na: 0.53,
            v_k: -0.7,
            v_l: -0.5,
            phi: 0.7,
            tau_k: 1.0,
            b: 0.1,
            a_ee: 0.36,
            i_ext: 0.3,
            dt: 0.01,
        }
    }

    /// One Euler step given an external `coupling` current; returns V.
    pub fn step(&mut self, coupling: f64) -> f64 {
        // Instantaneous tanh gating variables for each channel.
        let act_ca = 0.5 * (1.0 + ((self.v + 0.01) / 0.15).tanh());
        let act_na = 0.5 * (1.0 + ((self.v - 0.12) / 0.15).tanh());
        let act_k = 0.5 * (1.0 + (self.v / 0.3).tanh());
        // Ionic currents: conductance × activation × driving force.
        let cur_ca = self.g_ca * act_ca * (self.v - self.v_ca);
        let cur_na = self.g_na * act_na * (self.v - self.v_na);
        let cur_k = self.g_k * self.w * (self.v - self.v_k);
        let cur_leak = self.g_l * (self.v - self.v_l);
        // All slopes use the pre-update state; updates are applied afterwards.
        let dv = -cur_ca - cur_na - cur_k - cur_leak + self.i_ext + coupling + self.a_ee * self.v;
        let dw = self.phi * (act_k - self.w) / self.tau_k;
        let dz = self.b * (self.v + 0.5 - self.z);
        self.v += dv * self.dt;
        self.w += dw * self.dt;
        self.z += dz * self.dt;
        self.v
    }

    /// Restores the default initial condition.
    pub fn reset(&mut self) {
        self.v = -0.5;
        self.w = 0.0;
        self.z = 0.0;
    }
}
impl Default for LarterBreakspearNeuron {
    fn default() -> Self {
        Self::new()
    }
}
722
#[cfg(test)]
mod tests {
    use super::*;

    // All stochastic units are constructed with fixed seeds, so these smoke
    // tests are deterministic; they check activity, not exact values.

    #[test]
    fn poisson_fires() {
        // 200 Hz at 1 ms steps → p = 0.2/step; expect well over 10 spikes.
        let mut n = PoissonNeuron::new(200.0, 1.0, 42);
        let t: i32 = (0..1000).map(|_| n.step(-1.0)).sum();
        assert!(t > 10);
    }
    #[test]
    fn inhom_poisson_fires() {
        let mut n = InhomogeneousPoissonNeuron::new(1.0, 42);
        let t: i32 = (0..1000).map(|_| n.step(200.0)).sum();
        assert!(t > 10);
    }
    #[test]
    fn gamma_renewal_fires() {
        let mut n = GammaRenewalNeuron::new(100.0, 3, 42);
        let t: i32 = (0..2000).map(|_| n.step(-1.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn stochastic_if_fires() {
        // Supra-threshold drive plus noise must produce at least one spike.
        let mut n = StochasticIFNeuron::new(42);
        let t: i32 = (0..500).map(|_| n.step(30.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn gl_fires() {
        let mut n = GalvesLocherbachNeuron::new(42);
        let t: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn srm_fires() {
        let mut n = SpikeResponseNeuron::new();
        let t: i32 = (0..200).map(|_| n.step(10.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn glm_fires() {
        let mut n = GLMNeuron::new(5, 10, 42);
        let t: i32 = (0..5000).map(|_| n.step(20.0)).sum();
        assert!(t > 0);
    }
    #[test]
    fn wc_oscillates() {
        // The E rate should keep moving under constant drive (no fixed point
        // reached immediately).
        let mut n = WilsonCowanUnit::new();
        let mut last = n.e;
        let mut changes = 0;
        for _ in 0..500 {
            let e = n.step(5.0);
            if (e - last).abs() > 0.001 {
                changes += 1;
            }
            last = e;
        }
        assert!(changes > 0);
    }
    #[test]
    fn jr_produces_eeg() {
        // Driven Jansen-Rit must leave the zero state.
        let mut n = JansenRitUnit::new();
        let mut nz = 0;
        for _ in 0..5000 {
            let v = n.step(220.0);
            if v.abs() > 0.001 {
                nz += 1;
            }
        }
        assert!(nz > 0);
    }
    #[test]
    fn ww_diverges() {
        // Asymmetric stimulus should break the s1/s2 symmetry (a decision).
        let mut n = WongWangUnit::new(42);
        for _ in 0..5000 {
            n.step(0.02, 0.0);
        }
        assert!((n.s1 - n.s2).abs() > 0.001);
    }
    #[test]
    fn ek_pop_fires() {
        let mut n = ErmentroutKopellPopulation::new();
        for _ in 0..1000 {
            n.step(0.0);
        }
        assert!(n.r > 0.0);
    }
    #[test]
    fn wendling_produces() {
        let mut n = WendlingNeuron::new();
        let mut nz = 0;
        for _ in 0..5000 {
            let v = n.step(220.0);
            if v.abs() > 0.001 {
                nz += 1;
            }
        }
        assert!(nz > 0);
    }
    #[test]
    fn lb_evolves() {
        // The membrane potential must move away from its initial value.
        let mut n = LarterBreakspearNeuron::new();
        let v0 = n.v;
        for _ in 0..1000 {
            n.step(0.0);
        }
        assert!((n.v - v0).abs() > 0.001);
    }
}