// sc_neurocore_engine/neurons/rate.rs

1// SPDX-License-Identifier: AGPL-3.0-or-later | Commercial license available
2// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
3// © Code 2020–2026 Miroslav Šotek. All rights reserved.
4// ORCID: 0009-0009-3560-0851
5// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — Rate-based models, synaptic plasticity neurons, and
// other special types.
8//! Rate-based models, synaptic plasticity neurons, and other special types.
9
/// McCulloch-Pitts 1943 — binary threshold unit.
#[derive(Clone, Debug)]
pub struct McCullochPittsNeuron {
    pub theta: f64, // firing threshold
}

impl McCullochPittsNeuron {
    /// Build a unit with firing threshold `theta`.
    pub fn new(theta: f64) -> Self {
        Self { theta }
    }

    /// Emit 1 when the summed weighted input reaches the threshold, else 0.
    pub fn step(&self, weighted_input: f64) -> i32 {
        i32::from(weighted_input >= self.theta)
    }
}

impl Default for McCullochPittsNeuron {
    /// Unit threshold of 1.0.
    fn default() -> Self {
        Self::new(1.0)
    }
}
33
/// Sigmoid rate neuron — Wilson-Cowan-style single unit.
#[derive(Clone, Debug)]
pub struct SigmoidRateNeuron {
    pub r: f64,     // current firing rate (state)
    pub tau: f64,   // relaxation time constant
    pub beta: f64,  // sigmoid slope
    pub theta: f64, // sigmoid midpoint
    pub dt: f64,    // Euler integration step
}

impl SigmoidRateNeuron {
    /// Default parameterization (tau = 10, beta = 1, theta = 0, dt = 0.1).
    pub fn new() -> Self {
        Self { r: 0.0, tau: 10.0, beta: 1.0, theta: 0.0, dt: 0.1 }
    }

    /// One Euler step of tau·dr/dt = -r + sigma(beta·(I - theta));
    /// returns the updated rate.
    pub fn step(&mut self, current: f64) -> f64 {
        let drive = self.beta * (current - self.theta);
        let sigma = 1.0 / (1.0 + (-drive).exp());
        self.r += (-self.r + sigma) / self.tau * self.dt;
        self.r
    }

    /// Zero the rate variable; parameters are untouched.
    pub fn reset(&mut self) {
        self.r = 0.0;
    }
}

impl Default for SigmoidRateNeuron {
    fn default() -> Self {
        Self::new()
    }
}
68
/// Threshold-linear rate neuron — ReLU-like firing curve.
#[derive(Clone, Debug)]
pub struct ThresholdLinearRateNeuron {
    pub r: f64,     // current firing rate (state)
    pub theta: f64, // activation threshold
    pub gain: f64,  // linear gain above threshold
}

impl ThresholdLinearRateNeuron {
    /// Default unit: zero threshold, unit gain.
    pub fn new() -> Self {
        Self { r: 0.0, theta: 0.0, gain: 1.0 }
    }

    /// Memoryless rectified-linear response: r = gain · max(I - theta, 0).
    pub fn step(&mut self, current: f64) -> f64 {
        let above = (current - self.theta).max(0.0);
        self.r = self.gain * above;
        self.r
    }

    /// Zero the rate variable.
    pub fn reset(&mut self) {
        self.r = 0.0;
    }
}

impl Default for ThresholdLinearRateNeuron {
    fn default() -> Self {
        Self::new()
    }
}
98
/// Li-Rinzel IP3R astrocyte model — Ca²⁺ dynamics.
///
/// Two-variable reduction of the De Young–Keizer IP3-receptor model:
/// cytosolic calcium `ca` and the slow inactivation gate `h`, at a fixed IP3
/// level. ER calcium is not stored explicitly; it is recovered from the
/// conservation relation ca_er = (c0 - ca) / c1.
#[derive(Clone, Debug)]
pub struct AstrocyteModel {
    pub ca: f64,      // cytosolic Ca²⁺ concentration (state)
    pub h: f64,       // IP3R slow inactivation gate, 0..1 (state)
    pub ip3: f64,     // IP3 concentration (held constant by `step`)
    pub v_er: f64,    // max IP3R channel flux rate
    pub k_er: f64,    // ER leak rate
    pub v_serca: f64, // max SERCA pump uptake rate
    pub d1: f64,      // IP3 dissociation constant (activation branch)
    pub d2: f64,      // Ca²⁺ inactivation dissociation constant
    pub d3: f64,      // IP3 dissociation constant (inactivation branch)
    pub d5: f64,      // Ca²⁺ activation dissociation constant
    pub c0: f64,      // total free Ca²⁺ conservation constant
    pub c1: f64,      // ER/cytosol volume ratio
    pub dt: f64,      // Euler integration step
}

impl AstrocyteModel {
    /// Default Li-Rinzel-style parameter set and resting initial state.
    pub fn new() -> Self {
        Self {
            ca: 0.05,
            h: 0.8,
            ip3: 0.5,
            v_er: 0.9,
            k_er: 0.15,
            v_serca: 0.4,
            d1: 0.13,
            d2: 1.049,
            d3: 0.9434,
            d5: 0.08234,
            c0: 2.0,
            c1: 0.185,
            dt: 0.01,
        }
    }
    /// One Euler step; `current` is an extra Ca²⁺ flux added to the balance.
    /// Returns the updated cytosolic Ca²⁺ (clamped non-negative).
    pub fn step(&mut self, current: f64) -> f64 {
        // ER Ca²⁺ from the conservation relation c0 = ca + c1·ca_er.
        let ca_er = (self.c0 - self.ca) / self.c1;
        // IP3R gating factors: IP3 activation and fast Ca²⁺ activation.
        let m_inf = self.ip3 / (self.ip3 + self.d1);
        let n_inf = self.ca / (self.ca + self.d5);
        // Channel flux ∝ (m∞·n∞·h)³ times the ER→cytosol gradient.
        let j_chan = self.v_er * (m_inf * n_inf * self.h).powi(3) * (ca_er - self.ca);
        let j_leak = self.k_er * (ca_er - self.ca);
        // SERCA uptake: Hill coefficient 2, half-activation K = 0.1 (hard-coded).
        let j_pump = self.v_serca * self.ca.powi(2) / (self.ca.powi(2) + 0.1_f64.powi(2));
        // Slow gate kinetics: h relaxes to h∞ with rate a2·(Q2 + ca), a2 = 0.2 (hard-coded).
        let q2 = self.d2 * (self.ip3 + self.d1) / (self.ip3 + self.d3);
        let h_inf = q2 / (q2 + self.ca);
        let tau_h = 1.0 / (0.2 * (q2 + self.ca));
        self.ca += (j_chan + j_leak - j_pump + current) * self.dt;
        self.ca = self.ca.max(0.0); // concentration cannot go negative
        self.h += (h_inf - self.h) / tau_h * self.dt;
        self.ca
    }
    /// Restore the initial state variables (ca, h, ip3); parameters untouched.
    pub fn reset(&mut self) {
        self.ca = 0.05;
        self.h = 0.8;
        self.ip3 = 0.5;
    }
}
impl Default for AstrocyteModel {
    fn default() -> Self {
        Self::new()
    }
}
161
/// Tsodyks-Markram 1997 — LIF with short-term synaptic plasticity.
///
/// A leaky integrate-and-fire membrane driven through a synapse with
/// depression (resource pool `x`) and facilitation (utilization `u`).
#[derive(Clone, Debug)]
pub struct TsodyksMarkramNeuron {
    pub v: f64,           // membrane potential (state)
    pub x: f64,           // available synaptic resources, 0..1 (state)
    pub u: f64,           // utilization / release probability (state)
    pub v_rest: f64,      // resting potential
    pub v_reset: f64,     // post-spike reset potential
    pub v_threshold: f64, // spike threshold
    pub tau_m: f64,       // membrane time constant
    pub tau_d: f64,       // depression recovery time constant
    pub tau_f: f64,       // facilitation time constant
    pub u_se: f64,        // baseline utilization
    pub a_se: f64,        // absolute synaptic efficacy
    pub r_m: f64,         // membrane resistance
    pub dt: f64,          // Euler integration step
}

impl TsodyksMarkramNeuron {
    /// Default parameterization.
    pub fn new() -> Self {
        Self {
            v: -65.0,
            x: 1.0,
            u: 0.2,
            v_rest: -65.0,
            v_reset: -65.0,
            v_threshold: -50.0,
            tau_m: 20.0,
            tau_d: 200.0,
            tau_f: 600.0,
            u_se: 0.2,
            a_se: 50.0,
            r_m: 1.0,
            dt: 0.1,
        }
    }

    /// Advance one step; `presynaptic_spike` triggers a synaptic release
    /// event. Returns 1 if the postsynaptic neuron spikes, else 0.
    pub fn step(&mut self, current: f64, presynaptic_spike: bool) -> i32 {
        // Continuous recovery of resources and decay of facilitation.
        self.x += (1.0 - self.x) / self.tau_d * self.dt;
        self.u += (self.u_se - self.u) / self.tau_f * self.dt;

        let i_syn = if presynaptic_spike {
            // Facilitate first, then release a fraction u of the pool x.
            self.u += self.u_se * (1.0 - self.u);
            let release = self.a_se * self.u * self.x;
            self.x -= self.u * self.x;
            release
        } else {
            0.0
        };

        // Leaky integration of synaptic plus external current.
        self.v += (-(self.v - self.v_rest) + self.r_m * (i_syn + current)) / self.tau_m * self.dt;
        if self.v < self.v_threshold {
            return 0;
        }
        self.v = self.v_reset;
        1
    }

    /// Restore resting state.
    pub fn reset(&mut self) {
        self.v = self.v_rest;
        self.x = 1.0;
        self.u = self.u_se;
    }
}
impl Default for TsodyksMarkramNeuron {
    fn default() -> Self {
        Self::new()
    }
}
226
/// Liquid Time-Constant neuron — input-dependent time constant. Hasani et al. 2021.
#[derive(Clone, Debug)]
pub struct LiquidTimeConstantNeuron {
    pub x: f64,           // hidden state (state)
    pub tau_base: f64,    // maximal time constant
    pub w_tau: f64,       // input weight into the time-constant gate
    pub w_x: f64,         // recurrent weight into the state target
    pub w_in: f64,        // input weight into the state target
    pub bias: f64,        // gate bias
    pub v_threshold: f64, // spike threshold on x
    pub dt: f64,          // integration step
}

impl LiquidTimeConstantNeuron {
    /// Default parameterization.
    pub fn new() -> Self {
        Self {
            x: 0.0,
            tau_base: 10.0,
            w_tau: -0.5,
            w_x: 0.8,
            w_in: 1.0,
            bias: 0.0,
            v_threshold: 1.0,
            dt: 1.0,
        }
    }

    /// One exponential-Euler step; spikes use soft reset (threshold
    /// subtraction). Returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Effective time constant is gated by the input through a logistic.
        let gate = 1.0 / (1.0 + (-(self.w_tau * current + self.bias)).exp());
        let tau = (self.tau_base * gate).max(0.1); // floor keeps the decay finite
        // Relax x toward a tanh-bounded target of recurrent + external drive.
        let target = (self.w_x * self.x + self.w_in * current).tanh();
        let keep = (-self.dt / tau).exp();
        self.x = self.x * keep + target * (1.0 - keep);
        if self.x < self.v_threshold {
            0
        } else {
            self.x -= self.v_threshold;
            1
        }
    }

    /// Zero the state.
    pub fn reset(&mut self) {
        self.x = 0.0;
    }
}

impl Default for LiquidTimeConstantNeuron {
    fn default() -> Self {
        Self::new()
    }
}
275
/// Compte WM — NMDA-based working-memory neuron. Compte et al. 2000.
///
/// Conductance-based LIF with AMPA, NMDA (voltage-dependent Mg²⁺ block,
/// separate rise and decay kinetics) and GABA synapses, integrated with
/// forward Euler.
#[derive(Clone, Debug)]
pub struct CompteWMNeuron {
    pub v: f64,              // membrane potential (state)
    pub s_ampa: f64,         // AMPA gating variable (state)
    pub s_nmda: f64,         // NMDA gating variable, slow decay (state)
    pub x_nmda: f64,         // NMDA rise intermediate (state)
    pub s_gaba: f64,         // GABA gating variable (state)
    pub g_l: f64,            // leak conductance
    pub g_ampa: f64,         // AMPA peak conductance
    pub g_nmda: f64,         // NMDA peak conductance
    pub g_gaba: f64,         // GABA peak conductance
    pub e_l: f64,            // leak reversal potential
    pub e_ampa: f64,         // AMPA reversal potential
    pub e_nmda: f64,         // NMDA reversal potential
    pub e_gaba: f64,         // GABA reversal potential
    pub c_m: f64,            // membrane capacitance
    pub mg: f64,             // extracellular Mg²⁺ concentration
    pub tau_ampa: f64,       // AMPA decay time constant
    pub tau_nmda_rise: f64,  // NMDA rise time constant
    pub tau_nmda_decay: f64, // NMDA decay time constant
    pub tau_gaba: f64,       // GABA decay time constant
    pub v_threshold: f64,    // spike threshold
    pub v_reset: f64,        // post-spike reset potential
    pub dt: f64,             // Euler integration step
}

impl CompteWMNeuron {
    /// Default parameter set with all gating variables at zero.
    pub fn new() -> Self {
        Self {
            v: -70.0,
            s_ampa: 0.0,
            s_nmda: 0.0,
            x_nmda: 0.0,
            s_gaba: 0.0,
            g_l: 0.025,
            g_ampa: 0.005,
            g_nmda: 0.165,
            g_gaba: 0.013,
            e_l: -70.0,
            e_ampa: 0.0,
            e_nmda: 0.0,
            e_gaba: -70.0,
            c_m: 0.5,
            mg: 1.0,
            tau_ampa: 2.0,
            tau_nmda_rise: 2.0,
            tau_nmda_decay: 100.0,
            tau_gaba: 5.0,
            v_threshold: -50.0,
            v_reset: -55.0,
            dt: 0.1,
        }
    }
    /// One Euler step with external `current` and a single presynaptic spike
    /// flag driving the synaptic gating. Returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64, spike_in: bool) -> i32 {
        // Jahr-Stevens-style voltage-dependent Mg²⁺ block of the NMDA conductance.
        let mg_block = 1.0 / (1.0 + self.mg * (-0.062 * self.v).exp() / 3.57);
        let i_l = self.g_l * (self.v - self.e_l);
        let i_ampa = self.g_ampa * self.s_ampa * (self.v - self.e_ampa);
        let i_nmda = self.g_nmda * self.s_nmda * mg_block * (self.v - self.e_nmda);
        let i_gaba = self.g_gaba * self.s_gaba * (self.v - self.e_gaba);
        // C_m dV/dt = -I_leak - I_syn + I_ext.
        self.v += (-i_l - i_ampa - i_nmda - i_gaba + current) / self.c_m * self.dt;
        let spike_f = if spike_in { 1.0 } else { 0.0 };
        // First-order AMPA gating driven directly by the presynaptic spike.
        self.s_ampa += (-self.s_ampa / self.tau_ampa + spike_f) * self.dt;
        // NMDA: the spike feeds the fast rise variable x, which in turn
        // drives s with saturation (1 - s) and coupling 0.5 (hard-coded).
        self.x_nmda += (-self.x_nmda / self.tau_nmda_rise + spike_f) * self.dt;
        self.s_nmda += (-self.s_nmda / self.tau_nmda_decay
            + 0.5 * self.x_nmda * (1.0 - self.s_nmda))
            * self.dt;
        // NOTE(review): the same presynaptic spike also drives GABA gating at
        // half weight — confirm this stand-in for inhibitory input is intended.
        self.s_gaba += (-self.s_gaba / self.tau_gaba + spike_f * 0.5) * self.dt;
        if self.v >= self.v_threshold {
            self.v = self.v_reset;
            1
        } else {
            0
        }
    }
    /// Return to leak reversal with all gating variables cleared.
    pub fn reset(&mut self) {
        self.v = self.e_l;
        self.s_ampa = 0.0;
        self.s_nmda = 0.0;
        self.x_nmda = 0.0;
        self.s_gaba = 0.0;
    }
}
impl Default for CompteWMNeuron {
    fn default() -> Self {
        Self::new()
    }
}
364
/// Parallel Spiking Neuron — convolution-based filter. Fang et al. 2023.
///
/// Keeps a ring buffer of the last `kernel_size` inputs and fires when the
/// kernel-weighted sum (a uniform moving average by default) reaches the
/// threshold. The filter state is not reset on spike.
#[derive(Clone, Debug)]
pub struct ParallelSpikingNeuron {
    pub kernel: Vec<f64>, // convolution weights, index 0 = oldest sample
    pub buffer: Vec<f64>, // ring buffer of recent inputs
    pub v_threshold: f64, // firing threshold on the filtered value
    ptr: usize,           // next write slot (== oldest sample after a step)
}

impl ParallelSpikingNeuron {
    /// Create a uniform (moving-average) kernel of `kernel_size` taps.
    ///
    /// # Panics
    /// Panics if `kernel_size` is zero: a zero-length kernel would otherwise
    /// yield an infinite weight here and an out-of-bounds/modulo-by-zero
    /// panic later in `step`.
    pub fn new(kernel_size: usize, v_threshold: f64) -> Self {
        assert!(kernel_size > 0, "kernel_size must be non-zero");
        let k = 1.0 / kernel_size as f64;
        Self {
            kernel: vec![k; kernel_size],
            buffer: vec![0.0; kernel_size],
            v_threshold,
            ptr: 0,
        }
    }
    /// Push `current` into the ring buffer and return 1 if the filtered
    /// value reaches the threshold, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        self.buffer[self.ptr] = current;
        self.ptr = (self.ptr + 1) % self.buffer.len();
        // After the increment `ptr` points at the oldest sample, so kernel
        // index 0 weights the oldest input and the last index the newest.
        let v: f64 = self
            .kernel
            .iter()
            .enumerate()
            .map(|(i, &w)| w * self.buffer[(self.ptr + i) % self.buffer.len()])
            .sum();
        i32::from(v >= self.v_threshold)
    }
    /// Clear the buffer and rewind the write pointer.
    pub fn reset(&mut self) {
        self.buffer.fill(0.0);
        self.ptr = 0;
    }
}
404
/// Fractional-order LIF — Grünwald-Letnikov approximation. Teka et al. 2014.
///
/// The fractional membrane derivative D^α v is discretized with the GL
/// recursion: `history` holds the most recent voltages (oldest first) and
/// `gl_coeffs[j]` the signed binomial weights c_j = c_{j-1}·(1 - (α+1)/j),
/// with c_0 = 1.
#[derive(Clone, Debug)]
pub struct FractionalLIFNeuron {
    pub v: f64,           // membrane potential (state)
    pub v_rest: f64,      // resting potential
    pub v_reset: f64,     // post-spike reset potential
    pub v_threshold: f64, // spike threshold
    pub alpha: f64,       // fractional order of the derivative
    pub resistance: f64,  // membrane resistance
    pub dt: f64,          // time step
    history: Vec<f64>,    // last `max_hist` voltages, oldest at index 0
    gl_coeffs: Vec<f64>,  // GL binomial coefficients c_0..c_max_hist
    _max_hist: usize,     // memory length (also encoded by history.len())
}

impl FractionalLIFNeuron {
    /// Build a neuron of fractional order `alpha` with a truncated memory of
    /// `max_hist` past voltages.
    pub fn new(alpha: f64, max_hist: usize) -> Self {
        // Precompute the GL coefficients by the standard recurrence.
        let mut coeffs = vec![0.0; max_hist + 1];
        coeffs[0] = 1.0;
        for j in 1..=max_hist {
            coeffs[j] = coeffs[j - 1] * (1.0 - (alpha + 1.0) / j as f64);
        }
        Self {
            v: 0.0,
            v_rest: 0.0,
            v_reset: 0.0,
            v_threshold: 1.0,
            alpha,
            resistance: 1.0,
            dt: 1.0,
            history: vec![0.0; max_hist],
            gl_coeffs: coeffs,
            _max_hist: max_hist,
        }
    }
    /// One GL step; returns 1 on spike (hard reset to `v_reset`), else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Grünwald-Letnikov: D^α v ≈ (1/dt^α) Σ_j c_j v(t-j·dt)
        // Memory term: pair c_{j+1} with the j-th most recent voltage.
        let mut gl_sum = 0.0;
        let n = self.history.len().min(self.gl_coeffs.len() - 1);
        for j in 0..n {
            gl_sum += self.gl_coeffs[j + 1] * self.history[n - 1 - j];
        }
        // Solve the GL discretization for v_n: v_n = dt^α·rhs - Σ_{j≥1} c_j v_{n-j}.
        let rhs = -(self.v - self.v_rest) + self.resistance * current;
        self.v = rhs * self.dt.powf(self.alpha) - gl_sum;
        // Shift history
        // (O(len) shift per step; newest voltage goes to the back.)
        let len = self.history.len();
        if len > 0 {
            for i in 0..len - 1 {
                self.history[i] = self.history[i + 1];
            }
            self.history[len - 1] = self.v;
        }
        if self.v >= self.v_threshold {
            self.v = self.v_reset;
            1
        } else {
            0
        }
    }
    /// Return to rest and clear the voltage memory.
    pub fn reset(&mut self) {
        self.v = self.v_rest;
        self.history.fill(0.0);
    }
}
469
/// Siegert transfer function — analytical stationary firing rate of a LIF neuron.
///
/// Evaluates the Siegert formula
///   rate = 1 / (tau_rp + tau_m·√π·∫_{(V_r-μ)/σ}^{(V_θ-μ)/σ} e^{u²}(1+erf u) du)
/// with the mean input μ = v_rest + current and a fixed noise amplitude σ.
#[derive(Clone, Debug)]
pub struct SiegertTransferFunction {
    pub tau_m: f64,       // membrane time constant (ms)
    pub tau_rp: f64,      // absolute refractory period (ms)
    pub v_threshold: f64, // firing threshold
    pub v_reset: f64,     // reset potential (lower integration limit)
    pub v_rest: f64,      // resting potential (baseline of the mean input)
}

impl SiegertTransferFunction {
    /// Default LIF parameter set.
    pub fn new() -> Self {
        Self {
            tau_m: 20.0,
            tau_rp: 2.0,
            v_threshold: -50.0,
            v_reset: -70.0,
            v_rest: -65.0,
        }
    }
    /// Stationary firing rate for mean drive `current` (added to v_rest).
    /// Returns the rate in Hz (the 1/ms result is scaled by 1000).
    pub fn step(&self, current: f64) -> f64 {
        let mu = self.v_rest + current;
        let sigma: f64 = 3.0; // noise amplitude (fixed default)
        // Deterministic limit: with σ fixed at 3.0 this branch is currently
        // unreachable; kept as a guard should sigma become configurable.
        if sigma.abs() < 1e-10 {
            return if mu > self.v_threshold {
                1000.0 / self.tau_rp
            } else {
                0.0
            };
        }
        let upper = (self.v_threshold - mu) / sigma;
        let lower = (self.v_reset - mu) / sigma;
        // Gauss-Legendre 10-point quadrature for ∫ exp(u²)(1+erf(u)) du
        let nodes = [
            -0.973906528517172,
            -0.865063366688985,
            -0.679409568299024,
            -0.433395394129247,
            -0.148874338981631,
            0.148874338981631,
            0.433395394129247,
            0.679409568299024,
            0.865063366688985,
            0.973906528517172,
        ];
        let weights = [
            0.066671344308688,
            0.149451349150581,
            0.219086362515982,
            0.269266719309996,
            0.295524224714753,
            0.295524224714753,
            0.269266719309996,
            0.219086362515982,
            0.149451349150581,
            0.066671344308688,
        ];
        // Map the [-1, 1] quadrature nodes onto [lower, upper].
        let half = (upper - lower) / 2.0;
        let mid = (upper + lower) / 2.0;
        let mut integral = 0.0;
        for (&node, &w) in nodes.iter().zip(weights.iter()) {
            let u = mid + half * node;
            // Clamp u² to avoid exp overflow for extreme integration limits.
            let eu2 = (u * u).min(500.0).exp();
            let erf_u = Self::erf_approx(u);
            integral += w * eu2 * (1.0 + erf_u);
        }
        integral *= half;
        let rate = 1.0 / (self.tau_rp + self.tau_m * std::f64::consts::PI.sqrt() * integral);
        // Defensive clamp (negative only possible for pathological integrals);
        // ×1000 converts 1/ms to Hz.
        rate.max(0.0) * 1000.0
    }
    /// Error function via the Abramowitz-Stegun rational approximation
    /// (formula 7.1.26, |error| ≲ 1.5e-7), extended to x < 0 by oddness.
    fn erf_approx(x: f64) -> f64 {
        // Abramowitz-Stegun approximation
        let t = 1.0 / (1.0 + 0.3275911 * x.abs());
        let poly = t
            * (0.254829592
                + t * (-0.284496736 + t * (1.421413741 + t * (-1.453152027 + t * 1.061405429))));
        let result = 1.0 - poly * (-x * x).exp();
        if x >= 0.0 {
            result
        } else {
            -result
        }
    }
}
impl Default for SiegertTransferFunction {
    fn default() -> Self {
        Self::new()
    }
}
559
/// Amari neural field — continuous attractor with Mexican hat kernel.
///
/// 1-D field u discretized on `n` grid points with spacing `dx`:
///   tau·du/dt = -u + Σ_j w(|i-j|·dx)·[u_j]_+·dx + input_i
/// where w is a difference of Gaussians (narrow excitation, broad inhibition).
#[derive(Clone, Debug)]
pub struct AmariNeuralField {
    pub u: Vec<f64>,   // field activity per grid point (state)
    pub n: usize,      // number of grid points
    pub tau: f64,      // field time constant
    pub a_exc: f64,    // excitatory amplitude
    pub a_width: f64,  // excitatory Gaussian width
    pub b_inh: f64,    // inhibitory amplitude
    pub b_width: f64,  // inhibitory Gaussian width
    pub dx: f64,       // grid spacing
    pub dt: f64,       // Euler integration step
    w: Vec<f64>,       // precomputed kernel: w[k] = kernel at distance k·dx
}

impl AmariNeuralField {
    /// Build a field of `n` points with the default Mexican-hat parameters.
    pub fn new(n: usize) -> Self {
        let dx = 0.5;
        let a_exc = 1.5;
        let a_width = 1.0;
        let b_inh = 0.75;
        let b_width = 2.0;
        // `step` looks the kernel up by grid distance |i - j|, so tabulate it
        // by distance k·dx. (BUG FIX: it was previously tabulated centered at
        // n/2, misaligning the hat — self-excitation w[0] was the far-field
        // value and peak excitation landed at distance n/2.)
        let mut w = vec![0.0; n];
        for (k, wk) in w.iter_mut().enumerate() {
            let d = k as f64 * dx;
            *wk = a_exc * (-d * d / (2.0 * a_width * a_width)).exp()
                - b_inh * (-d * d / (2.0 * b_width * b_width)).exp();
        }
        Self {
            u: vec![0.0; n],
            n,
            tau: 10.0,
            a_exc,
            a_width,
            b_inh,
            b_width,
            dx,
            dt: 0.5,
            w,
        }
    }
    /// One Euler step with external `input` (missing entries read as 0);
    /// returns the mean field activity.
    pub fn step(&mut self, input: &[f64]) -> f64 {
        let n = self.n;
        let mut du = vec![0.0; n];
        for i in 0..n {
            // Convolve the rectified activity [u]_+ with the distance kernel.
            let mut conv = 0.0;
            for j in 0..n {
                let s_j = self.u[j].max(0.0);
                let idx = ((i as i64 - j as i64).unsigned_abs() as usize).min(n - 1);
                conv += self.w[idx] * s_j * self.dx;
            }
            let inp = input.get(i).copied().unwrap_or(0.0);
            du[i] = (-self.u[i] + conv + inp) / self.tau * self.dt;
        }
        for i in 0..n {
            self.u[i] += du[i];
        }
        self.u.iter().sum::<f64>() / n as f64
    }
    /// Clear the field.
    pub fn reset(&mut self) {
        self.u.fill(0.0);
    }
}
625
/// Leaky Compete-and-Fire — winner-take-all with lateral inhibition. Oster et al. 2009.
#[derive(Clone, Debug)]
pub struct LeakyCompeteFireNeuron {
    pub v: Vec<f64>,      // membrane potential per unit (state)
    pub n_units: usize,   // number of competing units
    pub tau: f64,         // leak time constant
    pub v_threshold: f64, // spike threshold
    pub w_inh: f64,       // lateral inhibition strength
    pub dt: f64,          // Euler integration step
}

impl LeakyCompeteFireNeuron {
    /// Build a population of `n_units` competing units.
    pub fn new(n_units: usize) -> Self {
        Self {
            v: vec![0.0; n_units],
            n_units,
            tau: 10.0,
            v_threshold: 1.0,
            w_inh: 0.5,
            dt: 1.0,
        }
    }

    /// Integrate all units with their `currents` (missing entries read as 0)
    /// and resolve firing in index order; each spike resets its unit and
    /// inhibits all others. Returns the per-unit spike vector.
    pub fn step(&mut self, currents: &[f64]) -> Vec<i32> {
        let n = self.n_units;
        // Leaky integration toward each unit's input.
        for (i, v) in self.v.iter_mut().enumerate() {
            let drive = currents.get(i).copied().unwrap_or(0.0);
            *v += (-*v + drive) / self.tau * self.dt;
        }
        let mut spikes = vec![0i32; n];
        for i in 0..n {
            if self.v[i] < self.v_threshold {
                continue;
            }
            spikes[i] = 1;
            self.v[i] = 0.0;
            // Lateral inhibition: knock every other unit down, floored at 0.
            for j in 0..n {
                if j != i {
                    self.v[j] = (self.v[j] - self.w_inh).max(0.0);
                }
            }
        }
        spikes
    }

    /// Zero all membrane potentials.
    pub fn reset(&mut self) {
        self.v.fill(0.0);
    }
}
674
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn mcp_threshold() {
        let unit = McCullochPittsNeuron::default();
        assert_eq!(unit.step(2.0), 1);
        assert_eq!(unit.step(0.5), 0);
    }

    #[test]
    fn sigmoid_rate() {
        let mut unit = SigmoidRateNeuron::new();
        (0..100).for_each(|_| {
            unit.step(5.0);
        });
        assert!(unit.r > 0.5);
    }

    #[test]
    fn tl_rate() {
        let mut unit = ThresholdLinearRateNeuron::new();
        assert!(unit.step(5.0) > 0.0);
        assert_eq!(unit.step(-1.0), 0.0);
    }

    #[test]
    fn astrocyte_ca() {
        let mut cell = AstrocyteModel::new();
        let peak = (0..5000).fold(0.0_f64, |acc, _| acc.max(cell.step(0.1)));
        assert!(peak > 0.05);
    }

    #[test]
    fn tm_fires() {
        let mut unit = TsodyksMarkramNeuron::new();
        let spikes: i32 = (0..500).map(|_| unit.step(50.0, false)).sum();
        assert!(spikes > 0);
    }

    #[test]
    fn ltc_fires() {
        let mut unit = LiquidTimeConstantNeuron::new();
        unit.v_threshold = 0.9;
        let spikes: i32 = (0..100).map(|_| unit.step(5.0)).sum();
        assert!(spikes > 0);
    }

    #[test]
    fn compte_fires() {
        let mut unit = CompteWMNeuron::new();
        let spikes: i32 = (0..500).map(|_| unit.step(5.0, false)).sum();
        assert!(spikes > 0);
    }

    #[test]
    fn psn_fires() {
        let mut unit = ParallelSpikingNeuron::new(4, 0.5);
        let spikes: i32 = (0..20).map(|_| unit.step(1.0)).sum();
        assert!(spikes > 0);
    }

    #[test]
    fn frac_lif_fires() {
        let mut unit = FractionalLIFNeuron::new(0.8, 50);
        let spikes: i32 = (0..200).map(|_| unit.step(2.0)).sum();
        assert!(spikes > 0);
    }

    #[test]
    fn siegert_rate() {
        let tf = SiegertTransferFunction::new();
        assert!(tf.step(20.0) > 0.0);
    }

    #[test]
    fn amari_activates() {
        let mut field = AmariNeuralField::new(32);
        let drive = vec![0.5; 32];
        (0..100).for_each(|_| {
            field.step(&drive);
        });
        assert!(field.u.iter().any(|&x| x.abs() > 0.01));
    }
}