Skip to main content

sc_neurocore_engine/neurons/
ai_optimized.rs

1// SPDX-License-Identifier: AGPL-3.0-or-later
2// Commercial license available
3// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
4// © Code 2020–2026 Miroslav Šotek. All rights reserved.
5// ORCID: 0009-0009-3560-0851
6// Contact: www.anulum.li | protoscience@anulum.li
7// SC-NeuroCore — AI-optimized spiking neuron models (original designs)
8
9//! Eight novel neuron models designed for AI workloads, not biological simulation.
10
/// Three-compartment memory neuron (fast/medium/slow timescales).
/// Slow compartment accumulates context, modulating excitability.
///
/// The fast compartment is a leaky integrator driving spikes; each spike
/// charges the medium trace, which leaks into the slow trace. The slow
/// trace lowers the effective threshold (`theta_base - gamma * v_slow`),
/// so a history of activity makes the neuron more excitable.
#[derive(Clone, Debug)]
pub struct MultiTimescaleNeuron {
    pub v_fast: f64,
    pub v_medium: f64,
    pub v_slow: f64,
    pub tau_fast: f64,
    pub tau_medium: f64,
    pub tau_slow: f64,
    pub alpha: f64,
    pub beta: f64,
    pub gamma: f64,
    pub theta_base: f64,
    pub dt: f64,
}

impl MultiTimescaleNeuron {
    /// Construct with the default time constants (5 / 200 / 10000 steps).
    pub fn new() -> Self {
        Self {
            v_fast: 0.0,
            v_medium: 0.0,
            v_slow: 0.0,
            tau_fast: 5.0,
            tau_medium: 200.0,
            tau_slow: 10_000.0,
            alpha: 10.0,
            beta: 0.05,
            gamma: 0.3,
            theta_base: 1.0,
            dt: 1.0,
        }
    }

    /// Advance one time step with input `current`; returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Fast leaky integration of the input.
        self.v_fast += (-self.v_fast + current) / self.tau_fast * self.dt;

        // Accumulated slow context lowers the effective threshold.
        let threshold = self.theta_base - self.gamma * self.v_slow;
        let fired = (self.v_fast >= threshold) as i32;

        // Spikes charge the medium trace; the medium trace feeds the slow one.
        self.v_medium +=
            (-self.v_medium + self.alpha * f64::from(fired)) / self.tau_medium * self.dt;
        self.v_slow += (-self.v_slow + self.beta * self.v_medium) / self.tau_slow * self.dt;

        if fired != 0 {
            self.v_fast = 0.0; // hard reset after a spike
        }
        fired
    }

    /// Clear all three compartments back to rest.
    pub fn reset(&mut self) {
        self.v_fast = 0.0;
        self.v_medium = 0.0;
        self.v_slow = 0.0;
    }
}

impl Default for MultiTimescaleNeuron {
    fn default() -> Self {
        MultiTimescaleNeuron::new()
    }
}
69
/// Spiking neuron with learned sigmoid attention gate.
/// gate = sigmoid(w_key * I + w_query * v), modulates input before integration.
///
/// The gate mixes the raw input (via `w_key`) with the membrane state
/// (via `w_query`); the gated input is leaky-integrated and thresholded
/// at `theta` with a hard reset on spike.
#[derive(Clone, Debug)]
pub struct AttentionGatedNeuron {
    pub v: f64,
    pub w_key: f64,
    pub w_query: f64,
    pub tau: f64,
    pub theta: f64,
    pub dt: f64,
}

impl AttentionGatedNeuron {
    /// Default parameterization: unit key weight, half-strength query weight.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            w_key: 1.0,
            w_query: 0.5,
            tau: 10.0,
            theta: 1.0,
            dt: 1.0,
        }
    }

    /// One integration step; returns 1 on spike, 0 otherwise.
    pub fn step(&mut self, current: f64) -> i32 {
        // Logistic gate over a key/query mix of input and membrane state.
        let drive = self.w_key * current + self.w_query * self.v;
        let gate = 1.0 / (1.0 + (-drive).exp());
        self.v += (-self.v + gate * current) / self.tau * self.dt;

        let fired = self.v >= self.theta;
        if fired {
            self.v = 0.0; // hard reset
        }
        i32::from(fired)
    }

    /// Zero the membrane potential.
    pub fn reset(&mut self) {
        self.v = 0.0;
    }
}

impl Default for AttentionGatedNeuron {
    fn default() -> Self {
        AttentionGatedNeuron::new()
    }
}
115
/// Fires only on prediction errors. Silent when input matches prediction.
///
/// An internal prediction `pred` tracks the input with time constant
/// `tau_pred`; the membrane integrates only the absolute mismatch, so a
/// constant, well-predicted input eventually produces no spikes.
#[derive(Clone, Debug)]
pub struct PredictiveCodingNeuron {
    pub v: f64,
    pub pred: f64,
    pub tau: f64,
    pub tau_pred: f64,
    pub theta: f64,
    pub dt: f64,
}

impl PredictiveCodingNeuron {
    /// Defaults: fast membrane (tau = 10) and a slower prediction (tau_pred = 50).
    pub fn new() -> Self {
        Self {
            v: 0.0,
            pred: 0.0,
            tau: 10.0,
            tau_pred: 50.0,
            theta: 1.0,
            dt: 1.0,
        }
    }

    /// One step: integrate |input - prediction|, adapt the prediction, threshold.
    pub fn step(&mut self, current: f64) -> i32 {
        // Mismatch is measured against the prediction *before* it adapts.
        let surprise = (current - self.pred).abs();
        self.pred += (current - self.pred) / self.tau_pred * self.dt;
        self.v += (-self.v + surprise) / self.tau * self.dt;

        let fired = self.v >= self.theta;
        if fired {
            self.v = 0.0; // hard reset
        }
        i32::from(fired)
    }

    /// Clear both the membrane and the internal prediction.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.pred = 0.0;
    }
}

impl Default for PredictiveCodingNeuron {
    fn default() -> Self {
        PredictiveCodingNeuron::new()
    }
}
162
/// Introspects on its own spike history; adjusts tau based on firing rate.
///
/// A 50-step ring buffer records recent spikes. The higher the recent
/// firing rate relative to `target_rate`, the larger the effective time
/// constant, slowing integration and pushing the rate back down.
#[derive(Clone, Debug)]
pub struct SelfReferentialNeuron {
    pub v: f64,
    pub tau: f64,
    pub theta: f64,
    pub target_rate: f64,
    pub dt: f64,
    history: Vec<u8>,
    head: usize,
    window: usize,
}

impl SelfReferentialNeuron {
    /// Default: 50-step introspection window, 10% target firing rate.
    pub fn new() -> Self {
        let window = 50;
        Self {
            v: 0.0,
            tau: 10.0,
            theta: 1.0,
            target_rate: 0.1,
            dt: 1.0,
            history: vec![0; window],
            head: 0,
            window,
        }
    }

    /// One step with rate-adaptive integration; returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Recent firing rate from the ring buffer.
        let spikes: u32 = self.history.iter().map(|&s| u32::from(s)).sum();
        let rate = f64::from(spikes) / self.window as f64;
        // Firing above target inflates the effective tau (slower integration).
        let tau_eff = self.tau * (1.0 + rate / self.target_rate);
        self.v += (-self.v + current) / tau_eff * self.dt;

        let fired: i32 = if self.v >= self.theta {
            self.v = 0.0; // hard reset
            1
        } else {
            0
        };
        self.history[self.head] = fired as u8;
        self.head = (self.head + 1) % self.window;
        fired
    }

    /// Clear membrane and spike history.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.history.fill(0);
        self.head = 0;
    }
}

impl Default for SelfReferentialNeuron {
    fn default() -> Self {
        SelfReferentialNeuron::new()
    }
}
219
/// Phase-coding neuron for compositional variable binding.
/// Spike when amplitude * cos(phase) > threshold.
///
/// `phi` advances by `omega * dt` each step and is kept wrapped into
/// [0, 2π): an unbounded phase accumulator loses `cos` precision as the
/// float magnitude grows, so long runs would slowly drift. Wrapping is
/// mathematically equivalent (cos is 2π-periodic).
#[derive(Clone, Debug)]
pub struct CompositionalBindingNeuron {
    /// Oscillator phase, wrapped to [0, 2π).
    pub phi: f64,
    /// Leaky-integrated input amplitude.
    pub amplitude: f64,
    /// Phase advance per unit time.
    pub omega: f64,
    /// Coupling strength (reserved; not used by `step` in this file).
    pub coupling: f64,
    /// Amplitude integration time constant.
    pub tau: f64,
    /// Spike threshold on amplitude * cos(phi).
    pub theta: f64,
    /// Integration time step.
    pub dt: f64,
}

impl CompositionalBindingNeuron {
    /// Default parameterization: slow oscillation (omega = 0.1), theta = 0.8.
    pub fn new() -> Self {
        Self {
            phi: 0.0,
            amplitude: 0.0,
            omega: 0.1,
            coupling: 0.5,
            tau: 10.0,
            theta: 0.8,
            dt: 1.0,
        }
    }

    /// Advance phase and amplitude; returns 1 while the phase-gated
    /// amplitude exceeds `theta` (no state reset on spike).
    pub fn step(&mut self, current: f64) -> i32 {
        // Advance and wrap the phase so numerical accuracy never degrades.
        self.phi = (self.phi + self.omega * self.dt).rem_euclid(std::f64::consts::TAU);
        self.amplitude += (-self.amplitude + current) / self.tau * self.dt;
        if self.amplitude * self.phi.cos() > self.theta {
            1
        } else {
            0
        }
    }

    /// Clear phase and amplitude.
    pub fn reset(&mut self) {
        self.phi = 0.0;
        self.amplitude = 0.0;
    }
}

impl Default for CompositionalBindingNeuron {
    fn default() -> Self {
        Self::new()
    }
}
267
/// Spiking neuron with learnable surrogate gradient parameters.
/// alpha (decay), beta (steepness), theta (threshold) all trainable.
///
/// Thresholding happens *before* integration each step; a spike zeroes
/// the decayed membrane via the (1 - spike) factor, then the fresh input
/// is added.
#[derive(Clone, Debug)]
pub struct DifferentiableSurrogateNeuron {
    pub v: f64,
    pub alpha: f64,
    pub beta: f64,
    pub theta: f64,
}

impl DifferentiableSurrogateNeuron {
    /// Defaults: decay 0.9, surrogate steepness 5, unit threshold.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            alpha: 0.9,
            beta: 5.0,
            theta: 1.0,
        }
    }

    /// Threshold-then-integrate step; returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        let spike = i32::from(self.v >= self.theta);
        self.v = self.alpha * self.v * (1.0 - spike as f64) + current;
        spike
    }

    /// Fast-sigmoid surrogate derivative: 1 / (1 + beta·|v - theta|)².
    pub fn surrogate_grad(&self) -> f64 {
        let dist = (self.v - self.theta).abs();
        let denom = 1.0 + self.beta * dist;
        1.0 / (denom * denom)
    }

    /// Zero the membrane potential.
    pub fn reset(&mut self) {
        self.v = 0.0;
    }
}

impl Default for DifferentiableSurrogateNeuron {
    fn default() -> Self {
        DifferentiableSurrogateNeuron::new()
    }
}
309
/// Ring attractor for continuous working memory.
/// Mexican hat connectivity; holds a continuous value in persistent activity.
///
/// Units live on a ring; each pair is coupled by Gaussian local
/// excitation minus uniform global inhibition, so sustained input forms
/// a self-maintaining bump of activity.
#[derive(Clone, Debug)]
pub struct ContinuousAttractorNeuron {
    pub u: Vec<f64>,
    pub tau: f64,
    pub dt: f64,
    weights: Vec<Vec<f64>>,
    n_units: usize,
}

impl ContinuousAttractorNeuron {
    /// Build a ring of `n_units` units with Mexican-hat coupling.
    pub fn new(n_units: usize) -> Self {
        let sigma_e: f64 = 1.0;
        let excitation: f64 = 4.0;
        let inhibition: f64 = 0.5;
        let mut weights = vec![vec![0.0; n_units]; n_units];
        for (i, row) in weights.iter_mut().enumerate() {
            for (j, w) in row.iter_mut().enumerate() {
                // Circular (ring) distance between units i and j.
                let dist = (i as f64 - j as f64)
                    .abs()
                    .min((n_units as f64) - (i as f64 - j as f64).abs());
                *w = excitation * (-dist * dist / (2.0 * sigma_e * sigma_e)).exp() - inhibition;
            }
        }
        Self {
            u: vec![0.0; n_units],
            tau: 10.0,
            dt: 1.0,
            weights,
            n_units,
        }
    }

    /// Saturating rectified-quadratic rate function r² / (1 + r²).
    fn activation(x: f64) -> f64 {
        let r = x.max(0.0);
        r * r / (1.0 + r * r)
    }

    /// One dynamics step; returns 1 when peak activity exceeds 1.0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Synchronous update: compute the next state from the current one.
        let mut next = vec![0.0; self.n_units];
        for i in 0..self.n_units {
            let mut recurrent = 0.0;
            for j in 0..self.n_units {
                recurrent += self.weights[i][j] * Self::activation(self.u[j]);
            }
            next[i] = self.u[i] + (-self.u[i] + recurrent + current) / self.tau * self.dt;
        }
        self.u = next;
        let peak = self.u.iter().copied().fold(f64::NEG_INFINITY, f64::max);
        i32::from(peak > 1.0)
    }

    /// Index of the maximally active unit (the bump center); 0 if empty.
    pub fn bump_position(&self) -> usize {
        self.u
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(i, _)| i)
            .unwrap_or(0)
    }

    /// Zero all unit activities (the weight matrix is untouched).
    pub fn reset(&mut self) {
        self.u.fill(0.0);
    }
}

impl Default for ContinuousAttractorNeuron {
    fn default() -> Self {
        Self::new(16)
    }
}
387
/// Neuron with self-regulating meta-learning rate.
/// error_trace adapts learning speed: high error → learn faster, low error → stabilize.
///
/// Spiking is a plain LIF; the meta-machinery lives in `update_meta`,
/// which maintains a slow trace of reward-prediction error and gates the
/// learning rate with a sigmoid of that trace.
#[derive(Clone, Debug)]
pub struct MetaPlasticNeuron {
    pub v: f64,
    pub error_trace: f64,
    pub expected_reward: f64,
    pub tau: f64,
    pub tau_meta: f64,
    pub theta: f64,
    pub lr0: f64,
    pub kappa: f64,
    pub target_error: f64,
    pub dt: f64,
}

impl MetaPlasticNeuron {
    /// Default parameterization: slow meta trace (tau_meta = 500).
    pub fn new() -> Self {
        Self {
            v: 0.0,
            error_trace: 0.0,
            expected_reward: 0.0,
            tau: 10.0,
            tau_meta: 500.0,
            theta: 1.0,
            lr0: 0.01,
            kappa: 5.0,
            target_error: 0.3,
            dt: 1.0,
        }
    }

    /// Plain LIF step; returns 1 on spike with hard reset, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        self.v += (-self.v + current) / self.tau * self.dt;
        let fired = self.v >= self.theta;
        if fired {
            self.v = 0.0;
        }
        i32::from(fired)
    }

    /// Fold one reward sample into the slow error trace, then nudge the
    /// reward estimate using the current (sigmoid-gated) learning rate.
    pub fn update_meta(&mut self, reward: f64) {
        let error = (reward - self.expected_reward).abs();
        self.error_trace += (-self.error_trace + error) / self.tau_meta * self.dt;
        let lr = self.meta_lr();
        self.expected_reward += lr * (reward - self.expected_reward);
    }

    /// Current learning rate: lr0 · sigmoid(kappa · (error_trace - target_error)).
    pub fn meta_lr(&self) -> f64 {
        self.lr0 / (1.0 + (-self.kappa * (self.error_trace - self.target_error)).exp())
    }

    /// Clear membrane, error trace, and reward estimate.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.error_trace = 0.0;
        self.expected_reward = 0.0;
    }
}

impl Default for MetaPlasticNeuron {
    fn default() -> Self {
        MetaPlasticNeuron::new()
    }
}
453
#[cfg(test)]
mod tests {
    //! Behavior pins for every neuron model in this file. Each model gets
    //! three checks: it fires under sustained drive, its distinguishing
    //! mechanism is exercised, and `reset` restores the documented state.
    use super::*;

    // ── MultiTimescaleNeuron ────────────────────────────────────

    #[test]
    fn multi_timescale_fires() {
        // Sustained supra-threshold drive must yield at least one spike.
        let mut n = MultiTimescaleNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn multi_timescale_slow_accumulates() {
        // Spiking feeds v_medium, which leaks into v_slow over time.
        let mut n = MultiTimescaleNeuron::new();
        for _ in 0..500 {
            n.step(2.0);
        }
        assert!(n.v_slow > 0.0);
    }

    #[test]
    fn multi_timescale_reset() {
        // reset() clears all three compartments.
        let mut n = MultiTimescaleNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v_fast, 0.0);
        assert_eq!(n.v_medium, 0.0);
        assert_eq!(n.v_slow, 0.0);
    }

    // ── AttentionGatedNeuron ────────────────────────────────────

    #[test]
    fn attention_gated_fires() {
        let mut n = AttentionGatedNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn attention_gated_gate_suppresses_low_input() {
        // A negative key weight drives the sigmoid gate toward 0 for
        // positive input, so a weak input never reaches threshold.
        let mut n = AttentionGatedNeuron {
            w_key: -2.0,
            ..AttentionGatedNeuron::new()
        };
        let total: i32 = (0..200).map(|_| n.step(0.1)).sum();
        assert_eq!(total, 0);
    }

    #[test]
    fn attention_gated_reset() {
        let mut n = AttentionGatedNeuron::new();
        for _ in 0..50 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
    }

    // ── PredictiveCodingNeuron ──────────────────────────────────

    #[test]
    fn predictive_coding_fires_on_change() {
        // After adapting to 1.0, a jump to 10.0 is a large surprise.
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..200 {
            n.step(1.0);
        }
        let spikes_after_change: i32 = (0..50).map(|_| n.step(10.0)).sum();
        assert!(spikes_after_change > 0);
    }

    #[test]
    fn predictive_coding_silent_on_constant() {
        // Once the prediction has converged, surprise → 0 and no spikes.
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..500 {
            n.step(0.5);
        }
        let late: i32 = (0..100).map(|_| n.step(0.5)).sum();
        assert_eq!(late, 0);
    }

    #[test]
    fn predictive_coding_reset() {
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..50 {
            n.step(5.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert_eq!(n.pred, 0.0);
    }

    // ── SelfReferentialNeuron ───────────────────────────────────

    #[test]
    fn self_referential_fires() {
        let mut n = SelfReferentialNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn self_referential_adapts_tau() {
        // The private spike-history ring must record activity.
        let mut n = SelfReferentialNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        let n_spikes: u32 = n.history.iter().map(|&x| x as u32).sum();
        assert!(n_spikes > 0);
    }

    #[test]
    fn self_referential_reset() {
        let mut n = SelfReferentialNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert!(n.history.iter().all(|&x| x == 0));
    }

    // ── CompositionalBindingNeuron ──────────────────────────────

    #[test]
    fn compositional_binding_fires() {
        let mut n = CompositionalBindingNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn compositional_binding_phase_advances() {
        let mut n = CompositionalBindingNeuron::new();
        for _ in 0..100 {
            n.step(1.0);
        }
        assert!(n.phi > 0.0);
    }

    #[test]
    fn compositional_binding_reset() {
        let mut n = CompositionalBindingNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.phi, 0.0);
        assert_eq!(n.amplitude, 0.0);
    }

    // ── DifferentiableSurrogateNeuron ───────────────────────────

    #[test]
    fn differentiable_surrogate_fires() {
        let mut n = DifferentiableSurrogateNeuron::new();
        let total: i32 = (0..20).map(|_| n.step(1.5)).sum();
        assert!(total > 0);
    }

    #[test]
    fn differentiable_surrogate_grad_positive() {
        // The fast-sigmoid surrogate is strictly positive everywhere.
        let n = DifferentiableSurrogateNeuron::new();
        assert!(n.surrogate_grad() > 0.0);
    }

    #[test]
    fn differentiable_surrogate_reset() {
        let mut n = DifferentiableSurrogateNeuron::new();
        for _ in 0..10 {
            n.step(1.5);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
    }

    // ── ContinuousAttractorNeuron ───────────────────────────────

    #[test]
    fn continuous_attractor_fires() {
        let mut n = ContinuousAttractorNeuron::new(16);
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn continuous_attractor_bump_forms() {
        // Sustained drive must leave persistent positive activity.
        let mut n = ContinuousAttractorNeuron::new(16);
        for _ in 0..200 {
            n.step(2.0);
        }
        let peak = n.u.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        assert!(peak > 0.0);
    }

    #[test]
    fn continuous_attractor_reset() {
        let mut n = ContinuousAttractorNeuron::new(16);
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert!(n.u.iter().all(|&x| x == 0.0));
    }

    // ── MetaPlasticNeuron ───────────────────────────────────────

    #[test]
    fn meta_plastic_fires() {
        let mut n = MetaPlasticNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn meta_plastic_adapts_lr() {
        // Reward feedback must move the sigmoid-gated learning rate.
        let mut n = MetaPlasticNeuron::new();
        let lr_before = n.meta_lr();
        for _ in 0..100 {
            n.step(2.0);
            n.update_meta(1.0);
        }
        let lr_after = n.meta_lr();
        assert!((lr_after - lr_before).abs() > 1e-6);
    }

    #[test]
    fn meta_plastic_reset() {
        let mut n = MetaPlasticNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
            n.update_meta(1.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert_eq!(n.error_trace, 0.0);
        assert_eq!(n.expected_reward, 0.0);
    }

    // ── ArcaneNeuron tests ──────────────────────────────────────

    #[test]
    fn arcane_fires() {
        let mut n = ArcaneNeuron::new();
        let t: i32 = (0..500).map(|_| n.step(2.0)).sum();
        assert!(t > 0);
    }

    #[test]
    fn arcane_deep_accumulates() {
        // The slow compartment must pick up nonzero state from activity.
        let mut n = ArcaneNeuron::new();
        for _ in 0..1000 {
            n.step(3.0);
        }
        assert!(n.v_deep.abs() > 1e-10, "deep state must accumulate");
    }

    #[test]
    fn arcane_deep_survives_reset() {
        // reset() clears fast/working state but deliberately keeps v_deep.
        let mut n = ArcaneNeuron::new();
        for _ in 0..500 {
            n.step(3.0);
        }
        let deep_before = n.v_deep;
        n.reset();
        assert_eq!(n.v_fast, 0.0);
        assert_eq!(n.v_work, 0.0);
        assert!(
            (n.v_deep - deep_before).abs() < 1e-15,
            "deep must survive reset"
        );
    }

    #[test]
    fn arcane_novelty_increases_deep_change() {
        let mut n = ArcaneNeuron::new();
        // Constant input
        for _ in 0..200 {
            n.step(2.0);
        }
        let deep_after_constant = n.v_deep;
        // Novel input
        for _ in 0..200 {
            n.step(8.0);
        }
        let deep_after_novel = n.v_deep;
        let delta = (deep_after_novel - deep_after_constant).abs();
        assert!(delta > 0.0, "novel input must change deep state");
    }
}
732
/// ArcaneNeuron — unified self-referential cognition model.
///
/// 3-compartment (fast/working/deep) with attention gate, predictive
/// self-model, and meta-plastic learning rate. Deep compartment
/// accumulates identity and survives reset.
///
/// Original design: Šotek & Arcane Sapience 2026.
#[derive(Clone, Debug)]
pub struct ArcaneNeuron {
    pub v_fast: f64,            // fast (spiking) membrane potential
    pub v_work: f64,            // working-memory compartment
    pub v_deep: f64,            // slow "identity" compartment; not cleared by reset()
    pub tau_fast: f64,          // fast time constant
    pub tau_work: f64,          // working-memory time constant
    pub tau_deep: f64,          // deep time constant
    pub alpha_w: f64,           // spike → working-memory coupling
    pub alpha_d: f64,           // working → deep coupling
    pub theta: f64,             // base spike threshold
    pub gamma: f64,             // deep-state threshold scaling (raises threshold)
    pub delta_conf: f64,        // confidence-based threshold reduction
    pub w_gate: [f64; 4],       // gate weights over [input, v_fast, v_work, confidence]
    pub w_pred: [f64; 3],       // self-model weights over [v_fast, v_work, v_deep]
    pub kappa: f64,             // novelty-sigmoid steepness
    pub surprise_baseline: f64, // surprise level that maps to novelty 0.5
    pub lr_base: f64,           // base meta-learning rate
    pub eta: f64,               // novelty → learning-rate gain
    pub w_inh: f64,             // self-inhibition from the recent firing rate
    pub dt: f64,                // integration time step
    prediction: f64,            // last self-model prediction of v_fast
    surprise: f64,              // |v_fast - prediction|
    novelty: f64,               // sigmoid-scaled surprise
    confidence: f64,            // 1 - mean of recent novelty
    spike_history: Vec<u8>,     // ring buffer of recent spikes (len 50 from new())
    novelty_history: Vec<f64>,  // ring buffer of recent novelty (len 20 from new())
    hist_idx: usize,            // write cursor into spike_history
    nov_idx: usize,             // write cursor into novelty_history
    total_steps: usize,         // lifetime step counter (incremented, never read here)
}
771
impl ArcaneNeuron {
    /// Default parameterization. All dynamic state starts at rest except
    /// `confidence` (0.5) and the novelty history, which is primed with
    /// 0.5 so early confidence estimates are neutral.
    pub fn new() -> Self {
        Self {
            v_fast: 0.0,
            v_work: 0.0,
            v_deep: 0.0,
            tau_fast: 5.0,
            tau_work: 200.0,
            tau_deep: 10000.0,
            alpha_w: 0.3,
            alpha_d: 0.05,
            theta: 1.0,
            gamma: 0.2,
            delta_conf: 0.3,
            w_gate: [0.8, 0.1, 0.05, 0.05],
            w_pred: [0.6, 0.3, 0.1],
            kappa: 5.0,
            surprise_baseline: 0.1,
            lr_base: 0.01,
            eta: 2.0,
            w_inh: 0.3,
            dt: 1.0,
            prediction: 0.0,
            surprise: 0.0,
            novelty: 0.0,
            confidence: 0.5,
            spike_history: vec![0; 50],
            novelty_history: vec![0.5; 20],
            hist_idx: 0,
            nov_idx: 0,
            total_steps: 0,
        }
    }

    /// One full cognitive step: gate the input, integrate, self-predict,
    /// score novelty, threshold, consolidate into working/deep memory, and
    /// adapt the prediction weights. Returns 1 on spike, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Recent firing rate and confidence (1 - mean of recent novelty).
        let sh_len = self.spike_history.len() as f64;
        let nh_len = self.novelty_history.len() as f64;
        let spike_rate: f64 = self.spike_history.iter().map(|&s| s as f64).sum::<f64>() / sh_len;
        self.confidence = 1.0 - self.novelty_history.iter().sum::<f64>() / nh_len;

        // Attention gate: sigmoid over input, both membrane states, and
        // the current confidence.
        let gate_in = self.w_gate[0] * current
            + self.w_gate[1] * self.v_fast
            + self.w_gate[2] * self.v_work
            + self.w_gate[3] * self.confidence;
        let gate = 1.0 / (1.0 + (-gate_in).exp());
        let i_eff = gate * current;

        // Fast compartment: leaky integration of the gated input, minus
        // self-inhibition proportional to the recent firing rate.
        self.v_fast += (-self.v_fast + i_eff - self.w_inh * spike_rate) / self.tau_fast * self.dt;

        // Self-model: linear prediction of v_fast from all compartments;
        // novelty is a sigmoid of how far surprise exceeds its baseline.
        self.prediction = self.w_pred[0] * self.v_fast
            + self.w_pred[1] * self.v_work
            + self.w_pred[2] * self.v_deep;
        self.surprise = (self.v_fast - self.prediction).abs();
        self.novelty = 1.0 / (1.0 + (-self.kappa * (self.surprise - self.surprise_baseline)).exp());

        let nh_sz = self.novelty_history.len();
        self.novelty_history[self.nov_idx % nh_sz] = self.novelty;
        self.nov_idx += 1;

        // Effective threshold rises with accumulated deep state and falls
        // with confidence; clamped below at 0.1.
        let eff_threshold = (self.theta
            * (1.0 + self.gamma * self.v_deep)
            * (1.0 - self.delta_conf * self.confidence))
            .max(0.1);

        let spike = if self.v_fast >= eff_threshold { 1 } else { 0 };

        if spike == 1 {
            // Consolidate the spiking membrane into working memory, then
            // hard-reset the fast compartment.
            self.v_work += self.alpha_w * self.v_fast / self.tau_work * self.dt;
            self.v_fast = 0.0;
        }

        // Working memory leaks; deep state slowly integrates the
        // novelty-weighted working memory.
        self.v_work += -self.v_work / self.tau_work * self.dt;
        self.v_deep +=
            (-self.v_deep + self.alpha_d * self.v_work * self.novelty) / self.tau_deep * self.dt;

        // Meta-plastic update of the self-model weights; the learning rate
        // scales up with novelty.
        // NOTE(review): on spike steps v_fast was just reset to 0 above, so
        // `error` is measured against the reset membrane rather than the
        // pre-reset value used for `prediction` — confirm this is intended.
        let meta_lr = self.lr_base * (1.0 + self.eta * self.novelty);
        let error = self.v_fast - self.prediction;
        self.w_pred[0] += meta_lr * error * self.v_fast;
        self.w_pred[1] += meta_lr * error * self.v_work;
        self.w_pred[2] += meta_lr * error * self.v_deep;
        // NOTE(review): renormalizing to unit length every step rescales
        // w_pred even when the gradient terms are ~0 (the initial norm is
        // ≈0.678, not 1) — presumably deliberate regularization; verify.
        let norm =
            (self.w_pred[0].powi(2) + self.w_pred[1].powi(2) + self.w_pred[2].powi(2)).sqrt();
        if norm > 0.0 {
            for w in &mut self.w_pred {
                *w /= norm;
            }
        }

        let sh_sz = self.spike_history.len();
        self.spike_history[self.hist_idx % sh_sz] = spike as u8;
        self.hist_idx += 1;
        self.total_steps += 1;

        spike
    }

    /// Reset transient state. The deep compartment deliberately persists:
    /// it encodes the neuron's accumulated "identity".
    /// NOTE(review): novelty_history / nov_idx are not cleared here, so
    /// confidence also carries across resets — confirm intended.
    pub fn reset(&mut self) {
        self.v_fast = 0.0;
        self.v_work = 0.0;
        // v_deep does NOT reset — it IS the identity
        self.prediction = 0.0;
        self.surprise = 0.0;
        self.novelty = 0.0;
        self.spike_history.fill(0);
        self.hist_idx = 0;
    }
}
879
880impl Default for ArcaneNeuron {
881    fn default() -> Self {
882        Self::new()
883    }
884}
885
/// Adaptive threshold spiking neuron matching the SpikingBrain architecture.
///
/// Converts activations into integer spike counts via data-dependent threshold,
/// enabling addition-based event-driven computation with ~69% sparsity.
///
/// Exact equations from arXiv:2509.05276v2 (SpikingBrain Technical Report):
///
///   V_th(x) = (1/k) · mean(|x|)          (adaptive threshold)
///   v[t+1] = v[t] - V_th · s[t] + x[t+1] (membrane with soft reset)
///   s_INT = round(v_T / V_th)              (integer spike count)
///
/// In time-collapsed mode: v_T = x, s_INT = round(x / V_th).
/// Parameter k controls the firing rate / sparsity trade-off.
///
/// Reference: SpikingBrain-1.0, arXiv:2509.05276v2, September 2025.
#[derive(Clone, Debug)]
pub struct AdaptiveThresholdMoENeuron {
    /// Membrane potential.
    pub v: f64,
    /// Current adaptive threshold.
    pub v_th: f64,
    /// Firing rate control parameter (higher k → lower threshold → more spikes).
    pub k: f64,
    /// Running EMA of |input| for threshold computation.
    mean_abs_x: f64,
    /// EMA decay for mean estimation.
    ema_alpha: f64,
}

impl AdaptiveThresholdMoENeuron {
    /// Default: k = 4, EMA factor 0.1, unit initial threshold.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            v_th: 1.0,
            k: 4.0,
            mean_abs_x: 0.0,
            ema_alpha: 0.1,
        }
    }

    /// Construct with a custom firing-rate parameter `k`.
    pub fn with_k(k: f64) -> Self {
        Self { k, ..Self::new() }
    }

    /// Fold |x| into the running mean and recompute V_th = mean(|x|)/k,
    /// falling back to 1.0 while the mean is still (near) zero.
    fn update_threshold(&mut self, x: f64) {
        self.mean_abs_x = (1.0 - self.ema_alpha) * self.mean_abs_x + self.ema_alpha * x.abs();
        self.v_th = if self.mean_abs_x > 1e-12 {
            self.mean_abs_x / self.k
        } else {
            1.0 // avoid division by near-zero
        };
    }

    /// Returns integer spike count (0 or more) — not binary.
    ///
    /// Implements: V_th = (1/k)·mean(|x|), s = round(v/V_th), soft reset v -= V_th·s.
    pub fn step(&mut self, current: f64) -> i32 {
        self.update_threshold(current);

        // Membrane: integrate the new input.
        self.v += current;

        // Integer spike count s_INT = round(v / V_th).
        let count = if self.v_th > 1e-12 {
            (self.v / self.v_th).round() as i32
        } else {
            0
        };

        // Soft reset: subtract the emitted charge V_th · s.
        if count != 0 {
            self.v -= self.v_th * f64::from(count);
        }

        count.max(0) // report non-negative counts only
    }

    /// Time-collapsed single-step mode: s_INT = round(x / V_th).
    pub fn step_collapsed(&mut self, activation: f64) -> i32 {
        self.update_threshold(activation);
        let count = (activation / self.v_th).round() as i32;
        count.max(0)
    }

    /// Current activation sparsity estimate (1 if below threshold, 0 if firing).
    pub fn sparsity(&self) -> f64 {
        if self.v.abs() < self.v_th {
            1.0
        } else {
            0.0
        }
    }

    /// Restore the freshly-constructed membrane/threshold state.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.mean_abs_x = 0.0;
        self.v_th = 1.0;
    }
}

impl Default for AdaptiveThresholdMoENeuron {
    fn default() -> Self {
        AdaptiveThresholdMoENeuron::new()
    }
}
996
/// Hybrid linear attention neuron for spiking environments.
///
/// Combines local windowed attention with linear (kernel-based) global attention,
/// achieving near-linear training complexity O(L) instead of O(L²).
/// Inspired by SpikingBrain's hybrid attention architecture.
///
/// The neuron accumulates spike-weighted keys and values via a recurrent
/// state S, avoiding the quadratic attention matrix:
///
///   S(t+1) = λ S(t) + φ(k_t) ⊗ v_t
///   output = φ(q_t)ᵀ S(t)
///
/// where φ is an elu+1 feature map.
#[derive(Clone, Debug)]
pub struct HybridLinearAttentionNeuron {
    /// Last combined output (0.5·global + 0.5·local).
    pub v: f64,
    /// Recurrent key-value state, one slot per feature bucket.
    state_kv: Vec<f64>,
    /// Number of feature buckets (must stay > 0; bucket index is mod `dim`).
    pub dim: usize,
    /// Per-step decay of the recurrent state.
    pub lambda: f64,
    /// Length of the local attention window.
    pub window_size: usize,
    /// Ring buffer holding the last `window_size` values.
    window_buf: Vec<f64>,
    window_idx: usize,
    pub dt: f64,
}

impl HybridLinearAttentionNeuron {
    /// Create a neuron with `dim` feature buckets.
    ///
    /// # Panics
    /// Panics if `dim == 0` (the bucket index is computed modulo `dim`,
    /// which would otherwise fail at the first `step_qkv` call).
    pub fn new(dim: usize) -> Self {
        assert!(dim > 0, "HybridLinearAttentionNeuron requires dim > 0");
        // Derive the buffer length from the window size so the two can
        // never drift apart (the ring index is reduced modulo
        // `window_size`; a shorter buffer would index out of bounds).
        let window_size = 16;
        Self {
            v: 0.0,
            state_kv: vec![0.0; dim],
            dim,
            lambda: 0.95,
            window_size,
            window_buf: vec![0.0; window_size],
            window_idx: 0,
            dt: 1.0,
        }
    }

    /// elu(x) + 1 feature map (strictly positive).
    fn phi(x: f64) -> f64 {
        if x > 0.0 {
            x + 1.0
        } else {
            x.exp()
        }
    }

    /// Step with query, key, value (each scalar projections).
    /// Returns the combined global + local attention output.
    pub fn step_qkv(&mut self, query: f64, key: f64, value: f64) -> f64 {
        let phi_q = Self::phi(query);
        let phi_k = Self::phi(key);

        // Decay the recurrent KV state (linear-attention forgetting).
        for s in &mut self.state_kv {
            *s *= self.lambda;
        }
        // Hash the key feature into a bucket and accumulate φ(k)·v there.
        let idx = (phi_k.abs() * self.dim as f64) as usize % self.dim;
        self.state_kv[idx] += phi_k * value;

        // Global (linear-attention) readout from the same bucket.
        let global = phi_q * self.state_kv[idx];

        // Local windowed attention: running mean over the ring buffer.
        self.window_buf[self.window_idx % self.window_size] = value;
        self.window_idx += 1;
        let local: f64 = self.window_buf.iter().sum::<f64>() / self.window_size as f64;

        // Equal-weight blend of global and local paths.
        self.v = 0.5 * global + 0.5 * local;
        self.v
    }

    /// Simple step (input treated as combined qkv); spikes when output > 1.
    pub fn step(&mut self, current: f64) -> i32 {
        let out = self.step_qkv(current, current, current);
        i32::from(out > 1.0)
    }

    /// Clear output, recurrent state, and the local window.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.state_kv.fill(0.0);
        self.window_buf.fill(0.0);
        self.window_idx = 0;
    }
}

impl Default for HybridLinearAttentionNeuron {
    fn default() -> Self {
        Self::new(16)
    }
}
1089
/// Quantum-inspired LIF neuron with non-classical probability logic.
///
/// Extends standard LIF by maintaining a complex-valued amplitude z = a + bi
/// whose squared modulus |z|² determines the firing probability. Interference
/// between excitatory and inhibitory inputs can produce non-classical
/// suppression patterns (destructive interference).
///
///   dz/dt = (-z + I_complex) / τ
///   P(spike) = |z|² / θ²
///
/// Reference: Quantum-neural hybrid models, IBM Heron r2 noise models.
#[derive(Clone, Debug)]
pub struct QuantumInspiredLIFNeuron {
    /// Real part of the complex amplitude z.
    pub z_re: f64,
    /// Imaginary part of the complex amplitude z.
    pub z_im: f64,
    /// Membrane time constant τ for both amplitude components.
    pub tau: f64,
    /// Threshold θ; firing probability is |z|² / θ².
    pub theta: f64,
    /// Integration timestep.
    pub dt: f64,
    /// Value both amplitude components are reset to after a spike.
    pub v_reset: f64,
    /// xorshift64 PRNG state for the stochastic spike draw.
    /// Fixed seed in `new()` makes runs reproducible.
    rng_state: u64,
}
1111
1112impl QuantumInspiredLIFNeuron {
1113    pub fn new() -> Self {
1114        Self {
1115            z_re: 0.0,
1116            z_im: 0.0,
1117            tau: 20.0,
1118            theta: 1.0,
1119            dt: 0.1,
1120            v_reset: 0.0,
1121            rng_state: 12345,
1122        }
1123    }
1124
1125    /// Step with real and imaginary current components.
1126    pub fn step_complex(&mut self, i_re: f64, i_im: f64) -> i32 {
1127        let dz_re = (-self.z_re + i_re) / self.tau;
1128        let dz_im = (-self.z_im + i_im) / self.tau;
1129        self.z_re += dz_re * self.dt;
1130        self.z_im += dz_im * self.dt;
1131
1132        let prob = (self.z_re * self.z_re + self.z_im * self.z_im) / (self.theta * self.theta);
1133
1134        // Stochastic spike with probability |z|²/θ².
1135        self.rng_state ^= self.rng_state << 13;
1136        self.rng_state ^= self.rng_state >> 7;
1137        self.rng_state ^= self.rng_state << 17;
1138        let uniform = (self.rng_state & 0xFFFFFFFF) as f64 / 4294967296.0;
1139
1140        if uniform < prob.min(1.0) {
1141            self.z_re = self.v_reset;
1142            self.z_im = self.v_reset;
1143            1
1144        } else {
1145            0
1146        }
1147    }
1148
1149    /// Standard step: real input only (imaginary = 0).
1150    pub fn step(&mut self, current: f64) -> i32 {
1151        self.step_complex(current, 0.0)
1152    }
1153
1154    /// Firing probability from current amplitude.
1155    pub fn firing_probability(&self) -> f64 {
1156        let p = (self.z_re * self.z_re + self.z_im * self.z_im) / (self.theta * self.theta);
1157        p.min(1.0)
1158    }
1159
1160    pub fn reset(&mut self) {
1161        self.z_re = 0.0;
1162        self.z_im = 0.0;
1163    }
1164}
1165
1166impl Default for QuantumInspiredLIFNeuron {
1167    fn default() -> Self {
1168        Self::new()
1169    }
1170}
1171
1172// ---- Tests ----
1173
#[cfg(test)]
mod gap_tests {
    use super::*;

    #[test]
    fn adaptive_threshold_fires_integer_counts() {
        let mut n = AdaptiveThresholdMoENeuron::new();
        let total_spikes = (0..100).fold(0, |acc, _| acc + n.step(2.0));
        assert!(total_spikes > 0, "Must fire with positive input");
        // V_th adapts to mean(|x|)/k = 2.0/4.0 = 0.5, so round(2.0/0.5) = 4 per step.
        assert!(
            total_spikes > 100,
            "Should produce multi-spike counts, got {total_spikes}"
        );
    }

    #[test]
    fn adaptive_threshold_adapts_to_input_scale() {
        let mut n = AdaptiveThresholdMoENeuron::new();
        // Drive with large inputs so the running input estimate settles high.
        (0..50).for_each(|_| {
            n.step(10.0);
        });
        let th_large = n.v_th;
        n.reset();
        // Then drive with small inputs.
        (0..50).for_each(|_| {
            n.step(0.1);
        });
        let th_small = n.v_th;
        assert!(
            th_large > th_small,
            "Larger input → larger threshold: {th_large:.4} > {th_small:.4}"
        );
    }

    #[test]
    fn adaptive_threshold_collapsed_mode() {
        let mut n = AdaptiveThresholdMoENeuron::with_k(2.0);
        // Warm up the adaptive threshold before checking the spike count.
        (0..20).for_each(|_| {
            n.step_collapsed(5.0);
        });
        let s = n.step_collapsed(5.0);
        // V_th ≈ 5.0/2.0 = 2.5, s ≈ round(5.0/2.5) = 2.
        assert!(s >= 1, "Collapsed mode must fire, got {s}");
    }

    #[test]
    fn adaptive_threshold_sparsity() {
        // Alternating strong / near-zero drive should yield sparse output.
        let mut n = AdaptiveThresholdMoENeuron::with_k(4.0);
        let total = 200;
        let zeros = (0..total)
            .filter(|&i| {
                let input = if i % 3 == 0 { 2.0 } else { 0.01 };
                n.step(input) == 0
            })
            .count();
        let sparsity = zeros as f64 / total as f64;
        assert!(
            sparsity > 0.1,
            "Should have some sparsity with varying input, got {sparsity:.2}"
        );
    }

    #[test]
    fn hybrid_linear_attention_step() {
        let mut n = HybridLinearAttentionNeuron::new(8);
        // Run all 100 steps (no short-circuit) and remember any non-zero output.
        let nonzero = (0..100).fold(false, |seen, i| {
            let out = n.step_qkv(i as f64 * 0.1, 0.5, 1.0);
            seen || out.abs() > 1e-10
        });
        assert!(nonzero, "Should produce non-zero output");
    }

    #[test]
    fn hybrid_linear_attention_deterministic() {
        // Two identically-seeded neurons fed identical inputs must agree step-for-step.
        let mut n1 = HybridLinearAttentionNeuron::new(8);
        let mut n2 = HybridLinearAttentionNeuron::new(8);
        for i in 0..50 {
            let q = i as f64 * 0.1;
            let a = n1.step_qkv(q, 0.3, 0.7);
            let b = n2.step_qkv(q, 0.3, 0.7);
            assert_eq!(a, b, "Must be deterministic");
        }
    }

    #[test]
    fn hybrid_linear_attention_reset() {
        let mut n = HybridLinearAttentionNeuron::new(8);
        (0..50).for_each(|_| {
            n.step_qkv(1.0, 1.0, 1.0);
        });
        n.reset();
        assert_eq!(n.v, 0.0);
        assert!(n.state_kv.iter().all(|&x| x == 0.0));
    }

    #[test]
    fn quantum_lif_fires_stochastically() {
        let mut n = QuantumInspiredLIFNeuron::new();
        let spikes = (0..10_000).fold(0, |acc, _| acc + n.step(1.5));
        assert!(spikes > 0, "Must fire with strong input");
        assert!(spikes < 10_000, "Must not fire every step (stochastic)");
    }

    #[test]
    fn quantum_lif_interference() {
        // Opposing imaginary input has the same |z|² as the aligned case,
        // so both neurons are expected to fire at comparable rates.
        let mut n_constructive = QuantumInspiredLIFNeuron::new();
        let mut n_destructive = QuantumInspiredLIFNeuron::new();
        // Share the PRNG seed so the stochastic draws are identical.
        n_destructive.rng_state = n_constructive.rng_state;

        let mut spikes_c = 0;
        let mut spikes_d = 0;
        for _ in 0..5000 {
            spikes_c += n_constructive.step_complex(1.0, 1.0);
            spikes_d += n_destructive.step_complex(1.0, -1.0);
        }
        assert!(spikes_c > 0, "Constructive must fire");
        assert!(spikes_d > 0, "Destructive must fire");
    }

    #[test]
    fn quantum_lif_zero_input_no_fire() {
        // With zero input the amplitude never leaves the origin, so P(spike) = 0.
        let mut n = QuantumInspiredLIFNeuron::new();
        let mut spikes = 0;
        for _ in 0..1000 {
            spikes += n.step(0.0);
        }
        assert_eq!(spikes, 0, "Zero input must not fire");
    }

    #[test]
    fn quantum_lif_probability_range() {
        let mut n = QuantumInspiredLIFNeuron::new();
        (0..100).for_each(|_| {
            n.step(0.5);
            let p = n.firing_probability();
            assert!((0.0..=1.0).contains(&p), "P must be in [0,1], got {p}");
        });
    }
}