// sc_neurocore_engine/neurons/ai_optimized.rs
// SPDX-License-Identifier: AGPL-3.0-or-later | Commercial license available
// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
// © Code 2020–2026 Miroslav Šotek. All rights reserved.
// ORCID: 0009-0009-3560-0851
// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — AI-optimized spiking neuron models (original designs)

//! Nine novel neuron models designed for AI workloads, not biological simulation.
/// Three-compartment memory neuron (fast/medium/slow timescales).
/// The slow compartment accumulates context, lowering the effective
/// firing threshold and thereby raising excitability.
#[derive(Clone, Debug)]
pub struct MultiTimescaleNeuron {
    /// Fast membrane potential — the spiking compartment.
    pub v_fast: f64,
    /// Medium-timescale trace, driven by this neuron's own spikes.
    pub v_medium: f64,
    /// Slow context trace, driven by the medium compartment.
    pub v_slow: f64,
    /// Time constant of the fast compartment (steps).
    pub tau_fast: f64,
    /// Time constant of the medium compartment (steps).
    pub tau_medium: f64,
    /// Time constant of the slow compartment (steps).
    pub tau_slow: f64,
    /// Spike -> medium-compartment drive strength.
    pub alpha: f64,
    /// Medium -> slow coupling strength.
    pub beta: f64,
    /// How strongly the slow trace lowers the threshold.
    pub gamma: f64,
    /// Baseline firing threshold.
    pub theta_base: f64,
    /// Integration time step.
    pub dt: f64,
}

impl MultiTimescaleNeuron {
    /// Creates a neuron with the reference parameter set
    /// (tau_fast = 5, tau_medium = 200, tau_slow = 10000).
    pub fn new() -> Self {
        Self {
            v_fast: 0.0,
            v_medium: 0.0,
            v_slow: 0.0,
            tau_fast: 5.0,
            tau_medium: 200.0,
            tau_slow: 10000.0,
            alpha: 10.0,
            beta: 0.05,
            gamma: 0.3,
            theta_base: 1.0,
            dt: 1.0,
        }
    }

    /// Advances one step with input `current`; returns 1 on spike, 0 otherwise.
    ///
    /// Order matters: the spike is detected against the *pre-reset* fast
    /// potential, the medium/slow traces are then updated, and only
    /// afterwards is the fast compartment reset.
    pub fn step(&mut self, current: f64) -> i32 {
        self.v_fast += (-self.v_fast + current) / self.tau_fast * self.dt;
        // Accumulated slow context lowers the effective threshold.
        let theta_eff = self.theta_base - self.gamma * self.v_slow;
        let fired = i32::from(self.v_fast >= theta_eff);
        self.v_medium += (-self.v_medium + self.alpha * fired as f64) / self.tau_medium * self.dt;
        self.v_slow += (-self.v_slow + self.beta * self.v_medium) / self.tau_slow * self.dt;
        if fired == 1 {
            self.v_fast = 0.0;
        }
        fired
    }

    /// Zeroes all three compartments.
    pub fn reset(&mut self) {
        self.v_fast = 0.0;
        self.v_medium = 0.0;
        self.v_slow = 0.0;
    }
}

impl Default for MultiTimescaleNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Spiking neuron with a learned sigmoid attention gate.
/// gate = sigmoid(w_key * I + w_query * v); the gate scales the input
/// before integration, letting the neuron attend to or ignore drive.
#[derive(Clone, Debug)]
pub struct AttentionGatedNeuron {
    /// Membrane potential.
    pub v: f64,
    /// Gate weight applied to the raw input (the "key").
    pub w_key: f64,
    /// Gate weight applied to the membrane potential (the "query").
    pub w_query: f64,
    /// Membrane time constant.
    pub tau: f64,
    /// Firing threshold.
    pub theta: f64,
    /// Integration time step.
    pub dt: f64,
}

impl AttentionGatedNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            w_key: 1.0,
            w_query: 0.5,
            tau: 10.0,
            theta: 1.0,
            dt: 1.0,
        }
    }

    /// Advances one step; returns 1 on spike (with reset to 0), else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Logistic gate over a key/query mix of input and state.
        let gate = 1.0 / (1.0 + (-(self.w_key * current + self.w_query * self.v)).exp());
        self.v += (-self.v + gate * current) / self.tau * self.dt;
        if self.v >= self.theta {
            self.v = 0.0;
            1
        } else {
            0
        }
    }

    /// Zeroes the membrane potential; gate weights are left untouched.
    pub fn reset(&mut self) {
        self.v = 0.0;
    }
}

impl Default for AttentionGatedNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Fires only on prediction errors. Silent when input matches its
/// running prediction of the input.
#[derive(Clone, Debug)]
pub struct PredictiveCodingNeuron {
    /// Membrane potential, driven by the magnitude of prediction error.
    pub v: f64,
    /// Running prediction of the input (leaky average).
    pub pred: f64,
    /// Membrane time constant.
    pub tau: f64,
    /// Prediction time constant (slower than the membrane).
    pub tau_pred: f64,
    /// Firing threshold.
    pub theta: f64,
    /// Integration time step.
    pub dt: f64,
}

impl PredictiveCodingNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            pred: 0.0,
            tau: 10.0,
            tau_pred: 50.0,
            theta: 1.0,
            dt: 1.0,
        }
    }

    /// Advances one step; returns 1 on spike (with reset), else 0.
    ///
    /// Surprise is measured against the prediction *before* the
    /// prediction itself is updated toward the new input.
    pub fn step(&mut self, current: f64) -> i32 {
        let surprise = (current - self.pred).abs();
        self.pred += (current - self.pred) / self.tau_pred * self.dt;
        self.v += (-self.v + surprise) / self.tau * self.dt;
        if self.v >= self.theta {
            self.v = 0.0;
            1
        } else {
            0
        }
    }

    /// Zeroes both the membrane potential and the prediction.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.pred = 0.0;
    }
}

impl Default for PredictiveCodingNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Introspects on its own spike history; adjusts its effective time
/// constant based on recent firing rate (homeostatic self-regulation).
#[derive(Clone, Debug)]
pub struct SelfReferentialNeuron {
    /// Membrane potential.
    pub v: f64,
    /// Base membrane time constant.
    pub tau: f64,
    /// Firing threshold.
    pub theta: f64,
    /// Desired firing rate (spikes per step) the neuron regulates toward.
    pub target_rate: f64,
    /// Integration time step.
    pub dt: f64,
    // Ring buffer of recent spikes (1 = fired) over `window` steps.
    history: Vec<u8>,
    // Next write position in `history`.
    head: usize,
    // Length of the spike-history window.
    window: usize,
}

impl SelfReferentialNeuron {
    /// Creates a neuron with a 50-step spike-history window.
    pub fn new() -> Self {
        let window = 50;
        Self {
            v: 0.0,
            tau: 10.0,
            theta: 1.0,
            target_rate: 0.1,
            dt: 1.0,
            history: vec![0; window],
            head: 0,
            window,
        }
    }

    /// Advances one step; returns 1 on spike (with reset), else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Recent firing rate over the ring-buffer window.
        let n_spikes: u32 = self.history.iter().map(|&x| x as u32).sum();
        let rate = n_spikes as f64 / self.window as f64;
        // Firing above target slows integration (negative feedback).
        let tau_eff = self.tau * (1.0 + rate / self.target_rate);
        self.v += (-self.v + current) / tau_eff * self.dt;
        let fired = if self.v >= self.theta {
            self.v = 0.0;
            1
        } else {
            0
        };
        self.history[self.head] = fired as u8;
        self.head = (self.head + 1) % self.window;
        fired
    }

    /// Zeroes the membrane potential and clears the spike history.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.history.fill(0);
        self.head = 0;
    }
}

impl Default for SelfReferentialNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Phase-coding neuron for compositional variable binding.
/// Spikes when amplitude * cos(phase) > threshold.
#[derive(Clone, Debug)]
pub struct CompositionalBindingNeuron {
    /// Oscillation phase, kept wrapped into [0, 2π).
    pub phi: f64,
    /// Leaky-integrated input amplitude.
    pub amplitude: f64,
    /// Angular velocity of the phase (radians per step).
    pub omega: f64,
    /// Phase-coupling strength. NOTE(review): currently unused by `step`;
    /// presumably reserved for cross-neuron phase coupling — confirm.
    pub coupling: f64,
    /// Amplitude time constant.
    pub tau: f64,
    /// Firing threshold on amplitude * cos(phase).
    pub theta: f64,
    /// Integration time step.
    pub dt: f64,
}

impl CompositionalBindingNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            phi: 0.0,
            amplitude: 0.0,
            omega: 0.1,
            coupling: 0.5,
            tau: 10.0,
            theta: 0.8,
            dt: 1.0,
        }
    }

    /// Advances one step; returns 1 while amplitude * cos(phi) exceeds
    /// the threshold (no reset — this neuron phase-codes, not integrates).
    pub fn step(&mut self, current: f64) -> i32 {
        // Advance the phase, wrapping into [0, 2π) so that long runs do
        // not accumulate floating-point error in cos(phi).
        self.phi = (self.phi + self.omega * self.dt).rem_euclid(std::f64::consts::TAU);
        self.amplitude += (-self.amplitude + current) / self.tau * self.dt;
        i32::from(self.amplitude * self.phi.cos() > self.theta)
    }

    /// Zeroes phase and amplitude.
    pub fn reset(&mut self) {
        self.phi = 0.0;
        self.amplitude = 0.0;
    }
}

impl Default for CompositionalBindingNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Spiking neuron with learnable surrogate-gradient parameters.
/// alpha (decay), beta (surrogate steepness), theta (threshold) are all
/// intended to be trainable.
#[derive(Clone, Debug)]
pub struct DifferentiableSurrogateNeuron {
    /// Membrane potential.
    pub v: f64,
    /// Membrane decay factor per step (discrete leaky integration).
    pub alpha: f64,
    /// Steepness of the surrogate gradient around the threshold.
    pub beta: f64,
    /// Firing threshold.
    pub theta: f64,
}

impl DifferentiableSurrogateNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            alpha: 0.9,
            beta: 5.0,
            theta: 1.0,
        }
    }

    /// Advances one step; returns 1 on spike, 0 otherwise.
    ///
    /// The spike is evaluated on the *previous* potential, then the
    /// potential decays (zeroed on spike via the `(1 - spike)` factor)
    /// and integrates the new input — the standard discrete SNN form.
    pub fn step(&mut self, current: f64) -> i32 {
        let spike = if self.v >= self.theta { 1 } else { 0 };
        self.v = self.alpha * self.v * (1.0 - spike as f64) + current;
        spike
    }

    /// Fast-sigmoid surrogate gradient d(spike)/dv at the current potential:
    /// 1 / (1 + beta * |v - theta|)^2.
    pub fn surrogate_grad(&self) -> f64 {
        let d = (self.v - self.theta).abs();
        let s = 1.0 + self.beta * d;
        1.0 / (s * s)
    }

    /// Zeroes the membrane potential.
    pub fn reset(&mut self) {
        self.v = 0.0;
    }
}

impl Default for DifferentiableSurrogateNeuron {
    fn default() -> Self {
        Self::new()
    }
}

/// Ring attractor for continuous working memory.
/// Mexican-hat connectivity (local excitation, global inhibition) holds a
/// continuous value as a persistent activity bump on the ring.
#[derive(Clone, Debug)]
pub struct ContinuousAttractorNeuron {
    /// Per-unit membrane potentials on the ring.
    pub u: Vec<f64>,
    /// Membrane time constant shared by all units.
    pub tau: f64,
    /// Integration time step.
    pub dt: f64,
    // Precomputed recurrent weight matrix (Mexican hat over ring distance).
    weights: Vec<Vec<f64>>,
    // Number of units on the ring.
    n_units: usize,
}

impl ContinuousAttractorNeuron {
    /// Builds a ring of `n_units` units with Gaussian local excitation
    /// (sigma = 1, gain = 4) minus uniform inhibition (0.5).
    pub fn new(n_units: usize) -> Self {
        let sigma_e: f64 = 1.0;
        let excitation: f64 = 4.0;
        let inhibition: f64 = 0.5;
        let mut weights = vec![vec![0.0; n_units]; n_units];
        for i in 0..n_units {
            for j in 0..n_units {
                // Circular (ring) distance between units i and j.
                let d = (i as f64 - j as f64)
                    .abs()
                    .min((n_units as f64) - (i as f64 - j as f64).abs());
                weights[i][j] =
                    excitation * (-d * d / (2.0 * sigma_e * sigma_e)).exp() - inhibition;
            }
        }
        Self {
            u: vec![0.0; n_units],
            tau: 10.0,
            dt: 1.0,
            weights,
            n_units,
        }
    }

    // Saturating rectified-quadratic firing-rate function in [0, 1).
    fn activation(x: f64) -> f64 {
        let r = x.max(0.0);
        r * r / (1.0 + r * r)
    }

    /// Advances one step with uniform input `current` to every unit;
    /// returns 1 when the peak activity exceeds 1.0, else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        // Synchronous update: all units read the same previous state.
        let mut new_u = vec![0.0; self.n_units];
        for i in 0..self.n_units {
            let mut recurrent = 0.0;
            for j in 0..self.n_units {
                recurrent += self.weights[i][j] * Self::activation(self.u[j]);
            }
            new_u[i] = self.u[i] + (-self.u[i] + recurrent + current) / self.tau * self.dt;
        }
        self.u = new_u;
        let peak = self.u.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        if peak > 1.0 {
            1
        } else {
            0
        }
    }

    /// Index of the most active unit (the bump center); 0 for an empty ring.
    pub fn bump_position(&self) -> usize {
        self.u
            .iter()
            .enumerate()
            .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap_or(std::cmp::Ordering::Equal))
            .map(|(i, _)| i)
            .unwrap_or(0)
    }

    /// Zeroes all unit potentials (the weight matrix is kept).
    pub fn reset(&mut self) {
        self.u.fill(0.0);
    }
}

impl Default for ContinuousAttractorNeuron {
    fn default() -> Self {
        Self::new(16)
    }
}

/// Neuron with a self-regulating meta learning rate.
/// `error_trace` adapts learning speed: high error -> learn faster,
/// low error -> stabilize.
#[derive(Clone, Debug)]
pub struct MetaPlasticNeuron {
    /// Membrane potential.
    pub v: f64,
    /// Slow trace of recent reward-prediction error magnitude.
    pub error_trace: f64,
    /// Running estimate of the reward.
    pub expected_reward: f64,
    /// Membrane time constant.
    pub tau: f64,
    /// Time constant of the meta-level error trace.
    pub tau_meta: f64,
    /// Firing threshold.
    pub theta: f64,
    /// Base learning rate.
    pub lr0: f64,
    /// Steepness of the learning-rate sigmoid.
    pub kappa: f64,
    /// Error level at which the meta learning rate sits at lr0 / 2.
    pub target_error: f64,
    /// Integration time step.
    pub dt: f64,
}

impl MetaPlasticNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            v: 0.0,
            error_trace: 0.0,
            expected_reward: 0.0,
            tau: 10.0,
            tau_meta: 500.0,
            theta: 1.0,
            lr0: 0.01,
            kappa: 5.0,
            target_error: 0.3,
            dt: 1.0,
        }
    }

    /// Advances one step of plain leaky integrate-and-fire dynamics;
    /// returns 1 on spike (with reset), else 0.
    pub fn step(&mut self, current: f64) -> i32 {
        self.v += (-self.v + current) / self.tau * self.dt;
        if self.v >= self.theta {
            self.v = 0.0;
            1
        } else {
            0
        }
    }

    /// Feeds back a reward signal: updates the error trace and moves the
    /// reward estimate with the current (error-dependent) meta learning rate.
    pub fn update_meta(&mut self, reward: f64) {
        let error = (reward - self.expected_reward).abs();
        self.error_trace += (-self.error_trace + error) / self.tau_meta * self.dt;
        let meta_lr = self.meta_lr();
        self.expected_reward += meta_lr * (reward - self.expected_reward);
    }

    /// Current meta learning rate: a sigmoid of the error trace,
    /// in (0, lr0), centered at `target_error`.
    pub fn meta_lr(&self) -> f64 {
        self.lr0 / (1.0 + (-self.kappa * (self.error_trace - self.target_error)).exp())
    }

    /// Zeroes membrane potential, error trace, and reward estimate.
    pub fn reset(&mut self) {
        self.v = 0.0;
        self.error_trace = 0.0;
        self.expected_reward = 0.0;
    }
}

impl Default for MetaPlasticNeuron {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // ── MultiTimescaleNeuron ────────────────────────────────────

    #[test]
    fn multi_timescale_fires() {
        let mut n = MultiTimescaleNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn multi_timescale_slow_accumulates() {
        let mut n = MultiTimescaleNeuron::new();
        for _ in 0..500 {
            n.step(2.0);
        }
        assert!(n.v_slow > 0.0);
    }

    #[test]
    fn multi_timescale_reset() {
        let mut n = MultiTimescaleNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v_fast, 0.0);
        assert_eq!(n.v_medium, 0.0);
        assert_eq!(n.v_slow, 0.0);
    }

    // ── AttentionGatedNeuron ────────────────────────────────────

    #[test]
    fn attention_gated_fires() {
        let mut n = AttentionGatedNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn attention_gated_gate_suppresses_low_input() {
        let mut n = AttentionGatedNeuron {
            w_key: -2.0,
            ..AttentionGatedNeuron::new()
        };
        let total: i32 = (0..200).map(|_| n.step(0.1)).sum();
        assert_eq!(total, 0);
    }

    #[test]
    fn attention_gated_reset() {
        let mut n = AttentionGatedNeuron::new();
        for _ in 0..50 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
    }

    // ── PredictiveCodingNeuron ──────────────────────────────────

    #[test]
    fn predictive_coding_fires_on_change() {
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..200 {
            n.step(1.0);
        }
        let spikes_after_change: i32 = (0..50).map(|_| n.step(10.0)).sum();
        assert!(spikes_after_change > 0);
    }

    #[test]
    fn predictive_coding_silent_on_constant() {
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..500 {
            n.step(0.5);
        }
        let late: i32 = (0..100).map(|_| n.step(0.5)).sum();
        assert_eq!(late, 0);
    }

    #[test]
    fn predictive_coding_reset() {
        let mut n = PredictiveCodingNeuron::new();
        for _ in 0..50 {
            n.step(5.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert_eq!(n.pred, 0.0);
    }

    // ── SelfReferentialNeuron ───────────────────────────────────

    #[test]
    fn self_referential_fires() {
        let mut n = SelfReferentialNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn self_referential_adapts_tau() {
        let mut n = SelfReferentialNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        let n_spikes: u32 = n.history.iter().map(|&x| x as u32).sum();
        assert!(n_spikes > 0);
    }

    #[test]
    fn self_referential_reset() {
        let mut n = SelfReferentialNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert!(n.history.iter().all(|&x| x == 0));
    }

    // ── CompositionalBindingNeuron ──────────────────────────────

    #[test]
    fn compositional_binding_fires() {
        let mut n = CompositionalBindingNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn compositional_binding_phase_advances() {
        let mut n = CompositionalBindingNeuron::new();
        for _ in 0..100 {
            n.step(1.0);
        }
        assert!(n.phi > 0.0);
    }

    #[test]
    fn compositional_binding_reset() {
        let mut n = CompositionalBindingNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert_eq!(n.phi, 0.0);
        assert_eq!(n.amplitude, 0.0);
    }

    // ── DifferentiableSurrogateNeuron ───────────────────────────

    #[test]
    fn differentiable_surrogate_fires() {
        let mut n = DifferentiableSurrogateNeuron::new();
        let total: i32 = (0..20).map(|_| n.step(1.5)).sum();
        assert!(total > 0);
    }

    #[test]
    fn differentiable_surrogate_grad_positive() {
        let n = DifferentiableSurrogateNeuron::new();
        assert!(n.surrogate_grad() > 0.0);
    }

    #[test]
    fn differentiable_surrogate_reset() {
        let mut n = DifferentiableSurrogateNeuron::new();
        for _ in 0..10 {
            n.step(1.5);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
    }

    // ── ContinuousAttractorNeuron ───────────────────────────────

    #[test]
    fn continuous_attractor_fires() {
        let mut n = ContinuousAttractorNeuron::new(16);
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn continuous_attractor_bump_forms() {
        let mut n = ContinuousAttractorNeuron::new(16);
        for _ in 0..200 {
            n.step(2.0);
        }
        let peak = n.u.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
        assert!(peak > 0.0);
    }

    #[test]
    fn continuous_attractor_reset() {
        let mut n = ContinuousAttractorNeuron::new(16);
        for _ in 0..100 {
            n.step(2.0);
        }
        n.reset();
        assert!(n.u.iter().all(|&x| x == 0.0));
    }

    // ── MetaPlasticNeuron ───────────────────────────────────────

    #[test]
    fn meta_plastic_fires() {
        let mut n = MetaPlasticNeuron::new();
        let total: i32 = (0..200).map(|_| n.step(2.0)).sum();
        assert!(total > 0);
    }

    #[test]
    fn meta_plastic_adapts_lr() {
        let mut n = MetaPlasticNeuron::new();
        let lr_before = n.meta_lr();
        for _ in 0..100 {
            n.step(2.0);
            n.update_meta(1.0);
        }
        let lr_after = n.meta_lr();
        assert!((lr_after - lr_before).abs() > 1e-6);
    }

    #[test]
    fn meta_plastic_reset() {
        let mut n = MetaPlasticNeuron::new();
        for _ in 0..100 {
            n.step(2.0);
            n.update_meta(1.0);
        }
        n.reset();
        assert_eq!(n.v, 0.0);
        assert_eq!(n.error_trace, 0.0);
        assert_eq!(n.expected_reward, 0.0);
    }

    // ── ArcaneNeuron ────────────────────────────────────────────

    #[test]
    fn arcane_fires() {
        let mut n = ArcaneNeuron::new();
        let t: i32 = (0..500).map(|_| n.step(2.0)).sum();
        assert!(t > 0);
    }

    #[test]
    fn arcane_deep_accumulates() {
        let mut n = ArcaneNeuron::new();
        for _ in 0..1000 {
            n.step(3.0);
        }
        assert!(n.v_deep.abs() > 1e-10, "deep state must accumulate");
    }

    #[test]
    fn arcane_deep_survives_reset() {
        let mut n = ArcaneNeuron::new();
        for _ in 0..500 {
            n.step(3.0);
        }
        let deep_before = n.v_deep;
        n.reset();
        assert_eq!(n.v_fast, 0.0);
        assert_eq!(n.v_work, 0.0);
        assert!(
            (n.v_deep - deep_before).abs() < 1e-15,
            "deep must survive reset"
        );
    }

    #[test]
    fn arcane_novelty_increases_deep_change() {
        let mut n = ArcaneNeuron::new();
        // Constant input
        for _ in 0..200 {
            n.step(2.0);
        }
        let deep_after_constant = n.v_deep;
        // Novel input
        for _ in 0..200 {
            n.step(8.0);
        }
        let deep_after_novel = n.v_deep;
        let delta = (deep_after_novel - deep_after_constant).abs();
        assert!(delta > 0.0, "novel input must change deep state");
    }
}

/// ArcaneNeuron — unified self-referential cognition model.
///
/// 3-compartment (fast/working/deep) with attention gate, predictive
/// self-model, and meta-plastic learning rate. The deep compartment
/// accumulates identity and survives `reset`.
///
/// Original design: Šotek & Arcane Sapience 2026.
#[derive(Clone, Debug)]
pub struct ArcaneNeuron {
    /// Fast (spiking) membrane potential.
    pub v_fast: f64,
    /// Working-memory compartment, charged by spikes.
    pub v_work: f64,
    /// Deep identity compartment; NOT cleared by `reset`.
    pub v_deep: f64,
    /// Fast compartment time constant.
    pub tau_fast: f64,
    /// Working compartment time constant.
    pub tau_work: f64,
    /// Deep compartment time constant.
    pub tau_deep: f64,
    /// Spike -> working-compartment transfer gain.
    pub alpha_w: f64,
    /// Working -> deep transfer gain (scaled by novelty).
    pub alpha_d: f64,
    /// Baseline firing threshold.
    pub theta: f64,
    /// Deep-state contribution to the effective threshold.
    pub gamma: f64,
    /// Confidence-based reduction of the effective threshold.
    pub delta_conf: f64,
    /// Attention-gate weights over [input, v_fast, v_work, confidence].
    pub w_gate: [f64; 4],
    /// Self-model weights predicting v_fast from [v_fast, v_work, v_deep].
    pub w_pred: [f64; 3],
    /// Steepness of the surprise -> novelty sigmoid.
    pub kappa: f64,
    /// Surprise level that maps to novelty 0.5.
    pub surprise_baseline: f64,
    /// Base learning rate of the self-model.
    pub lr_base: f64,
    /// Novelty gain on the meta learning rate.
    pub eta: f64,
    /// Strength of firing-rate self-inhibition.
    pub w_inh: f64,
    /// Integration time step.
    pub dt: f64,
    // Last self-model prediction of v_fast.
    prediction: f64,
    // |v_fast - prediction| from the last step (pre-reset v_fast).
    surprise: f64,
    // Sigmoid-squashed surprise in (0, 1).
    novelty: f64,
    // 1 - mean recent novelty; high when inputs are well predicted.
    confidence: f64,
    // Ring buffer of recent spikes (50 steps).
    spike_history: Vec<u8>,
    // Ring buffer of recent novelty values (20 steps).
    novelty_history: Vec<f64>,
    // Monotone write cursor into spike_history (taken modulo its length).
    hist_idx: usize,
    // Monotone write cursor into novelty_history (taken modulo its length).
    nov_idx: usize,
    // Lifetime step counter (bookkeeping only; never read internally).
    total_steps: usize,
}

impl ArcaneNeuron {
    /// Creates a neuron with the reference parameter set.
    pub fn new() -> Self {
        Self {
            v_fast: 0.0,
            v_work: 0.0,
            v_deep: 0.0,
            tau_fast: 5.0,
            tau_work: 200.0,
            tau_deep: 10000.0,
            alpha_w: 0.3,
            alpha_d: 0.05,
            theta: 1.0,
            gamma: 0.2,
            delta_conf: 0.3,
            w_gate: [0.8, 0.1, 0.05, 0.05],
            w_pred: [0.6, 0.3, 0.1],
            kappa: 5.0,
            surprise_baseline: 0.1,
            lr_base: 0.01,
            eta: 2.0,
            w_inh: 0.3,
            dt: 1.0,
            prediction: 0.0,
            surprise: 0.0,
            novelty: 0.0,
            confidence: 0.5,
            spike_history: vec![0; 50],
            novelty_history: vec![0.5; 20],
            hist_idx: 0,
            nov_idx: 0,
            total_steps: 0,
        }
    }

    /// Advances one step with input `current`; returns 1 on spike, else 0.
    ///
    /// The update order is load-bearing: gate -> fast integration ->
    /// self-model prediction/novelty -> threshold/spike -> work/deep
    /// traces -> self-model learning -> history bookkeeping.
    pub fn step(&mut self, current: f64) -> i32 {
        // Introspective statistics over the ring buffers.
        let sh_len = self.spike_history.len() as f64;
        let nh_len = self.novelty_history.len() as f64;
        let spike_rate: f64 = self.spike_history.iter().map(|&s| s as f64).sum::<f64>() / sh_len;
        self.confidence = 1.0 - self.novelty_history.iter().sum::<f64>() / nh_len;

        // Attention gate: logistic over input, state, and confidence.
        let gate_in = self.w_gate[0] * current
            + self.w_gate[1] * self.v_fast
            + self.w_gate[2] * self.v_work
            + self.w_gate[3] * self.confidence;
        let gate = 1.0 / (1.0 + (-gate_in).exp());
        let i_eff = gate * current;

        // Fast integration with firing-rate self-inhibition.
        self.v_fast += (-self.v_fast + i_eff - self.w_inh * spike_rate) / self.tau_fast * self.dt;

        // Predictive self-model: estimate v_fast from the full state.
        self.prediction = self.w_pred[0] * self.v_fast
            + self.w_pred[1] * self.v_work
            + self.w_pred[2] * self.v_deep;
        self.surprise = (self.v_fast - self.prediction).abs();
        self.novelty = 1.0 / (1.0 + (-self.kappa * (self.surprise - self.surprise_baseline)).exp());

        let nh_sz = self.novelty_history.len();
        self.novelty_history[self.nov_idx % nh_sz] = self.novelty;
        self.nov_idx += 1;

        // Effective threshold: raised by deep state, lowered by confidence,
        // floored at 0.1 so the neuron can always fire in principle.
        let eff_threshold = (self.theta
            * (1.0 + self.gamma * self.v_deep)
            * (1.0 - self.delta_conf * self.confidence))
            .max(0.1);

        let spike = if self.v_fast >= eff_threshold { 1 } else { 0 };

        // On spike: transfer to working memory, then reset fast potential.
        if spike == 1 {
            self.v_work += self.alpha_w * self.v_fast / self.tau_work * self.dt;
            self.v_fast = 0.0;
        }

        // Working memory decays; deep state integrates novelty-gated work.
        self.v_work += -self.v_work / self.tau_work * self.dt;
        self.v_deep +=
            (-self.v_deep + self.alpha_d * self.v_work * self.novelty) / self.tau_deep * self.dt;

        // Meta-plastic self-model learning: novelty boosts the rate.
        // NOTE(review): `error` uses v_fast *after* a possible spike reset,
        // while `surprise` used the pre-reset value — looks intentional
        // (learning toward the post-spike state) but worth confirming.
        let meta_lr = self.lr_base * (1.0 + self.eta * self.novelty);
        let error = self.v_fast - self.prediction;
        self.w_pred[0] += meta_lr * error * self.v_fast;
        self.w_pred[1] += meta_lr * error * self.v_work;
        self.w_pred[2] += meta_lr * error * self.v_deep;
        // Renormalize to keep the self-model weights on the unit sphere.
        let norm =
            (self.w_pred[0].powi(2) + self.w_pred[1].powi(2) + self.w_pred[2].powi(2)).sqrt();
        if norm > 0.0 {
            for w in &mut self.w_pred {
                *w /= norm;
            }
        }

        let sh_sz = self.spike_history.len();
        self.spike_history[self.hist_idx % sh_sz] = spike as u8;
        self.hist_idx += 1;
        self.total_steps += 1;

        spike
    }

    /// Clears the transient state (fast/working potentials, prediction,
    /// spike history). The deep compartment intentionally survives.
    pub fn reset(&mut self) {
        self.v_fast = 0.0;
        self.v_work = 0.0;
        // v_deep does NOT reset — it IS the identity
        self.prediction = 0.0;
        self.surprise = 0.0;
        self.novelty = 0.0;
        self.spike_history.fill(0);
        self.hist_idx = 0;
    }
}

impl Default for ArcaneNeuron {
    fn default() -> Self {
        Self::new()
    }
}