// File: sc_neurocore_engine/lib.rs
1// SPDX-License-Identifier: AGPL-3.0-or-later | Commercial license available
2// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
3// © Code 2020–2026 Miroslav Šotek. All rights reserved.
4// ORCID: 0009-0009-3560-0851
5// Contact: www.anulum.li | protoscience@anulum.li
6// SC-NeuroCore — Engine Crate Root
7
8#![allow(
9    clippy::useless_conversion,
10    clippy::needless_range_loop,
11    clippy::too_many_arguments,
12    deprecated
13)]
14
15use numpy::{
16    IntoPyArray, PyArray1, PyArray2, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2,
17    PyUntypedArrayMethods,
18};
19use pyo3::exceptions::PyValueError;
20use pyo3::prelude::*;
21use pyo3::types::PyDict;
22use pyo3::IntoPyObject;
23
24pub mod attention;
25pub mod bitstream;
26pub mod brunel;
27pub mod connectome;
28pub mod conv;
29pub mod cordiv;
30pub mod cortical_column;
31pub mod encoder;
32pub mod fault;
33pub mod fusion;
34pub mod grad;
35pub mod graph;
36pub mod ir;
37pub mod layer;
38pub mod network_runner;
39pub mod neuron;
40pub mod neurons;
41pub mod phi;
42pub mod predictive_coding;
43pub mod pyo3_neurons;
44pub mod rall_dendrite;
45pub mod recorder;
46pub mod recurrent;
47pub mod scpn;
48pub mod simd;
49pub mod sobol;
50pub mod synapses;
51
52// ── HDC / VSA PyO3 wrapper ───────────────────────────────────────────
53
54#[pyclass(
55    name = "BitStreamTensor",
56    module = "sc_neurocore_engine.sc_neurocore_engine"
57)]
58pub struct PyBitStreamTensor {
59    inner: bitstream::BitStreamTensor,
60}
61
62#[pymethods]
63impl PyBitStreamTensor {
64    /// Create a random binary vector of `dimension` bits.
65    #[new]
66    #[pyo3(signature = (dimension=10000, seed=0xACE1))]
67    fn new(dimension: usize, seed: u64) -> Self {
68        use rand::SeedableRng;
69        let mut rng = rand_xoshiro::Xoshiro256PlusPlus::seed_from_u64(seed);
70        let data = bitstream::bernoulli_packed(0.5, dimension, &mut rng);
71        Self {
72            inner: bitstream::BitStreamTensor::from_words(data, dimension),
73        }
74    }
75
76    /// Create from pre-packed u64 words.
77    #[staticmethod]
78    fn from_packed(data: Vec<u64>, length: usize) -> PyResult<Self> {
79        if length == 0 {
80            return Err(pyo3::exceptions::PyValueError::new_err(
81                "bitstream length must be > 0",
82            ));
83        }
84        Ok(Self {
85            inner: bitstream::BitStreamTensor::from_words(data, length),
86        })
87    }
88
89    /// In-place XOR (HDC bind).
90    fn xor_inplace(&mut self, other: &PyBitStreamTensor) {
91        self.inner.xor_inplace(&other.inner);
92    }
93
94    /// XOR returning a new tensor (HDC bind).
95    fn xor(&self, other: &PyBitStreamTensor) -> PyBitStreamTensor {
96        PyBitStreamTensor {
97            inner: self.inner.xor(&other.inner),
98        }
99    }
100
101    /// Cyclic right rotation by `shift` bits (HDC permute).
102    fn rotate_right(&mut self, shift: usize) {
103        self.inner.rotate_right(shift);
104    }
105
106    /// Normalized Hamming distance (0.0 = identical, 1.0 = opposite).
107    fn hamming_distance(&self, other: &PyBitStreamTensor) -> f32 {
108        self.inner.hamming_distance(&other.inner)
109    }
110
111    /// Majority-vote bundle of multiple tensors.
112    #[staticmethod]
113    fn bundle(vectors: Vec<PyRef<'_, PyBitStreamTensor>>) -> PyBitStreamTensor {
114        let refs: Vec<&bitstream::BitStreamTensor> = vectors.iter().map(|v| &v.inner).collect();
115        PyBitStreamTensor {
116            inner: bitstream::BitStreamTensor::bundle(&refs),
117        }
118    }
119
120    /// Count of set bits.
121    fn popcount(&self) -> u64 {
122        bitstream::popcount(&self.inner)
123    }
124
125    /// Packed u64 words (read-only copy).
126    #[getter]
127    fn data(&self) -> Vec<u64> {
128        self.inner.data.clone()
129    }
130
131    /// Logical bit length.
132    #[getter]
133    fn length(&self) -> usize {
134        self.inner.length
135    }
136
137    fn __len__(&self) -> usize {
138        self.inner.length
139    }
140
141    fn __repr__(&self) -> String {
142        format!(
143            "BitStreamTensor(length={}, popcount={})",
144            self.inner.length,
145            bitstream::popcount(&self.inner)
146        )
147    }
148}
149
150#[pyclass(
151    name = "StdpSynapse",
152    module = "sc_neurocore_engine.sc_neurocore_engine"
153)]
154pub struct StdpSynapse {
155    inner: synapses::StdpSynapse,
156}
157
158#[pymethods]
159impl StdpSynapse {
160    #[new]
161    #[pyo3(signature = (initial_weight, data_width=16, fraction=8))]
162    fn new(initial_weight: i16, data_width: u32, fraction: u32) -> Self {
163        Self {
164            inner: synapses::StdpSynapse::new(initial_weight, data_width, fraction),
165        }
166    }
167
168    #[allow(clippy::too_many_arguments)]
169    #[pyo3(signature = (pre_spike, post_spike, a_plus=16, a_minus=-16, decay=250, w_min=0, w_max=32767))]
170    fn step(
171        &mut self,
172        pre_spike: bool,
173        post_spike: bool,
174        a_plus: i16,
175        a_minus: i16,
176        decay: i16,
177        w_min: i16,
178        w_max: i16,
179    ) {
180        let params = synapses::StdpParams {
181            a_plus,
182            a_minus,
183            decay,
184            w_min,
185            w_max,
186        };
187        self.inner.step(pre_spike, post_spike, &params);
188    }
189
190    #[getter]
191    fn weight(&self) -> i16 {
192        self.inner.weight
193    }
194
195    #[setter]
196    fn set_weight(&mut self, value: i16) {
197        self.inner.weight = value;
198    }
199}
200
201// ── Brunel Network PyO3 wrapper ──────────────────────────────────────
202
203#[pyclass(
204    name = "BrunelNetwork",
205    module = "sc_neurocore_engine.sc_neurocore_engine"
206)]
207pub struct PyBrunelNetwork {
208    inner: brunel::BrunelNetwork,
209}
210
211#[pymethods]
212impl PyBrunelNetwork {
213    #[new]
214    #[pyo3(signature = (
215        n_neurons,
216        w_indptr,
217        w_indices,
218        w_data,
219        leak_k,
220        gain_k,
221        ext_lambda,
222        ext_weight_fp,
223        data_width=16,
224        fraction=8,
225        v_rest=0,
226        v_reset=0,
227        v_threshold=256,
228        refractory_period=2,
229        seed=42
230    ))]
231    #[allow(clippy::too_many_arguments)]
232    fn new(
233        n_neurons: usize,
234        w_indptr: PyReadonlyArray1<'_, i64>,
235        w_indices: PyReadonlyArray1<'_, i64>,
236        w_data: PyReadonlyArray1<'_, i16>,
237        leak_k: i16,
238        gain_k: i16,
239        ext_lambda: f64,
240        ext_weight_fp: i16,
241        data_width: u32,
242        fraction: u32,
243        v_rest: i16,
244        v_reset: i16,
245        v_threshold: i16,
246        refractory_period: i32,
247        seed: u64,
248    ) -> PyResult<Self> {
249        let indptr = w_indptr
250            .as_slice()
251            .map_err(|e| PyValueError::new_err(format!("Cannot read w_indptr: {e}")))?;
252        let indices = w_indices
253            .as_slice()
254            .map_err(|e| PyValueError::new_err(format!("Cannot read w_indices: {e}")))?;
255        let data = w_data
256            .as_slice()
257            .map_err(|e| PyValueError::new_err(format!("Cannot read w_data: {e}")))?;
258
259        let row_offsets: Vec<usize> = indptr.iter().map(|&v| v as usize).collect();
260        let col_indices: Vec<usize> = indices.iter().map(|&v| v as usize).collect();
261        let values: Vec<i16> = data.to_vec();
262
263        let inner = brunel::BrunelNetwork::new(
264            n_neurons,
265            row_offsets,
266            col_indices,
267            values,
268            data_width,
269            fraction,
270            v_rest,
271            v_reset,
272            v_threshold,
273            refractory_period,
274            leak_k,
275            gain_k,
276            ext_lambda,
277            ext_weight_fp,
278            seed,
279        )
280        .map_err(PyValueError::new_err)?;
281
282        Ok(Self { inner })
283    }
284
285    fn run<'py>(&mut self, py: Python<'py>, n_steps: usize) -> Bound<'py, PyArray1<u32>> {
286        let counts = self.inner.run(n_steps);
287        counts.into_pyarray(py)
288    }
289}
290
291// ── NetworkRunner PyO3 wrapper ────────────────────────────────────────
292
293#[pyclass(
294    name = "NetworkRunner",
295    module = "sc_neurocore_engine.sc_neurocore_engine"
296)]
297pub struct PyNetworkRunner {
298    inner: network_runner::NetworkRunner,
299}
300
301#[pymethods]
302impl PyNetworkRunner {
303    #[new]
304    fn new() -> Self {
305        Self {
306            inner: network_runner::NetworkRunner::new(),
307        }
308    }
309
310    fn add_population(&mut self, model: &str, n: usize) -> PyResult<usize> {
311        let pop = network_runner::create_population(model, n).map_err(PyValueError::new_err)?;
312        Ok(self.inner.add_population(pop))
313    }
314
315    #[pyo3(signature = (src, tgt, row_offsets, col_indices, values, delay=0))]
316    fn add_projection(
317        &mut self,
318        src: usize,
319        tgt: usize,
320        row_offsets: Vec<i64>,
321        col_indices: Vec<i64>,
322        values: Vec<f64>,
323        delay: usize,
324    ) {
325        let ro: Vec<usize> = row_offsets.iter().map(|&x| x as usize).collect();
326        let ci: Vec<usize> = col_indices.iter().map(|&x| x as usize).collect();
327        let proj = network_runner::ProjectionRunner::new(src, tgt, ro, ci, values, delay);
328        self.inner.add_projection(proj);
329    }
330
331    fn run<'py>(&mut self, py: Python<'py>, n_steps: usize) -> PyResult<Py<PyAny>> {
332        let results = self.inner.run(n_steps);
333        let dict = PyDict::new(py);
334        let spike_counts: Vec<u64> = results.spike_counts.iter().map(|&c| c as u64).collect();
335        dict.set_item("spike_counts", spike_counts.into_pyarray(py))?;
336        let spike_data: Vec<Py<PyArray1<u64>>> = results
337            .spike_data
338            .into_iter()
339            .map(|v: Vec<u64>| v.into_pyarray(py).unbind())
340            .collect();
341        dict.set_item("spike_data", spike_data)?;
342        let voltages: Vec<Py<PyArray1<f64>>> = results
343            .voltages
344            .into_iter()
345            .map(|v: Vec<f64>| v.into_pyarray(py).unbind())
346            .collect();
347        dict.set_item("voltages", voltages)?;
348        Ok(dict.into_any().unbind())
349    }
350
351    #[staticmethod]
352    fn supported_models() -> Vec<&'static str> {
353        network_runner::supported_models()
354    }
355}
356
/// SC-NeuroCore ─ High-Performance Rust Engine
// Module entry point: registers every exported function and class.
// The function name must match the compiled extension module name.
#[pymodule]
fn sc_neurocore_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {
    // Crate version surfaced to Python as `__version__`.
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    // Runtime utilities and bitstream packing helpers.
    m.add_function(wrap_pyfunction!(simd_tier, m)?)?;
    m.add_function(wrap_pyfunction!(set_num_threads, m)?)?;
    m.add_function(wrap_pyfunction!(pack_bitstream, m)?)?;
    m.add_function(wrap_pyfunction!(unpack_bitstream, m)?)?;
    m.add_function(wrap_pyfunction!(popcount, m)?)?;
    m.add_function(wrap_pyfunction!(pack_bitstream_numpy, m)?)?;
    m.add_function(wrap_pyfunction!(popcount_numpy, m)?)?;
    m.add_function(wrap_pyfunction!(unpack_bitstream_numpy, m)?)?;
    // Batched LIF simulation entry points.
    m.add_function(wrap_pyfunction!(batch_lif_run, m)?)?;
    m.add_function(wrap_pyfunction!(batch_lif_run_multi, m)?)?;
    m.add_function(wrap_pyfunction!(batch_lif_run_varying, m)?)?;
    m.add_function(wrap_pyfunction!(batch_encode, m)?)?;
    m.add_function(wrap_pyfunction!(batch_encode_numpy, m)?)?;
    // Core classes (several are defined in sibling modules of this crate).
    m.add_class::<Lfsr16>()?;
    m.add_class::<BitstreamEncoder>()?;
    m.add_class::<FixedPointLif>()?;
    m.add_class::<DenseLayer>()?;
    m.add_class::<StdpSynapse>()?;
    m.add_class::<PySurrogateLif>()?;
    m.add_class::<PyDifferentiableDenseLayer>()?;
    m.add_class::<PyStochasticAttention>()?;
    m.add_class::<PyStochasticGraphLayer>()?;
    m.add_class::<PyKuramotoSolver>()?;
    m.add_class::<PySCPNMetrics>()?;
    m.add_class::<PyBitStreamTensor>()?;
    m.add_class::<PyBrunelNetwork>()?;
    m.add_class::<PyIzhikevich>()?;
    m.add_class::<PyBitstreamAverager>()?;
    m.add_class::<PyScGraph>()?;
    m.add_class::<PyScGraphBuilder>()?;
    // IR tooling.
    m.add_function(wrap_pyfunction!(ir_verify, m)?)?;
    m.add_function(wrap_pyfunction!(ir_print, m)?)?;
    m.add_function(wrap_pyfunction!(ir_parse, m)?)?;
    m.add_function(wrap_pyfunction!(ir_emit_sv, m)?)?;
    // Additional neuron models.
    m.add_class::<PyAdExNeuron>()?;
    m.add_class::<PyExpIFNeuron>()?;
    m.add_class::<PyLapicqueNeuron>()?;
    pyo3_neurons::register_neuron_classes(m)?;
    m.add_class::<PyNetworkRunner>()?;
    // Stochastic-computing and predictive-coding helpers.
    m.add_function(wrap_pyfunction!(py_cordiv, m)?)?;
    m.add_function(wrap_pyfunction!(py_adaptive_length, m)?)?;
    m.add_function(wrap_pyfunction!(py_prediction_error, m)?)?;
    m.add_function(wrap_pyfunction!(py_predict_xor_ema, m)?)?;
    m.add_function(wrap_pyfunction!(py_predict_xor_lfsr, m)?)?;
    m.add_function(wrap_pyfunction!(py_recover_xor_ema, m)?)?;
    m.add_function(wrap_pyfunction!(py_recover_xor_lfsr, m)?)?;
    m.add_function(wrap_pyfunction!(py_phi_star, m)?)?;
    m.add_class::<PyCorticalColumn>()?;
    m.add_class::<PyRallDendrite>()?;
    Ok(())
}
413
414// ── CORDIV + adaptive length ─────────────────────────────────────────
415
416#[pyfunction]
417fn py_cordiv(
418    py: Python<'_>,
419    numerator: PyReadonlyArray1<'_, u8>,
420    denominator: PyReadonlyArray1<'_, u8>,
421) -> PyResult<Py<PyArray1<u8>>> {
422    let num = numerator.as_slice()?;
423    let den = denominator.as_slice()?;
424    let result = cordiv::cordiv(num, den);
425    Ok(result.into_pyarray(py).into())
426}
427
/// Hoeffding-bound bitstream length needed for error `epsilon` at the
/// given `confidence` (see `cordiv::adaptive_length_hoeffding`).
#[pyfunction]
fn py_adaptive_length(epsilon: f64, confidence: f64) -> usize {
    cordiv::adaptive_length_hoeffding(epsilon, confidence)
}
432
433// ── Predictive coding ────────────────────────────────────────────────
434
435#[pyfunction]
436fn py_prediction_error(
437    _py: Python<'_>,
438    predicted: PyReadonlyArray1<'_, u64>,
439    actual: PyReadonlyArray1<'_, u64>,
440    length: usize,
441) -> PyResult<f64> {
442    let pred = predicted
443        .as_slice()
444        .map_err(|e| PyValueError::new_err(format!("predicted array must be contiguous: {e}")))?;
445    let act = actual
446        .as_slice()
447        .map_err(|e| PyValueError::new_err(format!("actual array must be contiguous: {e}")))?;
448    Ok(predictive_coding::prediction_error_packed(
449        pred, act, length,
450    ))
451}
452
453// ── Spike codec predictors (EMA + LFSR) ─────────────────────────────
454
455#[pyfunction]
456fn py_predict_xor_ema(
457    _py: Python<'_>,
458    spikes: PyReadonlyArray1<'_, i8>,
459    n_channels: usize,
460    alpha: f64,
461    threshold: f64,
462) -> PyResult<(Py<PyArray1<i8>>, usize)> {
463    let data = spikes
464        .as_slice()
465        .map_err(|e| PyValueError::new_err(format!("spikes must be contiguous: {e}")))?;
466    let (errors, correct) =
467        predictive_coding::predict_and_xor_ema(data, n_channels, alpha, threshold);
468    Ok((PyArray1::from_vec(_py, errors).into(), correct))
469}
470
471#[pyfunction]
472fn py_recover_xor_ema(
473    _py: Python<'_>,
474    errors: PyReadonlyArray1<'_, i8>,
475    n_channels: usize,
476    alpha: f64,
477    threshold: f64,
478) -> PyResult<Py<PyArray1<i8>>> {
479    let data = errors
480        .as_slice()
481        .map_err(|e| PyValueError::new_err(format!("errors must be contiguous: {e}")))?;
482    let spikes = predictive_coding::xor_and_recover_ema(data, n_channels, alpha, threshold);
483    Ok(PyArray1::from_vec(_py, spikes).into())
484}
485
486#[pyfunction]
487fn py_predict_xor_lfsr(
488    _py: Python<'_>,
489    spikes: PyReadonlyArray1<'_, i8>,
490    n_channels: usize,
491    alpha_q8: i32,
492    seed: u16,
493) -> PyResult<(Py<PyArray1<i8>>, usize)> {
494    let data = spikes
495        .as_slice()
496        .map_err(|e| PyValueError::new_err(format!("spikes must be contiguous: {e}")))?;
497    let (errors, correct) =
498        predictive_coding::predict_and_xor_lfsr(data, n_channels, alpha_q8, seed);
499    Ok((PyArray1::from_vec(_py, errors).into(), correct))
500}
501
502#[pyfunction]
503fn py_recover_xor_lfsr(
504    _py: Python<'_>,
505    errors: PyReadonlyArray1<'_, i8>,
506    n_channels: usize,
507    alpha_q8: i32,
508    seed: u16,
509) -> PyResult<Py<PyArray1<i8>>> {
510    let data = errors
511        .as_slice()
512        .map_err(|e| PyValueError::new_err(format!("errors must be contiguous: {e}")))?;
513    let spikes = predictive_coding::xor_and_recover_lfsr(data, n_channels, alpha_q8, seed);
514    Ok(PyArray1::from_vec(_py, spikes).into())
515}
516
517// ── Phi* ─────────────────────────────────────────────────────────────
518
519#[pyfunction]
520fn py_phi_star(_py: Python<'_>, data: PyReadonlyArray2<'_, f64>, tau: usize) -> f64 {
521    let shape = data.shape();
522    let n_channels = shape[0];
523    let n_timesteps = shape[1];
524    let flat = data.as_slice().unwrap_or(&[]);
525    let channels: Vec<Vec<f64>> = (0..n_channels)
526        .map(|i| flat[i * n_timesteps..(i + 1) * n_timesteps].to_vec())
527        .collect();
528    phi::phi_star(&channels, tau)
529}
530
531// ── Cortical column ──────────────────────────────────────────────────
532
533#[pyclass(
534    name = "CorticalColumnRust",
535    module = "sc_neurocore_engine.sc_neurocore_engine"
536)]
537pub struct PyCorticalColumn {
538    inner: cortical_column::CorticalColumnRust,
539}
540
541#[pymethods]
542impl PyCorticalColumn {
543    #[new]
544    fn new(n: usize, tau: f64, dt: f64, threshold: f64, w_exc: f64, w_inh: f64, seed: u64) -> Self {
545        Self {
546            inner: cortical_column::CorticalColumnRust::new(
547                n, tau, dt, threshold, w_exc, w_inh, seed,
548            ),
549        }
550    }
551
552    fn step<'py>(
553        &mut self,
554        py: Python<'py>,
555        thalamic_input: PyReadonlyArray1<'py, f64>,
556    ) -> PyResult<Py<PyDict>> {
557        let input = thalamic_input.as_slice()?;
558        let spikes = self.inner.step(input);
559        let dict = PyDict::new(py);
560        let names = ["l4", "l23_exc", "l23_inh", "l5", "l6"];
561        for (i, name) in names.iter().enumerate() {
562            dict.set_item(*name, spikes[i].clone().into_pyarray(py))?;
563        }
564        Ok(dict.into())
565    }
566
567    fn reset(&mut self) {
568        self.inner.reset();
569    }
570}
571
572// ── Rall dendrite ────────────────────────────────────────────────────
573
574#[pyclass(
575    name = "RallDendriteRust",
576    module = "sc_neurocore_engine.sc_neurocore_engine"
577)]
578pub struct PyRallDendrite {
579    inner: rall_dendrite::RallDendriteRust,
580}
581
582#[pymethods]
583impl PyRallDendrite {
584    #[new]
585    fn new(n_branches: usize, branch_length: usize, tau: f64, coupling: f64, dt: f64) -> Self {
586        Self {
587            inner: rall_dendrite::RallDendriteRust::new(
588                n_branches,
589                branch_length,
590                tau,
591                coupling,
592                dt,
593            ),
594        }
595    }
596
597    fn step(&mut self, branch_inputs: PyReadonlyArray1<'_, f64>) -> PyResult<f64> {
598        let inputs = branch_inputs.as_slice()?;
599        Ok(self.inner.step(inputs))
600    }
601
602    fn reset(&mut self) {
603        self.inner.reset();
604    }
605
606    #[getter]
607    fn soma_v(&self) -> f64 {
608        self.inner.soma_v
609    }
610}
611
612/// Returns the highest SIMD tier available on this CPU.
613#[pyfunction]
614fn simd_tier() -> &'static str {
615    #[cfg(target_arch = "x86_64")]
616    {
617        if is_x86_feature_detected!("avx512vpopcntdq") {
618            return "avx512-vpopcntdq";
619        }
620        if is_x86_feature_detected!("avx512bw") {
621            return "avx512bw";
622        }
623        if is_x86_feature_detected!("avx512f") {
624            return "avx512f";
625        }
626        if is_x86_feature_detected!("avx2") {
627            return "avx2";
628        }
629        if is_x86_feature_detected!("popcnt") {
630            return "popcnt";
631        }
632    }
633    #[cfg(target_arch = "aarch64")]
634    {
635        return "neon";
636    }
637    "portable"
638}
639
640/// Set the number of threads in the global rayon thread pool.
641///
642/// Must be called before any parallel operation.
643/// Passing 0 uses rayon's default (number of CPU cores).
644#[pyfunction]
645fn set_num_threads(n: usize) -> PyResult<()> {
646    if n == 0 {
647        return Ok(());
648    }
649    rayon::ThreadPoolBuilder::new()
650        .num_threads(n)
651        .build_global()
652        .map_err(|e| PyValueError::new_err(format!("Cannot set thread pool: {e}")))
653}
654
655#[pyfunction]
656fn pack_bitstream(py: Python<'_>, bits: &Bound<'_, PyAny>) -> PyResult<Py<PyAny>> {
657    if let Ok(rows) = bits.extract::<Vec<Vec<u8>>>() {
658        let packed_rows: Vec<Vec<u64>> = rows.iter().map(|row| bitstream::pack(row).data).collect();
659        return Ok(packed_rows
660            .into_pyobject(py)
661            .map_err(|e| PyValueError::new_err(e.to_string()))?
662            .into_any()
663            .unbind());
664    }
665
666    let flat = bits
667        .extract::<Vec<u8>>()
668        .map_err(|_| PyValueError::new_err("Expected a 1-D or 2-D array of uint8 bits."))?;
669    Ok(bitstream::pack(&flat)
670        .data
671        .into_pyobject(py)
672        .map_err(|e| PyValueError::new_err(e.to_string()))?
673        .into_any()
674        .unbind())
675}
676
677#[pyfunction]
678#[pyo3(signature = (packed, original_length, original_shape=None))]
679fn unpack_bitstream(
680    py: Python<'_>,
681    packed: &Bound<'_, PyAny>,
682    original_length: usize,
683    original_shape: Option<(usize, usize)>,
684) -> PyResult<Py<PyAny>> {
685    if let Ok(rows) = packed.extract::<Vec<Vec<u64>>>() {
686        let batch = rows.len();
687        let per_batch_len = if let Some((expected_batch, length)) = original_shape {
688            if expected_batch != batch {
689                return Err(PyValueError::new_err(format!(
690                    "original_shape batch {} does not match packed batch {}.",
691                    expected_batch, batch
692                )));
693            }
694            length
695        } else if batch == 0 {
696            0
697        } else {
698            original_length / batch
699        };
700
701        let unpacked_rows: Vec<Vec<u8>> = rows
702            .into_iter()
703            .map(|row| {
704                bitstream::unpack(&bitstream::BitStreamTensor::from_words(row, per_batch_len))
705            })
706            .collect();
707        return Ok(unpacked_rows
708            .into_pyobject(py)
709            .map_err(|e| PyValueError::new_err(e.to_string()))?
710            .into_any()
711            .unbind());
712    }
713
714    let words = packed.extract::<Vec<u64>>().map_err(|_| {
715        PyValueError::new_err("Expected packed uint64 words as 1-D or 2-D sequence.")
716    })?;
717    let tensor = bitstream::BitStreamTensor::from_words(words, original_length);
718    Ok(bitstream::unpack(&tensor)
719        .into_pyobject(py)
720        .map_err(|e| PyValueError::new_err(e.to_string()))?
721        .into_any()
722        .unbind())
723}
724
725#[pyfunction]
726fn popcount(packed: &Bound<'_, PyAny>) -> PyResult<u64> {
727    if let Ok(rows) = packed.extract::<Vec<Vec<u64>>>() {
728        return Ok(rows
729            .iter()
730            .map(|row| simd::popcount_dispatch(row))
731            .sum::<u64>());
732    }
733
734    let words = packed.extract::<Vec<u64>>().map_err(|_| {
735        PyValueError::new_err("Expected packed uint64 words as 1-D or 2-D sequence.")
736    })?;
737    Ok(simd::popcount_dispatch(&words))
738}
739
740/// Pack a 1-D numpy uint8 array into packed u64 words, returning a numpy array.
741/// Zero-copy input, single-allocation output.
742#[pyfunction]
743fn pack_bitstream_numpy<'py>(
744    py: Python<'py>,
745    bits: PyReadonlyArray1<'py, u8>,
746) -> PyResult<Bound<'py, PyArray1<u64>>> {
747    let slice = bits
748        .as_slice()
749        .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
750    let tensor = simd::pack_dispatch(slice);
751    Ok(tensor.data.into_pyarray(py))
752}
753
754/// Popcount on a numpy uint64 array — zero-copy input.
755#[pyfunction]
756fn popcount_numpy(packed: PyReadonlyArray1<'_, u64>) -> PyResult<u64> {
757    let words = packed
758        .as_slice()
759        .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
760    Ok(simd::popcount_dispatch(words))
761}
762
763/// Unpack a numpy uint64 array back to a numpy uint8 array.
764#[pyfunction]
765fn unpack_bitstream_numpy<'py>(
766    py: Python<'py>,
767    packed: PyReadonlyArray1<'py, u64>,
768    original_length: usize,
769) -> PyResult<Bound<'py, PyArray1<u8>>> {
770    let words = packed
771        .as_slice()
772        .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
773    let tensor = bitstream::BitStreamTensor::from_words(words.to_vec(), original_length);
774    let bits = bitstream::unpack(&tensor);
775    Ok(bits.into_pyarray(py))
776}
777
/// Run a LIF neuron for N steps with constant inputs.
///
/// Returns (spikes: ndarray[i32], voltages: ndarray[i16]).
#[pyfunction]
#[pyo3(signature = (
    n_steps,
    leak_k,
    gain_k,
    i_t,
    noise_in=0,
    data_width=16,
    fraction=8,
    v_rest=0,
    v_reset=0,
    v_threshold=256,
    refractory_period=2
))]
#[allow(clippy::too_many_arguments)]
fn batch_lif_run<'py>(
    py: Python<'py>,
    n_steps: usize,
    leak_k: i16,
    gain_k: i16,
    i_t: i16,
    noise_in: i16,
    data_width: u32,
    fraction: u32,
    v_rest: i16,
    v_reset: i16,
    v_threshold: i16,
    refractory_period: i32,
) -> (Bound<'py, PyArray1<i32>>, Bound<'py, PyArray1<i16>>) {
    // Fixed-point neuron; parameter scaling is governed by
    // data_width/fraction (see neuron::FixedPointLif).
    let mut lif = neuron::FixedPointLif::new(
        data_width,
        fraction,
        v_rest,
        v_reset,
        v_threshold,
        refractory_period,
    );
    // Output buffers are allocated once and filled in place below.
    let spikes_arr = PyArray1::<i32>::zeros(py, n_steps, false);
    let voltages_arr = PyArray1::<i16>::zeros(py, n_steps, false);

    // SAFETY: Arrays are newly allocated and contiguous.
    let spikes_slice = unsafe {
        spikes_arr
            .as_slice_mut()
            .expect("newly allocated spikes array must be contiguous")
    };
    // SAFETY: Arrays are newly allocated and contiguous.
    let voltages_slice = unsafe {
        voltages_arr
            .as_slice_mut()
            .expect("newly allocated voltages array must be contiguous")
    };

    // Constant drive: the same current and noise value at every step.
    for i in 0..n_steps {
        let (s, v) = lif.step(leak_k, gain_k, i_t, noise_in);
        spikes_slice[i] = s;
        voltages_slice[i] = v;
    }

    (spikes_arr, voltages_arr)
}
842
/// Run N independent LIF neurons in parallel, each with its own constant input.
///
/// Returns (spikes: ndarray[i32, (n_neurons, n_steps)],
///          voltages: ndarray[i16, (n_neurons, n_steps)]).
#[pyfunction]
#[pyo3(signature = (
    n_neurons,
    n_steps,
    leak_k,
    gain_k,
    currents,
    data_width=16,
    fraction=8,
    v_rest=0,
    v_reset=0,
    v_threshold=256,
    refractory_period=2
))]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
fn batch_lif_run_multi<'py>(
    py: Python<'py>,
    n_neurons: usize,
    n_steps: usize,
    leak_k: i16,
    gain_k: i16,
    currents: PyReadonlyArray1<'py, i16>,
    data_width: u32,
    fraction: u32,
    v_rest: i16,
    v_reset: i16,
    v_threshold: i16,
    refractory_period: i32,
) -> PyResult<(Bound<'py, PyArray2<i32>>, Bound<'py, PyArray2<i16>>)> {
    use rayon::prelude::*;

    // One constant input current per neuron; length must match n_neurons.
    let curr_slice = currents
        .as_slice()
        .map_err(|e| PyValueError::new_err(format!("Cannot read currents: {e}")))?;
    if curr_slice.len() != n_neurons {
        return Err(PyValueError::new_err(format!(
            "currents length {} does not match n_neurons {}.",
            curr_slice.len(),
            n_neurons
        )));
    }

    let spikes_arr = PyArray2::<i32>::zeros(py, [n_neurons, n_steps], false);
    let voltages_arr = PyArray2::<i16>::zeros(py, [n_neurons, n_steps], false);

    // Empty problem: return the (empty) buffers without touching them.
    if n_neurons == 0 || n_steps == 0 {
        return Ok((spikes_arr, voltages_arr));
    }

    // SAFETY: Arrays are newly allocated and contiguous.
    let spikes_flat = unsafe {
        spikes_arr
            .as_slice_mut()
            .expect("newly allocated spikes array must be contiguous")
    };
    // SAFETY: Arrays are newly allocated and contiguous.
    let voltages_flat = unsafe {
        voltages_arr
            .as_slice_mut()
            .expect("newly allocated voltages array must be contiguous")
    };

    // Each rayon task owns one disjoint row of each output (chunks of
    // n_steps), so the parallel mutation never aliases.
    spikes_flat
        .par_chunks_mut(n_steps)
        .zip(voltages_flat.par_chunks_mut(n_steps))
        .zip(curr_slice.par_iter().copied())
        .for_each(|((spike_row, voltage_row), i_t)| {
            // Independent neuron state per row; no noise term (fixed 0).
            let mut lif = neuron::FixedPointLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
            );
            for step in 0..n_steps {
                let (s, v) = lif.step(leak_k, gain_k, i_t, 0);
                spike_row[step] = s;
                voltage_row[step] = v;
            }
        });

    Ok((spikes_arr, voltages_arr))
}
932
/// Run a LIF neuron for N steps with per-step current and optional noise arrays.
#[pyfunction]
#[pyo3(signature = (
    leak_k,
    gain_k,
    currents,
    noises=None,
    data_width=16,
    fraction=8,
    v_rest=0,
    v_reset=0,
    v_threshold=256,
    refractory_period=2
))]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
fn batch_lif_run_varying<'py>(
    py: Python<'py>,
    leak_k: i16,
    gain_k: i16,
    currents: PyReadonlyArray1<'py, i16>,
    noises: Option<PyReadonlyArray1<'py, i16>>,
    data_width: u32,
    fraction: u32,
    v_rest: i16,
    v_reset: i16,
    v_threshold: i16,
    refractory_period: i32,
) -> PyResult<(Bound<'py, PyArray1<i32>>, Bound<'py, PyArray1<i16>>)> {
    let curr_slice = currents
        .as_slice()
        .map_err(|e| PyValueError::new_err(format!("Cannot read currents: {e}")))?;
    // Noise is optional; when given it must pair one value per current.
    let noise_slice: Option<&[i16]> = match noises.as_ref() {
        Some(n) => Some(
            n.as_slice()
                .map_err(|e| PyValueError::new_err(format!("Cannot read noises: {e}")))?,
        ),
        None => None,
    };

    // The number of steps is implied by the currents array.
    let n_steps = curr_slice.len();
    if let Some(ns) = noise_slice {
        if ns.len() != n_steps {
            return Err(PyValueError::new_err(format!(
                "noises length {} does not match currents length {}.",
                ns.len(),
                n_steps
            )));
        }
    }

    let mut lif = neuron::FixedPointLif::new(
        data_width,
        fraction,
        v_rest,
        v_reset,
        v_threshold,
        refractory_period,
    );
    let spikes_arr = PyArray1::<i32>::zeros(py, n_steps, false);
    let voltages_arr = PyArray1::<i16>::zeros(py, n_steps, false);

    // SAFETY: Arrays are newly allocated and contiguous.
    let spikes_slice = unsafe {
        spikes_arr
            .as_slice_mut()
            .expect("newly allocated spikes array must be contiguous")
    };
    // SAFETY: Arrays are newly allocated and contiguous.
    let voltages_slice = unsafe {
        voltages_arr
            .as_slice_mut()
            .expect("newly allocated voltages array must be contiguous")
    };

    // Per-step drive; missing noise defaults to 0 at every step.
    for i in 0..n_steps {
        let noise_in = noise_slice.map_or(0, |ns| ns[i]);
        let (s, v) = lif.step(leak_k, gain_k, curr_slice[i], noise_in);
        spikes_slice[i] = s;
        voltages_slice[i] = v;
    }

    Ok((spikes_arr, voltages_arr))
}
1017
1018/// Bernoulli-encode a numpy float64 array into packed bitstream words.
1019///
1020/// Returns nested packed words with shape (n_probs, ceil(length / 64)).
1021#[pyfunction]
1022#[pyo3(signature = (probs, length=1024, seed=0xACE1))]
1023fn batch_encode<'py>(
1024    _py: Python<'py>,
1025    probs: PyReadonlyArray1<'py, f64>,
1026    length: usize,
1027    seed: u64,
1028) -> PyResult<Vec<Vec<u64>>> {
1029    let prob_slice = probs
1030        .as_slice()
1031        .map_err(|e| PyValueError::new_err(format!("Cannot read probs: {e}")))?;
1032    let words = length.div_ceil(64);
1033
1034    use rand::SeedableRng;
1035    let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(seed);
1036
1037    let packed: Vec<Vec<u64>> = prob_slice
1038        .iter()
1039        .map(|&p| {
1040            let mut data = bitstream::bernoulli_packed(p, length, &mut rng);
1041            data.resize(words, 0);
1042            data
1043        })
1044        .collect();
1045
1046    Ok(packed)
1047}
1048
1049/// Bernoulli-encode a numpy float64 array into a 2-D numpy uint64 array.
1050///
1051/// Returns shape `(n_probs, ceil(length / 64))`.
1052#[pyfunction]
1053#[pyo3(signature = (probs, length=1024, seed=0xACE1))]
1054fn batch_encode_numpy<'py>(
1055    py: Python<'py>,
1056    probs: PyReadonlyArray1<'py, f64>,
1057    length: usize,
1058    seed: u64,
1059) -> PyResult<Bound<'py, PyArray2<u64>>> {
1060    use rayon::prelude::*;
1061
1062    let prob_slice = probs
1063        .as_slice()
1064        .map_err(|e| PyValueError::new_err(format!("Cannot read probs: {e}")))?;
1065    let words = length.div_ceil(64);
1066    let n_probs = prob_slice.len();
1067
1068    let rows: Vec<Vec<u64>> = prob_slice
1069        .par_iter()
1070        .enumerate()
1071        .map(|(idx, &p)| {
1072            use rand::SeedableRng;
1073
1074            let prob_seed = seed.wrapping_add(idx as u64);
1075            let mut rng = rand_xoshiro::Xoshiro256PlusPlus::seed_from_u64(prob_seed);
1076            let mut row = bitstream::bernoulli_packed_simd(p, length, &mut rng);
1077            row.resize(words, 0);
1078            row
1079        })
1080        .collect();
1081
1082    let mut flat = Vec::with_capacity(n_probs * words);
1083    for row in &rows {
1084        flat.extend_from_slice(row);
1085    }
1086
1087    let arr = ndarray::Array2::from_shape_vec((n_probs, words), flat)
1088        .map_err(|e| PyValueError::new_err(format!("Shape construction failed: {e}")))?;
1089    Ok(arr.into_pyarray(py))
1090}
1091
/// Python wrapper around the 16-bit LFSR pseudo-random generator.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct Lfsr16 {
    // Underlying LFSR implementation.
    inner: encoder::Lfsr16,
    // Seed used at construction; `reset()` falls back to it when no seed is given.
    seed_init: u16,
}
1097
1098#[pymethods]
1099impl Lfsr16 {
1100    #[new]
1101    #[pyo3(signature = (seed=0xACE1))]
1102    fn new(seed: u16) -> PyResult<Self> {
1103        if seed == 0 {
1104            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1105        }
1106        Ok(Self {
1107            inner: encoder::Lfsr16::new(seed),
1108            seed_init: seed,
1109        })
1110    }
1111
1112    fn step(&mut self) -> u16 {
1113        self.inner.step()
1114    }
1115
1116    #[getter]
1117    fn reg(&self) -> u16 {
1118        self.inner.reg
1119    }
1120
1121    #[getter]
1122    fn width(&self) -> u32 {
1123        self.inner.width
1124    }
1125
1126    #[pyo3(signature = (seed=None))]
1127    fn reset(&mut self, seed: Option<u16>) -> PyResult<()> {
1128        let next = seed.unwrap_or(self.seed_init);
1129        if next == 0 {
1130            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1131        }
1132        self.inner = encoder::Lfsr16::new(next);
1133        self.seed_init = next;
1134        Ok(())
1135    }
1136}
1137
/// Python wrapper over the LFSR-driven bitstream encoder.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct BitstreamEncoder {
    // Underlying encoder implementation.
    inner: encoder::BitstreamEncoder,
    // Seed used at construction; `reset()` falls back to it when no seed is given.
    seed_init: u16,
}
1143
1144#[pymethods]
1145impl BitstreamEncoder {
1146    #[new]
1147    #[pyo3(signature = (data_width=16, seed=0xACE1))]
1148    fn new(data_width: u32, seed: u16) -> PyResult<Self> {
1149        if seed == 0 {
1150            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1151        }
1152        Ok(Self {
1153            inner: encoder::BitstreamEncoder::new(data_width, seed),
1154            seed_init: seed,
1155        })
1156    }
1157
1158    fn step(&mut self, x_value: u16) -> u8 {
1159        self.inner.step(x_value)
1160    }
1161
1162    #[getter]
1163    fn data_width(&self) -> u32 {
1164        self.inner.data_width
1165    }
1166
1167    #[getter]
1168    fn reg(&self) -> u16 {
1169        self.inner.lfsr.reg
1170    }
1171
1172    #[pyo3(signature = (seed=None))]
1173    fn reset(&mut self, seed: Option<u16>) -> PyResult<()> {
1174        let next = seed.unwrap_or(self.seed_init);
1175        if next == 0 {
1176            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1177        }
1178        self.inner.reset(Some(next));
1179        self.seed_init = next;
1180        Ok(())
1181    }
1182}
1183
/// Python wrapper over the fixed-point LIF neuron model.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct FixedPointLif {
    // Underlying Rust neuron state.
    inner: neuron::FixedPointLif,
}
1188
#[pymethods]
impl FixedPointLif {
    /// Build a fixed-point LIF neuron with the given format and dynamics
    /// parameters (see `neuron::FixedPointLif` for their semantics).
    #[new]
    #[pyo3(signature = (
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2
    ))]
    fn new(
        data_width: u32,
        fraction: u32,
        v_rest: i16,
        v_reset: i16,
        v_threshold: i16,
        refractory_period: i32,
    ) -> Self {
        Self {
            inner: neuron::FixedPointLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
            ),
        }
    }

    /// Advance one simulation step; returns `(spike, voltage)`.
    #[pyo3(signature = (leak_k, gain_k, i_t, noise_in=0))]
    fn step(&mut self, leak_k: i16, gain_k: i16, i_t: i16, noise_in: i16) -> (i32, i16) {
        self.inner.step(leak_k, gain_k, i_t, noise_in)
    }

    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Alias of `reset()` kept for API compatibility.
    fn reset_state(&mut self) {
        self.reset();
    }

    /// Return `{"v": ..., "refractory_counter": ...}` as a Python dict.
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let dict = PyDict::new(py);
        dict.set_item("v", self.inner.v)?;
        dict.set_item("refractory_counter", self.inner.refractory_counter)?;
        Ok(dict.into_any().unbind())
    }
}
1240
1241// ── Izhikevich PyO3 wrapper ─────────────────────────────────────
1242
/// Python wrapper over the Izhikevich neuron model.
#[pyclass(
    name = "Izhikevich",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyIzhikevich {
    // Underlying Rust neuron state.
    inner: neuron::Izhikevich,
}
1250
#[pymethods]
impl PyIzhikevich {
    /// Build a neuron with parameters `a`, `b`, `c`, `d` and time step `dt`.
    #[new]
    #[pyo3(signature = (a=0.02, b=0.2, c=-65.0, d=8.0, dt=1.0))]
    fn new(a: f64, b: f64, c: f64, d: f64, dt: f64) -> Self {
        Self {
            inner: neuron::Izhikevich::new(a, b, c, d, dt),
        }
    }

    /// Advance one step with the given input current; returns the spike output.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }

    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Alias of `reset()` kept for API compatibility.
    fn reset_state(&mut self) {
        self.reset();
    }

    /// Return the state variables `v` and `u` as a Python dict.
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let dict = PyDict::new(py);
        dict.set_item("v", self.inner.v)?;
        dict.set_item("u", self.inner.u)?;
        Ok(dict.into_any().unbind())
    }
}
1280
1281// ── BitstreamAverager PyO3 wrapper ──────────────────────────────
1282
/// Python wrapper over the windowed bitstream averager.
#[pyclass(
    name = "BitstreamAverager",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyBitstreamAverager {
    // Underlying Rust averager state.
    inner: neuron::BitstreamAverager,
}
1290
#[pymethods]
impl PyBitstreamAverager {
    /// Build an averager over a window of `window` bits.
    #[new]
    #[pyo3(signature = (window=1024))]
    fn new(window: usize) -> Self {
        Self {
            inner: neuron::BitstreamAverager::new(window),
        }
    }

    /// Push one bit into the window.
    fn push(&mut self, bit: u8) {
        self.inner.push(bit);
    }

    /// Current estimate of the mean bit value.
    fn estimate(&self) -> f64 {
        self.inner.estimate()
    }

    /// Clear the accumulated state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Configured window size.
    #[getter]
    fn window(&self) -> usize {
        self.inner.window()
    }
}
1318
1319// ── AdEx, ExpIF, Lapicque PyO3 wrappers ────────────────────────
1320
/// Python wrapper over the AdEx neuron model.
#[pyclass(
    name = "AdExNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyAdExNeuron {
    // Underlying Rust neuron state.
    inner: neuron::AdExNeuron,
}
1329
#[pymethods]
impl PyAdExNeuron {
    /// Build an AdEx neuron with the crate's default parameters.
    #[new]
    fn new() -> Self {
        Self {
            inner: neuron::AdExNeuron::new(),
        }
    }
    /// Advance one step with input `current`; returns the spike output.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Return the state variables `v` and `w` as a Python dict.
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        d.set_item("w", self.inner.w)?;
        Ok(d.into_any().unbind())
    }
}
1351
/// Python wrapper over the exponential integrate-and-fire neuron model.
#[pyclass(
    name = "ExpIFNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyExpIFNeuron {
    // Underlying Rust neuron state.
    inner: neuron::ExpIfNeuron,
}
1360
#[pymethods]
impl PyExpIFNeuron {
    /// Build an ExpIF neuron with the crate's default parameters.
    #[new]
    fn new() -> Self {
        Self {
            inner: neuron::ExpIfNeuron::new(),
        }
    }
    /// Advance one step with input `current`; returns the spike output.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Return the state variable `v` as a Python dict.
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        Ok(d.into_any().unbind())
    }
}
1381
/// Python wrapper over the Lapicque integrate-and-fire neuron model.
#[pyclass(
    name = "LapicqueNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyLapicqueNeuron {
    // Underlying Rust neuron state.
    inner: neuron::LapicqueNeuron,
}
1390
#[pymethods]
impl PyLapicqueNeuron {
    /// Build a neuron with membrane time constant `tau`, `resistance`,
    /// spike `threshold` and time step `dt`.
    #[new]
    #[pyo3(signature = (tau=20.0, resistance=1.0, threshold=1.0, dt=1.0))]
    fn new(tau: f64, resistance: f64, threshold: f64, dt: f64) -> Self {
        Self {
            inner: neuron::LapicqueNeuron::new(tau, resistance, threshold, dt),
        }
    }
    /// Advance one step with input `current`; returns the spike output.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Return the state variable `v` as a Python dict.
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        Ok(d.into_any().unbind())
    }
}
1412
1413// ── DenseLayer ──────────────────────────────────────────────────
1414
/// Python wrapper over the stochastic-computing dense layer.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct DenseLayer {
    // Underlying Rust layer (weights + packed bitstream state).
    inner: layer::DenseLayer,
}
1419
#[pymethods]
impl DenseLayer {
    /// Build a layer of `n_neurons` units over `n_inputs` features using
    /// bitstreams of `length` bits; `seed` drives weight initialization.
    #[new]
    #[pyo3(signature = (n_inputs, n_neurons, length=1024, seed=24301))]
    fn new(n_inputs: usize, n_neurons: usize, length: usize, seed: u64) -> Self {
        Self {
            inner: layer::DenseLayer::new(n_inputs, n_neurons, length, seed),
        }
    }

    /// Return the weight matrix as nested lists (one row per neuron).
    fn get_weights(&self) -> Vec<Vec<f64>> {
        self.inner.get_weights()
    }

    /// Replace the weight matrix; inner validation errors become ValueError.
    fn set_weights(&mut self, weights: Vec<Vec<f64>>) -> PyResult<()> {
        self.inner
            .set_weights(weights)
            .map_err(PyValueError::new_err)
    }

    /// Re-pack the bitstream representation of the current weights.
    fn refresh_packed_weights(&mut self) {
        self.inner.refresh_packed_weights();
    }

    /// Forward pass on a single input vector (list-based API).
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward(&self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Forward pass using the fused inner kernel (same inputs/outputs as `forward`).
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward_fast(&self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward_fused(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Dense forward accepting numpy input and returning numpy output.
    ///
    /// This performs parallel encoding + parallel compute in one FFI call.
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward_numpy<'py>(
        &self,
        py: Python<'py>,
        input_values: PyReadonlyArray1<'py, f64>,
        seed: u64,
    ) -> PyResult<Bound<'py, PyArray1<f64>>> {
        let slice = input_values
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Cannot read input array: {e}")))?;
        let out = self
            .inner
            .forward_numpy_inner(slice, seed)
            .map_err(PyValueError::new_err)?;
        Ok(out.into_pyarray(py))
    }

    /// Dense forward for a batch of input samples in one FFI call.
    ///
    /// `inputs` must be a contiguous float64 array of shape (n_samples, n_inputs).
    /// Returns float64 array of shape (n_samples, n_neurons).
    #[pyo3(signature = (inputs, seed=44257))]
    fn forward_batch_numpy<'py>(
        &self,
        py: Python<'py>,
        inputs: PyReadonlyArray2<'py, f64>,
        seed: u64,
    ) -> PyResult<Bound<'py, PyArray2<f64>>> {
        let shape = inputs.shape();
        let n_samples = shape[0];
        let n_inputs = shape[1];
        // Validate feature count up front so the inner kernel only sees
        // well-shaped data.
        if n_inputs != self.inner.n_inputs {
            return Err(PyValueError::new_err(format!(
                "Expected {} input features, got {}.",
                self.inner.n_inputs, n_inputs
            )));
        }

        let flat_inputs = inputs
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Array not contiguous: {e}")))?;
        let out = PyArray2::<f64>::zeros(py, [n_samples, self.inner.n_neurons], false);
        // SAFETY: Newly allocated numpy arrays are contiguous.
        let out_slice = unsafe {
            out.as_slice_mut()
                .expect("newly allocated output array must be contiguous")
        };

        // The inner kernel writes results directly into the output buffer.
        self.inner
            .forward_batch_into(flat_inputs, n_samples, seed, out_slice)
            .map_err(PyValueError::new_err)?;
        Ok(out)
    }

    /// Forward pass with pre-packed input bitstreams.
    ///
    /// Accepts either:
    /// - 2-D numpy array of dtype uint64 with shape (n_inputs, words)
    /// - list[list[int]]
    fn forward_prepacked(&self, packed_inputs: &Bound<'_, PyAny>) -> PyResult<Vec<f64>> {
        // numpy path: copy each row out of the array view.
        if let Ok(arr) = packed_inputs.extract::<PyReadonlyArray2<u64>>() {
            let view = arr.as_array();
            let rows: Vec<Vec<u64>> = (0..view.nrows()).map(|i| view.row(i).to_vec()).collect();
            return self
                .inner
                .forward_prepacked(&rows)
                .map_err(PyValueError::new_err);
        }

        // Fallback path: plain Python nested lists.
        let rows = packed_inputs.extract::<Vec<Vec<u64>>>().map_err(|_| {
            PyValueError::new_err(
                "packed_inputs must be a 2-D numpy uint64 array or list[list[int]].",
            )
        })?;
        self.inner
            .forward_prepacked(&rows)
            .map_err(PyValueError::new_err)
    }

    /// Dense forward with pre-packed numpy 2-D input (true zero-copy).
    ///
    /// Accepts a contiguous numpy uint64 array of shape (n_inputs, words).
    /// This avoids all row-copying that the `forward_prepacked` method does.
    #[pyo3(signature = (packed_inputs,))]
    fn forward_prepacked_numpy<'py>(
        &self,
        py: Python<'py>,
        packed_inputs: PyReadonlyArray2<'py, u64>,
    ) -> PyResult<Bound<'py, PyArray1<f64>>> {
        let shape = packed_inputs.shape();
        let n_inputs = shape[0];
        let words = shape[1];
        let flat = packed_inputs
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Array not contiguous: {e}")))?;
        let out = self
            .inner
            .forward_prepacked_2d(flat, n_inputs, words)
            .map_err(PyValueError::new_err)?;
        Ok(out.into_pyarray(py))
    }
}
1563
1564fn parse_surrogate(name: &str, k: Option<f32>) -> PyResult<grad::SurrogateType> {
1565    let normalized = name.to_ascii_lowercase().replace('-', "_");
1566    match normalized.as_str() {
1567        "fast_sigmoid" => Ok(grad::SurrogateType::FastSigmoid {
1568            k: k.unwrap_or(25.0),
1569        }),
1570        "superspike" | "super_spike" => Ok(grad::SurrogateType::SuperSpike {
1571            k: k.unwrap_or(100.0),
1572        }),
1573        "arctan" | "arc_tan" => Ok(grad::SurrogateType::ArcTan { k: k.unwrap_or(10.0) }),
1574        "straightthrough" | "straight_through" | "ste" => Ok(grad::SurrogateType::StraightThrough),
1575        _ => Err(PyValueError::new_err(format!(
1576            "Unknown surrogate '{}'. Use one of: fast_sigmoid, superspike, arctan, straight_through.",
1577            name
1578        ))),
1579    }
1580}
1581
1582fn extract_matrix_f64(data: &Bound<'_, PyAny>, name: &str) -> PyResult<(Vec<f64>, usize, usize)> {
1583    if let Ok(rows) = data.extract::<Vec<Vec<f64>>>() {
1584        if rows.is_empty() {
1585            return Err(PyValueError::new_err(format!(
1586                "{} must not be an empty matrix.",
1587                name
1588            )));
1589        }
1590        let row_count = rows.len();
1591        let cols = rows[0].len();
1592        if cols == 0 {
1593            return Err(PyValueError::new_err(format!(
1594                "{} must not have zero columns.",
1595                name
1596            )));
1597        }
1598        if rows.iter().any(|r| r.len() != cols) {
1599            return Err(PyValueError::new_err(format!(
1600                "{} must be a rectangular matrix.",
1601                name
1602            )));
1603        }
1604        let out = rows.into_iter().flatten().collect::<Vec<f64>>();
1605        return Ok((out, row_count, cols));
1606    }
1607
1608    if let Ok(flat) = data.extract::<Vec<f64>>() {
1609        if flat.is_empty() {
1610            return Err(PyValueError::new_err(format!(
1611                "{} must not be an empty vector.",
1612                name
1613            )));
1614        }
1615        let cols = flat.len();
1616        return Ok((flat, 1, cols));
1617    }
1618
1619    Err(PyValueError::new_err(format!(
1620        "{} must be a 1-D or 2-D float array.",
1621        name
1622    )))
1623}
1624
/// Split a row-major flat buffer into `rows` owned rows of `cols` elements.
///
/// Panics if `flat` holds fewer than `rows * cols` elements (same as the
/// slice indexing it is built on); extra trailing elements are ignored.
fn reshape_flat_to_rows(flat: Vec<f64>, rows: usize, cols: usize) -> Vec<Vec<f64>> {
    (0..rows)
        .map(|r| flat[r * cols..(r + 1) * cols].to_vec())
        .collect()
}
1632
/// Python wrapper over the surrogate-gradient LIF neuron.
#[pyclass(
    name = "SurrogateLif",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PySurrogateLif {
    // Underlying Rust neuron with gradient trace.
    inner: grad::SurrogateLif,
}
1640
#[pymethods]
impl PySurrogateLif {
    /// Build a surrogate-gradient LIF neuron. `surrogate` names the backward
    /// function (see `parse_surrogate`); `k` overrides its default slope.
    #[new]
    #[pyo3(signature = (
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2,
        surrogate="fast_sigmoid",
        k=None
    ))]
    #[allow(clippy::too_many_arguments)]
    fn new(
        data_width: u32,
        fraction: u32,
        v_rest: i16,
        v_reset: i16,
        v_threshold: i16,
        refractory_period: i32,
        surrogate: &str,
        k: Option<f32>,
    ) -> PyResult<Self> {
        // Unknown surrogate names are rejected here, before construction.
        let surrogate = parse_surrogate(surrogate, k)?;
        Ok(Self {
            inner: grad::SurrogateLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
                surrogate,
            ),
        })
    }

    /// Forward one step; returns `(spike, voltage)` like `FixedPointLif.step`.
    #[pyo3(signature = (leak_k, gain_k, i_t, noise_in=0))]
    fn forward(&mut self, leak_k: i16, gain_k: i16, i_t: i16, noise_in: i16) -> (i32, i16) {
        self.inner.forward(leak_k, gain_k, i_t, noise_in)
    }

    /// Backpropagate `grad_output` through the stored trace; inner errors
    /// become ValueError.
    fn backward(&mut self, grad_output: f32) -> PyResult<f32> {
        self.inner
            .backward(grad_output)
            .map_err(PyValueError::new_err)
    }

    /// Clear the recorded forward trace.
    fn clear_trace(&mut self) {
        self.inner.clear_trace();
    }

    /// Reset the neuron state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Length of the stored trace.
    fn trace_len(&self) -> usize {
        self.inner.trace_len()
    }
}
1702
/// Python wrapper over the differentiable (surrogate-gradient) dense layer.
#[pyclass(
    name = "DifferentiableDenseLayer",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyDifferentiableDenseLayer {
    // Underlying Rust layer with cached forward activations.
    inner: grad::DifferentiableDenseLayer,
}
1710
#[pymethods]
impl PyDifferentiableDenseLayer {
    /// Build a differentiable dense layer; `surrogate`/`k` select the
    /// backward function (see `parse_surrogate`).
    #[new]
    #[pyo3(signature = (
        n_inputs,
        n_neurons,
        length=1024,
        seed=24301,
        surrogate="fast_sigmoid",
        k=None
    ))]
    fn new(
        n_inputs: usize,
        n_neurons: usize,
        length: usize,
        seed: u64,
        surrogate: &str,
        k: Option<f32>,
    ) -> PyResult<Self> {
        let surrogate = parse_surrogate(surrogate, k)?;
        Ok(Self {
            inner: grad::DifferentiableDenseLayer::new(
                n_inputs, n_neurons, length, seed, surrogate,
            ),
        })
    }

    /// Return the weight matrix as nested lists (one row per neuron).
    fn get_weights(&self) -> Vec<Vec<f64>> {
        self.inner.layer.get_weights()
    }

    /// Forward pass on a single input vector; inner errors become ValueError.
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward(&mut self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Backward pass; returns `(input_grads, weight_grads)`.
    fn backward(&self, grad_output: Vec<f64>) -> PyResult<(Vec<f64>, Vec<Vec<f64>>)> {
        self.inner
            .backward(&grad_output)
            .map_err(PyValueError::new_err)
    }

    /// Apply a gradient step with learning rate `lr`.
    ///
    /// `weight_grads` must be shaped (n_neurons, n_inputs); both dimensions
    /// are validated here before the update.
    fn update_weights(&mut self, weight_grads: Vec<Vec<f64>>, lr: f64) -> PyResult<()> {
        if weight_grads.len() != self.inner.layer.n_neurons {
            return Err(PyValueError::new_err(format!(
                "Expected {} grad rows, got {}.",
                self.inner.layer.n_neurons,
                weight_grads.len()
            )));
        }
        if weight_grads
            .iter()
            .any(|row| row.len() != self.inner.layer.n_inputs)
        {
            return Err(PyValueError::new_err(format!(
                "Expected each grad row to have length {}.",
                self.inner.layer.n_inputs
            )));
        }
        self.inner.update_weights(&weight_grads, lr);
        Ok(())
    }

    /// Drop cached forward state.
    fn clear_cache(&mut self) {
        self.inner.clear_cache();
    }
}
1780
/// Python wrapper over the stochastic attention module.
#[pyclass(
    name = "StochasticAttention",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyStochasticAttention {
    // Underlying Rust attention implementation.
    inner: attention::StochasticAttention,
}
1788
#[pymethods]
impl PyStochasticAttention {
    /// Build an attention module for key dimension `dim_k`; an explicit
    /// `temperature` selects the tempered constructor.
    #[new]
    #[pyo3(signature = (dim_k, temperature=None))]
    fn new(dim_k: usize, temperature: Option<f64>) -> Self {
        Self {
            inner: match temperature {
                Some(t) => attention::StochasticAttention::with_temperature(dim_k, t),
                None => attention::StochasticAttention::new(dim_k),
            },
        }
    }

    /// Softmax attention. Q/K/V are 1-D or 2-D Python float arrays; the
    /// result has q_rows rows and v_cols columns.
    fn forward_softmax(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_softmax(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Multi-head softmax attention over `n_heads` heads; shapes as in
    /// `forward_softmax`.
    #[pyo3(signature = (q, k, v, n_heads))]
    fn forward_multihead_softmax(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        n_heads: usize,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_multihead_softmax(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, n_heads,
            )
            .map_err(PyValueError::new_err)?;

        let out_cols = v_cols;
        Ok(reshape_flat_to_rows(out, q_rows, out_cols))
    }

    /// Default (non-softmax) attention variant; shapes as in `forward_softmax`.
    fn forward(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Stochastic-computing attention variant using bitstreams of `length`
    /// bits and the given `seed`; shapes as in `forward_softmax`.
    #[pyo3(signature = (q, k, v, length=1024, seed=44257))]
    fn forward_sc(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        length: usize,
        seed: u64,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_sc(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, length,
                seed,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Multi-head variant of `forward` over `n_heads` heads.
    #[pyo3(signature = (q, k, v, n_heads))]
    fn forward_multihead(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        n_heads: usize,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_multihead(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, n_heads,
            )
            .map_err(PyValueError::new_err)?;

        let out_cols = v_cols;
        Ok(reshape_flat_to_rows(out, q_rows, out_cols))
    }
}
1912
/// Python wrapper over the stochastic graph (message-passing) layer.
#[pyclass(
    name = "StochasticGraphLayer",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyStochasticGraphLayer {
    // Underlying Rust graph layer (dense or CSR adjacency).
    inner: graph::StochasticGraphLayer,
}
1920
1921#[pymethods]
1922impl PyStochasticGraphLayer {
    /// Build a layer from a dense adjacency matrix, which must be square.
    #[new]
    #[pyo3(signature = (adj_matrix, n_features, seed=42))]
    fn new(adj_matrix: &Bound<'_, PyAny>, n_features: usize, seed: u64) -> PyResult<Self> {
        let (adj_flat, n_rows, n_cols) = extract_matrix_f64(adj_matrix, "adj_matrix")?;
        if n_rows != n_cols {
            return Err(PyValueError::new_err(format!(
                "adj_matrix must be square, got {}x{}.",
                n_rows, n_cols
            )));
        }
        Ok(Self {
            inner: graph::StochasticGraphLayer::new(adj_flat, n_rows, n_features, seed),
        })
    }
1937
    /// Construct from CSR arrays (row_offsets, col_indices, values).
    ///
    /// The adjacency is `n_nodes` x `n_nodes`; CSR validation errors from the
    /// inner constructor are surfaced as ValueError.
    #[staticmethod]
    #[pyo3(signature = (row_offsets, col_indices, values, n_nodes, n_features, seed=42))]
    fn from_sparse(
        row_offsets: Vec<usize>,
        col_indices: Vec<usize>,
        values: Vec<f64>,
        n_nodes: usize,
        n_features: usize,
        seed: u64,
    ) -> PyResult<Self> {
        let csr = graph::CsrMatrix::new(row_offsets, col_indices, values, n_nodes, n_nodes)
            .map_err(PyValueError::new_err)?;
        let inner = graph::StochasticGraphLayer::new_sparse(csr, n_features, seed)
            .map_err(PyValueError::new_err)?;
        Ok(Self { inner })
    }
1955
    /// Dense adjacency with automatic CSR conversion if density < threshold.
    ///
    /// The matrix must be square; the sparse/dense decision is made by the
    /// inner `from_dense_auto` based on `density_threshold`.
    #[staticmethod]
    #[pyo3(signature = (adj_matrix, n_features, seed=42, density_threshold=0.3))]
    fn from_dense_auto(
        adj_matrix: &Bound<'_, PyAny>,
        n_features: usize,
        seed: u64,
        density_threshold: f64,
    ) -> PyResult<Self> {
        let (adj_flat, n_rows, n_cols) = extract_matrix_f64(adj_matrix, "adj_matrix")?;
        if n_rows != n_cols {
            return Err(PyValueError::new_err(format!(
                "adj_matrix must be square, got {}x{}.",
                n_rows, n_cols
            )));
        }
        Ok(Self {
            inner: graph::StochasticGraphLayer::from_dense_auto(
                adj_flat,
                n_rows,
                n_features,
                seed,
                density_threshold,
            ),
        })
    }
1982
1983    fn is_sparse(&self) -> bool {
1984        self.inner.is_sparse()
1985    }
1986
1987    fn forward(&self, node_features: &Bound<'_, PyAny>) -> PyResult<Vec<Vec<f64>>> {
1988        let (x_flat, x_rows, x_cols) = extract_matrix_f64(node_features, "node_features")?;
1989        if x_rows != self.inner.n_nodes || x_cols != self.inner.n_features {
1990            return Err(PyValueError::new_err(format!(
1991                "Expected node_features shape ({}, {}), got ({}, {}).",
1992                self.inner.n_nodes, self.inner.n_features, x_rows, x_cols
1993            )));
1994        }
1995        let out = self.inner.forward(&x_flat).map_err(PyValueError::new_err)?;
1996        Ok(reshape_flat_to_rows(
1997            out,
1998            self.inner.n_nodes,
1999            self.inner.n_features,
2000        ))
2001    }
2002
2003    #[pyo3(signature = (node_features, length=1024, seed=44257))]
2004    fn forward_sc(
2005        &self,
2006        node_features: &Bound<'_, PyAny>,
2007        length: usize,
2008        seed: u64,
2009    ) -> PyResult<Vec<Vec<f64>>> {
2010        let (x_flat, x_rows, x_cols) = extract_matrix_f64(node_features, "node_features")?;
2011        if x_rows != self.inner.n_nodes || x_cols != self.inner.n_features {
2012            return Err(PyValueError::new_err(format!(
2013                "Expected node_features shape ({}, {}), got ({}, {}).",
2014                self.inner.n_nodes, self.inner.n_features, x_rows, x_cols
2015            )));
2016        }
2017        let out = self
2018            .inner
2019            .forward_sc(&x_flat, length, seed)
2020            .map_err(PyValueError::new_err)?;
2021        Ok(reshape_flat_to_rows(
2022            out,
2023            self.inner.n_nodes,
2024            self.inner.n_features,
2025        ))
2026    }
2027
2028    fn get_weights(&self) -> Vec<f64> {
2029        self.inner.get_weights()
2030    }
2031
2032    fn set_weights(&mut self, weights: Vec<f64>) -> PyResult<()> {
2033        self.inner
2034            .set_weights(weights)
2035            .map_err(PyValueError::new_err)
2036    }
2037}
2038
/// Python wrapper around the Kuramoto phase-oscillator solver in
/// [`scpn::KuramotoSolver`].
#[pyclass(
    name = "KuramotoSolver",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyKuramotoSolver {
    // Backing Rust solver; every PyO3 method delegates to it.
    inner: scpn::KuramotoSolver,
}
2046
2047#[pymethods]
2048impl PyKuramotoSolver {
2049    #[new]
2050    #[pyo3(signature = (omega, coupling, phases, noise_amp=0.1))]
2051    fn new(
2052        omega: Vec<f64>,
2053        coupling: &Bound<'_, PyAny>,
2054        phases: Vec<f64>,
2055        noise_amp: f64,
2056    ) -> PyResult<Self> {
2057        let n = omega.len();
2058        if n == 0 {
2059            return Err(PyValueError::new_err("omega must not be empty."));
2060        }
2061        if phases.len() != n {
2062            return Err(PyValueError::new_err(format!(
2063                "phases length mismatch: got {}, expected {}.",
2064                phases.len(),
2065                n
2066            )));
2067        }
2068
2069        let (coupling_flat, rows, cols) = extract_matrix_f64(coupling, "coupling")?;
2070        if rows == 1 {
2071            if coupling_flat.len() != n * n {
2072                return Err(PyValueError::new_err(format!(
2073                    "Flat coupling length mismatch: got {}, expected {}.",
2074                    coupling_flat.len(),
2075                    n * n
2076                )));
2077            }
2078        } else if rows != n || cols != n {
2079            return Err(PyValueError::new_err(format!(
2080                "coupling must be shape ({}, {}) or flat length {}, got ({}, {}).",
2081                n,
2082                n,
2083                n * n,
2084                rows,
2085                cols
2086            )));
2087        }
2088
2089        Ok(Self {
2090            inner: scpn::KuramotoSolver::new(omega, coupling_flat, phases, noise_amp),
2091        })
2092    }
2093
2094    #[pyo3(signature = (dt, seed=0))]
2095    fn step(&mut self, dt: f64, seed: u64) -> f64 {
2096        self.inner.step(dt, seed)
2097    }
2098
2099    #[pyo3(signature = (n_steps, dt, seed=0))]
2100    fn run(&mut self, n_steps: usize, dt: f64, seed: u64) -> Vec<f64> {
2101        self.inner.run(n_steps, dt, seed)
2102    }
2103
2104    fn set_field_pressure(&mut self, f: f64) {
2105        self.inner.set_field_pressure(f);
2106    }
2107
2108    #[pyo3(signature = (
2109        dt,
2110        seed=0,
2111        w_flat=vec![],
2112        sigma_g=0.0,
2113        h_flat=vec![],
2114        pgbo_weight=0.0,
2115    ))]
2116    fn step_ssgf(
2117        &mut self,
2118        dt: f64,
2119        seed: u64,
2120        w_flat: Vec<f64>,
2121        sigma_g: f64,
2122        h_flat: Vec<f64>,
2123        pgbo_weight: f64,
2124    ) -> f64 {
2125        self.inner
2126            .step_ssgf(dt, seed, &w_flat, sigma_g, &h_flat, pgbo_weight)
2127    }
2128
2129    #[pyo3(signature = (
2130        n_steps,
2131        dt,
2132        seed=0,
2133        w_flat=vec![],
2134        sigma_g=0.0,
2135        h_flat=vec![],
2136        pgbo_weight=0.0,
2137    ))]
2138    #[allow(clippy::too_many_arguments)]
2139    fn run_ssgf(
2140        &mut self,
2141        n_steps: usize,
2142        dt: f64,
2143        seed: u64,
2144        w_flat: Vec<f64>,
2145        sigma_g: f64,
2146        h_flat: Vec<f64>,
2147        pgbo_weight: f64,
2148    ) -> Vec<f64> {
2149        self.inner
2150            .run_ssgf(n_steps, dt, seed, &w_flat, sigma_g, &h_flat, pgbo_weight)
2151    }
2152
2153    fn order_parameter(&self) -> f64 {
2154        self.inner.order_parameter()
2155    }
2156
2157    fn get_phases(&self) -> Vec<f64> {
2158        self.inner.get_phases().to_vec()
2159    }
2160
2161    fn set_phases(&mut self, phases: Vec<f64>) -> PyResult<()> {
2162        if phases.len() != self.inner.n {
2163            return Err(PyValueError::new_err(format!(
2164                "phases length mismatch: got {}, expected {}.",
2165                phases.len(),
2166                self.inner.n
2167            )));
2168        }
2169        self.inner.set_phases(phases);
2170        Ok(())
2171    }
2172
2173    fn set_coupling(&mut self, coupling: &Bound<'_, PyAny>) -> PyResult<()> {
2174        let n = self.inner.n;
2175        let (coupling_flat, rows, cols) = extract_matrix_f64(coupling, "coupling")?;
2176        if rows == 1 {
2177            if coupling_flat.len() != n * n {
2178                return Err(PyValueError::new_err(format!(
2179                    "Flat coupling length mismatch: got {}, expected {}.",
2180                    coupling_flat.len(),
2181                    n * n
2182                )));
2183            }
2184        } else if rows != n || cols != n {
2185            return Err(PyValueError::new_err(format!(
2186                "coupling must be shape ({}, {}) or flat length {}, got ({}, {}).",
2187                n,
2188                n,
2189                n * n,
2190                rows,
2191                cols
2192            )));
2193        }
2194        self.inner.set_coupling(coupling_flat);
2195        Ok(())
2196    }
2197}
2198
/// Stateless namespace exposing SCPN metric functions to Python; all
/// methods are static and delegate to [`scpn::SCPNMetrics`].
#[pyclass(
    name = "SCPNMetrics",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PySCPNMetrics;
2204
#[pymethods]
impl PySCPNMetrics {
    /// Construct the (stateless) metrics namespace object.
    #[new]
    fn new() -> Self {
        Self
    }

    /// Global coherence computed from 7 per-layer weights and 7 metric
    /// values; delegates to `scpn::SCPNMetrics::global_coherence`.
    #[staticmethod]
    fn global_coherence(weights: [f64; 7], metrics: [f64; 7]) -> f64 {
        scpn::SCPNMetrics::global_coherence(&weights, &metrics)
    }

    /// Consciousness index from L4 oscillator phases and a 6-component
    /// L7 glyph vector; delegates to
    /// `scpn::SCPNMetrics::consciousness_index`.
    #[staticmethod]
    fn consciousness_index(phases_l4: Vec<f64>, glyph_l7: [f64; 6]) -> f64 {
        scpn::SCPNMetrics::consciousness_index(&phases_l4, &glyph_l7)
    }
}
2222
// ── IR bridge ────────────────────────────────────────────────────────
2224
/// Python handle to a built IR graph; exposes read-only queries over
/// [`ir::graph::ScGraph`].
#[pyclass(name = "ScGraph", module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct PyScGraph {
    // Backing IR graph; all queries delegate to it.
    inner: ir::graph::ScGraph,
}
2229
#[pymethods]
impl PyScGraph {
    /// Number of operations in the graph.
    fn len(&self) -> usize {
        self.inner.len()
    }

    /// Python `len(graph)` support; same value as `len()`.
    fn __len__(&self) -> usize {
        self.inner.len()
    }

    /// Whether the graph is empty.
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Graph name.
    #[getter]
    fn name(&self) -> &str {
        &self.inner.name
    }

    /// Number of input ports.
    fn num_inputs(&self) -> usize {
        self.inner.inputs().len()
    }

    /// Number of output ports.
    fn num_outputs(&self) -> usize {
        self.inner.outputs().len()
    }

    /// Short debug representation shown in the Python REPL.
    fn __repr__(&self) -> String {
        format!("ScGraph('{}', ops={})", self.inner.name, self.inner.len())
    }
}
2266
/// Incremental IR-graph builder exposed to Python.
#[pyclass(
    name = "ScGraphBuilder",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyScGraphBuilder {
    // Wrapped in `Option` so `build()` can move the builder out by value;
    // `None` marks the builder as already consumed.
    inner: Option<ir::builder::ScGraphBuilder>,
}
2274
2275impl PyScGraphBuilder {
2276    fn builder_mut(&mut self) -> PyResult<&mut ir::builder::ScGraphBuilder> {
2277        self.inner
2278            .as_mut()
2279            .ok_or_else(|| PyValueError::new_err("Builder already consumed by build()."))
2280    }
2281}
2282
#[pymethods]
impl PyScGraphBuilder {
    /// Create an empty builder for a graph with the given name.
    #[new]
    fn new(name: String) -> Self {
        Self {
            inner: Some(ir::builder::ScGraphBuilder::new(name)),
        }
    }

    /// Add a typed input port. Returns value ID.
    ///
    /// `ty` is a type string accepted by `parse_sc_type`
    /// (e.g. "bool", "u32", "bitstream<1024>").
    fn input(&mut self, name: &str, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        // `.0` unwraps the ValueId newtype into a raw u32 for Python.
        Ok(self.builder_mut()?.input(name, sc_type).0)
    }

    /// Add an output port forwarding a value.
    fn output(&mut self, name: &str, source_id: u32) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .output(name, ir::graph::ValueId(source_id))
            .0)
    }

    /// Add a float constant.
    fn constant_f64(&mut self, value: f64, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        Ok(self
            .builder_mut()?
            .constant(ir::graph::ScConst::F64(value), sc_type)
            .0)
    }

    /// Add an integer constant.
    fn constant_i64(&mut self, value: i64, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        Ok(self
            .builder_mut()?
            .constant(ir::graph::ScConst::I64(value), sc_type)
            .0)
    }

    /// Add a Bernoulli encode operation.
    ///
    /// `seed` must fit in 16 bits; out-of-range values raise ValueError
    /// rather than being silently truncated.
    fn encode(&mut self, prob_id: u32, length: usize, seed: u64) -> PyResult<u32> {
        let seed = u16::try_from(seed)
            .map_err(|_| PyValueError::new_err(format!("Seed out of range for u16: {seed}")))?;
        Ok(self
            .builder_mut()?
            .encode(ir::graph::ValueId(prob_id), length, seed)
            .0)
    }

    /// Add a bitwise AND (SC multiply).
    fn bitwise_and(&mut self, lhs_id: u32, rhs_id: u32) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .bitwise_and(ir::graph::ValueId(lhs_id), ir::graph::ValueId(rhs_id))
            .0)
    }

    /// Add a popcount operation.
    fn popcount(&mut self, input_id: u32) -> PyResult<u32> {
        Ok(self.builder_mut()?.popcount(ir::graph::ValueId(input_id)).0)
    }

    /// Add a LIF neuron step.
    ///
    /// The keyword defaults describe a 16-bit fixed-point neuron with an
    /// 8-bit fraction; voltages are given in raw fixed-point units.
    #[pyo3(signature = (
        current_id,
        leak_id,
        gain_id,
        noise_id,
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2
    ))]
    #[allow(clippy::too_many_arguments)]
    fn lif_step(
        &mut self,
        current_id: u32,
        leak_id: u32,
        gain_id: u32,
        noise_id: u32,
        data_width: u32,
        fraction: u32,
        v_rest: i64,
        v_reset: i64,
        v_threshold: i64,
        refractory_period: u32,
    ) -> PyResult<u32> {
        // Bundle the scalar arguments into the IR parameter struct.
        let params = ir::graph::LifParams {
            data_width,
            fraction,
            v_rest,
            v_reset,
            v_threshold,
            refractory_period,
        };
        Ok(self
            .builder_mut()?
            .lif_step(
                ir::graph::ValueId(current_id),
                ir::graph::ValueId(leak_id),
                ir::graph::ValueId(gain_id),
                ir::graph::ValueId(noise_id),
                params,
            )
            .0)
    }

    /// Add a dense layer forward pass.
    ///
    /// `seed_base` must fit in 16 bits; the weight seed base is derived
    /// from it (input seed base + 1, wrapping).
    #[pyo3(signature = (
        inputs_id,
        weights_id,
        leak_id,
        gain_id,
        n_inputs=3,
        n_neurons=7,
        data_width=16,
        stream_length=1024,
        seed_base=0xACE1u64,
        y_min=0,
        y_max=65535
    ))]
    #[allow(clippy::too_many_arguments)]
    fn dense_forward(
        &mut self,
        inputs_id: u32,
        weights_id: u32,
        leak_id: u32,
        gain_id: u32,
        n_inputs: usize,
        n_neurons: usize,
        data_width: u32,
        stream_length: usize,
        seed_base: u64,
        y_min: i64,
        y_max: i64,
    ) -> PyResult<u32> {
        // Reject seeds that do not fit the hardware's 16-bit LFSR seed.
        let input_seed_base = u16::try_from(seed_base).map_err(|_| {
            PyValueError::new_err(format!("seed_base out of range for u16: {seed_base}"))
        })?;
        let params = ir::graph::DenseParams {
            n_inputs,
            n_neurons,
            data_width,
            stream_length,
            input_seed_base,
            // Weight streams use the next seed so they decorrelate from inputs.
            weight_seed_base: input_seed_base.wrapping_add(1),
            y_min,
            y_max,
        };
        Ok(self
            .builder_mut()?
            .dense_forward(
                ir::graph::ValueId(inputs_id),
                ir::graph::ValueId(weights_id),
                ir::graph::ValueId(leak_id),
                ir::graph::ValueId(gain_id),
                params,
            )
            .0)
    }

    /// Add a scale (multiply by constant factor) operation.
    fn scale(&mut self, input_id: u32, factor: f64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .scale(ir::graph::ValueId(input_id), factor)
            .0)
    }

    /// Add an offset (add constant) operation.
    fn offset(&mut self, input_id: u32, offset_val: f64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .offset(ir::graph::ValueId(input_id), offset_val)
            .0)
    }

    /// Add a divide-by-constant operation.
    fn div_const(&mut self, input_id: u32, divisor: u64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .div_const(ir::graph::ValueId(input_id), divisor)
            .0)
    }

    /// Consume the builder and return a graph.
    ///
    /// After this call the builder is spent; any further method raises
    /// "Builder already consumed by build()."
    fn build(&mut self) -> PyResult<PyScGraph> {
        // `take()` moves the builder out, leaving `None` behind.
        let builder = self
            .inner
            .take()
            .ok_or_else(|| PyValueError::new_err("Builder already consumed by build()."))?;
        Ok(PyScGraph {
            inner: builder.build(),
        })
    }
}
2483
2484/// Verify an IR graph. Returns None on success, or a list of error strings.
2485#[pyfunction]
2486fn ir_verify(graph: PyRef<'_, PyScGraph>) -> Option<Vec<String>> {
2487    match ir::verify::verify(&graph.inner) {
2488        Ok(()) => None,
2489        Err(errors) => Some(errors.iter().map(|e| e.to_string()).collect()),
2490    }
2491}
2492
/// Print an IR graph to its stable text format.
///
/// Thin wrapper over `ir::printer::print`; the text is what `ir_parse`
/// consumes.
#[pyfunction]
fn ir_print(graph: PyRef<'_, PyScGraph>) -> String {
    ir::printer::print(&graph.inner)
}
2498
2499/// Parse an IR graph from text format.
2500#[pyfunction]
2501fn ir_parse(text: &str) -> PyResult<PyScGraph> {
2502    ir::parser::parse(text)
2503        .map(|graph| PyScGraph { inner: graph })
2504        .map_err(|e| PyValueError::new_err(e.to_string()))
2505}
2506
/// Emit SystemVerilog from an IR graph.
///
/// Returns the generated source as a string; emission failures are
/// surfaced to Python as `ValueError`.
#[pyfunction]
fn ir_emit_sv(graph: PyRef<'_, PyScGraph>) -> PyResult<String> {
    ir::emit_sv::emit(&graph.inner).map_err(PyValueError::new_err)
}
2512
2513/// Parse a Python type string into ScType.
2514///
2515/// Accepted formats: "bool", "rate", "u32", "u64", "i16", "i32",
2516/// "bitstream", "bitstream<1024>", "fixed<16,8>", "vec<bool,7>".
2517fn parse_sc_type(s: &str) -> PyResult<ir::graph::ScType> {
2518    let s = s.trim();
2519    let lower = s.to_ascii_lowercase();
2520    match lower.as_str() {
2521        "bool" => Ok(ir::graph::ScType::Bool),
2522        "rate" => Ok(ir::graph::ScType::Rate),
2523        "u32" => Ok(ir::graph::ScType::UInt { width: 32 }),
2524        "u64" => Ok(ir::graph::ScType::UInt { width: 64 }),
2525        "i16" => Ok(ir::graph::ScType::SInt { width: 16 }),
2526        "i32" => Ok(ir::graph::ScType::SInt { width: 32 }),
2527        "bitstream" => Ok(ir::graph::ScType::Bitstream { length: 0 }),
2528        _ => {
2529            if let Some(width) = lower.strip_prefix('u') {
2530                if let Ok(width) = width.parse::<u32>() {
2531                    return Ok(ir::graph::ScType::UInt { width });
2532                }
2533            }
2534            if let Some(width) = lower.strip_prefix('i') {
2535                if let Ok(width) = width.parse::<u32>() {
2536                    return Ok(ir::graph::ScType::SInt { width });
2537                }
2538            }
2539            if let Some(inner) = lower
2540                .strip_prefix("bitstream<")
2541                .and_then(|r| r.strip_suffix('>'))
2542            {
2543                let length = inner.parse::<usize>().map_err(|_| {
2544                    PyValueError::new_err(format!("Invalid bitstream length: '{inner}'"))
2545                })?;
2546                return Ok(ir::graph::ScType::Bitstream { length });
2547            }
2548            if let Some(inner) = lower
2549                .strip_prefix("fixed<")
2550                .and_then(|r| r.strip_suffix('>'))
2551            {
2552                let parts: Vec<&str> = inner.split(',').collect();
2553                if parts.len() != 2 {
2554                    return Err(PyValueError::new_err(format!(
2555                        "fixed type needs 2 params: '{s}'"
2556                    )));
2557                }
2558                let width = parts[0].trim().parse::<u32>().map_err(|_| {
2559                    PyValueError::new_err(format!("Invalid fixed width: '{}'", parts[0]))
2560                })?;
2561                let frac = parts[1].trim().parse::<u32>().map_err(|_| {
2562                    PyValueError::new_err(format!("Invalid fixed frac: '{}'", parts[1]))
2563                })?;
2564                return Ok(ir::graph::ScType::FixedPoint { width, frac });
2565            }
2566            if let Some(inner) = lower.strip_prefix("vec<").and_then(|r| r.strip_suffix('>')) {
2567                if let Some(comma_pos) = inner.rfind(',') {
2568                    let inner_ty_str = &inner[..comma_pos];
2569                    let count_str = inner[comma_pos + 1..].trim();
2570                    let inner_ty = parse_sc_type(inner_ty_str)?;
2571                    let count = count_str.parse::<usize>().map_err(|_| {
2572                        PyValueError::new_err(format!("Invalid vec count: '{count_str}'"))
2573                    })?;
2574                    return Ok(ir::graph::ScType::Vec {
2575                        element: Box::new(inner_ty),
2576                        count,
2577                    });
2578                }
2579            }
2580            Err(PyValueError::new_err(format!("Unknown IR type: '{s}'")))
2581        }
2582    }
2583}