1#![allow(
10 clippy::useless_conversion,
11 clippy::needless_range_loop,
12 clippy::too_many_arguments,
13 deprecated
14)]
15
16use numpy::{
17 IntoPyArray, PyArray1, PyArray2, PyArrayMethods, PyReadonlyArray1, PyReadonlyArray2,
18 PyReadwriteArray1, PyUntypedArrayMethods,
19};
20use pyo3::exceptions::PyValueError;
21use pyo3::prelude::*;
22use pyo3::types::PyDict;
23use pyo3::IntoPyObject;
24
25pub mod analysis;
26pub mod attention;
27pub mod bitstream;
28pub mod brunel;
29pub mod connectome;
30pub mod conv;
31pub mod cordiv;
32pub mod cortical_column;
33pub mod cortical_inject;
34pub mod dna;
35pub mod ei_network;
36pub mod encoder;
37pub mod evo;
38pub mod fault;
39pub mod fusion;
40#[cfg(feature = "gpu")]
41pub mod gpu;
42pub mod grad;
43pub mod graph;
44pub mod ir;
45pub mod layer;
46pub mod lgssm;
47pub mod network_runner;
48pub mod neuron;
49pub mod neurons;
50pub mod optimizer;
51pub mod partition;
52pub mod phi;
53pub mod photonic;
54pub mod ping;
55pub mod predictive_coding;
56pub mod pyo3_neurons;
57pub mod quantum;
58pub mod rall_dendrite;
59pub mod recorder;
60pub mod recurrent;
61pub mod rk4_neurons;
62pub mod scpn;
63pub mod simd;
64pub mod sobol;
65pub mod synapses;
66pub mod wilson_cowan;
67pub mod wong_wang;
68
/// Python-visible handle for a packed binary bitstream / hypervector.
///
/// Exposed to Python as `BitStreamTensor`; all operations delegate to the
/// Rust-side `bitstream::BitStreamTensor`.
#[pyclass(
    name = "BitStreamTensor",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyBitStreamTensor {
    // Packed u64 words plus the logical bit length.
    inner: bitstream::BitStreamTensor,
}
78
#[pymethods]
impl PyBitStreamTensor {
    /// Create a random tensor: `dimension` Bernoulli(0.5) bits packed into
    /// u64 words, drawn from a Xoshiro256++ RNG seeded with `seed`.
    #[new]
    #[pyo3(signature = (dimension=10000, seed=0xACE1))]
    fn new(dimension: usize, seed: u64) -> Self {
        use rand::SeedableRng;
        let mut rng = rand_xoshiro::Xoshiro256PlusPlus::seed_from_u64(seed);
        let data = bitstream::bernoulli_packed(0.5, dimension, &mut rng);
        Self {
            inner: bitstream::BitStreamTensor::from_words(data, dimension),
        }
    }

    /// Build a tensor from pre-packed u64 words and a logical bit length.
    ///
    /// Raises `ValueError` for a zero length. NOTE(review): `data.len()` is
    /// not checked against `length` here — presumably `from_words` tolerates
    /// or enforces it; confirm in `bitstream`.
    #[staticmethod]
    fn from_packed(data: Vec<u64>, length: usize) -> PyResult<Self> {
        if length == 0 {
            return Err(pyo3::exceptions::PyValueError::new_err(
                "bitstream length must be > 0",
            ));
        }
        Ok(Self {
            inner: bitstream::BitStreamTensor::from_words(data, length),
        })
    }

    /// XOR (bind) `other` into this tensor in place.
    fn xor_inplace(&mut self, other: &PyBitStreamTensor) {
        self.inner.xor_inplace(&other.inner);
    }

    /// Return a new tensor equal to `self XOR other` (binding).
    fn xor(&self, other: &PyBitStreamTensor) -> PyBitStreamTensor {
        PyBitStreamTensor {
            inner: self.inner.xor(&other.inner),
        }
    }

    /// Rotate the bitstream right by `shift` bit positions, in place.
    fn rotate_right(&mut self, shift: usize) {
        self.inner.rotate_right(shift);
    }

    /// Normalized Hamming distance to `other` (as computed by the core impl).
    fn hamming_distance(&self, other: &PyBitStreamTensor) -> f32 {
        self.inner.hamming_distance(&other.inner)
    }

    /// Majority-vote bundle of a list of tensors into a single tensor.
    #[staticmethod]
    fn bundle(vectors: Vec<PyRef<'_, PyBitStreamTensor>>) -> PyBitStreamTensor {
        let refs: Vec<&bitstream::BitStreamTensor> = vectors.iter().map(|v| &v.inner).collect();
        PyBitStreamTensor {
            inner: bitstream::BitStreamTensor::bundle(&refs),
        }
    }

    /// Number of set bits in the tensor.
    fn popcount(&self) -> u64 {
        bitstream::popcount(&self.inner)
    }

    /// Copy of the packed u64 words backing the tensor.
    #[getter]
    fn data(&self) -> Vec<u64> {
        self.inner.data.clone()
    }

    /// Logical bit length of the tensor.
    #[getter]
    fn length(&self) -> usize {
        self.inner.length
    }

    /// `len(tensor)` in Python: the logical bit length.
    fn __len__(&self) -> usize {
        self.inner.length
    }

    /// Debug repr showing length and popcount.
    fn __repr__(&self) -> String {
        format!(
            "BitStreamTensor(length={}, popcount={})",
            self.inner.length,
            bitstream::popcount(&self.inner)
        )
    }
}
166
/// Python-visible STDP (spike-timing-dependent plasticity) synapse with a
/// fixed-point weight; thin wrapper over `synapses::StdpSynapse`.
#[pyclass(
    name = "StdpSynapse",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct StdpSynapse {
    inner: synapses::StdpSynapse,
}
174
175#[pymethods]
176impl StdpSynapse {
177 #[new]
178 #[pyo3(signature = (initial_weight, data_width=16, fraction=8))]
179 fn new(initial_weight: i16, data_width: u32, fraction: u32) -> Self {
180 Self {
181 inner: synapses::StdpSynapse::new(initial_weight, data_width, fraction),
182 }
183 }
184
185 #[allow(clippy::too_many_arguments)]
186 #[pyo3(signature = (pre_spike, post_spike, a_plus=16, a_minus=-16, decay=250, w_min=0, w_max=32767))]
187 fn step(
188 &mut self,
189 pre_spike: bool,
190 post_spike: bool,
191 a_plus: i16,
192 a_minus: i16,
193 decay: i16,
194 w_min: i16,
195 w_max: i16,
196 ) {
197 let params = synapses::StdpParams {
198 a_plus,
199 a_minus,
200 decay,
201 w_min,
202 w_max,
203 };
204 self.inner.step(pre_spike, post_spike, ¶ms);
205 }
206
207 #[getter]
208 fn weight(&self) -> i16 {
209 self.inner.weight
210 }
211
212 #[setter]
213 fn set_weight(&mut self, value: i16) {
214 self.inner.weight = value;
215 }
216}
217
/// Python-visible Brunel-style sparse spiking network (CSR connectivity,
/// fixed-point LIF dynamics, Poisson external drive).
#[pyclass(
    name = "BrunelNetwork",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyBrunelNetwork {
    inner: brunel::BrunelNetwork,
}
227
#[pymethods]
impl PyBrunelNetwork {
    /// Build the network from CSR weight arrays (`w_indptr`, `w_indices`,
    /// `w_data`) plus neuron and external-input parameters.
    ///
    /// The i64 CSR index arrays are converted to `usize`; weights stay i16
    /// (fixed-point, format given by `data_width`/`fraction`). Construction
    /// errors from the core are surfaced as Python `ValueError`s.
    #[new]
    #[pyo3(signature = (
        n_neurons,
        w_indptr,
        w_indices,
        w_data,
        leak_k,
        gain_k,
        ext_lambda,
        ext_weight_fp,
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2,
        seed=42
    ))]
    #[allow(clippy::too_many_arguments)]
    fn new(
        n_neurons: usize,
        w_indptr: PyReadonlyArray1<'_, i64>,
        w_indices: PyReadonlyArray1<'_, i64>,
        w_data: PyReadonlyArray1<'_, i16>,
        leak_k: i16,
        gain_k: i16,
        ext_lambda: f64,
        ext_weight_fp: i16,
        data_width: u32,
        fraction: u32,
        v_rest: i16,
        v_reset: i16,
        v_threshold: i16,
        refractory_period: i32,
        seed: u64,
    ) -> PyResult<Self> {
        // All three CSR arrays must be contiguous slices; report which one
        // failed in the error message.
        let indptr = w_indptr
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Cannot read w_indptr: {e}")))?;
        let indices = w_indices
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Cannot read w_indices: {e}")))?;
        let data = w_data
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Cannot read w_data: {e}")))?;

        // NOTE(review): negative i64 entries would wrap in this `as usize`
        // cast; presumably the core constructor validates offsets — confirm.
        let row_offsets: Vec<usize> = indptr.iter().map(|&v| v as usize).collect();
        let col_indices: Vec<usize> = indices.iter().map(|&v| v as usize).collect();
        let values: Vec<i16> = data.to_vec();

        let inner = brunel::BrunelNetwork::new(
            n_neurons,
            row_offsets,
            col_indices,
            values,
            data_width,
            fraction,
            v_rest,
            v_reset,
            v_threshold,
            refractory_period,
            leak_k,
            gain_k,
            ext_lambda,
            ext_weight_fp,
            seed,
        )
        .map_err(PyValueError::new_err)?;

        Ok(Self { inner })
    }

    /// Run `n_steps` timesteps; returns a per-neuron (or per-step — as
    /// defined by the core `run`) spike-count array.
    fn run<'py>(&mut self, py: Python<'py>, n_steps: usize) -> Bound<'py, PyArray1<u32>> {
        let counts = self.inner.run(n_steps);
        counts.into_pyarray(py)
    }
}
307
/// Python-visible multi-population network simulator: holds populations and
/// CSR projections between them; wraps `network_runner::NetworkRunner`.
#[pyclass(
    name = "NetworkRunner",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyNetworkRunner {
    inner: network_runner::NetworkRunner,
}
317
#[pymethods]
impl PyNetworkRunner {
    /// Create an empty runner with no populations or projections.
    #[new]
    fn new() -> Self {
        Self {
            inner: network_runner::NetworkRunner::new(),
        }
    }

    /// Add a population of `n` neurons of the named model; returns its index.
    /// Unknown model names raise `ValueError` (see `supported_models`).
    fn add_population(&mut self, model: &str, n: usize) -> PyResult<usize> {
        let pop = network_runner::create_population(model, n).map_err(PyValueError::new_err)?;
        Ok(self.inner.add_population(pop))
    }

    /// Connect population `src` to `tgt` with a CSR weight matrix
    /// (`row_offsets`/`col_indices`/`values`) and an optional spike `delay`
    /// (in timesteps).
    #[pyo3(signature = (src, tgt, row_offsets, col_indices, values, delay=0))]
    fn add_projection(
        &mut self,
        src: usize,
        tgt: usize,
        row_offsets: Vec<i64>,
        col_indices: Vec<i64>,
        values: Vec<f64>,
        delay: usize,
    ) {
        let ro: Vec<usize> = row_offsets.iter().map(|&x| x as usize).collect();
        let ci: Vec<usize> = col_indices.iter().map(|&x| x as usize).collect();
        let proj = network_runner::ProjectionRunner::new(src, tgt, ro, ci, values, delay);
        self.inner.add_projection(proj);
    }

    /// Step a single population with externally supplied input currents.
    ///
    /// Returns a dict with `spikes` and `voltages` numpy arrays.
    fn step_population<'py>(
        &mut self,
        py: Python<'py>,
        pop_index: usize,
        currents: PyReadonlyArray1<'py, f64>,
    ) -> PyResult<Py<PyAny>> {
        let currents = currents.as_slice()?;
        let (spikes, voltages) = self
            .inner
            .step_population_with_currents(pop_index, currents)
            .map_err(PyValueError::new_err)?;
        let dict = PyDict::new(py);
        dict.set_item("spikes", spikes.into_pyarray(py))?;
        dict.set_item("voltages", voltages.into_pyarray(py))?;
        Ok(dict.into_any().unbind())
    }

    /// Run the full network for `n_steps`.
    ///
    /// Returns a dict with `spike_counts` (one entry per population),
    /// `spike_data` (list of per-population spike-time arrays) and
    /// `voltages` (list of per-population voltage traces).
    fn run<'py>(&mut self, py: Python<'py>, n_steps: usize) -> PyResult<Py<PyAny>> {
        let results = self.inner.run(n_steps);
        let dict = PyDict::new(py);
        let spike_counts: Vec<u64> = results.spike_counts.iter().map(|&c| c as u64).collect();
        dict.set_item("spike_counts", spike_counts.into_pyarray(py))?;
        let spike_data: Vec<Py<PyArray1<u64>>> = results
            .spike_data
            .into_iter()
            .map(|v: Vec<u64>| v.into_pyarray(py).unbind())
            .collect();
        dict.set_item("spike_data", spike_data)?;
        let voltages: Vec<Py<PyArray1<f64>>> = results
            .voltages
            .into_iter()
            .map(|v: Vec<f64>| v.into_pyarray(py).unbind())
            .collect();
        dict.set_item("voltages", voltages)?;
        Ok(dict.into_any().unbind())
    }

    /// Names of neuron models accepted by `add_population`.
    #[staticmethod]
    fn supported_models() -> Vec<&'static str> {
        network_runner::supported_models()
    }
}
390
391#[pyfunction]
396fn py_batch_simulate<'py>(
397 py: Python<'py>,
398 model_name: &str,
399 n_steps: usize,
400 current_trace: PyReadonlyArray1<'py, f64>,
401) -> PyResult<Py<PyAny>> {
402 let mut neuron = network_runner::create_neuron(model_name).map_err(PyValueError::new_err)?;
403 let currents = current_trace.as_slice()?;
404 let steps = n_steps.min(currents.len());
405
406 let mut voltages = vec![0.0f64; steps];
407 let mut spikes: Vec<u64> = Vec::new();
408
409 for t in 0..steps {
410 let fired = neuron.step(currents[t]);
411 voltages[t] = neuron.soma_voltage();
412 if fired != 0 {
413 spikes.push(t as u64);
414 }
415 }
416
417 let d = PyDict::new(py);
418 d.set_item("voltages", voltages.into_pyarray(py))?;
419 d.set_item("spikes", spikes.into_pyarray(py))?;
420 d.set_item("n_steps", steps)?;
421 Ok(d.into_any().unbind())
422}
423
/// Simulate an excitatory/inhibitory LIF network and return its activity.
///
/// Parameters: population sizes (`n_exc`, `n_inh`), the four E/I coupling
/// weights, connection probability `p_conn`, external Poisson rate
/// `ext_rate`, simulation `duration` and timestep `dt` (same time units as
/// the core), and RNG `seed`.
///
/// Returns a dict with raw spikes (`spike_times`, `spike_neurons`),
/// population rate traces (`rate_time`, `exc_rates`, `inh_rates`), summary
/// rates, and the echoed simulation parameters.
#[pyfunction]
#[pyo3(signature = (
    n_exc=80, n_inh=20,
    w_ee=0.1, w_ei=0.4, w_ie=0.1, w_ii=0.4,
    p_conn=0.2, ext_rate=5.0,
    duration=200.0, dt=0.1, seed=42
))]
fn py_simulate_ei_network<'py>(
    py: Python<'py>,
    n_exc: usize,
    n_inh: usize,
    w_ee: f64,
    w_ei: f64,
    w_ie: f64,
    w_ii: f64,
    p_conn: f64,
    ext_rate: f64,
    duration: f64,
    dt: f64,
    seed: u64,
) -> PyResult<Py<PyAny>> {
    let r = ei_network::simulate_ei(
        n_exc, n_inh, w_ee, w_ei, w_ie, w_ii, p_conn, ext_rate, duration, dt, seed,
    );
    // Capture the count before `spike_times` is moved into the numpy array.
    let n_spikes = r.spike_times.len();
    let d = PyDict::new(py);
    d.set_item("spike_times", r.spike_times.into_pyarray(py))?;
    d.set_item(
        "spike_neurons",
        // Widen neuron ids to i64 so numpy gets a signed integer dtype.
        r.spike_neurons
            .iter()
            .map(|&x| x as i64)
            .collect::<Vec<_>>()
            .into_pyarray(py),
    )?;
    d.set_item("n_exc", r.n_exc)?;
    d.set_item("n_inh", r.n_inh)?;
    d.set_item("n_total", r.n_exc + r.n_inh)?;
    d.set_item("n_spikes", n_spikes)?;
    d.set_item("rate_time", r.rate_time.into_pyarray(py))?;
    d.set_item("exc_rates", r.exc_rates.into_pyarray(py))?;
    d.set_item("inh_rates", r.inh_rates.into_pyarray(py))?;
    d.set_item("duration", duration)?;
    d.set_item("dt", dt)?;
    d.set_item("mean_exc_rate", r.mean_exc_rate)?;
    d.set_item("mean_inh_rate", r.mean_inh_rate)?;
    Ok(d.into_any().unbind())
}
474
/// Integrate the two-population Wong–Wang decision model.
///
/// `stim1`/`stim2` are per-step stimulus currents (equal length `n`);
/// `xi` supplies the pre-generated noise and must contain exactly `2 * n`
/// samples (two populations per step). Both length constraints are checked
/// and raise `ValueError` on mismatch.
///
/// Returns a dict with the synaptic-gating traces `s1`/`s2`, firing-rate
/// traces `r1`/`r2`, and the final gating values.
#[pyfunction]
#[pyo3(signature = (
    s1_init, s2_init,
    tau_s, gamma, j_n, j_cross, i_0, sigma, dt,
    stim1, stim2, xi,
))]
#[allow(clippy::too_many_arguments)]
fn py_wong_wang_simulate<'py>(
    py: Python<'py>,
    s1_init: f64,
    s2_init: f64,
    tau_s: f64,
    gamma: f64,
    j_n: f64,
    j_cross: f64,
    i_0: f64,
    sigma: f64,
    dt: f64,
    stim1: PyReadonlyArray1<'py, f64>,
    stim2: PyReadonlyArray1<'py, f64>,
    xi: PyReadonlyArray1<'py, f64>,
) -> PyResult<Py<PyAny>> {
    let stim1 = stim1.as_slice()?;
    let stim2 = stim2.as_slice()?;
    let xi = xi.as_slice()?;
    let n = stim1.len();
    if stim2.len() != n {
        return Err(PyValueError::new_err(format!(
            "stim1 and stim2 length mismatch: {n} vs {}",
            stim2.len()
        )));
    }
    if xi.len() != 2 * n {
        return Err(PyValueError::new_err(format!(
            "xi length must be 2 * n_steps ({}): got {}",
            2 * n,
            xi.len()
        )));
    }
    // Output buffers filled in place by the core integrator.
    let mut s1o = vec![0.0_f64; n];
    let mut s2o = vec![0.0_f64; n];
    let mut r1o = vec![0.0_f64; n];
    let mut r2o = vec![0.0_f64; n];
    let (s1_final, s2_final) = wong_wang::simulate(
        s1_init, s2_init, tau_s, gamma, j_n, j_cross, i_0, sigma, dt, stim1, stim2, xi, &mut s1o,
        &mut s2o, &mut r1o, &mut r2o,
    );
    let d = PyDict::new(py);
    d.set_item("s1", s1o.into_pyarray(py))?;
    d.set_item("s2", s2o.into_pyarray(py))?;
    d.set_item("r1", r1o.into_pyarray(py))?;
    d.set_item("r2", r2o.into_pyarray(py))?;
    d.set_item("s1_final", s1_final)?;
    d.set_item("s2_final", s2_final)?;
    Ok(d.into_any().unbind())
}
543
/// Integrate the Wilson–Cowan E/I mean-field rate model.
///
/// `ext_input` supplies the per-step external drive and also determines the
/// number of steps. Returns a dict with the excitatory (`e`) and inhibitory
/// (`i`) rate traces plus their final values.
#[pyfunction]
#[pyo3(signature = (
    e_init, i_init,
    w_ee, w_ei, w_ie, w_ii,
    tau_e, tau_i,
    a, theta, dt,
    ext_input,
))]
#[allow(clippy::too_many_arguments)]
fn py_wilson_cowan_simulate<'py>(
    py: Python<'py>,
    e_init: f64,
    i_init: f64,
    w_ee: f64,
    w_ei: f64,
    w_ie: f64,
    w_ii: f64,
    tau_e: f64,
    tau_i: f64,
    a: f64,
    theta: f64,
    dt: f64,
    ext_input: PyReadonlyArray1<'py, f64>,
) -> PyResult<Py<PyAny>> {
    let ext = ext_input.as_slice()?;
    let n = ext.len();
    // Output buffers filled in place by the core integrator.
    let mut e_out = vec![0.0_f64; n];
    let mut i_out = vec![0.0_f64; n];
    let (e_final, i_final) = wilson_cowan::simulate(
        e_init, i_init, w_ee, w_ei, w_ie, w_ii, tau_e, tau_i, a, theta, dt, ext, &mut e_out,
        &mut i_out,
    );
    let d = PyDict::new(py);
    d.set_item("e", e_out.into_pyarray(py))?;
    d.set_item("i", i_out.into_pyarray(py))?;
    d.set_item("e_final", e_final)?;
    d.set_item("i_final", i_final)?;
    Ok(d.into_any().unbind())
}
590
/// Module initializer: registers every function and class exposed to Python.
///
/// Keep registrations grouped by subsystem; pyo3 evaluates them in order and
/// the first error aborts module import.
#[pymodule]
fn sc_neurocore_engine(m: &Bound<'_, PyModule>) -> PyResult<()> {
    m.add("__version__", env!("CARGO_PKG_VERSION"))?;
    // Runtime / bitstream primitives.
    m.add_function(wrap_pyfunction!(simd_tier, m)?)?;
    m.add_function(wrap_pyfunction!(set_num_threads, m)?)?;
    m.add_function(wrap_pyfunction!(pack_bitstream, m)?)?;
    m.add_function(wrap_pyfunction!(unpack_bitstream, m)?)?;
    m.add_function(wrap_pyfunction!(popcount, m)?)?;
    m.add_function(wrap_pyfunction!(pack_bitstream_numpy, m)?)?;
    m.add_function(wrap_pyfunction!(popcount_numpy, m)?)?;
    m.add_function(wrap_pyfunction!(unpack_bitstream_numpy, m)?)?;
    // Batched neuron / encoder kernels.
    m.add_function(wrap_pyfunction!(batch_lif_run, m)?)?;
    m.add_function(wrap_pyfunction!(batch_lif_run_multi, m)?)?;
    m.add_function(wrap_pyfunction!(batch_lif_run_varying, m)?)?;
    m.add_function(wrap_pyfunction!(batch_encode, m)?)?;
    m.add_function(wrap_pyfunction!(batch_encode_numpy, m)?)?;
    // Mean-field dynamical models.
    m.add_function(wrap_pyfunction!(py_wong_wang_simulate, m)?)?;
    m.add_function(wrap_pyfunction!(py_wilson_cowan_simulate, m)?)?;
    // Core classes.
    m.add_class::<Lfsr16>()?;
    m.add_class::<BitstreamEncoder>()?;
    m.add_class::<FixedPointLif>()?;
    m.add_class::<DenseLayer>()?;
    #[cfg(feature = "gpu")]
    m.add_class::<gpu::PyGpuDenseLayer>()?;
    m.add_class::<StdpSynapse>()?;
    m.add_class::<PySurrogateLif>()?;
    m.add_class::<PyDifferentiableDenseLayer>()?;
    m.add_class::<PyStochasticAttention>()?;
    m.add_class::<PyStochasticGraphLayer>()?;
    m.add_class::<PyKuramotoSolver>()?;
    m.add_class::<PySCPNMetrics>()?;
    m.add_class::<PyBitStreamTensor>()?;
    m.add_class::<PyBrunelNetwork>()?;
    m.add_class::<PyIzhikevich>()?;
    m.add_class::<PyBitstreamAverager>()?;
    m.add_class::<PyScGraph>()?;
    m.add_class::<PyScGraphBuilder>()?;
    // IR tooling.
    m.add_function(wrap_pyfunction!(ir_verify, m)?)?;
    m.add_function(wrap_pyfunction!(ir_print, m)?)?;
    m.add_function(wrap_pyfunction!(ir_parse, m)?)?;
    m.add_function(wrap_pyfunction!(ir_emit_sv, m)?)?;
    // Neuron models and network runners.
    m.add_class::<PyAdExNeuron>()?;
    m.add_class::<PyExpIFNeuron>()?;
    m.add_class::<PyLapicqueNeuron>()?;
    pyo3_neurons::register_neuron_classes(m)?;
    m.add_class::<PyNetworkRunner>()?;
    m.add_function(wrap_pyfunction!(py_simulate_ei_network, m)?)?;
    m.add_function(wrap_pyfunction!(py_batch_simulate, m)?)?;
    m.add_function(wrap_pyfunction!(rk4_neurons::py_rk4_neuron_simulate, m)?)?;
    // Stochastic-computing utilities.
    m.add_function(wrap_pyfunction!(py_cordiv, m)?)?;
    m.add_function(wrap_pyfunction!(py_adaptive_length, m)?)?;
    // Predictive coding.
    m.add_function(wrap_pyfunction!(py_prediction_error, m)?)?;
    m.add_function(wrap_pyfunction!(py_predict_xor_ema, m)?)?;
    m.add_function(wrap_pyfunction!(py_predict_xor_lfsr, m)?)?;
    m.add_function(wrap_pyfunction!(py_recover_xor_ema, m)?)?;
    m.add_function(wrap_pyfunction!(py_recover_xor_lfsr, m)?)?;
    m.add_function(wrap_pyfunction!(py_phi_star, m)?)?;
    m.add_class::<PyCorticalColumn>()?;
    m.add_class::<PyRallDendrite>()?;
    // Spike-train analysis: basic statistics.
    m.add_function(wrap_pyfunction!(py_spike_times, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi, m)?)?;
    m.add_function(wrap_pyfunction!(py_firing_rate, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_count, m)?)?;
    m.add_function(wrap_pyfunction!(py_bin_spike_train, m)?)?;
    m.add_function(wrap_pyfunction!(py_instantaneous_rate, m)?)?;
    m.add_function(wrap_pyfunction!(py_psth, m)?)?;
    m.add_function(wrap_pyfunction!(py_cv_isi, m)?)?;
    m.add_function(wrap_pyfunction!(py_cv2, m)?)?;
    m.add_function(wrap_pyfunction!(py_local_variation, m)?)?;
    m.add_function(wrap_pyfunction!(py_fano_factor, m)?)?;
    m.add_function(wrap_pyfunction!(py_lempel_ziv_complexity, m)?)?;
    m.add_function(wrap_pyfunction!(py_permutation_entropy, m)?)?;
    m.add_function(wrap_pyfunction!(py_hurst_exponent, m)?)?;
    m.add_function(wrap_pyfunction!(py_approximate_entropy, m)?)?;
    m.add_function(wrap_pyfunction!(py_sample_entropy, m)?)?;
    // Correlation and synchrony measures.
    m.add_function(wrap_pyfunction!(py_cross_correlation, m)?)?;
    m.add_function(wrap_pyfunction!(py_pairwise_correlation, m)?)?;
    m.add_function(wrap_pyfunction!(py_event_synchronization, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_train_coherence, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_time_tiling_coefficient, m)?)?;
    m.add_function(wrap_pyfunction!(py_covariance_matrix, m)?)?;
    m.add_function(wrap_pyfunction!(py_autocorrelation_time, m)?)?;
    m.add_function(wrap_pyfunction!(py_noise_correlation, m)?)?;
    m.add_function(wrap_pyfunction!(py_signal_correlation, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_count_covariance, m)?)?;
    m.add_function(wrap_pyfunction!(py_joint_psth, m)?)?;
    m.add_function(wrap_pyfunction!(py_coincidence_index, m)?)?;
    // Spike-train distances.
    m.add_function(wrap_pyfunction!(py_van_rossum_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_victor_purpura_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_sync, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_sync_profile, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_profile, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi_profile, m)?)?;
    m.add_function(wrap_pyfunction!(py_adaptive_spike_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_schreiber_similarity, m)?)?;
    m.add_function(wrap_pyfunction!(py_hunter_milton_similarity, m)?)?;
    m.add_function(wrap_pyfunction!(py_earth_movers_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_multi_neuron_victor_purpura, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_distance_matrix, m)?)?;
    // Information theory and causality.
    m.add_function(wrap_pyfunction!(py_mutual_information, m)?)?;
    m.add_function(wrap_pyfunction!(py_transfer_entropy, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_train_entropy, m)?)?;
    m.add_function(wrap_pyfunction!(py_noise_entropy, m)?)?;
    m.add_function(wrap_pyfunction!(py_stimulus_specific_information, m)?)?;
    m.add_function(wrap_pyfunction!(py_kozachenko_leonenko_mi, m)?)?;
    m.add_function(wrap_pyfunction!(py_pairwise_granger_causality, m)?)?;
    m.add_function(wrap_pyfunction!(py_conditional_granger_causality, m)?)?;
    m.add_function(wrap_pyfunction!(py_spectral_granger_causality, m)?)?;
    m.add_function(wrap_pyfunction!(py_partial_directed_coherence, m)?)?;
    m.add_function(wrap_pyfunction!(py_directed_transfer_function, m)?)?;
    // Decoding.
    m.add_function(wrap_pyfunction!(py_population_vector_decode, m)?)?;
    m.add_function(wrap_pyfunction!(py_bayesian_decode, m)?)?;
    m.add_function(wrap_pyfunction!(py_maximum_likelihood_decode, m)?)?;
    m.add_function(wrap_pyfunction!(py_linear_discriminant_decode, m)?)?;
    m.add_function(wrap_pyfunction!(py_naive_bayes_decode, m)?)?;
    // Attention / representation utilities.
    m.add_function(wrap_pyfunction!(py_tokenise_spikes, m)?)?;
    m.add_function(wrap_pyfunction!(py_sinusoidal_position_encode, m)?)?;
    m.add_function(wrap_pyfunction!(py_scaled_dot_product_attention, m)?)?;
    m.add_function(wrap_pyfunction!(py_gaussian_attention, m)?)?;
    m.add_function(wrap_pyfunction!(py_infonce_loss, m)?)?;
    // Assembly / pattern detection.
    m.add_function(wrap_pyfunction!(py_functional_connectivity, m)?)?;
    m.add_function(wrap_pyfunction!(py_unitary_events, m)?)?;
    m.add_function(wrap_pyfunction!(py_cell_assembly_detection, m)?)?;
    m.add_function(wrap_pyfunction!(py_synfire_chain_detection, m)?)?;
    // Surrogate generation and point processes.
    m.add_function(wrap_pyfunction!(py_surrogate_isi_shuffle, m)?)?;
    m.add_function(wrap_pyfunction!(py_surrogate_dither, m)?)?;
    m.add_function(wrap_pyfunction!(py_homogeneous_poisson, m)?)?;
    m.add_function(wrap_pyfunction!(py_gamma_process, m)?)?;
    m.add_function(wrap_pyfunction!(py_compound_poisson_process, m)?)?;
    m.add_function(wrap_pyfunction!(py_surrogate_joint_isi, m)?)?;
    m.add_function(wrap_pyfunction!(py_surrogate_bin_shuffling, m)?)?;
    m.add_function(wrap_pyfunction!(py_surrogate_spike_train_shifting, m)?)?;
    // Event detection.
    m.add_function(wrap_pyfunction!(py_burst_detection, m)?)?;
    m.add_function(wrap_pyfunction!(py_first_spike_latency, m)?)?;
    m.add_function(wrap_pyfunction!(py_response_onset, m)?)?;
    m.add_function(wrap_pyfunction!(py_change_point_detection, m)?)?;
    // Order / directionality and spectra.
    m.add_function(wrap_pyfunction!(py_spike_directionality, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_train_order, m)?)?;
    m.add_function(wrap_pyfunction!(py_cubic_higher_order, m)?)?;
    m.add_function(wrap_pyfunction!(py_power_spectrum, m)?)?;
    // Waveform features.
    m.add_function(wrap_pyfunction!(py_waveform_width, m)?)?;
    m.add_function(wrap_pyfunction!(py_waveform_amplitude, m)?)?;
    m.add_function(wrap_pyfunction!(py_waveform_repolarization_slope, m)?)?;
    m.add_function(wrap_pyfunction!(py_waveform_recovery_slope, m)?)?;
    m.add_function(wrap_pyfunction!(py_waveform_halfwidth, m)?)?;
    m.add_function(wrap_pyfunction!(py_waveform_pt_ratio, m)?)?;
    // Point-process / renewal statistics.
    m.add_function(wrap_pyfunction!(py_conditional_intensity, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi_hazard_function, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi_survivor_function, m)?)?;
    m.add_function(wrap_pyfunction!(py_renewal_density, m)?)?;
    // Receptive-field / tuning analysis.
    m.add_function(wrap_pyfunction!(py_spike_triggered_average, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_triggered_covariance, m)?)?;
    m.add_function(wrap_pyfunction!(py_spatial_information, m)?)?;
    m.add_function(wrap_pyfunction!(py_place_field_detection, m)?)?;
    m.add_function(wrap_pyfunction!(py_tuning_curve, m)?)?;
    // Phase / field coupling.
    m.add_function(wrap_pyfunction!(py_phase_locking_value, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_field_coherence, m)?)?;
    m.add_function(wrap_pyfunction!(py_spike_phase_histogram, m)?)?;
    // Spike-sorting quality metrics.
    m.add_function(wrap_pyfunction!(py_isolation_distance, m)?)?;
    m.add_function(wrap_pyfunction!(py_l_ratio, m)?)?;
    m.add_function(wrap_pyfunction!(py_silhouette_score, m)?)?;
    m.add_function(wrap_pyfunction!(py_d_prime, m)?)?;
    m.add_function(wrap_pyfunction!(py_isi_violation_rate, m)?)?;
    m.add_function(wrap_pyfunction!(py_presence_ratio, m)?)?;
    m.add_function(wrap_pyfunction!(py_amplitude_cutoff, m)?)?;
    m.add_function(wrap_pyfunction!(py_snr, m)?)?;
    m.add_function(wrap_pyfunction!(py_nn_hit_rate, m)?)?;
    m.add_function(wrap_pyfunction!(py_drift_metric, m)?)?;
    // Dimensionality reduction / latent models.
    m.add_function(wrap_pyfunction!(py_spike_train_pca, m)?)?;
    m.add_function(wrap_pyfunction!(py_demixed_pca, m)?)?;
    m.add_function(wrap_pyfunction!(py_factor_analysis, m)?)?;
    m.add_function(wrap_pyfunction!(py_gpfa, m)?)?;
    m.add_function(wrap_pyfunction!(py_gpfa_transform, m)?)?;
    m.add_function(wrap_pyfunction!(py_spade_detect, m)?)?;
    // DNA computing.
    m.add_function(wrap_pyfunction!(py_dna_design_sequence, m)?)?;
    m.add_function(wrap_pyfunction!(py_dna_design_orthogonal_set, m)?)?;
    m.add_function(wrap_pyfunction!(py_dna_check_cross_hybridization, m)?)?;
    m.add_function(wrap_pyfunction!(py_dna_simulate_kinetics, m)?)?;
    m.add_function(wrap_pyfunction!(py_dna_detect_hairpins, m)?)?;
    // Quantum-annealing / Ising utilities.
    m.add_function(wrap_pyfunction!(py_qa_ising_energy, m)?)?;
    m.add_function(wrap_pyfunction!(py_qa_batch_ising_energy, m)?)?;
    m.add_function(wrap_pyfunction!(py_qa_simulated_annealing, m)?)?;
    m.add_function(wrap_pyfunction!(py_qa_gauge_transform, m)?)?;
    m.add_function(wrap_pyfunction!(py_qa_generate_gauges, m)?)?;
    m.add_function(wrap_pyfunction!(py_qa_greedy_partition, m)?)?;
    // Photonic hardware modelling.
    m.add_function(wrap_pyfunction!(py_ph_route_waveguides, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_mzi_transfer_matrix, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_cascade_mzi, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_analyze_crosstalk, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_analyze_power_budget, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_analyze_crosstalk_bank, m)?)?;
    m.add_function(wrap_pyfunction!(py_ph_analyze_crosstalk_pairs, m)?)?;
    // Optimization and evolutionary search.
    m.add_function(wrap_pyfunction!(py_opt_sa_search, m)?)?;
    m.add_function(wrap_pyfunction!(py_opt_extract_pareto, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_batch_mutate, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_batch_fitness, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_batch_crossover, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_diversity, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_novelty, m)?)?;
    m.add_function(wrap_pyfunction!(py_evo_tournament, m)?)?;
    // State-space models, fault injection, partitioning, misc kernels.
    m.add_function(wrap_pyfunction!(py_lgssm_kalman_filter, m)?)?;
    m.add_function(wrap_pyfunction!(py_inject_bitflip_u8, m)?)?;
    m.add_function(wrap_pyfunction!(py_inject_stuck_at_0_u8, m)?)?;
    m.add_function(wrap_pyfunction!(py_inject_stuck_at_1_u8, m)?)?;
    m.add_function(wrap_pyfunction!(py_inject_dropout_u8, m)?)?;
    m.add_function(wrap_pyfunction!(py_inject_gaussian_u8, m)?)?;
    m.add_function(wrap_pyfunction!(py_kl_refine, m)?)?;
    m.add_function(wrap_pyfunction!(py_ping_step, m)?)?;
    m.add_function(wrap_pyfunction!(py_parallel_csr_spmv_add, m)?)?;
    m.add_function(wrap_pyfunction!(py_parallel_csr_multi_spmv_add, m)?)?;
    Ok(())
}
837
838#[pyfunction]
841fn py_cordiv(
842 py: Python<'_>,
843 numerator: PyReadonlyArray1<'_, u8>,
844 denominator: PyReadonlyArray1<'_, u8>,
845) -> PyResult<Py<PyArray1<u8>>> {
846 let num = numerator.as_slice()?;
847 let den = denominator.as_slice()?;
848 let result = cordiv::cordiv(num, den);
849 Ok(result.into_pyarray(py).into())
850}
851
/// Minimum bitstream length for accuracy `epsilon` at the given `confidence`,
/// via the Hoeffding bound (delegates to `cordiv::adaptive_length_hoeffding`).
#[pyfunction]
fn py_adaptive_length(epsilon: f64, confidence: f64) -> usize {
    cordiv::adaptive_length_hoeffding(epsilon, confidence)
}
856
/// Prediction error between two bit-packed spike trains of `length` logical
/// bits; delegates to `predictive_coding::prediction_error_packed`.
///
/// Both packed-word arrays must be contiguous; each failure names the
/// offending argument in the `ValueError`.
#[pyfunction]
fn py_prediction_error(
    _py: Python<'_>,
    predicted: PyReadonlyArray1<'_, u64>,
    actual: PyReadonlyArray1<'_, u64>,
    length: usize,
) -> PyResult<f64> {
    let pred = predicted
        .as_slice()
        .map_err(|e| PyValueError::new_err(format!("predicted array must be contiguous: {e}")))?;
    let act = actual
        .as_slice()
        .map_err(|e| PyValueError::new_err(format!("actual array must be contiguous: {e}")))?;
    Ok(predictive_coding::prediction_error_packed(
        pred, act, length,
    ))
}
876
877#[pyfunction]
880fn py_predict_xor_ema(
881 _py: Python<'_>,
882 spikes: PyReadonlyArray1<'_, i8>,
883 n_channels: usize,
884 alpha: f64,
885 threshold: f64,
886) -> PyResult<(Py<PyArray1<i8>>, usize)> {
887 let data = spikes
888 .as_slice()
889 .map_err(|e| PyValueError::new_err(format!("spikes must be contiguous: {e}")))?;
890 let (errors, correct) =
891 predictive_coding::predict_and_xor_ema(data, n_channels, alpha, threshold);
892 Ok((PyArray1::from_vec(_py, errors).into(), correct))
893}
894
895#[pyfunction]
896fn py_recover_xor_ema(
897 _py: Python<'_>,
898 errors: PyReadonlyArray1<'_, i8>,
899 n_channels: usize,
900 alpha: f64,
901 threshold: f64,
902) -> PyResult<Py<PyArray1<i8>>> {
903 let data = errors
904 .as_slice()
905 .map_err(|e| PyValueError::new_err(format!("errors must be contiguous: {e}")))?;
906 let spikes = predictive_coding::xor_and_recover_ema(data, n_channels, alpha, threshold);
907 Ok(PyArray1::from_vec(_py, spikes).into())
908}
909
910#[pyfunction]
911fn py_predict_xor_lfsr(
912 _py: Python<'_>,
913 spikes: PyReadonlyArray1<'_, i8>,
914 n_channels: usize,
915 alpha_q8: i32,
916 seed: u16,
917) -> PyResult<(Py<PyArray1<i8>>, usize)> {
918 let data = spikes
919 .as_slice()
920 .map_err(|e| PyValueError::new_err(format!("spikes must be contiguous: {e}")))?;
921 let (errors, correct) =
922 predictive_coding::predict_and_xor_lfsr(data, n_channels, alpha_q8, seed);
923 Ok((PyArray1::from_vec(_py, errors).into(), correct))
924}
925
926#[pyfunction]
927fn py_recover_xor_lfsr(
928 _py: Python<'_>,
929 errors: PyReadonlyArray1<'_, i8>,
930 n_channels: usize,
931 alpha_q8: i32,
932 seed: u16,
933) -> PyResult<Py<PyArray1<i8>>> {
934 let data = errors
935 .as_slice()
936 .map_err(|e| PyValueError::new_err(format!("errors must be contiguous: {e}")))?;
937 let spikes = predictive_coding::xor_and_recover_lfsr(data, n_channels, alpha_q8, seed);
938 Ok(PyArray1::from_vec(_py, spikes).into())
939}
940
941#[pyfunction]
944fn py_phi_star(_py: Python<'_>, data: PyReadonlyArray2<'_, f64>, tau: usize) -> PyResult<f64> {
945 if !data.is_c_contiguous() {
946 return Err(PyValueError::new_err(
947 "py_phi_star requires C-contiguous array input",
948 ));
949 }
950 let shape = data.shape();
951 let n_channels = shape[0];
952 let n_timesteps = shape[1];
953 let flat = data.as_slice().map_err(|e| {
954 PyValueError::new_err(format!("py_phi_star requires C-contiguous array: {e}"))
955 })?;
956 let channels: Vec<Vec<f64>> = (0..n_channels)
957 .map(|i| flat[i * n_timesteps..(i + 1) * n_timesteps].to_vec())
958 .collect();
959 Ok(phi::phi_star(&channels, tau))
960}
961
/// Python-visible layered cortical-column model; wraps
/// `cortical_column::CorticalColumnRust`.
#[pyclass(
    name = "CorticalColumnRust",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyCorticalColumn {
    inner: cortical_column::CorticalColumnRust,
}
971
#[pymethods]
impl PyCorticalColumn {
    /// Build a column of `n` neurons per layer with membrane time constant
    /// `tau`, timestep `dt`, firing `threshold`, E/I coupling weights and RNG
    /// `seed`.
    #[new]
    fn new(n: usize, tau: f64, dt: f64, threshold: f64, w_exc: f64, w_inh: f64, seed: u64) -> Self {
        Self {
            inner: cortical_column::CorticalColumnRust::new(
                n, tau, dt, threshold, w_exc, w_inh, seed,
            ),
        }
    }

    /// Advance one timestep with the given thalamic drive.
    ///
    /// Returns a dict of per-layer spike arrays keyed "l4", "l23_exc",
    /// "l23_inh", "l5", "l6" — order must match the core `step` output.
    fn step<'py>(
        &mut self,
        py: Python<'py>,
        thalamic_input: PyReadonlyArray1<'py, f64>,
    ) -> PyResult<Py<PyDict>> {
        let input = thalamic_input.as_slice()?;
        let spikes = self.inner.step(input);
        let dict = PyDict::new(py);
        let names = ["l4", "l23_exc", "l23_inh", "l5", "l6"];
        for (i, name) in names.iter().enumerate() {
            dict.set_item(*name, spikes[i].clone().into_pyarray(py))?;
        }
        Ok(dict.into())
    }

    /// Reset all internal state to the post-construction condition.
    fn reset(&mut self) {
        self.inner.reset();
    }
}
1002
/// Python-visible Rall-style multi-branch passive dendrite model; wraps
/// `rall_dendrite::RallDendriteRust`.
#[pyclass(
    name = "RallDendriteRust",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyRallDendrite {
    inner: rall_dendrite::RallDendriteRust,
}
1012
#[pymethods]
impl PyRallDendrite {
    /// Build a dendrite with `n_branches` branches of `branch_length`
    /// compartments each, time constant `tau`, inter-compartment `coupling`
    /// and timestep `dt`.
    #[new]
    fn new(n_branches: usize, branch_length: usize, tau: f64, coupling: f64, dt: f64) -> Self {
        Self {
            inner: rall_dendrite::RallDendriteRust::new(
                n_branches,
                branch_length,
                tau,
                coupling,
                dt,
            ),
        }
    }

    /// Advance one timestep with one input current per branch; returns the
    /// resulting soma voltage.
    fn step(&mut self, branch_inputs: PyReadonlyArray1<'_, f64>) -> PyResult<f64> {
        let inputs = branch_inputs.as_slice()?;
        Ok(self.inner.step(inputs))
    }

    /// Reset all compartment voltages to the initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Current soma voltage.
    #[getter]
    fn soma_v(&self) -> f64 {
        self.inner.soma_v
    }
}
1042
1043#[pyfunction]
1045fn simd_tier() -> &'static str {
1046 #[cfg(target_arch = "x86_64")]
1047 {
1048 if is_x86_feature_detected!("avx512vpopcntdq") {
1049 return "avx512-vpopcntdq";
1050 }
1051 if is_x86_feature_detected!("avx512bw") {
1052 return "avx512bw";
1053 }
1054 if is_x86_feature_detected!("avx512f") {
1055 return "avx512f";
1056 }
1057 if is_x86_feature_detected!("avx2") {
1058 return "avx2";
1059 }
1060 if is_x86_feature_detected!("popcnt") {
1061 return "popcnt";
1062 }
1063 }
1064 #[cfg(target_arch = "aarch64")]
1065 {
1066 return "neon";
1067 }
1068 "portable"
1069}
1070
1071#[pyfunction]
1076fn set_num_threads(n: usize) -> PyResult<()> {
1077 if n == 0 {
1078 return Ok(());
1079 }
1080 rayon::ThreadPoolBuilder::new()
1081 .num_threads(n)
1082 .build_global()
1083 .map_err(|e| PyValueError::new_err(format!("Cannot set thread pool: {e}")))
1084}
1085
1086#[pyfunction]
1087fn pack_bitstream(py: Python<'_>, bits: &Bound<'_, PyAny>) -> PyResult<Py<PyAny>> {
1088 if let Ok(rows) = bits.extract::<Vec<Vec<u8>>>() {
1089 let packed_rows: Vec<Vec<u64>> = rows.iter().map(|row| bitstream::pack(row).data).collect();
1090 return Ok(packed_rows
1091 .into_pyobject(py)
1092 .map_err(|e| PyValueError::new_err(e.to_string()))?
1093 .into_any()
1094 .unbind());
1095 }
1096
1097 let flat = bits
1098 .extract::<Vec<u8>>()
1099 .map_err(|_| PyValueError::new_err("Expected a 1-D or 2-D array of uint8 bits."))?;
1100 Ok(bitstream::pack(&flat)
1101 .data
1102 .into_pyobject(py)
1103 .map_err(|e| PyValueError::new_err(e.to_string()))?
1104 .into_any()
1105 .unbind())
1106}
1107
1108#[pyfunction]
1109#[pyo3(signature = (packed, original_length, original_shape=None))]
1110fn unpack_bitstream(
1111 py: Python<'_>,
1112 packed: &Bound<'_, PyAny>,
1113 original_length: usize,
1114 original_shape: Option<(usize, usize)>,
1115) -> PyResult<Py<PyAny>> {
1116 if let Ok(rows) = packed.extract::<Vec<Vec<u64>>>() {
1117 let batch = rows.len();
1118 let per_batch_len = if let Some((expected_batch, length)) = original_shape {
1119 if expected_batch != batch {
1120 return Err(PyValueError::new_err(format!(
1121 "original_shape batch {} does not match packed batch {}.",
1122 expected_batch, batch
1123 )));
1124 }
1125 length
1126 } else {
1127 original_length.checked_div(batch).unwrap_or(0)
1128 };
1129
1130 let unpacked_rows: Vec<Vec<u8>> = rows
1131 .into_iter()
1132 .map(|row| {
1133 bitstream::unpack(&bitstream::BitStreamTensor::from_words(row, per_batch_len))
1134 })
1135 .collect();
1136 return Ok(unpacked_rows
1137 .into_pyobject(py)
1138 .map_err(|e| PyValueError::new_err(e.to_string()))?
1139 .into_any()
1140 .unbind());
1141 }
1142
1143 let words = packed.extract::<Vec<u64>>().map_err(|_| {
1144 PyValueError::new_err("Expected packed uint64 words as 1-D or 2-D sequence.")
1145 })?;
1146 let tensor = bitstream::BitStreamTensor::from_words(words, original_length);
1147 Ok(bitstream::unpack(&tensor)
1148 .into_pyobject(py)
1149 .map_err(|e| PyValueError::new_err(e.to_string()))?
1150 .into_any()
1151 .unbind())
1152}
1153
1154#[pyfunction]
1155fn popcount(packed: &Bound<'_, PyAny>) -> PyResult<u64> {
1156 if let Ok(rows) = packed.extract::<Vec<Vec<u64>>>() {
1157 return Ok(rows
1158 .iter()
1159 .map(|row| simd::popcount_dispatch(row))
1160 .sum::<u64>());
1161 }
1162
1163 let words = packed.extract::<Vec<u64>>().map_err(|_| {
1164 PyValueError::new_err("Expected packed uint64 words as 1-D or 2-D sequence.")
1165 })?;
1166 Ok(simd::popcount_dispatch(&words))
1167}
1168
1169#[pyfunction]
1172fn pack_bitstream_numpy<'py>(
1173 py: Python<'py>,
1174 bits: PyReadonlyArray1<'py, u8>,
1175) -> PyResult<Bound<'py, PyArray1<u64>>> {
1176 let slice = bits
1177 .as_slice()
1178 .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
1179 let tensor = simd::pack_dispatch(slice);
1180 Ok(tensor.data.into_pyarray(py))
1181}
1182
1183#[pyfunction]
1185fn popcount_numpy(packed: PyReadonlyArray1<'_, u64>) -> PyResult<u64> {
1186 let words = packed
1187 .as_slice()
1188 .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
1189 Ok(simd::popcount_dispatch(words))
1190}
1191
1192#[pyfunction]
1194fn unpack_bitstream_numpy<'py>(
1195 py: Python<'py>,
1196 packed: PyReadonlyArray1<'py, u64>,
1197 original_length: usize,
1198) -> PyResult<Bound<'py, PyArray1<u8>>> {
1199 let words = packed
1200 .as_slice()
1201 .map_err(|e| PyValueError::new_err(format!("Cannot read numpy array: {e}")))?;
1202 let tensor = bitstream::BitStreamTensor::from_words(words.to_vec(), original_length);
1203 let bits = bitstream::unpack(&tensor);
1204 Ok(bits.into_pyarray(py))
1205}
1206
1207#[pyfunction]
1211#[pyo3(signature = (
1212 n_steps,
1213 leak_k,
1214 gain_k,
1215 i_t,
1216 noise_in=0,
1217 data_width=16,
1218 fraction=8,
1219 v_rest=0,
1220 v_reset=0,
1221 v_threshold=256,
1222 refractory_period=2
1223))]
1224#[allow(clippy::too_many_arguments)]
1225fn batch_lif_run<'py>(
1226 py: Python<'py>,
1227 n_steps: usize,
1228 leak_k: i16,
1229 gain_k: i16,
1230 i_t: i16,
1231 noise_in: i16,
1232 data_width: u32,
1233 fraction: u32,
1234 v_rest: i16,
1235 v_reset: i16,
1236 v_threshold: i16,
1237 refractory_period: i32,
1238) -> (Bound<'py, PyArray1<i32>>, Bound<'py, PyArray1<i16>>) {
1239 let mut lif = neuron::FixedPointLif::new(
1240 data_width,
1241 fraction,
1242 v_rest,
1243 v_reset,
1244 v_threshold,
1245 refractory_period,
1246 );
1247 let spikes_arr = PyArray1::<i32>::zeros(py, n_steps, false);
1248 let voltages_arr = PyArray1::<i16>::zeros(py, n_steps, false);
1249
1250 let spikes_slice = unsafe {
1252 spikes_arr
1253 .as_slice_mut()
1254 .expect("newly allocated spikes array must be contiguous")
1255 };
1256 let voltages_slice = unsafe {
1258 voltages_arr
1259 .as_slice_mut()
1260 .expect("newly allocated voltages array must be contiguous")
1261 };
1262
1263 for i in 0..n_steps {
1264 let (s, v) = lif.step(leak_k, gain_k, i_t, noise_in);
1265 spikes_slice[i] = s;
1266 voltages_slice[i] = v;
1267 }
1268
1269 (spikes_arr, voltages_arr)
1270}
1271
/// Simulate `n_neurons` independent fixed-point LIF neurons in parallel,
/// each driven by a constant current from `currents`; returns
/// (spikes, voltages) as (n_neurons, n_steps) numpy arrays.
#[pyfunction]
#[pyo3(signature = (
    n_neurons,
    n_steps,
    leak_k,
    gain_k,
    currents,
    data_width=16,
    fraction=8,
    v_rest=0,
    v_reset=0,
    v_threshold=256,
    refractory_period=2
))]
#[allow(clippy::too_many_arguments)]
#[allow(clippy::type_complexity)]
fn batch_lif_run_multi<'py>(
    py: Python<'py>,
    n_neurons: usize,
    n_steps: usize,
    leak_k: i16,
    gain_k: i16,
    currents: PyReadonlyArray1<'py, i16>,
    data_width: u32,
    fraction: u32,
    v_rest: i16,
    v_reset: i16,
    v_threshold: i16,
    refractory_period: i32,
) -> PyResult<(Bound<'py, PyArray2<i32>>, Bound<'py, PyArray2<i16>>)> {
    use rayon::prelude::*;

    let curr_slice = currents
        .as_slice()
        .map_err(|e| PyValueError::new_err(format!("Cannot read currents: {e}")))?;
    if curr_slice.len() != n_neurons {
        return Err(PyValueError::new_err(format!(
            "currents length {} does not match n_neurons {}.",
            curr_slice.len(),
            n_neurons
        )));
    }

    let spikes_arr = PyArray2::<i32>::zeros(py, [n_neurons, n_steps], false);
    let voltages_arr = PyArray2::<i16>::zeros(py, [n_neurons, n_steps], false);

    // Nothing to simulate; return the (empty) arrays as-is.
    if n_neurons == 0 || n_steps == 0 {
        return Ok((spikes_arr, voltages_arr));
    }

    // SAFETY: both arrays were freshly allocated above and no other Rust or
    // Python reference to them exists yet, so mutable slices are exclusive.
    let spikes_flat = unsafe {
        spikes_arr
            .as_slice_mut()
            .expect("newly allocated spikes array must be contiguous")
    };
    let voltages_flat = unsafe {
        voltages_arr
            .as_slice_mut()
            .expect("newly allocated voltages array must be contiguous")
    };

    // One rayon task per neuron row: par_chunks_mut hands each task disjoint
    // n_steps-long output chunks, and every neuron owns its own LIF state.
    spikes_flat
        .par_chunks_mut(n_steps)
        .zip(voltages_flat.par_chunks_mut(n_steps))
        .zip(curr_slice.par_iter().copied())
        .for_each(|((spike_row, voltage_row), i_t)| {
            let mut lif = neuron::FixedPointLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
            );
            for step in 0..n_steps {
                let (s, v) = lif.step(leak_k, gain_k, i_t, 0);
                spike_row[step] = s;
                voltage_row[step] = v;
            }
        });

    Ok((spikes_arr, voltages_arr))
}
1361
1362#[pyfunction]
1364#[pyo3(signature = (
1365 leak_k,
1366 gain_k,
1367 currents,
1368 noises=None,
1369 data_width=16,
1370 fraction=8,
1371 v_rest=0,
1372 v_reset=0,
1373 v_threshold=256,
1374 refractory_period=2
1375))]
1376#[allow(clippy::too_many_arguments)]
1377#[allow(clippy::type_complexity)]
1378fn batch_lif_run_varying<'py>(
1379 py: Python<'py>,
1380 leak_k: i16,
1381 gain_k: i16,
1382 currents: PyReadonlyArray1<'py, i16>,
1383 noises: Option<PyReadonlyArray1<'py, i16>>,
1384 data_width: u32,
1385 fraction: u32,
1386 v_rest: i16,
1387 v_reset: i16,
1388 v_threshold: i16,
1389 refractory_period: i32,
1390) -> PyResult<(Bound<'py, PyArray1<i32>>, Bound<'py, PyArray1<i16>>)> {
1391 let curr_slice = currents
1392 .as_slice()
1393 .map_err(|e| PyValueError::new_err(format!("Cannot read currents: {e}")))?;
1394 let noise_slice: Option<&[i16]> = match noises.as_ref() {
1395 Some(n) => Some(
1396 n.as_slice()
1397 .map_err(|e| PyValueError::new_err(format!("Cannot read noises: {e}")))?,
1398 ),
1399 None => None,
1400 };
1401
1402 let n_steps = curr_slice.len();
1403 if let Some(ns) = noise_slice {
1404 if ns.len() != n_steps {
1405 return Err(PyValueError::new_err(format!(
1406 "noises length {} does not match currents length {}.",
1407 ns.len(),
1408 n_steps
1409 )));
1410 }
1411 }
1412
1413 let mut lif = neuron::FixedPointLif::new(
1414 data_width,
1415 fraction,
1416 v_rest,
1417 v_reset,
1418 v_threshold,
1419 refractory_period,
1420 );
1421 let spikes_arr = PyArray1::<i32>::zeros(py, n_steps, false);
1422 let voltages_arr = PyArray1::<i16>::zeros(py, n_steps, false);
1423
1424 let spikes_slice = unsafe {
1426 spikes_arr
1427 .as_slice_mut()
1428 .expect("newly allocated spikes array must be contiguous")
1429 };
1430 let voltages_slice = unsafe {
1432 voltages_arr
1433 .as_slice_mut()
1434 .expect("newly allocated voltages array must be contiguous")
1435 };
1436
1437 for i in 0..n_steps {
1438 let noise_in = noise_slice.map_or(0, |ns| ns[i]);
1439 let (s, v) = lif.step(leak_k, gain_k, curr_slice[i], noise_in);
1440 spikes_slice[i] = s;
1441 voltages_slice[i] = v;
1442 }
1443
1444 Ok((spikes_arr, voltages_arr))
1445}
1446
1447#[pyfunction]
1451#[pyo3(signature = (probs, length=1024, seed=0xACE1))]
1452fn batch_encode<'py>(
1453 _py: Python<'py>,
1454 probs: PyReadonlyArray1<'py, f64>,
1455 length: usize,
1456 seed: u64,
1457) -> PyResult<Vec<Vec<u64>>> {
1458 let prob_slice = probs
1459 .as_slice()
1460 .map_err(|e| PyValueError::new_err(format!("Cannot read probs: {e}")))?;
1461 let words = length.div_ceil(64);
1462
1463 use rand::SeedableRng;
1464 let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(seed);
1465
1466 let packed: Vec<Vec<u64>> = prob_slice
1467 .iter()
1468 .map(|&p| {
1469 let mut data = bitstream::bernoulli_packed(p, length, &mut rng);
1470 data.resize(words, 0);
1471 data
1472 })
1473 .collect();
1474
1475 Ok(packed)
1476}
1477
1478#[pyfunction]
1482#[pyo3(signature = (probs, length=1024, seed=0xACE1))]
1483fn batch_encode_numpy<'py>(
1484 py: Python<'py>,
1485 probs: PyReadonlyArray1<'py, f64>,
1486 length: usize,
1487 seed: u64,
1488) -> PyResult<Bound<'py, PyArray2<u64>>> {
1489 use rayon::prelude::*;
1490
1491 let prob_slice = probs
1492 .as_slice()
1493 .map_err(|e| PyValueError::new_err(format!("Cannot read probs: {e}")))?;
1494 let words = length.div_ceil(64);
1495 let n_probs = prob_slice.len();
1496
1497 let rows: Vec<Vec<u64>> = prob_slice
1498 .par_iter()
1499 .enumerate()
1500 .map(|(idx, &p)| {
1501 use rand::SeedableRng;
1502
1503 let prob_seed = seed.wrapping_add(idx as u64);
1504 let mut rng = rand_xoshiro::Xoshiro256PlusPlus::seed_from_u64(prob_seed);
1505 let mut row = bitstream::bernoulli_packed_simd(p, length, &mut rng);
1506 row.resize(words, 0);
1507 row
1508 })
1509 .collect();
1510
1511 let mut flat = Vec::with_capacity(n_probs * words);
1512 for row in &rows {
1513 flat.extend_from_slice(row);
1514 }
1515
1516 let arr = ndarray::Array2::from_shape_vec((n_probs, words), flat)
1517 .map_err(|e| PyValueError::new_err(format!("Shape construction failed: {e}")))?;
1518 Ok(arr.into_pyarray(py))
1519}
1520
1521#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
1522pub struct Lfsr16 {
1523 inner: encoder::Lfsr16,
1524 seed_init: u16,
1525}
1526
1527#[pymethods]
1528impl Lfsr16 {
1529 #[new]
1530 #[pyo3(signature = (seed=0xACE1))]
1531 fn new(seed: u16) -> PyResult<Self> {
1532 if seed == 0 {
1533 return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1534 }
1535 Ok(Self {
1536 inner: encoder::Lfsr16::new(seed),
1537 seed_init: seed,
1538 })
1539 }
1540
1541 fn step(&mut self) -> u16 {
1542 self.inner.step()
1543 }
1544
1545 #[getter]
1546 fn reg(&self) -> u16 {
1547 self.inner.reg
1548 }
1549
1550 #[getter]
1551 fn width(&self) -> u32 {
1552 self.inner.width
1553 }
1554
1555 #[pyo3(signature = (seed=None))]
1556 fn reset(&mut self, seed: Option<u16>) -> PyResult<()> {
1557 let next = seed.unwrap_or(self.seed_init);
1558 if next == 0 {
1559 return Err(PyValueError::new_err("LFSR seed must be non-zero."));
1560 }
1561 self.inner = encoder::Lfsr16::new(next);
1562 self.seed_init = next;
1563 Ok(())
1564 }
1565}
1566
/// Python wrapper around the LFSR-based stochastic bitstream encoder.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct BitstreamEncoder {
    inner: encoder::BitstreamEncoder,
    // Seed restored by `reset()` when no override is supplied.
    seed_init: u16,
}

#[pymethods]
impl BitstreamEncoder {
    /// Create an encoder for `data_width`-bit input values; rejects a zero
    /// LFSR seed.
    #[new]
    #[pyo3(signature = (data_width=16, seed=0xACE1))]
    fn new(data_width: u32, seed: u16) -> PyResult<Self> {
        if seed == 0 {
            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
        }
        Ok(Self {
            inner: encoder::BitstreamEncoder::new(data_width, seed),
            seed_init: seed,
        })
    }

    /// Emit the next stochastic bit for input `x_value`.
    fn step(&mut self, x_value: u16) -> u8 {
        self.inner.step(x_value)
    }

    /// Configured input width in bits.
    #[getter]
    fn data_width(&self) -> u32 {
        self.inner.data_width
    }

    /// Current LFSR register contents.
    #[getter]
    fn reg(&self) -> u16 {
        self.inner.lfsr.reg
    }

    /// Re-seed the encoder; defaults to the construction-time seed.
    #[pyo3(signature = (seed=None))]
    fn reset(&mut self, seed: Option<u16>) -> PyResult<()> {
        let next = seed.unwrap_or(self.seed_init);
        if next == 0 {
            return Err(PyValueError::new_err("LFSR seed must be non-zero."));
        }
        self.inner.reset(Some(next));
        self.seed_init = next;
        Ok(())
    }
}
1612
/// Python wrapper around the fixed-point leaky integrate-and-fire neuron.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct FixedPointLif {
    inner: neuron::FixedPointLif,
}

#[pymethods]
impl FixedPointLif {
    /// Construct a neuron (defaults: 16-bit words, 8 fractional bits,
    /// threshold 256, 2-step refractory period).
    #[new]
    #[pyo3(signature = (
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2
    ))]
    fn new(
        data_width: u32,
        fraction: u32,
        v_rest: i16,
        v_reset: i16,
        v_threshold: i16,
        refractory_period: i32,
    ) -> Self {
        Self {
            inner: neuron::FixedPointLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
            ),
        }
    }

    /// Advance one timestep; returns (spike_flag, membrane_voltage).
    #[pyo3(signature = (leak_k, gain_k, i_t, noise_in=0))]
    fn step(&mut self, leak_k: i16, gain_k: i16, i_t: i16, noise_in: i16) -> (i32, i16) {
        self.inner.step(leak_k, gain_k, i_t, noise_in)
    }

    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Alias for `reset()` kept for API compatibility.
    fn reset_state(&mut self) {
        self.reset();
    }

    /// Snapshot the internal state as a dict with keys "v" and
    /// "refractory_counter".
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let dict = PyDict::new(py);
        dict.set_item("v", self.inner.v)?;
        dict.set_item("refractory_counter", self.inner.refractory_counter)?;
        Ok(dict.into_any().unbind())
    }
}
1669
/// Python wrapper around the Izhikevich two-variable spiking neuron model.
#[pyclass(
    name = "Izhikevich",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyIzhikevich {
    inner: neuron::Izhikevich,
}

#[pymethods]
impl PyIzhikevich {
    /// Defaults (a=0.02, b=0.2, c=-65, d=8) are the classic
    /// regular-spiking parameter set from Izhikevich's model.
    #[new]
    #[pyo3(signature = (a=0.02, b=0.2, c=-65.0, d=8.0, dt=1.0))]
    fn new(a: f64, b: f64, c: f64, d: f64, dt: f64) -> Self {
        Self {
            inner: neuron::Izhikevich::new(a, b, c, d, dt),
        }
    }

    /// Advance one timestep under `current`; returns the inner model's
    /// spike flag.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }

    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Alias for `reset()` kept for API compatibility.
    fn reset_state(&mut self) {
        self.reset();
    }

    /// Snapshot state as a dict with membrane potential "v" and recovery
    /// variable "u".
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let dict = PyDict::new(py);
        dict.set_item("v", self.inner.v)?;
        dict.set_item("u", self.inner.u)?;
        Ok(dict.into_any().unbind())
    }
}
1709
/// Sliding-window estimator of a bitstream's ones-density.
#[pyclass(
    name = "BitstreamAverager",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyBitstreamAverager {
    inner: neuron::BitstreamAverager,
}

#[pymethods]
impl PyBitstreamAverager {
    /// Create an averager over a window of `window` bits.
    #[new]
    #[pyo3(signature = (window=1024))]
    fn new(window: usize) -> Self {
        Self {
            inner: neuron::BitstreamAverager::new(window),
        }
    }

    /// Append one bit to the window.
    fn push(&mut self, bit: u8) {
        self.inner.push(bit);
    }

    /// Current estimate computed from the window contents.
    fn estimate(&self) -> f64 {
        self.inner.estimate()
    }

    /// Clear all accumulated bits.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Configured window length.
    #[getter]
    fn window(&self) -> usize {
        self.inner.window()
    }
}
1747
/// Python wrapper around the adaptive-exponential (AdEx) neuron model.
#[pyclass(
    name = "AdExNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyAdExNeuron {
    inner: neuron::AdExNeuron,
}

#[pymethods]
impl PyAdExNeuron {
    /// Create a neuron with the inner model's built-in default parameters.
    #[new]
    fn new() -> Self {
        Self {
            inner: neuron::AdExNeuron::new(),
        }
    }
    /// Advance one timestep under `current`; returns the spike flag.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Snapshot state as a dict with keys "v" (membrane potential) and "w"
    /// (the model's second state variable).
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        d.set_item("w", self.inner.w)?;
        Ok(d.into_any().unbind())
    }
}
1780
/// Python wrapper around the exponential integrate-and-fire neuron model.
#[pyclass(
    name = "ExpIFNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyExpIFNeuron {
    inner: neuron::ExpIfNeuron,
}

#[pymethods]
impl PyExpIFNeuron {
    /// Create a neuron with the inner model's built-in default parameters.
    #[new]
    fn new() -> Self {
        Self {
            inner: neuron::ExpIfNeuron::new(),
        }
    }
    /// Advance one timestep under `current`; returns the spike flag.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Snapshot state as a dict with membrane potential "v".
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        Ok(d.into_any().unbind())
    }
}
1810
/// Python wrapper around the Lapicque (RC-circuit) integrate-and-fire neuron.
#[pyclass(
    name = "LapicqueNeuron",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
#[derive(Clone)]
pub struct PyLapicqueNeuron {
    inner: neuron::LapicqueNeuron,
}

#[pymethods]
impl PyLapicqueNeuron {
    /// Membrane time constant `tau`, input `resistance`, spike `threshold`,
    /// and integration step `dt`.
    #[new]
    #[pyo3(signature = (tau=20.0, resistance=1.0, threshold=1.0, dt=1.0))]
    fn new(tau: f64, resistance: f64, threshold: f64, dt: f64) -> Self {
        Self {
            inner: neuron::LapicqueNeuron::new(tau, resistance, threshold, dt),
        }
    }
    /// Advance one timestep under `current`; returns the spike flag.
    fn step(&mut self, current: f64) -> i32 {
        self.inner.step(current)
    }
    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }
    /// Snapshot state as a dict with membrane potential "v".
    fn get_state(&self, py: Python<'_>) -> PyResult<Py<PyAny>> {
        let d = PyDict::new(py);
        d.set_item("v", self.inner.v)?;
        Ok(d.into_any().unbind())
    }
}
1841
/// Python wrapper around the stochastic-computing dense layer.
#[pyclass(module = "sc_neurocore_engine.sc_neurocore_engine")]
pub struct DenseLayer {
    inner: layer::DenseLayer,
}

#[pymethods]
impl DenseLayer {
    /// Create an `n_inputs` -> `n_neurons` layer; `length` is the bitstream
    /// length used for stochastic arithmetic and `seed` fixes the initial
    /// random weights.
    #[new]
    #[pyo3(signature = (n_inputs, n_neurons, length=1024, seed=24301))]
    fn new(n_inputs: usize, n_neurons: usize, length: usize, seed: u64) -> Self {
        Self {
            inner: layer::DenseLayer::new(n_inputs, n_neurons, length, seed),
        }
    }

    /// Current weight matrix, one row per neuron.
    fn get_weights(&self) -> Vec<Vec<f64>> {
        self.inner.get_weights()
    }

    /// Replace the weight matrix; validation is done by the inner layer.
    fn set_weights(&mut self, weights: Vec<Vec<f64>>) -> PyResult<()> {
        self.inner
            .set_weights(weights)
            .map_err(PyValueError::new_err)
    }

    /// Rebuild the packed (bitstream) weight representation, e.g. after
    /// direct weight edits.
    fn refresh_packed_weights(&mut self) {
        self.inner.refresh_packed_weights();
    }

    /// Forward pass over a single input vector (Python-list interface).
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward(&self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Faster forward variant; same interface as `forward` (implementation
    /// differences live in the inner layer).
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward_fast(&self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward_fast(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Forward pass taking and returning numpy arrays, avoiding Python-list
    /// conversions.
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward_numpy<'py>(
        &self,
        py: Python<'py>,
        input_values: PyReadonlyArray1<'py, f64>,
        seed: u64,
    ) -> PyResult<Bound<'py, PyArray1<f64>>> {
        let slice = input_values
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Cannot read input array: {e}")))?;
        let out = self
            .inner
            .forward_numpy_inner(slice, seed)
            .map_err(PyValueError::new_err)?;
        Ok(out.into_pyarray(py))
    }

    /// Batched forward pass over an (n_samples, n_inputs) array; returns an
    /// (n_samples, n_neurons) array.
    #[pyo3(signature = (inputs, seed=44257))]
    fn forward_batch_numpy<'py>(
        &self,
        py: Python<'py>,
        inputs: PyReadonlyArray2<'py, f64>,
        seed: u64,
    ) -> PyResult<Bound<'py, PyArray2<f64>>> {
        let shape = inputs.shape();
        let n_samples = shape[0];
        let n_inputs = shape[1];
        if n_inputs != self.inner.n_inputs {
            return Err(PyValueError::new_err(format!(
                "Expected {} input features, got {}.",
                self.inner.n_inputs, n_inputs
            )));
        }

        let flat_inputs = inputs
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Array not contiguous: {e}")))?;
        let out = PyArray2::<f64>::zeros(py, [n_samples, self.inner.n_neurons], false);
        // SAFETY: `out` was freshly allocated above and has no other views.
        let out_slice = unsafe {
            out.as_slice_mut()
                .expect("newly allocated output array must be contiguous")
        };

        self.inner
            .forward_batch_into(flat_inputs, n_samples, seed, out_slice)
            .map_err(PyValueError::new_err)?;
        Ok(out)
    }

    /// Forward pass over inputs that are already packed into u64 word rows;
    /// accepts a 2-D numpy uint64 array or nested Python lists.
    fn forward_prepacked(&self, packed_inputs: &Bound<'_, PyAny>) -> PyResult<Vec<f64>> {
        // Fast path: numpy matrix, copied row-by-row into owned Vecs.
        if let Ok(arr) = packed_inputs.extract::<PyReadonlyArray2<u64>>() {
            let view = arr.as_array();
            let rows: Vec<Vec<u64>> = (0..view.nrows()).map(|i| view.row(i).to_vec()).collect();
            return self
                .inner
                .forward_prepacked(&rows)
                .map_err(PyValueError::new_err);
        }

        // Fallback: plain Python nested lists.
        let rows = packed_inputs.extract::<Vec<Vec<u64>>>().map_err(|_| {
            PyValueError::new_err(
                "packed_inputs must be a 2-D numpy uint64 array or list[list[int]].",
            )
        })?;
        self.inner
            .forward_prepacked(&rows)
            .map_err(PyValueError::new_err)
    }

    /// Variant of `forward_prepacked` that reads a contiguous
    /// (n_inputs, words) numpy uint64 array directly and returns a numpy
    /// float array.
    #[pyo3(signature = (packed_inputs,))]
    fn forward_prepacked_numpy<'py>(
        &self,
        py: Python<'py>,
        packed_inputs: PyReadonlyArray2<'py, u64>,
    ) -> PyResult<Bound<'py, PyArray1<f64>>> {
        let shape = packed_inputs.shape();
        let n_inputs = shape[0];
        let words = shape[1];
        let flat = packed_inputs
            .as_slice()
            .map_err(|e| PyValueError::new_err(format!("Array not contiguous: {e}")))?;
        let out = self
            .inner
            .forward_prepacked_2d(flat, n_inputs, words)
            .map_err(PyValueError::new_err)?;
        Ok(out.into_pyarray(py))
    }
}
1992
1993fn parse_surrogate(name: &str, k: Option<f32>) -> PyResult<grad::SurrogateType> {
1994 let normalized = name.to_ascii_lowercase().replace('-', "_");
1995 match normalized.as_str() {
1996 "fast_sigmoid" => Ok(grad::SurrogateType::FastSigmoid {
1997 k: k.unwrap_or(25.0),
1998 }),
1999 "superspike" | "super_spike" => Ok(grad::SurrogateType::SuperSpike {
2000 k: k.unwrap_or(100.0),
2001 }),
2002 "arctan" | "arc_tan" => Ok(grad::SurrogateType::ArcTan { k: k.unwrap_or(10.0) }),
2003 "straightthrough" | "straight_through" | "ste" => Ok(grad::SurrogateType::StraightThrough),
2004 _ => Err(PyValueError::new_err(format!(
2005 "Unknown surrogate '{}'. Use one of: fast_sigmoid, superspike, arctan, straight_through.",
2006 name
2007 ))),
2008 }
2009}
2010
2011fn extract_matrix_f64(data: &Bound<'_, PyAny>, name: &str) -> PyResult<(Vec<f64>, usize, usize)> {
2012 if let Ok(rows) = data.extract::<Vec<Vec<f64>>>() {
2013 if rows.is_empty() {
2014 return Err(PyValueError::new_err(format!(
2015 "{} must not be an empty matrix.",
2016 name
2017 )));
2018 }
2019 let row_count = rows.len();
2020 let cols = rows[0].len();
2021 if cols == 0 {
2022 return Err(PyValueError::new_err(format!(
2023 "{} must not have zero columns.",
2024 name
2025 )));
2026 }
2027 if rows.iter().any(|r| r.len() != cols) {
2028 return Err(PyValueError::new_err(format!(
2029 "{} must be a rectangular matrix.",
2030 name
2031 )));
2032 }
2033 let out = rows.into_iter().flatten().collect::<Vec<f64>>();
2034 return Ok((out, row_count, cols));
2035 }
2036
2037 if let Ok(flat) = data.extract::<Vec<f64>>() {
2038 if flat.is_empty() {
2039 return Err(PyValueError::new_err(format!(
2040 "{} must not be an empty vector.",
2041 name
2042 )));
2043 }
2044 let cols = flat.len();
2045 return Ok((flat, 1, cols));
2046 }
2047
2048 Err(PyValueError::new_err(format!(
2049 "{} must be a 1-D or 2-D float array.",
2050 name
2051 )))
2052}
2053
/// Split a row-major flat buffer into `rows` owned rows of `cols` elements
/// each. Panics if `flat` is shorter than `rows * cols`.
fn reshape_flat_to_rows(flat: Vec<f64>, rows: usize, cols: usize) -> Vec<Vec<f64>> {
    (0..rows)
        .map(|r| flat[r * cols..(r + 1) * cols].to_vec())
        .collect()
}
2061
/// Fixed-point LIF neuron with a surrogate gradient for training.
#[pyclass(
    name = "SurrogateLif",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PySurrogateLif {
    inner: grad::SurrogateLif,
}

#[pymethods]
impl PySurrogateLif {
    /// Construct a neuron; `surrogate` selects the gradient shape
    /// (fast_sigmoid, superspike, arctan, straight_through) and `k` its
    /// steepness (surrogate-specific default when omitted).
    #[new]
    #[pyo3(signature = (
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2,
        surrogate="fast_sigmoid",
        k=None
    ))]
    #[allow(clippy::too_many_arguments)]
    fn new(
        data_width: u32,
        fraction: u32,
        v_rest: i16,
        v_reset: i16,
        v_threshold: i16,
        refractory_period: i32,
        surrogate: &str,
        k: Option<f32>,
    ) -> PyResult<Self> {
        let surrogate = parse_surrogate(surrogate, k)?;
        Ok(Self {
            inner: grad::SurrogateLif::new(
                data_width,
                fraction,
                v_rest,
                v_reset,
                v_threshold,
                refractory_period,
                surrogate,
            ),
        })
    }

    /// Forward step; returns (spike_flag, membrane_voltage).
    #[pyo3(signature = (leak_k, gain_k, i_t, noise_in=0))]
    fn forward(&mut self, leak_k: i16, gain_k: i16, i_t: i16, noise_in: i16) -> (i32, i16) {
        self.inner.forward(leak_k, gain_k, i_t, noise_in)
    }

    /// Backpropagate `grad_output` through the neuron's recorded trace
    /// (see `trace_len`); errors are surfaced as ValueError.
    fn backward(&mut self, grad_output: f32) -> PyResult<f32> {
        self.inner
            .backward(grad_output)
            .map_err(PyValueError::new_err)
    }

    /// Drop the recorded trace without resetting neuron state.
    fn clear_trace(&mut self) {
        self.inner.clear_trace();
    }

    /// Restore the neuron to its initial state.
    fn reset(&mut self) {
        self.inner.reset();
    }

    /// Number of entries currently held in the gradient trace.
    fn trace_len(&self) -> usize {
        self.inner.trace_len()
    }
}
2131
/// Dense stochastic-computing layer with surrogate-gradient backprop.
#[pyclass(
    name = "DifferentiableDenseLayer",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyDifferentiableDenseLayer {
    inner: grad::DifferentiableDenseLayer,
}

#[pymethods]
impl PyDifferentiableDenseLayer {
    /// Create the layer; see `parse_surrogate` for the accepted `surrogate`
    /// names and the meaning of `k`.
    #[new]
    #[pyo3(signature = (
        n_inputs,
        n_neurons,
        length=1024,
        seed=24301,
        surrogate="fast_sigmoid",
        k=None
    ))]
    fn new(
        n_inputs: usize,
        n_neurons: usize,
        length: usize,
        seed: u64,
        surrogate: &str,
        k: Option<f32>,
    ) -> PyResult<Self> {
        let surrogate = parse_surrogate(surrogate, k)?;
        Ok(Self {
            inner: grad::DifferentiableDenseLayer::new(
                n_inputs, n_neurons, length, seed, surrogate,
            ),
        })
    }

    /// Current weight matrix, one row per neuron.
    fn get_weights(&self) -> Vec<Vec<f64>> {
        self.inner.layer.get_weights()
    }

    /// Forward pass; state needed by `backward` is cached internally
    /// (see `clear_cache`).
    #[pyo3(signature = (input_values, seed=44257))]
    fn forward(&mut self, input_values: Vec<f64>, seed: u64) -> PyResult<Vec<f64>> {
        self.inner
            .forward(&input_values, seed)
            .map_err(PyValueError::new_err)
    }

    /// Backpropagate `grad_output`; returns (input grads, weight grads).
    fn backward(&self, grad_output: Vec<f64>) -> PyResult<(Vec<f64>, Vec<Vec<f64>>)> {
        self.inner
            .backward(&grad_output)
            .map_err(PyValueError::new_err)
    }

    /// Apply a weight update with learning rate `lr` (update rule defined
    /// by the inner layer) after validating the gradient matrix shape.
    fn update_weights(&mut self, weight_grads: Vec<Vec<f64>>, lr: f64) -> PyResult<()> {
        if weight_grads.len() != self.inner.layer.n_neurons {
            return Err(PyValueError::new_err(format!(
                "Expected {} grad rows, got {}.",
                self.inner.layer.n_neurons,
                weight_grads.len()
            )));
        }
        if weight_grads
            .iter()
            .any(|row| row.len() != self.inner.layer.n_inputs)
        {
            return Err(PyValueError::new_err(format!(
                "Expected each grad row to have length {}.",
                self.inner.layer.n_inputs
            )));
        }
        self.inner.update_weights(&weight_grads, lr);
        Ok(())
    }

    /// Drop the cached forward state.
    fn clear_cache(&mut self) {
        self.inner.clear_cache();
    }
}
2209
/// Attention operator with exact-softmax and stochastic-computing variants.
#[pyclass(
    name = "StochasticAttention",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
pub struct PyStochasticAttention {
    inner: attention::StochasticAttention,
}

#[pymethods]
impl PyStochasticAttention {
    /// `dim_k` is the key dimension; `temperature`, when given, selects the
    /// inner model's alternate-temperature constructor.
    #[new]
    #[pyo3(signature = (dim_k, temperature=None))]
    fn new(dim_k: usize, temperature: Option<f64>) -> Self {
        Self {
            inner: match temperature {
                Some(t) => attention::StochasticAttention::with_temperature(dim_k, t),
                None => attention::StochasticAttention::new(dim_k),
            },
        }
    }

    /// Softmax attention. Q/K/V are 1-D or 2-D float matrices; the result
    /// is reshaped to (q_rows, v_cols).
    fn forward_softmax(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_softmax(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Softmax attention split across `n_heads` heads (head partitioning is
    /// handled by the inner model).
    #[pyo3(signature = (q, k, v, n_heads))]
    fn forward_multihead_softmax(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        n_heads: usize,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_multihead_softmax(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, n_heads,
            )
            .map_err(PyValueError::new_err)?;

        // Output column count matches V's columns.
        let out_cols = v_cols;
        Ok(reshape_flat_to_rows(out, q_rows, out_cols))
    }

    /// Default attention variant (scoring rule defined by the inner model).
    fn forward(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Stochastic-computing attention using bitstreams of `length` bits
    /// generated from `seed`.
    #[pyo3(signature = (q, k, v, length=1024, seed=44257))]
    fn forward_sc(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        length: usize,
        seed: u64,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_sc(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, length,
                seed,
            )
            .map_err(PyValueError::new_err)?;

        Ok(reshape_flat_to_rows(out, q_rows, v_cols))
    }

    /// Multi-head variant of the default attention.
    #[pyo3(signature = (q, k, v, n_heads))]
    fn forward_multihead(
        &self,
        q: &Bound<'_, PyAny>,
        k: &Bound<'_, PyAny>,
        v: &Bound<'_, PyAny>,
        n_heads: usize,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (q_data, q_rows, q_cols) = extract_matrix_f64(q, "Q")?;
        let (k_data, k_rows, k_cols) = extract_matrix_f64(k, "K")?;
        let (v_data, v_rows, v_cols) = extract_matrix_f64(v, "V")?;

        let out = self
            .inner
            .forward_multihead(
                &q_data, q_rows, q_cols, &k_data, k_rows, k_cols, &v_data, v_rows, v_cols, n_heads,
            )
            .map_err(PyValueError::new_err)?;

        // Output column count matches V's columns.
        let out_cols = v_cols;
        Ok(reshape_flat_to_rows(out, q_rows, out_cols))
    }
}
2341
#[pyclass(
    name = "StochasticGraphLayer",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
/// Python wrapper around [`graph::StochasticGraphLayer`]; may be backed by a
/// dense adjacency matrix or a CSR sparse one (see `from_sparse`).
pub struct PyStochasticGraphLayer {
    /// Backing Rust layer; owns the adjacency, weights and dimensions.
    inner: graph::StochasticGraphLayer,
}
2349
#[pymethods]
impl PyStochasticGraphLayer {
    /// Build a layer from a dense, square adjacency matrix.
    ///
    /// Raises `ValueError` when the matrix is not square.
    #[new]
    #[pyo3(signature = (adj_matrix, n_features, seed=42))]
    fn new(adj_matrix: &Bound<'_, PyAny>, n_features: usize, seed: u64) -> PyResult<Self> {
        let (adj_flat, n_rows, n_cols) = extract_matrix_f64(adj_matrix, "adj_matrix")?;
        if n_rows != n_cols {
            return Err(PyValueError::new_err(format!(
                "adj_matrix must be square, got {}x{}.",
                n_rows, n_cols
            )));
        }
        Ok(Self {
            inner: graph::StochasticGraphLayer::new(adj_flat, n_rows, n_features, seed),
        })
    }

    /// Build a layer from a CSR (row_offsets/col_indices/values) adjacency.
    ///
    /// Raises `ValueError` when the CSR triplet is inconsistent.
    #[staticmethod]
    #[pyo3(signature = (row_offsets, col_indices, values, n_nodes, n_features, seed=42))]
    fn from_sparse(
        row_offsets: Vec<usize>,
        col_indices: Vec<usize>,
        values: Vec<f64>,
        n_nodes: usize,
        n_features: usize,
        seed: u64,
    ) -> PyResult<Self> {
        let csr = graph::CsrMatrix::new(row_offsets, col_indices, values, n_nodes, n_nodes)
            .map_err(PyValueError::new_err)?;
        let inner = graph::StochasticGraphLayer::new_sparse(csr, n_features, seed)
            .map_err(PyValueError::new_err)?;
        Ok(Self { inner })
    }

    /// Build from a dense matrix, letting the backend pick dense or sparse
    /// storage based on `density_threshold`.
    #[staticmethod]
    #[pyo3(signature = (adj_matrix, n_features, seed=42, density_threshold=0.3))]
    fn from_dense_auto(
        adj_matrix: &Bound<'_, PyAny>,
        n_features: usize,
        seed: u64,
        density_threshold: f64,
    ) -> PyResult<Self> {
        let (adj_flat, n_rows, n_cols) = extract_matrix_f64(adj_matrix, "adj_matrix")?;
        if n_rows != n_cols {
            return Err(PyValueError::new_err(format!(
                "adj_matrix must be square, got {}x{}.",
                n_rows, n_cols
            )));
        }
        Ok(Self {
            inner: graph::StochasticGraphLayer::from_dense_auto(
                adj_flat,
                n_rows,
                n_features,
                seed,
                density_threshold,
            ),
        })
    }

    /// True when the layer is using sparse (CSR) storage.
    fn is_sparse(&self) -> bool {
        self.inner.is_sparse()
    }

    /// Deterministic forward pass over `(n_nodes, n_features)` node features.
    ///
    /// Raises `ValueError` on a shape mismatch or backend failure.
    fn forward(&self, node_features: &Bound<'_, PyAny>) -> PyResult<Vec<Vec<f64>>> {
        let (x_flat, x_rows, x_cols) = extract_matrix_f64(node_features, "node_features")?;
        if x_rows != self.inner.n_nodes || x_cols != self.inner.n_features {
            return Err(PyValueError::new_err(format!(
                "Expected node_features shape ({}, {}), got ({}, {}).",
                self.inner.n_nodes, self.inner.n_features, x_rows, x_cols
            )));
        }
        let out = self.inner.forward(&x_flat).map_err(PyValueError::new_err)?;
        Ok(reshape_flat_to_rows(
            out,
            self.inner.n_nodes,
            self.inner.n_features,
        ))
    }

    /// Stochastic-computing forward pass with bitstreams of `length` bits.
    #[pyo3(signature = (node_features, length=1024, seed=44257))]
    fn forward_sc(
        &self,
        node_features: &Bound<'_, PyAny>,
        length: usize,
        seed: u64,
    ) -> PyResult<Vec<Vec<f64>>> {
        let (x_flat, x_rows, x_cols) = extract_matrix_f64(node_features, "node_features")?;
        if x_rows != self.inner.n_nodes || x_cols != self.inner.n_features {
            return Err(PyValueError::new_err(format!(
                "Expected node_features shape ({}, {}), got ({}, {}).",
                self.inner.n_nodes, self.inner.n_features, x_rows, x_cols
            )));
        }
        let out = self
            .inner
            .forward_sc(&x_flat, length, seed)
            .map_err(PyValueError::new_err)?;
        Ok(reshape_flat_to_rows(
            out,
            self.inner.n_nodes,
            self.inner.n_features,
        ))
    }

    /// Snapshot of the flat weight vector.
    fn get_weights(&self) -> Vec<f64> {
        self.inner.get_weights()
    }

    /// Replace the flat weight vector; raises `ValueError` on length mismatch.
    fn set_weights(&mut self, weights: Vec<f64>) -> PyResult<()> {
        self.inner
            .set_weights(weights)
            .map_err(PyValueError::new_err)
    }
}
2467
#[pyclass(
    name = "KuramotoSolver",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
/// Python wrapper around [`scpn::KuramotoSolver`] (coupled phase oscillators).
pub struct PyKuramotoSolver {
    /// Backing Rust solver; owns phases, frequencies and the coupling matrix.
    inner: scpn::KuramotoSolver,
}
2475
#[pymethods]
impl PyKuramotoSolver {
    /// Current oscillator phases (copied out).
    #[getter]
    fn phases(&self) -> Vec<f64> {
        self.inner.phases.clone()
    }
    /// Create a solver with natural frequencies `omega`, an `n x n` coupling
    /// matrix (or a flat `n*n` vector), initial `phases`, and noise amplitude.
    ///
    /// Raises `ValueError` when `omega` is empty or any shape disagrees.
    #[new]
    #[pyo3(signature = (omega, coupling, phases, noise_amp=0.1))]
    fn new(
        omega: Vec<f64>,
        coupling: &Bound<'_, PyAny>,
        phases: Vec<f64>,
        noise_amp: f64,
    ) -> PyResult<Self> {
        let n = omega.len();
        if n == 0 {
            return Err(PyValueError::new_err("omega must not be empty."));
        }
        if phases.len() != n {
            return Err(PyValueError::new_err(format!(
                "phases length mismatch: got {}, expected {}.",
                phases.len(),
                n
            )));
        }

        let (coupling_flat, rows, cols) = extract_matrix_f64(coupling, "coupling")?;
        // rows == 1 means the caller passed a 1-D/flat coupling vector; accept
        // it as long as it carries exactly n*n entries.
        if rows == 1 {
            if coupling_flat.len() != n * n {
                return Err(PyValueError::new_err(format!(
                    "Flat coupling length mismatch: got {}, expected {}.",
                    coupling_flat.len(),
                    n * n
                )));
            }
        } else if rows != n || cols != n {
            return Err(PyValueError::new_err(format!(
                "coupling must be shape ({}, {}) or flat length {}, got ({}, {}).",
                n,
                n,
                n * n,
                rows,
                cols
            )));
        }

        Ok(Self {
            inner: scpn::KuramotoSolver::new(omega, coupling_flat, phases, noise_amp),
        })
    }

    /// Advance one time step of size `dt`; returns the solver's step output.
    #[pyo3(signature = (dt, seed=0))]
    fn step(&mut self, dt: f64, seed: u64) -> f64 {
        self.inner.step(dt, seed)
    }

    /// Run `n_steps` steps of size `dt`; returns the per-step outputs.
    #[pyo3(signature = (n_steps, dt, seed=0))]
    fn run(&mut self, n_steps: usize, dt: f64, seed: u64) -> Vec<f64> {
        self.inner.run(n_steps, dt, seed)
    }

    /// Set the external field-pressure term.
    fn set_field_pressure(&mut self, f: f64) {
        self.inner.set_field_pressure(f);
    }

    /// Single SSGF-coupled step. `W` and `h_munu` are optional matrices;
    /// absence is forwarded to the solver as an empty slice.
    #[pyo3(signature = (
        dt,
        seed=0,
        W=None,
        sigma_g=0.0,
        h_munu=None,
        pgbo_weight=0.0,
    ))]
    #[allow(non_snake_case)]
    fn step_ssgf(
        &mut self,
        dt: f64,
        seed: u64,
        W: Option<&Bound<'_, PyAny>>,
        sigma_g: f64,
        h_munu: Option<&Bound<'_, PyAny>>,
        pgbo_weight: f64,
    ) -> PyResult<f64> {
        // NOTE(review): the matrices' shapes are not validated here —
        // presumably the solver tolerates arbitrary/empty slices; confirm.
        let (w_flat, _, _) = match W {
            Some(w) => extract_matrix_f64(w, "W")?,
            None => (vec![], 0, 0),
        };
        let (h_flat, _, _) = match h_munu {
            Some(h) => extract_matrix_f64(h, "h_munu")?,
            None => (vec![], 0, 0),
        };
        Ok(self
            .inner
            .step_ssgf(dt, seed, &w_flat, sigma_g, &h_flat, pgbo_weight))
    }

    /// Multi-step SSGF run; same optional-matrix convention as `step_ssgf`.
    #[pyo3(signature = (
        n_steps,
        dt,
        seed=0,
        W=None,
        sigma_g=0.0,
        h_munu=None,
        pgbo_weight=0.0,
    ))]
    #[allow(clippy::too_many_arguments, non_snake_case)]
    fn run_ssgf(
        &mut self,
        n_steps: usize,
        dt: f64,
        seed: u64,
        W: Option<&Bound<'_, PyAny>>,
        sigma_g: f64,
        h_munu: Option<&Bound<'_, PyAny>>,
        pgbo_weight: f64,
    ) -> PyResult<Vec<f64>> {
        let (w_flat, _, _) = match W {
            Some(w) => extract_matrix_f64(w, "W")?,
            None => (vec![], 0, 0),
        };
        let (h_flat, _, _) = match h_munu {
            Some(h) => extract_matrix_f64(h, "h_munu")?,
            None => (vec![], 0, 0),
        };
        Ok(self
            .inner
            .run_ssgf(n_steps, dt, seed, &w_flat, sigma_g, &h_flat, pgbo_weight))
    }

    /// Kuramoto order parameter of the current phase configuration.
    fn order_parameter(&self) -> f64 {
        self.inner.order_parameter()
    }

    /// Overwrite all phases; raises `ValueError` on length mismatch.
    fn set_phases(&mut self, phases: Vec<f64>) -> PyResult<()> {
        if phases.len() != self.inner.n {
            return Err(PyValueError::new_err(format!(
                "phases length mismatch: got {}, expected {}.",
                phases.len(),
                self.inner.n
            )));
        }
        self.inner.set_phases(phases);
        Ok(())
    }

    /// Replace the coupling matrix; accepts the same `n x n` / flat `n*n`
    /// shapes as the constructor.
    fn set_coupling(&mut self, coupling: &Bound<'_, PyAny>) -> PyResult<()> {
        let n = self.inner.n;
        let (coupling_flat, rows, cols) = extract_matrix_f64(coupling, "coupling")?;
        // Same flat-vs-square acceptance rule as in `new`.
        if rows == 1 {
            if coupling_flat.len() != n * n {
                return Err(PyValueError::new_err(format!(
                    "Flat coupling length mismatch: got {}, expected {}.",
                    coupling_flat.len(),
                    n * n
                )));
            }
        } else if rows != n || cols != n {
            return Err(PyValueError::new_err(format!(
                "coupling must be shape ({}, {}) or flat length {}, got ({}, {}).",
                n,
                n,
                n * n,
                rows,
                cols
            )));
        }
        self.inner.set_coupling(coupling_flat);
        Ok(())
    }
}
2646
#[pyclass(
    name = "SCPNMetrics",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
/// Stateless namespace exposing [`scpn::SCPNMetrics`] static computations.
pub struct PySCPNMetrics;
2652
#[pymethods]
impl PySCPNMetrics {
    /// Construct the (stateless) metrics namespace object.
    #[new]
    fn new() -> Self {
        Self
    }

    /// Weighted global-coherence score over seven layer metrics.
    #[staticmethod]
    fn global_coherence(weights: [f64; 7], metrics: [f64; 7]) -> f64 {
        scpn::SCPNMetrics::global_coherence(&weights, &metrics)
    }

    /// Consciousness index from layer-4 phases and a six-component L7 glyph.
    #[staticmethod]
    fn consciousness_index(phases_l4: Vec<f64>, glyph_l7: [f64; 6]) -> f64 {
        scpn::SCPNMetrics::consciousness_index(&phases_l4, &glyph_l7)
    }
}
2670
#[pyclass(name = "ScGraph", module = "sc_neurocore_engine.sc_neurocore_engine")]
/// Immutable Python handle to a built [`ir::graph::ScGraph`].
pub struct PyScGraph {
    /// The underlying IR graph (produced by `ScGraphBuilder.build()` or `ir_parse`).
    inner: ir::graph::ScGraph,
}
2677
#[pymethods]
impl PyScGraph {
    /// Number of operations in the graph.
    fn len(&self) -> usize {
        self.inner.len()
    }

    /// Python `len()` support; same as `len()`.
    fn __len__(&self) -> usize {
        self.inner.len()
    }

    /// True when the graph contains no operations.
    fn is_empty(&self) -> bool {
        self.inner.is_empty()
    }

    /// Graph name, as given to the builder.
    #[getter]
    fn name(&self) -> &str {
        &self.inner.name
    }

    /// Number of declared graph inputs.
    fn num_inputs(&self) -> usize {
        self.inner.inputs().len()
    }

    /// Number of declared graph outputs.
    fn num_outputs(&self) -> usize {
        self.inner.outputs().len()
    }

    fn __repr__(&self) -> String {
        format!("ScGraph('{}', ops={})", self.inner.name, self.inner.len())
    }
}
2714
#[pyclass(
    name = "ScGraphBuilder",
    module = "sc_neurocore_engine.sc_neurocore_engine"
)]
/// Incremental builder for [`PyScGraph`].
pub struct PyScGraphBuilder {
    /// `Some` while building; taken (set to `None`) by `build()`, after which
    /// every mutating method raises `ValueError`.
    inner: Option<ir::builder::ScGraphBuilder>,
}
2722
2723impl PyScGraphBuilder {
2724 fn builder_mut(&mut self) -> PyResult<&mut ir::builder::ScGraphBuilder> {
2725 self.inner
2726 .as_mut()
2727 .ok_or_else(|| PyValueError::new_err("Builder already consumed by build()."))
2728 }
2729}
2730
#[pymethods]
impl PyScGraphBuilder {
    /// Start a new builder for a graph called `name`.
    #[new]
    fn new(name: String) -> Self {
        Self {
            inner: Some(ir::builder::ScGraphBuilder::new(name)),
        }
    }

    /// Declare a graph input of IR type `ty` (see `parse_sc_type` for syntax);
    /// returns the new value id.
    fn input(&mut self, name: &str, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        Ok(self.builder_mut()?.input(name, sc_type).0)
    }

    /// Declare a graph output fed by value `source_id`; returns its id.
    fn output(&mut self, name: &str, source_id: u32) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .output(name, ir::graph::ValueId(source_id))
            .0)
    }

    /// Add an `f64` constant of IR type `ty`; returns its value id.
    fn constant_f64(&mut self, value: f64, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        Ok(self
            .builder_mut()?
            .constant(ir::graph::ScConst::F64(value), sc_type)
            .0)
    }

    /// Add an `i64` constant of IR type `ty`; returns its value id.
    fn constant_i64(&mut self, value: i64, ty: &str) -> PyResult<u32> {
        let sc_type = parse_sc_type(ty)?;
        Ok(self
            .builder_mut()?
            .constant(ir::graph::ScConst::I64(value), sc_type)
            .0)
    }

    /// Encode probability value `prob_id` into a bitstream of `length` bits.
    /// The seed must fit in 16 bits; raises `ValueError` otherwise.
    fn encode(&mut self, prob_id: u32, length: usize, seed: u64) -> PyResult<u32> {
        let seed = u16::try_from(seed)
            .map_err(|_| PyValueError::new_err(format!("Seed out of range for u16: {seed}")))?;
        Ok(self
            .builder_mut()?
            .encode(ir::graph::ValueId(prob_id), length, seed)
            .0)
    }

    /// Bitwise AND of two bitstream values.
    fn bitwise_and(&mut self, lhs_id: u32, rhs_id: u32) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .bitwise_and(ir::graph::ValueId(lhs_id), ir::graph::ValueId(rhs_id))
            .0)
    }

    /// Popcount (number of set bits) of a bitstream value.
    fn popcount(&mut self, input_id: u32) -> PyResult<u32> {
        Ok(self.builder_mut()?.popcount(ir::graph::ValueId(input_id)).0)
    }

    /// Add a single LIF-neuron update step with fixed-point parameters.
    #[pyo3(signature = (
        current_id,
        leak_id,
        gain_id,
        noise_id,
        data_width=16,
        fraction=8,
        v_rest=0,
        v_reset=0,
        v_threshold=256,
        refractory_period=2
    ))]
    #[allow(clippy::too_many_arguments)]
    fn lif_step(
        &mut self,
        current_id: u32,
        leak_id: u32,
        gain_id: u32,
        noise_id: u32,
        data_width: u32,
        fraction: u32,
        v_rest: i64,
        v_reset: i64,
        v_threshold: i64,
        refractory_period: u32,
    ) -> PyResult<u32> {
        let params = ir::graph::LifParams {
            data_width,
            fraction,
            v_rest,
            v_reset,
            v_threshold,
            refractory_period,
        };
        Ok(self
            .builder_mut()?
            .lif_step(
                ir::graph::ValueId(current_id),
                ir::graph::ValueId(leak_id),
                ir::graph::ValueId(gain_id),
                ir::graph::ValueId(noise_id),
                params,
            )
            .0)
    }

    /// Add a dense (fully-connected) stochastic forward pass.
    /// `seed_base` must fit in 16 bits; the weight seed base is derived from
    /// it by a wrapping +1.
    #[pyo3(signature = (
        inputs_id,
        weights_id,
        leak_id,
        gain_id,
        n_inputs=3,
        n_neurons=7,
        data_width=16,
        stream_length=1024,
        seed_base=0xACE1u64,
        y_min=0,
        y_max=65535
    ))]
    #[allow(clippy::too_many_arguments)]
    fn dense_forward(
        &mut self,
        inputs_id: u32,
        weights_id: u32,
        leak_id: u32,
        gain_id: u32,
        n_inputs: usize,
        n_neurons: usize,
        data_width: u32,
        stream_length: usize,
        seed_base: u64,
        y_min: i64,
        y_max: i64,
    ) -> PyResult<u32> {
        let input_seed_base = u16::try_from(seed_base).map_err(|_| {
            PyValueError::new_err(format!("seed_base out of range for u16: {seed_base}"))
        })?;
        let params = ir::graph::DenseParams {
            n_inputs,
            n_neurons,
            data_width,
            stream_length,
            input_seed_base,
            weight_seed_base: input_seed_base.wrapping_add(1),
            y_min,
            y_max,
        };
        Ok(self
            .builder_mut()?
            .dense_forward(
                ir::graph::ValueId(inputs_id),
                ir::graph::ValueId(weights_id),
                ir::graph::ValueId(leak_id),
                ir::graph::ValueId(gain_id),
                params,
            )
            .0)
    }

    /// Multiply value `input_id` by a constant factor.
    fn scale(&mut self, input_id: u32, factor: f64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .scale(ir::graph::ValueId(input_id), factor)
            .0)
    }

    /// Add a constant offset to value `input_id`.
    fn offset(&mut self, input_id: u32, offset_val: f64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .offset(ir::graph::ValueId(input_id), offset_val)
            .0)
    }

    /// Integer division of value `input_id` by a constant divisor.
    fn div_const(&mut self, input_id: u32, divisor: u64) -> PyResult<u32> {
        Ok(self
            .builder_mut()?
            .div_const(ir::graph::ValueId(input_id), divisor)
            .0)
    }

    /// Consume the builder and return the finished graph.
    /// Raises `ValueError` if `build()` was already called.
    fn build(&mut self) -> PyResult<PyScGraph> {
        let builder = self
            .inner
            .take()
            .ok_or_else(|| PyValueError::new_err("Builder already consumed by build()."))?;
        Ok(PyScGraph {
            inner: builder.build(),
        })
    }
}
2931
2932#[pyfunction]
2934fn ir_verify(graph: PyRef<'_, PyScGraph>) -> Option<Vec<String>> {
2935 match ir::verify::verify(&graph.inner) {
2936 Ok(()) => None,
2937 Err(errors) => Some(errors.iter().map(|e| e.to_string()).collect()),
2938 }
2939}
2940
#[pyfunction]
/// Render an IR graph to its textual form (round-trips through `ir_parse`).
fn ir_print(graph: PyRef<'_, PyScGraph>) -> String {
    ir::printer::print(&graph.inner)
}
2946
2947#[pyfunction]
2949fn ir_parse(text: &str) -> PyResult<PyScGraph> {
2950 ir::parser::parse(text)
2951 .map(|graph| PyScGraph { inner: graph })
2952 .map_err(|e| PyValueError::new_err(e.to_string()))
2953}
2954
#[pyfunction]
/// Emit SystemVerilog for an IR graph; raises `ValueError` on emission errors.
fn ir_emit_sv(graph: PyRef<'_, PyScGraph>) -> PyResult<String> {
    ir::emit_sv::emit(&graph.inner).map_err(PyValueError::new_err)
}
2960
/// Parse a textual IR type into an [`ir::graph::ScType`].
///
/// Accepted forms (case-insensitive, surrounding whitespace ignored):
/// `bool`, `rate`, `uN`/`iN` (arbitrary width), `bitstream`,
/// `bitstream<LEN>`, `fixed<WIDTH,FRAC>`, and `vec<TYPE,COUNT>` (recursive).
/// Raises `ValueError` for anything else.
fn parse_sc_type(s: &str) -> PyResult<ir::graph::ScType> {
    let s = s.trim();
    let lower = s.to_ascii_lowercase();
    match lower.as_str() {
        "bool" => Ok(ir::graph::ScType::Bool),
        "rate" => Ok(ir::graph::ScType::Rate),
        "u32" => Ok(ir::graph::ScType::UInt { width: 32 }),
        "u64" => Ok(ir::graph::ScType::UInt { width: 64 }),
        "i16" => Ok(ir::graph::ScType::SInt { width: 16 }),
        "i32" => Ok(ir::graph::ScType::SInt { width: 32 }),
        // Bare `bitstream` means "length decided elsewhere": encoded as 0.
        "bitstream" => Ok(ir::graph::ScType::Bitstream { length: 0 }),
        _ => {
            // General `uN` / `iN` widths (e.g. "u8", "i48"); a non-numeric
            // suffix simply falls through to the next candidate.
            if let Some(width) = lower.strip_prefix('u') {
                if let Ok(width) = width.parse::<u32>() {
                    return Ok(ir::graph::ScType::UInt { width });
                }
            }
            if let Some(width) = lower.strip_prefix('i') {
                if let Ok(width) = width.parse::<u32>() {
                    return Ok(ir::graph::ScType::SInt { width });
                }
            }
            if let Some(inner) = lower
                .strip_prefix("bitstream<")
                .and_then(|r| r.strip_suffix('>'))
            {
                let length = inner.parse::<usize>().map_err(|_| {
                    PyValueError::new_err(format!("Invalid bitstream length: '{inner}'"))
                })?;
                return Ok(ir::graph::ScType::Bitstream { length });
            }
            if let Some(inner) = lower
                .strip_prefix("fixed<")
                .and_then(|r| r.strip_suffix('>'))
            {
                let parts: Vec<&str> = inner.split(',').collect();
                if parts.len() != 2 {
                    return Err(PyValueError::new_err(format!(
                        "fixed type needs 2 params: '{s}'"
                    )));
                }
                let width = parts[0].trim().parse::<u32>().map_err(|_| {
                    PyValueError::new_err(format!("Invalid fixed width: '{}'", parts[0]))
                })?;
                let frac = parts[1].trim().parse::<u32>().map_err(|_| {
                    PyValueError::new_err(format!("Invalid fixed frac: '{}'", parts[1]))
                })?;
                return Ok(ir::graph::ScType::FixedPoint { width, frac });
            }
            if let Some(inner) = lower.strip_prefix("vec<").and_then(|r| r.strip_suffix('>')) {
                // Split on the LAST comma so the element type may itself
                // contain commas (e.g. "vec<fixed<16,8>, 4>").
                if let Some(comma_pos) = inner.rfind(',') {
                    let inner_ty_str = &inner[..comma_pos];
                    let count_str = inner[comma_pos + 1..].trim();
                    let inner_ty = parse_sc_type(inner_ty_str)?;
                    let count = count_str.parse::<usize>().map_err(|_| {
                        PyValueError::new_err(format!("Invalid vec count: '{count_str}'"))
                    })?;
                    return Ok(ir::graph::ScType::Vec {
                        element: Box::new(inner_ty),
                        count,
                    });
                }
            }
            Err(PyValueError::new_err(format!("Unknown IR type: '{s}'")))
        }
    }
}
3032
3033#[pyfunction]
3036#[pyo3(signature = (binary_train, dt=0.001))]
3037fn py_spike_times(
3038 py: Python<'_>,
3039 binary_train: PyReadonlyArray1<'_, i32>,
3040 dt: f64,
3041) -> Py<PyArray1<f64>> {
3042 let data = binary_train.as_slice().unwrap();
3043 analysis::basic::spike_times(data, dt)
3044 .into_pyarray(py)
3045 .into()
3046}
3047
3048#[pyfunction]
3049#[pyo3(signature = (binary_train, dt=0.001))]
3050fn py_isi(py: Python<'_>, binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> Py<PyArray1<f64>> {
3051 let data = binary_train.as_slice().unwrap();
3052 analysis::basic::isi(data, dt).into_pyarray(py).into()
3053}
3054
3055#[pyfunction]
3056#[pyo3(signature = (binary_train, dt=0.001))]
3057fn py_firing_rate(binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> f64 {
3058 let data = binary_train.as_slice().unwrap();
3059 analysis::basic::firing_rate(data, dt)
3060}
3061
3062#[pyfunction]
3063fn py_spike_count(binary_train: PyReadonlyArray1<'_, i32>) -> i64 {
3064 let data = binary_train.as_slice().unwrap();
3065 analysis::basic::spike_count(data)
3066}
3067
3068#[pyfunction]
3069#[pyo3(signature = (binary_train, bin_size=10))]
3070fn py_bin_spike_train(
3071 py: Python<'_>,
3072 binary_train: PyReadonlyArray1<'_, i32>,
3073 bin_size: usize,
3074) -> Py<PyArray1<i64>> {
3075 let data = binary_train.as_slice().unwrap();
3076 analysis::basic::bin_spike_train(data, bin_size)
3077 .into_pyarray(py)
3078 .into()
3079}
3080
3081#[pyfunction]
3082#[pyo3(signature = (binary_train, dt=0.001, kernel="gaussian", sigma_ms=10.0))]
3083fn py_instantaneous_rate(
3084 py: Python<'_>,
3085 binary_train: PyReadonlyArray1<'_, f64>,
3086 dt: f64,
3087 kernel: &str,
3088 sigma_ms: f64,
3089) -> Py<PyArray1<f64>> {
3090 let data = binary_train.as_slice().unwrap();
3091 analysis::rate::instantaneous_rate(data, dt, kernel, sigma_ms)
3092 .into_pyarray(py)
3093 .into()
3094}
3095
3096#[pyfunction]
3097#[pyo3(signature = (trials, bin_ms=10.0, dt=0.001))]
3098fn py_psth(
3099 py: Python<'_>,
3100 trials: Vec<PyReadonlyArray1<'_, f64>>,
3101 bin_ms: f64,
3102 dt: f64,
3103) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
3104 let vecs: Vec<Vec<f64>> = trials
3105 .iter()
3106 .map(|t| t.as_slice().unwrap().to_vec())
3107 .collect();
3108 let (rates, centers) = analysis::rate::psth(&vecs, bin_ms, dt);
3109 (
3110 rates.into_pyarray(py).into(),
3111 centers.into_pyarray(py).into(),
3112 )
3113}
3114
3115#[pyfunction]
3116#[pyo3(signature = (binary_train, dt=0.001))]
3117fn py_cv_isi(binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> f64 {
3118 let data = binary_train.as_slice().unwrap();
3119 analysis::variability::cv_isi(data, dt)
3120}
3121
3122#[pyfunction]
3123#[pyo3(signature = (binary_train, dt=0.001))]
3124fn py_cv2(binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> f64 {
3125 let data = binary_train.as_slice().unwrap();
3126 analysis::variability::cv2(data, dt)
3127}
3128
3129#[pyfunction]
3130#[pyo3(signature = (binary_train, dt=0.001))]
3131fn py_local_variation(binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> f64 {
3132 let data = binary_train.as_slice().unwrap();
3133 analysis::variability::local_variation(data, dt)
3134}
3135
3136#[pyfunction]
3137#[pyo3(signature = (binary_train, window_ms=50.0, dt=0.001))]
3138fn py_fano_factor(binary_train: PyReadonlyArray1<'_, i32>, window_ms: f64, dt: f64) -> f64 {
3139 let data = binary_train.as_slice().unwrap();
3140 analysis::variability::fano_factor(data, window_ms, dt)
3141}
3142
3143#[pyfunction]
3144fn py_lempel_ziv_complexity(binary_train: PyReadonlyArray1<'_, i32>) -> f64 {
3145 let data = binary_train.as_slice().unwrap();
3146 analysis::variability::lempel_ziv_complexity(data)
3147}
3148
3149#[pyfunction]
3150#[pyo3(signature = (binary_train, order=3, delay=1))]
3151fn py_permutation_entropy(
3152 binary_train: PyReadonlyArray1<'_, i32>,
3153 order: usize,
3154 delay: usize,
3155) -> f64 {
3156 let data = binary_train.as_slice().unwrap();
3157 analysis::variability::permutation_entropy(data, order, delay)
3158}
3159
3160#[pyfunction]
3161#[pyo3(signature = (binary_train, min_window=10))]
3162fn py_hurst_exponent(binary_train: PyReadonlyArray1<'_, i32>, min_window: usize) -> f64 {
3163 let data = binary_train.as_slice().unwrap();
3164 analysis::variability::hurst_exponent(data, min_window)
3165}
3166
3167#[pyfunction]
3168#[pyo3(signature = (binary_train, m=2, r_factor=0.2))]
3169fn py_approximate_entropy(binary_train: PyReadonlyArray1<'_, i32>, m: usize, r_factor: f64) -> f64 {
3170 let data = binary_train.as_slice().unwrap();
3171 analysis::variability::approximate_entropy(data, m, r_factor)
3172}
3173
3174#[pyfunction]
3175#[pyo3(signature = (binary_train, m=2, r_factor=0.2))]
3176fn py_sample_entropy(binary_train: PyReadonlyArray1<'_, i32>, m: usize, r_factor: f64) -> f64 {
3177 let data = binary_train.as_slice().unwrap();
3178 analysis::variability::sample_entropy(data, m, r_factor)
3179}
3180
3181#[pyfunction]
3184#[pyo3(signature = (train_a, train_b, max_lag_ms=50.0, dt=0.001))]
3185fn py_cross_correlation(
3186 py: Python<'_>,
3187 train_a: PyReadonlyArray1<'_, i32>,
3188 train_b: PyReadonlyArray1<'_, i32>,
3189 max_lag_ms: f64,
3190 dt: f64,
3191) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
3192 let a = train_a.as_slice().unwrap();
3193 let b = train_b.as_slice().unwrap();
3194 let (cc, lags) = analysis::correlation::cross_correlation(a, b, max_lag_ms, dt);
3195 (cc.into_pyarray(py).into(), lags.into_pyarray(py).into())
3196}
3197
3198#[pyfunction]
3199#[pyo3(signature = (trains, dt=0.001))]
3200fn py_pairwise_correlation(
3201 py: Python<'_>,
3202 trains: Vec<PyReadonlyArray1<'_, i32>>,
3203 dt: f64,
3204) -> Py<PyArray2<f64>> {
3205 let vecs: Vec<Vec<i32>> = trains
3206 .iter()
3207 .map(|t| t.as_slice().unwrap().to_vec())
3208 .collect();
3209 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3210 let mat = analysis::correlation::pairwise_correlation(&refs, dt);
3211 let n = mat.len();
3212 let flat: Vec<f64> = mat.into_iter().flatten().collect();
3213 numpy::PyArray2::from_vec2(py, &flat.chunks(n).map(|c| c.to_vec()).collect::<Vec<_>>())
3214 .unwrap()
3215 .into()
3216}
3217
3218#[pyfunction]
3219#[pyo3(signature = (train_a, train_b, dt=0.001, tau_ms=5.0))]
3220fn py_event_synchronization(
3221 train_a: PyReadonlyArray1<'_, i32>,
3222 train_b: PyReadonlyArray1<'_, i32>,
3223 dt: f64,
3224 tau_ms: f64,
3225) -> f64 {
3226 let a = train_a.as_slice().unwrap();
3227 let b = train_b.as_slice().unwrap();
3228 analysis::correlation::event_synchronization(a, b, dt, tau_ms)
3229}
3230
3231#[pyfunction]
3232#[pyo3(signature = (train_a, train_b, dt=0.001))]
3233fn py_spike_train_coherence(
3234 py: Python<'_>,
3235 train_a: PyReadonlyArray1<'_, i32>,
3236 train_b: PyReadonlyArray1<'_, i32>,
3237 dt: f64,
3238) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
3239 let a = train_a.as_slice().unwrap();
3240 let b = train_b.as_slice().unwrap();
3241 let (coh, freqs) = analysis::correlation::spike_train_coherence(a, b, dt);
3242 (coh.into_pyarray(py).into(), freqs.into_pyarray(py).into())
3243}
3244
3245#[pyfunction]
3246#[pyo3(signature = (train_a, train_b, dt=0.001, delta_ms=5.0))]
3247fn py_spike_time_tiling_coefficient(
3248 train_a: PyReadonlyArray1<'_, i32>,
3249 train_b: PyReadonlyArray1<'_, i32>,
3250 dt: f64,
3251 delta_ms: f64,
3252) -> f64 {
3253 let a = train_a.as_slice().unwrap();
3254 let b = train_b.as_slice().unwrap();
3255 analysis::correlation::spike_time_tiling_coefficient(a, b, dt, delta_ms)
3256}
3257
3258#[pyfunction]
3259#[pyo3(signature = (trains, bin_size=10))]
3260fn py_covariance_matrix(
3261 py: Python<'_>,
3262 trains: Vec<PyReadonlyArray1<'_, i32>>,
3263 bin_size: usize,
3264) -> Py<PyArray2<f64>> {
3265 let vecs: Vec<Vec<i32>> = trains
3266 .iter()
3267 .map(|t| t.as_slice().unwrap().to_vec())
3268 .collect();
3269 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3270 let mat = analysis::correlation::covariance_matrix(&refs, bin_size);
3271 let n = mat.len();
3272 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3273 numpy::PyArray2::from_vec2(py, &rows)
3274 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3275 .into()
3276}
3277
3278#[pyfunction]
3279#[pyo3(signature = (binary_train, dt=0.001, max_lag_ms=100.0))]
3280fn py_autocorrelation_time(
3281 binary_train: PyReadonlyArray1<'_, i32>,
3282 dt: f64,
3283 max_lag_ms: f64,
3284) -> f64 {
3285 let data = binary_train.as_slice().unwrap();
3286 analysis::correlation::autocorrelation_time(data, dt, max_lag_ms)
3287}
3288
3289#[pyfunction]
3290#[pyo3(signature = (trains, bin_size=50))]
3291fn py_noise_correlation(
3292 py: Python<'_>,
3293 trains: Vec<PyReadonlyArray1<'_, i32>>,
3294 bin_size: usize,
3295) -> Py<PyArray2<f64>> {
3296 let vecs: Vec<Vec<i32>> = trains
3297 .iter()
3298 .map(|t| t.as_slice().unwrap().to_vec())
3299 .collect();
3300 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3301 let mat = analysis::correlation::noise_correlation(&refs, bin_size);
3302 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3303 let n = rows.len();
3304 numpy::PyArray2::from_vec2(py, &rows)
3305 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3306 .into()
3307}
3308
3309#[pyfunction]
3310#[pyo3(signature = (trains, bin_size=50))]
3311fn py_signal_correlation(
3312 py: Python<'_>,
3313 trains: Vec<PyReadonlyArray1<'_, i32>>,
3314 bin_size: usize,
3315) -> Py<PyArray2<f64>> {
3316 let vecs: Vec<Vec<i32>> = trains
3317 .iter()
3318 .map(|t| t.as_slice().unwrap().to_vec())
3319 .collect();
3320 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3321 let mat = analysis::correlation::signal_correlation(&refs, bin_size);
3322 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3323 let n = rows.len();
3324 numpy::PyArray2::from_vec2(py, &rows)
3325 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3326 .into()
3327}
3328
3329#[pyfunction]
3330#[pyo3(signature = (trains, window=50))]
3331fn py_spike_count_covariance(
3332 py: Python<'_>,
3333 trains: Vec<PyReadonlyArray1<'_, i32>>,
3334 window: usize,
3335) -> Py<PyArray2<f64>> {
3336 let vecs: Vec<Vec<i32>> = trains
3337 .iter()
3338 .map(|t| t.as_slice().unwrap().to_vec())
3339 .collect();
3340 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3341 let mat = analysis::correlation::spike_count_covariance(&refs, window);
3342 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3343 let n = rows.len();
3344 numpy::PyArray2::from_vec2(py, &rows)
3345 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3346 .into()
3347}
3348
3349#[pyfunction]
3350#[pyo3(signature = (train_a, train_b, bin_size=10))]
3351fn py_joint_psth(
3352 py: Python<'_>,
3353 train_a: PyReadonlyArray1<'_, i32>,
3354 train_b: PyReadonlyArray1<'_, i32>,
3355 bin_size: usize,
3356) -> Py<PyArray2<f64>> {
3357 let a = train_a.as_slice().unwrap();
3358 let b = train_b.as_slice().unwrap();
3359 let (flat, n) = analysis::correlation::joint_psth(a, b, bin_size);
3360 if n == 0 {
3361 return numpy::PyArray2::zeros(py, [0, 0], false).into();
3362 }
3363 let rows: Vec<Vec<f64>> = flat.chunks(n).map(|c| c.to_vec()).collect();
3364 numpy::PyArray2::from_vec2(py, &rows).unwrap().into()
3365}
3366
3367#[pyfunction]
3368#[pyo3(signature = (train_a, train_b, dt=0.001, delta_ms=2.0))]
3369fn py_coincidence_index(
3370 train_a: PyReadonlyArray1<'_, i32>,
3371 train_b: PyReadonlyArray1<'_, i32>,
3372 dt: f64,
3373 delta_ms: f64,
3374) -> f64 {
3375 let a = train_a.as_slice().unwrap();
3376 let b = train_b.as_slice().unwrap();
3377 analysis::correlation::coincidence_index(a, b, dt, delta_ms)
3378}
3379
3380#[pyfunction]
3383#[pyo3(signature = (train_a, train_b, dt=0.001, tau_ms=10.0))]
3384fn py_van_rossum_distance(
3385 train_a: PyReadonlyArray1<'_, i32>,
3386 train_b: PyReadonlyArray1<'_, i32>,
3387 dt: f64,
3388 tau_ms: f64,
3389) -> f64 {
3390 let a = train_a.as_slice().unwrap();
3391 let b = train_b.as_slice().unwrap();
3392 analysis::distance::van_rossum_distance(a, b, dt, tau_ms)
3393}
3394
3395#[pyfunction]
3396#[pyo3(signature = (times_a, times_b, cost_per_s=1000.0))]
3397fn py_victor_purpura_distance(
3398 times_a: PyReadonlyArray1<'_, f64>,
3399 times_b: PyReadonlyArray1<'_, f64>,
3400 cost_per_s: f64,
3401) -> f64 {
3402 let a = times_a.as_slice().unwrap();
3403 let b = times_b.as_slice().unwrap();
3404 analysis::distance::victor_purpura_distance(a, b, cost_per_s)
3405}
3406
3407#[pyfunction]
3408#[pyo3(signature = (train_a, train_b, dt=0.001))]
3409fn py_isi_distance(
3410 train_a: PyReadonlyArray1<'_, i32>,
3411 train_b: PyReadonlyArray1<'_, i32>,
3412 dt: f64,
3413) -> f64 {
3414 let a = train_a.as_slice().unwrap();
3415 let b = train_b.as_slice().unwrap();
3416 analysis::distance::isi_distance(a, b, dt)
3417}
3418
3419#[pyfunction]
3420#[pyo3(signature = (times_a, times_b, t_start=0.0, t_end=1.0))]
3421fn py_spike_distance(
3422 times_a: PyReadonlyArray1<'_, f64>,
3423 times_b: PyReadonlyArray1<'_, f64>,
3424 t_start: f64,
3425 t_end: f64,
3426) -> f64 {
3427 let a = times_a.as_slice().unwrap();
3428 let b = times_b.as_slice().unwrap();
3429 analysis::distance::spike_distance(a, b, t_start, t_end)
3430}
3431
3432#[pyfunction]
3433#[pyo3(signature = (times_a, times_b, t_start=0.0, t_end=1.0))]
3434fn py_spike_sync(
3435 times_a: PyReadonlyArray1<'_, f64>,
3436 times_b: PyReadonlyArray1<'_, f64>,
3437 t_start: f64,
3438 t_end: f64,
3439) -> f64 {
3440 let a = times_a.as_slice().unwrap();
3441 let b = times_b.as_slice().unwrap();
3442 analysis::distance::spike_sync(a, b, t_start, t_end)
3443}
3444
3445#[pyfunction]
3446#[pyo3(signature = (times_a, times_b, n_bins=50, t_start=0.0, t_end=1.0))]
3447fn py_spike_sync_profile(
3448 py: Python<'_>,
3449 times_a: PyReadonlyArray1<'_, f64>,
3450 times_b: PyReadonlyArray1<'_, f64>,
3451 n_bins: usize,
3452 t_start: f64,
3453 t_end: f64,
3454) -> Py<PyArray1<f64>> {
3455 let a = times_a.as_slice().unwrap();
3456 let b = times_b.as_slice().unwrap();
3457 analysis::distance::spike_sync_profile(a, b, n_bins, t_start, t_end)
3458 .into_pyarray(py)
3459 .into()
3460}
3461
3462#[pyfunction]
3463#[pyo3(signature = (times_a, times_b, n_bins=50, t_start=0.0, t_end=1.0))]
3464fn py_spike_profile(
3465 py: Python<'_>,
3466 times_a: PyReadonlyArray1<'_, f64>,
3467 times_b: PyReadonlyArray1<'_, f64>,
3468 n_bins: usize,
3469 t_start: f64,
3470 t_end: f64,
3471) -> Py<PyArray1<f64>> {
3472 let a = times_a.as_slice().unwrap();
3473 let b = times_b.as_slice().unwrap();
3474 analysis::distance::spike_profile(a, b, n_bins, t_start, t_end)
3475 .into_pyarray(py)
3476 .into()
3477}
3478
3479#[pyfunction]
3480#[pyo3(signature = (binary_train_a, binary_train_b, dt=0.001, n_bins=50))]
3481fn py_isi_profile(
3482 py: Python<'_>,
3483 binary_train_a: PyReadonlyArray1<'_, i32>,
3484 binary_train_b: PyReadonlyArray1<'_, i32>,
3485 dt: f64,
3486 n_bins: usize,
3487) -> Py<PyArray1<f64>> {
3488 let a = binary_train_a.as_slice().unwrap();
3489 let b = binary_train_b.as_slice().unwrap();
3490 analysis::distance::isi_profile(a, b, dt, n_bins)
3491 .into_pyarray(py)
3492 .into()
3493}
3494
3495#[pyfunction]
3496#[pyo3(signature = (times_a, times_b, t_start=0.0, t_end=1.0, cost=0.0))]
3497fn py_adaptive_spike_distance(
3498 times_a: PyReadonlyArray1<'_, f64>,
3499 times_b: PyReadonlyArray1<'_, f64>,
3500 t_start: f64,
3501 t_end: f64,
3502 cost: f64,
3503) -> f64 {
3504 let a = times_a.as_slice().unwrap();
3505 let b = times_b.as_slice().unwrap();
3506 analysis::distance::adaptive_spike_distance(a, b, t_start, t_end, cost)
3507}
3508
3509#[pyfunction]
3510#[pyo3(signature = (train_a, train_b, dt=0.001, sigma_ms=5.0))]
3511fn py_schreiber_similarity(
3512 train_a: PyReadonlyArray1<'_, i32>,
3513 train_b: PyReadonlyArray1<'_, i32>,
3514 dt: f64,
3515 sigma_ms: f64,
3516) -> f64 {
3517 let a = train_a.as_slice().unwrap();
3518 let b = train_b.as_slice().unwrap();
3519 analysis::distance::schreiber_similarity(a, b, dt, sigma_ms)
3520}
3521
3522#[pyfunction]
3523#[pyo3(signature = (times_a, times_b, dt_max=0.01))]
3524fn py_hunter_milton_similarity(
3525 times_a: PyReadonlyArray1<'_, f64>,
3526 times_b: PyReadonlyArray1<'_, f64>,
3527 dt_max: f64,
3528) -> f64 {
3529 let a = times_a.as_slice().unwrap();
3530 let b = times_b.as_slice().unwrap();
3531 analysis::distance::hunter_milton_similarity(a, b, dt_max)
3532}
3533
3534#[pyfunction]
3535#[pyo3(signature = (times_a, times_b, t_start=0.0, t_end=1.0, n_bins=100))]
3536fn py_earth_movers_distance(
3537 times_a: PyReadonlyArray1<'_, f64>,
3538 times_b: PyReadonlyArray1<'_, f64>,
3539 t_start: f64,
3540 t_end: f64,
3541 n_bins: usize,
3542) -> f64 {
3543 let a = times_a.as_slice().unwrap();
3544 let b = times_b.as_slice().unwrap();
3545 analysis::distance::earth_movers_distance(a, b, t_start, t_end, n_bins)
3546}
3547
3548#[pyfunction]
3549#[pyo3(signature = (spike_times_list, cost_per_s=1000.0))]
3550fn py_multi_neuron_victor_purpura(
3551 py: Python<'_>,
3552 spike_times_list: Vec<PyReadonlyArray1<'_, f64>>,
3553 cost_per_s: f64,
3554) -> Py<PyArray2<f64>> {
3555 let vecs: Vec<Vec<f64>> = spike_times_list
3556 .iter()
3557 .map(|t| t.as_slice().unwrap().to_vec())
3558 .collect();
3559 let refs: Vec<&[f64]> = vecs.iter().map(|v| v.as_slice()).collect();
3560 let mat = analysis::distance::multi_neuron_victor_purpura(&refs, cost_per_s);
3561 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3562 let n = rows.len();
3563 numpy::PyArray2::from_vec2(py, &rows)
3564 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3565 .into()
3566}
3567
3568#[pyfunction]
3569#[pyo3(signature = (spike_times_list, metric="spike_distance", t_start=0.0, t_end=1.0))]
3570fn py_spike_distance_matrix(
3571 py: Python<'_>,
3572 spike_times_list: Vec<PyReadonlyArray1<'_, f64>>,
3573 metric: &str,
3574 t_start: f64,
3575 t_end: f64,
3576) -> Py<PyArray2<f64>> {
3577 let vecs: Vec<Vec<f64>> = spike_times_list
3578 .iter()
3579 .map(|t| t.as_slice().unwrap().to_vec())
3580 .collect();
3581 let refs: Vec<&[f64]> = vecs.iter().map(|v| v.as_slice()).collect();
3582 let mat = analysis::distance::spike_distance_matrix(&refs, metric, t_start, t_end);
3583 let rows: Vec<Vec<f64>> = mat.into_iter().collect();
3584 let n = rows.len();
3585 numpy::PyArray2::from_vec2(py, &rows)
3586 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3587 .into()
3588}
3589
3590#[pyfunction]
3593#[pyo3(signature = (train_a, train_b, bin_size=10))]
3594fn py_mutual_information(
3595 train_a: PyReadonlyArray1<'_, i32>,
3596 train_b: PyReadonlyArray1<'_, i32>,
3597 bin_size: usize,
3598) -> f64 {
3599 let a = train_a.as_slice().unwrap();
3600 let b = train_b.as_slice().unwrap();
3601 analysis::information::mutual_information(a, b, bin_size)
3602}
3603
3604#[pyfunction]
3605#[pyo3(signature = (source, target, bin_size=10, lag=1))]
3606fn py_transfer_entropy(
3607 source: PyReadonlyArray1<'_, i32>,
3608 target: PyReadonlyArray1<'_, i32>,
3609 bin_size: usize,
3610 lag: usize,
3611) -> f64 {
3612 let s = source.as_slice().unwrap();
3613 let t = target.as_slice().unwrap();
3614 analysis::information::transfer_entropy(s, t, bin_size, lag)
3615}
3616
3617#[pyfunction]
3618#[pyo3(signature = (binary_train, bin_size=10, word_length=4))]
3619fn py_spike_train_entropy(
3620 binary_train: PyReadonlyArray1<'_, i32>,
3621 bin_size: usize,
3622 word_length: usize,
3623) -> f64 {
3624 let data = binary_train.as_slice().unwrap();
3625 analysis::information::spike_train_entropy(data, bin_size, word_length)
3626}
3627
3628#[pyfunction]
3629#[pyo3(signature = (binary_train, n_trials=10, bin_size=10, word_length=4))]
3630fn py_noise_entropy(
3631 binary_train: PyReadonlyArray1<'_, i32>,
3632 n_trials: usize,
3633 bin_size: usize,
3634 word_length: usize,
3635) -> f64 {
3636 let data = binary_train.as_slice().unwrap();
3637 analysis::information::noise_entropy(data, n_trials, bin_size, word_length)
3638}
3639
3640#[pyfunction]
3641fn py_stimulus_specific_information(
3642 spike_counts: PyReadonlyArray1<'_, f64>,
3643 stimulus_ids: PyReadonlyArray1<'_, i64>,
3644) -> f64 {
3645 let counts = spike_counts.as_slice().unwrap();
3646 let ids = stimulus_ids.as_slice().unwrap();
3647 analysis::information::stimulus_specific_information(counts, ids)
3648}
3649
3650#[pyfunction]
3651#[pyo3(signature = (x, y, k=3))]
3652fn py_kozachenko_leonenko_mi(
3653 x: PyReadonlyArray1<'_, f64>,
3654 y: PyReadonlyArray1<'_, f64>,
3655 k: usize,
3656) -> f64 {
3657 let xd = x.as_slice().unwrap();
3658 let yd = y.as_slice().unwrap();
3659 analysis::information::kozachenko_leonenko_mi(xd, yd, k)
3660}
3661
3662#[pyfunction]
3665#[pyo3(signature = (source, target, bin_size=10, order=5))]
3666fn py_pairwise_granger_causality(
3667 source: PyReadonlyArray1<'_, i32>,
3668 target: PyReadonlyArray1<'_, i32>,
3669 bin_size: usize,
3670 order: usize,
3671) -> f64 {
3672 let s = source.as_slice().unwrap();
3673 let t = target.as_slice().unwrap();
3674 analysis::causality::pairwise_granger_causality(s, t, bin_size, order)
3675}
3676
3677#[pyfunction]
3678#[pyo3(signature = (source, target, condition, bin_size=10, order=5))]
3679fn py_conditional_granger_causality(
3680 source: PyReadonlyArray1<'_, i32>,
3681 target: PyReadonlyArray1<'_, i32>,
3682 condition: PyReadonlyArray1<'_, i32>,
3683 bin_size: usize,
3684 order: usize,
3685) -> f64 {
3686 let s = source.as_slice().unwrap();
3687 let t = target.as_slice().unwrap();
3688 let c = condition.as_slice().unwrap();
3689 analysis::causality::conditional_granger_causality(s, t, c, bin_size, order)
3690}
3691
3692#[pyfunction]
3693#[pyo3(signature = (trains, bin_size=10, order=5, n_freqs=64))]
3694fn py_spectral_granger_causality(
3695 py: Python<'_>,
3696 trains: Vec<PyReadonlyArray1<'_, i32>>,
3697 bin_size: usize,
3698 order: usize,
3699 n_freqs: usize,
3700) -> (Py<PyArray1<f64>>, usize, usize) {
3701 let vecs: Vec<Vec<i32>> = trains
3702 .iter()
3703 .map(|t| t.as_slice().unwrap().to_vec())
3704 .collect();
3705 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3706 let (gc, d) = analysis::causality::spectral_granger_causality(&refs, bin_size, order, n_freqs);
3707 (gc.into_pyarray(py).into(), d, n_freqs)
3708}
3709
3710#[pyfunction]
3711#[pyo3(signature = (trains, bin_size=10, order=5, n_freqs=64))]
3712fn py_partial_directed_coherence(
3713 py: Python<'_>,
3714 trains: Vec<PyReadonlyArray1<'_, i32>>,
3715 bin_size: usize,
3716 order: usize,
3717 n_freqs: usize,
3718) -> (Py<PyArray1<f64>>, usize, usize) {
3719 let vecs: Vec<Vec<i32>> = trains
3720 .iter()
3721 .map(|t| t.as_slice().unwrap().to_vec())
3722 .collect();
3723 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3724 let (pdc, d) = analysis::causality::partial_directed_coherence(&refs, bin_size, order, n_freqs);
3725 (pdc.into_pyarray(py).into(), d, n_freqs)
3726}
3727
3728#[pyfunction]
3729#[pyo3(signature = (trains, bin_size=10, order=5, n_freqs=64))]
3730fn py_directed_transfer_function(
3731 py: Python<'_>,
3732 trains: Vec<PyReadonlyArray1<'_, i32>>,
3733 bin_size: usize,
3734 order: usize,
3735 n_freqs: usize,
3736) -> (Py<PyArray1<f64>>, usize, usize) {
3737 let vecs: Vec<Vec<i32>> = trains
3738 .iter()
3739 .map(|t| t.as_slice().unwrap().to_vec())
3740 .collect();
3741 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3742 let (dtf, d) = analysis::causality::directed_transfer_function(&refs, bin_size, order, n_freqs);
3743 (dtf.into_pyarray(py).into(), d, n_freqs)
3744}
3745
3746#[pyfunction]
3749#[pyo3(signature = (trains, preferred_directions, window=50))]
3750fn py_population_vector_decode(
3751 py: Python<'_>,
3752 trains: Vec<PyReadonlyArray1<'_, i32>>,
3753 preferred_directions: PyReadonlyArray1<'_, f64>,
3754 window: usize,
3755) -> Py<PyArray1<f64>> {
3756 let vecs: Vec<Vec<i32>> = trains
3757 .iter()
3758 .map(|t| t.as_slice().unwrap().to_vec())
3759 .collect();
3760 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3761 let dirs = preferred_directions.as_slice().unwrap();
3762 analysis::decoding::population_vector_decode(&refs, dirs, window)
3763 .into_pyarray(py)
3764 .into()
3765}
3766
3767#[pyfunction]
3768#[pyo3(signature = (spike_counts, tuning_rates, n_stimuli, n_neurons, prior=None))]
3769fn py_bayesian_decode(
3770 spike_counts: PyReadonlyArray1<'_, f64>,
3771 tuning_rates: PyReadonlyArray1<'_, f64>,
3772 n_stimuli: usize,
3773 n_neurons: usize,
3774 prior: Option<PyReadonlyArray1<'_, f64>>,
3775) -> usize {
3776 let counts = spike_counts.as_slice().unwrap();
3777 let rates = tuning_rates.as_slice().unwrap();
3778 let p: Vec<f64> = prior
3779 .map(|p| p.as_slice().unwrap().to_vec())
3780 .unwrap_or_default();
3781 analysis::decoding::bayesian_decode(counts, rates, n_stimuli, n_neurons, &p)
3782}
3783
3784#[pyfunction]
3785fn py_maximum_likelihood_decode(
3786 spike_counts: PyReadonlyArray1<'_, f64>,
3787 tuning_rates: PyReadonlyArray1<'_, f64>,
3788 n_stimuli: usize,
3789 n_neurons: usize,
3790) -> usize {
3791 let counts = spike_counts.as_slice().unwrap();
3792 let rates = tuning_rates.as_slice().unwrap();
3793 analysis::decoding::maximum_likelihood_decode(counts, rates, n_stimuli, n_neurons)
3794}
3795
3796#[pyfunction]
3797fn py_linear_discriminant_decode(
3798 train_data: PyReadonlyArray1<'_, f64>,
3799 n_samples: usize,
3800 n_features: usize,
3801 labels: PyReadonlyArray1<'_, i64>,
3802 test_point: PyReadonlyArray1<'_, f64>,
3803) -> i64 {
3804 let data = train_data.as_slice().unwrap();
3805 let lbl = labels.as_slice().unwrap();
3806 let tp = test_point.as_slice().unwrap();
3807 analysis::decoding::linear_discriminant_decode(data, n_samples, n_features, lbl, tp)
3808}
3809
3810#[pyfunction]
3811fn py_naive_bayes_decode(
3812 train_data: PyReadonlyArray1<'_, f64>,
3813 n_samples: usize,
3814 n_features: usize,
3815 labels: PyReadonlyArray1<'_, i64>,
3816 test_point: PyReadonlyArray1<'_, f64>,
3817) -> i64 {
3818 let data = train_data.as_slice().unwrap();
3819 let lbl = labels.as_slice().unwrap();
3820 let tp = test_point.as_slice().unwrap();
3821 analysis::decoding::naive_bayes_decode(data, n_samples, n_features, lbl, tp)
3822}
3823
3824#[pyfunction]
3827#[pyo3(signature = (trains, dt=1.0))]
3828fn py_tokenise_spikes(
3829 py: Python<'_>,
3830 trains: Vec<PyReadonlyArray1<'_, i32>>,
3831 dt: f64,
3832) -> (Py<PyArray1<i64>>, Py<PyArray1<f64>>) {
3833 let vecs: Vec<Vec<i32>> = trains
3834 .iter()
3835 .map(|t| t.as_slice().unwrap().to_vec())
3836 .collect();
3837 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3838 let tokens = analysis::neural_decoders::tokenise_spikes(&refs, dt);
3839 let uids: Vec<i64> = tokens.iter().map(|t| t.0 as i64).collect();
3840 let times: Vec<f64> = tokens.iter().map(|t| t.1).collect();
3841 (uids.into_pyarray(py).into(), times.into_pyarray(py).into())
3842}
3843
3844#[pyfunction]
3845fn py_sinusoidal_position_encode(
3846 py: Python<'_>,
3847 timestamps: PyReadonlyArray1<'_, f64>,
3848 d_model: usize,
3849) -> Py<PyArray1<f64>> {
3850 let ts = timestamps.as_slice().unwrap();
3851 analysis::neural_decoders::sinusoidal_position_encode(ts, d_model)
3852 .into_pyarray(py)
3853 .into()
3854}
3855
3856#[pyfunction]
3857fn py_scaled_dot_product_attention(
3858 py: Python<'_>,
3859 queries: PyReadonlyArray1<'_, f64>,
3860 keys: PyReadonlyArray1<'_, f64>,
3861 values: PyReadonlyArray1<'_, f64>,
3862 nq: usize,
3863 nk: usize,
3864 d: usize,
3865) -> Py<PyArray1<f64>> {
3866 let q = queries.as_slice().unwrap();
3867 let k = keys.as_slice().unwrap();
3868 let v = values.as_slice().unwrap();
3869 analysis::neural_decoders::scaled_dot_product_attention(q, k, v, nq, nk, d)
3870 .into_pyarray(py)
3871 .into()
3872}
3873
3874#[pyfunction]
3875fn py_gaussian_attention(
3876 py: Python<'_>,
3877 queries: PyReadonlyArray1<'_, f64>,
3878 keys: PyReadonlyArray1<'_, f64>,
3879 values: PyReadonlyArray1<'_, f64>,
3880 nq: usize,
3881 nk: usize,
3882 d: usize,
3883 sigma: f64,
3884) -> Py<PyArray1<f64>> {
3885 let q = queries.as_slice().unwrap();
3886 let k = keys.as_slice().unwrap();
3887 let v = values.as_slice().unwrap();
3888 analysis::neural_decoders::gaussian_attention(q, k, v, nq, nk, d, sigma)
3889 .into_pyarray(py)
3890 .into()
3891}
3892
3893#[pyfunction]
3894fn py_infonce_loss(
3895 anchors: PyReadonlyArray1<'_, f64>,
3896 positives: PyReadonlyArray1<'_, f64>,
3897 n: usize,
3898 d: usize,
3899 temperature: f64,
3900) -> f64 {
3901 let a = anchors.as_slice().unwrap();
3902 let p = positives.as_slice().unwrap();
3903 analysis::neural_decoders::infonce_loss(a, p, n, d, temperature)
3904}
3905
3906#[pyfunction]
3909#[pyo3(signature = (trains, max_lag_ms=20.0, dt=0.001))]
3910fn py_functional_connectivity(
3911 py: Python<'_>,
3912 trains: Vec<PyReadonlyArray1<'_, i32>>,
3913 max_lag_ms: f64,
3914 dt: f64,
3915) -> Py<PyArray2<f64>> {
3916 let vecs: Vec<Vec<i32>> = trains
3917 .iter()
3918 .map(|t| t.as_slice().unwrap().to_vec())
3919 .collect();
3920 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3921 let mat = analysis::network::functional_connectivity(&refs, max_lag_ms, dt);
3922 let n = refs.len();
3923 let rows: Vec<Vec<f64>> = mat.chunks(n).map(|c| c.to_vec()).collect();
3924 numpy::PyArray2::from_vec2(py, &rows)
3925 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
3926 .into()
3927}
3928
3929#[pyfunction]
3930#[pyo3(signature = (trains, bin_size=5, alpha=0.05))]
3931fn py_unitary_events(
3932 py: Python<'_>,
3933 trains: Vec<PyReadonlyArray1<'_, i32>>,
3934 bin_size: usize,
3935 alpha: f64,
3936) -> Py<PyArray1<i64>> {
3937 let vecs: Vec<Vec<i32>> = trains
3938 .iter()
3939 .map(|t| t.as_slice().unwrap().to_vec())
3940 .collect();
3941 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3942 let result = analysis::network::unitary_events(&refs, bin_size, alpha);
3943 let as_i64: Vec<i64> = result.into_iter().map(|v| v as i64).collect();
3944 as_i64.into_pyarray(py).into()
3945}
3946
3947#[pyfunction]
3948#[pyo3(signature = (trains, bin_size=5, threshold=2.0))]
3949fn py_cell_assembly_detection(
3950 trains: Vec<PyReadonlyArray1<'_, i32>>,
3951 bin_size: usize,
3952 threshold: f64,
3953) -> Vec<Vec<usize>> {
3954 let vecs: Vec<Vec<i32>> = trains
3955 .iter()
3956 .map(|t| t.as_slice().unwrap().to_vec())
3957 .collect();
3958 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3959 analysis::network::cell_assembly_detection(&refs, bin_size, threshold)
3960}
3961
3962#[pyfunction]
3963#[pyo3(signature = (trains, dt=0.001, max_delay_ms=20.0, min_chain_length=3))]
3964fn py_synfire_chain_detection(
3965 trains: Vec<PyReadonlyArray1<'_, i32>>,
3966 dt: f64,
3967 max_delay_ms: f64,
3968 min_chain_length: usize,
3969) -> Vec<Vec<usize>> {
3970 let vecs: Vec<Vec<i32>> = trains
3971 .iter()
3972 .map(|t| t.as_slice().unwrap().to_vec())
3973 .collect();
3974 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
3975 analysis::network::synfire_chain_detection(&refs, dt, max_delay_ms, min_chain_length)
3976}
3977
3978#[pyfunction]
3981#[pyo3(signature = (binary_train, seed=0))]
3982fn py_surrogate_isi_shuffle(
3983 py: Python<'_>,
3984 binary_train: PyReadonlyArray1<'_, i32>,
3985 seed: u64,
3986) -> Py<PyArray1<i32>> {
3987 let data = binary_train.as_slice().unwrap();
3988 analysis::surrogates::surrogate_isi_shuffle(data, seed)
3989 .into_pyarray(py)
3990 .into()
3991}
3992
3993#[pyfunction]
3994#[pyo3(signature = (binary_train, dither_ms=5.0, dt=0.001, seed=0))]
3995fn py_surrogate_dither(
3996 py: Python<'_>,
3997 binary_train: PyReadonlyArray1<'_, i32>,
3998 dither_ms: f64,
3999 dt: f64,
4000 seed: u64,
4001) -> Py<PyArray1<i32>> {
4002 let data = binary_train.as_slice().unwrap();
4003 analysis::surrogates::surrogate_dither(data, dither_ms, dt, seed)
4004 .into_pyarray(py)
4005 .into()
4006}
4007
4008#[pyfunction]
4009#[pyo3(signature = (rate_hz, duration_s, dt=0.001, seed=0))]
4010fn py_homogeneous_poisson(
4011 py: Python<'_>,
4012 rate_hz: f64,
4013 duration_s: f64,
4014 dt: f64,
4015 seed: u64,
4016) -> Py<PyArray1<f64>> {
4017 analysis::surrogates::homogeneous_poisson(rate_hz, duration_s, dt, seed)
4018 .into_pyarray(py)
4019 .into()
4020}
4021
4022#[pyfunction]
4023#[pyo3(signature = (rate_hz, shape, duration_s, dt=0.001, seed=0))]
4024fn py_gamma_process(
4025 py: Python<'_>,
4026 rate_hz: f64,
4027 shape: f64,
4028 duration_s: f64,
4029 dt: f64,
4030 seed: u64,
4031) -> Py<PyArray1<f64>> {
4032 analysis::surrogates::gamma_process(rate_hz, shape, duration_s, dt, seed)
4033 .into_pyarray(py)
4034 .into()
4035}
4036
4037#[pyfunction]
4038#[pyo3(signature = (rate_hz, burst_mean, duration_s, dt=0.001, seed=0))]
4039fn py_compound_poisson_process(
4040 py: Python<'_>,
4041 rate_hz: f64,
4042 burst_mean: f64,
4043 duration_s: f64,
4044 dt: f64,
4045 seed: u64,
4046) -> Py<PyArray1<f64>> {
4047 analysis::surrogates::compound_poisson_process(rate_hz, burst_mean, duration_s, dt, seed)
4048 .into_pyarray(py)
4049 .into()
4050}
4051
4052#[pyfunction]
4053#[pyo3(signature = (binary_train, seed=0))]
4054fn py_surrogate_joint_isi(
4055 py: Python<'_>,
4056 binary_train: PyReadonlyArray1<'_, i32>,
4057 seed: u64,
4058) -> Py<PyArray1<i32>> {
4059 let data = binary_train.as_slice().unwrap();
4060 analysis::surrogates::surrogate_joint_isi(data, seed)
4061 .into_pyarray(py)
4062 .into()
4063}
4064
4065#[pyfunction]
4066#[pyo3(signature = (binary_train, bin_size=10, seed=0))]
4067fn py_surrogate_bin_shuffling(
4068 py: Python<'_>,
4069 binary_train: PyReadonlyArray1<'_, i32>,
4070 bin_size: usize,
4071 seed: u64,
4072) -> Py<PyArray1<i32>> {
4073 let data = binary_train.as_slice().unwrap();
4074 analysis::surrogates::surrogate_bin_shuffling(data, bin_size, seed)
4075 .into_pyarray(py)
4076 .into()
4077}
4078
4079#[pyfunction]
4080#[pyo3(signature = (binary_train, max_shift=50, seed=0))]
4081fn py_surrogate_spike_train_shifting(
4082 py: Python<'_>,
4083 binary_train: PyReadonlyArray1<'_, i32>,
4084 max_shift: usize,
4085 seed: u64,
4086) -> Py<PyArray1<i32>> {
4087 let data = binary_train.as_slice().unwrap();
4088 analysis::surrogates::surrogate_spike_train_shifting(data, max_shift, seed)
4089 .into_pyarray(py)
4090 .into()
4091}
4092
4093#[pyfunction]
4096#[pyo3(signature = (binary_train, dt=0.001, max_isi_ms=10.0, min_spikes=3))]
4097fn py_burst_detection(
4098 binary_train: PyReadonlyArray1<'_, i32>,
4099 dt: f64,
4100 max_isi_ms: f64,
4101 min_spikes: usize,
4102) -> Vec<(f64, f64, usize)> {
4103 let data = binary_train.as_slice().unwrap();
4104 analysis::temporal::burst_detection(data, dt, max_isi_ms, min_spikes)
4105}
4106
4107#[pyfunction]
4108#[pyo3(signature = (binary_train, dt=0.001))]
4109fn py_first_spike_latency(binary_train: PyReadonlyArray1<'_, i32>, dt: f64) -> f64 {
4110 let data = binary_train.as_slice().unwrap();
4111 analysis::temporal::first_spike_latency(data, dt)
4112}
4113
4114#[pyfunction]
4115#[pyo3(signature = (binary_train, baseline_steps=100, dt=0.001, threshold_sigma=3.0))]
4116fn py_response_onset(
4117 binary_train: PyReadonlyArray1<'_, i32>,
4118 baseline_steps: usize,
4119 dt: f64,
4120 threshold_sigma: f64,
4121) -> f64 {
4122 let data = binary_train.as_slice().unwrap();
4123 analysis::temporal::response_onset(data, baseline_steps, dt, threshold_sigma)
4124}
4125
4126#[pyfunction]
4127#[pyo3(signature = (binary_train, bin_size=50, threshold=3.0))]
4128fn py_change_point_detection(
4129 py: Python<'_>,
4130 binary_train: PyReadonlyArray1<'_, i32>,
4131 bin_size: usize,
4132 threshold: f64,
4133) -> Py<PyArray1<i64>> {
4134 let data = binary_train.as_slice().unwrap();
4135 let cps = analysis::temporal::change_point_detection(data, bin_size, threshold);
4136 let as_i64: Vec<i64> = cps.into_iter().map(|v| v as i64).collect();
4137 as_i64.into_pyarray(py).into()
4138}
4139
4140#[pyfunction]
4143#[pyo3(signature = (times_a, times_b, t_start=0.0, t_end=1.0))]
4144fn py_spike_directionality(
4145 times_a: PyReadonlyArray1<'_, f64>,
4146 times_b: PyReadonlyArray1<'_, f64>,
4147 t_start: f64,
4148 t_end: f64,
4149) -> f64 {
4150 let a = times_a.as_slice().unwrap();
4151 let b = times_b.as_slice().unwrap();
4152 analysis::patterns::spike_directionality(a, b, t_start, t_end)
4153}
4154
4155#[pyfunction]
4156#[pyo3(signature = (times_list, t_start=0.0, t_end=1.0))]
4157fn py_spike_train_order(
4158 py: Python<'_>,
4159 times_list: Vec<PyReadonlyArray1<'_, f64>>,
4160 t_start: f64,
4161 t_end: f64,
4162) -> Py<PyArray2<f64>> {
4163 let vecs: Vec<Vec<f64>> = times_list
4164 .iter()
4165 .map(|t| t.as_slice().unwrap().to_vec())
4166 .collect();
4167 let refs: Vec<&[f64]> = vecs.iter().map(|v| v.as_slice()).collect();
4168 let mat = analysis::patterns::spike_train_order(&refs, t_start, t_end);
4169 let n = refs.len();
4170 let rows: Vec<Vec<f64>> = mat.chunks(n).map(|c| c.to_vec()).collect();
4171 numpy::PyArray2::from_vec2(py, &rows)
4172 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [n, n], false))
4173 .into()
4174}
4175
4176#[pyfunction]
4177#[pyo3(signature = (binary_train, dt=0.001, max_lag=20))]
4178fn py_cubic_higher_order(
4179 py: Python<'_>,
4180 binary_train: PyReadonlyArray1<'_, i32>,
4181 dt: f64,
4182 max_lag: usize,
4183) -> Py<PyArray2<f64>> {
4184 let data = binary_train.as_slice().unwrap();
4185 let c3 = analysis::patterns::cubic_higher_order(data, dt, max_lag);
4186 let rows: Vec<Vec<f64>> = c3.chunks(max_lag).map(|c| c.to_vec()).collect();
4187 numpy::PyArray2::from_vec2(py, &rows)
4188 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [max_lag, max_lag], false))
4189 .into()
4190}
4191
4192#[pyfunction]
4195#[pyo3(signature = (binary_train, dt=0.001))]
4196fn py_power_spectrum(
4197 py: Python<'_>,
4198 binary_train: PyReadonlyArray1<'_, i32>,
4199 dt: f64,
4200) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4201 let data = binary_train.as_slice().unwrap();
4202 let (psd, freqs) = analysis::spectral::power_spectrum(data, dt);
4203 (psd.into_pyarray(py).into(), freqs.into_pyarray(py).into())
4204}
4205
4206#[pyfunction]
4209#[pyo3(signature = (waveform, dt=3.3333333333333335e-05))]
4210fn py_waveform_width(waveform: PyReadonlyArray1<'_, f64>, dt: f64) -> f64 {
4211 analysis::waveform::waveform_width(waveform.as_slice().unwrap(), dt)
4212}
4213
4214#[pyfunction]
4215fn py_waveform_amplitude(waveform: PyReadonlyArray1<'_, f64>) -> f64 {
4216 analysis::waveform::waveform_amplitude(waveform.as_slice().unwrap())
4217}
4218
4219#[pyfunction]
4220#[pyo3(signature = (waveform, dt=3.3333333333333335e-05))]
4221fn py_waveform_repolarization_slope(waveform: PyReadonlyArray1<'_, f64>, dt: f64) -> f64 {
4222 analysis::waveform::waveform_repolarization_slope(waveform.as_slice().unwrap(), dt)
4223}
4224
4225#[pyfunction]
4226#[pyo3(signature = (waveform, dt=3.3333333333333335e-05))]
4227fn py_waveform_recovery_slope(waveform: PyReadonlyArray1<'_, f64>, dt: f64) -> f64 {
4228 analysis::waveform::waveform_recovery_slope(waveform.as_slice().unwrap(), dt)
4229}
4230
4231#[pyfunction]
4232#[pyo3(signature = (waveform, dt=3.3333333333333335e-05))]
4233fn py_waveform_halfwidth(waveform: PyReadonlyArray1<'_, f64>, dt: f64) -> f64 {
4234 analysis::waveform::waveform_halfwidth(waveform.as_slice().unwrap(), dt)
4235}
4236
4237#[pyfunction]
4238fn py_waveform_pt_ratio(waveform: PyReadonlyArray1<'_, f64>) -> f64 {
4239 analysis::waveform::waveform_pt_ratio(waveform.as_slice().unwrap())
4240}
4241
4242#[pyfunction]
4245#[pyo3(signature = (binary_train, dt=0.001, window_ms=50.0))]
4246fn py_conditional_intensity(
4247 py: Python<'_>,
4248 binary_train: PyReadonlyArray1<'_, i32>,
4249 dt: f64,
4250 window_ms: f64,
4251) -> Py<PyArray1<f64>> {
4252 let data = binary_train.as_slice().unwrap();
4253 analysis::point_process::conditional_intensity(data, dt, window_ms)
4254 .into_pyarray(py)
4255 .into()
4256}
4257
4258#[pyfunction]
4259#[pyo3(signature = (binary_train, dt=0.001, bins=30))]
4260fn py_isi_hazard_function(
4261 py: Python<'_>,
4262 binary_train: PyReadonlyArray1<'_, i32>,
4263 dt: f64,
4264 bins: usize,
4265) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4266 let data = binary_train.as_slice().unwrap();
4267 let (hazard, centres) = analysis::point_process::isi_hazard_function(data, dt, bins);
4268 (
4269 hazard.into_pyarray(py).into(),
4270 centres.into_pyarray(py).into(),
4271 )
4272}
4273
4274#[pyfunction]
4275#[pyo3(signature = (binary_train, dt=0.001, bins=30))]
4276fn py_isi_survivor_function(
4277 py: Python<'_>,
4278 binary_train: PyReadonlyArray1<'_, i32>,
4279 dt: f64,
4280 bins: usize,
4281) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4282 let data = binary_train.as_slice().unwrap();
4283 let (surv, centres) = analysis::point_process::isi_survivor_function(data, dt, bins);
4284 (
4285 surv.into_pyarray(py).into(),
4286 centres.into_pyarray(py).into(),
4287 )
4288}
4289
4290#[pyfunction]
4291#[pyo3(signature = (binary_train, dt=0.001, bins=30))]
4292fn py_renewal_density(
4293 py: Python<'_>,
4294 binary_train: PyReadonlyArray1<'_, i32>,
4295 dt: f64,
4296 bins: usize,
4297) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4298 let data = binary_train.as_slice().unwrap();
4299 let (dens, centres) = analysis::point_process::renewal_density(data, dt, bins);
4300 (
4301 dens.into_pyarray(py).into(),
4302 centres.into_pyarray(py).into(),
4303 )
4304}
4305
4306#[pyfunction]
4309#[pyo3(signature = (stimulus, binary_train, window_steps=50))]
4310fn py_spike_triggered_average(
4311 py: Python<'_>,
4312 stimulus: PyReadonlyArray1<'_, f64>,
4313 binary_train: PyReadonlyArray1<'_, i32>,
4314 window_steps: usize,
4315) -> Py<PyArray1<f64>> {
4316 analysis::stimulus::spike_triggered_average(
4317 stimulus.as_slice().unwrap(),
4318 binary_train.as_slice().unwrap(),
4319 window_steps,
4320 )
4321 .into_pyarray(py)
4322 .into()
4323}
4324
4325#[pyfunction]
4326#[pyo3(signature = (stimulus, binary_train, window_steps=50))]
4327fn py_spike_triggered_covariance(
4328 py: Python<'_>,
4329 stimulus: PyReadonlyArray1<'_, f64>,
4330 binary_train: PyReadonlyArray1<'_, i32>,
4331 window_steps: usize,
4332) -> Py<PyArray2<f64>> {
4333 let cov = analysis::stimulus::spike_triggered_covariance(
4334 stimulus.as_slice().unwrap(),
4335 binary_train.as_slice().unwrap(),
4336 window_steps,
4337 );
4338 let rows: Vec<Vec<f64>> = cov.chunks(window_steps).map(|c| c.to_vec()).collect();
4339 numpy::PyArray2::from_vec2(py, &rows)
4340 .unwrap_or_else(|_| numpy::PyArray2::zeros(py, [window_steps, window_steps], false))
4341 .into()
4342}
4343
4344#[pyfunction]
4345#[pyo3(signature = (binary_train, positions, n_bins=20, dt=0.001))]
4346fn py_spatial_information(
4347 binary_train: PyReadonlyArray1<'_, i32>,
4348 positions: PyReadonlyArray1<'_, f64>,
4349 n_bins: usize,
4350 dt: f64,
4351) -> f64 {
4352 analysis::stimulus::spatial_information(
4353 binary_train.as_slice().unwrap(),
4354 positions.as_slice().unwrap(),
4355 n_bins,
4356 dt,
4357 )
4358}
4359
4360#[pyfunction]
4361#[pyo3(signature = (binary_train, positions, n_bins=50, threshold_std=2.0, dt=0.001))]
4362fn py_place_field_detection(
4363 binary_train: PyReadonlyArray1<'_, i32>,
4364 positions: PyReadonlyArray1<'_, f64>,
4365 n_bins: usize,
4366 threshold_std: f64,
4367 dt: f64,
4368) -> Vec<(f64, f64)> {
4369 analysis::stimulus::place_field_detection(
4370 binary_train.as_slice().unwrap(),
4371 positions.as_slice().unwrap(),
4372 n_bins,
4373 threshold_std,
4374 dt,
4375 )
4376}
4377
4378#[pyfunction]
4379#[pyo3(signature = (binary_train, stimulus_values, n_bins=20, dt=0.001))]
4380fn py_tuning_curve(
4381 py: Python<'_>,
4382 binary_train: PyReadonlyArray1<'_, i32>,
4383 stimulus_values: PyReadonlyArray1<'_, f64>,
4384 n_bins: usize,
4385 dt: f64,
4386) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4387 let (rates, centres) = analysis::stimulus::tuning_curve(
4388 binary_train.as_slice().unwrap(),
4389 stimulus_values.as_slice().unwrap(),
4390 n_bins,
4391 dt,
4392 );
4393 (
4394 rates.into_pyarray(py).into(),
4395 centres.into_pyarray(py).into(),
4396 )
4397}
4398
4399#[pyfunction]
4402fn py_phase_locking_value(
4403 binary_train: PyReadonlyArray1<'_, i32>,
4404 lfp_signal: PyReadonlyArray1<'_, f64>,
4405) -> f64 {
4406 analysis::lfp::phase_locking_value(
4407 binary_train.as_slice().unwrap(),
4408 lfp_signal.as_slice().unwrap(),
4409 )
4410}
4411
4412#[pyfunction]
4413#[pyo3(signature = (binary_train, lfp_signal, dt=0.001))]
4414fn py_spike_field_coherence(
4415 py: Python<'_>,
4416 binary_train: PyReadonlyArray1<'_, i32>,
4417 lfp_signal: PyReadonlyArray1<'_, f64>,
4418 dt: f64,
4419) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4420 let (sfc, freqs) = analysis::lfp::spike_field_coherence(
4421 binary_train.as_slice().unwrap(),
4422 lfp_signal.as_slice().unwrap(),
4423 dt,
4424 );
4425 (sfc.into_pyarray(py).into(), freqs.into_pyarray(py).into())
4426}
4427
#[pyfunction]
#[pyo3(signature = (binary_train, lfp_signal, n_bins=36))]
/// Thin wrapper: delegates to `analysis::lfp::spike_phase_histogram` and
/// returns (counts, bin centres) as numpy arrays.
///
/// Panics if either numpy input is not C-contiguous (`as_slice().unwrap()`).
fn py_spike_phase_histogram(
    py: Python<'_>,
    binary_train: PyReadonlyArray1<'_, i32>,
    lfp_signal: PyReadonlyArray1<'_, f64>,
    n_bins: usize,
) -> (Py<PyArray1<i64>>, Py<PyArray1<f64>>) {
    let (hist, centres) = analysis::lfp::spike_phase_histogram(
        binary_train.as_slice().unwrap(),
        lfp_signal.as_slice().unwrap(),
        n_bins,
    );
    (
        hist.into_pyarray(py).into(),
        centres.into_pyarray(py).into(),
    )
}
4446
#[pyfunction]
/// Flattens the two 2-D feature matrices and delegates to
/// `analysis::sorting_quality::isolation_distance` with their row counts and
/// the cluster's column count `d`.
///
/// NOTE(review): `d` is taken from `cluster` only; a `noise` matrix with a
/// different column count is not rejected here — confirm the callee validates.
/// Panics if either numpy input is not C-contiguous (`as_slice().unwrap()`).
fn py_isolation_distance(
    cluster: PyReadonlyArray2<'_, f64>,
    noise: PyReadonlyArray2<'_, f64>,
) -> f64 {
    let c_shape = cluster.shape();
    let n_shape = noise.shape();
    let d = c_shape[1];
    let c_data: Vec<f64> = cluster.as_slice().unwrap().to_vec();
    let n_data: Vec<f64> = noise.as_slice().unwrap().to_vec();
    analysis::sorting_quality::isolation_distance(&c_data, c_shape[0], &n_data, n_shape[0], d)
}
4461
#[pyfunction]
/// Flattens the two 2-D feature matrices and delegates to
/// `analysis::sorting_quality::l_ratio` (row counts plus cluster column count).
///
/// NOTE(review): like `py_isolation_distance`, a noise matrix whose column
/// count differs from the cluster's is not rejected here.
fn py_l_ratio(cluster: PyReadonlyArray2<'_, f64>, noise: PyReadonlyArray2<'_, f64>) -> f64 {
    let c_shape = cluster.shape();
    let n_shape = noise.shape();
    let d = c_shape[1];
    let c_data: Vec<f64> = cluster.as_slice().unwrap().to_vec();
    let n_data: Vec<f64> = noise.as_slice().unwrap().to_vec();
    analysis::sorting_quality::l_ratio(&c_data, c_shape[0], &n_data, n_shape[0], d)
}
4471
#[pyfunction]
/// Flattens the feature matrix and copies the label vector, then delegates to
/// `analysis::sorting_quality::silhouette_score`.
///
/// Panics if either numpy input is not C-contiguous (`as_slice().unwrap()`).
fn py_silhouette_score(
    features: PyReadonlyArray2<'_, f64>,
    labels: PyReadonlyArray1<'_, i64>,
) -> f64 {
    let shape = features.shape();
    let f_data: Vec<f64> = features.as_slice().unwrap().to_vec();
    let l_data: Vec<i64> = labels.as_slice().unwrap().to_vec();
    analysis::sorting_quality::silhouette_score(&f_data, shape[0], shape[1], &l_data)
}
4482
#[pyfunction]
/// Flattens the two feature matrices and delegates to
/// `analysis::sorting_quality::d_prime`.
///
/// NOTE(review): `d` comes from `cluster_a` only; a `cluster_b` with a
/// different column count is not rejected here.
fn py_d_prime(cluster_a: PyReadonlyArray2<'_, f64>, cluster_b: PyReadonlyArray2<'_, f64>) -> f64 {
    let a_shape = cluster_a.shape();
    let b_shape = cluster_b.shape();
    let d = a_shape[1];
    let a_data: Vec<f64> = cluster_a.as_slice().unwrap().to_vec();
    let b_data: Vec<f64> = cluster_b.as_slice().unwrap().to_vec();
    analysis::sorting_quality::d_prime(&a_data, a_shape[0], &b_data, b_shape[0], d)
}
4492
#[pyfunction]
#[pyo3(signature = (binary_train, dt=0.001, refractory_ms=1.5))]
/// Thin wrapper: delegates to `analysis::sorting_quality::isi_violation_rate`.
///
/// Panics if `binary_train` is not C-contiguous (`as_slice().unwrap()`).
fn py_isi_violation_rate(
    binary_train: PyReadonlyArray1<'_, i32>,
    dt: f64,
    refractory_ms: f64,
) -> f64 {
    analysis::sorting_quality::isi_violation_rate(
        binary_train.as_slice().unwrap(),
        dt,
        refractory_ms,
    )
}
4506
#[pyfunction]
#[pyo3(signature = (binary_train, n_bins=100))]
/// Thin wrapper: delegates to `analysis::sorting_quality::presence_ratio`.
/// Panics if `binary_train` is not C-contiguous (`as_slice().unwrap()`).
fn py_presence_ratio(binary_train: PyReadonlyArray1<'_, i32>, n_bins: usize) -> f64 {
    analysis::sorting_quality::presence_ratio(binary_train.as_slice().unwrap(), n_bins)
}
4512
#[pyfunction]
#[pyo3(signature = (amplitudes, bins=100))]
/// Thin wrapper: delegates to `analysis::sorting_quality::amplitude_cutoff`.
/// Panics if `amplitudes` is not C-contiguous (`as_slice().unwrap()`).
fn py_amplitude_cutoff(amplitudes: PyReadonlyArray1<'_, f64>, bins: usize) -> f64 {
    analysis::sorting_quality::amplitude_cutoff(amplitudes.as_slice().unwrap(), bins)
}
4518
4519#[pyfunction]
4520fn py_snr(waveforms: PyReadonlyArray2<'_, f64>) -> f64 {
4521 let shape = waveforms.shape();
4522 let data: Vec<f64> = waveforms.as_slice().unwrap().to_vec();
4523 analysis::sorting_quality::snr(&data, shape[0], shape[1])
4524}
4525
#[pyfunction]
#[pyo3(signature = (cluster, noise, k=4))]
/// Flattens both feature matrices and delegates to
/// `analysis::sorting_quality::nn_hit_rate` with `k` nearest neighbours.
///
/// NOTE(review): `d` comes from `cluster` only; a mismatched `noise` column
/// count is not rejected here.
fn py_nn_hit_rate(
    cluster: PyReadonlyArray2<'_, f64>,
    noise: PyReadonlyArray2<'_, f64>,
    k: usize,
) -> f64 {
    let c_shape = cluster.shape();
    let n_shape = noise.shape();
    let d = c_shape[1];
    let c_data: Vec<f64> = cluster.as_slice().unwrap().to_vec();
    let n_data: Vec<f64> = noise.as_slice().unwrap().to_vec();
    analysis::sorting_quality::nn_hit_rate(&c_data, c_shape[0], &n_data, n_shape[0], d, k)
}
4540
4541#[pyfunction]
4542#[pyo3(signature = (waveforms, timestamps, n_bins=10))]
4543fn py_drift_metric(
4544 waveforms: PyReadonlyArray2<'_, f64>,
4545 timestamps: PyReadonlyArray1<'_, f64>,
4546 n_bins: usize,
4547) -> f64 {
4548 let shape = waveforms.shape();
4549 let data: Vec<f64> = waveforms.as_slice().unwrap().to_vec();
4550 let ts: Vec<f64> = timestamps.as_slice().unwrap().to_vec();
4551 analysis::sorting_quality::drift_metric(&data, shape[0], shape[1], &ts, n_bins)
4552}
4553
4554#[pyfunction]
4557#[pyo3(signature = (trains, n_components=3, bin_size=10))]
4558fn py_spike_train_pca(
4559 py: Python<'_>,
4560 trains: Vec<PyReadonlyArray1<'_, i32>>,
4561 n_components: usize,
4562 bin_size: usize,
4563) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4564 let vecs: Vec<Vec<i32>> = trains
4565 .iter()
4566 .map(|t| t.as_slice().unwrap().to_vec())
4567 .collect();
4568 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
4569 let (proj, expl) = analysis::dimensionality::spike_train_pca(&refs, n_components, bin_size);
4570 (proj.into_pyarray(py).into(), expl.into_pyarray(py).into())
4571}
4572
#[pyfunction]
#[pyo3(signature = (conditions, n_components=3, bin_size=10))]
/// Copies a nested list of spike trains (grouped by condition) out of numpy
/// and delegates to `analysis::dimensionality::demixed_pca`; returns
/// (projection, explained variance) as numpy arrays.
///
/// Panics if any input train is not C-contiguous (`as_slice().unwrap()`).
fn py_demixed_pca(
    py: Python<'_>,
    conditions: Vec<Vec<PyReadonlyArray1<'_, i32>>>,
    n_components: usize,
    bin_size: usize,
) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
    // Own the data first; the borrowed `&[i32]` views below point into `vecs`.
    let vecs: Vec<Vec<Vec<i32>>> = conditions
        .iter()
        .map(|cond| {
            cond.iter()
                .map(|t| t.as_slice().unwrap().to_vec())
                .collect()
        })
        .collect();
    let refs: Vec<Vec<&[i32]>> = vecs
        .iter()
        .map(|cond| cond.iter().map(|v| v.as_slice()).collect())
        .collect();
    let (proj, expl) = analysis::dimensionality::demixed_pca(&refs, n_components, bin_size);
    (proj.into_pyarray(py).into(), expl.into_pyarray(py).into())
}
4596
4597#[pyfunction]
4598#[pyo3(signature = (trains, n_factors=3, bin_size=10, n_iter=50))]
4599fn py_factor_analysis(
4600 py: Python<'_>,
4601 trains: Vec<PyReadonlyArray1<'_, i32>>,
4602 n_factors: usize,
4603 bin_size: usize,
4604 n_iter: usize,
4605) -> (Py<PyArray1<f64>>, Py<PyArray1<f64>>) {
4606 let vecs: Vec<Vec<i32>> = trains
4607 .iter()
4608 .map(|t| t.as_slice().unwrap().to_vec())
4609 .collect();
4610 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
4611 let (loadings, psi) =
4612 analysis::dimensionality::factor_analysis(&refs, n_factors, bin_size, n_iter);
4613 (
4614 loadings.into_pyarray(py).into(),
4615 psi.into_pyarray(py).into(),
4616 )
4617}
4618
4619#[pyfunction]
4622#[pyo3(signature = (trains, n_latents=3, bin_ms=20.0, dt=0.001, max_iter=50, tol=1e-4, seed=42))]
4623fn py_gpfa<'py>(
4624 py: Python<'py>,
4625 trains: Vec<PyReadonlyArray1<'py, i32>>,
4626 n_latents: usize,
4627 bin_ms: f64,
4628 dt: f64,
4629 max_iter: usize,
4630 tol: f64,
4631 seed: u64,
4632) -> PyResult<Bound<'py, PyDict>> {
4633 let vecs: Vec<Vec<i32>> = trains
4634 .iter()
4635 .map(|t| t.as_slice().unwrap().to_vec())
4636 .collect();
4637 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
4638 let result = analysis::gpfa::gpfa(&refs, n_latents, bin_ms, dt, max_iter, tol, seed);
4639
4640 let dict = PyDict::new(py);
4641 dict.set_item("trajectories", result.trajectories.into_pyarray(py))?;
4642 dict.set_item("C", result.c.into_pyarray(py))?;
4643 dict.set_item("d", result.d.into_pyarray(py))?;
4644 dict.set_item("R", result.r.into_pyarray(py))?;
4645 dict.set_item("tau", result.tau.into_pyarray(py))?;
4646 dict.set_item("log_likelihoods", result.log_likelihoods.into_pyarray(py))?;
4647 dict.set_item("n_latents", result.n_latents)?;
4648 dict.set_item("n_bins", result.n_bins)?;
4649 dict.set_item("n_neurons", result.n_neurons)?;
4650 Ok(dict)
4651}
4652
#[pyfunction]
#[pyo3(signature = (new_trains, c, d, r, tau, n_latents, bin_ms=20.0, dt=0.001))]
/// Projects new spike trains through an already-fitted GPFA model
/// (parameters `c`, `d`, `r`, `tau`) via `analysis::gpfa::gpfa_transform`.
///
/// Panics if any numpy input is not C-contiguous (`as_slice().unwrap()`).
fn py_gpfa_transform(
    py: Python<'_>,
    new_trains: Vec<PyReadonlyArray1<'_, i32>>,
    c: PyReadonlyArray1<'_, f64>,
    d: PyReadonlyArray1<'_, f64>,
    r: PyReadonlyArray1<'_, f64>,
    tau: PyReadonlyArray1<'_, f64>,
    n_latents: usize,
    bin_ms: f64,
    dt: f64,
) -> Py<PyArray1<f64>> {
    let vecs: Vec<Vec<i32>> = new_trains
        .iter()
        .map(|t| t.as_slice().unwrap().to_vec())
        .collect();
    let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
    let proj = analysis::gpfa::gpfa_transform(
        &refs,
        c.as_slice().unwrap(),
        d.as_slice().unwrap(),
        r.as_slice().unwrap(),
        tau.as_slice().unwrap(),
        n_latents,
        bin_ms,
        dt,
    );
    proj.into_pyarray(py).into()
}
4683
4684#[pyfunction]
4687#[pyo3(signature = (trains, bin_ms=5.0, dt=0.001, min_support=3, max_pattern_size=5, n_surrogates=100, alpha=0.05, seed=42))]
4688fn py_spade_detect<'py>(
4689 py: Python<'py>,
4690 trains: Vec<PyReadonlyArray1<'py, i32>>,
4691 bin_ms: f64,
4692 dt: f64,
4693 min_support: usize,
4694 max_pattern_size: usize,
4695 n_surrogates: usize,
4696 alpha: f64,
4697 seed: u64,
4698) -> PyResult<Vec<Bound<'py, PyDict>>> {
4699 let vecs: Vec<Vec<i32>> = trains
4700 .iter()
4701 .map(|t| t.as_slice().unwrap().to_vec())
4702 .collect();
4703 let refs: Vec<&[i32]> = vecs.iter().map(|v| v.as_slice()).collect();
4704 let results = analysis::spade::spade_detect(
4705 &refs,
4706 bin_ms,
4707 dt,
4708 min_support,
4709 max_pattern_size,
4710 n_surrogates,
4711 alpha,
4712 seed,
4713 );
4714 let mut dicts = Vec::new();
4715 for pat in results {
4716 let dict = PyDict::new(py);
4717 dict.set_item(
4718 "neurons",
4719 pat.neurons.iter().map(|&n| n as i64).collect::<Vec<_>>(),
4720 )?;
4721 dict.set_item("lags", pat.lags.clone())?;
4722 dict.set_item("count", pat.count as i64)?;
4723 dict.set_item("p_value", pat.p_value)?;
4724 dicts.push(dict);
4725 }
4726 Ok(dicts)
4727}
4728
#[pyfunction]
/// Delegates to `dna::design_sequence` and converts the resulting byte vector
/// to a String; invalid UTF-8 yields an empty string (`unwrap_or_default`).
fn py_dna_design_sequence(_py: Python<'_>, length: usize, seed: u64) -> String {
    String::from_utf8(dna::design_sequence(length, seed)).unwrap_or_default()
}
4735
4736#[pyfunction]
4737fn py_dna_design_orthogonal_set(
4738 _py: Python<'_>,
4739 count: usize,
4740 length: usize,
4741 seed: u64,
4742) -> Vec<String> {
4743 dna::design_orthogonal_set(count, length, seed)
4744 .into_iter()
4745 .map(|s| String::from_utf8(s).unwrap_or_default())
4746 .collect()
4747}
4748
4749#[pyfunction]
4750fn py_dna_check_cross_hybridization<'py>(
4751 py: Python<'py>,
4752 sequences: Vec<String>,
4753 threshold: usize,
4754) -> PyResult<Py<PyAny>> {
4755 let seqs: Vec<Vec<u8>> = sequences.into_iter().map(|s| s.into_bytes()).collect();
4756 let flags = dna::check_cross_hybridization(&seqs, threshold);
4757 let result: Vec<Py<PyAny>> = flags
4758 .into_iter()
4759 .map(|(i, j, score)| {
4760 let d = PyDict::new(py);
4761 d.set_item("strand_a", i).unwrap();
4762 d.set_item("strand_b", j).unwrap();
4763 d.set_item("score", score).unwrap();
4764 d.into_any().unbind()
4765 })
4766 .collect();
4767 Ok(result.into_pyobject(py)?.into())
4768}
4769
#[pyfunction]
#[pyo3(signature = (
    gate_types, gate_inputs, gate_outputs, gate_thresholds, gate_leaks,
    input_names, input_concs, duration_s=1800.0, dt=1.0,
    k_hyb=3e5, k_disp=1.0, temperature_c=37.0, use_rk4=true
))]
#[allow(clippy::too_many_arguments)]
/// Builds `dna::DnaGateSpec`s from the parallel gate lists, assembles the
/// input-concentration map and a `dna::KineticConfig`, then runs
/// `dna::simulate_kinetics` and returns a dict of per-species concentration
/// traces as numpy arrays.
///
/// NOTE(review): the five gate_* lists are combined with `zip`, which silently
/// truncates to the shortest list if their lengths differ — confirm callers
/// always pass equal-length lists.
fn py_dna_simulate_kinetics<'py>(
    py: Python<'py>,
    gate_types: Vec<String>,
    gate_inputs: Vec<Vec<String>>,
    gate_outputs: Vec<String>,
    gate_thresholds: Vec<f64>,
    gate_leaks: Vec<f64>,
    input_names: Vec<String>,
    input_concs: Vec<f64>,
    duration_s: f64,
    dt: f64,
    k_hyb: f64,
    k_disp: f64,
    temperature_c: f64,
    use_rk4: bool,
) -> PyResult<Py<PyAny>> {
    let gates: Vec<dna::DnaGateSpec> = gate_types
        .iter()
        .zip(gate_inputs.iter())
        .zip(gate_outputs.iter())
        .zip(gate_thresholds.iter())
        .zip(gate_leaks.iter())
        .map(|((((gt, gi), go), th), lk)| {
            // Gate-type strings are matched case-insensitively.
            let gate_type = match gt.to_uppercase().as_str() {
                "AND" => dna::DnaGateType::And,
                "OR" => dna::DnaGateType::Or,
                "NOT" => dna::DnaGateType::Not,
                "THRESHOLD" => dna::DnaGateType::Threshold,
                "MUX" => dna::DnaGateType::Mux,
                "AMPLIFIER" => dna::DnaGateType::Amplifier,
                "BUFFER" => dna::DnaGateType::Buffer,
                "NAND" => dna::DnaGateType::Nand,
                "XOR" => dna::DnaGateType::Xor,
                // NOTE(review): unknown gate-type strings silently become And —
                // confirm this best-effort fallback is intended.
                _ => dna::DnaGateType::And,
            };
            dna::DnaGateSpec {
                gate_type,
                input_names: gi.clone(),
                output_name: go.clone(),
                threshold: *th,
                leak_rate: *lk,
            }
        })
        .collect();

    let mut inputs = std::collections::HashMap::new();
    for (name, conc) in input_names.into_iter().zip(input_concs) {
        inputs.insert(name, conc);
    }

    // max_conc is fixed here; only the remaining knobs are exposed to Python.
    let config = dna::KineticConfig {
        k_hyb,
        k_disp,
        temperature_c,
        max_conc: 200.0,
        use_rk4,
    };

    let result = dna::simulate_kinetics(&gates, &inputs, duration_s, dt, &config);

    let dict = PyDict::new(py);
    for (key, trace) in result {
        dict.set_item(key, trace.into_pyarray(py))?;
    }
    Ok(dict.into_any().unbind())
}
4843
#[pyfunction]
#[pyo3(signature = (sequence, min_stem=4, min_loop=3))]
/// Thin wrapper: delegates to `dna::detect_hairpins` on the sequence bytes and
/// returns its `(usize, usize, usize)` triples unchanged.
fn py_dna_detect_hairpins(
    _py: Python<'_>,
    sequence: &str,
    min_stem: usize,
    min_loop: usize,
) -> Vec<(usize, usize, usize)> {
    dna::detect_hairpins(sequence.as_bytes(), min_stem, min_loop)
}
4854
#[pyfunction]
#[pyo3(signature = (h_indices, h_values, j_i, j_j, j_values, spins, offset=0.0))]
/// Builds sparse (h, J) pairs from the parallel index/value lists and
/// delegates to `quantum::ising_energy`.
///
/// NOTE(review): `zip` silently truncates to the shortest list when the
/// parallel lists differ in length.
fn py_qa_ising_energy(
    _py: Python<'_>,
    h_indices: Vec<usize>,
    h_values: Vec<f64>,
    j_i: Vec<usize>,
    j_j: Vec<usize>,
    j_values: Vec<f64>,
    spins: Vec<i8>,
    offset: f64,
) -> f64 {
    let h: Vec<(usize, f64)> = h_indices.into_iter().zip(h_values).collect();
    let j: Vec<((usize, usize), f64)> = j_i.into_iter().zip(j_j).zip(j_values).collect();
    quantum::ising_energy(&h, &j, &spins, offset)
}
4880
#[pyfunction]
#[pyo3(signature = (h_indices, h_values, j_i, j_j, j_values, configs, offset=0.0))]
/// Batched variant of `py_qa_ising_energy`: builds sparse (h, J) pairs and
/// delegates to `quantum::batch_ising_energy` over a list of spin configs.
fn py_qa_batch_ising_energy(
    _py: Python<'_>,
    h_indices: Vec<usize>,
    h_values: Vec<f64>,
    j_i: Vec<usize>,
    j_j: Vec<usize>,
    j_values: Vec<f64>,
    configs: Vec<Vec<i8>>,
    offset: f64,
) -> Vec<f64> {
    let h: Vec<(usize, f64)> = h_indices.into_iter().zip(h_values).collect();
    let j: Vec<((usize, usize), f64)> = j_i.into_iter().zip(j_j).zip(j_values).collect();
    quantum::batch_ising_energy(&h, &j, &configs, offset)
}
4898
#[pyfunction]
#[pyo3(signature = (h_indices, h_values, j_i, j_j, j_values, n_qubits, offset=0.0, n_sweeps=1000, num_reads=10, beta_start=0.1, beta_end=10.0, seed=42))]
/// Builds sparse (h, J) pairs and runs `quantum::simulated_annealing`;
/// returns a dict with best_spins, best_energy, energies, samples, the sweep
/// settings and `backend = "rust"`.
fn py_qa_simulated_annealing<'py>(
    py: Python<'py>,
    h_indices: Vec<usize>,
    h_values: Vec<f64>,
    j_i: Vec<usize>,
    j_j: Vec<usize>,
    j_values: Vec<f64>,
    n_qubits: usize,
    offset: f64,
    n_sweeps: usize,
    num_reads: usize,
    beta_start: f64,
    beta_end: f64,
    seed: u64,
) -> PyResult<Py<PyAny>> {
    let h: Vec<(usize, f64)> = h_indices.into_iter().zip(h_values).collect();
    let j: Vec<((usize, usize), f64)> = j_i.into_iter().zip(j_j).zip(j_values).collect();

    let (best_spins, best_energy, all_energies, all_samples) = quantum::simulated_annealing(
        &h, &j, n_qubits, offset, n_sweeps, num_reads, beta_start, beta_end, seed,
    );

    let dict = PyDict::new(py);
    dict.set_item("best_spins", best_spins)?;
    dict.set_item("best_energy", best_energy)?;
    dict.set_item("energies", all_energies)?;
    dict.set_item("samples", all_samples)?;
    dict.set_item("n_sweeps", n_sweeps)?;
    dict.set_item("num_reads", num_reads)?;
    dict.set_item("backend", "rust")?;
    Ok(dict.into_any().unbind())
}
4936
#[pyfunction]
/// Builds sparse (h, J) pairs from the parallel lists and delegates to
/// `quantum::gauge_transform` with the given gauge vector; returns the
/// transformed sparse h and J lists.
#[allow(clippy::type_complexity)] fn py_qa_gauge_transform(
    _py: Python<'_>,
    h_indices: Vec<usize>,
    h_values: Vec<f64>,
    j_i: Vec<usize>,
    j_j: Vec<usize>,
    j_values: Vec<f64>,
    gauge: Vec<i8>,
) -> (Vec<(usize, f64)>, Vec<((usize, usize), f64)>) {
    let h: Vec<(usize, f64)> = h_indices.into_iter().zip(h_values).collect();
    let j: Vec<((usize, usize), f64)> = j_i.into_iter().zip(j_j).zip(j_values).collect();
    quantum::gauge_transform(&h, &j, &gauge)
}
4953
#[pyfunction]
#[pyo3(signature = (n_qubits, n_gauges=10, seed=42))]
/// Thin wrapper: delegates to `quantum::generate_gauges`.
fn py_qa_generate_gauges(
    _py: Python<'_>,
    n_qubits: usize,
    n_gauges: usize,
    seed: u64,
) -> Vec<Vec<i8>> {
    quantum::generate_gauges(n_qubits, n_gauges, seed)
}
4965
#[pyfunction]
#[pyo3(signature = (n_qubits, j_i, j_j, j_values, max_partition_size=64))]
/// Builds sparse J couplings from the parallel lists and delegates to
/// `quantum::greedy_partition`.
///
/// NOTE(review): `zip` silently truncates if j_i/j_j/j_values differ in length.
fn py_qa_greedy_partition(
    _py: Python<'_>,
    n_qubits: usize,
    j_i: Vec<usize>,
    j_j: Vec<usize>,
    j_values: Vec<f64>,
    max_partition_size: usize,
) -> Vec<Vec<usize>> {
    let j: Vec<((usize, usize), f64)> = j_i.into_iter().zip(j_j).zip(j_values).collect();
    quantum::greedy_partition(n_qubits, &j, max_partition_size)
}
4980
4981#[pyfunction]
4985#[pyo3(signature = (adjacency_flat, n, pitch_um=250.0, loss_db_per_cm=2.0))]
4986fn py_ph_route_waveguides<'py>(
4987 py: Python<'py>,
4988 adjacency_flat: Vec<f64>,
4989 n: usize,
4990 pitch_um: f64,
4991 loss_db_per_cm: f64,
4992) -> PyResult<Py<PyAny>> {
4993 let result = photonic::route_waveguides(&adjacency_flat, n, pitch_um, loss_db_per_cm);
4994
4995 let dict = PyDict::new(py);
4996 let sources: Vec<usize> = result.iter().map(|r| r.source).collect();
4997 let targets: Vec<usize> = result.iter().map(|r| r.target).collect();
4998 let lengths: Vec<f64> = result.iter().map(|r| r.length_um).collect();
4999 let losses: Vec<f64> = result.iter().map(|r| r.loss_db).collect();
5000 let crossings: Vec<usize> = result.iter().map(|r| r.n_crossings).collect();
5001
5002 dict.set_item("sources", sources)?;
5003 dict.set_item("targets", targets)?;
5004 dict.set_item("lengths_um", lengths)?;
5005 dict.set_item("losses_db", losses)?;
5006 dict.set_item("crossings", crossings)?;
5007 dict.set_item("n_segments", result.len())?;
5008 dict.set_item("backend", "rust")?;
5009 Ok(dict.into_any().unbind())
5010}
5011
#[pyfunction]
/// Thin wrapper: delegates to `photonic::mzi_transfer_matrix` and returns the
/// fixed-size result as a Vec.
fn py_ph_mzi_transfer_matrix(_py: Python<'_>, phase_rad: f64) -> Vec<f64> {
    photonic::mzi_transfer_matrix(phase_rad).to_vec()
}
5017
#[pyfunction]
/// Thin wrapper: delegates to `photonic::cascade_mzi` over the given phase
/// list and returns the fixed-size result as a Vec.
fn py_ph_cascade_mzi(_py: Python<'_>, phases: Vec<f64>) -> Vec<f64> {
    photonic::cascade_mzi(&phases).to_vec()
}
5023
5024#[pyfunction]
5026#[pyo3(signature = (channel_ids, wavelengths, bandwidths, powers, adjacent_xt_db=-25.0))]
5027fn py_ph_analyze_crosstalk<'py>(
5028 py: Python<'py>,
5029 channel_ids: Vec<usize>,
5030 wavelengths: Vec<f64>,
5031 bandwidths: Vec<f64>,
5032 powers: Vec<f64>,
5033 adjacent_xt_db: f64,
5034) -> PyResult<Py<PyAny>> {
5035 let channels: Vec<(usize, f64, f64, f64)> = channel_ids
5036 .into_iter()
5037 .zip(wavelengths)
5038 .zip(bandwidths)
5039 .zip(powers)
5040 .map(|(((id, wl), bw), p)| (id, wl, bw, p))
5041 .collect();
5042
5043 let result = photonic::analyze_crosstalk(&channels, adjacent_xt_db);
5044
5045 let dict = PyDict::new(py);
5046 let ids: Vec<usize> = result.iter().map(|r| r.channel_id).collect();
5047 let xts: Vec<f64> = result.iter().map(|r| r.crosstalk_db).collect();
5048 let osnrs: Vec<f64> = result.iter().map(|r| r.osnr_db).collect();
5049 let adjs: Vec<usize> = result.iter().map(|r| r.n_adjacent).collect();
5050
5051 dict.set_item("channel_ids", ids)?;
5052 dict.set_item("crosstalk_db", xts)?;
5053 dict.set_item("osnr_db", osnrs)?;
5054 dict.set_item("n_adjacent", adjs)?;
5055 dict.set_item("backend", "rust")?;
5056 Ok(dict.into_any().unbind())
5057}
5058
5059#[pyfunction]
5061#[pyo3(signature = (wg_sources, wg_targets, wg_losses, laser_power_dbm=0.0, detector_sensitivity_dbm=-20.0))]
5062fn py_ph_analyze_power_budget<'py>(
5063 py: Python<'py>,
5064 wg_sources: Vec<usize>,
5065 wg_targets: Vec<usize>,
5066 wg_losses: Vec<f64>,
5067 laser_power_dbm: f64,
5068 detector_sensitivity_dbm: f64,
5069) -> PyResult<Py<PyAny>> {
5070 let wgs: Vec<(usize, usize, f64)> = wg_sources
5071 .into_iter()
5072 .zip(wg_targets)
5073 .zip(wg_losses)
5074 .map(|((s, t), l)| (s, t, l))
5075 .collect();
5076
5077 let result =
5078 photonic::analyze_power_budget(&wgs, &[], laser_power_dbm, detector_sensitivity_dbm);
5079
5080 let dict = PyDict::new(py);
5081 let margins: Vec<f64> = result.iter().map(|r| r.margin_db).collect();
5082 let passed: Vec<bool> = result.iter().map(|r| r.passed).collect();
5083 let total_losses: Vec<f64> = result.iter().map(|r| r.total_loss_db).collect();
5084
5085 dict.set_item("margins_db", margins)?;
5086 dict.set_item("passed", passed)?;
5087 dict.set_item("total_losses_db", total_losses)?;
5088 dict.set_item("n_paths", result.len())?;
5089 dict.set_item("backend", "rust")?;
5090 Ok(dict.into_any().unbind())
5091}
5092
#[pyfunction]
#[pyo3(signature = (num_waveguides, gap_nm, coupling_length_um, wavelength_nm=1550.0, core_index=3.48, cladding_index=1.45))]
/// Runs `photonic::analyze_crosstalk_bank` and flattens its result struct into
/// a dict (counts, geometry echo, coupling/isolation figures, safety flag,
/// `backend = "rust"`). `num_pairs` is the sum of near and far pair counts.
fn py_ph_analyze_crosstalk_bank<'py>(
    py: Python<'py>,
    num_waveguides: usize,
    gap_nm: f64,
    coupling_length_um: f64,
    wavelength_nm: f64,
    core_index: f64,
    cladding_index: f64,
) -> PyResult<Py<PyAny>> {
    let r = photonic::analyze_crosstalk_bank(
        num_waveguides,
        gap_nm,
        coupling_length_um,
        wavelength_nm,
        core_index,
        cladding_index,
    );
    let dict = PyDict::new(py);
    dict.set_item("num_waveguides", r.num_waveguides)?;
    dict.set_item("num_pairs", r.num_near_pairs + r.num_far_pairs)?;
    dict.set_item("num_near_pairs", r.num_near_pairs)?;
    dict.set_item("num_far_pairs", r.num_far_pairs)?;
    dict.set_item("gap_nm", r.gap_nm)?;
    dict.set_item("coupling_length_um", r.coupling_length_um)?;
    dict.set_item("adjacent_coupling_ratio", r.adjacent_coupling_ratio)?;
    dict.set_item("adjacent_isolation_db", r.adjacent_isolation_db)?;
    dict.set_item("next_nearest_coupling_ratio", r.next_nearest_coupling_ratio)?;
    dict.set_item("next_nearest_isolation_db", r.next_nearest_isolation_db)?;
    dict.set_item("worst_isolation_db", r.worst_isolation_db)?;
    dict.set_item("mean_coupling_ratio", r.mean_coupling_ratio)?;
    dict.set_item("max_coupling_ratio", r.max_coupling_ratio)?;
    dict.set_item("crosstalk_safe", r.crosstalk_safe)?;
    dict.set_item("backend", "rust")?;
    Ok(dict.into_any().unbind())
}
5131
#[pyfunction]
#[pyo3(signature = (pairs_a, pairs_b, gaps_nm, lengths_um, wavelength_nm=1550.0, core_index=3.48, cladding_index=1.45))]
/// Validates that the four per-pair lists have equal length, builds
/// (a, b, gap, length) tuples and runs `photonic::analyze_crosstalk_pairs`;
/// returns a dict of parallel per-pair result lists plus num_pairs and
/// `backend = "rust"`.
///
/// Raises ValueError on a length mismatch between the input lists.
fn py_ph_analyze_crosstalk_pairs<'py>(
    py: Python<'py>,
    pairs_a: Vec<usize>,
    pairs_b: Vec<usize>,
    gaps_nm: Vec<f64>,
    lengths_um: Vec<f64>,
    wavelength_nm: f64,
    core_index: f64,
    cladding_index: f64,
) -> PyResult<Py<PyAny>> {
    let n = pairs_a.len();
    if pairs_b.len() != n || gaps_nm.len() != n || lengths_um.len() != n {
        return Err(pyo3::exceptions::PyValueError::new_err(
            "pairs_a, pairs_b, gaps_nm, lengths_um must be equal length",
        ));
    }
    let pairs: Vec<(usize, usize, f64, f64)> = pairs_a
        .into_iter()
        .zip(pairs_b)
        .zip(gaps_nm)
        .zip(lengths_um)
        .map(|(((a, b), g), l)| (a, b, g, l))
        .collect();
    let results =
        photonic::analyze_crosstalk_pairs(&pairs, wavelength_nm, core_index, cladding_index);

    let dict = PyDict::new(py);
    let idx_a: Vec<usize> = results.iter().map(|r| r.index_a).collect();
    let idx_b: Vec<usize> = results.iter().map(|r| r.index_b).collect();
    let gaps: Vec<f64> = results.iter().map(|r| r.gap_nm).collect();
    let lens: Vec<f64> = results.iter().map(|r| r.coupling_length_um).collect();
    let kappas: Vec<f64> = results
        .iter()
        .map(|r| r.coupling_coefficient_per_um)
        .collect();
    let ratios: Vec<f64> = results.iter().map(|r| r.coupling_ratio).collect();
    let isos: Vec<f64> = results.iter().map(|r| r.isolation_db).collect();

    dict.set_item("pair_a", idx_a)?;
    dict.set_item("pair_b", idx_b)?;
    dict.set_item("gap_nm", gaps)?;
    dict.set_item("coupling_length_um", lens)?;
    dict.set_item("coupling_coefficient_per_um", kappas)?;
    dict.set_item("coupling_ratio", ratios)?;
    dict.set_item("isolation_db", isos)?;
    dict.set_item("num_pairs", n)?;
    dict.set_item("backend", "rust")?;
    Ok(dict.into_any().unbind())
}
5186
#[pyfunction]
#[pyo3(signature = (mac_counts, weights, max_luts, max_power, max_latency=0, t_init=1.0, t_min=0.001, alpha=0.95, max_iter=2000, seed=42))]
/// Generates per-layer candidate sets via `optimizer::generate_candidates`
/// and runs `optimizer::simulated_annealing`; returns a dict with the best
/// configuration, Pareto data and per-layer metrics, or `feasible = False`
/// when the search finds no feasible configuration.
fn py_opt_sa_search<'py>(
    py: Python<'py>,
    mac_counts: Vec<i64>,
    weights: Vec<f64>,
    max_luts: i64,
    max_power: f64,
    max_latency: i64,
    t_init: f64,
    t_min: f64,
    alpha: f64,
    max_iter: usize,
    seed: u64,
) -> PyResult<Py<PyAny>> {
    let candidates: Vec<Vec<optimizer::Candidate>> = mac_counts
        .iter()
        .map(|&mc| optimizer::generate_candidates(mc))
        .collect();

    let result = optimizer::simulated_annealing(
        &candidates,
        &weights,
        max_luts,
        max_power,
        max_latency,
        t_init,
        t_min,
        alpha,
        max_iter,
        seed,
    );

    let dict = PyDict::new(py);
    match result {
        Some(r) => {
            // Expand the chosen candidate index per layer into its metrics.
            let mut luts_list = Vec::new();
            let mut power_list = Vec::new();
            let mut acc_list = Vec::new();
            for (i, &idx) in r.best_config.iter().enumerate() {
                let c = &candidates[i][idx];
                luts_list.push(c.luts);
                power_list.push(c.power);
                acc_list.push(c.accuracy);
            }

            dict.set_item("best_config", r.best_config)?;
            dict.set_item("best_score", r.best_score)?;
            dict.set_item("pareto_luts", r.pareto_luts)?;
            dict.set_item("pareto_power", r.pareto_power)?;
            dict.set_item("pareto_score", r.pareto_score)?;
            dict.set_item("feasible", true)?;
            dict.set_item("layer_luts", luts_list)?;
            dict.set_item("layer_power", power_list)?;
            dict.set_item("layer_accuracy", acc_list)?;
        }
        None => {
            dict.set_item("feasible", false)?;
        }
    }
    dict.set_item("backend", "rust")?;
    Ok(dict.into_any().unbind())
}
5258
5259#[pyfunction]
5261fn py_opt_extract_pareto<'py>(
5262 py: Python<'py>,
5263 luts: Vec<i64>,
5264 power: Vec<f64>,
5265 score: Vec<f64>,
5266) -> PyResult<Py<PyAny>> {
5267 let indices = optimizer::extract_pareto(&luts, &power, &score);
5268 let dict = PyDict::new(py);
5269 let p_luts: Vec<i64> = indices.iter().map(|&i| luts[i]).collect();
5270 let p_power: Vec<f64> = indices.iter().map(|&i| power[i]).collect();
5271 let p_score: Vec<f64> = indices.iter().map(|&i| score[i]).collect();
5272 dict.set_item("indices", indices)?;
5273 dict.set_item("luts", p_luts)?;
5274 dict.set_item("power", p_power)?;
5275 dict.set_item("score", p_score)?;
5276 dict.set_item("backend", "rust")?;
5277 Ok(dict.into_any().unbind())
5278}
5279
#[pyfunction]
#[pyo3(signature = (genomes, mutation_rate=0.1, mutation_scale=0.1, seed=42))]
/// Mutates the genomes in place via `evo::batch_mutate_weights` and returns
/// the mutated population (the Python caller's list is not modified; pyo3
/// converts the argument into an owned Vec).
fn py_evo_batch_mutate(
    _py: Python<'_>,
    mut genomes: Vec<Vec<f64>>,
    mutation_rate: f64,
    mutation_scale: f64,
    seed: u64,
) -> Vec<Vec<f64>> {
    evo::batch_mutate_weights(&mut genomes, mutation_rate, mutation_scale, seed);
    genomes
}
5295
#[pyfunction]
/// Thin wrapper: delegates to `evo::batch_evaluate_fitness` and returns one
/// fitness value per genome.
fn py_evo_batch_fitness(
    _py: Python<'_>,
    genomes: Vec<Vec<f64>>,
    inputs: Vec<f64>,
    target: f64,
) -> Vec<f64> {
    evo::batch_evaluate_fitness(&genomes, &inputs, target)
}
5306
#[pyfunction]
#[pyo3(signature = (parents_a, parents_b, seed=42))]
/// Thin wrapper: delegates to `evo::batch_crossover` over the two parent
/// populations.
fn py_evo_batch_crossover(
    _py: Python<'_>,
    parents_a: Vec<Vec<f64>>,
    parents_b: Vec<Vec<f64>>,
    seed: u64,
) -> Vec<Vec<f64>> {
    evo::batch_crossover(&parents_a, &parents_b, seed)
}
5318
#[pyfunction]
/// Thin wrapper: delegates to `evo::population_diversity`.
fn py_evo_diversity(_py: Python<'_>, genomes: Vec<Vec<f64>>) -> f64 {
    evo::population_diversity(&genomes)
}
5324
#[pyfunction]
#[pyo3(signature = (genomes, archive, k_nearest=5))]
/// Thin wrapper: delegates to `evo::novelty_scores` with the current genomes,
/// the archive population and `k_nearest` neighbours.
fn py_evo_novelty(
    _py: Python<'_>,
    genomes: Vec<Vec<f64>>,
    archive: Vec<Vec<f64>>,
    k_nearest: usize,
) -> Vec<f64> {
    evo::novelty_scores(&genomes, &archive, k_nearest)
}
5336
#[pyfunction]
#[pyo3(signature = (fitness, n_select, tournament_size=3, seed=42))]
/// Thin wrapper: delegates to `evo::tournament_select` and returns the
/// selected indices.
fn py_evo_tournament(
    _py: Python<'_>,
    fitness: Vec<f64>,
    n_select: usize,
    tournament_size: usize,
    seed: u64,
) -> Vec<usize> {
    evo::tournament_select(&fitness, n_select, tournament_size, seed)
}
5349
5350#[pyfunction]
5368#[pyo3(signature = (
5369 obs_flat, controls_flat, t_len, p_dim, m_dim,
5370 a_flat, b_flat, c_flat, d_flat, q_flat, r_flat,
5371 mu_0, sigma_0_flat, d_dim,
5372))]
5373#[allow(clippy::too_many_arguments)]
5374fn py_lgssm_kalman_filter<'py>(
5375 py: Python<'py>,
5376 obs_flat: Vec<f64>,
5377 controls_flat: Vec<f64>,
5378 t_len: usize,
5379 p_dim: usize,
5380 m_dim: usize,
5381 a_flat: Vec<f64>,
5382 b_flat: Vec<f64>,
5383 c_flat: Vec<f64>,
5384 d_flat: Vec<f64>,
5385 q_flat: Vec<f64>,
5386 r_flat: Vec<f64>,
5387 mu_0: Vec<f64>,
5388 sigma_0_flat: Vec<f64>,
5389 d_dim: usize,
5390) -> PyResult<Py<PyAny>> {
5391 use ndarray::Array1;
5392 use ndarray::Array2;
5393
5394 let to_2d = |flat: &[f64], rows: usize, cols: usize| -> Array2<f64> {
5395 Array2::from_shape_vec((rows, cols), flat.to_vec()).expect("shape")
5396 };
5397 let obs = to_2d(&obs_flat, t_len, p_dim);
5398 let controls = to_2d(&controls_flat, t_len, m_dim);
5399 let a = to_2d(&a_flat, d_dim, d_dim);
5400 let b = to_2d(&b_flat, d_dim, m_dim);
5401 let c = to_2d(&c_flat, p_dim, d_dim);
5402 let d = to_2d(&d_flat, p_dim, m_dim);
5403 let q = to_2d(&q_flat, d_dim, d_dim);
5404 let r = to_2d(&r_flat, p_dim, p_dim);
5405 let mu_0_arr = Array1::from(mu_0);
5406 let sigma_0 = to_2d(&sigma_0_flat, d_dim, d_dim);
5407
5408 let result = lgssm::kalman_filter(
5409 obs.view(),
5410 controls.view(),
5411 a.view(),
5412 b.view(),
5413 c.view(),
5414 d.view(),
5415 q.view(),
5416 r.view(),
5417 mu_0_arr.view(),
5418 sigma_0.view(),
5419 );
5420
5421 let means: Vec<Vec<f64>> = (0..t_len)
5423 .map(|t| (0..d_dim).map(|i| result.means[(t, i)]).collect())
5424 .collect();
5425 let covs: Vec<Vec<Vec<f64>>> = (0..t_len)
5426 .map(|t| {
5427 (0..d_dim)
5428 .map(|i| (0..d_dim).map(|j| result.covariances[(t, i, j)]).collect())
5429 .collect()
5430 })
5431 .collect();
5432 let pred_means: Vec<Vec<f64>> = (0..t_len)
5433 .map(|t| (0..d_dim).map(|i| result.pred_means[(t, i)]).collect())
5434 .collect();
5435 let pred_covs: Vec<Vec<Vec<f64>>> = (0..t_len)
5436 .map(|t| {
5437 (0..d_dim)
5438 .map(|i| {
5439 (0..d_dim)
5440 .map(|j| result.pred_covariances[(t, i, j)])
5441 .collect()
5442 })
5443 .collect()
5444 })
5445 .collect();
5446
5447 let dict = PyDict::new(py);
5448 dict.set_item("means", means)?;
5449 dict.set_item("covariances", covs)?;
5450 dict.set_item("pred_means", pred_means)?;
5451 dict.set_item("pred_covariances", pred_covs)?;
5452 dict.set_item("log_likelihood", result.log_likelihood)?;
5453 dict.set_item("backend", "rust")?;
5454 Ok(dict.into_any().unbind())
5455}
5456
5457#[pyfunction]
5465fn py_inject_bitflip_u8<'py>(
5466 py: Python<'py>,
5467 bitstream: PyReadonlyArray1<'_, u8>,
5468 ber: f64,
5469 seed: u64,
5470) -> PyResult<(Py<PyArray1<u8>>, u64)> {
5471 let mut buf = bitstream.as_slice()?.to_vec();
5472 let n = fault::inject_bitflip_u8(&mut buf, ber, seed);
5473 Ok((buf.into_pyarray(py).into(), n))
5474}
5475
5476#[pyfunction]
5477fn py_inject_stuck_at_0_u8<'py>(
5478 py: Python<'py>,
5479 bitstream: PyReadonlyArray1<'_, u8>,
5480 ber: f64,
5481 seed: u64,
5482) -> PyResult<(Py<PyArray1<u8>>, u64)> {
5483 let mut buf = bitstream.as_slice()?.to_vec();
5484 let n = fault::inject_stuck_at_0_u8(&mut buf, ber, seed);
5485 Ok((buf.into_pyarray(py).into(), n))
5486}
5487
5488#[pyfunction]
5489fn py_inject_stuck_at_1_u8<'py>(
5490 py: Python<'py>,
5491 bitstream: PyReadonlyArray1<'_, u8>,
5492 ber: f64,
5493 seed: u64,
5494) -> PyResult<(Py<PyArray1<u8>>, u64)> {
5495 let mut buf = bitstream.as_slice()?.to_vec();
5496 let n = fault::inject_stuck_at_1_u8(&mut buf, ber, seed);
5497 Ok((buf.into_pyarray(py).into(), n))
5498}
5499
5500#[pyfunction]
5501fn py_inject_dropout_u8<'py>(
5502 py: Python<'py>,
5503 bitstream: PyReadonlyArray1<'_, u8>,
5504 ber: f64,
5505 seed: u64,
5506) -> PyResult<(Py<PyArray1<u8>>, u64)> {
5507 let mut buf = bitstream.as_slice()?.to_vec();
5508 let n = fault::inject_dropout_u8(&mut buf, ber, seed);
5509 Ok((buf.into_pyarray(py).into(), n))
5510}
5511
5512#[pyfunction]
5513fn py_inject_gaussian_u8<'py>(
5514 py: Python<'py>,
5515 bitstream: PyReadonlyArray1<'_, u8>,
5516 ber: f64,
5517 seed: u64,
5518) -> PyResult<(Py<PyArray1<u8>>, u64)> {
5519 let mut buf = bitstream.as_slice()?.to_vec();
5520 let n = fault::inject_gaussian_u8(&mut buf, ber, seed);
5521 Ok((buf.into_pyarray(py).into(), n))
5522}
5523
5524#[pyfunction]
5531#[pyo3(signature = (
5532 adj_offsets, adj_neighbours, adj_scc_abs, vertex_weights,
5533 part_map, parts_concat, parts_offsets,
5534 n_parts, kl_iterations, correlation_penalty,
5535))]
5536#[allow(clippy::too_many_arguments)]
5537fn py_kl_refine<'py>(
5538 py: Python<'py>,
5539 adj_offsets: PyReadonlyArray1<'_, i64>,
5540 adj_neighbours: PyReadonlyArray1<'_, i32>,
5541 adj_scc_abs: PyReadonlyArray1<'_, f64>,
5542 vertex_weights: PyReadonlyArray1<'_, f64>,
5543 part_map: PyReadonlyArray1<'_, i32>,
5544 parts_concat: PyReadonlyArray1<'_, i32>,
5545 parts_offsets: PyReadonlyArray1<'_, i64>,
5546 n_parts: i32,
5547 kl_iterations: i32,
5548 correlation_penalty: f64,
5549) -> PyResult<(Py<PyArray1<i32>>, u64)> {
5550 let mut pm = part_map.as_slice()?.to_vec();
5551 let moves = partition::kl_refine(
5552 adj_offsets.as_slice()?,
5553 adj_neighbours.as_slice()?,
5554 adj_scc_abs.as_slice()?,
5555 vertex_weights.as_slice()?,
5556 &mut pm,
5557 parts_concat.as_slice()?,
5558 parts_offsets.as_slice()?,
5559 n_parts,
5560 kl_iterations,
5561 correlation_penalty,
5562 );
5563 Ok((pm.into_pyarray(py).into(), moves))
5564}
5565
5566#[pyfunction]
5579#[pyo3(signature = (
5580 v_e, g_ampa_e, g_gaba_e, refrac_e, i_drive_e, xi_e, spikes_e_out,
5581 v_i, g_ampa_i, g_gaba_i, refrac_i, i_drive_i, xi_i, spikes_i_out,
5582 e_l, e_ampa, e_gaba, g_l, c_m, v_threshold, v_reset, t_refrac,
5583 tau_ampa, tau_gaba, sigma_e, sigma_i, dt,
5584))]
5585#[allow(clippy::too_many_arguments)]
5586fn py_ping_step<'py>(
5587 _py: Python<'py>,
5588 v_e: PyReadwriteArray1<'_, f64>,
5589 g_ampa_e: PyReadwriteArray1<'_, f64>,
5590 g_gaba_e: PyReadwriteArray1<'_, f64>,
5591 refrac_e: PyReadwriteArray1<'_, f64>,
5592 i_drive_e: PyReadonlyArray1<'_, f64>,
5593 xi_e: PyReadonlyArray1<'_, f64>,
5594 spikes_e_out: PyReadwriteArray1<'_, u8>,
5595 v_i: PyReadwriteArray1<'_, f64>,
5596 g_ampa_i: PyReadwriteArray1<'_, f64>,
5597 g_gaba_i: PyReadwriteArray1<'_, f64>,
5598 refrac_i: PyReadwriteArray1<'_, f64>,
5599 i_drive_i: PyReadonlyArray1<'_, f64>,
5600 xi_i: PyReadonlyArray1<'_, f64>,
5601 spikes_i_out: PyReadwriteArray1<'_, u8>,
5602 e_l: f64,
5603 e_ampa: f64,
5604 e_gaba: f64,
5605 g_l: f64,
5606 c_m: f64,
5607 v_threshold: f64,
5608 v_reset: f64,
5609 t_refrac: f64,
5610 tau_ampa: f64,
5611 tau_gaba: f64,
5612 sigma_e: f64,
5613 sigma_i: f64,
5614 dt: f64,
5615) -> PyResult<(u32, u32)> {
5616 let mut v_e = v_e;
5617 let mut g_ampa_e = g_ampa_e;
5618 let mut g_gaba_e = g_gaba_e;
5619 let mut refrac_e = refrac_e;
5620 let mut spikes_e_out = spikes_e_out;
5621 let mut v_i = v_i;
5622 let mut g_ampa_i = g_ampa_i;
5623 let mut g_gaba_i = g_gaba_i;
5624 let mut refrac_i = refrac_i;
5625 let mut spikes_i_out = spikes_i_out;
5626 let (ne, ni) = ping::step_kernel(
5627 v_e.as_slice_mut()?,
5628 g_ampa_e.as_slice_mut()?,
5629 g_gaba_e.as_slice_mut()?,
5630 refrac_e.as_slice_mut()?,
5631 i_drive_e.as_slice()?,
5632 xi_e.as_slice()?,
5633 spikes_e_out.as_slice_mut()?,
5634 v_i.as_slice_mut()?,
5635 g_ampa_i.as_slice_mut()?,
5636 g_gaba_i.as_slice_mut()?,
5637 refrac_i.as_slice_mut()?,
5638 i_drive_i.as_slice()?,
5639 xi_i.as_slice()?,
5640 spikes_i_out.as_slice_mut()?,
5641 e_l,
5642 e_ampa,
5643 e_gaba,
5644 g_l,
5645 c_m,
5646 v_threshold,
5647 v_reset,
5648 t_refrac,
5649 tau_ampa,
5650 tau_gaba,
5651 sigma_e,
5652 sigma_i,
5653 dt,
5654 );
5655 Ok((ne, ni))
5656}
5657
5658#[pyfunction]
5668#[pyo3(signature = (indptr, indices, data, x, y))]
5669fn py_parallel_csr_spmv_add(
5670 indptr: PyReadonlyArray1<'_, i32>,
5671 indices: PyReadonlyArray1<'_, i32>,
5672 data: PyReadonlyArray1<'_, f64>,
5673 x: PyReadonlyArray1<'_, f64>,
5674 y: PyReadwriteArray1<'_, f64>,
5675) -> PyResult<()> {
5676 let mut y = y;
5677 cortical_inject::parallel_csr_spmv_add(
5678 indptr.as_slice()?,
5679 indices.as_slice()?,
5680 data.as_slice()?,
5681 x.as_slice()?,
5682 y.as_slice_mut()?,
5683 );
5684 Ok(())
5685}
5686
5687#[pyfunction]
5693#[pyo3(signature = (indptrs, indices_list, data_list, xs, y))]
5694fn py_parallel_csr_multi_spmv_add(
5695 indptrs: Vec<PyReadonlyArray1<'_, i32>>,
5696 indices_list: Vec<PyReadonlyArray1<'_, i32>>,
5697 data_list: Vec<PyReadonlyArray1<'_, f64>>,
5698 xs: Vec<PyReadonlyArray1<'_, f64>>,
5699 y: PyReadwriteArray1<'_, f64>,
5700) -> PyResult<()> {
5701 let mut y = y;
5702 let indptr_slices: Vec<&[i32]> = indptrs
5703 .iter()
5704 .map(|a| a.as_slice())
5705 .collect::<Result<_, _>>()?;
5706 let indices_slices: Vec<&[i32]> = indices_list
5707 .iter()
5708 .map(|a| a.as_slice())
5709 .collect::<Result<_, _>>()?;
5710 let data_slices: Vec<&[f64]> = data_list
5711 .iter()
5712 .map(|a| a.as_slice())
5713 .collect::<Result<_, _>>()?;
5714 let x_slices: Vec<&[f64]> = xs.iter().map(|a| a.as_slice()).collect::<Result<_, _>>()?;
5715 cortical_inject::parallel_csr_multi_spmv_add(
5716 &indptr_slices,
5717 &indices_slices,
5718 &data_slices,
5719 &x_slices,
5720 y.as_slice_mut()?,
5721 );
5722 Ok(())
5723}