Skip to main content

sc_neurocore_engine/analysis/
network.rs

1// SPDX-License-Identifier: AGPL-3.0-or-later
2// Commercial license available
3// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
4// © Code 2020–2026 Miroslav Šotek. All rights reserved.
5// ORCID: 0009-0009-3560-0851
6// Contact: www.anulum.li | protoscience@anulum.li
7// SC-NeuroCore — Network-level spike train analysis
8
9use super::basic::bin_spike_train;
10use super::correlation::cross_correlation;
11
12/// Functional connectivity matrix from peak cross-correlation.
13/// Returns n×n matrix (flat, row-major) where (i,j) = max |cc| between neurons i and j.
14pub fn functional_connectivity(trains: &[&[i32]], max_lag_ms: f64, dt: f64) -> Vec<f64> {
15    let n = trains.len();
16    let mut mat = vec![0.0_f64; n * n];
17    for i in 0..n {
18        mat[i * n + i] = 1.0;
19        for j in (i + 1)..n {
20            let (cc, _) = cross_correlation(trains[i], trains[j], max_lag_ms, dt);
21            let peak = cc.iter().map(|v| v.abs()).fold(0.0_f64, f64::max);
22            mat[i * n + j] = peak;
23            mat[j * n + i] = peak;
24        }
25    }
26    mat
27}
28
29/// Unitary event analysis (Gruen et al. 2002).
30/// Detects bins where coincident spikes exceed Poisson chance.
31/// Returns list of significant bin indices.
32pub fn unitary_events(trains: &[&[i32]], bin_size: usize, alpha: f64) -> Vec<usize> {
33    let n_trains = trains.len();
34    if n_trains < 2 {
35        return vec![];
36    }
37    let binned: Vec<Vec<i64>> = trains
38        .iter()
39        .map(|t| bin_spike_train(t, bin_size))
40        .collect();
41    let min_bins = binned.iter().map(|b| b.len()).min().unwrap_or(0);
42    if min_bins == 0 {
43        return vec![];
44    }
45
46    // Active matrix (binary)
47    let active: Vec<Vec<bool>> = binned
48        .iter()
49        .map(|b| b[..min_bins].iter().map(|&v| v > 0).collect())
50        .collect();
51
52    // Mean rates per neuron
53    let rates: Vec<f64> = active
54        .iter()
55        .map(|row| row.iter().filter(|&&v| v).count() as f64 / min_bins as f64)
56        .collect();
57
58    let expected_rate: f64 = rates.iter().product::<f64>().powi(n_trains as i32);
59
60    let mut significant = Vec::new();
61    for k in 0..min_bins {
62        let all_active = (0..n_trains).all(|i| active[i][k]);
63        if all_active && expected_rate < alpha {
64            significant.push(k);
65        }
66    }
67    significant
68}
69
70/// Cell assembly detection via PCA on binned spike matrix (Lopes-dos-Santos et al. 2013).
71/// Returns list of assemblies (each a list of neuron indices).
72pub fn cell_assembly_detection(
73    trains: &[&[i32]],
74    bin_size: usize,
75    threshold: f64,
76) -> Vec<Vec<usize>> {
77    let n = trains.len();
78    if n < 3 {
79        return vec![];
80    }
81    let binned: Vec<Vec<f64>> = trains
82        .iter()
83        .map(|t| {
84            bin_spike_train(t, bin_size)
85                .iter()
86                .map(|&v| v as f64)
87                .collect()
88        })
89        .collect();
90    let min_bins = binned.iter().map(|b| b.len()).min().unwrap_or(0);
91    if min_bins < 2 {
92        return vec![];
93    }
94
95    // Z-score each neuron's binned counts
96    let mut mat: Vec<Vec<f64>> = binned.iter().map(|b| b[..min_bins].to_vec()).collect();
97    for row in &mut mat {
98        let mean = row.iter().sum::<f64>() / min_bins as f64;
99        let std = (row.iter().map(|v| (v - mean).powi(2)).sum::<f64>() / min_bins as f64)
100            .sqrt()
101            .max(1e-30);
102        for v in row.iter_mut() {
103            *v = (*v - mean) / std;
104        }
105    }
106
107    // Correlation matrix: C = mat * mat^T / T
108    let mut corr = vec![0.0_f64; n * n];
109    for i in 0..n {
110        for j in i..n {
111            let mut s = 0.0;
112            for k in 0..min_bins {
113                s += mat[i][k] * mat[j][k];
114            }
115            let c = s / min_bins as f64;
116            corr[i * n + j] = c;
117            corr[j * n + i] = c;
118        }
119    }
120
121    // Eigendecomposition via Jacobi iteration
122    let (eigvals, eigvecs) = jacobi_eigen(&corr, n, 100);
123
124    // Marcenko-Pastur upper bound
125    let q = n as f64 / min_bins as f64;
126    let mp_upper = (1.0 + q.sqrt()).powi(2);
127
128    let thresh_scaled = threshold / (n as f64).sqrt();
129    let mut assemblies = Vec::new();
130    for i in 0..n {
131        if eigvals[i] > mp_upper {
132            let members: Vec<usize> = (0..n)
133                .filter(|&j| eigvecs[j * n + i].abs() > thresh_scaled)
134                .collect();
135            if members.len() >= 2 {
136                assemblies.push(members);
137            }
138        }
139    }
140    assemblies
141}
142
143/// Synfire chain detection via cross-correlation peak ordering (Abeles 1991).
144/// Returns list of chains (ordered neuron indices).
145pub fn synfire_chain_detection(
146    trains: &[&[i32]],
147    dt: f64,
148    max_delay_ms: f64,
149    min_chain_length: usize,
150) -> Vec<Vec<usize>> {
151    let n = trains.len();
152    if n < min_chain_length {
153        return vec![];
154    }
155
156    // Peak lag matrix
157    let mut peak_lags = vec![0.0_f64; n * n];
158    for i in 0..n {
159        for j in 0..n {
160            if i == j {
161                continue;
162            }
163            let (cc, lags) = cross_correlation(trains[i], trains[j], max_delay_ms, dt);
164            if !cc.is_empty() {
165                let peak_idx = cc
166                    .iter()
167                    .enumerate()
168                    .max_by(|(_, a), (_, b)| a.partial_cmp(b).unwrap())
169                    .map(|(idx, _)| idx)
170                    .unwrap_or(0);
171                peak_lags[i * n + j] = lags[peak_idx];
172            }
173        }
174    }
175
176    let mut chains = Vec::new();
177    let mut visited = vec![false; n];
178
179    for start in 0..n {
180        if visited[start] {
181            continue;
182        }
183        let mut chain = vec![start];
184        let mut current = start;
185        for _ in 0..n {
186            let mut candidates: Vec<(f64, usize)> = Vec::new();
187            for j in 0..n {
188                if chain.contains(&j) {
189                    continue;
190                }
191                let lag = peak_lags[current * n + j];
192                if lag > 0.0 && lag <= max_delay_ms {
193                    candidates.push((lag, j));
194                }
195            }
196            if candidates.is_empty() {
197                break;
198            }
199            candidates.sort_by(|a, b| a.0.partial_cmp(&b.0).unwrap());
200            let nxt = candidates[0].1;
201            chain.push(nxt);
202            current = nxt;
203        }
204        if chain.len() >= min_chain_length {
205            for &idx in &chain {
206                visited[idx] = true;
207            }
208            chains.push(chain);
209        }
210    }
211    chains
212}
213
/// Jacobi eigenvalue algorithm for a symmetric n×n matrix (row-major flat).
/// `max_iter` bounds the number of Givens rotations (classical pivot choice:
/// largest off-diagonal element per sweep).
///
/// Returns (eigenvalues sorted ascending, eigenvectors as an n×n flat
/// row-major matrix whose column k is the eigenvector for eigenvalue k).
///
/// Fixes vs. previous version: the doc promised ascending eigenvalues but the
/// code returned them in diagonal order — eigenpairs are now sorted (with the
/// eigenvector columns permuted to keep the value↔column pairing), and the
/// rotation is applied in place instead of cloning the full matrix each step.
fn jacobi_eigen(a: &[f64], n: usize, max_iter: usize) -> (Vec<f64>, Vec<f64>) {
    let mut m = a.to_vec();
    // V starts as the identity and accumulates the rotations.
    let mut v = vec![0.0_f64; n * n];
    for i in 0..n {
        v[i * n + i] = 1.0;
    }

    for _ in 0..max_iter {
        // Find the largest off-diagonal entry (classical Jacobi pivot).
        let mut max_val = 0.0_f64;
        let mut p = 0;
        let mut q = 1;
        for i in 0..n {
            for j in (i + 1)..n {
                let val = m[i * n + j].abs();
                if val > max_val {
                    max_val = val;
                    p = i;
                    q = j;
                }
            }
        }
        // Converged: the matrix is numerically diagonal.
        if max_val < 1e-12 {
            break;
        }

        // Rotation angle that annihilates m[p][q].
        let app = m[p * n + p];
        let aqq = m[q * n + q];
        let apq = m[p * n + q];
        let theta = if (app - aqq).abs() < 1e-30 {
            std::f64::consts::FRAC_PI_4
        } else {
            0.5 * (2.0 * apq / (app - aqq)).atan()
        };
        let c = theta.cos();
        let s = theta.sin();

        // Apply the Givens rotation to rows/cols p, q in place: each pair of
        // affected entries is read before either is written, so no clone of
        // the whole matrix is needed.
        for i in 0..n {
            if i == p || i == q {
                continue;
            }
            let mip = m[i * n + p];
            let miq = m[i * n + q];
            let new_ip = c * mip + s * miq;
            let new_iq = -s * mip + c * miq;
            m[i * n + p] = new_ip;
            m[p * n + i] = new_ip;
            m[i * n + q] = new_iq;
            m[q * n + i] = new_iq;
        }
        m[p * n + p] = c * c * app + 2.0 * s * c * apq + s * s * aqq;
        m[q * n + q] = s * s * app - 2.0 * s * c * apq + c * c * aqq;
        m[p * n + q] = 0.0;
        m[q * n + p] = 0.0;

        // Accumulate the rotation into the eigenvector matrix (V = V·G).
        for i in 0..n {
            let vp = v[i * n + p];
            let vq = v[i * n + q];
            v[i * n + p] = c * vp + s * vq;
            v[i * n + q] = -s * vp + c * vq;
        }
    }

    // Sort eigenpairs ascending, permuting eigenvector columns to match so
    // eigvals[k] always corresponds to column k of the returned matrix.
    let mut order: Vec<usize> = (0..n).collect();
    order.sort_by(|&i, &j| m[i * n + i].partial_cmp(&m[j * n + j]).unwrap());
    let eigvals: Vec<f64> = order.iter().map(|&i| m[i * n + i]).collect();
    let mut sorted_v = vec![0.0_f64; n * n];
    for (new_col, &old_col) in order.iter().enumerate() {
        for row in 0..n {
            sorted_v[row * n + new_col] = v[row * n + old_col];
        }
    }
    (eigvals, sorted_v)
}
284
// Unit tests for the network-level analysis routines in this file.
#[cfg(test)]
mod tests {
    use super::*;

    // Build a binary spike train of length `len` with 1s at the given sample indices.
    fn make_train(spikes: &[usize], len: usize) -> Vec<i32> {
        let mut t = vec![0i32; len];
        for &s in spikes {
            t[s] = 1;
        }
        t
    }

    // ── functional_connectivity ─────────────────────────────────────

    #[test]
    fn test_fc_diagonal_one() {
        let t1 = make_train(&[10, 30, 50], 100);
        let t2 = make_train(&[20, 40, 60], 100);
        let trains: Vec<&[i32]> = vec![&t1, &t2];
        let mat = functional_connectivity(&trains, 20.0, 0.001);
        // mat[0] = (0,0) and mat[3] = (1,1) in the flat 2×2 row-major layout.
        assert!((mat[0] - 1.0).abs() < 1e-10, "diagonal should be 1.0");
        assert!((mat[3] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_fc_symmetric() {
        let t1 = make_train(&[10, 30, 50], 100);
        let t2 = make_train(&[12, 32, 52], 100);
        let trains: Vec<&[i32]> = vec![&t1, &t2];
        let mat = functional_connectivity(&trains, 20.0, 0.001);
        // mat[1] = (0,1), mat[2] = (1,0).
        assert!((mat[1] - mat[2]).abs() < 1e-10, "should be symmetric");
    }

    #[test]
    fn test_fc_identical_high() {
        let t = make_train(&[10, 30, 50, 70, 90], 100);
        let trains: Vec<&[i32]> = vec![&t, &t];
        let mat = functional_connectivity(&trains, 20.0, 0.001);
        assert!(
            mat[1] > 0.9,
            "identical trains → high connectivity, got {}",
            mat[1]
        );
    }

    // ── unitary_events ──────────────────────────────────────────────

    #[test]
    fn test_ue_coincident() {
        // Sparse trains: bin_size=10 over 200 samples gives 20 bins; each train
        // is active in bins 0 and 5 only → per-train rate = 2/20 = 0.1, so the
        // expected joint-activity rate is well below alpha = 0.05.
        let t1 = make_train(&[5, 55], 200);
        let t2 = make_train(&[5, 55], 200);
        let trains: Vec<&[i32]> = vec![&t1, &t2];
        let ue = unitary_events(&trains, 10, 0.05);
        assert!(
            !ue.is_empty(),
            "sparse coincident spikes → significant bins"
        );
    }

    #[test]
    fn test_ue_single_train() {
        let t = make_train(&[5, 15], 50);
        let trains: Vec<&[i32]> = vec![&t];
        assert!(
            unitary_events(&trains, 5, 0.05).is_empty(),
            "need ≥2 trains"
        );
    }

    #[test]
    fn test_ue_empty() {
        let t1 = vec![0i32; 50];
        let t2 = vec![0i32; 50];
        let trains: Vec<&[i32]> = vec![&t1, &t2];
        assert!(
            unitary_events(&trains, 5, 0.05).is_empty(),
            "no spikes → no events"
        );
    }

    // ── cell_assembly_detection ─────────────────────────────────────

    #[test]
    fn test_assembly_too_few_neurons() {
        let t1 = make_train(&[5, 15], 50);
        let t2 = make_train(&[5, 15], 50);
        let trains: Vec<&[i32]> = vec![&t1, &t2];
        assert!(
            cell_assembly_detection(&trains, 5, 2.0).is_empty(),
            "need ≥3 neurons"
        );
    }

    #[test]
    fn test_assembly_correlated_group() {
        // Neurons 0,1,2 fire together; neuron 3 fires independently
        let sync = make_train(&[0, 1, 10, 11, 20, 21, 30, 31, 40, 41], 50);
        let indep = make_train(&[3, 7, 13, 17, 23, 27, 33, 37, 43, 47], 50);
        let trains: Vec<&[i32]> = vec![&sync, &sync, &sync, &indep];
        let assemblies = cell_assembly_detection(&trains, 5, 1.0);
        // May or may not detect assembly depending on eigenstructure
        // Just verify it doesn't panic and returns valid indices
        for asm in &assemblies {
            for &idx in asm {
                assert!(idx < 4, "index out of bounds");
            }
        }
    }

    // ── synfire_chain_detection ─────────────────────────────────────

    #[test]
    fn test_synfire_sequential() {
        // Neurons fire in sequence: 0→1→2 with ~5ms delays
        let t0 = make_train(&[10, 30, 50, 70, 90], 100);
        let t1 = make_train(&[15, 35, 55, 75, 95], 100);
        let t2 = make_train(&[20, 40, 60, 80], 100);
        let trains: Vec<&[i32]> = vec![&t0, &t1, &t2];
        let chains = synfire_chain_detection(&trains, 0.001, 10.0, 3);
        // Should detect at least one chain
        if !chains.is_empty() {
            assert!(chains[0].len() >= 3, "chain should have ≥3 neurons");
        }
    }

    #[test]
    fn test_synfire_too_few() {
        let t = make_train(&[10, 30], 50);
        let trains: Vec<&[i32]> = vec![&t, &t];
        assert!(
            synfire_chain_detection(&trains, 0.001, 10.0, 3).is_empty(),
            "need ≥ min_chain_length neurons"
        );
    }

    // ── jacobi_eigen ────────────────────────────────────────────────

    #[test]
    fn test_jacobi_identity() {
        let a = vec![1.0, 0.0, 0.0, 1.0];
        let (vals, _) = jacobi_eigen(&a, 2, 100);
        assert!((vals[0] - 1.0).abs() < 1e-10);
        assert!((vals[1] - 1.0).abs() < 1e-10);
    }

    #[test]
    fn test_jacobi_diagonal() {
        let a = vec![3.0, 0.0, 0.0, 7.0];
        let (vals, _) = jacobi_eigen(&a, 2, 100);
        // Sort locally — the assertion is order-independent.
        let mut sorted = vals.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        assert!((sorted[0] - 3.0).abs() < 1e-10);
        assert!((sorted[1] - 7.0).abs() < 1e-10);
    }

    #[test]
    fn test_jacobi_symmetric() {
        // [[2, 1], [1, 2]] → eigenvalues 1 and 3
        let a = vec![2.0, 1.0, 1.0, 2.0];
        let (vals, _) = jacobi_eigen(&a, 2, 100);
        let mut sorted = vals.clone();
        sorted.sort_by(|a, b| a.partial_cmp(b).unwrap());
        assert!(
            (sorted[0] - 1.0).abs() < 1e-8,
            "eigenvalue 1, got {}",
            sorted[0]
        );
        assert!(
            (sorted[1] - 3.0).abs() < 1e-8,
            "eigenvalue 3, got {}",
            sorted[1]
        );
    }

    #[test]
    fn test_jacobi_eigenvectors_orthogonal() {
        let a = vec![2.0, 1.0, 1.0, 2.0];
        let (_, v) = jacobi_eigen(&a, 2, 100);
        // v[:,0] . v[:,1] should be ~0
        let dot: f64 = (0..2).map(|i| v[i * 2] * v[i * 2 + 1]).sum();
        assert!(
            dot.abs() < 1e-8,
            "eigenvectors should be orthogonal, dot={dot}"
        );
    }
}