sc_neurocore_engine/analysis/
variability.rs

// SPDX-License-Identifier: AGPL-3.0-or-later
// Commercial license available
// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
// © Code 2020–2026 Miroslav Šotek. All rights reserved.
// ORCID: 0009-0009-3560-0851
// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — Spike train variability and regularity measures

use super::basic::isi;

/// Coefficient of variation of ISI. CV = 1 for Poisson, < 1 for regular firing.
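/// Computed as CV = σ(ISI) / μ(ISI).
///
/// A quick sanity check, as an illustrative sketch only (the fence is marked
/// `ignore`, so it is not compiled as a doc-test):
///
/// ```ignore
/// // A spike every 10 bins at dt = 1 ms gives identical ISIs, so CV ≈ 0.
/// let train: Vec<i32> = (0..100).map(|i| if i % 10 == 0 { 1 } else { 0 }).collect();
/// assert!(cv_isi(&train, 0.001) < 1e-6);
/// ```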
pub fn cv_isi(binary_train: &[i32], dt: f64) -> f64 {
    let intervals = isi(binary_train, dt);
    if intervals.len() < 2 {
        return f64::NAN;
    }
    let mu: f64 = intervals.iter().sum::<f64>() / intervals.len() as f64;
    if mu == 0.0 {
        return f64::NAN;
    }
    let var: f64 =
        intervals.iter().map(|&x| (x - mu) * (x - mu)).sum::<f64>() / intervals.len() as f64;
    var.sqrt() / mu
}

/// Local coefficient of variation CV2. Holt et al. 1996.
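/// For each adjacent pair of intervals, CV2_i = 2·|I_{i+1} − I_i| / (I_{i+1} + I_i);
/// the function returns the mean over all pairs (≈ 1 for Poisson, 0 for regular firing).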
pub fn cv2(binary_train: &[i32], dt: f64) -> f64 {
    let intervals = isi(binary_train, dt);
    if intervals.len() < 2 {
        return f64::NAN;
    }
    let mut sum = 0.0;
    let mut count = 0;
    for i in 0..intervals.len() - 1 {
        let s = intervals[i] + intervals[i + 1];
        if s > 0.0 {
            sum += 2.0 * (intervals[i + 1] - intervals[i]).abs() / s;
            count += 1;
        }
    }
    if count == 0 {
        f64::NAN
    } else {
        sum / count as f64
    }
}

/// Local variation LV. Shinomoto et al. 2003.
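/// LV = 3/(n−1) · Σ ((I_i − I_{i+1}) / (I_i + I_{i+1}))² over consecutive
/// interval pairs; ≈ 1 for Poisson trains, → 0 for perfectly regular ones.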
pub fn local_variation(binary_train: &[i32], dt: f64) -> f64 {
    let intervals = isi(binary_train, dt);
    let n = intervals.len();
    if n < 2 {
        return f64::NAN;
    }
    let mut sum = 0.0;
    let mut count = 0;
    for i in 0..n - 1 {
        let s = intervals[i] + intervals[i + 1];
        if s > 0.0 {
            let d = intervals[i + 1] - intervals[i];
            sum += (d / s) * (d / s);
            count += 1;
        }
    }
    if count == 0 {
        f64::NAN
    } else {
        3.0 / (n - 1) as f64 * sum
    }
}

/// Revised local variation LvR. Shinomoto et al. 2009.
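/// LvR = 3/(n−1) · Σ (1 − 4·I_i·I_{i+1} / (I_i + I_{i+1})²) · (1 + 4R / (I_i + I_{i+1})),
/// where R is the refractory period; `refractoriness_ms` is divided by 1000 on the
/// assumption that `isi` returns intervals in seconds.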
pub fn lvr(binary_train: &[i32], dt: f64, refractoriness_ms: f64) -> f64 {
    let intervals = isi(binary_train, dt);
    let n = intervals.len();
    if n < 2 {
        return f64::NAN;
    }
    let r = refractoriness_ms / 1000.0;
    let mut result = 0.0;
    let mut count = 0;
    for i in 0..n - 1 {
        let s = intervals[i] + intervals[i + 1];
        if s <= 0.0 {
            continue;
        }
        let ratio = 4.0 * intervals[i] * intervals[i + 1] / (s * s);
        result += (1.0 - ratio) * (1.0 + 4.0 * r / s);
        count += 1;
    }
    if count == 0 {
        f64::NAN
    } else {
        3.0 * result / count as f64
    }
}

/// Fano factor: variance/mean of spike counts in non-overlapping windows.
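/// FF = Var(N) / E[N] for the per-window spike counts N; FF = 1 for a
/// homogeneous Poisson process, < 1 for more regular trains.
///
/// A minimal sketch (marked `ignore`, so it is not compiled as a doc-test):
///
/// ```ignore
/// // One spike every 10 ms, counted in 50 ms windows at dt = 1 ms: every
/// // window holds exactly 5 spikes, so the variance and FF are ~0.
/// let train: Vec<i32> = (0..1000).map(|i| if i % 10 == 0 { 1 } else { 0 }).collect();
/// assert!(fano_factor(&train, 50.0, 0.001) < 1e-12);
/// ```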
pub fn fano_factor(binary_train: &[i32], window_ms: f64, dt: f64) -> f64 {
    let window_steps = (window_ms / (dt * 1000.0)).round().max(1.0) as usize;
    let n = binary_train.len();
    if n < window_steps {
        return f64::NAN;
    }
    let n_windows = n / window_steps;
    let counts: Vec<f64> = (0..n_windows)
        .map(|i| {
            binary_train[i * window_steps..(i + 1) * window_steps]
                .iter()
                .map(|&s| s as f64)
                .sum()
        })
        .collect();
    let mu: f64 = counts.iter().sum::<f64>() / counts.len() as f64;
    if mu == 0.0 {
        return f64::NAN;
    }
    let var: f64 = counts.iter().map(|&c| (c - mu) * (c - mu)).sum::<f64>() / counts.len() as f64;
    var / mu
}

/// Shannon entropy of the ISI distribution (bits).
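/// H = −Σ p_b · log2(p_b), where p_b is the fraction of intervals falling in
/// histogram bin b (of `bins` equal-width bins spanning the observed range).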
pub fn isi_entropy(binary_train: &[i32], dt: f64, bins: usize) -> f64 {
    let intervals = isi(binary_train, dt);
    if intervals.len() < 2 || bins == 0 {
        return f64::NAN;
    }
    let min_v = intervals.iter().cloned().fold(f64::INFINITY, f64::min);
    let max_v = intervals.iter().cloned().fold(f64::NEG_INFINITY, f64::max);
    let range = max_v - min_v;
    if range <= 0.0 {
        return 0.0;
    }
    let bin_width = range / bins as f64;
    let mut hist = vec![0usize; bins];
    for &v in &intervals {
        let b = ((v - min_v) / bin_width).floor() as usize;
        hist[b.min(bins - 1)] += 1;
    }
    let total = intervals.len() as f64;
    let mut h = 0.0;
    for &c in &hist {
        if c > 0 {
            // Discrete Shannon entropy over the bin probabilities p = c / total.
            let p = c as f64 / total;
            h -= p * p.log2();
        }
    }
    h
}

/// Lempel-Ziv 1976 complexity. Normalised by n / log2(n).
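/// Counts the phrases c(n) of the LZ76 exhaustive parsing (Kaspar–Schuster scan)
/// and returns c(n) / (n / log2(n)); ≈ 1 for random binary strings, → 0 for
/// highly regular ones.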
pub fn lempel_ziv_complexity(binary_train: &[i32]) -> f64 {
    let n = binary_train.len();
    if n == 0 {
        return 0.0;
    }
    let s: Vec<u8> = binary_train
        .iter()
        .map(|&v| if v > 0 { 1 } else { 0 })
        .collect();
    // Kaspar-Schuster scan counting the phrases of the LZ76 parsing.
    let mut complexity = 1usize; // the first symbol is the first phrase
    let mut l = 1usize; // length of the already-parsed prefix
    let mut i = 0usize; // candidate match start within the prefix
    let mut k = 1usize; // current match length
    let mut k_max = 1usize; // longest match found for this phrase
    while l + k <= n {
        if s[i + k - 1] == s[l + k - 1] {
            k += 1;
            if l + k > n {
                // The string ends inside this phrase; count it and stop.
                complexity += 1;
            }
        } else {
            k_max = k_max.max(k);
            i += 1;
            if i == l {
                // No prefix position extends the match: close the phrase.
                complexity += 1;
                l += k_max;
                i = 0;
                k = 1;
                k_max = 1;
            } else {
                k = 1;
            }
        }
    }
    let norm = n as f64 / (n as f64).max(2.0).log2();
    complexity as f64 / norm
}

/// Approximate entropy (ApEn). Pincus 1991.
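/// ApEn(m, r) = Φ_m(r) − Φ_{m+1}(r), where Φ_d(r) is the mean log fraction of
/// length-d template vectors lying within Chebyshev distance r = r_factor·σ of
/// each template (self-matches included, as in Pincus's original definition).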
pub fn approximate_entropy(binary_train: &[i32], m: usize, r_factor: f64) -> f64 {
    let x: Vec<f64> = binary_train.iter().map(|&v| v as f64).collect();
    let n = x.len();
    if n < m + 2 {
        return f64::NAN;
    }
    let mu: f64 = x.iter().sum::<f64>() / n as f64;
    let var: f64 = x.iter().map(|&v| (v - mu) * (v - mu)).sum::<f64>() / n as f64;
    let std = var.sqrt();
    let r = if std > 0.0 { r_factor * std } else { 0.01 };

    let phi = |dim: usize| -> f64 {
        if n < dim {
            return 0.0;
        }
        let nt = n - dim + 1;
        let mut log_sum = 0.0;
        for i in 0..nt {
            let mut count = 0usize;
            for j in 0..nt {
                let max_diff = (0..dim)
                    .map(|k| (x[i + k] - x[j + k]).abs())
                    .fold(0.0_f64, f64::max);
                if max_diff <= r {
                    count += 1;
                }
            }
            log_sum += (count as f64 / nt as f64 + 1e-30).ln();
        }
        log_sum / nt as f64
    };

    phi(m) - phi(m + 1)
}

/// Sample entropy (SampEn). Richman & Moorman 2000.
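/// SampEn(m, r) = −ln(A / B), where B counts template pairs of length m and A
/// those of length m + 1 within Chebyshev distance r = r_factor·σ; unlike ApEn,
/// self-matches are excluded.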
pub fn sample_entropy(binary_train: &[i32], m: usize, r_factor: f64) -> f64 {
    let x: Vec<f64> = binary_train.iter().map(|&v| v as f64).collect();
    let n = x.len();
    if n < m + 2 {
        return f64::NAN;
    }
    let mu: f64 = x.iter().sum::<f64>() / n as f64;
    let var: f64 = x.iter().map(|&v| (v - mu) * (v - mu)).sum::<f64>() / n as f64;
    let std = var.sqrt();
    let r = if std > 0.0 { r_factor * std } else { 0.01 };

    let count_matches = |dim: usize| -> usize {
        let nt = n - dim;
        let mut total = 0;
        for i in 0..nt {
            for j in (i + 1)..nt {
                let max_diff = (0..dim)
                    .map(|k| (x[i + k] - x[j + k]).abs())
                    .fold(0.0_f64, f64::max);
                if max_diff <= r {
                    total += 1;
                }
            }
        }
        total
    };

    let a = count_matches(m + 1);
    let b = count_matches(m);
    if b == 0 {
        return f64::NAN;
    }
    -((a as f64 + 1e-30) / (b as f64 + 1e-30)).ln()
}

/// Bandt-Pompe permutation entropy. Normalised to [0, 1].
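/// H = −Σ p(π) · log2 p(π) over the ordinal patterns π of `order` samples taken
/// at lag `delay`, divided by log2(order!) so that 0 means fully predictable and
/// 1 means all patterns equally likely.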
pub fn permutation_entropy(binary_train: &[i32], order: usize, delay: usize) -> f64 {
    let x: Vec<f64> = binary_train.iter().map(|&v| v as f64).collect();
    let n = x.len();
    // order < 2 or delay == 0 would make the pattern count degenerate
    // (and `(order - 1) * delay` would underflow for order == 0).
    if order < 2 || delay == 0 || n < order * delay {
        return f64::NAN;
    }
    let n_patterns = n - (order - 1) * delay;
    // Encode each ordinal pattern as a unique integer
    let mut pattern_counts = std::collections::HashMap::new();
    for i in 0..n_patterns {
        let window: Vec<f64> = (0..order).map(|j| x[i + j * delay]).collect();
        // Rank encode
        let mut indices: Vec<usize> = (0..order).collect();
        indices.sort_by(|&a, &b| window[a].partial_cmp(&window[b]).unwrap());
        let mut rank = vec![0usize; order];
        for (pos, &idx) in indices.iter().enumerate() {
            rank[idx] = pos;
        }
        let mut key = 0u64;
        for (j, &r) in rank.iter().enumerate() {
            key += r as u64 * (order as u64).pow(j as u32);
        }
        *pattern_counts.entry(key).or_insert(0usize) += 1;
    }
    let total = n_patterns as f64;
    let mut h = 0.0;
    for &c in pattern_counts.values() {
        let p = c as f64 / total;
        if p > 0.0 {
            h -= p * p.log2();
        }
    }
    // h_max = log2(order!)
    let h_max = (1..=order).map(|i| i as f64).product::<f64>().log2();
    if h_max > 0.0 {
        h / h_max
    } else {
        0.0
    }
}

/// Hurst exponent via detrended fluctuation analysis (DFA). Peng et al. 1994.
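/// Integrates the mean-centred signal, removes a linear trend per window of
/// size s, and fits log F(s) against log s; the slope estimates H (≈ 0.5 for
/// uncorrelated noise, > 0.5 for persistent correlations).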
pub fn hurst_exponent(binary_train: &[i32], min_window: usize) -> f64 {
    let x: Vec<f64> = binary_train.iter().map(|&v| v as f64).collect();
    let n = x.len();
    if min_window == 0 || n < 4 * min_window {
        return f64::NAN;
    }
    let mean: f64 = x.iter().sum::<f64>() / n as f64;
    let y: Vec<f64> = x
        .iter()
        .scan(0.0, |acc, &v| {
            *acc += v - mean;
            Some(*acc)
        })
        .collect();

    let mut scales = Vec::new();
    let mut flucts = Vec::new();
    let mut s = min_window;
    while s <= n / 4 {
        let n_seg = n / s;
        let mut f2 = 0.0;
        for seg in 0..n_seg {
            let chunk = &y[seg * s..(seg + 1) * s];
            // Linear detrend via least squares
            let (slope, intercept) = linear_fit(chunk);
            let mut mse = 0.0;
            for (i, &v) in chunk.iter().enumerate() {
                let trend = slope * i as f64 + intercept;
                mse += (v - trend) * (v - trend);
            }
            f2 += mse / s as f64;
        }
        f2 /= n_seg as f64;
        scales.push(s as f64);
        flucts.push(f2.sqrt());
        // Grow the scale geometrically, always advancing by at least one step.
        let next = (s as f64 * 1.5) as usize;
        s = if next > s { next } else { s + 1 };
    }
    if scales.len() < 2 {
        return f64::NAN;
    }
    let log_s: Vec<f64> = scales.iter().map(|&s| s.ln()).collect();
    let log_f: Vec<f64> = flucts.iter().map(|&f| (f + 1e-30).ln()).collect();
    linear_fit_slope(&log_s, &log_f)
}

/// Simple linear regression: returns (slope, intercept).
fn linear_fit(y: &[f64]) -> (f64, f64) {
    let n = y.len() as f64;
    let sx: f64 = (0..y.len()).map(|i| i as f64).sum();
    let sy: f64 = y.iter().sum();
    let sxx: f64 = (0..y.len()).map(|i| (i as f64) * (i as f64)).sum();
    let sxy: f64 = y.iter().enumerate().map(|(i, &v)| i as f64 * v).sum();
    let denom = n * sxx - sx * sx;
    if denom.abs() < 1e-30 {
        return (0.0, sy / n);
    }
    let slope = (n * sxy - sx * sy) / denom;
    let intercept = (sy - slope * sx) / n;
    (slope, intercept)
}

/// Slope from linear regression of y on x.
fn linear_fit_slope(x: &[f64], y: &[f64]) -> f64 {
    let n = x.len() as f64;
    let sx: f64 = x.iter().sum();
    let sy: f64 = y.iter().sum();
    let sxx: f64 = x.iter().map(|&v| v * v).sum();
    let sxy: f64 = x.iter().zip(y.iter()).map(|(&xi, &yi)| xi * yi).sum();
    let denom = n * sxx - sx * sx;
    if denom.abs() < 1e-30 {
        return f64::NAN;
    }
    (n * sxy - sx * sy) / denom
}

#[cfg(test)]
mod tests {
    use super::*;

    fn regular_train(period: usize, n: usize) -> Vec<i32> {
        (0..n)
            .map(|i| if i % period == 0 { 1 } else { 0 })
            .collect()
    }

    fn poisson_like_train(n: usize, seed: u64) -> Vec<i32> {
        let mut rng = seed;
        (0..n)
            .map(|_| {
                rng = rng.wrapping_mul(6364136223846793005).wrapping_add(1);
                if (rng >> 33).is_multiple_of(10) {
                    1
                } else {
                    0
                }
            })
            .collect()
    }

    #[test]
    fn test_cv_isi_regular() {
        let train = regular_train(10, 1000);
        let cv = cv_isi(&train, 0.001);
        assert!(cv < 0.01, "Regular train should have CV ≈ 0, got {cv}");
    }

    #[test]
    fn test_cv2_regular() {
        let train = regular_train(10, 1000);
        let c = cv2(&train, 0.001);
        assert!(c < 0.01, "Regular train should have CV2 ≈ 0, got {c}");
    }

    #[test]
    fn test_local_variation_regular() {
        let train = regular_train(10, 1000);
        let lv = local_variation(&train, 0.001);
        assert!(lv < 0.01, "Regular train should have LV ≈ 0, got {lv}");
    }

    #[test]
    fn test_fano_factor_regular() {
        let train = regular_train(10, 1000);
        let ff = fano_factor(&train, 50.0, 0.001);
        assert!(ff < 0.5, "Regular train should have FF < 0.5, got {ff}");
    }

    #[test]
    fn test_lempel_ziv() {
        let train = regular_train(5, 100);
        let lz = lempel_ziv_complexity(&train);
        assert!(lz > 0.0 && lz.is_finite());
    }

    #[test]
    fn test_permutation_entropy_constant() {
        let train = vec![0; 100];
        let pe = permutation_entropy(&train, 3, 1);
        assert!(pe >= 0.0);
    }

    #[test]
    fn test_hurst_exponent() {
        let train = poisson_like_train(2000, 42);
        let h = hurst_exponent(&train, 10);
        assert!(h.is_finite(), "Hurst should be finite, got {h}");
        assert!(h > 0.0 && h < 2.0, "Hurst should be in (0, 2), got {h}");
    }

    #[test]
    fn test_approximate_entropy() {
        let train = poisson_like_train(500, 123);
        let ae = approximate_entropy(&train, 2, 0.2);
        assert!(ae.is_finite(), "ApEn should be finite, got {ae}");
    }

    #[test]
    fn test_sample_entropy() {
        let train = poisson_like_train(500, 456);
        let se = sample_entropy(&train, 2, 0.2);
        assert!(se.is_finite(), "SampEn should be finite, got {se}");
    }

    #[test]
    fn test_lvr() {
        let train = regular_train(10, 1000);
        let l = lvr(&train, 0.001, 2.0);
        assert!(l.is_finite());
    }

    #[test]
    fn test_isi_entropy() {
        let train = poisson_like_train(1000, 789);
        let h = isi_entropy(&train, 0.001, 20);
        // May be NaN if not enough spikes, but should not panic.
        // If non-NaN, entropy must be non-negative.
        if !h.is_nan() {
            assert!(h >= 0.0, "Entropy must be non-negative, got {h}");
        }
    }
}