// sc_neurocore_engine/conv.rs
// SPDX-License-Identifier: AGPL-3.0-or-later | Commercial license available
// © Concepts 1996–2026 Miroslav Šotek. All rights reserved.
// © Code 2020–2026 Miroslav Šotek. All rights reserved.
// ORCID: 0009-0009-3560-0851
// Contact: www.anulum.li | protoscience@anulum.li
// SC-NeuroCore — SC 2D convolutional layer using probability-domain multiplication.

//! SC 2D convolutional layer using probability-domain multiplication.

use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
/// Stochastic 2D convolutional layer.
///
/// Kernels stored as probabilities in [0,1]. Forward pass uses
/// P(A∧B) = P(A)·P(B) for unipolar stochastic multiplication.
#[derive(Clone, Debug)]
pub struct Conv2DLayer {
    /// Number of input feature channels (C_in).
    pub in_channels: usize,
    /// Number of output feature channels / filters (C_out).
    pub out_channels: usize,
    /// Side length k of the square k×k kernel window.
    pub kernel_size: usize,
    /// Step, in pixels, between successive kernel placements.
    pub stride: usize,
    /// Amount of zero-padding added to each spatial border in `forward`.
    pub padding: usize,
    /// Flat kernels: [out_ch, in_ch, k, k]
    pub kernels: Vec<f64>,
}
28impl Conv2DLayer {
29    pub fn new(
30        in_channels: usize,
31        out_channels: usize,
32        kernel_size: usize,
33        stride: usize,
34        padding: usize,
35        seed: u64,
36    ) -> Self {
37        let mut rng = ChaCha8Rng::seed_from_u64(seed);
38        let size = out_channels * in_channels * kernel_size * kernel_size;
39        let kernels: Vec<f64> = (0..size).map(|_| rng.random::<f64>()).collect();
40        Self {
41            in_channels,
42            out_channels,
43            kernel_size,
44            stride,
45            padding,
46            kernels,
47        }
48    }
49
50    /// Forward pass: input shape [C_in, H, W] (flat), returns [C_out, H_out, W_out] (flat).
51    pub fn forward(&self, input: &[f64], h: usize, w: usize) -> (Vec<f64>, usize, usize) {
52        let k = self.kernel_size;
53        let h_out = (h + 2 * self.padding - k) / self.stride + 1;
54        let w_out = (w + 2 * self.padding - k) / self.stride + 1;
55        let c_in = self.in_channels;
56        let filter_size = c_in * k * k;
57
58        // Pad input if needed
59        let padded = if self.padding > 0 {
60            let ph = h + 2 * self.padding;
61            let pw = w + 2 * self.padding;
62            let mut p = vec![0.0; c_in * ph * pw];
63            for c in 0..c_in {
64                for i in 0..h {
65                    for j in 0..w {
66                        p[c * ph * pw + (i + self.padding) * pw + (j + self.padding)] =
67                            input[c * h * w + i * w + j];
68                    }
69                }
70            }
71            (p, ph, pw)
72        } else {
73            (input.to_vec(), h, w)
74        };
75        let (ref inp, ph, pw) = padded;
76
77        let mut output = vec![0.0; self.out_channels * h_out * w_out];
78
79        for oc in 0..self.out_channels {
80            let filter = &self.kernels[oc * filter_size..(oc + 1) * filter_size];
81            for i in 0..h_out {
82                for j in 0..w_out {
83                    let hs = i * self.stride;
84                    let ws = j * self.stride;
85                    let mut acc = 0.0;
86                    for c in 0..c_in {
87                        for ki in 0..k {
88                            for kj in 0..k {
89                                let val = inp[c * ph * pw + (hs + ki) * pw + (ws + kj)];
90                                let wt = filter[c * k * k + ki * k + kj];
91                                acc += val * wt; // P(A)·P(B) in probability domain
92                            }
93                        }
94                    }
95                    output[oc * h_out * w_out + i * w_out + j] = acc;
96                }
97            }
98        }
99
100        (output, h_out, w_out)
101    }
102}
#[cfg(test)]
mod tests {
    use super::*;

    // 8×8 input, 3×3 kernel, stride 1, no padding: each spatial
    // dimension shrinks by k - 1 = 2.
    #[test]
    fn output_shape_no_padding() {
        let layer = Conv2DLayer::new(1, 2, 3, 1, 0, 42);
        let image = vec![0.5; 64];
        let (out, rows, cols) = layer.forward(&image, 8, 8);
        assert_eq!((rows, cols), (6, 6));
        assert_eq!(out.len(), 2 * 6 * 6);
    }

    // padding = 1 with a 3×3 kernel and stride 1 preserves spatial size.
    #[test]
    fn output_shape_with_padding() {
        let layer = Conv2DLayer::new(1, 2, 3, 1, 1, 42);
        let image = vec![0.5; 64];
        let (out, rows, cols) = layer.forward(&image, 8, 8);
        assert_eq!((rows, cols), (8, 8));
        assert_eq!(out.len(), 2 * 8 * 8);
    }

    // Unit weights over unit inputs: every 3×3 window sums to 9.0.
    #[test]
    fn all_ones_kernel() {
        let mut layer = Conv2DLayer::new(1, 1, 3, 1, 0, 42);
        layer.kernels = vec![1.0; 9];
        let image = vec![1.0; 25];
        let (out, _, _) = layer.forward(&image, 5, 5);
        let delta = out[0] - 9.0;
        assert!(delta.abs() < 1e-10);
    }

    // Stride 2, 8×8 input, 3×3 kernel: (8 - 3) / 2 + 1 = 3 per dimension.
    #[test]
    fn stride_2() {
        let layer = Conv2DLayer::new(1, 1, 3, 2, 0, 42);
        let image = vec![0.5; 64];
        let (_, rows, cols) = layer.forward(&image, 8, 8);
        assert_eq!((rows, cols), (3, 3));
    }
}