// sc_neurocore_engine/conv.rs
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
/// A 2-D convolution layer with deterministically seeded random kernels.
///
/// Kernel weights are stored flat in `kernels`, laid out row-major as
/// `[out_channels][in_channels][kernel_size][kernel_size]`.
#[derive(Clone, Debug)]
pub struct Conv2DLayer {
    /// Number of channels expected in the input tensor.
    pub in_channels: usize,
    /// Number of output feature maps produced by `forward`.
    pub out_channels: usize,
    /// Side length of the square kernel window.
    pub kernel_size: usize,
    /// Step between successive window positions.
    pub stride: usize,
    /// Zero-padding added to every spatial border before convolving.
    pub padding: usize,
    /// Flattened weights; length is
    /// `out_channels * in_channels * kernel_size * kernel_size`.
    pub kernels: Vec<f64>,
}
27
28impl Conv2DLayer {
29 pub fn new(
30 in_channels: usize,
31 out_channels: usize,
32 kernel_size: usize,
33 stride: usize,
34 padding: usize,
35 seed: u64,
36 ) -> Self {
37 let mut rng = ChaCha8Rng::seed_from_u64(seed);
38 let size = out_channels * in_channels * kernel_size * kernel_size;
39 let kernels: Vec<f64> = (0..size).map(|_| rng.random::<f64>()).collect();
40 Self {
41 in_channels,
42 out_channels,
43 kernel_size,
44 stride,
45 padding,
46 kernels,
47 }
48 }
49
50 pub fn forward(&self, input: &[f64], h: usize, w: usize) -> (Vec<f64>, usize, usize) {
52 let k = self.kernel_size;
53 let h_out = (h + 2 * self.padding - k) / self.stride + 1;
54 let w_out = (w + 2 * self.padding - k) / self.stride + 1;
55 let c_in = self.in_channels;
56 let filter_size = c_in * k * k;
57
58 let padded = if self.padding > 0 {
60 let ph = h + 2 * self.padding;
61 let pw = w + 2 * self.padding;
62 let mut p = vec![0.0; c_in * ph * pw];
63 for c in 0..c_in {
64 for i in 0..h {
65 for j in 0..w {
66 p[c * ph * pw + (i + self.padding) * pw + (j + self.padding)] =
67 input[c * h * w + i * w + j];
68 }
69 }
70 }
71 (p, ph, pw)
72 } else {
73 (input.to_vec(), h, w)
74 };
75 let (ref inp, ph, pw) = padded;
76
77 let mut output = vec![0.0; self.out_channels * h_out * w_out];
78
79 for oc in 0..self.out_channels {
80 let filter = &self.kernels[oc * filter_size..(oc + 1) * filter_size];
81 for i in 0..h_out {
82 for j in 0..w_out {
83 let hs = i * self.stride;
84 let ws = j * self.stride;
85 let mut acc = 0.0;
86 for c in 0..c_in {
87 for ki in 0..k {
88 for kj in 0..k {
89 let val = inp[c * ph * pw + (hs + ki) * pw + (ws + kj)];
90 let wt = filter[c * k * k + ki * k + kj];
91 acc += val * wt; }
93 }
94 }
95 output[oc * h_out * w_out + i * w_out + j] = acc;
96 }
97 }
98 }
99
100 (output, h_out, w_out)
101 }
102}
103
#[cfg(test)]
mod tests {
    use super::*;

    /// A 3x3 kernel with no padding shrinks each spatial dim by 2.
    #[test]
    fn output_shape_no_padding() {
        let layer = Conv2DLayer::new(1, 2, 3, 1, 0, 42);
        let img = vec![0.5; 64];
        let (result, rows, cols) = layer.forward(&img, 8, 8);
        assert_eq!((rows, cols), (6, 6));
        assert_eq!(result.len(), 2 * 6 * 6);
    }

    /// Padding of 1 with a 3x3 kernel preserves the spatial dims.
    #[test]
    fn output_shape_with_padding() {
        let layer = Conv2DLayer::new(1, 2, 3, 1, 1, 42);
        let img = vec![0.5; 64];
        let (result, rows, cols) = layer.forward(&img, 8, 8);
        assert_eq!((rows, cols), (8, 8));
        assert_eq!(result.len(), 2 * 8 * 8);
    }

    /// All-ones kernel over all-ones input sums the 3x3 window to 9.
    #[test]
    fn all_ones_kernel() {
        let mut layer = Conv2DLayer::new(1, 1, 3, 1, 0, 42);
        layer.kernels = vec![1.0; 9];
        let ones = vec![1.0; 25];
        let (result, _, _) = layer.forward(&ones, 5, 5);
        assert!((result[0] - 9.0).abs() < 1e-10);
    }

    /// Stride 2 on an 8x8 input with a 3x3 kernel yields a 3x3 output.
    #[test]
    fn stride_2() {
        let layer = Conv2DLayer::new(1, 1, 3, 2, 0, 42);
        let img = vec![0.5; 64];
        let (_, rows, cols) = layer.forward(&img, 8, 8);
        assert_eq!((rows, cols), (3, 3));
    }
}