Models
Pre-built network architectures and model registry.
SCDigitClassifier — Pre-configured SC network for MNIST digit classification. Architecture: Conv (28x28, 1ch->4ch, kernel 3, stride 2) + Vectorized dense + output.
10 pre-built configurations in the model zoo:
| Config | Task |
| --- | --- |
| Brunel balanced | E/I balance dynamics |
| Cortical column | Layered cortical model |
| CPG | Central pattern generator |
| Decision-making | 2-pool WTA |
| Working memory | Persistent activity |
| Visual cortex V1 | Orientation selectivity |
| MNIST classifier | Digit recognition |
| SHD classifier | Speech (Spiking Heidelberg Digits) |
| DVS gesture | Event camera gestures |
| Auditory | Sound processing |
from sc_neurocore.models import SCDigitClassifier
model = SCDigitClassifier()
output = model.forward(image_28x28)
sc_neurocore.models.zoo
SCDigitClassifier
Pre-configured SC Network for MNIST-like Digit Classification.
Uses: Conv Layer -> Vectorized Dense Layer
Source code in src/sc_neurocore/models/zoo.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
class SCDigitClassifier:
    """
    Pre-configured SC Network for MNIST-like Digit Classification.
    Uses: Conv Layer -> Vectorized Dense Layer
    """

    # Stochastic-computing configuration shared between layer construction
    # and output normalization.  Kept as named class attributes so the
    # normalization in forward() cannot drift out of sync with the conv
    # layer built in __init__() (previously both hard-coded 3 and 256
    # independently).
    KERNEL_SIZE = 3
    STREAM_LENGTH = 256
    N_CLASSES = 10

    def __init__(self) -> None:
        # 1. Convolutional front-end (feature extraction).
        #    Input: 28x28, 1 channel -> 4 channels, stride 2.
        self.conv = SCConv2DLayer(
            in_channels=1,
            out_channels=4,
            kernel_size=self.KERNEL_SIZE,
            stride=2,
            length=self.STREAM_LENGTH,
        )
        # Output map size: (28 - 3) / 2 + 1 = 13x13 -> 4 * 13 * 13 = 676 features.
        # 2. Dense classifier over the flattened feature map.
        self.dense = VectorizedSCLayer(n_inputs=676, n_neurons=self.N_CLASSES, length=1024)

    def forward(self, image: np.ndarray[Any, Any]) -> int:
        """
        Classify a 28x28 image.

        Accepts either a (28, 28) or a (1, 28, 28) array; returns the
        index of the most active output neuron (the predicted digit).
        """
        # Ensure the channel axis the conv layer expects: (1, 28, 28).
        if image.ndim == 2:
            image = image[None, :, :]
        # 1. Conv feature extraction, then flatten to a 676-vector.
        features = self.conv.forward(image)
        flat_features = features.flatten()
        # 2. Dense.  The vectorized layer expects probabilities in [0, 1],
        #    while the conv output is an accumulated bit count.  The maximum
        #    possible count per position is kernel_size^2 * stream length
        #    (full overlap, every bit set), so normalize by that and clamp.
        norm_factor = (self.KERNEL_SIZE * self.KERNEL_SIZE) * self.STREAM_LENGTH
        flat_probs = np.clip(flat_features / norm_factor, 0, 1)
        outputs = self.dense.forward(flat_probs)
        # Argmax over the 10 class outputs.
        return int(np.argmax(outputs))
|
forward(image)
Classify a 28x28 image.
Source code in src/sc_neurocore/models/zoo.py
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
def forward(self, image: np.ndarray[Any, Any]) -> int:
    """
    Classify a 28x28 image.

    Returns the index (0-9) of the most active output neuron.
    """
    # The conv front-end expects a channel axis: promote (28, 28) to (1, 28, 28).
    batched = image[None, :, :] if image.ndim == 2 else image

    # Feature extraction, flattened into a single vector.
    feature_map = self.conv.forward(batched)
    counts = feature_map.flatten()

    # The dense layer consumes probabilities in [0, 1], but the conv
    # output is an accumulated bit count.  Normalize by the theoretical
    # maximum count per position — kernel_size^2 * stream length,
    # i.e. (3 * 3) * 256 — and clamp into range.
    max_count = (3 * 3) * 256
    probabilities = np.clip(counts / max_count, 0, 1)

    scores = self.dense.forward(probabilities)
    return int(np.argmax(scores))
|
SCKeywordSpotter
Audio Keyword Spotter (e.g., "Yes"/"No").
Uses: Recurrent / Dense Layer
Source code in src/sc_neurocore/models/zoo.py
61
62
63
64
65
66
67
68
69
70
71
class SCKeywordSpotter:
    """
    Audio Keyword Spotter (e.g., "Yes"/"No").
    Uses: Recurrent / Dense Layer
    """

    def __init__(self, n_keywords=2) -> None:
        # A plain dense layer stands in for the recurrent stage for now —
        # a real RNN requires sequence-handling logic not yet implemented.
        self.classifier = VectorizedSCLayer(n_inputs=16, n_neurons=n_keywords)

    def predict(self, mfcc_features: np.ndarray[Any, Any]) -> int:
        """Return the index of the keyword with the strongest response."""
        scores = self.classifier.forward(mfcc_features)
        winner = np.argmax(scores)
        return int(winner)
|