Skip to content

Pipeline

Data ingestion and training orchestration for SNN workflows.

  • DataIngestor — Multimodal dataset preparation: spike encoding, batching, augmentation
  • SCTrainingLoop — Standard and RL training orchestration with logging, checkpointing, and early stopping
from sc_neurocore.pipeline import DataIngestor, SCTrainingLoop

sc_neurocore.pipeline

sc_neurocore.pipeline -- Tier: research (experimental; APIs may change).

DataIngestor

Ingests and normalizes multimodal datasets for SC training.

Source code in src/sc_neurocore/pipeline/ingestion.py
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
class DataIngestor:
    """
    Ingests and normalizes multimodal datasets for SC training.
    """

    def prepare_dataset(self, raw_data: Dict[str, Any]) -> MultimodalDataset:
        """
        Normalize and package raw multimodal data.

        Each modality's values are min-max scaled into [0, 1]; a constant
        array (max == min) is mapped to all zeros to avoid division by zero.

        Args:
            raw_data: Mapping of modality name to array-like samples.
                Modalities are assumed to share the same leading (sample)
                dimension — TODO confirm against callers.

        Returns:
            A MultimodalDataset holding the normalized arrays and
            placeholder all-zero labels sized to the first modality's
            sample count.

        Raises:
            ValueError: If raw_data is empty (previously surfaced as an
                opaque IndexError when sizing the labels array).
        """
        if not raw_data:
            raise ValueError("raw_data must contain at least one modality")

        processed_data = {}
        for key, values in raw_data.items():
            arr = np.array(values)
            arr_min = np.min(arr)
            arr_max = np.max(arr)
            if arr_max > arr_min:
                # Min-max normalize to [0, 1].
                processed_data[key] = (arr - arr_min) / (arr_max - arr_min)
            else:
                # Constant input carries no signal; encode as zeros.
                processed_data[key] = np.zeros_like(arr)

        # Placeholder labels: one zero per sample of the first modality.
        n_samples = len(next(iter(processed_data.values())))
        return MultimodalDataset(data=processed_data, labels=np.zeros(n_samples))

prepare_dataset(raw_data)

Normalizes and packages raw multimodal data.

Source code in src/sc_neurocore/pipeline/ingestion.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def prepare_dataset(self, raw_data: Dict[str, Any]) -> MultimodalDataset:
    """
    Normalize each modality of *raw_data* and wrap it in a MultimodalDataset.

    Every modality is min-max scaled into [0, 1]; a constant array
    collapses to all zeros. Labels are a zero vector sized to the first
    modality's sample count.
    """

    def _scale(values: Any) -> np.ndarray:
        # Min-max scale into [0, 1]; constant arrays become zeros.
        arr = np.array(values)
        lo, hi = np.min(arr), np.max(arr)
        if hi > lo:
            return (arr - lo) / (hi - lo)
        return np.zeros_like(arr)

    normalized = {name: _scale(values) for name, values in raw_data.items()}
    first_modality = list(normalized.values())[0]
    return MultimodalDataset(data=normalized, labels=np.zeros(len(first_modality)))

MultimodalDataset dataclass

A container for multimodal training data.

Source code in src/sc_neurocore/pipeline/ingestion.py
17
18
19
20
21
22
23
24
25
26
27
@dataclass
class MultimodalDataset:
    """Container pairing per-modality sample arrays with a shared label vector."""

    # Modality name -> array of samples, e.g. {'vision': [...], 'audio': [...]}
    data: Dict[str, np.ndarray[Any, Any]]
    labels: np.ndarray[Any, Any]

    def get_sample(self, idx: int) -> Dict[str, np.ndarray[Any, Any]]:
        """Return the idx-th sample of every modality, keyed by modality name."""
        sample = {}
        for modality, values in self.data.items():
            sample[modality] = values[idx]
        return sample

SCTrainingLoop

Standard and Reinforcement Learning loops for SC Networks.

Source code in src/sc_neurocore/pipeline/training.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
class SCTrainingLoop:
    """
    Standard and Reinforcement Learning loops for SC Networks.
    """

    @staticmethod
    def run_rl_epoch(  # type: ignore
        agent: SCLearningLayer,
        env_step_func: Callable[[np.ndarray[Any, Any]], float],
        input_data: np.ndarray[Any, Any],
        generations: int = 10,
    ):
        """
        Drive `generations` rounds of reward-modulated learning.

        Each round forward-passes `input_data` through the agent, obtains a
        scalar reward from `env_step_func`, and applies that reward to every
        RewardModulatedSTDPSynapse in the agent's synapse grid.
        """
        for generation in range(generations):
            # Forward pass, then query the environment for a scalar reward.
            spikes = agent.run_epoch(input_data)
            reward = env_step_func(spikes)

            # Broadcast the reward to every reward-modulated synapse.
            for neuron_idx in range(agent.n_neurons):
                for input_idx in range(agent.n_inputs):
                    synapse = agent.synapses[neuron_idx][input_idx]
                    if isinstance(synapse, RewardModulatedSTDPSynapse):
                        synapse.apply_reward(reward)

            logger.info("RL Epoch %d: Reward = %.4f", generation, reward)

    @staticmethod
    def train_multimodal_fusion(fusion_layer, dataset, epochs: int = 5) -> None:
        """
        Stub for training weights in a fusion layer.

        Always raises; kept as a placeholder for the planned fusion API.
        """
        raise NotImplementedError("multimodal fusion training not implemented")

run_rl_epoch(agent, env_step_func, input_data, generations=10) staticmethod

Runs a reinforcement learning epoch. Uses RewardModulatedSTDPSynapse logic.

Source code in src/sc_neurocore/pipeline/training.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
@staticmethod
def run_rl_epoch(  # type: ignore
    agent: SCLearningLayer,
    env_step_func: Callable[[np.ndarray[Any, Any]], float],
    input_data: np.ndarray[Any, Any],
    generations: int = 10,
):
    """
    Run `generations` rounds of reward-modulated STDP training.

    Per round: forward-pass `input_data` through the agent, get a scalar
    reward back from `env_step_func`, and feed that reward to each
    RewardModulatedSTDPSynapse in the agent's synapse grid.
    """
    for round_idx in range(generations):
        spikes = agent.run_epoch(input_data)  # forward pass
        reward = env_step_func(spikes)  # environment feedback

        # Reward every plastic (reward-modulated) synapse; others are skipped.
        for row in range(agent.n_neurons):
            for col in range(agent.n_inputs):
                candidate = agent.synapses[row][col]
                if isinstance(candidate, RewardModulatedSTDPSynapse):
                    candidate.apply_reward(reward)

        logger.info("RL Epoch %d: Reward = %.4f", round_idx, reward)

train_multimodal_fusion(fusion_layer, dataset, epochs=5) staticmethod

Stub for training weights in a fusion layer.

Source code in src/sc_neurocore/pipeline/training.py
53
54
55
56
57
58
@staticmethod
def train_multimodal_fusion(fusion_layer, dataset, epochs: int = 5) -> None:
    """
    Stub for training weights in a fusion layer.

    Always raises NotImplementedError; kept as a placeholder for the
    planned multimodal-fusion training API.

    Args:
        fusion_layer: Fusion layer whose weights would be trained
            (unannotated — presumably a project layer class; confirm
            when implementing).
        dataset: Training data (presumably a MultimodalDataset — verify).
        epochs: Requested number of training passes (currently unused).

    Raises:
        NotImplementedError: Always.
    """
    raise NotImplementedError("multimodal fusion training not implemented")