Skip to content

Interfaces

External I/O bridges: brain-computer interface protocols, CCW audio bridge, dynamic vision sensor input, and real-world actuator output.

BCI

sc_neurocore.interfaces.bci

Encode continuous neural signals (EEG, LFP, intracortical) into spike trains and stochastic bitstreams for SC processing.

Uses framework-native encoding (seeded RNG for reproducibility, Sobol quasi-random for low-discrepancy encoding). Supports windowed encoding for streaming BCI pipelines.

For spike compression/telemetry, see spike_codec (6 codecs).

BCIEncoder dataclass

Encode continuous neural signals into spike trains.

Replaces the old BCIDecoder (misleading name — it encodes, not decodes). Uses seeded RNG for deterministic, reproducible encoding.

Parameters

n_channels (int): number of recording channels. sampling_rate (int): input signal sampling rate in Hz. window_ms (float): encoding window duration in milliseconds. seed (int): RNG seed for reproducibility.

Source code in src/sc_neurocore/interfaces/bci.py
Python
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
@dataclass
class BCIEncoder:
    """Encode continuous neural signals into spike trains.

    Replaces the old BCIDecoder (misleading name — it encodes, not decodes).
    Uses seeded RNG for deterministic, reproducible encoding.

    Parameters
    ----------
    n_channels : int
        Number of recording channels.
    sampling_rate : int
        Input signal sampling rate (Hz).
    window_ms : float
        Encoding window duration in milliseconds.
    seed : int
        RNG seed for reproducibility.
    """

    n_channels: int
    sampling_rate: int = 20000
    window_ms: float = 1.0
    seed: int = 42

    def encode(self, signal: np.ndarray, T: int = 20) -> np.ndarray:
        """Encode a signal block into spike trains via rate coding.

        Parameters
        ----------
        signal : ndarray of shape (n_channels,) or (n_channels, n_samples)
            Continuous neural signal. Multi-sample input is averaged
            per channel to get firing probabilities.
        T : int
            Number of output timesteps per window.

        Returns
        -------
        ndarray of shape (T, n_channels), int8 binary
        """
        # Collapse multi-sample input to one mean amplitude per channel.
        if signal.ndim > 1:
            probs = signal.mean(axis=1)
        else:
            probs = signal.copy()

        probs = self._normalize(probs)
        return rate_encode(probs, T, seed=self.seed)

    def encode_stream(self, signal: np.ndarray) -> np.ndarray:
        """Encode a multi-window signal stream.

        Parameters
        ----------
        signal : ndarray of shape (n_channels, total_samples)
            Full recording. Split into windows of window_ms duration;
            a trailing partial window is dropped.

        Returns
        -------
        ndarray of shape (total_T, n_channels), int8 binary
        """
        samples_per_window = max(1, int(self.sampling_rate * self.window_ms / 1000))
        n_windows = signal.shape[1] // samples_per_window
        # 10:1 temporal compression from input samples to spike timesteps.
        T_per_window = max(1, samples_per_window // 10)

        chunks = []
        for w in range(n_windows):
            start = w * samples_per_window
            end = start + samples_per_window
            window = signal[:, start:end]
            # NOTE: every window reuses self.seed, so identical windows
            # yield identical spike patterns (deterministic by design).
            chunk = self.encode(window, T=T_per_window)
            chunks.append(chunk)

        if not chunks:
            # Recording shorter than one window: empty (0, n_channels) raster.
            return np.zeros((0, self.n_channels), dtype=np.int8)

        return np.vstack(chunks)

    @staticmethod
    def _normalize(values: np.ndarray) -> np.ndarray:
        """Normalize to [0, 1] for probability encoding.

        A constant (flat) signal maps to 0.5 instead of dividing by ~0.
        """
        vmin, vmax = values.min(), values.max()
        if vmax - vmin < 1e-10:
            # BUG FIX: np.full_like preserved integer input dtypes, which
            # truncated 0.5 to 0. np.full always yields float 0.5.
            return np.full(values.shape, 0.5)
        return (values - vmin) / (vmax - vmin)

    # --- Backward-compatible API (old BCIDecoder methods) ---

    def normalize_signal(self, signal: np.ndarray) -> np.ndarray:
        """Normalize signal to [0, 1]. Legacy API — use _normalize().

        Unlike _normalize(), a constant signal maps to zeros (legacy
        behaviour preserved for existing callers).
        """
        s_min, s_max = np.min(signal), np.max(signal)
        if s_max - s_min == 0:
            return np.zeros_like(signal)
        return (signal - s_min) / (s_max - s_min)

    def encode_to_bitstream(self, signal: np.ndarray, length: int = 256) -> np.ndarray:
        """Legacy API. Encodes (channels, time) → (channels, length).

        New code should use .encode() which returns (T, channels).

        Raises
        ------
        ValueError
            If the channel count does not match n_channels.
        """
        if signal.ndim > 1:
            mean_vals = np.mean(signal, axis=1)
        else:
            mean_vals = signal

        if len(mean_vals) != self.n_channels:
            raise ValueError(f"Signal has {len(mean_vals)} channels, expected {self.n_channels}")

        probs = self.normalize_signal(mean_vals)
        rng = np.random.RandomState(self.seed)
        bits = (rng.random((self.n_channels, length)) < probs[:, None]).astype(np.uint8)
        return bits

encode(signal, T=20)

Encode a signal block into spike trains via rate coding.

Parameters

signal (ndarray of shape (n_channels,) or (n_channels, n_samples)): continuous neural signal; multi-sample input is averaged per channel to get firing probabilities. T (int): number of output timesteps per window.

Returns

ndarray of shape (T, n_channels), int8 binary

Source code in src/sc_neurocore/interfaces/bci.py
Python
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
def encode(self, signal: np.ndarray, T: int = 20) -> np.ndarray:
    """Rate-code one signal block into a binary spike train.

    Parameters
    ----------
    signal : ndarray of shape (n_channels,) or (n_channels, n_samples)
        Continuous neural signal; multi-sample input is averaged per
        channel to obtain firing probabilities.
    T : int
        Number of output timesteps per window.

    Returns
    -------
    ndarray of shape (T, n_channels), int8 binary
    """
    per_channel = signal.mean(axis=1) if signal.ndim > 1 else signal.copy()
    firing_probs = self._normalize(per_channel)
    return rate_encode(firing_probs, T, seed=self.seed)

encode_stream(signal)

Encode a multi-window signal stream.

Parameters

signal : ndarray of shape (n_channels, total_samples) Full recording. Split into windows of window_ms duration.

Returns

ndarray of shape (total_T, n_channels), int8 binary

Source code in src/sc_neurocore/interfaces/bci.py
Python
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
def encode_stream(self, signal: np.ndarray) -> np.ndarray:
    """Encode a multi-window signal stream.

    Parameters
    ----------
    signal : ndarray of shape (n_channels, total_samples)
        Full recording; split into consecutive windows of window_ms
        duration, dropping any trailing partial window.

    Returns
    -------
    ndarray of shape (total_T, n_channels), int8 binary
    """
    win_len = max(1, int(self.sampling_rate * self.window_ms / 1000))
    full_windows = signal.shape[1] // win_len
    # 10:1 compression from input samples to spike timesteps.
    steps = max(1, win_len // 10)

    encoded = [
        self.encode(signal[:, w * win_len:(w + 1) * win_len], T=steps)
        for w in range(full_windows)
    ]

    if not encoded:
        return np.zeros((0, self.n_channels), dtype=np.int8)
    return np.vstack(encoded)

normalize_signal(signal)

Normalize signal to [0, 1]. Legacy API — use _normalize().

Source code in src/sc_neurocore/interfaces/bci.py
Python
114
115
116
117
118
119
def normalize_signal(self, signal: np.ndarray) -> np.ndarray:
    """Min-max normalize a signal to [0, 1]. Legacy API — use _normalize()."""
    lo = np.min(signal)
    span = np.max(signal) - lo
    # Flat signal: avoid division by zero, return all zeros (legacy behaviour).
    if span == 0:
        return np.zeros_like(signal)
    return (signal - lo) / span

encode_to_bitstream(signal, length=256)

Legacy API. Encodes (channels, time) → (channels, length).

New code should use .encode() which returns (T, channels).

Source code in src/sc_neurocore/interfaces/bci.py
Python
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
def encode_to_bitstream(self, signal: np.ndarray, length: int = 256) -> np.ndarray:
    """Legacy API. Encodes (channels, time) → (channels, length).

    New code should use .encode() which returns (T, channels).
    """
    # Reduce multi-sample input to one mean value per channel.
    mean_vals = np.mean(signal, axis=1) if signal.ndim > 1 else signal

    if len(mean_vals) != self.n_channels:
        raise ValueError(f"Signal has {len(mean_vals)} channels, expected {self.n_channels}")

    probs = self.normalize_signal(mean_vals)
    rng = np.random.RandomState(self.seed)
    # Bernoulli sample each channel's probability across `length` bits.
    thresholds = rng.random((self.n_channels, length))
    return (thresholds < probs[:, None]).astype(np.uint8)

BCIDecoder

Bases: BCIEncoder

Legacy alias. Use BCIEncoder instead.

Source code in src/sc_neurocore/interfaces/bci.py
Python
140
141
142
143
144
class BCIDecoder(BCIEncoder):
    """Legacy alias. Use BCIEncoder instead.

    Kept so existing callers constructing ``BCIDecoder(channels=...)``
    keep working; it only translates the old constructor signature.
    """

    def __init__(self, channels: int, sampling_rate: int = 1000, **kwargs):  # type: ignore[no-untyped-def]
        # Old API used `channels` and a 1 kHz default rate; forward to the
        # dataclass-generated BCIEncoder constructor.
        super().__init__(n_channels=channels, sampling_rate=sampling_rate, **kwargs)

Closed-Loop BCI HIL

sc_neurocore.interfaces.bci_closed_loop

Deterministic closed-loop BCI template for HIL prototyping.

The template wires raw electrode windows through WaveformCodec compression, AER event payload generation, spike-rate decoding, feedback emission, and runtime telemetry. It deliberately uses an in-process implant emulator by default so tests and examples can exercise the closed loop without claiming access to a physical implant.

ClosedLoopBCIConfig dataclass

Configuration for one raw-waveform to feedback HIL loop.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
@dataclass(frozen=True)
class ClosedLoopBCIConfig:
    """Configuration for one raw-waveform to feedback HIL loop."""

    n_channels: int                        # electrode channel count
    sampling_rate_hz: int = 30_000         # raw waveform sampling rate
    threshold_sigma: float = 4.5           # detection threshold in multiples of noise sigma
    snippet_samples: int = 48              # WaveformCodec snippet length; also sets refractory span
    waveform_mode: str = "spike"           # passed to WaveformCodec as `mode`
    quantize_bits: int = 6                 # passed to WaveformCodec as `quantize_bits`
    timestamp_bits: int = 16               # passed to AERSpikeCodec as `timestamp_bits`
    feedback_gain: float = 1.0             # linear gain for the default ImplantEmulator
    max_feedback: float = 1.0              # symmetric clipping bound for feedback values
    input_layer_id: str = "implant_input"        # telemetry id for the input stage
    feedback_layer_id: str = "implant_feedback"  # telemetry id for the feedback stage

FeedbackFrame dataclass

Feedback vector emitted to an implant emulator or hardware adapter.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
50
51
52
53
54
55
56
@dataclass(frozen=True)
class FeedbackFrame:
    """Feedback vector emitted to an implant emulator or hardware adapter."""

    values: tuple[float, ...]  # scaled and clipped per-channel feedback values
    timestamp_us: int          # window start time in microseconds
    active_count: int          # channels whose |value| exceeds the active threshold

ClosedLoopBCIResult dataclass

One processed BCI/HIL loop window.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
59
60
61
62
63
64
65
66
67
68
69
70
@dataclass(frozen=True)
class ClosedLoopBCIResult:
    """One processed BCI/HIL loop window."""

    compressed_waveform: bytes            # payload from WaveformCodec.compress
    waveform: WaveformCompressionResult   # result record from WaveformCodec.compress
    spike_raster: np.ndarray[Any, Any]    # (samples, channels) int8 binary raster
    aer_payload: bytes                    # payload from AERSpikeCodec.compress
    aer: AERCompressionResult             # result record from AERSpikeCodec.compress
    decoded_rates: np.ndarray[Any, Any]   # per-channel values from the spike decoder
    feedback: FeedbackFrame               # frame emitted by the feedback sink
    telemetry: dict[str, Any]             # DeviceTelemetry.summary() snapshot

SpikeDecoder

Bases: Protocol

Decoder interface for closed-loop spike windows.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
73
74
75
76
77
class SpikeDecoder(Protocol):
    """Decoder interface for closed-loop spike windows.

    Structural protocol: any object with a matching ``decode`` method
    satisfies it (e.g. RateSpikeDecoder).
    """

    def decode(self, spike_raster: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """Decode a binary spike raster into feedback control values."""

decode(spike_raster)

Decode a binary spike raster into feedback control values.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
76
77
def decode(self, spike_raster: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
    """Decode a binary spike raster into feedback control values.

    Protocol stub: implementations receive a 2-D spike raster and return
    per-channel feedback values (see RateSpikeDecoder).
    """

FeedbackSink

Bases: Protocol

Feedback interface for an implant emulator or hardware adapter.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
80
81
82
83
84
class FeedbackSink(Protocol):
    """Feedback interface for an implant emulator or hardware adapter.

    Structural protocol: any object with a matching ``apply_feedback``
    method satisfies it (e.g. ImplantEmulator).
    """

    def apply_feedback(self, values: np.ndarray[Any, Any], timestamp_us: int) -> FeedbackFrame:
        """Apply decoded feedback values and return the emitted frame."""

apply_feedback(values, timestamp_us)

Apply decoded feedback values and return the emitted frame.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
83
84
def apply_feedback(self, values: np.ndarray[Any, Any], timestamp_us: int) -> FeedbackFrame:
    """Apply decoded feedback values and return the emitted frame.

    Protocol stub: implementations transform the values as needed and
    return the FeedbackFrame actually emitted (see ImplantEmulator).
    """

RateSpikeDecoder dataclass

Decode spike rasters as per-channel firing rates.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
87
88
89
90
91
92
93
94
95
96
97
98
@dataclass
class RateSpikeDecoder:
    """Decode spike rasters as per-channel firing rates (spikes / second)."""

    # Sampling rate of the raster's time axis; converts counts to rates.
    sampling_rate_hz: int

    def decode(self, spike_raster: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """Return per-channel spike counts divided by window duration."""
        raster = np.asarray(spike_raster, dtype=np.float32)
        if raster.ndim != 2:
            raise ValueError("spike_raster must have shape (samples, channels)")
        # Floor the window duration to avoid dividing by zero.
        window_s = max(raster.shape[0] / self.sampling_rate_hz, 1e-9)
        counts = raster.sum(axis=0)
        return counts / window_s

ImplantEmulator dataclass

Deterministic feedback sink used by the closed-loop template.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
@dataclass
class ImplantEmulator:
    """Deterministic feedback sink used by the closed-loop template."""

    gain: float = 1.0               # linear scale applied to decoded values
    max_feedback: float = 1.0       # symmetric clipping bound
    active_threshold: float = 1e-9  # magnitude above which a channel counts as active
    frames: list[FeedbackFrame] = field(default_factory=list)  # history of emitted frames

    def apply_feedback(self, values: np.ndarray[Any, Any], timestamp_us: int) -> FeedbackFrame:
        """Scale, clip, record, and return one feedback frame."""
        bounded = np.clip(
            np.asarray(values, dtype=np.float32) * self.gain,
            -self.max_feedback,
            self.max_feedback,
        )
        n_active = int(np.count_nonzero(np.abs(bounded) > self.active_threshold))
        frame = FeedbackFrame(
            values=tuple(float(v) for v in bounded.tolist()),
            timestamp_us=int(timestamp_us),
            active_count=n_active,
        )
        self.frames.append(frame)
        return frame

ClosedLoopBCITemplate dataclass

WaveformCodec + AER + telemetry closed-loop BCI scaffold.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
@dataclass
class ClosedLoopBCITemplate:
    """WaveformCodec + AER + telemetry closed-loop BCI scaffold.

    Wires one raw electrode window through waveform compression, threshold
    spike detection, AER payload generation, rate decoding, and feedback
    emission, recording per-stage telemetry along the way.
    """

    config: ClosedLoopBCIConfig
    # Optional injection points; defaults are constructed in __post_init__.
    decoder: SpikeDecoder | None = None
    feedback_sink: FeedbackSink | None = None
    telemetry: DeviceTelemetry = field(default_factory=DeviceTelemetry)

    def __post_init__(self) -> None:
        # Build both codecs from config; fill in the default decoder and
        # in-process implant emulator when the caller did not inject them.
        self.waveform_codec = WaveformCodec(
            threshold_sigma=self.config.threshold_sigma,
            snippet_samples=self.config.snippet_samples,
            quantize_bits=self.config.quantize_bits,
            mode=self.config.waveform_mode,
        )
        self.aer_codec = AERSpikeCodec(timestamp_bits=self.config.timestamp_bits)
        if self.decoder is None:
            self.decoder = RateSpikeDecoder(self.config.sampling_rate_hz)
        if self.feedback_sink is None:
            self.feedback_sink = ImplantEmulator(
                gain=self.config.feedback_gain,
                max_feedback=self.config.max_feedback,
            )

    def process_window(
        self, waveform: np.ndarray[Any, Any], *, window_start_us: int = 0
    ) -> ClosedLoopBCIResult:
        """Process one raw electrode window through the closed-loop template."""
        # Pipeline: validate -> compress -> detect -> AER -> decode -> feedback.
        window = self._validate_waveform(waveform)
        compressed, waveform_result = self.waveform_codec.compress(window)
        spike_raster = self._detect_spike_raster(window)
        aer_payload, aer_result = self.aer_codec.compress(spike_raster)

        # __post_init__ fills these in; guard keeps manual construction honest.
        if self.decoder is None or self.feedback_sink is None:
            raise RuntimeError("closed-loop BCI template was not initialised")
        decoded = self.decoder.decode(spike_raster)
        feedback = self.feedback_sink.apply_feedback(decoded, window_start_us)

        # Record input spike count and active feedback channels for this window.
        self.telemetry.record(
            self.config.input_layer_id,
            int(spike_raster.sum()),
            self.config.n_channels,
        )
        self.telemetry.record(
            self.config.feedback_layer_id,
            feedback.active_count,
            self.config.n_channels,
        )

        return ClosedLoopBCIResult(
            compressed_waveform=compressed,
            waveform=waveform_result,
            spike_raster=spike_raster,
            aer_payload=aer_payload,
            aer=aer_result,
            decoded_rates=decoded,
            feedback=feedback,
            telemetry=self.telemetry.summary(),
        )

    def _validate_waveform(self, waveform: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        # Coerce to float32 and enforce (samples, channels) with a matching
        # channel count and at least one sample.
        window = np.asarray(waveform, dtype=np.float32)
        if window.ndim != 2:
            raise ValueError("waveform must have shape (samples, channels)")
        if window.shape[1] != self.config.n_channels:
            raise ValueError(
                f"waveform has {window.shape[1]} channels, expected {self.config.n_channels}"
            )
        if window.shape[0] == 0:
            raise ValueError("waveform must contain at least one sample")
        return window

    def _detect_spike_raster(self, waveform: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        # Robust per-channel noise estimate: median absolute amplitude / 0.6745
        # (MAD-style sigma estimator; floored to avoid zero thresholds).
        noise_sigma = np.median(np.abs(waveform), axis=0) / 0.6745
        noise_sigma = np.maximum(noise_sigma, 1e-6)
        # Negative thresholds: spikes are detected on downward deflections.
        thresholds = -self.config.threshold_sigma * noise_sigma
        samples, channels = waveform.shape
        raster = np.zeros((samples, channels), dtype=np.int8)
        # Refractory span derived from the snippet length (at least 1 sample).
        refractory = max(1, self.config.snippet_samples // 2)

        for channel in range(channels):
            last_spike = -refractory - 1
            for sample in range(1, samples):
                # Fire when below threshold, still descending relative to the
                # previous sample, and outside the refractory window.
                if (
                    waveform[sample, channel] < thresholds[channel]
                    and waveform[sample, channel] < waveform[sample - 1, channel]
                    and (sample - last_spike) > refractory
                ):
                    raster[sample, channel] = 1
                    last_spike = sample

        return raster

process_window(waveform, *, window_start_us=0)

Process one raw electrode window through the closed-loop template.

Source code in src/sc_neurocore/interfaces/bci_closed_loop.py
Python
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
def process_window(
    self, waveform: np.ndarray[Any, Any], *, window_start_us: int = 0
) -> ClosedLoopBCIResult:
    """Process one raw electrode window through the closed-loop template.

    Pipeline: validate -> waveform compression -> spike detection ->
    AER payload -> decode -> feedback -> telemetry.

    Parameters
    ----------
    waveform : ndarray of shape (samples, channels)
        Raw electrode window; channel count must match the config.
    window_start_us : int
        Timestamp (microseconds) forwarded to the feedback sink.

    Returns
    -------
    ClosedLoopBCIResult
        All intermediate artefacts plus a telemetry summary.

    Raises
    ------
    ValueError
        If the waveform shape is invalid.
    RuntimeError
        If the decoder or feedback sink was never initialised.
    """
    window = self._validate_waveform(waveform)
    compressed, waveform_result = self.waveform_codec.compress(window)
    spike_raster = self._detect_spike_raster(window)
    aer_payload, aer_result = self.aer_codec.compress(spike_raster)

    # __post_init__ fills these in; guard keeps manual construction honest.
    if self.decoder is None or self.feedback_sink is None:
        raise RuntimeError("closed-loop BCI template was not initialised")
    decoded = self.decoder.decode(spike_raster)
    feedback = self.feedback_sink.apply_feedback(decoded, window_start_us)

    # Record input spike count and active feedback channels for this window.
    self.telemetry.record(
        self.config.input_layer_id,
        int(spike_raster.sum()),
        self.config.n_channels,
    )
    self.telemetry.record(
        self.config.feedback_layer_id,
        feedback.active_count,
        self.config.n_channels,
    )

    return ClosedLoopBCIResult(
        compressed_waveform=compressed,
        waveform=waveform_result,
        spike_raster=spike_raster,
        aer_payload=aer_payload,
        aer=aer_result,
        decoded_rates=decoded,
        feedback=feedback,
        telemetry=self.telemetry.summary(),
    )

sc_neurocore.interfaces.bci_hil_manifest

Reference manifests for closed-loop BCI hardware-in-the-loop templates.

BCIHILBoardProfile dataclass

Board/input profile for a closed-loop BCI reference pipeline.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
@dataclass(frozen=True)
class BCIHILBoardProfile:
    """Board/input profile for a closed-loop BCI reference pipeline."""

    profile_id: str
    display_name: str
    board: str
    input_source: str
    model_reference: str
    n_channels: int
    sampling_rate_hz: int
    transport: str
    feedback_transport: str
    required_artefacts: tuple[str, ...]
    pipeline_steps: tuple[str, ...]
    safety_contract: dict[str, Any]

    def to_dict(self) -> dict[str, Any]:
        """Return a deterministic manifest dictionary.

        Keys come out in sorted order; tuple/dict fields are copied into
        mutable containers so callers can serialize or edit them freely.
        """
        manifest: dict[str, Any] = {
            "profile_id": self.profile_id,
            "display_name": self.display_name,
            "board": self.board,
            "input_source": self.input_source,
            "model_reference": self.model_reference,
            "n_channels": self.n_channels,
            "sampling_rate_hz": self.sampling_rate_hz,
            "transport": self.transport,
            "feedback_transport": self.feedback_transport,
            "required_artefacts": list(self.required_artefacts),
            "pipeline_steps": list(self.pipeline_steps),
            "safety_contract": dict(self.safety_contract),
        }
        return dict(sorted(manifest.items()))

to_dict()

Return a deterministic manifest dictionary.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
def to_dict(self) -> dict[str, Any]:
    """Return a deterministic manifest dictionary.

    Keys come out in sorted order; tuple/dict fields are copied into
    mutable containers so callers can serialize or edit them freely.
    """
    manifest: dict[str, Any] = {
        "profile_id": self.profile_id,
        "display_name": self.display_name,
        "board": self.board,
        "input_source": self.input_source,
        "model_reference": self.model_reference,
        "n_channels": self.n_channels,
        "sampling_rate_hz": self.sampling_rate_hz,
        "transport": self.transport,
        "feedback_transport": self.feedback_transport,
        "required_artefacts": list(self.required_artefacts),
        "pipeline_steps": list(self.pipeline_steps),
        "safety_contract": dict(self.safety_contract),
    }
    return dict(sorted(manifest.items()))

available_bci_hil_profiles()

Return all reference profiles in deterministic order.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
118
119
120
121
def available_bci_hil_profiles() -> tuple[BCIHILBoardProfile, ...]:
    """Return all reference profiles in deterministic order."""
    # Sort by profile key so the tuple order is stable across runs.
    ordered_ids = sorted(_REFERENCE_PROFILES)
    return tuple(_REFERENCE_PROFILES[profile_id] for profile_id in ordered_ids)

get_bci_hil_profile(profile_id)

Return one reference profile by identifier.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
124
125
126
127
128
129
130
131
def get_bci_hil_profile(profile_id: str) -> BCIHILBoardProfile:
    """Return one reference profile by identifier."""
    # Accept dashed and mixed-case identifiers (e.g. "PYNQ-SHD").
    key = profile_id.lower().replace("-", "_")
    profile = _REFERENCE_PROFILES.get(key)
    if profile is None:
        known = ", ".join(sorted(_REFERENCE_PROFILES))
        raise KeyError(f"unknown BCI HIL profile '{profile_id}'. Known profiles: {known}")
    return profile

build_bci_hil_reference_manifest(profile_id='pynq_shd')

Build a deterministic closed-loop BCI/HIL reference manifest.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
def build_bci_hil_reference_manifest(profile_id: str = "pynq_shd") -> dict[str, Any]:
    """Build a deterministic closed-loop BCI/HIL reference manifest."""
    profile = get_bci_hil_profile(profile_id)
    config = _profile_to_config(profile)
    # Keys listed alphabetically so serialized output is stable.
    template_config: dict[str, Any] = {
        "feedback_gain": config.feedback_gain,
        "feedback_layer_id": config.feedback_layer_id,
        "input_layer_id": config.input_layer_id,
        "max_feedback": config.max_feedback,
        "n_channels": config.n_channels,
        "quantize_bits": config.quantize_bits,
        "sampling_rate_hz": config.sampling_rate_hz,
        "snippet_samples": config.snippet_samples,
        "threshold_sigma": config.threshold_sigma,
        "timestamp_bits": config.timestamp_bits,
        "waveform_mode": config.waveform_mode,
    }
    return {
        "schema_version": "1.0",
        "profile": profile.to_dict(),
        "template_config": template_config,
    }

create_bci_hil_template(profile_id='pynq_shd')

Create a ClosedLoopBCITemplate from a reference profile.

Source code in src/sc_neurocore/interfaces/bci_hil_manifest.py
Python
158
159
160
161
def create_bci_hil_template(profile_id: str = "pynq_shd") -> ClosedLoopBCITemplate:
    """Create a `ClosedLoopBCITemplate` from a reference profile."""
    profile = get_bci_hil_profile(profile_id)
    return ClosedLoopBCITemplate(_profile_to_config(profile))

build_bci_hil_reference_manifest() exposes deterministic reference manifests for pynq_shd and probe_384ch. Both use the ClosedLoopBCITemplate path:

Text Only
raw waveform window
  -> WaveformCodec compression
  -> threshold spike raster
  -> AER payload generation
  -> rate decoder
  -> feedback frame
  -> DeviceTelemetry summary

The pynq_shd profile uses the repository's documented SHD topology (700 -> 256 -> 20) as the reference model shape and defaults to the in-process implant emulator. Physical PYNQ use still requires the external bitstream and an explicit hardware feedback sink.

CCW Bridge

sc_neurocore.interfaces.ccw_bridge

SC-NeuroCore ↔ CCW/VIBRANA bridge.

Converts stochastic bitstream outputs to audio parameters and visualization states for the CCW application.

CCWMode

Bases: str, Enum

CCW modulation modes aligned with VIBRANA.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
30
31
32
33
34
35
36
37
38
class CCWMode(str, Enum):
    """CCW modulation modes aligned with VIBRANA.

    Inherits from ``str`` so members compare and serialize as plain
    strings.
    """

    THEURGIC = "theurgic"
    HEALING = "healing"
    MEDITATION = "meditation"
    COSMIC = "cosmic"
    FOCUS = "focus"
    CREATIVITY = "creativity"

CCWParameters dataclass

Parameters for CCW audio generation.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
41
42
43
44
45
46
47
48
49
@dataclass
class CCWParameters:
    """Parameters for CCW audio generation."""

    base_frequency: float = 7.83  # Schumann resonance (Hz)
    carrier_frequency: float = 432.0  # Verdi tuning (A4=432 Hz)
    binaural_offset: float = 10.0  # binaural beat offset (Hz)
    modulation_depth: float = 0.5  # modulation amount, 0..1
    sample_rate: int = 44100  # audio sample rate (Hz)

VIBRANAState dataclass

State for VIBRANA visualization sync.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
52
53
54
55
56
57
58
59
60
@dataclass
class VIBRANAState:
    """State for VIBRANA visualization sync."""

    mode: CCWMode = CCWMode.MEDITATION  # active modulation mode
    geometry_phase: float = 0.0  # visualization geometry phase
    color_intensity: float = 0.5  # color intensity, 0..1
    rotation_speed: float = 1.0  # rotation speed multiplier
    glyph_weights: np.ndarray[Any, Any] = field(default_factory=lambda: np.zeros(6))  # 6 glyph weights

CCWBridge

Bridge between SC-NeuroCore and CCW/VIBRANA systems.

Converts bitstream outputs from SCPN layers into audio parameters and visualization states for the CCW application.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
class CCWBridge:
    """
    Bridge between SC-NeuroCore and CCW/VIBRANA systems.

    Converts bitstream outputs from SCPN layers into audio parameters
    and visualization states for the CCW application.
    """

    # SCPN metric name -> (CCW parameter, min value, max value).
    # Smoothed metric values (assumed in [0, 1]) are linearly mapped into the range.
    METRIC_MAPPINGS = {
        "l1_quantum_coherence": ("modulation_depth", 0.3, 0.8),
        "l2_neurochemical_activity": ("carrier_blend", 0.0, 1.0),
        "l4_cellular_sync": ("binaural_offset", 4.0, 40.0),
        "l5_organismal_coherence": ("amplitude", 0.3, 1.0),
        "l6_planetary_coherence": ("schumann_blend", 0.0, 1.0),
        "l7_symbolic_health": ("sacred_geometry_intensity", 0.0, 1.0),
    }

    # Mode -> (base frequency Hz, harmonic frequency Hz), aligned with VIBRANA.
    MODE_FREQUENCIES = {
        CCWMode.THEURGIC: (7.83, 14.3),  # Schumann
        CCWMode.HEALING: (528.0, 432.0),  # Solfeggio
        CCWMode.MEDITATION: (4.0, 7.83),  # Theta-Schumann
        CCWMode.COSMIC: (136.1, 272.2),  # OM
        CCWMode.FOCUS: (14.0, 18.0),  # Beta
        CCWMode.CREATIVITY: (10.0, 12.0),  # Alpha
    }

    def __init__(self, params: Optional[CCWParameters] = None):
        """Initialize the bridge.

        Args:
            params: Audio parameter set; a default CCWParameters is used
                when omitted.
        """
        self.params = params or CCWParameters()
        self.vibrana_state = VIBRANAState()

        # Oscillator phase state (radians), kept between buffers so that
        # consecutive generate_binaural_sample() calls are phase-continuous.
        self.phase_left = 0.0
        self.phase_right = 0.0
        self.modulation_phase = 0.0

        # Rolling per-metric history used for moving-average smoothing.
        self.metric_history: Dict[str, List[float]] = {}
        self.smoothing_window = 10

    def _smooth(self, metric_name: str, value: float) -> float:
        """Push *value* into the metric's rolling window and return the mean."""
        history = self.metric_history.setdefault(metric_name, [])
        history.append(value)
        if len(history) > self.smoothing_window:
            history.pop(0)
        return float(np.mean(history))

    def bitstream_to_frequency(
        self, bitstream: np.ndarray[Any, Any], freq_min: float = 1.0, freq_max: float = 40.0
    ) -> float:
        """
        Convert a bitstream to a frequency value.

        Args:
            bitstream: Binary array from SC layer output
            freq_min: Minimum frequency (Hz)
            freq_max: Maximum frequency (Hz)

        Returns:
            Frequency in Hz mapped from bitstream probability
        """
        # The bitstream mean is the encoded probability; interpolate linearly.
        prob = np.mean(bitstream)
        return freq_min + prob * (freq_max - freq_min)

    def scpn_metrics_to_ccw(self, metrics: Dict[str, float]) -> Dict[str, float]:
        """
        Convert SCPN global metrics to CCW audio parameters.

        Args:
            metrics: Dict from get_global_metrics() of SCPN layers

        Returns:
            Dict of CCW-compatible audio parameters
        """
        # Defaults: configured carrier/offset/depth plus neutral 0.5 blends.
        ccw_params = {
            "base_frequency": self.params.base_frequency,
            "carrier_frequency": self.params.carrier_frequency,
            "binaural_offset": self.params.binaural_offset,
            "modulation_depth": self.params.modulation_depth,
            "amplitude": 0.5,
            "carrier_blend": 0.5,
            "schumann_blend": 0.5,
            "sacred_geometry_intensity": 0.5,
        }

        for metric_name, (param_name, min_val, max_val) in self.METRIC_MAPPINGS.items():
            if metric_name not in metrics:
                continue
            # Moving-average smoothing avoids abrupt audio parameter jumps.
            smoothed = self._smooth(metric_name, metrics[metric_name])
            ccw_params[param_name] = min_val + smoothed * (max_val - min_val)

        return ccw_params

    def glyph_vector_to_vibrana(self, glyph_vector: np.ndarray[Any, Any]) -> Dict[str, Any]:
        """
        Convert L7 glyph vector to VIBRANA visualization parameters.

        Args:
            glyph_vector: 6D vector [phi, fib, metatron, platonic, e8, health];
                shorter vectors are zero-padded.

        Returns:
            Dict of VIBRANA visualization parameters
        """
        if len(glyph_vector) < 6:
            glyph_vector = np.pad(glyph_vector, (0, 6 - len(glyph_vector)))

        self.vibrana_state.glyph_weights = glyph_vector

        # Map glyph components to visualization
        phi_alignment = glyph_vector[0]
        fibonacci_alignment = glyph_vector[1]
        metatron_flow = glyph_vector[2]
        platonic_coherence = glyph_vector[3]
        e8_alignment = glyph_vector[4]
        symbolic_health = glyph_vector[5]

        # Pick the mode whose glyph signature dominates; FOCUS is the fallback.
        if metatron_flow > 0.7:
            self.vibrana_state.mode = CCWMode.THEURGIC
        elif phi_alignment > 0.8 and fibonacci_alignment > 0.8:
            self.vibrana_state.mode = CCWMode.COSMIC
        elif symbolic_health > 0.6:
            self.vibrana_state.mode = CCWMode.HEALING
        elif e8_alignment > 0.7:
            self.vibrana_state.mode = CCWMode.MEDITATION
        else:
            self.vibrana_state.mode = CCWMode.FOCUS

        # Set visualization parameters
        self.vibrana_state.color_intensity = symbolic_health
        self.vibrana_state.rotation_speed = 0.5 + metatron_flow * 2.0
        self.vibrana_state.geometry_phase += platonic_coherence * 0.1

        return {
            "mode": self.vibrana_state.mode.value,
            "geometry_phase": float(self.vibrana_state.geometry_phase % (2 * np.pi)),
            "color_intensity": float(self.vibrana_state.color_intensity),
            "rotation_speed": float(self.vibrana_state.rotation_speed),
            "glyph_weights": {
                "phi_alignment": float(phi_alignment),
                "fibonacci_alignment": float(fibonacci_alignment),
                "metatron_flow": float(metatron_flow),
                "platonic_coherence": float(platonic_coherence),
                "e8_alignment": float(e8_alignment),
                "symbolic_health": float(symbolic_health),
            },
            "frequencies": {
                "base": self.MODE_FREQUENCIES[self.vibrana_state.mode][0],
                "harmonic": self.MODE_FREQUENCIES[self.vibrana_state.mode][1],
            },
        }

    def generate_binaural_sample(
        self, ccw_params: Dict[str, float], duration_samples: int = 1024
    ) -> Tuple[np.ndarray[Any, Any], np.ndarray[Any, Any]]:
        """
        Generate binaural audio samples from CCW parameters.

        Args:
            ccw_params: Parameters from scpn_metrics_to_ccw()
            duration_samples: Number of samples to generate

        Returns:
            Tuple of (left_channel, right_channel) numpy arrays
        """
        dt = 1.0 / self.params.sample_rate

        # Extract parameters (fall back to sensible defaults)
        carrier = ccw_params.get("carrier_frequency", 432.0)
        binaural = ccw_params.get("binaural_offset", 10.0)
        mod_depth = ccw_params.get("modulation_depth", 0.5)
        amplitude = ccw_params.get("amplitude", 0.5)
        base_freq = ccw_params.get("base_frequency", 7.83)

        # Phase-continuous generation: sample k advances k+1 phase steps
        # past the stored phase. (Fix: the previous version built an unused
        # time array and accumulated the constant step with cumsum.)
        steps = np.arange(1, duration_samples + 1)
        phases_left = self.phase_left + steps * (2 * np.pi * carrier * dt)
        phases_right = self.phase_right + steps * (2 * np.pi * (carrier + binaural) * dt)
        mod_phases = self.modulation_phase + steps * (2 * np.pi * base_freq * dt)

        # Persist wrapped end phases so consecutive buffers are click-free.
        self.phase_left = phases_left[-1] % (2 * np.pi)
        self.phase_right = phases_right[-1] % (2 * np.pi)
        self.modulation_phase = mod_phases[-1] % (2 * np.pi)

        # Low-frequency amplitude envelope: 1.0 down to (1 - mod_depth).
        modulation = 1.0 - mod_depth * (1 + np.sin(mod_phases)) / 2

        left = amplitude * np.sin(phases_left) * modulation
        right = amplitude * np.sin(phases_right) * modulation

        return left, right

    def generate_ccw_metadata(
        self, scpn_outputs: Dict[str, Any], glyph_vector: Optional[np.ndarray[Any, Any]] = None
    ) -> Dict[str, Any]:
        """
        Generate complete CCW metadata package for audio/visual sync.

        Args:
            scpn_outputs: Full output dict from run_integrated_step()
            glyph_vector: Optional L7 glyph vector

        Returns:
            Complete metadata dict for CCW system
        """
        # Collect numeric metrics from layer outputs whose key set mentions
        # "coherence". (Fix: scan keys explicitly instead of substring-matching
        # the repr of dict.keys().)
        metrics = {}
        for layer_name, output in scpn_outputs.items():
            if isinstance(output, dict) and any(
                "coherence" in str(k).lower() for k in output
            ):
                for k, v in output.items():
                    if isinstance(v, (int, float)):
                        metrics[f"{layer_name}_{k}"] = float(v)

        # Get glyph vector from L7 if not provided
        if glyph_vector is None and "l7" in scpn_outputs:
            l7_out = scpn_outputs["l7"]
            if isinstance(l7_out, dict) and "glyph_vector" in l7_out:
                glyph_vector = l7_out["glyph_vector"]

        # Convert to CCW parameters
        ccw_params = self.scpn_metrics_to_ccw(metrics)

        # Convert glyph to VIBRANA (only when a glyph vector is available)
        vibrana_params = {}
        if glyph_vector is not None:
            vibrana_params = self.glyph_vector_to_vibrana(glyph_vector)

        return {
            "timestamp": float(np.datetime64("now").astype(np.float64)),
            "ccw_audio": ccw_params,
            "vibrana_visual": vibrana_params,
            "scpn_metrics": metrics,
            "mode": self.vibrana_state.mode.value,
            "bridge_version": "1.0.0",
        }

    def export_glyph_stream(
        self,
        glyph_vector: np.ndarray[Any, Any],
        cosmic_vector: Optional[Dict[str, float]] = None,
        filepath: Optional[str] = None,
    ) -> str:
        """
        Export glyph stream data for VIBRANA/CCW hardware playback.

        Args:
            glyph_vector: Normalized glyph vector from L7 (missing trailing
                components are exported as 0.0)
            cosmic_vector: Optional L8 cosmic phase data
            filepath: Optional file path to save

        Returns:
            JSON string of glyph stream data
        """
        component_names = (
            "phi_alignment",
            "fibonacci_alignment",
            "metatron_flow",
            "platonic_coherence",
            "e8_alignment",
            "symbolic_health",
        )
        stream_data = {
            "glyph_vector": {
                name: float(glyph_vector[i]) if len(glyph_vector) > i else 0.0
                for i, name in enumerate(component_names)
            },
            "cosmic_vector": cosmic_vector or {},
            "layer_weights": {
                "metatron_weight": 0.95,  # Default high weight for Metatron
                "phi_weight": 0.85,
                "e8_weight": 0.75,
            },
            "routing": {
                "target": "vibrana_hardware",
                "protocol": "bitstream",
                "encoding": "normalized_float",
            },
        }

        json_str = json.dumps(stream_data, indent=2)

        if filepath:
            with open(filepath, "w") as f:
                f.write(json_str)
            logger.info(f"Glyph stream exported to {filepath}")

        return json_str

    def create_session_config(
        self, mode: CCWMode = CCWMode.MEDITATION, duration_minutes: int = 20
    ) -> Dict[str, Any]:
        """
        Create a complete CCW session configuration.

        Args:
            mode: CCW/VIBRANA mode
            duration_minutes: Session duration

        Returns:
            Session configuration dict
        """
        base_freq, harmonic_freq = self.MODE_FREQUENCIES[mode]

        return {
            "session": {
                "mode": mode.value,
                "duration_minutes": duration_minutes,
                "created_at": str(np.datetime64("now")),
            },
            "audio": {
                "base_frequency": base_freq,
                "harmonic_frequency": harmonic_freq,
                "carrier_frequency": self.params.carrier_frequency,
                "binaural_offset": self.params.binaural_offset,
                "sample_rate": self.params.sample_rate,
            },
            "visual": {
                "geometry_pattern": "thirteen_fold",
                "rotation_enabled": True,
                "color_scheme": mode.value,
            },
            "scpn_integration": {
                "enabled": True,
                "update_rate_hz": 10,
                "layers": ["l1", "l4", "l5", "l6", "l7"],
            },
        }

bitstream_to_frequency(bitstream, freq_min=1.0, freq_max=40.0)

Convert a bitstream to a frequency value.

Parameters:

Name Type Description Default
bitstream ndarray[Any, Any]

Binary array from SC layer output

required
freq_min float

Minimum frequency (Hz)

1.0
freq_max float

Maximum frequency (Hz)

40.0

Returns:

Type Description
float

Frequency in Hz mapped from bitstream probability

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
def bitstream_to_frequency(
    self, bitstream: np.ndarray[Any, Any], freq_min: float = 1.0, freq_max: float = 40.0
) -> float:
    """Map a stochastic bitstream onto a frequency range.

    The mean of the bitstream (the probability it encodes) is used to
    linearly interpolate between ``freq_min`` and ``freq_max``.

    Args:
        bitstream: Binary array from SC layer output.
        freq_min: Minimum frequency (Hz).
        freq_max: Maximum frequency (Hz).

    Returns:
        Frequency in Hz mapped from the bitstream probability.
    """
    duty_cycle = np.mean(bitstream)
    span = freq_max - freq_min
    return freq_min + duty_cycle * span

scpn_metrics_to_ccw(metrics)

Convert SCPN global metrics to CCW audio parameters.

Parameters:

Name Type Description Default
metrics Dict[str, float]

Dict from get_global_metrics() of SCPN layers

required

Returns:

Type Description
Dict[str, float]

Dict of CCW-compatible audio parameters

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
def scpn_metrics_to_ccw(self, metrics: Dict[str, float]) -> Dict[str, float]:
    """Translate SCPN global metrics into CCW audio parameters.

    Each mapped metric is smoothed over a rolling window before being
    linearly rescaled into its target parameter range; unmapped
    parameters keep their defaults.

    Args:
        metrics: Dict from get_global_metrics() of SCPN layers.

    Returns:
        Dict of CCW-compatible audio parameters.
    """
    # Start from configured values plus neutral 0.5 defaults.
    result: Dict[str, float] = {
        "base_frequency": self.params.base_frequency,
        "carrier_frequency": self.params.carrier_frequency,
        "binaural_offset": self.params.binaural_offset,
        "modulation_depth": self.params.modulation_depth,
        "amplitude": 0.5,
        "carrier_blend": 0.5,
        "schumann_blend": 0.5,
        "sacred_geometry_intensity": 0.5,
    }

    for metric_name, (param_name, lo, hi) in self.METRIC_MAPPINGS.items():
        if metric_name not in metrics:
            continue
        # Rolling moving-average smoothing over the last few readings.
        window = self.metric_history.setdefault(metric_name, [])
        window.append(metrics[metric_name])
        if len(window) > self.smoothing_window:
            window.pop(0)
        # Linear map of the smoothed value into [lo, hi].
        result[param_name] = lo + np.mean(window) * (hi - lo)  # type: ignore[assignment]

    return result

glyph_vector_to_vibrana(glyph_vector)

Convert L7 glyph vector to VIBRANA visualization parameters.

Parameters:

Name Type Description Default
glyph_vector ndarray[Any, Any]

6D vector [phi, fib, metatron, platonic, e8, health]

required

Returns:

Type Description
Dict[str, Any]

Dict of VIBRANA visualization parameters

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
def glyph_vector_to_vibrana(self, glyph_vector: np.ndarray[Any, Any]) -> Dict[str, Any]:
    """Translate an L7 glyph vector into VIBRANA visualization parameters.

    Args:
        glyph_vector: 6D vector [phi, fib, metatron, platonic, e8, health];
            shorter vectors are zero-padded.

    Returns:
        Dict of VIBRANA visualization parameters (mode, geometry phase,
        color intensity, rotation speed, glyph weights, mode frequencies).
    """
    # Zero-pad short vectors up to the six expected glyph components.
    if len(glyph_vector) < 6:
        glyph_vector = np.pad(glyph_vector, (0, 6 - len(glyph_vector)))

    state = self.vibrana_state
    state.glyph_weights = glyph_vector

    phi, fib, metatron, platonic, e8, health = (glyph_vector[i] for i in range(6))

    # Choose the mode whose glyph signature dominates; FOCUS is the fallback.
    if metatron > 0.7:
        state.mode = CCWMode.THEURGIC
    elif phi > 0.8 and fib > 0.8:
        state.mode = CCWMode.COSMIC
    elif health > 0.6:
        state.mode = CCWMode.HEALING
    elif e8 > 0.7:
        state.mode = CCWMode.MEDITATION
    else:
        state.mode = CCWMode.FOCUS

    # Drive the visualization state from the glyph components.
    state.color_intensity = health
    state.rotation_speed = 0.5 + metatron * 2.0
    state.geometry_phase += platonic * 0.1

    base_hz, harmonic_hz = self.MODE_FREQUENCIES[state.mode]
    weight_names = (
        "phi_alignment",
        "fibonacci_alignment",
        "metatron_flow",
        "platonic_coherence",
        "e8_alignment",
        "symbolic_health",
    )
    return {
        "mode": state.mode.value,
        "geometry_phase": float(state.geometry_phase % (2 * np.pi)),
        "color_intensity": float(state.color_intensity),
        "rotation_speed": float(state.rotation_speed),
        "glyph_weights": {
            name: float(glyph_vector[i]) for i, name in enumerate(weight_names)
        },
        "frequencies": {"base": base_hz, "harmonic": harmonic_hz},
    }

generate_binaural_sample(ccw_params, duration_samples=1024)

Generate binaural audio samples from CCW parameters.

Parameters:

Name Type Description Default
ccw_params Dict[str, float]

Parameters from scpn_metrics_to_ccw()

required
duration_samples int

Number of samples to generate

1024

Returns:

Type Description
Tuple[ndarray[Any, Any], ndarray[Any, Any]]

Tuple of (left_channel, right_channel) numpy arrays

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
def generate_binaural_sample(
    self, ccw_params: Dict[str, float], duration_samples: int = 1024
) -> Tuple[np.ndarray[Any, Any], np.ndarray[Any, Any]]:
    """
    Generate binaural audio samples from CCW parameters.

    The right channel is offset from the left carrier by the binaural
    beat frequency, and both are shaped by a low-frequency amplitude
    envelope. Oscillator phases are persisted on ``self`` so consecutive
    buffers are click-free.

    Args:
        ccw_params: Parameters from scpn_metrics_to_ccw()
        duration_samples: Number of samples to generate

    Returns:
        Tuple of (left_channel, right_channel) numpy arrays
    """
    dt = 1.0 / self.params.sample_rate

    # Extract parameters (fall back to sensible defaults).
    carrier = ccw_params.get("carrier_frequency", 432.0)
    binaural = ccw_params.get("binaural_offset", 10.0)
    mod_depth = ccw_params.get("modulation_depth", 0.5)
    amplitude = ccw_params.get("amplitude", 0.5)
    base_freq = ccw_params.get("base_frequency", 7.83)

    # Phase-continuous generation: sample k advances k+1 phase steps past
    # the stored phase. (Fix: removed the unused time array `t` and the
    # cumsum over a constant-increment ones array.)
    steps = np.arange(1, duration_samples + 1)
    phases_left = self.phase_left + steps * (2 * np.pi * carrier * dt)
    phases_right = self.phase_right + steps * (2 * np.pi * (carrier + binaural) * dt)
    mod_phases = self.modulation_phase + steps * (2 * np.pi * base_freq * dt)

    # Persist wrapped end phases for continuity with the next buffer.
    self.phase_left = phases_left[-1] % (2 * np.pi)
    self.phase_right = phases_right[-1] % (2 * np.pi)
    self.modulation_phase = mod_phases[-1] % (2 * np.pi)

    # Low-frequency amplitude envelope: oscillates between 1.0 and
    # (1 - mod_depth).
    modulation = 1.0 - mod_depth * (1 + np.sin(mod_phases)) / 2

    left = amplitude * np.sin(phases_left) * modulation
    right = amplitude * np.sin(phases_right) * modulation

    return left, right

generate_ccw_metadata(scpn_outputs, glyph_vector=None)

Generate complete CCW metadata package for audio/visual sync.

Parameters:

Name Type Description Default
scpn_outputs Dict[str, Any]

Full output dict from run_integrated_step()

required
glyph_vector Optional[ndarray[Any, Any]]

Optional L7 glyph vector

None

Returns:

Type Description
Dict[str, Any]

Complete metadata dict for CCW system

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
def generate_ccw_metadata(
    self, scpn_outputs: Dict[str, Any], glyph_vector: Optional[np.ndarray[Any, Any]] = None
) -> Dict[str, Any]:
    """
    Generate complete CCW metadata package for audio/visual sync.

    Args:
        scpn_outputs: Full output dict from run_integrated_step()
        glyph_vector: Optional L7 glyph vector

    Returns:
        Complete metadata dict for CCW system
    """
    # Collect numeric metrics from layer outputs whose key set mentions
    # "coherence". (Fix: scan keys explicitly instead of substring-matching
    # the repr string of dict.keys(), which was fragile and opaque.)
    metrics = {}
    for layer_name, output in scpn_outputs.items():
        if isinstance(output, dict) and any(
            "coherence" in str(k).lower() for k in output
        ):
            for k, v in output.items():
                if isinstance(v, (int, float)):
                    metrics[f"{layer_name}_{k}"] = float(v)

    # Get glyph vector from L7 if not provided.
    if glyph_vector is None and "l7" in scpn_outputs:
        l7_out = scpn_outputs["l7"]
        if isinstance(l7_out, dict) and "glyph_vector" in l7_out:
            glyph_vector = l7_out["glyph_vector"]

    # Convert to CCW audio parameters.
    ccw_params = self.scpn_metrics_to_ccw(metrics)

    # Convert glyph to VIBRANA (empty dict when no glyph is available).
    vibrana_params = {}
    if glyph_vector is not None:
        vibrana_params = self.glyph_vector_to_vibrana(glyph_vector)

    return {
        "timestamp": float(np.datetime64("now").astype(np.float64)),
        "ccw_audio": ccw_params,
        "vibrana_visual": vibrana_params,
        "scpn_metrics": metrics,
        "mode": self.vibrana_state.mode.value,
        "bridge_version": "1.0.0",
    }

export_glyph_stream(glyph_vector, cosmic_vector=None, filepath=None)

Export glyph stream data for VIBRANA/CCW hardware playback.

Parameters:

Name Type Description Default
glyph_vector ndarray[Any, Any]

Normalized glyph vector from L7

required
cosmic_vector Optional[Dict[str, float]]

Optional L8 cosmic phase data

None
filepath Optional[str]

Optional file path to save

None

Returns:

Type Description
str

JSON string of glyph stream data

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
def export_glyph_stream(
    self,
    glyph_vector: np.ndarray[Any, Any],
    cosmic_vector: Optional[Dict[str, float]] = None,
    filepath: Optional[str] = None,
) -> str:
    """Serialize glyph stream data for VIBRANA/CCW hardware playback.

    Args:
        glyph_vector: Normalized glyph vector from L7; missing trailing
            components are exported as 0.0.
        cosmic_vector: Optional L8 cosmic phase data.
        filepath: Optional file path; when given, the JSON is also
            written to disk.

    Returns:
        JSON string of glyph stream data.
    """
    component_names = (
        "phi_alignment",
        "fibonacci_alignment",
        "metatron_flow",
        "platonic_coherence",
        "e8_alignment",
        "symbolic_health",
    )
    glyph_payload = {}
    for index, name in enumerate(component_names):
        glyph_payload[name] = (
            float(glyph_vector[index]) if len(glyph_vector) > index else 0.0
        )

    stream_data = {
        "glyph_vector": glyph_payload,
        "cosmic_vector": cosmic_vector or {},
        "layer_weights": {
            "metatron_weight": 0.95,  # Default high weight for Metatron
            "phi_weight": 0.85,
            "e8_weight": 0.75,
        },
        "routing": {
            "target": "vibrana_hardware",
            "protocol": "bitstream",
            "encoding": "normalized_float",
        },
    }

    json_str = json.dumps(stream_data, indent=2)

    if filepath:
        with open(filepath, "w") as f:
            f.write(json_str)
        logger.info(f"Glyph stream exported to {filepath}")

    return json_str

create_session_config(mode=CCWMode.MEDITATION, duration_minutes=20)

Create a complete CCW session configuration.

Parameters:

Name Type Description Default
mode CCWMode

CCW/VIBRANA mode

MEDITATION
duration_minutes int

Session duration

20

Returns:

Type Description
Dict[str, Any]

Session configuration dict

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
def create_session_config(
    self, mode: CCWMode = CCWMode.MEDITATION, duration_minutes: int = 20
) -> Dict[str, Any]:
    """Build a complete CCW session configuration.

    Args:
        mode: CCW/VIBRANA mode preset.
        duration_minutes: Session duration in minutes.

    Returns:
        Nested configuration dict with session, audio, visual, and
        SCPN-integration sections.
    """
    base_freq, harmonic_freq = self.MODE_FREQUENCIES[mode]

    session_section = {
        "mode": mode.value,
        "duration_minutes": duration_minutes,
        "created_at": str(np.datetime64("now")),
    }
    audio_section = {
        "base_frequency": base_freq,
        "harmonic_frequency": harmonic_freq,
        "carrier_frequency": self.params.carrier_frequency,
        "binaural_offset": self.params.binaural_offset,
        "sample_rate": self.params.sample_rate,
    }
    visual_section = {
        "geometry_pattern": "thirteen_fold",
        "rotation_enabled": True,
        "color_scheme": mode.value,
    }
    integration_section = {
        "enabled": True,
        "update_rate_hz": 10,
        "layers": ["l1", "l4", "l5", "l6", "l7"],
    }
    return {
        "session": session_section,
        "audio": audio_section,
        "visual": visual_section,
        "scpn_integration": integration_section,
    }

create_bridge(ccw_params=None)

Factory function to create a CCW bridge instance.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
Python
415
416
417
def create_bridge(ccw_params: Optional[CCWParameters] = None) -> CCWBridge:
    """Factory function to create a CCW bridge instance.

    Args:
        ccw_params: Optional audio parameter set; the bridge falls back
            to default parameters when omitted.
    """
    bridge = CCWBridge(ccw_params)
    return bridge

DVS Input

sc_neurocore.interfaces.dvs_input

DVSInputLayer dataclass

Interface for Dynamic Vision Sensors (Event Cameras). Converts AER events (x, y, t, p) into SC Bitstreams.

Source code in src/sc_neurocore/interfaces/dvs_input.py
Python
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
@dataclass
class DVSInputLayer:
    """
    Interface for Dynamic Vision Sensors (Event Cameras).
    Converts AER events (x, y, t, p) into SC Bitstreams.

    Attributes
    ----------
    height, width : int
        Sensor resolution in pixels.
    decay_tau : float
        Time constant (same units as event timestamps, i.e. ms) used to
        decay old activity from the event surface.
    """

    height: int
    width: int
    decay_tau: float = 100.0  # Time constant to decay old events

    def __post_init__(self) -> None:
        if not math.isfinite(float(self.decay_tau)) or float(self.decay_tau) <= 0.0:
            raise ValueError("decay_tau must be finite and positive")
        # Surface potential representing event density
        self.surface = np.zeros((self.height, self.width), dtype=np.float32)
        self.last_update_time = 0.0

    def process_events(self, events: list[tuple[int, int, float, int]]) -> np.ndarray[Any, Any]:
        """
        Integrate a batch of events.
        Events format: (x, y, timestamp_ms, polarity)
        Returns: Frame of probabilities [0, 1]

        Fix: an empty batch previously leaked the raw internal surface
        (unsaturated densities, aliasing internal state); it now returns a
        tanh-saturated probability frame on both paths, matching the
        documented contract.
        """
        if not events:
            return np.tanh(self.surface)
        self._validate_events(events)

        current_time = events[-1][2]
        dt = current_time - self.last_update_time

        # Exponential decay of old activity: V_new = V_old * exp(-dt/tau)
        self.surface *= np.exp(-dt / self.decay_tau)

        # Accumulate event density; polarity is validated but both
        # polarities contribute equally to the activity map.
        for x, y, _t, _p in events:
            if 0 <= x < self.width and 0 <= y < self.height:
                self.surface[y, x] += 1.0

        self.last_update_time = current_time
        # tanh saturates the density into [0, 1) for SC generation
        return np.tanh(self.surface)

    @staticmethod
    def _validate_events(events: list[tuple[int, int, float, int]]) -> None:
        """Reject non-finite or decreasing timestamps and invalid polarities."""
        previous_t: float | None = None
        for _, _, t, p in events:
            timestamp = float(t)
            if not math.isfinite(timestamp):
                raise ValueError("event timestamp must be finite")
            if previous_t is not None and timestamp < previous_t:
                raise ValueError("event timestamps must be monotonically non-decreasing")
            if p not in {-1, 0, 1}:
                raise ValueError("event polarity must be -1, 0, or 1")
            previous_t = timestamp

    def generate_bitstream_frame(self, length: int = 256) -> np.ndarray[Any, Any]:
        """
        Generate a HxWxLength bitstream cube from current surface state.

        NOTE(review): samples from the unseeded global np.random generator,
        so frames are not reproducible — confirm whether a seeded Generator
        should be injected here.
        """
        probs = np.tanh(self.surface)
        # Vectorized per-pixel Bernoulli draws, shape (H, W, length).
        rands = np.random.random((self.height, self.width, length))
        return (rands < probs[:, :, None]).astype(np.uint8)

process_events(events)

Integrate a batch of events. Events format: (x, y, timestamp_ms, polarity) Returns: Frame of probabilities [0, 1]

Source code in src/sc_neurocore/interfaces/dvs_input.py
Python
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
def process_events(self, events: list[tuple[int, int, float, int]]) -> np.ndarray[Any, Any]:
    """
    Integrate a batch of events.
    Events format: (x, y, timestamp_ms, polarity)
    Returns: Frame of probabilities [0, 1]
    """
    if not events:
        # Fix: honour the documented contract even with no new events —
        # return probabilities, not the raw (unbounded) surface.
        return np.tanh(self.surface)
    self._validate_events(events)

    current_time = events[-1][2]
    dt = current_time - self.last_update_time

    # Exponential decay of old activity: V_new = V_old * exp(-dt/tau)
    self.surface *= np.exp(-dt / self.decay_tau)

    # Accumulate event magnitude; polarity is deliberately ignored because
    # the surface tracks activity density, not signed contrast.
    for x, y, _t, _p in events:
        if 0 <= x < self.width and 0 <= y < self.height:
            self.surface[y, x] += 1.0

    self.last_update_time = current_time
    # tanh saturates unbounded activity into [0, 1) for SC generation.
    return np.tanh(self.surface)

generate_bitstream_frame(length=256)

Generate a HxWxLength bitstream cube from current surface state.

Source code in src/sc_neurocore/interfaces/dvs_input.py
Python
80
81
82
83
84
85
86
87
88
89
def generate_bitstream_frame(self, length: int = 256) -> np.ndarray[Any, Any]:
    """
    Generate a HxWxLength bitstream cube from current surface state.
    """
    # Per-pixel Bernoulli probability: tanh squashes the surface into [0, 1).
    p = np.tanh(self.surface)
    cube_shape = (self.height, self.width, length)
    # Compare a uniform noise cube against the broadcast probabilities; each
    # comparison yields one stochastic bit along the last axis.
    return np.where(np.random.random(cube_shape) < p[:, :, np.newaxis], 1, 0).astype(np.uint8)

Real World

sc_neurocore.interfaces.real_world

LSLBridge

Lab Streaming Layer (LSL) Bridge. Connects EEG/Physiological streams to sc-neurocore. (Mock implementation for standalone use).

Source code in src/sc_neurocore/interfaces/real_world.py
Python
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
class LSLBridge:
    """
    Lab Streaming Layer (LSL) Bridge.
    Connects EEG/Physiological streams to sc-neurocore.
    (Mock implementation for standalone use).
    """

    def __init__(self, stream_name="NeuromorphicIn") -> None:  # type: ignore[no-untyped-def]
        self.stream_name = stream_name
        logger.info("LSL: Listening for stream '%s'...", stream_name)

    def receive_chunk(self, max_samples=32) -> np.ndarray[Any, Any]:  # type: ignore[no-untyped-def]
        """
        Simulates receiving a chunk of samples.
        In real version: calls inlet.pull_chunk().
        """
        # Mock EEG data: 8 channels, random signals
        return np.random.normal(0, 50e-6, (8, max_samples))

receive_chunk(max_samples=32)

Simulates receiving a chunk of samples. In real version: calls inlet.pull_chunk().

Source code in src/sc_neurocore/interfaces/real_world.py
Python
27
28
29
30
31
32
33
def receive_chunk(self, max_samples=32) -> np.ndarray[Any, Any]:  # type: ignore[no-untyped-def]
    """
    Simulates receiving a chunk of samples.
    In real version: calls inlet.pull_chunk().
    """
    # Mock EEG: 8 channels of Gaussian noise at ~50 uV amplitude.
    n_channels = 8
    return np.random.normal(0, 50e-6, (n_channels, max_samples))

ROS2Node

ROS 2 Interface Node. Publishes motor commands from sc-neurocore to robots.

Source code in src/sc_neurocore/interfaces/real_world.py
Python
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
class ROS2Node:
    """
    ROS 2 Interface Node.
    Publishes motor commands from sc-neurocore to robots.
    """

    def __init__(self, node_name: str = "neuro_controller") -> None:
        self.node_name = node_name
        logger.info("ROS2: Node '%s' initialized.", node_name)

    def publish_cmd_vel(self, linear_x: float, angular_z: float) -> bool:
        """
        Simulates publishing to /cmd_vel.

        Returns
        -------
        bool
            Always True (mock success), mirroring a successful publish.
        """
        # Fix: annotation was `-> None` yet the body returns True (masked by a
        # type: ignore). Declare the real return type instead.
        # Payload mirrors the /cmd_vel Twist fields actually used.
        msg = {"linear": linear_x, "angular": angular_z}
        # In real version: self.publisher.publish(msg)
        return True

publish_cmd_vel(linear_x, angular_z)

Simulates publishing to /cmd_vel.

Source code in src/sc_neurocore/interfaces/real_world.py
Python
46
47
48
49
50
51
52
53
def publish_cmd_vel(self, linear_x: float, angular_z: float) -> bool:
    """
    Simulates publishing to /cmd_vel.

    Returns True to signal a (mock) successful publish.
    """
    # Fix: annotation was `-> None` yet the body returns True (masked by a
    # type: ignore). Declare the real return type instead.
    # Payload mirrors the /cmd_vel Twist fields actually used.
    msg = {"linear": linear_x, "angular": angular_z}
    # In real version: self.publisher.publish(msg)
    return True