Skip to content

Interfaces

External I/O bridges: brain-computer interface protocols, CCW audio bridge, dynamic vision sensor input, and real-world actuator output.

BCI

sc_neurocore.interfaces.bci

Encode continuous neural signals (EEG, LFP, intracortical) into spike trains and stochastic bitstreams for SC processing.

Uses framework-native encoding (seeded RNG for reproducibility, Sobol quasi-random for low-discrepancy encoding). Supports windowed encoding for streaming BCI pipelines.

For spike compression/telemetry, see spike_codec (6 codecs).

BCIEncoder dataclass

Encode continuous neural signals into spike trains.

Replaces the old BCIDecoder (misleading name — it encodes, not decodes). Uses seeded RNG for deterministic, reproducible encoding.

Parameters

n_channels : int Number of recording channels. sampling_rate : int Input signal sampling rate (Hz). window_ms : float Encoding window duration in milliseconds. seed : int RNG seed for reproducibility.

Source code in src/sc_neurocore/interfaces/bci.py
 27
 28
 29
 30
 31
 32
 33
 34
 35
 36
 37
 38
 39
 40
 41
 42
 43
 44
 45
 46
 47
 48
 49
 50
 51
 52
 53
 54
 55
 56
 57
 58
 59
 60
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
@dataclass
class BCIEncoder:
    """Encode continuous neural signals into spike trains.

    Replaces the old BCIDecoder (misleading name — it encodes, not decodes).
    Uses seeded RNG for deterministic, reproducible encoding.

    Parameters
    ----------
    n_channels : int
        Number of recording channels.
    sampling_rate : int
        Input signal sampling rate (Hz).
    window_ms : float
        Encoding window duration in milliseconds.
    seed : int
        RNG seed for reproducibility.
    """

    n_channels: int
    sampling_rate: int = 20000
    window_ms: float = 1.0
    seed: int = 42

    def encode(self, signal: np.ndarray, T: int = 20) -> np.ndarray:
        """Encode a signal block into spike trains via rate coding.

        Parameters
        ----------
        signal : ndarray of shape (n_channels,) or (n_channels, n_samples)
            Continuous neural signal. Multi-sample input is averaged
            per channel to get firing probabilities.
        T : int
            Number of output timesteps per window.

        Returns
        -------
        ndarray of shape (T, n_channels), int8 binary

        Raises
        ------
        ValueError
            If the channel dimension does not match ``n_channels``
            (same check ``encode_to_bitstream`` already performs).
        """
        if signal.ndim > 1:
            probs = signal.mean(axis=1)
        else:
            probs = signal.copy()

        # Consistency fix: encode_to_bitstream() validates the channel
        # count but encode() previously did not — enforce it here too so
        # shape mistakes fail loudly instead of silently mis-encoding.
        if len(probs) != self.n_channels:
            raise ValueError(f"Signal has {len(probs)} channels, expected {self.n_channels}")

        probs = self._normalize(probs)
        return rate_encode(probs, T, seed=self.seed)

    def encode_stream(self, signal: np.ndarray) -> np.ndarray:
        """Encode a multi-window signal stream.

        Parameters
        ----------
        signal : ndarray of shape (n_channels, total_samples)
            Full recording. Split into windows of window_ms duration.
            A trailing partial window is dropped.

        Returns
        -------
        ndarray of shape (total_T, n_channels), int8 binary
        """
        samples_per_window = max(1, int(self.sampling_rate * self.window_ms / 1000))
        n_windows = signal.shape[1] // samples_per_window
        # One output timestep per ~10 input samples (at least one per window).
        T_per_window = max(1, samples_per_window // 10)

        chunks = []
        for w in range(n_windows):
            start = w * samples_per_window
            end = start + samples_per_window
            window = signal[:, start:end]
            chunk = self.encode(window, T=T_per_window)
            chunks.append(chunk)

        if not chunks:
            # Input shorter than one window: empty but correctly-shaped raster.
            return np.zeros((0, self.n_channels), dtype=np.int8)

        return np.vstack(chunks)

    @staticmethod
    def _normalize(values: np.ndarray) -> np.ndarray:
        """Normalize to [0, 1] for probability encoding.

        A (near-)constant input maps to 0.5 everywhere. NOTE: the legacy
        normalize_signal() maps a constant input to zeros instead — the
        difference is kept deliberately for backward compatibility.
        """
        vmin, vmax = values.min(), values.max()
        if vmax - vmin < 1e-10:
            return np.full_like(values, 0.5)
        return (values - vmin) / (vmax - vmin)

    # --- Backward-compatible API (old BCIDecoder methods) ---

    def normalize_signal(self, signal: np.ndarray) -> np.ndarray:
        """Normalize signal to [0, 1]. Legacy API — use _normalize().

        Unlike _normalize(), a constant signal yields all zeros.
        """
        s_min, s_max = np.min(signal), np.max(signal)
        if s_max - s_min == 0:
            return np.zeros_like(signal)
        return (signal - s_min) / (s_max - s_min)

    def encode_to_bitstream(self, signal: np.ndarray, length: int = 256) -> np.ndarray:
        """Legacy API. Encodes (channels, time) → (channels, length).

        New code should use .encode() which returns (T, channels).
        """
        if signal.ndim > 1:
            mean_vals = np.mean(signal, axis=1)
        else:
            mean_vals = signal

        if len(mean_vals) != self.n_channels:
            raise ValueError(f"Signal has {len(mean_vals)} channels, expected {self.n_channels}")

        # Bernoulli-sample each channel's bitstream from its normalized mean.
        probs = self.normalize_signal(mean_vals)
        rng = np.random.RandomState(self.seed)
        bits = (rng.random((self.n_channels, length)) < probs[:, None]).astype(np.uint8)
        return bits

encode(signal, T=20)

Encode a signal block into spike trains via rate coding.

Parameters

signal : ndarray of shape (n_channels,) or (n_channels, n_samples) Continuous neural signal. Multi-sample input is averaged per channel to get firing probabilities. T : int Number of output timesteps per window.

Returns

ndarray of shape (T, n_channels), int8 binary

Source code in src/sc_neurocore/interfaces/bci.py
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
def encode(self, signal: np.ndarray, T: int = 20) -> np.ndarray:
    """Rate-code one signal block into a binary spike raster.

    Parameters
    ----------
    signal : ndarray of shape (n_channels,) or (n_channels, n_samples)
        Continuous neural signal; a 2-D block is collapsed to one
        mean value per channel before encoding.
    T : int
        Timesteps in the output window.

    Returns
    -------
    ndarray of shape (T, n_channels), int8 binary
    """
    # Collapse any time axis to a per-channel mean; 1-D input is copied
    # so normalization never touches the caller's array.
    per_channel = signal.mean(axis=1) if signal.ndim > 1 else signal.copy()
    # Rescale to [0, 1] and draw the spike raster with the fixed seed.
    return rate_encode(self._normalize(per_channel), T, seed=self.seed)

encode_stream(signal)

Encode a multi-window signal stream.

Parameters

signal : ndarray of shape (n_channels, total_samples) Full recording. Split into windows of window_ms duration.

Returns

ndarray of shape (total_T, n_channels), int8 binary

Source code in src/sc_neurocore/interfaces/bci.py
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
def encode_stream(self, signal: np.ndarray) -> np.ndarray:
    """Encode a full recording window-by-window.

    Parameters
    ----------
    signal : ndarray of shape (n_channels, total_samples)
        Full recording; it is cut into consecutive windows of
        window_ms duration and each window is encoded separately.

    Returns
    -------
    ndarray of shape (total_T, n_channels), int8 binary
    """
    win = max(1, int(self.sampling_rate * self.window_ms / 1000))
    total_windows = signal.shape[1] // win
    # One output timestep per ~10 input samples, never fewer than one.
    steps = max(1, win // 10)

    encoded = [
        self.encode(signal[:, i * win:(i + 1) * win], T=steps)
        for i in range(total_windows)
    ]

    if not encoded:
        # No complete window fits: return an empty, correctly-shaped raster.
        return np.zeros((0, self.n_channels), dtype=np.int8)

    return np.vstack(encoded)

normalize_signal(signal)

Normalize signal to [0, 1]. Legacy API — use _normalize().

Source code in src/sc_neurocore/interfaces/bci.py
113
114
115
116
117
118
def normalize_signal(self, signal: np.ndarray) -> np.ndarray:
    """Rescale a signal into [0, 1]. Legacy API — use _normalize().

    A constant signal maps to all zeros (note: _normalize() maps a
    constant input to 0.5 instead).
    """
    lo = np.min(signal)
    span = np.max(signal) - lo
    if span == 0:
        return np.zeros_like(signal)
    return (signal - lo) / span

encode_to_bitstream(signal, length=256)

Legacy API. Encodes (channels, time) → (channels, length).

New code should use .encode() which returns (T, channels).

Source code in src/sc_neurocore/interfaces/bci.py
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
def encode_to_bitstream(self, signal: np.ndarray, length: int = 256) -> np.ndarray:
    """Legacy API. Encodes (channels, time) → (channels, length).

    New code should use .encode() which returns (T, channels).
    """
    # Reduce any time axis to one mean value per channel.
    mean_vals = np.mean(signal, axis=1) if signal.ndim > 1 else signal

    if len(mean_vals) != self.n_channels:
        raise ValueError(f"Signal has {len(mean_vals)} channels, expected {self.n_channels}")

    # Bernoulli-sample each channel's bitstream from its normalized mean.
    probs = self.normalize_signal(mean_vals)
    rng = np.random.RandomState(self.seed)
    draws = rng.random((self.n_channels, length))
    return (draws < probs[:, None]).astype(np.uint8)

BCIDecoder

Bases: BCIEncoder

Legacy alias. Use BCIEncoder instead.

Source code in src/sc_neurocore/interfaces/bci.py
139
140
141
142
143
class BCIDecoder(BCIEncoder):
    """Deprecated name kept for backward compatibility — use BCIEncoder."""

    def __init__(self, channels: int, sampling_rate: int = 1000, **kwargs):
        # Translate the legacy `channels` argument onto the new field name.
        super().__init__(n_channels=channels, sampling_rate=sampling_rate, **kwargs)

CCW Bridge

sc_neurocore.interfaces.ccw_bridge

SC-NeuroCore ↔ CCW/VIBRANA bridge.

Converts stochastic bitstream outputs to audio parameters and visualization states for the CCW application.

CCWMode

Bases: str, Enum

CCW modulation modes aligned with VIBRANA.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
29
30
31
32
33
34
35
36
37
class CCWMode(str, Enum):
    """CCW modulation modes aligned with VIBRANA.

    Subclassing ``str`` lets mode values serialize as plain strings
    (e.g. in the JSON/session configs built by CCWBridge).
    """

    THEURGIC = "theurgic"      # paired with Schumann frequencies (7.83/14.3 Hz) in CCWBridge.MODE_FREQUENCIES
    HEALING = "healing"        # paired with Solfeggio frequencies (528/432 Hz)
    MEDITATION = "meditation"  # paired with theta-Schumann (4/7.83 Hz)
    COSMIC = "cosmic"          # paired with OM frequencies (136.1/272.2 Hz)
    FOCUS = "focus"            # paired with the beta band (14/18 Hz)
    CREATIVITY = "creativity"  # paired with the alpha band (10/12 Hz)

CCWParameters dataclass

Parameters for CCW audio generation.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
40
41
42
43
44
45
46
47
48
@dataclass
class CCWParameters:
    """Parameters for CCW audio generation.

    Consumed by CCWBridge: the frequency/depth fields seed
    scpn_metrics_to_ccw() defaults, and sample_rate drives
    generate_binaural_sample().
    """

    base_frequency: float = 7.83  # Schumann resonance — rate of the low-frequency amplitude envelope (Hz)
    carrier_frequency: float = 432.0  # Verdi tuning (A4=432 Hz) — left-channel carrier
    binaural_offset: float = 10.0  # Hz added to the right-channel carrier for the binaural beat
    modulation_depth: float = 0.5  # depth of the amplitude envelope (0 = none, 1 = full)
    sample_rate: int = 44100  # audio sample rate (Hz)

VIBRANAState dataclass

State for VIBRANA visualization sync.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
51
52
53
54
55
56
57
58
59
@dataclass
class VIBRANAState:
    """State for VIBRANA visualization sync.

    Mutated in place by CCWBridge.glyph_vector_to_vibrana() on each call.
    """

    mode: CCWMode = CCWMode.MEDITATION  # current CCW/VIBRANA mode
    geometry_phase: float = 0.0  # accumulated phase; wrapped to [0, 2π) when exported
    color_intensity: float = 0.5  # driven by the glyph vector's symbolic-health component
    rotation_speed: float = 1.0  # driven by the glyph vector's metatron-flow component
    glyph_weights: np.ndarray[Any, Any] = field(default_factory=lambda: np.zeros(6))  # last 6-D glyph vector received

CCWBridge

Bridge between SC-NeuroCore and CCW/VIBRANA systems.

Converts bitstream outputs from SCPN layers into audio parameters and visualization states for the CCW application.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
class CCWBridge:
    """
    Bridge between SC-NeuroCore and CCW/VIBRANA systems.

    Converts bitstream outputs from SCPN layers into audio parameters
    and visualization states for the CCW application.
    """

    # SCPN metric to CCW parameter mappings: metric name -> (param, min, max)
    METRIC_MAPPINGS = {
        "l1_quantum_coherence": ("modulation_depth", 0.3, 0.8),
        "l2_neurochemical_activity": ("carrier_blend", 0.0, 1.0),
        "l4_cellular_sync": ("binaural_offset", 4.0, 40.0),
        "l5_organismal_coherence": ("amplitude", 0.3, 1.0),
        "l6_planetary_coherence": ("schumann_blend", 0.0, 1.0),
        "l7_symbolic_health": ("sacred_geometry_intensity", 0.0, 1.0),
    }

    # Mode to frequency mapping (aligned with VIBRANA): mode -> (base, harmonic) Hz
    MODE_FREQUENCIES = {
        CCWMode.THEURGIC: (7.83, 14.3),  # Schumann
        CCWMode.HEALING: (528.0, 432.0),  # Solfeggio
        CCWMode.MEDITATION: (4.0, 7.83),  # Theta-Schumann
        CCWMode.COSMIC: (136.1, 272.2),  # OM
        CCWMode.FOCUS: (14.0, 18.0),  # Beta
        CCWMode.CREATIVITY: (10.0, 12.0),  # Alpha
    }

    def __init__(self, params: Optional[CCWParameters] = None):
        self.params = params or CCWParameters()
        self.vibrana_state = VIBRANAState()

        # Audio generation state (radians), carried across calls so that
        # consecutive generate_binaural_sample() buffers are phase-continuous.
        self.phase_left = 0.0
        self.phase_right = 0.0
        self.modulation_phase = 0.0

        # Rolling per-metric history used for smoothing in scpn_metrics_to_ccw().
        self.metric_history: Dict[str, List[float]] = {}
        self.smoothing_window = 10

    def bitstream_to_frequency(
        self, bitstream: np.ndarray[Any, Any], freq_min: float = 1.0, freq_max: float = 40.0
    ) -> float:
        """
        Convert a bitstream to a frequency value.

        Args:
            bitstream: Binary array from SC layer output
            freq_min: Minimum frequency (Hz)
            freq_max: Maximum frequency (Hz)

        Returns:
            Frequency in Hz mapped from bitstream probability
        """
        # The mean of a binary stream is its '1' probability (duty cycle).
        prob = np.mean(bitstream)
        return freq_min + prob * (freq_max - freq_min)

    def scpn_metrics_to_ccw(self, metrics: Dict[str, float]) -> Dict[str, float]:
        """
        Convert SCPN global metrics to CCW audio parameters.

        Args:
            metrics: Dict from get_global_metrics() of SCPN layers

        Returns:
            Dict of CCW-compatible audio parameters
        """
        # Defaults; mapped metrics below overwrite the corresponding entries.
        ccw_params = {
            "base_frequency": self.params.base_frequency,
            "carrier_frequency": self.params.carrier_frequency,
            "binaural_offset": self.params.binaural_offset,
            "modulation_depth": self.params.modulation_depth,
            "amplitude": 0.5,
            "carrier_blend": 0.5,
            "schumann_blend": 0.5,
            "sacred_geometry_intensity": 0.5,
        }

        for metric_name, (param_name, min_val, max_val) in self.METRIC_MAPPINGS.items():
            if metric_name in metrics:
                value = metrics[metric_name]
                # Smooth over a bounded rolling window of recent values.
                history = self.metric_history.setdefault(metric_name, [])
                history.append(value)
                if len(history) > self.smoothing_window:
                    history.pop(0)
                smoothed = np.mean(history)

                # Map the smoothed [0, 1] metric into the parameter range.
                ccw_params[param_name] = min_val + smoothed * (max_val - min_val)

        return ccw_params

    def glyph_vector_to_vibrana(self, glyph_vector: np.ndarray[Any, Any]) -> Dict[str, Any]:
        """
        Convert L7 glyph vector to VIBRANA visualization parameters.

        Args:
            glyph_vector: 6D vector [phi, fib, metatron, platonic, e8, health]

        Returns:
            Dict of VIBRANA visualization parameters
        """
        # Zero-pad short vectors to the full six components.
        if len(glyph_vector) < 6:
            glyph_vector = np.pad(glyph_vector, (0, 6 - len(glyph_vector)))

        self.vibrana_state.glyph_weights = glyph_vector

        # Map glyph components to visualization
        phi_alignment = glyph_vector[0]
        fibonacci_alignment = glyph_vector[1]
        metatron_flow = glyph_vector[2]
        platonic_coherence = glyph_vector[3]
        e8_alignment = glyph_vector[4]
        symbolic_health = glyph_vector[5]

        # Determine best mode based on glyph pattern (first match wins).
        if metatron_flow > 0.7:
            self.vibrana_state.mode = CCWMode.THEURGIC
        elif phi_alignment > 0.8 and fibonacci_alignment > 0.8:
            self.vibrana_state.mode = CCWMode.COSMIC
        elif symbolic_health > 0.6:
            self.vibrana_state.mode = CCWMode.HEALING
        elif e8_alignment > 0.7:
            self.vibrana_state.mode = CCWMode.MEDITATION
        else:
            self.vibrana_state.mode = CCWMode.FOCUS

        # Update the visualization state in place.
        self.vibrana_state.color_intensity = symbolic_health
        self.vibrana_state.rotation_speed = 0.5 + metatron_flow * 2.0
        self.vibrana_state.geometry_phase += platonic_coherence * 0.1

        return {
            "mode": self.vibrana_state.mode.value,
            "geometry_phase": float(self.vibrana_state.geometry_phase % (2 * np.pi)),
            "color_intensity": float(self.vibrana_state.color_intensity),
            "rotation_speed": float(self.vibrana_state.rotation_speed),
            "glyph_weights": {
                "phi_alignment": float(phi_alignment),
                "fibonacci_alignment": float(fibonacci_alignment),
                "metatron_flow": float(metatron_flow),
                "platonic_coherence": float(platonic_coherence),
                "e8_alignment": float(e8_alignment),
                "symbolic_health": float(symbolic_health),
            },
            "frequencies": {
                "base": self.MODE_FREQUENCIES[self.vibrana_state.mode][0],
                "harmonic": self.MODE_FREQUENCIES[self.vibrana_state.mode][1],
            },
        }

    def generate_binaural_sample(
        self, ccw_params: Dict[str, float], duration_samples: int = 1024
    ) -> Tuple[np.ndarray[Any, Any], np.ndarray[Any, Any]]:
        """
        Generate binaural audio samples from CCW parameters.

        Phase state (phase_left/phase_right/modulation_phase) is carried
        across calls so consecutive buffers are phase-continuous.

        Args:
            ccw_params: Parameters from scpn_metrics_to_ccw()
            duration_samples: Number of samples to generate

        Returns:
            Tuple of (left_channel, right_channel) numpy arrays
        """
        sample_rate = self.params.sample_rate
        dt = 1.0 / sample_rate

        # Extract parameters
        carrier = ccw_params.get("carrier_frequency", 432.0)
        binaural = ccw_params.get("binaural_offset", 10.0)
        mod_depth = ccw_params.get("modulation_depth", 0.5)
        amplitude = ccw_params.get("amplitude", 0.5)
        base_freq = ccw_params.get("base_frequency", 7.83)

        # Generate binaural beat (carrier + offset for right channel)
        left_freq = carrier
        right_freq = carrier + binaural

        # Phase-continuous generation: sample k has phase0 + (k+1)*increment.
        # (arange replaces the old cumsum-of-ones form — same values without
        # a temporary ones array; the unused time array `t` was removed.)
        steps = np.arange(1, duration_samples + 1)
        phases_left = self.phase_left + steps * (2 * np.pi * left_freq * dt)
        phases_right = self.phase_right + steps * (2 * np.pi * right_freq * dt)

        # Update phase state for continuity
        self.phase_left = phases_left[-1] % (2 * np.pi)
        self.phase_right = phases_right[-1] % (2 * np.pi)

        # Generate carriers
        left = np.sin(phases_left)
        right = np.sin(phases_right)

        # Add modulation envelope (low frequency oscillation at base_freq)
        mod_phases = self.modulation_phase + steps * (2 * np.pi * base_freq * dt)
        self.modulation_phase = mod_phases[-1] % (2 * np.pi)

        modulation = 1.0 - mod_depth * (1 + np.sin(mod_phases)) / 2

        # Apply modulation and amplitude
        left = amplitude * left * modulation
        right = amplitude * right * modulation

        return left, right

    def generate_ccw_metadata(
        self, scpn_outputs: Dict[str, Any], glyph_vector: Optional[np.ndarray[Any, Any]] = None
    ) -> Dict[str, Any]:
        """
        Generate complete CCW metadata package for audio/visual sync.

        Args:
            scpn_outputs: Full output dict from run_integrated_step()
            glyph_vector: Optional L7 glyph vector

        Returns:
            Complete metadata dict for CCW system
        """
        # Extract numeric metrics from any layer dict whose keys mention
        # "coherence" (heuristic string scan over the key listing).
        metrics = {}
        for layer_name, output in scpn_outputs.items():
            if isinstance(output, dict):
                if "coherence" in str(output.keys()).lower():
                    for k, v in output.items():
                        if isinstance(v, (int, float)):
                            metrics[f"{layer_name}_{k}"] = float(v)

        # Get glyph vector from L7 if not provided
        if glyph_vector is None and "l7" in scpn_outputs:
            l7_out = scpn_outputs["l7"]
            if isinstance(l7_out, dict) and "glyph_vector" in l7_out:
                glyph_vector = l7_out["glyph_vector"]

        # Convert to CCW parameters
        ccw_params = self.scpn_metrics_to_ccw(metrics)

        # Convert glyph to VIBRANA
        vibrana_params = {}
        if glyph_vector is not None:
            vibrana_params = self.glyph_vector_to_vibrana(glyph_vector)

        # Build complete metadata
        metadata = {
            "timestamp": float(np.datetime64("now").astype(np.float64)),
            "ccw_audio": ccw_params,
            "vibrana_visual": vibrana_params,
            "scpn_metrics": metrics,
            "mode": self.vibrana_state.mode.value,
            "bridge_version": "1.0.0",
        }

        return metadata

    def export_glyph_stream(
        self,
        glyph_vector: np.ndarray[Any, Any],
        cosmic_vector: Optional[Dict[str, float]] = None,
        filepath: Optional[str] = None,
    ) -> str:
        """
        Export glyph stream data for VIBRANA/CCW hardware playback.

        Args:
            glyph_vector: Normalized glyph vector from L7
            cosmic_vector: Optional L8 cosmic phase data
            filepath: Optional file path to save

        Returns:
            JSON string of glyph stream data
        """
        # Missing trailing components serialize as 0.0 rather than raising.
        stream_data = {
            "glyph_vector": {
                "phi_alignment": float(glyph_vector[0]) if len(glyph_vector) > 0 else 0.0,
                "fibonacci_alignment": float(glyph_vector[1]) if len(glyph_vector) > 1 else 0.0,
                "metatron_flow": float(glyph_vector[2]) if len(glyph_vector) > 2 else 0.0,
                "platonic_coherence": float(glyph_vector[3]) if len(glyph_vector) > 3 else 0.0,
                "e8_alignment": float(glyph_vector[4]) if len(glyph_vector) > 4 else 0.0,
                "symbolic_health": float(glyph_vector[5]) if len(glyph_vector) > 5 else 0.0,
            },
            "cosmic_vector": cosmic_vector or {},
            "layer_weights": {
                "metatron_weight": 0.95,  # Default high weight for Metatron
                "phi_weight": 0.85,
                "e8_weight": 0.75,
            },
            "routing": {
                "target": "vibrana_hardware",
                "protocol": "bitstream",
                "encoding": "normalized_float",
            },
        }

        json_str = json.dumps(stream_data, indent=2)

        if filepath:
            with open(filepath, "w") as f:
                f.write(json_str)
            logger.info(f"Glyph stream exported to {filepath}")

        return json_str

    def create_session_config(
        self, mode: CCWMode = CCWMode.MEDITATION, duration_minutes: int = 20
    ) -> Dict[str, Any]:
        """
        Create a complete CCW session configuration.

        Args:
            mode: CCW/VIBRANA mode
            duration_minutes: Session duration

        Returns:
            Session configuration dict
        """
        base_freq, harmonic_freq = self.MODE_FREQUENCIES[mode]

        return {
            "session": {
                "mode": mode.value,
                "duration_minutes": duration_minutes,
                "created_at": str(np.datetime64("now")),
            },
            "audio": {
                "base_frequency": base_freq,
                "harmonic_frequency": harmonic_freq,
                "carrier_frequency": self.params.carrier_frequency,
                "binaural_offset": self.params.binaural_offset,
                "sample_rate": self.params.sample_rate,
            },
            "visual": {
                "geometry_pattern": "thirteen_fold",
                "rotation_enabled": True,
                "color_scheme": mode.value,
            },
            "scpn_integration": {
                "enabled": True,
                "update_rate_hz": 10,
                "layers": ["l1", "l4", "l5", "l6", "l7"],
            },
        }

bitstream_to_frequency(bitstream, freq_min=1.0, freq_max=40.0)

Convert a bitstream to a frequency value.

Parameters:

Name Type Description Default
bitstream ndarray[Any, Any]

Binary array from SC layer output

required
freq_min float

Minimum frequency (Hz)

1.0
freq_max float

Maximum frequency (Hz)

40.0

Returns:

Type Description
float

Frequency in Hz mapped from bitstream probability

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
def bitstream_to_frequency(
    self, bitstream: np.ndarray[Any, Any], freq_min: float = 1.0, freq_max: float = 40.0
) -> float:
    """
    Map a bitstream's duty cycle onto a frequency range.

    Args:
        bitstream: Binary array from SC layer output
        freq_min: Minimum frequency (Hz)
        freq_max: Maximum frequency (Hz)

    Returns:
        Frequency in Hz, linearly interpolated by the bitstream's mean
    """
    # The mean of a binary stream is its '1' probability.
    duty_cycle = np.mean(bitstream)
    span = freq_max - freq_min
    return freq_min + duty_cycle * span

scpn_metrics_to_ccw(metrics)

Convert SCPN global metrics to CCW audio parameters.

Parameters:

Name Type Description Default
metrics Dict[str, float]

Dict from get_global_metrics() of SCPN layers

required

Returns:

Type Description
Dict[str, float]

Dict of CCW-compatible audio parameters

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
def scpn_metrics_to_ccw(self, metrics: Dict[str, float]) -> Dict[str, float]:
    """
    Convert SCPN global metrics to CCW audio parameters.

    Args:
        metrics: Dict from get_global_metrics() of SCPN layers

    Returns:
        Dict of CCW-compatible audio parameters
    """
    # Start from configured defaults; mapped metrics overwrite entries below.
    out = {
        "base_frequency": self.params.base_frequency,
        "carrier_frequency": self.params.carrier_frequency,
        "binaural_offset": self.params.binaural_offset,
        "modulation_depth": self.params.modulation_depth,
        "amplitude": 0.5,
        "carrier_blend": 0.5,
        "schumann_blend": 0.5,
        "sacred_geometry_intensity": 0.5,
    }

    for name, (target, lo, hi) in self.METRIC_MAPPINGS.items():
        if name not in metrics:
            continue
        # Keep a bounded rolling history and smooth with its mean.
        history = self.metric_history.setdefault(name, [])
        history.append(metrics[name])
        if len(history) > self.smoothing_window:
            history.pop(0)
        # Linearly map the smoothed [0, 1] metric into [lo, hi].
        out[target] = lo + np.mean(history) * (hi - lo)

    return out

glyph_vector_to_vibrana(glyph_vector)

Convert L7 glyph vector to VIBRANA visualization parameters.

Parameters:

Name Type Description Default
glyph_vector ndarray[Any, Any]

6D vector [phi, fib, metatron, platonic, e8, health]

required

Returns:

Type Description
Dict[str, Any]

Dict of VIBRANA visualization parameters

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
def glyph_vector_to_vibrana(self, glyph_vector: np.ndarray[Any, Any]) -> Dict[str, Any]:
    """
    Convert L7 glyph vector to VIBRANA visualization parameters.

    Args:
        glyph_vector: 6D vector [phi, fib, metatron, platonic, e8, health]

    Returns:
        Dict of VIBRANA visualization parameters
    """
    # Zero-pad short vectors to the full six components.
    if len(glyph_vector) < 6:
        glyph_vector = np.pad(glyph_vector, (0, 6 - len(glyph_vector)))

    self.vibrana_state.glyph_weights = glyph_vector

    # Unpack the six glyph components.
    phi = glyph_vector[0]
    fib = glyph_vector[1]
    metatron = glyph_vector[2]
    platonic = glyph_vector[3]
    e8 = glyph_vector[4]
    health = glyph_vector[5]

    # Select the mode from the dominant glyph pattern (first match wins).
    if metatron > 0.7:
        mode = CCWMode.THEURGIC
    elif phi > 0.8 and fib > 0.8:
        mode = CCWMode.COSMIC
    elif health > 0.6:
        mode = CCWMode.HEALING
    elif e8 > 0.7:
        mode = CCWMode.MEDITATION
    else:
        mode = CCWMode.FOCUS
    self.vibrana_state.mode = mode

    # Update the visualization state in place.
    self.vibrana_state.color_intensity = health
    self.vibrana_state.rotation_speed = 0.5 + metatron * 2.0
    self.vibrana_state.geometry_phase += platonic * 0.1

    base, harmonic = self.MODE_FREQUENCIES[mode]
    return {
        "mode": mode.value,
        "geometry_phase": float(self.vibrana_state.geometry_phase % (2 * np.pi)),
        "color_intensity": float(self.vibrana_state.color_intensity),
        "rotation_speed": float(self.vibrana_state.rotation_speed),
        "glyph_weights": {
            "phi_alignment": float(phi),
            "fibonacci_alignment": float(fib),
            "metatron_flow": float(metatron),
            "platonic_coherence": float(platonic),
            "e8_alignment": float(e8),
            "symbolic_health": float(health),
        },
        "frequencies": {
            "base": base,
            "harmonic": harmonic,
        },
    }

generate_binaural_sample(ccw_params, duration_samples=1024)

Generate binaural audio samples from CCW parameters.

Parameters:

Name Type Description Default
ccw_params Dict[str, float]

Parameters from scpn_metrics_to_ccw()

required
duration_samples int

Number of samples to generate

1024

Returns:

Type Description
Tuple[ndarray[Any, Any], ndarray[Any, Any]]

Tuple of (left_channel, right_channel) numpy arrays

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
def generate_binaural_sample(
    self, ccw_params: Dict[str, float], duration_samples: int = 1024
) -> Tuple[np.ndarray[Any, Any], np.ndarray[Any, Any]]:
    """
    Generate binaural audio samples from CCW parameters.

    Phase state (``self.phase_left``, ``self.phase_right``,
    ``self.modulation_phase``) is carried across calls so consecutive
    buffers are phase-continuous (click-free).

    Args:
        ccw_params: Parameters from scpn_metrics_to_ccw()
        duration_samples: Number of samples to generate

    Returns:
        Tuple of (left_channel, right_channel) numpy arrays
    """
    dt = 1.0 / self.params.sample_rate

    # Extract parameters (defaults used when a key is absent).
    carrier = ccw_params.get("carrier_frequency", 432.0)
    binaural = ccw_params.get("binaural_offset", 10.0)
    mod_depth = ccw_params.get("modulation_depth", 0.5)
    amplitude = ccw_params.get("amplitude", 0.5)
    base_freq = ccw_params.get("base_frequency", 7.83)

    # Sample steps 1..N: each sample advances the phase by one increment,
    # starting from the phase carried over from the previous call.
    # (Replaces the original cumsum-over-ones, which allocated two extra
    # arrays per channel to compute the same linear ramp; also drops an
    # unused time array `t`.)
    steps = np.arange(1, duration_samples + 1)

    # Binaural beat: right channel runs `binaural` Hz above the carrier.
    phases_left = self.phase_left + steps * (2 * np.pi * carrier * dt)
    phases_right = self.phase_right + steps * (2 * np.pi * (carrier + binaural) * dt)

    # Save wrapped end phases for continuity with the next buffer.
    self.phase_left = phases_left[-1] % (2 * np.pi)
    self.phase_right = phases_right[-1] % (2 * np.pi)

    # Low-frequency amplitude modulation envelope at base_freq.
    mod_phases = self.modulation_phase + steps * (2 * np.pi * base_freq * dt)
    self.modulation_phase = mod_phases[-1] % (2 * np.pi)
    modulation = 1.0 - mod_depth * (1 + np.sin(mod_phases)) / 2

    # Apply modulation and amplitude to the sine carriers.
    left = amplitude * np.sin(phases_left) * modulation
    right = amplitude * np.sin(phases_right) * modulation

    return left, right

generate_ccw_metadata(scpn_outputs, glyph_vector=None)

Generate complete CCW metadata package for audio/visual sync.

Parameters:

Name Type Description Default
scpn_outputs Dict[str, Any]

Full output dict from run_integrated_step()

required
glyph_vector Optional[ndarray[Any, Any]]

Optional L7 glyph vector

None

Returns:

Type Description
Dict[str, Any]

Complete metadata dict for CCW system

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
def generate_ccw_metadata(
    self, scpn_outputs: Dict[str, Any], glyph_vector: Optional[np.ndarray[Any, Any]] = None
) -> Dict[str, Any]:
    """
    Generate complete CCW metadata package for audio/visual sync.

    Args:
        scpn_outputs: Full output dict from run_integrated_step()
        glyph_vector: Optional L7 glyph vector
    Returns:
        Complete metadata dict for CCW system
    """
    # Extract scalar metrics from per-layer output dicts, flattening keys
    # to "<layer>_<metric>".
    metrics = {}
    for layer_name, output in scpn_outputs.items():
        if isinstance(output, dict):
            # NOTE(review): this stringifies ALL keys at once and does a
            # substring search, so a single key containing "coherence"
            # admits every numeric entry of that layer dict. A per-key
            # check (any("coherence" in k.lower() for k in output)) was
            # probably intended — confirm before changing, as downstream
            # consumers may rely on the current (broader) selection.
            if "coherence" in str(output.keys()).lower():
                for k, v in output.items():
                    if isinstance(v, (int, float)):
                        metrics[f"{layer_name}_{k}"] = float(v)

    # Fall back to the L7 layer's glyph vector when none was supplied.
    if glyph_vector is None and "l7" in scpn_outputs:
        l7_out = scpn_outputs["l7"]
        if isinstance(l7_out, dict) and "glyph_vector" in l7_out:
            glyph_vector = l7_out["glyph_vector"]

    # Map the flattened metrics into CCW audio parameters.
    ccw_params = self.scpn_metrics_to_ccw(metrics)

    # Map the glyph vector into VIBRANA visual parameters (empty if absent).
    vibrana_params = {}
    if glyph_vector is not None:
        vibrana_params = self.glyph_vector_to_vibrana(glyph_vector)

    # Assemble the full sync package. Timestamp is np.datetime64("now")
    # cast to float64 — epoch-based, second resolution.
    metadata = {
        "timestamp": float(np.datetime64("now").astype(np.float64)),
        "ccw_audio": ccw_params,
        "vibrana_visual": vibrana_params,
        "scpn_metrics": metrics,
        "mode": self.vibrana_state.mode.value,
        "bridge_version": "1.0.0",
    }

    return metadata

export_glyph_stream(glyph_vector, cosmic_vector=None, filepath=None)

Export glyph stream data for VIBRANA/CCW hardware playback.

Parameters:

Name Type Description Default
glyph_vector ndarray[Any, Any]

Normalized glyph vector from L7

required
cosmic_vector Optional[Dict[str, float]]

Optional L8 cosmic phase data

None
filepath Optional[str]

Optional file path to save

None

Returns:

Type Description
str

JSON string of glyph stream data

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
def export_glyph_stream(
    self,
    glyph_vector: np.ndarray[Any, Any],
    cosmic_vector: Optional[Dict[str, float]] = None,
    filepath: Optional[str] = None,
) -> str:
    """
    Export glyph stream data for VIBRANA/CCW hardware playback.

    Args:
        glyph_vector: Normalized glyph vector from L7
        cosmic_vector: Optional L8 cosmic phase data
        filepath: Optional file path to save

    Returns:
        JSON string of glyph stream data
    """
    # Fixed component order of the L7 glyph vector; components missing
    # from a short vector are exported as 0.0.
    component_names = (
        "phi_alignment",
        "fibonacci_alignment",
        "metatron_flow",
        "platonic_coherence",
        "e8_alignment",
        "symbolic_health",
    )
    n_components = len(glyph_vector)
    glyph_fields = {
        name: float(glyph_vector[idx]) if idx < n_components else 0.0
        for idx, name in enumerate(component_names)
    }

    stream_data = {
        "glyph_vector": glyph_fields,
        "cosmic_vector": cosmic_vector or {},
        "layer_weights": {
            "metatron_weight": 0.95,  # Default high weight for Metatron
            "phi_weight": 0.85,
            "e8_weight": 0.75,
        },
        "routing": {
            "target": "vibrana_hardware",
            "protocol": "bitstream",
            "encoding": "normalized_float",
        },
    }

    json_str = json.dumps(stream_data, indent=2)

    # Persist only when a destination was given; the JSON is returned
    # either way.
    if filepath:
        with open(filepath, "w") as f:
            f.write(json_str)
        logger.info(f"Glyph stream exported to {filepath}")

    return json_str

create_session_config(mode=CCWMode.MEDITATION, duration_minutes=20)

Create a complete CCW session configuration.

Parameters:

Name Type Description Default
mode CCWMode

CCW/VIBRANA mode

MEDITATION
duration_minutes int

Session duration

20

Returns:

Type Description
Dict[str, Any]

Session configuration dict

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
def create_session_config(
    self, mode: CCWMode = CCWMode.MEDITATION, duration_minutes: int = 20
) -> Dict[str, Any]:
    """
    Create a complete CCW session configuration.

    Args:
        mode: CCW/VIBRANA mode
        duration_minutes: Session duration

    Returns:
        Session configuration dict
    """
    # Each mode maps to a (base, harmonic) frequency pair.
    base_freq, harmonic_freq = self.MODE_FREQUENCIES[mode]

    session_block = {
        "mode": mode.value,
        "duration_minutes": duration_minutes,
        "created_at": str(np.datetime64("now")),
    }
    audio_block = {
        "base_frequency": base_freq,
        "harmonic_frequency": harmonic_freq,
        "carrier_frequency": self.params.carrier_frequency,
        "binaural_offset": self.params.binaural_offset,
        "sample_rate": self.params.sample_rate,
    }
    visual_block = {
        "geometry_pattern": "thirteen_fold",
        "rotation_enabled": True,
        "color_scheme": mode.value,
    }
    integration_block = {
        "enabled": True,
        "update_rate_hz": 10,
        "layers": ["l1", "l4", "l5", "l6", "l7"],
    }

    return {
        "session": session_block,
        "audio": audio_block,
        "visual": visual_block,
        "scpn_integration": integration_block,
    }

create_bridge(ccw_params=None)

Factory function to create a CCW bridge instance.

Source code in src/sc_neurocore/interfaces/ccw_bridge.py
414
415
416
def create_bridge(ccw_params: Optional[CCWParameters] = None) -> CCWBridge:
    """Factory function to create a CCW bridge instance."""
    bridge = CCWBridge(ccw_params)
    return bridge

DVS Input

sc_neurocore.interfaces.dvs_input

DVSInputLayer dataclass

Interface for Dynamic Vision Sensors (Event Cameras). Converts AER events (x, y, t, p) into SC Bitstreams.

Source code in src/sc_neurocore/interfaces/dvs_input.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
@dataclass
class DVSInputLayer:
    """
    Interface for Dynamic Vision Sensors (Event Cameras).
    Converts AER events (x, y, t, p) into SC Bitstreams.
    """

    height: int  # Sensor rows
    width: int  # Sensor columns
    decay_tau: float = 100.0  # Time constant to decay old events (same units as event timestamps)

    def __post_init__(self) -> None:
        # Surface potential representing event density, decayed over time.
        self.surface = np.zeros((self.height, self.width), dtype=np.float32)
        self.last_update_time = 0.0

    def process_events(self, events: List[Tuple[int, int, float, int]]) -> np.ndarray[Any, Any]:
        """
        Integrate a batch of events.
        Events format: (x, y, timestamp_ms, polarity)
        Returns: Frame of probabilities [0, 1]
        """
        if not events:
            # Fix: previously returned the raw internal surface, which can
            # exceed 1 (breaking the documented [0, 1] contract) and leaks
            # a mutable reference to internal state. Squash it like the
            # non-empty path does.
            return np.tanh(self.surface)

        current_time = events[-1][2]
        dt = current_time - self.last_update_time

        # Exponential decay of old activity: V_new = V_old * exp(-dt/tau)
        decay_factor = np.exp(-dt / self.decay_tau)
        self.surface *= decay_factor

        # Accumulate new events; out-of-bounds coordinates are dropped.
        for x, y, t, p in events:
            if 0 <= x < self.width and 0 <= y < self.height:
                # Polarity (usually -1/1) is deliberately ignored: both ON
                # and OFF events contribute edge evidence, so we map events
                # to "probability of edge" magnitude only.
                self.surface[y, x] += 1.0

        # tanh saturates unbounded counts into [0, 1) for SC generation.
        output_probs = np.tanh(self.surface)

        self.last_update_time = current_time
        return output_probs

    def generate_bitstream_frame(self, length: int = 256) -> np.ndarray[Any, Any]:
        """
        Generate a HxWxLength bitstream cube from current surface state.

        NOTE(review): uses the unseeded global np.random, so frames are not
        reproducible — confirm whether framework-wide seeded-RNG convention
        should apply here.
        """
        probs = np.tanh(self.surface)
        # Vectorized Bernoulli draw: one bit per pixel per time step, (H, W, Length).
        rands = np.random.random((self.height, self.width, length))
        bits = (rands < probs[:, :, None]).astype(np.uint8)
        return bits

process_events(events)

Integrate a batch of events. Events format: (x, y, timestamp_ms, polarity) Returns: Frame of probabilities [0, 1]

Source code in src/sc_neurocore/interfaces/dvs_input.py
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
def process_events(self, events: List[Tuple[int, int, float, int]]) -> np.ndarray[Any, Any]:
    """
    Integrate a batch of events.
    Events format: (x, y, timestamp_ms, polarity)
    Returns: Frame of probabilities [0, 1]
    """
    if not events:
        # Fix: previously returned the raw internal surface, which can
        # exceed 1 (breaking the documented [0, 1] contract) and leaks a
        # mutable reference to internal state. Squash it like the
        # non-empty path does.
        return np.tanh(self.surface)

    current_time = events[-1][2]
    dt = current_time - self.last_update_time

    # Exponential decay of old activity: V_new = V_old * exp(-dt/tau)
    decay_factor = np.exp(-dt / self.decay_tau)
    self.surface *= decay_factor

    # Accumulate new events; out-of-bounds coordinates are dropped.
    for x, y, t, p in events:
        if 0 <= x < self.width and 0 <= y < self.height:
            # Polarity (usually -1/1) is deliberately ignored: both ON and
            # OFF events contribute edge evidence, so we map events to
            # "probability of edge" magnitude only.
            self.surface[y, x] += 1.0

    # tanh saturates unbounded counts into [0, 1) for SC generation.
    output_probs = np.tanh(self.surface)

    self.last_update_time = current_time
    return output_probs

generate_bitstream_frame(length=256)

Generate a HxWxLength bitstream cube from current surface state.

Source code in src/sc_neurocore/interfaces/dvs_input.py
63
64
65
66
67
68
69
70
71
72
def generate_bitstream_frame(self, length: int = 256) -> np.ndarray[Any, Any]:
    """
    Generate a HxWxLength bitstream cube from current surface state.
    """
    # Squash the event surface into per-pixel probabilities, then draw one
    # Bernoulli bit per pixel per time step in a single vectorized pass.
    event_probs = np.tanh(self.surface)
    noise_cube = np.random.random((self.height, self.width, length))
    return (noise_cube < event_probs[:, :, None]).astype(np.uint8)

Real World

sc_neurocore.interfaces.real_world

LSLBridge

Lab Streaming Layer (LSL) Bridge. Connects EEG/Physiological streams to sc-neurocore. (Mock implementation for standalone use).

Source code in src/sc_neurocore/interfaces/real_world.py
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
class LSLBridge:
    """
    Lab Streaming Layer (LSL) Bridge.
    Connects EEG/Physiological streams to sc-neurocore.
    (Mock implementation for standalone use).
    """

    def __init__(self, stream_name: str = "NeuromorphicIn") -> None:
        # Name of the LSL stream this bridge would resolve; only stored and
        # logged in this mock.
        self.stream_name = stream_name
        logger.info("LSL: Listening for stream '%s'...", stream_name)

    def receive_chunk(self, max_samples: int = 32) -> np.ndarray[Any, Any]:
        """
        Simulates receiving a chunk of samples.
        In real version: calls inlet.pull_chunk().

        Returns a (8, max_samples) array — channel count is fixed at 8 in
        this mock.
        """
        # Mock EEG data: 8 channels, zero-mean Gaussian noise with 50 µV
        # standard deviation (plausible scalp-EEG amplitude scale).
        return np.random.normal(0, 50e-6, (8, max_samples))

receive_chunk(max_samples=32)

Simulates receiving a chunk of samples. In real version: calls inlet.pull_chunk().

Source code in src/sc_neurocore/interfaces/real_world.py
26
27
28
29
30
31
32
def receive_chunk(self, max_samples=32) -> np.ndarray[Any, Any]:
    """
    Simulates receiving a chunk of samples.
    In real version: calls inlet.pull_chunk().
    """
    # Mock EEG: 8 channels of zero-mean Gaussian noise at ~50 µV std.
    noise_std = 50e-6
    return np.random.normal(loc=0, scale=noise_std, size=(8, max_samples))

ROS2Node

ROS 2 Interface Node. Publishes motor commands from sc-neurocore to robots.

Source code in src/sc_neurocore/interfaces/real_world.py
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
class ROS2Node:
    """
    ROS 2 Interface Node.
    Publishes motor commands from sc-neurocore to robots.
    (Mock implementation for standalone use.)
    """

    def __init__(self, node_name: str = "neuro_controller") -> None:
        # Node name a real rclpy.Node would be created with.
        self.node_name = node_name
        logger.info("ROS2: Node '%s' initialized.", node_name)

    def publish_cmd_vel(self, linear_x: float, angular_z: float) -> bool:
        """
        Simulates publishing to /cmd_vel.

        Returns:
            True on (simulated) successful publish.
        """
        # Fix: return annotation was `-> None` while the method returns
        # True; annotation corrected to bool (no runtime change).
        msg = {"linear": linear_x, "angular": angular_z}
        # In real version: self.publisher.publish(msg)
        _ = msg  # placeholder payload, kept to mirror the real publish path
        return True

publish_cmd_vel(linear_x, angular_z)

Simulates publishing to /cmd_vel.

Source code in src/sc_neurocore/interfaces/real_world.py
45
46
47
48
49
50
51
52
def publish_cmd_vel(self, linear_x: float, angular_z: float) -> bool:
    """
    Simulates publishing to /cmd_vel.

    Returns:
        True on (simulated) successful publish.
    """
    # Fix: return annotation was `-> None` while the method returns True;
    # annotation corrected to bool (no runtime change).
    msg = {"linear": linear_x, "angular": angular_z}
    # In real version: self.publisher.publish(msg)
    _ = msg  # placeholder payload, kept to mirror the real publish path
    return True