Skip to content

Spatial

3D spatial representations for spike-domain processing.

  • VoxelGrid, PointCloud, SpatialTransformer3D
Python
from sc_neurocore.spatial import VoxelGrid, PointCloud

sc_neurocore.spatial

sc_neurocore.spatial -- Tier: research (experimental; APIs may change).

VoxelGrid dataclass

A 3D Voxel Grid representation for SC. Each voxel stores a probability of being 'occupied'.

Source code in src/sc_neurocore/spatial/representations.py
Python
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
@dataclass
class VoxelGrid:
    """
    A 3D voxel grid representation for stochastic computing (SC).

    Each voxel stores the probability (in [0, 1]) of being 'occupied'.

    Attributes:
        resolution: Number of voxels along each axis (the grid is cubic).
        data: (resolution, resolution, resolution) float array of occupancy
            probabilities; allocated lazily as zeros when not supplied.
    """

    resolution: int
    # Optional (was mistyped as a plain ndarray with a type-ignore);
    # __post_init__ fills in a zero grid when the caller omits it.
    data: "np.ndarray[Any, Any] | None" = None

    def __post_init__(self) -> None:
        if self.data is None:
            self.data = np.zeros((self.resolution, self.resolution, self.resolution))

    def set_voxel(self, x: int, y: int, z: int, prob: float) -> None:
        """Set the occupancy probability of voxel (x, y, z).

        Out-of-bounds coordinates are ignored silently (no exception raised).
        """
        if 0 <= x < self.resolution and 0 <= y < self.resolution and 0 <= z < self.resolution:
            self.data[x, y, z] = prob

    def get_as_bitstream(
        self, length: int = 256, rng: "np.random.Generator | None" = None
    ) -> np.ndarray[Any, Any]:
        """
        Convert the voxel grid to a 4D bitstream (X, Y, Z, length).

        Each bit is 1 with probability equal to its voxel's occupancy value.

        Args:
            length: Number of stochastic bits generated per voxel.
            rng: Optional numpy Generator for reproducible sampling; when
                omitted, falls back to the legacy global ``np.random`` state
                (the original behavior).

        Returns:
            uint8 array of shape (*data.shape, length).
        """
        sampler = np.random if rng is None else rng
        rands = sampler.random((*self.data.shape, length))
        return (rands < self.data[..., None]).astype(np.uint8)

get_as_bitstream(length=256)

Converts the voxel grid to a 4D bitstream (X, Y, Z, Length).

Source code in src/sc_neurocore/spatial/representations.py
Python
33
34
35
36
37
38
def get_as_bitstream(self, length: int = 256) -> np.ndarray[Any, Any]:
    """
    Expand the occupancy grid into a 4D stochastic bitstream of shape
    (X, Y, Z, length): each bit is 1 with the voxel's occupancy probability.
    """
    probs = self.data[..., None]
    noise = np.random.random((*self.data.shape, length))
    return (probs > noise).astype(np.uint8)

PointCloud dataclass

A Point Cloud representation. Each point has (x, y, z) coordinates and an associated probability/intensity.

Source code in src/sc_neurocore/spatial/representations.py
Python
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
@dataclass
class PointCloud:
    """
    A point cloud representation.

    Holds (x, y, z) coordinates per point plus an associated
    probability/intensity value for each point.
    """

    points: np.ndarray[Any, Any]  # (N, 3) coordinates
    intensities: np.ndarray[Any, Any]  # (N,) per-point values

    def normalize(self) -> None:
        """Rescale coordinates into [0, 1] (global min/max over all axes)
        and clamp intensities to [0, 1], in place."""
        lo = np.min(self.points)
        hi = np.max(self.points)
        # Small epsilon keeps the division finite for a degenerate cloud.
        self.points = (self.points - lo) / (hi - lo + 1e-9)
        self.intensities = np.clip(self.intensities, 0, 1)

SpatialTransformer3D dataclass

A transformer block specialized for 3D spatial data. Processes voxel grids using SC attention.

Source code in src/sc_neurocore/spatial/transformer_3d.py
Python
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
@dataclass
class SpatialTransformer3D:
    """
    A transformer block specialized for 3D spatial data.

    Processes voxel grids with SC attention, treating each voxel as a
    one-feature token.
    """

    resolution: int  # expected edge length of the input grid
    dim_k: int  # attention key/feature dimension

    def __post_init__(self) -> None:
        # NOTE(review): StochasticAttention is defined elsewhere in the package.
        self.attention = StochasticAttention(dim_k=self.dim_k)

    def forward(self, voxel_grid: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
        """
        Apply SC self-attention over the voxels of ``voxel_grid``.

        Each voxel is a single-feature token; the feature is tiled to
        ``dim_k`` columns as a mock projection shared by Q, K, and V.

        Args:
            voxel_grid: 3D occupancy array. Any 3D shape is accepted and the
                output matches it. (Previously the output was hard-coded to
                (resolution, resolution, resolution), which broke for grids
                whose shape differed from the declared resolution.)

        Returns:
            Array of the same shape as ``voxel_grid`` with one attended value
            per voxel (mean over the dim_k feature columns).
        """
        # Flatten spatial dims to (n_voxels, 1): one token per voxel.
        flat_grid = voxel_grid.reshape(-1, 1)

        # Mock projection: tile the single feature to dim_k columns.
        # Q, K, V share the same projection (self-attention).
        Q = np.repeat(flat_grid, self.dim_k, axis=1)
        K = Q
        V = Q

        attn_out = self.attention.forward(Q, K, V)

        # Collapse features to one value per voxel and restore the input's
        # spatial shape (generalized from the old hard-coded cubic reshape).
        return np.mean(attn_out, axis=1).reshape(voxel_grid.shape)

forward(voxel_grid)

Input: voxel_grid (res, res, res) We flatten the spatial dims to (res^3, 1) or similar to apply attention. For simplicity, we treat each voxel as a 'token'.

Source code in src/sc_neurocore/spatial/transformer_3d.py
Python
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
def forward(self, voxel_grid: np.ndarray[Any, Any]) -> np.ndarray[Any, Any]:
    """
    Run SC self-attention over a (res, res, res) voxel grid.

    Every voxel becomes a single-feature 'token'; that feature is tiled to
    dim_k columns as a stand-in projection shared by Q, K, and V.
    """
    res = self.resolution

    # One token per voxel, single feature column: shape (res^3, 1).
    tokens = voxel_grid.flatten()[:, np.newaxis]

    # Stand-in projection to dim_k features; Q, K, and V coincide here
    # (a real model would learn separate projections).
    queries = np.repeat(tokens, self.dim_k, axis=1)
    keys = queries
    values = queries

    attended = self.attention.forward(queries, keys, values)

    # Average the feature columns and restore the cubic spatial layout.
    return np.mean(attended, axis=1).reshape((res, res, res))