Fully working native slangpy + version update (#12)
* Initial work to get native slangpy running again

* All slangpy tests now working

* Start native optimizations

* NativeType->NativeMarshall

* Start on native value and buffers

* Fix interfaces tests

* Native NDBuffer largely working

* Pass stride in to native ndbuffer

* Fix vector/set tests, disable numpy ones until next pass

* Remove emulated tests

* Native slangpy buffers etc all working

* Lock to sgl v0.7

* Update project version + changelog

ccummingsNV authored Jan 23, 2025
1 parent 3b3585f commit db9e357
Showing 7 changed files with 82 additions and 73 deletions.
7 changes: 7 additions & 0 deletions docs/changelog.rst
@@ -1,6 +1,13 @@
 Changelog
 ---------
 
+**Version 0.13.0**
+
+- Update required version of `nv-sgl` to `0.7.0`
+- Native SlangPy backend re-enabled
+- Conversion of NDBuffer to native code
+- PyTorch integration refactor
+
 **Version 0.12.0**
 
 - Update required version of `nv-sgl` to `0.6.2`
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "slangpy"
-version = "0.12.0"
+version = "0.13.0"
 authors = [
     {name = "Chris Cummings", email = "[email protected]"},
     {name = "Benedikt Bitterli", email = "[email protected]"},
@@ -17,7 +17,7 @@ requires-python = ">=3.9"
 dependencies = [
     "typing_extensions",
     "numpy",
-    "nv-sgl>=0.6.2"
+    "nv-sgl==0.7.0"
 ]
 
 [tool.setuptools]
69 changes: 25 additions & 44 deletions slangpy/builtin/ndbuffer.py
@@ -93,6 +93,26 @@ def ndbuffer_resolve_dimensionality(self: 'BaseNDBufferMarshall', context: BindC
     return self.dims + len(self.slang_element_type.shape) - len(vector_target_type.shape)
 
 
+def ndbuffer_gen_calldata(self: 'BaseNDBufferMarshall', cgb: CodeGenBlock, context: BindContext, binding: 'BoundVariable'):
+    access = binding.access
+    name = binding.variable_name
+    assert access[0] != AccessType.none
+    assert access[1] == AccessType.none
+    if isinstance(binding.vector_type, NDBufferType):
+        # If passing to NDBuffer, just use the NDBuffer type
+        assert access[0] == AccessType.read
+        assert isinstance(binding.vector_type, NDBufferType)
+        cgb.type_alias(f"_t_{name}", binding.vector_type.full_name)
+    else:
+        # If broadcasting to an element, use the type of this buffer for code gen
+        if access[0] == AccessType.read:
+            cgb.type_alias(
+                f"_t_{name}", f"NDBuffer<{self.slang_element_type.full_name},{self.dims}>")
+        else:
+            cgb.type_alias(
+                f"_t_{name}", f"RWNDBuffer<{self.slang_element_type.full_name},{self.dims}>")
+
+
 class BaseNDBufferMarshall(Marshall):
     def __init__(self, layout: SlangProgramLayout, element_type: SlangType, dims: int, writable: bool):
         super().__init__(layout)
@@ -133,6 +153,10 @@ def __init__(self, layout: SlangProgramLayout, element_type: SlangType, dims: in
     def has_derivative(self) -> bool:
         return False
 
+    @property
+    def is_writable(self) -> bool:
+        return self.writable
+
     def reduce_type(self, context: BindContext, dimensions: int):
         return ndbuffer_reduce_type(self, context, dimensions)
 
@@ -143,50 +167,7 @@ def resolve_dimensionality(self, context: BindContext, binding: BoundVariable, v
         return ndbuffer_resolve_dimensionality(self, context, binding, vector_target_type)
 
     def gen_calldata(self, cgb: CodeGenBlock, context: BindContext, binding: 'BoundVariable'):
-        access = binding.access
-        name = binding.variable_name
-        assert access[0] != AccessType.none
-        assert access[1] == AccessType.none
-        if isinstance(binding.vector_type, NDBufferType):
-            # If passing to NDBuffer, just use the NDBuffer type
-            assert access[0] == AccessType.read
-            assert isinstance(binding.vector_type, NDBufferType)
-            cgb.type_alias(f"_t_{name}", binding.vector_type.full_name)
-        else:
-            # If broadcasting to an element, use the type of this buffer for code gen
-            if access[0] == AccessType.read:
-                cgb.type_alias(
-                    f"_t_{name}", f"NDBuffer<{self.slang_element_type.full_name},{self.dims}>")
-            else:
-                cgb.type_alias(
-                    f"_t_{name}", f"RWNDBuffer<{self.slang_element_type.full_name},{self.dims}>")
-
-    def create_calldata(self, context: CallContext, binding: 'BoundVariableRuntime', data: NativeNDBuffer) -> Any:
-        if context.device != data.device:
-            raise NameError("Buffer is linked to wrong device")
-
-        if isinstance(binding.vector_type, NDBufferType):
-            return {
-                'buffer': data.storage,
-                'strides': data.strides.as_tuple(),
-                'shape': data.shape.as_tuple()
-            }
-        else:
-            broadcast = _calc_broadcast(context, binding)
-            strides = data.strides.as_tuple()
-            return {
-                'buffer': data.storage,
-                'strides': [strides[i] if not broadcast[i] else 0 for i in range(len(strides))],
-                'shape': data.shape.as_tuple()
-            }
-
-    # Buffers provide their buffer and strides for dispatch
-    def create_dispatchdata(self, data: NDBuffer) -> Any:
-        return data.uniforms()
-
-    @property
-    def is_writable(self) -> bool:
-        return self.writable
+        return ndbuffer_gen_calldata(self, cgb, context, binding)
 
 
 def create_vr_type_for_value(layout: SlangProgramLayout, value: Any):
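
Note on the hunk above: the body of gen_calldata moves to the module-level ndbuffer_gen_calldata helper (joining ndbuffer_reduce_type, ndbuffer_resolve_type and ndbuffer_resolve_dimensionality), which takes the marshall as an explicit first argument so other buffer-like marshalls can share it. A minimal sketch of the delegation pattern, where MyBufferMarshall is a hypothetical subclass, not part of this diff:

    from slangpy.builtin.ndbuffer import BaseNDBufferMarshall, ndbuffer_gen_calldata

    class MyBufferMarshall(BaseNDBufferMarshall):
        def gen_calldata(self, cgb, context, binding):
            # Delegates alias generation: emits an NDBuffer<T,N> alias for
            # read access and RWNDBuffer<T,N> for write access, exactly as
            # the former in-class implementation did.
            return ndbuffer_gen_calldata(self, cgb, context, binding)

This is the same pattern NumpyMarshall adopts in the next file.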
55 changes: 42 additions & 13 deletions slangpy/builtin/numpy.py
@@ -1,27 +1,55 @@
 # SPDX-License-Identifier: Apache-2.0
-from typing import Any, Optional
+from typing import Any
 
-from slangpy.backend import Buffer
-from slangpy.core.native import AccessType, CallContext
 from slangpy.bindings.boundvariableruntime import BoundVariableRuntime
-from slangpy.bindings.marshall import ReturnContext
+from slangpy.bindings.boundvariable import BoundVariable
+from slangpy.bindings.codegen import CodeGenBlock
+from slangpy.bindings.marshall import BindContext, ReturnContext
 from slangpy.bindings.typeregistry import PYTHON_SIGNATURES, PYTHON_TYPES
-from slangpy.core.native import Shape
-from slangpy.builtin.ndbuffer import NDBufferMarshall
+from slangpy.core.native import NativeNumpyMarshall
+from slangpy.builtin.ndbuffer import ndbuffer_gen_calldata, ndbuffer_reduce_type, ndbuffer_resolve_dimensionality, ndbuffer_resolve_type
 
 import numpy as np
 import numpy.typing as npt
 
-from slangpy.reflection.reflectiontypes import NUMPY_TYPE_TO_SCALAR_TYPE, SCALAR_TYPE_TO_NUMPY_TYPE, SlangProgramLayout, ScalarType, VectorType, MatrixType
-from slangpy.types.buffer import NDBuffer
+from slangpy.reflection.reflectiontypes import NUMPY_TYPE_TO_SCALAR_TYPE, SCALAR_TYPE_TO_NUMPY_TYPE, SlangProgramLayout, ScalarType, SlangType, VectorType, MatrixType
 
 
-class NumpyMarshall(NDBufferMarshall):
+class NumpyMarshall(NativeNumpyMarshall):
 
     def __init__(self, layout: SlangProgramLayout, dtype: np.dtype[Any], dims: int, writable: bool):
-        scalar_type = layout.scalar_type(NUMPY_TYPE_TO_SCALAR_TYPE[dtype])
-        super().__init__(layout, scalar_type, dims, writable)
-        self.dtype = dtype
+        slang_el_type = layout.scalar_type(NUMPY_TYPE_TO_SCALAR_TYPE[dtype])
+        assert slang_el_type is not None
+
+        slang_el_layout = slang_el_type.buffer_layout
+
+        slang_buffer_type = layout.find_type_by_name(
+            f"RWNDBuffer<{slang_el_type.full_name},{dims}>")
+        assert slang_buffer_type is not None
+
+        super().__init__(dims, slang_buffer_type, slang_el_type, slang_el_layout.stride, dtype)
+
+    @property
+    def has_derivative(self) -> bool:
+        return False
+
+    @property
+    def is_writable(self) -> bool:
+        return True
+
+    def reduce_type(self, context: BindContext, dimensions: int):
+        return ndbuffer_reduce_type(self, context, dimensions)
+
+    def resolve_type(self, context: BindContext, bound_type: 'SlangType'):
+        return ndbuffer_resolve_type(self, context, bound_type)
+
+    def resolve_dimensionality(self, context: BindContext, binding: BoundVariable, vector_target_type: 'SlangType'):
+        return ndbuffer_resolve_dimensionality(self, context, binding, vector_target_type)
+
+    def gen_calldata(self, cgb: CodeGenBlock, context: BindContext, binding: 'BoundVariable'):
+        return ndbuffer_gen_calldata(self, cgb, context, binding)
+
+
+"""
     def get_shape(self, value: Optional[npt.NDArray[Any]] = None) -> Shape:
         if value is not None:
             return Shape(value.shape)+self.slang_element_type.shape
@@ -57,6 +85,7 @@ def create_output(self, context: CallContext, binding: BoundVariableRuntime) ->
     def read_output(self, context: CallContext, binding: BoundVariableRuntime, data: npt.NDArray[Any]) -> Any:
         return data
+"""
 
 
 def create_vr_type_for_value(layout: SlangProgramLayout, value: Any):
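
Note on the hunks above: NumpyMarshall now derives from the native NativeNumpyMarshall and delegates type resolution and code generation to the shared ndbuffer helpers, which is what re-enables numpy arguments in the tests below. A hedged usage sketch based on those tests; the add_float3s kernel name comes from test_numpy.py, while the module loading and the (4, 3) shapes are assumptions for illustration:

    import numpy as np

    # Numpy arrays map to (RW)NDBuffer parameters, and return_type(np.ndarray)
    # requests a numpy result back. If a shape does not fit the kernel
    # signature, a RuntimeError ("... does not match the expected shape") is
    # raised, which is exactly what the updated test below asserts.
    a = np.random.rand(4, 3).astype(np.float32)
    b = np.random.rand(4, 3).astype(np.float32)
    res = module.add_float3s.return_type(np.ndarray)(a, b)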
14 changes: 4 additions & 10 deletions slangpy/reflection/reflectiontypes.py
@@ -145,12 +145,13 @@ def __init__(self,
         self._cached_uniform_layout: Optional[SlangLayout] = None
         self._cached_buffer_layout: Optional[SlangLayout] = None
 
+        # Native shape storage
         if self._element_type == self:
-            self._cached_shape = local_shape
+            self.shape = local_shape
         elif local_shape.valid and self._element_type is not None:
-            self._cached_shape = local_shape + self._element_type.shape
+            self.shape = local_shape + self._element_type.shape
         else:
-            self._cached_shape = local_shape
+            self.shape = local_shape
 
     def on_hot_reload(self, refl: TypeReflection):
         """
@@ -199,13 +200,6 @@ def fields(self) -> dict[str, SlangField]:
         """
         return self._get_fields()
 
-    @property
-    def shape(self) -> Shape:
-        """
-        Shape of this type.
-        """
-        return self._cached_shape
-
     @property
     def differentiable(self) -> bool:
         """
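
Note on the hunks above: a type's shape is its local shape concatenated with its element type's shape, and it is now stored as a plain attribute at construction time ("Native shape storage") rather than exposed through the removed shape property, presumably so native code can read it without a Python property call. A small illustration of the composition rule, assuming Shape accepts a tuple and concatenates with + as it does elsewhere in this diff:

    from slangpy.core.native import Shape

    local = Shape((4,))       # e.g. a 4-element array...
    element = Shape((3,))     # ...of 3-component vectors
    combined = local + element
    print(combined.as_tuple())  # expected: (4, 3)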
4 changes: 1 addition & 3 deletions slangpy/tests/test_numpy.py
@@ -5,8 +5,6 @@
 import slangpy.tests.helpers as helpers
 from slangpy.backend import DeviceType
 
-pytest.skip("Pending 2nd pass to get numpy args working again", allow_module_level=True)
-
 NUMPY_MODULE = r"""
 import "slangpy";
@@ -106,7 +104,7 @@ def test_fail_numpy_float3s(device_type: DeviceType):
     a = np.random.rand(2, 2, 2).astype(np.float32)
     b = np.random.rand(2, 2, 2).astype(np.float32)
 
-    with pytest.raises(ValueError, match="Element shape mismatch"):
+    with pytest.raises(RuntimeError, match="does not match the expected shape"):
         module.add_float3s.return_type(np.ndarray)(a, b)
 
 # Ensure numpy array kernels are cached correctly
2 changes: 1 addition & 1 deletion slangpy/tests/test_torchintegration.py
@@ -16,7 +16,7 @@
 }
 """
 
-pytest.skip("Test not implemented", allow_module_level=True)
+# pytest.skip("Test not implemented", allow_module_level=True)
 
 
 def get_test_tensors(device: Device, N: int = 4):
