Skip to content

Draft: Reenabling bool tests #289

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 12 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 0 additions & 4 deletions slangpy/tests/device/slang/test_nested_structs.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,6 @@

@pytest.mark.parametrize("device_type", helpers.DEFAULT_DEVICE_TYPES)
def test_nested_structs(device_type: spy.DeviceType):
if device_type in [spy.DeviceType.cuda, spy.DeviceType.metal]:
pytest.skip(
"bool is currently not handled correctly on CUDA/Metal, see issue: https://github.com/shader-slang/slangpy/issues/274"
)
device = helpers.get_device(device_type)

program = device.load_program("slang/test_nested_structs.slang", ["compute_main"])
Expand Down
185 changes: 179 additions & 6 deletions slangpy/tests/device/test_buffer_cursor.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,12 @@
"float3x4(1.0, 2.0, 3.0, 4.0, -1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0)",
spy.float3x4([1.0, 2.0, 3.0, 4.0, -1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0]),
),
(
"f_float4x3",
"float4x3",
"float4x3(1.0, 2.0, 3.0, 4.0, -1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0)",
spy.float4x3([1.0, 2.0, 3.0, 4.0, -1.0, -2.0, -3.0, -4.0, 5.0, 6.0, 7.0, 8.0]),
),
(
"f_float4x4",
"float4x4",
Expand Down Expand Up @@ -231,13 +237,8 @@
]


# Filter out all bool tests for CUDA/Metal backend, as it is not handled correct. See issue:
# https://github.com/shader-slang/slangpy/issues/274
def get_tests(device_type: spy.DeviceType):
if device_type not in [spy.DeviceType.cuda, spy.DeviceType.metal]:
return TESTS
tests = [x for x in TESTS if "bool" not in x[0]]
return tests
return TESTS


def variable_decls(tests: list[Any]):
Expand Down Expand Up @@ -531,6 +532,178 @@ def test_apply_changes(device_type: spy.DeviceType, seed: int):
check_match(test, element[name].read())


@pytest.mark.parametrize("device_type", helpers.DEFAULT_DEVICE_TYPES)
@pytest.mark.parametrize("seed", RAND_SEEDS)
@pytest.mark.parametrize("element_class", [np.array, spy.bool2, tuple, list])
def test_bool_buffers(device_type: spy.DeviceType, seed: int, element_class: Any):
    """Round-trip bool2 values through a compute kernel via BufferCursor.

    Writes seeded-random bool pairs (expressed as numpy arrays, spy.bool2,
    tuples or lists, depending on ``element_class``) into a source buffer,
    runs a trivial copy kernel, and checks that both the source readback and
    the GPU-copied destination match the original data.
    """
    # Minimal copy kernel: dest[i] = src[i] for StructuredBuffer<bool2>.
    # Plain string (no f-string needed), so braces don't have to be escaped.
    code = """
[shader("compute")]
[numthreads(1, 1, 1)]
void compute_main(uint3 tid: SV_DispatchThreadID, StructuredBuffer<bool2> src, RWStructuredBuffer<bool2> dest) {
    uint i = tid.x;
    dest[i] = src[i];
}
"""
    # Hash the shader source into the module name so differing sources never
    # collide in the module cache.
    mod_name = (
        "test_buffer_cursor_TestBoolBuffers_" + hashlib.sha256(code.encode()).hexdigest()[0:8]
    )
    device = helpers.get_device(device_type)
    module = device.load_module_from_source(mod_name, code)
    prog = device.link_program([module], [module.entry_point("compute_main")])
    buffer_layout = module.layout.get_type_layout(
        module.layout.find_type_by_name("StructuredBuffer<bool2>")
    )
    kernel = device.create_compute_kernel(prog)

    # Make zero-initialized src/dest buffers with `count` elements each and
    # cursors to wrap them.
    count = 128

    def make_buffer():
        # Helper: one zeroed buffer holding `count` bool2 elements.
        return kernel.device.create_buffer(
            element_count=count,
            struct_type=buffer_layout,
            usage=spy.BufferUsage.shader_resource | spy.BufferUsage.unordered_access,
            data=np.zeros(buffer_layout.element_type_layout.stride * count, dtype=np.uint8),
        )

    src = make_buffer()
    dest = make_buffer()
    src_cursor = spy.BufferCursor(buffer_layout.element_type_layout, src)
    dest_cursor = spy.BufferCursor(buffer_layout.element_type_layout, dest)

    # Deterministic random bool pairs for the given seed.
    random.seed(seed)
    list_data = [[random.randint(0, 1) == 1, random.randint(0, 1) == 1] for _ in range(count)]
    if element_class == np.array:
        data = [np.array(x, dtype=np.bool_) for x in list_data]
    elif element_class == spy.bool2:
        data = [spy.bool2(x) for x in list_data]
    elif element_class == tuple:
        data = [tuple(x) for x in list_data]
    else:  # list
        data = list_data

    for i in range(count):
        src_cursor[i].write(data[i])

    # Apply changes to source
    src_cursor.apply()

    # Dispatch the kernel
    kernel.dispatch([count, 1, 1], src=src, dest=dest)

    # Read back and verify both the GPU copy and the source round-trip.
    dest_cursor.load()
    for i in range(count):
        result = dest_cursor[i].read()
        assert result == spy.bool2(list_data[i])
        assert result == src_cursor[i].read()


# Test introduced as a canary for https://github.com/shader-slang/slang/issues/7441:
# it fails once that issue is resolved and the reported type information or the
# underlying types change, signalling that the workarounds can be removed.
@pytest.mark.parametrize("device_type", helpers.DEFAULT_DEVICE_TYPES)
def test_boolX_reflection(device_type: spy.DeviceType):
    """Pin the reflected layout of bool2 per backend.

    Acts as a canary for https://github.com/shader-slang/slang/issues/7441:
    if Slang changes the reported type information (or the underlying bool
    sizes), this test fails and the related workarounds can be revisited.
    """
    # Plain string (no f-string needed), so braces don't have to be escaped.
    code = """
[shader("compute")]
[numthreads(1, 1, 1)]
void compute_main(uint3 tid: SV_DispatchThreadID, StructuredBuffer<bool2> src, RWStructuredBuffer<bool2> dest) {
    uint i = tid.x;
    dest[i] = src[i];
}
"""
    # Hash the shader source into the module name so differing sources never
    # collide in the module cache.
    mod_name = (
        "test_buffer_cursor_test_boolX_reflection_" + hashlib.sha256(code.encode()).hexdigest()[0:8]
    )
    device = helpers.get_device(device_type)
    module = device.load_module_from_source(mod_name, code)
    prog = device.link_program([module], [module.entry_point("compute_main")])
    sb_bool2_layout = module.layout.get_type_layout(
        module.layout.find_type_by_name("StructuredBuffer<bool2>")
    )
    pb_bool2_layout = module.layout.get_type_layout(
        module.layout.find_type_by_name("ParameterBlock<bool2>")
    )
    u_bool2_layout = module.layout.get_type_layout(module.layout.find_type_by_name("bool2"))

    sb_bool2_element_layout = sb_bool2_layout.element_type_layout
    pb_bool2_element_layout = pb_bool2_layout.element_type_layout

    def make_layout(type_layout: spy.TypeLayoutReflection):
        # Snapshot the layout properties we want to pin for this type.
        # NOTE(review): "stride" reports type_layout.size, not .stride — all
        # reference values below assume size == stride; confirm this is
        # intentional before "fixing" it.
        return {
            "size": type_layout.size,
            "stride": type_layout.size,
            "element_stride": type_layout.element_stride(),
            "element_type_layout.size": type_layout.element_type_layout.size,
            "element_type_layout.stride": type_layout.element_type_layout.stride,
        }

    def make_layout_ref():
        # Expected values per backend. d3d12/vulkan/wgpu report 4-byte bools;
        # metal/cpu report 1-byte bools.
        if device_type in (spy.DeviceType.d3d12, spy.DeviceType.vulkan, spy.DeviceType.wgpu):
            return {
                "size": 8,
                "stride": 8,
                "element_stride": 4,
                "element_type_layout.size": 4,
                "element_type_layout.stride": 4,
            }
        if device_type in (spy.DeviceType.metal, spy.DeviceType.cpu):
            return {
                "size": 2,
                "stride": 2,
                "element_stride": 1,
                "element_type_layout.size": 1,
                "element_type_layout.stride": 1,
            }
        # This is actually reporting wrong, see issue: https://github.com/shader-slang/slang/issues/7441
        # Once that issue has been resolved, this test should trigger and workarounds can be removed
        if device_type == spy.DeviceType.cuda:
            return {
                "size": 8,
                "stride": 8,
                "element_stride": 1,
                "element_type_layout.size": 1,
                "element_type_layout.stride": 1,
            }
        # Fail loudly instead of returning None and producing a cryptic
        # `dict == None` assertion below.
        pytest.fail(f"No reference bool2 layout defined for device type {device_type}")

    layout_descs = {
        "u_bool2": make_layout(u_bool2_layout),
        "sb_bool2_element": make_layout(sb_bool2_element_layout),
        "pb_bool2_element": make_layout(pb_bool2_element_layout),
    }

    ref_desc = make_layout_ref()

    # Include the layout name in the failure message for easier diagnosis.
    for name, desc in layout_descs.items():
        assert desc == ref_desc, f"bool2 layout mismatch for {name}"


@pytest.mark.parametrize("device_type", helpers.DEFAULT_DEVICE_TYPES)
@pytest.mark.parametrize("seed", RAND_SEEDS)
def test_apply_changes_ndarray(device_type: spy.DeviceType, seed: int):
Expand Down
20 changes: 4 additions & 16 deletions slangpy/tests/device/test_shader_cursor.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ class TypeInfo:


TYPE_INFOS = {
"bool": TypeInfo(size=4, struct="I", dtype=np.uint32), # np.bool is 8 bits
"bool": TypeInfo(size=4, struct="I", dtype=np.bool_), # np.bool is 8 bits
"int": TypeInfo(size=4, struct="i", dtype=np.int32),
"uint": TypeInfo(size=4, struct="I", dtype=np.uint32),
"float": TypeInfo(size=4, struct="f", dtype=np.float32),
Expand All @@ -49,10 +49,7 @@ class TypeInfo:


def get_type_info(device_type: spy.DeviceType, type: str):
if device_type != spy.DeviceType.cuda or type != "bool":
return TYPE_INFOS[type]
# CUDA bool is size 1
TypeInfo(size=1, struct="I", dtype=np.uint32)
return TYPE_INFOS[type]


@dataclass
Expand Down Expand Up @@ -321,10 +318,7 @@ def write_var(
sizes.append(size)
references.append(struct.pack(struct_pattern, *flat_value).hex())

# CUDA/Metal have bool size of 1, which is currently not handled, see issue:
# https://github.com/shader-slang/slangpy/issues/274
if device_type not in [spy.DeviceType.cuda, spy.DeviceType.metal] or var.type != "bool":
cursor[name_or_index] = value
cursor[name_or_index] = value

def write_vars(
device_type: spy.DeviceType,
Expand Down Expand Up @@ -375,13 +369,7 @@ def write_vars(
named_typed_result[0] == "u_float2x2" or named_typed_result[0] == "u_float3x3"
):
continue
# CUDA/Metal have bool size of 1, which is currently not handled, see issue:
# https://github.com/shader-slang/slangpy/issues/274
if (
device_type in [spy.DeviceType.cuda, spy.DeviceType.metal]
and named_typed_result[1] == "bool"
):
continue

assert named_typed_result == named_typed_reference


Expand Down
2 changes: 1 addition & 1 deletion slangpy/tests/device/test_shader_cursor.slang
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@ extension bool : IWritable
{
void write(inout Writer writer)
{
writer.buffer[writer.offset++] = asuint(this);
writer.buffer[writer.offset++] = (this) ? 1u : 0u;
}
}

Expand Down
Loading
Loading