The buggy test in test_clear_unordered_access_view_buffer() fails with this log line:
d3d12:5036:Test 22: Test failed: Got 0xffff7fff, expected 0xffff8000 at (0, 0, 0).
It seems that NVIDIA drivers convert float 0.5 to unorm 0x7fff instead of 0x8000 when writing to the UAV.
Something similar happens for the test in test_clear_unordered_access_view_image().
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com>
---
Actually, I am not completely sure of what is happening here (and the commit message might therefore be wrong). My understanding is that this test uses the shader cs_uav_clear_buffer_float_code, in which the buffer is declared as "RWBuffer<float4> dst", while the underlying UAV is created with format DXGI_FORMAT_R16G16_UNORM. I suppose D3D12 interprets that as a conversion from float to a normalized uint16, so the value 0.5 is converted to something in the middle of the uint16 range.

Whether 0x7fff or 0x8000 should be produced thus depends on the subtlety of how the uint16 range is defined: should the midpoint be half of 0x10000 or half of 0xffff? It seems that the Linux Vulkan NVIDIA drivers choose the second interpretation. Could somebody help me understand whether the Vulkan specification mandates which of the two behaviours is correct?
---
 tests/d3d12.c | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)
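To make the question concrete, here is the arithmetic written out as a small standalone C program; this is only my illustration of the two interpretations, not code taken from the test or from any driver.

#include <math.h>
#include <stdio.h>

/* 0.5f converted to a 16-bit UNORM value under the two interpretations
 * discussed above.  Which scale factor and rounding rule is supposed to be
 * used is exactly the open question. */
int main(void)
{
    float v = 0.5f;

    /* Scale by 0x10000 and truncate: 0.5 * 65536 = 32768 = 0x8000. */
    unsigned int scale_10000 = (unsigned int)(v * 65536.0f);

    /* Scale by 0xffff: 0.5 * 65535 = 32767.5, exactly halfway between two
     * integers.  Rounding to nearest gives 0x8000, while truncating gives
     * 0x7fff, the value observed on NVIDIA. */
    unsigned int scale_ffff_round = (unsigned int)roundf(v * 65535.0f);
    unsigned int scale_ffff_trunc = (unsigned int)(v * 65535.0f);

    printf("0x10000 scale, truncated: %#x\n", scale_10000);      /* 0x8000 */
    printf("0xffff scale, rounded:    %#x\n", scale_ffff_round); /* 0x8000 */
    printf("0xffff scale, truncated:  %#x\n", scale_ffff_trunc); /* 0x7fff */
    return 0;
}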
diff --git a/tests/d3d12.c b/tests/d3d12.c
index 5067dd97..d32e4cba 100644
--- a/tests/d3d12.c
+++ b/tests/d3d12.c
@@ -4878,6 +4878,7 @@ static void test_clear_unordered_access_view_buffer(void)
         unsigned int expected;
         bool is_float;
         bool is_todo;
+        bool bug_on_nvidia;
     }
     tests[] =
     {
@@ -4930,7 +4931,7 @@ static void test_clear_unordered_access_view_buffer(void)
         {DXGI_FORMAT_R16G16_UNORM, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
                 {0x1234, 0xabcd, 0, 0}, 0xabcd1234},
         {DXGI_FORMAT_R16G16_UNORM, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
-                {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0, 0}, 0xffff8000, true},
+                {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0, 0}, 0xffff8000, true, false, true},
         {DXGI_FORMAT_R16G16_UNORM, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
                 {0x40000000 /* 2.0f */, 0 /* 0.0f */, 0, 0}, 0x0000ffff, true},
         {DXGI_FORMAT_R16G16_UNORM, { 0, BUFFER_SIZE / sizeof(uint32_t), 0, 0, D3D12_BUFFER_UAV_FLAG_NONE},
@@ -5032,6 +5033,7 @@ static void test_clear_unordered_access_view_buffer(void)
         check_readback_data_uint(&rb, &box, clear_value[0], 0);
         box.left = uav_desc.Buffer.FirstElement;
         box.right = uav_desc.Buffer.FirstElement + uav_desc.Buffer.NumElements;
+        bug_if(tests[i].bug_on_nvidia && is_nvidia_device(device))
         todo_if(tests[i].is_todo)
         check_readback_data_uint(&rb, &box, tests[i].expected, tests[i].is_float ? 1 : 0);
         box.left = uav_desc.Buffer.FirstElement + uav_desc.Buffer.NumElements;
@@ -5086,6 +5088,7 @@ static void test_clear_unordered_access_view_image(void)
         unsigned int expected;
         bool is_float;
         bool is_todo;
+        bool bug_on_nvidia;
     }
     tests[] =
     {
@@ -5124,7 +5127,7 @@ static void test_clear_unordered_access_view_image(void)
         {DXGI_FORMAT_R11G11B10_FLOAT, 1, 1, 0, 0, 1, 0, {}, {1, 2, 3, 4}, 0x00c01001},
         /* Test float clears with formats. */
         {DXGI_FORMAT_R16G16_UNORM, 1, 1, 0, 0, 1, 0, {},
-                {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0, 0}, 0xffff8000, true},
+                {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0, 0}, 0xffff8000, true, false, true},
         {DXGI_FORMAT_R16G16_FLOAT, 1, 1, 0, 0, 1, 0, {},
                 {0x3f000000 /* 0.5f */, 0x3f800000 /* 1.0f */, 0, 0}, 0x3c003800, true},
         {DXGI_FORMAT_R8G8B8A8_UNORM, 1, 1, 0, 0, 1, 0, {},
@@ -5308,6 +5311,7 @@ static void test_clear_unordered_access_view_image(void)
                     actual_colour = get_readback_uint(&rb, x, y, z);
                     success = compare_color(actual_colour, expected_colour, tests[i].is_float ? 1 : 0);
+                    bug_if(tests[i].bug_on_nvidia && is_nvidia_device(device))
                     todo_if(tests[i].is_todo && expected_colour)
                     ok(success, "At layer %u, (%u,%u,%u), expected %#x, got %#x.\n",
                             layer, x, y, z, expected_colour, actual_colour);
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com>
---
Here again, I would be happy if anybody could help me navigate the Vulkan specification to determine whether the problem lies with the NVIDIA drivers not converting NaNs properly, or with us not translating the shader properly.
---
 tests/d3d12.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
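For reference, the expected values in the dtou rows of uint_tests[] below encode the following conversion behaviour, written out here as a plain C sketch; this is only my reading of what the test expects, not code from vkd3d or from the D3D runtime.

#include <math.h>
#include <stdint.h>

/* Double-to-uint conversion as the dtou test rows expect it: NaN becomes 0
 * (the case that appears to go wrong on NVIDIA) and out-of-range values
 * saturate.  Whether a Vulkan driver is required to behave the same way for
 * the corresponding conversion instruction is the question asked above. */
static uint32_t dtou_expected(double v)
{
    if (isnan(v))
        return 0;
    if (v < 0.0)
        return 0;           /* Saturate below the range; covers -INFINITY. */
    if (v >= 4294967295.0)
        return UINT32_MAX;  /* Saturate above the range; covers INFINITY. */
    return (uint32_t)v;     /* In range: truncate towards zero. */
}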
diff --git a/tests/d3d12.c b/tests/d3d12.c
index d32e4cba..b85a8826 100644
--- a/tests/d3d12.c
+++ b/tests/d3d12.c
@@ -9943,6 +9943,7 @@ static void test_shader_instructions(void)
         bool is_float64;
         bool is_todo;
         bool skip_on_warp;
+        bool bug_on_nvidia;
     }
     uint_tests[] =
     {
@@ -10191,8 +10192,8 @@ static void test_shader_instructions(void)
         {&ps_dge, {.d = {{1.5, 1.0}}}, {{0xffffffff}}, true},
         {&ps_dlt, {.d = {{0.0, 1.0}}}, {{0xffffffff}}, true},
         {&ps_dlt, {.d = {{1.0, 1.0}}}, {{0x00000000}}, true},
-        {&ps_dtou, {.d = {{ -NAN}}}, {{ 0, 0 }}, true},
-        {&ps_dtou, {.d = {{ NAN}}}, {{ 0, 0 }}, true},
+        {&ps_dtou, {.d = {{ -NAN}}}, {{ 0, 0 }}, true, false, false, true},
+        {&ps_dtou, {.d = {{ NAN}}}, {{ 0, 0 }}, true, false, false, true},
         {&ps_dtou, {.d = {{-INFINITY}}}, {{ 0, ~0u}}, true},
         {&ps_dtou, {.d = {{ INFINITY}}}, {{~0u, 0 }}, true},
         {&ps_dtou, {.d = {{ -1.0}}}, {{ 0, 1 }}, true},
@@ -10609,6 +10610,7 @@ static void test_shader_instructions(void)
         transition_resource_state(command_list, context.render_target,
                 D3D12_RESOURCE_STATE_RENDER_TARGET, D3D12_RESOURCE_STATE_COPY_SOURCE);
+        bug_if(uint_tests[i].bug_on_nvidia && is_nvidia_device(context.device))
         check_sub_resource_uvec4(context.render_target, 0, queue, command_list, &uint_tests[i].output.u);
         reset_command_list(command_list, context.allocator);