From: Zebediah Figura zfigura@codeweavers.com
--- Makefile.am | 3 --- tests/hlsl-function.shader_test | 14 ++++++------- ...lsl-return-implicit-conversion.shader_test | 10 ++++----- tests/shader_runner.c | 21 ++++++++++++++++--- 4 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/Makefile.am b/Makefile.am index ed881ffa..52d057c9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -317,9 +317,6 @@ tests_shader_runner_SOURCES = \ tests_vkd3d_api_LDADD = libvkd3d.la @VULKAN_LIBS@ tests_vkd3d_shader_api_LDADD = libvkd3d-shader.la SHADER_TEST_LOG_COMPILER = tests/shader_runner -XFAIL_TESTS = \ - tests/hlsl-function.shader_test \ - tests/hlsl-return-implicit-conversion.shader_test endif
EXTRA_DIST += $(vkd3d_shader_tests) diff --git a/tests/hlsl-function.shader_test b/tests/hlsl-function.shader_test index 586f1ab7..6f11e35b 100644 --- a/tests/hlsl-function.shader_test +++ b/tests/hlsl-function.shader_test @@ -1,4 +1,4 @@ -[pixel shader fail] +[pixel shader fail todo]
float4 func();
@@ -23,7 +23,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float o) { @@ -37,7 +37,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float2 o) { @@ -51,7 +51,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(out float o) { @@ -65,7 +65,7 @@ float4 main() : sv_target return x; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float o) { @@ -78,7 +78,7 @@ float4 main() : sv_target return x; }
-[pixel shader fail] +[pixel shader fail todo]
void func() { @@ -89,7 +89,7 @@ float4 main() : sv_target return func(); }
-[pixel shader fail] +[pixel shader fail todo]
void foo() { diff --git a/tests/hlsl-return-implicit-conversion.shader_test b/tests/hlsl-return-implicit-conversion.shader_test index 4c07cf84..38d21633 100644 --- a/tests/hlsl-return-implicit-conversion.shader_test +++ b/tests/hlsl-return-implicit-conversion.shader_test @@ -98,7 +98,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float1x3(0.4, 0.3, 0.2); @@ -109,7 +109,7 @@ float4 main() : sv_target return float4(func(), 0.0); }
-[pixel shader fail] +[pixel shader fail todo] float1x3 func() { return float3x1(0.4, 0.3, 0.2); @@ -165,7 +165,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float4(0.4, 0.3, 0.2, 0.1); @@ -191,7 +191,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float1x4(0.4, 0.3, 0.2, 0.1); @@ -217,7 +217,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float1x3 func() { return float4x1(0.4, 0.3, 0.2, 0.1); diff --git a/tests/shader_runner.c b/tests/shader_runner.c index 8633ee9a..8804164e 100644 --- a/tests/shader_runner.c +++ b/tests/shader_runner.c @@ -80,6 +80,7 @@ enum parse_state STATE_REQUIRE, STATE_SAMPLER, STATE_SHADER_INVALID_PIXEL, + STATE_SHADER_INVALID_PIXEL_TODO, STATE_SHADER_PIXEL, STATE_SHADER_VERTEX, STATE_TEXTURE, @@ -642,15 +643,24 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const break;
case STATE_SHADER_INVALID_PIXEL: + case STATE_SHADER_INVALID_PIXEL_TODO: { ID3D10Blob *blob = NULL, *errors = NULL; HRESULT hr;
hr = D3DCompile(shader_source, strlen(shader_source), NULL, NULL, NULL, "main", "ps_4_0", 0, 0, &blob, &errors); - ok(hr == E_FAIL, "Got unexpected hr %#x.\n", hr); - ok(!blob, "Expected no compiled shader blob.\n"); - ok(!!errors, "Expected non-NULL error blob.\n"); + todo_if (state == STATE_SHADER_INVALID_PIXEL_TODO) + ok(hr == E_FAIL, "Got unexpected hr %#x.\n", hr); + if (hr == S_OK) + { + ID3D10Blob_Release(blob); + } + else + { + ok(!blob, "Expected no compiled shader blob.\n"); + ok(!!errors, "Expected non-NULL error blob.\n"); + } if (errors) { if (vkd3d_test_state.debug_level) @@ -735,6 +745,10 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const { state = STATE_SHADER_INVALID_PIXEL; } + else if (!strcmp(line, "[pixel shader fail todo]\n")) + { + state = STATE_SHADER_INVALID_PIXEL_TODO; + } else if (sscanf(line, "[sampler %u]\n", &index)) { state = STATE_SAMPLER; @@ -817,6 +831,7 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const case STATE_PREPROC: case STATE_PREPROC_INVALID: case STATE_SHADER_INVALID_PIXEL: + case STATE_SHADER_INVALID_PIXEL_TODO: case STATE_SHADER_PIXEL: case STATE_SHADER_VERTEX: {
From: Zebediah Figura zfigura@codeweavers.com
--- tests/conditional.shader_test | 4 +- tests/d3d12_test_utils.h | 8 ---- tests/hlsl-for.shader_test | 6 +-- tests/shader_runner.c | 69 +++++++++++----------------------- tests/texture-load.shader_test | 8 ++-- tests/trigonometry.shader_test | 32 ++++++++-------- tests/utils.h | 8 ++++ 7 files changed, 54 insertions(+), 81 deletions(-)
diff --git a/tests/conditional.shader_test b/tests/conditional.shader_test index e665ac1d..42307777 100644 --- a/tests/conditional.shader_test +++ b/tests/conditional.shader_test @@ -15,5 +15,5 @@ float4 main(float tex : texcoord) : SV_TARGET
[test] draw quad -probe rect rgba ( 0, 0, 319, 480) (0.9, 0.8, 0.7, 0.6) -probe rect rgba (321, 0, 640, 480) (0.1, 0.2, 0.3, 0.4) +probe ( 0, 0, 319, 480) rgba (0.9, 0.8, 0.7, 0.6) +probe (321, 0, 640, 480) rgba (0.1, 0.2, 0.3, 0.4) diff --git a/tests/d3d12_test_utils.h b/tests/d3d12_test_utils.h index 7186a73e..bb298082 100644 --- a/tests/d3d12_test_utils.h +++ b/tests/d3d12_test_utils.h @@ -52,14 +52,6 @@ static inline void wait_queue_idle_no_event_(unsigned int line, ID3D12Device *de ID3D12Fence_Release(fence); }
-static void set_rect(RECT *rect, int left, int top, int right, int bottom) -{ - rect->left = left; - rect->right = right; - rect->top = top; - rect->bottom = bottom; -} - static inline void set_box(D3D12_BOX *box, unsigned int left, unsigned int top, unsigned int front, unsigned int right, unsigned int bottom, unsigned int back) { diff --git a/tests/hlsl-for.shader_test b/tests/hlsl-for.shader_test index 4e5048c3..e6329834 100644 --- a/tests/hlsl-for.shader_test +++ b/tests/hlsl-for.shader_test @@ -23,6 +23,6 @@ float4 main(float tex : texcoord) : sv_target
[test] todo draw quad -probe rect rgba ( 0, 0, 159, 480) (10.0, 35.0, 0.0, 0.0) -probe rect rgba (161, 0, 479, 480) (10.0, 38.0, 0.0, 0.0) -probe rect rgba (481, 0, 640, 480) ( 5.0, 10.0, 0.0, 0.0) +probe ( 0, 0, 159, 480) rgba (10.0, 35.0, 0.0, 0.0) +probe (161, 0, 479, 480) rgba (10.0, 38.0, 0.0, 0.0) +probe (481, 0, 640, 480) rgba ( 5.0, 10.0, 0.0, 0.0) diff --git a/tests/shader_runner.c b/tests/shader_runner.c index 8804164e..a820810a 100644 --- a/tests/shader_runner.c +++ b/tests/shader_runner.c @@ -409,67 +409,40 @@ static void parse_test_directive(struct shader_runner *runner, const char *line)
runner->last_render_failed = !runner->ops->draw(runner, topology, vertex_count); } - else if (match_string(line, "probe all rgba", &line)) - { - static const RECT rect = {0, 0, RENDER_TARGET_WIDTH, RENDER_TARGET_HEIGHT}; - unsigned int ulps; - struct vec4 v; - int ret; - - if (runner->last_render_failed) - return; - - ret = sscanf(line, "( %f , %f , %f , %f ) %u", &v.x, &v.y, &v.z, &v.w, &ulps); - if (ret < 4) - fatal_error("Malformed probe arguments '%s'.\n", line); - if (ret < 5) - ulps = 0; - - runner->ops->probe_vec4(runner, &rect, &v, ulps); - } - else if (match_string(line, "probe rect rgba", &line)) + else if (match_string(line, "probe", &line)) { unsigned int left, top, right, bottom, ulps; struct vec4 v; + int ret, len; RECT rect; - int ret;
if (runner->last_render_failed) return;
- ret = sscanf(line, "( %d , %d , %d , %d ) ( %f , %f , %f , %f ) %u", - &left, &top, &right, &bottom, &v.x, &v.y, &v.z, &v.w, &ulps); - if (ret < 8) - fatal_error("Malformed probe arguments '%s'.\n", line); - if (ret < 9) - ulps = 0; - - rect.left = left; - rect.top = top; - rect.right = right; - rect.bottom = bottom; - runner->ops->probe_vec4(runner, &rect, &v, ulps); - } - else if (match_string(line, "probe rgba", &line)) - { - unsigned int x, y, ulps; - struct vec4 v; - RECT rect; - int ret; + if (match_string(line, "all", &line)) + { + set_rect(&rect, 0, 0, RENDER_TARGET_WIDTH, RENDER_TARGET_HEIGHT); + } + else if (sscanf(line, "( %d , %d , %d , %d )%n", &left, &top, &right, &bottom, &len) == 4) + { + set_rect(&rect, left, top, right, bottom); + line += len; + } + else if (sscanf(line, "( %u , %u )%n", &left, &top, &len) == 2) + { + set_rect(&rect, left, top, left + 1, top + 1); + line += len; + }
- if (runner->last_render_failed) - return; + if (!match_string(line, "rgba", &line)) + fatal_error("Malformed probe arguments '%s'.\n", line);
- ret = sscanf(line, "( %u , %u ) ( %f , %f , %f , %f ) %u", &x, &y, &v.x, &v.y, &v.z, &v.w, &ulps); - if (ret < 6) + ret = sscanf(line, "( %f , %f , %f , %f ) %u", &v.x, &v.y, &v.z, &v.w, &ulps); + if (ret < 4) fatal_error("Malformed probe arguments '%s'.\n", line); - if (ret < 7) + if (ret < 5) ulps = 0;
- rect.left = x; - rect.right = x + 1; - rect.top = y; - rect.bottom = y + 1; runner->ops->probe_vec4(runner, &rect, &v, ulps); } else if (match_string(line, "uniform", &line)) diff --git a/tests/texture-load.shader_test b/tests/texture-load.shader_test index 951ee3ea..81d06305 100644 --- a/tests/texture-load.shader_test +++ b/tests/texture-load.shader_test @@ -16,7 +16,7 @@ float4 main(float4 pos : sv_position) : sv_target
[test] draw quad -probe rgba (0, 0) (0.1, 0.2, 0.3, 0.4) -probe rgba (1, 0) (0.5, 0.7, 0.6, 0.8) -probe rgba (0, 1) (0.6, 0.5, 0.2, 0.1) -probe rgba (1, 1) (0.8, 0.0, 0.7, 1.0) +probe (0, 0) rgba (0.1, 0.2, 0.3, 0.4) +probe (1, 0) rgba (0.5, 0.7, 0.6, 0.8) +probe (0, 1) rgba (0.6, 0.5, 0.2, 0.1) +probe (1, 1) rgba (0.8, 0.0, 0.7, 1.0) diff --git a/tests/trigonometry.shader_test b/tests/trigonometry.shader_test index e9ac3682..b2e87f4a 100644 --- a/tests/trigonometry.shader_test +++ b/tests/trigonometry.shader_test @@ -13,19 +13,19 @@ float4 main(float tex : texcoord) : sv_target
[test] todo draw quad -probe rgba ( 0, 0) ( 0.00000000, 1.00000000, 0.0, 0.0) -probe rgba ( 1, 0) ( 0.84147098, 0.54030231, 0.0, 0.0) 1024 -probe rgba ( 2, 0) ( 0.90929743, -0.41614684, 0.0, 0.0) 1024 -probe rgba ( 3, 0) ( 0.14112001, -0.98999250, 0.0, 0.0) 1024 -probe rgba ( 4, 0) (-0.75680250, -0.65364362, 0.0, 0.0) 1024 -probe rgba ( 5, 0) (-0.95892427, 0.28366219, 0.0, 0.0) 1024 -probe rgba ( 6, 0) (-0.27941550, 0.96017029, 0.0, 0.0) 1024 -probe rgba ( 7, 0) ( 0.65698660, 0.75390225, 0.0, 0.0) 1024 -probe rgba ( 8, 0) ( 0.98935825, -0.14550003, 0.0, 0.0) 1024 -probe rgba ( 9, 0) ( 0.41211849, -0.91113026, 0.0, 0.0) 1024 -probe rgba (10, 0) (-0.54402111, -0.83907153, 0.0, 0.0) 1024 -probe rgba (11, 0) (-0.99999021, 0.00442570, 0.0, 0.0) 2048 -probe rgba (12, 0) (-0.53657292, 0.84385396, 0.0, 0.0) 1024 -probe rgba (13, 0) ( 0.42016704, 0.90744678, 0.0, 0.0) 1024 -probe rgba (14, 0) ( 0.99060736, 0.13673722, 0.0, 0.0) 1024 -probe rgba (15, 0) ( 0.65028784, -0.75968791, 0.0, 0.0) 1024 +probe ( 0, 0) rgba ( 0.00000000, 1.00000000, 0.0, 0.0) +probe ( 1, 0) rgba ( 0.84147098, 0.54030231, 0.0, 0.0) 1024 +probe ( 2, 0) rgba ( 0.90929743, -0.41614684, 0.0, 0.0) 1024 +probe ( 3, 0) rgba ( 0.14112001, -0.98999250, 0.0, 0.0) 1024 +probe ( 4, 0) rgba (-0.75680250, -0.65364362, 0.0, 0.0) 1024 +probe ( 5, 0) rgba (-0.95892427, 0.28366219, 0.0, 0.0) 1024 +probe ( 6, 0) rgba (-0.27941550, 0.96017029, 0.0, 0.0) 1024 +probe ( 7, 0) rgba ( 0.65698660, 0.75390225, 0.0, 0.0) 1024 +probe ( 8, 0) rgba ( 0.98935825, -0.14550003, 0.0, 0.0) 1024 +probe ( 9, 0) rgba ( 0.41211849, -0.91113026, 0.0, 0.0) 1024 +probe (10, 0) rgba (-0.54402111, -0.83907153, 0.0, 0.0) 1024 +probe (11, 0) rgba (-0.99999021, 0.00442570, 0.0, 0.0) 2048 +probe (12, 0) rgba (-0.53657292, 0.84385396, 0.0, 0.0) 1024 +probe (13, 0) rgba ( 0.42016704, 0.90744678, 0.0, 0.0) 1024 +probe (14, 0) rgba ( 0.99060736, 0.13673722, 0.0, 0.0) 1024 +probe (15, 0) rgba ( 0.65028784, -0.75968791, 0.0, 0.0) 1024 diff --git 
a/tests/utils.h b/tests/utils.h index 530a3cee..563f0b0f 100644 --- a/tests/utils.h +++ b/tests/utils.h @@ -94,4 +94,12 @@ static inline bool compare_vec4(const struct vec4 *v1, const struct vec4 *v2, un && compare_float(v1->w, v2->w, ulps); }
+static inline void set_rect(RECT *rect, int left, int top, int right, int bottom) +{ + rect->left = left; + rect->right = right; + rect->top = top; + rect->bottom = bottom; +} + #endif
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:33, Giovanni Mascellani ha scritto:
From: Zebediah Figura zfigura@codeweavers.com
tests/conditional.shader_test | 4 +- tests/d3d12_test_utils.h | 8 ---- tests/hlsl-for.shader_test | 6 +-- tests/shader_runner.c | 69 +++++++++++----------------------- tests/texture-load.shader_test | 8 ++-- tests/trigonometry.shader_test | 32 ++++++++-------- tests/utils.h | 8 ++++ 7 files changed, 54 insertions(+), 81 deletions(-)
diff --git a/tests/conditional.shader_test b/tests/conditional.shader_test index e665ac1d..42307777 100644 --- a/tests/conditional.shader_test +++ b/tests/conditional.shader_test @@ -15,5 +15,5 @@ float4 main(float tex : texcoord) : SV_TARGET
[test] draw quad -probe rect rgba ( 0, 0, 319, 480) (0.9, 0.8, 0.7, 0.6) -probe rect rgba (321, 0, 640, 480) (0.1, 0.2, 0.3, 0.4) +probe ( 0, 0, 319, 480) rgba (0.9, 0.8, 0.7, 0.6) +probe (321, 0, 640, 480) rgba (0.1, 0.2, 0.3, 0.4) diff --git a/tests/d3d12_test_utils.h b/tests/d3d12_test_utils.h index 7186a73e..bb298082 100644 --- a/tests/d3d12_test_utils.h +++ b/tests/d3d12_test_utils.h @@ -52,14 +52,6 @@ static inline void wait_queue_idle_no_event_(unsigned int line, ID3D12Device *de ID3D12Fence_Release(fence); }
-static void set_rect(RECT *rect, int left, int top, int right, int bottom) -{
- rect->left = left;
- rect->right = right;
- rect->top = top;
- rect->bottom = bottom;
-}
- static inline void set_box(D3D12_BOX *box, unsigned int left, unsigned int top, unsigned int front, unsigned int right, unsigned int bottom, unsigned int back) {
diff --git a/tests/hlsl-for.shader_test b/tests/hlsl-for.shader_test index 4e5048c3..e6329834 100644 --- a/tests/hlsl-for.shader_test +++ b/tests/hlsl-for.shader_test @@ -23,6 +23,6 @@ float4 main(float tex : texcoord) : sv_target
[test] todo draw quad -probe rect rgba ( 0, 0, 159, 480) (10.0, 35.0, 0.0, 0.0) -probe rect rgba (161, 0, 479, 480) (10.0, 38.0, 0.0, 0.0) -probe rect rgba (481, 0, 640, 480) ( 5.0, 10.0, 0.0, 0.0) +probe ( 0, 0, 159, 480) rgba (10.0, 35.0, 0.0, 0.0) +probe (161, 0, 479, 480) rgba (10.0, 38.0, 0.0, 0.0) +probe (481, 0, 640, 480) rgba ( 5.0, 10.0, 0.0, 0.0) diff --git a/tests/shader_runner.c b/tests/shader_runner.c index 8804164e..a820810a 100644 --- a/tests/shader_runner.c +++ b/tests/shader_runner.c @@ -409,67 +409,40 @@ static void parse_test_directive(struct shader_runner *runner, const char *line)
runner->last_render_failed = !runner->ops->draw(runner, topology, vertex_count); }
- else if (match_string(line, "probe all rgba", &line))
- {
static const RECT rect = {0, 0, RENDER_TARGET_WIDTH, RENDER_TARGET_HEIGHT};
unsigned int ulps;
struct vec4 v;
int ret;
if (runner->last_render_failed)
return;
ret = sscanf(line, "( %f , %f , %f , %f ) %u", &v.x, &v.y, &v.z, &v.w, &ulps);
if (ret < 4)
fatal_error("Malformed probe arguments '%s'.\n", line);
if (ret < 5)
ulps = 0;
runner->ops->probe_vec4(runner, &rect, &v, ulps);
- }
- else if (match_string(line, "probe rect rgba", &line))
- else if (match_string(line, "probe", &line)) { unsigned int left, top, right, bottom, ulps; struct vec4 v;
int ret, len; RECT rect;
int ret; if (runner->last_render_failed) return;
ret = sscanf(line, "( %d , %d , %d , %d ) ( %f , %f , %f , %f ) %u",
&left, &top, &right, &bottom, &v.x, &v.y, &v.z, &v.w, &ulps);
if (ret < 8)
fatal_error("Malformed probe arguments '%s'.\n", line);
if (ret < 9)
ulps = 0;
rect.left = left;
rect.top = top;
rect.right = right;
rect.bottom = bottom;
runner->ops->probe_vec4(runner, &rect, &v, ulps);
}
else if (match_string(line, "probe rgba", &line))
{
unsigned int x, y, ulps;
struct vec4 v;
RECT rect;
int ret;
if (match_string(line, "all", &line))
{
set_rect(&rect, 0, 0, RENDER_TARGET_WIDTH, RENDER_TARGET_HEIGHT);
}
else if (sscanf(line, "( %d , %d , %d , %d )%n", &left, &top, &right, &bottom, &len) == 4)
{
set_rect(&rect, left, top, right, bottom);
line += len;
}
else if (sscanf(line, "( %u , %u )%n", &left, &top, &len) == 2)
{
set_rect(&rect, left, top, left + 1, top + 1);
line += len;
}
if (runner->last_render_failed)
return;
if (!match_string(line, "rgba", &line))
fatal_error("Malformed probe arguments '%s'.\n", line);
ret = sscanf(line, "( %u , %u ) ( %f , %f , %f , %f ) %u", &x, &y, &v.x, &v.y, &v.z, &v.w, &ulps);
if (ret < 6)
ret = sscanf(line, "( %f , %f , %f , %f ) %u", &v.x, &v.y, &v.z, &v.w, &ulps);
if (ret < 4) fatal_error("Malformed probe arguments '%s'.\n", line);
if (ret < 7)
if (ret < 5) ulps = 0;
rect.left = x;
rect.right = x + 1;
rect.top = y;
rect.bottom = y + 1; runner->ops->probe_vec4(runner, &rect, &v, ulps); } else if (match_string(line, "uniform", &line))
diff --git a/tests/texture-load.shader_test b/tests/texture-load.shader_test index 951ee3ea..81d06305 100644 --- a/tests/texture-load.shader_test +++ b/tests/texture-load.shader_test @@ -16,7 +16,7 @@ float4 main(float4 pos : sv_position) : sv_target
[test] draw quad -probe rgba (0, 0) (0.1, 0.2, 0.3, 0.4) -probe rgba (1, 0) (0.5, 0.7, 0.6, 0.8) -probe rgba (0, 1) (0.6, 0.5, 0.2, 0.1) -probe rgba (1, 1) (0.8, 0.0, 0.7, 1.0) +probe (0, 0) rgba (0.1, 0.2, 0.3, 0.4) +probe (1, 0) rgba (0.5, 0.7, 0.6, 0.8) +probe (0, 1) rgba (0.6, 0.5, 0.2, 0.1) +probe (1, 1) rgba (0.8, 0.0, 0.7, 1.0) diff --git a/tests/trigonometry.shader_test b/tests/trigonometry.shader_test index e9ac3682..b2e87f4a 100644 --- a/tests/trigonometry.shader_test +++ b/tests/trigonometry.shader_test @@ -13,19 +13,19 @@ float4 main(float tex : texcoord) : sv_target
[test] todo draw quad -probe rgba ( 0, 0) ( 0.00000000, 1.00000000, 0.0, 0.0) -probe rgba ( 1, 0) ( 0.84147098, 0.54030231, 0.0, 0.0) 1024 -probe rgba ( 2, 0) ( 0.90929743, -0.41614684, 0.0, 0.0) 1024 -probe rgba ( 3, 0) ( 0.14112001, -0.98999250, 0.0, 0.0) 1024 -probe rgba ( 4, 0) (-0.75680250, -0.65364362, 0.0, 0.0) 1024 -probe rgba ( 5, 0) (-0.95892427, 0.28366219, 0.0, 0.0) 1024 -probe rgba ( 6, 0) (-0.27941550, 0.96017029, 0.0, 0.0) 1024 -probe rgba ( 7, 0) ( 0.65698660, 0.75390225, 0.0, 0.0) 1024 -probe rgba ( 8, 0) ( 0.98935825, -0.14550003, 0.0, 0.0) 1024 -probe rgba ( 9, 0) ( 0.41211849, -0.91113026, 0.0, 0.0) 1024 -probe rgba (10, 0) (-0.54402111, -0.83907153, 0.0, 0.0) 1024 -probe rgba (11, 0) (-0.99999021, 0.00442570, 0.0, 0.0) 2048 -probe rgba (12, 0) (-0.53657292, 0.84385396, 0.0, 0.0) 1024 -probe rgba (13, 0) ( 0.42016704, 0.90744678, 0.0, 0.0) 1024 -probe rgba (14, 0) ( 0.99060736, 0.13673722, 0.0, 0.0) 1024 -probe rgba (15, 0) ( 0.65028784, -0.75968791, 0.0, 0.0) 1024 +probe ( 0, 0) rgba ( 0.00000000, 1.00000000, 0.0, 0.0) +probe ( 1, 0) rgba ( 0.84147098, 0.54030231, 0.0, 0.0) 1024 +probe ( 2, 0) rgba ( 0.90929743, -0.41614684, 0.0, 0.0) 1024 +probe ( 3, 0) rgba ( 0.14112001, -0.98999250, 0.0, 0.0) 1024 +probe ( 4, 0) rgba (-0.75680250, -0.65364362, 0.0, 0.0) 1024 +probe ( 5, 0) rgba (-0.95892427, 0.28366219, 0.0, 0.0) 1024 +probe ( 6, 0) rgba (-0.27941550, 0.96017029, 0.0, 0.0) 1024 +probe ( 7, 0) rgba ( 0.65698660, 0.75390225, 0.0, 0.0) 1024 +probe ( 8, 0) rgba ( 0.98935825, -0.14550003, 0.0, 0.0) 1024 +probe ( 9, 0) rgba ( 0.41211849, -0.91113026, 0.0, 0.0) 1024 +probe (10, 0) rgba (-0.54402111, -0.83907153, 0.0, 0.0) 1024 +probe (11, 0) rgba (-0.99999021, 0.00442570, 0.0, 0.0) 2048 +probe (12, 0) rgba (-0.53657292, 0.84385396, 0.0, 0.0) 1024 +probe (13, 0) rgba ( 0.42016704, 0.90744678, 0.0, 0.0) 1024 +probe (14, 0) rgba ( 0.99060736, 0.13673722, 0.0, 0.0) 1024 +probe (15, 0) rgba ( 0.65028784, -0.75968791, 0.0, 0.0) 1024 diff --git 
a/tests/utils.h b/tests/utils.h index 530a3cee..563f0b0f 100644 --- a/tests/utils.h +++ b/tests/utils.h @@ -94,4 +94,12 @@ static inline bool compare_vec4(const struct vec4 *v1, const struct vec4 *v2, un && compare_float(v1->w, v2->w, ulps); }
+static inline void set_rect(RECT *rect, int left, int top, int right, int bottom) +{
+ rect->left = left;
+ rect->right = right;
+ rect->top = top;
+ rect->bottom = bottom;
+}
+
 #endif
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
On Mon, Apr 18, 2022 at 8:34 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
From: Zebediah Figura zfigura@codeweavers.com
tests/conditional.shader_test | 4 +- tests/d3d12_test_utils.h | 8 ---- tests/hlsl-for.shader_test | 6 +-- tests/shader_runner.c | 69 +++++++++++----------------------- tests/texture-load.shader_test | 8 ++-- tests/trigonometry.shader_test | 32 ++++++++-------- tests/utils.h | 8 ++++ 7 files changed, 54 insertions(+), 81 deletions(-)
diff --git a/tests/conditional.shader_test b/tests/conditional.shader_test index e665ac1d..42307777 100644 --- a/tests/conditional.shader_test +++ b/tests/conditional.shader_test @@ -15,5 +15,5 @@ float4 main(float tex : texcoord) : SV_TARGET
[test] draw quad -probe rect rgba ( 0, 0, 319, 480) (0.9, 0.8, 0.7, 0.6) -probe rect rgba (321, 0, 640, 480) (0.1, 0.2, 0.3, 0.4) +probe ( 0, 0, 319, 480) rgba (0.9, 0.8, 0.7, 0.6) +probe (321, 0, 640, 480) rgba (0.1, 0.2, 0.3, 0.4)
Nothing about the patch itself, just something that I noticed: these "explicit" rect coordinates are a bit awkward since we switched to RENDER_TARGET_WIDTH / RENDER_TARGET_HEIGHT in the shader runner. OTOH specifying e.g. one-pixel offsets with normalized coords isn't particularly nice either.
On 4/20/22 08:54, Matteo Bruni wrote:
On Mon, Apr 18, 2022 at 8:34 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
From: Zebediah Figura zfigura@codeweavers.com
tests/conditional.shader_test | 4 +- tests/d3d12_test_utils.h | 8 ---- tests/hlsl-for.shader_test | 6 +-- tests/shader_runner.c | 69 +++++++++++----------------------- tests/texture-load.shader_test | 8 ++-- tests/trigonometry.shader_test | 32 ++++++++-------- tests/utils.h | 8 ++++ 7 files changed, 54 insertions(+), 81 deletions(-)
diff --git a/tests/conditional.shader_test b/tests/conditional.shader_test index e665ac1d..42307777 100644 --- a/tests/conditional.shader_test +++ b/tests/conditional.shader_test @@ -15,5 +15,5 @@ float4 main(float tex : texcoord) : SV_TARGET
[test] draw quad -probe rect rgba ( 0, 0, 319, 480) (0.9, 0.8, 0.7, 0.6) -probe rect rgba (321, 0, 640, 480) (0.1, 0.2, 0.3, 0.4) +probe ( 0, 0, 319, 480) rgba (0.9, 0.8, 0.7, 0.6) +probe (321, 0, 640, 480) rgba (0.1, 0.2, 0.3, 0.4)
Nothing about the patch itself, just something that I noticed: these "explicit" rect coordinates are a bit awkward since we switched to RENDER_TARGET_WIDTH / RENDER_TARGET_HEIGHT in the shader runner. OTOH specifying e.g. one-pixel offsets with normalized coords isn't particularly nice either.
I don't see it as particularly necessary, although I don't object to it either. Part of the point of RENDER_TARGET_WIDTH was to ensure that if we in the future wanted to make the RT size customizable, we wouldn't miss anything while changing the code to account for that.
If we do make the coörds normalized, we could probably just subtract a pixel or two from the edges after un-normalizing them.
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
Signed-off-by: Zebediah Figura zfigura@codeweavers.com
From: Zebediah Figura zfigura@codeweavers.com
--- libs/vkd3d/command.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/libs/vkd3d/command.c b/libs/vkd3d/command.c index 09171fe4..481553d4 100644 --- a/libs/vkd3d/command.c +++ b/libs/vkd3d/command.c @@ -4657,8 +4657,9 @@ static void d3d12_command_list_set_root_constants(struct d3d12_command_list *lis const struct d3d12_root_constant *c;
c = root_signature_get_32bit_constants(root_signature, index); - VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout, - c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data)); + if (count) + VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout, + c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data)); }
static void STDMETHODCALLTYPE d3d12_command_list_SetComputeRoot32BitConstant(ID3D12GraphicsCommandList2 *iface,
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:33, Giovanni Mascellani ha scritto:
From: Zebediah Figura zfigura@codeweavers.com
libs/vkd3d/command.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/libs/vkd3d/command.c b/libs/vkd3d/command.c index 09171fe4..481553d4 100644 --- a/libs/vkd3d/command.c +++ b/libs/vkd3d/command.c @@ -4657,8 +4657,9 @@ static void d3d12_command_list_set_root_constants(struct d3d12_command_list *lis const struct d3d12_root_constant *c;
c = root_signature_get_32bit_constants(root_signature, index);
- VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
- c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
+ if (count)
+ VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
+ c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
}
static void STDMETHODCALLTYPE d3d12_command_list_SetComputeRoot32BitConstant(ID3D12GraphicsCommandList2 *iface,
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
On Mon, 18 Apr 2022 at 08:34, Giovanni Mascellani gmascellani@codeweavers.com wrote:
@@ -4657,8 +4657,9 @@ static void d3d12_command_list_set_root_constants(struct d3d12_command_list *lis const struct d3d12_root_constant *c;
c = root_signature_get_32bit_constants(root_signature, index);
- VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
- c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
+ if (count)
+ VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
+ c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
}
Is calling SetGraphicsRoot32BitConstants()/SetComputeRoot32BitConstants() valid d3d12? If not, we should be fixing the callers instead.
On 4/20/22 10:40, Henri Verbeet wrote:
On Mon, 18 Apr 2022 at 08:34, Giovanni Mascellani gmascellani@codeweavers.com wrote:
@@ -4657,8 +4657,9 @@ static void d3d12_command_list_set_root_constants(struct d3d12_command_list *lis const struct d3d12_root_constant *c;
c = root_signature_get_32bit_constants(root_signature, index);
- VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
- if (count)
VK_CALL(vkCmdPushConstants(list->vk_command_buffer, root_signature->vk_pipeline_layout,
c->stage_flags, c->offset + offset * sizeof(uint32_t), count * sizeof(uint32_t), data));
}
Is calling SetGraphicsRoot32BitConstants()/SetComputeRoot32BitConstants() valid d3d12? If not, we should be fixing the callers instead.
Right, I believe I received the same feedback when submitting this patch before, and never found the time to properly look into it, which is why it has remained in my tree without being resubmitted.
Looking into it now, I don't see any indication that a zero count is invalid, either from the "specification" [1] or in the function documentation themselves [2] [3], and in practice it's worked on (AMD) Windows when I've tried it. I haven't tried with the debug layer, though, and I don't know of any other things I could check.
[1] https://microsoft.github.io/DirectX-Specs/d3d/ResourceBinding.html#setting-c...
[2] https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12grap...
[3] https://docs.microsoft.com/en-us/windows/win32/api/d3d12/nf-d3d12-id3d12grap...
On Wed, 20 Apr 2022 at 19:50, Zebediah Figura zfigura@codeweavers.com wrote:
On 4/20/22 10:40, Henri Verbeet wrote:
Is calling SetGraphicsRoot32BitConstants()/SetComputeRoot32BitConstants() valid d3d12? If not, we should be fixing the callers instead.
Right, I believe I received the same feedback when submitting this patch before, and never found the time to properly look into it, which is why it has remained in my tree without being resubmitted.
Looking into it now, I don't see any indication that a zero count is invalid, either from the "specification" [1] or in the function documentation themselves [2] [3], and in practice it's worked on (AMD) Windows when I've tried it. I haven't tried with the debug layer, though, and I don't know of any other things I could check.
The debug layer might be interesting to check, but I guess that's good enough.
On 4/21/22 11:30, Henri Verbeet wrote:
On Wed, 20 Apr 2022 at 19:50, Zebediah Figura zfigura@codeweavers.com wrote:
On 4/20/22 10:40, Henri Verbeet wrote:
Is calling SetGraphicsRoot32BitConstants()/SetComputeRoot32BitConstants() valid d3d12? If not, we should be fixing the callers instead.
Right, I believe I received the same feedback when submitting this patch before, and never found the time to properly look into it, which is why it has remained in my tree without being resubmitted.
Looking into it now, I don't see any indication that a zero count is invalid, either from the "specification" [1] or in the function documentation themselves [2] [3], and in practice it's worked on (AMD) Windows when I've tried it. I haven't tried with the debug layer, though, and I don't know of any other things I could check.
The debug layer might be interesting to check, but I guess that's good enough.
I was able to install the debug layer, and it turns out it actually does complain, sort of:
"ID3D12CommandList::SetGraphicsRoot32BitConstants: The currently set root signature declares parameter [0] with type 32bit constants, with Num32BitValues 0, so it is invalid to set DestOffsetIn32BitValues of 0."
It doesn't complain about setting a count of zero, though, just about offset >= size.
Given that it seems to work in practice, I'm not sure whether it makes sense to have this patch or not.
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
From: Zebediah Figura zfigura@codeweavers.com
--- The original patch also changed add_load(), but I dropped that part because I need add_load()'s return value for "Support vector indexing". --- libs/vkd3d-shader/hlsl.y | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 58d71ff6..fd808648 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -569,19 +569,19 @@ static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, return load; }
-static struct hlsl_ir_load *add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, +static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, const struct hlsl_struct_field *field, const struct vkd3d_shader_location loc) { struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, &loc))) - return NULL; + return false; list_add_tail(instrs, &c->node.entry);
- return add_load(ctx, instrs, record, &c->node, field->type, loc); + return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
-static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, +static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) { const struct hlsl_type *expr_type = array->data_type; @@ -597,7 +597,7 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in { /* This needs to be lowered now, while we still have type information. */ FIXME("Index of matrix or vector type.\n"); - return NULL; + return false; } else { @@ -605,18 +605,18 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed."); else hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Expression cannot be array-indexed."); - return NULL; + return false; }
if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc))) - return NULL; + return false; list_add_tail(instrs, &c->node.entry); if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node))) - return NULL; + return false; list_add_tail(instrs, &mul->entry); index = mul;
- return add_load(ctx, instrs, array, index, data_type, loc); + return !!add_load(ctx, instrs, array, index, data_type, loc); }
static struct hlsl_struct_field *get_struct_field(struct list *fields, const char *name)
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
From: Zebediah Figura zfigura@codeweavers.com
The original patch also changed add_load(), but I dropped that part because I need add_load()'s return value for "Support vector indexing".
libs/vkd3d-shader/hlsl.y | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 58d71ff6..fd808648 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -569,19 +569,19 @@ static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, return load; }
-static struct hlsl_ir_load *add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, +static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, const struct hlsl_struct_field *field, const struct vkd3d_shader_location loc) { struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, &loc)))
return NULL;
return false; list_add_tail(instrs, &c->node.entry);
- return add_load(ctx, instrs, record, &c->node, field->type, loc);
- return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
-static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, +static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) { const struct hlsl_type *expr_type = array->data_type; @@ -597,7 +597,7 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in { /* This needs to be lowered now, while we still have type information. */ FIXME("Index of matrix or vector type.\n");
return NULL;
return false; } else {
@@ -605,18 +605,18 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed."); else hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Expression cannot be array-indexed.");
return NULL;
return false; } if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
return NULL;
return false; list_add_tail(instrs, &c->node.entry); if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node)))
return NULL;
return false; list_add_tail(instrs, &mul->entry); index = mul;
- return add_load(ctx, instrs, array, index, data_type, loc);
return !!add_load(ctx, instrs, array, index, data_type, loc); }
static struct hlsl_struct_field *get_struct_field(struct list *fields, const char *name)
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:35 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
From: Zebediah Figura zfigura@codeweavers.com
The original patch also changed add_load(), but I dropped that part because I need add_load()'s return value for "Support vector indexing".
libs/vkd3d-shader/hlsl.y | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 58d71ff6..fd808648 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -569,19 +569,19 @@ static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, return load; }
-static struct hlsl_ir_load *add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, +static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record, const struct hlsl_struct_field *field, const struct vkd3d_shader_location loc) { struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, &loc)))
- return NULL;
- return false;
list_add_tail(instrs, &c->node.entry);
- return add_load(ctx, instrs, record, &c->node, field->type, loc);
- return !!add_load(ctx, instrs, record, &c->node, field->type, loc);
}
-static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, +static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) { const struct hlsl_type *expr_type = array->data_type; @@ -597,7 +597,7 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in { /* This needs to be lowered now, while we still have type information. */ FIXME("Index of matrix or vector type.\n");
- return NULL;
- return false;
} else { @@ -605,18 +605,18 @@ static struct hlsl_ir_load *add_array_load(struct hlsl_ctx *ctx, struct list *in hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed."); else hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Expression cannot be array-indexed.");
- return NULL;
- return false;
}
if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
- return NULL;
- return false;
list_add_tail(instrs, &c->node.entry); if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node)))
- return NULL;
- return false;
list_add_tail(instrs, &mul->entry); index = mul;
- return add_load(ctx, instrs, array, index, data_type, loc);
- return !!add_load(ctx, instrs, array, index, data_type, loc);
}
static struct hlsl_struct_field *get_struct_field(struct list *fields, const char *name)
2.35.2
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
On 4/18/22 01:34, Giovanni Mascellani wrote:
From: Zebediah Figura zfigura@codeweavers.com
The original patch also changed add_load(), but I dropped that part because I need add_load()'s return value for "Support vector indexing".
If we're not going to change add_load(), I'm not sure there's much point in changing these ones either. My main goal was consistency.
I actually hadn't intended to submit this patch yet, since I wasn't sure it was an improvement, and since it was a pretty low priority anyway.
Alternatively we could change add_load() to return bool, and rely on node_from_list() to retrieve the last expression.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 14 ++++++++++---- tests/hlsl-vector-indexing.shader_test | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index fd808648..291f8392 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -592,13 +592,21 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls if (expr_type->type == HLSL_CLASS_ARRAY) { data_type = expr_type->e.array.type; + if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc))) + return false; } - else if (expr_type->type == HLSL_CLASS_MATRIX || expr_type->type == HLSL_CLASS_VECTOR) + else if (expr_type->type == HLSL_CLASS_MATRIX) { /* This needs to be lowered now, while we still have type information. */ - FIXME("Index of matrix or vector type.\n"); + FIXME("Index of matrix type.\n"); return false; } + else if (expr_type->type == HLSL_CLASS_VECTOR) + { + data_type = hlsl_get_scalar_type(ctx, expr_type->base_type); + if (!(c = hlsl_new_uint_constant(ctx, 1, &loc))) + return false; + } else { if (expr_type->type == HLSL_CLASS_SCALAR) @@ -608,8 +616,6 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls return false; }
- if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc))) - return false; list_add_tail(instrs, &c->node.entry); if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node))) return false; diff --git a/tests/hlsl-vector-indexing.shader_test b/tests/hlsl-vector-indexing.shader_test index 1c964147..f2d20c44 100644 --- a/tests/hlsl-vector-indexing.shader_test +++ b/tests/hlsl-vector-indexing.shader_test @@ -10,7 +10,7 @@ float4 main() : SV_TARGET }
[test] -todo draw quad +draw quad probe all rgba (0.02, 0.245, 0.351, 1.0)
[pixel shader] @@ -23,5 +23,5 @@ float4 main() : SV_TARGET
[test] uniform 0 float4 1.0 2.0 3.0 4.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 2.0, 3.0)
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:34 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 14 ++++++++++---- tests/hlsl-vector-indexing.shader_test | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index fd808648..291f8392 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -592,13 +592,21 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls if (expr_type->type == HLSL_CLASS_ARRAY) { data_type = expr_type->e.array.type;
- if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
- return false;
}
- else if (expr_type->type == HLSL_CLASS_MATRIX || expr_type->type == HLSL_CLASS_VECTOR)
- else if (expr_type->type == HLSL_CLASS_MATRIX)
{ /* This needs to be lowered now, while we still have type information. */
- FIXME("Index of matrix or vector type.\n");
- FIXME("Index of matrix type.\n");
return false; }
- else if (expr_type->type == HLSL_CLASS_VECTOR)
- {
- data_type = hlsl_get_scalar_type(ctx, expr_type->base_type);
- if (!(c = hlsl_new_uint_constant(ctx, 1, &loc)))
- return false;
- }
else { if (expr_type->type == HLSL_CLASS_SCALAR) @@ -608,8 +616,6 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls return false; }
- if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
- return false;
list_add_tail(instrs, &c->node.entry); if (!(mul = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node))) return false; diff --git a/tests/hlsl-vector-indexing.shader_test b/tests/hlsl-vector-indexing.shader_test index 1c964147..f2d20c44 100644 --- a/tests/hlsl-vector-indexing.shader_test +++ b/tests/hlsl-vector-indexing.shader_test @@ -10,7 +10,7 @@ float4 main() : SV_TARGET }
[test] -todo draw quad +draw quad probe all rgba (0.02, 0.245, 0.351, 1.0)
[pixel shader] @@ -23,5 +23,5 @@ float4 main() : SV_TARGET
[test] uniform 0 float4 1.0 2.0 3.0 4.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 2.0, 3.0) -- 2.35.2
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 14 ++++++++++---- tests/hlsl-vector-indexing.shader_test | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index fd808648..291f8392 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -592,13 +592,21 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls if (expr_type->type == HLSL_CLASS_ARRAY) { data_type = expr_type->e.array.type;
if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
return false; }
- else if (expr_type->type == HLSL_CLASS_MATRIX || expr_type->type == HLSL_CLASS_VECTOR)
- else if (expr_type->type == HLSL_CLASS_MATRIX) { /* This needs to be lowered now, while we still have type information. */
FIXME("Index of matrix or vector type.\n");
FIXME("Index of matrix type.\n"); return false; }
- else if (expr_type->type == HLSL_CLASS_VECTOR)
- {
data_type = hlsl_get_scalar_type(ctx, expr_type->base_type);
if (!(c = hlsl_new_uint_constant(ctx, 1, &loc)))
return false;
Why not just bring the constant + mul into the ARRAY path?
- } else { if (expr_type->type == HLSL_CLASS_SCALAR)
On Wed, Apr 20, 2022 at 7:57 PM Zebediah Figura zfigura@codeweavers.com wrote:
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 14 ++++++++++---- tests/hlsl-vector-indexing.shader_test | 4 ++-- 2 files changed, 12 insertions(+), 6 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index fd808648..291f8392 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -592,13 +592,21 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls if (expr_type->type == HLSL_CLASS_ARRAY) { data_type = expr_type->e.array.type;
if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
return false; }
- else if (expr_type->type == HLSL_CLASS_MATRIX || expr_type->type == HLSL_CLASS_VECTOR)
- else if (expr_type->type == HLSL_CLASS_MATRIX) { /* This needs to be lowered now, while we still have type information. */
FIXME("Index of matrix or vector type.\n");
FIXME("Index of matrix type.\n"); return false; }
- else if (expr_type->type == HLSL_CLASS_VECTOR)
- {
data_type = hlsl_get_scalar_type(ctx, expr_type->base_type);
if (!(c = hlsl_new_uint_constant(ctx, 1, &loc)))
return false;
Why not just bring the constant + mul into the ARRAY path?
I had basically the same comment (I was thinking about moving the c = hlsl_new_uint_constant() thing below into common code instead) but then I went "eh, whatever".
It would still be nice to follow up on that at some point though.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{ + if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR) + return type->dimx; + else + return type->dimy; +} + +static unsigned int major_size(const struct hlsl_type *type) +{ + if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR) + return type->dimy; + else + return type->dimx; +} + +static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) +{ + const struct hlsl_ir_node *rhs; + struct hlsl_type *element_type; + const struct hlsl_type *type; + unsigned int i; + struct hlsl_ir_store *store; + + if (instr->type != HLSL_IR_STORE) + return false; + + store = hlsl_ir_store(instr); + rhs = store->rhs.node; + type = rhs->data_type; + if (type->type != HLSL_CLASS_MATRIX) + return false; + element_type = hlsl_get_vector_type(ctx, type->base_type, minor_size(type)); + + if (rhs->type != HLSL_IR_LOAD) + { + hlsl_fixme(ctx, &instr->loc, "Copying from unsupported node type.\n"); + return false; + } + + for (i = 0; i < major_size(type); ++i) + { + if (!split_copy(ctx, store, hlsl_ir_load(rhs), 4 * i, element_type)) + return false; + } + + list_remove(&store->node.entry); + hlsl_free_instr(&store->node); + return true; +} + static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) { const struct hlsl_type *src_type, *dst_type; @@ -1685,6 +1736,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry progress |= transform_ir(ctx, split_struct_copies, body, NULL); } while (progress); + transform_ir(ctx, split_matrix_copies, body, NULL); transform_ir(ctx, lower_narrowing_casts, body, NULL); transform_ir(ctx, lower_casts_to_bool, body, NULL); do diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index 1d444ffa..e2103fe2 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test 
@@ -45,3 +45,20 @@ uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 todo draw quad probe all rgba (1.0, 5.0, 7.0, 12.0) + +[pixel shader] +uniform float4x4 m; + +float4 main() : SV_TARGET +{ + float4x4 m2 = m; + return float4(m2[0][0], m2[1][0], m2[1][2], m2[2][3]); +} + +[test] +uniform 0 float4 1.0 2.0 3.0 4.0 +uniform 4 float4 5.0 6.0 7.0 8.0 +uniform 8 float4 9.0 10.0 11.0 12.0 +uniform 12 float4 13.0 14.0 15.0 16.0 +todo draw quad +probe all rgba (1.0, 2.0, 10.0, 15.0)
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:35 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
- return type->dimx;
- else
- return type->dimy;
+}
+static unsigned int major_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
- return type->dimy;
- else
- return type->dimx;
+}
+static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) +{
- const struct hlsl_ir_node *rhs;
- struct hlsl_type *element_type;
- const struct hlsl_type *type;
- unsigned int i;
- struct hlsl_ir_store *store;
- if (instr->type != HLSL_IR_STORE)
- return false;
- store = hlsl_ir_store(instr);
- rhs = store->rhs.node;
- type = rhs->data_type;
- if (type->type != HLSL_CLASS_MATRIX)
- return false;
- element_type = hlsl_get_vector_type(ctx, type->base_type, minor_size(type));
- if (rhs->type != HLSL_IR_LOAD)
- {
- hlsl_fixme(ctx, &instr->loc, "Copying from unsupported node type.\n");
- return false;
- }
- for (i = 0; i < major_size(type); ++i)
- {
- if (!split_copy(ctx, store, hlsl_ir_load(rhs), 4 * i, element_type))
- return false;
- }
- list_remove(&store->node.entry);
- hlsl_free_instr(&store->node);
- return true;
+}
static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) { const struct hlsl_type *src_type, *dst_type; @@ -1685,6 +1736,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry progress |= transform_ir(ctx, split_struct_copies, body, NULL); } while (progress);
- transform_ir(ctx, split_matrix_copies, body, NULL);
transform_ir(ctx, lower_narrowing_casts, body, NULL); transform_ir(ctx, lower_casts_to_bool, body, NULL); do diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index 1d444ffa..e2103fe2 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -45,3 +45,20 @@ uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 todo draw quad probe all rgba (1.0, 5.0, 7.0, 12.0)
+[pixel shader] +uniform float4x4 m;
+float4 main() : SV_TARGET +{
- float4x4 m2 = m;
- return float4(m2[0][0], m2[1][0], m2[1][2], m2[2][3]);
+}
+[test] +uniform 0 float4 1.0 2.0 3.0 4.0 +uniform 4 float4 5.0 6.0 7.0 8.0 +uniform 8 float4 9.0 10.0 11.0 12.0 +uniform 12 float4 13.0 14.0 15.0 16.0 +todo draw quad
+probe all rgba (1.0, 2.0, 10.0, 15.0)
2.35.2
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
On Mon, Apr 18, 2022 at 8:34 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimx;
- else
return type->dimy;
+}
+static unsigned int major_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimy;
- else
return type->dimx;
+}
+static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) +{
- const struct hlsl_ir_node *rhs;
- struct hlsl_type *element_type;
- const struct hlsl_type *type;
- unsigned int i;
- struct hlsl_ir_store *store;
- if (instr->type != HLSL_IR_STORE)
return false;
- store = hlsl_ir_store(instr);
- rhs = store->rhs.node;
- type = rhs->data_type;
- if (type->type != HLSL_CLASS_MATRIX)
return false;
- element_type = hlsl_get_vector_type(ctx, type->base_type, minor_size(type));
- if (rhs->type != HLSL_IR_LOAD)
- {
hlsl_fixme(ctx, &instr->loc, "Copying from unsupported node type.\n");
return false;
- }
- for (i = 0; i < major_size(type); ++i)
- {
if (!split_copy(ctx, store, hlsl_ir_load(rhs), 4 * i, element_type))
return false;
- }
I was a bit confused by the major_size() / minor_size() naming, together with the coordinate and dimension flipping that we always do with matrices, but this seems to be correct.
It might be just a "me" thing but maybe names like vector_size() and vector_count() would be potentially more obvious?
We should probably update the comment above split_copy() to include split_matrix_copies(). Or maybe replace the first sentence altogether with "Copy an element of a complex variable." or something.
- list_remove(&store->node.entry);
- hlsl_free_instr(&store->node);
- return true;
+}
static bool lower_narrowing_casts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) { const struct hlsl_type *src_type, *dst_type; @@ -1685,6 +1736,7 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry progress |= transform_ir(ctx, split_struct_copies, body, NULL); } while (progress);
- transform_ir(ctx, split_matrix_copies, body, NULL); transform_ir(ctx, lower_narrowing_casts, body, NULL); transform_ir(ctx, lower_casts_to_bool, body, NULL); do
diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index 1d444ffa..e2103fe2 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -45,3 +45,20 @@ uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 todo draw quad probe all rgba (1.0, 5.0, 7.0, 12.0)
+[pixel shader] +uniform float4x4 m;
+float4 main() : SV_TARGET +{
- float4x4 m2 = m;
- return float4(m2[0][0], m2[1][0], m2[1][2], m2[2][3]);
+}
+[test] +uniform 0 float4 1.0 2.0 3.0 4.0 +uniform 4 float4 5.0 6.0 7.0 8.0 +uniform 8 float4 9.0 10.0 11.0 12.0 +uniform 12 float4 13.0 14.0 15.0 16.0 +todo draw quad +probe all rgba (1.0, 2.0, 10.0, 15.0)
Adding a test with a non-square matrix might be a good idea, if anything to give a bit more reassurance that we're using the right dimensions.
Hi,
Il 20/04/22 16:02, Matteo Bruni ha scritto:
I was a bit confused by the major_size() / minor_size() naming, together with the coordinate and dimension flipping that we always do with matrices, but this seems to be correct.
It might be just a "me" thing but maybe names like vector_size() and vector_count() would be potentially more obvious?
While I also always need to think which one is the "major" or the "minor" size, I don't find your proposals easier to understand. The words "size" and "count" have nearly the same meaning, so it's still not really clear which one applies to the vector of vectors as a whole, and which one to each single inner vector. As you say, it's probably a very personal thing.
All in all, I still think that "major size" and "minor size" is a better choice, because at least they're more or less standard terminology. So while with both proposals you still need to think which is which, with mine at least you're immediately on the track to understand what is the point you need to think about, while I don't feel that "vector size" and "vector count" really bring you in that direction.
We should probably update the comment above split_copy() to include split_matrix_copies(). Or maybe replace the first sentence altogether with "Copy an element of a complex variable." or something.
Ok, will do in a follow up.
Adding a test with a non-square matrix might be a good idea, if anything to give a bit more reassurance that we're using the right dimensions.
Ok, makes sense.
Giovanni.
On Thu, Apr 21, 2022 at 10:13 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
Hi,
Il 20/04/22 16:02, Matteo Bruni ha scritto:
I was a bit confused by the major_size() / minor_size() naming, together with the coordinate and dimension flipping that we always do with matrices, but this seems to be correct.
It might be just a "me" thing but maybe names like vector_size() and vector_count() would be potentially more obvious?
While I also always need to think which one is the "major" or the "minor" size, I don't find your proposals easier to understand. The words "size" and "count" have nearly the same meaning, so it's still not really clear which one applies to the vector of vectors as a whole, and which one to each single inner vector. As you say, it's probably a very personal thing.
All in all, I still think that "major size" and "minor size" is a better choice, because at least they're more or less standard terminology. So while with both proposals you still need to think which is which, with mine at least you're immediately on the track to understand what is the point you need to think about, while I don't feel that "vector size" and "vector count" really bring you in that direction.
Sure, that's fine. But, FWIW, I don't think there is any ambiguity between vector_size and vector_count.
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimx;
- else
return type->dimy;
+}
+static unsigned int major_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimy;
- else
return type->dimx;
+}
Why handle HLSL_CLASS_VECTOR in these?
+static bool split_matrix_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context) +{
- const struct hlsl_ir_node *rhs;
- struct hlsl_type *element_type;
- const struct hlsl_type *type;
- unsigned int i;
- struct hlsl_ir_store *store;
- if (instr->type != HLSL_IR_STORE)
return false;
- store = hlsl_ir_store(instr);
- rhs = store->rhs.node;
- type = rhs->data_type;
- if (type->type != HLSL_CLASS_MATRIX)
return false;
- element_type = hlsl_get_vector_type(ctx, type->base_type, minor_size(type));
- if (rhs->type != HLSL_IR_LOAD)
- {
hlsl_fixme(ctx, &instr->loc, "Copying from unsupported node type.\n");
return false;
- }
- for (i = 0; i < major_size(type); ++i)
- {
if (!split_copy(ctx, store, hlsl_ir_load(rhs), 4 * i, element_type))
return false;
- }
- list_remove(&store->node.entry);
- hlsl_free_instr(&store->node);
- return true;
+}
On Wed, Apr 20, 2022 at 8:09 PM Zebediah Figura zfigura@codeweavers.com wrote:
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimx;
- else
return type->dimy;
+}
+static unsigned int major_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimy;
- else
return type->dimx;
+}
Why handle HLSL_CLASS_VECTOR in these?
Speaking of which, let me attach my local changes when I reviewed this patch...
On Wed, Apr 20, 2022 at 11:00 PM Matteo Bruni matteo.mystral@gmail.com wrote:
On Wed, Apr 20, 2022 at 8:09 PM Zebediah Figura zfigura@codeweavers.com wrote:
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 52 ++++++++++++++++++++++++++ tests/hlsl-matrix-indexing.shader_test | 17 +++++++++ 2 files changed, 69 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 72c00430..73e3b73f 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -672,6 +672,57 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr return true; }
+static unsigned int minor_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimx;
- else
return type->dimy;
+}
+static unsigned int major_size(const struct hlsl_type *type) +{
- if (type->type == HLSL_CLASS_VECTOR || type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
return type->dimy;
- else
return type->dimx;
+}
Why handle HLSL_CLASS_VECTOR in these?
Speaking of which, let me attach my local changes when I reviewed this patch...
Except I missed a later fixup to replace HLSL_MODIFIER_ROW_MAJOR with HLSL_MODIFIER_COLUMN_MAJOR in those conditions (which is related to my confusion WRT coordinate flipping), oh well...
Hi,
Il 20/04/22 20:08, Zebediah Figura ha scritto:
Why handle HLSL_CLASS_VECTOR in these?
Good question. I am pretty sure I had a good reason at some point, because I remember rewriting that a few times in order to find the right abstraction, but even in my queued patches minor_size() and major_size() are never called on vector types, so that could be removed now. I'll fix it in a follow up patch.
Thanks, Giovanni.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 87 +++++++++++++++++++++++++- tests/hlsl-matrix-indexing.shader_test | 8 +-- 2 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 291f8392..b6a8e496 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -581,6 +581,89 @@ static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hl return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
+static struct hlsl_ir_expr *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs, + enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2, + const struct vkd3d_shader_location *loc); + +static struct hlsl_ir_node *compute_matrix_offset(struct hlsl_ctx *ctx, struct list *instrs, + struct hlsl_type *type, struct hlsl_ir_node *x, struct hlsl_ir_node *y, + const struct vkd3d_shader_location *loc) +{ + struct hlsl_ir_node *major, *minor; + struct hlsl_ir_expr *mul, *add; + struct hlsl_ir_constant *four; + + if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR) + { + minor = x; + major = y; + } + else + { + minor = y; + major = x; + } + + if (!(four = hlsl_new_uint_constant(ctx, 4, loc))) + return NULL; + list_add_tail(instrs, &four->node.entry); + + if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc))) + return NULL; + + if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc))) + return NULL; + + return &add->node; +} + +static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs, + struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc) +{ + struct hlsl_type *mat_type = matrix->data_type, *ret_type, *scal_type; + static unsigned int counter = 0; + struct hlsl_ir_load *load; + struct hlsl_ir_var *var; + unsigned int i; + char name[32]; + + ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx); + scal_type = hlsl_get_scalar_type(ctx, mat_type->base_type); + + sprintf(name, "<index-%x>", counter++); + var = hlsl_new_synthetic_var(ctx, name, ret_type, *loc); + if (!var) + return false; + + for (i = 0; i < mat_type->dimx; ++i) + { + struct hlsl_ir_node *offset; + struct hlsl_ir_store *store; + struct hlsl_ir_load *value; + struct hlsl_ir_constant *c; + + if (!(c = hlsl_new_uint_constant(ctx, i, loc))) + return false; + list_add_tail(instrs, &c->node.entry); + + if (!(offset = 
compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc))) + return false; + + if (!(value = add_load(ctx, instrs, matrix, offset, scal_type, *loc))) + return false; + + if (!(store = hlsl_new_store(ctx, var, &c->node, &value->node, 0, *loc))) + return false; + list_add_tail(instrs, &store->node.entry); + } + + if (!(load = hlsl_new_load(ctx, var, NULL, ret_type, *loc))) + return false; + list_add_tail(instrs, &load->node.entry); + + return true; +} + static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) { @@ -597,9 +680,7 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls } else if (expr_type->type == HLSL_CLASS_MATRIX) { - /* This needs to be lowered now, while we still have type information. */ - FIXME("Index of matrix type.\n"); - return false; + return add_matrix_load(ctx, instrs, array, index, &loc); } else if (expr_type->type == HLSL_CLASS_VECTOR) { diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index e2103fe2..3f4b878f 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -11,7 +11,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
[pixel shader] @@ -27,7 +27,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
[pixel shader] @@ -43,7 +43,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 5.0, 7.0, 12.0)
[pixel shader] @@ -60,5 +60,5 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:34 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 87 +++++++++++++++++++++++++- tests/hlsl-matrix-indexing.shader_test | 8 +-- 2 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 291f8392..b6a8e496 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -581,6 +581,89 @@ static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hl return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
+static struct hlsl_ir_expr *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
- enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
- const struct vkd3d_shader_location *loc);
+static struct hlsl_ir_node *compute_matrix_offset(struct hlsl_ctx *ctx, struct list *instrs,
- struct hlsl_type *type, struct hlsl_ir_node *x, struct hlsl_ir_node *y,
- const struct vkd3d_shader_location *loc)
+{
- struct hlsl_ir_node *major, *minor;
- struct hlsl_ir_expr *mul, *add;
- struct hlsl_ir_constant *four;
- if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
- {
- minor = x;
- major = y;
- }
- else
- {
- minor = y;
- major = x;
- }
- if (!(four = hlsl_new_uint_constant(ctx, 4, loc)))
- return NULL;
- list_add_tail(instrs, &four->node.entry);
- if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
- return NULL;
- if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc)))
- return NULL;
- return &add->node;
+}
+static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs,
- struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *mat_type = matrix->data_type, *ret_type, *scal_type;
- static unsigned int counter = 0;
- struct hlsl_ir_load *load;
- struct hlsl_ir_var *var;
- unsigned int i;
- char name[32];
- ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
- scal_type = hlsl_get_scalar_type(ctx, mat_type->base_type);
- sprintf(name, "<index-%x>", counter++);
- var = hlsl_new_synthetic_var(ctx, name, ret_type, *loc);
- if (!var)
- return false;
- for (i = 0; i < mat_type->dimx; ++i)
- {
- struct hlsl_ir_node *offset;
- struct hlsl_ir_store *store;
- struct hlsl_ir_load *value;
- struct hlsl_ir_constant *c;
- if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
- return false;
- list_add_tail(instrs, &c->node.entry);
- if (!(offset = compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc)))
- return false;
- if (!(value = add_load(ctx, instrs, matrix, offset, scal_type, *loc)))
- return false;
- if (!(store = hlsl_new_store(ctx, var, &c->node, &value->node, 0, *loc)))
- return false;
- list_add_tail(instrs, &store->node.entry);
- }
- if (!(load = hlsl_new_load(ctx, var, NULL, ret_type, *loc)))
- return false;
- list_add_tail(instrs, &load->node.entry);
- return true;
+}
static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) { @@ -597,9 +680,7 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls } else if (expr_type->type == HLSL_CLASS_MATRIX) {
- /* This needs to be lowered now, while we still have type information. */
- FIXME("Index of matrix type.\n");
- return false;
- return add_matrix_load(ctx, instrs, array, index, &loc);
} else if (expr_type->type == HLSL_CLASS_VECTOR) { diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index e2103fe2..3f4b878f 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -11,7 +11,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
[pixel shader] @@ -27,7 +27,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
[pixel shader] @@ -43,7 +43,7 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 5.0, 7.0, 12.0)
[pixel shader] @@ -60,5 +60,5 @@ uniform 0 float4 1.0 2.0 3.0 4.0 uniform 4 float4 5.0 6.0 7.0 8.0 uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 -todo draw quad +draw quad probe all rgba (1.0, 2.0, 10.0, 15.0) -- 2.35.2
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
On Mon, Apr 18, 2022 at 8:34 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 87 +++++++++++++++++++++++++- tests/hlsl-matrix-indexing.shader_test | 8 +-- 2 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 291f8392..b6a8e496 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -581,6 +581,89 @@ static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hl return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
+static struct hlsl_ir_expr *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
const struct vkd3d_shader_location *loc);
+static struct hlsl_ir_node *compute_matrix_offset(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_type *type, struct hlsl_ir_node *x, struct hlsl_ir_node *y,
const struct vkd3d_shader_location *loc)
+{
- struct hlsl_ir_node *major, *minor;
- struct hlsl_ir_expr *mul, *add;
- struct hlsl_ir_constant *four;
- if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
- {
minor = x;
major = y;
- }
- else
- {
minor = y;
major = x;
- }
- if (!(four = hlsl_new_uint_constant(ctx, 4, loc)))
return NULL;
- list_add_tail(instrs, &four->node.entry);
- if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
return NULL;
- if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc)))
return NULL;
- return &add->node;
+}
+static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *mat_type = matrix->data_type, *ret_type, *scal_type;
- static unsigned int counter = 0;
- struct hlsl_ir_load *load;
- struct hlsl_ir_var *var;
- unsigned int i;
- char name[32];
- ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
- scal_type = hlsl_get_scalar_type(ctx, mat_type->base_type);
Nitpick, might as well name it scalar_type.
- sprintf(name, "<index-%x>", counter++);
I know that char name[32] is certainly going to be enough here, but I'd prefer if we always used vkd3d_string_buffer for this kind of temporary strings, going forward.
On Wed, 20 Apr 2022 at 15:57, Matteo Bruni matteo.mystral@gmail.com wrote:
I know that char name[32] is certainly going to be enough here, but I'd prefer if we always used vkd3d_string_buffer for this kind of temporary strings, going forward.
I think we should, yes.
Hi,
Il 20/04/22 15:57, Matteo Bruni ha scritto:
- ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
- scal_type = hlsl_get_scalar_type(ctx, mat_type->base_type);
Nitpick, might as well name it scalar_type.
Ok, if you're sending another patch set feel free to change it. Otherwise I'll address that in a later patch.
- sprintf(name, "<index-%x>", counter++);
I know that char name[32] is certainly going to be enough here, but I'd prefer if we always used vkd3d_string_buffer for this kind of temporary strings, going forward.
Same here.
Giovanni.
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 87 +++++++++++++++++++++++++- tests/hlsl-matrix-indexing.shader_test | 8 +-- 2 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 291f8392..b6a8e496 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -581,6 +581,89 @@ static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hl return !!add_load(ctx, instrs, record, &c->node, field->type, loc); }
+static struct hlsl_ir_expr *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
const struct vkd3d_shader_location *loc);
+static struct hlsl_ir_node *compute_matrix_offset(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_type *type, struct hlsl_ir_node *x, struct hlsl_ir_node *y,
const struct vkd3d_shader_location *loc)
+{
- struct hlsl_ir_node *major, *minor;
- struct hlsl_ir_expr *mul, *add;
- struct hlsl_ir_constant *four;
- if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
- {
minor = x;
major = y;
- }
- else
- {
minor = y;
major = x;
- }
- if (!(four = hlsl_new_uint_constant(ctx, 4, loc)))
return NULL;
- list_add_tail(instrs, &four->node.entry);
- if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
return NULL;
- if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc)))
return NULL;
Why do we need this, and not just hlsl_new_binary_expr()? We already cast the index to a scalar uint.
In general I think we want to avoid using add_*() helpers if possible, where not triggered directly by user code.
- return &add->node;
+}
+static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
How about add_matrix_index() instead?
+{
- struct hlsl_type *mat_type = matrix->data_type, *ret_type, *scal_type;
- static unsigned int counter = 0;
- struct hlsl_ir_load *load;
- struct hlsl_ir_var *var;
- unsigned int i;
- char name[32];
- ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
- scal_type = hlsl_get_scalar_type(ctx, mat_type->base_type);
- sprintf(name, "<index-%x>", counter++);
Please always use # for hexadecimal formats (or, in this case, %u may be better). I see the constructor path doesn't, but we should change that.
- var = hlsl_new_synthetic_var(ctx, name, ret_type, *loc);
- if (!var)
return false;
- for (i = 0; i < mat_type->dimx; ++i)
- {
struct hlsl_ir_node *offset;
struct hlsl_ir_store *store;
struct hlsl_ir_load *value;
struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!(offset = compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc)))
return false;
The signature of this function feels awkward to me. Could we fold the subsequent add_load() call into it, and call it something like add_matrix_scalar_load() instead? Note also that it'd be useful for matrix swizzles in that case, since I think we want to stop generating HLSL_IR_SWIZZLE instructions for those.
if (!(value = add_load(ctx, instrs, matrix, offset, scal_type, *loc)))
return false;
if (!(store = hlsl_new_store(ctx, var, &c->node, &value->node, 0, *loc)))
return false;
list_add_tail(instrs, &store->node.entry);
- }
- if (!(load = hlsl_new_load(ctx, var, NULL, ret_type, *loc)))
hlsl_new_var_load(ctx, var, *loc)
return false;
- list_add_tail(instrs, &load->node.entry);
- return true;
+}
- static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array, struct hlsl_ir_node *index, const struct vkd3d_shader_location loc) {
On Wed, Apr 20, 2022 at 8:28 PM Zebediah Figura zfigura@codeweavers.com wrote:
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 87 +++++++++++++++++++++++++- tests/hlsl-matrix-indexing.shader_test | 8 +-- 2 files changed, 88 insertions(+), 7 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 291f8392..b6a8e496 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y
- var = hlsl_new_synthetic_var(ctx, name, ret_type, *loc);
- if (!var)
return false;
- for (i = 0; i < mat_type->dimx; ++i)
- {
struct hlsl_ir_node *offset;
struct hlsl_ir_store *store;
struct hlsl_ir_load *value;
struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!(offset = compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc)))
return false;
The signature of this function feels awkward to me. Could we fold the subsequent add_load() call into it, and call it something like add_matrix_scalar_load() instead? Note also that it'd be useful for matrix swizzles in that case, since I think we want to stop generating HLSL_IR_SWIZZLE instructions for those.
Yes, we do.
Hi,
Il 20/04/22 20:28, Zebediah Figura ha scritto:
+    if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
+        return NULL;
+    if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc)))
+        return NULL;
Why do we need this, and not just hlsl_new_binary_expr()? We already cast the index to a scalar uint.
It's convenient because I don't have to manually add the new instruction to the list.
In general I think we want to avoid using add_*() helpers if possible, where not triggered directly by user code.
Why?
+static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs,
+        struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
How about add_matrix_index() instead?
I can do that if you really insist, but given that we already have add_record_load() and add_array_load() it seems sensible to name this add_matrix_load().
Please always use # for hexadecimal formats (or, in this case, %u may be better). I see the constructor path doesn't, but we should change that.
Ok, I changed to %u.
+        if (!(offset = compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc)))
+            return false;
The signature of this function feels awkward to me. Could we fold the subsequent add_load() call into it, and call it something like add_matrix_scalar_load() instead? Note also that it'd be useful for matrix swizzles in that case, since I think we want to stop generating HLSL_IR_SWIZZLE instructions for those.
In "Parse matrix constructors." that function is used in other contexts, for which a subsequent load doesn't make sense (e.g., because you have to store rather than load). So I would still need a helper that just computes the offset and returns it to avoid code duplication. Once we have that, adding another helper to just add the load seems excessive to me (the case is different for load_index() and store_index() in "Parse matrix constructors.", because I already know that other sites will use them in subsequent patches).
WRT your comment about not generating HLSL_IR_SWIZZLE instructions, you mean that instead of --- trace:hlsl_dump_function: 2: uint | 8 trace:hlsl_dump_function: 3: float4 | m[@2] trace:hlsl_dump_function: 4: float1 | @3.y --- we'd want --- trace:hlsl_dump_function: 2: uint | 9 trace:hlsl_dump_function: 3: float4 | m[@2] --- directly?
If so, I can agree, but this seems something more easily handled in a later optimization pass, isn't it? The current architecture wouldn't allow for that easily any way, because when you're parsing the first subscript in an expression like "m[0][1]" you don't know yet if there is going to be a second one, or you just genuinely want a vector expression.
+    if (!(load = hlsl_new_load(ctx, var, NULL, ret_type, *loc)))
hlsl_new_var_load(ctx, var, *loc)
Right, done.
Thanks, Giovanni.
On 4/21/22 04:22, Giovanni Mascellani wrote:
Hi,
Il 20/04/22 20:28, Zebediah Figura ha scritto:
+    if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
+        return NULL;
+    if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, &mul->node, minor, loc)))
+        return NULL;
Why do we need this, and not just hlsl_new_binary_expr()? We already cast the index to a scalar uint.
It's convenient because I don't have to manually add the new instruction to the list.
In general I think we want to avoid using add_*() helpers if possible, where not triggered directly by user code.
Why?
The intent is that they contain most code corresponding to user actions, and as such do nontrivial interpretation and validation. In the case where we're manually synthesizing instructions I don't want to have to think about whether such functions are doing something we don't want.
I can probably live with it in this case, but I'm concerned that this mental burden will grow.
Maybe there's an argument for helpers which just wrap hlsl_new_* like that, and we can have an add_* family and a parse_* family.
+static bool add_matrix_load(struct hlsl_ctx *ctx, struct list *instrs,
+        struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
How about add_matrix_index() instead?
I can do that if you really insist, but given that we already have add_record_load() and add_array_load() it seems sensible to name this add_matrix_load().
In isolation I guess it's fine, but it came to mind while thinking about add_matrix_scalar_load()—it'd be nice to clarify how this function is different from that one.
Please always use # for hexadecimal formats (or, in this case, %u may be better). I see the constructor path doesn't, but we should change that.
Ok, I changed to %u.
+        if (!(offset = compute_matrix_offset(ctx, instrs, mat_type, &c->node, index, loc)))
+            return false;
The signature of this function feels awkward to me. Could we fold the subsequent add_load() call into it, and call it something like add_matrix_scalar_load() instead? Note also that it'd be useful for matrix swizzles in that case, since I think we want to stop generating HLSL_IR_SWIZZLE instructions for those.
In "Parse matrix constructors." that function is used in other contexts, for which a subsequent load doesn't make sense (e.g., because you have to store rather than load). So I would still need a helper that just computes the offset and returns it to avoid code duplication. Once we have that, adding another helper to just add the load seems excessive to me (the case is different for load_index() and store_index() in "Parse matrix constructors.", because I already know that other sites will use them in subsequent patches).
Well, right, but I'm not sure it's worth organizing patch 8/12 like that, as I say :-)
WRT your comment about not generating HLSL_IR_SWIZZLE instructions, you mean that instead of
trace:hlsl_dump_function:    2:       uint | 8
trace:hlsl_dump_function:    3:     float4 | m[@2]
trace:hlsl_dump_function:    4:     float1 | @3.y
we'd want
trace:hlsl_dump_function:    2:       uint | 9
trace:hlsl_dump_function:    3:     float4 | m[@2]
directly?
If so, I can agree, but this seems something more easily handled in a later optimization pass, isn't it? The current architecture wouldn't allow for that easily any way, because when you're parsing the first subscript in an expression like "m[0][1]" you don't know yet if there is going to be a second one, or you just genuinely want a vector expression.
I'm referring to 'x._m00_m12_m33' swizzles, rather. We currently encode the whole swizzle in the hlsl_ir_swizzle string, the same way we do for vectors, but I suspect we don't want to do that.
Hi,
Il 21/04/22 20:58, Zebediah Figura ha scritto:
The intent is that they contain most code corresponding to user actions, and as such do nontrivial interpretation and validation. In the case where we're manually synthesizing instructions I don't want to have to think about whether such functions are doing something we don't want.
I can probably live with it in this case, but I'm concerned that this mental burden will grow.
Maybe there's an argument for helpers which just wrap hlsl_new_* like that, and we can have an add_* family and a parse_* family.
Ok, I can see an argument for having another abstraction layer as you say, but I'd defer this to some later patch, so that this doesn't scope creep too much.
As for all the other comments, they should be addressed in the next series.
Giovanni.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 98 ++++++++++++++++--- tests/hlsl-matrix-indexing.shader_test | 14 +++ ...lsl-return-implicit-conversion.shader_test | 8 +- 3 files changed, 100 insertions(+), 20 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index b6a8e496..1ee1db4c 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -2081,18 +2081,87 @@ static struct list *add_call(struct hlsl_ctx *ctx, const char *name, return params->instrs; }
+static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, struct list *instrs, + struct hlsl_type *type, unsigned int idx, const struct vkd3d_shader_location *loc) +{ + assert(type->type <= HLSL_CLASS_LAST_NUMERIC); + assert(idx < type->dimx * type->dimy); + + if (type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR) + { + struct hlsl_ir_constant *c; + + if (!(c = hlsl_new_uint_constant(ctx, idx, loc))) + return NULL; + list_add_tail(instrs, &c->node.entry); + + return &c->node; + } + else + { + struct hlsl_ir_constant *x, *y; + + if (!(x = hlsl_new_uint_constant(ctx, idx % type->dimx, loc))) + return NULL; + list_add_tail(instrs, &x->node.entry); + + if (!(y = hlsl_new_uint_constant(ctx, idx / type->dimx, loc))) + return NULL; + list_add_tail(instrs, &y->node.entry); + + return compute_matrix_offset(ctx, instrs, type, &x->node, &y->node, loc); + } +} + +static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, + unsigned int idx, const struct vkd3d_shader_location *loc) +{ + struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type); + struct hlsl_ir_node *offset; + struct hlsl_ir_load *load; + + if (instr->data_type->type > HLSL_CLASS_LAST_NUMERIC) + { + hlsl_fixme(ctx, loc, "Loading from non-numeric type."); + return NULL; + } + + if (!(offset = compute_offset_from_index(ctx, instrs, instr->data_type, idx, loc))) + return NULL; + + if (!(load = add_load(ctx, instrs, instr, offset, scal_type, *loc))) + return NULL; + + return &load->node; +} + +static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, + struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc) +{ + struct hlsl_ir_node *offset; + struct hlsl_ir_store *store; + + if (!(offset = compute_offset_from_index(ctx, instrs, var->data_type, idx, loc))) + return NULL; + + if (!(store = hlsl_new_store(ctx, var, offset, rhs, 
0, *loc))) + return NULL; + list_add_tail(instrs, &store->node.entry); + + return &store->node; +} + static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type, struct parse_initializer *params, struct vkd3d_shader_location loc) { - unsigned int i, writemask_offset = 0; - struct hlsl_ir_store *store; static unsigned int counter; + struct hlsl_type *scal_type; + unsigned int i, j, idx = 0; struct hlsl_ir_load *load; struct hlsl_ir_var *var; char name[23];
- if (type->type == HLSL_CLASS_MATRIX) - hlsl_fixme(ctx, &loc, "Matrix constructor."); + scal_type = hlsl_get_scalar_type(ctx, type->base_type);
sprintf(name, "<constructor-%x>", counter++); if (!(var = hlsl_new_synthetic_var(ctx, name, type, loc))) @@ -2115,22 +2184,19 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type } width = hlsl_type_component_count(arg->data_type);
- if (width > 4) + for (j = 0; j < width; ++j, ++idx) { - FIXME("Constructor argument with %u components.\n", width); - continue; - } + struct hlsl_ir_node *value;
- if (!(arg = add_implicit_conversion(ctx, params->instrs, arg, - hlsl_get_vector_type(ctx, type->base_type, width), &arg->loc))) - continue; + if (!(value = load_index(ctx, params->instrs, arg, j, &loc))) + return NULL;
- if (!(store = hlsl_new_store(ctx, var, NULL, arg, - ((1u << width) - 1) << writemask_offset, arg->loc))) - return NULL; - list_add_tail(params->instrs, &store->node.entry); + if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc))) + return NULL;
- writemask_offset += width; + if (!store_index(ctx, params->instrs, var, value, idx, &loc)) + return NULL; + } }
if (!(load = hlsl_new_var_load(ctx, var, loc))) diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index 3f4b878f..3cef127a 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -62,3 +62,17 @@ uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 draw quad probe all rgba (1.0, 2.0, 10.0, 15.0) + +[pixel shader] +float4 main() : SV_TARGET +{ + float4x4 m = float4x4(1.0, 2.0, 3.0, 4.0, + 5.0, 6.0, 7.0, 8.0, + 9.0, 10.0, 11.0, 12.0, + 13.0, 14.0, 15.0, 16.0); + return float4(m[0][0], m[1][0], m[1][2], m[2][3]); +} + +[test] +todo draw quad +probe all rgba (1.0, 5.0, 7.0, 12.0) diff --git a/tests/hlsl-return-implicit-conversion.shader_test b/tests/hlsl-return-implicit-conversion.shader_test index 38d21633..bf99d9cb 100644 --- a/tests/hlsl-return-implicit-conversion.shader_test +++ b/tests/hlsl-return-implicit-conversion.shader_test @@ -98,7 +98,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float3x1 func() { return float1x3(0.4, 0.3, 0.2); @@ -109,7 +109,7 @@ float4 main() : sv_target return float4(func(), 0.0); }
-[pixel shader fail todo] +[pixel shader fail] float1x3 func() { return float3x1(0.4, 0.3, 0.2); @@ -191,7 +191,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float3x1 func() { return float1x4(0.4, 0.3, 0.2, 0.1); @@ -217,7 +217,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float1x3 func() { return float4x1(0.4, 0.3, 0.2, 0.1);
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:34 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 98 ++++++++++++++++--- tests/hlsl-matrix-indexing.shader_test | 14 +++ ...lsl-return-implicit-conversion.shader_test | 8 +- 3 files changed, 100 insertions(+), 20 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index b6a8e496..1ee1db4c 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -2081,18 +2081,87 @@ static struct list *add_call(struct hlsl_ctx *ctx, const char *name, return params->instrs; }
+static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, struct list *instrs,
- struct hlsl_type *type, unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- assert(type->type <= HLSL_CLASS_LAST_NUMERIC);
- assert(idx < type->dimx * type->dimy);
- if (type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR)
- {
- struct hlsl_ir_constant *c;
- if (!(c = hlsl_new_uint_constant(ctx, idx, loc)))
- return NULL;
- list_add_tail(instrs, &c->node.entry);
- return &c->node;
- }
- else
- {
- struct hlsl_ir_constant *x, *y;
- if (!(x = hlsl_new_uint_constant(ctx, idx % type->dimx, loc)))
- return NULL;
- list_add_tail(instrs, &x->node.entry);
- if (!(y = hlsl_new_uint_constant(ctx, idx / type->dimx, loc)))
- return NULL;
- list_add_tail(instrs, &y->node.entry);
- return compute_matrix_offset(ctx, instrs, type, &x->node, &y->node, loc);
- }
+}
+static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr,
- unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type);
- struct hlsl_ir_node *offset;
- struct hlsl_ir_load *load;
- if (instr->data_type->type > HLSL_CLASS_LAST_NUMERIC)
- {
- hlsl_fixme(ctx, loc, "Loading from non-numeric type.");
- return NULL;
- }
- if (!(offset = compute_offset_from_index(ctx, instrs, instr->data_type, idx, loc)))
- return NULL;
- if (!(load = add_load(ctx, instrs, instr, offset, scal_type, *loc)))
- return NULL;
- return &load->node;
+}
+static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var,
- struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_ir_node *offset;
- struct hlsl_ir_store *store;
- if (!(offset = compute_offset_from_index(ctx, instrs, var->data_type, idx, loc)))
- return NULL;
- if (!(store = hlsl_new_store(ctx, var, offset, rhs, 0, *loc)))
- return NULL;
- list_add_tail(instrs, &store->node.entry);
- return &store->node;
+}
static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type, struct parse_initializer *params, struct vkd3d_shader_location loc) {
- unsigned int i, writemask_offset = 0;
- struct hlsl_ir_store *store;
static unsigned int counter;
- struct hlsl_type *scal_type;
- unsigned int i, j, idx = 0;
struct hlsl_ir_load *load; struct hlsl_ir_var *var; char name[23];
- if (type->type == HLSL_CLASS_MATRIX)
- hlsl_fixme(ctx, &loc, "Matrix constructor.");
- scal_type = hlsl_get_scalar_type(ctx, type->base_type);
sprintf(name, "<constructor-%x>", counter++); if (!(var = hlsl_new_synthetic_var(ctx, name, type, loc))) @@ -2115,22 +2184,19 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type } width = hlsl_type_component_count(arg->data_type);
- if (width > 4)
- for (j = 0; j < width; ++j, ++idx)
{
- FIXME("Constructor argument with %u components.\n", width);
- continue;
- }
- struct hlsl_ir_node *value;
- if (!(arg = add_implicit_conversion(ctx, params->instrs, arg,
- hlsl_get_vector_type(ctx, type->base_type, width), &arg->loc)))
- continue;
- if (!(value = load_index(ctx, params->instrs, arg, j, &loc)))
- return NULL;
- if (!(store = hlsl_new_store(ctx, var, NULL, arg,
- ((1u << width) - 1) << writemask_offset, arg->loc)))
- return NULL;
- list_add_tail(params->instrs, &store->node.entry);
- if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc)))
- return NULL;
- writemask_offset += width;
- if (!store_index(ctx, params->instrs, var, value, idx, &loc))
- return NULL;
- }
}
if (!(load = hlsl_new_var_load(ctx, var, loc))) diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test index 3f4b878f..3cef127a 100644 --- a/tests/hlsl-matrix-indexing.shader_test +++ b/tests/hlsl-matrix-indexing.shader_test @@ -62,3 +62,17 @@ uniform 8 float4 9.0 10.0 11.0 12.0 uniform 12 float4 13.0 14.0 15.0 16.0 draw quad probe all rgba (1.0, 2.0, 10.0, 15.0)
+[pixel shader] +float4 main() : SV_TARGET +{
- float4x4 m = float4x4(1.0, 2.0, 3.0, 4.0,
- 5.0, 6.0, 7.0, 8.0,
- 9.0, 10.0, 11.0, 12.0,
- 13.0, 14.0, 15.0, 16.0);
- return float4(m[0][0], m[1][0], m[1][2], m[2][3]);
+}
+[test] +todo draw quad +probe all rgba (1.0, 5.0, 7.0, 12.0) diff --git a/tests/hlsl-return-implicit-conversion.shader_test b/tests/hlsl-return-implicit-conversion.shader_test index 38d21633..bf99d9cb 100644 --- a/tests/hlsl-return-implicit-conversion.shader_test +++ b/tests/hlsl-return-implicit-conversion.shader_test @@ -98,7 +98,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float3x1 func() { return float1x3(0.4, 0.3, 0.2); @@ -109,7 +109,7 @@ float4 main() : sv_target return float4(func(), 0.0); }
-[pixel shader fail todo] +[pixel shader fail] float1x3 func() { return float3x1(0.4, 0.3, 0.2); @@ -191,7 +191,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float3x1 func() { return float1x4(0.4, 0.3, 0.2, 0.1); @@ -217,7 +217,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail todo] +[pixel shader fail] float1x3 func() { return float4x1(0.4, 0.3, 0.2, 0.1); -- 2.35.2
Signed-off-by: Matteo Bruni mbruni@codeweavers.com
On 4/18/22 01:34, Giovanni Mascellani wrote:
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 98 ++++++++++++++++--- tests/hlsl-matrix-indexing.shader_test | 14 +++ ...lsl-return-implicit-conversion.shader_test | 8 +- 3 files changed, 100 insertions(+), 20 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index b6a8e496..1ee1db4c 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -2081,18 +2081,87 @@ static struct list *add_call(struct hlsl_ctx *ctx, const char *name, return params->instrs; }
+static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_type *type, unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- assert(type->type <= HLSL_CLASS_LAST_NUMERIC);
- assert(idx < type->dimx * type->dimy);
- if (type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR)
- {
struct hlsl_ir_constant *c;
if (!(c = hlsl_new_uint_constant(ctx, idx, loc)))
return NULL;
list_add_tail(instrs, &c->node.entry);
return &c->node;
- }
- else
- {
struct hlsl_ir_constant *x, *y;
if (!(x = hlsl_new_uint_constant(ctx, idx % type->dimx, loc)))
return NULL;
list_add_tail(instrs, &x->node.entry);
if (!(y = hlsl_new_uint_constant(ctx, idx / type->dimx, loc)))
return NULL;
list_add_tail(instrs, &y->node.entry);
return compute_matrix_offset(ctx, instrs, type, &x->node, &y->node, loc);
- }
+}
I guess, but have you considered just returning an unsigned int from this function? It seems like potentially less work [and wouldn't invalidate my comment on compute_matrix_offset()...]
If not, I'd prefer to name this function something that makes it obvious that it's modifying the instruction stream (i.e. it should begin with add_*, probably). That goes for load_index() and store_index() as well.
+static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr,
unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type);
- struct hlsl_ir_node *offset;
- struct hlsl_ir_load *load;
- if (instr->data_type->type > HLSL_CLASS_LAST_NUMERIC)
- {
hlsl_fixme(ctx, loc, "Loading from non-numeric type.");
return NULL;
- }
- if (!(offset = compute_offset_from_index(ctx, instrs, instr->data_type, idx, loc)))
return NULL;
- if (!(load = add_load(ctx, instrs, instr, offset, scal_type, *loc)))
return NULL;
- return &load->node;
+}
+static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var,
struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc)
+{
- struct hlsl_ir_node *offset;
- struct hlsl_ir_store *store;
- if (!(offset = compute_offset_from_index(ctx, instrs, var->data_type, idx, loc)))
return NULL;
- if (!(store = hlsl_new_store(ctx, var, offset, rhs, 0, *loc)))
return NULL;
- list_add_tail(instrs, &store->node.entry);
- return &store->node;
+}
- static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type, struct parse_initializer *params, struct vkd3d_shader_location loc) {
- unsigned int i, writemask_offset = 0;
- struct hlsl_ir_store *store; static unsigned int counter;
- struct hlsl_type *scal_type;
- unsigned int i, j, idx = 0; struct hlsl_ir_load *load; struct hlsl_ir_var *var; char name[23];
- if (type->type == HLSL_CLASS_MATRIX)
hlsl_fixme(ctx, &loc, "Matrix constructor.");
scal_type = hlsl_get_scalar_type(ctx, type->base_type);
sprintf(name, "<constructor-%x>", counter++); if (!(var = hlsl_new_synthetic_var(ctx, name, type, loc)))
@@ -2115,22 +2184,19 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type } width = hlsl_type_component_count(arg->data_type);
if (width > 4)
for (j = 0; j < width; ++j, ++idx)
I know this is a common construction, but it always throws me off. I can live with it, but I'd prefer to put the ++idx in the store_index() call.
{
FIXME("Constructor argument with %u components.\n", width);
continue;
}
struct hlsl_ir_node *value;
if (!(arg = add_implicit_conversion(ctx, params->instrs, arg,
hlsl_get_vector_type(ctx, type->base_type, width), &arg->loc)))
continue;
if (!(value = load_index(ctx, params->instrs, arg, j, &loc)))
return NULL;
if (!(store = hlsl_new_store(ctx, var, NULL, arg,
((1u << width) - 1) << writemask_offset, arg->loc)))
return NULL;
list_add_tail(params->instrs, &store->node.entry);
if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc)))
return NULL;
writemask_offset += width;
if (!store_index(ctx, params->instrs, var, value, idx, &loc))
return NULL;
} } if (!(load = hlsl_new_var_load(ctx, var, loc)))
This can be follow-up patches, but as long as you're implementing it here, would you mind adding tests for constructors containing vectors and matrices?
Hi,
Il 20/04/22 20:47, Zebediah Figura ha scritto:
+static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, struct list *instrs, +Â Â Â Â Â Â Â struct hlsl_type *type, unsigned int idx, const struct vkd3d_shader_location *loc) +{ +Â Â Â assert(type->type <= HLSL_CLASS_LAST_NUMERIC); +Â Â Â assert(idx < type->dimx * type->dimy);
+Â Â Â if (type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR) +Â Â Â { +Â Â Â Â Â Â Â struct hlsl_ir_constant *c;
+Â Â Â Â Â Â Â if (!(c = hlsl_new_uint_constant(ctx, idx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &c->node.entry);
+Â Â Â Â Â Â Â return &c->node; +Â Â Â } +Â Â Â else +Â Â Â { +Â Â Â Â Â Â Â struct hlsl_ir_constant *x, *y;
+Â Â Â Â Â Â Â if (!(x = hlsl_new_uint_constant(ctx, idx % type->dimx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &x->node.entry);
+Â Â Â Â Â Â Â if (!(y = hlsl_new_uint_constant(ctx, idx / type->dimx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &y->node.entry);
+Â Â Â Â Â Â Â return compute_matrix_offset(ctx, instrs, type, &x->node, &y->node, loc); +Â Â Â } +}
I guess, but have you considered just returning an unsigned int from this function? It seems like potentially less work [and wouldn't invalidate my comment on compute_matrix_offset()...]
That would require duplicating the logic behind compute_matrix_offset() (once for computing in the compiler, once for computing in the shader). Granted, it's not particularly complicated logic, but still I'd prefer to keep one copy of it.
If not, I'd prefer to name this function something that makes it obvious that it's modifying the instruction stream (i.e. it should begin with add_*, probably). That goes for load_index() and store_index() as well.
To me that's sort of obvious, given that we're passing the instruction list as a parameter, but I can do that. I suppose that would apply to compute_matrix_offset() too, then.
width = hlsl_type_component_count(arg->data_type); -        if (width > 4) +        for (j = 0; j < width; ++j, ++idx)
I know this is a common construction, but it always throws me off. I can live with it, but I'd prefer to put the ++idx in the store_index() call.
OTOH, I don't much like it when operators that mutate data are buried inside a larger formula, except when it happens in very idiomatic positions, like "x[i++] = ..." or "*ptr++ = ...", or of course in the third slot of a "for" statement. "x = y = z" is already something that hurts legibility a lot, IMHO.
But I can live with your suggestion, so I'll do it.
{ -Â Â Â Â Â Â Â Â Â Â Â FIXME("Constructor argument with %u components.\n", width); -Â Â Â Â Â Â Â Â Â Â Â continue; -Â Â Â Â Â Â Â } +Â Â Â Â Â Â Â Â Â Â Â struct hlsl_ir_node *value; -Â Â Â Â Â Â Â if (!(arg = add_implicit_conversion(ctx, params->instrs, arg, -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â hlsl_get_vector_type(ctx, type->base_type, width), &arg->loc))) -Â Â Â Â Â Â Â Â Â Â Â continue; +Â Â Â Â Â Â Â Â Â Â Â if (!(value = load_index(ctx, params->instrs, arg, j, &loc))) +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â return NULL; -Â Â Â Â Â Â Â if (!(store = hlsl_new_store(ctx, var, NULL, arg, -Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â ((1u << width) - 1) << writemask_offset, arg->loc))) -Â Â Â Â Â Â Â Â Â Â Â return NULL; -Â Â Â Â Â Â Â list_add_tail(params->instrs, &store->node.entry); +Â Â Â Â Â Â Â Â Â Â Â if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc))) +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â return NULL; -Â Â Â Â Â Â Â writemask_offset += width; +Â Â Â Â Â Â Â Â Â Â Â if (!store_index(ctx, params->instrs, var, value, idx, &loc)) +Â Â Â Â Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â } Â Â Â Â Â } Â Â Â Â Â if (!(load = hlsl_new_var_load(ctx, var, loc)))
This can be follow-up patches, but as long as you're implementing it here, would you mind adding tests for constructors containing vectors and matrices?
Ok.
Giovanni.
On 4/21/22 04:53, Giovanni Mascellani wrote:
Hi,
Il 20/04/22 20:47, Zebediah Figura ha scritto:
+static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, struct list *instrs, +Â Â Â Â Â Â Â struct hlsl_type *type, unsigned int idx, const struct vkd3d_shader_location *loc) +{ +Â Â Â assert(type->type <= HLSL_CLASS_LAST_NUMERIC); +Â Â Â assert(idx < type->dimx * type->dimy);
+Â Â Â if (type->type == HLSL_CLASS_SCALAR || type->type == HLSL_CLASS_VECTOR) +Â Â Â { +Â Â Â Â Â Â Â struct hlsl_ir_constant *c;
+Â Â Â Â Â Â Â if (!(c = hlsl_new_uint_constant(ctx, idx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &c->node.entry);
+Â Â Â Â Â Â Â return &c->node; +Â Â Â } +Â Â Â else +Â Â Â { +Â Â Â Â Â Â Â struct hlsl_ir_constant *x, *y;
+Â Â Â Â Â Â Â if (!(x = hlsl_new_uint_constant(ctx, idx % type->dimx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &x->node.entry);
+Â Â Â Â Â Â Â if (!(y = hlsl_new_uint_constant(ctx, idx / type->dimx, loc))) +Â Â Â Â Â Â Â Â Â Â Â return NULL; +Â Â Â Â Â Â Â list_add_tail(instrs, &y->node.entry);
+Â Â Â Â Â Â Â return compute_matrix_offset(ctx, instrs, type, &x->node, &y->node, loc); +Â Â Â } +}
I guess, but have you considered just returning an unsigned int from this function? It seems like potentially less work [and wouldn't invalidate my comment on compute_matrix_offset()...]
That would require duplicating the logic behind compute_matrix_offset() (once for computing in the compiler, once for computing in the shader). Granted, it's not particularly complicated logic, but still I'd prefer to keep one copy of it.
I dunno, I see the logic as not particularly complex...
I'm also slightly concerned that we're generating terrible code too zealously. Simplicity of implementation is valuable, but it means that optimization passes have more busywork to do, and this is one of those cases where I'm not sure it's actually worth it...
If not, I'd prefer to name this function something that makes it obvious that it's modifying the instruction stream (i.e. it should begin with add_*, probably). That goes for load_index() and store_index() as well.
To me that's sort of obvious, given that we're passing the instruction list as a parameter, but I can do that. I suppose that would apply to compute_matrix_offset() too, then.
I find naming to be important enough that it's worth the extra clarity even if the argument types are unambiguous.
width = hlsl_type_component_count(arg->data_type); -        if (width > 4) +        for (j = 0; j < width; ++j, ++idx)
I know this is a common construction, but it always throws me off. I can live with it, but I'd prefer to put the ++idx in the store_index() call.
OTOH, I don't much like it when operators that mutate data are buried inside a larger formula, except when it happens in very idiomatic positions, like "x[i++] = ..." or "*ptr++ = ...", or of course in the third slot of a "for" statement. "x = y = z" is already something that hurts legibility a lot, IMHO.
But I can live with your suggestion, so I'll do it.
Sure, it's fine on a separate line too.
I guess not being used to multiple increments is a personal thing for me, so I probably should just drop this comment anyway.
Hi,
Il 21/04/22 20:57, Zebediah Figura ha scritto:
I dunno, I see the logic as not particularly complex...
Ok, given that Francisco reworked his patch in a rather different way, I did the same for mine. I am not really sure it's the ideal solution, but let's discuss it.
I'm also slightly concerned that we're generating terrible code too zealously. Simplicity of implementation is valuable, but it means that optimization passes have more busywork to do, and this is one of those cases where I'm not sure it's actually worth it...
The optimization step required in this specific case is rather simple and already implemented. However, the new patch should suit you better with respect to that point of view.
Thanks, Giovanni.
On 4/22/22 05:03, Giovanni Mascellani wrote:
Hi,
Il 21/04/22 20:57, Zebediah Figura ha scritto:
I dunno, I see the logic as not particularly complex...
Ok, given that Francisco reworked his patch in a rather different way, I did the same for mine. I am not really sure it's the ideal solution, but let's discuss it.
I'm also slightly concerned that we're generating terrible code too zealously. Simplicity of implementation is valuable, but it means that optimization passes have more busywork to do, and this is one of those cases where I'm not sure it's actually worth it...
The optimization step required in this specific case is rather simple and already implemented. However, the new patch should suit you better with respect to that point of view.
Eh, I mean extra CPU work for the pass itself, but even now it's probably not worth worrying about.
From: Francisco Casas fcasas@codeweavers.com
--- I did some minor changes, so I'm removing Francisco's Signed-off-by. --- libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index 802adf87..18b70853 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -726,6 +726,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive); struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
+struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, + unsigned int idx, const struct vkd3d_shader_location *loc); + struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size); struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2); @@ -768,6 +771,9 @@ struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, const struct vkd3d_shader_location loc);
+struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, + struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc); + void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5); void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 1ee1db4c..5b57a7bd 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1534,6 +1534,108 @@ static bool add_increment(struct hlsl_ctx *ctx, struct list *instrs, bool decrem return true; }
+static bool append_node_components(struct hlsl_ctx *ctx, struct list *instrs, + struct hlsl_ir_node *node, struct hlsl_ir_node **comps, unsigned int *comps_count, + const struct vkd3d_shader_location *loc) +{ + struct hlsl_type *type = node->data_type; + + switch (type->type) + { + case HLSL_CLASS_SCALAR: + case HLSL_CLASS_VECTOR: + case HLSL_CLASS_MATRIX: + { + unsigned int idx; + + for (idx = 0; idx < type->dimy * type->dimx; idx++) + { + struct hlsl_ir_node *value; + + if (!(value = hlsl_load_index(ctx, instrs, node, idx, loc))) + return false; + + comps[*comps_count] = value; + *comps_count += 1; + } + break; + } + + case HLSL_CLASS_STRUCT: + { + struct hlsl_struct_field *field; + struct hlsl_ir_node *load; + + LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry) + { + if (!add_record_load(ctx, instrs, node, field, *loc)) + return false; + + load = node_from_list(instrs); + if (!append_node_components(ctx, instrs, load, comps, comps_count, loc)) + return false; + } + break; + } + + case HLSL_CLASS_ARRAY: + { + struct hlsl_ir_constant *c; + struct hlsl_ir_node *load; + unsigned int i; + + for (i = 0; i < type->e.array.elements_count; i++) + { + if (!(c = hlsl_new_uint_constant(ctx, i, loc))) + return false; + list_add_tail(instrs, &c->node.entry); + + if (!add_array_load(ctx, instrs, node, &c->node, *loc)) + return false; + + load = node_from_list(instrs); + if (!append_node_components(ctx, instrs, load, comps, comps_count, loc)) + return false; + } + break; + } + + case HLSL_CLASS_OBJECT: + { + hlsl_fixme(ctx, loc, "Flattening of objects."); + return false; + } + } + + return true; +} + +static bool flatten_parse_initializer(struct hlsl_ctx *ctx, struct parse_initializer *initializer) +{ + unsigned int size = initializer_size(initializer); + unsigned int new_args_count = 0; + struct hlsl_ir_node **new_args; + bool success = true; + unsigned int i = 0; + + new_args = hlsl_alloc(ctx, size * sizeof(struct hlsl_ir_node *)); + if 
(!new_args) + return false; + + for (i = 0; i < initializer->args_count; i++) + { + success = append_node_components(ctx, initializer->instrs, initializer->args[i], new_args, + &new_args_count, &initializer->args[i]->loc); + if (!success) + break; + } + + vkd3d_free(initializer->args); + initializer->args = new_args; + initializer->args_count = new_args_count; + return success; +} + static void initialize_numeric_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, struct parse_initializer *initializer, unsigned int reg_offset, struct hlsl_type *type, unsigned int *initializer_offset) @@ -1768,14 +1870,13 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t } else { - if (v->initializer.args_count != size) + if (!flatten_parse_initializer(ctx, &v->initializer)) { - hlsl_fixme(ctx, &v->loc, "Flatten initializer."); free_parse_initializer(&v->initializer); vkd3d_free(v); continue; } - + assert(v->initializer.args_count == size); initialize_numeric_var(ctx, var, &v->initializer, 0, type, &initializer_offset); } } @@ -2113,7 +2214,7 @@ static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, stru } }
-static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, +struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type); @@ -2135,7 +2236,7 @@ static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs return &load->node; }
-static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, +struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_ir_node *offset; @@ -2188,13 +2289,13 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type { struct hlsl_ir_node *value;
- if (!(value = load_index(ctx, params->instrs, arg, j, &loc))) + if (!(value = hlsl_load_index(ctx, params->instrs, arg, j, &loc))) return NULL;
if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc))) return NULL;
- if (!store_index(ctx, params->instrs, var, value, idx, &loc)) + if (!hlsl_store_index(ctx, params->instrs, var, value, idx, &loc)) return NULL; } } diff --git a/tests/hlsl-initializer-flatten.shader_test b/tests/hlsl-initializer-flatten.shader_test index 3a430e0d..56e2e372 100644 --- a/tests/hlsl-initializer-flatten.shader_test +++ b/tests/hlsl-initializer-flatten.shader_test @@ -6,7 +6,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1, 2, 3, 4)
@@ -56,7 +56,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1.0, 2.0, 3.0, 4.0)
@@ -69,5 +69,5 @@ float4 main() : sv_target }
[test] -todo draw quad -probe all rgba (1.0, 2.0, 3.0, 4.0) +draw quad +todo probe all rgba (1.0, 2.0, 3.0, 4.0)
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
From: Francisco Casas fcasas@codeweavers.com
I did some minor changes, so I'm removing Francisco's Signed-off-by.
libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index 802adf87..18b70853 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -726,6 +726,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive); struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
+struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr,
unsigned int idx, const struct vkd3d_shader_location *loc);
- struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size); struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2);
@@ -768,6 +771,9 @@ struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, const struct vkd3d_shader_location loc);
+struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var,
struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc);
- void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5); void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc,
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 1ee1db4c..5b57a7bd 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1534,6 +1534,108 @@ static bool add_increment(struct hlsl_ctx *ctx, struct list *instrs, bool decrem return true; }
+static bool append_node_components(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *node, struct hlsl_ir_node **comps, unsigned int *comps_count,
const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *type = node->data_type;
- switch (type->type)
- {
case HLSL_CLASS_SCALAR:
case HLSL_CLASS_VECTOR:
case HLSL_CLASS_MATRIX:
{
unsigned int idx;
for (idx = 0; idx < type->dimy * type->dimx; idx++)
{
struct hlsl_ir_node *value;
if (!(value = hlsl_load_index(ctx, instrs, node, idx, loc)))
return false;
comps[*comps_count] = value;
*comps_count += 1;
}
break;
}
case HLSL_CLASS_STRUCT:
{
struct hlsl_struct_field *field;
struct hlsl_ir_node *load;
LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry)
{
if (!add_record_load(ctx, instrs, node, field, *loc))
return false;
load = node_from_list(instrs);
if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
return false;
}
break;
}
case HLSL_CLASS_ARRAY:
{
struct hlsl_ir_constant *c;
struct hlsl_ir_node *load;
unsigned int i;
for (i = 0; i < type->e.array.elements_count; i++)
{
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!add_array_load(ctx, instrs, node, &c->node, *loc))
return false;
load = node_from_list(instrs);
if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
return false;
}
break;
}
case HLSL_CLASS_OBJECT:
{
hlsl_fixme(ctx, loc, "Flattening of objects.");
return false;
}
- }
- return true;
+}
+static bool flatten_parse_initializer(struct hlsl_ctx *ctx, struct parse_initializer *initializer) +{
- unsigned int size = initializer_size(initializer);
- unsigned int new_args_count = 0;
- struct hlsl_ir_node **new_args;
- bool success = true;
- unsigned int i = 0;
- new_args = hlsl_alloc(ctx, size * sizeof(struct hlsl_ir_node *));
- if (!new_args)
return false;
- for (i = 0; i < initializer->args_count; i++)
- {
success = append_node_components(ctx, initializer->instrs, initializer->args[i], new_args,
&new_args_count, &initializer->args[i]->loc);
if (!success)
break;
- }
- vkd3d_free(initializer->args);
- initializer->args = new_args;
- initializer->args_count = new_args_count;
- return success;
+}
- static void initialize_numeric_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, struct parse_initializer *initializer, unsigned int reg_offset, struct hlsl_type *type, unsigned int *initializer_offset)
@@ -1768,14 +1870,13 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t } else {
if (v->initializer.args_count != size)
if (!flatten_parse_initializer(ctx, &v->initializer)) {
hlsl_fixme(ctx, &v->loc, "Flatten initializer."); free_parse_initializer(&v->initializer); vkd3d_free(v); continue; }
assert(v->initializer.args_count == size); initialize_numeric_var(ctx, var, &v->initializer, 0, type, &initializer_offset); } }
@@ -2113,7 +2214,7 @@ static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, stru } }
-static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, +struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type); @@ -2135,7 +2236,7 @@ static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs return &load->node; }
-static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, +struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_ir_node *offset; @@ -2188,13 +2289,13 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type { struct hlsl_ir_node *value;
if (!(value = load_index(ctx, params->instrs, arg, j, &loc)))
if (!(value = hlsl_load_index(ctx, params->instrs, arg, j, &loc))) return NULL; if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc))) return NULL;
if (!store_index(ctx, params->instrs, var, value, idx, &loc))
if (!hlsl_store_index(ctx, params->instrs, var, value, idx, &loc)) return NULL; } }
diff --git a/tests/hlsl-initializer-flatten.shader_test b/tests/hlsl-initializer-flatten.shader_test index 3a430e0d..56e2e372 100644 --- a/tests/hlsl-initializer-flatten.shader_test +++ b/tests/hlsl-initializer-flatten.shader_test @@ -6,7 +6,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1, 2, 3, 4)
@@ -56,7 +56,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1.0, 2.0, 3.0, 4.0)
@@ -69,5 +69,5 @@ float4 main() : sv_target }
[test] -todo draw quad -probe all rgba (1.0, 2.0, 3.0, 4.0) +draw quad +todo probe all rgba (1.0, 2.0, 3.0, 4.0)
Signed-off-by: Francisco Casas fcasas@codeweavers.com
April 18, 2022 2:34 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
From: Francisco Casas fcasas@codeweavers.com
I did some minor changes, so I'm removing Francisco's Signed-off-by.
libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index 802adf87..18b70853 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -726,6 +726,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive); struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
+struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr,
- unsigned int idx, const struct vkd3d_shader_location *loc);
struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size); struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2); @@ -768,6 +771,9 @@ struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, const struct vkd3d_shader_location loc);
+struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var,
- struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc);
void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5); void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 1ee1db4c..5b57a7bd 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1534,6 +1534,108 @@ static bool add_increment(struct hlsl_ctx *ctx, struct list *instrs, bool decrem return true; }
+static bool append_node_components(struct hlsl_ctx *ctx, struct list *instrs,
- struct hlsl_ir_node *node, struct hlsl_ir_node **comps, unsigned int *comps_count,
- const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *type = node->data_type;
- switch (type->type)
- {
- case HLSL_CLASS_SCALAR:
- case HLSL_CLASS_VECTOR:
- case HLSL_CLASS_MATRIX:
- {
- unsigned int idx;
- for (idx = 0; idx < type->dimy * type->dimx; idx++)
- {
- struct hlsl_ir_node *value;
- if (!(value = hlsl_load_index(ctx, instrs, node, idx, loc)))
- return false;
- comps[*comps_count] = value;
- *comps_count += 1;
- }
- break;
- }
- case HLSL_CLASS_STRUCT:
- {
- struct hlsl_struct_field *field;
- struct hlsl_ir_node *load;
- LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry)
- {
- if (!add_record_load(ctx, instrs, node, field, *loc))
- return false;
- load = node_from_list(instrs);
- if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
- return false;
- }
- break;
- }
- case HLSL_CLASS_ARRAY:
- {
- struct hlsl_ir_constant *c;
- struct hlsl_ir_node *load;
- unsigned int i;
- for (i = 0; i < type->e.array.elements_count; i++)
- {
- if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
- return false;
- list_add_tail(instrs, &c->node.entry);
- if (!add_array_load(ctx, instrs, node, &c->node, *loc))
- return false;
- load = node_from_list(instrs);
- if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
- return false;
- }
- break;
- }
- case HLSL_CLASS_OBJECT:
- {
- hlsl_fixme(ctx, loc, "Flattening of objects.");
- return false;
- }
- }
- return true;
+}
+static bool flatten_parse_initializer(struct hlsl_ctx *ctx, struct parse_initializer *initializer) +{
- unsigned int size = initializer_size(initializer);
- unsigned int new_args_count = 0;
- struct hlsl_ir_node **new_args;
- bool success = true;
- unsigned int i = 0;
- new_args = hlsl_alloc(ctx, size * sizeof(struct hlsl_ir_node *));
- if (!new_args)
- return false;
- for (i = 0; i < initializer->args_count; i++)
- {
- success = append_node_components(ctx, initializer->instrs, initializer->args[i], new_args,
- &new_args_count, &initializer->args[i]->loc);
- if (!success)
- break;
- }
- vkd3d_free(initializer->args);
- initializer->args = new_args;
- initializer->args_count = new_args_count;
- return success;
+}
static void initialize_numeric_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, struct parse_initializer *initializer, unsigned int reg_offset, struct hlsl_type *type, unsigned int *initializer_offset) @@ -1768,14 +1870,13 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t } else {
- if (v->initializer.args_count != size)
- if (!flatten_parse_initializer(ctx, &v->initializer))
{
- hlsl_fixme(ctx, &v->loc, "Flatten initializer.");
free_parse_initializer(&v->initializer); vkd3d_free(v); continue; }
- assert(v->initializer.args_count == size);
initialize_numeric_var(ctx, var, &v->initializer, 0, type, &initializer_offset); } } @@ -2113,7 +2214,7 @@ static struct hlsl_ir_node *compute_offset_from_index(struct hlsl_ctx *ctx, stru } }
-static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, +struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_type *scal_type = hlsl_get_scalar_type(ctx, instr->data_type->base_type); @@ -2135,7 +2236,7 @@ static struct hlsl_ir_node *load_index(struct hlsl_ctx *ctx, struct list *instrs return &load->node; }
-static struct hlsl_ir_node *store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, +struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc) { struct hlsl_ir_node *offset; @@ -2188,13 +2289,13 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type { struct hlsl_ir_node *value;
- if (!(value = load_index(ctx, params->instrs, arg, j, &loc)))
- if (!(value = hlsl_load_index(ctx, params->instrs, arg, j, &loc)))
return NULL;
if (!(value = add_implicit_conversion(ctx, params->instrs, value, scal_type, &arg->loc))) return NULL;
- if (!store_index(ctx, params->instrs, var, value, idx, &loc))
- if (!hlsl_store_index(ctx, params->instrs, var, value, idx, &loc))
return NULL; } } diff --git a/tests/hlsl-initializer-flatten.shader_test b/tests/hlsl-initializer-flatten.shader_test index 3a430e0d..56e2e372 100644 --- a/tests/hlsl-initializer-flatten.shader_test +++ b/tests/hlsl-initializer-flatten.shader_test @@ -6,7 +6,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1, 2, 3, 4)
@@ -56,7 +56,7 @@ float4 main() : sv_target }
[test] -todo draw quad +draw quad probe all rgba (1.0, 2.0, 3.0, 4.0)
@@ -69,5 +69,5 @@ float4 main() : sv_target }
[test] -todo draw quad -probe all rgba (1.0, 2.0, 3.0, 4.0) +draw quad
+todo probe all rgba (1.0, 2.0, 3.0, 4.0)
2.35.2
On Mon, Apr 18, 2022 at 8:34 AM Giovanni Mascellani gmascellani@codeweavers.com wrote:
From: Francisco Casas fcasas@codeweavers.com
I did some minor changes, so I'm removing Francisco's Signed-off-by.
libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-)
I'm also going to make some more changes on top of that, I'll probably resend the rest of the series in a bit.
On 4/18/22 01:34, Giovanni Mascellani wrote:
From: Francisco Casas fcasas@codeweavers.com
I did some minor changes, so I'm removing Francisco's Signed-off-by.
libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index 802adf87..18b70853 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -726,6 +726,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive); struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
+struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *instr,
unsigned int idx, const struct vkd3d_shader_location *loc);
- struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size); struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2);
@@ -768,6 +771,9 @@ struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, const struct vkd3d_shader_location loc);
+struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var,
struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc);
- void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5); void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc,
Why are these no longer static? They're still only used in hlsl.y.
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 1ee1db4c..5b57a7bd 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1534,6 +1534,108 @@ static bool add_increment(struct hlsl_ctx *ctx, struct list *instrs, bool decrem return true; }
+static bool append_node_components(struct hlsl_ctx *ctx, struct list *instrs,
struct hlsl_ir_node *node, struct hlsl_ir_node **comps, unsigned int *comps_count,
const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *type = node->data_type;
- switch (type->type)
- {
case HLSL_CLASS_SCALAR:
case HLSL_CLASS_VECTOR:
case HLSL_CLASS_MATRIX:
{
unsigned int idx;
for (idx = 0; idx < type->dimy * type->dimx; idx++)
{
struct hlsl_ir_node *value;
if (!(value = hlsl_load_index(ctx, instrs, node, idx, loc)))
return false;
comps[*comps_count] = value;
Nitpick, but you don't need a local variable for this.
*comps_count += 1;
}
break;
}
case HLSL_CLASS_STRUCT:
{
struct hlsl_struct_field *field;
struct hlsl_ir_node *load;
LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry)
{
if (!add_record_load(ctx, instrs, node, field, *loc))
return false;
I'm a bit torn on this one. My knee-jerk reaction is that accessing reg_offset and field->type directly would be simpler [along the lines of prepend_input_struct_copy(), although that function has other considerations], but on reflection I'm less sure. It'd definitely avoid generating extra work for the parser, though...
load = node_from_list(instrs);
if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
return false;
}
break;
}
case HLSL_CLASS_ARRAY:
{
struct hlsl_ir_constant *c;
struct hlsl_ir_node *load;
unsigned int i;
for (i = 0; i < type->e.array.elements_count; i++)
{
if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
return false;
list_add_tail(instrs, &c->node.entry);
if (!add_array_load(ctx, instrs, node, &c->node, *loc))
return false;
load = node_from_list(instrs);
if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
return false;
}
break;
}
case HLSL_CLASS_OBJECT:
{
hlsl_fixme(ctx, loc, "Flattening of objects.");
return false;
}
Could this just fall into the scalar case?
- }
- return true;
+}
+static bool flatten_parse_initializer(struct hlsl_ctx *ctx, struct parse_initializer *initializer) +{
- unsigned int size = initializer_size(initializer);
- unsigned int new_args_count = 0;
- struct hlsl_ir_node **new_args;
- bool success = true;
- unsigned int i = 0;
- new_args = hlsl_alloc(ctx, size * sizeof(struct hlsl_ir_node *));
- if (!new_args)
return false;
- for (i = 0; i < initializer->args_count; i++)
- {
success = append_node_components(ctx, initializer->instrs, initializer->args[i], new_args,
&new_args_count, &initializer->args[i]->loc);
if (!success)
break;
- }
- vkd3d_free(initializer->args);
- initializer->args = new_args;
- initializer->args_count = new_args_count;
- return success;
+}
- static void initialize_numeric_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, struct parse_initializer *initializer, unsigned int reg_offset, struct hlsl_type *type, unsigned int *initializer_offset)
@@ -1768,14 +1870,13 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t } else {
if (v->initializer.args_count != size)
if (!flatten_parse_initializer(ctx, &v->initializer)) {
hlsl_fixme(ctx, &v->loc, "Flatten initializer."); free_parse_initializer(&v->initializer); vkd3d_free(v); continue; }
assert(v->initializer.args_count == size); initialize_numeric_var(ctx, var, &v->initializer, 0, type, &initializer_offset); } }
This works, but I'm not sure that it's my favourite organization. Without having tried it, what seems maybe more idiomatically structured is to bring the hlsl_new_store() call into this loop, not unlike the way it's done for constructors. You could still keep the recursive quality of the function for the benefit of structs and arrays, but effectively pass the store index as an in-out parameter instead of comps_count. I think this would also simplify error handling.
In more concrete terms, I'm envisioning something like:
static void initialize_var_components(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, [in, out] unsigned int *store_index, struct hlsl_ir_node *instr)
which would be recursive, and called from what is currently initialize_numeric_var().
Hello,
April 20, 2022 3:34 PM, "Zebediah Figura" zfigura@codeweavers.com wrote:
On 4/18/22 01:34, Giovanni Mascellani wrote:
From: Francisco Casas fcasas@codeweavers.com
I did some minor changes, so I'm removing Francisco's Signed-off-by.
libs/vkd3d-shader/hlsl.h | 6 ++ libs/vkd3d-shader/hlsl.y | 115 +++++++++++++++++++-- tests/hlsl-initializer-flatten.shader_test | 8 +- 3 files changed, 118 insertions(+), 11 deletions(-) diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index 802adf87..18b70853 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -726,6 +726,9 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive); struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
+struct hlsl_ir_node *hlsl_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct
hlsl_ir_node *instr,
- unsigned int idx, const struct vkd3d_shader_location *loc);
struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size); struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2); @@ -768,6 +771,9 @@ struct hlsl_ir_var *hlsl_new_var(struct hlsl_ctx *ctx, const char *name, struct struct hlsl_ir_load *hlsl_new_var_load(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, const struct vkd3d_shader_location loc);
+struct hlsl_ir_node *hlsl_store_index(struct hlsl_ctx *ctx, struct list *instrs, struct
hlsl_ir_var *var,
- struct hlsl_ir_node *rhs, unsigned int idx, const struct vkd3d_shader_location *loc);
void hlsl_error(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc, enum vkd3d_shader_error error, const char *fmt, ...) VKD3D_PRINTF_FUNC(4, 5); void hlsl_fixme(struct hlsl_ctx *ctx, const struct vkd3d_shader_location *loc,
Why are these no longer static? They're still only used in hlsl.y.
This is probably an artifact from reordering the patches. Some of my future patches use it outside hlsl.y, so I see no point in changing it now in a separate commit.
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 1ee1db4c..5b57a7bd 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1534,6 +1534,108 @@ static bool add_increment(struct hlsl_ctx *ctx, struct list *instrs, bool decrem return true; }
+static bool append_node_components(struct hlsl_ctx *ctx, struct list *instrs,
- struct hlsl_ir_node *node, struct hlsl_ir_node **comps, unsigned int *comps_count,
- const struct vkd3d_shader_location *loc)
+{
- struct hlsl_type *type = node->data_type;
- switch (type->type)
- {
- case HLSL_CLASS_SCALAR:
- case HLSL_CLASS_VECTOR:
- case HLSL_CLASS_MATRIX:
- {
- unsigned int idx;
- for (idx = 0; idx < type->dimy * type->dimx; idx++)
- {
- struct hlsl_ir_node *value;
- if (!(value = hlsl_load_index(ctx, instrs, node, idx, loc)))
- return false;
- comps[*comps_count] = value;
Nitpick, but you don't need a local variable for this.
Okay.
- *comps_count += 1;
- }
- break;
- }
- case HLSL_CLASS_STRUCT:
- {
- struct hlsl_struct_field *field;
- struct hlsl_ir_node *load;
- LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry)
- {
- if (!add_record_load(ctx, instrs, node, field, *loc))
- return false;
I'm a bit torn on this one. My knee-jerk reaction is that accessing reg_offset and field->type directly would be simpler [along the lines of prepend_input_struct_copy(), although that function has other considerations], but on reflection I'm less sure. It'd definitely avoid generating extra work for the parser, though...
To use hlsl_new_load() instead, we would have to: - Add an argument for the current reg_offset. - Add an argument for the original node (that contains the "node" in the current call). - For consistency, we would also have to change hlsl_load_index() from the previous case and add_array_load() in the next case.
So we would end up with a signature more like initialize_generic_var().
I can do that if it seems better.
- load = node_from_list(instrs);
- if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
- return false;
- }
- break;
- }
- case HLSL_CLASS_ARRAY:
- {
- struct hlsl_ir_constant *c;
- struct hlsl_ir_node *load;
- unsigned int i;
- for (i = 0; i < type->e.array.elements_count; i++)
- {
- if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
- return false;
- list_add_tail(instrs, &c->node.entry);
- if (!add_array_load(ctx, instrs, node, &c->node, *loc))
- return false;
- load = node_from_list(instrs);
- if (!append_node_components(ctx, instrs, load, comps, comps_count, loc))
- return false;
- }
- break;
- }
- case HLSL_CLASS_OBJECT:
- {
- hlsl_fixme(ctx, loc, "Flattening of objects.");
- return false;
- }
Could this just fall into the scalar case?
Not for now, hlsl_load_index doesn't support objects. We are still missing tests for complex initializers with object elements, to see what the native compiler does.
I think we should add the tests in a separate patch before supporting them.
- }
- return true;
+}
+static bool flatten_parse_initializer(struct hlsl_ctx *ctx, struct parse_initializer *initializer) +{
- unsigned int size = initializer_size(initializer);
- unsigned int new_args_count = 0;
- struct hlsl_ir_node **new_args;
- bool success = true;
- unsigned int i = 0;
- new_args = hlsl_alloc(ctx, size * sizeof(struct hlsl_ir_node *));
- if (!new_args)
- return false;
- for (i = 0; i < initializer->args_count; i++)
- {
- success = append_node_components(ctx, initializer->instrs, initializer->args[i], new_args,
- &new_args_count, &initializer->args[i]->loc);
- if (!success)
- break;
- }
- vkd3d_free(initializer->args);
- initializer->args = new_args;
- initializer->args_count = new_args_count;
- return success;
+}
static void initialize_numeric_var(struct hlsl_ctx *ctx, struct hlsl_ir_var *var, struct parse_initializer *initializer, unsigned int reg_offset, struct hlsl_type *type, unsigned int *initializer_offset) @@ -1768,14 +1870,13 @@ static struct list *declare_vars(struct hlsl_ctx *ctx, struct hlsl_type *basic_t } else {
- if (v->initializer.args_count != size)
- if (!flatten_parse_initializer(ctx, &v->initializer))
{
- hlsl_fixme(ctx, &v->loc, "Flatten initializer.");
free_parse_initializer(&v->initializer); vkd3d_free(v); continue; }
- assert(v->initializer.args_count == size);
initialize_numeric_var(ctx, var, &v->initializer, 0, type, &initializer_offset); } }
This works, but I'm not sure that it's my favourite organization. Without having tried it, what seems maybe more idiomatically structured is to bring the hlsl_new_store() call into this loop, not unlike the way it's done for constructors. You could still keep the recursive quality of the function for the benefit of structs and arrays, but effectively pass the store index as an in-out parameter instead of comps_count. I think this would also simplify error handling.
In more concrete terms, I'm envisioning something like:
static void initialize_var_components(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, [in, out] unsigned int *store_index, struct hlsl_ir_node *instr)
which would be recursive, and called from what is currently initialize_numeric_var().
If I understand correctly, you are proposing to merge the "flatten" with the "initialize" functions. I tried it but I realized that it implies doing two types of recursion simultaneously, one on the "var" components and the other on the "initializer->args" components.
Which is possible if we either: - Explicitly keep the call stack of one of the recursions. I assume the argument index and the call stack of "initializer->args". - Have a way of quickly mapping the index of a component (I assume that's what you meant with "store_index") to a register offset for any data type, or do a whole recursion each time we need that value.
I concluded that it is much more readable to keep the "flatten" separated from the "initialize" even if we mix those into one single function.
I still applied the formatting changes that Mystral suggested.
On 4/20/22 19:32, Francisco Casas wrote:
This works, but I'm not sure that it's my favourite organization. Without having tried it, what seems maybe more idiomatically structured is to bring the hlsl_new_store() call into this loop, not unlike the way it's done for constructors. You could still keep the recursive quality of the function for the benefit of structs and arrays, but effectively pass the store index as an in-out parameter instead of comps_count. I think this would also simplify error handling.
In more concrete terms, I'm envisioning something like:
static void initialize_var_components(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, [in, out] unsigned int *store_index, struct hlsl_ir_node *instr)
which would be recursive, and called from what is currently initialize_numeric_var().
If I understand correctly, you are proposing to merge the "flatten" with the "initialize" functions. I tried it but I realized that it implies doing two types of recursion simultaneously, one on the "var" components and other on the "initializer->args"·s components.
Hmm, that's a good point, I hadn't considered that both sides would need to recurse.
Which is possible if we either:
- Explicitly keep the call stack of one of the recursions. I assume the argument index and
the call stack of "initializer->args".
- Having a way of quickly map the index of a component (I assume that's what you meant with
"store_index") to a register offset for any data type, or do a whole recursion each time we need that value.
I think the latter is fine, but keeping the code as it is is also fine.
I concluded that it is much more readable to keep the "flatten" separated from the "initialize" even if we mix those into one single function.
I still applied the formatting changes that Mystral suggested.
Hello, just for the record
April 21, 2022 3:02 PM, "Zebediah Figura" zfigura@codeweavers.com wrote:
On 4/20/22 19:32, Francisco Casas wrote:
This works, but I'm not sure that it's my favourite organization. Without having tried it, what seems maybe more idiomatically structured is to bring the hlsl_new_store() call into this loop, not unlike the way it's done for constructors. You could still keep the recursive quality of the function for the benefit of structs and arrays, but effectively pass the store index as an in-out parameter instead of comps_count. I think this would also simplify error handling.
In more concrete terms, I'm envisioning something like:
static void initialize_var_components(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_var *var, [in, out] unsigned int *store_index, struct hlsl_ir_node *instr)
which would be recursive, and called from what is currently initialize_numeric_var().
If I understand correctly, you are proposing to merge the "flatten" with the "initialize" functions. I tried it but I realized that it implies doing two types of recursion simultaneously, one on the "var" components and other on the "initializer->args"·s components.
Hmm, that's a good point, I hadn't considered that both sides would need to recurse.
Which is possible if we either:
- Explicitly keep the call stack of one of the recursions. I assume the argument index and
the call stack of "initializer->args".
- Having a way of quickly map the index of a component (I assume that's what you meant with
"store_index") to a register offset for any data type, or do a whole recursion each time we need that value.
I think the latter is fine, but keeping the code as it is is also fine.
In the end I tried this and it turned into a nice patch that supports all complex initializers. So it is better not to accept 09/12, 10/12, 11/12, and 12/12 yet.
I concluded that it is much more readable to keep the "flatten" separated from the "initialize" even if we mix those into one single function.
(I no longer agree with my previous self on this).
I still applied the formatting changes that Mystral suggested.
Best regards, Francisco.
From: Francisco Casas fcasas@codeweavers.com
Otherwise we can get failed assertions: assert(node->type == HLSL_IR_LOAD); because broadcasts to these types are not implemented yet.
Signed-off-by: Francisco Casas fcasas@codeweavers.com --- libs/vkd3d-shader/hlsl_codegen.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 73e3b73f..2104c48b 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -628,6 +628,13 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, element_type = type->e.array.type; element_size = hlsl_type_get_array_element_reg_size(element_type);
+ if (rhs->type != HLSL_IR_LOAD) + { + hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NOT_IMPLEMENTED, + "array store rhs is not HLSL_IR_LOAD, missing broadcast?"); + return false; + } + for (i = 0; i < type->e.array.elements_count; ++i) { if (!split_copy(ctx, store, hlsl_ir_load(rhs), i * element_size, element_type)) @@ -658,6 +665,13 @@ static bool split_struct_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr if (type->type != HLSL_CLASS_STRUCT) return false;
+ if (rhs->type != HLSL_IR_LOAD) + { + hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NOT_IMPLEMENTED, + "struct store rhs is not HLSL_IR_LOAD, missing broadcast?"); + return false; + } + LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry) { if (!split_copy(ctx, store, hlsl_ir_load(rhs), field->reg_offset, field->type))
Hi,
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
Otherwise we can get failed assertions: assert(node->type == HLSL_IR_LOAD); because broadcasts to these types are not implemented yet.
Signed-off-by: Francisco Casasfcasas@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+)
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 73e3b73f..2104c48b 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -628,6 +628,13 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, element_type = type->e.array.type; element_size = hlsl_type_get_array_element_reg_size(element_type);
- if (rhs->type != HLSL_IR_LOAD)
- {
hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NOT_IMPLEMENTED,
"array store rhs is not HLSL_IR_LOAD, missing broadcast?");
return false;
- }
If the problem here is that the compiler is missing a feature, then I don't think hlsl_error() is appropriate, because that would be for a user error. I guess hlsl_fixme() is what should be used here.
Also, notice that the error message is usually formatted with a leading upper case letter and with a trailing dot (but no trailing newline).
Last, I am not completely sure of what the issue is here, but if eventually properly implementing other parts of the compiler will make this code useless, then I'd add a comment saying that.
Thanks, Giovanni.
Hello,
April 19, 2022 6:44 AM, "Giovanni Mascellani" gmascellani@codeweavers.com wrote:
Hi,
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
Otherwise we can get failed assertions: assert(node->type == HLSL_IR_LOAD); because broadcasts to these types are not implemented yet. Signed-off-by: Francisco Casasfcasas@codeweavers.com
libs/vkd3d-shader/hlsl_codegen.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c index 73e3b73f..2104c48b 100644 --- a/libs/vkd3d-shader/hlsl_codegen.c +++ b/libs/vkd3d-shader/hlsl_codegen.c @@ -628,6 +628,13 @@ static bool split_array_copies(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, element_type = type->e.array.type; element_size = hlsl_type_get_array_element_reg_size(element_type);
- if (rhs->type != HLSL_IR_LOAD)
- {
- hlsl_error(ctx, &instr->loc, VKD3D_SHADER_ERROR_HLSL_NOT_IMPLEMENTED,
- "array store rhs is not HLSL_IR_LOAD, missing broadcast?");
- return false;
- }
If the problem here is that the compiler is missing a feature, then I don't think hlsl_error() is appropriate, because that would be for a user error. I guess hlsl_fixme() is what should be used here.
I see, I think you are right.
Also, notice that the error message is usually formatted with a leading upper case letter and with a trailing dot (but no trailing newline).
Okay, I will try to remember that. I think I didn't pay too much attention to this patch because I just wrote it so that the tests in the following patch of my branch:
725ae863 fcasas tests: Test complex broadcasts.
didn't fail the aforementioned assertion.
Last, I am not completely sure of what the issue is here, but if eventually properly implementing other parts of the compiler will make this code useless, then I'd add a comment saying that.
Yes, this issue stops happening with:
6d2a14a0 fcasas vkd3d-shader/hlsl: Lower complex broadcasts.
On 4/19/22 13:26, Francisco Casas wrote:
Last, I am not completely sure of what the issue is here, but if eventually properly implementing other parts of the compiler will make this code useless, then I'd add a comment saying that.
Yes, this issue stops happening with:
6d2a14a0 fcasas vkd3d-shader/hlsl: Lower complex broadcasts.
In that case, could we just send that patch instead, and skip this one?
From: Francisco Casas fcasas@codeweavers.com
Signed-off-by: Francisco Casas fcasas@codeweavers.com --- Makefile.am | 1 + tests/cast-broadcast.shader_test | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 tests/cast-broadcast.shader_test
diff --git a/Makefile.am b/Makefile.am index 52d057c9..0d1ce2a2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -56,6 +56,7 @@ vkd3d_shader_tests = \ tests/arithmetic-int.shader_test \ tests/arithmetic-uint.shader_test \ tests/bitwise.shader_test \ + tests/cast-broadcast.shader_test \ tests/cast-to-float.shader_test \ tests/cast-to-half.shader_test \ tests/cast-to-int.shader_test \ diff --git a/tests/cast-broadcast.shader_test b/tests/cast-broadcast.shader_test new file mode 100644 index 00000000..02d14c0b --- /dev/null +++ b/tests/cast-broadcast.shader_test @@ -0,0 +1,24 @@ +[pixel shader] + +struct foo +{ + float3 aa; + float4 bb; +}; + +struct bar +{ + struct foo aa; + int2 bb; + int4 cc[8]; +}; + +float4 main() : SV_TARGET +{ + struct bar p = (struct bar)42; + return p.aa.bb + p.cc[5]; +} + +[test] +todo draw quad +todo probe all rgba (84.0, 84.0, 84.0, 84.0)
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
From: Francisco Casas fcasas@codeweavers.com
Signed-off-by: Francisco Casas fcasas@codeweavers.com
Makefile.am | 1 + tests/cast-broadcast.shader_test | 24 ++++++++++++++++++++++++ 2 files changed, 25 insertions(+) create mode 100644 tests/cast-broadcast.shader_test
diff --git a/Makefile.am b/Makefile.am index 52d057c9..0d1ce2a2 100644 --- a/Makefile.am +++ b/Makefile.am @@ -56,6 +56,7 @@ vkd3d_shader_tests = \ tests/arithmetic-int.shader_test \ tests/arithmetic-uint.shader_test \ tests/bitwise.shader_test \
+ tests/cast-broadcast.shader_test \ tests/cast-to-float.shader_test \ tests/cast-to-half.shader_test \ tests/cast-to-int.shader_test \
diff --git a/tests/cast-broadcast.shader_test b/tests/cast-broadcast.shader_test new file mode 100644 index 00000000..02d14c0b --- /dev/null +++ b/tests/cast-broadcast.shader_test @@ -0,0 +1,24 @@ +[pixel shader]
+struct foo +{
+ float3 aa;
+ float4 bb;
+};
+struct bar +{
+ struct foo aa;
+ int2 bb;
+ int4 cc[8];
+};
+float4 main() : SV_TARGET +{
+ struct bar p = (struct bar)42;
+ return p.aa.bb + p.cc[5];
+}
+[test] +todo draw quad +todo probe all rgba (84.0, 84.0, 84.0, 84.0)
Signed-off-by: Zebediah Figura zfigura@codeweavers.com
From: Francisco Casas fcasas@codeweavers.com
Signed-off-by: Francisco Casas fcasas@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 8 -------- 1 file changed, 8 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 5b57a7bd..10625b7e 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1676,14 +1676,6 @@ static void struct_var_initializer(struct hlsl_ctx *ctx, struct hlsl_ir_var *var struct hlsl_struct_field *field; unsigned int i = 0;
- if (initializer_size(initializer) != hlsl_type_component_count(type)) - { - hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT, - "Expected %u components in initializer, but got %u.", - hlsl_type_component_count(type), initializer_size(initializer)); - return; - } - LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry) { struct hlsl_ir_node *node = initializer->args[i];
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:34, Giovanni Mascellani ha scritto:
From: Francisco Casas fcasas@codeweavers.com
Signed-off-by: Francisco Casas fcasas@codeweavers.com
libs/vkd3d-shader/hlsl.y | 8 -------- 1 file changed, 8 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 5b57a7bd..10625b7e 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1676,14 +1676,6 @@ static void struct_var_initializer(struct hlsl_ctx *ctx, struct hlsl_ir_var *var struct hlsl_struct_field *field; unsigned int i = 0;
- if (initializer_size(initializer) != hlsl_type_component_count(type))
- {
- hlsl_error(ctx, &var->loc, VKD3D_SHADER_ERROR_HLSL_WRONG_PARAMETER_COUNT,
- "Expected %u components in initializer, but got %u.",
- hlsl_type_component_count(type), initializer_size(initializer));
- return;
- }
LIST_FOR_EACH_ENTRY(field, type->e.elements, struct hlsl_struct_field, entry) { struct hlsl_ir_node *node = initializer->args[i];
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
Il 18/04/22 08:33, Giovanni Mascellani ha scritto:
From: Zebediah Figura zfigura@codeweavers.com
Makefile.am | 3 --- tests/hlsl-function.shader_test | 14 ++++++------- ...lsl-return-implicit-conversion.shader_test | 10 ++++----- tests/shader_runner.c | 21 ++++++++++++++++--- 4 files changed, 30 insertions(+), 18 deletions(-)
diff --git a/Makefile.am b/Makefile.am index ed881ffa..52d057c9 100644 --- a/Makefile.am +++ b/Makefile.am @@ -317,9 +317,6 @@ tests_shader_runner_SOURCES = \ tests_vkd3d_api_LDADD = libvkd3d.la @VULKAN_LIBS@ tests_vkd3d_shader_api_LDADD = libvkd3d-shader.la SHADER_TEST_LOG_COMPILER = tests/shader_runner -XFAIL_TESTS = \
- tests/hlsl-function.shader_test \
- tests/hlsl-return-implicit-conversion.shader_test endif
EXTRA_DIST += $(vkd3d_shader_tests)
diff --git a/tests/hlsl-function.shader_test b/tests/hlsl-function.shader_test index 586f1ab7..6f11e35b 100644 --- a/tests/hlsl-function.shader_test +++ b/tests/hlsl-function.shader_test @@ -1,4 +1,4 @@ -[pixel shader fail] +[pixel shader fail todo]
float4 func();
@@ -23,7 +23,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float o) { @@ -37,7 +37,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float2 o) { @@ -51,7 +51,7 @@ float4 main() : sv_target return 0; }
-[pixel shader fail] +[pixel shader fail todo]
void func(out float o) { @@ -65,7 +65,7 @@ float4 main() : sv_target return x; }
-[pixel shader fail] +[pixel shader fail todo]
void func(inout float o) { @@ -78,7 +78,7 @@ float4 main() : sv_target return x; }
-[pixel shader fail] +[pixel shader fail todo]
void func() { @@ -89,7 +89,7 @@ float4 main() : sv_target return func(); }
-[pixel shader fail] +[pixel shader fail todo]
void foo() { diff --git a/tests/hlsl-return-implicit-conversion.shader_test b/tests/hlsl-return-implicit-conversion.shader_test index 4c07cf84..38d21633 100644 --- a/tests/hlsl-return-implicit-conversion.shader_test +++ b/tests/hlsl-return-implicit-conversion.shader_test @@ -98,7 +98,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float1x3(0.4, 0.3, 0.2); @@ -109,7 +109,7 @@ float4 main() : sv_target return float4(func(), 0.0); }
-[pixel shader fail] +[pixel shader fail todo] float1x3 func() { return float3x1(0.4, 0.3, 0.2); @@ -165,7 +165,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float4(0.4, 0.3, 0.2, 0.1); @@ -191,7 +191,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float3x1 func() { return float1x4(0.4, 0.3, 0.2, 0.1); @@ -217,7 +217,7 @@ float4 main() : sv_target todo draw quad probe all rgba (0.4, 0.3, 0.2, 0.0)
-[pixel shader fail] +[pixel shader fail todo] float1x3 func() { return float4x1(0.4, 0.3, 0.2, 0.1); diff --git a/tests/shader_runner.c b/tests/shader_runner.c index 8633ee9a..8804164e 100644 --- a/tests/shader_runner.c +++ b/tests/shader_runner.c @@ -80,6 +80,7 @@ enum parse_state STATE_REQUIRE, STATE_SAMPLER, STATE_SHADER_INVALID_PIXEL,
+ STATE_SHADER_INVALID_PIXEL_TODO, STATE_SHADER_PIXEL, STATE_SHADER_VERTEX, STATE_TEXTURE,
@@ -642,15 +643,24 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const break;
case STATE_SHADER_INVALID_PIXEL:
case STATE_SHADER_INVALID_PIXEL_TODO: { ID3D10Blob *blob = NULL, *errors = NULL; HRESULT hr; hr = D3DCompile(shader_source, strlen(shader_source), NULL, NULL, NULL, "main", "ps_4_0", 0, 0, &blob, &errors);
- ok(hr == E_FAIL, "Got unexpected hr %#x.\n", hr);
- ok(!blob, "Expected no compiled shader blob.\n");
- ok(!!errors, "Expected non-NULL error blob.\n");
+ todo_if (state == STATE_SHADER_INVALID_PIXEL_TODO)
+ ok(hr == E_FAIL, "Got unexpected hr %#x.\n", hr);
+ if (hr == S_OK)
+ {
+ ID3D10Blob_Release(blob);
+ }
+ else
+ {
+ ok(!blob, "Expected no compiled shader blob.\n");
+ ok(!!errors, "Expected non-NULL error blob.\n");
} if (errors) { if (vkd3d_test_state.debug_level)
@@ -735,6 +745,10 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const { state = STATE_SHADER_INVALID_PIXEL; }
+ else if (!strcmp(line, "[pixel shader fail todo]\n"))
+ {
+ state = STATE_SHADER_INVALID_PIXEL_TODO;
} else if (sscanf(line, "[sampler %u]\n", &index)) { state = STATE_SAMPLER;
@@ -817,6 +831,7 @@ void run_shader_tests(struct shader_runner *runner, int argc, char **argv, const case STATE_PREPROC: case STATE_PREPROC_INVALID: case STATE_SHADER_INVALID_PIXEL:
+ case STATE_SHADER_INVALID_PIXEL_TODO: case STATE_SHADER_PIXEL: case STATE_SHADER_VERTEX: {
On Wed, 20 Apr 2022 at 17:44, Henri Verbeet hverbeet@codeweavers.com wrote:
Signed-off-by: Henri Verbeet hverbeet@codeweavers.com
But note that this patch conflicts with the series by Zeb.