Reinterpret min16float, min10float, min16int, min12int, and min16uint as their regular counterparts: float, float, int, int, uint, respectively.
A proper implementation would require adding minimum precision indicators to all the dxbc-tpf instructions that use these types. Consider the output of fxc 10.1 with the following shader:
```hlsl
uniform int i;

float4 main() : sv_target
{
    min16float4 a = {0, 1, 2, i};
    min16int2 b = {4, i};
    min10float3 c = {6.4, 7, i};
    min12int d = 9.4;
    min16uint4x2 e = {14.4, 15, 16, 17, 18, 19, 20, i};

    return mul(e, b) + a + c.xyzx + d;
}
```
```
ps_5_0
dcl_globalFlags refactoringAllowed | enableMinimumPrecision
dcl_constantbuffer CB0[1], immediateIndexed
dcl_output o0.xyzw
dcl_temps 3
imad r0.xyz {min16u}, l(15, 17, 19, 0) {def32 as min16u}, cb0[0].xxxx {def32 as min16u}, l(56, 64, 72, 0) {def32 as min16u}
utof r0.xyz {min16f}, r0.xyzx {min16u}
imad r1.x {min16u}, cb0[0].x {def32 as min16u}, cb0[0].x {def32 as min16u}, l(80) {def32 as min16u}
utof r0.w {min16f}, r1.x {min16u}
mov r1.xyz {min16f}, l(0,1.000000,2.000000,0) {def32 as min16f}
itof r1.w {min16f}, cb0[0].x
add r0.xyzw {min16f}, r0.xyzw {min16f}, r1.xyzw {min16f}
mov r2.y {min2_8f}, l(7.000000) {def32 as min2_8f}
itof r2.z {min2_8f}, cb0[0].x
add r0.yz {min16f}, r0.yyzy {min16f}, r2.yyzy {min2_8f as min16f}
add r0.xyzw {min16f}, r0.xyzw {min16f}, l(15.400000, 9.000000, 9.000000, 15.400000) {def32 as min16f}
mov o0.xyzw, r0.xyzw {min16f as def32}
ret
```
However, if the graphics driver doesn't have minimum precision support, it ignores the minimum precision indicators and runs at 32-bit precision, which is equivalent to working with the regular types.
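For context, the {min16f}, {min16u}, and {min2_8f} annotations in the listing above are those per-operand minimum precision indicators. Carrying them through the compiler would presumably mean tagging each operand with something along the following lines; this is only a sketch with hypothetical names, not vkd3d's actual IR or the exact dxbc-tpf encoding:

```c
/* Hypothetical per-operand tag mirroring the modifiers fxc prints above:
 * {min16f}, {min2_8f}, {min16u}, and the "def32 as ..." conversions. */
enum minimum_precision
{
    MINIMUM_PRECISION_NONE,      /* full 32-bit precision ("def32") */
    MINIMUM_PRECISION_FLOAT_16,  /* min16float */
    MINIMUM_PRECISION_FLOAT_2_8, /* min10float, printed as "min2_8f" */
    MINIMUM_PRECISION_INT_16,    /* min16int */
    MINIMUM_PRECISION_UINT_16,   /* min16uint */
};

/* An operand such as "cb0[0].xxxx {def32 as min16u}" is declared at one
 * precision but consumed at another, so both would have to be recorded and
 * emitted with every instruction that touches a minimum-precision value. */
struct operand_min_precision
{
    enum minimum_precision declared;
    enum minimum_precision used_as;
};
```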
From: Francisco Casas <fcasas@codeweavers.com>
```diff
---
 Makefile.am                         |  1 +
 tests/minimum-precision.shader_test | 19 +++++++++++++++++++
 2 files changed, 20 insertions(+)
 create mode 100644 tests/minimum-precision.shader_test

diff --git a/Makefile.am b/Makefile.am
index 85cd4642..e56f696e 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -123,6 +123,7 @@ vkd3d_shader_tests = \
 	tests/math.shader_test \
 	tests/matrix-semantics.shader_test \
 	tests/max.shader_test \
+	tests/minimum-precision.shader_test \
 	tests/multiple-rt.shader_test \
 	tests/nointerpolation.shader_test \
 	tests/object-references.shader_test \
diff --git a/tests/minimum-precision.shader_test b/tests/minimum-precision.shader_test
new file mode 100644
index 00000000..3e4c5d4f
--- /dev/null
+++ b/tests/minimum-precision.shader_test
@@ -0,0 +1,19 @@
+[require]
+shader model >= 4.0
+
+
+[pixel shader todo]
+float4 main() : sv_target
+{
+    min16float4 a = {0, 1, 2, 3};
+    min10float2 b = {4, 5};
+    min16int3 c = {6.4, 7, 8};
+    min12int d = 9.4;
+    min16uint4x2 e = {14.4, 15, 16, 17, 18, 19, 20, 21};
+
+    return mul(e, b) + a + c.xyzx + d;
+}
+
+[test]
+todo draw quad
+todo probe all rgba (146.0, 166.0, 186.0, 203.0)
```
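As a side note, the expected probe values in the new test can be reproduced by hand: with the minimum-precision types read as their regular counterparts, the initializers truncate to a = {0, 1, 2, 3}, b = {4, 5}, c = {6, 7, 8}, d = 9, and e has rows {14, 15}, {16, 17}, {18, 19}, {20, 21}, so mul(e, b) is a per-row dot product with b. A throwaway check (not part of the patch) that prints the same numbers:

```c
#include <stdio.h>

int main(void)
{
    static const unsigned int e[4][2] = {{14, 15}, {16, 17}, {18, 19}, {20, 21}};
    static const float a[4] = {0.0f, 1.0f, 2.0f, 3.0f};
    static const float b[2] = {4.0f, 5.0f};
    static const int c_xyzx[4] = {6, 7, 8, 6}; /* c.xyzx with c = {6, 7, 8} */
    static const int d = 9;
    unsigned int i;

    /* mul(e, b): dot each row of the 4x2 matrix with the 2-component vector,
     * then add the remaining terms of the shader's return expression. */
    for (i = 0; i < 4; ++i)
        printf("%.1f ", e[i][0] * b[0] + e[i][1] * b[1] + a[i] + c_xyzx[i] + d);
    printf("\n"); /* 146.0 166.0 186.0 203.0 */

    return 0;
}
```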
From: Francisco Casas <fcasas@codeweavers.com>
Reinterpret min16float, min10float, min16int, min12int, and min16uint as their regular counterparts: float, float, int, int, uint, respectively.
A proper implementation would require adding minimum precision indicators to all the dxbc-tpf instructions that use these types. Consider the output of fxc 10.1 with the following shader:
```hlsl
uniform int i;

float4 main() : sv_target
{
    min16float4 a = {0, 1, 2, i};
    min16int2 b = {4, i};
    min10float3 c = {6.4, 7, i};
    min12int d = 9.4;
    min16uint4x2 e = {14.4, 15, 16, 17, 18, 19, 20, i};

    return mul(e, b) + a + c.xyzx + d;
}
```
However, if the graphics driver doesn't have minimum precision support, it ignores the minimum precision indicators and runs at 32-bit precision, which is equivalent to working with the regular types.

```diff
---
 libs/vkd3d-shader/hlsl.c            | 65 ++++++++++++++++++++++++++++-
 libs/vkd3d-shader/hlsl.h            |  2 +
 libs/vkd3d-shader/hlsl.y            | 12 ++++++
 tests/minimum-precision.shader_test |  6 +--
 4 files changed, 80 insertions(+), 5 deletions(-)
```
```diff
diff --git a/libs/vkd3d-shader/hlsl.c b/libs/vkd3d-shader/hlsl.c
index 8aa289ac..5e8430cb 100644
--- a/libs/vkd3d-shader/hlsl.c
+++ b/libs/vkd3d-shader/hlsl.c
@@ -2349,7 +2349,7 @@ static int compare_function_rb(const void *key, const struct rb_entry *entry)
 
 static void declare_predefined_types(struct hlsl_ctx *ctx)
 {
-    unsigned int x, y, bt, i;
+    unsigned int x, y, bt, i, v;
     struct hlsl_type *type;
 
     static const char * const names[] =
@@ -2361,7 +2361,11 @@ static void declare_predefined_types(struct hlsl_ctx *ctx)
         "uint", "bool",
     };
-    char name[10];
+    char name[15];
+
+    const char *variants_float[] = {"min10float", "min16float"};
+    const char *variants_int[] = {"min12int", "min16int"};
+    const char *variants_uint[] = {"min16uint"};
 
     static const char *const sampler_names[] =
     {
@@ -2421,6 +2425,63 @@ static void declare_predefined_types(struct hlsl_ctx *ctx)
         }
     }
 
+    for (bt = 0; bt <= HLSL_TYPE_LAST_SCALAR; ++bt)
+    {
+        unsigned int n_variants = 0;
+        const char **variants;
+
+        switch (bt)
+        {
+            case HLSL_TYPE_FLOAT:
+                variants = variants_float;
+                n_variants = ARRAY_SIZE(variants_float);
+                break;
+
+            case HLSL_TYPE_INT:
+                variants = variants_int;
+                n_variants = ARRAY_SIZE(variants_int);
+                break;
+
+            case HLSL_TYPE_UINT:
+                variants = variants_uint;
+                n_variants = ARRAY_SIZE(variants_uint);
+                break;
+
+            default:
+                break;
+        }
+
+        for (v = 0; v < n_variants; ++v)
+        {
+            for (y = 1; y <= 4; ++y)
+            {
+                for (x = 1; x <= 4; ++x)
+                {
+                    sprintf(name, "%s%ux%u", variants[v], y, x);
+                    type = hlsl_new_type(ctx, name, HLSL_CLASS_MATRIX, bt, x, y);
+                    type->is_minimum_precision = 1;
+                    hlsl_scope_add_type(ctx->globals, type);
+
+                    if (y == 1)
+                    {
+                        sprintf(name, "%s%u", variants[v], x);
+                        type = hlsl_new_type(ctx, name, HLSL_CLASS_VECTOR, bt, x, y);
+                        type->is_minimum_precision = 1;
+                        hlsl_scope_add_type(ctx->globals, type);
+
+                        if (x == 1)
+                        {
+                            sprintf(name, "%s", variants[v]);
+                            type = hlsl_new_type(ctx, name, HLSL_CLASS_SCALAR, bt, x, y);
+                            type->is_minimum_precision = 1;
+                            hlsl_scope_add_type(ctx->globals, type);
+                        }
+                    }
+                }
+            }
+        }
+    }
+
     for (bt = 0; bt <= HLSL_SAMPLER_DIM_LAST_SAMPLER; ++bt)
     {
         type = hlsl_new_type(ctx, sampler_names[bt], HLSL_CLASS_OBJECT, HLSL_TYPE_SAMPLER, 1, 1);
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h
index b6a593ca..9664dc7a 100644
--- a/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d-shader/hlsl.h
@@ -142,6 +142,8 @@ struct hlsl_type
 
     unsigned int reg_size;
     size_t bytecode_offset;
+
+    uint32_t is_minimum_precision : 1;
 };
 
 struct hlsl_semantic
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y
index eedc85bd..f16253b7 100644
--- a/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d-shader/hlsl.y
@@ -3911,6 +3911,18 @@ type:
     | TYPE_IDENTIFIER
         {
             $$ = hlsl_get_type(ctx->cur_scope, $1, true);
+            if ($$->is_minimum_precision)
+            {
+                if (ctx->profile->major_version < 4)
+                {
+                    hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
+                            "Target profile doesn't support minimum-precision types.");
+                }
+                else
+                {
+                    WARN("Reinterpreting type %s.\n", $$->name);
+                }
+            }
             vkd3d_free($1);
         }
     | KW_STRUCT TYPE_IDENTIFIER
diff --git a/tests/minimum-precision.shader_test b/tests/minimum-precision.shader_test
index 3e4c5d4f..5212f8ad 100644
--- a/tests/minimum-precision.shader_test
+++ b/tests/minimum-precision.shader_test
@@ -2,7 +2,7 @@
 shader model >= 4.0
 
 
-[pixel shader todo]
+[pixel shader]
 float4 main() : sv_target
 {
     min16float4 a = {0, 1, 2, 3};
@@ -15,5 +15,5 @@ float4 main() : sv_target
 }
 
 [test]
-todo draw quad
-todo probe all rgba (146.0, 166.0, 186.0, 203.0)
+draw quad
+probe all rgba (146.0, 166.0, 186.0, 203.0)
```
This is probably fine and I don't mind accepting it as-is.
That said, maybe we could go one more step and actually make these separate HLSL_TYPE_* types? And then map them to float anywhere they're used, the same way we do with half types (although we're definitely missing that in a lot of places too). I only mention this because this commit has a decent amount of code that's going to have to be deleted and rewritten, but I think doing that would involve less deletion.
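For what it's worth, a rough sketch of what that alternative could look like is below; the enum entries and the helper are hypothetical and trimmed down (the real hlsl_base_type has more members), and only illustrate the "map them to their regular counterpart anywhere they're used" idea:

```c
/* Hypothetical sketch only: give each minimum-precision type its own base
 * type, then fold it into the regular type wherever expression types are
 * resolved, the way half is folded into float. */
enum hlsl_base_type
{
    HLSL_TYPE_FLOAT,
    HLSL_TYPE_HALF,
    HLSL_TYPE_INT,
    HLSL_TYPE_UINT,
    HLSL_TYPE_BOOL,
    /* hypothetical additions */
    HLSL_TYPE_MIN16FLOAT,
    HLSL_TYPE_MIN10FLOAT,
    HLSL_TYPE_MIN16INT,
    HLSL_TYPE_MIN12INT,
    HLSL_TYPE_MIN16UINT,
};

/* Map a minimum-precision base type to the regular type it is computed as
 * when minimum precision is not honoured. */
static enum hlsl_base_type base_type_widen(enum hlsl_base_type t)
{
    switch (t)
    {
        case HLSL_TYPE_MIN16FLOAT:
        case HLSL_TYPE_MIN10FLOAT:
            return HLSL_TYPE_FLOAT;
        case HLSL_TYPE_MIN16INT:
        case HLSL_TYPE_MIN12INT:
            return HLSL_TYPE_INT;
        case HLSL_TYPE_MIN16UINT:
            return HLSL_TYPE_UINT;
        default:
            return t;
    }
}
```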
Zebediah Figura (@zfigura) commented about libs/vkd3d-shader/hlsl.c:
"uint", "bool", };
- char name[10];
- char name[15];
- const char *variants_float[] = {"min10float", "min16float"};
- const char *variants_int[] = {"min12int", "min16int"};
- const char *variants_uint[] = {"min16uint"};
"static const char *const"
Zebediah Figura (@zfigura) commented about libs/vkd3d-shader/hlsl.y:
```
    | TYPE_IDENTIFIER
        {
            $$ = hlsl_get_type(ctx->cur_scope, $1, true);
            if ($$->is_minimum_precision)
            {
                if (ctx->profile->major_version < 4)
                {
                    hlsl_error(ctx, &@1, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE,
                            "Target profile doesn't support minimum-precision types.");
                }
                else
                {
                    WARN("Reinterpreting type %s.\n", $$->name);
```
Personally I think this should be a FIXME, not a WARN.
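In other words, the suggested change presumably amounts to something like this (FIXME being the usual vkd3d debug macro for flagging behaviour that still needs proper support):

```diff
-                    WARN("Reinterpreting type %s.\n", $$->name);
+                    FIXME("Reinterpreting type %s.\n", $$->name);
```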