Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com> --- libs/vkd3d-shader/hlsl.c | 4 ++-- libs/vkd3d-shader/hlsl.h | 28 ++++++++++++++++++++++++++++ libs/vkd3d-shader/hlsl.y | 28 ++++++++++++---------------- 3 files changed, 42 insertions(+), 18 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.c b/libs/vkd3d-shader/hlsl.c index 9bce6beb..421aaf27 100644 --- a/libs/vkd3d-shader/hlsl.c +++ b/libs/vkd3d-shader/hlsl.c @@ -560,7 +560,7 @@ struct hlsl_ir_constant *hlsl_new_uint_constant(struct hlsl_ctx *ctx, unsigned i
if (!(c = hlsl_alloc(ctx, sizeof(*c)))) return NULL; - init_node(&c->node, HLSL_IR_CONSTANT, ctx->builtin_types.scalar[HLSL_TYPE_UINT], loc); + init_node(&c->node, HLSL_IR_CONSTANT, get_scalar_type(ctx, HLSL_TYPE_UINT), loc); c->value[0].u = n; return c; } @@ -650,7 +650,7 @@ struct hlsl_ir_swizzle *hlsl_new_swizzle(struct hlsl_ctx *ctx, DWORD s, unsigned if (!(swizzle = hlsl_alloc(ctx, sizeof(*swizzle)))) return NULL; init_node(&swizzle->node, HLSL_IR_SWIZZLE, - ctx->builtin_types.vector[val->data_type->base_type][components - 1], *loc); + get_vector_type(ctx, val->data_type->base_type, components), *loc); hlsl_src_from_node(&swizzle->val, val); swizzle->swizzle = s; return swizzle; diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h index e401971e..2e6b1bf7 100644 --- a/libs/vkd3d-shader/hlsl.h +++ b/libs/vkd3d-shader/hlsl.h @@ -629,6 +629,34 @@ static inline void hlsl_release_string_buffer(struct hlsl_ctx *ctx, struct vkd3d vkd3d_string_buffer_release(&ctx->string_buffers, buffer); }
+static inline struct hlsl_type *get_scalar_type(const struct hlsl_ctx *ctx, enum hlsl_base_type base_type) +{ + return ctx->builtin_types.scalar[base_type]; +} + +static inline struct hlsl_type *get_vector_type(const struct hlsl_ctx *ctx, enum hlsl_base_type base_type, + unsigned int dimx) +{ + return ctx->builtin_types.vector[base_type][dimx - 1]; +} + +static inline struct hlsl_type *get_matrix_type(const struct hlsl_ctx *ctx, enum hlsl_base_type base_type, + unsigned int dimx, unsigned int dimy) +{ + return ctx->builtin_types.matrix[base_type][dimx - 1][dimy - 1]; +} + +static inline struct hlsl_type *get_numeric_type(const struct hlsl_ctx *ctx, enum hlsl_type_class type, + enum hlsl_base_type base_type, unsigned int dimx, unsigned int dimy) +{ + if (type == HLSL_CLASS_SCALAR) + return get_scalar_type(ctx, base_type); + else if (type == HLSL_CLASS_VECTOR) + return get_vector_type(ctx, base_type, dimx); + else + return get_matrix_type(ctx, base_type, dimx, dimy); +} + const char *debug_hlsl_expr_op(enum hlsl_ir_expr_op op); const char *debug_hlsl_type(struct hlsl_ctx *ctx, const struct hlsl_type *type); const char *debug_hlsl_writemask(unsigned int writemask); diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index ae19cd1a..addb9c59 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1021,11 +1021,7 @@ static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type } }
- if (type == HLSL_CLASS_SCALAR) - return ctx->builtin_types.scalar[base]; - if (type == HLSL_CLASS_VECTOR) - return ctx->builtin_types.vector[base][dimx - 1]; - return ctx->builtin_types.matrix[base][dimx - 1][dimy - 1]; + return get_numeric_type(ctx, type, base, dimx, dimy); }
static struct hlsl_ir_expr *add_expr(struct hlsl_ctx *ctx, struct list *instrs, enum hlsl_ir_expr_op op, @@ -1761,7 +1757,7 @@ static struct list *add_constructor(struct hlsl_ctx *ctx, struct hlsl_type *type }
if (!(arg = add_implicit_conversion(ctx, params->instrs, arg, - ctx->builtin_types.vector[type->base_type][width - 1], &arg->loc))) + get_vector_type(ctx, type->base_type, width), &arg->loc))) continue;
if (!(store = hlsl_new_store(ctx, var, NULL, arg, @@ -1816,9 +1812,9 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl if (params->args_count >= 2) FIXME("Ignoring index and/or offset parameter(s).\n");
- /* -1 for zero-indexing; +1 for the mipmap level */ + /* +1 for the mipmap level */ if (!(coords = add_implicit_conversion(ctx, instrs, params->args[0], - ctx->builtin_types.vector[HLSL_TYPE_INT][sampler_dim - 1 + 1], loc))) + get_vector_type(ctx, HLSL_TYPE_INT, sampler_dim + 1), loc))) return false;
if (!(load = hlsl_new_resource_load(ctx, object_type->e.resource_format, HLSL_RESOURCE_LOAD, @@ -2500,7 +2496,7 @@ type: YYABORT; }
- $$ = ctx->builtin_types.vector[$3->base_type][$5 - 1]; + $$ = get_vector_type(ctx, $3->base_type, $5); } | KW_MATRIX '<' type ',' C_INTEGER ',' C_INTEGER '>' { @@ -2528,7 +2524,7 @@ type: YYABORT; }
- $$ = ctx->builtin_types.matrix[$3->base_type][$7 - 1][$5 - 1]; + $$ = get_matrix_type(ctx, $3->base_type, $7, $5); } | KW_VOID { @@ -2560,7 +2556,7 @@ type: } | texture_type { - $$ = hlsl_new_texture_type(ctx, $1, ctx->builtin_types.vector[HLSL_TYPE_FLOAT][4 - 1]); + $$ = hlsl_new_texture_type(ctx, $1, get_vector_type(ctx, HLSL_TYPE_FLOAT, 4)); } | texture_type '<' type '>' { @@ -2971,7 +2967,7 @@ primary_expr:
if (!(c = hlsl_alloc(ctx, sizeof(*c)))) YYABORT; - init_node(&c->node, HLSL_IR_CONSTANT, ctx->builtin_types.scalar[HLSL_TYPE_FLOAT], @1); + init_node(&c->node, HLSL_IR_CONSTANT, get_scalar_type(ctx, HLSL_TYPE_FLOAT), @1); c->value[0].f = $1; if (!($$ = make_list(ctx, &c->node))) YYABORT; @@ -2982,7 +2978,7 @@ primary_expr:
if (!(c = hlsl_alloc(ctx, sizeof(*c)))) YYABORT; - init_node(&c->node, HLSL_IR_CONSTANT, ctx->builtin_types.scalar[HLSL_TYPE_INT], @1); + init_node(&c->node, HLSL_IR_CONSTANT, get_scalar_type(ctx, HLSL_TYPE_INT), @1); c->value[0].i = $1; if (!($$ = make_list(ctx, &c->node))) YYABORT; @@ -2993,7 +2989,7 @@ primary_expr:
if (!(c = hlsl_alloc(ctx, sizeof(*c)))) YYABORT; - init_node(&c->node, HLSL_IR_CONSTANT, ctx->builtin_types.scalar[HLSL_TYPE_BOOL], @1); + init_node(&c->node, HLSL_IR_CONSTANT, get_scalar_type(ctx, HLSL_TYPE_BOOL), @1); c->value[0].b = $1; if (!($$ = make_list(ctx, &c->node))) YYABORT; @@ -3030,7 +3026,7 @@ primary_expr: struct hlsl_ir_var *var;
if (!(var = hlsl_new_synthetic_var(ctx, "<state-block-expr>", - ctx->builtin_types.scalar[HLSL_TYPE_INT], @1))) + get_scalar_type(ctx, HLSL_TYPE_INT), @1))) YYABORT; if (!(load = hlsl_new_var_load(ctx, var, @1))) YYABORT; @@ -3116,7 +3112,7 @@ postfix_expr: YYABORT; }
- if (!(cast = hlsl_new_cast(ctx, index, ctx->builtin_types.scalar[HLSL_TYPE_UINT], &index->loc))) + if (!(cast = hlsl_new_cast(ctx, index, get_scalar_type(ctx, HLSL_TYPE_UINT), &index->loc))) { destroy_instr_list($1); YYABORT;
Function expr_common_shape can be used for boolean operators, for which a common shape must be determined even if the base type of the result is always bool.
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com> --- libs/vkd3d-shader/hlsl.y | 82 +++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 38 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index addb9c59..05573739 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -912,13 +912,9 @@ static enum hlsl_base_type expr_common_base_type(enum hlsl_base_type t1, enum hl return HLSL_TYPE_INT; }
-static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2, - struct vkd3d_shader_location *loc) +static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2, + struct vkd3d_shader_location *loc, enum hlsl_type_class *type, unsigned int *dimx, unsigned int *dimy) { - enum hlsl_type_class type; - enum hlsl_base_type base; - unsigned int dimx, dimy; - if (t1->type > HLSL_CLASS_LAST_NUMERIC) { struct vkd3d_string_buffer *string; @@ -927,7 +923,7 @@ static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type hlsl_error(ctx, *loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, "Expression of type \"%s\" cannot be used in a numeric expression.", string->buffer); hlsl_release_string_buffer(ctx, string); - return NULL; + return false; }
if (t2->type > HLSL_CLASS_LAST_NUMERIC) { struct vkd3d_string_buffer *string; @@ -938,12 +934,9 @@ static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type hlsl_error(ctx, *loc, VKD3D_SHADER_ERROR_HLSL_INVALID_TYPE, "Expression of type \"%s\" cannot be used in a numeric expression.", string->buffer); hlsl_release_string_buffer(ctx, string); - return NULL; + return false; }
- if (hlsl_types_are_equal(t1, t2)) - return t1; - if (!expr_compatible_data_types(t1, t2)) { struct vkd3d_string_buffer *t1_string = hlsl_type_to_string(ctx, t1); @@ -955,28 +948,26 @@ static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type t1_string->buffer, t2_string->buffer); hlsl_release_string_buffer(ctx, t1_string); hlsl_release_string_buffer(ctx, t2_string); - return NULL; + return false; }
- base = expr_common_base_type(t1->base_type, t2->base_type); - if (t1->dimx == 1 && t1->dimy == 1) { - type = t2->type; - dimx = t2->dimx; - dimy = t2->dimy; + *type = t2->type; + *dimx = t2->dimx; + *dimy = t2->dimy; } else if (t2->dimx == 1 && t2->dimy == 1) { - type = t1->type; - dimx = t1->dimx; - dimy = t1->dimy; + *type = t1->type; + *dimx = t1->dimx; + *dimy = t1->dimy; } else if (t1->type == HLSL_CLASS_MATRIX && t2->type == HLSL_CLASS_MATRIX) { - type = HLSL_CLASS_MATRIX; - dimx = min(t1->dimx, t2->dimx); - dimy = min(t1->dimy, t2->dimy); + *type = HLSL_CLASS_MATRIX; + *dimx = min(t1->dimx, t2->dimx); + *dimy = min(t1->dimy, t2->dimy); } else { @@ -987,40 +978,55 @@ static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type max_dim_2 = max(t2->dimx, t2->dimy); if (t1->dimx * t1->dimy == t2->dimx * t2->dimy) { - type = HLSL_CLASS_VECTOR; - dimx = max(t1->dimx, t2->dimx); - dimy = 1; + *type = HLSL_CLASS_VECTOR; + *dimx = max(t1->dimx, t2->dimx); + *dimy = 1; } else if (max_dim_1 <= max_dim_2) { - type = t1->type; - if (type == HLSL_CLASS_VECTOR) + *type = t1->type; + if (*type == HLSL_CLASS_VECTOR) { - dimx = max_dim_1; - dimy = 1; + *dimx = max_dim_1; + *dimy = 1; } else { - dimx = t1->dimx; - dimy = t1->dimy; + *dimx = t1->dimx; + *dimy = t1->dimy; } } else { - type = t2->type; - if (type == HLSL_CLASS_VECTOR) + *type = t2->type; + if (*type == HLSL_CLASS_VECTOR) { - dimx = max_dim_2; - dimy = 1; + *dimx = max_dim_2; + *dimy = 1; } else { - dimx = t2->dimx; - dimy = t2->dimy; + *dimx = t2->dimx; + *dimy = t2->dimy; } } }
+ return true; +} + +static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2, + struct vkd3d_shader_location *loc) +{ + enum hlsl_type_class type; + enum hlsl_base_type base; + unsigned int dimx, dimy; + + if (!expr_common_shape(ctx, t1, t2, loc, &type, &dimx, &dimy)) + return NULL; + + base = expr_common_base_type(t1->base_type, t2->base_type); + return get_numeric_type(ctx, type, base, dimx, dimy); }
On 10/14/21 8:37 AM, Giovanni Mascellani wrote:
Function expr_common_shape can be used for boolean operators, for which a common shape must be determined even if the base type of the result is always bool.
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com>
libs/vkd3d-shader/hlsl.y | 82 +++++++++++++++++++++------------------- 1 file changed, 44 insertions(+), 38 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index addb9c59..05573739 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -912,13 +912,9 @@ static enum hlsl_base_type expr_common_base_type(enum hlsl_base_type t1, enum hl return HLSL_TYPE_INT; }
-static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2,
struct vkd3d_shader_location *loc)
+static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2,
struct vkd3d_shader_location *loc, enum hlsl_type_class *type, unsigned int *dimx, unsigned int *dimy)
Would it make more sense to instead pass the result of expr_common_base_type() to expr_common_shape()? I.e. something like
struct hlsl_type *expr_common_shape(ctx, t1, t2, expr_common_base_type(t1, t2));
or
struct hlsl_type *expr_common_shape(ctx, t1, t2, HLSL_TYPE_BOOL);
One of those things I think of when I see a function with a lot of output parameters :-)
Hi,
Il 14/10/21 18:09, Zebediah Figura ha scritto:
Would it make more sense to instead pass the result of expr_common_base_type() to expr_common_shape()? I.e. something like
struct hlsl_type *expr_common_shape(ctx, t1, t2, expr_common_base_type(t1, t2));
or
struct hlsl_type *expr_common_shape(ctx, t1, t2, HLSL_TYPE_BOOL);
One of those things I think of when I see a function with a lot of output parameters :-)
I can do that, if you really want, but I like it less. First because IMHO it uselessly subverts the dependencies between different pieces of data: the process of computing the result type consists of two parts, determining the base type and determining the shape. In principle they are nicely independent (well, I think HLSL has some very awful corner cases in which this does not happen, but for the moment we're pretending only the nice part exists), while reading the code you're proposing I would be led to think that determining the shape depends on which base type you worked out. I find this confusing.
Also, this means that, for example in the case of the comparison operations of patch 6/6, you have to call expr_common_shape twice, with two different base types. Ok, not a big deal, but why make the code more confusing by computing the same thing twice?
I don't like either passing three pointers to have three outputs from a function, but that's what you get when your language doesn't have decent support for anonymous product types (tuples). In Python the same thing would have been very straightforward and idiomatic. And defining a struct just to pass three values seems a bit excessive.
Does this make sense to you?
Thanks for the review, Giovanni.
On 10/14/21 12:37 PM, Giovanni Mascellani wrote:
Hi,
Il 14/10/21 18:09, Zebediah Figura ha scritto:
Would it make more sense to instead pass the result of expr_common_base_type() to expr_common_shape()? I.e. something like
struct hlsl_type *expr_common_shape(ctx, t1, t2, expr_common_base_type(t1, t2));
or
struct hlsl_type *expr_common_shape(ctx, t1, t2, HLSL_TYPE_BOOL);
One of those things I think of when I see a function with a lot of output parameters :-)
I can do that, if you really want, but I like it less. First because IMHO it uselessly subverts the dependencies between different pieces of data: the process of computing the result type consists of two parts, determining the base type and determining the shape. In principle they are nicely independent (well, I think HLSL has some very awful corner cases in which this does not happen, but for the moment we're pretending only the nice part exists), while reading the code you're proposing I would be led to think that determining the shape depends on which base type you worked out. I find this confusing.
In a sense, yeah, although I probably wouldn't call it expr_common_shape() either, which is probably part of the confusion.
Also, this means that, for example in the case of the comparison operations of patch 6/6, you have to call expr_common_shape twice, with two different base types. Ok, not a big deal, but why make the code more confusing for computing twice the same thing?
Hmm, I suppose so...
Granted, you don't *have* to call expr_common_shape() twice; you can take the prior type and change it to BOOL.
I don't like either passing three pointers to have three outputs from a function, but that's what you get when your language doesn't have decent support for anonymous product types (tuples). In Python the same thing would have been very straightforward and idiomatic. And defining a struct just to pass three values seems a bit excessive.
Well, that or proper out parameters, I guess, like HLSL ;-)
Not that HLSL is a pinnacle of good language design, but I'm sure if this weren't C I'd find it more idiomatic.
As it is I still have a mild preference for avoiding the output parameters, just for the sake of what I see as idiomatic code, but I won't object if you keep it the way it is.
Hi,
Il 15/10/21 04:39, Zebediah Figura ha scritto:
Not that HLSL is a pinnacle of good language design, but I'm sure if this weren't C I'd find it more idiomatic.
That's funny: passing pointer is precisely what I would consider the idiomatic C way to return more than one value. Even for HLSL, out parameter essentially have the semantics of passing by reference instead of by value, AFAIK.
(we still have the Microsoft way, though: return type << 16 | dimx << 8 | dimy, would that be better? :-P)
As it is I still have a mild preference for avoiding the output parameters, just for the sake of what I see as idiomatic code, but I won't object if you keep it the way it is.
I think in this case I'd prefer to keep it the way it is, but I'll concede for 5/6, which hopefully can be a good compromise for you.
Giovanni.
When t1 is a vector type, it's already supposed to have dimx == max_dim_1 and dimy == 1, and the same for t2.
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com> --- libs/vkd3d-shader/hlsl.y | 24 ++++-------------------- 1 file changed, 4 insertions(+), 20 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 05573739..3119380c 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -985,30 +985,14 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct else if (max_dim_1 <= max_dim_2) { *type = t1->type; - if (*type == HLSL_CLASS_VECTOR) - { - *dimx = max_dim_1; - *dimy = 1; - } - else - { - *dimx = t1->dimx; - *dimy = t1->dimy; - } + *dimx = t1->dimx; + *dimy = t1->dimy; } else { *type = t2->type; - if (*type == HLSL_CLASS_VECTOR) - { - *dimx = max_dim_2; - *dimy = 1; - } - else - { - *dimx = t2->dimx; - *dimy = t2->dimy; - } + *dimx = t2->dimx; + *dimy = t2->dimy; } }
Looks correct to me. The amount of thinking I had to do to make sure of that is a good sign that this cleanup really is needed :-/
Matrices of shape 1xN always decay to vectors of length N.
Moreover, the assumption about the size of matrices is not correct: it is legitimate to compose a matrix 2x2 with a vector of length 4, in which case it appears that the result has the shape of the first (leftmost) operand. Even for matrices 1xN or Nx1, the result is not always a vector: in general it has the shape of the first operand again, which gets possibly decayed to a vector by the rule above.
This algorithm is unfortunately not complete yet: sometimes, but not always, vectors of length 1 are decayed to scalars.
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com> --- libs/vkd3d-shader/hlsl.y | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 3119380c..111db8cc 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -971,18 +971,7 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct } else { - /* Two vectors or a vector and a matrix (matrix must be 1xn or nx1) */ - unsigned int max_dim_1, max_dim_2; - - max_dim_1 = max(t1->dimx, t1->dimy); - max_dim_2 = max(t2->dimx, t2->dimy); - if (t1->dimx * t1->dimy == t2->dimx * t2->dimy) - { - *type = HLSL_CLASS_VECTOR; - *dimx = max(t1->dimx, t2->dimx); - *dimy = 1; - } - else if (max_dim_1 <= max_dim_2) + if (t1->dimx * t1->dimy <= t2->dimx * t2->dimy) { *type = t1->type; *dimx = t1->dimx; @@ -996,6 +985,9 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct } }
+ if (*type == HLSL_CLASS_MATRIX && *dimy == 1) + *type = HLSL_CLASS_VECTOR; + return true; }
On 10/14/21 8:37 AM, Giovanni Mascellani wrote:
Matrices of shape 1xN always decay to vectors of length N.
Moreover, the assumption about the size of matrices is not correct: it is legitimate to compose a matrix 2x2 with a vector of length 4, in which case it appears that the result has the shape of the first (leftmost) operand. Even for matrices 1xN or Nx1, the result is not always a vector: in general it has the shape of the first operand again, which gets possibly decayed to a vector by the rule above.
"Fix X" is not usually a very good commit message, and here as often it is a sign that multiple things are being done at once.
I.e. I think the "matrices decay to vectors" part makes sense as a separate commit (e.g. I guess float4x1 + float4x2 = float4 would be fixed by that case).
This algorithm is unfortunately not complete yet: sometimes, but not always, vectors of length 1 are decayed to scalars.
Signed-off-by: Giovanni Mascellani <gmascellani@codeweavers.com>
libs/vkd3d-shader/hlsl.y | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 3119380c..111db8cc 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -971,18 +971,7 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct } else {
/* Two vectors or a vector and a matrix (matrix must be 1xn or nx1) */
unsigned int max_dim_1, max_dim_2;
max_dim_1 = max(t1->dimx, t1->dimy);
max_dim_2 = max(t2->dimx, t2->dimy);
if (t1->dimx * t1->dimy == t2->dimx * t2->dimy)
{
*type = HLSL_CLASS_VECTOR;
*dimx = max(t1->dimx, t2->dimx);
*dimy = 1;
}
else if (max_dim_1 <= max_dim_2)
if (t1->dimx * t1->dimy <= t2->dimx * t2->dimy) { *type = t1->type; *dimx = t1->dimx;
@@ -996,6 +985,9 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct } }
+    if (*type == HLSL_CLASS_MATRIX && *dimy == 1)
+        *type = HLSL_CLASS_VECTOR;
+
     return true;
 }
In HLSL the rules used to determine the type of a composite expression change depending on the operator. This commit changes add_expr so that it accepts both the types to which the operands have to be casted and the type that the final expression must have (which is not necessarily the cast type).
For example, when comparing a half with an int, both of them will have to be casted to float, though the type of the expression as a whole will be bool.
Similarly, it is not always the case that all operands have to be casted to the same type, therefore the interface leaves room for specifying a different cast type for each operand. For example, the first operand of the ternary operator must be casted to bool, while the other two must be casted to their common type, which is then the output type of the whole expression.
The add_*_last helpers behave similarly as their counterpart without "_last", except that the last node in each instruction list is used as an argument.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 144 ++++++++++++++++++--------------------- 1 file changed, 66 insertions(+), 78 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index 111db8cc..baff757e 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -991,52 +991,28 @@ static bool expr_common_shape(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct return true; }
-static struct hlsl_type *expr_common_type(struct hlsl_ctx *ctx, struct hlsl_type *t1, struct hlsl_type *t2, - struct vkd3d_shader_location *loc) -{ - enum hlsl_type_class type; - enum hlsl_base_type base; - unsigned int dimx, dimy; - - if (!expr_common_shape(ctx, t1, t2, loc, &type, &dimx, &dimy)) - return NULL; - - base = expr_common_base_type(t1->base_type, t2->base_type); - - return get_numeric_type(ctx, type, base, dimx, dimy); -} - static struct hlsl_ir_expr *add_expr(struct hlsl_ctx *ctx, struct list *instrs, enum hlsl_ir_expr_op op, - struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS], struct vkd3d_shader_location *loc) + struct hlsl_ir_node *operands[HLSL_MAX_OPERANDS], struct hlsl_type *cast_types[HLSL_MAX_OPERANDS], + struct hlsl_type *type, struct vkd3d_shader_location *loc) { struct hlsl_ir_expr *expr; - struct hlsl_type *type; unsigned int i;
- type = operands[0]->data_type; - for (i = 1; i < HLSL_MAX_OPERANDS; ++i) - { - if (!operands[i]) - break; - type = expr_common_type(ctx, type, operands[i]->data_type, loc); - if (!type) - return NULL; - } for (i = 0; i < HLSL_MAX_OPERANDS; ++i) { struct hlsl_ir_expr *cast;
if (!operands[i]) break; - if (hlsl_types_are_equal(operands[i]->data_type, type)) + if (hlsl_types_are_equal(operands[i]->data_type, cast_types[i])) continue; if (operands[i]->data_type->dimx * operands[i]->data_type->dimy != 1 - && operands[i]->data_type->dimx * operands[i]->data_type->dimy != type->dimx * type->dimy) + && operands[i]->data_type->dimx * operands[i]->data_type->dimy != cast_types[i]->dimx * cast_types[i]->dimy) hlsl_warning(ctx, operands[i]->loc, VKD3D_SHADER_WARNING_HLSL_IMPLICIT_TRUNCATION, "Implicit truncation of %s type.", operands[i]->data_type->type == HLSL_CLASS_VECTOR ? "vector" : "matrix");
- if (!(cast = hlsl_new_cast(ctx, operands[i], type, &operands[i]->loc))) + if (!(cast = hlsl_new_cast(ctx, operands[i], cast_types[i], &operands[i]->loc))) return NULL; list_add_after(&operands[i]->entry, &cast->node.entry); operands[i] = &cast->node; @@ -1053,26 +1029,52 @@ static struct hlsl_ir_expr *add_expr(struct hlsl_ctx *ctx, struct list *instrs, return expr; }
-static struct list *add_unary_expr(struct hlsl_ctx *ctx, struct list *instrs, - enum hlsl_ir_expr_op op, struct vkd3d_shader_location loc) +static struct hlsl_ir_expr *add_unary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs, + enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg, struct vkd3d_shader_location *loc) { - struct hlsl_ir_node *expr; + struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {arg}; + struct hlsl_type *cast_types[HLSL_MAX_OPERANDS] = {arg->data_type}; + + return add_expr(ctx, instrs, op, args, cast_types, arg->data_type, loc); +} + +static struct list *add_unary_arithmetic_expr_last(struct hlsl_ctx *ctx, struct list *instrs, + enum hlsl_ir_expr_op op, struct vkd3d_shader_location *loc) +{ + add_unary_arithmetic_expr(ctx, instrs, op, node_from_list(instrs), loc);
- if (!(expr = hlsl_new_unary_expr(ctx, op, node_from_list(instrs), loc))) - return NULL; - list_add_tail(instrs, &expr->entry); return instrs; }
-static struct list *add_binary_expr(struct hlsl_ctx *ctx, struct list *list1, struct list *list2, - enum hlsl_ir_expr_op op, struct vkd3d_shader_location loc) +static struct hlsl_ir_expr *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs, + enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2, + struct vkd3d_shader_location *loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {node_from_list(list1), node_from_list(list2)}; + struct hlsl_type *cast_types[HLSL_MAX_OPERANDS]; + enum hlsl_base_type base = expr_common_base_type(arg1->data_type->base_type, arg2->data_type->base_type); + enum hlsl_type_class type; + unsigned int dimx, dimy; + struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {arg1, arg2}; + + if (!expr_common_shape(ctx, arg1->data_type, arg2->data_type, loc, &type, &dimx, &dimy)) + return NULL; + + cast_types[0] = get_numeric_type(ctx, type, base, dimx, dimy); + cast_types[1] = cast_types[0];
- list_move_tail(list1, list2); - vkd3d_free(list2); - add_expr(ctx, list1, op, args, &loc); - return list1; + return add_expr(ctx, instrs, op, args, cast_types, cast_types[0], loc); +} + +static struct list *add_binary_arithmetic_expr_last(struct hlsl_ctx *ctx, struct list *instrs1, struct list *instrs2, + enum hlsl_ir_expr_op op, struct vkd3d_shader_location *loc) +{ + struct hlsl_ir_node *arg1 = node_from_list(instrs1), *arg2 = node_from_list(instrs2); + + list_move_tail(instrs1, instrs2); + vkd3d_free(instrs2); + add_binary_arithmetic_expr(ctx, instrs1, op, arg1, arg2, loc); + + return instrs1; }
static enum hlsl_ir_expr_op op_from_assignment(enum parse_assign_op op) @@ -1141,22 +1143,20 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
if (assign_op == ASSIGN_OP_SUB) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {rhs}; struct hlsl_ir_expr *expr;
- if (!(expr = add_expr(ctx, instrs, HLSL_OP1_NEG, args, &rhs->loc))) + if (!(expr = add_unary_arithmetic_expr(ctx, instrs, HLSL_OP1_NEG, rhs, &rhs->loc))) return NULL; rhs = &expr->node; assign_op = ASSIGN_OP_ADD; } if (assign_op != ASSIGN_OP_ASSIGN) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {lhs, rhs}; enum hlsl_ir_expr_op op = op_from_assignment(assign_op); struct hlsl_ir_expr *expr;
assert(op); - if (!(expr = add_expr(ctx, instrs, op, args, &rhs->loc))) + if (!(expr = add_binary_arithmetic_expr(ctx, instrs, op, lhs, rhs, &rhs->loc))) return NULL; rhs = &expr->node; } @@ -1557,37 +1557,29 @@ static const struct hlsl_ir_function_decl *find_function_call(struct hlsl_ctx *c static bool intrinsic_abs(struct hlsl_ctx *ctx, const struct parse_initializer *params, struct vkd3d_shader_location loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {params->args[0]}; - - return !!add_expr(ctx, params->instrs, HLSL_OP1_ABS, args, &loc); + return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_ABS, params->args[0], &loc); }
static bool intrinsic_clamp(struct hlsl_ctx *ctx, const struct parse_initializer *params, struct vkd3d_shader_location loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {params->args[0], params->args[1]}; struct hlsl_ir_expr *max;
- if (!(max = add_expr(ctx, params->instrs, HLSL_OP2_MAX, args, &loc))) + if (!(max = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MAX, params->args[0], params->args[1], &loc))) return false;
- args[0] = &max->node; - args[1] = params->args[2]; - return !!add_expr(ctx, params->instrs, HLSL_OP2_MIN, args, &loc); + return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MIN, &max->node, params->args[2], &loc); }
static bool intrinsic_max(struct hlsl_ctx *ctx, const struct parse_initializer *params, struct vkd3d_shader_location loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {params->args[0], params->args[1]}; - - return !!add_expr(ctx, params->instrs, HLSL_OP2_MAX, args, &loc); + return !!add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MAX, params->args[0], params->args[1], &loc); }
static bool intrinsic_pow(struct hlsl_ctx *ctx, const struct parse_initializer *params, struct vkd3d_shader_location loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {0}; struct hlsl_ir_node *log, *exp; struct hlsl_ir_expr *mul;
@@ -1595,9 +1587,7 @@ static bool intrinsic_pow(struct hlsl_ctx *ctx, return false; list_add_tail(params->instrs, &log->entry);
- args[0] = params->args[1]; - args[1] = log; - if (!(mul = add_expr(ctx, params->instrs, HLSL_OP2_MUL, args, &loc))) + if (!(mul = add_binary_arithmetic_expr(ctx, params->instrs, HLSL_OP2_MUL, params->args[1], log, &loc))) return false;
if (!(exp = hlsl_new_unary_expr(ctx, HLSL_OP1_EXP2, &mul->node, loc))) @@ -1609,9 +1599,7 @@ static bool intrinsic_pow(struct hlsl_ctx *ctx, static bool intrinsic_saturate(struct hlsl_ctx *ctx, const struct parse_initializer *params, struct vkd3d_shader_location loc) { - struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {params->args[0]}; - - return !!add_expr(ctx, params->instrs, HLSL_OP1_SAT, args, &loc); + return !!add_unary_arithmetic_expr(ctx, params->instrs, HLSL_OP1_SAT, params->args[0], &loc); }
static const struct intrinsic_function @@ -3188,15 +3176,15 @@ unary_expr: } | '-' unary_expr { - $$ = add_unary_expr(ctx, $2, HLSL_OP1_NEG, @1); + $$ = add_unary_arithmetic_expr_last(ctx, $2, HLSL_OP1_NEG, &@1); } | '~' unary_expr { - $$ = add_unary_expr(ctx, $2, HLSL_OP1_BIT_NOT, @1); + $$ = add_unary_arithmetic_expr_last(ctx, $2, HLSL_OP1_BIT_NOT, &@1); } | '!' unary_expr { - $$ = add_unary_expr(ctx, $2, HLSL_OP1_LOGIC_NOT, @1); + $$ = add_unary_arithmetic_expr_last(ctx, $2, HLSL_OP1_LOGIC_NOT, &@1); } /* var_modifiers is necessary to avoid shift/reduce conflicts. */ | '(' var_modifiers type arrays ')' unary_expr @@ -3244,22 +3232,22 @@ mul_expr: unary_expr | mul_expr '*' unary_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_MUL, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_MUL, &@2); } | mul_expr '/' unary_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_DIV, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_DIV, &@2); } | mul_expr '%' unary_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_MOD, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_MOD, &@2); }
add_expr: mul_expr | add_expr '+' mul_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_ADD, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_ADD, &@2); } | add_expr '-' mul_expr { @@ -3268,7 +3256,7 @@ add_expr: if (!(neg = hlsl_new_unary_expr(ctx, HLSL_OP1_NEG, node_from_list($3), @2))) YYABORT; list_add_tail($3, &neg->entry); - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_ADD, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_ADD, &@2); }
shift_expr: @@ -3286,30 +3274,30 @@ relational_expr: shift_expr | relational_expr '<' shift_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_LESS, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_LESS, &@2); } | relational_expr '>' shift_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_GREATER, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_GREATER, &@2); } | relational_expr OP_LE shift_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_LEQUAL, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_LEQUAL, &@2); } | relational_expr OP_GE shift_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_GEQUAL, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_GEQUAL, &@2); }
equality_expr: relational_expr | equality_expr OP_EQ relational_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_EQUAL, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_EQUAL, &@2); } | equality_expr OP_NE relational_expr { - $$ = add_binary_expr(ctx, $1, $3, HLSL_OP2_NEQUAL, @2); + $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_NEQUAL, &@2); }
bitand_expr:
On 10/14/21 8:37 AM, Giovanni Mascellani wrote:
In HLSL the rules used to determine the type of a composite expression change depending on the operator. This commit changes add_expr so that it accepts both the types to which the operands have to be cast and the type that the final expression must have (which is not necessarily the cast type).
For example, when comparing a half with an int, both of them will have to be cast to float, though the type of the expression as a whole will be bool.
Similarly, it is not always the case that all operands have to be cast to the same type; therefore the interface leaves room for specifying a different cast type for each operand. For example, the first operand of the ternary operator must be cast to bool, while the other two must be cast to their common type, which is then the output type of the whole expression.
Yeah, but the ternary operator is a special case. I don't think it should result in an HLSL_IR_EXPR. Probably we should synthesize a temporary variable, store to it inside of a conditional, and load from it.
I'm still not convinced that casting in add_expr() is better than casting in the caller, either.
The add_*_last helpers behave similarly to their counterparts without "_last", except that the last node in each instruction list is used as an argument.
Yeah, I still don't like this name. "last" makes me think that the instruction will be added to the end of a list, or something. "merge" or "combine" would be a lot better, but it's a bit awkward for the unary helpers. But then again maybe we don't need the unary helpers, and we can just as easily spell out node_from_list() inline. There are only three such cases after all (and I guess they're not even all the same kind of operation).
Or "parse", I guess, to symbolize it's done as part of parsing, although that's not great either.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com
libs/vkd3d-shader/hlsl.y | 144 ++++++++++++++++++--------------------- 1 file changed, 66 insertions(+), 78 deletions(-)
I think at least splitting the "cast_types" part out of this patch would make it easier to read.
Hi,
Il 14/10/21 18:32, Zebediah Figura ha scritto:
Yeah, but the ternary operator is a special case. I don't think it should result in an HLSL_IR_EXPR. Probably we should synthesize a temporary variable, store to it inside of a conditional, and load from it.
Why? I don't know in SM1, but at least in SM4 we have a nice opcode (MOVC) that does just what the ternary operator needs to do, in a way that is probably more efficient (or at least not less efficient) than doing an actual branch. Also, I have a few other patches in my queue that use MOVC for other things (like implementing casts from bool), so I'd like to have it in the IR (not really related to this patch, though).
I'm still not convinced that casting in add_expr() is better than casting in the caller, either.
From my point of view it makes the code more declarative and therefore easier to read. However, apparently we have different tastes, and I don't have too strong an opinion.
The add_*_last helpers behave similarly to their counterparts without "_last", except that the last node in each instruction list is used as an argument.
Yeah, I still don't like this name. "last" makes me think that the instruction will be added to the end of a list, or something. "merge" or "combine" would be a lot better, but it's a bit awkward for the unary helpers. But then again maybe we don't need the unary helpers, and we can just as easily spell out node_from_list() inline. There are only three such cases after all (and I guess they're not even all the same kind of operation).
Or "parse", I guess, to symbolize it's done as part of parsing, although that's not great either.
I don't like it either, but I don't like any of your alternatives better, nor could I find anything better myself. Among your suggestions I would pick "parse", but if you eventually settle on something else just tell me.
I think at least splitting the "cast_types" part out of this patch would make it easier to read.
Ok, will do.
Thanks again, Giovanni.
On 10/14/21 12:52 PM, Giovanni Mascellani wrote:
Hi,
Il 14/10/21 18:32, Zebediah Figura ha scritto:
Yeah, but the ternary operator is a special case. I don't think it should result in an HLSL_IR_EXPR. Probably we should synthesize a temporary variable, store to it inside of a conditional, and load from it.
Why? I don't know in SM1, but at least in SM4 we have a nice opcode (MOVC) that does just what the ternary operator needs to do, in a way that is probably more efficient (or at least not less efficient) than doing an actual branch. Also, I have a few other patches in my queue that use MOVC for other things (like implementing casts from bool), so I'd like to have it in the IR (not really related to this patch, though).
Well, the usual answer, which is that keeping the scope of the IR low makes it easier to work with.
Maybe there's an argument for having a ternary/movc in the IR, but even if there is, I'm not sure we really want to make add_expr() more complex just for that. Are there any other expressions that (might) take operands of non-uniform types?
I'm still not convinced that casting in add_expr() is better than casting in the caller, either.
From my point of view it makes the code more declarative and therefore easier to read. However, apparently we have different tastes, and I don't have too strong an opinion.
I guess it's not clear to me that it's actually making the caller's job easier, and in lieu of that I'm inclined to prefer modularity. Not to get too philosophical, but while I'm a fan of declarativity, I tend to shy away from giving a function too many parameters if I can avoid it.
The add_*_last helpers behave similarly to their counterparts without "_last", except that the last node in each instruction list is used as an argument.
Yeah, I still don't like this name. "last" makes me think that the instruction will be added to the end of a list, or something. "merge" or "combine" would be a lot better, but it's a bit awkward for the unary helpers. But then again maybe we don't need the unary helpers, and we can just as easily spell out node_from_list() inline. There are only three such cases after all (and I guess they're not even all the same kind of operation).
Or "parse", I guess, to symbolize it's done as part of parsing, although that's not great either.
I don't like it either, but I don't like any of your alternatives better, nor could I find anything better myself. Among your suggestions I would pick "parse", but if you eventually settle on something else just tell me.
I would prefer "merge" if we can find a way to avoid needing it for unary helpers. Does that latter part seem too unpalatable?
I think at least splitting the "cast_types" part out of this patch would make it easier to read.
Ok, will do.
Thanks again, Giovanni.
On Fri, Oct 15, 2021 at 4:54 AM Zebediah Figura zfigura@codeweavers.com wrote:
On 10/14/21 12:52 PM, Giovanni Mascellani wrote:
Hi,
Il 14/10/21 18:32, Zebediah Figura ha scritto:
Yeah, but the ternary operator is a special case. I don't think it should result in an HLSL_IR_EXPR. Probably we should synthesize a temporary variable, store to it inside of a conditional, and load from it.
Why? I don't know in SM1, but at least in SM4 we have a nice opcode (MOVC) that does just what the ternary operator needs to do, in a way that is probably more efficient (or at least not less efficient) than doing an actual branch. Also, I have a few other patches in my queue that use MOVC for other things (like implementing casts from bool), so I'd like to have it in the IR (not really related to this patch, though).
Well, the usual answer, which is that keeping the scope of the IR low makes it easier to work with.
Maybe there's an argument for having a ternary/movc in the IR, but even if there is, I'm not sure we really want to make add_expr() more complex just for that. Are there any other expressions that (might) take operands of non-uniform types?
I agree with all of this, but let me go one step further. In general the (high-level) IR should express the program using the most basic elements, then the compiler will figure out how to best map the program to the output bytecode. For the ternary operator, ideally we generate an IF in the compiler frontend (e.g. while parsing), then during instruction selection we notice that we can use MOVC to implement that particular IF so we do that. Or we lower suitable IFs to MOVC in a separate pass if it becomes too complicated (but that would make the most sense as a lower level-IR transformation).
That said, we can certainly compromise and take a more practical route where it makes sense. The ternary operator doesn't feel like one such case though, mainly because there is a good chance it would complicate the rest of the compiler. For example, if some uses of the ternary operator can't be mapped to MOVC, we potentially need to handle the ternary op expression in all the passes that care about branches. Or lower them to regular branches anyway.
I guess this might seem at odds with e.g. my previous suggestion to lower matrix multiplication directly to a bunch of DP4s or MADs, but those are pretty regular operations by comparison.
Signed-off-by: Giovanni Mascellani gmascellani@codeweavers.com --- libs/vkd3d-shader/hlsl.y | 44 ++++++++++++++++++++++++++++++++++------ 1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y index baff757e..100aa96f 100644 --- a/libs/vkd3d-shader/hlsl.y +++ b/libs/vkd3d-shader/hlsl.y @@ -1077,6 +1077,38 @@ static struct list *add_binary_arithmetic_expr_last(struct hlsl_ctx *ctx, struct return instrs1; }
+static struct hlsl_ir_expr *add_binary_comparison_expr(struct hlsl_ctx *ctx, struct list *instrs, + enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2, + struct vkd3d_shader_location *loc) +{ + struct hlsl_type *cast_types[HLSL_MAX_OPERANDS], *ret_type; + enum hlsl_base_type base = expr_common_base_type(arg1->data_type->base_type, arg2->data_type->base_type); + enum hlsl_type_class type; + unsigned int dimx, dimy; + struct hlsl_ir_node *args[HLSL_MAX_OPERANDS] = {arg1, arg2}; + + if (!expr_common_shape(ctx, arg1->data_type, arg2->data_type, loc, &type, &dimx, &dimy)) + return NULL; + + cast_types[0] = get_numeric_type(ctx, type, base, dimx, dimy); + cast_types[1] = cast_types[0]; + ret_type = get_numeric_type(ctx, type, HLSL_TYPE_BOOL, dimx, dimy); + + return add_expr(ctx, instrs, op, args, cast_types, ret_type, loc); +} + +static struct list *add_binary_comparison_expr_last(struct hlsl_ctx *ctx, struct list *instrs1, struct list *instrs2, + enum hlsl_ir_expr_op op, struct vkd3d_shader_location *loc) +{ + struct hlsl_ir_node *arg1 = node_from_list(instrs1), *arg2 = node_from_list(instrs2); + + list_move_tail(instrs1, instrs2); + vkd3d_free(instrs2); + add_binary_comparison_expr(ctx, instrs1, op, arg1, arg2, loc); + + return instrs1; +} + static enum hlsl_ir_expr_op op_from_assignment(enum parse_assign_op op) { static const enum hlsl_ir_expr_op ops[] = @@ -3274,30 +3306,30 @@ relational_expr: shift_expr | relational_expr '<' shift_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_LESS, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, HLSL_OP2_LESS, &@2); } | relational_expr '>' shift_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_GREATER, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, HLSL_OP2_GREATER, &@2); } | relational_expr OP_LE shift_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_LEQUAL, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, 
HLSL_OP2_LEQUAL, &@2); } | relational_expr OP_GE shift_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_GEQUAL, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, HLSL_OP2_GEQUAL, &@2); }
equality_expr: relational_expr | equality_expr OP_EQ relational_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_EQUAL, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, HLSL_OP2_EQUAL, &@2); } | equality_expr OP_NE relational_expr { - $$ = add_binary_arithmetic_expr_last(ctx, $1, $3, HLSL_OP2_NEQUAL, &@2); + $$ = add_binary_comparison_expr_last(ctx, $1, $3, HLSL_OP2_NEQUAL, &@2); }
bitand_expr:
Can you please make sure that all of the HLSL helpers exported from a file begin with hlsl_?