At this point, add_load() is split into add_load_component() and add_load_index(); register offsets are hidden from the callers of these functions.
Signed-off-by: Francisco Casas <fcasas@codeweavers.com>
---
 libs/vkd3d-shader/hlsl.c | 109 ++++++++++++++++++++++++++++++-
 libs/vkd3d-shader/hlsl.h |   7 ++
 libs/vkd3d-shader/hlsl.y | 136 ++++++++++++++------------------
 3 files changed, 162 insertions(+), 90 deletions(-)
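As an illustration (not part of the patch; "array", "index_node", "offset_node", and "elem_type" below are made-up names), the caller-visible change is roughly:

    /* Before: the caller computed the register offset and result type itself. */
    if (!(load = add_load(ctx, instrs, array, offset_node, elem_type, loc)))
        return false;

    /* After: the caller passes the index along the access path; the register
     * offset and the element type are derived internally. */
    if (!(load = add_load_index(ctx, instrs, array, index_node, loc)))
        return false;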
diff --git a/libs/vkd3d-shader/hlsl.c b/libs/vkd3d-shader/hlsl.c
index 23136aeb..61d75e0d 100644
--- a/libs/vkd3d-shader/hlsl.c
+++ b/libs/vkd3d-shader/hlsl.c
@@ -117,7 +117,7 @@ void hlsl_free_var(struct hlsl_ir_var *decl)
     vkd3d_free(decl);
 }
 
-static bool hlsl_type_is_row_major(const struct hlsl_type *type)
+bool hlsl_type_is_row_major(const struct hlsl_type *type)
 {
     /* Default to column-major if the majority isn't explicitly set, which can
      * happen for anonymous nodes. */
@@ -314,6 +314,113 @@ unsigned int hlsl_compute_component_offset(struct hlsl_ctx *ctx, struct hlsl_typ
     return 0;
 }
 
+struct hlsl_type *hlsl_get_type_from_path_index(struct hlsl_ctx *ctx, const struct hlsl_type *type,
+        struct hlsl_ir_node *node)
+{
+    assert(node);
+
+    if (type->type == HLSL_CLASS_VECTOR)
+        return hlsl_get_scalar_type(ctx, type->base_type);
+
+    if (type->type == HLSL_CLASS_MATRIX)
+    {
+        if (hlsl_type_is_row_major(type))
+            return hlsl_get_vector_type(ctx, type->base_type, type->dimx);
+        else
+            return hlsl_get_vector_type(ctx, type->base_type, type->dimy);
+    }
+
+    if (type->type == HLSL_CLASS_ARRAY)
+        return type->e.array.type;
+
+    if (type->type == HLSL_CLASS_STRUCT)
+    {
+        struct hlsl_ir_constant *c = hlsl_ir_constant(node);
+
+        assert(c->value[0].u < type->e.record.field_count);
+        return type->e.record.fields[c->value[0].u].type;
+    }
+
+    assert(0);
+    return NULL;
+}
+
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc)
+{
+    struct hlsl_ir_node *idx_offset = NULL;
+    struct hlsl_ir_constant *c;
+
+    list_init(&block->instrs);
+
+    switch (type->type)
+    {
+        case HLSL_CLASS_VECTOR:
+        {
+            idx_offset = idx;
+            break;
+        }
+
+        case HLSL_CLASS_MATRIX:
+        {
+            if (!(c = hlsl_new_uint_constant(ctx, 4, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
+                return NULL;
+            list_add_tail(&block->instrs, &idx_offset->entry);
+
+            break;
+        }
+
+        case HLSL_CLASS_ARRAY:
+        {
+            unsigned int size = hlsl_type_get_array_element_reg_size(type->e.array.type);
+
+            if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
+                return NULL;
+            list_add_tail(&block->instrs, &idx_offset->entry);
+
+            break;
+        }
+
+        case HLSL_CLASS_STRUCT:
+        {
+            unsigned int field_i = hlsl_ir_constant(idx)->value[0].u;
+            struct hlsl_struct_field *field = &type->e.record.fields[field_i];
+
+            if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            idx_offset = &c->node;
+
+            break;
+        }
+
+        default:
+        {
+            assert(0);
+            return NULL;
+        }
+    }
+
+    if (offset)
+    {
+        if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, offset, idx_offset)))
+            return NULL;
+        list_add_tail(&block->instrs, &idx_offset->entry);
+    }
+
+    return idx_offset;
+}
+
 struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size)
 {
     struct hlsl_type *type;
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h
index 5bfbc5a7..f1bfa8c6 100644
--- a/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d-shader/hlsl.h
@@ -731,6 +731,12 @@ struct hlsl_ir_function_decl *hlsl_get_func_decl(struct hlsl_ctx *ctx, const cha
 struct hlsl_type *hlsl_get_type(struct hlsl_scope *scope, const char *name, bool recursive);
 struct hlsl_ir_var *hlsl_get_var(struct hlsl_scope *scope, const char *name);
 
+struct hlsl_type *hlsl_get_type_from_path_index(struct hlsl_ctx *ctx, const struct hlsl_type *type,
+        struct hlsl_ir_node *node);
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc);
+
 struct hlsl_type *hlsl_new_array_type(struct hlsl_ctx *ctx, struct hlsl_type *basic_type, unsigned int array_size);
 struct hlsl_ir_node *hlsl_new_binary_expr(struct hlsl_ctx *ctx, enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1,
         struct hlsl_ir_node *arg2);
@@ -794,6 +800,7 @@ unsigned int hlsl_type_component_count(struct hlsl_type *type);
 unsigned int hlsl_type_get_array_element_reg_size(const struct hlsl_type *type);
 unsigned int hlsl_compute_component_offset(struct hlsl_ctx *ctx, struct hlsl_type *type, unsigned int idx,
         struct hlsl_type **comp_type);
+bool hlsl_type_is_row_major(const struct hlsl_type *type);
 unsigned int hlsl_type_get_sm4_offset(const struct hlsl_type *type, unsigned int offset);
 bool hlsl_types_are_equal(const struct hlsl_type *t1, const struct hlsl_type *t2);
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y
index 61c94fa8..c4ffaf88 100644
--- a/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d-shader/hlsl.y
@@ -622,44 +622,51 @@ static struct hlsl_ir_jump *add_return(struct hlsl_ctx *ctx, struct list *instrs
     return jump;
 }
 
-static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
-        struct hlsl_ir_node *offset, struct hlsl_type *data_type, const struct vkd3d_shader_location loc)
+static struct hlsl_ir_load *add_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
+        struct hlsl_ir_node *idx, const struct vkd3d_shader_location loc)
 {
-    struct hlsl_ir_node *add = NULL;
+    struct hlsl_type *elem_type;
+    struct hlsl_ir_node *offset;
     struct hlsl_ir_load *load;
-    struct hlsl_ir_var *var;
+    struct hlsl_block block;
+
+    elem_type = hlsl_get_type_from_path_index(ctx, var_node->data_type, idx);
 
     if (var_node->type == HLSL_IR_LOAD)
     {
         const struct hlsl_deref *src = &hlsl_ir_load(var_node)->src;
 
-        var = src->var;
-        if (src->offset.node)
-        {
-            if (!(add = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, src->offset.node, offset)))
-                return NULL;
-            list_add_tail(instrs, &add->entry);
-            offset = add;
-        }
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, src->offset.node, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
+        if (!(load = hlsl_new_load(ctx, src->var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
    else
     {
         struct hlsl_ir_store *store;
+        struct hlsl_ir_var *var;
         char name[27];
 
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, NULL, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
         sprintf(name, "<deref-%p>", var_node);
         if (!(var = hlsl_new_synthetic_var(ctx, name, var_node->data_type, var_node->loc)))
             return NULL;
 
         if (!(store = hlsl_new_simple_store(ctx, var, var_node)))
             return NULL;
-        list_add_tail(instrs, &store->node.entry);
+
+        if (!(load = hlsl_new_load(ctx, var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
 
-    if (!(load = hlsl_new_load(ctx, var, offset, data_type, loc)))
-        return NULL;
-    list_add_tail(instrs, &load->node.entry);
     return load;
 }
 
@@ -724,61 +731,21 @@ static struct hlsl_ir_load *add_load_component(struct hlsl_ctx *ctx, struct list
 static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record,
         unsigned int idx, const struct vkd3d_shader_location loc)
 {
-    const struct hlsl_struct_field *field;
     struct hlsl_ir_constant *c;
 
     assert(idx < record->data_type->e.record.field_count);
-    field = &record->data_type->e.record.fields[idx];
 
-    if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, &loc)))
+    if (!(c = hlsl_new_uint_constant(ctx, idx, &loc)))
         return false;
     list_add_tail(instrs, &c->node.entry);
 
-    return !!add_load(ctx, instrs, record, &c->node, field->type, loc);
+    return !!add_load_index(ctx, instrs, record, &c->node, loc);
 }
 
 static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
         enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
         const struct vkd3d_shader_location *loc);
 
-static struct hlsl_ir_node *add_matrix_scalar_load(struct hlsl_ctx *ctx, struct list *instrs,
-        struct hlsl_ir_node *matrix, struct hlsl_ir_node *x, struct hlsl_ir_node *y,
-        const struct vkd3d_shader_location *loc)
-{
-    struct hlsl_ir_node *major, *minor, *mul, *add;
-    struct hlsl_ir_constant *four;
-    struct hlsl_ir_load *load;
-    struct hlsl_type *type = matrix->data_type, *scalar_type;
-
-    scalar_type = hlsl_get_scalar_type(ctx, type->base_type);
-
-    if (type->modifiers & HLSL_MODIFIER_ROW_MAJOR)
-    {
-        minor = x;
-        major = y;
-    }
-    else
-    {
-        minor = y;
-        major = x;
-    }
-
-    if (!(four = hlsl_new_uint_constant(ctx, 4, loc)))
-        return NULL;
-    list_add_tail(instrs, &four->node.entry);
-
-    if (!(mul = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_MUL, &four->node, major, loc)))
-        return NULL;
-
-    if (!(add = add_binary_arithmetic_expr(ctx, instrs, HLSL_OP2_ADD, mul, minor, loc)))
-        return NULL;
-
-    if (!(load = add_load(ctx, instrs, matrix, add, scalar_type, *loc)))
-        return NULL;
-
-    return &load->node;
-}
-
 static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
         struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
 {
@@ -789,6 +756,9 @@ static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
     struct hlsl_ir_var *var;
     unsigned int i;
 
+    if (hlsl_type_is_row_major(mat_type))
+        return add_load_index(ctx, instrs, matrix, index, *loc);
+
     ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
 
     name = vkd3d_string_buffer_get(&ctx->string_buffers);
@@ -800,18 +770,21 @@ static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
 
     for (i = 0; i < mat_type->dimx; ++i)
     {
+        struct hlsl_ir_load *column, *value;
         struct hlsl_ir_store *store;
-        struct hlsl_ir_node *value;
         struct hlsl_ir_constant *c;
 
         if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
             return false;
         list_add_tail(instrs, &c->node.entry);
 
-        if (!(value = add_matrix_scalar_load(ctx, instrs, matrix, &c->node, index, loc)))
+        if (!(column = add_load_index(ctx, instrs, matrix, &c->node, *loc)))
+            return false;
+
+        if (!(value = add_load_index(ctx, instrs, &column->node, index, *loc)))
             return false;
 
-        if (!(store = hlsl_new_store(ctx, var, &c->node, value, 0, *loc)))
+        if (!(store = hlsl_new_store(ctx, var, &c->node, &value->node, 0, *loc)))
             return false;
         list_add_tail(instrs, &store->node.entry);
     }
@@ -827,30 +800,11 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
         struct hlsl_ir_node *index, const struct vkd3d_shader_location loc)
 {
     const struct hlsl_type *expr_type = array->data_type;
-    struct hlsl_type *data_type;
-    struct hlsl_ir_constant *c;
 
-    if (expr_type->type == HLSL_CLASS_ARRAY)
-    {
-        data_type = expr_type->e.array.type;
-
-        if (!(c = hlsl_new_uint_constant(ctx, hlsl_type_get_array_element_reg_size(data_type), &loc)))
-            return false;
-        list_add_tail(instrs, &c->node.entry);
-
-        if (!(index = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, index, &c->node)))
-            return false;
-        list_add_tail(instrs, &index->entry);
-    }
-    else if (expr_type->type == HLSL_CLASS_MATRIX)
-    {
+    if (expr_type->type == HLSL_CLASS_MATRIX)
         return add_matrix_index(ctx, instrs, array, index, &loc);
-    }
-    else if (expr_type->type == HLSL_CLASS_VECTOR)
-    {
-        data_type = hlsl_get_scalar_type(ctx, expr_type->base_type);
-    }
-    else
+
+    if (expr_type->type != HLSL_CLASS_ARRAY && expr_type->type != HLSL_CLASS_VECTOR)
     {
         if (expr_type->type == HLSL_CLASS_SCALAR)
             hlsl_error(ctx, &loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed.");
@@ -859,7 +813,10 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
         return false;
     }
 
-    return !!add_load(ctx, instrs, array, index, data_type, loc);
+    if (!add_load_index(ctx, instrs, array, index, loc))
+        return false;
+
+    return true;
 }
 
 static const struct hlsl_struct_field *get_struct_field(const struct hlsl_struct_field *fields,
@@ -1312,7 +1269,7 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
             struct hlsl_ir_constant *c;
             unsigned int j;
 
-            if (!(c = hlsl_new_uint_constant(ctx, 4 * i, loc)))
+            if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
                 return NULL;
             list_add_tail(instrs, &c->node.entry);
 
@@ -1320,12 +1277,9 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
             {
                 if (operands[j])
                 {
-                    struct hlsl_type *vector_arg_type;
                     struct hlsl_ir_load *load;
 
-                    vector_arg_type = hlsl_get_vector_type(ctx, operands[j]->data_type->base_type, minor_size(type));
-
-                    if (!(load = add_load(ctx, instrs, operands[j], &c->node, vector_arg_type, *loc)))
+                    if (!(load = add_load_index(ctx, instrs, operands[j], &c->node, *loc)))
                         return NULL;
                     vector_operands[j] = &load->node;
                 }
@@ -1334,6 +1288,10 @@ static struct hlsl_ir_node *add_expr(struct hlsl_ctx *ctx, struct list *instrs,
             if (!(value = add_expr(ctx, instrs, op, vector_operands, vector_type, loc)))
                 return NULL;
 
+            if (!(c = hlsl_new_uint_constant(ctx, 4 * i, loc)))
+                return NULL;
+            list_add_tail(instrs, &c->node.entry);
+
             if (!(store = hlsl_new_store(ctx, var, &c->node, value, 0, *loc)))
                 return NULL;
             list_add_tail(instrs, &store->node.entry);
On 7/1/22 16:24, Francisco Casas wrote:
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc)
+{
+    struct hlsl_ir_node *idx_offset = NULL;
+    struct hlsl_ir_constant *c;
+
+    list_init(&block->instrs);
+
+    switch (type->type)
+    {
+        case HLSL_CLASS_VECTOR:
+        {
+            idx_offset = idx;
+            break;
+        }
+
+        case HLSL_CLASS_MATRIX:
+        {
+            if (!(c = hlsl_new_uint_constant(ctx, 4, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
+                return NULL;
+            list_add_tail(&block->instrs, &idx_offset->entry);
+
+            break;
+        }
+
+        case HLSL_CLASS_ARRAY:
+        {
+            unsigned int size = hlsl_type_get_array_element_reg_size(type->e.array.type);
+
+            if (!(c = hlsl_new_uint_constant(ctx, size, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_MUL, &c->node, idx)))
+                return NULL;
+            list_add_tail(&block->instrs, &idx_offset->entry);
+
+            break;
+        }
+
+        case HLSL_CLASS_STRUCT:
+        {
+            unsigned int field_i = hlsl_ir_constant(idx)->value[0].u;
Erf, that is awkward. Well, it's a temporary measure, and avoiding it would probably be worse...
+            struct hlsl_struct_field *field = &type->e.record.fields[field_i];
+
+            if (!(c = hlsl_new_uint_constant(ctx, field->reg_offset, loc)))
+                return NULL;
+            list_add_tail(&block->instrs, &c->node.entry);
+
+            idx_offset = &c->node;
+
+            break;
+        }
+
+        default:
+        {
+            assert(0);
+            return NULL;
+        }
+    }
+
+    if (offset)
+    {
+        if (!(idx_offset = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, offset, idx_offset)))
+            return NULL;
+        list_add_tail(&block->instrs, &idx_offset->entry);
+    }
+
+    return idx_offset;
+}
Hi,
On 01/07/22 23:24, Francisco Casas wrote:
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc)
Any reason why you're passing a struct hlsl_block and then copying the instructions, instead of the usual pattern of passing the instruction list down the call chain?
-static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
-        struct hlsl_ir_node *offset, struct hlsl_type *data_type, const struct vkd3d_shader_location loc)
+static struct hlsl_ir_load *add_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
+        struct hlsl_ir_node *idx, const struct vkd3d_shader_location loc)
 {
-    struct hlsl_ir_node *add = NULL;
+    struct hlsl_type *elem_type;
+    struct hlsl_ir_node *offset;
     struct hlsl_ir_load *load;
-    struct hlsl_ir_var *var;
+    struct hlsl_block block;
+
+    elem_type = hlsl_get_type_from_path_index(ctx, var_node->data_type, idx);
 
     if (var_node->type == HLSL_IR_LOAD)
     {
         const struct hlsl_deref *src = &hlsl_ir_load(var_node)->src;
 
-        var = src->var;
-        if (src->offset.node)
-        {
-            if (!(add = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, src->offset.node, offset)))
-                return NULL;
-            list_add_tail(instrs, &add->entry);
-            offset = add;
-        }
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, src->offset.node, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
+        if (!(load = hlsl_new_load(ctx, src->var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
     else
     {
         struct hlsl_ir_store *store;
+        struct hlsl_ir_var *var;
         char name[27];
 
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, NULL, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
         sprintf(name, "<deref-%p>", var_node);
As for 04/12, I think the current trend is to use string buffers.
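Something along these lines, following what add_matrix_index() already does above (just a sketch, untested):

    struct vkd3d_string_buffer *name;

    if (!(name = vkd3d_string_buffer_get(&ctx->string_buffers)))
        return NULL;
    vkd3d_string_buffer_printf(name, "<deref-%p>", var_node);
    var = hlsl_new_synthetic_var(ctx, name->buffer, var_node->data_type, var_node->loc);
    vkd3d_string_buffer_release(&ctx->string_buffers, name);
    if (!var)
        return NULL;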
         if (!(var = hlsl_new_synthetic_var(ctx, name, var_node->data_type, var_node->loc)))
             return NULL;
 
         if (!(store = hlsl_new_simple_store(ctx, var, var_node)))
             return NULL;
-        list_add_tail(instrs, &store->node.entry);
+
+        if (!(load = hlsl_new_load(ctx, var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
 
-    if (!(load = hlsl_new_load(ctx, var, offset, data_type, loc)))
-        return NULL;
-    list_add_tail(instrs, &load->node.entry);
As I said, I liked it better when the hlsl_new_load() call wasn't duplicated.
     return load;
 }
Giovanni.
On 7/5/22 06:26, Giovanni Mascellani wrote:
Hi,
On 01/07/22 23:24, Francisco Casas wrote:
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc)
Any reason why you're passing a struct hlsl_block and then copying the instructions, instead of the usual pattern of passing the instruction list down the call chain?
This was my suggestion; the idea is that it allows one to generate multiple instructions without necessarily needing to append them to the end of a block. It may not be necessary in this case, but it could end up being useful in the future.
The use of "struct hlsl_block" instead of passing an untyped list is something I think we want to encourage more of, though.
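Concretely, the pattern this patch uses looks like the following (a sketch of the hunks above):

    struct hlsl_block block;
    struct hlsl_ir_node *offset;

    /* The helper builds its instructions into a self-contained block... */
    if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, NULL, idx, &loc)))
        return NULL;

    /* ...and the caller decides where those instructions end up. */
    list_move_tail(instrs, &block.instrs);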
Hello,
On 05-07-22 07:26, Giovanni Mascellani wrote:
Hi,
On 01/07/22 23:24, Francisco Casas wrote:
+struct hlsl_ir_node *hlsl_new_offset_from_path_index(struct hlsl_ctx *ctx, struct hlsl_block *block,
+        struct hlsl_type *type, struct hlsl_ir_node *offset, struct hlsl_ir_node *idx,
+        const struct vkd3d_shader_location *loc)
Any reason why you're passing a struct hlsl_block and then copying the instructions, instead of the usual pattern of passing the instruction list down the call chain?
IIRC Zeb suggested it this way.
I think that using blocks is more versatile. We can introduce the generated instructions at the beginning of another instruction list, not just at the end. It may also help if we ever need to iterate over these new instructions, or insert them in the middle of an instruction list, in some compilation passes.

Not that those things are impossible to do with the "passing the instruction list" pattern, though.
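For example, assuming the usual Wine-style list helpers, a caller could just as well prepend the generated instructions (hypothetical usage, only to show the flexibility):

    struct hlsl_block block;
    struct hlsl_ir_node *offset;

    if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, type, NULL, idx, &loc)))
        return NULL;
    /* list_move_head() is the head-side counterpart of list_move_tail(). */
    list_move_head(instrs, &block.instrs);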
-static struct hlsl_ir_load *add_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
-        struct hlsl_ir_node *offset, struct hlsl_type *data_type, const struct vkd3d_shader_location loc)
+static struct hlsl_ir_load *add_load_index(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *var_node,
+        struct hlsl_ir_node *idx, const struct vkd3d_shader_location loc)
 {
-    struct hlsl_ir_node *add = NULL;
+    struct hlsl_type *elem_type;
+    struct hlsl_ir_node *offset;
     struct hlsl_ir_load *load;
-    struct hlsl_ir_var *var;
+    struct hlsl_block block;
+
+    elem_type = hlsl_get_type_from_path_index(ctx, var_node->data_type, idx);
 
     if (var_node->type == HLSL_IR_LOAD)
     {
         const struct hlsl_deref *src = &hlsl_ir_load(var_node)->src;
 
-        var = src->var;
-        if (src->offset.node)
-        {
-            if (!(add = hlsl_new_binary_expr(ctx, HLSL_OP2_ADD, src->offset.node, offset)))
-                return NULL;
-            list_add_tail(instrs, &add->entry);
-            offset = add;
-        }
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, src->offset.node, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
+        if (!(load = hlsl_new_load(ctx, src->var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
     else
     {
         struct hlsl_ir_store *store;
+        struct hlsl_ir_var *var;
         char name[27];
 
+        if (!(offset = hlsl_new_offset_from_path_index(ctx, &block, var_node->data_type, NULL, idx, &loc)))
+            return NULL;
+        list_move_tail(instrs, &block.instrs);
+
         sprintf(name, "<deref-%p>", var_node);
As for 04/12, I think the current trend is to use string buffers.
Yep, I changed that for the next version here too.
         if (!(var = hlsl_new_synthetic_var(ctx, name, var_node->data_type, var_node->loc)))
             return NULL;
 
         if (!(store = hlsl_new_simple_store(ctx, var, var_node)))
             return NULL;
-        list_add_tail(instrs, &store->node.entry);
+
+        if (!(load = hlsl_new_load(ctx, var, offset, elem_type, loc)))
+            return NULL;
+        list_add_tail(instrs, &load->node.entry);
     }
 
-    if (!(load = hlsl_new_load(ctx, var, offset, data_type, loc)))
-        return NULL;
-    list_add_tail(instrs, &load->node.entry);
As I said, I liked it better when the hlsl_new_load() call wasn't duplicated.
... and here too.
     return load;
 }
Giovanni.