From: Francisco Casas <fcasas@codeweavers.com>
From this point on, it is no longer true that only hlsl_ir_loads can return objects, because an object can also come from a chain of hlsl_ir_indexes that ends in an hlsl_ir_load.
For this reason, hlsl_resource_load_params now expects both the resource and the sampler to be just an hlsl_ir_node pointer instead of a pointer to the more specific hlsl_ir_load.
---
 libs/vkd3d-shader/hlsl.c               |  72 +++++++++++++-
 libs/vkd3d-shader/hlsl.h               |   3 +-
 libs/vkd3d-shader/hlsl.y               | 128 ++++++++-----------------
 libs/vkd3d-shader/hlsl_codegen.c       |  99 +++++++++++++++++++
 tests/hlsl-matrix-indexing.shader_test |   4 +-
 5 files changed, 211 insertions(+), 95 deletions(-)
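To illustrate (the snippet below is only an example and is not part of the patch): an access such as

    struct apple { float4 a[3]; };
    apple s;
    float4 v = s.a[i];

is now parsed as a chain of hlsl_ir_index nodes -- one for the record access "s.a", one for the array access "[i]" -- that ends in the hlsl_ir_load of "s". If the chain appears on the lhs of an assignment, it is flattened into a single deref by hlsl_init_deref_from_index_chain(); any other use is lowered back into regular loads by the new lower_index_loads() pass, with lower_crooked_matrix_index_loads() first splitting the non-contiguous column-major matrix accesses that cannot be expressed as a deref.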
diff --git a/libs/vkd3d-shader/hlsl.c b/libs/vkd3d-shader/hlsl.c
index 2b89660a..79f85a06 100644
--- a/libs/vkd3d-shader/hlsl.c
+++ b/libs/vkd3d-shader/hlsl.c
@@ -435,6 +435,71 @@ static bool init_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hl
     return true;
 }
 
+bool hlsl_init_deref_from_index_chain(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_node *chain)
+{
+    struct hlsl_ir_index *index;
+    struct hlsl_ir_load *load;
+    unsigned int chain_len, i;
+    struct hlsl_ir_node *ptr;
+
+    deref->path = NULL;
+    deref->path_len = 0;
+    deref->offset.node = NULL;
+
+    if (!chain)
+        return true;
+
+    if (chain->type == HLSL_IR_INDEX)
+        assert(!hlsl_index_is_crooked_matrix_indexing(hlsl_ir_index(chain)));
+
+    /* Find the length of the index chain */
+    chain_len = 0;
+    ptr = chain;
+    while (ptr->type == HLSL_IR_INDEX)
+    {
+        index = hlsl_ir_index(ptr);
+        chain_len++;
+        ptr = index->val.node;
+    }
+
+    if (ptr->type != HLSL_IR_LOAD)
+    {
+        /* This should be an hlsl_error() for invalid l-values, but indexing non-load expressions is
+         * also not supported yet so we use a hlsl_fixme() for now. */
+        hlsl_fixme(ctx, &chain->loc, "Indexing %s.", hlsl_node_type_to_string(ptr->type));
+        return false;
+    }
+
+    load = hlsl_ir_load(ptr);
+
+    if (!init_deref(ctx, deref, load->src.var, load->src.path_len + chain_len))
+        return false;
+
+    for (i = 0; i < load->src.path_len; ++i)
+        hlsl_src_from_node(&deref->path[i], load->src.path[i].node);
+
+    ptr = chain;
+    for (i = 0; i < chain_len; ++i)
+    {
+        unsigned int p = deref->path_len - 1 - i;
+
+        index = hlsl_ir_index(ptr);
+        if (!hlsl_index_is_crooked_matrix_indexing(index))
+        {
+            hlsl_src_from_node(&deref->path[p], index->idx.node);
+        }
+        else
+        {
+            hlsl_src_from_node(&deref->path[p], deref->path[p + 1].node);
+            hlsl_src_remove(&deref->path[p + 1]);
+            hlsl_src_from_node(&deref->path[p + 1], index->idx.node);
+        }
+        ptr = index->val.node;
+    }
+
+    return true;
+}
+
 struct hlsl_type *hlsl_deref_get_type(struct hlsl_ctx *ctx, const struct hlsl_deref *deref)
 {
     struct hlsl_type *type;
@@ -1225,17 +1290,20 @@ struct hlsl_ir_resource_load *hlsl_new_resource_load(struct hlsl_ctx *ctx,
         return NULL;
     init_node(&load->node, HLSL_IR_RESOURCE_LOAD, params->format, loc);
     load->load_type = params->type;
-    if (!hlsl_copy_deref(ctx, &load->resource, &params->resource))
+
+    if (!hlsl_init_deref_from_index_chain(ctx, &load->resource, params->resource))
     {
         vkd3d_free(load);
         return NULL;
     }
-    if (!hlsl_copy_deref(ctx, &load->sampler, &params->sampler))
+
+    if (!hlsl_init_deref_from_index_chain(ctx, &load->sampler, params->sampler))
     {
         hlsl_cleanup_deref(&load->resource);
         vkd3d_free(load);
         return NULL;
     }
+
     hlsl_src_from_node(&load->coords, params->coords);
     hlsl_src_from_node(&load->texel_offset, params->texel_offset);
     hlsl_src_from_node(&load->lod, params->lod);
diff --git a/libs/vkd3d-shader/hlsl.h b/libs/vkd3d-shader/hlsl.h
index b16c7b80..05f162dd 100644
--- a/libs/vkd3d-shader/hlsl.h
+++ b/libs/vkd3d-shader/hlsl.h
@@ -785,7 +785,7 @@ struct hlsl_resource_load_params
 {
     struct hlsl_type *format;
     enum hlsl_resource_load_type type;
-    struct hlsl_deref resource, sampler;
+    struct hlsl_ir_node *resource, *sampler;
     struct hlsl_ir_node *coords, *lod, *texel_offset;
 };
 
@@ -997,6 +997,7 @@ void hlsl_dump_function(struct hlsl_ctx *ctx, const struct hlsl_ir_function_decl
 int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry_func,
         enum vkd3d_shader_target_type target_type, struct vkd3d_shader_code *out);
 
+bool hlsl_init_deref_from_index_chain(struct hlsl_ctx *ctx, struct hlsl_deref *deref, struct hlsl_ir_node *chain);
 bool hlsl_copy_deref(struct hlsl_ctx *ctx, struct hlsl_deref *deref, const struct hlsl_deref *other);
 
 void hlsl_cleanup_deref(struct hlsl_deref *deref);
diff --git a/libs/vkd3d-shader/hlsl.y b/libs/vkd3d-shader/hlsl.y
index 7435ef10..4c64ef86 100644
--- a/libs/vkd3d-shader/hlsl.y
+++ b/libs/vkd3d-shader/hlsl.y
@@ -685,9 +685,10 @@ static struct hlsl_ir_load *add_load_component(struct hlsl_ctx *ctx, struct list
     return load;
 }
 
-static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record,
+static bool add_record_access(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *record,
         unsigned int idx, const struct vkd3d_shader_location loc)
 {
+    struct hlsl_ir_index *index;
     struct hlsl_ir_constant *c;
 
     assert(idx < record->data_type->e.record.field_count);
@@ -696,60 +697,17 @@ static bool add_record_load(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         return false;
     list_add_tail(instrs, &c->node.entry);
 
-    return !!add_load_index(ctx, instrs, record, &c->node, &loc);
+    if (!(index = hlsl_new_index(ctx, record, &c->node, &loc)))
+        return false;
+    list_add_tail(instrs, &index->node.entry);
+
+    return true;
 }
 
 static struct hlsl_ir_node *add_binary_arithmetic_expr(struct hlsl_ctx *ctx, struct list *instrs,
         enum hlsl_ir_expr_op op, struct hlsl_ir_node *arg1, struct hlsl_ir_node *arg2,
         const struct vkd3d_shader_location *loc);
 
-static bool add_matrix_index(struct hlsl_ctx *ctx, struct list *instrs,
-        struct hlsl_ir_node *matrix, struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
-{
-    struct hlsl_type *mat_type = matrix->data_type, *ret_type;
-    struct hlsl_deref var_deref;
-    struct hlsl_ir_load *load;
-    struct hlsl_ir_var *var;
-    unsigned int i;
-
-    if (hlsl_type_is_row_major(mat_type))
-        return add_load_index(ctx, instrs, matrix, index, loc);
-
-    ret_type = hlsl_get_vector_type(ctx, mat_type->base_type, mat_type->dimx);
-
-    if (!(var = hlsl_new_synthetic_var(ctx, "index", ret_type, loc)))
-        return false;
-    hlsl_init_simple_deref_from_var(&var_deref, var);
-
-    for (i = 0; i < mat_type->dimx; ++i)
-    {
-        struct hlsl_ir_load *column, *value;
-        struct hlsl_ir_store *store;
-        struct hlsl_ir_constant *c;
-        struct hlsl_block block;
-
-        if (!(c = hlsl_new_uint_constant(ctx, i, loc)))
-            return false;
-        list_add_tail(instrs, &c->node.entry);
-
-        if (!(column = add_load_index(ctx, instrs, matrix, &c->node, loc)))
-            return false;
-
-        if (!(value = add_load_index(ctx, instrs, &column->node, index, loc)))
-            return false;
-
-        if (!(store = hlsl_new_store_component(ctx, &block, &var_deref, i, &value->node)))
-            return false;
-        list_move_tail(instrs, &block.instrs);
-    }
-
-    if (!(load = hlsl_new_var_load(ctx, var, *loc)))
-        return false;
-    list_add_tail(instrs, &load->node.entry);
-
-    return true;
-}
-
 static struct hlsl_ir_node *add_zero_mipmap_level(struct hlsl_ctx *ctx, struct list *instrs,
         struct hlsl_ir_node *index, unsigned int dim_count, const struct vkd3d_shader_location *loc)
 {
@@ -783,10 +741,11 @@ static struct hlsl_ir_node *add_zero_mipmap_level(struct hlsl_ctx *ctx, struct l
     return &coords_load->node;
 }
 
-static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array,
+static bool add_array_access(struct hlsl_ctx *ctx, struct list *instrs, struct hlsl_ir_node *array,
         struct hlsl_ir_node *index, const struct vkd3d_shader_location *loc)
 {
     const struct hlsl_type *expr_type = array->data_type, *index_type = index->data_type;
+    struct hlsl_ir_index *return_index;
     struct hlsl_ir_expr *cast;
 
     if (expr_type->type == HLSL_CLASS_OBJECT
@@ -795,8 +754,6 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
     {
         struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_LOAD};
         unsigned int dim_count = hlsl_sampler_dim_count(expr_type->sampler_dim);
-        /* Only HLSL_IR_LOAD can return an object. */
-        struct hlsl_ir_load *object_load = hlsl_ir_load(array);
         struct hlsl_ir_resource_load *resource_load;
 
         if (index_type->type > HLSL_CLASS_VECTOR || index_type->dimx != dim_count)
@@ -818,7 +775,7 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
             return false;
 
         load_params.format = expr_type->e.resource_format;
-        load_params.resource = object_load->src;
+        load_params.resource = array;
         load_params.coords = index;
 
         if (!(resource_load = hlsl_new_resource_load(ctx, &load_params, loc)))
@@ -838,10 +795,7 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
     list_add_tail(instrs, &cast->node.entry);
     index = &cast->node;
 
-    if (expr_type->type == HLSL_CLASS_MATRIX)
-        return add_matrix_index(ctx, instrs, array, index, loc);
-
-    if (expr_type->type != HLSL_CLASS_ARRAY && expr_type->type != HLSL_CLASS_VECTOR)
+    if (expr_type->type != HLSL_CLASS_ARRAY && expr_type->type != HLSL_CLASS_VECTOR && expr_type->type != HLSL_CLASS_MATRIX)
     {
         if (expr_type->type == HLSL_CLASS_SCALAR)
             hlsl_error(ctx, loc, VKD3D_SHADER_ERROR_HLSL_INVALID_INDEX, "Scalar expressions cannot be array-indexed.");
@@ -850,8 +804,9 @@ static bool add_array_load(struct hlsl_ctx *ctx, struct list *instrs, struct hls
         return false;
     }
 
-    if (!add_load_index(ctx, instrs, array, index, loc))
+    if (!(return_index = hlsl_new_index(ctx, array, index, loc)))
         return false;
+    list_add_tail(instrs, &return_index->node.entry);
 
     return true;
 }
@@ -1727,7 +1682,7 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
     if (!(rhs = add_implicit_conversion(ctx, instrs, rhs, lhs_type, &rhs->loc)))
         return NULL;
 
-    while (lhs->type != HLSL_IR_LOAD && lhs->type != HLSL_IR_RESOURCE_LOAD)
+    while (lhs->type != HLSL_IR_LOAD && lhs->type != HLSL_IR_RESOURCE_LOAD && lhs->type != HLSL_IR_INDEX)
     {
         if (lhs->type == HLSL_IR_EXPR && hlsl_ir_expr(lhs)->op == HLSL_OP1_CAST)
         {
@@ -1800,13 +1755,26 @@ static struct hlsl_ir_node *add_assignment(struct hlsl_ctx *ctx, struct list *in
             return NULL;
         list_add_tail(instrs, &store->node.entry);
     }
+    else if (lhs->type == HLSL_IR_INDEX && hlsl_index_is_crooked_matrix_indexing(hlsl_ir_index(lhs)))
+    {
+        hlsl_fixme(ctx, &lhs->loc, "Column-major matrix index store.");
+        return NULL;
+    }
     else
     {
         struct hlsl_ir_store *store;
+        struct hlsl_deref deref;
 
-        if (!(store = hlsl_new_store_index(ctx, &hlsl_ir_load(lhs)->src, NULL, rhs, writemask, &rhs->loc)))
+        if (!hlsl_init_deref_from_index_chain(ctx, &deref, lhs))
+            return NULL;
+
+        if (!(store = hlsl_new_store_index(ctx, &deref, NULL, rhs, writemask, &rhs->loc)))
+        {
+            hlsl_cleanup_deref(&deref);
             return NULL;
+        }
         list_add_tail(instrs, &store->node.entry);
+        hlsl_cleanup_deref(&deref);
     }
 
     /* Don't use the instruction itself as a source, as this makes structure
@@ -3058,7 +3026,6 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
     struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_SAMPLE};
     const struct hlsl_type *sampler_type;
     struct hlsl_ir_resource_load *load;
-    struct hlsl_ir_load *sampler_load;
     struct hlsl_ir_node *coords;
 
     if (params->args_count != 2 && params->args_count != 4)
@@ -3087,10 +3054,7 @@ static bool intrinsic_tex(struct hlsl_ctx *ctx, const struct parse_initializer *
     }
     else
     {
-        /* Only HLSL_IR_LOAD can return an object. */
-        sampler_load = hlsl_ir_load(params->args[0]);
-
-        load_params.resource = sampler_load->src;
+        load_params.resource = params->args[0];
     }
 
     if (!(coords = add_implicit_conversion(ctx, params->instrs, params->args[1],
@@ -3424,7 +3388,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         const char *name, const struct parse_initializer *params, const struct vkd3d_shader_location *loc)
 {
     const struct hlsl_type *object_type = object->data_type;
-    struct hlsl_ir_load *object_load;
 
     if (object_type->type != HLSL_CLASS_OBJECT || object_type->base_type != HLSL_TYPE_TEXTURE
             || object_type->sampler_dim == HLSL_SAMPLER_DIM_GENERIC)
@@ -3438,9 +3401,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         return false;
     }
 
-    /* Only HLSL_IR_LOAD can return an object. */
-    object_load = hlsl_ir_load(object);
-
     if (!strcmp(name, "Load")
             && object_type->sampler_dim != HLSL_SAMPLER_DIM_CUBE
             && object_type->sampler_dim != HLSL_SAMPLER_DIM_CUBEARRAY)
@@ -3484,7 +3444,7 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             return false;
 
         load_params.format = object_type->e.resource_format;
-        load_params.resource = object_load->src;
+        load_params.resource = object;
 
         if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
             return false;
@@ -3500,7 +3460,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         struct hlsl_resource_load_params load_params = {.type = HLSL_RESOURCE_SAMPLE};
         const struct hlsl_type *sampler_type;
         struct hlsl_ir_resource_load *load;
-        struct hlsl_ir_load *sampler_load;
 
         if (params->args_count < 2 || params->args_count > 4 + !!offset_dim)
         {
@@ -3523,9 +3482,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             return false;
         }
 
-        /* Only HLSL_IR_LOAD can return an object. */
-        sampler_load = hlsl_ir_load(params->args[0]);
-
         if (!(load_params.coords = add_implicit_conversion(ctx, instrs, params->args[1],
                 hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
             return false;
@@ -3543,8 +3499,8 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             hlsl_fixme(ctx, loc, "Tiled resource status argument.");
 
         load_params.format = object_type->e.resource_format;
-        load_params.resource = object_load->src;
-        load_params.sampler = sampler_load->src;
+        load_params.resource = object;
+        load_params.sampler = params->args[0];
 
         if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
             return false;
@@ -3564,7 +3520,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         struct hlsl_resource_load_params load_params = {0};
         const struct hlsl_type *sampler_type;
         struct hlsl_ir_resource_load *load;
-        struct hlsl_ir_load *sampler_load;
         unsigned int read_channel;
 
         if (!strcmp(name, "GatherGreen"))
@@ -3640,16 +3595,13 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             return false;
         }
 
-        /* Only HLSL_IR_LOAD can return an object. */
-        sampler_load = hlsl_ir_load(params->args[0]);
-
         if (!(load_params.coords = add_implicit_conversion(ctx, instrs, params->args[1],
                 hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
             return false;
 
         load_params.format = hlsl_get_vector_type(ctx, object_type->e.resource_format->base_type, 4);
-        load_params.resource = object_load->src;
-        load_params.sampler = sampler_load->src;
+        load_params.resource = object;
+        load_params.sampler = params->args[0];
 
         if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
             return false;
@@ -3665,7 +3617,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
         const unsigned int offset_dim = hlsl_offset_dim_count(object_type->sampler_dim);
         const struct hlsl_type *sampler_type;
         struct hlsl_ir_resource_load *load;
-        struct hlsl_ir_load *sampler_load;
 
         if (params->args_count < 3 || params->args_count > 4 + !!offset_dim)
         {
@@ -3688,9 +3639,6 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             return false;
         }
 
-        /* Only HLSL_IR_LOAD can return an object. */
-        sampler_load = hlsl_ir_load(params->args[0]);
-
         if (!(load_params.coords = add_implicit_conversion(ctx, instrs, params->args[1],
                 hlsl_get_vector_type(ctx, HLSL_TYPE_FLOAT, sampler_dim), loc)))
             load_params.coords = params->args[1];
@@ -3710,8 +3658,8 @@ static bool add_method_call(struct hlsl_ctx *ctx, struct list *instrs, struct hl
             hlsl_fixme(ctx, loc, "Tiled resource status argument.");
 
         load_params.format = object_type->e.resource_format;
-        load_params.resource = object_load->src;
-        load_params.sampler = sampler_load->src;
+        load_params.resource = object;
+        load_params.sampler = params->args[0];
 
         if (!(load = hlsl_new_resource_load(ctx, &load_params, loc)))
             return false;
@@ -5296,7 +5244,7 @@ postfix_expr:
             }
 
             field_idx = field - type->e.record.fields;
-            if (!add_record_load(ctx, $1, node, field_idx, @2))
+            if (!add_record_access(ctx, $1, node, field_idx, @2))
                 YYABORT;
             $$ = $1;
         }
@@ -5325,7 +5273,7 @@ postfix_expr:
             list_move_tail($1, $3);
            vkd3d_free($3);
 
-            if (!add_array_load(ctx, $1, array, index, &@2))
+            if (!add_array_access(ctx, $1, array, index, &@2))
             {
                 destroy_instr_list($1);
                 YYABORT;
diff --git a/libs/vkd3d-shader/hlsl_codegen.c b/libs/vkd3d-shader/hlsl_codegen.c
index 553aedef..ab2b4b02 100644
--- a/libs/vkd3d-shader/hlsl_codegen.c
+++ b/libs/vkd3d-shader/hlsl_codegen.c
@@ -731,6 +731,102 @@ static bool lower_calls(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *
     return true;
 }
 
+/* Single hlsl_ir_index instructions on column-major matrices cannot be turned into derefs; however,
+ * index chains that refer to a single component can be. This pass turns the former into the latter. */
+static bool lower_crooked_matrix_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
+{
+    struct hlsl_ir_index *index;
+    struct hlsl_deref var_deref;
+    struct hlsl_ir_index *copy;
+    struct hlsl_ir_load *load;
+    struct hlsl_ir_node *mat;
+    struct hlsl_ir_var *var;
+    unsigned int i;
+
+    if (instr->type != HLSL_IR_INDEX)
+        return false;
+    index = hlsl_ir_index(instr);
+
+    if (!hlsl_index_is_crooked_matrix_indexing(index))
+        return false;
+
+    mat = index->val.node;
+    assert(!hlsl_type_is_row_major(mat->data_type));
+
+    /* We will remove this instruction but we need to generate nodes that refer to it,
+     * so we create a copy. */
+    if (!(copy = hlsl_new_index(ctx, index->val.node, index->idx.node, &instr->loc)))
+        return false;
+    list_add_before(&instr->entry, &copy->node.entry);
+
+    if (!(var = hlsl_new_synthetic_var(ctx, "index", instr->data_type, &instr->loc)))
+        return false;
+    hlsl_init_simple_deref_from_var(&var_deref, var);
+
+    for (i = 0; i < mat->data_type->dimx; ++i)
+    {
+        struct hlsl_ir_index *value;
+        struct hlsl_ir_store *store;
+        struct hlsl_ir_constant *c;
+        struct hlsl_block block;
+
+        if (!(c = hlsl_new_uint_constant(ctx, i, &instr->loc)))
+            return false;
+        list_add_before(&instr->entry, &c->node.entry);
+
+        if (!(value = hlsl_new_index(ctx, &copy->node, &c->node, &instr->loc)))
+            return false;
+        list_add_before(&instr->entry, &value->node.entry);
+
+        if (!(store = hlsl_new_store_component(ctx, &block, &var_deref, i, &value->node)))
+            return false;
+        list_move_before(&instr->entry, &block.instrs);
+    }
+
+    if (!(load = hlsl_new_var_load(ctx, var, instr->loc)))
+        return false;
+    list_add_before(&instr->entry, &load->node.entry);
+    hlsl_replace_node(instr, &load->node);
+
+    return true;
+}
+
+/* hlsl_ir_index nodes are a parse-time construct used to represent array indexing and struct
+ * record access before knowing if they will be used in the lhs of an assignment --in which case
+ * they are lowered into a deref-- or as the load of an element within a larger value.
+ * For the latter case, this pass takes care of lowering hlsl_ir_indexes (and index chains) into
+ * individual hlsl_ir_loads.
+ * It is worth noting that the generated hlsl_ir_loads don't load from a copy of the variable loaded
+ * at the beginning of the index chain, but from the same variable instead, because it is assumed
+ * that the value of the variable won't change between the hlsl_ir_load instruction and the
+ * hlsl_ir_index. */
+static bool lower_index_loads(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
+{
+    struct hlsl_ir_index *index;
+    struct hlsl_ir_load *load;
+    struct hlsl_deref deref;
+
+    if (instr->type != HLSL_IR_INDEX)
+        return false;
+    index = hlsl_ir_index(instr);
+
+    if (hlsl_index_is_crooked_matrix_indexing(index))
+        return false;
+
+    if (!hlsl_init_deref_from_index_chain(ctx, &deref, instr))
+        return false;
+
+    if (!(load = hlsl_new_load_index(ctx, &deref, NULL, &instr->loc)))
+    {
+        hlsl_cleanup_deref(&deref);
+        return false;
+    }
+    list_add_before(&instr->entry, &load->node.entry);
+    hlsl_replace_node(instr, &load->node);
+    hlsl_cleanup_deref(&deref);
+    return true;
+}
+
 /* Lower casts from vec1 to vecN to swizzles.
  */
 static bool lower_broadcasts(struct hlsl_ctx *ctx, struct hlsl_ir_node *instr, void *context)
 {
@@ -3233,6 +3329,9 @@ int hlsl_emit_bytecode(struct hlsl_ctx *ctx, struct hlsl_ir_function_decl *entry
     while (transform_ir(ctx, lower_calls, body, NULL));
 
+    transform_ir(ctx, lower_crooked_matrix_index_loads, body, NULL);
+    transform_ir(ctx, lower_index_loads, body, NULL);
+
     LIST_FOR_EACH_ENTRY(var, &ctx->globals->vars, struct hlsl_ir_var, scope_entry)
     {
         if (var->storage_modifiers & HLSL_STORAGE_UNIFORM)
diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test
index 7ae1a91d..4ca0e871 100644
--- a/tests/hlsl-matrix-indexing.shader_test
+++ b/tests/hlsl-matrix-indexing.shader_test
@@ -78,7 +78,7 @@ draw quad
 probe all rgba (1.0, 5.0, 7.0, 12.0)
 
-[pixel shader]
+[pixel shader todo]
 float4 main() : SV_TARGET
 {
     float3x2 m = {1, 2, 3, 4, 5, 6};
@@ -89,7 +89,7 @@ float4 main() : SV_TARGET
 }
 
 [test]
-draw quad
+todo draw quad
 todo probe all rgba (30.0, 40.0, 5.0, 6.0)