From: Francisco Casas <fcasas@codeweavers.com>
This is required so we can access lookup tables with register type
information later.

---
 libs/vkd3d-shader/tpf.c | 236 ++++++++++++++++++++--------------------
 1 file changed, 121 insertions(+), 115 deletions(-)
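
Note (not part of the commit): the change is mechanical; every sm4 writer now
takes the hlsl_ctx as its first parameter and forwards it down to
sm4_encode_register(). A minimal standalone C sketch of the pattern follows,
using simplified stand-in names rather than vkd3d's actual types:

/* Illustrative sketch only; simplified stand-in names, not vkd3d's API. */
#include <stdint.h>
#include <stdio.h>

struct ctx
{
    /* Stand-in for per-context register type information tables. */
    const char *reg_type_names[2];
};

struct bytecode_buffer
{
    uint32_t words[16];
    unsigned int count;
};

static uint32_t encode_register(const struct ctx *ctx, unsigned int type)
{
    /* With the context threaded through, the encoder can consult
     * context-owned lookup tables instead of hard-coded data. */
    printf("encoding a '%s' register\n", ctx->reg_type_names[type]);
    return type;
}

static void write_instruction(const struct ctx *ctx, struct bytecode_buffer *buffer,
        unsigned int reg_type)
{
    /* Mirrors write_sm4_instruction(ctx, buffer, &instr): the context is
     * simply forwarded to the innermost encoding helper. */
    buffer->words[buffer->count++] = encode_register(ctx, reg_type);
}

int main(void)
{
    struct ctx ctx = { .reg_type_names = { "temp", "input" } };
    struct bytecode_buffer buffer = { 0 };

    write_instruction(&ctx, &buffer, 0);
    write_instruction(&ctx, &buffer, 1);
    return 0;
}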
diff --git a/libs/vkd3d-shader/tpf.c b/libs/vkd3d-shader/tpf.c index d5937beb..77cadb94 100644 --- a/libs/vkd3d-shader/tpf.c +++ b/libs/vkd3d-shader/tpf.c @@ -3676,7 +3676,7 @@ static void sm4_src_from_node(struct sm4_src_register *src, src->swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask); }
-static uint32_t sm4_encode_register(const struct sm4_register *reg) +static uint32_t sm4_encode_register(struct hlsl_ctx *ctx, const struct sm4_register *reg) { uint32_t sm4_reg_type = ~0u; unsigned int i; @@ -3709,7 +3709,8 @@ static uint32_t sm4_register_order(const struct sm4_register *reg) return order; }
-static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const struct sm4_instruction *instr) +static void write_sm4_instruction(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + const struct sm4_instruction *instr) { uint32_t token = instr->opcode; unsigned int size = 1, i, j; @@ -3739,7 +3740,7 @@ static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const st
for (i = 0; i < instr->dst_count; ++i) { - token = sm4_encode_register(&instr->dsts[i].reg); + token = sm4_encode_register(ctx, &instr->dsts[i].reg); if (instr->dsts[i].reg.dim == VKD3D_SM4_DIMENSION_VEC4) token |= instr->dsts[i].writemask << VKD3D_SM4_WRITEMASK_SHIFT; put_u32(buffer, token); @@ -3753,7 +3754,7 @@ static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const st
for (i = 0; i < instr->src_count; ++i) { - token = sm4_encode_register(&instr->srcs[i].reg); + token = sm4_encode_register(ctx, &instr->srcs[i].reg); token |= (uint32_t)instr->srcs[i].swizzle_type << VKD3D_SM4_SWIZZLE_TYPE_SHIFT; token |= instr->srcs[i].swizzle << VKD3D_SM4_SWIZZLE_SHIFT; if (instr->srcs[i].reg.mod) @@ -3816,7 +3817,8 @@ static bool encode_texel_offset_as_aoffimmi(struct sm4_instruction *instr, return true; }
-static void write_sm4_dcl_constant_buffer(struct vkd3d_bytecode_buffer *buffer, const struct hlsl_buffer *cbuffer) +static void write_sm4_dcl_constant_buffer(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + const struct hlsl_buffer *cbuffer) { const struct sm4_instruction instr = { @@ -3831,7 +3833,7 @@ static void write_sm4_dcl_constant_buffer(struct vkd3d_bytecode_buffer *buffer, .srcs[0].swizzle = HLSL_SWIZZLE(X, Y, Z, W), .src_count = 1, }; - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_dcl_samplers(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, @@ -3861,7 +3863,7 @@ static void write_sm4_dcl_samplers(struct hlsl_ctx *ctx, struct vkd3d_bytecode_b continue;
instr.dsts[0].reg.idx[0].offset = resource->id + i; - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); } }
@@ -3918,7 +3920,7 @@ static void write_sm4_dcl_textures(struct hlsl_ctx *ctx, struct vkd3d_bytecode_b instr.opcode |= component_type->sample_count << VKD3D_SM4_RESOURCE_SAMPLE_COUNT_SHIFT; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); } }
@@ -4019,10 +4021,10 @@ static void write_sm4_dcl_semantic(struct hlsl_ctx *ctx, struct vkd3d_bytecode_b break; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_dcl_temps(struct vkd3d_bytecode_buffer *buffer, uint32_t temp_count) +static void write_sm4_dcl_temps(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, uint32_t temp_count) { struct sm4_instruction instr = { @@ -4032,10 +4034,11 @@ static void write_sm4_dcl_temps(struct vkd3d_bytecode_buffer *buffer, uint32_t t .idx_count = 1, };
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_dcl_thread_group(struct vkd3d_bytecode_buffer *buffer, const uint32_t thread_count[3]) +static void write_sm4_dcl_thread_group(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + const uint32_t thread_count[3]) { struct sm4_instruction instr = { @@ -4047,21 +4050,22 @@ static void write_sm4_dcl_thread_group(struct vkd3d_bytecode_buffer *buffer, con .idx_count = 3, };
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_ret(struct vkd3d_bytecode_buffer *buffer) +static void write_sm4_ret(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer) { struct sm4_instruction instr = { .opcode = VKD3D_SM4_OP_RET, };
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_unary_op(struct vkd3d_bytecode_buffer *buffer, enum vkd3d_sm4_opcode opcode, - const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src, unsigned int src_mod) +static void write_sm4_unary_op(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src, + unsigned int src_mod) { struct sm4_instruction instr;
@@ -4075,12 +4079,12 @@ static void write_sm4_unary_op(struct vkd3d_bytecode_buffer *buffer, enum vkd3d_ instr.srcs[0].reg.mod = src_mod; instr.src_count = 1;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_unary_op_with_two_destinations(struct vkd3d_bytecode_buffer *buffer, - enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, unsigned dst_idx, - const struct hlsl_ir_node *src) +static void write_sm4_unary_op_with_two_destinations(struct hlsl_ctx *ctx, + struct vkd3d_bytecode_buffer *buffer, enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, + unsigned dst_idx, const struct hlsl_ir_node *src) { struct sm4_instruction instr;
@@ -4098,11 +4102,12 @@ static void write_sm4_unary_op_with_two_destinations(struct vkd3d_bytecode_buffe sm4_src_from_node(&instr.srcs[0], src, instr.dsts[dst_idx].writemask); instr.src_count = 1;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_binary_op(struct vkd3d_bytecode_buffer *buffer, enum vkd3d_sm4_opcode opcode, - const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2) +static void write_sm4_binary_op(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, + const struct hlsl_ir_node *src2) { struct sm4_instruction instr;
@@ -4116,12 +4121,13 @@ static void write_sm4_binary_op(struct vkd3d_bytecode_buffer *buffer, enum vkd3d sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[0].writemask); instr.src_count = 2;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
/* dp# instructions don't map the swizzle. */ -static void write_sm4_binary_op_dot(struct vkd3d_bytecode_buffer *buffer, enum vkd3d_sm4_opcode opcode, - const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2) +static void write_sm4_binary_op_dot(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, const struct hlsl_ir_node *src1, + const struct hlsl_ir_node *src2) { struct sm4_instruction instr;
@@ -4135,12 +4141,12 @@ static void write_sm4_binary_op_dot(struct vkd3d_bytecode_buffer *buffer, enum v sm4_src_from_node(&instr.srcs[1], src2, VKD3DSP_WRITEMASK_ALL); instr.src_count = 2;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_binary_op_with_two_destinations(struct vkd3d_bytecode_buffer *buffer, - enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, unsigned dst_idx, - const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2) +static void write_sm4_binary_op_with_two_destinations(struct hlsl_ctx *ctx, + struct vkd3d_bytecode_buffer *buffer, enum vkd3d_sm4_opcode opcode, const struct hlsl_ir_node *dst, + unsigned dst_idx, const struct hlsl_ir_node *src1, const struct hlsl_ir_node *src2) { struct sm4_instruction instr;
@@ -4159,7 +4165,7 @@ static void write_sm4_binary_op_with_two_destinations(struct vkd3d_bytecode_buff sm4_src_from_node(&instr.srcs[1], src2, instr.dsts[dst_idx].writemask); instr.src_count = 2;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_ld(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, @@ -4238,7 +4244,7 @@ static void write_sm4_ld(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buf ++instr.src_count; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_sample(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, @@ -4319,7 +4325,7 @@ static void write_sm4_sample(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer ++instr.src_count; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static bool type_is_float(const struct hlsl_type *type) @@ -4346,11 +4352,11 @@ static void write_sm4_cast_from_bool(struct hlsl_ctx *ctx, instr.srcs[1].reg.immconst_uint[0] = mask; instr.src_count = 2;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
-static void write_sm4_cast(struct hlsl_ctx *ctx, - struct vkd3d_bytecode_buffer *buffer, const struct hlsl_ir_expr *expr) +static void write_sm4_cast(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, + const struct hlsl_ir_expr *expr) { static const union { @@ -4372,15 +4378,15 @@ static void write_sm4_cast(struct hlsl_ctx *ctx, { case HLSL_TYPE_HALF: case HLSL_TYPE_FLOAT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); break;
case HLSL_TYPE_INT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_ITOF, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_ITOF, &expr->node, arg1, 0); break;
case HLSL_TYPE_UINT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_UTOF, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_UTOF, &expr->node, arg1, 0); break;
case HLSL_TYPE_BOOL: @@ -4401,12 +4407,12 @@ static void write_sm4_cast(struct hlsl_ctx *ctx, { case HLSL_TYPE_HALF: case HLSL_TYPE_FLOAT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_FTOI, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_FTOI, &expr->node, arg1, 0); break;
case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); break;
case HLSL_TYPE_BOOL: @@ -4427,12 +4433,12 @@ static void write_sm4_cast(struct hlsl_ctx *ctx, { case HLSL_TYPE_HALF: case HLSL_TYPE_FLOAT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_FTOU, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_FTOU, &expr->node, arg1, 0); break;
case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); break;
case HLSL_TYPE_BOOL: @@ -4474,7 +4480,7 @@ static void write_sm4_store_uav_typed(struct hlsl_ctx *ctx, struct vkd3d_bytecod sm4_src_from_node(&instr.srcs[1], value, VKD3DSP_WRITEMASK_ALL); instr.src_count = 2;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_expr(struct hlsl_ctx *ctx, @@ -4496,7 +4502,7 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, VKD3D_SM4_REGISTER_MODIFIER_ABS); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, VKD3D_SM4_REGISTER_MODIFIER_ABS); break;
default: @@ -4506,7 +4512,7 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
case HLSL_OP1_BIT_NOT: assert(type_is_integer(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_NOT, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_NOT, &expr->node, arg1, 0); break;
case HLSL_OP1_CAST: @@ -4515,74 +4521,74 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
case HLSL_OP1_COS: assert(type_is_float(dst_type)); - write_sm4_unary_op_with_two_destinations(buffer, VKD3D_SM4_OP_SINCOS, &expr->node, 1, arg1); + write_sm4_unary_op_with_two_destinations(ctx, buffer, VKD3D_SM4_OP_SINCOS, &expr->node, 1, arg1); break;
case HLSL_OP1_DSX: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_DERIV_RTX, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_DERIV_RTX, &expr->node, arg1, 0); break;
case HLSL_OP1_DSX_COARSE: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM5_OP_DERIV_RTX_COARSE, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM5_OP_DERIV_RTX_COARSE, &expr->node, arg1, 0); break;
case HLSL_OP1_DSX_FINE: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM5_OP_DERIV_RTX_FINE, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM5_OP_DERIV_RTX_FINE, &expr->node, arg1, 0); break;
case HLSL_OP1_DSY: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_DERIV_RTY, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_DERIV_RTY, &expr->node, arg1, 0); break;
case HLSL_OP1_DSY_COARSE: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM5_OP_DERIV_RTY_COARSE, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM5_OP_DERIV_RTY_COARSE, &expr->node, arg1, 0); break;
case HLSL_OP1_DSY_FINE: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM5_OP_DERIV_RTY_FINE, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM5_OP_DERIV_RTY_FINE, &expr->node, arg1, 0); break;
case HLSL_OP1_EXP2: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_EXP, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_EXP, &expr->node, arg1, 0); break;
case HLSL_OP1_FLOOR: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_ROUND_NI, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_ROUND_NI, &expr->node, arg1, 0); break;
case HLSL_OP1_FRACT: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_FRC, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_FRC, &expr->node, arg1, 0); break;
case HLSL_OP1_LOG2: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_LOG, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_LOG, &expr->node, arg1, 0); break;
case HLSL_OP1_LOGIC_NOT: assert(dst_type->base_type == HLSL_TYPE_BOOL); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_NOT, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_NOT, &expr->node, arg1, 0); break;
case HLSL_OP1_NEG: switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, VKD3D_SM4_REGISTER_MODIFIER_NEGATE); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, VKD3D_SM4_REGISTER_MODIFIER_NEGATE); break;
case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_INEG, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_INEG, &expr->node, arg1, 0); break;
default: @@ -4591,51 +4597,51 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, break;
case HLSL_OP1_REINTERPRET: - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV, &expr->node, arg1, 0); break;
case HLSL_OP1_ROUND: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_ROUND_NE, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_ROUND_NE, &expr->node, arg1, 0); break;
case HLSL_OP1_RSQ: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_RSQ, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_RSQ, &expr->node, arg1, 0); break;
case HLSL_OP1_SAT: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_MOV + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_MOV | (VKD3D_SM4_INSTRUCTION_FLAG_SATURATE << VKD3D_SM4_INSTRUCTION_FLAGS_SHIFT), &expr->node, arg1, 0); break;
case HLSL_OP1_SIN: assert(type_is_float(dst_type)); - write_sm4_unary_op_with_two_destinations(buffer, VKD3D_SM4_OP_SINCOS, &expr->node, 0, arg1); + write_sm4_unary_op_with_two_destinations(ctx, buffer, VKD3D_SM4_OP_SINCOS, &expr->node, 0, arg1); break;
case HLSL_OP1_SQRT: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_SQRT, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_SQRT, &expr->node, arg1, 0); break;
case HLSL_OP1_TRUNC: assert(type_is_float(dst_type)); - write_sm4_unary_op(buffer, VKD3D_SM4_OP_ROUND_Z, &expr->node, arg1, 0); + write_sm4_unary_op(ctx, buffer, VKD3D_SM4_OP_ROUND_Z, &expr->node, arg1, 0); break;
case HLSL_OP2_ADD: switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_ADD, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_ADD, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_IADD, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_IADD, &expr->node, arg1, arg2); break;
default: @@ -4645,28 +4651,28 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
case HLSL_OP2_BIT_AND: assert(type_is_integer(dst_type)); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_AND, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_AND, &expr->node, arg1, arg2); break;
case HLSL_OP2_BIT_OR: assert(type_is_integer(dst_type)); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_OR, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_OR, &expr->node, arg1, arg2); break;
case HLSL_OP2_BIT_XOR: assert(type_is_integer(dst_type)); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_XOR, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_XOR, &expr->node, arg1, arg2); break;
case HLSL_OP2_DIV: switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_DIV, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_DIV, &expr->node, arg1, arg2); break;
case HLSL_TYPE_UINT: - write_sm4_binary_op_with_two_destinations(buffer, VKD3D_SM4_OP_UDIV, &expr->node, 0, arg1, arg2); + write_sm4_binary_op_with_two_destinations(ctx, buffer, VKD3D_SM4_OP_UDIV, &expr->node, 0, arg1, arg2); break;
default: @@ -4681,15 +4687,15 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (arg1->data_type->dimx) { case 4: - write_sm4_binary_op_dot(buffer, VKD3D_SM4_OP_DP4, &expr->node, arg1, arg2); + write_sm4_binary_op_dot(ctx, buffer, VKD3D_SM4_OP_DP4, &expr->node, arg1, arg2); break;
case 3: - write_sm4_binary_op_dot(buffer, VKD3D_SM4_OP_DP3, &expr->node, arg1, arg2); + write_sm4_binary_op_dot(ctx, buffer, VKD3D_SM4_OP_DP3, &expr->node, arg1, arg2); break;
case 2: - write_sm4_binary_op_dot(buffer, VKD3D_SM4_OP_DP2, &expr->node, arg1, arg2); + write_sm4_binary_op_dot(ctx, buffer, VKD3D_SM4_OP_DP2, &expr->node, arg1, arg2); break;
case 1: @@ -4712,13 +4718,13 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (src_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_EQ, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_EQ, &expr->node, arg1, arg2); break;
case HLSL_TYPE_BOOL: case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_IEQ, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_IEQ, &expr->node, arg1, arg2); break;
default: @@ -4738,16 +4744,16 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (src_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_GE, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_GE, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_IGE, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_IGE, &expr->node, arg1, arg2); break;
case HLSL_TYPE_BOOL: case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_UGE, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_UGE, &expr->node, arg1, arg2); break;
default: @@ -4767,16 +4773,16 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (src_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_LT, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_LT, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_ILT, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_ILT, &expr->node, arg1, arg2); break;
case HLSL_TYPE_BOOL: case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_ULT, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_ULT, &expr->node, arg1, arg2); break;
default: @@ -4789,33 +4795,33 @@ static void write_sm4_expr(struct hlsl_ctx *ctx,
case HLSL_OP2_LOGIC_AND: assert(dst_type->base_type == HLSL_TYPE_BOOL); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_AND, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_AND, &expr->node, arg1, arg2); break;
case HLSL_OP2_LOGIC_OR: assert(dst_type->base_type == HLSL_TYPE_BOOL); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_OR, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_OR, &expr->node, arg1, arg2); break;
case HLSL_OP2_LSHIFT: assert(type_is_integer(dst_type)); assert(dst_type->base_type != HLSL_TYPE_BOOL); - write_sm4_binary_op(buffer, VKD3D_SM4_OP_ISHL, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_ISHL, &expr->node, arg1, arg2); break;
case HLSL_OP2_MAX: switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_MAX, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_MAX, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_IMAX, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_IMAX, &expr->node, arg1, arg2); break;
case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_UMAX, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_UMAX, &expr->node, arg1, arg2); break;
default: @@ -4827,15 +4833,15 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_MIN, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_MIN, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_IMIN, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_IMIN, &expr->node, arg1, arg2); break;
case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_UMIN, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_UMIN, &expr->node, arg1, arg2); break;
default: @@ -4847,7 +4853,7 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (dst_type->base_type) { case HLSL_TYPE_UINT: - write_sm4_binary_op_with_two_destinations(buffer, VKD3D_SM4_OP_UDIV, &expr->node, 1, arg1, arg2); + write_sm4_binary_op_with_two_destinations(ctx, buffer, VKD3D_SM4_OP_UDIV, &expr->node, 1, arg1, arg2); break;
default: @@ -4859,14 +4865,14 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (dst_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_MUL, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_MUL, &expr->node, arg1, arg2); break;
case HLSL_TYPE_INT: case HLSL_TYPE_UINT: /* Using IMUL instead of UMUL because we're taking the low * bits, and the native compiler generates IMUL. */ - write_sm4_binary_op_with_two_destinations(buffer, VKD3D_SM4_OP_IMUL, &expr->node, 1, arg1, arg2); + write_sm4_binary_op_with_two_destinations(ctx, buffer, VKD3D_SM4_OP_IMUL, &expr->node, 1, arg1, arg2); break;
default: @@ -4883,13 +4889,13 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, switch (src_type->base_type) { case HLSL_TYPE_FLOAT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_NE, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_NE, &expr->node, arg1, arg2); break;
case HLSL_TYPE_BOOL: case HLSL_TYPE_INT: case HLSL_TYPE_UINT: - write_sm4_binary_op(buffer, VKD3D_SM4_OP_INE, &expr->node, arg1, arg2); + write_sm4_binary_op(ctx, buffer, VKD3D_SM4_OP_INE, &expr->node, arg1, arg2); break;
default: @@ -4903,7 +4909,7 @@ static void write_sm4_expr(struct hlsl_ctx *ctx, case HLSL_OP2_RSHIFT: assert(type_is_integer(dst_type)); assert(dst_type->base_type != HLSL_TYPE_BOOL); - write_sm4_binary_op(buffer, dst_type->base_type == HLSL_TYPE_INT ? VKD3D_SM4_OP_ISHR : VKD3D_SM4_OP_USHR, + write_sm4_binary_op(ctx, buffer, dst_type->base_type == HLSL_TYPE_INT ? VKD3D_SM4_OP_ISHR : VKD3D_SM4_OP_USHR, &expr->node, arg1, arg2); break;
@@ -4925,7 +4931,7 @@ static void write_sm4_if(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buf assert(iff->condition.node->data_type->dimx == 1);
sm4_src_from_node(&instr.srcs[0], iff->condition.node, VKD3DSP_WRITEMASK_ALL); - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr);
write_sm4_block(ctx, buffer, &iff->then_block);
@@ -4933,14 +4939,14 @@ static void write_sm4_if(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buf { instr.opcode = VKD3D_SM4_OP_ELSE; instr.src_count = 0; - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr);
write_sm4_block(ctx, buffer, &iff->else_block); }
instr.opcode = VKD3D_SM4_OP_ENDIF; instr.src_count = 0; - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_jump(struct hlsl_ctx *ctx, @@ -4972,7 +4978,7 @@ static void write_sm4_jump(struct hlsl_ctx *ctx, return; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
/* Does this variable's data come directly from the API user, rather than being @@ -5023,7 +5029,7 @@ static void write_sm4_load(struct hlsl_ctx *ctx, instr.src_count = 1; }
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_loop(struct hlsl_ctx *ctx, @@ -5034,12 +5040,12 @@ static void write_sm4_loop(struct hlsl_ctx *ctx, .opcode = VKD3D_SM4_OP_LOOP, };
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr);
write_sm4_block(ctx, buffer, &loop->body);
instr.opcode = VKD3D_SM4_OP_ENDLOOP; - write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_gather(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, @@ -5081,7 +5087,7 @@ static void write_sm4_gather(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer src->swizzle_type = VKD3D_SM4_SWIZZLE_SCALAR; src->swizzle = swizzle;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_resource_load(struct hlsl_ctx *ctx, @@ -5180,7 +5186,7 @@ static void write_sm4_store(struct hlsl_ctx *ctx, sm4_src_from_node(&instr.srcs[0], rhs, instr.dsts[0].writemask); instr.src_count = 1;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_swizzle(struct hlsl_ctx *ctx, @@ -5200,7 +5206,7 @@ static void write_sm4_swizzle(struct hlsl_ctx *ctx, swizzle->swizzle, swizzle->node.data_type->dimx), instr.dsts[0].writemask); instr.src_count = 1;
- write_sm4_instruction(buffer, &instr); + write_sm4_instruction(ctx, buffer, &instr); }
static void write_sm4_block(struct hlsl_ctx *ctx, struct vkd3d_bytecode_buffer *buffer, @@ -5312,7 +5318,7 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx, LIST_FOR_EACH_ENTRY(cbuffer, &ctx->buffers, struct hlsl_buffer, entry) { if (cbuffer->reg.allocated) - write_sm4_dcl_constant_buffer(&buffer, cbuffer); + write_sm4_dcl_constant_buffer(ctx, &buffer, cbuffer); }
for (i = 0; i < extern_resources_count; ++i) @@ -5334,14 +5340,14 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx, }
if (profile->type == VKD3D_SHADER_TYPE_COMPUTE) - write_sm4_dcl_thread_group(&buffer, ctx->thread_count); + write_sm4_dcl_thread_group(ctx, &buffer, ctx->thread_count);
if (ctx->temp_count) - write_sm4_dcl_temps(&buffer, ctx->temp_count); + write_sm4_dcl_temps(ctx, &buffer, ctx->temp_count);
write_sm4_block(ctx, &buffer, &entry_func->body);
- write_sm4_ret(&buffer); + write_sm4_ret(ctx, &buffer);
set_u32(&buffer, token_count_position, bytecode_get_size(&buffer) / sizeof(uint32_t));