From: Francisco Casas <fcasas@codeweavers.com>
The idea of struct sm4_xregister is that it is a regular struct sm4_register
that can additionally contain other registers, used for relative addressing
in its indexes.
---
 libs/vkd3d-shader/tpf.c                | 289 ++++++++++++++++++++-----
 tests/array-index-expr.shader_test     |  36 +--
 tests/function-return.shader_test      |  22 +-
 tests/hlsl-matrix-indexing.shader_test |   6 +-
 tests/return.shader_test               |  22 +-
 5 files changed, 283 insertions(+), 92 deletions(-)
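For illustration, this is roughly the kind of shader that relative addressing
makes compilable, together with an approximation of the SM4 disassembly we aim
to emit for it. Register numbers and the exact instruction sequence are
illustrative, not actual compiler output:

    uniform float i;

    float4 main() : sv_target
    {
        float4 arr[3] = {float4(1.0, 2.0, 3.0, 4.0),
                float4(5.0, 6.0, 7.0, 8.0),
                float4(9.0, 10.0, 11.0, 12.0)};

        /* Non-constant index: arr has to live in an indexable temp (x#). */
        return arr[i];
    }

    dcl_indexableTemp x0[3], 4
    ...
    ftou r0.x, cb0[0].x
    mov r1.xyzw, x0[r0.x + 0].xyzw
    mov o0.xyzw, r1.xyzw

The x0[r0.x + 0] operand is encoded with VKD3D_SM4_ADDRESSING_RELATIVE (plus
VKD3D_SM4_ADDRESSING_OFFSET when the constant part is nonzero), which is what
sm4_encode_xregister() and the new paths in write_sm4_instruction() produce;
the dcl_indexableTemp declaration comes from write_sm4_dcl_indexable_temp().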
diff --git a/libs/vkd3d-shader/tpf.c b/libs/vkd3d-shader/tpf.c
index d066b13e..5b1b96a2 100644
--- a/libs/vkd3d-shader/tpf.c
+++ b/libs/vkd3d-shader/tpf.c
@@ -3316,6 +3316,16 @@ struct sm4_register
     unsigned int mod;
 };
 
+struct sm4_xregister
+{
+    struct sm4_register r;
+
+    /* Relative addressing to be added to the constant offsets in r.idx[·]. */
+    bool idx_has_reg[3];
+    struct sm4_register idx_reg[3];
+    unsigned int idx_reg_swizzle[3];
+};
+
 struct sm4_instruction
 {
     enum vkd3d_sm4_opcode opcode;
@@ -3325,14 +3335,22 @@ struct sm4_instruction
 
     struct sm4_dst_register
     {
-        struct sm4_register reg;
+        union
+        {
+            struct sm4_xregister xreg;
+            struct sm4_register reg;
+        };
         unsigned int writemask;
     } dsts[2];
     unsigned int dst_count;
 
     struct sm4_src_register
     {
-        struct sm4_register reg;
+        union
+        {
+            struct sm4_xregister xreg;
+            struct sm4_register reg;
+        };
         enum vkd3d_sm4_swizzle_type swizzle_type;
         unsigned int swizzle;
     } srcs[5];
@@ -3344,11 +3362,64 @@ struct sm4_instruction
     unsigned int idx_count;
 };
 
-static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *reg,
+static void sm4_register_from_node(struct sm4_register *reg, unsigned int *writemask,
+        enum vkd3d_sm4_swizzle_type *swizzle_type, const struct hlsl_ir_node *instr)
+{
+    assert(instr->reg.allocated);
+    reg->type = VKD3D_SM4_RT_TEMP;
+    reg->dim = VKD3D_SM4_DIMENSION_VEC4;
+    if (swizzle_type)
+        *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4;
+    reg->idx[0] = instr->reg.id;
+    reg->idx_count = 1;
+    *writemask = instr->reg.writemask;
+}
+
+static void sm4_xregister_set_idxs_from_deref(struct hlsl_ctx *ctx, struct sm4_xregister *xreg,
+        const struct hlsl_deref *deref, unsigned int *writemask)
+{
+    const struct hlsl_ir_var *var = deref->var;
+    unsigned int offset = 0;
+
+    xreg->r.idx[0] = var->regs[HLSL_REGSET_NUMERIC].id;
+    xreg->idx_has_reg[0] = false;
+    xreg->r.idx_count = 1;
+
+    if (!var->indexable)
+    {
+        offset = hlsl_offset_from_deref_safe(ctx, deref);
+        xreg->r.idx[0] += offset / 4;
+    }
+    else
+    {
+        offset = deref->offset_const;
+        xreg->r.idx[1] = offset / 4;
+        xreg->idx_has_reg[1] = false;
+        xreg->r.idx_count = 2;
+
+        if (deref->offset.node)
+        {
+            unsigned int idx_reg_writemask;
+
+            xreg->idx_has_reg[1] = true;
+            sm4_register_from_node(&xreg->idx_reg[1], &idx_reg_writemask, NULL, deref->offset.node);
+            xreg->idx_reg_swizzle[1] = hlsl_swizzle_from_writemask(idx_reg_writemask) & 0x3; /* NOTE: Native applies this mask to the swizzle. */
+        }
+    }
+
+    *writemask = 0xf & (0xf << (offset % 4));
+    if (var->regs[HLSL_REGSET_NUMERIC].writemask)
+        *writemask = hlsl_combine_writemasks(var->regs[HLSL_REGSET_NUMERIC].writemask, *writemask);
+}
+
+static void sm4_xregister_from_deref(struct hlsl_ctx *ctx, struct sm4_xregister *xreg,
         unsigned int *writemask, enum vkd3d_sm4_swizzle_type *swizzle_type, const struct hlsl_deref *deref,
         const struct hlsl_type *data_type)
 {
     const struct hlsl_ir_var *var = deref->var;
+    struct sm4_register *reg = &xreg->r;
+
+    memset(xreg, 0, sizeof(*xreg));
     if (var->is_uniform)
     {
@@ -3424,16 +3495,12 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
         }
         else
         {
-            struct hlsl_reg hlsl_reg = hlsl_reg_from_deref(ctx, deref);
-
-            assert(hlsl_reg.allocated);
             reg->type = VKD3D_SM4_RT_INPUT;
             reg->dim = VKD3D_SM4_DIMENSION_VEC4;
             if (swizzle_type)
                 *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4;
-            reg->idx[0] = hlsl_reg.id;
-            reg->idx_count = 1;
-            *writemask = hlsl_reg.writemask;
+
+            sm4_xregister_set_idxs_from_deref(ctx, xreg, deref, writemask);
         }
     }
     else if (var->is_output_semantic)
@@ -3458,28 +3525,22 @@ static void sm4_register_from_deref(struct hlsl_ctx *ctx, struct sm4_register *r
         }
         else
         {
-            struct hlsl_reg hlsl_reg = hlsl_reg_from_deref(ctx, deref);
-
-            assert(hlsl_reg.allocated);
             reg->type = VKD3D_SM4_RT_OUTPUT;
             reg->dim = VKD3D_SM4_DIMENSION_VEC4;
-            reg->idx[0] = hlsl_reg.id;
-            reg->idx_count = 1;
-            *writemask = hlsl_reg.writemask;
+            if (swizzle_type)
+                *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4;
+
+            sm4_xregister_set_idxs_from_deref(ctx, xreg, deref, writemask);
         }
     }
     else
     {
-        struct hlsl_reg hlsl_reg = hlsl_reg_from_deref(ctx, deref);
-
-        assert(hlsl_reg.allocated);
-        reg->type = VKD3D_SM4_RT_TEMP;
+        reg->type = deref->var->indexable ? VKD3D_SM4_RT_INDEXABLE_TEMP : VKD3D_SM4_RT_TEMP;
         reg->dim = VKD3D_SM4_DIMENSION_VEC4;
         if (swizzle_type)
             *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4;
-        reg->idx[0] = hlsl_reg.id;
-        reg->idx_count = 1;
-        *writemask = hlsl_reg.writemask;
+
+        sm4_xregister_set_idxs_from_deref(ctx, xreg, deref, writemask);
    }
 }
 
@@ -3488,27 +3549,16 @@ static void sm4_src_from_deref(struct hlsl_ctx *ctx, struct sm4_src_register *sr
 {
     unsigned int writemask;
 
-    sm4_register_from_deref(ctx, &src->reg, &writemask, &src->swizzle_type, deref, data_type);
+    sm4_xregister_from_deref(ctx, &src->xreg, &writemask, &src->swizzle_type, deref, data_type);
     if (src->swizzle_type == VKD3D_SM4_SWIZZLE_VEC4)
         src->swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask);
 }
 
-static void sm4_register_from_node(struct sm4_register *reg, unsigned int *writemask,
-        enum vkd3d_sm4_swizzle_type *swizzle_type, const struct hlsl_ir_node *instr)
-{
-    assert(instr->reg.allocated);
-    reg->type = VKD3D_SM4_RT_TEMP;
-    reg->dim = VKD3D_SM4_DIMENSION_VEC4;
-    *swizzle_type = VKD3D_SM4_SWIZZLE_VEC4;
-    reg->idx[0] = instr->reg.id;
-    reg->idx_count = 1;
-    *writemask = instr->reg.writemask;
-}
-
 static void sm4_dst_from_node(struct sm4_dst_register *dst, const struct hlsl_ir_node *instr)
 {
     unsigned int swizzle_type;
 
+    memset(&dst->xreg, 0, sizeof(dst->xreg));
     sm4_register_from_node(&dst->reg, &dst->writemask, &swizzle_type, instr);
 }
 
@@ -3548,11 +3598,41 @@ static void sm4_src_from_node(struct sm4_src_register *src,
         return;
     }
 
+    memset(&src->xreg, 0, sizeof(src->xreg));
     sm4_register_from_node(&src->reg, &writemask, &src->swizzle_type, instr);
     if (src->swizzle_type == VKD3D_SM4_SWIZZLE_VEC4)
         src->swizzle = hlsl_map_swizzle(hlsl_swizzle_from_writemask(writemask), map_writemask);
 }
 
+static unsigned int sm4_get_index_addressing_from_xreg(const struct sm4_xregister *xreg,
+        unsigned int i, unsigned int *size)
+{
+    if (i >= xreg->r.idx_count)
+    {
+        *size = 0;
+        return 0;
+    }
+
+    if (xreg->idx_has_reg[i])
+    {
+        if (xreg->r.idx[i] == 0)
+        {
+            *size = 1 + xreg->idx_reg[i].idx_count;
+            return VKD3D_SM4_ADDRESSING_RELATIVE;
+        }
+        else
+        {
+            *size = 2 + xreg->idx_reg[i].idx_count;
+            return VKD3D_SM4_ADDRESSING_RELATIVE | VKD3D_SM4_ADDRESSING_OFFSET;
+        }
+    }
+    else
+    {
+        *size = 1;
+        return 0;
+    }
+}
+
 static uint32_t sm4_encode_register(const struct sm4_register *reg)
 {
     return (reg->type << VKD3D_SM4_REGISTER_TYPE_SHIFT)
@@ -3560,27 +3640,53 @@ static uint32_t sm4_encode_register(const struct sm4_register *reg)
             | (reg->dim << VKD3D_SM4_DIMENSION_SHIFT);
 }
 
-static uint32_t sm4_register_order(const struct sm4_register *reg)
+static uint32_t sm4_encode_xregister(const struct sm4_xregister *xreg)
+{
+    unsigned int idx_size;
+    unsigned int idx0_addressing = sm4_get_index_addressing_from_xreg(xreg, 0, &idx_size);
+    unsigned int idx1_addressing = sm4_get_index_addressing_from_xreg(xreg, 1, &idx_size);
+    unsigned int idx2_addressing = sm4_get_index_addressing_from_xreg(xreg, 2, &idx_size);
+
+    return (xreg->r.type << VKD3D_SM4_REGISTER_TYPE_SHIFT)
+            | (xreg->r.idx_count << VKD3D_SM4_REGISTER_ORDER_SHIFT)
+            | (idx0_addressing << VKD3D_SM4_ADDRESSING_SHIFT0)
+            | (idx1_addressing << VKD3D_SM4_ADDRESSING_SHIFT1)
+            | (idx2_addressing << VKD3D_SM4_ADDRESSING_SHIFT2)
+            | (xreg->r.dim << VKD3D_SM4_DIMENSION_SHIFT);
+}
+
+static uint32_t sm4_xregister_order(const struct sm4_xregister *xreg)
 {
     uint32_t order = 1;
-    if (reg->type == VKD3D_SM4_RT_IMMCONST)
-        order += reg->dim == VKD3D_SM4_DIMENSION_VEC4 ? 4 : 1;
-    order += reg->idx_count;
-    if (reg->mod)
+    unsigned int i;
+
+    if (xreg->r.type == VKD3D_SM4_RT_IMMCONST)
+        order += xreg->r.dim == VKD3D_SM4_DIMENSION_VEC4 ? 4 : 1;
+
+    for (i = 0; i < xreg->r.idx_count; i++)
+    {
+        unsigned int size;
+
+        sm4_get_index_addressing_from_xreg(xreg, i, &size);
+        order += size;
+    }
+
+    if (xreg->r.mod)
         ++order;
+
     return order;
 }
 
 static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const struct sm4_instruction *instr)
 {
     uint32_t token = instr->opcode;
-    unsigned int size = 1, i, j;
+    unsigned int size = 1, i, j, k;
 
     size += instr->modifier_count;
     for (i = 0; i < instr->dst_count; ++i)
-        size += sm4_register_order(&instr->dsts[i].reg);
+        size += sm4_xregister_order(&instr->dsts[i].xreg);
     for (i = 0; i < instr->src_count; ++i)
-        size += sm4_register_order(&instr->srcs[i].reg);
+        size += sm4_xregister_order(&instr->srcs[i].xreg);
     size += instr->idx_count;
     if (instr->byte_stride)
         ++size;
@@ -3601,18 +3707,43 @@ static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const st
 
     for (i = 0; i < instr->dst_count; ++i)
     {
-        token = sm4_encode_register(&instr->dsts[i].reg);
+        token = sm4_encode_xregister(&instr->dsts[i].xreg);
         if (instr->dsts[i].reg.dim == VKD3D_SM4_DIMENSION_VEC4)
             token |= instr->dsts[i].writemask << VKD3D_SM4_WRITEMASK_SHIFT;
         put_u32(buffer, token);
 
         for (j = 0; j < instr->dsts[i].reg.idx_count; ++j)
-            put_u32(buffer, instr->dsts[i].reg.idx[j]);
+        {
+            unsigned int addressing, idx_size;
+
+            addressing = sm4_get_index_addressing_from_xreg(&instr->dsts[i].xreg, j, &idx_size);
+
+            if (addressing & VKD3D_SM4_ADDRESSING_RELATIVE)
+            {
+                const struct sm4_register *idx_reg = &instr->dsts[i].xreg.idx_reg[j];
+                uint32_t idx_reg_swizzle = instr->dsts[i].xreg.idx_reg_swizzle[j];
+
+                token = sm4_encode_register(idx_reg);
+                token |= VKD3D_SM4_SWIZZLE_SCALAR << VKD3D_SM4_SWIZZLE_TYPE_SHIFT;
+                token |= idx_reg_swizzle << VKD3D_SM4_SWIZZLE_SHIFT;
+                put_u32(buffer, token);
+
+                for (k = 0; k < idx_reg->idx_count; ++k)
+                    put_u32(buffer, idx_reg->idx[k]);
+
+                if (addressing & VKD3D_SM4_ADDRESSING_OFFSET)
+                    put_u32(buffer, instr->dsts[i].xreg.r.idx[j]);
+            }
+            else
+            {
+                put_u32(buffer, instr->dsts[i].reg.idx[j]);
+            }
+        }
     }
 
     for (i = 0; i < instr->src_count; ++i)
     {
-        token = sm4_encode_register(&instr->srcs[i].reg);
+        token = sm4_encode_xregister(&instr->srcs[i].xreg);
         token |= (uint32_t)instr->srcs[i].swizzle_type << VKD3D_SM4_SWIZZLE_TYPE_SHIFT;
         token |= instr->srcs[i].swizzle << VKD3D_SM4_SWIZZLE_SHIFT;
         if (instr->srcs[i].reg.mod)
@@ -3624,8 +3755,34 @@ static void write_sm4_instruction(struct vkd3d_bytecode_buffer *buffer, const st
                     | VKD3D_SM4_EXTENDED_OPERAND_MODIFIER);
 
         for (j = 0; j < instr->srcs[i].reg.idx_count; ++j)
-            put_u32(buffer, instr->srcs[i].reg.idx[j]);
+        {
+            unsigned int addressing, idx_size;
+
+            addressing = sm4_get_index_addressing_from_xreg(&instr->srcs[i].xreg, j, &idx_size);
+
+            if (addressing & VKD3D_SM4_ADDRESSING_RELATIVE)
+            {
+                const struct sm4_register *idx_reg = &instr->srcs[i].xreg.idx_reg[j];
+                uint32_t idx_reg_swizzle = instr->srcs[i].xreg.idx_reg_swizzle[j];
+
+                token = sm4_encode_register(idx_reg);
+                token |= VKD3D_SM4_SWIZZLE_SCALAR << VKD3D_SM4_SWIZZLE_TYPE_SHIFT;
+                token |= idx_reg_swizzle << VKD3D_SM4_SWIZZLE_SHIFT;
+                put_u32(buffer, token);
+
+                for (k = 0; k < idx_reg->idx_count; ++k)
+                    put_u32(buffer, idx_reg->idx[k]);
+
+                if (addressing & VKD3D_SM4_ADDRESSING_OFFSET)
+                    put_u32(buffer, instr->srcs[i].xreg.r.idx[j]);
+            }
+            else
+            {
+                put_u32(buffer, instr->srcs[i].reg.idx[j]);
+            }
+        }
+
+        /* FIXME: Does this come before or after the previous block? */
         if (instr->srcs[i].reg.type == VKD3D_SM4_RT_IMMCONST)
         {
             put_u32(buffer, instr->srcs[i].reg.immconst_uint[0]);
@@ -3882,6 +4039,20 @@ static void write_sm4_dcl_temps(struct vkd3d_bytecode_buffer *buffer, uint32_t t
     write_sm4_instruction(buffer, &instr);
 }
 
+static void write_sm4_dcl_indexable_temp(struct vkd3d_bytecode_buffer *buffer, uint32_t idx,
+        uint32_t size, uint32_t comp_count)
+{
+    struct sm4_instruction instr =
+    {
+        .opcode = VKD3D_SM4_OP_DCL_INDEXABLE_TEMP,
+
+        .idx = {idx, size, comp_count},
+        .idx_count = 3,
+    };
+
+    write_sm4_instruction(buffer, &instr);
+}
+
 static void write_sm4_dcl_thread_group(struct vkd3d_bytecode_buffer *buffer, const uint32_t thread_count[3])
 {
     struct sm4_instruction instr =
@@ -4313,7 +4484,7 @@ static void write_sm4_store_uav_typed(struct hlsl_ctx *ctx, struct vkd3d_bytecod
     memset(&instr, 0, sizeof(instr));
     instr.opcode = VKD3D_SM5_OP_STORE_UAV_TYPED;
 
-    sm4_register_from_deref(ctx, &instr.dsts[0].reg, &instr.dsts[0].writemask, NULL, dst, dst->var->data_type);
+    sm4_xregister_from_deref(ctx, &instr.dsts[0].xreg, &instr.dsts[0].writemask, NULL, dst, dst->var->data_type);
     instr.dst_count = 1;
 
     sm4_src_from_node(&instr.srcs[0], coords, VKD3DSP_WRITEMASK_ALL);
@@ -5033,7 +5204,7 @@ static void write_sm4_store(struct hlsl_ctx *ctx,
     memset(&instr, 0, sizeof(instr));
     instr.opcode = VKD3D_SM4_OP_MOV;
 
-    sm4_register_from_deref(ctx, &instr.dsts[0].reg, &writemask, NULL, &store->lhs, rhs->data_type);
+    sm4_xregister_from_deref(ctx, &instr.dsts[0].xreg, &writemask, NULL, &store->lhs, rhs->data_type);
     instr.dsts[0].writemask = hlsl_combine_writemasks(writemask, store->writemask);
     instr.dst_count = 1;
 
@@ -5148,6 +5319,7 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx,
     struct vkd3d_bytecode_buffer buffer = {0};
     unsigned int extern_resources_count, i;
     const struct hlsl_buffer *cbuffer;
+    const struct hlsl_scope *scope;
     const struct hlsl_ir_var *var;
     size_t token_count_position;
 
@@ -5202,6 +5374,25 @@ static void write_sm4_shdr(struct hlsl_ctx *ctx,
     if (ctx->temp_count)
         write_sm4_dcl_temps(&buffer, ctx->temp_count);
 
+    LIST_FOR_EACH_ENTRY(scope, &ctx->scopes, struct hlsl_scope, entry)
+    {
+        LIST_FOR_EACH_ENTRY(var, &scope->vars, struct hlsl_ir_var, scope_entry)
+        {
+            if (var->is_uniform || var->is_input_semantic || var->is_output_semantic)
+                continue;
+            if (!var->regs[HLSL_REGSET_NUMERIC].allocated)
+                continue;
+
+            if (var->indexable)
+            {
+                unsigned int id = var->regs[HLSL_REGSET_NUMERIC].id;
+                unsigned int size = align(var->data_type->reg_size[HLSL_REGSET_NUMERIC], 4) / 4;
+
+                write_sm4_dcl_indexable_temp(&buffer, id, size, 4);
+            }
+        }
+    }
+
     write_sm4_block(ctx, &buffer, &entry_func->body);
 
     write_sm4_ret(&buffer);
diff --git a/tests/array-index-expr.shader_test b/tests/array-index-expr.shader_test
index 0a83080c..b88c54a9 100644
--- a/tests/array-index-expr.shader_test
+++ b/tests/array-index-expr.shader_test
@@ -1,4 +1,4 @@
-[pixel shader todo]
+[pixel shader]
 uniform float4 f[3];
 uniform float2 i;
 
@@ -12,17 +12,17 @@ uniform 0 float4 1.0 2.0 3.0 4.0
 uniform 4 float4 5.0 6.0 7.0 8.0
 uniform 8 float4 9.0 10.0 11.0 12.0
 uniform 12 float4 0 0 0 0
-todo draw quad
-todo probe all rgba (1.0, 2.0, 3.0, 4.0)
+draw quad
+probe all rgba (1.0, 2.0, 3.0, 4.0)
 uniform 12 float4 1 0 0 0
-todo draw quad
-todo probe all rgba (5.0, 6.0, 7.0, 8.0)
+draw quad
+probe all rgba (5.0, 6.0, 7.0, 8.0)
 uniform 12 float4 0 1 0 0
-todo draw quad
-todo probe all rgba (5.0, 6.0, 7.0, 8.0)
+draw quad
+probe all rgba (5.0, 6.0, 7.0, 8.0)
 uniform 12 float4 1 1 0 0
-todo draw quad
-todo probe all rgba (9.0, 10.0, 11.0, 12.0)
+draw quad
+probe all rgba (9.0, 10.0, 11.0, 12.0)
 
 [pixel shader]
@@ -74,7 +74,7 @@ draw quad
 probe all rgba (24.0, 0.0, 21.0, 1.0)
 
-[pixel shader todo]
+[pixel shader]
 uniform float2 i;
 
 float4 main() : sv_target
@@ -86,14 +86,14 @@ float4 main() : sv_target
 
 [test]
 uniform 0 float4 0 0 0 0
-todo draw quad
-todo probe all rgba (1.0, 2.0, 3.0, 4.0)
+draw quad
+probe all rgba (1.0, 2.0, 3.0, 4.0)
 uniform 0 float4 1 0 0 0
-todo draw quad
-todo probe all rgba (5.0, 6.0, 7.0, 8.0)
+draw quad
+probe all rgba (5.0, 6.0, 7.0, 8.0)
 uniform 0 float4 0 1 0 0
-todo draw quad
-todo probe all rgba (5.0, 6.0, 7.0, 8.0)
+draw quad
+probe all rgba (5.0, 6.0, 7.0, 8.0)
 uniform 0 float4 1 1 0 0
-todo draw quad
-todo probe all rgba (9.0, 10.0, 11.0, 12.0)
+draw quad
+probe all rgba (9.0, 10.0, 11.0, 12.0)
diff --git a/tests/function-return.shader_test b/tests/function-return.shader_test
index cbd29749..80bc5767 100644
--- a/tests/function-return.shader_test
+++ b/tests/function-return.shader_test
@@ -258,7 +258,7 @@ uniform 0 float 0.9
 draw quad
 probe all rgba (0.4, 0.1, 0.7, 0.6) 1
 
-[pixel shader todo]
+[pixel shader]
 
 uniform float4 f[3];
 
@@ -295,21 +295,21 @@ float4 main() : sv_target
 uniform 0 float4 0.3 0.0 0.0 0.0
 uniform 4 float4 0.0 0.0 0.0 0.0
 uniform 8 float4 0.1 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.3, 0.2, 0.6, 0.6) 1
+draw quad
+probe all rgba (0.3, 0.2, 0.6, 0.6) 1
 
 uniform 4 float4 0.35 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.3, 0.3, 0.6, 0.6) 1
+draw quad
+probe all rgba (0.3, 0.3, 0.6, 0.6) 1
 
 uniform 8 float4 0.5 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.3, 0.5, 0.6, 0.6) 1
+draw quad
+probe all rgba (0.3, 0.5, 0.6, 0.6) 1
 
 uniform 0 float4 1.0 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.3, 0.5, 0.6, 0.6) 1
+draw quad
+probe all rgba (0.3, 0.5, 0.6, 0.6) 1
 
 uniform 4 float4 2.0 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.4, 0.1, 0.6, 0.6) 1
+draw quad
+probe all rgba (0.4, 0.1, 0.6, 0.6) 1
diff --git a/tests/hlsl-matrix-indexing.shader_test b/tests/hlsl-matrix-indexing.shader_test
index a57d8fb8..f44ba63e 100644
--- a/tests/hlsl-matrix-indexing.shader_test
+++ b/tests/hlsl-matrix-indexing.shader_test
@@ -124,7 +124,7 @@ draw quad
 probe all rgba (8, 9, 10, 11)
 
-[pixel shader todo]
+[pixel shader]
 uniform float i;
 
 float4 main() : sv_target
@@ -136,5 +136,5 @@ float4 main() : sv_target
 
 [test]
 uniform 0 float 3
-todo draw quad
-todo probe all rgba (12, 13, 14, 15)
+draw quad
+probe all rgba (12, 13, 14, 15)
diff --git a/tests/return.shader_test b/tests/return.shader_test
index 9f800d1a..30fcd65b 100644
--- a/tests/return.shader_test
+++ b/tests/return.shader_test
@@ -217,7 +217,7 @@ uniform 0 float 0.8
 draw quad
 probe all rgba (0.5, 0.5, 0.5, 0.5)
 
-[pixel shader todo]
+[pixel shader]
 
 uniform float4 f[3];
 
@@ -243,21 +243,21 @@ void main(out float4 ret : sv_target)
 uniform 0 float4 0.3 0.0 0.0 0.0
 uniform 4 float4 0.0 0.0 0.0 0.0
 uniform 8 float4 0.1 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.1, 0.1, 0.1, 0.1) 1
+draw quad
+probe all rgba (0.1, 0.1, 0.1, 0.1) 1
 
 uniform 4 float4 0.35 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.2, 0.2, 0.2, 0.2) 1
+draw quad
+probe all rgba (0.2, 0.2, 0.2, 0.2) 1
 
 uniform 8 float4 0.5 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.4, 0.4, 0.4, 0.4) 1
+draw quad
+probe all rgba (0.4, 0.4, 0.4, 0.4) 1
 
 uniform 0 float4 1.0 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.4, 0.4, 0.4, 0.4) 1
+draw quad
+probe all rgba (0.4, 0.4, 0.4, 0.4) 1
 
 uniform 4 float4 2.0 0.0 0.0 0.0
-todo draw quad
-todo probe all rgba (0.9, 0.9, 0.9, 0.9) 1
+draw quad
+probe all rgba (0.9, 0.9, 0.9, 0.9) 1