+/* Translate a D3D12_GRAPHICS_PIPELINE_STATE_DESC into the unified
+ * d3d12_pipeline_state_desc representation. This is a straight
+ * field-by-field copy and always returns S_OK. */
+static HRESULT pipeline_state_desc_from_d3d12_graphics_desc(struct d3d12_pipeline_state_desc *desc,
+        const D3D12_GRAPHICS_PIPELINE_STATE_DESC *d3d12_desc)
+{
+    memset(desc, 0, sizeof(*desc));
+    desc->root_signature = d3d12_desc->pRootSignature;
+    desc->vs = d3d12_desc->VS;
+    desc->ps = d3d12_desc->PS;
+    desc->ds = d3d12_desc->DS;
+    desc->hs = d3d12_desc->HS;
+    desc->gs = d3d12_desc->GS;
+    desc->stream_output = d3d12_desc->StreamOutput;
+    desc->blend_state = d3d12_desc->BlendState;
+    desc->sample_mask = d3d12_desc->SampleMask;
+    desc->rasterizer_state = d3d12_desc->RasterizerState;
+    /* memcpy() rather than assignment — presumably the destination field is a
+     * wider type than D3D12_DEPTH_STENCIL_DESC, so only the source-sized
+     * prefix is copied; TODO confirm against the struct definition. */
+    memcpy(&desc->depth_stencil_state, &d3d12_desc->DepthStencilState, sizeof(d3d12_desc->DepthStencilState));
+    desc->input_layout = d3d12_desc->InputLayout;
+    desc->strip_cut_value = d3d12_desc->IBStripCutValue;
+    desc->primitive_topology_type = d3d12_desc->PrimitiveTopologyType;
+    desc->rtv_formats.NumRenderTargets = d3d12_desc->NumRenderTargets;
+    /* Note sizeof() of the destination: the whole RTFormats array is copied,
+     * not just the first NumRenderTargets entries. */
+    memcpy(desc->rtv_formats.RTFormats, d3d12_desc->RTVFormats, sizeof(desc->rtv_formats.RTFormats));
+    desc->dsv_format = d3d12_desc->DSVFormat;
+    desc->sample_desc = d3d12_desc->SampleDesc;
+    desc->node_mask = d3d12_desc->NodeMask;
+    desc->cached_pso = d3d12_desc->CachedPSO;
+    desc->flags = d3d12_desc->Flags;
+    return S_OK;
+}
This can't fail, right?
+/* Translate a D3D12_COMPUTE_PIPELINE_STATE_DESC into the unified
+ * d3d12_pipeline_state_desc representation. Graphics-only fields are left
+ * zeroed by the memset(); always returns S_OK. */
+static HRESULT pipeline_state_desc_from_d3d12_compute_desc(struct d3d12_pipeline_state_desc *desc,
+        const D3D12_COMPUTE_PIPELINE_STATE_DESC *d3d12_desc)
+{
+    memset(desc, 0, sizeof(*desc));
+    desc->root_signature = d3d12_desc->pRootSignature;
+    desc->cs = d3d12_desc->CS;
+    desc->node_mask = d3d12_desc->NodeMask;
+    desc->cached_pso = d3d12_desc->CachedPSO;
+    desc->flags = d3d12_desc->Flags;
+    return S_OK;
+}
Likewise.
+/* Translate a D3D12_PIPELINE_STATE_STREAM_DESC — a packed byte stream of
+ * typed subobjects — into the unified d3d12_pipeline_state_desc
+ * representation. */
+static HRESULT pipeline_state_desc_from_d3d12_stream_desc(struct d3d12_pipeline_state_desc *desc,
+        const D3D12_PIPELINE_STATE_STREAM_DESC *d3d12_desc, VkPipelineBindPoint *vk_bind_point)
+{
+    D3D12_PIPELINE_STATE_SUBOBJECT_TYPE subobject_type;
+    const uint8_t *stream_ptr, *stream_end;
+    uint64_t defined_subobjects = 0; /* one bit per subobject type already seen */
+    uint64_t subobject_bit;
+    char *desc_char; /* byte view of *desc, for offset-based stores */
We might as well make "desc_char" a uint8_t pointer as well.
+    stream_ptr = d3d12_desc->pPipelineStateSubobjectStream;
+    stream_end = stream_ptr + d3d12_desc->SizeInBytes;
+    desc_char = (char *)desc;
+
+    while (stream_ptr < stream_end)
+    {
+        /* Every packet begins with its subobject type token; make sure the
+         * token itself fits in the remaining stream. */
+        if (!vkd3d_bound_range(0, sizeof(subobject_type), stream_end - stream_ptr))
+        {
+            WARN("Invalid pipeline state stream.\n");
+            return E_INVALIDARG;
+        }
+
+        subobject_type = *(const D3D12_PIPELINE_STATE_SUBOBJECT_TYPE *)stream_ptr;
+        if (subobject_type >= ARRAY_SIZE(subobject_info))
+        {
+            FIXME("Unhandled pipeline subobject type %#x.\n", subobject_type);
+            return E_INVALIDARG;
+        }
+
+        /* Each subobject type may appear at most once in the stream. */
+        subobject_bit = 1ull << subobject_type;
+        if (defined_subobjects & subobject_bit)
+        {
+            WARN("Duplicate pipeline subobject type %u.\n", subobject_type);
+            return E_INVALIDARG;
+        }
+        defined_subobjects |= subobject_bit;
+
+        /* The payload starts after the type token, aligned to the payload
+         * type's natural alignment. */
+        i = align(sizeof(subobject_type), subobject_info[subobject_type].alignment);
+        size = subobject_info[subobject_type].size;
+
+        if (!vkd3d_bound_range(i, size, stream_end - stream_ptr))
+        {
+            WARN("Invalid pipeline state stream.\n");
+            return E_INVALIDARG;
+        }
+
+        /* Scatter the payload into *desc at the per-type destination offset. */
+        memcpy(&desc_char[subobject_info[subobject_type].dst_offset], &stream_ptr[i], size);
+        /* Stream packets are aligned to the size of pointers. */
+        stream_ptr += align(i + size, sizeof(void *));
+    }
It might be slightly nicer to get rid of "stream_end", and just index the stream from the start instead of adjusting the pointer. I'm not going to insist on it though.
+    struct d3d12_sample_mask_subobject
+    {
+        /* NOTE(review): aligning the leading type token to pointer size raises
+         * the struct's overall alignment and can add trailing padding; since
+         * "type" is at offset 0 it should not add padding between "type" and
+         * "sample_mask" (assuming the enum is 4 bytes) — verify sizeof/offsets
+         * match the packed runtime stream layout. */
+        DECLSPEC_ALIGN(sizeof(void *)) D3D12_PIPELINE_STATE_SUBOBJECT_TYPE type;
+        UINT sample_mask;
+    };
Does that do the right thing? It seems like this would introduce padding between "type" and "sample_mask" that previously wasn't there. The tests seem to pass on the CI, but...