Source descriptors are copied to separate arrays to facilitate the use of pre-initialised Vulkan structures, and to allow arrayed writes where possible.
Signed-off-by: Conor McCarthy cmccarthy@codeweavers.com --- libs/vkd3d/device.c | 159 +++++++++++++++++++++++++++++++ libs/vkd3d/resource.c | 190 +++++++++++++++++++++++++++++++------ libs/vkd3d/vkd3d_private.h | 24 ++++- 3 files changed, 344 insertions(+), 29 deletions(-)
diff --git a/libs/vkd3d/device.c b/libs/vkd3d/device.c index 5fea705f..fbdb9f96 100644 --- a/libs/vkd3d/device.c +++ b/libs/vkd3d/device.c @@ -3558,6 +3558,142 @@ static void STDMETHODCALLTYPE d3d12_device_CreateSampler(ID3D12Device *iface, d3d12_desc_write_atomic(d3d12_desc_from_cpu_handle(descriptor), &tmp, device); }
+static void flush_desc_writes(struct d3d12_desc_copy_location locations[][VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE], + struct d3d12_desc_copy_info *infos, struct d3d12_descriptor_heap *descriptor_heap, struct d3d12_device *device) +{ + enum vkd3d_vk_descriptor_set_index set; + for (set = 0; set < VKD3D_SET_INDEX_COUNT; ++set) + { + if (!infos[set].count) + continue; + d3d12_desc_copy_vk_heap_range(locations[set], &infos[set], descriptor_heap, set, device); + infos[set].count = 0; + infos[set].uav_counter = false; + } +} + +static void d3d12_desc_buffered_copy_atomic(struct d3d12_desc *dst, const struct d3d12_desc *src, + struct d3d12_desc_copy_location locations[][VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE], + struct d3d12_desc_copy_info *infos, struct d3d12_descriptor_heap *descriptor_heap, struct d3d12_device *device) +{ + struct d3d12_desc_copy_location *location; + enum vkd3d_vk_descriptor_set_index set; + pthread_mutex_t *mutex; + + mutex = d3d12_device_get_descriptor_mutex(device, src); + pthread_mutex_lock(mutex); + + if (src->magic == VKD3D_DESCRIPTOR_MAGIC_FREE) + { + /* Source must be unlocked first, and therefore can't be used as a null source. */ + static const struct d3d12_desc null = {0}; + pthread_mutex_unlock(mutex); + d3d12_desc_write_atomic(dst, &null, device); + return; + } + + set = vkd3d_vk_descriptor_set_index_from_vk_descriptor_type(src->vk_descriptor_type); + location = &locations[set][infos[set].count++]; + + location->src = *src; + + if (location->src.magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) + vkd3d_view_incref(location->src.u.view); + + pthread_mutex_unlock(mutex); + + infos[set].uav_counter |= (location->src.magic == VKD3D_DESCRIPTOR_MAGIC_UAV) + & !!location->src.u.view->vk_counter_view; + location->dst = dst; + + if (infos[set].count == ARRAY_SIZE(locations[0])) + { + d3d12_desc_copy_vk_heap_range(locations[set], &infos[set], descriptor_heap, set, device); + infos[set].count = 0; + infos[set].uav_counter = false; + } +} + +/* Some games, e.g. 
Control, copy a large number of descriptors per frame, so the + * speed of this function is critical. */ +static void d3d12_device_vk_heaps_copy_descriptors(struct d3d12_device *device, + UINT dst_descriptor_range_count, const D3D12_CPU_DESCRIPTOR_HANDLE *dst_descriptor_range_offsets, + const UINT *dst_descriptor_range_sizes, + UINT src_descriptor_range_count, const D3D12_CPU_DESCRIPTOR_HANDLE *src_descriptor_range_offsets, + const UINT *src_descriptor_range_sizes) +{ + struct d3d12_desc_copy_location locations[VKD3D_SET_INDEX_COUNT][VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE]; + unsigned int dst_range_idx, dst_idx, src_range_idx, src_idx; + /* The locations array is relatively large, and often mostly empty. Keeping these + * values together in a separate array will likely result in fewer cache misses. */ + struct d3d12_desc_copy_info infos[VKD3D_SET_INDEX_COUNT]; + struct d3d12_descriptor_heap *descriptor_heap = NULL; + const struct d3d12_desc *src, *heap_base, *heap_end; + unsigned int dst_range_size, src_range_size; + struct d3d12_desc *dst; + + descriptor_heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&device->gpu_descriptor_allocator, + d3d12_desc_from_cpu_handle(dst_descriptor_range_offsets[0])); + heap_base = (const struct d3d12_desc *)descriptor_heap->descriptors; + heap_end = heap_base + descriptor_heap->desc.NumDescriptors; + + memset(infos, 0, sizeof(infos)); + dst_range_idx = dst_idx = 0; + src_range_idx = src_idx = 0; + while (dst_range_idx < dst_descriptor_range_count && src_range_idx < src_descriptor_range_count) + { + dst_range_size = dst_descriptor_range_sizes ? dst_descriptor_range_sizes[dst_range_idx] : 1; + src_range_size = src_descriptor_range_sizes ? 
src_descriptor_range_sizes[src_range_idx] : 1; + + dst = d3d12_desc_from_cpu_handle(dst_descriptor_range_offsets[dst_range_idx]); + src = d3d12_desc_from_cpu_handle(src_descriptor_range_offsets[src_range_idx]); + + if (dst < heap_base || dst >= heap_end) + { + flush_desc_writes(locations, infos, descriptor_heap, device); + descriptor_heap = vkd3d_gpu_descriptor_allocator_heap_from_descriptor(&device->gpu_descriptor_allocator, + dst); + heap_base = (const struct d3d12_desc *)descriptor_heap->descriptors; + heap_end = heap_base + descriptor_heap->desc.NumDescriptors; + } + + for (; dst_idx < dst_range_size && src_idx < src_range_size; src_idx++, dst_idx++) + { + /* See comment in d3d12_desc_copy_atomic() regarding the thread safety of this check. */ + if (dst[dst_idx].magic == src[src_idx].magic) + { + if (dst[dst_idx].magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) + { + if (dst[dst_idx].u.view == src[src_idx].u.view) + continue; + } + else if (dst[dst_idx].u.vk_cbv_info.buffer == src[src_idx].u.vk_cbv_info.buffer + && dst[dst_idx].u.vk_cbv_info.offset == src[src_idx].u.vk_cbv_info.offset + && dst[dst_idx].u.vk_cbv_info.range == src[src_idx].u.vk_cbv_info.range) + { + continue; + } + } + d3d12_desc_buffered_copy_atomic(&dst[dst_idx], &src[src_idx], locations, infos, descriptor_heap, device); + } + + if (dst_idx >= dst_range_size) + { + ++dst_range_idx; + dst_idx = 0; + } + if (src_idx >= src_range_size) + { + ++src_range_idx; + src_idx = 0; + } + } + + flush_desc_writes(locations, infos, descriptor_heap, device); +} + +#define VKD3D_DESCRIPTOR_OPTIMISED_COPY_MIN_COUNT 8 + static void STDMETHODCALLTYPE d3d12_device_CopyDescriptors(ID3D12Device *iface, UINT dst_descriptor_range_count, const D3D12_CPU_DESCRIPTOR_HANDLE *dst_descriptor_range_offsets, const UINT *dst_descriptor_range_sizes, @@ -3586,6 +3722,18 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptors(ID3D12Device *iface, return; }
+ if (!dst_descriptor_range_count) + return; + + if (device->use_vk_heaps && (dst_descriptor_range_count > 1 || (dst_descriptor_range_sizes + && dst_descriptor_range_sizes[0] >= VKD3D_DESCRIPTOR_OPTIMISED_COPY_MIN_COUNT))) + { + d3d12_device_vk_heaps_copy_descriptors(device, dst_descriptor_range_count, dst_descriptor_range_offsets, + dst_descriptor_range_sizes, src_descriptor_range_count, src_descriptor_range_offsets, + src_descriptor_range_sizes); + return; + } + dst_range_idx = dst_idx = 0; src_range_idx = src_idx = 0; while (dst_range_idx < dst_descriptor_range_count && src_range_idx < src_descriptor_range_count) @@ -3622,6 +3770,17 @@ static void STDMETHODCALLTYPE d3d12_device_CopyDescriptorsSimple(ID3D12Device *i iface, descriptor_count, dst_descriptor_range_offset.ptr, src_descriptor_range_offset.ptr, descriptor_heap_type);
+ if (descriptor_count >= VKD3D_DESCRIPTOR_OPTIMISED_COPY_MIN_COUNT) + { + struct d3d12_device *device = impl_from_ID3D12Device(iface); + if (device->use_vk_heaps) + { + d3d12_device_vk_heaps_copy_descriptors(device, 1, &dst_descriptor_range_offset, + &descriptor_count, 1, &src_descriptor_range_offset, &descriptor_count); + return; + } + } + d3d12_device_CopyDescriptors(iface, 1, &dst_descriptor_range_offset, &descriptor_count, 1, &src_descriptor_range_offset, &descriptor_count, descriptor_heap_type); } diff --git a/libs/vkd3d/resource.c b/libs/vkd3d/resource.c index 4b979689..fd21e1d5 100644 --- a/libs/vkd3d/resource.c +++ b/libs/vkd3d/resource.c @@ -2122,6 +2122,53 @@ void vkd3d_view_decref(struct vkd3d_view *view, struct d3d12_device *device) vkd3d_view_destroy(view, device); }
+static void d3d12_descriptor_heap_write_vk_descriptor_range(struct d3d12_descriptor_heap_vk_set *descriptor_set, + struct d3d12_desc_copy_location *locations, unsigned int write_count) +{ + unsigned int i, info_index = 0, write_index = 0; + + switch (locations[0].src.vk_descriptor_type) + { + case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: + for (; write_index < write_count; ++write_index) + { + descriptor_set->vk_descriptor_writes[write_index].pBufferInfo = &descriptor_set->vk_buffer_infos[info_index]; + for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index) + descriptor_set->vk_buffer_infos[info_index] = locations[info_index].src.u.vk_cbv_info; + } + break; + case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: + case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: + for (; write_index < write_count; ++write_index) + { + descriptor_set->vk_descriptor_writes[write_index].pImageInfo = &descriptor_set->vk_image_infos[info_index]; + for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index) + descriptor_set->vk_image_infos[info_index].imageView = locations[info_index].src.u.view->u.vk_image_view; + } + break; + case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: + case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: + for (; write_index < write_count; ++write_index) + { + descriptor_set->vk_descriptor_writes[write_index].pTexelBufferView = &descriptor_set->vk_buffer_views[info_index]; + for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index) + descriptor_set->vk_buffer_views[info_index] = locations[info_index].src.u.view->u.vk_buffer_view; + } + break; + case VK_DESCRIPTOR_TYPE_SAMPLER: + for (; write_index < write_count; ++write_index) + { + descriptor_set->vk_descriptor_writes[write_index].pImageInfo = &descriptor_set->vk_image_infos[info_index]; + for (i = 0; i < descriptor_set->vk_descriptor_writes[write_index].descriptorCount; ++i, ++info_index) + 
descriptor_set->vk_image_infos[info_index].sampler = locations[info_index].src.u.view->u.vk_sampler; + } + break; + default: + ERR("Unhandled descriptor type %#x.\n", locations[0].src.vk_descriptor_type); + break; + } +} + /* dst and src contain the same data unless another thread overwrites dst. The array index is * calculated from dst, and src is thread safe. */ static void d3d12_desc_write_vk_heap(const struct d3d12_desc *dst, const struct d3d12_desc *src, @@ -2138,42 +2185,67 @@ static void d3d12_desc_write_vk_heap(const struct d3d12_desc *dst, const struct
pthread_mutex_lock(&descriptor_heap->vk_sets_mutex);
- descriptor_set->vk_descriptor_write.dstArrayElement = dst + descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst - (const struct d3d12_desc *)descriptor_heap->descriptors; + descriptor_set->vk_descriptor_writes[0].descriptorCount = 1; switch (src->vk_descriptor_type) { case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: - descriptor_set->vk_descriptor_write.pBufferInfo = &src->u.vk_cbv_info; + descriptor_set->vk_descriptor_writes[0].pBufferInfo = &src->u.vk_cbv_info; break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - descriptor_set->vk_image_info.imageView = src->u.view->u.vk_image_view; + descriptor_set->vk_image_infos[0].imageView = src->u.view->u.vk_image_view; break; case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER: case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: - descriptor_set->vk_descriptor_write.pTexelBufferView = &src->u.view->u.vk_buffer_view; + descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->u.view->u.vk_buffer_view; break; case VK_DESCRIPTOR_TYPE_SAMPLER: - descriptor_set->vk_image_info.sampler = src->u.view->u.vk_sampler; + descriptor_set->vk_image_infos[0].sampler = src->u.view->u.vk_sampler; break; default: ERR("Unhandled descriptor type %#x.\n", src->vk_descriptor_type); break; } - VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, &descriptor_set->vk_descriptor_write, 0, NULL)); + VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, descriptor_set->vk_descriptor_writes, 0, NULL));
if (src->magic == VKD3D_DESCRIPTOR_MAGIC_UAV && src->u.view->vk_counter_view) { descriptor_set = &descriptor_heap->vk_descriptor_sets[VKD3D_SET_INDEX_UAV_COUNTER]; - descriptor_set->vk_descriptor_write.dstArrayElement = dst + descriptor_set->vk_descriptor_writes[0].dstArrayElement = dst - (const struct d3d12_desc *)descriptor_heap->descriptors; - descriptor_set->vk_descriptor_write.pTexelBufferView = &src->u.view->vk_counter_view; - VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, &descriptor_set->vk_descriptor_write, 0, NULL)); + descriptor_set->vk_descriptor_writes[0].descriptorCount = 1; + descriptor_set->vk_descriptor_writes[0].pTexelBufferView = &src->u.view->vk_counter_view; + VK_CALL(vkUpdateDescriptorSets(device->vk_device, 1, descriptor_set->vk_descriptor_writes, 0, NULL)); }
pthread_mutex_unlock(&descriptor_heap->vk_sets_mutex); }
+static void d3d12_desc_write_atomic_d3d12_only(struct d3d12_desc *dst, const struct d3d12_desc *src, struct d3d12_device *device) +{ + struct vkd3d_view *defunct_view; + pthread_mutex_t *mutex; + + mutex = d3d12_device_get_descriptor_mutex(device, dst); + pthread_mutex_lock(mutex); + + if (!(dst->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) || InterlockedDecrement(&dst->u.view->refcount)) + { + *dst = *src; + pthread_mutex_unlock(mutex); + return; + } + + defunct_view = dst->u.view; + *dst = *src; + pthread_mutex_unlock(mutex); + + /* Destroy the view after unlocking to reduce wait time. */ + vkd3d_view_destroy(defunct_view, device); +} + void d3d12_desc_write_atomic(struct d3d12_desc *dst, const struct d3d12_desc *src, struct d3d12_device *device) { @@ -2207,6 +2279,56 @@ static void d3d12_desc_destroy(struct d3d12_desc *descriptor, struct d3d12_devic d3d12_desc_write_atomic(descriptor, &null_desc, device); }
+void d3d12_desc_copy_vk_heap_range(struct d3d12_desc_copy_location *locations, const struct d3d12_desc_copy_info *info, + struct d3d12_descriptor_heap *descriptor_heap, enum vkd3d_vk_descriptor_set_index set, + struct d3d12_device *device) +{ + struct d3d12_descriptor_heap_vk_set *descriptor_set = &descriptor_heap->vk_descriptor_sets[set]; + const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs; + unsigned int i, write_count; + + pthread_mutex_lock(&descriptor_heap->vk_sets_mutex); + + for (i = 0, write_count = 0; i < info->count; ++i) + { + d3d12_desc_write_atomic_d3d12_only(locations[i].dst, &locations[i].src, device); + + if (i && locations[i].dst == locations[i - 1].dst + 1) + { + ++descriptor_set->vk_descriptor_writes[write_count - 1].descriptorCount; + continue; + } + descriptor_set->vk_descriptor_writes[write_count].dstArrayElement = locations[i].dst + - (const struct d3d12_desc *)descriptor_heap->descriptors; + descriptor_set->vk_descriptor_writes[write_count++].descriptorCount = 1; + } + d3d12_descriptor_heap_write_vk_descriptor_range(descriptor_set, locations, write_count); + /* We could pass a VkCopyDescriptorSet array instead, but that would require also storing a src array index + * for each location, which means querying the src descriptor heap. Contiguous copies require contiguous src + * descriptors as well as dst, which is less likely to occur. And client race conditions may break it. 
*/ + VK_CALL(vkUpdateDescriptorSets(device->vk_device, write_count, descriptor_set->vk_descriptor_writes, 0, NULL)); + + if (!info->uav_counter) + goto done; + + descriptor_set = &descriptor_heap->vk_descriptor_sets[VKD3D_SET_INDEX_UAV_COUNTER]; + + for (i = 0, write_count = 0; i < info->count; ++i) + { + if (!locations[i].src.u.view->vk_counter_view) + continue; + descriptor_set->vk_buffer_views[write_count] = locations[i].src.u.view->vk_counter_view; + descriptor_set->vk_descriptor_writes[write_count].pTexelBufferView = &descriptor_set->vk_buffer_views[write_count]; + descriptor_set->vk_descriptor_writes[write_count].dstArrayElement = locations[i].dst + - (const struct d3d12_desc *)descriptor_heap->descriptors; + descriptor_set->vk_descriptor_writes[write_count++].descriptorCount = 1; + } + VK_CALL(vkUpdateDescriptorSets(device->vk_device, write_count, descriptor_set->vk_descriptor_writes, 0, NULL)); + +done: + pthread_mutex_unlock(&descriptor_heap->vk_sets_mutex); +} + void d3d12_desc_copy(struct d3d12_desc *dst, const struct d3d12_desc *src, struct d3d12_device *device) { @@ -3688,6 +3810,7 @@ static HRESULT d3d12_descriptor_heap_create_descriptor_set(struct d3d12_descript const struct vkd3d_vk_device_procs *vk_procs = &device->vk_procs; VkDescriptorSetVariableDescriptorCountAllocateInfoEXT set_size; VkDescriptorSetAllocateInfo set_desc; + unsigned int i; VkResult vr;
set_desc.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO; @@ -3701,7 +3824,8 @@ static HRESULT d3d12_descriptor_heap_create_descriptor_set(struct d3d12_descript set_size.pDescriptorCounts = &variable_binding_size; if ((vr = VK_CALL(vkAllocateDescriptorSets(device->vk_device, &set_desc, &descriptor_set->vk_set))) >= 0) { - descriptor_set->vk_descriptor_write.dstSet = descriptor_set->vk_set; + for (i = 0; i < ARRAY_SIZE(descriptor_set->vk_descriptor_writes); ++i) + descriptor_set->vk_descriptor_writes[i].dstSet = descriptor_set->vk_set; return S_OK; }
@@ -3729,15 +3853,18 @@ static HRESULT d3d12_descriptor_heap_vk_descriptor_sets_init(struct d3d12_descri for (set = 0; set < ARRAY_SIZE(descriptor_heap->vk_descriptor_sets); ++set) { struct d3d12_descriptor_heap_vk_set *descriptor_set = &descriptor_heap->vk_descriptor_sets[set]; + unsigned int i;
- descriptor_set->vk_descriptor_write.sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; - descriptor_set->vk_descriptor_write.pNext = NULL; - descriptor_set->vk_descriptor_write.dstBinding = 0; - descriptor_set->vk_descriptor_write.descriptorCount = 1; - descriptor_set->vk_descriptor_write.descriptorType = device->vk_descriptor_heap_layouts[set].type; - descriptor_set->vk_descriptor_write.pImageInfo = NULL; - descriptor_set->vk_descriptor_write.pBufferInfo = NULL; - descriptor_set->vk_descriptor_write.pTexelBufferView = NULL; + for (i = 0; i < ARRAY_SIZE(descriptor_set->vk_descriptor_writes); ++i) + { + descriptor_set->vk_descriptor_writes[i].sType = VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET; + descriptor_set->vk_descriptor_writes[i].pNext = NULL; + descriptor_set->vk_descriptor_writes[i].dstBinding = 0; + descriptor_set->vk_descriptor_writes[i].descriptorType = device->vk_descriptor_heap_layouts[set].type; + descriptor_set->vk_descriptor_writes[i].pImageInfo = NULL; + descriptor_set->vk_descriptor_writes[i].pBufferInfo = NULL; + descriptor_set->vk_descriptor_writes[i].pTexelBufferView = NULL; + } switch (device->vk_descriptor_heap_layouts[set].type) { case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER: @@ -3745,19 +3872,28 @@ static HRESULT d3d12_descriptor_heap_vk_descriptor_sets_init(struct d3d12_descri case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: break; case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: - descriptor_set->vk_descriptor_write.pImageInfo = &descriptor_set->vk_image_info; - descriptor_set->vk_image_info.sampler = VK_NULL_HANDLE; - descriptor_set->vk_image_info.imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + descriptor_set->vk_descriptor_writes[0].pImageInfo = &descriptor_set->vk_image_infos[0]; + for (i = 0; i < ARRAY_SIZE(descriptor_set->vk_image_infos); ++i) + { + descriptor_set->vk_image_infos[i].sampler = VK_NULL_HANDLE; + descriptor_set->vk_image_infos[i].imageLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL; + } break; case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: - 
descriptor_set->vk_descriptor_write.pImageInfo = &descriptor_set->vk_image_info; - descriptor_set->vk_image_info.sampler = VK_NULL_HANDLE; - descriptor_set->vk_image_info.imageLayout = VK_IMAGE_LAYOUT_GENERAL; + descriptor_set->vk_descriptor_writes[0].pImageInfo = &descriptor_set->vk_image_infos[0]; + for (i = 0; i < ARRAY_SIZE(descriptor_set->vk_image_infos); ++i) + { + descriptor_set->vk_image_infos[i].sampler = VK_NULL_HANDLE; + descriptor_set->vk_image_infos[i].imageLayout = VK_IMAGE_LAYOUT_GENERAL; + } break; case VK_DESCRIPTOR_TYPE_SAMPLER: - descriptor_set->vk_descriptor_write.pImageInfo = &descriptor_set->vk_image_info; - descriptor_set->vk_image_info.imageView = VK_NULL_HANDLE; - descriptor_set->vk_image_info.imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; + descriptor_set->vk_descriptor_writes[0].pImageInfo = &descriptor_set->vk_image_infos[0]; + for (i = 0; i < ARRAY_SIZE(descriptor_set->vk_image_infos); ++i) + { + descriptor_set->vk_image_infos[i].imageView = VK_NULL_HANDLE; + descriptor_set->vk_image_infos[i].imageLayout = VK_IMAGE_LAYOUT_UNDEFINED; + } break; default: ERR("Unhandled descriptor type %#x.\n", device->vk_descriptor_heap_layouts[set].type); diff --git a/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/vkd3d_private.h index 22533204..051da630 100644 --- a/libs/vkd3d/vkd3d_private.h +++ b/libs/vkd3d/vkd3d_private.h @@ -673,11 +673,15 @@ struct vkd3d_vk_descriptor_heap_layout VkDescriptorSetLayout vk_set_layout; };
+#define VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE 64 + struct d3d12_descriptor_heap_vk_set { VkDescriptorSet vk_set; - VkDescriptorImageInfo vk_image_info; - VkWriteDescriptorSet vk_descriptor_write; + VkDescriptorBufferInfo vk_buffer_infos[VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE]; + VkBufferView vk_buffer_views[VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE]; + VkDescriptorImageInfo vk_image_infos[VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE]; + VkWriteDescriptorSet vk_descriptor_writes[VKD3D_DESCRIPTOR_WRITE_BUFFER_SIZE]; };
/* ID3D12DescriptorHeap */ @@ -703,6 +707,22 @@ struct d3d12_descriptor_heap HRESULT d3d12_descriptor_heap_create(struct d3d12_device *device, const D3D12_DESCRIPTOR_HEAP_DESC *desc, struct d3d12_descriptor_heap **descriptor_heap);
+struct d3d12_desc_copy_location +{ + struct d3d12_desc src; + struct d3d12_desc *dst; +}; + +struct d3d12_desc_copy_info +{ + unsigned int count; + bool uav_counter; +}; + +void d3d12_desc_copy_vk_heap_range(struct d3d12_desc_copy_location *locations, const struct d3d12_desc_copy_info *info, + struct d3d12_descriptor_heap *descriptor_heap, enum vkd3d_vk_descriptor_set_index set, + struct d3d12_device *device); + /* ID3D12QueryHeap */ struct d3d12_query_heap {