Signed-off-by: Alexandre Julliard <julliard@winehq.org>
---
 libs/vkd3d/command.c       | 134 ++++++++++++++++++-------------------
 libs/vkd3d/device.c        |  52 +++++++-------
 libs/vkd3d/resource.c      |  28 ++++----
 libs/vkd3d/state.c         |  12 ++--
 libs/vkd3d/utils.c         |  12 ++--
 libs/vkd3d/vkd3d_private.h |  86 +++++++++++++++++++-----
 6 files changed, 190 insertions(+), 134 deletions(-)
diff --git a/libs/vkd3d/command.c b/libs/vkd3d/command.c
index f1ec6be3fd20..61e18105ee12 100644
--- a/libs/vkd3d/command.c
+++ b/libs/vkd3d/command.c
@@ -32,7 +32,7 @@ HRESULT vkd3d_queue_create(struct d3d12_device *device,
     if (!(object = vkd3d_malloc(sizeof(*object))))
         return E_OUTOFMEMORY;
- if ((rc = pthread_mutex_init(&object->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&object->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); vkd3d_free(object); @@ -67,7 +67,7 @@ void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device) unsigned int i; int rc;
- if ((rc = pthread_mutex_lock(&queue->mutex))) + if ((rc = vkd3d_mutex_lock(&queue->mutex))) ERR("Failed to lock mutex, error %d.\n", rc);
for (i = 0; i < queue->semaphore_count; ++i) @@ -82,9 +82,9 @@ void vkd3d_queue_destroy(struct vkd3d_queue *queue, struct d3d12_device *device) }
if (!rc) - pthread_mutex_unlock(&queue->mutex); + vkd3d_mutex_unlock(&queue->mutex);
- pthread_mutex_destroy(&queue->mutex); + vkd3d_mutex_destroy(&queue->mutex); vkd3d_free(queue); }
@@ -94,7 +94,7 @@ VkQueue vkd3d_queue_acquire(struct vkd3d_queue *queue)
TRACE("queue %p.\n", queue);
- if ((rc = pthread_mutex_lock(&queue->mutex))) + if ((rc = vkd3d_mutex_lock(&queue->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return VK_NULL_HANDLE; @@ -108,7 +108,7 @@ void vkd3d_queue_release(struct vkd3d_queue *queue) { TRACE("queue %p.\n", queue);
- pthread_mutex_unlock(&queue->mutex); + vkd3d_mutex_unlock(&queue->mutex); }
static VkResult vkd3d_queue_wait_idle(struct vkd3d_queue *queue, @@ -144,7 +144,7 @@ static void vkd3d_queue_update_sequence_number(struct vkd3d_queue *queue, unsigned int i, j; int rc;
- if ((rc = pthread_mutex_lock(&queue->mutex))) + if ((rc = vkd3d_mutex_lock(&queue->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -189,7 +189,7 @@ static void vkd3d_queue_update_sequence_number(struct vkd3d_queue *queue, if (destroyed_semaphore_count) TRACE("Destroyed %u Vulkan semaphores.\n", destroyed_semaphore_count);
- pthread_mutex_unlock(&queue->mutex); + vkd3d_mutex_unlock(&queue->mutex); }
static uint64_t vkd3d_queue_reset_sequence_number_locked(struct vkd3d_queue *queue) @@ -253,7 +253,7 @@ static HRESULT vkd3d_enqueue_gpu_fence(struct vkd3d_fence_worker *worker,
TRACE("worker %p, fence %p, value %#"PRIx64".\n", worker, fence, value);
- if ((rc = pthread_mutex_lock(&worker->mutex))) + if ((rc = vkd3d_mutex_lock(&worker->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -263,7 +263,7 @@ static HRESULT vkd3d_enqueue_gpu_fence(struct vkd3d_fence_worker *worker, worker->enqueued_fence_count + 1, sizeof(*worker->enqueued_fences))) { ERR("Failed to add GPU fence.\n"); - pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex); return E_OUTOFMEMORY; }
@@ -277,8 +277,8 @@ static HRESULT vkd3d_enqueue_gpu_fence(struct vkd3d_fence_worker *worker,
InterlockedIncrement(&fence->pending_worker_operation_count);
- pthread_cond_signal(&worker->cond); - pthread_mutex_unlock(&worker->mutex); + vkd3d_cond_signal(&worker->cond); + vkd3d_mutex_unlock(&worker->mutex);
return S_OK; } @@ -293,7 +293,7 @@ static void vkd3d_fence_worker_remove_fence(struct vkd3d_fence_worker *worker, s
WARN("Waiting for %u pending fence operations (fence %p).\n", count, fence);
- if ((rc = pthread_mutex_lock(&worker->mutex))) + if ((rc = vkd3d_mutex_lock(&worker->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -304,14 +304,14 @@ static void vkd3d_fence_worker_remove_fence(struct vkd3d_fence_worker *worker, s TRACE("Still waiting for %u pending fence operations (fence %p).\n", count, fence);
worker->pending_fence_destruction = true; - pthread_cond_signal(&worker->cond); + vkd3d_cond_signal(&worker->cond);
- pthread_cond_wait(&worker->fence_destruction_cond, &worker->mutex); + vkd3d_cond_wait(&worker->fence_destruction_cond, &worker->mutex); }
TRACE("Removed fence %p.\n", fence);
- pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex); }
static void vkd3d_fence_worker_move_enqueued_fences_locked(struct vkd3d_fence_worker *worker) @@ -412,7 +412,7 @@ static void *vkd3d_fence_worker_main(void *arg)
if (!worker->fence_count || InterlockedAdd(&worker->enqueued_fence_count, 0)) { - if ((rc = pthread_mutex_lock(&worker->mutex))) + if ((rc = vkd3d_mutex_lock(&worker->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); break; @@ -420,7 +420,7 @@ static void *vkd3d_fence_worker_main(void *arg)
if (worker->pending_fence_destruction) { - pthread_cond_broadcast(&worker->fence_destruction_cond); + vkd3d_cond_broadcast(&worker->fence_destruction_cond); worker->pending_fence_destruction = false; }
@@ -432,19 +432,19 @@ static void *vkd3d_fence_worker_main(void *arg) { if (worker->should_exit) { - pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex); break; }
- if ((rc = pthread_cond_wait(&worker->cond, &worker->mutex))) + if ((rc = vkd3d_cond_wait(&worker->cond, &worker->mutex))) { ERR("Failed to wait on condition variable, error %d.\n", rc); - pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex); break; } }
- pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex); } }
@@ -474,33 +474,33 @@ HRESULT vkd3d_fence_worker_start(struct vkd3d_fence_worker *worker, worker->fences = NULL; worker->fences_size = 0;
- if ((rc = pthread_mutex_init(&worker->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&worker->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); return hresult_from_errno(rc); }
- if ((rc = pthread_cond_init(&worker->cond, NULL))) + if ((rc = vkd3d_cond_init(&worker->cond))) { ERR("Failed to initialize condition variable, error %d.\n", rc); - pthread_mutex_destroy(&worker->mutex); + vkd3d_mutex_destroy(&worker->mutex); return hresult_from_errno(rc); }
- if ((rc = pthread_cond_init(&worker->fence_destruction_cond, NULL))) + if ((rc = vkd3d_cond_init(&worker->fence_destruction_cond))) { ERR("Failed to initialize condition variable, error %d.\n", rc); - pthread_mutex_destroy(&worker->mutex); - pthread_cond_destroy(&worker->cond); + vkd3d_mutex_destroy(&worker->mutex); + vkd3d_cond_destroy(&worker->cond); return hresult_from_errno(rc); }
if (FAILED(hr = vkd3d_create_thread(device->vkd3d_instance, vkd3d_fence_worker_main, worker, &worker->thread))) { - pthread_mutex_destroy(&worker->mutex); - pthread_cond_destroy(&worker->cond); - pthread_cond_destroy(&worker->fence_destruction_cond); + vkd3d_mutex_destroy(&worker->mutex); + vkd3d_cond_destroy(&worker->cond); + vkd3d_cond_destroy(&worker->fence_destruction_cond); }
return hr; @@ -514,23 +514,23 @@ HRESULT vkd3d_fence_worker_stop(struct vkd3d_fence_worker *worker,
TRACE("worker %p.\n", worker);
- if ((rc = pthread_mutex_lock(&worker->mutex))) + if ((rc = vkd3d_mutex_lock(&worker->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); }
worker->should_exit = true; - pthread_cond_signal(&worker->cond); + vkd3d_cond_signal(&worker->cond);
- pthread_mutex_unlock(&worker->mutex); + vkd3d_mutex_unlock(&worker->mutex);
if (FAILED(hr = vkd3d_join_thread(device->vkd3d_instance, &worker->thread))) return hr;
- pthread_mutex_destroy(&worker->mutex); - pthread_cond_destroy(&worker->cond); - pthread_cond_destroy(&worker->fence_destruction_cond); + vkd3d_mutex_destroy(&worker->mutex); + vkd3d_cond_destroy(&worker->cond); + vkd3d_cond_destroy(&worker->fence_destruction_cond);
vkd3d_free(worker->enqueued_fences); vkd3d_free(worker->vk_fences); @@ -589,7 +589,7 @@ static VkResult d3d12_fence_create_vk_fence(struct d3d12_fence *fence, VkFence *
*vk_fence = VK_NULL_HANDLE;
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); goto create_fence; @@ -604,7 +604,7 @@ static VkResult d3d12_fence_create_vk_fence(struct d3d12_fence *fence, VkFence * } }
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex);
if (*vk_fence) return VK_SUCCESS; @@ -668,7 +668,7 @@ static void d3d12_fence_destroy_vk_objects(struct d3d12_fence *fence) unsigned int i; int rc;
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -685,7 +685,7 @@ static void d3d12_fence_destroy_vk_objects(struct d3d12_fence *fence)
d3d12_fence_garbage_collect_vk_semaphores_locked(fence, true);
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); }
static struct vkd3d_signaled_semaphore *d3d12_fence_acquire_vk_semaphore(struct d3d12_fence *fence, @@ -698,7 +698,7 @@ static struct vkd3d_signaled_semaphore *d3d12_fence_acquire_vk_semaphore(struct
TRACE("fence %p, value %#"PRIx64".\n", fence, value);
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return VK_NULL_HANDLE; @@ -724,7 +724,7 @@ static struct vkd3d_signaled_semaphore *d3d12_fence_acquire_vk_semaphore(struct
*completed_value = fence->value;
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex);
return semaphore; } @@ -733,7 +733,7 @@ static void d3d12_fence_remove_vk_semaphore(struct d3d12_fence *fence, struct vk { int rc;
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -746,14 +746,14 @@ static void d3d12_fence_remove_vk_semaphore(struct d3d12_fence *fence, struct vk
--fence->semaphore_count;
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); }
static void d3d12_fence_release_vk_semaphore(struct d3d12_fence *fence, struct vkd3d_signaled_semaphore *semaphore) { int rc;
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -762,7 +762,7 @@ static void d3d12_fence_release_vk_semaphore(struct d3d12_fence *fence, struct v assert(semaphore->is_acquired); semaphore->is_acquired = false;
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); }
static HRESULT d3d12_fence_add_vk_semaphore(struct d3d12_fence *fence, @@ -780,7 +780,7 @@ static HRESULT d3d12_fence_add_vk_semaphore(struct d3d12_fence *fence, return E_OUTOFMEMORY; }
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); vkd3d_free(semaphore); @@ -797,7 +797,7 @@ static HRESULT d3d12_fence_add_vk_semaphore(struct d3d12_fence *fence, list_add_tail(&fence->semaphores, &semaphore->entry); ++fence->semaphore_count;
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex);
return hr; } @@ -810,7 +810,7 @@ static HRESULT d3d12_fence_signal(struct d3d12_fence *fence, uint64_t value, VkF unsigned int i, j; int rc;
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -844,7 +844,7 @@ static HRESULT d3d12_fence_signal(struct d3d12_fence *fence, uint64_t value, VkF fence->event_count = j;
if (signal_null_event_cond) - pthread_cond_broadcast(&fence->null_event_cond); + vkd3d_cond_broadcast(&fence->null_event_cond);
if (vk_fence) { @@ -870,7 +870,7 @@ static HRESULT d3d12_fence_signal(struct d3d12_fence *fence, uint64_t value, VkF VK_CALL(vkDestroyFence(device->vk_device, vk_fence, NULL)); }
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex);
return S_OK; } @@ -926,9 +926,9 @@ static ULONG STDMETHODCALLTYPE d3d12_fence_Release(ID3D12Fence *iface) d3d12_fence_destroy_vk_objects(fence);
vkd3d_free(fence->events); - if ((rc = pthread_mutex_destroy(&fence->mutex))) + if ((rc = vkd3d_mutex_destroy(&fence->mutex))) ERR("Failed to destroy mutex, error %d.\n", rc); - pthread_cond_destroy(&fence->null_event_cond); + vkd3d_cond_destroy(&fence->null_event_cond); vkd3d_free(fence);
d3d12_device_release(device); @@ -995,13 +995,13 @@ static UINT64 STDMETHODCALLTYPE d3d12_fence_GetCompletedValue(ID3D12Fence *iface
TRACE("iface %p.\n", iface);
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return 0; } completed_value = fence->value; - pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); return completed_value; }
@@ -1015,7 +1015,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i
TRACE("iface %p, value %#"PRIx64", event %p.\n", iface, value, event);
- if ((rc = pthread_mutex_lock(&fence->mutex))) + if ((rc = vkd3d_mutex_lock(&fence->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -1025,7 +1025,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i { if (event) fence->device->signal_event(event); - pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); return S_OK; }
@@ -1036,7 +1036,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i { WARN("Event completion for (%p, %#"PRIx64") is already in the list.\n", event, value); - pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); return S_OK; } } @@ -1045,7 +1045,7 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i fence->event_count + 1, sizeof(*fence->events))) { WARN("Failed to add event.\n"); - pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); return E_OUTOFMEMORY; }
@@ -1062,10 +1062,10 @@ static HRESULT STDMETHODCALLTYPE d3d12_fence_SetEventOnCompletion(ID3D12Fence *i if (!event) { while (!*latch) - pthread_cond_wait(&fence->null_event_cond, &fence->mutex); + vkd3d_cond_wait(&fence->null_event_cond, &fence->mutex); }
- pthread_mutex_unlock(&fence->mutex); + vkd3d_mutex_unlock(&fence->mutex); return S_OK; }
@@ -1116,16 +1116,16 @@ static HRESULT d3d12_fence_init(struct d3d12_fence *fence, struct d3d12_device *
fence->value = initial_value;
- if ((rc = pthread_mutex_init(&fence->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&fence->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); return hresult_from_errno(rc); }
- if ((rc = pthread_cond_init(&fence->null_event_cond, NULL))) + if ((rc = vkd3d_cond_init(&fence->null_event_cond))) { ERR("Failed to initialize cond variable, error %d.\n", rc); - pthread_mutex_destroy(&fence->mutex); + vkd3d_mutex_destroy(&fence->mutex); return hresult_from_errno(rc); }
@@ -1145,8 +1145,8 @@ static HRESULT d3d12_fence_init(struct d3d12_fence *fence, struct d3d12_device *
if (FAILED(hr = vkd3d_private_store_init(&fence->private_store))) { - pthread_mutex_destroy(&fence->mutex); - pthread_cond_destroy(&fence->null_event_cond); + vkd3d_mutex_destroy(&fence->mutex); + vkd3d_cond_destroy(&fence->null_event_cond); return hr; }
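For reference, the condition-variable side of the new API follows the usual wait-in-a-loop pattern already visible in the fence worker above. Below is a minimal producer/consumer sketch; the worker_queue type and its helpers are hypothetical and only illustrate how the vkd3d_cond_*() and vkd3d_mutex_*() wrappers (defined in vkd3d_private.h further down) are meant to be combined; they are not part of this patch.

/* Hypothetical illustration of the vkd3d_cond_*() wait/signal pattern. */
struct worker_queue
{
    struct vkd3d_mutex mutex;
    struct vkd3d_cond cond;
    unsigned int item_count;
    bool should_exit;
};

static void worker_queue_push(struct worker_queue *queue)
{
    vkd3d_mutex_lock(&queue->mutex);
    ++queue->item_count;
    /* Wake up one waiter; the predicate it checks is item_count. */
    vkd3d_cond_signal(&queue->cond);
    vkd3d_mutex_unlock(&queue->mutex);
}

static bool worker_queue_pop(struct worker_queue *queue)
{
    bool got_item = false;

    vkd3d_mutex_lock(&queue->mutex);

    /* Re-check the predicate after every wake-up; spurious wake-ups are allowed. */
    while (!queue->item_count && !queue->should_exit)
        vkd3d_cond_wait(&queue->cond, &queue->mutex);

    if (queue->item_count)
    {
        --queue->item_count;
        got_item = true;
    }

    vkd3d_mutex_unlock(&queue->mutex);

    return got_item;
}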
diff --git a/libs/vkd3d/device.c b/libs/vkd3d/device.c
index 4bcb5efcbba9..fe90eb37ef00 100644
--- a/libs/vkd3d/device.c
+++ b/libs/vkd3d/device.c
@@ -1878,7 +1878,7 @@ static HRESULT d3d12_device_init_pipeline_cache(struct d3d12_device *device)
     VkResult vr;
     int rc;
- if ((rc = pthread_mutex_init(&device->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&device->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -1906,7 +1906,7 @@ static void d3d12_device_destroy_pipeline_cache(struct d3d12_device *device) if (device->vk_pipeline_cache) VK_CALL(vkDestroyPipelineCache(device->vk_device, device->vk_pipeline_cache, NULL));
- pthread_mutex_destroy(&device->mutex); + vkd3d_mutex_destroy(&device->mutex); }
#define VKD3D_VA_FALLBACK_BASE 0x8000000000000000ull @@ -1979,7 +1979,7 @@ D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_al return 0; size = align(size, alignment);
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return 0; @@ -1990,7 +1990,7 @@ D3D12_GPU_VIRTUAL_ADDRESS vkd3d_gpu_va_allocator_allocate(struct vkd3d_gpu_va_al else address = vkd3d_gpu_va_allocator_allocate_fallback(allocator, alignment, size, ptr);
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return address; } @@ -2061,7 +2061,7 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocator *allocato return vkd3d_gpu_va_allocator_dereference_slab(allocator, address);
/* Slow fallback. */ - if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return NULL; @@ -2069,7 +2069,7 @@ void *vkd3d_gpu_va_allocator_dereference(struct vkd3d_gpu_va_allocator *allocato
ret = vkd3d_gpu_va_allocator_dereference_fallback(allocator, address);
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return ret; } @@ -2124,7 +2124,7 @@ void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12 { int rc;
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -2133,13 +2133,13 @@ void vkd3d_gpu_va_allocator_free(struct vkd3d_gpu_va_allocator *allocator, D3D12 if (address < VKD3D_VA_FALLBACK_BASE) { vkd3d_gpu_va_allocator_free_slab(allocator, address); - pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex); return; }
vkd3d_gpu_va_allocator_free_fallback(allocator, address);
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex); }
static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator *allocator) @@ -2165,7 +2165,7 @@ static bool vkd3d_gpu_va_allocator_init(struct vkd3d_gpu_va_allocator *allocator allocator->slabs[i].ptr = &allocator->slabs[i + 1]; }
- if ((rc = pthread_mutex_init(&allocator->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&allocator->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); vkd3d_free(allocator->slabs); @@ -2179,15 +2179,15 @@ static void vkd3d_gpu_va_allocator_cleanup(struct vkd3d_gpu_va_allocator *alloca { int rc;
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; } vkd3d_free(allocator->slabs); vkd3d_free(allocator->fallback_allocations); - pthread_mutex_unlock(&allocator->mutex); - pthread_mutex_destroy(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex); + vkd3d_mutex_destroy(&allocator->mutex); }
/* We could use bsearch() or recursion here, but it probably helps to omit @@ -2223,7 +2223,7 @@ bool vkd3d_gpu_descriptor_allocator_register_range(struct vkd3d_gpu_descriptor_a struct vkd3d_gpu_descriptor_allocation *allocation; int rc;
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return false; @@ -2232,7 +2232,7 @@ bool vkd3d_gpu_descriptor_allocator_register_range(struct vkd3d_gpu_descriptor_a if (!vkd3d_array_reserve((void **)&allocator->allocations, &allocator->allocations_size, allocator->allocation_count + 1, sizeof(*allocator->allocations))) { - pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex); return false; }
@@ -2247,7 +2247,7 @@ bool vkd3d_gpu_descriptor_allocator_register_range(struct vkd3d_gpu_descriptor_a allocation->base = base; allocation->count = count;
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return true; } @@ -2259,7 +2259,7 @@ bool vkd3d_gpu_descriptor_allocator_unregister_range( size_t i; int rc;
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return false; @@ -2277,7 +2277,7 @@ bool vkd3d_gpu_descriptor_allocator_unregister_range( break; }
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return found; } @@ -2301,7 +2301,7 @@ size_t vkd3d_gpu_descriptor_allocator_range_size_from_descriptor(
assert(allocator->allocation_count);
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return 0; @@ -2311,7 +2311,7 @@ size_t vkd3d_gpu_descriptor_allocator_range_size_from_descriptor( if ((allocation = vkd3d_gpu_descriptor_allocator_allocation_from_descriptor(allocator, desc))) remaining = allocation->count - (desc - allocation->base);
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return remaining; } @@ -2325,7 +2325,7 @@ struct d3d12_descriptor_heap *vkd3d_gpu_descriptor_allocator_heap_from_descripto if (!allocator->allocation_count) return NULL;
- if ((rc = pthread_mutex_lock(&allocator->mutex))) + if ((rc = vkd3d_mutex_lock(&allocator->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return NULL; @@ -2333,7 +2333,7 @@ struct d3d12_descriptor_heap *vkd3d_gpu_descriptor_allocator_heap_from_descripto
allocation = vkd3d_gpu_descriptor_allocator_allocation_from_descriptor(allocator, desc);
- pthread_mutex_unlock(&allocator->mutex); + vkd3d_mutex_unlock(&allocator->mutex);
return allocation ? CONTAINING_RECORD(allocation->base, struct d3d12_descriptor_heap, descriptors) : NULL; @@ -2344,7 +2344,7 @@ static bool vkd3d_gpu_descriptor_allocator_init(struct vkd3d_gpu_descriptor_allo int rc;
memset(allocator, 0, sizeof(*allocator)); - if ((rc = pthread_mutex_init(&allocator->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&allocator->mutex))) { ERR("Failed to initialise mutex, error %d.\n", rc); return false; @@ -2356,7 +2356,7 @@ static bool vkd3d_gpu_descriptor_allocator_init(struct vkd3d_gpu_descriptor_allo static void vkd3d_gpu_descriptor_allocator_cleanup(struct vkd3d_gpu_descriptor_allocator *allocator) { vkd3d_free(allocator->allocations); - pthread_mutex_destroy(&allocator->mutex); + vkd3d_mutex_destroy(&allocator->mutex); }
static bool have_vk_time_domain(VkTimeDomainEXT *domains, unsigned int count, VkTimeDomainEXT domain) @@ -2479,7 +2479,7 @@ static ULONG STDMETHODCALLTYPE d3d12_device_Release(ID3D12Device *iface) d3d12_device_destroy_pipeline_cache(device); d3d12_device_destroy_vkd3d_queues(device); for (i = 0; i < ARRAY_SIZE(device->desc_mutex); ++i) - pthread_mutex_destroy(&device->desc_mutex[i]); + vkd3d_mutex_destroy(&device->desc_mutex[i]); VK_CALL(vkDestroyDevice(device->vk_device, NULL)); if (device->parent) IUnknown_Release(device->parent); @@ -3977,7 +3977,7 @@ static HRESULT d3d12_device_init(struct d3d12_device *device, vkd3d_time_domains_init(device);
for (i = 0; i < ARRAY_SIZE(device->desc_mutex); ++i) - pthread_mutex_init(&device->desc_mutex[i], NULL); + vkd3d_mutex_init(&device->desc_mutex[i]);
     if ((device->parent = create_info->parent))
         IUnknown_AddRef(device->parent);
diff --git a/libs/vkd3d/resource.c b/libs/vkd3d/resource.c
index 19a163beead4..7b94ac926f28 100644
--- a/libs/vkd3d/resource.c
+++ b/libs/vkd3d/resource.c
@@ -326,7 +326,7 @@ static void d3d12_heap_destroy(struct d3d12_heap *heap)
VK_CALL(vkFreeMemory(device->vk_device, heap->vk_memory, NULL));
- pthread_mutex_destroy(&heap->mutex); + vkd3d_mutex_destroy(&heap->mutex);
if (heap->is_private) device = NULL; @@ -443,7 +443,7 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset, VkResult vr; int rc;
- if ((rc = pthread_mutex_lock(&heap->mutex))) + if ((rc = vkd3d_mutex_lock(&heap->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); if (data) @@ -491,7 +491,7 @@ static HRESULT d3d12_heap_map(struct d3d12_heap *heap, uint64_t offset, *data = NULL; }
- pthread_mutex_unlock(&heap->mutex); + vkd3d_mutex_unlock(&heap->mutex);
return hr; } @@ -501,7 +501,7 @@ static void d3d12_heap_unmap(struct d3d12_heap *heap, struct d3d12_resource *res struct d3d12_device *device = heap->device; int rc;
- if ((rc = pthread_mutex_lock(&heap->mutex))) + if ((rc = vkd3d_mutex_lock(&heap->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return; @@ -535,7 +535,7 @@ static void d3d12_heap_unmap(struct d3d12_heap *heap, struct d3d12_resource *res }
done: - pthread_mutex_unlock(&heap->mutex); + vkd3d_mutex_unlock(&heap->mutex); }
static HRESULT validate_heap_desc(const D3D12_HEAP_DESC *desc, const struct d3d12_resource *resource) @@ -594,7 +594,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap, if (FAILED(hr = validate_heap_desc(&heap->desc, resource))) return hr;
- if ((rc = pthread_mutex_init(&heap->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&heap->mutex))) { ERR("Failed to initialize mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -602,7 +602,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap,
if (FAILED(hr = vkd3d_private_store_init(&heap->private_store))) { - pthread_mutex_destroy(&heap->mutex); + vkd3d_mutex_destroy(&heap->mutex); return hr; }
@@ -636,7 +636,7 @@ static HRESULT d3d12_heap_init(struct d3d12_heap *heap, if (FAILED(hr)) { vkd3d_private_store_destroy(&heap->private_store); - pthread_mutex_destroy(&heap->mutex); + vkd3d_mutex_destroy(&heap->mutex); return hr; }
@@ -2126,10 +2126,10 @@ void d3d12_desc_write_atomic(struct d3d12_desc *dst, const struct d3d12_desc *sr struct d3d12_device *device) { struct vkd3d_view *defunct_view = NULL; - pthread_mutex_t *mutex; + struct vkd3d_mutex *mutex;
mutex = d3d12_device_get_descriptor_mutex(device, dst); - pthread_mutex_lock(mutex); + vkd3d_mutex_lock(mutex);
/* Nothing to do for VKD3D_DESCRIPTOR_MAGIC_CBV. */ if ((dst->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) @@ -2138,7 +2138,7 @@ void d3d12_desc_write_atomic(struct d3d12_desc *dst, const struct d3d12_desc *sr
*dst = *src;
- pthread_mutex_unlock(mutex); + vkd3d_mutex_unlock(mutex);
/* Destroy the view after unlocking to reduce wait time. */ if (defunct_view) @@ -2156,21 +2156,21 @@ void d3d12_desc_copy(struct d3d12_desc *dst, const struct d3d12_desc *src, struct d3d12_device *device) { struct d3d12_desc tmp; - pthread_mutex_t *mutex; + struct vkd3d_mutex *mutex;
assert(dst != src);
/* Shadow of the Tomb Raider and possibly other titles sometimes destroy * and rewrite a descriptor in another thread while it is being copied. */ mutex = d3d12_device_get_descriptor_mutex(device, src); - pthread_mutex_lock(mutex); + vkd3d_mutex_lock(mutex);
if (src->magic & VKD3D_DESCRIPTOR_MAGIC_HAS_VIEW) vkd3d_view_incref(src->u.view);
tmp = *src;
- pthread_mutex_unlock(mutex); + vkd3d_mutex_unlock(mutex);
     d3d12_desc_write_atomic(dst, &tmp, device);
 }
diff --git a/libs/vkd3d/state.c b/libs/vkd3d/state.c
index b54a6527c016..12c7a9e8f65c 100644
--- a/libs/vkd3d/state.c
+++ b/libs/vkd3d/state.c
@@ -1432,7 +1432,7 @@ HRESULT vkd3d_render_pass_cache_find(struct vkd3d_render_pass_cache *cache,
     unsigned int i;
     int rc;
- if ((rc = pthread_mutex_lock(&device->mutex))) + if ((rc = vkd3d_mutex_lock(&device->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); *vk_render_pass = VK_NULL_HANDLE; @@ -1454,7 +1454,7 @@ HRESULT vkd3d_render_pass_cache_find(struct vkd3d_render_pass_cache *cache, if (!found) hr = vkd3d_render_pass_cache_create_pass_locked(cache, device, key, vk_render_pass);
- pthread_mutex_unlock(&device->mutex); + vkd3d_mutex_unlock(&device->mutex);
return hr; } @@ -3078,7 +3078,7 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12
*vk_render_pass = VK_NULL_HANDLE;
- if (!(rc = pthread_mutex_lock(&device->mutex))) + if (!(rc = vkd3d_mutex_lock(&device->mutex))) { LIST_FOR_EACH_ENTRY(current, &graphics->compiled_pipelines, struct vkd3d_compiled_pipeline, entry) { @@ -3089,7 +3089,7 @@ static VkPipeline d3d12_pipeline_state_find_compiled_pipeline(const struct d3d12 break; } } - pthread_mutex_unlock(&device->mutex); + vkd3d_mutex_unlock(&device->mutex); } else { @@ -3114,7 +3114,7 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta compiled_pipeline->vk_pipeline = vk_pipeline; compiled_pipeline->vk_render_pass = vk_render_pass;
- if ((rc = pthread_mutex_lock(&device->mutex))) + if ((rc = vkd3d_mutex_lock(&device->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); vkd3d_free(compiled_pipeline); @@ -3134,7 +3134,7 @@ static bool d3d12_pipeline_state_put_pipeline_to_cache(struct d3d12_pipeline_sta if (compiled_pipeline) list_add_tail(&graphics->compiled_pipelines, &compiled_pipeline->entry);
- pthread_mutex_unlock(&device->mutex); + vkd3d_mutex_unlock(&device->mutex); return compiled_pipeline; }
diff --git a/libs/vkd3d/utils.c b/libs/vkd3d/utils.c
index cdb81f671b94..873f840ccf56 100644
--- a/libs/vkd3d/utils.c
+++ b/libs/vkd3d/utils.c
@@ -948,7 +948,7 @@ HRESULT vkd3d_get_private_data(struct vkd3d_private_store *store,
     if (!out_size)
         return E_INVALIDARG;
- if ((rc = pthread_mutex_lock(&store->mutex))) + if ((rc = vkd3d_mutex_lock(&store->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -977,7 +977,7 @@ HRESULT vkd3d_get_private_data(struct vkd3d_private_store *store, memcpy(out, data->u.data, data->size);
done: - pthread_mutex_unlock(&store->mutex); + vkd3d_mutex_unlock(&store->mutex); return hr; }
@@ -987,7 +987,7 @@ HRESULT vkd3d_set_private_data(struct vkd3d_private_store *store, HRESULT hr; int rc;
- if ((rc = pthread_mutex_lock(&store->mutex))) + if ((rc = vkd3d_mutex_lock(&store->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -995,7 +995,7 @@ HRESULT vkd3d_set_private_data(struct vkd3d_private_store *store,
hr = vkd3d_private_store_set_private_data(store, tag, data, data_size, false);
- pthread_mutex_unlock(&store->mutex); + vkd3d_mutex_unlock(&store->mutex); return hr; }
@@ -1006,7 +1006,7 @@ HRESULT vkd3d_set_private_data_interface(struct vkd3d_private_store *store, HRESULT hr; int rc;
- if ((rc = pthread_mutex_lock(&store->mutex))) + if ((rc = vkd3d_mutex_lock(&store->mutex))) { ERR("Failed to lock mutex, error %d.\n", rc); return hresult_from_errno(rc); @@ -1014,7 +1014,7 @@ HRESULT vkd3d_set_private_data_interface(struct vkd3d_private_store *store,
hr = vkd3d_private_store_set_private_data(store, tag, data, sizeof(object), !!object);
- pthread_mutex_unlock(&store->mutex); + vkd3d_mutex_unlock(&store->mutex); return hr; }
diff --git a/libs/vkd3d/vkd3d_private.h b/libs/vkd3d/vkd3d_private.h
index d21cd411ec8a..bb7be99e7efc 100644
--- a/libs/vkd3d/vkd3d_private.h
+++ b/libs/vkd3d/vkd3d_private.h
@@ -171,6 +171,62 @@ union vkd3d_thread_handle
     void *handle;
 };
 
+struct vkd3d_mutex
+{
+    pthread_mutex_t lock;
+};
+
+struct vkd3d_cond
+{
+    pthread_cond_t cond;
+};
+
+
+static inline int vkd3d_mutex_init(struct vkd3d_mutex *lock)
+{
+    return pthread_mutex_init(&lock->lock, NULL);
+}
+
+static inline int vkd3d_mutex_lock(struct vkd3d_mutex *lock)
+{
+    return pthread_mutex_lock(&lock->lock);
+}
+
+static inline int vkd3d_mutex_unlock(struct vkd3d_mutex *lock)
+{
+    return pthread_mutex_unlock(&lock->lock);
+}
+
+static inline int vkd3d_mutex_destroy(struct vkd3d_mutex *lock)
+{
+    return pthread_mutex_destroy(&lock->lock);
+}
+
+static inline int vkd3d_cond_init(struct vkd3d_cond *cond)
+{
+    return pthread_cond_init(&cond->cond, NULL);
+}
+
+static inline int vkd3d_cond_signal(struct vkd3d_cond *cond)
+{
+    return pthread_cond_signal(&cond->cond);
+}
+
+static inline int vkd3d_cond_broadcast(struct vkd3d_cond *cond)
+{
+    return pthread_cond_broadcast(&cond->cond);
+}
+
+static inline int vkd3d_cond_wait(struct vkd3d_cond *cond, struct vkd3d_mutex *lock)
+{
+    return pthread_cond_wait(&cond->cond, &lock->lock);
+}
+
+static inline int vkd3d_cond_destroy(struct vkd3d_cond *cond)
+{
+    return pthread_cond_destroy(&cond->cond);
+}
+
 HRESULT vkd3d_create_thread(struct vkd3d_instance *instance,
         PFN_vkd3d_thread thread_main, void *data, union vkd3d_thread_handle *thread);
 HRESULT vkd3d_join_thread(struct vkd3d_instance *instance, union vkd3d_thread_handle *thread);
@@ -186,9 +242,9 @@ struct vkd3d_waiting_fence
 struct vkd3d_fence_worker
 {
     union vkd3d_thread_handle thread;
-    pthread_mutex_t mutex;
-    pthread_cond_t cond;
-    pthread_cond_t fence_destruction_cond;
+    struct vkd3d_mutex mutex;
+    struct vkd3d_cond cond;
+    struct vkd3d_cond fence_destruction_cond;
     bool should_exit;
     bool pending_fence_destruction;
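To summarise the new API added above: each wrapper simply forwards to the corresponding pthread call and returns its error code, so callers keep the existing "if ((rc = ...))" error-handling style. A minimal usage sketch follows; the example_state type and its helpers are hypothetical and not part of the patch.

/* Hypothetical caller showing the intended init/lock/unlock/destroy pattern. */
struct example_state
{
    struct vkd3d_mutex mutex;
    unsigned int value;
};

static int example_state_init(struct example_state *state)
{
    state->value = 0;
    return vkd3d_mutex_init(&state->mutex);
}

static void example_state_increment(struct example_state *state)
{
    int rc;

    if ((rc = vkd3d_mutex_lock(&state->mutex)))
    {
        ERR("Failed to lock mutex, error %d.\n", rc);
        return;
    }

    ++state->value;

    vkd3d_mutex_unlock(&state->mutex);
}

static void example_state_cleanup(struct example_state *state)
{
    vkd3d_mutex_destroy(&state->mutex);
}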
@@ -227,7 +283,7 @@ struct vkd3d_gpu_va_slab
struct vkd3d_gpu_va_allocator { - pthread_mutex_t mutex; + struct vkd3d_mutex mutex;
D3D12_GPU_VIRTUAL_ADDRESS fallback_floor; struct vkd3d_gpu_va_allocation *fallback_allocations; @@ -251,7 +307,7 @@ struct vkd3d_gpu_descriptor_allocation
struct vkd3d_gpu_descriptor_allocator { - pthread_mutex_t mutex; + struct vkd3d_mutex mutex;
struct vkd3d_gpu_descriptor_allocation *allocations; size_t allocations_size; @@ -294,7 +350,7 @@ void vkd3d_render_pass_cache_init(struct vkd3d_render_pass_cache *cache);
struct vkd3d_private_store { - pthread_mutex_t mutex; + struct vkd3d_mutex mutex;
struct list content; }; @@ -327,7 +383,7 @@ static inline HRESULT vkd3d_private_store_init(struct vkd3d_private_store *store
list_init(&store->content);
- if ((rc = pthread_mutex_init(&store->mutex, NULL))) + if ((rc = vkd3d_mutex_init(&store->mutex))) ERR("Failed to initialize mutex, error %d.\n", rc);
return hresult_from_errno(rc); @@ -342,7 +398,7 @@ static inline void vkd3d_private_store_destroy(struct vkd3d_private_store *store vkd3d_private_data_destroy(data); }
- pthread_mutex_destroy(&store->mutex); + vkd3d_mutex_destroy(&store->mutex); }
HRESULT vkd3d_get_private_data(struct vkd3d_private_store *store, const GUID *tag, unsigned int *out_size, void *out); @@ -366,8 +422,8 @@ struct d3d12_fence LONG refcount;
uint64_t value; - pthread_mutex_t mutex; - pthread_cond_t null_event_cond; + struct vkd3d_mutex mutex; + struct vkd3d_cond null_event_cond;
struct vkd3d_waiting_event { @@ -402,7 +458,7 @@ struct d3d12_heap bool is_private; D3D12_HEAP_DESC desc;
- pthread_mutex_t mutex; + struct vkd3d_mutex mutex;
VkDeviceMemory vk_memory; void *map_ptr; @@ -1041,7 +1097,7 @@ HRESULT d3d12_command_list_create(struct d3d12_device *device, struct vkd3d_queue { /* Access to VkQueue must be externally synchronized. */ - pthread_mutex_t mutex; + struct vkd3d_mutex mutex;
VkQueue vk_queue;
@@ -1181,8 +1237,8 @@ struct d3d12_device struct vkd3d_gpu_va_allocator gpu_va_allocator; struct vkd3d_fence_worker fence_worker;
- pthread_mutex_t mutex; - pthread_mutex_t desc_mutex[8]; + struct vkd3d_mutex mutex; + struct vkd3d_mutex desc_mutex[8]; struct vkd3d_render_pass_cache render_pass_cache; VkPipelineCache vk_pipeline_cache;
@@ -1249,7 +1305,7 @@ static inline unsigned int d3d12_device_get_descriptor_handle_increment_size(str
     return ID3D12Device_GetDescriptorHandleIncrementSize(&device->ID3D12Device_iface, descriptor_type);
 }
 
-static inline pthread_mutex_t *d3d12_device_get_descriptor_mutex(struct d3d12_device *device,
+static inline struct vkd3d_mutex *d3d12_device_get_descriptor_mutex(struct d3d12_device *device,
         const struct d3d12_desc *descriptor)
 {
     STATIC_ASSERT(!(ARRAY_SIZE(device->desc_mutex) & (ARRAY_SIZE(device->desc_mutex) - 1)));
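The body of d3d12_device_get_descriptor_mutex() is not part of this hunk; only its return type changes. For context, the STATIC_ASSERT above requires desc_mutex to have a power-of-two size, which lets a descriptor address be mapped onto one of the mutexes with a simple mask. The sketch below shows one way such a selection could look; the index computation is an assumption for illustration, not the function's actual code.

/* Sketch only: distributes descriptors across the device's desc_mutex array.
 * The real selection logic lives outside this hunk. */
static inline struct vkd3d_mutex *example_get_descriptor_mutex(struct d3d12_device *device,
        const struct d3d12_desc *descriptor)
{
    uintptr_t idx = (uintptr_t)descriptor / sizeof(*descriptor);

    /* ARRAY_SIZE(device->desc_mutex) is a power of two, so masking with
     * (size - 1) yields a valid index for any descriptor address. */
    return &device->desc_mutex[idx & (ARRAY_SIZE(device->desc_mutex) - 1)];
}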