From: Rémi Bernon <rbernon@codeweavers.com>
---
 dlls/kernel32/tests/heap.c |  1 -
 dlls/ntdll/heap.c          | 56 ++++++++++++++++++++------------------
 2 files changed, 29 insertions(+), 28 deletions(-)
diff --git a/dlls/kernel32/tests/heap.c b/dlls/kernel32/tests/heap.c
index b3c3dc1eebc..0d71669cebb 100644
--- a/dlls/kernel32/tests/heap.c
+++ b/dlls/kernel32/tests/heap.c
@@ -1221,7 +1221,6 @@ static void test_HeapCreate(void)
     thread_params.flags = 0;
     SetEvent( thread_params.start_event );
     res = WaitForSingleObject( thread_params.ready_event, 100 );
-    todo_wine
     ok( !res, "WaitForSingleObject returned %#lx, error %lu\n", res, GetLastError() );
     ret = HeapUnlock( heap );
     ok( ret, "HeapUnlock failed, error %lu\n", GetLastError() );
diff --git a/dlls/ntdll/heap.c b/dlls/ntdll/heap.c
index e3800cad6af..4d6c018dddd 100644
--- a/dlls/ntdll/heap.c
+++ b/dlls/ntdll/heap.c
@@ -236,7 +236,7 @@ struct category
     volatile BOOL enabled;

     /* list of groups with free blocks */
-    struct list groups;
+    SLIST_HEADER groups;
 };

 struct heap
@@ -1515,7 +1515,7 @@ HANDLE WINAPI RtlCreateHeap( ULONG flags, void *addr, SIZE_T total_size, SIZE_T
         NtAllocateVirtualMemory( NtCurrentProcess(), (void *)&heap->categories, 0, &size, MEM_COMMIT, PAGE_READWRITE );
         for (i = 0; i < BLOCK_SIZE_CATEGORY_COUNT; ++i)
-            list_init( &heap->categories[i].groups );
+            RtlInitializeSListHead( &heap->categories[i].groups );
     }

     /* link it into the per-process heap list */
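For reference, SLIST_HEADER/SLIST_ENTRY form the NT lock-free singly-linked
list. A minimal usage sketch of the three operations the patch builds on (the
node type and names are illustrative only; the Win32 names used here are thin
wrappers over the Rtl* ntdll exports the patch calls):

    #include <stdio.h>
    #include <windows.h>

    struct node
    {
        SLIST_ENTRY entry;  /* must come with SList alignment: 16 bytes on 64-bit */
        int value;
    };

    int main(void)
    {
        SLIST_HEADER head;
        struct node n = { .value = 42 };
        SLIST_ENTRY *popped;

        InitializeSListHead( &head );                  /* list starts empty */
        InterlockedPushEntrySList( &head, &n.entry );  /* lock-free push */
        popped = InterlockedPopEntrySList( &head );    /* lock-free pop, NULL when empty */
        if (popped)  /* map the embedded entry back to its enclosing node */
            printf( "%d\n", CONTAINING_RECORD( popped, struct node, entry )->value );
        return 0;
    }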
@@ -1672,13 +1672,16 @@ static NTSTATUS heap_allocate_block( struct heap *heap, ULONG flags, SIZE_T bloc
 struct DECLSPEC_ALIGN(BLOCK_ALIGN) group
 {
     struct block block;
-    struct list entry;
+    SINGLE_LIST_ENTRY entry;
     /* one bit for each free block and the highest bit as unlinked flag */
     LONG free_bits;
 };

 #define GROUP_FLAG_FREE (1u << (sizeof(((struct group *)0)->free_bits) * 8 - 1))

+/* entry actually is a SLIST_ENTRY and needs to be 16B aligned on 64bit */
+C_ASSERT( offsetof(struct group, entry) % 8 == 0 );
+
 static inline UINT block_get_group_index( const struct block *block )
 {
     return block->base_offset;
@@ -1714,7 +1717,7 @@ static inline LONG group_find_free_block( struct group *group, SIZE_T block_size
 #endif
     /* we remove the group from the free list once all its blocks are used, i will never be -1 */
     *block = group_get_block( group, block_size, i );
-    return group->free_bits &= ~(1 << i);
+    return InterlockedAnd( &group->free_bits, ~(1 << i) ) & ~(1 << i);
 }

 /* allocate a new group block using non-LFH allocation, returns a group owned by current thread */
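A note on the interlocked rewrite above: InterlockedAnd returns the value the
variable had *before* the and, while the old `group->free_bits &= ~(1 << i)`
evaluated to the value *after* it. Re-applying the mask to the return value
recovers the post-update value atomically, e.g.:

    LONG bits = 0x5;                                /* blocks 0 and 2 free */
    LONG prev = InterlockedAnd( &bits, ~(1 << 2) ); /* atomically claim block 2; prev == 0x5 */
    LONG next = prev & ~(1 << 2);                   /* == 0x1, what `bits &= ~(1 << 2)` yields */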
@@ -1728,11 +1731,15 @@ static struct group *group_allocate( struct heap *heap, ULONG flags, SIZE_T bloc
     size = sizeof(*group) + sizeof(group->free_bits) * 8 * block_size - sizeof(struct block);
     group_size = heap_get_block_size( heap, flags, size );

+    heap_lock( heap, flags );
+
     if (group_size >= HEAP_MIN_LARGE_BLOCK_SIZE)
         status = heap_allocate_large( heap, flags & ~HEAP_ZERO_MEMORY, group_size, size, (void **)&ptr );
     else
         status = heap_allocate_block( heap, flags & ~HEAP_ZERO_MEMORY, group_size, size, (void **)&ptr );

+    heap_unlock( heap, flags );
+
     if (status) return NULL;

     group = CONTAINING_RECORD( (struct block *)ptr - 1, struct group, block );
@@ -1755,6 +1762,8 @@ static NTSTATUS group_release( struct heap *heap, ULONG flags, struct category *
 {
     NTSTATUS status;

+    heap_lock( heap, flags );
+
     block_set_flags( &group->block, BLOCK_FLAG_LFH, 0 );

     if (block_get_flags( &group->block ) & BLOCK_FLAG_LARGE)
@@ -1762,6 +1771,8 @@ static NTSTATUS group_release( struct heap *heap, ULONG flags, struct category *
     else
         status = heap_free_block( heap, flags, &group->block );

+    heap_unlock( heap, flags );
+
     return status;
 }
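Since the LFH fast paths below no longer run under the heap lock,
group_allocate() and group_release() now take it themselves around their calls
into the regular non-LFH allocator, which is still lock-protected. For context,
heap_lock()/heap_unlock() in this file are roughly the following (a sketch from
memory, not part of the patch):

    static void heap_lock( struct heap *heap, ULONG flags )
    {
        if (flags & HEAP_NO_SERIALIZE) return;
        RtlEnterCriticalSection( &heap->cs );
    }

    static void heap_unlock( struct heap *heap, ULONG flags )
    {
        if (flags & HEAP_NO_SERIALIZE) return;
        RtlLeaveCriticalSection( &heap->cs );
    }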
@@ -1770,15 +1781,12 @@ static struct group *heap_acquire_category_group( struct heap *heap, ULONG flags
                                                   struct category *category )
 {
     struct group *group;
-    struct list *entry;
+    SLIST_ENTRY *entry;

-    if (!(entry = list_head( &category->groups )))
+    if (!(entry = RtlInterlockedPopEntrySList( &category->groups )))
         group = group_allocate( heap, flags, block_size, category );
     else
-    {
-        group = LIST_ENTRY( entry, struct group, entry );
-        list_remove( &group->entry );
-    }
+        group = CONTAINING_RECORD( entry, struct group, entry );

     return group;
 }
@@ -1790,8 +1798,12 @@ static NTSTATUS heap_release_category_group( struct heap *heap, ULONG flags, str
     NTSTATUS status = STATUS_SUCCESS;

     /* try re-using the block group instead of releasing it */
-    if (list_empty( &category->groups ))
-        list_add_tail( &category->groups, &group->entry );
+#ifdef _WIN64
+    if (category->groups.Header16.Depth <= 16)
+#else
+    if (category->groups.Depth <= 16)
+#endif
+        RtlInterlockedPushEntrySList( &category->groups, (SLIST_ENTRY *)&group->entry );
     else
         status = group_release( heap, flags, category, group );
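The old list kept at most one spare group per category (push only when empty);
the SList version caches up to 16, reading the depth straight out of the
header, whose layout differs between 32-bit and 64-bit, hence the #ifdef. The
same count (equally racy, which is fine for a heuristic bound) is also
available through the documented accessor, a hypothetical alternative:

    /* using RtlQueryDepthSList instead of poking at the raw header fields;
       trades the direct read for a call: */
    if (RtlQueryDepthSList( &category->groups ) <= 16)
        RtlInterlockedPushEntrySList( &category->groups, (SLIST_ENTRY *)&group->entry );
    else
        status = group_release( heap, flags, category, group );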
@@ -1811,15 +1823,15 @@ static struct block *find_free_block_lfh( struct heap *heap, ULONG flags, SIZE_T
     /* serialize with heap_free_block_lfh: atomically set GROUP_FLAG_FREE when the free bits are all 0. */
     if (group_find_free_block( group, block_size, &block ))
-        group->free_bits &= ~GROUP_FLAG_FREE;
-    else if (!group->free_bits)
-        group->free_bits = GROUP_FLAG_FREE;
+        InterlockedAnd( &group->free_bits, ~GROUP_FLAG_FREE );
+    else
+        InterlockedCompareExchange( &group->free_bits, GROUP_FLAG_FREE, 0 );

     /* if GROUP_FLAG_FREE was set, thread released its ownership. */
     if (group->free_bits & GROUP_FLAG_FREE) return block;

     /* otherwise there is still some free blocks, put the group back into the category */
-    list_add_tail( &category->groups, &group->entry );
+    RtlInterlockedPushEntrySList( &category->groups, (SLIST_ENTRY *)&group->entry );

     return block;
 }
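The free_bits flag dance is what replaces the lock for alloc/free
serialization. On this allocating side the outcome is binary: either the CAS
observed free_bits == 0 and set GROUP_FLAG_FREE, so the now-full group is left
unlinked and ownership passes to whichever thread later frees its last block,
or a concurrent free already set some bits again and the group goes back on
the SList. Spelled out as a sketch (not patch code):

    /* after group_find_free_block( group, ... ) returned 0: */
    if (InterlockedCompareExchange( &group->free_bits, GROUP_FLAG_FREE, 0 ) == 0)
    {
        /* flag set: group is full and off-list; do NOT push it back, the
           last free (see heap_free_block_lfh) inherits and releases it */
    }
    else
    {
        /* a racing free already published free blocks: keep ownership and
           push the group back for other allocating threads */
    }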
@@ -1846,7 +1858,6 @@ static NTSTATUS heap_allocate_block_lfh( struct heap *heap, ULONG flags, SIZE_T
     block_size = BLOCK_CATEGORY_SIZE( BLOCK_SIZE_CATEGORY( block_size ) );

-    heap_lock( heap, flags );
     if ((block = find_free_block_lfh( heap, flags, block_size, category )))
     {
         block_set_type( block, BLOCK_TYPE_USED );
@@ -1857,7 +1868,6 @@ static NTSTATUS heap_allocate_block_lfh( struct heap *heap, ULONG flags, SIZE_T
         mark_block_tail( block, flags );
         *ret = block + 1;
     }
-    heap_unlock( heap, flags );

     return block ? STATUS_SUCCESS : STATUS_NO_MEMORY;
 }
@@ -1874,22 +1884,18 @@ static NTSTATUS heap_free_block_lfh( struct heap *heap, ULONG flags, struct bloc
     category = heap->categories + BLOCK_SIZE_CATEGORY( block_size );
     if (category == last) return STATUS_UNSUCCESSFUL;

-    heap_lock( heap, flags );
-
     i = block_get_group_index( block );
     block_init_free( block, flags, NULL, block_size );
     block_set_flags( block, 0, BLOCK_FLAG_LFH );
     block_set_group( block, block_size, group );

     /* if this was the last used block in a group and GROUP_FLAG_FREE was set */
-    if ((group->free_bits |= (1 << i)) == ~0)
+    if (InterlockedOr( &group->free_bits, 1 << i ) == ~(1 << i))
     {
         /* thread now owns the group, and can release it to its category */
         status = heap_release_category_group( heap, flags, category, group );
     }

-    heap_unlock( heap, flags );
-
     return status;
 }
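On the freeing side the test changed shape because InterlockedOr, like
InterlockedAnd earlier, returns the *previous* value: the old code compared
the post-or value against ~0 (all free bits plus GROUP_FLAG_FREE set), the new
code equivalently compares the pre-or value against ~(1 << i). A concrete
instance:

    /* freeing block i == 2 of a full, unlinked group: every other bit set */
    LONG bits = ~(1 << 2);                       /* 0xFFFFFFFB, includes GROUP_FLAG_FREE */
    LONG prev = InterlockedOr( &bits, 1 << 2 );  /* bits becomes ~0, prev == ~(1 << 2) */
    BOOL owns = (prev == ~(1 << 2));             /* same as old ((bits |= 1 << 2) == ~0) */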
@@ -2016,15 +2022,11 @@ static NTSTATUS heap_resize_block( struct heap *heap, ULONG flags, struct block
         if (ROUND_SIZE( *old_size, BLOCK_ALIGN - 1) != ROUND_SIZE( size, BLOCK_ALIGN - 1)) return STATUS_NO_MEMORY;
         if (size >= *old_size) return STATUS_NO_MEMORY;

-        heap_lock( heap, flags );
-
         block_set_flags( block, BLOCK_FLAG_USER_MASK & ~BLOCK_FLAG_USER_INFO, BLOCK_USER_FLAGS( flags ) );
         block->tail_size = old_block_size - sizeof(*block) - size;
         initialize_block( block, *old_size, size, flags );
         mark_block_tail( block, flags );

-        heap_unlock( heap, flags );
-
         *ret = block + 1;
         return STATUS_SUCCESS;
     }