From: Vibhav Pant <vibhavp@gmail.com>
---
 dlls/ntdll/unix/server.c       |   2 +-
 dlls/ntdll/unix/sync.c         |  15 ++-
 dlls/ntdll/unix/unix_private.h |   2 +-
 dlls/ntdll/unix/virtual.c      | 164 ++++++++++++++++++++++++---------
 4 files changed, 138 insertions(+), 45 deletions(-)

diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c
index f3ffd99c3fc..00aeea4fb0f 100644
--- a/dlls/ntdll/unix/server.c
+++ b/dlls/ntdll/unix/server.c
@@ -1019,7 +1019,7 @@ static BOOL add_fd_to_cache( HANDLE handle, int fd, enum server_fd_type type,
     else
     {
         void *ptr = anon_mmap_alloc( FD_CACHE_BLOCK_SIZE * sizeof(union fd_cache_entry),
-                                     PROT_READ | PROT_WRITE );
+                                     PROT_READ | PROT_WRITE, FALSE );
         if (ptr == MAP_FAILED) return FALSE;
         fd_cache[entry] = ptr;
     }
diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c
index 4b2d7c1ccbc..737cc8ed865 100644
--- a/dlls/ntdll/unix/sync.c
+++ b/dlls/ntdll/unix/sync.c
@@ -2058,6 +2058,19 @@ NTSTATUS WINAPI NtCreateSection( HANDLE *handle, ACCESS_MASK access, const OBJEC
     *handle = 0;
 
+    if (sec_flags & SEC_LARGE_PAGES)
+    {
+        SIZE_T min_size = user_shared_data->LargePageMinimum;
+        if (file != NULL || size == NULL)
+        {
+            return STATUS_INVALID_PARAMETER;
+        }
+
+        if (min_size == 0 || size->QuadPart % min_size != 0)
+        {
+            return STATUS_INVALID_PARAMETER;
+        }
+    }
     switch (protect & 0xff)
     {
     case PAGE_READONLY:
@@ -2442,7 +2455,7 @@ static union tid_alert_entry *get_tid_alert_entry( HANDLE tid )
     if (!tid_alert_blocks[block_idx])
     {
         static const size_t size = TID_ALERT_BLOCK_SIZE * sizeof(union tid_alert_entry);
-        void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE );
+        void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE, FALSE );
         if (ptr == MAP_FAILED) return NULL;
         if (InterlockedCompareExchangePointer( (void **)&tid_alert_blocks[block_idx], ptr, NULL ))
             munmap( ptr, size ); /* someone beat us to it */
diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h
index fcb3de83a29..2ce10b62a1c 100644
--- a/dlls/ntdll/unix/unix_private.h
+++ b/dlls/ntdll/unix/unix_private.h
@@ -252,7 +252,7 @@ extern unsigned int alloc_object_attributes( const OBJECT_ATTRIBUTES *attr, stru
 extern NTSTATUS system_time_precise( void *args );
 
 extern void *anon_mmap_fixed( void *start, size_t size, int prot, int flags );
-extern void *anon_mmap_alloc( size_t size, int prot );
+extern void *anon_mmap_alloc( size_t size, int prot, BOOL large_pages );
 extern void virtual_init(void);
 extern ULONG_PTR get_system_affinity_mask(void);
 extern void virtual_get_system_info( SYSTEM_BASIC_INFORMATION *info, BOOL wow64 );
diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c
index 3981905bcd3..6de1d1c21c5 100644
--- a/dlls/ntdll/unix/virtual.c
+++ b/dlls/ntdll/unix/virtual.c
@@ -195,6 +195,12 @@ static struct list teb_list = LIST_INIT( teb_list );
 #define MAP_NORESERVE 0
 #endif
 
+#if defined( MAP_HUGETLB ) && defined( MAP_LOCKED )
+#define MMAP_LARGE_PAGES_FLAG (MAP_HUGETLB | MAP_LOCKED)
+#else
+#define MMAP_LARGE_PAGES_FLAG 0
+#endif
+
 #ifdef _WIN64 /* on 64-bit the page protection bytes use a 2-level table */
 static const size_t pages_vprot_shift = 20;
 static const size_t pages_vprot_mask = (1 << 20) - 1;
@@ -232,9 +238,10 @@ void *anon_mmap_fixed( void *start, size_t size, int prot, int flags )
 }
 
 /* allocate anonymous mmap() memory at any address */
-void *anon_mmap_alloc( size_t size, int prot )
+void *anon_mmap_alloc( size_t size, int prot, BOOL large_pages )
 {
-    return mmap( NULL, size, prot, MAP_PRIVATE | MAP_ANON, -1, 0 );
+    return mmap( NULL, size, prot,
+                 MAP_PRIVATE | MAP_ANON | (large_pages ? MMAP_LARGE_PAGES_FLAG : 0), -1, 0 );
 }
@@ -1282,13 +1289,14 @@ static struct wine_rb_entry *find_view_inside_range( void **base_ptr, void **end
  * retrying inside it, and return where it actually succeeded, or NULL.
  */
 static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
-                                void *start, size_t size, int unix_prot )
+                                void *start, size_t size, int unix_prot, BOOL large_pages )
 {
     void *ptr;
+    int flags = large_pages ? MMAP_LARGE_PAGES_FLAG : 0;
 
     while (start && base <= start && (char*)start + size <= (char*)end)
     {
-        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, 0 )) != MAP_FAILED) return start;
+        if ((ptr = anon_mmap_tryfixed( start, size, unix_prot, flags )) != MAP_FAILED) return start;
         TRACE( "Found free area is already mapped, start %p.\n", start );
         if (errno != EEXIST)
         {
@@ -1313,7 +1321,7 @@ static void* try_map_free_area( void *base, void *end, ptrdiff_t step,
  * Find a free area between views inside the specified range and map it.
  * virtual_mutex must be held by caller.
  */
-static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot, size_t align_mask )
+static void *map_free_area( void *base, void *end, size_t size, int top_down, int unix_prot, size_t align_mask, BOOL large_pages )
 {
     struct wine_rb_entry *first = find_view_inside_range( &base, &end, top_down );
     ptrdiff_t step = top_down ? -(align_mask + 1) : (align_mask + 1);
@@ -1328,7 +1336,7 @@ static void *map_free_area( void *base, void *end, size_t size, int top_down, in
         {
             struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
             if ((start = try_map_free_area( (char *)view->base + view->size, (char *)start + size, step,
-                                            start, size, unix_prot ))) break;
+                                            start, size, unix_prot, large_pages ))) break;
             start = ROUND_ADDR( (char *)view->base - size, align_mask );
             /* stop if remaining space is not large enough */
             if (!start || start >= end || start < base) return NULL;
@@ -1344,7 +1352,7 @@ static void *map_free_area( void *base, void *end, size_t size, int top_down, in
         {
             struct file_view *view = WINE_RB_ENTRY_VALUE( first, struct file_view, entry );
             if ((start = try_map_free_area( start, view->base, step,
-                                            start, size, unix_prot ))) break;
+                                            start, size, unix_prot, large_pages ))) break;
             start = ROUND_ADDR( (char *)view->base + view->size + align_mask, align_mask );
             /* stop if remaining space is not large enough */
             if (!start || start >= end || (char *)end - (char *)start < size) return NULL;
@@ -1353,7 +1361,7 @@ static void *map_free_area( void *base, void *end, size_t size, int top_down, in
     }
 
     if (!first)
-        start = try_map_free_area( base, end, step, start, size, unix_prot );
+        start = try_map_free_area( base, end, step, start, size, unix_prot, large_pages );
 
     if (!start)
         ERR( "couldn't map free area in range %p-%p, size %p\n", base, end, (void *)size );
@@ -1494,7 +1502,7 @@ static struct file_view *alloc_view(void)
     }
     if (view_block_start == view_block_end)
    {
         void *ptr;
     }
@@ -1916,12 +1925,13 @@ static void *map_reserved_area( void *limit_low, void *limit_high, size_t size,
  * Map a memory area at a fixed address.
  * virtual_mutex must be held by caller.
  */
-static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
+static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot, BOOL large_pages )
 {
     int unix_prot = get_unix_prot(vprot);
     struct reserved_area *area;
     NTSTATUS status;
     char *start = base, *end = (char *)base + size;
+    int flags = large_pages ? MMAP_LARGE_PAGES_FLAG : 0;
 
     if (find_view_range( base, size )) return STATUS_CONFLICTING_ADDRESSES;
 
@@ -1934,19 +1944,19 @@ static NTSTATUS map_fixed_area( void *base, size_t size, unsigned int vprot )
         if (area_end <= start) continue;
         if (area_start > start)
         {
-            if (anon_mmap_tryfixed( start, area_start - start, unix_prot, 0 ) == MAP_FAILED) goto failed;
+            if (anon_mmap_tryfixed( start, area_start - start, unix_prot, flags ) == MAP_FAILED) goto failed;
             start = area_start;
         }
         if (area_end >= end)
         {
-            if (anon_mmap_fixed( start, end - start, unix_prot, 0 ) == MAP_FAILED) goto failed;
+            if (anon_mmap_fixed( start, end - start, unix_prot, flags ) == MAP_FAILED) goto failed;
             return STATUS_SUCCESS;
         }
-        if (anon_mmap_fixed( start, area_end - start, unix_prot, 0 ) == MAP_FAILED) goto failed;
+        if (anon_mmap_fixed( start, area_end - start, unix_prot, flags ) == MAP_FAILED) goto failed;
         start = area_end;
     }
 
-    if (anon_mmap_tryfixed( start, end - start, unix_prot, 0 ) == MAP_FAILED) goto failed;
+    if (anon_mmap_tryfixed( start, end - start, unix_prot, flags ) == MAP_FAILED) goto failed;
     return STATUS_SUCCESS;
 
 failed:
@@ -1979,7 +1989,9 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
     int top_down = alloc_type & MEM_TOP_DOWN;
     void *ptr;
     NTSTATUS status;
+    BOOL large_pages = (alloc_type & MEM_LARGE_PAGES) != 0;
 
+    if (large_pages) vprot |= SEC_LARGE_PAGES;
     if (alloc_type & MEM_REPLACE_PLACEHOLDER)
     {
         struct file_view *view;
@@ -1997,6 +2009,14 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
         return STATUS_SUCCESS;
     }
 
+    if (large_pages)
+    {
+        if (user_shared_data->LargePageMinimum == 0)
+            return STATUS_INVALID_PARAMETER;
+        if (size == 0 || size % user_shared_data->LargePageMinimum != 0)
+            return STATUS_INVALID_PARAMETER;
+    }
+
     if (limit_high && limit_low >= limit_high) return STATUS_INVALID_PARAMETER;
 
     if (base)
     {
@@ -2005,7 +2025,8 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
         if (limit_low && base < (void *)limit_low) return STATUS_CONFLICTING_ADDRESSES;
         if (limit_high && is_beyond_limit( base, size, (void *)limit_high )) return STATUS_CONFLICTING_ADDRESSES;
         if (is_beyond_limit( base, size, host_addr_space_limit )) return STATUS_CONFLICTING_ADDRESSES;
-        if ((status = map_fixed_area( base, size, vprot ))) return status;
+        if (large_pages && ((UINT_PTR)base % user_shared_data->LargePageMinimum) != 0) return STATUS_INVALID_PARAMETER;
+        if ((status = map_fixed_area( base, size, vprot, large_pages ))) return status;
         if (is_beyond_limit( base, size, working_set_limit )) working_set_limit = address_space_limit;
         ptr = base;
     }
@@ -2021,7 +2042,7 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
         if (limit_low && (void *)limit_low > start) start = (void *)limit_low;
         if (limit_high && (void *)limit_high < end) end = (char *)limit_high + 1;
 
-        if ((ptr = map_reserved_area( start, end, size, top_down, get_unix_prot(vprot), align_mask )))
+        if ((ptr = map_reserved_area( start, end, size, top_down, get_unix_prot(vprot), align_mask, large_pages )))
        {
             TRACE( "got mem in reserved area %p-%p\n", ptr, (char *)ptr + size );
             goto done;
@@ -2029,7 +2050,7 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
 
         if (start > address_space_start || end < host_addr_space_limit || top_down)
        {
-            if (!(ptr = map_free_area( start, end, size, top_down, get_unix_prot(vprot), align_mask )))
+            if (!(ptr = map_free_area( start, end, size, top_down, get_unix_prot(vprot), align_mask, large_pages )))
                 return STATUS_NO_MEMORY;
             TRACE( "got mem with map_free_area %p-%p\n", ptr, (char *)ptr + size );
             goto done;
@@ -2037,7 +2058,7 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
 
         for (;;)
         {
-            if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot) )) == MAP_FAILED)
+            if ((ptr = anon_mmap_alloc( view_size, get_unix_prot(vprot), large_pages )) == MAP_FAILED)
             {
                 status = (errno == ENOMEM) ? STATUS_NO_MEMORY : STATUS_INVALID_PARAMETER;
                 ERR( "anon mmap error %s, size %p, unix_prot %#x\n",
@@ -2055,6 +2076,18 @@ static NTSTATUS map_view( struct file_view **view_ret, void *base, size_t size,
 done:
     status = create_view( view_ret, ptr, size, vprot );
     if (status != STATUS_SUCCESS) unmap_area( ptr, size );
+    if (status == STATUS_SUCCESS && large_pages && mlock( ptr, size ) != 0)
+    {
+        unmap_area( ptr, size );
+        switch (errno)
+        {
+        case EPERM:
+            status = STATUS_ACCESS_DENIED;
+            break;
+        default:
+            status = STATUS_NO_MEMORY;
+        }
+    }
     return status;
 }
@@ -2082,10 +2115,17 @@ static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start
         prot |= PROT_EXEC;
     }
 
+#ifdef MAP_LOCKED
+    if (vprot & SEC_LARGE_PAGES)
+    {
+        flags |= MAP_LOCKED;
+    }
+#endif
     /* only try mmap if media is not removable (or if we require write access) */
     if (!removable || (flags & MAP_SHARED))
     {
-        if (mmap( (char *)view->base + start, size, prot, flags, fd, offset ) != MAP_FAILED)
+        ptr = mmap( (char *)view->base + start, size, prot, flags, fd, offset );
+        if (ptr != MAP_FAILED)
             goto done;
 
         switch (errno)
@@ -2118,8 +2158,14 @@ static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start
     }
 
     /* Reserve the memory with an anonymous mmap */
-    ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE, 0 );
-    if (ptr == MAP_FAILED)
+    ptr = anon_mmap_fixed( (char *)view->base + start, size, PROT_READ | PROT_WRITE,
+#ifdef MAP_LOCKED
+                           vprot & SEC_LARGE_PAGES ? MAP_LOCKED : 0
+#else
+                           0
+#endif
+                           );
+    if ( ptr == MAP_FAILED )
     {
         ERR( "anon mmap error %s, range %p-%p\n", strerror(errno),
              (char *)view->base + start, (char *)view->base + start + size );
@@ -2130,6 +2176,16 @@ static NTSTATUS map_file_into_view( struct file_view *view, int fd, size_t start
     if (prot != (PROT_READ|PROT_WRITE)) mprotect( ptr, size, prot ); /* Set the right protection */
 done:
     set_page_vprot( (char *)view->base + start, size, vprot );
+    if ( vprot & SEC_LARGE_PAGES && mlock( ptr, size ) != 0 )
+    {
+        switch (errno)
+        {
+        case EPERM:
+            return STATUS_ACCESS_DENIED;
+        default:
+            return STATUS_NO_MEMORY;
+        }
+    }
     return STATUS_SUCCESS;
 }
@@ -3179,6 +3235,16 @@ static unsigned int virtual_map_section( HANDLE handle, PVOID *addr_ptr, ULONG_P
     base = *addr_ptr;
     offset.QuadPart = offset_ptr ? offset_ptr->QuadPart : 0;
     if (offset.QuadPart >= full_size) return STATUS_INVALID_PARAMETER;
+
+    if (sec_flags & SEC_LARGE_PAGES)
+    {
+        if (!*size_ptr || user_shared_data->LargePageMinimum == 0 ||
+            *size_ptr % user_shared_data->LargePageMinimum != 0)
+            return STATUS_INVALID_PARAMETER;
+        if (base && ((UINT_PTR)base % user_shared_data->LargePageMinimum != 0))
+            return STATUS_INVALID_PARAMETER;
+    }
+
     if (*size_ptr)
     {
         size = *size_ptr;
@@ -3204,7 +3270,8 @@ static unsigned int virtual_map_section( HANDLE handle, PVOID *addr_ptr, ULONG_P
 
     server_enter_uninterrupted_section( &virtual_mutex, &sigset );
 
-    res = map_view( &view, base, size, alloc_type, vprot, limit_low, limit_high, 0 );
+    res = map_view( &view, base, size, alloc_type, vprot, limit_low, limit_high,
+                    sec_flags & SEC_LARGE_PAGES ? ( user_shared_data->LargePageMinimum - 1 ) : 0 );
     if (res) goto done;
 
     TRACE( "handle=%p size=%lx offset=%s\n", handle, size, wine_dbgstr_longlong(offset.QuadPart) );
@@ -3270,7 +3337,7 @@ static void *alloc_virtual_heap( SIZE_T size )
         mmap_remove_reserved_area( ret, size );
         return ret;
     }
-    return anon_mmap_alloc( size, PROT_READ | PROT_WRITE );
+    return anon_mmap_alloc( size, PROT_READ | PROT_WRITE, FALSE );
 }
 
 /***********************************************************************
@@ -4470,33 +4537,45 @@ static NTSTATUS allocate_virtual_memory( void **ret, SIZE_T *size_ptr, ULONG typ
     sigset_t sigset;
     SIZE_T size = *size_ptr;
     NTSTATUS status = STATUS_SUCCESS;
+    BOOL large_pages = (type & MEM_LARGE_PAGES) != 0;
 
     /* Round parameters to a page boundary */
 
     if (is_beyond_limit( 0, size, working_set_limit )) return STATUS_WORKING_SET_LIMIT_RANGE;
-
+    if (large_pages && (user_shared_data->LargePageMinimum == 0 || size == 0 ||
+                        size % user_shared_data->LargePageMinimum != 0 ||
+                        (type & (MEM_RESERVE | MEM_COMMIT)) != (MEM_RESERVE | MEM_COMMIT)))
+        return STATUS_INVALID_PARAMETER;
     if (*ret)
     {
-        if (type & MEM_RESERVE && !(type & MEM_REPLACE_PLACEHOLDER)) /* Round down to 64k boundary */
-            base = ROUND_ADDR( *ret, granularity_mask );
+        if (large_pages)
+        {
+            if ((UINT_PTR)*ret % user_shared_data->LargePageMinimum != 0) return STATUS_INVALID_PARAMETER;
+            base = *ret;
+        }
         else
-            base = ROUND_ADDR( *ret, page_mask );
-        size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;
-
-        /* disallow low 64k, wrap-around and kernel space */
-        if (((char *)base < (char *)0x10000) ||
-            ((char *)base + size < (char *)base) ||
-            is_beyond_limit( base, size, address_space_limit ))
         {
-            /* address 1 is magic to mean DOS area */
-            if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
-            else return STATUS_INVALID_PARAMETER;
+            if (type & MEM_RESERVE &&
+                !(type & MEM_REPLACE_PLACEHOLDER )) /* Round down to 64k boundary */
+                base = ROUND_ADDR( *ret, granularity_mask );
+            else
+                base = ROUND_ADDR( *ret, page_mask );
+            size = (((UINT_PTR)*ret + size + page_mask) & ~page_mask) - (UINT_PTR)base;
+
+            /* disallow low 64k, wrap-around and kernel space */
+            if (((char *)base < (char *)0x10000 ) || ((char *)base + size < (char *)base) ||
+                is_beyond_limit( base, size, address_space_limit ))
+            {
+                /* address 1 is magic to mean DOS area */
+                if (!base && *ret == (void *)1 && size == 0x110000) is_dos_memory = TRUE;
+                else return STATUS_INVALID_PARAMETER;
+            }
         }
     }
     else
     {
         base = NULL;
-        size = (size + page_mask) & ~page_mask;
+        if (!large_pages) size = (size + page_mask) & ~page_mask;
     }
 
     /* Compute the alloc type flags */
@@ -4527,7 +4606,7 @@ static NTSTATUS allocate_virtual_memory( void **ret, SIZE_T *size_ptr, ULONG typ
         if (vprot & VPROT_WRITECOPY) status = STATUS_INVALID_PAGE_PROTECTION;
         else if (is_dos_memory) status = allocate_dos_memory( &view, vprot );
        else status = map_view( &view, base, size, type, vprot, limit_low, limit_high,
-                                align ? align - 1 : granularity_mask );
+                                align ? align - 1 : (large_pages ? user_shared_data->LargePageMinimum - 1 : granularity_mask) );
 
         if (status == STATUS_SUCCESS) base = view->base;
     }
@@ -4584,7 +4663,7 @@ static NTSTATUS allocate_virtual_memory( void **ret, SIZE_T *size_ptr, ULONG typ
 NTSTATUS WINAPI NtAllocateVirtualMemory( HANDLE process, PVOID *ret, ULONG_PTR zero_bits,
                                          SIZE_T *size_ptr, ULONG type, ULONG protect )
 {
-    static const ULONG type_mask = MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET;
+    static const ULONG type_mask = MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH | MEM_RESET | MEM_LARGE_PAGES;
     ULONG_PTR limit;
 
     TRACE("%p %p %08lx %x %08x\n", process, *ret, *size_ptr, (int)type, (int)protect );
@@ -4724,7 +4803,8 @@ NTSTATUS WINAPI NtAllocateVirtualMemoryEx( HANDLE process, PVOID *ret, SIZE_T *s
                                            ULONG count )
 {
     static const ULONG type_mask = MEM_COMMIT | MEM_RESERVE | MEM_TOP_DOWN | MEM_WRITE_WATCH
-                                   | MEM_RESET | MEM_RESERVE_PLACEHOLDER | MEM_REPLACE_PLACEHOLDER;
+                                   | MEM_RESET | MEM_RESERVE_PLACEHOLDER | MEM_REPLACE_PLACEHOLDER
+                                   | MEM_LARGE_PAGES;
     ULONG_PTR limit_low = 0;
     ULONG_PTR limit_high = 0;
     ULONG_PTR align = 0;