Signed-off-by: Paul Gofman <pgofman@codeweavers.com>
---
 dlls/ntdll/unix/virtual.c | 40 +++++++++++++++++++++++++++++++++-----------
 1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c
index 984af2d4a21..41d878546bb 100644
--- a/dlls/ntdll/unix/virtual.c
+++ b/dlls/ntdll/unix/virtual.c
@@ -943,6 +943,24 @@ static BYTE get_page_vprot( const void *addr )
 }
 
+/***********************************************************************
+ *           get_vprot_range_size
+ *
+ * Return the size of the region with equal masked vprot byte.
+ * Also return the protections for the first page.
+ * The function assumes that base and size are page aligned and
+ * base + size does not wrap around. */
+static SIZE_T get_vprot_range_size( char *base, SIZE_T size, BYTE mask, BYTE *vprot )
+{
+    char *addr;
+
+    *vprot = get_page_vprot( base );
+    for (addr = base + page_size; addr != base + size; addr += page_size)
+        if ((*vprot ^ get_page_vprot( addr )) & mask) break;
+
+    return addr - base;
+}
+
 /***********************************************************************
  *           set_page_vprot
  *
@@ -2047,18 +2065,21 @@ done:
  */
 static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
 {
-    SIZE_T i, start;
+    SIZE_T offset;
 
-    start = ((char *)base - (char *)view->base) >> page_shift;
-    *vprot = get_page_vprot( base );
+    base = ROUND_ADDR( base, page_mask );
+    offset = (char *)base - (char *)view->base;
 
     if (view->protect & SEC_RESERVE)
     {
         SIZE_T ret = 0;
+
+        *vprot = get_page_vprot( base );
+
         SERVER_START_REQ( get_mapping_committed_range )
         {
             req->base   = wine_server_client_ptr( view->base );
-            req->offset = start << page_shift;
+            req->offset = offset;
             if (!wine_server_call( req ))
             {
                 ret = reply->size;
@@ -2072,9 +2093,8 @@ static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vpro
         SERVER_END_REQ;
         return ret;
     }
-    for (i = start + 1; i < view->size >> page_shift; i++)
-        if ((*vprot ^ get_page_vprot( (char *)view->base + (i << page_shift) )) & VPROT_COMMITTED) break;
-    return (i - start) << page_shift;
+
+    return get_vprot_range_size( base, view->size - offset, VPROT_COMMITTED, vprot );
 }
@@ -4196,7 +4216,6 @@ static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
     else
     {
         BYTE vprot;
-        char *ptr;
         SIZE_T range_size = get_committed_size( view, base, &vprot );
 
         info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
@@ -4205,9 +4224,8 @@ static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
         if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
         else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
         else info->Type = MEM_PRIVATE;
-        for (ptr = base; ptr < base + range_size; ptr += page_size)
-            if ((get_page_vprot( ptr ) ^ vprot) & ~VPROT_WRITEWATCH) break;
-        info->RegionSize = ptr - base;
+
+        info->RegionSize = get_vprot_range_size( base, range_size, ~VPROT_WRITEWATCH, &vprot );
     }
     server_leave_uninterrupted_section( &virtual_mutex, &sigset );
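
[Aside, not part of the patch: the helper's contract is "byte length of the
leading run of pages whose vprot byte agrees with the first page's under
'mask'". A minimal standalone sketch of that semantics follows; PAGE_SIZE and
masked_run_length() are illustrative stand-ins, not Wine identifiers.]

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 0x1000  /* illustrative constant; Wine uses a runtime value */

    /* Length (in bytes) of the leading run of pages whose protection byte
     * matches the first page's in the bits selected by 'mask'. */
    static size_t masked_run_length( const unsigned char *prot, size_t count,
                                     unsigned char mask, unsigned char *first )
    {
        size_t i;

        *first = prot[0];
        for (i = 1; i < count; i++)
            if ((*first ^ prot[i]) & mask) break;
        return i * PAGE_SIZE;
    }

    int main(void)
    {
        unsigned char prot[] = { 0x03, 0x03, 0x07, 0x02 }, first;

        /* With mask 0x01, page 3 (0x07) still matches; page 4 (0x02) breaks the run. */
        printf( "%#zx\n", masked_run_length( prot, 4, 0x01, &first ) );  /* prints 0x3000 */
        return 0;
    }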
Signed-off-by: Paul Gofman <pgofman@codeweavers.com>
---
v5:
    - Introduce get_committed_size_vprot() to avoid checking for SEC_RESERVE
      in get_basic_memory_info() and keep that logic contained.
 dlls/ntdll/unix/virtual.c | 37 ++++++++++++++++++++++++-------------
 1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c
index 41d878546bb..5ba28a71f78 100644
--- a/dlls/ntdll/unix/virtual.c
+++ b/dlls/ntdll/unix/virtual.c
@@ -2058,21 +2058,21 @@ done:
 /***********************************************************************
- *           get_committed_size
+ *           get_committed_size_vprot
  *
- * Get the size of the committed range starting at base.
+ * Get the size of the committed range with equal masked vprot bytes starting at base.
  * Also return the protections for the first page.
  */
-static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
+static SIZE_T get_committed_size_vprot( struct file_view *view, void *base, BYTE *vprot, BYTE vprot_mask )
 {
-    SIZE_T offset;
+    SIZE_T offset, size;
 
     base = ROUND_ADDR( base, page_mask );
     offset = (char *)base - (char *)view->base;
 
     if (view->protect & SEC_RESERVE)
     {
-        SIZE_T ret = 0;
+        size = 0;
 
         *vprot = get_page_vprot( base );
 
@@ -2082,22 +2082,34 @@ static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vpro
             req->offset = offset;
             if (!wine_server_call( req ))
             {
-                ret = reply->size;
+                size = reply->size;
                 if (reply->committed)
                 {
                     *vprot |= VPROT_COMMITTED;
-                    set_page_vprot_bits( base, ret, VPROT_COMMITTED, 0 );
+                    set_page_vprot_bits( base, size, VPROT_COMMITTED, 0 );
                 }
             }
         }
         SERVER_END_REQ;
-        return ret;
-    }
-
-    return get_vprot_range_size( base, view->size - offset, VPROT_COMMITTED, vprot );
+        if (!size || !vprot_mask) return size;
+    }
+    else size = view->size - offset;
+
+    return get_vprot_range_size( base, size, VPROT_COMMITTED | vprot_mask, vprot );
 }
 
+/***********************************************************************
+ *           get_committed_size
+ *
+ * Get the size of the committed range starting at base.
+ * Also return the protections for the first page.
+ */
+static SIZE_T get_committed_size( struct file_view *view, void *base, BYTE *vprot )
+{
+    return get_committed_size_vprot( view, base, vprot, 0 );
+}
+
 /***********************************************************************
  *           decommit_view
  *
@@ -4216,7 +4228,8 @@ static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
     else
     {
         BYTE vprot;
-        SIZE_T range_size = get_committed_size( view, base, &vprot );
+
+        info->RegionSize = get_committed_size_vprot( view, base, &vprot, ~VPROT_WRITEWATCH );
 
         info->State = (vprot & VPROT_COMMITTED) ? MEM_COMMIT : MEM_RESERVE;
         info->Protect = (vprot & VPROT_COMMITTED) ? get_win32_prot( vprot, view->protect ) : 0;
@@ -4224,8 +4237,6 @@ static NTSTATUS get_basic_memory_info( HANDLE process, LPCVOID addr,
         if (view->protect & SEC_IMAGE) info->Type = MEM_IMAGE;
         else if (view->protect & (SEC_FILE | SEC_RESERVE | SEC_COMMIT)) info->Type = MEM_MAPPED;
         else info->Type = MEM_PRIVATE;
-
-        info->RegionSize = get_vprot_range_size( base, range_size, ~VPROT_WRITEWATCH, &vprot );
     }
     server_leave_uninterrupted_section( &virtual_mutex, &sigset );
Signed-off-by: Paul Gofman <pgofman@codeweavers.com>
---
v5:
    - Simplify the code:
        - get rid of the allocated vprot bytes range check, since the function
          is only called for ranges within views, for which the vprot bytes
          should always be allocated;
        - get rid of a few variables and one #ifdef, straightening up the page
          index handling on 64 bit a bit;
        - rename 'i' to 'curr_idx';
        - use a 32 bit word on 32 bit.
 dlls/ntdll/unix/virtual.c | 53 ++++++++++++++++++++++++++++++++++++++++++++++++-------
 1 file changed, 46 insertions(+), 7 deletions(-)
diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c
index 5ba28a71f78..8c6a8299dd0 100644
--- a/dlls/ntdll/unix/virtual.c
+++ b/dlls/ntdll/unix/virtual.c
@@ -948,17 +948,56 @@ static BYTE get_page_vprot( const void *addr )
  *
  * Return the size of the region with equal masked vprot byte.
  * Also return the protections for the first page.
- * The function assumes that base and size are page aligned and
- * base + size does not wrap around. */
+ * The function assumes that base and size are page aligned,
+ * base + size does not wrap around and the range is within the view, so
+ * vprot bytes are allocated for the range. */
 static SIZE_T get_vprot_range_size( char *base, SIZE_T size, BYTE mask, BYTE *vprot )
 {
-    char *addr;
+#define BYTES_IN_WORD sizeof(UINT_PTR)
+    static const UINT_PTR word_from_byte = (UINT_PTR)0x101010101010101;
+    static const UINT_PTR index_align_mask = BYTES_IN_WORD - 1;
+    SIZE_T curr_idx, start_idx, end_idx, aligned_start_idx;
+    UINT_PTR vprot_word, mask_word;
+    const BYTE *vprot_ptr;
 
-    *vprot = get_page_vprot( base );
-    for (addr = base + page_size; addr != base + size; addr += page_size)
-        if ((*vprot ^ get_page_vprot( addr )) & mask) break;
+    TRACE("base %p, size %p, mask %#x.\n", base, (void *)size, mask);
 
-    return addr - base;
+    curr_idx = start_idx = (size_t)base >> page_shift;
+    end_idx = start_idx + (size >> page_shift);
+
+    aligned_start_idx = (start_idx + index_align_mask) & ~index_align_mask;
+    if (aligned_start_idx > end_idx) aligned_start_idx = end_idx;
+
+#ifdef _WIN64
+    vprot_ptr = pages_vprot[curr_idx >> pages_vprot_shift] + (curr_idx & pages_vprot_mask);
+#else
+    vprot_ptr = pages_vprot + curr_idx;
+#endif
+    *vprot = *vprot_ptr;
+
+    /* The page count in the page table is at least a multiple of BYTES_IN_WORD,
+     * so we don't have to worry about crossing the word boundary on unaligned idx values. */
+
+    for (; curr_idx < aligned_start_idx; ++curr_idx, ++vprot_ptr)
+        if ((*vprot ^ *vprot_ptr) & mask) return (curr_idx - start_idx) << page_shift;
+
+    vprot_word = word_from_byte * *vprot;
+    mask_word = word_from_byte * mask;
+    for (; curr_idx < end_idx; curr_idx += BYTES_IN_WORD, vprot_ptr += BYTES_IN_WORD)
+    {
+#ifdef _WIN64
+        if (!(curr_idx & pages_vprot_mask)) vprot_ptr = pages_vprot[curr_idx >> pages_vprot_shift];
+#endif
+        if ((vprot_word ^ *(UINT_PTR *)vprot_ptr) & mask_word)
+        {
+            for (; curr_idx < end_idx; ++curr_idx, ++vprot_ptr)
+                if ((*vprot ^ *vprot_ptr) & mask) break;
+            return (curr_idx - start_idx) << page_shift;
+        }
+    }
+    return size;
+#undef BYTES_IN_WORD
 }
 
 /***********************************************************************
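
[Aside, not part of the patch: the word-at-a-time scan above relies on the
classic byte-broadcast trick. Multiplying a byte by 0x0101...01 replicates it
into every byte lane of a UINT_PTR, so one XOR plus one AND tests
sizeof(UINT_PTR) vprot bytes per iteration: 8 on 64 bit, 4 on 32 bit, which is
presumably what "use a 32 bit word on 32 bit" in the changelog refers to. A
self-contained demonstration, with illustrative names only:]

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* Truncates to 0x01010101 when uintptr_t is 32 bit, as in the patch. */
        const uintptr_t ones = (uintptr_t)0x0101010101010101ULL;
        unsigned char bytes[sizeof(uintptr_t)];
        uintptr_t word, ref, mask;

        memset( bytes, 0x42, sizeof(bytes) );
        bytes[3] = 0x47;                /* one lane differs */

        ref  = ones * 0x42;             /* 0x42 broadcast into every byte lane */
        mask = ones * 0x0f;             /* compare only the low nibble of each byte */
        memcpy( &word, bytes, sizeof(word) );

        /* Non-zero iff some byte differs from 0x42 in the masked bits. */
        printf( "mismatch: %s\n", (word ^ ref) & mask ? "yes" : "no" );
        return 0;
    }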