From: Rémi Bernon rbernon@codeweavers.com
--- dlls/win32u/window.c | 11 ++- server/protocol.def | 21 ++++- server/user.c | 187 ++++++++++++++++++++++++++++--------------- server/user.h | 2 - 4 files changed, 146 insertions(+), 75 deletions(-)
diff --git a/dlls/win32u/window.c b/dlls/win32u/window.c index 3c3161acdd1..ba79cdc11dd 100644 --- a/dlls/win32u/window.c +++ b/dlls/win32u/window.c @@ -34,10 +34,9 @@
WINE_DEFAULT_DEBUG_CHANNEL(win);
-#define NB_USER_HANDLES ((LAST_USER_HANDLE - FIRST_USER_HANDLE + 1) >> 1) #define USER_HANDLE_TO_INDEX(hwnd) ((LOWORD(hwnd) - FIRST_USER_HANDLE) >> 1)
-static void *user_handles[NB_USER_HANDLES]; +static void *user_handles[MAX_USER_HANDLES];
#define SWP_AGG_NOGEOMETRYCHANGE \ (SWP_NOSIZE | SWP_NOCLIENTSIZE | SWP_NOZORDER) @@ -70,7 +69,7 @@ HANDLE alloc_user_handle( struct user_object *ptr, unsigned short type ) { UINT index = USER_HANDLE_TO_INDEX( handle );
- assert( index < NB_USER_HANDLES ); + assert( index < MAX_USER_HANDLES ); ptr->handle = handle; ptr->type = type; InterlockedExchangePointer( &user_handles[index], ptr ); @@ -86,7 +85,7 @@ void *get_user_handle_ptr( HANDLE handle, unsigned short type ) struct user_object *ptr; WORD index = USER_HANDLE_TO_INDEX( handle );
- if (index >= NB_USER_HANDLES) return NULL; + if (index >= MAX_USER_HANDLES) return NULL;
user_lock(); if ((ptr = user_handles[index])) @@ -112,7 +111,7 @@ void *next_process_user_handle_ptr( HANDLE *handle, unsigned short type ) struct user_object *ptr; WORD index = *handle ? USER_HANDLE_TO_INDEX( *handle ) + 1 : 0;
- while (index < NB_USER_HANDLES) + while (index < MAX_USER_HANDLES) { if (!(ptr = user_handles[index++])) continue; /* OBJ_OTHER_PROCESS */ if (ptr->type != type) continue; @@ -128,7 +127,7 @@ void *next_process_user_handle_ptr( HANDLE *handle, unsigned short type ) static void set_user_handle_ptr( HANDLE handle, struct user_object *ptr ) { WORD index = USER_HANDLE_TO_INDEX(handle); - assert( index < NB_USER_HANDLES ); + assert( index < MAX_USER_HANDLES ); InterlockedExchangePointer( &user_handles[index], ptr ); }
diff --git a/server/protocol.def b/server/protocol.def index 297e187d35f..9bb018adc20 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -976,9 +976,28 @@ struct shared_cursor struct rectangle clip; /* cursor clip rectangle */ };
+struct user_entry +{ + mem_size_t offset; /* shared user object offset, or next free entry index while the entry is free */ + thread_id_t tid; /* owner thread id */ + process_id_t pid; /* owner process id */ + unsigned __int64 padding; + union + { + struct + { + unsigned short type; /* object type (0 if free) */ + unsigned short generation; /* generation counter */ + }; + unsigned __int64 uniq; + }; +}; + +#define MAX_USER_HANDLES ((LAST_USER_HANDLE - FIRST_USER_HANDLE + 1) >> 1) + typedef volatile struct { - int placeholder; + struct user_entry user_entries[MAX_USER_HANDLES]; } session_shm_t;
typedef volatile struct diff --git a/server/user.c b/server/user.c index b660b91ef7f..4ada806e2a5 100644 --- a/server/user.c +++ b/server/user.c @@ -26,19 +26,41 @@ #include "thread.h" #include "file.h" #include "user.h" +#include "file.h" +#include "process.h" #include "request.h"
-struct user_handle -{ - void *ptr; /* pointer to object */ - unsigned short type; /* object type (0 if free) */ - unsigned short generation; /* generation counter */ -}; +typedef volatile struct user_entry user_entry_t;
-static struct user_handle *handles; -static struct user_handle *freelist; +static void *server_objects[MAX_USER_HANDLES]; +static mem_size_t freelist = -1; static int nb_handles; -static int allocated_handles; + +static void atomic_store_u32(volatile unsigned int *ptr, unsigned int value) +{ + /* on x86 there should be total store order guarantees, so volatile is + * enough to ensure the stores aren't reordered by the compiler, and then + * they will always be seen in-order from other CPUs. On other archs, we + * need atomic intrinsics to guarantee that. */ +#if defined(__i386__) || defined(__x86_64__) + *ptr = value; +#else + __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST); +#endif +} + +static void atomic_store_u64(volatile unsigned __int64 *ptr, unsigned __int64 value) +{ + /* only x86_64 can do a 64-bit store in one instruction with total store + * order guarantees; on other archs (including i386, where a 64-bit store + * may be split into two 32-bit stores) we need atomic intrinsics to make + * the store atomic and seen in-order from other CPUs. */ +#if defined(__x86_64__) + *ptr = value; +#else + __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST); +#endif +}
const session_shm_t *get_session_shm(void) { @@ -49,7 +71,7 @@ const session_shm_t *get_session_shm(void) { SHARED_WRITE_BEGIN( session, session_shm_t ) { - shared->placeholder = 0; + memset( (void *)shared->user_entries, 0, sizeof(shared->user_entries) ); } SHARED_WRITE_END; } @@ -57,85 +79,110 @@ const session_shm_t *get_session_shm(void) return session; }
-static struct user_handle *handle_to_entry( user_handle_t handle ) +static void *get_server_object( const user_entry_t *entry ) { - unsigned short generation; - int index = ((handle & 0xffff) - FIRST_USER_HANDLE) >> 1; + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + return server_objects[entry - handles]; +} + +static void *set_server_object( const user_entry_t *entry, void *ptr ) +{ + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + void *prev = server_objects[entry - handles]; + server_objects[entry - handles] = ptr; + return prev; +} + +static const user_entry_t *handle_to_entry( user_handle_t handle ) +{ + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + unsigned short generation = handle >> 16; + const user_entry_t *entry; + int index; + + index = ((handle & 0xffff) - FIRST_USER_HANDLE) >> 1; if (index < 0 || index >= nb_handles) return NULL; - if (!handles[index].type) return NULL; - generation = handle >> 16; - if (generation == handles[index].generation || !generation || generation == 0xffff) - return &handles[index]; + entry = handles + index; + + if (!entry->type) return NULL; + if (!generation || generation == 0xffff) return entry; + if (generation == entry->generation) return entry; return NULL; }
-static inline user_handle_t entry_to_handle( struct user_handle *ptr ) +static user_handle_t entry_to_handle( const user_entry_t *entry ) { - unsigned int index = ptr - handles; - return (index << 1) + FIRST_USER_HANDLE + (ptr->generation << 16); + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + unsigned int index = entry - handles; + return (index << 1) + FIRST_USER_HANDLE + (entry->generation << 16); }
-static inline struct user_handle *alloc_user_entry(void) +static const user_entry_t *alloc_user_entry( unsigned short type ) { - struct user_handle *handle; + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + unsigned short generation; + user_entry_t *entry;
- if (freelist) + if (freelist != -1) { - handle = freelist; - freelist = handle->ptr; - return handle; + entry = (user_entry_t *)handles + freelist; + generation = entry->generation + 1; + freelist = entry->offset; } - if (nb_handles >= allocated_handles) /* need to grow the array */ + else { - struct user_handle *new_handles; - /* grow array by 50% (but at minimum 32 entries) */ - int growth = max( 32, allocated_handles / 2 ); - int new_size = min( allocated_handles + growth, MAX_USER_HANDLES ); - if (new_size <= allocated_handles) return NULL; - if (!(new_handles = realloc( handles, new_size * sizeof(*handles) ))) - return NULL; - handles = new_handles; - allocated_handles = new_size; + if (nb_handles >= MAX_USER_HANDLES) return NULL; + entry = (user_entry_t *)handles + nb_handles++; + generation = 1; } - handle = &handles[nb_handles++]; - handle->generation = 0; - return handle; + + if (generation == 0 || generation == 0xffff) generation = 1; + + atomic_store_u32( &entry->tid, get_thread_id( current ) ); + atomic_store_u32( &entry->pid, get_process_id( current->process ) ); + atomic_store_u64( &entry->uniq, MAKELONG(type, generation) ); + return entry; }
-static inline void *free_user_entry( struct user_handle *ptr ) +static void free_user_entry( user_entry_t *entry ) { - void *ret; - ret = ptr->ptr; - ptr->ptr = freelist; - ptr->type = 0; - freelist = ptr; - return ret; + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + size_t index = entry - handles; + + atomic_store_u64( &entry->uniq, MAKELONG(0, entry->generation) ); + entry->offset = freelist; + freelist = index; }
/* allocate a user handle for a given object */ user_handle_t alloc_user_handle( void *ptr, unsigned short type ) { - struct user_handle *entry = alloc_user_entry(); - if (!entry) return 0; - entry->ptr = ptr; - entry->type = type; - if (++entry->generation >= 0xffff) entry->generation = 1; + const user_entry_t *entry; + + if (!(entry = alloc_user_entry( type ))) return 0; + set_server_object( entry, ptr ); return entry_to_handle( entry ); }
/* return a pointer to a user object from its handle */ void *get_user_object( user_handle_t handle, unsigned short type ) { - struct user_handle *entry; + const user_entry_t *entry;
if (!(entry = handle_to_entry( handle )) || entry->type != type) return NULL; - return entry->ptr; + return get_server_object( entry ); }
/* get the full handle for a possibly truncated handle */ user_handle_t get_user_full_handle( user_handle_t handle ) { - struct user_handle *entry; + const user_entry_t *entry;
if (handle >> 16) return handle; if (!(entry = handle_to_entry( handle ))) return handle; @@ -145,30 +192,35 @@ user_handle_t get_user_full_handle( user_handle_t handle ) /* same as get_user_object plus set the handle to the full 32-bit value */ void *get_user_object_handle( user_handle_t *handle, unsigned short type ) { - struct user_handle *entry; + const user_entry_t *entry;
if (!(entry = handle_to_entry( *handle )) || entry->type != type) return NULL; *handle = entry_to_handle( entry ); - return entry->ptr; + return get_server_object( entry ); }
/* free a user handle and return a pointer to the object */ void *free_user_handle( user_handle_t handle ) { - struct user_handle *entry; + const user_entry_t *entry; + void *ret;
if (!(entry = handle_to_entry( handle ))) { set_error( STATUS_INVALID_HANDLE ); return NULL; } - return free_user_entry( entry ); + + ret = set_server_object( entry, NULL ); + free_user_entry( (user_entry_t *)entry ); + return ret; }
/* return the next user handle after 'handle' that is of a given type */ void *next_user_handle( user_handle_t *handle, unsigned short type ) { - struct user_handle *entry; + const session_shm_t *session = get_session_shm(); + const user_entry_t *entry, *handles = session->user_entries;
if (!*handle) entry = handles; else @@ -182,7 +234,7 @@ void *next_user_handle( user_handle_t *handle, unsigned short type ) if (!type || entry->type == type) { *handle = entry_to_handle( entry ); - return entry->ptr; + return get_server_object( entry ); } entry++; } @@ -192,18 +244,21 @@ void *next_user_handle( user_handle_t *handle, unsigned short type ) /* free client-side user handles managed by the process */ void free_process_user_handles( struct process *process ) { - unsigned int i; + const session_shm_t *session = get_session_shm(); + const user_entry_t *handles = session->user_entries; + unsigned int i, pid = get_process_id( process );
for (i = 0; i < nb_handles; i++) { - switch (handles[i].type) + const user_entry_t *entry = handles + i; + switch (entry->type) { case NTUSER_OBJ_MENU: case NTUSER_OBJ_ICON: case NTUSER_OBJ_WINPOS: case NTUSER_OBJ_ACCEL: case NTUSER_OBJ_IMC: - if (handles[i].ptr == process) free_user_entry( &handles[i] ); + if (entry->pid == pid) free_user_entry( (user_entry_t *)entry ); break; case NTUSER_OBJ_HOOK: case NTUSER_OBJ_WINDOW: @@ -216,17 +271,17 @@ void free_process_user_handles( struct process *process ) /* allocate an arbitrary user handle */ DECL_HANDLER(alloc_user_handle) { - reply->handle = alloc_user_handle( current->process, req->type ); + reply->handle = alloc_user_handle( (void *)-1 /* never used */, req->type ); }
/* free an arbitrary user handle */ DECL_HANDLER(free_user_handle) { - struct user_handle *entry; + const user_entry_t *entry;
if ((entry = handle_to_entry( req->handle )) && entry->type == req->type) - free_user_entry( entry ); + free_user_entry( (user_entry_t *)entry ); else set_error( STATUS_INVALID_HANDLE ); } diff --git a/server/user.h b/server/user.h index 473ff0ce820..06541ff8ec3 100644 --- a/server/user.h +++ b/server/user.h @@ -36,8 +36,6 @@ struct clipboard;
#define DESKTOP_ATOM ((atom_t)32769)
-#define MAX_USER_HANDLES ((LAST_USER_HANDLE - FIRST_USER_HANDLE + 1) >> 1) - struct winstation { struct object obj; /* object header */