 
            These are the remaining patches for ntsync, sending them all at once for visibility but also with a bit of hope they can make it in 10.16.
 
            From: Rémi Bernon <rbernon@codeweavers.com>
--- server/completion.c | 2 +- server/console.c | 4 ++-- server/debugger.c | 4 ++-- server/device.c | 2 +- server/event.c | 7 ++++++- server/fd.c | 6 +++--- server/object.h | 2 +- server/process.c | 6 +++--- server/queue.c | 2 +- server/thread.c | 6 +++--- server/timer.c | 2 +- 11 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/server/completion.c b/server/completion.c index a2028a89823..caef8fba2a8 100644 --- a/server/completion.c +++ b/server/completion.c @@ -279,7 +279,7 @@ static struct completion *create_completion( struct object *root, const struct u list_init( &completion->wait_queue ); completion->depth = 0;
- if (!(completion->sync = create_event_sync( 1, 0 ))) + if (!(completion->sync = create_internal_sync( 1, 0 ))) { release_object( completion ); return NULL; diff --git a/server/console.c b/server/console.c index f0cb6689d4b..08bad277a3f 100644 --- a/server/console.c +++ b/server/console.c @@ -554,7 +554,7 @@ static struct object *create_console(void) init_async_queue( &console->ioctl_q ); init_async_queue( &console->read_q );
- if (!(console->sync = create_event_sync( 1, 0 ))) goto error; + if (!(console->sync = create_internal_sync( 1, 0 ))) goto error; if (!(console->fd = alloc_pseudo_fd( &console_fd_ops, &console->obj, FILE_SYNCHRONOUS_IO_NONALERT ))) goto error; allow_fd_caching( console->fd ); return &console->obj; @@ -977,7 +977,7 @@ static struct object *create_console_server( void ) list_init( &server->queue ); list_init( &server->read_queue );
- if (!(server->sync = create_event_sync( 1, 1 ))) goto error; + if (!(server->sync = create_internal_sync( 1, 1 ))) goto error; if (!(server->fd = alloc_pseudo_fd( &console_server_fd_ops, &server->obj, FILE_SYNCHRONOUS_IO_NONALERT ))) goto error; allow_fd_caching(server->fd); return &server->obj; diff --git a/server/debugger.c b/server/debugger.c index 2956ee17b8e..31ce91a36a8 100644 --- a/server/debugger.c +++ b/server/debugger.c @@ -372,7 +372,7 @@ static struct debug_obj *create_debug_obj( struct object *root, const struct uni debug_obj->flags = flags; list_init( &debug_obj->event_queue );
- if (!(debug_obj->sync = create_event_sync( 1, 0 ))) + if (!(debug_obj->sync = create_internal_sync( 1, 0 ))) { release_object( debug_obj ); return NULL; @@ -451,7 +451,7 @@ static struct debug_event *alloc_debug_event( struct thread *thread, int code, c fill_debug_event[code - DbgCreateThreadStateChange]( event, arg ); event->data.code = code;
- if (!(event->sync = create_event_sync( 1, 0 ))) + if (!(event->sync = create_internal_sync( 1, 0 ))) { release_object( event ); return NULL; diff --git a/server/device.c b/server/device.c index f4b3f19028b..859ca9a2b52 100644 --- a/server/device.c +++ b/server/device.c @@ -848,7 +848,7 @@ static struct device_manager *create_device_manager(void) list_init( &manager->requests ); wine_rb_init( &manager->kernel_objects, compare_kernel_object );
- if (!(manager->sync = create_event_sync( 1, 0 ))) + if (!(manager->sync = create_internal_sync( 1, 0 ))) { release_object( manager ); return NULL; diff --git a/server/event.c b/server/event.c index c69554fc657..cb9fe49460e 100644 --- a/server/event.c +++ b/server/event.c @@ -87,7 +87,7 @@ static const struct object_ops event_sync_ops = no_destroy /* destroy */ };
-struct event_sync *create_event_sync( int manual, int signaled ) +static struct event_sync *create_event_sync( int manual, int signaled ) { struct event_sync *event;
@@ -98,6 +98,11 @@ struct event_sync *create_event_sync( int manual, int signaled ) return event; }
+struct event_sync *create_internal_sync( int manual, int signaled ) +{ + return create_event_sync( manual, signaled ); +} + static void event_sync_dump( struct object *obj, int verbose ) { struct event_sync *event = (struct event_sync *)obj; diff --git a/server/fd.c b/server/fd.c index 1b932cc3ae3..5a43888e719 100644 --- a/server/fd.c +++ b/server/fd.c @@ -1441,7 +1441,7 @@ static struct file_lock *add_lock( struct fd *fd, int shared, file_pos_t start, lock->fd = fd; lock->process = current->process;
- if (!(lock->sync = create_event_sync( 1, 0 ))) goto error; + if (!(lock->sync = create_internal_sync( 1, 0 ))) goto error; /* now try to set a Unix lock */ if (!set_unix_lock( lock->fd, lock->start, lock->end, lock->shared ? F_RDLCK : F_WRLCK )) goto error; list_add_tail( &fd->locks, &lock->fd_entry ); @@ -1724,7 +1724,7 @@ static struct fd *alloc_fd_object(void) list_init( &fd->inode_entry ); list_init( &fd->locks );
- if (!(fd->sync = create_event_sync( 1, 1 ))) goto error; + if (!(fd->sync = create_internal_sync( 1, 1 ))) goto error; if ((fd->poll_index = add_poll_user( fd )) == -1) goto error;
return fd; @@ -1767,7 +1767,7 @@ struct fd *alloc_pseudo_fd( const struct fd_ops *fd_user_ops, struct object *use list_init( &fd->inode_entry ); list_init( &fd->locks );
- if (!(fd->sync = create_event_sync( 1, 1 ))) + if (!(fd->sync = create_internal_sync( 1, 1 ))) { release_object( fd ); return NULL; diff --git a/server/object.h b/server/object.h index 203734a565f..0a7b5bf7c84 100644 --- a/server/object.h +++ b/server/object.h @@ -222,7 +222,7 @@ struct event_sync; struct event; struct keyed_event;
-extern struct event_sync *create_event_sync( int manual, int signaled ); +extern struct event_sync *create_internal_sync( int manual, int signaled ); extern void signal_sync( struct event_sync *sync ); extern void reset_sync( struct event_sync *sync );
diff --git a/server/process.c b/server/process.c index b9bc02b6469..48a1d2d697d 100644 --- a/server/process.c +++ b/server/process.c @@ -259,7 +259,7 @@ static struct job *create_job_object( struct object *root, const struct unicode_ job->completion_key = 0; job->parent = NULL;
- if (!(job->sync = create_event_sync( 1, 0 ))) + if (!(job->sync = create_internal_sync( 1, 0 ))) { release_object( job ); return NULL; @@ -722,7 +722,7 @@ struct process *create_process( int fd, struct process *parent, unsigned int fla goto error; } if (!(process->msg_fd = create_anonymous_fd( &process_fd_ops, fd, &process->obj, 0 ))) goto error; - if (!(process->sync = create_event_sync( 1, 0 ))) goto error; + if (!(process->sync = create_internal_sync( 1, 0 ))) goto error;
/* create the handle table */ if (!parent) @@ -1222,7 +1222,7 @@ DECL_HANDLER(new_process) info->process = NULL; info->data = NULL;
- if (!(info->sync = create_event_sync( 1, 0 ))) + if (!(info->sync = create_internal_sync( 1, 0 ))) { close( socket_fd ); goto done; diff --git a/server/queue.c b/server/queue.c index 89e862d25a4..4ccecc5c26a 100644 --- a/server/queue.c +++ b/server/queue.c @@ -323,7 +323,7 @@ static struct msg_queue *create_msg_queue( struct thread *thread, struct thread_ list_init( &queue->expired_timers ); for (i = 0; i < NB_MSG_KINDS; i++) list_init( &queue->msg_list[i] );
- if (!(queue->sync = create_event_sync( 1, 0 ))) goto error; + if (!(queue->sync = create_internal_sync( 1, 0 ))) goto error; if (!(queue->shared = alloc_shared_object())) goto error;
SHARED_WRITE_BEGIN( queue->shared, queue_shm_t ) diff --git a/server/thread.c b/server/thread.c index 853aff4cc84..8bd1e0cff3b 100644 --- a/server/thread.c +++ b/server/thread.c @@ -487,7 +487,7 @@ static struct context *create_thread_context( struct thread *thread ) memset( &context->regs, 0, sizeof(context->regs) ); context->regs[CTX_NATIVE].machine = native_machine;
- if (!(context->sync = create_event_sync( 1, 0 ))) + if (!(context->sync = create_internal_sync( 1, 0 ))) { release_object( context ); return NULL; @@ -560,7 +560,7 @@ struct thread *create_thread( int fd, struct process *process, const struct secu return NULL; } if (!(thread->request_fd = create_anonymous_fd( &thread_fd_ops, fd, &thread->obj, 0 ))) goto error; - if (!(thread->sync = create_event_sync( 1, 0 ))) goto error; + if (!(thread->sync = create_internal_sync( 1, 0 ))) goto error; if (get_inproc_device_fd() >= 0 && !(thread->alert_sync = create_inproc_internal_sync( 1, 0 ))) goto error;
if (process->desktop) @@ -734,7 +734,7 @@ static struct thread_apc *create_apc( struct object *owner, const union apc_call apc->result.type = APC_NONE; if (owner) grab_object( owner );
- if (!(apc->sync = create_event_sync( 1, 0 ))) + if (!(apc->sync = create_internal_sync( 1, 0 ))) { release_object( apc ); return NULL; diff --git a/server/timer.c b/server/timer.c index 522fc6a7113..e8811594cac 100644 --- a/server/timer.c +++ b/server/timer.c @@ -113,7 +113,7 @@ static struct timer *create_timer( struct object *root, const struct unicode_str timer->timeout = NULL; timer->thread = NULL;
- if (!(timer->sync = create_event_sync( manual, 0 ))) + if (!(timer->sync = create_internal_sync( manual, 0 ))) { release_object( timer ); return NULL;
 
            From: Rémi Bernon <rbernon@codeweavers.com>
--- server/debugger.c | 3 ++- server/event.c | 11 +++++++++++ server/object.h | 1 + 3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/server/debugger.c b/server/debugger.c index 31ce91a36a8..094614a6a6a 100644 --- a/server/debugger.c +++ b/server/debugger.c @@ -451,7 +451,8 @@ static struct debug_event *alloc_debug_event( struct thread *thread, int code, c fill_debug_event[code - DbgCreateThreadStateChange]( event, arg ); event->data.code = code;
- if (!(event->sync = create_internal_sync( 1, 0 ))) + /* create a server-side sync here, as send_debug_event still uses server_select to pass contexts around */ + if (!(event->sync = create_server_internal_sync( 1, 0 ))) { release_object( event ); return NULL; diff --git a/server/event.c b/server/event.c index cb9fe49460e..3bbf92f18e5 100644 --- a/server/event.c +++ b/server/event.c @@ -98,6 +98,17 @@ static struct event_sync *create_event_sync( int manual, int signaled ) return event; }
+struct event_sync *create_server_internal_sync( int manual, int signaled ) +{ + struct event_sync *event; + + if (!(event = alloc_object( &event_sync_ops ))) return NULL; + event->manual = manual; + event->signaled = signaled; + + return event; +} + struct event_sync *create_internal_sync( int manual, int signaled ) { return create_event_sync( manual, signaled ); diff --git a/server/object.h b/server/object.h index 0a7b5bf7c84..d604f70d100 100644 --- a/server/object.h +++ b/server/object.h @@ -222,6 +222,7 @@ struct event_sync; struct event; struct keyed_event;
+extern struct event_sync *create_server_internal_sync( int manual, int signaled ); extern struct event_sync *create_internal_sync( int manual, int signaled ); extern void signal_sync( struct event_sync *sync ); extern void reset_sync( struct event_sync *sync );
 
            From: Rémi Bernon <rbernon@codeweavers.com>
--- server/event.c | 31 +++++++++++++++++-------------- server/inproc_sync.c | 16 +++++++++++++++- server/mutex.c | 7 +++++-- server/object.c | 2 +- server/object.h | 6 +++--- server/semaphore.c | 7 +++++-- server/thread.c | 2 +- 7 files changed, 47 insertions(+), 24 deletions(-)
diff --git a/server/event.c b/server/event.c index 3bbf92f18e5..3dc324df0ec 100644 --- a/server/event.c +++ b/server/event.c @@ -60,7 +60,7 @@ struct event_sync static void event_sync_dump( struct object *obj, int verbose ); static int event_sync_signaled( struct object *obj, struct wait_queue_entry *entry ); static void event_sync_satisfied( struct object *obj, struct wait_queue_entry *entry ); -static int event_sync_signal( struct object *obj, unsigned int access ); +static int event_sync_signal( struct object *obj, unsigned int access, int signal );
static const struct object_ops event_sync_ops = { @@ -129,16 +129,14 @@ static int event_sync_signaled( struct object *obj, struct wait_queue_entry *ent return event->signaled; }
-void signal_sync( struct event_sync *event ) +void signal_sync( struct event_sync *sync ) { - event->signaled = 1; - /* wake up all waiters if manual reset, a single one otherwise */ - wake_up( &event->obj, !event->manual ); + sync->obj.ops->signal( &sync->obj, 0, 1 ); }
-void reset_sync( struct event_sync *event ) +void reset_sync( struct event_sync *sync ) { - event->signaled = 0; + sync->obj.ops->signal( &sync->obj, 0, 0 ); }
static void event_sync_satisfied( struct object *obj, struct wait_queue_entry *entry ) @@ -146,14 +144,16 @@ static void event_sync_satisfied( struct object *obj, struct wait_queue_entry *e struct event_sync *event = (struct event_sync *)obj; assert( obj->ops == &event_sync_ops ); /* Reset if it's an auto-reset event */ - if (!event->manual) reset_sync( event ); + if (!event->manual) event->signaled = 0; }
-static int event_sync_signal( struct object *obj, unsigned int access ) +static int event_sync_signal( struct object *obj, unsigned int access, int signal ) { struct event_sync *event = (struct event_sync *)obj; assert( obj->ops == &event_sync_ops ); - signal_sync( event ); + + /* wake up all waiters if manual reset, a single one otherwise */ + if ((event->signaled = !!signal)) wake_up( &event->obj, !event->manual ); return 1; }
@@ -166,7 +166,7 @@ struct event
static void event_dump( struct object *obj, int verbose ); static struct object *event_get_sync( struct object *obj ); -static int event_signal( struct object *obj, unsigned int access); +static int event_signal( struct object *obj, unsigned int access, int signal ); static struct list *event_get_kernel_obj_list( struct object *obj ); static void event_destroy( struct object *obj );
@@ -297,18 +297,21 @@ static struct object *event_get_sync( struct object *obj ) return grab_object( event->sync ); }
-static int event_signal( struct object *obj, unsigned int access ) +static int event_signal( struct object *obj, unsigned int access, int signal ) { struct event *event = (struct event *)obj; assert( obj->ops == &event_ops );
+ assert( event->sync->obj.ops == &event_sync_ops ); /* never called with inproc syncs */ + assert( signal == -1 ); /* always called from signal_object */ + if (!(access & EVENT_MODIFY_STATE)) { set_error( STATUS_ACCESS_DENIED ); return 0; } - set_event( event ); - return 1; + + return event_sync_signal( &event->sync->obj, 0, 1 ); }
static struct list *event_get_kernel_obj_list( struct object *obj ) diff --git a/server/inproc_sync.c b/server/inproc_sync.c index 435c58947f3..54342fc3e2a 100644 --- a/server/inproc_sync.c +++ b/server/inproc_sync.c @@ -60,6 +60,7 @@ struct inproc_sync };
static void inproc_sync_dump( struct object *obj, int verbose ); +static int inproc_sync_signal( struct object *obj, unsigned int access, int signal ); static void inproc_sync_destroy( struct object *obj );
static const struct object_ops inproc_sync_ops = @@ -71,7 +72,7 @@ static const struct object_ops inproc_sync_ops = NULL, /* remove_queue */ NULL, /* signaled */ NULL, /* satisfied */ - no_signal, /* signal */ + inproc_sync_signal, /* signal */ no_get_fd, /* get_fd */ default_get_sync, /* get_sync */ default_map_access, /* map_access */ @@ -126,6 +127,19 @@ void reset_inproc_sync( struct inproc_sync *sync ) ioctl( sync->fd, NTSYNC_IOC_EVENT_RESET, &count ); }
+static int inproc_sync_signal( struct object *obj, unsigned int access, int signal ) +{ + struct inproc_sync *sync = (struct inproc_sync *)obj; + assert( obj->ops == &inproc_sync_ops ); + + assert( sync->type == INPROC_SYNC_INTERNAL ); /* never called for mutex / semaphore */ + assert( signal == 0 || signal == 1 ); /* never called from signal_object */ + + if (signal) signal_inproc_sync( sync ); + else reset_inproc_sync( sync ); + return 1; +} + static void inproc_sync_destroy( struct object *obj ) { struct inproc_sync *sync = (struct inproc_sync *)obj; diff --git a/server/mutex.c b/server/mutex.c index e370d301472..3fa8c021330 100644 --- a/server/mutex.c +++ b/server/mutex.c @@ -174,7 +174,7 @@ struct mutex
static void mutex_dump( struct object *obj, int verbose ); static struct object *mutex_get_sync( struct object *obj ); -static int mutex_signal( struct object *obj, unsigned int access ); +static int mutex_signal( struct object *obj, unsigned int access, int signal ); static void mutex_destroy( struct object *obj );
static const struct object_ops mutex_ops = @@ -251,11 +251,14 @@ static struct object *mutex_get_sync( struct object *obj ) return grab_object( mutex->sync ); }
-static int mutex_signal( struct object *obj, unsigned int access ) +static int mutex_signal( struct object *obj, unsigned int access, int signal ) { struct mutex *mutex = (struct mutex *)obj; assert( obj->ops == &mutex_ops );
+ assert( mutex->sync->obj.ops == &mutex_sync_ops ); /* never called with inproc syncs */ + assert( signal == -1 ); /* always called from signal_object */ + if (!(access & SYNCHRONIZE)) { set_error( STATUS_ACCESS_DENIED ); diff --git a/server/object.c b/server/object.c index 356956aad6a..694835a6a51 100644 --- a/server/object.c +++ b/server/object.c @@ -630,7 +630,7 @@ void no_satisfied( struct object *obj, struct wait_queue_entry *entry ) { }
-int no_signal( struct object *obj, unsigned int access ) +int no_signal( struct object *obj, unsigned int access, int signal ) { set_error( STATUS_OBJECT_TYPE_MISMATCH ); return 0; diff --git a/server/object.h b/server/object.h index d604f70d100..4cc169b827c 100644 --- a/server/object.h +++ b/server/object.h @@ -80,8 +80,8 @@ struct object_ops int (*signaled)(struct object *,struct wait_queue_entry *); /* wait satisfied */ void (*satisfied)(struct object *,struct wait_queue_entry *); - /* signal an object */ - int (*signal)(struct object *, unsigned int); + /* signal/reset an object */ + int (*signal)(struct object *,unsigned int,int); /* return an fd object that can be used to read/write from the object */ struct fd *(*get_fd)(struct object *); /* return a sync that can be used to wait/signal the object */ @@ -170,7 +170,7 @@ extern struct object *find_object( const struct namespace *namespace, const stru extern struct object *find_object_index( const struct namespace *namespace, unsigned int index ); extern int no_add_queue( struct object *obj, struct wait_queue_entry *entry ); extern void no_satisfied( struct object *obj, struct wait_queue_entry *entry ); -extern int no_signal( struct object *obj, unsigned int access ); +extern int no_signal( struct object *obj, unsigned int access, int signal ); extern struct fd *no_get_fd( struct object *obj ); extern struct object *default_get_sync( struct object *obj ); static inline struct object *get_obj_sync( struct object *obj ) { return obj->ops->get_sync( obj ); } diff --git a/server/semaphore.c b/server/semaphore.c index 4b31bfe806c..771115e62fd 100644 --- a/server/semaphore.c +++ b/server/semaphore.c @@ -148,7 +148,7 @@ struct semaphore
static void semaphore_dump( struct object *obj, int verbose ); static struct object *semaphore_get_sync( struct object *obj ); -static int semaphore_signal( struct object *obj, unsigned int access ); +static int semaphore_signal( struct object *obj, unsigned int access, int signal ); static void semaphore_destroy( struct object *obj );
static const struct object_ops semaphore_ops = @@ -218,11 +218,14 @@ static struct object *semaphore_get_sync( struct object *obj ) return grab_object( sem->sync ); }
-static int semaphore_signal( struct object *obj, unsigned int access ) +static int semaphore_signal( struct object *obj, unsigned int access, int signal ) { struct semaphore *sem = (struct semaphore *)obj; assert( obj->ops == &semaphore_ops );
+ assert( sem->sync->obj.ops == &semaphore_sync_ops ); /* never called with inproc syncs */ + assert( signal == -1 ); /* always called from signal_object */ + if (!(access & SEMAPHORE_MODIFY_STATE)) { set_error( STATUS_ACCESS_DENIED ); diff --git a/server/thread.c b/server/thread.c index 8bd1e0cff3b..83ee9a47794 100644 --- a/server/thread.c +++ b/server/thread.c @@ -1269,7 +1269,7 @@ static int signal_object( obj_handle_t handle ) obj = get_handle_obj( current->process, handle, 0, NULL ); if (obj) { - ret = obj->ops->signal( obj, get_handle_access( current->process, handle )); + ret = obj->ops->signal( obj, get_handle_access( current->process, handle ), -1 ); release_object( obj ); } return ret;
 
            From: Rémi Bernon <rbernon@codeweavers.com>
--- server/completion.c | 2 +- server/console.c | 16 ++++++++-------- server/debugger.c | 12 ++++++------ server/device.c | 2 +- server/event.c | 38 +++++++++++++++++--------------------- server/fd.c | 4 ++-- server/mutex.c | 28 +++++++++++++++++----------- server/object.h | 8 ++++---- server/process.c | 4 ++-- server/process.h | 2 +- server/queue.c | 2 +- server/semaphore.c | 24 +++++++++++++++--------- server/thread.c | 14 ++++++++++++-- server/thread.h | 2 +- server/timer.c | 2 +- 15 files changed, 89 insertions(+), 71 deletions(-)
diff --git a/server/completion.c b/server/completion.c index caef8fba2a8..dbcf4e2480c 100644 --- a/server/completion.c +++ b/server/completion.c @@ -73,7 +73,7 @@ struct completion_wait struct completion { struct object obj; - struct event_sync *sync; + struct object *sync; struct list queue; struct list wait_queue; unsigned int depth; diff --git a/server/console.c b/server/console.c index 08bad277a3f..eb849b7d239 100644 --- a/server/console.c +++ b/server/console.c @@ -53,7 +53,7 @@ struct history_line struct console { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct thread *renderer; /* console renderer thread */ struct screen_buffer *active; /* active screen buffer */ struct console_server *server; /* console server object */ @@ -134,7 +134,7 @@ struct console_host_ioctl struct console_server { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct fd *fd; /* pseudo-fd for ioctls */ struct console *console; /* attached console */ struct list queue; /* ioctl queue */ @@ -210,7 +210,7 @@ struct font_info struct screen_buffer { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list entry; /* entry in list of all screen buffers */ struct console *input; /* associated console input */ unsigned int id; /* buffer id */ @@ -303,7 +303,7 @@ static const struct object_ops console_device_ops = struct console_input { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct fd *fd; /* pseudo-fd */ struct list entry; /* entry in console->inputs */ struct console *console; /* associated console at creation time */ @@ -364,7 +364,7 @@ static 
const struct fd_ops console_input_fd_ops = struct console_output { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct fd *fd; /* pseudo-fd */ struct list entry; /* entry in console->outputs */ struct console *console; /* associated console at creation time */ @@ -646,7 +646,7 @@ static struct object *create_screen_buffer( struct console *console ) }
if (!(screen_buffer = alloc_object( &screen_buffer_ops ))) return NULL; - screen_buffer->sync = (struct event_sync *)grab_object( console->sync ); + screen_buffer->sync = grab_object( console->sync ); screen_buffer->id = ++console->last_id; screen_buffer->input = console; init_async_queue( &screen_buffer->ioctl_q ); @@ -1350,7 +1350,7 @@ static struct object *console_device_lookup_name( struct object *obj, struct uni
name->len = 0; if (!(console_input = alloc_object( &console_input_ops ))) return NULL; - console_input->sync = (struct event_sync *)grab_object( current->process->console->sync ); + console_input->sync = grab_object( current->process->console->sync ); console_input->fd = alloc_pseudo_fd( &console_input_fd_ops, &console_input->obj, FILE_SYNCHRONOUS_IO_NONALERT ); if (!console_input->fd) @@ -1375,7 +1375,7 @@ static struct object *console_device_lookup_name( struct object *obj, struct uni
name->len = 0; if (!(console_output = alloc_object( &console_output_ops ))) return NULL; - console_output->sync = (struct event_sync *)grab_object( current->process->console->sync ); + console_output->sync = grab_object( current->process->console->sync ); console_output->fd = alloc_pseudo_fd( &console_output_fd_ops, &console_output->obj, FILE_SYNCHRONOUS_IO_NONALERT ); if (!console_output->fd) diff --git a/server/debugger.c b/server/debugger.c index 094614a6a6a..c709e34078b 100644 --- a/server/debugger.c +++ b/server/debugger.c @@ -70,7 +70,7 @@ struct type_descr debug_obj_type = struct debug_obj { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list event_queue; /* pending events queue */ unsigned int flags; /* debug flags */ }; @@ -265,7 +265,7 @@ static void link_event( struct debug_obj *debug_obj, struct debug_event *event ) static void resume_event( struct debug_obj *debug_obj, struct debug_event *event ) { event->state = EVENT_QUEUED; - reset_sync( event->sync ); + reset_sync( (struct object *)event->sync ); if (!event->sender->process->debug_event) { grab_object( debug_obj ); @@ -278,7 +278,7 @@ static void resume_event( struct debug_obj *debug_obj, struct debug_event *event static void delay_event( struct debug_obj *debug_obj, struct debug_event *event ) { event->state = EVENT_DELAYED; - reset_sync( event->sync ); + reset_sync( (struct object *)event->sync ); if (event->sender->process->debug_event == event) event->sender->process->debug_event = NULL; }
@@ -422,7 +422,7 @@ static int continue_debug_event( struct debug_obj *debug_obj, struct process *pr assert( event->sender->process->debug_event == event ); event->status = status; event->state = EVENT_CONTINUED; - signal_sync( event->sync ); + signal_sync( (struct object *)event->sync ); unlink_event( debug_obj, event ); resume_process( process ); return 1; @@ -543,7 +543,7 @@ void debugger_detach( struct process *process, struct debug_obj *debug_obj ) assert( event->state != EVENT_CONTINUED ); event->status = DBG_CONTINUE; event->state = EVENT_CONTINUED; - signal_sync( event->sync ); + signal_sync( (struct object *)event->sync ); unlink_event( debug_obj, event ); /* from queued debug event */ resume_process( process ); @@ -589,7 +589,7 @@ DECL_HANDLER(wait_debug_event) if ((event = find_event_to_send( debug_obj ))) { event->state = EVENT_SENT; - reset_sync( event->sync ); + reset_sync( (struct object *)event->sync ); event->sender->process->debug_event = event; reply->pid = get_process_id( event->sender->process ); reply->tid = get_thread_id( event->sender ); diff --git a/server/device.c b/server/device.c index 859ca9a2b52..3eed74343e5 100644 --- a/server/device.c +++ b/server/device.c @@ -89,7 +89,7 @@ static const struct object_ops irp_call_ops = struct device_manager { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list devices; /* list of devices */ struct list requests; /* list of pending irps across all devices */ struct irp_call *current_call; /* call currently executed on client side */ diff --git a/server/event.c b/server/event.c index 3dc324df0ec..47a7d5a9b88 100644 --- a/server/event.c +++ b/server/event.c @@ -87,7 +87,7 @@ static const struct object_ops event_sync_ops = no_destroy /* destroy */ };
-static struct event_sync *create_event_sync( int manual, int signaled ) +static struct object *create_event_sync( int manual, int signaled ) { struct event_sync *event;
@@ -95,7 +95,7 @@ static struct event_sync *create_event_sync( int manual, int signaled ) event->manual = manual; event->signaled = signaled;
- return event; + return &event->obj; }
struct event_sync *create_server_internal_sync( int manual, int signaled ) @@ -109,9 +109,9 @@ struct event_sync *create_server_internal_sync( int manual, int signaled ) return event; }
-struct event_sync *create_internal_sync( int manual, int signaled ) +struct object *create_internal_sync( int manual, int signaled ) { - return create_event_sync( manual, signaled ); + return (struct object *)create_server_internal_sync( manual, signaled ); }
static void event_sync_dump( struct object *obj, int verbose ) @@ -129,16 +129,6 @@ static int event_sync_signaled( struct object *obj, struct wait_queue_entry *ent return event->signaled; }
-void signal_sync( struct event_sync *sync ) -{ - sync->obj.ops->signal( &sync->obj, 0, 1 ); -} - -void reset_sync( struct event_sync *sync ) -{ - sync->obj.ops->signal( &sync->obj, 0, 0 ); -} - static void event_sync_satisfied( struct object *obj, struct wait_queue_entry *entry ) { struct event_sync *event = (struct event_sync *)obj; @@ -160,7 +150,7 @@ static int event_sync_signal( struct object *obj, unsigned int access, int signa struct event { struct object obj; /* object header */ - struct event_sync *sync; /* event sync object */ + struct object *sync; /* event sync object */ struct list kernel_object; /* list of kernel object pointers */ };
@@ -287,7 +277,7 @@ static void event_dump( struct object *obj, int verbose ) { struct event *event = (struct event *)obj; assert( obj->ops == &event_ops ); - event->sync->obj.ops->dump( &event->sync->obj, verbose ); + event->sync->ops->dump( event->sync, verbose ); }
static struct object *event_get_sync( struct object *obj ) @@ -302,7 +292,7 @@ static int event_signal( struct object *obj, unsigned int access, int signal ) struct event *event = (struct event *)obj; assert( obj->ops == &event_ops );
- assert( event->sync->obj.ops == &event_sync_ops ); /* never called with inproc syncs */ + assert( event->sync->ops == &event_sync_ops ); /* never called with inproc syncs */ assert( signal == -1 ); /* always called from signal_object */
if (!(access & EVENT_MODIFY_STATE)) @@ -311,7 +301,7 @@ static int event_signal( struct object *obj, unsigned int access, int signal ) return 0; }
- return event_sync_signal( &event->sync->obj, 0, 1 ); + return event_sync_signal( event->sync, 0, 1 ); }
static struct list *event_get_kernel_obj_list( struct object *obj ) @@ -418,11 +408,14 @@ DECL_HANDLER(open_event) /* do an event operation */ DECL_HANDLER(event_op) { + struct event_sync *sync; struct event *event;
if (!(event = get_event_obj( current->process, req->handle, EVENT_MODIFY_STATE ))) return; + assert( event->sync->ops == &event_sync_ops ); /* never called with inproc syncs */ + sync = (struct event_sync *)event->sync;
- reply->state = event->sync->signaled; + reply->state = sync->signaled; switch(req->op) { case PULSE_EVENT: @@ -445,12 +438,15 @@ DECL_HANDLER(event_op) /* return details about the event */ DECL_HANDLER(query_event) { + struct event_sync *sync; struct event *event;
if (!(event = get_event_obj( current->process, req->handle, EVENT_QUERY_STATE ))) return; + assert( event->sync->ops == &event_sync_ops ); /* never called with inproc syncs */ + sync = (struct event_sync *)event->sync;
- reply->manual_reset = event->sync->manual; - reply->state = event->sync->signaled; + reply->manual_reset = sync->manual; + reply->state = sync->signaled;
release_object( event ); } diff --git a/server/fd.c b/server/fd.c index 5a43888e719..c4be028845f 100644 --- a/server/fd.c +++ b/server/fd.c @@ -129,7 +129,7 @@ struct fd { struct object obj; /* object header */ const struct fd_ops *fd_ops; /* file descriptor operations */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct inode *inode; /* inode that this fd belongs to */ struct list inode_entry; /* entry in inode fd list */ struct closed_fd *closed; /* structure to store the unix fd at destroy time */ @@ -273,7 +273,7 @@ static const struct object_ops inode_ops = struct file_lock { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct fd *fd; /* fd owning this lock */ struct list fd_entry; /* entry in list of locks on a given fd */ struct list inode_entry; /* entry in inode list of locks */ diff --git a/server/mutex.c b/server/mutex.c index 3fa8c021330..4932e07e96d 100644 --- a/server/mutex.c +++ b/server/mutex.c @@ -153,7 +153,7 @@ static void mutex_sync_satisfied( struct object *obj, struct wait_queue_entry *e mutex->abandoned = 0; }
-static struct mutex_sync *create_mutex_sync( int owned ) +static struct object *create_mutex_sync( int owned ) { struct mutex_sync *mutex;
@@ -163,13 +163,13 @@ static struct mutex_sync *create_mutex_sync( int owned ) mutex->abandoned = 0; if (owned) do_grab( mutex, current );
- return mutex; + return &mutex->obj; }
struct mutex { struct object obj; /* object header */ - struct mutex_sync *sync; /* mutex sync object */ + struct object *sync; /* mutex sync object */ };
static void mutex_dump( struct object *obj, int verbose ); @@ -241,7 +241,7 @@ static void mutex_dump( struct object *obj, int verbose ) { struct mutex *mutex = (struct mutex *)obj; assert( obj->ops == &mutex_ops ); - mutex->sync->obj.ops->dump( &mutex->sync->obj, verbose ); + mutex->sync->ops->dump( mutex->sync, verbose ); }
static struct object *mutex_get_sync( struct object *obj ) @@ -256,7 +256,7 @@ static int mutex_signal( struct object *obj, unsigned int access, int signal ) struct mutex *mutex = (struct mutex *)obj; assert( obj->ops == &mutex_ops );
- assert( mutex->sync->obj.ops == &mutex_sync_ops ); /* never called with inproc syncs */ + assert( mutex->sync->ops == &mutex_sync_ops ); /* never called with inproc syncs */ assert( signal == -1 ); /* always called from signal_object */
if (!(access & SYNCHRONIZE)) @@ -264,7 +264,7 @@ static int mutex_signal( struct object *obj, unsigned int access, int signal ) set_error( STATUS_ACCESS_DENIED ); return 0; } - return do_release( mutex->sync, current, 1 ); + return do_release( (struct mutex_sync *)mutex->sync, current, 1 ); }
static void mutex_destroy( struct object *obj ) @@ -315,8 +315,11 @@ DECL_HANDLER(release_mutex) if ((mutex = (struct mutex *)get_handle_obj( current->process, req->handle, 0, &mutex_ops ))) { - reply->prev_count = mutex->sync->count; - do_release( mutex->sync, current, 1 ); + struct mutex_sync *sync = (struct mutex_sync *)mutex->sync; + assert( mutex->sync->ops == &mutex_sync_ops ); /* never called with inproc syncs */ + + reply->prev_count = sync->count; + do_release( sync, current, 1 ); release_object( mutex ); } } @@ -329,9 +332,12 @@ DECL_HANDLER(query_mutex) if ((mutex = (struct mutex *)get_handle_obj( current->process, req->handle, MUTANT_QUERY_STATE, &mutex_ops ))) { - reply->count = mutex->sync->count; - reply->owned = (mutex->sync->owner == current); - reply->abandoned = mutex->sync->abandoned; + struct mutex_sync *sync = (struct mutex_sync *)mutex->sync; + assert( mutex->sync->ops == &mutex_sync_ops ); /* never called with inproc syncs */ + + reply->count = sync->count; + reply->owned = (sync->owner == current); + reply->abandoned = sync->abandoned;
release_object( mutex ); } diff --git a/server/object.h b/server/object.h index 4cc169b827c..939b18f0036 100644 --- a/server/object.h +++ b/server/object.h @@ -218,14 +218,14 @@ static inline void *mem_append( void *ptr, const void *src, data_size_t len )
/* event functions */
-struct event_sync; struct event; +struct event_sync; struct keyed_event;
extern struct event_sync *create_server_internal_sync( int manual, int signaled ); -extern struct event_sync *create_internal_sync( int manual, int signaled ); -extern void signal_sync( struct event_sync *sync ); -extern void reset_sync( struct event_sync *sync ); +extern struct object *create_internal_sync( int manual, int signaled ); +extern void signal_sync( struct object *sync ); +extern void reset_sync( struct object *sync );
extern struct event *create_event( struct object *root, const struct unicode_str *name, unsigned int attr, int manual_reset, int initial_state, diff --git a/server/process.c b/server/process.c index 48a1d2d697d..097125776b2 100644 --- a/server/process.c +++ b/server/process.c @@ -140,7 +140,7 @@ static const struct fd_ops process_fd_ops = struct startup_info { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct process *process; /* created process */ data_size_t info_size; /* size of startup info */ data_size_t data_size; /* size of whole startup data */ @@ -200,7 +200,7 @@ static void job_destroy( struct object *obj ); struct job { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list process_list; /* list of processes */ int num_processes; /* count of running processes */ int total_processes; /* count of processes which have been assigned */ diff --git a/server/process.h b/server/process.h index 619e4894d1d..f7188627abc 100644 --- a/server/process.h +++ b/server/process.h @@ -36,7 +36,7 @@ enum startup_state { STARTUP_IN_PROGRESS, STARTUP_DONE, STARTUP_ABORTED }; struct process { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list entry; /* entry in system-wide process list */ process_id_t parent_id; /* parent process id (at the time of creation) */ struct list thread_list; /* thread list */ diff --git a/server/queue.c b/server/queue.c index 4ccecc5c26a..fb9499b5589 100644 --- a/server/queue.c +++ b/server/queue.c @@ -120,7 +120,7 @@ struct msg_queue { struct object obj; /* object header */ struct fd *fd; /* optional file descriptor to poll */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; 
/* sync object for wait/signal */ int paint_count; /* pending paint messages count */ int hotkey_count; /* pending hotkey messages count */ int quit_message; /* is there a pending quit message? */ diff --git a/server/semaphore.c b/server/semaphore.c index 771115e62fd..1b9109e1f98 100644 --- a/server/semaphore.c +++ b/server/semaphore.c @@ -130,20 +130,20 @@ static void semaphore_sync_satisfied( struct object *obj, struct wait_queue_entr sem->count--; }
-static struct semaphore_sync *create_semaphore_sync( unsigned int initial, unsigned int max ) +static struct object *create_semaphore_sync( unsigned int initial, unsigned int max ) { struct semaphore_sync *sem;
if (!(sem = alloc_object( &semaphore_sync_ops ))) return NULL; sem->count = initial; sem->max = max; - return sem; + return &sem->obj; }
struct semaphore { struct object obj; /* object header */ - struct semaphore_sync *sync; /* semaphore sync object */ + struct object *sync; /* semaphore sync object */ };
static void semaphore_dump( struct object *obj, int verbose ); @@ -208,7 +208,7 @@ static void semaphore_dump( struct object *obj, int verbose ) { struct semaphore *sem = (struct semaphore *)obj; assert( obj->ops == &semaphore_ops ); - sem->sync->obj.ops->dump( &sem->sync->obj, verbose ); + sem->sync->ops->dump( sem->sync, verbose ); }
static struct object *semaphore_get_sync( struct object *obj ) @@ -223,7 +223,7 @@ static int semaphore_signal( struct object *obj, unsigned int access, int signal struct semaphore *sem = (struct semaphore *)obj; assert( obj->ops == &semaphore_ops );
- assert( sem->sync->obj.ops == &semaphore_sync_ops ); /* never called with inproc syncs */ + assert( sem->sync->ops == &semaphore_sync_ops ); /* never called with inproc syncs */ assert( signal == -1 ); /* always called from signal_object */
if (!(access & SEMAPHORE_MODIFY_STATE)) @@ -231,7 +231,7 @@ static int semaphore_signal( struct object *obj, unsigned int access, int signal set_error( STATUS_ACCESS_DENIED ); return 0; } - return release_semaphore( sem->sync, 1, NULL ); + return release_semaphore( (struct semaphore_sync *)sem->sync, 1, NULL ); }
static void semaphore_destroy( struct object *obj ) @@ -282,7 +282,10 @@ DECL_HANDLER(release_semaphore) if ((sem = (struct semaphore *)get_handle_obj( current->process, req->handle, SEMAPHORE_MODIFY_STATE, &semaphore_ops ))) { - release_semaphore( sem->sync, req->count, &reply->prev_count ); + struct semaphore_sync *sync = (struct semaphore_sync *)sem->sync; + assert( sem->sync->ops == &semaphore_sync_ops ); /* never called with inproc syncs */ + + release_semaphore( sync, req->count, &reply->prev_count ); release_object( sem ); } } @@ -295,8 +298,11 @@ DECL_HANDLER(query_semaphore) if ((sem = (struct semaphore *)get_handle_obj( current->process, req->handle, SEMAPHORE_QUERY_STATE, &semaphore_ops ))) { - reply->current = sem->sync->count; - reply->max = sem->sync->max; + struct semaphore_sync *sync = (struct semaphore_sync *)sem->sync; + assert( sem->sync->ops == &semaphore_sync_ops ); /* never called with inproc syncs */ + + reply->current = sync->count; + reply->max = sync->max; release_object( sem ); } } diff --git a/server/thread.c b/server/thread.c index 83ee9a47794..b990907648f 100644 --- a/server/thread.c +++ b/server/thread.c @@ -85,7 +85,7 @@ struct thread_wait struct thread_apc { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct list entry; /* queue linked list */ struct thread *caller; /* thread that queued this apc */ struct object *owner; /* object that queued this apc */ @@ -131,7 +131,7 @@ static const struct object_ops thread_apc_ops = struct context { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ unsigned int status; /* status of the context */ struct context_data regs[2]; /* context data */ }; @@ -1031,6 +1031,16 @@ static int object_sync_signaled( struct object *obj, struct wait_queue_entry *en return ret; }
+void signal_sync( struct object *obj ) +{ + obj->ops->signal( obj, 0, 1 ); +} + +void reset_sync( struct object *obj ) +{ + obj->ops->signal( obj, 0, 0 ); +} + /* finish waiting */ static unsigned int end_wait( struct thread *thread, unsigned int status ) { diff --git a/server/thread.h b/server/thread.h index 9c552a88ed2..cbd13721083 100644 --- a/server/thread.h +++ b/server/thread.h @@ -50,7 +50,7 @@ struct inflight_fd struct thread { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ struct inproc_sync *alert_sync; /* inproc sync for user apc alerts */ struct list entry; /* entry in system-wide thread list */ struct list proc_entry; /* entry in per-process thread list */ diff --git a/server/timer.c b/server/timer.c index e8811594cac..a4309d54045 100644 --- a/server/timer.c +++ b/server/timer.c @@ -53,7 +53,7 @@ struct type_descr timer_type = struct timer { struct object obj; /* object header */ - struct event_sync *sync; /* sync object for wait/signal */ + struct object *sync; /* sync object for wait/signal */ int manual; /* manual reset */ int signaled; /* current signaled state */ unsigned int period; /* timer period in ms */
 
            From: Elizabeth Figura <zfigura@codeweavers.com>
--- dlls/ntdll/unix/sync.c | 116 ++++++++++++++++++++++++++++++----------- server/inproc_sync.c | 2 +- server/protocol.def | 3 ++ 3 files changed, 91 insertions(+), 30 deletions(-)
diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c index dd18c948d22..d8d8406f211 100644 --- a/dlls/ntdll/unix/sync.c +++ b/dlls/ntdll/unix/sync.c @@ -319,16 +319,11 @@ static void release_inproc_sync( struct inproc_sync *sync ) close( sync->fd ); }
-static NTSTATUS get_inproc_sync( HANDLE handle, ACCESS_MASK desired_access, struct inproc_sync *sync ) +/* fd_cache_mutex must be held to avoid races with other thread receiving fds */ +static NTSTATUS get_server_inproc_sync( HANDLE handle, struct inproc_sync *sync ) { - sigset_t sigset; NTSTATUS ret;
- /* We need to use fd_cache_mutex here to protect against races with - * other threads trying to receive fds for the fd cache, - * and we need to use an uninterrupted section to prevent reentrancy. */ - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - SERVER_START_REQ( get_inproc_sync_fd ) { req->handle = wine_server_obj_handle( handle ); @@ -343,9 +338,28 @@ static NTSTATUS get_inproc_sync( HANDLE handle, ACCESS_MASK desired_access, stru } SERVER_END_REQ;
+ return ret; +} + +static NTSTATUS get_inproc_sync( HANDLE handle, enum inproc_sync_type desired_type, ACCESS_MASK desired_access, + struct inproc_sync *sync ) +{ + sigset_t sigset; + NTSTATUS ret; + + /* We need to use fd_cache_mutex here to protect against races with + * other threads trying to receive fds for the fd cache, + * and we need to use an uninterrupted section to prevent reentrancy. */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + ret = get_server_inproc_sync( handle, sync ); server_leave_uninterrupted_section( &fd_cache_mutex, &sigset );
if (ret) return ret; + if (desired_type != INPROC_SYNC_UNKNOWN && desired_type != sync->type) + { + release_inproc_sync( sync ); + return STATUS_OBJECT_TYPE_MISMATCH; + } if ((sync->access & desired_access) != desired_access) { release_inproc_sync( sync ); @@ -361,6 +375,15 @@ extern NTSTATUS check_signal_access( struct inproc_sync *sync ) { case INPROC_SYNC_INTERNAL: return STATUS_OBJECT_TYPE_MISMATCH; + case INPROC_SYNC_EVENT: + if (!(sync->access & EVENT_MODIFY_STATE)) return STATUS_ACCESS_DENIED; + return STATUS_SUCCESS; + case INPROC_SYNC_MUTEX: + if (!(sync->access & SYNCHRONIZE)) return STATUS_ACCESS_DENIED; + return STATUS_SUCCESS; + case INPROC_SYNC_SEMAPHORE: + if (!(sync->access & SEMAPHORE_MODIFY_STATE)) return STATUS_ACCESS_DENIED; + return STATUS_SUCCESS; }
assert( 0 ); @@ -369,50 +392,90 @@ extern NTSTATUS check_signal_access( struct inproc_sync *sync )
static NTSTATUS inproc_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_MODIFY_STATE, &stack ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_QUERY_STATE, &stack ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_set_event( HANDLE handle, LONG *prev_state ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_reset_event( HANDLE handle, LONG *prev_state ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_pulse_event( HANDLE handle, LONG *prev_state ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_QUERY_STATE, sync ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_release_mutex( HANDLE handle, LONG *prev_count ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, 0, &stack ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) { + struct inproc_sync stack, *sync = &stack; + NTSTATUS ret; + if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - return STATUS_NOT_IMPLEMENTED; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, MUTANT_QUERY_STATE, &stack ))) return ret; + release_inproc_sync( sync ); + return ret; }
static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, @@ -426,7 +489,7 @@ static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_an assert( count <= ARRAY_SIZE(syncs) ); for (int i = 0; i < count; ++i) { - if ((ret = get_inproc_sync( handles[i], SYNCHRONIZE, stack + i ))) + if ((ret = get_inproc_sync( handles[i], INPROC_SYNC_UNKNOWN, SYNCHRONIZE, &stack[i] ))) { while (i--) release_inproc_sync( syncs[i] ); return ret; @@ -446,22 +509,17 @@ static NTSTATUS inproc_signal_and_wait( HANDLE signal, HANDLE wait,
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED;
- if ((ret = get_inproc_sync( signal, 0, signal_sync ))) return ret; - if ((ret = check_signal_access( signal_sync ))) - { - release_inproc_sync( signal_sync ); - return ret; - } + if ((ret = get_inproc_sync( signal, INPROC_SYNC_UNKNOWN, 0, signal_sync ))) return ret; + if ((ret = check_signal_access( signal_sync ))) goto done;
- if ((ret = get_inproc_sync( wait, SYNCHRONIZE, wait_sync ))) - { - release_inproc_sync( signal_sync ); - return ret; - } + if ((ret = get_inproc_sync( wait, INPROC_SYNC_UNKNOWN, SYNCHRONIZE, wait_sync ))) goto done; + + ret = STATUS_NOT_IMPLEMENTED;
- release_inproc_sync( signal_sync ); release_inproc_sync( wait_sync ); - return STATUS_NOT_IMPLEMENTED; +done: + release_inproc_sync( signal_sync ); + return ret; }
diff --git a/server/inproc_sync.c b/server/inproc_sync.c index 54342fc3e2a..8cfd3b1e1e8 100644 --- a/server/inproc_sync.c +++ b/server/inproc_sync.c @@ -132,7 +132,7 @@ static int inproc_sync_signal( struct object *obj, unsigned int access, int sign struct inproc_sync *sync = (struct inproc_sync *)obj; assert( obj->ops == &inproc_sync_ops );
- assert( sync->type == INPROC_SYNC_INTERNAL ); /* never called for mutex / semaphore */ + assert( sync->type == INPROC_SYNC_INTERNAL || sync->type == INPROC_SYNC_EVENT ); /* never called for mutex / semaphore */ assert( signal == 0 || signal == 1 ); /* never called from signal_object */
if (signal) signal_inproc_sync( sync ); diff --git a/server/protocol.def b/server/protocol.def index 4fc12225fdb..3b2f6c497cf 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -4144,6 +4144,9 @@ enum inproc_sync_type { INPROC_SYNC_UNKNOWN = 0, INPROC_SYNC_INTERNAL = 1, + INPROC_SYNC_EVENT = 2, + INPROC_SYNC_MUTEX = 3, + INPROC_SYNC_SEMAPHORE = 4, };
/* Get the in-process synchronization fd associated with the waitable handle */
 
            From: Rémi Bernon <rbernon@codeweavers.com>
--- dlls/ntdll/unix/sync.c | 30 ++++++++++++++++++++++++++++++ dlls/ntdll/unix/thread.c | 1 + dlls/ntdll/unix/unix_private.h | 1 + dlls/ntdll/unix/virtual.c | 1 + server/inproc_sync.c | 17 ++++++++++++++--- server/object.h | 1 + server/protocol.def | 7 +++++++ server/thread.c | 14 ++++++++++++++ 8 files changed, 69 insertions(+), 3 deletions(-)
diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c index d8d8406f211..7095aef30f2 100644 --- a/dlls/ntdll/unix/sync.c +++ b/dlls/ntdll/unix/sync.c @@ -478,6 +478,33 @@ static NTSTATUS inproc_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *inf return ret; }
+static int get_inproc_alert_fd(void) +{ + struct ntdll_thread_data *data = ntdll_get_thread_data(); + obj_handle_t token; + sigset_t sigset; + int fd; + + if ((fd = data->alert_fd) < 0) + { + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + SERVER_START_REQ( get_inproc_alert_fd ) + { + if (!server_call_unlocked( req )) + { + data->alert_fd = fd = wine_server_receive_fd( &token ); + assert( token == reply->handle ); + } + } + SERVER_END_REQ; + + server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + } + + return fd; +} + static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_any, BOOLEAN alertable, const LARGE_INTEGER *timeout ) { @@ -497,6 +524,8 @@ static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_an syncs[i] = stack + i; }
+ if (alertable) get_inproc_alert_fd(); + while (count--) release_inproc_sync( syncs[count] ); return STATUS_NOT_IMPLEMENTED; } @@ -514,6 +543,7 @@ static NTSTATUS inproc_signal_and_wait( HANDLE signal, HANDLE wait,
if ((ret = get_inproc_sync( wait, INPROC_SYNC_UNKNOWN, SYNCHRONIZE, wait_sync ))) goto done;
+ if (alertable) get_inproc_alert_fd(); ret = STATUS_NOT_IMPLEMENTED;
release_inproc_sync( wait_sync ); diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c index 8f3fb701a38..10ccfe9eba2 100644 --- a/dlls/ntdll/unix/thread.c +++ b/dlls/ntdll/unix/thread.c @@ -1104,6 +1104,7 @@ static void contexts_from_server( CONTEXT *context, struct context_data server_c */ static DECLSPEC_NORETURN void pthread_exit_wrapper( int status ) { + close( ntdll_get_thread_data()->alert_fd ); close( ntdll_get_thread_data()->wait_fd[0] ); close( ntdll_get_thread_data()->wait_fd[1] ); close( ntdll_get_thread_data()->reply_fd ); diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index 75c04e5ec15..67cd3aede42 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -108,6 +108,7 @@ struct ntdll_thread_data int request_fd; /* fd for sending server requests */ int reply_fd; /* fd for receiving server replies */ int wait_fd[2]; /* fd for sleeping server requests */ + int alert_fd; /* inproc sync fd for user apc alerts */ BOOL allow_writes; /* ThreadAllowWrites flags */ pthread_t pthread_id; /* pthread thread id */ void *kernel_stack; /* stack for thread startup and kernel syscalls */ diff --git a/dlls/ntdll/unix/virtual.c b/dlls/ntdll/unix/virtual.c index 994f76fb72a..126bd915e8d 100644 --- a/dlls/ntdll/unix/virtual.c +++ b/dlls/ntdll/unix/virtual.c @@ -4026,6 +4026,7 @@ static TEB *init_teb( void *ptr, BOOL is_wow ) thread_data->reply_fd = -1; thread_data->wait_fd[0] = -1; thread_data->wait_fd[1] = -1; + thread_data->alert_fd = -1; list_add_head( &teb_list, &thread_data->entry ); return teb; } diff --git a/server/inproc_sync.c b/server/inproc_sync.c index 8cfd3b1e1e8..46222d56a45 100644 --- a/server/inproc_sync.c +++ b/server/inproc_sync.c @@ -88,6 +88,12 @@ static const struct object_ops inproc_sync_ops = inproc_sync_destroy, /* destroy */ };
+int get_inproc_sync_fd( struct inproc_sync *sync ) +{ + if (!sync) return -1; + return sync->fd; +} + struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ) { struct ntsync_event_args args = {.signaled = signaled, .manual = manual}; @@ -147,7 +153,7 @@ static void inproc_sync_destroy( struct object *obj ) close( sync->fd ); }
-static int get_inproc_sync_fd( struct object *obj, int *type ) +static int get_obj_inproc_sync( struct object *obj, int *type ) { struct object *sync; int fd = -1; @@ -171,6 +177,11 @@ int get_inproc_device_fd(void) return -1; }
+int get_inproc_sync_fd( struct inproc_sync *sync ) +{ + return -1; +} + struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ) { return NULL; @@ -184,7 +195,7 @@ void reset_inproc_sync( struct inproc_sync *sync ) { }
-static int get_inproc_sync_fd( struct object *obj, int *type ) +static int get_obj_inproc_sync( struct object *obj, int *type ) { return -1; } @@ -200,7 +211,7 @@ DECL_HANDLER(get_inproc_sync_fd)
reply->access = get_handle_access( current->process, req->handle );
- if ((fd = get_inproc_sync_fd( obj, &reply->type )) < 0) set_error( STATUS_NOT_IMPLEMENTED ); + if ((fd = get_obj_inproc_sync( obj, &reply->type )) < 0) set_error( STATUS_NOT_IMPLEMENTED ); else send_client_fd( current->process, fd, req->handle );
release_object( obj ); diff --git a/server/object.h b/server/object.h index 939b18f0036..bf242d0ad6d 100644 --- a/server/object.h +++ b/server/object.h @@ -245,6 +245,7 @@ extern void abandon_mutexes( struct thread *thread );
struct inproc_sync; extern int get_inproc_device_fd(void); +extern int get_inproc_sync_fd( struct inproc_sync *sync ); extern struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ); extern void signal_inproc_sync( struct inproc_sync *sync ); extern void reset_inproc_sync( struct inproc_sync *sync ); diff --git a/server/protocol.def b/server/protocol.def index 3b2f6c497cf..14704f79b30 100644 --- a/server/protocol.def +++ b/server/protocol.def @@ -4158,6 +4158,13 @@ enum inproc_sync_type @END
+/* Get the in-process synchronization fd for the current thread user APC alerts */ +@REQ(get_inproc_alert_fd) +@REPLY + obj_handle_t handle; /* alert fd is in flight with this handle */ +@END + + /* Create a global d3dkmt object */ @REQ(d3dkmt_object_create) unsigned int type; /* d3dkmt object type */ diff --git a/server/thread.c b/server/thread.c index b990907648f..bb16fa9331f 100644 --- a/server/thread.c +++ b/server/thread.c @@ -2370,3 +2370,17 @@ DECL_HANDLER(get_next_thread) set_error( STATUS_NO_MORE_ENTRIES ); release_object( process ); } + + +/* Get the in-process synchronization fd for the current thread user APC alerts */ +DECL_HANDLER(get_inproc_alert_fd) +{ + int fd; + + if ((fd = get_inproc_sync_fd( current->alert_sync )) < 0) set_error( STATUS_INVALID_PARAMETER ); + else + { + reply->handle = get_thread_id( current ) | 1; /* arbitrary token */ + send_client_fd( current->process, fd, reply->handle ); + } +}
 
            From: Elizabeth Figura <zfigura@codeweavers.com>
--- dlls/ntdll/unix/sync.c | 234 +++++++++++++++++++++++++++++++-- dlls/ntdll/unix/unix_private.h | 1 + server/event.c | 3 + server/inproc_sync.c | 89 +++++++++++++ server/mutex.c | 4 + server/object.h | 4 + server/semaphore.c | 2 + 7 files changed, 328 insertions(+), 9 deletions(-)
diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c index 7095aef30f2..bd0fe666bd1 100644 --- a/dlls/ntdll/unix/sync.c +++ b/dlls/ntdll/unix/sync.c @@ -30,9 +30,11 @@ #include <assert.h> #include <errno.h> #include <fcntl.h> +#include <inttypes.h> #include <limits.h> #include <signal.h> #include <sys/types.h> +#include <sys/ioctl.h> #include <sys/mman.h> #ifdef HAVE_SYS_SYSCALL_H #include <sys/syscall.h> @@ -48,6 +50,7 @@ #endif #include <string.h> #include <stdarg.h> +#include <stdint.h> #include <stdio.h> #include <stdlib.h> #include <time.h> @@ -306,6 +309,196 @@ static unsigned int validate_open_object_attributes( const OBJECT_ATTRIBUTES *at return STATUS_SUCCESS; }
+#ifdef NTSYNC_IOC_EVENT_READ + +static NTSTATUS linux_release_semaphore_obj( int obj, ULONG count, ULONG *prev_count ) +{ + if (ioctl( obj, NTSYNC_IOC_SEM_RELEASE, &count ) < 0) + { + if (errno == EOVERFLOW) return STATUS_SEMAPHORE_LIMIT_EXCEEDED; + return errno_to_status( errno ); + } + if (prev_count) *prev_count = count; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_query_semaphore_obj( int obj, SEMAPHORE_BASIC_INFORMATION *info ) +{ + struct ntsync_sem_args args = {0}; + if (ioctl( obj, NTSYNC_IOC_SEM_READ, &args ) < 0) return errno_to_status( errno ); + info->CurrentCount = args.count; + info->MaximumCount = args.max; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_set_event_obj( int obj, LONG *prev_state ) +{ + __u32 prev; + if (ioctl( obj, NTSYNC_IOC_EVENT_SET, &prev ) < 0) return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_reset_event_obj( int obj, LONG *prev_state ) +{ + __u32 prev; + if (ioctl( obj, NTSYNC_IOC_EVENT_RESET, &prev ) < 0) return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_pulse_event_obj( int obj, LONG *prev_state ) +{ + __u32 prev; + if (ioctl( obj, NTSYNC_IOC_EVENT_PULSE, &prev ) < 0) return errno_to_status( errno ); + if (prev_state) *prev_state = prev; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_query_event_obj( int obj, enum inproc_sync_type type, EVENT_BASIC_INFORMATION *info ) +{ + struct ntsync_event_args args = {0}; + if (ioctl( obj, NTSYNC_IOC_EVENT_READ, &args ) < 0) return errno_to_status( errno ); + info->EventType = args.manual ? 
NotificationEvent : SynchronizationEvent; + info->EventState = args.signaled; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_release_mutex_obj( int obj, LONG *prev_count ) +{ + struct ntsync_mutex_args args = {.owner = GetCurrentThreadId()}; + if (ioctl( obj, NTSYNC_IOC_MUTEX_UNLOCK, &args ) < 0) + { + if (errno == EOVERFLOW) return STATUS_MUTANT_LIMIT_EXCEEDED; + if (errno == EPERM) return STATUS_MUTANT_NOT_OWNED; + return errno_to_status( errno ); + } + if (prev_count) *prev_count = 1 - args.count; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_query_mutex_obj( int obj, MUTANT_BASIC_INFORMATION *info ) +{ + struct ntsync_mutex_args args = {0}; + if (ioctl( obj, NTSYNC_IOC_MUTEX_READ, &args ) < 0) + { + if (errno == EOWNERDEAD) + { + info->AbandonedState = TRUE; + info->OwnedByCaller = FALSE; + info->CurrentCount = 1; + return STATUS_SUCCESS; + } + return errno_to_status( errno ); + } + info->AbandonedState = FALSE; + info->OwnedByCaller = (args.owner == GetCurrentThreadId()); + info->CurrentCount = 1 - args.count; + return STATUS_SUCCESS; +} + +static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs, + BOOLEAN wait_any, int alert_fd, const LARGE_INTEGER *timeout ) +{ + struct ntsync_wait_args args = {0}; + unsigned long request; + struct timespec now; + int ret; + + if (!timeout || timeout->QuadPart == TIMEOUT_INFINITE) + { + args.timeout = ~(__u64)0; + } + else if (timeout->QuadPart <= 0) + { + clock_gettime( CLOCK_MONOTONIC, &now ); + args.timeout = (now.tv_sec * NSECPERSEC) + now.tv_nsec + (-timeout->QuadPart * 100); + } + else + { + args.timeout = (timeout->QuadPart * 100) - (SECS_1601_TO_1970 * NSECPERSEC); + args.flags |= NTSYNC_WAIT_REALTIME; + } + + args.objs = (uintptr_t)objs; + args.count = count; + args.owner = GetCurrentThreadId(); + args.index = ~0u; + args.alert = alert_fd; + + if (wait_any || count == 1) request = NTSYNC_IOC_WAIT_ANY; + else request = NTSYNC_IOC_WAIT_ALL; + + do { ret = ioctl( device, request, 
&args ); } + while (ret < 0 && errno == EINTR); + + if (!ret) + { + if (args.index == count) + { + static const LARGE_INTEGER timeout; + + ret = server_wait( NULL, 0, SELECT_INTERRUPTIBLE | SELECT_ALERTABLE, &timeout ); + assert( ret == STATUS_USER_APC ); + return ret; + } + + return wait_any ? args.index : 0; + } + if (errno == EOWNERDEAD) return STATUS_ABANDONED + (wait_any ? args.index : 0); + if (errno == ETIMEDOUT) return STATUS_TIMEOUT; + return errno_to_status( errno ); +} + +#else /* NTSYNC_IOC_EVENT_READ */ + +static NTSTATUS linux_release_semaphore_obj( int obj, ULONG count, ULONG *prev_count ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_query_semaphore_obj( int obj, SEMAPHORE_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_set_event_obj( int obj, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_reset_event_obj( int obj, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_pulse_event_obj( int obj, LONG *prev_state ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_query_event_obj( int obj, enum inproc_sync_type type, EVENT_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_release_mutex_obj( int obj, LONG *prev_count ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_query_mutex_obj( int obj, MUTANT_BASIC_INFORMATION *info ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs, + BOOLEAN wait_any, int alert_fd, const LARGE_INTEGER *timeout ) +{ + return STATUS_NOT_IMPLEMENTED; +} + +#endif /* NTSYNC_IOC_EVENT_READ */
struct inproc_sync { @@ -397,6 +590,7 @@ static NTSTATUS inproc_release_semaphore( HANDLE handle, ULONG count, ULONG *pre
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_MODIFY_STATE, &stack ))) return ret; + ret = linux_release_semaphore_obj( sync->fd, count, prev_count ); release_inproc_sync( sync ); return ret; } @@ -408,6 +602,7 @@ static NTSTATUS inproc_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATI
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_QUERY_STATE, &stack ))) return ret; + ret = linux_query_semaphore_obj( sync->fd, info ); release_inproc_sync( sync ); return ret; } @@ -418,7 +613,8 @@ static NTSTATUS inproc_set_event( HANDLE handle, LONG *prev_state ) NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + ret = linux_set_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; } @@ -429,7 +625,8 @@ static NTSTATUS inproc_reset_event( HANDLE handle, LONG *prev_state ) NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + ret = linux_reset_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; } @@ -440,7 +637,8 @@ static NTSTATUS inproc_pulse_event( HANDLE handle, LONG *prev_state ) NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, sync ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + ret = linux_pulse_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; } @@ -451,7 +649,8 @@ static NTSTATUS inproc_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_QUERY_STATE, sync ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_QUERY_STATE, &stack ))) return ret; + ret = linux_query_event_obj( sync->fd, sync->type, info ); release_inproc_sync( sync ); return ret; } @@ -463,6 +662,7 @@ static NTSTATUS inproc_release_mutex( HANDLE handle, LONG *prev_count )
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, 0, &stack ))) return ret; + ret = linux_release_mutex_obj( sync->fd, prev_count ); release_inproc_sync( sync ); return ret; } @@ -474,6 +674,7 @@ static NTSTATUS inproc_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *inf
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, MUTANT_QUERY_STATE, &stack ))) return ret; + ret = linux_query_mutex_obj( sync->fd, info ); release_inproc_sync( sync ); return ret; } @@ -509,6 +710,7 @@ static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_an BOOLEAN alertable, const LARGE_INTEGER *timeout ) { struct inproc_sync *syncs[64], stack[ARRAY_SIZE(syncs)]; + int objs[ARRAY_SIZE(syncs)], alert_fd = 0; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; @@ -521,19 +723,22 @@ static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_an while (i--) release_inproc_sync( syncs[i] ); return ret; } - syncs[i] = stack + i; + syncs[i] = &stack[i]; + objs[i] = syncs[i]->fd; }
- if (alertable) get_inproc_alert_fd(); + if (alertable) alert_fd = get_inproc_alert_fd(); + ret = linux_wait_objs( inproc_device_fd, count, objs, wait_any, alert_fd, timeout );
while (count--) release_inproc_sync( syncs[count] ); - return STATUS_NOT_IMPLEMENTED; + return ret; }
static NTSTATUS inproc_signal_and_wait( HANDLE signal, HANDLE wait, BOOLEAN alertable, const LARGE_INTEGER *timeout ) { struct inproc_sync stack_signal, stack_wait, *signal_sync = &stack_signal, *wait_sync = &stack_wait; + int alert_fd = 0; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; @@ -543,8 +748,19 @@ static NTSTATUS inproc_signal_and_wait( HANDLE signal, HANDLE wait,
if ((ret = get_inproc_sync( wait, INPROC_SYNC_UNKNOWN, SYNCHRONIZE, wait_sync ))) goto done;
- if (alertable) get_inproc_alert_fd(); - ret = STATUS_NOT_IMPLEMENTED; + switch (signal_sync->type) + { + case INPROC_SYNC_EVENT: ret = linux_set_event_obj( signal_sync->fd, NULL ); break; + case INPROC_SYNC_MUTEX: ret = linux_release_mutex_obj( signal_sync->fd, NULL ); break; + case INPROC_SYNC_SEMAPHORE: ret = linux_release_semaphore_obj( signal_sync->fd, 1, NULL ); break; + default: assert( 0 ); break; + } + + if (!ret) + { + if (alertable) alert_fd = get_inproc_alert_fd(); + ret = linux_wait_objs( inproc_device_fd, 1, &wait_sync->fd, TRUE, alert_fd, timeout ); + }
release_inproc_sync( wait_sync ); done: diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index 67cd3aede42..3ef9041d64e 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -398,6 +398,7 @@ extern void call_raise_user_exception_dispatcher(void); #define IMAGE_DLLCHARACTERISTICS_PREFER_NATIVE 0x0010 /* Wine extension */
#define TICKSPERSEC 10000000 +#define NSECPERSEC 1000000000 #define SECS_1601_TO_1970 ((369 * 365 + 89) * (ULONGLONG)86400)
static inline ULONGLONG ticks_from_time_t( time_t time ) diff --git a/server/event.c b/server/event.c index 47a7d5a9b88..b70e9e06e2e 100644 --- a/server/event.c +++ b/server/event.c @@ -91,6 +91,8 @@ static struct object *create_event_sync( int manual, int signaled ) { struct event_sync *event;
+ if (get_inproc_device_fd() >= 0) return (struct object *)create_inproc_event_sync( manual, signaled ); + if (!(event = alloc_object( &event_sync_ops ))) return NULL; event->manual = manual; event->signaled = signaled; @@ -111,6 +113,7 @@ struct event_sync *create_server_internal_sync( int manual, int signaled )
struct object *create_internal_sync( int manual, int signaled ) { + if (get_inproc_device_fd() >= 0) return (struct object *)create_inproc_internal_sync( manual, signaled ); return (struct object *)create_server_internal_sync( manual, signaled ); }
diff --git a/server/inproc_sync.c b/server/inproc_sync.c index 46222d56a45..2b6b37cd2e0 100644 --- a/server/inproc_sync.c +++ b/server/inproc_sync.c @@ -57,8 +57,11 @@ struct inproc_sync struct object obj; /* object header */ enum inproc_sync_type type; int fd; + struct list entry; };
+static struct list inproc_mutexes = LIST_INIT( inproc_mutexes ); + static void inproc_sync_dump( struct object *obj, int verbose ); static int inproc_sync_signal( struct object *obj, unsigned int access, int signal ); static void inproc_sync_destroy( struct object *obj ); @@ -102,6 +105,7 @@ struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ) if (!(event = alloc_object( &inproc_sync_ops ))) return NULL; event->type = INPROC_SYNC_INTERNAL; event->fd = ioctl( get_inproc_device_fd(), NTSYNC_IOC_CREATE_EVENT, &args ); + list_init( &event->entry );
if (event->fd == -1) { @@ -112,6 +116,63 @@ struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ) return event; }
+struct inproc_sync *create_inproc_event_sync( int manual, int signaled ) +{ + struct ntsync_event_args args = {.signaled = signaled, .manual = manual}; + struct inproc_sync *event; + + if (!(event = alloc_object( &inproc_sync_ops ))) return NULL; + event->type = INPROC_SYNC_EVENT; + event->fd = ioctl( get_inproc_device_fd(), NTSYNC_IOC_CREATE_EVENT, &args ); + list_init( &event->entry ); + + if (event->fd == -1) + { + set_error( STATUS_NO_MORE_FILES ); + release_object( event ); + return NULL; + } + return event; +} + +struct inproc_sync *create_inproc_mutex_sync( thread_id_t owner, unsigned int count ) +{ + struct ntsync_mutex_args args = {.owner = owner, .count = count}; + struct inproc_sync *mutex; + + if (!(mutex = alloc_object( &inproc_sync_ops ))) return NULL; + mutex->type = INPROC_SYNC_MUTEX; + mutex->fd = ioctl( get_inproc_device_fd(), NTSYNC_IOC_CREATE_MUTEX, &args ); + list_add_tail( &inproc_mutexes, &mutex->entry ); + + if (mutex->fd == -1) + { + set_error( STATUS_NO_MORE_FILES ); + release_object( mutex ); + return NULL; + } + return mutex; +} + +struct inproc_sync *create_inproc_semaphore_sync( unsigned int initial, unsigned int max ) +{ + struct ntsync_sem_args args = {.count = initial, .max = max}; + struct inproc_sync *sem; + + if (!(sem = alloc_object( &inproc_sync_ops ))) return NULL; + sem->type = INPROC_SYNC_SEMAPHORE; + sem->fd = ioctl( get_inproc_device_fd(), NTSYNC_IOC_CREATE_SEM, &args ); + list_init( &sem->entry ); + + if (sem->fd == -1) + { + set_error( STATUS_NO_MORE_FILES ); + release_object( sem ); + return NULL; + } + return sem; +} + static void inproc_sync_dump( struct object *obj, int verbose ) { struct inproc_sync *sync = (struct inproc_sync *)obj; @@ -150,9 +211,18 @@ static void inproc_sync_destroy( struct object *obj ) { struct inproc_sync *sync = (struct inproc_sync *)obj; assert( obj->ops == &inproc_sync_ops ); + list_remove( &sync->entry ); close( sync->fd ); }
+void abandon_inproc_mutexes( thread_id_t tid ) +{ + struct inproc_sync *mutex; + + LIST_FOR_EACH_ENTRY( mutex, &inproc_mutexes, struct inproc_sync, entry ) + ioctl( mutex->fd, NTSYNC_IOC_MUTEX_KILL, &tid ); +} + static int get_obj_inproc_sync( struct object *obj, int *type ) { struct object *sync; @@ -187,6 +257,21 @@ struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ) return NULL; }
+struct inproc_sync *create_inproc_event_sync( int manual, int signaled ) +{ + return NULL; +} + +struct inproc_sync *create_inproc_mutex_sync( thread_id_t owner, unsigned int count ) +{ + return NULL; +} + +struct inproc_sync *create_inproc_semaphore_sync( unsigned int initial, unsigned int max ) +{ + return NULL; +} + void signal_inproc_sync( struct inproc_sync *sync ) { } @@ -195,6 +280,10 @@ void reset_inproc_sync( struct inproc_sync *sync ) { }
+void abandon_inproc_mutexes( thread_id_t tid ) +{ +} + static int get_obj_inproc_sync( struct object *obj, int *type ) { return -1; diff --git a/server/mutex.c b/server/mutex.c index 4932e07e96d..a1684536dc2 100644 --- a/server/mutex.c +++ b/server/mutex.c @@ -157,6 +157,8 @@ static struct object *create_mutex_sync( int owned ) { struct mutex_sync *mutex;
+ if (get_inproc_device_fd() >= 0) return (struct object *)create_inproc_mutex_sync( owned ? current->id : 0, owned ? 1 : 0 ); + if (!(mutex = alloc_object( &mutex_sync_ops ))) return NULL; mutex->count = 0; mutex->owner = NULL; @@ -235,6 +237,8 @@ void abandon_mutexes( struct thread *thread ) mutex->abandoned = 1; do_release( mutex, thread, mutex->count ); } + + abandon_inproc_mutexes( thread->id ); }
static void mutex_dump( struct object *obj, int verbose ) diff --git a/server/object.h b/server/object.h index bf242d0ad6d..2a0ecfc2cf2 100644 --- a/server/object.h +++ b/server/object.h @@ -247,6 +247,10 @@ struct inproc_sync; extern int get_inproc_device_fd(void); extern int get_inproc_sync_fd( struct inproc_sync *sync ); extern struct inproc_sync *create_inproc_internal_sync( int manual, int signaled ); +extern struct inproc_sync *create_inproc_event_sync( int manual, int signaled ); +extern struct inproc_sync *create_inproc_semaphore_sync( unsigned int initial, unsigned int max ); +extern struct inproc_sync *create_inproc_mutex_sync( thread_id_t owner, unsigned int count ); +extern void abandon_inproc_mutexes( thread_id_t owner ); extern void signal_inproc_sync( struct inproc_sync *sync ); extern void reset_inproc_sync( struct inproc_sync *sync );
diff --git a/server/semaphore.c b/server/semaphore.c index 1b9109e1f98..69e4cef987e 100644 --- a/server/semaphore.c +++ b/server/semaphore.c @@ -134,6 +134,8 @@ static struct object *create_semaphore_sync( unsigned int initial, unsigned int { struct semaphore_sync *sem;
+ if (get_inproc_device_fd() >= 0) return (struct object *)create_inproc_semaphore_sync( initial, max ); + if (!(sem = alloc_object( &semaphore_sync_ops ))) return NULL; sem->count = initial; sem->max = max;
 
            From: Elizabeth Figura <zfigura@codeweavers.com>
--- dlls/ntdll/unix/server.c | 19 ++- dlls/ntdll/unix/sync.c | 239 ++++++++++++++++++++++++++++----- dlls/ntdll/unix/thread.c | 11 +- dlls/ntdll/unix/unix_private.h | 2 + 4 files changed, 239 insertions(+), 32 deletions(-)
diff --git a/dlls/ntdll/unix/server.c b/dlls/ntdll/unix/server.c index bbca17aee8f..ee34c5e9cd8 100644 --- a/dlls/ntdll/unix/server.c +++ b/dlls/ntdll/unix/server.c @@ -909,15 +909,24 @@ unsigned int server_queue_process_apc( HANDLE process, const union apc_call *cal } else { + sigset_t sigset; + NtWaitForSingleObject( handle, FALSE, NULL );
+ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* remove the handle from the cache, get_apc_result will close it for us */ + close_inproc_sync( handle ); + SERVER_START_REQ( get_apc_result ) { req->handle = wine_server_obj_handle( handle ); - if (!(ret = wine_server_call( req ))) *result = reply->result; + if (!(ret = server_call_unlocked( req ))) *result = reply->result; } SERVER_END_REQ;
+ server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + if (!ret && result->type == APC_NONE) continue; /* APC didn't run, try again */ } return ret; @@ -1846,12 +1855,17 @@ NTSTATUS WINAPI NtDuplicateObject( HANDLE source_process, HANDLE source, HANDLE return result.dup_handle.status; }
+ /* hold fd_cache_mutex to prevent the fd from being added again between the + * call to remove_fd_from_cache and close_handle */ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset );
/* always remove the cached fd; if the server request fails we'll just * retrieve it again */ if (options & DUPLICATE_CLOSE_SOURCE) + { fd = remove_fd_from_cache( source ); + close_inproc_sync( source ); + }
SERVER_START_REQ( dup_handle ) { @@ -1917,11 +1931,14 @@ NTSTATUS WINAPI NtClose( HANDLE handle ) if (HandleToLong( handle ) >= ~5 && HandleToLong( handle ) <= ~0) return STATUS_SUCCESS;
+ /* hold fd_cache_mutex to prevent the fd from being added again between the + * call to remove_fd_from_cache and close_handle */ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset );
/* always remove the cached fd; if the server request fails we'll just * retrieve it again */ fd = remove_fd_from_cache( handle ); + close_inproc_sync( handle );
SERVER_START_REQ( close_handle ) { diff --git a/dlls/ntdll/unix/sync.c b/dlls/ntdll/unix/sync.c index bd0fe666bd1..1660478b0e1 100644 --- a/dlls/ntdll/unix/sync.c +++ b/dlls/ntdll/unix/sync.c @@ -500,16 +500,164 @@ static NTSTATUS linux_wait_objs( int device, const DWORD count, const int *objs,
#endif /* NTSYNC_IOC_EVENT_READ */
+/* It's possible for synchronization primitives to remain alive even after being + * closed, because a thread is still waiting on them. It's rare in practice, and + * documented as being undefined behaviour by Microsoft, but it works, and some + * applications rely on it. This means we need to refcount handles, and defer + * deleting them on the server side until the refcount reaches zero. We do this + * by having each client process hold a handle to the in-process synchronization + * object, as well as a private refcount. When the client refcount reaches zero, + * it closes the handle; when all handles are closed, the server deletes the + * in-process synchronization object. + * + * We also need this for signal-and-wait. The signal and wait operations aren't + * atomic, but we can't perform the signal and then return STATUS_INVALID_HANDLE + * for the wait—we need to either do both operations or neither. That means we + * need to grab references to both objects, and prevent them from being + * destroyed before we're done with them. + * + * We want lookup of objects from the cache to be very fast; ideally, it should + * be lock-free. We achieve this by using atomic modifications to "refcount", + * and guaranteeing that all other fields are valid and correct *as long as* + * refcount is nonzero, and we store the entire structure in memory which will + * never be freed. + * + * This means that acquiring the object can't use a simple atomic increment; it + * has to use a compare-and-swap loop to ensure that it doesn't try to increment + * an object with a zero refcount. That's still leagues better than a real lock, + * though, and release can be a single atomic decrement. + * + * It also means that threads modifying the cache need to take a lock, to + * prevent other threads from writing to it concurrently. + * + * It's possible for an object currently in use (by a waiter) to be closed and + * the same handle immediately reallocated to a different object. 
This should be + * a very rare situation, and in that case we simply don't cache the handle. + */ struct inproc_sync { + LONG refcount; /* reference count of the sync object */ int fd; /* unix file descriptor */ unsigned int access; /* handle access rights */ - unsigned int type; /* enum inproc_sync_type */ + unsigned short type; /* enum inproc_sync_type as short to save space */ + unsigned short closed; /* fd has been closed but sync is still referenced */ };
+#define INPROC_SYNC_CACHE_BLOCK_SIZE (65536 / sizeof(struct inproc_sync)) +#define INPROC_SYNC_CACHE_ENTRIES 128 + +static struct inproc_sync *inproc_sync_cache[INPROC_SYNC_CACHE_ENTRIES]; +static struct inproc_sync inproc_sync_cache_initial_block[INPROC_SYNC_CACHE_BLOCK_SIZE]; + +static inline unsigned int inproc_sync_handle_to_index( HANDLE handle, unsigned int *entry ) +{ + unsigned int idx = (wine_server_obj_handle(handle) >> 2) - 1; + *entry = idx / INPROC_SYNC_CACHE_BLOCK_SIZE; + return idx % INPROC_SYNC_CACHE_BLOCK_SIZE; +} + +static struct inproc_sync *cache_inproc_sync( HANDLE handle, struct inproc_sync *sync ) +{ + unsigned int entry, idx = inproc_sync_handle_to_index( handle, &entry ); + struct inproc_sync *cache; + int refcount; + + /* don't cache pseudo-handles; waiting on them is pointless anyway */ + if ((ULONG)(ULONG_PTR)handle > 0xfffffffa) return sync; + + if (entry >= INPROC_SYNC_CACHE_ENTRIES) + { + FIXME( "too many allocated handles, not caching %p\n", handle ); + return sync; + } + + if (!inproc_sync_cache[entry]) /* do we need to allocate a new block of entries? */ + { + if (!entry) inproc_sync_cache[0] = inproc_sync_cache_initial_block; + else + { + static const size_t size = INPROC_SYNC_CACHE_BLOCK_SIZE * sizeof(struct inproc_sync); + void *ptr = anon_mmap_alloc( size, PROT_READ | PROT_WRITE ); + if (ptr == MAP_FAILED) return sync; + if (InterlockedCompareExchangePointer( (void **)&inproc_sync_cache[entry], ptr, NULL )) + munmap( ptr, size ); /* someone beat us to it */ + } + } + + cache = &inproc_sync_cache[entry][idx]; + + if (InterlockedCompareExchange( &cache->refcount, 0, 0 )) + { + /* The handle is currently being used for another object (i.e. it was + * closed and then reused, but some thread is waiting on the old handle + * or otherwise simultaneously using the old object). We can't cache + * this object until the old one is completely destroyed. 
*/ + return sync; + } + + cache->fd = sync->fd; + cache->access = sync->access; + cache->type = sync->type; + cache->closed = sync->closed; + /* Make sure we set the other members before the refcount; this store needs + * release semantics [paired with the load in get_cached_inproc_sync()]. + * Set the refcount to 2 (one for the handle, one for the caller). */ + refcount = InterlockedExchange( &cache->refcount, 2 ); + assert( !refcount ); + + assert( sync->refcount == 1 ); + memset( sync, 0, sizeof(*sync) ); + + return cache; +} + +/* returns the previous value */ +static inline LONG interlocked_inc_if_nonzero( LONG *dest ) +{ + LONG val, tmp; + for (val = *dest;; val = tmp) + { + if (!val || (tmp = InterlockedCompareExchange( dest, val + 1, val )) == val) + break; + } + return val; +} + static void release_inproc_sync( struct inproc_sync *sync ) { - close( sync->fd ); + /* save the fd now; as soon as the refcount hits 0 we cannot + * access the cache anymore */ + int fd = sync->fd; + LONG ref = InterlockedDecrement( &sync->refcount ); + + assert( ref >= 0 ); + if (!ref) close( fd ); +} + +static struct inproc_sync *get_cached_inproc_sync( HANDLE handle ) +{ + unsigned int entry, idx = inproc_sync_handle_to_index( handle, &entry ); + struct inproc_sync *cache; + + if (entry >= INPROC_SYNC_CACHE_ENTRIES || !inproc_sync_cache[entry]) return NULL; + + cache = &inproc_sync_cache[entry][idx]; + + /* this load needs acquire semantics [paired with the store in + * cache_inproc_sync()] */ + if (!interlocked_inc_if_nonzero( &cache->refcount )) return NULL; + + if (cache->closed) + { + /* The object is still being used, but "handle" has been closed. The + * handle value might have been reused for another object in the + * meantime, in which case we have to report that valid object, so + * force the caller to check the server. */ + release_inproc_sync( cache ); + return NULL; + } + + return cache; }
/* fd_cache_mutex must be held to avoid races with other thread receiving fds */ @@ -523,10 +671,12 @@ static NTSTATUS get_server_inproc_sync( HANDLE handle, struct inproc_sync *sync if (!(ret = wine_server_call( req ))) { obj_handle_t fd_handle; + sync->refcount = 1; sync->fd = wine_server_receive_fd( &fd_handle ); assert( wine_server_ptr_handle(fd_handle) == handle ); sync->access = reply->access; sync->type = reply->type; + sync->closed = 0; } } SERVER_END_REQ; @@ -534,20 +684,34 @@ static NTSTATUS get_server_inproc_sync( HANDLE handle, struct inproc_sync *sync return ret; }
+/* returns a pointer to a cache entry; if the object could not be cached, + * returns "cache" instead, which should be allocated on stack */ static NTSTATUS get_inproc_sync( HANDLE handle, enum inproc_sync_type desired_type, ACCESS_MASK desired_access, - struct inproc_sync *sync ) + struct inproc_sync *stack, struct inproc_sync **out ) { + struct inproc_sync *sync; sigset_t sigset; NTSTATUS ret;
- /* We need to use fd_cache_mutex here to protect against races with - * other threads trying to receive fds for the fd cache, - * and we need to use an uninterrupted section to prevent reentrancy. */ - server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); - ret = get_server_inproc_sync( handle, sync ); - server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + /* try to find it in the cache already */ + if ((sync = get_cached_inproc_sync( handle ))) ret = STATUS_SUCCESS; + else + { + /* We need to use fd_cache_mutex here to protect against races with + * other threads trying to receive fds for the fd cache, + * and we need to use an uninterrupted section to prevent reentrancy. + * We also need fd_cache_mutex to protect against the same race with + * NtClose, that is, to prevent the object from being cached again between + * close_inproc_sync() and close_handle. */ + server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + if ((sync = get_cached_inproc_sync( handle ))) ret = STATUS_SUCCESS; + else ret = get_server_inproc_sync( handle, stack ); + server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); + if (ret) return ret; + + if (!sync) sync = cache_inproc_sync( handle, stack ); + }
- if (ret) return ret; if (desired_type != INPROC_SYNC_UNKNOWN && desired_type != sync->type) { release_inproc_sync( sync ); @@ -559,6 +723,7 @@ static NTSTATUS get_inproc_sync( HANDLE handle, enum inproc_sync_type desired_ty return STATUS_ACCESS_DENIED; }
+ *out = sync; return STATUS_SUCCESS; }
@@ -583,13 +748,28 @@ extern NTSTATUS check_signal_access( struct inproc_sync *sync ) return STATUS_OBJECT_TYPE_MISMATCH; }
+/* caller must hold fd_cache_mutex */ +void close_inproc_sync( HANDLE handle ) +{ + struct inproc_sync *cache; + + if (inproc_device_fd < 0) return; + if ((cache = get_cached_inproc_sync( handle ))) + { + cache->closed = 1; + /* once for the reference we just grabbed, and once for the handle */ + release_inproc_sync( cache ); + release_inproc_sync( cache ); + } +} + static NTSTATUS inproc_release_semaphore( HANDLE handle, ULONG count, ULONG *prev_count ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_MODIFY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_MODIFY_STATE, &stack, &sync ))) return ret; ret = linux_release_semaphore_obj( sync->fd, count, prev_count ); release_inproc_sync( sync ); return ret; @@ -597,11 +777,11 @@ static NTSTATUS inproc_release_semaphore( HANDLE handle, ULONG count, ULONG *pre
static NTSTATUS inproc_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATION *info ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_QUERY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_SEMAPHORE, SEMAPHORE_QUERY_STATE, &stack, &sync ))) return ret; ret = linux_query_semaphore_obj( sync->fd, info ); release_inproc_sync( sync ); return ret; @@ -609,11 +789,11 @@ static NTSTATUS inproc_query_semaphore( HANDLE handle, SEMAPHORE_BASIC_INFORMATI
static NTSTATUS inproc_set_event( HANDLE handle, LONG *prev_state ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack, &sync ))) return ret; ret = linux_set_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; @@ -621,11 +801,11 @@ static NTSTATUS inproc_set_event( HANDLE handle, LONG *prev_state )
static NTSTATUS inproc_reset_event( HANDLE handle, LONG *prev_state ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack, &sync ))) return ret; ret = linux_reset_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; @@ -633,11 +813,11 @@ static NTSTATUS inproc_reset_event( HANDLE handle, LONG *prev_state )
static NTSTATUS inproc_pulse_event( HANDLE handle, LONG *prev_state ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_MODIFY_STATE, &stack, &sync ))) return ret; ret = linux_pulse_event_obj( sync->fd, prev_state ); release_inproc_sync( sync ); return ret; @@ -645,11 +825,11 @@ static NTSTATUS inproc_pulse_event( HANDLE handle, LONG *prev_state )
static NTSTATUS inproc_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_QUERY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_EVENT, EVENT_QUERY_STATE, &stack, &sync ))) return ret; ret = linux_query_event_obj( sync->fd, sync->type, info ); release_inproc_sync( sync ); return ret; @@ -657,11 +837,11 @@ static NTSTATUS inproc_query_event( HANDLE handle, EVENT_BASIC_INFORMATION *info
static NTSTATUS inproc_release_mutex( HANDLE handle, LONG *prev_count ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, 0, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, 0, &stack, &sync ))) return ret; ret = linux_release_mutex_obj( sync->fd, prev_count ); release_inproc_sync( sync ); return ret; @@ -669,11 +849,11 @@ static NTSTATUS inproc_release_mutex( HANDLE handle, LONG *prev_count )
static NTSTATUS inproc_query_mutex( HANDLE handle, MUTANT_BASIC_INFORMATION *info ) { - struct inproc_sync stack, *sync = &stack; + struct inproc_sync stack, *sync; NTSTATUS ret;
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED; - if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, MUTANT_QUERY_STATE, &stack ))) return ret; + if ((ret = get_inproc_sync( handle, INPROC_SYNC_MUTEX, MUTANT_QUERY_STATE, &stack, &sync ))) return ret; ret = linux_query_mutex_obj( sync->fd, info ); release_inproc_sync( sync ); return ret; @@ -718,12 +898,11 @@ static NTSTATUS inproc_wait( DWORD count, const HANDLE *handles, BOOLEAN wait_an assert( count <= ARRAY_SIZE(syncs) ); for (int i = 0; i < count; ++i) { - if ((ret = get_inproc_sync( handles[i], INPROC_SYNC_UNKNOWN, SYNCHRONIZE, &stack[i] ))) + if ((ret = get_inproc_sync( handles[i], INPROC_SYNC_UNKNOWN, SYNCHRONIZE, &stack[i], &syncs[i] ))) { while (i--) release_inproc_sync( syncs[i] ); return ret; } - syncs[i] = &stack[i]; objs[i] = syncs[i]->fd; }
@@ -743,10 +922,10 @@ static NTSTATUS inproc_signal_and_wait( HANDLE signal, HANDLE wait,
if (inproc_device_fd < 0) return STATUS_NOT_IMPLEMENTED;
- if ((ret = get_inproc_sync( signal, INPROC_SYNC_UNKNOWN, 0, signal_sync ))) return ret; + if ((ret = get_inproc_sync( signal, INPROC_SYNC_UNKNOWN, 0, &stack_signal, &signal_sync ))) return ret; if ((ret = check_signal_access( signal_sync ))) goto done;
- if ((ret = get_inproc_sync( wait, INPROC_SYNC_UNKNOWN, SYNCHRONIZE, wait_sync ))) goto done; + if ((ret = get_inproc_sync( wait, INPROC_SYNC_UNKNOWN, SYNCHRONIZE, &stack_wait, &wait_sync ))) goto done;
switch (signal_sync->type) { diff --git a/dlls/ntdll/unix/thread.c b/dlls/ntdll/unix/thread.c index 10ccfe9eba2..1d2ba4d62d9 100644 --- a/dlls/ntdll/unix/thread.c +++ b/dlls/ntdll/unix/thread.c @@ -1844,8 +1844,15 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma
if (ret == STATUS_PENDING) { + sigset_t sigset; + NtWaitForSingleObject( context_handle, FALSE, NULL );
+ server_enter_uninterrupted_section( &fd_cache_mutex, &sigset ); + + /* remove the handle from the cache, get_thread_context will close it for us */ + close_inproc_sync( context_handle ); + SERVER_START_REQ( get_thread_context ) { req->context = wine_server_obj_handle( context_handle ); @@ -1853,10 +1860,12 @@ NTSTATUS get_thread_context( HANDLE handle, void *context, BOOL *self, USHORT ma req->machine = machine; req->native_flags = flags & get_native_context_flags( native_machine, machine ); wine_server_set_reply( req, server_contexts, sizeof(server_contexts) ); - ret = wine_server_call( req ); + ret = server_call_unlocked( req ); count = wine_server_reply_size( reply ) / sizeof(server_contexts[0]); } SERVER_END_REQ; + + server_leave_uninterrupted_section( &fd_cache_mutex, &sigset ); } if (!ret && count) { diff --git a/dlls/ntdll/unix/unix_private.h b/dlls/ntdll/unix/unix_private.h index 3ef9041d64e..501eddb1fa3 100644 --- a/dlls/ntdll/unix/unix_private.h +++ b/dlls/ntdll/unix/unix_private.h @@ -390,6 +390,8 @@ extern NTSTATUS wow64_wine_spawnvp( void *args );
extern void dbg_init(void);
+extern void close_inproc_sync( HANDLE handle ); + extern NTSTATUS call_user_apc_dispatcher( CONTEXT *context_ptr, unsigned int flags, ULONG_PTR arg1, ULONG_PTR arg2, ULONG_PTR arg3, PNTAPCFUNC func, NTSTATUS status ); extern NTSTATUS call_user_exception_dispatcher( EXCEPTION_RECORD *rec, CONTEXT *context );
 
            This merge request was approved by Rémi Bernon.
 
            These are the remaining patches for ntsync, sending them all at once for visibility but also with a bit of hope they can make it in 10.16.
Thanks! I'm getting some test failures, but I'll try to put it into 10.16 anyway, hopefully we can address the failures later.
 
            One failure I know about is in kernel32:sync, I think it's enough of a corner case to not be worried about it. The test that fails was added specifically to check mutex ownership during the sync object refactor.
The owner thread gets terminated while every known handle for the owned mutex has already been closed, but while a thread is still waiting on it. From the wineserver's perspective the mutex doesn't exist anymore, and it has no way to know about the remaining waiters or owners; this would need to be resolved on the kernel side.
 
            The owner thread gets terminated while every known handle for the owned mutex has already been closed, but while a thread is still waiting on it. From the wineserver's perspective the mutex doesn't exist anymore, and it has no way to know about the remaining waiters or owners; this would need to be resolved on the kernel side.
Well, technically we could deal with it ourselves, by not losing track of the fd. An early revision which I don't think was ever submitted did this, by making inproc_sync a full object with a handle.
But it's also not the only thing we'll need to fix, so it's probably at least worth seeing if we can get a new API for this.
 
            This merge request was approved by Elizabeth Figura.




