From: William Horvath <william@horvath.blog>
Instead of first converting to milliseconds, then back to a nanosecond-precision timespec.
This mitigates a side effect of the global current_time and monotonic_time being updated on every server call's timeout, where the end time of any unrelated server call's wait is pushed further into the future (depending on the frequency of server calls).
By using a more granular timeout, the overall frequency of server calls has less of an effect on each individual timeout, since we no longer have to wait out an entire extra millisecond (a consequence of the ceiling operation in get_next_timeout).
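To illustrate the difference, here is a standalone sketch with made-up example values (wineserver timeouts are expressed in 100-nanosecond ticks): a timer due in 150 microseconds previously became a full 1ms poll timeout, while the direct timespec conversion preserves the exact value.

    #include <stdio.h>
    #include <time.h>

    typedef long long timeout_t;   /* 100-nanosecond ticks, as in the server */

    int main(void)
    {
        timeout_t diff = 1500;     /* hypothetical: timer due in 150us */

        /* old path: round up to whole milliseconds */
        int ms = (int)((diff + 9999) / 10000);

        /* new path: convert directly to a nanosecond-precision timespec */
        struct timespec ts;
        ts.tv_sec  = diff / 10000000;
        ts.tv_nsec = (diff % 10000000) * 100;

        /* prints "old: 1 ms, new: 0.000150000 s" */
        printf( "old: %d ms, new: %ld.%09ld s\n", ms, (long)ts.tv_sec, (long)ts.tv_nsec );
        return 0;
    }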
Wine-Bug: https://bugs.winehq.org/show_bug.cgi?id=57849
---
 server/fd.c | 72 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 38 insertions(+), 34 deletions(-)
diff --git a/server/fd.c b/server/fd.c
index ce32e7f8397..05df1215f7e 100644
--- a/server/fd.c
+++ b/server/fd.c
@@ -496,6 +496,7 @@ static int active_users;                    /* current number of active users */
 static int allocated_users;                 /* count of allocated entries in the array */
 static struct fd **freelist;                /* list of free entries in the array */
 
+static struct timespec *get_next_timeout_ts(void);
 static int get_next_timeout(void);
 
 static inline void fd_poll_event( struct fd *fd, int event )
@@ -662,27 +663,20 @@ static inline void remove_epoll_user( struct fd *fd, int user )
 
 static inline void main_loop_epoll(void)
 {
-    int i, ret, timeout;
+    int i, ret;
+    struct timespec *ts;
     struct kevent events[128];
 
     if (kqueue_fd == -1) return;
 
     while (active_users)
     {
-        timeout = get_next_timeout();
+        ts = get_next_timeout_ts();
 
         if (!active_users) break;  /* last user removed by a timeout */
         if (kqueue_fd == -1) break;  /* an error occurred with kqueue */
 
-        if (timeout != -1)
-        {
-            struct timespec ts;
-
-            ts.tv_sec = timeout / 1000;
-            ts.tv_nsec = (timeout % 1000) * 1000000;
-            ret = kevent( kqueue_fd, NULL, 0, events, ARRAY_SIZE( events ), &ts );
-        }
-        else ret = kevent( kqueue_fd, NULL, 0, events, ARRAY_SIZE( events ), NULL );
+        ret = kevent( kqueue_fd, NULL, 0, events, ARRAY_SIZE( events ), ts );
 
         set_current_time();
 
@@ -764,28 +758,21 @@ static inline void remove_epoll_user( struct fd *fd, int user )
 
 static inline void main_loop_epoll(void)
 {
-    int i, nget, ret, timeout;
+    int i, nget, ret;
+    struct timespec *ts;
     port_event_t events[128];
 
     if (port_fd == -1) return;
 
     while (active_users)
     {
-        timeout = get_next_timeout();
+        ts = get_next_timeout_ts();
         nget = 1;
 
         if (!active_users) break;  /* last user removed by a timeout */
         if (port_fd == -1) break;  /* an error occurred with event completion */
 
-        if (timeout != -1)
-        {
-            struct timespec ts;
-
-            ts.tv_sec = timeout / 1000;
-            ts.tv_nsec = (timeout % 1000) * 1000000;
-            ret = port_getn( port_fd, events, ARRAY_SIZE( events ), &nget, &ts );
-        }
-        else ret = port_getn( port_fd, events, ARRAY_SIZE( events ), &nget, NULL );
+        ret = port_getn( port_fd, events, ARRAY_SIZE( events ), &nget, ts );
 
         if (ret == -1) break;  /* an error occurred with event completion */
 
@@ -876,10 +863,11 @@ static void remove_poll_user( struct fd *fd, int user )
     active_users--;
 }
 
-/* process pending timeouts and return the time until the next timeout, in milliseconds */
-static int get_next_timeout(void)
+/* process pending timeouts and return the time until the next timeout, in a struct timespec (nanoseconds) */
+static struct timespec *get_next_timeout_ts(void)
 {
-    int ret = user_shared_data ? user_shared_data_timeout : -1;
+    static struct timespec ts;
+    timeout_t next_timeout = user_shared_data ? user_shared_data_timeout : -1;
 
     if (!list_empty( &abs_timeout_list ) || !list_empty( &rel_timeout_list ))
     {
@@ -924,22 +912,38 @@
         if ((ptr = list_head( &abs_timeout_list )) != NULL)
         {
             struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
-            timeout_t diff = (timeout->when - current_time + 9999) / 10000;
-            if (diff > INT_MAX) diff = INT_MAX;
-            else if (diff < 0) diff = 0;
-            if (ret == -1 || diff < ret) ret = diff;
+            timeout_t diff = timeout->when - current_time;
+            if (diff < 0) diff = 0;
+            if (next_timeout == -1 || diff < next_timeout) next_timeout = diff;
         }
 
         if ((ptr = list_head( &rel_timeout_list )) != NULL)
         {
             struct timeout_user *timeout = LIST_ENTRY( ptr, struct timeout_user, entry );
-            timeout_t diff = (-timeout->when - monotonic_time + 9999) / 10000;
-            if (diff > INT_MAX) diff = INT_MAX;
-            else if (diff < 0) diff = 0;
-            if (ret == -1 || diff < ret) ret = diff;
+            timeout_t diff = -timeout->when - monotonic_time;
+            if (diff < 0) diff = 0;
+            if (next_timeout == -1 || diff < next_timeout) next_timeout = diff;
         }
     }
-    return ret;
+
+    /* infinite */
+    if (next_timeout == -1) return NULL;
+
+    ts.tv_sec = next_timeout / 10000000;
+    ts.tv_nsec = (next_timeout % 10000000) * 100;
+    return &ts;
+}
+
+/* process pending timeouts and return the time until the next timeout, in milliseconds */
+static int get_next_timeout(void)
+{
+    struct timespec *ts = get_next_timeout_ts();
+
+    if (!ts) return -1;
+    if (ts->tv_sec >= INT_MAX / 1000) return INT_MAX;
+
+    /* ceil to avoid spinning */
+    return ts->tv_sec * 1000 + (ts->tv_nsec + 999999) / 1000000;
 }
 
 /* server main poll() loop */
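The millisecond get_next_timeout() wrapper is presumably kept for the plain poll() fallback path, since poll() only accepts an int timeout in milliseconds. As a minimal standalone sketch of why that wrapper rounds up rather than down (the helper name below is made up for illustration): flooring a 0.4ms remainder would hand poll() a timeout of 0 and make the main loop spin until the timer expires, while ceiling waits at most one extra millisecond.

    #include <stdio.h>
    #include <time.h>

    /* hypothetical helper mirroring the ceiling in the new get_next_timeout() */
    static int timespec_to_ms_ceil( const struct timespec *ts )
    {
        return (int)(ts->tv_sec * 1000 + (ts->tv_nsec + 999999) / 1000000);
    }

    int main(void)
    {
        struct timespec ts = { 0, 400000 };   /* 0.4ms remaining */

        /* prints "floor: 0 ms, ceil: 1 ms" */
        printf( "floor: %d ms, ceil: %d ms\n",
                (int)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000),
                timespec_to_ms_ceil( &ts ) );
        return 0;
    }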