Signed-off-by: Zebediah Figura <zfigura@codeweavers.com>
---
 server/fd.c | 61 ++++++++++++++++++++++++++++-------------------------
 1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/server/fd.c b/server/fd.c
index a09fc9edfcf..79ddd0a96a9 100644
--- a/server/fd.c
+++ b/server/fd.c
@@ -385,41 +385,44 @@ timeout_t monotonic_time;
 struct _KUSER_SHARED_DATA *user_shared_data = NULL;
 static const int user_shared_data_timeout = 16;
 
+static void atomic_store_ulong(volatile ULONG *ptr, ULONG value)
+{
+    /* on x86 there should be total store order guarantees, so volatile is
+     * enough to ensure the stores aren't reordered by the compiler, and then
+     * they will always be seen in-order from other CPUs. On other archs, we
+     * need atomic intrinsics to guarantee that. */
+#if defined(__i386__) || defined(__x86_64__)
+    *ptr = value;
+#else
+    __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
+#endif
+}
+
+static void atomic_store_long(volatile LONG *ptr, LONG value)
+{
+#if defined(__i386__) || defined(__x86_64__)
+    *ptr = value;
+#else
+    __atomic_store_n(ptr, value, __ATOMIC_SEQ_CST);
+#endif
+}
+
 static void set_user_shared_data_time(void)
 {
     timeout_t tick_count = monotonic_time / 10000;
 
-    /* on X86 there should be total store order guarantees, so volatile is enough
-     * to ensure the stores aren't reordered by the compiler, and then they will
-     * always be seen in-order from other CPUs. On other archs, we need atomic
-     * intrinsics to guarantee that. */
-#if defined(__i386__) || defined(__x86_64__)
-    user_shared_data->SystemTime.High2Time = current_time >> 32;
-    user_shared_data->SystemTime.LowPart = current_time;
-    user_shared_data->SystemTime.High1Time = current_time >> 32;
+    atomic_store_long(&user_shared_data->SystemTime.High2Time, current_time >> 32);
+    atomic_store_ulong(&user_shared_data->SystemTime.LowPart, current_time);
+    atomic_store_long(&user_shared_data->SystemTime.High1Time, current_time >> 32);
 
-    user_shared_data->InterruptTime.High2Time = monotonic_time >> 32;
-    user_shared_data->InterruptTime.LowPart = monotonic_time;
-    user_shared_data->InterruptTime.High1Time = monotonic_time >> 32;
+    atomic_store_long(&user_shared_data->InterruptTime.High2Time, monotonic_time >> 32);
+    atomic_store_ulong(&user_shared_data->InterruptTime.LowPart, monotonic_time);
+    atomic_store_long(&user_shared_data->InterruptTime.High1Time, monotonic_time >> 32);
 
-    user_shared_data->TickCount.High2Time = tick_count >> 32;
-    user_shared_data->TickCount.LowPart = tick_count;
-    user_shared_data->TickCount.High1Time = tick_count >> 32;
-    *(volatile ULONG *)&user_shared_data->TickCountLowDeprecated = tick_count;
-#else
-    __atomic_store_n(&user_shared_data->SystemTime.High2Time, current_time >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->SystemTime.LowPart, current_time, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->SystemTime.High1Time, current_time >> 32, __ATOMIC_SEQ_CST);
-
-    __atomic_store_n(&user_shared_data->InterruptTime.High2Time, monotonic_time >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->InterruptTime.LowPart, monotonic_time, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->InterruptTime.High1Time, monotonic_time >> 32, __ATOMIC_SEQ_CST);
-
-    __atomic_store_n(&user_shared_data->TickCount.High2Time, tick_count >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCount.LowPart, tick_count, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCount.High1Time, tick_count >> 32, __ATOMIC_SEQ_CST);
-    __atomic_store_n(&user_shared_data->TickCountLowDeprecated, tick_count, __ATOMIC_SEQ_CST);
-#endif
+    atomic_store_long(&user_shared_data->TickCount.High2Time, tick_count >> 32);
+    atomic_store_ulong(&user_shared_data->TickCount.LowPart, tick_count);
+    atomic_store_long(&user_shared_data->TickCount.High1Time, tick_count >> 32);
+    atomic_store_ulong(&user_shared_data->TickCountLowDeprecated, tick_count);
 }
 
 void set_current_time(void)