Signed-off-by: Stefan Dösinger <stefan@codeweavers.com>
---
Verified with a stand-alone Visual Studio project. There's also a
lower-case _interlockedexchangeadd64 intrinsic for arm, arm64 and
x86_64, but not x86.
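Roughly the kind of check that project performs, as a sketch (not the
exact test code; per the above, both the pragma and the call build on
x86_64/arm/arm64 but fail on x86, where MSVC has no such intrinsic):

#include <intrin.h>

#pragma intrinsic(_InterlockedExchangeAdd64)

int main(void)
{
    volatile long long counter = 0;
    /* Atomically add 5 and return the previous value (0 here). */
    return (int)_InterlockedExchangeAdd64(&counter, 5);
}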
---
 include/winnt.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/winnt.h b/include/winnt.h
index 18d9fa656f8..66a591ddaf2 100644
--- a/include/winnt.h
+++ b/include/winnt.h
@@ -6292,7 +6292,6 @@ typedef enum _PROCESS_MITIGATION_POLICY
 #pragma intrinsic(_InterlockedCompareExchange64)
 #pragma intrinsic(_InterlockedExchange)
 #pragma intrinsic(_InterlockedExchangeAdd)
-#pragma intrinsic(_InterlockedExchangeAdd64)
 #pragma intrinsic(_InterlockedIncrement)
 #pragma intrinsic(_InterlockedIncrement16)
 #pragma intrinsic(_InterlockedDecrement)
@@ -6309,7 +6308,6 @@ long _InterlockedDecrement(long volatile*);
 short _InterlockedDecrement16(short volatile*);
 long _InterlockedExchange(long volatile*,long);
 long _InterlockedExchangeAdd(long volatile*,long);
-long long _InterlockedExchangeAdd64(long long volatile*,long long);
 long _InterlockedIncrement(long volatile*);
 short _InterlockedIncrement16(short volatile*);
 long _InterlockedOr(long volatile *,long);
@@ -6352,9 +6350,11 @@ static FORCEINLINE void MemoryBarrier(void)
 
 #elif defined(__x86_64__)
 
+#pragma intrinsic(_InterlockedExchangeAdd64)
 #pragma intrinsic(__faststorefence)
 
 void __faststorefence(void);
+long long _InterlockedExchangeAdd64(long long volatile *, long long);
 
 static FORCEINLINE void MemoryBarrier(void)
 {
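
On 32-bit x86, where neither spelling of the intrinsic is available,
a 64-bit atomic add can still be built from
_InterlockedCompareExchange64, which the unchanged section above keeps
declared for all MSVC targets. A minimal sketch of such a fallback
(the helper name is hypothetical, not part of this patch):

/* Hypothetical x86 fallback: emulate a 64-bit atomic add with a
 * compare-exchange loop (cmpxchg8b under the hood). */
static long long emulated_exchange_add64(long long volatile *dest, long long value)
{
    long long old;
    do
    {
        old = *dest;
    } while (_InterlockedCompareExchange64(dest, old + value, old) != old);
    return old; /* like _InterlockedExchangeAdd64: returns the prior value */
}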