From: Eric Pouech <epouech@codeweavers.com>
clang++ generates an error when an intrinsic function is redefined as inline. clang now provides intrinsic variants of the Interlocked*64 family on i386 machines.
So, we use the intrinsic variant for clang whenever __has_builtin() reports its presence.
Signed-off-by: Eric Pouech <epouech@codeweavers.com> --- include/winnt.h | 60 +++++++++++++++++++++++++++++++------------------ 1 file changed, 38 insertions(+), 22 deletions(-)
diff --git a/include/winnt.h b/include/winnt.h index 20db9a8aabd..fd7b37cb1c0 100644 --- a/include/winnt.h +++ b/include/winnt.h @@ -6745,6 +6745,12 @@ typedef enum _FIRMWARE_TYPE FirmwareTypeMax } FIRMWARE_TYPE, *PFIRMWARE_TYPE;
+#if (defined(__clang__) || defined(__GNUC__)) && defined(__has_builtin) +# define WINE__HAS_BUILTIN(x) __has_builtin(x) +#else +# define WINE__HAS_BUILTIN(x) 0 +#endif + /* Intrinsic functions */
#define BitScanForward _BitScanForward @@ -6809,63 +6815,73 @@ long _InterlockedOr(long volatile *,long); long _InterlockedXor(long volatile *,long); DECLSPEC_NORETURN void __fastfail(unsigned int);
-#ifndef __i386__ - +#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedAnd64) #pragma intrinsic(_InterlockedAnd64) -#pragma intrinsic(_InterlockedDecrement64) -#pragma intrinsic(_InterlockedExchangeAdd64) -#pragma intrinsic(_InterlockedIncrement64) -#pragma intrinsic(_InterlockedOr64) -#pragma intrinsic(_InterlockedXor64) - __int64 _InterlockedAnd64(__int64 volatile *, __int64); -__int64 _InterlockedDecrement64(__int64 volatile *); -__int64 _InterlockedExchangeAdd64(__int64 volatile *, __int64); -__int64 _InterlockedIncrement64(__int64 volatile *); -__int64 _InterlockedOr64(__int64 volatile *, __int64); -__int64 _InterlockedXor64(__int64 volatile *, __int64); - #else - static FORCEINLINE __int64 InterlockedAnd64( __int64 volatile *dest, __int64 val ) { __int64 prev; do prev = *dest; while (InterlockedCompareExchange64( dest, prev & val, prev ) != prev); return prev; } +#endif + +#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedDecrement64) +#pragma intrinsic(_InterlockedDecrement64) +__int64 _InterlockedDecrement64(__int64 volatile *); +#else +static FORCEINLINE __int64 InterlockedDecrement64( __int64 volatile *dest ) +{ + return InterlockedExchangeAdd64( dest, -1 ) - 1; +} +#endif
+#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedExchangeAdd64) +#pragma intrinsic(_InterlockedExchangeAdd64) +__int64 _InterlockedExchangeAdd64(__int64 volatile *, __int64); +#else static FORCEINLINE __int64 InterlockedExchangeAdd64( __int64 volatile *dest, __int64 val ) { __int64 prev; do prev = *dest; while (InterlockedCompareExchange64( dest, prev + val, prev ) != prev); return prev; } +#endif
+#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedIncrement64) +#pragma intrinsic(_InterlockedIncrement64) +__int64 _InterlockedIncrement64(__int64 volatile *); +#else static FORCEINLINE __int64 InterlockedIncrement64( __int64 volatile *dest ) { return InterlockedExchangeAdd64( dest, 1 ) + 1; } +#endif
-static FORCEINLINE __int64 InterlockedDecrement64( __int64 volatile *dest ) -{ - return InterlockedExchangeAdd64( dest, -1 ) - 1; -} - +#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedOr64) +#pragma intrinsic(_InterlockedOr64) +__int64 _InterlockedOr64(__int64 volatile *, __int64); +#else static FORCEINLINE __int64 InterlockedOr64( __int64 volatile *dest, __int64 val ) { __int64 prev; do prev = *dest; while (InterlockedCompareExchange64( dest, prev | val, prev ) != prev); return prev; } +#endif
+#if !defined(__i386__) || WINE__HAS_BUILTIN(_InterlockedXor64) +#pragma intrinsic(_InterlockedXor64) +__int64 _InterlockedXor64(__int64 volatile *, __int64); +#else static FORCEINLINE __int64 InterlockedXor64( __int64 volatile *dest, __int64 val ) { __int64 prev; do prev = *dest; while (InterlockedCompareExchange64( dest, prev ^ val, prev ) != prev); return prev; } - -#endif /* __i386__ */ +#endif
static FORCEINLINE long InterlockedAdd( long volatile *dest, long val ) {