 libs/libcurl/src/easy_lock.h | 44 +++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 39 insertions(+), 5 deletions(-)
diff --git a/libs/libcurl/src/easy_lock.h b/libs/libcurl/src/easy_lock.h
index 819f50ce81..d96e56b8d8 100644
--- a/libs/libcurl/src/easy_lock.h
+++ b/libs/libcurl/src/easy_lock.h
@@ -28,17 +28,51 @@
#if defined(_WIN32_WINNT) && _WIN32_WINNT >= 0x600
+#ifdef __MINGW32__
+#ifndef __MINGW64_VERSION_MAJOR
+#if (__MINGW32_MAJOR_VERSION < 5) || \
+    (__MINGW32_MAJOR_VERSION == 5 && __MINGW32_MINOR_VERSION == 0)
+/* mingw >= 5.0.1 defines SRWLOCK itself (slightly differently from the MS define), so only provide it for older versions */
+typedef PVOID SRWLOCK, *PSRWLOCK;
+#endif
+#endif
+#ifndef SRWLOCK_INIT
+#define SRWLOCK_INIT NULL
+#endif
+#endif /* __MINGW32__ */
+
#define curl_simple_lock SRWLOCK
#define CURL_SIMPLE_LOCK_INIT SRWLOCK_INIT
#define curl_simple_lock_lock(m) AcquireSRWLockExclusive(m)
#define curl_simple_lock_unlock(m) ReleaseSRWLockExclusive(m)
-#elif defined (HAVE_ATOMIC)
+#elif defined(HAVE_ATOMIC) && defined(HAVE_STDATOMIC_H)
#include <stdatomic.h>
+#if defined(HAVE_SCHED_YIELD)
+#include <sched.h>
+#endif
+
-#define curl_simple_lock atomic_bool
-#define CURL_SIMPLE_LOCK_INIT false
+#define curl_simple_lock atomic_int
+#define CURL_SIMPLE_LOCK_INIT 0
+/* __has_builtin() is a clang extension; fall back to 0 where it is missing */
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#ifndef __INTEL_COMPILER
+/* The Intel compiler tries to look like GCC *and* clang *and* lies in its
+   __has_builtin() function, so override it. */
+
+/* if GCC on i386/x86_64 or if the built-in is present */
+#if ( (defined(__GNUC__) && !defined(__clang__)) && \
+      (defined(__i386__) || defined(__x86_64__))) || \
+    __has_builtin(__builtin_ia32_pause)
+#define HAVE_BUILTIN_IA32_PAUSE
+#endif
+
+#endif
static inline void curl_simple_lock_lock(curl_simple_lock *lock)
{
@@ -48,10 +82,10 @@ static inline void curl_simple_lock_lock(curl_simple_lock *lock)
    /* Reduce cache coherency traffic */
    while(atomic_load_explicit(lock, memory_order_relaxed)) {
      /* Reduce load (not mandatory) */
-#if defined(__i386__) || defined(__x86_64__)
+#ifdef HAVE_BUILTIN_IA32_PAUSE
      __builtin_ia32_pause();
#elif defined(__aarch64__)
-      asm volatile("yield" ::: "memory");
+      __asm__ volatile("yield" ::: "memory");
#elif defined(HAVE_SCHED_YIELD)
      sched_yield();
#endif
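
Notes on the locking patterns this patch touches, with minimal sketches.

First, the Windows branch maps curl_simple_lock straight onto the native slim
reader/writer lock, which is why the mingw hunk only has to supply the SRWLOCK
type and SRWLOCK_INIT for old mingw32 headers. A tiny standalone sketch,
assuming Windows Vista or later (_WIN32_WINNT >= 0x0600); demo_srw and
demo_win_locking are hypothetical names, not libcurl code:

#include <windows.h>

static SRWLOCK demo_srw = SRWLOCK_INIT;   /* mirrors CURL_SIMPLE_LOCK_INIT */

static void demo_win_locking(void)
{
  AcquireSRWLockExclusive(&demo_srw);     /* curl_simple_lock_lock(m) */
  /* critical section */
  ReleaseSRWLockExclusive(&demo_srw);     /* curl_simple_lock_unlock(m) */
}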
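
Next, the __has_builtin fallback and the GCC/Intel/clang checks in the first
hunk all compute one thing: whether a spin-wait hint is available. A hedged
sketch that folds the same detection into a helper; cpu_relax is a
hypothetical name and the empty fallback branch is an assumption, not part of
the patch:

#ifndef __has_builtin
#define __has_builtin(x) 0  /* older compilers (e.g. GCC < 10) lack this */
#endif

static inline void cpu_relax(void)
{
#if ((defined(__GNUC__) && !defined(__clang__)) && \
     (defined(__i386__) || defined(__x86_64__))) || \
    __has_builtin(__builtin_ia32_pause)
  __builtin_ia32_pause();                 /* x86 PAUSE: spin-wait hint */
#elif defined(__aarch64__)
  __asm__ volatile("yield" ::: "memory"); /* AArch64 spin-wait hint */
#else
  /* no hint available; sched_yield() could be called here when present */
#endif
}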
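
Finally, the second hunk tunes the body of an atomic spin lock. A minimal,
self-contained sketch of the same pattern, assuming C11 <stdatomic.h>; the
demo_* names are hypothetical and this illustrates the technique rather than
reproducing libcurl's exact code:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int demo_lock = 0;          /* mirrors CURL_SIMPLE_LOCK_INIT 0 */

static void demo_lock_acquire(atomic_int *lock)
{
  for(;;) {
    /* try to take the lock: swap in 1 with acquire ordering */
    if(!atomic_exchange_explicit(lock, 1, memory_order_acquire))
      break;
    /* wait on a relaxed load so contended waiters do not keep
       bouncing the cache line with read-modify-write traffic */
    while(atomic_load_explicit(lock, memory_order_relaxed))
      ;  /* a spin-wait hint such as the cpu_relax() sketch belongs here */
  }
}

static void demo_lock_release(atomic_int *lock)
{
  atomic_store_explicit(lock, 0, memory_order_release);
}

int main(void)
{
  demo_lock_acquire(&demo_lock);
  puts("inside the critical section");
  demo_lock_release(&demo_lock);
  return 0;
}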