ASPiK SDK
atomicops.h
// ©2013-2016 Cameron Desrochers.
// Distributed under the simplified BSD license (see the license file that
// should have come with this header).
// Uses Jeff Preshing's semaphore implementation (under the terms of its
// separate zlib license, embedded below).

#pragma once

// Provides portable (VC++2010+, Intel ICC 13, GCC 4.7+, and anything C++11 compliant) implementation
// of low-level memory barriers, plus a few semi-portable utility macros (for inlining and alignment).
// Also has a basic atomic type (limited to hardware-supported atomics with no memory ordering guarantees).
// Uses the AE_* prefix for macros (historical reasons), and the "moodycamel" namespace for symbols.

#include <cassert>
#include <type_traits>
#include <cerrno>
#include <cstdint>
#include <ctime>

// Platform detection
#if defined(__INTEL_COMPILER)
#define AE_ICC
#elif defined(_MSC_VER)
#define AE_VCPP
#elif defined(__GNUC__)
#define AE_GCC
#endif

#if defined(_M_IA64) || defined(__ia64__)
#define AE_ARCH_IA64
#elif defined(_WIN64) || defined(__amd64__) || defined(_M_X64) || defined(__x86_64__)
#define AE_ARCH_X64
#elif defined(_M_IX86) || defined(__i386__)
#define AE_ARCH_X86
#elif defined(_M_PPC) || defined(__powerpc__)
#define AE_ARCH_PPC
#else
#define AE_ARCH_UNKNOWN
#endif


// AE_UNUSED
#define AE_UNUSED(x) ((void)x)


// AE_FORCEINLINE
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_FORCEINLINE __forceinline
#elif defined(AE_GCC)
//#define AE_FORCEINLINE __attribute__((always_inline))
#define AE_FORCEINLINE inline
#else
#define AE_FORCEINLINE inline
#endif


// AE_ALIGN
#if defined(AE_VCPP) || defined(AE_ICC)
#define AE_ALIGN(x) __declspec(align(x))
#elif defined(AE_GCC)
#define AE_ALIGN(x) __attribute__((aligned(x)))
#else
// Assume GCC compliant syntax...
#define AE_ALIGN(x) __attribute__((aligned(x)))
#endif
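
// Illustrative usage of the macros above (added commentary, not part of the
// original header); the 64 here is an assumed cache-line size:
//
//   AE_ALIGN(64) static int padded_counter;                  // 64-byte aligned variable
//   AE_FORCEINLINE static int twice(int x) { return x * 2; } // hint to inline aggressively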


// Portable atomic fences implemented below:

namespace moodycamel {

enum memory_order {
	memory_order_relaxed,
	memory_order_acquire,
	memory_order_release,
	memory_order_acq_rel,
	memory_order_seq_cst,

	// memory_order_sync: Forces a full sync:
	// #LoadLoad, #LoadStore, #StoreStore, and most significantly, #StoreLoad
	memory_order_sync = memory_order_seq_cst
};

} // end namespace moodycamel

#if (defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))) || (defined(AE_ICC) && __INTEL_COMPILER < 1600)
// VS2010 and ICC13 don't support std::atomic_*_fence, implement our own fences

#include <intrin.h>

#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
#define AeFullSync _mm_mfence
#define AeLiteSync _mm_mfence
#elif defined(AE_ARCH_IA64)
#define AeFullSync __mf
#define AeLiteSync __mf
#elif defined(AE_ARCH_PPC)
#include <ppcintrinsics.h>
#define AeFullSync __sync
#define AeLiteSync __lwsync
#endif


#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4365) // Disable erroneous 'conversion from long to unsigned int, signed/unsigned mismatch' error when using `assert`
#ifdef __cplusplus_cli
#pragma managed(push, off)
#endif
#endif

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst: _ReadWriteBarrier(); break;
		default: assert(false);
	}
}

// x86/x64 have a strong memory model -- all loads and stores have
// acquire and release semantics automatically (so only need compiler
// barriers for those).
#if defined(AE_ARCH_X86) || defined(AE_ARCH_X64)
AE_FORCEINLINE void fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: _ReadBarrier(); break;
		case memory_order_release: _WriteBarrier(); break;
		case memory_order_acq_rel: _ReadWriteBarrier(); break;
		case memory_order_seq_cst:
			_ReadWriteBarrier();
			AeFullSync();
			_ReadWriteBarrier();
			break;
		default: assert(false);
	}
}
#else
AE_FORCEINLINE void fence(memory_order order)
{
	// Non-specialized arch, use heavier memory barriers everywhere just in case :-(
	switch (order) {
		case memory_order_relaxed:
			break;
		case memory_order_acquire:
			_ReadBarrier();
			AeLiteSync();
			_ReadBarrier();
			break;
		case memory_order_release:
			_WriteBarrier();
			AeLiteSync();
			_WriteBarrier();
			break;
		case memory_order_acq_rel:
			_ReadWriteBarrier();
			AeLiteSync();
			_ReadWriteBarrier();
			break;
		case memory_order_seq_cst:
			_ReadWriteBarrier();
			AeFullSync();
			_ReadWriteBarrier();
			break;
		default: assert(false);
	}
}
#endif
} // end namespace moodycamel
#else
// Use standard library of atomics
#include <atomic>

namespace moodycamel {

AE_FORCEINLINE void compiler_fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_signal_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_signal_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_signal_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_signal_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}

AE_FORCEINLINE void fence(memory_order order)
{
	switch (order) {
		case memory_order_relaxed: break;
		case memory_order_acquire: std::atomic_thread_fence(std::memory_order_acquire); break;
		case memory_order_release: std::atomic_thread_fence(std::memory_order_release); break;
		case memory_order_acq_rel: std::atomic_thread_fence(std::memory_order_acq_rel); break;
		case memory_order_seq_cst: std::atomic_thread_fence(std::memory_order_seq_cst); break;
		default: assert(false);
	}
}

} // end namespace moodycamel

#endif
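
// Illustrative sketch (added commentary, not part of the original header):
// the message-passing pattern these fences support. A release fence after
// writing data but before publishing a flag pairs with an acquire fence
// after observing the flag. `data` and `ready` are hypothetical shared
// variables (a plain int and a weak_atomic<int>, the type defined below):
//
//   // producer thread:
//   data = 42;
//   moodycamel::fence(moodycamel::memory_order_release);
//   ready = 1;
//
//   // consumer thread:
//   while (ready.load() == 0) { }
//   moodycamel::fence(moodycamel::memory_order_acquire);
//   assert(data == 42);   // the write to data is guaranteed to be visible here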


#if !defined(AE_VCPP) || (_MSC_VER >= 1700 && !defined(__cplusplus_cli))
#define AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#endif

#ifdef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
#include <atomic>
#endif
#include <utility>

// WARNING: *NOT* A REPLACEMENT FOR std::atomic. READ CAREFULLY:
// Provides basic support for atomic variables -- no memory ordering guarantees are provided.
// The guarantee of atomicity is only made for types that already have atomic load and store guarantees
// at the hardware level -- on most platforms this generally means aligned pointers and integers (only).
namespace moodycamel {
template<typename T>
class weak_atomic
{
public:
	weak_atomic() { }
#ifdef AE_VCPP
#pragma warning(push)
#pragma warning(disable: 4100) // Get rid of (erroneous) 'unreferenced formal parameter' warning
#endif
	template<typename U> weak_atomic(U&& x) : value(std::forward<U>(x)) { }
#ifdef __cplusplus_cli
	// Work around bug with universal reference/nullptr combination that only appears when /clr is on
	weak_atomic(nullptr_t) : value(nullptr) { }
#endif
	weak_atomic(weak_atomic const& other) : value(other.value) { }
	weak_atomic(weak_atomic&& other) : value(std::move(other.value)) { }
#ifdef AE_VCPP
#pragma warning(pop)
#endif

	AE_FORCEINLINE operator T() const { return load(); }


#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	template<typename U> AE_FORCEINLINE weak_atomic const& operator=(U&& x) { value = std::forward<U>(x); return *this; }
	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other) { value = other.value; return *this; }

	AE_FORCEINLINE T load() const { return value; }

	AE_FORCEINLINE T fetch_add_acquire(T increment)
	{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
		assert(false && "T must be either a 32 or 64 bit type");
		return value;
	}

	AE_FORCEINLINE T fetch_add_release(T increment)
	{
#if defined(AE_ARCH_X64) || defined(AE_ARCH_X86)
		if (sizeof(T) == 4) return _InterlockedExchangeAdd((long volatile*)&value, (long)increment);
#if defined(_M_AMD64)
		else if (sizeof(T) == 8) return _InterlockedExchangeAdd64((long long volatile*)&value, (long long)increment);
#endif
#else
#error Unsupported platform
#endif
		assert(false && "T must be either a 32 or 64 bit type");
		return value;
	}
#else
	template<typename U>
	AE_FORCEINLINE weak_atomic const& operator=(U&& x)
	{
		value.store(std::forward<U>(x), std::memory_order_relaxed);
		return *this;
	}

	AE_FORCEINLINE weak_atomic const& operator=(weak_atomic const& other)
	{
		value.store(other.value.load(std::memory_order_relaxed), std::memory_order_relaxed);
		return *this;
	}

	AE_FORCEINLINE T load() const { return value.load(std::memory_order_relaxed); }

	AE_FORCEINLINE T fetch_add_acquire(T increment)
	{
		return value.fetch_add(increment, std::memory_order_acquire);
	}

	AE_FORCEINLINE T fetch_add_release(T increment)
	{
		return value.fetch_add(increment, std::memory_order_release);
	}
#endif


private:
#ifndef AE_USE_STD_ATOMIC_FOR_WEAK_ATOMIC
	// No std::atomic support, but still need to circumvent compiler optimizations.
	// `volatile` will make memory access slow, but is guaranteed to be reliable.
	volatile T value;
#else
	std::atomic<T> value;
#endif
};

} // end namespace moodycamel
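
// Illustrative sketch (added commentary, not part of the original header):
// weak_atomic guarantees tear-free loads and stores but, per the warning
// above, no ordering by itself; combine it with fence()/compiler_fence()
// when ordering matters. `ready` is a hypothetical variable:
//
//   moodycamel::weak_atomic<int> ready(0);
//   ready = 1;                               // relaxed atomic store
//   int r = ready.load();                    // relaxed atomic load
//   int old = ready.fetch_add_acquire(-1);   // read-modify-write with acquire ordering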



// Portable single-producer, single-consumer semaphore below:

#if defined(_WIN32)
// Avoid including windows.h in a header; we only need a handful of
// items, so we'll redeclare them here (this is relatively safe since
// the API generally has to remain stable between Windows versions).
// I know this is an ugly hack but it still beats polluting the global
// namespace with thousands of generic names or adding a .cpp for nothing.
extern "C" {
	struct _SECURITY_ATTRIBUTES;
	__declspec(dllimport) void* __stdcall CreateSemaphoreW(_SECURITY_ATTRIBUTES* lpSemaphoreAttributes, long lInitialCount, long lMaximumCount, const wchar_t* lpName);
	__declspec(dllimport) int __stdcall CloseHandle(void* hObject);
	__declspec(dllimport) unsigned long __stdcall WaitForSingleObject(void* hHandle, unsigned long dwMilliseconds);
	__declspec(dllimport) int __stdcall ReleaseSemaphore(void* hSemaphore, long lReleaseCount, long* lpPreviousCount);
}
#elif defined(__MACH__)
#include <mach/mach.h>
#elif defined(__unix__)
#include <semaphore.h>
#endif

namespace moodycamel
{
	// Code in the spsc_sema namespace below is an adaptation of Jeff Preshing's
	// portable + lightweight semaphore implementations, originally from
	// https://github.com/preshing/cpp11-on-multicore/blob/master/common/sema.h
	// LICENSE:
	// Copyright (c) 2015 Jeff Preshing
	//
	// This software is provided 'as-is', without any express or implied
	// warranty. In no event will the authors be held liable for any damages
	// arising from the use of this software.
	//
	// Permission is granted to anyone to use this software for any purpose,
	// including commercial applications, and to alter it and redistribute it
	// freely, subject to the following restrictions:
	//
	// 1. The origin of this software must not be misrepresented; you must not
	//    claim that you wrote the original software. If you use this software
	//    in a product, an acknowledgement in the product documentation would be
	//    appreciated but is not required.
	// 2. Altered source versions must be plainly marked as such, and must not be
	//    misrepresented as being the original software.
	// 3. This notice may not be removed or altered from any source distribution.
	namespace spsc_sema
	{
#if defined(_WIN32)
		class Semaphore
		{
		private:
			void* m_hSema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				const long maxLong = 0x7fffffff;
				m_hSema = CreateSemaphoreW(nullptr, initialCount, maxLong, nullptr);
			}

			~Semaphore()
			{
				CloseHandle(m_hSema);
			}

			void wait()
			{
				const unsigned long infinite = 0xffffffff;
				WaitForSingleObject(m_hSema, infinite);
			}

			bool try_wait()
			{
				const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
				return WaitForSingleObject(m_hSema, 0) != RC_WAIT_TIMEOUT;
			}

			bool timed_wait(std::uint64_t usecs)
			{
				const unsigned long RC_WAIT_TIMEOUT = 0x00000102;
				return WaitForSingleObject(m_hSema, (unsigned long)(usecs / 1000)) != RC_WAIT_TIMEOUT;
			}

			void signal(int count = 1)
			{
				ReleaseSemaphore(m_hSema, count, nullptr);
			}
		};
#elif defined(__MACH__)
		//---------------------------------------------------------
		// Semaphore (Apple iOS and OSX)
		// Can't use POSIX semaphores due to http://lists.apple.com/archives/darwin-kernel/2009/Apr/msg00010.html
		//---------------------------------------------------------
		class Semaphore
		{
		private:
			semaphore_t m_sema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				semaphore_create(mach_task_self(), &m_sema, SYNC_POLICY_FIFO, initialCount);
			}

			~Semaphore()
			{
				semaphore_destroy(mach_task_self(), m_sema);
			}

			void wait()
			{
				semaphore_wait(m_sema);
			}

			bool try_wait()
			{
				return timed_wait(0);
			}

			bool timed_wait(std::int64_t timeout_usecs)
			{
				mach_timespec_t ts;
				ts.tv_sec = static_cast<unsigned int>(timeout_usecs / 1000000);
				ts.tv_nsec = (timeout_usecs % 1000000) * 1000;

				// added in OSX 10.10: https://developer.apple.com/library/prerelease/mac/documentation/General/Reference/APIDiffsMacOSX10_10SeedDiff/modules/Darwin.html
				kern_return_t rc = semaphore_timedwait(m_sema, ts);

				return rc != KERN_OPERATION_TIMED_OUT && rc != KERN_ABORTED;
			}

			void signal()
			{
				semaphore_signal(m_sema);
			}

			void signal(int count)
			{
				while (count-- > 0)
				{
					semaphore_signal(m_sema);
				}
			}
		};
#elif defined(__unix__)
		//---------------------------------------------------------
		// Semaphore (POSIX, Linux)
		//---------------------------------------------------------
		class Semaphore
		{
		private:
			sem_t m_sema;

			Semaphore(const Semaphore& other);
			Semaphore& operator=(const Semaphore& other);

		public:
			Semaphore(int initialCount = 0)
			{
				assert(initialCount >= 0);
				sem_init(&m_sema, 0, initialCount);
			}

			~Semaphore()
			{
				sem_destroy(&m_sema);
			}

			void wait()
			{
				// http://stackoverflow.com/questions/2013181/gdb-causes-sem-wait-to-fail-with-eintr-error
				int rc;
				do
				{
					rc = sem_wait(&m_sema);
				}
				while (rc == -1 && errno == EINTR);
			}

			bool try_wait()
			{
				int rc;
				do {
					rc = sem_trywait(&m_sema);
				} while (rc == -1 && errno == EINTR);
				return !(rc == -1 && errno == EAGAIN);
			}

			bool timed_wait(std::uint64_t usecs)
			{
				struct timespec ts;
				const int usecs_in_1_sec = 1000000;
				const int nsecs_in_1_sec = 1000000000;
				clock_gettime(CLOCK_REALTIME, &ts);
				ts.tv_sec += usecs / usecs_in_1_sec;
				ts.tv_nsec += (usecs % usecs_in_1_sec) * 1000;
				// sem_timedwait bombs if you have more than 1e9 in tv_nsec
				// so we have to clean things up before passing it in
				if (ts.tv_nsec >= nsecs_in_1_sec) {
					ts.tv_nsec -= nsecs_in_1_sec;
					++ts.tv_sec;
				}

				int rc;
				do {
					rc = sem_timedwait(&m_sema, &ts);
				} while (rc == -1 && errno == EINTR);
				return !(rc == -1 && errno == ETIMEDOUT);
			}

			void signal()
			{
				sem_post(&m_sema);
			}

			void signal(int count)
			{
				while (count-- > 0)
				{
					sem_post(&m_sema);
				}
			}
		};
#else
#error Unsupported platform! (No semaphore wrapper available)
#endif

		//---------------------------------------------------------
		// LightweightSemaphore
		//---------------------------------------------------------
		class LightweightSemaphore
		{
		public:
			typedef std::make_signed<std::size_t>::type ssize_t;

		private:
			weak_atomic<ssize_t> m_count;
			Semaphore m_sema;
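
			// Added commentary (not part of the original header): m_count acts as a
			// "benaphore" counter. A positive value is the number of tokens that can be
			// claimed without touching the kernel semaphore; when it goes negative, its
			// magnitude is the number of waiters parked (or about to park) on m_sema.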

			bool waitWithPartialSpinning(std::int64_t timeout_usecs = -1)
			{
				ssize_t oldCount;
				// Is there a better way to set the initial spin count?
				// If we lower it to 1000, testBenaphore becomes 15x slower on my Core i7-5930K Windows PC,
				// as threads start hitting the kernel semaphore.
				int spin = 10000;
				while (--spin >= 0)
				{
					if (m_count.load() > 0)
					{
						m_count.fetch_add_acquire(-1);
						return true;
					}
					compiler_fence(memory_order_acquire); // Prevent the compiler from collapsing the loop.
				}
				oldCount = m_count.fetch_add_acquire(-1);
				if (oldCount > 0)
					return true;
				if (timeout_usecs < 0)
				{
					m_sema.wait();
					return true;
				}
				if (m_sema.timed_wait(timeout_usecs))
					return true;
				// At this point, we've timed out waiting for the semaphore, but the
				// count is still decremented indicating we may still be waiting on
				// it. So we have to re-adjust the count, but only if the semaphore
				// wasn't signaled enough times for us too since then. If it was, we
				// need to release the semaphore too.
				while (true)
				{
					oldCount = m_count.fetch_add_release(1);
					if (oldCount < 0)
						return false; // successfully restored things to the way they were
					// Oh, the producer thread just signaled the semaphore after all. Try again:
					oldCount = m_count.fetch_add_acquire(-1);
					if (oldCount > 0 && m_sema.try_wait())
						return true;
				}
			}

		public:
			LightweightSemaphore(ssize_t initialCount = 0) : m_count(initialCount)
			{
				assert(initialCount >= 0);
			}

			bool tryWait()
			{
				if (m_count.load() > 0)
				{
					m_count.fetch_add_acquire(-1);
					return true;
				}
				return false;
			}

			void wait()
			{
				if (!tryWait())
					waitWithPartialSpinning();
			}

			bool wait(std::int64_t timeout_usecs)
			{
				return tryWait() || waitWithPartialSpinning(timeout_usecs);
			}

			void signal(ssize_t count = 1)
			{
				assert(count >= 0);
				ssize_t oldCount = m_count.fetch_add_release(count);
				assert(oldCount >= -1);
				if (oldCount < 0)
				{
					m_sema.signal(1);
				}
			}

			ssize_t availableApprox() const
			{
				ssize_t count = m_count.load();
				return count > 0 ? count : 0;
			}
		};
	} // end namespace spsc_sema
} // end namespace moodycamel
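
// Illustrative sketch (added commentary, not part of the original header):
// a minimal single-producer/single-consumer handshake. `queue` is a
// hypothetical SPSC data structure; the semaphore counts items in it:
//
//   moodycamel::spsc_sema::LightweightSemaphore items;
//
//   // producer thread:
//   // queue.push(x);   // publish the item first...
//   items.signal();     // ...then release one token
//
//   // consumer thread:
//   items.wait();       // spins briefly, then blocks until a token is available
//   // x = queue.pop(); // exactly one item is now guaranteed to be present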

#if defined(AE_VCPP) && (_MSC_VER < 1700 || defined(__cplusplus_cli))
#pragma warning(pop)
#ifdef __cplusplus_cli
#pragma managed(pop)
#endif
#endif