// NOTE(review): feature-detection / compiler-configuration fragment. The
// extraction has fused original file line numbers onto each line (e.g.
// "15#if"); code content is preserved byte-for-byte below.
15#if __cplusplus > 199711L || _MSC_VER >= 1700
// Cache-line size used for padding members to avoid false sharing (overridable).
33#ifndef MOODYCAMEL_CACHE_LINE_SIZE
34#define MOODYCAMEL_CACHE_LINE_SIZE 64
// Detect whether C++ exceptions are enabled for this compiler/build.
37#ifndef MOODYCAMEL_EXCEPTIONS_ENABLED
38#if (defined(_MSC_VER) && defined(_CPPUNWIND)) || (defined(__GNUC__) && defined(__EXCEPTIONS)) || (!defined(_MSC_VER) && !defined(__GNUC__))
39#define MOODYCAMEL_EXCEPTIONS_ENABLED
// Variadic emplace requires VS2013+ (_MSC_VER >= 1800) or any non-MSVC compiler.
43#ifndef MOODYCAMEL_HAS_EMPLACE
44#if !defined(_MSC_VER) || _MSC_VER >= 1800
45#define MOODYCAMEL_HAS_EMPLACE 1
// Silence MSVC warnings: structure padded due to alignment (4324), padding
// added (4820), conditional expression is constant (4127).
51#pragma warning(disable: 4324)
52#pragma warning(disable: 4820)
53#pragma warning(disable: 4127)
// ReaderWriterQueue<T, MAX_BLOCK_SIZE> constructor fragment: pre-allocates
// enough blocks (linked into a ring via ->next) to hold `maxSize` elements
// without further allocation. Fragment — interior lines/braces are missing
// from this extract.
58template<
typename T,
size_t MAX_BLOCK_SIZE = 512>
// Block capacities are powers of two so index wrapping can use sizeMask.
95 assert(MAX_BLOCK_SIZE == ceilToPow2(MAX_BLOCK_SIZE) &&
"MAX_BLOCK_SIZE must be a power of 2");
96 assert(MAX_BLOCK_SIZE >= 2 &&
"MAX_BLOCK_SIZE must be at least 2");
98 Block* firstBlock =
nullptr;
// +1 because one slot per block is kept unused to distinguish full from empty.
100 largestBlockSize = ceilToPow2(maxSize + 1);
101 if (largestBlockSize > MAX_BLOCK_SIZE * 2) {
// Requested capacity exceeds one max-sized block: build a ring of
// several MAX_BLOCK_SIZE blocks (each holds MAX_BLOCK_SIZE - 1 usable
// slots, hence the rounding expression).
107 size_t initialBlockCount = (maxSize + MAX_BLOCK_SIZE * 2 - 3) / (MAX_BLOCK_SIZE - 1);
108 largestBlockSize = MAX_BLOCK_SIZE;
109 Block* lastBlock =
nullptr;
110 for (
size_t i = 0; i != initialBlockCount; ++i) {
111 auto block = make_block(largestBlockSize);
112 if (block ==
nullptr) {
// Allocation failure: throw when exceptions are enabled (the
// non-throwing fallback lines are not visible in this extract).
113#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
114 throw std::bad_alloc();
119 if (firstBlock ==
nullptr) {
123 lastBlock->next = block;
// Close the ring: the last block points back at the first.
126 block->next = firstBlock;
// Small capacity: a single block suffices.
130 firstBlock = make_block(largestBlockSize);
131 if (firstBlock ==
nullptr) {
132#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
133 throw std::bad_alloc();
// A single block is its own one-element ring.
138 firstBlock->next = firstBlock;
140 frontBlock = firstBlock;
141 tailBlock = firstBlock;
// Full fence publishes the fully-constructed queue state to other threads.
144 fence(memory_order_sync);
// Move-constructor fragment: steals the source queue's block ring, then
// gives the source a fresh minimal block (size 32) so it remains a valid,
// empty queue after the move.
150 : frontBlock(other.frontBlock.load()),
151 tailBlock(other.tailBlock.load()),
152 largestBlockSize(other.largestBlockSize)
158 other.largestBlockSize = 32;
159 Block* b = other.make_block(other.largestBlockSize);
// make_block returned nullptr (check is in lines missing from this extract).
161#ifdef MOODYCAMEL_EXCEPTIONS_ENABLED
162 throw std::bad_alloc();
168 other.frontBlock = b;
// Move-assignment fragment: implemented as a swap of the front/tail block
// pointers and largestBlockSize with the other queue.
176 Block* b = frontBlock.load();
177 frontBlock = other.frontBlock.load();
178 other.frontBlock = b;
179 b = tailBlock.load();
180 tailBlock = other.tailBlock.load();
182 std::swap(largestBlockSize, other.largestBlockSize);
// Destructor fragment: synchronizes, then walks the circular block list
// once, destroying any elements still held in each block and freeing each
// block's raw storage (the free call itself is in lines missing here).
191 fence(memory_order_sync);
194 Block* frontBlock_ = frontBlock;
195 Block* block = frontBlock_;
197 Block* nextBlock = block->next;
198 size_t blockFront = block->front;
199 size_t blockTail = block->tail;
// Destroy the elements between front and tail; indices wrap with sizeMask
// since block capacity is a power of two.
201 for (
size_t i = blockFront; i != blockTail; i = (i + 1) & block->sizeMask) {
202 auto element =
reinterpret_cast<T*
>(block->data + i *
sizeof(T));
// rawThis is the original unaligned allocation pointer for this block.
207 auto rawBlock = block->rawThis;
211 }
while (block != frontBlock_);
// try_enqueue(const T&): copy-enqueue; returns false instead of allocating
// a new block when the queue is full (CannotAlloc policy).
218 AE_FORCEINLINE
bool try_enqueue(T
const& element)
220 return inner_enqueue<CannotAlloc>(element);
// try_enqueue(T&&): move-enqueue variant; same no-allocation policy.
226 AE_FORCEINLINE
bool try_enqueue(T&& element)
228 return inner_enqueue<CannotAlloc>(std::forward<T>(element));
231#if MOODYCAMEL_HAS_EMPLACE
// try_emplace: constructs the element in place from args; no allocation.
233 template<
typename... Args>
234 AE_FORCEINLINE
bool try_emplace(Args&&... args)
236 return inner_enqueue<CannotAlloc>(std::forward<Args>(args)...);
// enqueue(const T&): copy-enqueue; may allocate a new block when full.
243 AE_FORCEINLINE
bool enqueue(T
const& element)
245 return inner_enqueue<CanAlloc>(element);
// enqueue(T&&): move-enqueue; may allocate.
251 AE_FORCEINLINE
bool enqueue(T&& element)
253 return inner_enqueue<CanAlloc>(std::forward<T>(element));
256#if MOODYCAMEL_HAS_EMPLACE
// emplace: in-place construction; may allocate.
258 template<
typename... Args>
259 AE_FORCEINLINE
bool emplace(Args&&... args)
261 return inner_enqueue<CanAlloc>(std::forward<Args>(args)...);
// try_dequeue(U& result) fragment: moves the front element into `result`
// and returns true, or returns false when the queue appears empty (the
// return statements are in lines missing from this extract).
269 bool try_dequeue(U& result)
// Trips an assert on re-entrant dequeue from an element's ctor/dtor.
272 ReentrantGuard guard(this->dequeuing);
292 Block* frontBlock_ = frontBlock.load();
// localTail caches the producer-written tail so the shared atomic is only
// re-read (and the cache refreshed) when the block looks empty.
293 size_t blockTail = frontBlock_->localTail;
294 size_t blockFront = frontBlock_->front.load();
296 if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
297 fence(memory_order_acquire);
299 non_empty_front_block:
// Front block non-empty: move the element out, then advance and
// publish the new front index with a release fence.
301 auto element =
reinterpret_cast<T*
>(frontBlock_->data + blockFront *
sizeof(T));
302 result = std::move(*element);
305 blockFront = (blockFront + 1) & frontBlock_->sizeMask;
307 fence(memory_order_release);
308 frontBlock_->front = blockFront;
310 else if (frontBlock_ != tailBlock.load()) {
// Front block looked empty but another block exists. Re-check the
// front block first (the producer may have raced in an element),
// otherwise step to the next block in the ring.
311 fence(memory_order_acquire);
313 frontBlock_ = frontBlock.load();
314 blockTail = frontBlock_->localTail = frontBlock_->tail.load();
315 blockFront = frontBlock_->front.load();
316 fence(memory_order_acquire);
318 if (blockFront != blockTail) {
320 goto non_empty_front_block;
324 Block* nextBlock = frontBlock_->next;
329 size_t nextBlockFront = nextBlock->front.load();
330 size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
331 fence(memory_order_acquire);
// tailBlock differs from frontBlock here, so the next block cannot be empty.
335 assert(nextBlockFront != nextBlockTail);
336 AE_UNUSED(nextBlockTail);
// Publish the new front block before consuming from it.
339 fence(memory_order_release);
340 frontBlock = frontBlock_ = nextBlock;
342 compiler_fence(memory_order_release);
344 auto element =
reinterpret_cast<T*
>(frontBlock_->data + nextBlockFront *
sizeof(T));
346 result = std::move(*element);
349 nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
351 fence(memory_order_release);
352 frontBlock_->front = nextBlockFront;
// peek() fragment: returns a pointer to the front element without removing
// it; the empty-queue return (presumably nullptr — lines missing from this
// extract) is not visible. Same block-walking structure as try_dequeue.
371 ReentrantGuard guard(this->dequeuing);
375 Block* frontBlock_ = frontBlock.load();
376 size_t blockTail = frontBlock_->localTail;
377 size_t blockFront = frontBlock_->front.load();
379 if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
380 fence(memory_order_acquire);
381 non_empty_front_block:
382 return reinterpret_cast<T*
>(frontBlock_->data + blockFront *
sizeof(T));
384 else if (frontBlock_ != tailBlock.load()) {
// Front block drained; re-check it (producer race), else look at the
// next block in the ring.
385 fence(memory_order_acquire);
386 frontBlock_ = frontBlock.load();
387 blockTail = frontBlock_->localTail = frontBlock_->tail.load();
388 blockFront = frontBlock_->front.load();
389 fence(memory_order_acquire);
391 if (blockFront != blockTail) {
392 goto non_empty_front_block;
395 Block* nextBlock = frontBlock_->next;
397 size_t nextBlockFront = nextBlock->front.load();
398 fence(memory_order_acquire);
400 assert(nextBlockFront != nextBlock->tail.load());
401 return reinterpret_cast<T*
>(nextBlock->data + nextBlockFront *
sizeof(T));
// pop() fragment: removes the front element without returning it. Mirrors
// try_dequeue()'s structure; the element-destruction calls are in lines
// missing from this extract.
413 ReentrantGuard guard(this->dequeuing);
417 Block* frontBlock_ = frontBlock.load();
418 size_t blockTail = frontBlock_->localTail;
419 size_t blockFront = frontBlock_->front.load();
421 if (blockFront != blockTail || blockFront != (frontBlock_->localTail = frontBlock_->tail.load())) {
422 fence(memory_order_acquire);
424 non_empty_front_block:
425 auto element =
reinterpret_cast<T*
>(frontBlock_->data + blockFront *
sizeof(T));
// Advance and publish the new front index with a release fence.
428 blockFront = (blockFront + 1) & frontBlock_->sizeMask;
430 fence(memory_order_release);
431 frontBlock_->front = blockFront;
433 else if (frontBlock_ != tailBlock.load()) {
// Front block drained; re-check it, otherwise move on to the next
// block in the ring.
434 fence(memory_order_acquire);
435 frontBlock_ = frontBlock.load();
436 blockTail = frontBlock_->localTail = frontBlock_->tail.load();
437 blockFront = frontBlock_->front.load();
438 fence(memory_order_acquire);
440 if (blockFront != blockTail) {
441 goto non_empty_front_block;
445 Block* nextBlock = frontBlock_->next;
447 size_t nextBlockFront = nextBlock->front.load();
448 size_t nextBlockTail = nextBlock->localTail = nextBlock->tail.load();
449 fence(memory_order_acquire);
451 assert(nextBlockFront != nextBlockTail);
452 AE_UNUSED(nextBlockTail);
// Publish the new front block before consuming from it.
454 fence(memory_order_release);
455 frontBlock = frontBlock_ = nextBlock;
457 compiler_fence(memory_order_release);
459 auto element =
reinterpret_cast<T*
>(frontBlock_->data + nextBlockFront *
sizeof(T));
462 nextBlockFront = (nextBlockFront + 1) & frontBlock_->sizeMask;
464 fence(memory_order_release);
465 frontBlock_->front = nextBlockFront;
// size_approx() fragment: sums (tail - front) over every block in the ring.
// Only approximate while producer/consumer are concurrently active.
477 inline size_t size_approx()
const
480 Block* frontBlock_ = frontBlock.load();
481 Block* block = frontBlock_;
483 fence(memory_order_acquire);
484 size_t blockFront = block->front.load();
485 size_t blockTail = block->tail.load();
// Masked subtraction handles wrapped indices (capacity is a power of two).
486 result += (blockTail - blockFront) & block->sizeMask;
487 block = block->next.load();
488 }
while (block != frontBlock_);
// AllocationMode selects between the try_* entry points (CannotAlloc) and
// the plain enqueue/emplace entry points (CanAlloc) above.
494 enum AllocationMode { CanAlloc, CannotAlloc };
// inner_enqueue fragment: writes into the tail block if it has room,
// otherwise advances to the next block in the ring, otherwise (CanAlloc
// only) allocates a fresh larger block and splices it in.
496#if MOODYCAMEL_HAS_EMPLACE
497 template<AllocationMode canAlloc,
typename... Args>
498 bool inner_enqueue(Args&&... args)
500 template<AllocationMode canAlloc,
typename U>
501 bool inner_enqueue(U&& element)
// Trips an assert on re-entrant enqueue from an element's ctor.
505 ReentrantGuard guard(this->enqueuing);
515 Block* tailBlock_ = tailBlock.load();
// localFront caches the consumer-written front index; the shared atomic is
// only re-read (and the cache refreshed) when the block looks full.
516 size_t blockFront = tailBlock_->localFront;
517 size_t blockTail = tailBlock_->tail.load();
519 size_t nextBlockTail = (blockTail + 1) & tailBlock_->sizeMask;
520 if (nextBlockTail != blockFront || nextBlockTail != (tailBlock_->localFront = tailBlock_->front.load())) {
521 fence(memory_order_acquire);
// Room in the current tail block: placement-construct the element,
// then publish the new tail index with a release fence.
523 char* location = tailBlock_->data + blockTail *
sizeof(T);
524#if MOODYCAMEL_HAS_EMPLACE
525 new (location) T(std::forward<Args>(args)...);
527 new (location) T(std::forward<U>(element));
530 fence(memory_order_release);
531 tailBlock_->tail = nextBlockTail;
534 fence(memory_order_acquire);
535 if (tailBlock_->next.load() != frontBlock) {
// The next ring block is not in use by the consumer: start filling it.
541 fence(memory_order_acquire);
544 Block* tailBlockNext = tailBlock_->next.load();
545 size_t nextBlockFront = tailBlockNext->localFront = tailBlockNext->front.load();
546 nextBlockTail = tailBlockNext->tail.load();
547 fence(memory_order_acquire);
// The next block must be completely empty before the producer reuses it.
551 assert(nextBlockFront == nextBlockTail);
552 tailBlockNext->localFront = nextBlockFront;
554 char* location = tailBlockNext->data + nextBlockTail *
sizeof(T);
555#if MOODYCAMEL_HAS_EMPLACE
556 new (location) T(std::forward<Args>(args)...);
558 new (location) T(std::forward<U>(element));
561 tailBlockNext->tail = (nextBlockTail + 1) & tailBlockNext->sizeMask;
// Publish the new tail block.
563 fence(memory_order_release);
564 tailBlock = tailBlockNext;
566 else if (canAlloc == CanAlloc) {
// Ring is full: allocate a new block (doubling size until it reaches
// MAX_BLOCK_SIZE) and link it in after the current tail.
568 auto newBlockSize = largestBlockSize >= MAX_BLOCK_SIZE ? largestBlockSize : largestBlockSize * 2;
569 auto newBlock = make_block(newBlockSize);
570 if (newBlock ==
nullptr) {
574 largestBlockSize = newBlockSize;
576#if MOODYCAMEL_HAS_EMPLACE
577 new (newBlock->data) T(std::forward<Args>(args)...);
579 new (newBlock->data) T(std::forward<U>(element));
581 assert(newBlock->front == 0);
582 newBlock->tail = newBlock->localTail = 1;
584 newBlock->next = tailBlock_->next.load();
585 tailBlock_->next = newBlock;
593 fence(memory_order_release);
594 tailBlock = newBlock;
596 else if (canAlloc == CannotAlloc) {
// try_* path: a full queue simply fails (the return statement is in
// lines missing from this extract).
601 assert(
false &&
"Should be unreachable code");
// ceilToPow2 fragment: rounds x up to the next power of two via the classic
// bit-smearing loop over all bit widths of size_t (surrounding lines are
// missing from this extract).
618 AE_FORCEINLINE
static size_t ceilToPow2(
size_t x)
625 for (
size_t i = 1; i <
sizeof(size_t); i <<= 1) {
// align_for<U>: bumps ptr forward to the next alignment boundary of U
// (no-op when ptr is already aligned, thanks to the outer % alignment).
633 static AE_FORCEINLINE
char* align_for(
char* ptr)
635 const std::size_t alignment = std::alignment_of<U>::value;
636 return ptr + (alignment - (
reinterpret_cast<std::uintptr_t
>(ptr) % alignment)) % alignment;
// ReentrantGuard: RAII flag that marks an enqueue/dequeue as in progress so
// that re-entrant use (from an element's ctor/dtor) trips the assert below.
640 struct ReentrantGuard
642 ReentrantGuard(
bool& _inSection)
643 : inSection(_inSection)
645 assert(!inSection &&
"ReaderWriterQueue does not support enqueuing or dequeuing elements from other elements' ctors and dtors");
// Clears the flag on scope exit.
649 ~ReentrantGuard() { inSection =
false; }
// Non-assignable: declared but never defined (pre-C++11 deletion idiom).
652 ReentrantGuard& operator=(ReentrantGuard
const&);
// Block fragment: one fixed-capacity buffer in the queue's circular block
// list. Consumer-written and producer-written index fields are separated by
// cache-line padding to avoid false sharing between the two threads.
665 char cachelineFiller0[MOODYCAMEL_CACHE_LINE_SIZE -
sizeof(
weak_atomic<size_t>) -
sizeof(
size_t)];
669 char cachelineFiller1[MOODYCAMEL_CACHE_LINE_SIZE -
sizeof(
weak_atomic<size_t>) -
sizeof(
size_t)];
// Capacity is a power of two, so (index & sizeMask) wraps indices cheaply.
674 const size_t sizeMask;
// _rawThis is the original unaligned allocation pointer (needed to free);
// _data points at the aligned element storage.
678 Block(
size_t const& _size,
char* _rawThis,
char* _data)
679 : front(0), localTail(0), tail(0), localFront(0), next(
nullptr), data(_data), sizeMask(_size - 1), rawThis(_rawThis)
// Non-assignable (sizeMask is const): declared but never defined.
685 Block& operator=(Block
const&);
// make_block fragment: a single malloc sized for the Block header plus
// `capacity` elements, with slack bytes so both regions can be manually
// aligned; the Block is placement-constructed into the aligned header spot.
// Returns nullptr on allocation failure (that return is in missing lines).
692 static Block* make_block(
size_t capacity)
695 auto size =
sizeof(Block) + std::alignment_of<Block>::value - 1;
696 size +=
sizeof(T) * capacity + std::alignment_of<T>::value - 1;
697 auto newBlockRaw =
static_cast<char*
>(std::malloc(size));
698 if (newBlockRaw ==
nullptr) {
702 auto newBlockAligned = align_for<Block>(newBlockRaw);
703 auto newBlockData = align_for<T>(newBlockAligned +
sizeof(Block));
// rawThis keeps the unaligned pointer so the destructor can free it.
704 return new (newBlockAligned) Block(capacity, newBlockRaw, newBlockData);
// Largest block allocated so far; doubles (capped at MAX_BLOCK_SIZE) as
// the queue grows.
713 size_t largestBlockSize;
// BlockingReaderWriterQueue fragment: wraps a ReaderWriterQueue (`inner`)
// with a semaphore (`sema`) counting enqueued elements so the consumer can
// block. Each successful enqueue signals the semaphore (signal lines are
// missing from this extract); each dequeue waits on it first.
722template<
typename T,
size_t MAX_BLOCK_SIZE = 512>
// Forwarders to inner.try_enqueue/enqueue; signal sema on success.
737 AE_FORCEINLINE
bool try_enqueue(T
const& element)
739 if (inner.try_enqueue(element)) {
749 AE_FORCEINLINE
bool try_enqueue(T&& element)
751 if (inner.try_enqueue(std::forward<T>(element))) {
762 AE_FORCEINLINE
bool enqueue(T
const& element)
764 if (inner.enqueue(element)) {
774 AE_FORCEINLINE
bool enqueue(T&& element)
776 if (inner.enqueue(std::forward<T>(element))) {
// Non-blocking dequeue: only touches the inner queue if the semaphore says
// an element is available.
788 bool try_dequeue(U& result)
790 if (sema.tryWait()) {
791 bool success = inner.try_dequeue(result);
// Blocking dequeue: waits until an element arrives (the semaphore wait is
// in lines missing from this extract).
803 void wait_dequeue(U& result)
806 bool success = inner.try_dequeue(result);
// Timed blocking dequeue: returns false if the timeout (microseconds)
// elapses before an element arrives.
820 bool wait_dequeue_timed(U& result, std::int64_t timeout_usecs)
822 if (!sema.wait(timeout_usecs)) {
825 bool success = inner.try_dequeue(result);
833#if __cplusplus > 199711L || _MSC_VER >= 1700
// std::chrono overload (C++11+/VS2012+ only): converts the duration to
// microseconds and defers to the integer-timeout overload above.
840 template<
typename U,
typename Rep,
typename Period>
841 inline bool wait_dequeue_timed(U& result, std::chrono::duration<Rep, Period>
const& timeout)
843 return wait_dequeue_timed(result, std::chrono::duration_cast<std::chrono::microseconds>(timeout).count());
853 AE_FORCEINLINE T* peek()
// pop: discards one element if the semaphore says one is available.
861 AE_FORCEINLINE
bool pop()
863 if (sema.tryWait()) {
864 bool result = inner.pop();
// Approximate size is the semaphore's available count.
874 AE_FORCEINLINE
size_t size_approx()
const
876 return sema.availableApprox();
Definition: readerwriterqueue.h:724
Definition: readerwriterqueue.h:60
Definition: atomicops.h:562
Definition: atomicops.h:228