#ifndef _GLIBCXX_ATOMIC_BASE_H
#define _GLIBCXX_ATOMIC_BASE_H 1

#pragma GCC system_header

#if __cplusplus > 201703L && _GLIBCXX_HOSTED
# include <bits/atomic_wait.h>
#endif

#ifndef _GLIBCXX_ALWAYS_INLINE
#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
#endif

namespace std _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

#if __cplusplus > 201703L
  /// Enumeration for memory_order
  enum class memory_order : int
    { relaxed, consume, acquire, release, acq_rel, seq_cst };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#endif
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };

  constexpr memory_order
  __cmpexch_failure_order2(memory_order __m) noexcept
  {
    return __m == memory_order_acq_rel ? memory_order_acquire
      : __m == memory_order_release ? memory_order_relaxed : __m;
  }

  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }

  constexpr bool
  __is_valid_cmpexch_failure_order(memory_order __m) noexcept
  {
    return (__m & __memory_order_mask) != memory_order_release
      && (__m & __memory_order_mask) != memory_order_acq_rel;
  }
  template<typename _IntTp>
    struct __atomic_base;

  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }

  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }

  /// kill_dependency
  template<typename _Tp>
    inline _Tp
    kill_dependency(_Tp __y) noexcept
    {
      _Tp __ret(__y);
      return __ret;
    }

#if __glibcxx_atomic_value_initialization
# define _GLIBCXX20_INIT(I) = I
#else
# define _GLIBCXX20_INIT(I)
#endif

#define ATOMIC_VAR_INIT(_VI) { _VI }
  template<typename _Tp>
    struct atomic;

  template<typename _Tp>
    struct atomic<_Tp*>;

  /* The target's "set" value for test-and-set may not be exactly 1.  */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  typedef unsigned char __atomic_flag_data_type;
#endif

  _GLIBCXX_BEGIN_EXTERN_C

  struct __atomic_flag_base
  {
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };

  _GLIBCXX_END_EXTERN_C

#define ATOMIC_FLAG_INIT { 0 }
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#ifdef __glibcxx_atomic_flag_test
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif // __glibcxx_atomic_flag_test

#if __glibcxx_atomic_wait
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old, memory_order __m = memory_order_seq_cst) const noexcept
    {
      const __atomic_flag_data_type __v
        = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
          [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
        = __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
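  // Illustrative usage sketch (not part of this header): the primitives above
  // back std::atomic_flag, which is enough for a minimal spinlock.  Assumes a
  // C++20 implementation for wait()/notify_one(); the names below are ours.
  //
  //   #include <atomic>
  //
  //   std::atomic_flag lock_flag = ATOMIC_FLAG_INIT;
  //
  //   void acquire_lock()
  //   {
  //     while (lock_flag.test_and_set(std::memory_order_acquire))
  //       lock_flag.wait(true, std::memory_order_relaxed);  // sleep until cleared
  //   }
  //
  //   void release_lock()
  //   {
  //     lock_flag.clear(std::memory_order_release);
  //     lock_flag.notify_one();
  //   }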
  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
      using value_type = _ITp;
      using difference_type = value_type;

    private:
      typedef _ITp __int_type;

      static constexpr int _S_alignment =
        sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);

      alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }

      operator __int_type() const noexcept
      { return load(); }

      operator __int_type() const volatile noexcept
      { return load(); }

      __int_type
      operator=(__int_type __i) noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator=(__int_type __i) volatile noexcept
      {
        store(__i);
        return __i;
      }

      __int_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __int_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __int_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __int_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }
      __int_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator+=(__int_type __i) volatile noexcept
      { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator-=(__int_type __i) volatile noexcept
      { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator&=(__int_type __i) volatile noexcept
      { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator|=(__int_type __i) volatile noexcept
      { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }

      __int_type
      operator^=(__int_type __i) volatile noexcept
      { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
      bool
      is_lock_free() const noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Use a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_i),
            reinterpret_cast<void *>(-_S_alignment));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__int_type __i,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_i, __i, int(__m));
      }
      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_i, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      exchange(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_i, __i, int(__m)); }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__int_type& __i1, __int_type __i2,
                            memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst) noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__int_type& __i1, __int_type __i2,
                              memory_order __m = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __cmpexch_failure_order(__m));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__int_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_i, __old,
            [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() noexcept
      { std::__atomic_notify_address(&_M_i, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() noexcept
      { std::__atomic_notify_address(&_M_i, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_add(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_sub(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_and(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_and(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_or(__int_type __i,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_or(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __int_type
      fetch_xor(__int_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
    };
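  // Illustrative usage sketch (not part of this header): std::atomic<int>
  // dispatches to the integral base class above.  A typical CAS loop built on
  // compare_exchange_weak; with a single order argument the failure order is
  // derived by __cmpexch_failure_order, which demotes release and acq_rel as
  // the standard requires.
  //
  //   #include <atomic>
  //
  //   int fetch_double(std::atomic<int>& a)
  //   {
  //     int expected = a.load(std::memory_order_relaxed);
  //     while (!a.compare_exchange_weak(expected, expected * 2,
  //                                     std::memory_order_acq_rel))
  //       { }  // on failure, expected is reloaded with the current value
  //     return expected;
  //   }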
  /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Factored out to facilitate explicit specialization.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
        store(__p);
        return __p;
      }

      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }
      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
                                  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(sizeof(_M_p),
            reinterpret_cast<void *>(-__alignof(_M_p)));
      }
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
            memory_order __m = memory_order_seq_cst) volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;

        __glibcxx_assert(__b != memory_order_acquire);
        __glibcxx_assert(__b != memory_order_acq_rel);
        __glibcxx_assert(__b != memory_order_consume);

        __atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
        memory_order __b __attribute__ ((__unused__))
          = __m & __memory_order_mask;
        __glibcxx_assert(__b != memory_order_release);
        __glibcxx_assert(__b != memory_order_acq_rel);

        return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_exchange_n(&_M_p, __p, int(__m)); }
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
                            memory_order __m1,
                            memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1, memory_order __m2) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
                              memory_order __m1,
                              memory_order __m2) volatile noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

        return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
                                           int(__m1), int(__m2));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
           memory_order __m = memory_order_seq_cst) const noexcept
      {
        std::__atomic_wait_address_v(&_M_p, __old,
            [__m, this] { return this->load(__m); });
      }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }
#endif // __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
    };
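  // Illustrative usage sketch (not part of this header): the pointer
  // specialization scales ptrdiff_t arguments through _S_type_size, so
  // std::atomic<T*>::fetch_add(1) advances by one element, exactly like
  // ordinary pointer arithmetic.  No bounds checking in this sketch.
  //
  //   #include <atomic>
  //
  //   int buffer[8] = {};
  //   std::atomic<int*> cursor{buffer};
  //
  //   int* claim_slot()
  //   { return cursor.fetch_add(1, std::memory_order_relaxed); }  // moves by sizeof(int) bytes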
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling.

    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
        return false;
#elif __has_builtin(__has_unique_object_representations)
        return !__has_unique_object_representations(_Tp)
          && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
        return true;
#endif
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
        auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
        if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
          __builtin_clear_padding(__ptr);
#endif
        return __ptr;
      }

    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
                         bool __is_weak,
                         memory_order __s, memory_order __f) noexcept
      {
        __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

        using _Vp = _Val<_Tp>;
        _Tp* const __pval = std::__addressof(__val);

        if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
          {
            return __atomic_compare_exchange(__pval, std::__addressof(__e),
                                             std::__addressof(__i), __is_weak,
                                             int(__s), int(__f));
          }
        else if constexpr (!_AtomicRef) // std::atomic<T>
          {
            // Clear padding of the value we want to store:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);
            // Only allowed to modify __e on failure, so work on a copy
            // with cleared padding:
            _Vp __exp = __e;
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

            if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                          __is_weak, int(__s), int(__f)))
              return true;

            // Value bits must differ; copy the observed value back to __e.
            __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
            return false;
          }
        else // std::atomic_ref<T>, where T might carry padding bits.
          {
            // Clear padding of the value we want to store:
            _Vp* const __pi = __atomic_impl::__clear_padding(__i);
            // Only allowed to modify __e on failure, so work on a copy
            // with cleared padding:
            _Vp __exp = __e;
            _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
            if (__atomic_compare_exchange(__pval, __pexp, __pi,
                                          __is_weak, int(__s), int(__f)))
              return true;

            // On failure the stored value may differ only in its padding
            // bits, so compare the value representations (the retry loop
            // that sets up __orig and __curr is elided in this excerpt):
            if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
                                 __atomic_impl::__clear_padding(__curr),
                                 sizeof(_Vp)))
              {
                // Value bits differ: genuine failure.
                // ...
              }
            // ...
          }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
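  // Illustrative sketch of why the padding handling above exists (not part of
  // this header): compare_exchange on a type with internal padding compares
  // whole object representations, so uncleared padding bits could cause
  // spurious mismatches.  The struct below is hypothetical.
  //
  //   #include <atomic>
  //
  //   struct Padded { char c; int i; };   // typically 3 padding bytes after c
  //
  //   bool bump(std::atomic<Padded>& a)
  //   {
  //     Padded expected = a.load();
  //     Padded desired{expected.c, expected.i + 1};
  //     // __clear_padding zeroes the padding of both operands, keeping the
  //     // byte-wise comparison meaningful:
  //     return a.compare_exchange_strong(expected, desired);
  //   }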
#if __cplusplus > 201703L
  // Implementation details of atomic_ref and atomic<floating-point>.
  namespace __atomic_impl
  {
    // Like _Val<T> above, but also usable for difference_type arguments.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;

    template<size_t _Size, size_t _Align>
      _GLIBCXX_ALWAYS_INLINE bool
      is_lock_free() noexcept
      {
        // Produce a fake, minimally aligned pointer.
        return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      { __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_load(__ptr, __dest, int(__m));
        return *__dest;
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
        alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
        auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
        __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
                          __dest, int(__m));
        return *__dest;
      }
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
                            _Val<_Tp> __desired, memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, true, __success, __failure);
      }

    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
                              _Val<_Tp> __desired, memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::__compare_exchange<_AtomicRef>(
                 *__ptr, __expected, __desired, false, __success, __failure);
      }

#if __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
           memory_order __m = memory_order_seq_cst) noexcept
      {
        std::__atomic_wait_address_v(__ptr, __old,
            [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
#endif // __glibcxx_atomic_wait
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
    template<typename _Tp>
      concept __atomic_fetch_addable
        = requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        if constexpr (__atomic_fetch_addable<_Tp>)
          return __atomic_fetch_add(__ptr, __i, int(__m));
        else
          {
            _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
            _Val<_Tp> __newval = __oldval + __i;
            while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                          memory_order_relaxed))
              __newval = __oldval + __i;
            return __oldval;
          }
      }

    template<typename _Tp>
      concept __atomic_fetch_subtractable
        = requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      {
        if constexpr (__atomic_fetch_subtractable<_Tp>)
          return __atomic_fetch_sub(__ptr, __i, int(__m));
        else
          {
            _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
            _Val<_Tp> __newval = __oldval - __i;
            while (!compare_exchange_weak(__ptr, __oldval, __newval, __m,
                                          memory_order_relaxed))
              __newval = __oldval - __i;
            return __oldval;
          }
      }

    template<typename _Tp>
      concept __atomic_add_fetchable
        = requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        if constexpr (__atomic_add_fetchable<_Tp>)
          return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
        else
          {
            _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
            _Val<_Tp> __newval = __oldval + __i;
            while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                          memory_order_seq_cst,
                                          memory_order_relaxed))
              __newval = __oldval + __i;
            return __newval;
          }
      }

    template<typename _Tp>
      concept __atomic_sub_fetchable
        = requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };

    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
      {
        if constexpr (__atomic_sub_fetchable<_Tp>)
          return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
        else
          {
            _Val<_Tp> __oldval = load(__ptr, memory_order_relaxed);
            _Val<_Tp> __newval = __oldval - __i;
            while (!compare_exchange_weak(__ptr, __oldval, __newval,
                                          memory_order_seq_cst,
                                          memory_order_relaxed))
              __newval = __oldval - __i;
            return __newval;
          }
      }
  } // namespace __atomic_impl
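  // Illustrative sketch (not part of this header): when the target lacks a
  // native __atomic_fetch_add for the type, __fetch_add_flt above falls back
  // to a compare_exchange_weak loop.  The same pattern written against the
  // public API:
  //
  //   #include <atomic>
  //
  //   double fetch_add_compat(std::atomic<double>& a, double i)
  //   {
  //     double oldval = a.load(std::memory_order_relaxed);
  //     while (!a.compare_exchange_weak(oldval, oldval + i,
  //                                     std::memory_order_seq_cst,
  //                                     std::memory_order_relaxed))
  //       { }  // oldval is refreshed on each failure
  //     return oldval;  // same result as fetch_add
  //   }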
  // Base class for atomic<floating-point-type>.
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      { __atomic_impl::__clear_padding(_M_fp); }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      _Fp
      operator=(_Fp __t) volatile noexcept
      {
        this->store(__t);
        return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
        this->store(__t);
        return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_weak(&_M_fp,
                                                    __expected, __desired,
                                                    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) volatile noexcept
      {
        return __atomic_impl::compare_exchange_strong(&_M_fp,
                                                      __expected, __desired,
                                                      __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
#undef _GLIBCXX20_INIT
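  // Illustrative usage sketch (not part of this header): __atomic_float is the
  // C++20 base of std::atomic<float>, std::atomic<double> and
  // std::atomic<long double>, adding fetch_add/fetch_sub and the += / -=
  // operators shown above.
  //
  //   #include <atomic>
  //
  //   std::atomic<double> total{0.0};
  //
  //   void accumulate(double x)
  //   { total.fetch_add(x, std::memory_order_relaxed); }  // routed to __fetch_add_flt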
  template<typename _Tp,
           bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
           bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;

  // Base class for a generic (non-integral, non-floating-point) atomic_ref.
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      static constexpr int _S_min_alignment
        = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
          ? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0); }
      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

    private:
      _Tp* _M_ptr;
    };
  // Base class for an integral atomic_ref.
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
        = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp
      operator=(_Tp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait
      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
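  // Illustrative usage sketch (not part of this header): the integral
  // specialization above underlies C++20 std::atomic_ref, which performs
  // atomic operations on an ordinary object it does not own.
  //
  //   #include <atomic>
  //
  //   alignas(std::atomic_ref<long>::required_alignment) long counter = 0;
  //
  //   void tick()
  //   {
  //     // every concurrent access to counter must go through an atomic_ref
  //     std::atomic_ref<long> ref(counter);
  //     ref.fetch_add(1, std::memory_order_relaxed);
  //   }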
  // Base class for a floating-point atomic_ref.
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
        = __atomic_always_lock_free(sizeof(_Fp), 0);

      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      { __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Fp
      operator=(_Fp __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>(); }
      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      value_type
      fetch_add(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
  // Base class for an atomic_ref to a pointer type.
  template<typename _Tp>
    struct __atomic_ref<_Tp*, false, false>
    {
    public:
      using value_type = _Tp*;
      using difference_type = ptrdiff_t;

      static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;

      static constexpr size_t required_alignment = __alignof__(_Tp*);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
      { __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0); }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      _Tp*
      operator=(_Tp* __t) const noexcept
      {
        this->store(__t);
        return __t;
      }

      operator _Tp*() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>(); }
      void
      store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp*
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp*
      exchange(_Tp* __desired,
               memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __success,
                            memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_weak<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __success,
                              memory_order __failure) const noexcept
      {
        return __atomic_impl::compare_exchange_strong<true>(
                 _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
                            memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_weak(__expected, __desired, __order,
                                     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
                              memory_order __order = memory_order_seq_cst)
      const noexcept
      {
        return compare_exchange_strong(__expected, __desired, __order,
                                       __cmpexch_failure_order(__order));
      }
#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }
#endif // __glibcxx_atomic_wait

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_add(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }

      _GLIBCXX_ALWAYS_INLINE value_type
      fetch_sub(difference_type __d,
                memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
      value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1)); }

      value_type
      operator+=(difference_type __d) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d)); }

      value_type
      operator-=(difference_type __d) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d)); }

    private:
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d) noexcept
      {
        static_assert(is_object_v<_Tp>);
        return __d * sizeof(_Tp);
      }

      _Tp** _M_ptr;
    };
#endif // __cplusplus > 201703L

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace std

#endif // _GLIBCXX_ATOMIC_BASE_H