libstdc++
atomic_base.h
1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2025 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
37#include <bits/c++config.h>
38#include <new> // For placement new
40#include <bits/move.h>
41
42#if __cplusplus > 201703L && _GLIBCXX_HOSTED
43#include <bits/atomic_wait.h>
44#endif
45
46#ifndef _GLIBCXX_ALWAYS_INLINE
47#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
48#endif
49
50#include <bits/version.h>
51
52namespace std _GLIBCXX_VISIBILITY(default)
53{
54_GLIBCXX_BEGIN_NAMESPACE_VERSION
55
56 /**
57 * @defgroup atomics Atomics
58 *
59 * Components for performing atomic operations.
60 * @{
61 */
62
63 /// Enumeration for memory_order
64#if __cplusplus > 201703L
65 enum class memory_order : int
66 {
67 relaxed,
68 consume,
69 acquire,
70 release,
71 acq_rel,
72 seq_cst
73 };
74
75 inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
76 inline constexpr memory_order memory_order_consume = memory_order::consume;
77 inline constexpr memory_order memory_order_acquire = memory_order::acquire;
78 inline constexpr memory_order memory_order_release = memory_order::release;
79 inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
80 inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
81#else
82 enum memory_order : int
83 {
84 memory_order_relaxed,
85 memory_order_consume,
86 memory_order_acquire,
87 memory_order_release,
88 memory_order_acq_rel,
89 memory_order_seq_cst
90 };
91#endif
92
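  // Illustrative sketch (not part of this header): the six orderings above
  // are normally used through std::atomic.  A minimal release/acquire
  // pairing, assuming <atomic> and <thread> are available:
  //
  //   std::atomic<int>  data{0};
  //   std::atomic<bool> ready{false};
  //
  //   void producer()
  //   {
  //     data.store(42, std::memory_order_relaxed);
  //     ready.store(true, std::memory_order_release);    // publish
  //   }
  //
  //   void consumer()
  //   {
  //     while (!ready.load(std::memory_order_acquire))   // synchronizes with
  //       ;                                              // the release store
  //     int v = data.load(std::memory_order_relaxed);    // observes 42
  //   }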
93 /// @cond undocumented
94 enum __memory_order_modifier
95 {
96 __memory_order_mask = 0x0ffff,
97 __memory_order_modifier_mask = 0xffff0000,
98 __memory_order_hle_acquire = 0x10000,
99 __memory_order_hle_release = 0x20000
100 };
101 /// @endcond
102
103 constexpr memory_order
104 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
105 {
106 return memory_order(int(__m) | int(__mod));
107 }
108
109 constexpr memory_order
110 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
111 {
112 return memory_order(int(__m) & int(__mod));
113 }
114
115 /// @cond undocumented
116
117 // Drop release ordering as per [atomics.types.operations.req]/21
118 constexpr memory_order
119 __cmpexch_failure_order2(memory_order __m) noexcept
120 {
121 return __m == memory_order_acq_rel ? memory_order_acquire
122 : __m == memory_order_release ? memory_order_relaxed : __m;
123 }
124
125 constexpr memory_order
126 __cmpexch_failure_order(memory_order __m) noexcept
127 {
128 return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
129 | __memory_order_modifier(__m & __memory_order_modifier_mask));
130 }
131
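  // Illustrative note (not part of this header): the mapping above means the
  // combined success orders degrade for the failure case, e.g. acq_rel
  // becomes acquire and release becomes relaxed.  Using the internal helper
  // just defined (reserved name, shown only for illustration):
  //
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_acq_rel)
  //                   == std::memory_order_acquire, "");
  //   static_assert(std::__cmpexch_failure_order(std::memory_order_release)
  //                   == std::memory_order_relaxed, "");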
132 constexpr bool
133 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
134 {
135 return (__m & __memory_order_mask) != memory_order_release
136 && (__m & __memory_order_mask) != memory_order_acq_rel;
137 }
138
139 // Base types for atomics.
140 template<typename _IntTp>
141 struct __atomic_base;
142
143 /// @endcond
144
145 _GLIBCXX_ALWAYS_INLINE void
146 atomic_thread_fence(memory_order __m) noexcept
147 { __atomic_thread_fence(int(__m)); }
148
149 _GLIBCXX_ALWAYS_INLINE void
150 atomic_signal_fence(memory_order __m) noexcept
151 { __atomic_signal_fence(int(__m)); }
152
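  // Illustrative sketch (not part of this header): atomic_thread_fence can
  // provide the ordering once, instead of per operation, when several
  // relaxed accesses are published together.  Hypothetical example:
  //
  //   std::atomic<bool> flag{false};
  //   int a = 0, b = 0;                 // plain data written before the fence
  //
  //   void writer()
  //   {
  //     a = 1;
  //     b = 2;
  //     std::atomic_thread_fence(std::memory_order_release);
  //     flag.store(true, std::memory_order_relaxed);
  //   }
  //
  //   void reader()
  //   {
  //     while (!flag.load(std::memory_order_relaxed)) { }
  //     std::atomic_thread_fence(std::memory_order_acquire);
  //     // a == 1 and b == 2 are visible here.
  //   }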
153 /// kill_dependency
154 template<typename _Tp>
155 inline _Tp
156 kill_dependency(_Tp __y) noexcept
157 {
158 _Tp __ret(__y);
159 return __ret;
160 }
161
162/// @cond undocumented
163#if __glibcxx_atomic_value_initialization
164# define _GLIBCXX20_INIT(I) = I
165#else
166# define _GLIBCXX20_INIT(I)
167#endif
168/// @endcond
169
170#define ATOMIC_VAR_INIT(_VI) { _VI }
171
172 template<typename _Tp>
173 struct atomic;
174
175 template<typename _Tp>
176 struct atomic<_Tp*>;
177
178 /* The target's "set" value for test-and-set may not be exactly 1. */
179#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
180 typedef bool __atomic_flag_data_type;
181#else
182 typedef unsigned char __atomic_flag_data_type;
183#endif
184
185 /// @cond undocumented
186
187 /*
188 * Base type for atomic_flag.
189 *
190 * Base type is POD with data, allowing atomic_flag to derive from
191 * it and meet the standard layout type requirement. In addition to
192 * compatibility with a C interface, this allows different
193 * implementations of atomic_flag to use the same atomic operation
194 * functions, via a standard conversion to the __atomic_flag_base
195 * argument.
196 */
197 _GLIBCXX_BEGIN_EXTERN_C
198
199 struct __atomic_flag_base
200 {
201 __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
202 };
203
204 _GLIBCXX_END_EXTERN_C
205
206 /// @endcond
207
208#define ATOMIC_FLAG_INIT { 0 }
209
210 /// atomic_flag
211 struct atomic_flag : public __atomic_flag_base
212 {
213 atomic_flag() noexcept = default;
214 ~atomic_flag() noexcept = default;
215 atomic_flag(const atomic_flag&) = delete;
216 atomic_flag& operator=(const atomic_flag&) = delete;
217 atomic_flag& operator=(const atomic_flag&) volatile = delete;
218
219 // Conversion to ATOMIC_FLAG_INIT.
220 constexpr atomic_flag(bool __i) noexcept
221 : __atomic_flag_base{ _S_init(__i) }
222 { }
223
224 _GLIBCXX_ALWAYS_INLINE bool
225 test_and_set(memory_order __m = memory_order_seq_cst) noexcept
226 {
227 return __atomic_test_and_set (&_M_i, int(__m));
228 }
229
230 _GLIBCXX_ALWAYS_INLINE bool
231 test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
232 {
233 return __atomic_test_and_set (&_M_i, int(__m));
234 }
235
236#ifdef __glibcxx_atomic_flag_test // C++ >= 20
237 _GLIBCXX_ALWAYS_INLINE bool
238 test(memory_order __m = memory_order_seq_cst) const noexcept
239 {
240 __atomic_flag_data_type __v;
241 __atomic_load(&_M_i, &__v, int(__m));
242 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
243 }
244
245 _GLIBCXX_ALWAYS_INLINE bool
246 test(memory_order __m = memory_order_seq_cst) const volatile noexcept
247 {
248 __atomic_flag_data_type __v;
249 __atomic_load(&_M_i, &__v, int(__m));
250 return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
251 }
252#endif
253
254#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
255 _GLIBCXX_ALWAYS_INLINE void
256 wait(bool __old,
257 memory_order __m = memory_order_seq_cst) const noexcept
258 {
259 const __atomic_flag_data_type __v
260 = __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;
261
262 std::__atomic_wait_address_v(&_M_i, __v,
263 [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
264 }
265
266 // TODO add const volatile overload
267
268 _GLIBCXX_ALWAYS_INLINE void
269 notify_one() noexcept
270 { std::__atomic_notify_address(&_M_i, false); }
271
272 // TODO add const volatile overload
273
274 _GLIBCXX_ALWAYS_INLINE void
275 notify_all() noexcept
276 { std::__atomic_notify_address(&_M_i, true); }
277
278 // TODO add const volatile overload
279#endif // __glibcxx_atomic_wait
280
281 _GLIBCXX_ALWAYS_INLINE void
282 clear(memory_order __m = memory_order_seq_cst) noexcept
283 {
284 memory_order __b __attribute__ ((__unused__))
285 = __m & __memory_order_mask;
286 __glibcxx_assert(__b != memory_order_consume);
287 __glibcxx_assert(__b != memory_order_acquire);
288 __glibcxx_assert(__b != memory_order_acq_rel);
289
290 __atomic_clear (&_M_i, int(__m));
291 }
292
293 _GLIBCXX_ALWAYS_INLINE void
294 clear(memory_order __m = memory_order_seq_cst) volatile noexcept
295 {
296 memory_order __b __attribute__ ((__unused__))
297 = __m & __memory_order_mask;
298 __glibcxx_assert(__b != memory_order_consume);
299 __glibcxx_assert(__b != memory_order_acquire);
300 __glibcxx_assert(__b != memory_order_acq_rel);
301
302 __atomic_clear (&_M_i, int(__m));
303 }
304
305 private:
306 static constexpr __atomic_flag_data_type
307 _S_init(bool __i)
308 { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
309 };
310
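  // Illustrative sketch (not part of this header): atomic_flag is the
  // classic building block for a spinlock.  A minimal, hypothetical lock
  // using test_and_set/clear plus the C++20 wait/notify members above:
  //
  //   class spin_lock
  //   {
  //     std::atomic_flag _flag;       // clear state since C++20
  //
  //   public:
  //     void lock()
  //     {
  //       while (_flag.test_and_set(std::memory_order_acquire))
  //         _flag.wait(true, std::memory_order_relaxed);   // C++20: block
  //     }
  //
  //     void unlock()
  //     {
  //       _flag.clear(std::memory_order_release);
  //       _flag.notify_one();                              // C++20: wake one
  //     }
  //   };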
311 /// @cond undocumented
312
313 /// Base class for atomic integrals.
314 //
315 // For each of the integral types, define atomic_[integral type] struct
316 //
317 // atomic_bool bool
318 // atomic_char char
319 // atomic_schar signed char
320 // atomic_uchar unsigned char
321 // atomic_short short
322 // atomic_ushort unsigned short
323 // atomic_int int
324 // atomic_uint unsigned int
325 // atomic_long long
326 // atomic_ulong unsigned long
327 // atomic_llong long long
328 // atomic_ullong unsigned long long
329 // atomic_char8_t char8_t
330 // atomic_char16_t char16_t
331 // atomic_char32_t char32_t
332 // atomic_wchar_t wchar_t
333 //
334 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
335 // 8 bytes, since that is what GCC built-in functions for atomic
336 // memory access expect.
337 template<typename _ITp>
338 struct __atomic_base
339 {
340 using value_type = _ITp;
341 using difference_type = value_type;
342
343 private:
344 typedef _ITp __int_type;
345
346 static constexpr int _S_alignment =
347 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
348
349 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
350
351 public:
352 __atomic_base() noexcept = default;
353 ~__atomic_base() noexcept = default;
354 __atomic_base(const __atomic_base&) = delete;
355 __atomic_base& operator=(const __atomic_base&) = delete;
356 __atomic_base& operator=(const __atomic_base&) volatile = delete;
357
358 // Requires __int_type convertible to _M_i.
359 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
360
361 operator __int_type() const noexcept
362 { return load(); }
363
364 operator __int_type() const volatile noexcept
365 { return load(); }
366
367 __int_type
368 operator=(__int_type __i) noexcept
369 {
370 store(__i);
371 return __i;
372 }
373
374 __int_type
375 operator=(__int_type __i) volatile noexcept
376 {
377 store(__i);
378 return __i;
379 }
380
381 __int_type
382 operator++(int) noexcept
383 { return fetch_add(1); }
384
385 __int_type
386 operator++(int) volatile noexcept
387 { return fetch_add(1); }
388
389 __int_type
390 operator--(int) noexcept
391 { return fetch_sub(1); }
392
393 __int_type
394 operator--(int) volatile noexcept
395 { return fetch_sub(1); }
396
397 __int_type
398 operator++() noexcept
399 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
400
401 __int_type
402 operator++() volatile noexcept
403 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
404
405 __int_type
406 operator--() noexcept
407 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
408
409 __int_type
410 operator--() volatile noexcept
411 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
412
413 __int_type
414 operator+=(__int_type __i) noexcept
415 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
416
417 __int_type
418 operator+=(__int_type __i) volatile noexcept
419 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
420
421 __int_type
422 operator-=(__int_type __i) noexcept
423 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
424
425 __int_type
426 operator-=(__int_type __i) volatile noexcept
427 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
428
429 __int_type
430 operator&=(__int_type __i) noexcept
431 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
432
433 __int_type
434 operator&=(__int_type __i) volatile noexcept
435 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
436
437 __int_type
438 operator|=(__int_type __i) noexcept
439 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
440
441 __int_type
442 operator|=(__int_type __i) volatile noexcept
443 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
444
445 __int_type
446 operator^=(__int_type __i) noexcept
447 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
448
449 __int_type
450 operator^=(__int_type __i) volatile noexcept
451 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
452
453 bool
454 is_lock_free() const noexcept
455 {
456 // Use a fake, minimally aligned pointer.
457 return __atomic_is_lock_free(sizeof(_M_i),
458 reinterpret_cast<void *>(-_S_alignment));
459 }
460
461 bool
462 is_lock_free() const volatile noexcept
463 {
464 // Use a fake, minimally aligned pointer.
465 return __atomic_is_lock_free(sizeof(_M_i),
466 reinterpret_cast<void *>(-_S_alignment));
467 }
468
469 _GLIBCXX_ALWAYS_INLINE void
470 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
471 {
472 memory_order __b __attribute__ ((__unused__))
473 = __m & __memory_order_mask;
474 __glibcxx_assert(__b != memory_order_acquire);
475 __glibcxx_assert(__b != memory_order_acq_rel);
476 __glibcxx_assert(__b != memory_order_consume);
477
478 __atomic_store_n(&_M_i, __i, int(__m));
479 }
480
481 _GLIBCXX_ALWAYS_INLINE void
482 store(__int_type __i,
483 memory_order __m = memory_order_seq_cst) volatile noexcept
484 {
485 memory_order __b __attribute__ ((__unused__))
486 = __m & __memory_order_mask;
487 __glibcxx_assert(__b != memory_order_acquire);
488 __glibcxx_assert(__b != memory_order_acq_rel);
489 __glibcxx_assert(__b != memory_order_consume);
490
491 __atomic_store_n(&_M_i, __i, int(__m));
492 }
493
494 _GLIBCXX_ALWAYS_INLINE __int_type
495 load(memory_order __m = memory_order_seq_cst) const noexcept
496 {
497 memory_order __b __attribute__ ((__unused__))
498 = __m & __memory_order_mask;
499 __glibcxx_assert(__b != memory_order_release);
500 __glibcxx_assert(__b != memory_order_acq_rel);
501
502 return __atomic_load_n(&_M_i, int(__m));
503 }
504
505 _GLIBCXX_ALWAYS_INLINE __int_type
506 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
507 {
508 memory_order __b __attribute__ ((__unused__))
509 = __m & __memory_order_mask;
510 __glibcxx_assert(__b != memory_order_release);
511 __glibcxx_assert(__b != memory_order_acq_rel);
512
513 return __atomic_load_n(&_M_i, int(__m));
514 }
515
516 _GLIBCXX_ALWAYS_INLINE __int_type
517 exchange(__int_type __i,
518 memory_order __m = memory_order_seq_cst) noexcept
519 {
520 return __atomic_exchange_n(&_M_i, __i, int(__m));
521 }
522
523
524 _GLIBCXX_ALWAYS_INLINE __int_type
525 exchange(__int_type __i,
526 memory_order __m = memory_order_seq_cst) volatile noexcept
527 {
528 return __atomic_exchange_n(&_M_i, __i, int(__m));
529 }
530
531 _GLIBCXX_ALWAYS_INLINE bool
532 compare_exchange_weak(__int_type& __i1, __int_type __i2,
533 memory_order __m1, memory_order __m2) noexcept
534 {
535 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
536
537 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
538 int(__m1), int(__m2));
539 }
540
541 _GLIBCXX_ALWAYS_INLINE bool
542 compare_exchange_weak(__int_type& __i1, __int_type __i2,
543 memory_order __m1,
544 memory_order __m2) volatile noexcept
545 {
546 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
547
548 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
549 int(__m1), int(__m2));
550 }
551
552 _GLIBCXX_ALWAYS_INLINE bool
553 compare_exchange_weak(__int_type& __i1, __int_type __i2,
554 memory_order __m = memory_order_seq_cst) noexcept
555 {
556 return compare_exchange_weak(__i1, __i2, __m,
557 __cmpexch_failure_order(__m));
558 }
559
560 _GLIBCXX_ALWAYS_INLINE bool
561 compare_exchange_weak(__int_type& __i1, __int_type __i2,
562 memory_order __m = memory_order_seq_cst) volatile noexcept
563 {
564 return compare_exchange_weak(__i1, __i2, __m,
565 __cmpexch_failure_order(__m));
566 }
567
568 _GLIBCXX_ALWAYS_INLINE bool
569 compare_exchange_strong(__int_type& __i1, __int_type __i2,
570 memory_order __m1, memory_order __m2) noexcept
571 {
572 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
573
574 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
575 int(__m1), int(__m2));
576 }
577
578 _GLIBCXX_ALWAYS_INLINE bool
579 compare_exchange_strong(__int_type& __i1, __int_type __i2,
580 memory_order __m1,
581 memory_order __m2) volatile noexcept
582 {
583 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
584
585 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
586 int(__m1), int(__m2));
587 }
588
589 _GLIBCXX_ALWAYS_INLINE bool
590 compare_exchange_strong(__int_type& __i1, __int_type __i2,
591 memory_order __m = memory_order_seq_cst) noexcept
592 {
593 return compare_exchange_strong(__i1, __i2, __m,
594 __cmpexch_failure_order(__m));
595 }
596
597 _GLIBCXX_ALWAYS_INLINE bool
598 compare_exchange_strong(__int_type& __i1, __int_type __i2,
599 memory_order __m = memory_order_seq_cst) volatile noexcept
600 {
601 return compare_exchange_strong(__i1, __i2, __m,
602 __cmpexch_failure_order(__m));
603 }
604
605#if __glibcxx_atomic_wait
606 _GLIBCXX_ALWAYS_INLINE void
607 wait(__int_type __old,
608 memory_order __m = memory_order_seq_cst) const noexcept
609 {
610 std::__atomic_wait_address_v(&_M_i, __old,
611 [__m, this] { return this->load(__m); });
612 }
613
614 // TODO add const volatile overload
615
616 _GLIBCXX_ALWAYS_INLINE void
617 notify_one() noexcept
618 { std::__atomic_notify_address(&_M_i, false); }
619
620 // TODO add const volatile overload
621
622 _GLIBCXX_ALWAYS_INLINE void
623 notify_all() noexcept
624 { std::__atomic_notify_address(&_M_i, true); }
625
626 // TODO add const volatile overload
627#endif // __glibcxx_atomic_wait
628
629 _GLIBCXX_ALWAYS_INLINE __int_type
630 fetch_add(__int_type __i,
631 memory_order __m = memory_order_seq_cst) noexcept
632 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
633
634 _GLIBCXX_ALWAYS_INLINE __int_type
635 fetch_add(__int_type __i,
636 memory_order __m = memory_order_seq_cst) volatile noexcept
637 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
638
639 _GLIBCXX_ALWAYS_INLINE __int_type
640 fetch_sub(__int_type __i,
641 memory_order __m = memory_order_seq_cst) noexcept
642 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
643
644 _GLIBCXX_ALWAYS_INLINE __int_type
645 fetch_sub(__int_type __i,
646 memory_order __m = memory_order_seq_cst) volatile noexcept
647 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
648
649 _GLIBCXX_ALWAYS_INLINE __int_type
650 fetch_and(__int_type __i,
651 memory_order __m = memory_order_seq_cst) noexcept
652 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
653
654 _GLIBCXX_ALWAYS_INLINE __int_type
655 fetch_and(__int_type __i,
656 memory_order __m = memory_order_seq_cst) volatile noexcept
657 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
658
659 _GLIBCXX_ALWAYS_INLINE __int_type
660 fetch_or(__int_type __i,
661 memory_order __m = memory_order_seq_cst) noexcept
662 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
663
664 _GLIBCXX_ALWAYS_INLINE __int_type
665 fetch_or(__int_type __i,
666 memory_order __m = memory_order_seq_cst) volatile noexcept
667 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
668
669 _GLIBCXX_ALWAYS_INLINE __int_type
670 fetch_xor(__int_type __i,
671 memory_order __m = memory_order_seq_cst) noexcept
672 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
673
674 _GLIBCXX_ALWAYS_INLINE __int_type
675 fetch_xor(__int_type __i,
676 memory_order __m = memory_order_seq_cst) volatile noexcept
677 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
678 };
679
680
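  // Illustrative sketch (not part of this header): the integral operations
  // above surface as std::atomic<int> and friends.  A hypothetical shared
  // counter, assuming <atomic>, <thread> and <vector>:
  //
  //   int count_to(int iterations, int threads)
  //   {
  //     std::atomic<int> counter{0};
  //     std::vector<std::thread> pool;
  //     for (int t = 0; t < threads; ++t)
  //       pool.emplace_back([&] {
  //         for (int i = 0; i < iterations; ++i)
  //           counter.fetch_add(1, std::memory_order_relaxed); // atomic RMW
  //       });
  //     for (auto& th : pool)
  //       th.join();
  //     return counter.load();          // threads * iterations
  //   }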
681 /// Partial specialization for pointer types.
682 template<typename _PTp>
683 struct __atomic_base<_PTp*>
684 {
685 private:
686 typedef _PTp* __pointer_type;
687
688 __pointer_type _M_p _GLIBCXX20_INIT(nullptr);
689
690 static constexpr ptrdiff_t
691 _S_type_size(ptrdiff_t __d)
692 { return __d * sizeof(_PTp); }
693
694 public:
695 __atomic_base() noexcept = default;
696 ~__atomic_base() noexcept = default;
697 __atomic_base(const __atomic_base&) = delete;
698 __atomic_base& operator=(const __atomic_base&) = delete;
699 __atomic_base& operator=(const __atomic_base&) volatile = delete;
700
701 // Requires __pointer_type convertible to _M_p.
702 constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }
703
704 operator __pointer_type() const noexcept
705 { return load(); }
706
707 operator __pointer_type() const volatile noexcept
708 { return load(); }
709
710 __pointer_type
711 operator=(__pointer_type __p) noexcept
712 {
713 store(__p);
714 return __p;
715 }
716
717 __pointer_type
718 operator=(__pointer_type __p) volatile noexcept
719 {
720 store(__p);
721 return __p;
722 }
723
724 __pointer_type
725 operator++(int) noexcept
726 { return fetch_add(1); }
727
728 __pointer_type
729 operator++(int) volatile noexcept
730 { return fetch_add(1); }
731
732 __pointer_type
733 operator--(int) noexcept
734 { return fetch_sub(1); }
735
736 __pointer_type
737 operator--(int) volatile noexcept
738 { return fetch_sub(1); }
739
740 __pointer_type
741 operator++() noexcept
742 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
743 int(memory_order_seq_cst)); }
744
745 __pointer_type
746 operator++() volatile noexcept
747 { return __atomic_add_fetch(&_M_p, _S_type_size(1),
748 int(memory_order_seq_cst)); }
749
750 __pointer_type
751 operator--() noexcept
752 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
753 int(memory_order_seq_cst)); }
754
755 __pointer_type
756 operator--() volatile noexcept
757 { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
758 int(memory_order_seq_cst)); }
759
760 __pointer_type
761 operator+=(ptrdiff_t __d) noexcept
762 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
763 int(memory_order_seq_cst)); }
764
765 __pointer_type
766 operator+=(ptrdiff_t __d) volatile noexcept
767 { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
768 int(memory_order_seq_cst)); }
769
770 __pointer_type
771 operator-=(ptrdiff_t __d) noexcept
772 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
773 int(memory_order_seq_cst)); }
774
775 __pointer_type
776 operator-=(ptrdiff_t __d) volatile noexcept
777 { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
778 int(memory_order_seq_cst)); }
779
780 bool
781 is_lock_free() const noexcept
782 {
783 // Produce a fake, minimally aligned pointer.
784 return __atomic_is_lock_free(sizeof(_M_p),
785 reinterpret_cast<void *>(-__alignof(_M_p)));
786 }
787
788 bool
789 is_lock_free() const volatile noexcept
790 {
791 // Produce a fake, minimally aligned pointer.
792 return __atomic_is_lock_free(sizeof(_M_p),
793 reinterpret_cast<void *>(-__alignof(_M_p)));
794 }
795
796 _GLIBCXX_ALWAYS_INLINE void
797 store(__pointer_type __p,
798 memory_order __m = memory_order_seq_cst) noexcept
799 {
800 memory_order __b __attribute__ ((__unused__))
801 = __m & __memory_order_mask;
802
803 __glibcxx_assert(__b != memory_order_acquire);
804 __glibcxx_assert(__b != memory_order_acq_rel);
805 __glibcxx_assert(__b != memory_order_consume);
806
807 __atomic_store_n(&_M_p, __p, int(__m));
808 }
809
810 _GLIBCXX_ALWAYS_INLINE void
811 store(__pointer_type __p,
812 memory_order __m = memory_order_seq_cst) volatile noexcept
813 {
814 memory_order __b __attribute__ ((__unused__))
815 = __m & __memory_order_mask;
816 __glibcxx_assert(__b != memory_order_acquire);
817 __glibcxx_assert(__b != memory_order_acq_rel);
818 __glibcxx_assert(__b != memory_order_consume);
819
820 __atomic_store_n(&_M_p, __p, int(__m));
821 }
822
823 _GLIBCXX_ALWAYS_INLINE __pointer_type
824 load(memory_order __m = memory_order_seq_cst) const noexcept
825 {
826 memory_order __b __attribute__ ((__unused__))
827 = __m & __memory_order_mask;
828 __glibcxx_assert(__b != memory_order_release);
829 __glibcxx_assert(__b != memory_order_acq_rel);
830
831 return __atomic_load_n(&_M_p, int(__m));
832 }
833
834 _GLIBCXX_ALWAYS_INLINE __pointer_type
835 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
836 {
837 memory_order __b __attribute__ ((__unused__))
838 = __m & __memory_order_mask;
839 __glibcxx_assert(__b != memory_order_release);
840 __glibcxx_assert(__b != memory_order_acq_rel);
841
842 return __atomic_load_n(&_M_p, int(__m));
843 }
844
845 _GLIBCXX_ALWAYS_INLINE __pointer_type
846 exchange(__pointer_type __p,
847 memory_order __m = memory_order_seq_cst) noexcept
848 {
849 return __atomic_exchange_n(&_M_p, __p, int(__m));
850 }
851
852
853 _GLIBCXX_ALWAYS_INLINE __pointer_type
854 exchange(__pointer_type __p,
855 memory_order __m = memory_order_seq_cst) volatile noexcept
856 {
857 return __atomic_exchange_n(&_M_p, __p, int(__m));
858 }
859
860 _GLIBCXX_ALWAYS_INLINE bool
861 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
862 memory_order __m1,
863 memory_order __m2) noexcept
864 {
865 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
866
867 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
868 int(__m1), int(__m2));
869 }
870
871 _GLIBCXX_ALWAYS_INLINE bool
872 compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
873 memory_order __m1,
874 memory_order __m2) volatile noexcept
875 {
876 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
877
878 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
879 int(__m1), int(__m2));
880 }
881
882 _GLIBCXX_ALWAYS_INLINE bool
883 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
884 memory_order __m1,
885 memory_order __m2) noexcept
886 {
887 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
888
889 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
890 int(__m1), int(__m2));
891 }
892
893 _GLIBCXX_ALWAYS_INLINE bool
894 compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
895 memory_order __m1,
896 memory_order __m2) volatile noexcept
897 {
898 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
899
900 return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
901 int(__m1), int(__m2));
902 }
903
904#if __glibcxx_atomic_wait
905 _GLIBCXX_ALWAYS_INLINE void
906 wait(__pointer_type __old,
907 memory_order __m = memory_order_seq_cst) const noexcept
908 {
909 std::__atomic_wait_address_v(&_M_p, __old,
910 [__m, this]
911 { return this->load(__m); });
912 }
913
914 // TODO add const volatile overload
915
916 _GLIBCXX_ALWAYS_INLINE void
917 notify_one() const noexcept
918 { std::__atomic_notify_address(&_M_p, false); }
919
920 // TODO add const volatile overload
921
922 _GLIBCXX_ALWAYS_INLINE void
923 notify_all() const noexcept
924 { std::__atomic_notify_address(&_M_p, true); }
925
926 // TODO add const volatile overload
927#endif // __glibcxx_atomic_wait
928
929 _GLIBCXX_ALWAYS_INLINE __pointer_type
930 fetch_add(ptrdiff_t __d,
931 memory_order __m = memory_order_seq_cst) noexcept
932 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
933
934 _GLIBCXX_ALWAYS_INLINE __pointer_type
935 fetch_add(ptrdiff_t __d,
936 memory_order __m = memory_order_seq_cst) volatile noexcept
937 { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }
938
939 _GLIBCXX_ALWAYS_INLINE __pointer_type
940 fetch_sub(ptrdiff_t __d,
941 memory_order __m = memory_order_seq_cst) noexcept
942 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
943
944 _GLIBCXX_ALWAYS_INLINE __pointer_type
945 fetch_sub(ptrdiff_t __d,
946 memory_order __m = memory_order_seq_cst) volatile noexcept
947 { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
948 };
949
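  // Illustrative sketch (not part of this header): for std::atomic<T*> the
  // arithmetic members scale by sizeof(T), mirroring _S_type_size above.
  // Hypothetical example:
  //
  //   int buffer[16];
  //   std::atomic<int*> cursor{buffer};
  //
  //   int* claim_slot()
  //   {
  //     // Advances by one int (sizeof(int) bytes), not by one byte, and
  //     // returns the previously held pointer.
  //     return cursor.fetch_add(1, std::memory_order_relaxed);
  //   }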
950 namespace __atomic_impl
951 {
952 // Implementation details of atomic padding handling
953
954 template<typename _Tp>
955 constexpr bool
956 __maybe_has_padding()
957 {
958#if ! __has_builtin(__builtin_clear_padding)
959 return false;
960#elif __has_builtin(__has_unique_object_representations)
961 return !__has_unique_object_representations(_Tp)
962 && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
963#else
964 return true;
965#endif
966 }
967
968 template<typename _Tp>
969 _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
970 __clear_padding(_Tp& __val) noexcept
971 {
972 auto* __ptr = std::__addressof(__val);
973#if __has_builtin(__builtin_clear_padding)
974 if _GLIBCXX17_CONSTEXPR (__atomic_impl::__maybe_has_padding<_Tp>())
975 __builtin_clear_padding(__ptr);
976#endif
977 return __ptr;
978 }
979
980 // Remove volatile and create a non-deduced context for value arguments.
981 template<typename _Tp>
982 using _Val = typename remove_volatile<_Tp>::type;
983
984#pragma GCC diagnostic push
985#pragma GCC diagnostic ignored "-Wc++17-extensions"
986
987 template<bool _AtomicRef = false, typename _Tp>
988 _GLIBCXX_ALWAYS_INLINE bool
989 __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
990 bool __is_weak,
991 memory_order __s, memory_order __f) noexcept
992 {
993 __glibcxx_assert(__is_valid_cmpexch_failure_order(__f));
994
995 using _Vp = _Val<_Tp>;
996 _Tp* const __pval = std::__addressof(__val);
997
998 if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
999 {
1000 return __atomic_compare_exchange(__pval, std::__addressof(__e),
1001 std::__addressof(__i), __is_weak,
1002 int(__s), int(__f));
1003 }
1004 else if constexpr (!_AtomicRef) // std::atomic<T>
1005 {
1006 // Clear padding of the value we want to set:
1007 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1008 // Only allowed to modify __e on failure, so make a copy:
1009 _Vp __exp = __e;
1010 // Clear padding of the expected value:
1011 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1012
1013 // For std::atomic<T> we know that the contained value will already
1014 // have zeroed padding, so trivial memcmp semantics are OK.
1015 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1016 __is_weak, int(__s), int(__f)))
1017 return true;
1018 // Value bits must be different, copy from __exp back to __e:
1019 __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
1020 return false;
1021 }
1022 else // std::atomic_ref<T> where T has padding bits.
1023 {
1024 // Clear padding of the value we want to set:
1025 _Vp* const __pi = __atomic_impl::__clear_padding(__i);
1026
1027 // Only allowed to modify __e on failure, so make a copy:
1028 _Vp __exp = __e;
1029 // Optimistically assume that a previous store had zeroed padding
1030 // so that zeroing it in the expected value will match first time.
1031 _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);
1032
1033 // compare_exchange is specified to compare value representations.
1034 // Need to check whether a failure is 'real' or just due to
1035 // differences in padding bits. This loop should run no more than
1036 // three times, because the worst case scenario is:
1037 // First CAS fails because the actual value has non-zero padding.
1038 // Second CAS fails because another thread stored the same value,
1039 // but now with padding cleared. Third CAS succeeds.
1040 // We will never need to loop a fourth time, because any value
1041 // written by another thread (whether via store, exchange or
1042 // compare_exchange) will have had its padding cleared.
1043 while (true)
1044 {
1045 // Copy of the expected value so we can clear its padding.
1046 _Vp __orig = __exp;
1047
1048 if (__atomic_compare_exchange(__pval, __pexp, __pi,
1049 __is_weak, int(__s), int(__f)))
1050 return true;
1051
1052 // Copy of the actual value so we can clear its padding.
1053 _Vp __curr = __exp;
1054
1055 // Compare value representations (i.e. ignoring padding).
1056 if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
1057 __atomic_impl::__clear_padding(__curr),
1058 sizeof(_Vp)))
1059 {
1060 // Value representations compare unequal, real failure.
1061 __builtin_memcpy(std::__addressof(__e), __pexp,
1062 sizeof(_Vp));
1063 return false;
1064 }
1065 }
1066 }
1067 }
1068#pragma GCC diagnostic pop
1069 } // namespace __atomic_impl
1070
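  // Illustrative sketch (not part of this header): from user code the
  // padding handling above is invisible; compare_exchange is simply used in
  // a retry loop.  Hypothetical example with a struct that, on typical ABIs,
  // has padding bytes between its members:
  //
  //   struct widget { char tag; /* padding */ int value; };
  //
  //   std::atomic<widget> w{widget{'a', 0}};
  //
  //   void bump(int by)
  //   {
  //     widget expected = w.load();
  //     widget desired;
  //     do
  //       desired = widget{expected.tag, expected.value + by};
  //     while (!w.compare_exchange_weak(expected, desired));
  //   }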
1071#if __cplusplus > 201703L
1072 // Implementation details of atomic_ref and atomic<floating-point>.
1073 namespace __atomic_impl
1074 {
1075 // Like _Val<T> above, but for difference_type arguments.
1076 template<typename _Tp>
1077 using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1078
1079 template<size_t _Size, size_t _Align>
1080 _GLIBCXX_ALWAYS_INLINE bool
1081 is_lock_free() noexcept
1082 {
1083 // Produce a fake, minimally aligned pointer.
1084 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1085 }
1086
1087 template<typename _Tp>
1088 _GLIBCXX_ALWAYS_INLINE void
1089 store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
1090 {
1091 __atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
1092 }
1093
1094 template<typename _Tp>
1095 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1096 load(const _Tp* __ptr, memory_order __m) noexcept
1097 {
1098 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1099 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1100 __atomic_load(__ptr, __dest, int(__m));
1101 return *__dest;
1102 }
1103
1104 template<typename _Tp>
1105 _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
1106 exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
1107 {
1108 alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
1109 auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
1110 __atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
1111 __dest, int(__m));
1112 return *__dest;
1113 }
1114
1115 template<bool _AtomicRef = false, typename _Tp>
1116 _GLIBCXX_ALWAYS_INLINE bool
1117 compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
1118 _Val<_Tp> __desired, memory_order __success,
1119 memory_order __failure) noexcept
1120 {
1121 return __atomic_impl::__compare_exchange<_AtomicRef>(
1122 *__ptr, __expected, __desired, true, __success, __failure);
1123 }
1124
1125 template<bool _AtomicRef = false, typename _Tp>
1126 _GLIBCXX_ALWAYS_INLINE bool
1127 compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
1128 _Val<_Tp> __desired, memory_order __success,
1129 memory_order __failure) noexcept
1130 {
1131 return __atomic_impl::__compare_exchange<_AtomicRef>(
1132 *__ptr, __expected, __desired, false, __success, __failure);
1133 }
1134
1135#if __glibcxx_atomic_wait
1136 template<typename _Tp>
1137 _GLIBCXX_ALWAYS_INLINE void
1138 wait(const _Tp* __ptr, _Val<_Tp> __old,
1139 memory_order __m = memory_order_seq_cst) noexcept
1140 {
1141 std::__atomic_wait_address_v(__ptr, __old,
1142 [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
1143 }
1144
1145 // TODO add const volatile overload
1146
1147 template<typename _Tp>
1148 _GLIBCXX_ALWAYS_INLINE void
1149 notify_one(const _Tp* __ptr) noexcept
1150 { std::__atomic_notify_address(__ptr, false); }
1151
1152 // TODO add const volatile overload
1153
1154 template<typename _Tp>
1155 _GLIBCXX_ALWAYS_INLINE void
1156 notify_all(const _Tp* __ptr) noexcept
1157 { std::__atomic_notify_address(__ptr, true); }
1158
1159 // TODO add const volatile overload
1160#endif // __glibcxx_atomic_wait
1161
1162 template<typename _Tp>
1163 _GLIBCXX_ALWAYS_INLINE _Tp
1164 fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1165 { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1166
1167 template<typename _Tp>
1168 _GLIBCXX_ALWAYS_INLINE _Tp
1169 fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
1170 { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1171
1172 template<typename _Tp>
1173 _GLIBCXX_ALWAYS_INLINE _Tp
1174 fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1175 { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1176
1177 template<typename _Tp>
1178 _GLIBCXX_ALWAYS_INLINE _Tp
1179 fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1180 { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1181
1182 template<typename _Tp>
1183 _GLIBCXX_ALWAYS_INLINE _Tp
1184 fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1185 { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1186
1187 template<typename _Tp>
1188 _GLIBCXX_ALWAYS_INLINE _Tp
1189 __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1190 { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1191
1192 template<typename _Tp>
1193 _GLIBCXX_ALWAYS_INLINE _Tp
1194 __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
1195 { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1196
1197 template<typename _Tp>
1198 _GLIBCXX_ALWAYS_INLINE _Tp
1199 __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1200 { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1201
1202 template<typename _Tp>
1203 _GLIBCXX_ALWAYS_INLINE _Tp
1204 __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1205 { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1206
1207 template<typename _Tp>
1208 _GLIBCXX_ALWAYS_INLINE _Tp
1209 __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
1210 { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1211
1212 template<typename _Tp>
1213 concept __atomic_fetch_addable
1214 = requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
1215
1216 template<typename _Tp>
1217 _Tp
1218 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1219 {
1220 if constexpr (__atomic_fetch_addable<_Tp>)
1221 return __atomic_fetch_add(__ptr, __i, int(__m));
1222 else
1223 {
1224 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1225 _Val<_Tp> __newval = __oldval + __i;
1226 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1227 memory_order_relaxed))
1228 __newval = __oldval + __i;
1229 return __oldval;
1230 }
1231 }
1232
1233 template<typename _Tp>
1234 concept __atomic_fetch_subtractable
1235 = requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
1236
1237 template<typename _Tp>
1238 _Tp
1239 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1240 {
1241 if constexpr (__atomic_fetch_subtractable<_Tp>)
1242 return __atomic_fetch_sub(__ptr, __i, int(__m));
1243 else
1244 {
1245 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1246 _Val<_Tp> __newval = __oldval - __i;
1247 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1248 memory_order_relaxed))
1249 __newval = __oldval - __i;
1250 return __oldval;
1251 }
1252 }
1253
1254 template<typename _Tp>
1255 concept __atomic_add_fetchable
1256 = requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
1257
1258 template<typename _Tp>
1259 _Tp
1260 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1261 {
1262 if constexpr (__atomic_add_fetchable<_Tp>)
1263 return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1264 else
1265 {
1266 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1267 _Val<_Tp> __newval = __oldval + __i;
1268 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1269 memory_order_seq_cst,
1270 memory_order_relaxed))
1271 __newval = __oldval + __i;
1272 return __newval;
1273 }
1274 }
1275
1276 template<typename _Tp>
1277 concept __atomic_sub_fetchable
1278 = requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
1279
1280 template<typename _Tp>
1281 _Tp
1282 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1283 {
1284 if constexpr (__atomic_sub_fetchable<_Tp>)
1285 return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1286 else
1287 {
1288 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1289 _Val<_Tp> __newval = __oldval - __i;
1290 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1291 memory_order_seq_cst,
1292 memory_order_relaxed))
1293 __newval = __oldval - __i;
1294 return __newval;
1295 }
1296 }
1297 } // namespace __atomic_impl
1298
1299 // base class for atomic<floating-point-type>
1300 template<typename _Fp>
1301 struct __atomic_float
1302 {
1303 static_assert(is_floating_point_v<_Fp>);
1304
1305 static constexpr size_t _S_alignment = __alignof__(_Fp);
1306
1307 public:
1308 using value_type = _Fp;
1309 using difference_type = value_type;
1310
1311 static constexpr bool is_always_lock_free
1312 = __atomic_always_lock_free(sizeof(_Fp), 0);
1313
1314 __atomic_float() = default;
1315
1316 constexpr
1317 __atomic_float(_Fp __t) : _M_fp(__t)
1318 { __atomic_impl::__clear_padding(_M_fp); }
1319
1320 __atomic_float(const __atomic_float&) = delete;
1321 __atomic_float& operator=(const __atomic_float&) = delete;
1322 __atomic_float& operator=(const __atomic_float&) volatile = delete;
1323
1324 _Fp
1325 operator=(_Fp __t) volatile noexcept
1326 {
1327 this->store(__t);
1328 return __t;
1329 }
1330
1331 _Fp
1332 operator=(_Fp __t) noexcept
1333 {
1334 this->store(__t);
1335 return __t;
1336 }
1337
1338 bool
1339 is_lock_free() const volatile noexcept
1340 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1341
1342 bool
1343 is_lock_free() const noexcept
1344 { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }
1345
1346 void
1347 store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
1348 { __atomic_impl::store(&_M_fp, __t, __m); }
1349
1350 void
1351 store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
1352 { __atomic_impl::store(&_M_fp, __t, __m); }
1353
1354 _Fp
1355 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
1356 { return __atomic_impl::load(&_M_fp, __m); }
1357
1358 _Fp
1359 load(memory_order __m = memory_order_seq_cst) const noexcept
1360 { return __atomic_impl::load(&_M_fp, __m); }
1361
1362 operator _Fp() const volatile noexcept { return this->load(); }
1363 operator _Fp() const noexcept { return this->load(); }
1364
1365 _Fp
1366 exchange(_Fp __desired,
1367 memory_order __m = memory_order_seq_cst) volatile noexcept
1368 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1369
1370 _Fp
1371 exchange(_Fp __desired,
1372 memory_order __m = memory_order_seq_cst) noexcept
1373 { return __atomic_impl::exchange(&_M_fp, __desired, __m); }
1374
1375 bool
1376 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1377 memory_order __success,
1378 memory_order __failure) noexcept
1379 {
1380 return __atomic_impl::compare_exchange_weak(&_M_fp,
1381 __expected, __desired,
1382 __success, __failure);
1383 }
1384
1385 bool
1386 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1387 memory_order __success,
1388 memory_order __failure) volatile noexcept
1389 {
1390 return __atomic_impl::compare_exchange_weak(&_M_fp,
1391 __expected, __desired,
1392 __success, __failure);
1393 }
1394
1395 bool
1396 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1397 memory_order __success,
1398 memory_order __failure) noexcept
1399 {
1400 return __atomic_impl::compare_exchange_strong(&_M_fp,
1401 __expected, __desired,
1402 __success, __failure);
1403 }
1404
1405 bool
1406 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1407 memory_order __success,
1408 memory_order __failure) volatile noexcept
1409 {
1410 return __atomic_impl::compare_exchange_strong(&_M_fp,
1411 __expected, __desired,
1412 __success, __failure);
1413 }
1414
1415 bool
1416 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1417 memory_order __order = memory_order_seq_cst)
1418 noexcept
1419 {
1420 return compare_exchange_weak(__expected, __desired, __order,
1421 __cmpexch_failure_order(__order));
1422 }
1423
1424 bool
1425 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1426 memory_order __order = memory_order_seq_cst)
1427 volatile noexcept
1428 {
1429 return compare_exchange_weak(__expected, __desired, __order,
1430 __cmpexch_failure_order(__order));
1431 }
1432
1433 bool
1434 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1435 memory_order __order = memory_order_seq_cst)
1436 noexcept
1437 {
1438 return compare_exchange_strong(__expected, __desired, __order,
1439 __cmpexch_failure_order(__order));
1440 }
1441
1442 bool
1443 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1444 memory_order __order = memory_order_seq_cst)
1445 volatile noexcept
1446 {
1447 return compare_exchange_strong(__expected, __desired, __order,
1448 __cmpexch_failure_order(__order));
1449 }
1450
1451#if __glibcxx_atomic_wait
1452 _GLIBCXX_ALWAYS_INLINE void
1453 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1454 { __atomic_impl::wait(&_M_fp, __old, __m); }
1455
1456 // TODO add const volatile overload
1457
1458 _GLIBCXX_ALWAYS_INLINE void
1459 notify_one() const noexcept
1460 { __atomic_impl::notify_one(&_M_fp); }
1461
1462 // TODO add const volatile overload
1463
1464 _GLIBCXX_ALWAYS_INLINE void
1465 notify_all() const noexcept
1466 { __atomic_impl::notify_all(&_M_fp); }
1467
1468 // TODO add const volatile overload
1469#endif // __glibcxx_atomic_wait
1470
1471 value_type
1472 fetch_add(value_type __i,
1473 memory_order __m = memory_order_seq_cst) noexcept
1474 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1475
1476 value_type
1477 fetch_add(value_type __i,
1478 memory_order __m = memory_order_seq_cst) volatile noexcept
1479 { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }
1480
1481 value_type
1482 fetch_sub(value_type __i,
1483 memory_order __m = memory_order_seq_cst) noexcept
1484 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1485
1486 value_type
1487 fetch_sub(value_type __i,
1488 memory_order __m = memory_order_seq_cst) volatile noexcept
1489 { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }
1490
1491 value_type
1492 operator+=(value_type __i) noexcept
1493 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1494
1495 value_type
1496 operator+=(value_type __i) volatile noexcept
1497 { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }
1498
1499 value_type
1500 operator-=(value_type __i) noexcept
1501 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1502
1503 value_type
1504 operator-=(value_type __i) volatile noexcept
1505 { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }
1506
1507 private:
1508 alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
1509 };
1510#undef _GLIBCXX20_INIT
1511
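  // Illustrative sketch (not part of this header): since C++20 the class
  // above backs std::atomic<float> and std::atomic<double>, so floating-point
  // accumulation can be done atomically.  Hypothetical example:
  //
  //   std::atomic<double> total{0.0};
  //
  //   void add_sample(double x)
  //   {
  //     // Uses a native atomic add where the target provides one, otherwise
  //     // the compare_exchange loop shown in __fetch_add_flt above.
  //     total.fetch_add(x, std::memory_order_relaxed);
  //   }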
1512 template<typename _Tp,
1513 bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
1514 bool = is_floating_point_v<_Tp>>
1515 struct __atomic_ref;
1516
1517 // base class for non-integral, non-floating-point, non-pointer types
1518 template<typename _Tp>
1519 struct __atomic_ref<_Tp, false, false>
1520 {
1521 static_assert(is_trivially_copyable_v<_Tp>);
1522
1523 // 1/2/4/8/16-byte types must be aligned to at least their size.
1524 static constexpr int _S_min_alignment
1525 = (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
1526 ? 0 : sizeof(_Tp);
1527
1528 public:
1529 using value_type = _Tp;
1530
1531 static constexpr bool is_always_lock_free
1532 = __atomic_always_lock_free(sizeof(_Tp), 0);
1533
1534 static constexpr size_t required_alignment
1535 = _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);
1536
1537 __atomic_ref& operator=(const __atomic_ref&) = delete;
1538
1539 explicit
1540 __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
1541 {
1542 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1543 }
1544
1545 __atomic_ref(const __atomic_ref&) noexcept = default;
1546
1547 _Tp
1548 operator=(_Tp __t) const noexcept
1549 {
1550 this->store(__t);
1551 return __t;
1552 }
1553
1554 operator _Tp() const noexcept { return this->load(); }
1555
1556 bool
1557 is_lock_free() const noexcept
1558 { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }
1559
1560 void
1561 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1562 { __atomic_impl::store(_M_ptr, __t, __m); }
1563
1564 _Tp
1565 load(memory_order __m = memory_order_seq_cst) const noexcept
1566 { return __atomic_impl::load(_M_ptr, __m); }
1567
1568 _Tp
1569 exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
1570 const noexcept
1571 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1572
1573 bool
1574 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1575 memory_order __success,
1576 memory_order __failure) const noexcept
1577 {
1578 return __atomic_impl::compare_exchange_weak<true>(
1579 _M_ptr, __expected, __desired, __success, __failure);
1580 }
1581
1582 bool
1583 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1584 memory_order __success,
1585 memory_order __failure) const noexcept
1586 {
1587 return __atomic_impl::compare_exchange_strong<true>(
1588 _M_ptr, __expected, __desired, __success, __failure);
1589 }
1590
1591 bool
1592 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1593 memory_order __order = memory_order_seq_cst)
1594 const noexcept
1595 {
1596 return compare_exchange_weak(__expected, __desired, __order,
1597 __cmpexch_failure_order(__order));
1598 }
1599
1600 bool
1601 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1602 memory_order __order = memory_order_seq_cst)
1603 const noexcept
1604 {
1605 return compare_exchange_strong(__expected, __desired, __order,
1606 __cmpexch_failure_order(__order));
1607 }
1608
1609#if __glibcxx_atomic_wait
1610 _GLIBCXX_ALWAYS_INLINE void
1611 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1612 { __atomic_impl::wait(_M_ptr, __old, __m); }
1613
1614 // TODO add const volatile overload
1615
1616 _GLIBCXX_ALWAYS_INLINE void
1617 notify_one() const noexcept
1618 { __atomic_impl::notify_one(_M_ptr); }
1619
1620 // TODO add const volatile overload
1621
1622 _GLIBCXX_ALWAYS_INLINE void
1623 notify_all() const noexcept
1624 { __atomic_impl::notify_all(_M_ptr); }
1625
1626 // TODO add const volatile overload
1627#endif // __glibcxx_atomic_wait
1628
1629 private:
1630 _Tp* _M_ptr;
1631 };
1632
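  // Illustrative sketch (not part of this header): std::atomic_ref (C++20)
  // applies these operations to an existing object, which must stay suitably
  // aligned and outlive every atomic_ref bound to it.  Hypothetical example
  // with a trivially copyable struct:
  //
  //   struct point { int x; int y; };
  //
  //   alignas(std::atomic_ref<point>::required_alignment) point shared{0, 0};
  //
  //   void reset()
  //   {
  //     std::atomic_ref<point> ref(shared);
  //     ref.store(point{0, 0});         // whole-object atomic store
  //   }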
1633 // base class for atomic_ref<integral-type>
1634 template<typename _Tp>
1635 struct __atomic_ref<_Tp, true, false>
1636 {
1637 static_assert(is_integral_v<_Tp>);
1638
1639 public:
1640 using value_type = _Tp;
1641 using difference_type = value_type;
1642
1643 static constexpr bool is_always_lock_free
1644 = __atomic_always_lock_free(sizeof(_Tp), 0);
1645
1646 static constexpr size_t required_alignment
1647 = sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);
1648
1649 __atomic_ref() = delete;
1650 __atomic_ref& operator=(const __atomic_ref&) = delete;
1651
1652 explicit
1653 __atomic_ref(_Tp& __t) : _M_ptr(&__t)
1654 {
1655 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1656 }
1657
1658 __atomic_ref(const __atomic_ref&) noexcept = default;
1659
1660 _Tp
1661 operator=(_Tp __t) const noexcept
1662 {
1663 this->store(__t);
1664 return __t;
1665 }
1666
1667 operator _Tp() const noexcept { return this->load(); }
1668
1669 bool
1670 is_lock_free() const noexcept
1671 {
1672 return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
1673 }
1674
1675 void
1676 store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
1677 { __atomic_impl::store(_M_ptr, __t, __m); }
1678
1679 _Tp
1680 load(memory_order __m = memory_order_seq_cst) const noexcept
1681 { return __atomic_impl::load(_M_ptr, __m); }
1682
1683 _Tp
1684 exchange(_Tp __desired,
1685 memory_order __m = memory_order_seq_cst) const noexcept
1686 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1687
1688 bool
1689 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1690 memory_order __success,
1691 memory_order __failure) const noexcept
1692 {
1693 return __atomic_impl::compare_exchange_weak<true>(
1694 _M_ptr, __expected, __desired, __success, __failure);
1695 }
1696
1697 bool
1698 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1699 memory_order __success,
1700 memory_order __failure) const noexcept
1701 {
1702 return __atomic_impl::compare_exchange_strong<true>(
1703 _M_ptr, __expected, __desired, __success, __failure);
1704 }
1705
1706 bool
1707 compare_exchange_weak(_Tp& __expected, _Tp __desired,
1708 memory_order __order = memory_order_seq_cst)
1709 const noexcept
1710 {
1711 return compare_exchange_weak(__expected, __desired, __order,
1712 __cmpexch_failure_order(__order));
1713 }
1714
1715 bool
1716 compare_exchange_strong(_Tp& __expected, _Tp __desired,
1717 memory_order __order = memory_order_seq_cst)
1718 const noexcept
1719 {
1720 return compare_exchange_strong(__expected, __desired, __order,
1721 __cmpexch_failure_order(__order));
1722 }
1723
1724#if __glibcxx_atomic_wait
1725 _GLIBCXX_ALWAYS_INLINE void
1726 wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
1727 { __atomic_impl::wait(_M_ptr, __old, __m); }
1728
1729 // TODO add const volatile overload
1730
1731 _GLIBCXX_ALWAYS_INLINE void
1732 notify_one() const noexcept
1733 { __atomic_impl::notify_one(_M_ptr); }
1734
1735 // TODO add const volatile overload
1736
1737 _GLIBCXX_ALWAYS_INLINE void
1738 notify_all() const noexcept
1739 { __atomic_impl::notify_all(_M_ptr); }
1740
1741 // TODO add const volatile overload
1742#endif // __glibcxx_atomic_wait
1743
1744 value_type
1745 fetch_add(value_type __i,
1746 memory_order __m = memory_order_seq_cst) const noexcept
1747 { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }
1748
1749 value_type
1750 fetch_sub(value_type __i,
1751 memory_order __m = memory_order_seq_cst) const noexcept
1752 { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }
1753
1754 value_type
1755 fetch_and(value_type __i,
1756 memory_order __m = memory_order_seq_cst) const noexcept
1757 { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }
1758
1759 value_type
1760 fetch_or(value_type __i,
1761 memory_order __m = memory_order_seq_cst) const noexcept
1762 { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }
1763
1764 value_type
1765 fetch_xor(value_type __i,
1766 memory_order __m = memory_order_seq_cst) const noexcept
1767 { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }
1768
1769 _GLIBCXX_ALWAYS_INLINE value_type
1770 operator++(int) const noexcept
1771 { return fetch_add(1); }
1772
1773 _GLIBCXX_ALWAYS_INLINE value_type
1774 operator--(int) const noexcept
1775 { return fetch_sub(1); }
1776
1777 value_type
1778 operator++() const noexcept
1779 { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }
1780
1781 value_type
1782 operator--() const noexcept
1783 { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }
1784
1785 value_type
1786 operator+=(value_type __i) const noexcept
1787 { return __atomic_impl::__add_fetch(_M_ptr, __i); }
1788
1789 value_type
1790 operator-=(value_type __i) const noexcept
1791 { return __atomic_impl::__sub_fetch(_M_ptr, __i); }
1792
1793 value_type
1794 operator&=(value_type __i) const noexcept
1795 { return __atomic_impl::__and_fetch(_M_ptr, __i); }
1796
1797 value_type
1798 operator|=(value_type __i) const noexcept
1799 { return __atomic_impl::__or_fetch(_M_ptr, __i); }
1800
1801 value_type
1802 operator^=(value_type __i) const noexcept
1803 { return __atomic_impl::__xor_fetch(_M_ptr, __i); }
1804
1805 private:
1806 _Tp* _M_ptr;
1807 };
1808
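  // Illustrative sketch (not part of this header): atomic_ref on an integer
  // provides the fetch_* and compound-assignment operators above without
  // changing the type of the underlying object.  Hypothetical example:
  //
  //   void tally(int* counters, int which)
  //   {
  //     // int already meets atomic_ref<int>::required_alignment on common ABIs.
  //     std::atomic_ref<int> slot(counters[which]);
  //     slot.fetch_add(1, std::memory_order_relaxed);
  //   }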
1809 // base class for atomic_ref<floating-point-type>
1810 template<typename _Fp>
1811 struct __atomic_ref<_Fp, false, true>
1812 {
1813 static_assert(is_floating_point_v<_Fp>);
1814
1815 public:
1816 using value_type = _Fp;
1817 using difference_type = value_type;
1818
1819 static constexpr bool is_always_lock_free
1820 = __atomic_always_lock_free(sizeof(_Fp), 0);
1821
1822 static constexpr size_t required_alignment = __alignof__(_Fp);
1823
1824 __atomic_ref() = delete;
1825 __atomic_ref& operator=(const __atomic_ref&) = delete;
1826
1827 explicit
1828 __atomic_ref(_Fp& __t) : _M_ptr(&__t)
1829 {
1830 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1831 }
1832
1833 __atomic_ref(const __atomic_ref&) noexcept = default;
1834
1835 _Fp
1836 operator=(_Fp __t) const noexcept
1837 {
1838 this->store(__t);
1839 return __t;
1840 }
1841
1842 operator _Fp() const noexcept { return this->load(); }
1843
1844 bool
1845 is_lock_free() const noexcept
1846 {
1847 return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
1848 }
1849
1850 void
1851 store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
1852 { __atomic_impl::store(_M_ptr, __t, __m); }
1853
1854 _Fp
1855 load(memory_order __m = memory_order_seq_cst) const noexcept
1856 { return __atomic_impl::load(_M_ptr, __m); }
1857
1858 _Fp
1859 exchange(_Fp __desired,
1860 memory_order __m = memory_order_seq_cst) const noexcept
1861 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1862
1863 bool
1864 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1865 memory_order __success,
1866 memory_order __failure) const noexcept
1867 {
1868 return __atomic_impl::compare_exchange_weak<true>(
1869 _M_ptr, __expected, __desired, __success, __failure);
1870 }
1871
1872 bool
1873 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1874 memory_order __success,
1875 memory_order __failure) const noexcept
1876 {
1877 return __atomic_impl::compare_exchange_strong<true>(
1878 _M_ptr, __expected, __desired, __success, __failure);
1879 }
1880
1881 bool
1882 compare_exchange_weak(_Fp& __expected, _Fp __desired,
1883 memory_order __order = memory_order_seq_cst)
1884 const noexcept
1885 {
1886 return compare_exchange_weak(__expected, __desired, __order,
1887 __cmpexch_failure_order(__order));
1888 }
1889
1890 bool
1891 compare_exchange_strong(_Fp& __expected, _Fp __desired,
1892 memory_order __order = memory_order_seq_cst)
1893 const noexcept
1894 {
1895 return compare_exchange_strong(__expected, __desired, __order,
1896 __cmpexch_failure_order(__order));
1897 }
1898
1899#if __glibcxx_atomic_wait
1900 _GLIBCXX_ALWAYS_INLINE void
1901 wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
1902 { __atomic_impl::wait(_M_ptr, __old, __m); }
1903
1904 // TODO add const volatile overload
1905
1906 _GLIBCXX_ALWAYS_INLINE void
1907 notify_one() const noexcept
1908 { __atomic_impl::notify_one(_M_ptr); }
1909
1910 // TODO add const volatile overload
1911
1912 _GLIBCXX_ALWAYS_INLINE void
1913 notify_all() const noexcept
1914 { __atomic_impl::notify_all(_M_ptr); }
1915
1916 // TODO add const volatile overload
1917#endif // __glibcxx_atomic_wait
1918
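      // Editorial note (added for exposition): fetch_add/fetch_sub and
      // the += / -= operators below dispatch to the __fetch_add_flt /
      // __fetch_sub_flt and __add_fetch_flt / __sub_fetch_flt helpers,
      // which are typically implemented as compare-exchange loops, since
      // most targets have no atomic floating-point add instruction.  As
      // required by the standard, no bitwise fetch_and/fetch_or/fetch_xor
      // operations are provided for floating-point types.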
1919 value_type
1920 fetch_add(value_type __i,
1921 memory_order __m = memory_order_seq_cst) const noexcept
1922 { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }
1923
1924 value_type
1925 fetch_sub(value_type __i,
1926 memory_order __m = memory_order_seq_cst) const noexcept
1927 { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }
1928
1929 value_type
1930 operator+=(value_type __i) const noexcept
1931 { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }
1932
1933 value_type
1934 operator-=(value_type __i) const noexcept
1935 { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }
1936
1937 private:
1938 _Fp* _M_ptr;
1939 };
1940
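  // Illustrative usage sketch (added for exposition, not part of the
  // original source): the floating-point specialisation above is what
  // backs std::atomic_ref<float>, <double> and <long double>.  Assuming
  // C++20 and <atomic>, a typical compare-exchange loop over it might
  // look like the following (names are illustrative only):
  //
  //   double value = 1.0;
  //   std::atomic_ref<double> ref(value);
  //   ref.fetch_add(0.5);                              // returns the old value
  //   double expected = ref.load();
  //   while (!ref.compare_exchange_weak(expected, expected * 2.0))
  //     { }  // on failure, expected is reloaded and the loop retries
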
1941 // base class for atomic_ref<pointer-type>
1942 template<typename _Tp>
1943 struct __atomic_ref<_Tp*, false, false>
1944 {
1945 public:
1946 using value_type = _Tp*;
1947 using difference_type = ptrdiff_t;
1948
1949 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1950
1951 static constexpr size_t required_alignment = __alignof__(_Tp*);
1952
1953 __atomic_ref() = delete;
1954 __atomic_ref& operator=(const __atomic_ref&) = delete;
1955
1956 explicit
1957 __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1958 {
1959 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1960 }
1961
1962 __atomic_ref(const __atomic_ref&) noexcept = default;
1963
1964 _Tp*
1965 operator=(_Tp* __t) const noexcept
1966 {
1967 this->store(__t);
1968 return __t;
1969 }
1970
1971 operator _Tp*() const noexcept { return this->load(); }
1972
1973 bool
1974 is_lock_free() const noexcept
1975 {
1976 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1977 }
1978
1979 void
1980 store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1981 { __atomic_impl::store(_M_ptr, __t, __m); }
1982
1983 _Tp*
1984 load(memory_order __m = memory_order_seq_cst) const noexcept
1985 { return __atomic_impl::load(_M_ptr, __m); }
1986
1987 _Tp*
1988 exchange(_Tp* __desired,
1989 memory_order __m = memory_order_seq_cst) const noexcept
1990 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1991
1992 bool
1993 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1994 memory_order __success,
1995 memory_order __failure) const noexcept
1996 {
1997 return __atomic_impl::compare_exchange_weak<true>(
1998 _M_ptr, __expected, __desired, __success, __failure);
1999 }
2000
2001 bool
2002 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2003 memory_order __success,
2004 memory_order __failure) const noexcept
2005 {
2006 return __atomic_impl::compare_exchange_strong<true>(
2007 _M_ptr, __expected, __desired, __success, __failure);
2008 }
2009
2010 bool
2011 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
2012 memory_order __order = memory_order_seq_cst)
2013 const noexcept
2014 {
2015 return compare_exchange_weak(__expected, __desired, __order,
2016 __cmpexch_failure_order(__order));
2017 }
2018
2019 bool
2020 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2021 memory_order __order = memory_order_seq_cst)
2022 const noexcept
2023 {
2024 return compare_exchange_strong(__expected, __desired, __order,
2025 __cmpexch_failure_order(__order));
2026 }
2027
2028#if __glibcxx_atomic_wait
2029 _GLIBCXX_ALWAYS_INLINE void
2030 wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
2031 { __atomic_impl::wait(_M_ptr, __old, __m); }
2032
2033 // TODO add const volatile overload
2034
2035 _GLIBCXX_ALWAYS_INLINE void
2036 notify_one() const noexcept
2037 { __atomic_impl::notify_one(_M_ptr); }
2038
2039 // TODO add const volatile overload
2040
2041 _GLIBCXX_ALWAYS_INLINE void
2042 notify_all() const noexcept
2043 { __atomic_impl::notify_all(_M_ptr); }
2044
2045 // TODO add const volatile overload
2046#endif // __glibcxx_atomic_wait
2047
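      // Editorial note (added for exposition): for the pointer
      // specialisation, fetch_add/fetch_sub and the arithmetic operators
      // below take a difference in *elements*; _S_type_size (defined
      // further down) scales it by sizeof(_Tp) before passing the byte
      // offset to the __atomic_impl helpers, and its static_assert
      // requires _Tp to be an object type.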
2048 _GLIBCXX_ALWAYS_INLINE value_type
2049 fetch_add(difference_type __d,
2050 memory_order __m = memory_order_seq_cst) const noexcept
2051 { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2052
2053 _GLIBCXX_ALWAYS_INLINE value_type
2054 fetch_sub(difference_type __d,
2055 memory_order __m = memory_order_seq_cst) const noexcept
2056 { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2057
2058 value_type
2059 operator++(int) const noexcept
2060 { return fetch_add(1); }
2061
2062 value_type
2063 operator--(int) const noexcept
2064 { return fetch_sub(1); }
2065
2066 value_type
2067 operator++() const noexcept
2068 {
2069 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2070 }
2071
2072 value_type
2073 operator--() const noexcept
2074 {
2075 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2076 }
2077
2078 value_type
2079 operator+=(difference_type __d) const noexcept
2080 {
2081 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2082 }
2083
2084 value_type
2085 operator-=(difference_type __d) const noexcept
2086 {
2087 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2088 }
2089
2090 private:
2091 static constexpr ptrdiff_t
2092 _S_type_size(ptrdiff_t __d) noexcept
2093 {
2094 static_assert(is_object_v<_Tp>);
2095 return __d * sizeof(_Tp);
2096 }
2097
2098 _Tp** _M_ptr;
2099 };
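  // Illustrative usage sketch (added for exposition, not part of the
  // original source): the pointer specialisation above underpins
  // std::atomic_ref<T*>.  Assuming C++20 and <atomic>, and with
  // illustrative names only:
  //
  //   int data[4] = {0, 1, 2, 3};
  //   int* cursor = data;
  //   std::atomic_ref<int*> ref(cursor);
  //   ref.fetch_add(2);   // advances by two elements, returns the old pointer
  //   ref += 1;           // advances by one element, returns the new pointer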
2100#endif // C++2a
2101
2102 /// @endcond
2103
2104 /// @} group atomics
2105
2106_GLIBCXX_END_NAMESPACE_VERSION
2107} // namespace std
2108
2109#endif