libstdc++
atomic_base.h
Go to the documentation of this file.
1// -*- C++ -*- header.
2
3// Copyright (C) 2008-2025 Free Software Foundation, Inc.
4//
5// This file is part of the GNU ISO C++ Library. This library is free
6// software; you can redistribute it and/or modify it under the
7// terms of the GNU General Public License as published by the
8// Free Software Foundation; either version 3, or (at your option)
9// any later version.
10
11// This library is distributed in the hope that it will be useful,
12// but WITHOUT ANY WARRANTY; without even the implied warranty of
13// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14// GNU General Public License for more details.
15
16// Under Section 7 of GPL version 3, you are granted additional
17// permissions described in the GCC Runtime Library Exception, version
18// 3.1, as published by the Free Software Foundation.
19
20// You should have received a copy of the GNU General Public License and
21// a copy of the GCC Runtime Library Exception along with this program;
22// see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
23// <http://www.gnu.org/licenses/>.
24
25/** @file bits/atomic_base.h
26 * This is an internal header file, included by other library headers.
27 * Do not attempt to use it directly. @headername{atomic}
28 */
29
30#ifndef _GLIBCXX_ATOMIC_BASE_H
31#define _GLIBCXX_ATOMIC_BASE_H 1
32
33#ifdef _GLIBCXX_SYSHDR
34#pragma GCC system_header
35#endif
36
37#include <bits/c++config.h>
38#include <new> // For placement new
40#include <bits/move.h>
41
42#if __cplusplus > 201703L && _GLIBCXX_HOSTED
43#include <bits/atomic_wait.h>
44#endif
45
46#ifndef _GLIBCXX_ALWAYS_INLINE
47#define _GLIBCXX_ALWAYS_INLINE inline __attribute__((__always_inline__))
48#endif
49
50#include <bits/version.h>
51
52namespace std _GLIBCXX_VISIBILITY(default)
53{
54_GLIBCXX_BEGIN_NAMESPACE_VERSION
55
56 /**
57 * @defgroup atomics Atomics
58 *
59 * Components for performing atomic operations.
60 * @{
61 */
62
  /// Enumeration for memory_order
#if __cplusplus > 201703L
  // Since C++20 memory_order is a scoped enumeration; the traditional
  // unscoped names are provided as compatibility constants below.
  enum class memory_order : int
    {
      relaxed,
      consume,
      acquire,
      release,
      acq_rel,
      seq_cst
    };

  inline constexpr memory_order memory_order_relaxed = memory_order::relaxed;
  inline constexpr memory_order memory_order_consume = memory_order::consume;
  inline constexpr memory_order memory_order_acquire = memory_order::acquire;
  inline constexpr memory_order memory_order_release = memory_order::release;
  inline constexpr memory_order memory_order_acq_rel = memory_order::acq_rel;
  inline constexpr memory_order memory_order_seq_cst = memory_order::seq_cst;
#else
  // Pre-C++20: unscoped enumeration with a fixed underlying type so the
  // modifier bits below can be OR-ed in without overflow concerns.
  enum memory_order : int
    {
      memory_order_relaxed,
      memory_order_consume,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      memory_order_seq_cst
    };
#endif
92
93 /// @cond undocumented
  // Extension bits that may be OR-ed into a memory_order value: the low
  // 16 bits (__memory_order_mask) hold the standard ordering, the high
  // bits hold modifiers. The __memory_order_hle_* flags presumably
  // request hardware lock elision (Intel TSX) -- confirm in the target
  // backends that consume these bits.
  enum __memory_order_modifier
    {
      __memory_order_mask          = 0x0ffff,
      __memory_order_modifier_mask = 0xffff0000,
      __memory_order_hle_acquire   = 0x10000,
      __memory_order_hle_release   = 0x20000
    };
101 /// @endcond
102
103 constexpr memory_order
104 operator|(memory_order __m, __memory_order_modifier __mod) noexcept
105 {
106 return memory_order(int(__m) | int(__mod));
107 }
108
109 constexpr memory_order
110 operator&(memory_order __m, __memory_order_modifier __mod) noexcept
111 {
112 return memory_order(int(__m) & int(__mod));
113 }
114
115 /// @cond undocumented
116
117 // Drop release ordering as per [atomics.types.operations.req]/21
118 constexpr memory_order
119 __cmpexch_failure_order2(memory_order __m) noexcept
120 {
121 return __m == memory_order_acq_rel ? memory_order_acquire
122 : __m == memory_order_release ? memory_order_relaxed : __m;
123 }
124
  // As __cmpexch_failure_order2, but operates only on the standard
  // ordering bits and re-attaches any modifier bits (e.g. HLE flags)
  // carried in the high half of __m.
  constexpr memory_order
  __cmpexch_failure_order(memory_order __m) noexcept
  {
    return memory_order(__cmpexch_failure_order2(__m & __memory_order_mask)
      | __memory_order_modifier(__m & __memory_order_modifier_mask));
  }
131
132 constexpr bool
133 __is_valid_cmpexch_failure_order(memory_order __m) noexcept
134 {
135 return (__m & __memory_order_mask) != memory_order_release
136 && (__m & __memory_order_mask) != memory_order_acq_rel;
137 }
138
  // Base types for atomics. The primary template (integral types) and a
  // partial specialization for pointers are defined later in this header.
  template<typename _IntTp>
    struct __atomic_base;
142
143 /// @endcond
144
  // Establishes inter-thread synchronization ordering ([atomics.fences])
  // by forwarding directly to the compiler built-in.
  _GLIBCXX_ALWAYS_INLINE void
  atomic_thread_fence(memory_order __m) noexcept
  { __atomic_thread_fence(int(__m)); }
148
  // Fence between a thread and a signal handler executed in the same
  // thread; forwards to the built-in, which per the GCC documentation
  // acts as a compiler barrier without emitting a hardware fence.
  _GLIBCXX_ALWAYS_INLINE void
  atomic_signal_fence(memory_order __m) noexcept
  { __atomic_signal_fence(int(__m)); }
152
153 /// kill_dependency
154 template<typename _Tp>
155 inline _Tp
156 kill_dependency(_Tp __y) noexcept
157 {
158 _Tp __ret(__y);
159 return __ret;
160 }
161
162/// @cond undocumented
163#if __glibcxx_atomic_value_initialization
164# define _GLIBCXX20_INIT(I) = I
165#else
166# define _GLIBCXX20_INIT(I)
167#endif
168/// @endcond
169
170#define ATOMIC_VAR_INIT(_VI) { _VI }
171
172 template<typename _Tp>
173 struct atomic;
174
175 template<typename _Tp>
176 struct atomic<_Tp*>;
177
  /* The target's "set" value for test-and-set may not be exactly 1. */
#if __GCC_ATOMIC_TEST_AND_SET_TRUEVAL == 1
  typedef bool __atomic_flag_data_type;
#else
  // A non-1 "true" value needs the full range of a byte.
  typedef unsigned char __atomic_flag_data_type;
#endif
184
185 /// @cond undocumented
186
187 /*
188 * Base type for atomic_flag.
189 *
190 * Base type is POD with data, allowing atomic_flag to derive from
191 * it and meet the standard layout type requirement. In addition to
192 * compatibility with a C interface, this allows different
193 * implementations of atomic_flag to use the same atomic operation
194 * functions, via a standard conversion to the __atomic_flag_base
195 * argument.
196 */
197 _GLIBCXX_BEGIN_EXTERN_C
198
  struct __atomic_flag_base
  {
    // Value-initialized (i.e. clear) since C++20; see _GLIBCXX20_INIT.
    __atomic_flag_data_type _M_i _GLIBCXX20_INIT({});
  };
203
204 _GLIBCXX_END_EXTERN_C
205
206 /// @endcond
207
208#define ATOMIC_FLAG_INIT { 0 }
209
210 /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() noexcept = default;
    ~atomic_flag() noexcept = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) volatile = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    constexpr atomic_flag(bool __i) noexcept
      : __atomic_flag_base{ _S_init(__i) }
    { }

    // Atomically set the flag and return its previous value.
    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      return __atomic_test_and_set (&_M_i, int(__m));
    }

#ifdef __glibcxx_atomic_flag_test // C++ >= 20
    // Atomically read the flag without modifying it (C++20).
    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      // The target's "set" value may not be exactly 1, so compare against
      // the configured true value instead of converting to bool.
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }

    _GLIBCXX_ALWAYS_INLINE bool
    test(memory_order __m = memory_order_seq_cst) const volatile noexcept
    {
      __atomic_flag_data_type __v;
      __atomic_load(&_M_i, &__v, int(__m));
      return __v == __GCC_ATOMIC_TEST_AND_SET_TRUEVAL;
    }
#endif

#if __glibcxx_atomic_wait // C++ >= 20 && (linux_futex || gthread)
    // Block until the flag's value differs from __old (C++20 waiting).
    _GLIBCXX_ALWAYS_INLINE void
    wait(bool __old,
	 memory_order __m = memory_order_seq_cst) const noexcept
    {
      // Translate the bool into the representation actually stored.
      const __atomic_flag_data_type __v
	= __old ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0;

      std::__atomic_wait_address_v(&_M_i, __v,
	  [__m, this] { return __atomic_load_n(&_M_i, int(__m)); });
    }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_one() noexcept
    { std::__atomic_notify_address(&_M_i, false); }

    // TODO add const volatile overload

    _GLIBCXX_ALWAYS_INLINE void
    notify_all() noexcept
    { std::__atomic_notify_address(&_M_i, true); }

    // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

    // Atomically clear the flag. Orderings with an acquire component are
    // not permitted for a clear, hence the debug-mode assertions.
    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

    _GLIBCXX_ALWAYS_INLINE void
    clear(memory_order __m = memory_order_seq_cst) volatile noexcept
    {
      memory_order __b __attribute__ ((__unused__))
	= __m & __memory_order_mask;
      __glibcxx_assert(__b != memory_order_consume);
      __glibcxx_assert(__b != memory_order_acquire);
      __glibcxx_assert(__b != memory_order_acq_rel);

      __atomic_clear (&_M_i, int(__m));
    }

  private:
    // Map a bool to the target's test-and-set "true" representation.
    static constexpr __atomic_flag_data_type
    _S_init(bool __i)
    { return __i ? __GCC_ATOMIC_TEST_AND_SET_TRUEVAL : 0; }
  };
310
311 /// @cond undocumented
312
313 /// Base class for atomic integrals.
314 //
315 // For each of the integral types, define atomic_[integral type] struct
316 //
317 // atomic_bool bool
318 // atomic_char char
319 // atomic_schar signed char
320 // atomic_uchar unsigned char
321 // atomic_short short
322 // atomic_ushort unsigned short
323 // atomic_int int
324 // atomic_uint unsigned int
325 // atomic_long long
326 // atomic_ulong unsigned long
327 // atomic_llong long long
328 // atomic_ullong unsigned long long
329 // atomic_char8_t char8_t
330 // atomic_char16_t char16_t
331 // atomic_char32_t char32_t
332 // atomic_wchar_t wchar_t
333 //
334 // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or
335 // 8 bytes, since that is what GCC built-in functions for atomic
336 // memory access expect.
337 template<typename _ITp>
338 struct __atomic_base
339 {
340 using value_type = _ITp;
342
343 private:
344 typedef _ITp __int_type;
345
346 static constexpr int _S_alignment =
347 sizeof(_ITp) > alignof(_ITp) ? sizeof(_ITp) : alignof(_ITp);
348
349 alignas(_S_alignment) __int_type _M_i _GLIBCXX20_INIT(0);
350
351 public:
352 __atomic_base() noexcept = default;
353 ~__atomic_base() noexcept = default;
354 __atomic_base(const __atomic_base&) = delete;
355 __atomic_base& operator=(const __atomic_base&) = delete;
356 __atomic_base& operator=(const __atomic_base&) volatile = delete;
357
358 constexpr __atomic_base(__int_type __i) noexcept : _M_i (__i) { }
359
360 operator __int_type() const noexcept
361 { return load(); }
362
363 operator __int_type() const volatile noexcept
364 { return load(); }
365
366 __int_type
367 operator=(__int_type __i) noexcept
368 {
369 store(__i);
370 return __i;
371 }
372
373 __int_type
374 operator=(__int_type __i) volatile noexcept
375 {
376 store(__i);
377 return __i;
378 }
379
380 __int_type
381 operator++(int) noexcept
382 { return fetch_add(1); }
383
384 __int_type
385 operator++(int) volatile noexcept
386 { return fetch_add(1); }
387
388 __int_type
389 operator--(int) noexcept
390 { return fetch_sub(1); }
391
392 __int_type
393 operator--(int) volatile noexcept
394 { return fetch_sub(1); }
395
396 __int_type
397 operator++() noexcept
398 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
399
400 __int_type
401 operator++() volatile noexcept
402 { return __atomic_add_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
403
404 __int_type
405 operator--() noexcept
406 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
407
408 __int_type
409 operator--() volatile noexcept
410 { return __atomic_sub_fetch(&_M_i, 1, int(memory_order_seq_cst)); }
411
412 __int_type
413 operator+=(__int_type __i) noexcept
414 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
415
416 __int_type
417 operator+=(__int_type __i) volatile noexcept
418 { return __atomic_add_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
419
420 __int_type
421 operator-=(__int_type __i) noexcept
422 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
423
424 __int_type
425 operator-=(__int_type __i) volatile noexcept
426 { return __atomic_sub_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
427
428 __int_type
429 operator&=(__int_type __i) noexcept
430 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
431
432 __int_type
433 operator&=(__int_type __i) volatile noexcept
434 { return __atomic_and_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
435
436 __int_type
437 operator|=(__int_type __i) noexcept
438 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
439
440 __int_type
441 operator|=(__int_type __i) volatile noexcept
442 { return __atomic_or_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
443
444 __int_type
445 operator^=(__int_type __i) noexcept
446 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
447
448 __int_type
449 operator^=(__int_type __i) volatile noexcept
450 { return __atomic_xor_fetch(&_M_i, __i, int(memory_order_seq_cst)); }
451
452 bool
453 is_lock_free() const noexcept
454 {
455 // Use a fake, minimally aligned pointer.
456 return __atomic_is_lock_free(sizeof(_M_i),
457 reinterpret_cast<void *>(-_S_alignment));
458 }
459
460 bool
461 is_lock_free() const volatile noexcept
462 {
463 // Use a fake, minimally aligned pointer.
464 return __atomic_is_lock_free(sizeof(_M_i),
465 reinterpret_cast<void *>(-_S_alignment));
466 }
467
468 _GLIBCXX_ALWAYS_INLINE void
469 store(__int_type __i, memory_order __m = memory_order_seq_cst) noexcept
470 {
471 memory_order __b __attribute__ ((__unused__))
472 = __m & __memory_order_mask;
473 __glibcxx_assert(__b != memory_order_acquire);
474 __glibcxx_assert(__b != memory_order_acq_rel);
475 __glibcxx_assert(__b != memory_order_consume);
476
477 __atomic_store_n(&_M_i, __i, int(__m));
478 }
479
480 _GLIBCXX_ALWAYS_INLINE void
481 store(__int_type __i,
482 memory_order __m = memory_order_seq_cst) volatile noexcept
483 {
484 memory_order __b __attribute__ ((__unused__))
485 = __m & __memory_order_mask;
486 __glibcxx_assert(__b != memory_order_acquire);
487 __glibcxx_assert(__b != memory_order_acq_rel);
488 __glibcxx_assert(__b != memory_order_consume);
489
490 __atomic_store_n(&_M_i, __i, int(__m));
491 }
492
493 _GLIBCXX_ALWAYS_INLINE __int_type
494 load(memory_order __m = memory_order_seq_cst) const noexcept
495 {
496 memory_order __b __attribute__ ((__unused__))
497 = __m & __memory_order_mask;
498 __glibcxx_assert(__b != memory_order_release);
499 __glibcxx_assert(__b != memory_order_acq_rel);
500
501 return __atomic_load_n(&_M_i, int(__m));
502 }
503
504 _GLIBCXX_ALWAYS_INLINE __int_type
505 load(memory_order __m = memory_order_seq_cst) const volatile noexcept
506 {
507 memory_order __b __attribute__ ((__unused__))
508 = __m & __memory_order_mask;
509 __glibcxx_assert(__b != memory_order_release);
510 __glibcxx_assert(__b != memory_order_acq_rel);
511
512 return __atomic_load_n(&_M_i, int(__m));
513 }
514
515 _GLIBCXX_ALWAYS_INLINE __int_type
516 exchange(__int_type __i,
517 memory_order __m = memory_order_seq_cst) noexcept
518 {
519 return __atomic_exchange_n(&_M_i, __i, int(__m));
520 }
521
522
523 _GLIBCXX_ALWAYS_INLINE __int_type
524 exchange(__int_type __i,
525 memory_order __m = memory_order_seq_cst) volatile noexcept
526 {
527 return __atomic_exchange_n(&_M_i, __i, int(__m));
528 }
529
530 _GLIBCXX_ALWAYS_INLINE bool
531 compare_exchange_weak(__int_type& __i1, __int_type __i2,
532 memory_order __m1, memory_order __m2) noexcept
533 {
534 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
535
536 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
537 int(__m1), int(__m2));
538 }
539
540 _GLIBCXX_ALWAYS_INLINE bool
541 compare_exchange_weak(__int_type& __i1, __int_type __i2,
542 memory_order __m1,
543 memory_order __m2) volatile noexcept
544 {
545 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
546
547 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 1,
548 int(__m1), int(__m2));
549 }
550
551 _GLIBCXX_ALWAYS_INLINE bool
552 compare_exchange_weak(__int_type& __i1, __int_type __i2,
553 memory_order __m = memory_order_seq_cst) noexcept
554 {
555 return compare_exchange_weak(__i1, __i2, __m,
556 __cmpexch_failure_order(__m));
557 }
558
559 _GLIBCXX_ALWAYS_INLINE bool
560 compare_exchange_weak(__int_type& __i1, __int_type __i2,
561 memory_order __m = memory_order_seq_cst) volatile noexcept
562 {
563 return compare_exchange_weak(__i1, __i2, __m,
564 __cmpexch_failure_order(__m));
565 }
566
567 _GLIBCXX_ALWAYS_INLINE bool
568 compare_exchange_strong(__int_type& __i1, __int_type __i2,
569 memory_order __m1, memory_order __m2) noexcept
570 {
571 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
572
573 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
574 int(__m1), int(__m2));
575 }
576
577 _GLIBCXX_ALWAYS_INLINE bool
578 compare_exchange_strong(__int_type& __i1, __int_type __i2,
579 memory_order __m1,
580 memory_order __m2) volatile noexcept
581 {
582 __glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));
583
584 return __atomic_compare_exchange_n(&_M_i, &__i1, __i2, 0,
585 int(__m1), int(__m2));
586 }
587
588 _GLIBCXX_ALWAYS_INLINE bool
589 compare_exchange_strong(__int_type& __i1, __int_type __i2,
590 memory_order __m = memory_order_seq_cst) noexcept
591 {
592 return compare_exchange_strong(__i1, __i2, __m,
593 __cmpexch_failure_order(__m));
594 }
595
596 _GLIBCXX_ALWAYS_INLINE bool
597 compare_exchange_strong(__int_type& __i1, __int_type __i2,
598 memory_order __m = memory_order_seq_cst) volatile noexcept
599 {
600 return compare_exchange_strong(__i1, __i2, __m,
601 __cmpexch_failure_order(__m));
602 }
603
604#if __glibcxx_atomic_wait
605 _GLIBCXX_ALWAYS_INLINE void
606 wait(__int_type __old,
607 memory_order __m = memory_order_seq_cst) const noexcept
608 {
609 std::__atomic_wait_address_v(&_M_i, __old,
610 [__m, this] { return this->load(__m); });
611 }
612
613 // TODO add const volatile overload
614
615 _GLIBCXX_ALWAYS_INLINE void
616 notify_one() noexcept
617 { std::__atomic_notify_address(&_M_i, false); }
618
619 // TODO add const volatile overload
620
621 _GLIBCXX_ALWAYS_INLINE void
622 notify_all() noexcept
623 { std::__atomic_notify_address(&_M_i, true); }
624
625 // TODO add const volatile overload
626#endif // __glibcxx_atomic_wait
627
628 _GLIBCXX_ALWAYS_INLINE __int_type
629 fetch_add(__int_type __i,
630 memory_order __m = memory_order_seq_cst) noexcept
631 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
632
633 _GLIBCXX_ALWAYS_INLINE __int_type
634 fetch_add(__int_type __i,
635 memory_order __m = memory_order_seq_cst) volatile noexcept
636 { return __atomic_fetch_add(&_M_i, __i, int(__m)); }
637
638 _GLIBCXX_ALWAYS_INLINE __int_type
639 fetch_sub(__int_type __i,
640 memory_order __m = memory_order_seq_cst) noexcept
641 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
642
643 _GLIBCXX_ALWAYS_INLINE __int_type
644 fetch_sub(__int_type __i,
645 memory_order __m = memory_order_seq_cst) volatile noexcept
646 { return __atomic_fetch_sub(&_M_i, __i, int(__m)); }
647
648 _GLIBCXX_ALWAYS_INLINE __int_type
649 fetch_and(__int_type __i,
650 memory_order __m = memory_order_seq_cst) noexcept
651 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
652
653 _GLIBCXX_ALWAYS_INLINE __int_type
654 fetch_and(__int_type __i,
655 memory_order __m = memory_order_seq_cst) volatile noexcept
656 { return __atomic_fetch_and(&_M_i, __i, int(__m)); }
657
658 _GLIBCXX_ALWAYS_INLINE __int_type
659 fetch_or(__int_type __i,
660 memory_order __m = memory_order_seq_cst) noexcept
661 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
662
663 _GLIBCXX_ALWAYS_INLINE __int_type
664 fetch_or(__int_type __i,
665 memory_order __m = memory_order_seq_cst) volatile noexcept
666 { return __atomic_fetch_or(&_M_i, __i, int(__m)); }
667
668 _GLIBCXX_ALWAYS_INLINE __int_type
669 fetch_xor(__int_type __i,
670 memory_order __m = memory_order_seq_cst) noexcept
671 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
672
673 _GLIBCXX_ALWAYS_INLINE __int_type
674 fetch_xor(__int_type __i,
675 memory_order __m = memory_order_seq_cst) volatile noexcept
676 { return __atomic_fetch_xor(&_M_i, __i, int(__m)); }
677 };
678
679
680 /// Partial specialization for pointer types.
  template<typename _PTp>
    struct __atomic_base<_PTp*>
    {
    private:
      typedef _PTp* __pointer_type;

      __pointer_type _M_p _GLIBCXX20_INIT(nullptr);

      // Scale an element count to bytes: the built-ins operate on byte
      // offsets, while the public interface counts elements of _PTp.
      static constexpr ptrdiff_t
      _S_type_size(ptrdiff_t __d)
      { return __d * sizeof(_PTp); }

    public:
      __atomic_base() noexcept = default;
      ~__atomic_base() noexcept = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) volatile = delete;

      // Requires __pointer_type convertible to _M_p.
      constexpr __atomic_base(__pointer_type __p) noexcept : _M_p (__p) { }

      operator __pointer_type() const noexcept
      { return load(); }

      operator __pointer_type() const volatile noexcept
      { return load(); }

      __pointer_type
      operator=(__pointer_type __p) noexcept
      {
	store(__p);
	return __p;
      }

      __pointer_type
      operator=(__pointer_type __p) volatile noexcept
      {
	store(__p);
	return __p;
      }

      // Post-increment/decrement return the previous pointer (fetch_*).
      __pointer_type
      operator++(int) noexcept
      { return fetch_add(1); }

      __pointer_type
      operator++(int) volatile noexcept
      { return fetch_add(1); }

      __pointer_type
      operator--(int) noexcept
      { return fetch_sub(1); }

      __pointer_type
      operator--(int) volatile noexcept
      { return fetch_sub(1); }

      // Pre-increment/decrement and compound assignments return the new
      // pointer, using the __atomic_op_fetch built-ins with seq_cst.
      __pointer_type
      operator++() noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator++() volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator--() volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(1),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator+=(ptrdiff_t __d) volatile noexcept
      { return __atomic_add_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      __pointer_type
      operator-=(ptrdiff_t __d) volatile noexcept
      { return __atomic_sub_fetch(&_M_p, _S_type_size(__d),
				  int(memory_order_seq_cst)); }

      bool
      is_lock_free() const noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
				     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      bool
      is_lock_free() const volatile noexcept
      {
	// Produce a fake, minimally aligned pointer.
	return __atomic_is_lock_free(sizeof(_M_p),
				     reinterpret_cast<void *>(-__alignof(_M_p)));
      }

      // A store may not use an ordering with an acquire component.
      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;

	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE void
      store(__pointer_type __p,
	    memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_acquire);
	__glibcxx_assert(__b != memory_order_acq_rel);
	__glibcxx_assert(__b != memory_order_consume);

	__atomic_store_n(&_M_p, __p, int(__m));
      }

      // A load may not use an ordering with a release component.
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      {
	memory_order __b __attribute__ ((__unused__))
	  = __m & __memory_order_mask;
	__glibcxx_assert(__b != memory_order_release);
	__glibcxx_assert(__b != memory_order_acq_rel);

	return __atomic_load_n(&_M_p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      exchange(__pointer_type __p,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      {
	return __atomic_exchange_n(&_M_p, __p, int(__m));
      }

      // On failure __p1 is updated with the observed pointer. The failure
      // ordering must not contain a release component (asserted below).
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(__pointer_type& __p1, __pointer_type __p2,
			    memory_order __m1,
			    memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 1,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(__pointer_type& __p1, __pointer_type __p2,
			      memory_order __m1,
			      memory_order __m2) volatile noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__m2));

	return __atomic_compare_exchange_n(&_M_p, &__p1, __p2, 0,
					   int(__m1), int(__m2));
      }

#if __glibcxx_atomic_wait
      // Block until the stored pointer differs from __old (C++20).
      _GLIBCXX_ALWAYS_INLINE void
      wait(__pointer_type __old,
	   memory_order __m = memory_order_seq_cst) const noexcept
      {
	std::__atomic_wait_address_v(&_M_p, __old,
				     [__m, this]
				     { return this->load(__m); });
      }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { std::__atomic_notify_address(&_M_p, false); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { std::__atomic_notify_address(&_M_p, true); }

      // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

      // fetch_add/fetch_sub count in elements and return the old pointer.
      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_add(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_add(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }

      _GLIBCXX_ALWAYS_INLINE __pointer_type
      fetch_sub(ptrdiff_t __d,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_fetch_sub(&_M_p, _S_type_size(__d), int(__m)); }
    };
948
  namespace __atomic_impl
  {
    // Implementation details of atomic padding handling

    // True if objects of _Tp may contain padding bits, which would make a
    // bytewise comparison of equal values unreliable. float and double
    // are excluded explicitly even though they may not have unique object
    // representations (e.g. because of NaNs).
    template<typename _Tp>
      constexpr bool
      __maybe_has_padding()
      {
#if ! __has_builtin(__builtin_clear_padding)
	// Without the builtin we cannot clear padding anyway.
	return false;
#elif __has_builtin(__has_unique_object_representations)
	return !__has_unique_object_representations(_Tp)
	  && !is_same<_Tp, float>::value && !is_same<_Tp, double>::value;
#else
	// Cannot tell; conservatively assume padding is possible.
	return true;
#endif
      }

#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wc++17-extensions"

    // Zero any padding bits of __val in place and return its address.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _GLIBCXX14_CONSTEXPR _Tp*
      __clear_padding(_Tp& __val) noexcept
      {
	auto* __ptr = std::__addressof(__val);
#if __has_builtin(__builtin_clear_padding)
	if constexpr (__atomic_impl::__maybe_has_padding<_Tp>())
	  __builtin_clear_padding(__ptr);
#endif
	return __ptr;
      }

    // Remove volatile and create a non-deduced context for value arguments.
    template<typename _Tp>
      using _Val = typename remove_volatile<_Tp>::type;

    // Padding-aware compare-exchange on __val. _AtomicRef selects the
    // std::atomic_ref strategy, where the stored object's padding cannot
    // be assumed to be already zeroed.
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      __compare_exchange(_Tp& __val, _Val<_Tp>& __e, _Val<_Tp>& __i,
			 bool __is_weak,
			 memory_order __s, memory_order __f) noexcept
      {
	__glibcxx_assert(__is_valid_cmpexch_failure_order(__f));

	using _Vp = _Val<_Tp>;
	_Tp* const __pval = std::__addressof(__val);

	if constexpr (!__atomic_impl::__maybe_has_padding<_Vp>())
	  {
	    // No padding: the built-in's bytewise comparison is exact.
	    return __atomic_compare_exchange(__pval, std::__addressof(__e),
					     std::__addressof(__i), __is_weak,
					     int(__s), int(__f));
	  }
	else if constexpr (!_AtomicRef) // std::atomic<T>
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);
	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Clear padding of the expected value:
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // For std::atomic<T> we know that the contained value will already
	    // have zeroed padding, so trivial memcmp semantics are OK.
	    if (__atomic_compare_exchange(__pval, __pexp, __pi,
					  __is_weak, int(__s), int(__f)))
	      return true;
	    // Value bits must be different, copy from __exp back to __e:
	    __builtin_memcpy(std::__addressof(__e), __pexp, sizeof(_Vp));
	    return false;
	  }
	else // std::atomic_ref<T> where T has padding bits.
	  {
	    // Clear padding of the value we want to set:
	    _Vp* const __pi = __atomic_impl::__clear_padding(__i);

	    // Only allowed to modify __e on failure, so make a copy:
	    _Vp __exp = __e;
	    // Optimistically assume that a previous store had zeroed padding
	    // so that zeroing it in the expected value will match first time.
	    _Vp* const __pexp = __atomic_impl::__clear_padding(__exp);

	    // compare_exchange is specified to compare value representations.
	    // Need to check whether a failure is 'real' or just due to
	    // differences in padding bits. This loop should run no more than
	    // three times, because the worst case scenario is:
	    // First CAS fails because the actual value has non-zero padding.
	    // Second CAS fails because another thread stored the same value,
	    // but now with padding cleared. Third CAS succeeds.
	    // We will never need to loop a fourth time, because any value
	    // written by another thread (whether via store, exchange or
	    // compare_exchange) will have had its padding cleared.
	    while (true)
	      {
		// Copy of the expected value so we can clear its padding.
		_Vp __orig = __exp;

		if (__atomic_compare_exchange(__pval, __pexp, __pi,
					      __is_weak, int(__s), int(__f)))
		  return true;

		// Copy of the actual value so we can clear its padding.
		_Vp __curr = __exp;

		// Compare value representations (i.e. ignoring padding).
		if (__builtin_memcmp(__atomic_impl::__clear_padding(__orig),
				     __atomic_impl::__clear_padding(__curr),
				     sizeof(_Vp)))
		  {
		    // Value representations compare unequal, real failure.
		    __builtin_memcpy(std::__addressof(__e), __pexp,
				     sizeof(_Vp));
		    return false;
		  }
	      }
	  }
      }
#pragma GCC diagnostic pop
  } // namespace __atomic_impl
1069
1070#if __cplusplus > 201703L
1071 // Implementation details of atomic_ref and atomic<floating-point>.
1072 namespace __atomic_impl
1073 {
    // Like _Val<T> above, but for difference_type arguments: pointers use
    // ptrdiff_t as their difference type, everything else uses _Val<_Tp>.
    template<typename _Tp>
      using _Diff = __conditional_t<is_pointer_v<_Tp>, ptrdiff_t, _Val<_Tp>>;
1077
1078 template<size_t _Size, size_t _Align>
1079 _GLIBCXX_ALWAYS_INLINE bool
1080 is_lock_free() noexcept
1081 {
1082 // Produce a fake, minimally aligned pointer.
1083 return __atomic_is_lock_free(_Size, reinterpret_cast<void *>(-_Align));
1084 }
1085
    // Atomic store of __t into *__ptr with memory order __m.
    // Padding bits of __t are zeroed first, so subsequent compare-exchange
    // operations can rely on bytewise (memcmp) equality.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      store(_Tp* __ptr, _Val<_Tp> __t, memory_order __m) noexcept
      {
	__atomic_store(__ptr, __atomic_impl::__clear_padding(__t), int(__m));
      }
1092
    // Atomic load from *__ptr with memory order __m.
    // Reads into a raw aligned buffer instead of declaring a _Val<_Tp>
    // object, so no default construction of the value type is required.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      load(const _Tp* __ptr, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_load(__ptr, __dest, int(__m));
	return *__dest;
      }
1102
    // Atomic exchange: store __desired (with padding cleared) into *__ptr
    // and return the previous value.  Uses a raw buffer for the result,
    // as in load() above.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Val<_Tp>
      exchange(_Tp* __ptr, _Val<_Tp> __desired, memory_order __m) noexcept
      {
	alignas(_Tp) unsigned char __buf[sizeof(_Tp)];
	auto* __dest = reinterpret_cast<_Val<_Tp>*>(__buf);
	__atomic_exchange(__ptr, __atomic_impl::__clear_padding(__desired),
			  __dest, int(__m));
	return *__dest;
      }
1113
    // Weak CAS (may fail spuriously).  _AtomicRef is true when called from
    // atomic_ref<T>, where the referenced object's padding bits may not be
    // zeroed and __compare_exchange must compare value representations.
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_weak(_Tp* __ptr, _Val<_Tp>& __expected,
			    _Val<_Tp> __desired, memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
	  *__ptr, __expected, __desired, true, __success, __failure);
      }
1123
    // Strong CAS (no spurious failure).  See compare_exchange_weak above
    // for the meaning of the _AtomicRef template parameter.
    template<bool _AtomicRef = false, typename _Tp>
      _GLIBCXX_ALWAYS_INLINE bool
      compare_exchange_strong(_Tp* __ptr, _Val<_Tp>& __expected,
			      _Val<_Tp> __desired, memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::__compare_exchange<_AtomicRef>(
	  *__ptr, __expected, __desired, false, __success, __failure);
      }
1133
1134#if __glibcxx_atomic_wait
    // Block until the value at *__ptr is observed to differ from __old.
    // The polling load uses memory order __m.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      wait(const _Tp* __ptr, _Val<_Tp> __old,
	   memory_order __m = memory_order_seq_cst) noexcept
      {
	std::__atomic_wait_address_v(__ptr, __old,
	    [__ptr, __m]() { return __atomic_impl::load(__ptr, __m); });
      }
1143
1144 // TODO add const volatile overload
1145
    // Wake at most one thread blocked in wait() on *__ptr.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_one(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, false); }
1150
1151 // TODO add const volatile overload
1152
    // Wake all threads blocked in wait() on *__ptr.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE void
      notify_all(const _Tp* __ptr) noexcept
      { std::__atomic_notify_address(__ptr, true); }
1157
1158 // TODO add const volatile overload
1159#endif // __glibcxx_atomic_wait
1160
    // Atomic add; returns the value *before* the addition.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_add(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_add(__ptr, __i, int(__m)); }
1165
    // Atomic subtract; returns the value *before* the subtraction.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_sub(_Tp* __ptr, _Diff<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_sub(__ptr, __i, int(__m)); }
1170
    // Atomic bitwise AND; returns the value *before* the operation.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_and(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_and(__ptr, __i, int(__m)); }
1175
    // Atomic bitwise OR; returns the value *before* the operation.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_or(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_or(__ptr, __i, int(__m)); }
1180
    // Atomic bitwise XOR; returns the value *before* the operation.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      fetch_xor(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
      { return __atomic_fetch_xor(__ptr, __i, int(__m)); }
1185
    // Atomic add returning the *new* value; always seq_cst.
    // Used by the ++/+= operators of atomic_ref.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __add_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1190
    // Atomic subtract returning the *new* value; always seq_cst.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __sub_fetch(_Tp* __ptr, _Diff<_Tp> __i) noexcept
      { return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1195
    // Atomic AND returning the *new* value; always seq_cst.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __and_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_and_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1200
    // Atomic OR returning the *new* value; always seq_cst.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __or_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_or_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1205
    // Atomic XOR returning the *new* value; always seq_cst.
    template<typename _Tp>
      _GLIBCXX_ALWAYS_INLINE _Tp
      __xor_fetch(_Tp* __ptr, _Val<_Tp> __i) noexcept
      { return __atomic_xor_fetch(__ptr, __i, __ATOMIC_SEQ_CST); }
1210
    // Satisfied when the compiler accepts __atomic_fetch_add for _Tp
    // (i.e. a hardware/libatomic fetch-add exists for this type).
    template<typename _Tp>
      concept __atomic_fetch_addable
	= requires (_Tp __t) { __atomic_fetch_add(&__t, __t, 0); };
1214
1215 template<typename _Tp>
1216 _Tp
1217 __fetch_add_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1218 {
1219 if constexpr (__atomic_fetch_addable<_Tp>)
1220 return __atomic_fetch_add(__ptr, __i, int(__m));
1221 else
1222 {
1223 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1224 _Val<_Tp> __newval = __oldval + __i;
1225 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1226 memory_order_relaxed))
1227 __newval = __oldval + __i;
1228 return __oldval;
1229 }
1230 }
1231
    // Satisfied when the compiler accepts __atomic_fetch_sub for _Tp.
    template<typename _Tp>
      concept __atomic_fetch_subtractable
	= requires (_Tp __t) { __atomic_fetch_sub(&__t, __t, 0); };
1235
1236 template<typename _Tp>
1237 _Tp
1238 __fetch_sub_flt(_Tp* __ptr, _Val<_Tp> __i, memory_order __m) noexcept
1239 {
1240 if constexpr (__atomic_fetch_subtractable<_Tp>)
1241 return __atomic_fetch_sub(__ptr, __i, int(__m));
1242 else
1243 {
1244 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1245 _Val<_Tp> __newval = __oldval - __i;
1246 while (!compare_exchange_weak (__ptr, __oldval, __newval, __m,
1247 memory_order_relaxed))
1248 __newval = __oldval - __i;
1249 return __oldval;
1250 }
1251 }
1252
    // Satisfied when the compiler accepts __atomic_add_fetch for _Tp.
    template<typename _Tp>
      concept __atomic_add_fetchable
	= requires (_Tp __t) { __atomic_add_fetch(&__t, __t, 0); };
1256
1257 template<typename _Tp>
1258 _Tp
1259 __add_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1260 {
1261 if constexpr (__atomic_add_fetchable<_Tp>)
1262 return __atomic_add_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1263 else
1264 {
1265 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1266 _Val<_Tp> __newval = __oldval + __i;
1267 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1268 memory_order_seq_cst,
1269 memory_order_relaxed))
1270 __newval = __oldval + __i;
1271 return __newval;
1272 }
1273 }
1274
    // Satisfied when the compiler accepts __atomic_sub_fetch for _Tp.
    template<typename _Tp>
      concept __atomic_sub_fetchable
	= requires (_Tp __t) { __atomic_sub_fetch(&__t, __t, 0); };
1278
1279 template<typename _Tp>
1280 _Tp
1281 __sub_fetch_flt(_Tp* __ptr, _Val<_Tp> __i) noexcept
1282 {
1283 if constexpr (__atomic_sub_fetchable<_Tp>)
1284 return __atomic_sub_fetch(__ptr, __i, __ATOMIC_SEQ_CST);
1285 else
1286 {
1287 _Val<_Tp> __oldval = load (__ptr, memory_order_relaxed);
1288 _Val<_Tp> __newval = __oldval - __i;
1289 while (!compare_exchange_weak (__ptr, __oldval, __newval,
1290 memory_order_seq_cst,
1291 memory_order_relaxed))
1292 __newval = __oldval - __i;
1293 return __newval;
1294 }
1295 }
1296 } // namespace __atomic_impl
1297
  // base class for atomic<floating-point-type>
  // Provides the full std::atomic interface for floating-point types,
  // including volatile overloads.  Padding bits (e.g. x86 80-bit long
  // double) are zeroed on construction and on every store so that the
  // compare-exchange operations can use bytewise comparison.
  template<typename _Fp>
    struct __atomic_float
    {
      static_assert(is_floating_point_v<_Fp>);

      // ABI alignment of _Fp; may be stricter than alignof(_Fp).
      static constexpr size_t _S_alignment = __alignof__(_Fp);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      __atomic_float() = default;

      constexpr
      __atomic_float(_Fp __t) : _M_fp(__t)
      {
	// Zero the padding bits so later CAS can compare bytewise.
	// Skipped during constant evaluation (no atomics there anyway).
	if (!std::__is_constant_evaluated())
	  __atomic_impl::__clear_padding(_M_fp);
      }

      __atomic_float(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) = delete;
      __atomic_float& operator=(const __atomic_float&) volatile = delete;

      // Atomic assignment from a plain value; returns the stored value.
      _Fp
      operator=(_Fp __t) volatile noexcept
      {
	this->store(__t);
	return __t;
      }

      _Fp
      operator=(_Fp __t) noexcept
      {
	this->store(__t);
	return __t;
      }

      bool
      is_lock_free() const volatile noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Fp), _S_alignment>(); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) volatile noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) noexcept
      { __atomic_impl::store(&_M_fp, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const volatile noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(&_M_fp, __m); }

      // Implicit conversion performs a seq_cst load.
      operator _Fp() const volatile noexcept { return this->load(); }
      operator _Fp() const noexcept { return this->load(); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::exchange(&_M_fp, __desired, __m); }

      // Four-argument CAS overloads with explicit success/failure orders.
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_weak(&_M_fp,
						    __expected, __desired,
						    __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) volatile noexcept
      {
	return __atomic_impl::compare_exchange_strong(&_M_fp,
						      __expected, __desired,
						      __success, __failure);
      }

      // Single-order CAS overloads; the failure order is derived from
      // __order via __cmpexch_failure_order.
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      volatile noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(&_M_fp, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(&_M_fp); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(&_M_fp); }

      // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

      // Arithmetic; the _flt helpers fall back to a CAS loop when no
      // hardware fetch-add/sub exists for floating-point types.
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_add_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) volatile noexcept
      { return __atomic_impl::__fetch_sub_flt(&_M_fp, __i, __m); }

      // Compound assignment returns the *new* value (seq_cst).
      value_type
      operator+=(value_type __i) noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator+=(value_type __i) volatile noexcept
      { return __atomic_impl::__add_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

      value_type
      operator-=(value_type __i) volatile noexcept
      { return __atomic_impl::__sub_fetch_flt(&_M_fp, __i); }

    private:
      alignas(_S_alignment) _Fp _M_fp _GLIBCXX20_INIT(0);
    };
1512#undef _GLIBCXX20_INIT
1513
  // Primary declaration; the bool parameters select the partial
  // specialization: integral (true,false), floating-point (false,true),
  // or the generic/pointer forms (false,false).
  template<typename _Tp,
	   bool = is_integral_v<_Tp> && !is_same_v<_Tp, bool>,
	   bool = is_floating_point_v<_Tp>>
    struct __atomic_ref;
1518
  // base class for non-integral, non-floating-point, non-pointer types
  template<typename _Tp>
    struct __atomic_ref<_Tp, false, false>
    {
      static_assert(is_trivially_copyable_v<_Tp>);

      // 1/2/4/8/16-byte types must be aligned to at least their size.
      // The first operand of || is non-zero iff sizeof(_Tp) is not a
      // power of two.
      static constexpr int _S_min_alignment
	= (sizeof(_Tp) & (sizeof(_Tp) - 1)) || sizeof(_Tp) > 16
	? 0 : sizeof(_Tp);

    public:
      using value_type = _Tp;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      static constexpr size_t required_alignment
	= _S_min_alignment > alignof(_Tp) ? _S_min_alignment : alignof(_Tp);

      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(std::__addressof(__t))
      {
	// Callers must provide a suitably aligned object.
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      // Atomic assignment through the reference; returns the stored value.
      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      { return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>(); }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired, memory_order __m = memory_order_seq_cst)
      const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      // The <true> argument tells __compare_exchange the referenced
      // object may have dirty padding bits (see __atomic_impl above).
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

    private:
      _Tp* _M_ptr;
    };
1634
  // base class for atomic_ref<integral-type>
  template<typename _Tp>
    struct __atomic_ref<_Tp, true, false>
    {
      static_assert(is_integral_v<_Tp>);

    public:
      using value_type = _Tp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Tp), 0);

      // Integral types must be aligned to at least their size to be
      // usable with the atomic builtins.
      static constexpr size_t required_alignment
	= sizeof(_Tp) > alignof(_Tp) ? sizeof(_Tp) : alignof(_Tp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Tp& __t) : _M_ptr(&__t)
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      // Atomic assignment through the reference; returns the stored value.
      _Tp
      operator=(_Tp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Tp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Tp), required_alignment>();
      }

      void
      store(_Tp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Tp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Tp
      exchange(_Tp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      // <true>: the referenced object may have dirty padding bits.
      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Tp& __expected, _Tp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Tp& __expected, _Tp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Tp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

      // fetch_* return the value held *before* the operation.
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_add(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_sub(_M_ptr, __i, __m); }

      value_type
      fetch_and(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_and(_M_ptr, __i, __m); }

      value_type
      fetch_or(value_type __i,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_or(_M_ptr, __i, __m); }

      value_type
      fetch_xor(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::fetch_xor(_M_ptr, __i, __m); }

      // Post-increment/decrement return the *old* value.
      _GLIBCXX_ALWAYS_INLINE value_type
      operator++(int) const noexcept
      { return fetch_add(1); }

      _GLIBCXX_ALWAYS_INLINE value_type
      operator--(int) const noexcept
      { return fetch_sub(1); }

      // Pre-increment/decrement and compound assignment return the *new*
      // value (seq_cst via the __*_fetch helpers).
      value_type
      operator++() const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, value_type(1)); }

      value_type
      operator--() const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, value_type(1)); }

      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch(_M_ptr, __i); }

      value_type
      operator&=(value_type __i) const noexcept
      { return __atomic_impl::__and_fetch(_M_ptr, __i); }

      value_type
      operator|=(value_type __i) const noexcept
      { return __atomic_impl::__or_fetch(_M_ptr, __i); }

      value_type
      operator^=(value_type __i) const noexcept
      { return __atomic_impl::__xor_fetch(_M_ptr, __i); }

    private:
      _Tp* _M_ptr;
    };
1810
  // base class for atomic_ref<floating-point-type>
  template<typename _Fp>
    struct __atomic_ref<_Fp, false, true>
    {
      static_assert(is_floating_point_v<_Fp>);

    public:
      using value_type = _Fp;
      using difference_type = value_type;

      static constexpr bool is_always_lock_free
	= __atomic_always_lock_free(sizeof(_Fp), 0);

      // ABI alignment of _Fp; may be stricter than alignof(_Fp).
      static constexpr size_t required_alignment = __alignof__(_Fp);

      __atomic_ref() = delete;
      __atomic_ref& operator=(const __atomic_ref&) = delete;

      explicit
      __atomic_ref(_Fp& __t) : _M_ptr(&__t)
      {
	__glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
      }

      __atomic_ref(const __atomic_ref&) noexcept = default;

      // Atomic assignment through the reference; returns the stored value.
      _Fp
      operator=(_Fp __t) const noexcept
      {
	this->store(__t);
	return __t;
      }

      operator _Fp() const noexcept { return this->load(); }

      bool
      is_lock_free() const noexcept
      {
	return __atomic_impl::is_lock_free<sizeof(_Fp), required_alignment>();
      }

      void
      store(_Fp __t, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::store(_M_ptr, __t, __m); }

      _Fp
      load(memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::load(_M_ptr, __m); }

      _Fp
      exchange(_Fp __desired,
	       memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::exchange(_M_ptr, __desired, __m); }

      // <true>: the referenced object may have dirty padding bits.
      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __success,
			    memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_weak<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __success,
			      memory_order __failure) const noexcept
      {
	return __atomic_impl::compare_exchange_strong<true>(
	  _M_ptr, __expected, __desired, __success, __failure);
      }

      bool
      compare_exchange_weak(_Fp& __expected, _Fp __desired,
			    memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_weak(__expected, __desired, __order,
				     __cmpexch_failure_order(__order));
      }

      bool
      compare_exchange_strong(_Fp& __expected, _Fp __desired,
			      memory_order __order = memory_order_seq_cst)
      const noexcept
      {
	return compare_exchange_strong(__expected, __desired, __order,
				       __cmpexch_failure_order(__order));
      }

#if __glibcxx_atomic_wait
      _GLIBCXX_ALWAYS_INLINE void
      wait(_Fp __old, memory_order __m = memory_order_seq_cst) const noexcept
      { __atomic_impl::wait(_M_ptr, __old, __m); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_one() const noexcept
      { __atomic_impl::notify_one(_M_ptr); }

      // TODO add const volatile overload

      _GLIBCXX_ALWAYS_INLINE void
      notify_all() const noexcept
      { __atomic_impl::notify_all(_M_ptr); }

      // TODO add const volatile overload
#endif // __glibcxx_atomic_wait

      // The _flt helpers fall back to a CAS loop when no hardware
      // fetch-add/sub exists for floating-point types.
      value_type
      fetch_add(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_add_flt(_M_ptr, __i, __m); }

      value_type
      fetch_sub(value_type __i,
		memory_order __m = memory_order_seq_cst) const noexcept
      { return __atomic_impl::__fetch_sub_flt(_M_ptr, __i, __m); }

      // Compound assignment returns the *new* value (seq_cst).
      value_type
      operator+=(value_type __i) const noexcept
      { return __atomic_impl::__add_fetch_flt(_M_ptr, __i); }

      value_type
      operator-=(value_type __i) const noexcept
      { return __atomic_impl::__sub_fetch_flt(_M_ptr, __i); }

    private:
      _Fp* _M_ptr;
    };
1942
1943 // base class for atomic_ref<pointer-type>
1944 template<typename _Tp>
1945 struct __atomic_ref<_Tp*, false, false>
1946 {
1947 public:
1948 using value_type = _Tp*;
1949 using difference_type = ptrdiff_t;
1950
1951 static constexpr bool is_always_lock_free = ATOMIC_POINTER_LOCK_FREE == 2;
1952
1953 static constexpr size_t required_alignment = __alignof__(_Tp*);
1954
1955 __atomic_ref() = delete;
1956 __atomic_ref& operator=(const __atomic_ref&) = delete;
1957
1958 explicit
1959 __atomic_ref(_Tp*& __t) : _M_ptr(std::__addressof(__t))
1960 {
1961 __glibcxx_assert(((__UINTPTR_TYPE__)_M_ptr % required_alignment) == 0);
1962 }
1963
1964 __atomic_ref(const __atomic_ref&) noexcept = default;
1965
1966 _Tp*
1967 operator=(_Tp* __t) const noexcept
1968 {
1969 this->store(__t);
1970 return __t;
1971 }
1972
1973 operator _Tp*() const noexcept { return this->load(); }
1974
1975 bool
1976 is_lock_free() const noexcept
1977 {
1978 return __atomic_impl::is_lock_free<sizeof(_Tp*), required_alignment>();
1979 }
1980
1981 void
1982 store(_Tp* __t, memory_order __m = memory_order_seq_cst) const noexcept
1983 { __atomic_impl::store(_M_ptr, __t, __m); }
1984
1985 _Tp*
1986 load(memory_order __m = memory_order_seq_cst) const noexcept
1987 { return __atomic_impl::load(_M_ptr, __m); }
1988
1989 _Tp*
1990 exchange(_Tp* __desired,
1991 memory_order __m = memory_order_seq_cst) const noexcept
1992 { return __atomic_impl::exchange(_M_ptr, __desired, __m); }
1993
1994 bool
1995 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
1996 memory_order __success,
1997 memory_order __failure) const noexcept
1998 {
1999 return __atomic_impl::compare_exchange_weak<true>(
2000 _M_ptr, __expected, __desired, __success, __failure);
2001 }
2002
2003 bool
2004 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2005 memory_order __success,
2006 memory_order __failure) const noexcept
2007 {
2008 return __atomic_impl::compare_exchange_strong<true>(
2009 _M_ptr, __expected, __desired, __success, __failure);
2010 }
2011
2012 bool
2013 compare_exchange_weak(_Tp*& __expected, _Tp* __desired,
2014 memory_order __order = memory_order_seq_cst)
2015 const noexcept
2016 {
2017 return compare_exchange_weak(__expected, __desired, __order,
2018 __cmpexch_failure_order(__order));
2019 }
2020
2021 bool
2022 compare_exchange_strong(_Tp*& __expected, _Tp* __desired,
2023 memory_order __order = memory_order_seq_cst)
2024 const noexcept
2025 {
2026 return compare_exchange_strong(__expected, __desired, __order,
2027 __cmpexch_failure_order(__order));
2028 }
2029
2030#if __glibcxx_atomic_wait
2031 _GLIBCXX_ALWAYS_INLINE void
2032 wait(_Tp* __old, memory_order __m = memory_order_seq_cst) const noexcept
2033 { __atomic_impl::wait(_M_ptr, __old, __m); }
2034
2035 // TODO add const volatile overload
2036
2037 _GLIBCXX_ALWAYS_INLINE void
2038 notify_one() const noexcept
2039 { __atomic_impl::notify_one(_M_ptr); }
2040
2041 // TODO add const volatile overload
2042
2043 _GLIBCXX_ALWAYS_INLINE void
2044 notify_all() const noexcept
2045 { __atomic_impl::notify_all(_M_ptr); }
2046
2047 // TODO add const volatile overload
2048#endif // __glibcxx_atomic_wait
2049
2050 _GLIBCXX_ALWAYS_INLINE value_type
2051 fetch_add(difference_type __d,
2052 memory_order __m = memory_order_seq_cst) const noexcept
2053 { return __atomic_impl::fetch_add(_M_ptr, _S_type_size(__d), __m); }
2054
2055 _GLIBCXX_ALWAYS_INLINE value_type
2056 fetch_sub(difference_type __d,
2057 memory_order __m = memory_order_seq_cst) const noexcept
2058 { return __atomic_impl::fetch_sub(_M_ptr, _S_type_size(__d), __m); }
2059
2061 operator++(int) const noexcept
2062 { return fetch_add(1); }
2063
2065 operator--(int) const noexcept
2066 { return fetch_sub(1); }
2067
2069 operator++() const noexcept
2070 {
2071 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(1));
2072 }
2073
2075 operator--() const noexcept
2076 {
2077 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(1));
2078 }
2079
2081 operator+=(difference_type __d) const noexcept
2082 {
2083 return __atomic_impl::__add_fetch(_M_ptr, _S_type_size(__d));
2084 }
2085
2087 operator-=(difference_type __d) const noexcept
2088 {
2089 return __atomic_impl::__sub_fetch(_M_ptr, _S_type_size(__d));
2090 }
2091
2092 private:
2093 static constexpr ptrdiff_t
2094 _S_type_size(ptrdiff_t __d) noexcept
2095 {
2096 static_assert(is_object_v<_Tp>);
2097 return __d * sizeof(_Tp);
2098 }
2099
2100 _Tp** _M_ptr;
2101 };
2102#endif // C++2a
2103
2104 /// @endcond
2105
2106 /// @} group atomics
2107
2108_GLIBCXX_END_NAMESPACE_VERSION
2109} // namespace std
2110
2111#endif
constexpr _Tp * __addressof(_Tp &__r) noexcept
Same as C++11 std::addressof.
Definition move.h:52
_Tp kill_dependency(_Tp __y) noexcept
kill_dependency
memory_order
Enumeration for memory_order.
Definition atomic_base.h:66
ISO C++ entities toplevel namespace is std.
constexpr bitset< _Nb > operator|(const bitset< _Nb > &__x, const bitset< _Nb > &__y) noexcept
Global bitwise operations on bitsets.
Definition bitset:1572
constexpr bitset< _Nb > operator&(const bitset< _Nb > &__x, const bitset< _Nb > &__y) noexcept
Global bitwise operations on bitsets.
Definition bitset:1562