#ifndef _GLIBCXX_ATOMIC_0_H
#define _GLIBCXX_ATOMIC_0_H 1

#pragma GCC system_header
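
// Overview note: the macros below implement the "locked" fallback for the
// atomic operations.  Each operation looks up a guard __atomic_flag_base for
// the object's address (__atomic_flag_for_address), busy-waits on it, performs
// a plain read/write while the flag is held, and then clears the flag.  None
// of the types defined here is lock-free.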

// Locked load: acquire the guard flag for the object's address, read the
// value, release the flag, and yield the value read.
#define _ATOMIC_LOAD_(__a, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p; \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })

// Locked store: acquire the guard flag, write the new value, release the
// flag, and yield the value stored.
#define _ATOMIC_STORE_(__a, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__m) __v = (__m); \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     *__p = __v; \
     atomic_flag_clear_explicit(__g, __x); \
     __v; })

// Locked read-modify-write: __o is the assignment operator to apply
// (=, +=, -=, &=, |= or ^=); the previous value is yielded.
#define _ATOMIC_MODIFY_(__a, __o, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__m) __v = (__m); \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __r = *__p; \
     *__p __o __v; \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })

// Locked compare-and-exchange: if the current value equals *__e the new
// value is stored, otherwise *__e is updated with the current value; the
// result of the comparison is yielded.
#define _ATOMIC_CMPEXCHNG_(__a, __e, __m, __x) \
  ({ volatile __typeof__ _ATOMIC_MEMBER_* __p = &_ATOMIC_MEMBER_; \
     __typeof__(__e) __q = (__e); \
     __typeof__(__m) __v = (__m); \
     bool __r; \
     volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p); \
     __atomic_flag_wait_explicit(__g, __x); \
     __typeof__ _ATOMIC_MEMBER_ __t__ = *__p; \
     if (__t__ == *__q) { *__p = __v; __r = true; } \
     else { *__q = __t__; __r = false; } \
     atomic_flag_clear_explicit(__g, __x); \
     __r; })
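
// Illustrative sketch (not part of the header): with _ATOMIC_MEMBER_ naming
// the value member _M_i, a call such as
//   _ATOMIC_MODIFY_(this, +=, __i, __m)
// behaves roughly like
//   __atomic_flag_wait_explicit(__g, __m);   // lock
//   __r = _M_i;                              // remember old value
//   _M_i += __i;                             // apply the operator token
//   atomic_flag_clear_explicit(__g, __m);    // unlock
//   /* yields __r, the value before the update */
// which is why fetch_add/fetch_sub/fetch_and/fetch_or/fetch_xor below can all
// be expressed with the same macro and a different operator token.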

  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag(bool __i) : __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile;

    void
    clear(memory_order __m = memory_order_seq_cst) volatile;
  };
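
  // Usage sketch (illustrative only, assuming ATOMIC_FLAG_INIT from the
  // enclosing <atomic>/<cstdatomic> header):
  //
  //   atomic_flag __lock = ATOMIC_FLAG_INIT;
  //   while (__lock.test_and_set(memory_order_acquire))
  //     ;                                   // spin while the flag was set
  //   /* ... critical section ... */
  //   __lock.clear(memory_order_release);   // release the lock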

  /// atomic_address
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return false; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);
      _ATOMIC_STORE_(this, __v, __m);
    }

    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);
      return _ATOMIC_LOAD_(this, __m);
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    { return _ATOMIC_MODIFY_(this, =, __v, __m); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);
      return _ATOMIC_CMPEXCHNG_(this, &__v1, __v2, __m1);
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) + __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    {
      void* volatile* __p = &(_M_i);
      volatile __atomic_flag_base* __g = __atomic_flag_for_address(__p);
      __atomic_flag_wait_explicit(__g, __m);
      void* __r = *__p;
      *__p = (void*)((char*)(*__p) - __d);
      atomic_flag_clear_explicit(__g, __m);
      return __r;
    }

    operator void*() const volatile
    { return load(); }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return fetch_add(__d) + __d; }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return fetch_sub(__d) - __d; }
  };
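
  // Usage sketch (illustrative only; fetch_add/fetch_sub offset the stored
  // pointer by a byte count and return the previous value):
  //
  //   atomic_address __a(__buf);          // __buf: some void*, hypothetical
  //   void* __prev = __a.fetch_add(16);   // __prev == __buf
  //   __a -= 16;                          // back to the original address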

  /// Base class for atomic integrals.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i)
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return fetch_add(1) + 1; }

      __integral_type
      operator--() volatile
      { return fetch_sub(1) - 1; }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return fetch_add(__i) + __i; }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return fetch_sub(__i) - __i; }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return fetch_and(__i) & __i; }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return fetch_or(__i) | __i; }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return fetch_xor(__i) ^ __i; }

      bool
      is_lock_free() const volatile
      { return false; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);
        _ATOMIC_STORE_(this, __i, __m);
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);
        return _ATOMIC_LOAD_(this, __m);
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, =, __i, __m); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);
        return _ATOMIC_CMPEXCHNG_(this, &__i1, __i2, __m1);
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, +=, __i, __m); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, -=, __i, __m); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, &=, __i, __m); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, |=, __i, __m); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return _ATOMIC_MODIFY_(this, ^=, __i, __m); }
    };
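
  // Usage sketch (illustrative only): the compare_exchange members follow the
  // usual CAS-loop idiom, e.g. a bounded increment of an __atomic_base<int>
  // __x up to some int __limit:
  //
  //   int __cur = __x.load();
  //   while (__cur < __limit
  //          && !__x.compare_exchange_weak(__cur, __cur + 1))
  //     { }   // on failure __cur is refreshed with the observed value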

  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    bool
    operator=(bool __i)
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
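
  // Usage sketch (illustrative only): atomic_bool can serve as a one-shot
  // latch via compare_exchange_strong:
  //
  //   atomic_bool __done(false);
  //   bool __expected = false;
  //   if (__done.compare_exchange_strong(__expected, true))
  //     { /* first caller only: run the one-time initialisation */ }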

#undef _ATOMIC_LOAD_
#undef _ATOMIC_STORE_
#undef _ATOMIC_MODIFY_
#undef _ATOMIC_CMPEXCHNG_