mbed_atomic.h
/*
 * Copyright (c) 2015-2019, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __MBED_UTIL_ATOMIC_H__
#define __MBED_UTIL_ATOMIC_H__

#include "cmsis.h"

#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include "platform/mbed_toolchain.h"

/** \addtogroup platform-public-api */
/** @{*/

/**
 * \defgroup platform_atomic atomic functions
 *
 * Atomic functions behave analogously to C11 and C++11 - loads have
 * acquire semantics, stores have release semantics, and atomic operations
 * are sequentially consistent. Atomicity is enforced both between threads and
 * interrupt handlers.
 *
 * @{
 */

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Memory order constraints for atomic operations. Intended semantics
 * are as per C++11.
 */
typedef enum mbed_memory_order {
    /* Bits 0 = consume
     *      1 = acquire (explicitly requested, or implied by seq.cst)
     *      2 = release (explicitly requested, or implied by seq.cst)
     *      4 = sequentially consistent
     */
    mbed_memory_order_relaxed = 0x00,
    mbed_memory_order_consume = 0x01,
    mbed_memory_order_acquire = 0x02,
    mbed_memory_order_release = 0x04,
    mbed_memory_order_acq_rel = 0x06,
    mbed_memory_order_seq_cst = 0x16
} mbed_memory_order;

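/* A minimal sketch of release/acquire pairing using the explicit load/store
 * variants declared below. The `data` and `ready` variables are illustrative,
 * not part of this API.
 *
 * ~~~
 * static uint32_t data;
 * static volatile uint8_t ready;
 *
 * // Producer: publish data, then release-store the flag.
 * data = 42;
 * core_util_atomic_store_explicit_u8(&ready, 1, mbed_memory_order_release);
 *
 * // Consumer: acquire-load the flag; if set, the write to data is visible.
 * if (core_util_atomic_load_explicit_u8(&ready, mbed_memory_order_acquire)) {
 *     uint32_t seen = data;
 * }
 * ~~~
 */
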
// If the RTX __EXCLUSIVE_ACCESS macro is not defined, we need to derive this from architecture macros we set ourselves
#ifndef MBED_EXCLUSIVE_ACCESS
#ifndef __EXCLUSIVE_ACCESS
#if defined __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
#if ((__ARM_ARCH_7M__ == 1U) || \
     (__ARM_ARCH_7EM__ == 1U) || \
     (__ARM_ARCH_8M_BASE__ == 1U) || \
     (__ARM_ARCH_8M_MAIN__ == 1U) || \
     (__ARM_ARCH_8_1M_MAIN__ == 1U)) || \
     (__ARM_ARCH_7A__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 1U
#define MBED_EXCLUSIVE_ACCESS_THUMB1 (__ARM_ARCH_8M_BASE__ == 1U)
#ifdef __ICCARM__
#if __CPU_MODE__ == 2
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#else
#if !defined (__thumb__)
#define MBED_EXCLUSIVE_ACCESS_ARM 1U
#else
#define MBED_EXCLUSIVE_ACCESS_ARM 0U
#endif
#endif
#elif (__ARM_ARCH_6M__ == 1U)
#define MBED_EXCLUSIVE_ACCESS 0U
#elif defined __aarch64__ // Apple M1 Mac
#define MBED_EXCLUSIVE_ACCESS 0U
#else
#error "Unknown ARM architecture for exclusive access"
#endif // __ARM_ARCH_xxx
#else // __arm__ || defined __ICC_ARM__ || defined __ARM_ARCH
// Seems to be compiling for non-ARM, so stick with critical section implementations
#define MBED_EXCLUSIVE_ACCESS 0U
#endif
#else
#define MBED_EXCLUSIVE_ACCESS __EXCLUSIVE_ACCESS
#endif
#endif

#if MBED_EXCLUSIVE_ACCESS
#define MBED_INLINE_IF_EX inline
#else
#define MBED_INLINE_IF_EX
#endif

/**
 * A lock-free, primitive atomic flag.
 *
 * Emulate C11's atomic_flag. The flag is initially in an indeterminate state
 * unless explicitly initialized with CORE_UTIL_ATOMIC_FLAG_INIT.
 */
typedef struct core_util_atomic_flag {
    uint8_t _flag;
} core_util_atomic_flag;

/**
 * Initializer for a core_util_atomic_flag.
 *
 * Example:
 * ~~~
 *     core_util_atomic_flag in_progress = CORE_UTIL_ATOMIC_FLAG_INIT;
 * ~~~
 */
#define CORE_UTIL_ATOMIC_FLAG_INIT { 0 }

/**
 * Atomic test and set.
 *
 * Atomically tests then sets the flag to true, returning the previous value.
 *
 * @param flagPtr Target flag being tested and set.
 * @return The previous value.
 */
MBED_INLINE_IF_EX bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr);

/** \copydoc core_util_atomic_flag_test_and_set
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order);

/**
 * Atomic clear.
 *
 * @param flagPtr Target flag being cleared.
 */
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr);

/** \copydoc core_util_atomic_flag_clear
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order);
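
/* A minimal sketch of a busy-wait lock built on the flag operations above.
 * This is illustrative only - `lock` is a hypothetical variable, and a real
 * spinlock would normally yield rather than spin on a single-core MCU.
 *
 * ~~~
 * static core_util_atomic_flag lock = CORE_UTIL_ATOMIC_FLAG_INIT;
 *
 * void critical_work(void)
 * {
 *     while (core_util_atomic_flag_test_and_set(&lock)) {
 *         // flag was already set: another context holds the lock, so retry
 *     }
 *     // ... exclusive access here ...
 *     core_util_atomic_flag_clear(&lock);
 * }
 * ~~~
 */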


/**
 * Atomic compare and set. It compares the contents of a memory location to a
 * given value and, only if they are the same, modifies the contents of that
 * memory location to a given new value. This is done as a single atomic
 * operation. The atomicity guarantees that the new value is calculated based on
 * up-to-date information; if the value had been updated by another thread in
 * the meantime, the write would fail due to a mismatched expectedCurrentValue.
 *
 * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
 * you to the article on compare-and-swap].
 *
 * @param ptr The target memory location.
 * @param[in,out] expectedCurrentValue A pointer to some location holding the
 *                  expected current value of the data being set atomically.
 *                  The computed 'desiredValue' should be a function of this current value.
 *                  @note: This is an in-out parameter. In the
 *                  failure case of atomic_cas (where the
 *                  destination isn't set), the pointee of expectedCurrentValue is
 *                  updated with the current value.
 * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
 *
 * @return true if the memory location was atomically
 *         updated with the desired value (after verifying
 *         that it contained the expectedCurrentValue),
 *         false otherwise. In the failure case,
 *         expectedCurrentValue is updated with the new
 *         value of the target memory location.
 *
 * pseudocode:
 * function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
 *     if *p != *old {
 *         *old = *p
 *         return false
 *     }
 *     *p = new
 *     return true
 * }
 *
 * @note: In the failure case (where the destination isn't set), the value
 * pointed to by expectedCurrentValue is instead updated with the current value.
 * This property helps writing concise code for the following incr:
 *
 * function incr(p : pointer to int, a : int) returns int {
 *     done = false
 *     value = atomic_load(p)
 *     while not done {
 *         done = atomic_cas(p, &value, value + a) // *value gets updated automatically until success
 *     }
 *     return value + a
 * }
 *
 * However, if the call is made in a loop like this, the atomic_compare_exchange_weak
 * functions are to be preferred.
 *
 * @note: This corresponds to the C11 "atomic_compare_exchange_strong" - it
 * always succeeds if the current value is expected, as per the pseudocode
 * above; it will not spuriously fail as "atomic_compare_exchange_weak" may.
 * This call would normally be used when a fail return does not retry.
 */
MBED_INLINE_IF_EX bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
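
/* A concrete C version of the incr pseudocode above, written against the
 * 32-bit strong CAS declared in this section (a sketch, not part of this
 * header):
 *
 * ~~~
 * uint32_t incr_via_cas(volatile uint32_t *p, uint32_t a)
 * {
 *     uint32_t value = core_util_atomic_load_u32(p);
 *     // On failure, cas writes the freshly-read value back into `value`,
 *     // so the loop retries with up-to-date information.
 *     while (!core_util_atomic_cas_u32(p, &value, value + a)) {
 *     }
 *     return value + a;
 * }
 * ~~~
 */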

/** \copydoc core_util_atomic_cas_u8
 * @param success memory ordering constraint for successful exchange
 * @param failure memory ordering constraint for failure
 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_cas_u8 */
inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);

/** \copydoc core_util_atomic_cas_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure);


/**
 * Atomic compare and set. It compares the contents of a memory location to a
 * given value and, only if they are the same, modifies the contents of that
 * memory location to a given new value. This is done as a single atomic
 * operation. The atomicity guarantees that the new value is calculated based on
 * up-to-date information; if the value had been updated by another thread in
 * the meantime, the write would fail due to a mismatched expectedCurrentValue.
 *
 * Refer to https://en.wikipedia.org/wiki/Compare-and-set [which may redirect
 * you to the article on compare-and-swap].
 *
 * @param ptr The target memory location.
 * @param[in,out] expectedCurrentValue A pointer to some location holding the
 *                  expected current value of the data being set atomically.
 *                  The computed 'desiredValue' should be a function of this current value.
 *                  @note: This is an in-out parameter. In the
 *                  failure case of atomic_cas (where the
 *                  destination isn't set), the pointee of expectedCurrentValue is
 *                  updated with the current value.
 * @param[in] desiredValue The new value computed based on '*expectedCurrentValue'.
 *
 * @return true if the memory location was atomically
 *         updated with the desired value (after verifying
 *         that it contained the expectedCurrentValue),
 *         false otherwise. In the failure case,
 *         expectedCurrentValue is updated with the new
 *         value of the target memory location.
 *
 * pseudocode:
 * function cas(p : pointer to int, old : pointer to int, new : int) returns bool {
 *     if *p != *old or spurious failure {
 *         *old = *p
 *         return false
 *     }
 *     *p = new
 *     return true
 * }
 *
 * @note: In the failure case (where the destination isn't set), the value
 * pointed to by expectedCurrentValue is instead updated with the current value.
 * This property helps writing concise code for the following incr:
 *
 * function incr(p : pointer to int, a : int) returns int {
 *     done = false
 *     value = *p // This fetch operation need not be atomic.
 *     while not done {
 *         done = atomic_compare_exchange_weak(p, &value, value + a) // *value gets updated automatically until success
 *     }
 *     return value + a
 * }
 *
 * @note: This corresponds to the C11 "atomic_compare_exchange_weak" - unlike
 * "atomic_compare_exchange_strong", it may spuriously fail even if the current
 * value is expected, as per the pseudocode above.
 * This call would normally be used when a fail return will cause a retry anyway,
 * saving the need for an extra loop inside the cas operation.
 */
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
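
/* The same increment written with the weak form, which belongs in a retry
 * loop because it may fail spuriously (a sketch, not part of this header):
 *
 * ~~~
 * uint32_t incr_via_weak_cas(volatile uint32_t *p, uint32_t a)
 * {
 *     uint32_t value = core_util_atomic_load_u32(p);
 *     while (!core_util_atomic_compare_exchange_weak_u32(p, &value, value + a)) {
 *         // spurious or genuine failure: `value` now holds the current
 *         // contents of *p, so simply retry
 *     }
 *     return value + a;
 * }
 * ~~~
 */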

/** \copydoc core_util_atomic_compare_exchange_weak_u8
 * @param success memory ordering constraint for successful exchange
 * @param failure memory ordering constraint for failure
 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_INLINE_IF_EX bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure);

/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);

/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue, mbed_memory_order success, mbed_memory_order failure);


/**
 * Atomic load.
 * @param valuePtr Target memory location.
 * @return The loaded value.
 */
MBED_FORCEINLINE uint8_t core_util_atomic_load_u8(const volatile uint8_t *valuePtr);

/**
 * Atomic load with explicit ordering.
 *
 * @param valuePtr Target memory location.
 * @param order memory ordering constraint
 *
 * @return The loaded value.
 */
MBED_FORCEINLINE uint8_t core_util_atomic_load_explicit_u8(const volatile uint8_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_load_explicit_u16(const volatile uint16_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_load_explicit_u32(const volatile uint32_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_load_explicit_s8(const volatile int8_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_load_explicit_s16(const volatile int16_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_load_s32(const volatile int32_t *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_load_explicit_s32(const volatile int32_t *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_load_s64(const volatile int64_t *valuePtr);

/**
 * Atomic load with explicit ordering.
 *
 * @param valuePtr Target memory location.
 * @param order Currently unused since 64-bit atomic ops must be emulated
 *
 * @return The loaded value.
 */
MBED_FORCEINLINE int64_t core_util_atomic_load_explicit_s64(const volatile int64_t *valuePtr, MBED_UNUSED mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_load_explicit_bool(const volatile bool *valuePtr, mbed_memory_order order);

/** \copydoc core_util_atomic_load_u8 */
MBED_FORCEINLINE void *core_util_atomic_load_ptr(void *const volatile *valuePtr);

/** \copydoc core_util_atomic_load_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order);

/**
 * Atomic store.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 */
MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue);

/**
 * Atomic store with explicit ordering.
 *
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_u16(volatile uint16_t *valuePtr, uint16_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s8(volatile int8_t *valuePtr, int8_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_s64(volatile int64_t *valuePtr, int64_t desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_store_u8 */
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue);

/** \copydoc core_util_atomic_store_explicit_u8 */
MBED_FORCEINLINE void core_util_atomic_store_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order);

/**
 * Atomic exchange.
 * @param valuePtr Target memory location.
 * @param desiredValue The value to store.
 * @return The previous value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue);
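
/* A minimal sketch of a lock flag built on atomic exchange. The `locked`
 * variable is illustrative only; core_util_atomic_exchange_bool and
 * core_util_atomic_store_bool are declared in this header.
 *
 * ~~~
 * static volatile bool locked;
 *
 * bool try_lock(void)
 * {
 *     // If the previous value was false, this context is the one that set it.
 *     return !core_util_atomic_exchange_bool(&locked, true);
 * }
 *
 * void unlock(void)
 * {
 *     core_util_atomic_store_bool(&locked, false);
 * }
 * ~~~
 */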

/** \copydoc core_util_atomic_exchange_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_exchange_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_exchange_explicit_u32(volatile uint32_t *valuePtr, uint32_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_exchange_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_exchange_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_exchange_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_exchange_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
MBED_FORCEINLINE bool core_util_atomic_exchange_bool(volatile bool *valuePtr, bool desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE bool core_util_atomic_exchange_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order);

/** \copydoc core_util_atomic_exchange_u8 */
inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);

/** \copydoc core_util_atomic_exchange_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_exchange_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order);

/**
 * Atomic increment.
 * @param valuePtr Target memory location being incremented.
 * @param delta The amount being incremented.
 * @return The new incremented value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta);
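
/* Reference-count sketch using the increment/decrement operations, which
 * return the new value. The `refs` variable and free_object function are
 * illustrative only.
 *
 * ~~~
 * static volatile uint32_t refs = 1;
 *
 * void obj_acquire(void)
 * {
 *     core_util_atomic_incr_u32(&refs, 1);
 * }
 *
 * void obj_release(void)
 * {
 *     if (core_util_atomic_decr_u32(&refs, 1) == 0) {
 *         free_object(); // last reference dropped
 *     }
 * }
 * ~~~
 */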

/** \copydoc core_util_atomic_incr_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta);

/** \copydoc core_util_atomic_incr_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta);

/** \copydoc core_util_atomic_incr_u8 */
uint64_t core_util_atomic_incr_u64(volatile uint64_t *valuePtr, uint64_t delta);

/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta);

/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta);

/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta);

/** \copydoc core_util_atomic_incr_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta);

/** \copydoc core_util_atomic_incr_u8 */
inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);

/**
 * Atomic decrement.
 * @param valuePtr Target memory location being decremented.
 * @param delta The amount being decremented.
 * @return The new decremented value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta);

/** \copydoc core_util_atomic_decr_u8 */
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta);

/** \copydoc core_util_atomic_decr_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_decr_s64(volatile int64_t *valuePtr, int64_t delta);

/** \copydoc core_util_atomic_decr_u8 */
inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);

/**
 * Atomic add.
 * @param valuePtr Target memory location being modified.
 * @param arg The argument for the addition.
 * @return The original value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg);
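
/* Unlike core_util_atomic_incr_u8, the fetch_add family returns the value
 * held *before* the addition, matching C11 atomic_fetch_add. A sketch:
 *
 * ~~~
 * volatile uint8_t counter = 5;
 * uint8_t old = core_util_atomic_fetch_add_u8(&counter, 2); // old == 5, counter == 7
 * uint8_t now = core_util_atomic_incr_u8(&counter, 2);      // now == 9, counter == 9
 * ~~~
 */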

/** \copydoc core_util_atomic_fetch_add_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_add_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_add_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_s8(volatile int8_t *valuePtr, int8_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_s16(volatile int16_t *valuePtr, int16_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_s32(volatile int32_t *valuePtr, int32_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_s64(volatile int64_t *valuePtr, int64_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_add_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_add_ptr(void *volatile *valuePtr, ptrdiff_t arg);

/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);

/**
 * Atomic subtract.
 * @param valuePtr Target memory location being modified.
 * @param arg The argument for the subtraction.
 * @return The original value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg);

/** \copydoc core_util_atomic_fetch_sub_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_sub_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_sub_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_s8(volatile int8_t *valuePtr, int8_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_s16(volatile int16_t *valuePtr, int16_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int16_t core_util_atomic_fetch_sub_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_s32(volatile int32_t *valuePtr, int32_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_s64(volatile int64_t *valuePtr, int64_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_sub_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg);

/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
MBED_FORCEINLINE void *core_util_atomic_fetch_sub_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order);

/**
 * Atomic bitwise and.
 * @param valuePtr Target memory location being modified.
 * @param arg The argument for the bitwise operation.
 * @return The original value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg);

/** \copydoc core_util_atomic_fetch_and_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_and_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_and_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg);

/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_and_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_and_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg);

/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_and_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_and_u8 */
uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg);

/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_and_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);

/**
 * Atomic bitwise inclusive or.
 * @param valuePtr Target memory location being modified.
 * @param arg The argument for the bitwise operation.
 * @return The original value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg);
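
/* Sketch of atomic bit manipulation with the bitwise fetch operations:
 * fetch_or sets bits, fetch_and clears them, and the returned original
 * value tells you whether the bit changed. The `flags` variable is
 * illustrative only.
 *
 * ~~~
 * static volatile uint8_t flags;
 *
 * // Set bit 0; report whether this call was the one that set it.
 * bool became_owner = (core_util_atomic_fetch_or_u8(&flags, 0x01) & 0x01) == 0;
 *
 * // Clear bit 0.
 * core_util_atomic_fetch_and_u8(&flags, (uint8_t)~0x01);
 * ~~~
 */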

/** \copydoc core_util_atomic_fetch_or_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_or_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_or_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg);

/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_or_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_or_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg);

/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_or_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_or_u8 */
uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg);

/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_or_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);

/**
 * Atomic bitwise exclusive or.
 * @param valuePtr Target memory location being modified.
 * @param arg The argument for the bitwise operation.
 * @return The original value.
 */
MBED_INLINE_IF_EX uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg);

/** \copydoc core_util_atomic_fetch_xor_u8
 * @param order memory ordering constraint
 */
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_xor_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_xor_u8 */
MBED_INLINE_IF_EX uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg);

/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_xor_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_xor_u8 */
MBED_INLINE_IF_EX uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg);

/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_xor_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order);

/** \copydoc core_util_atomic_fetch_xor_u8 */
uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg);

/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_xor_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order);

#ifdef __cplusplus
} // extern "C"

#include <mstd_type_traits>

// For each operation, two overloaded templates:
// * one for non-pointer types, which has implementations based on the
//   u8/u16/u32/u64/s8/s16/s32/s64/bool functions above. No base implementation.
// * one for any pointer type, generically implemented based on ptr function above.
//
// Templates use standard C/C++ naming - old incr/decr/cas forms are not provided.
//
// The `type_identity_t<T>` used here means "same type as T", blocking template
// argument deduction. It forces type selection based on the type of the actual pointer
// to the atomic. If just `T` was used, the following would be ambiguous:
// core_util_atomic_store(&my_uint8_t, 1) - it wouldn't be able to select between T
// being uint8_t and int.

/** \copydoc core_util_atomic_load_u8 */
template<typename T> T core_util_atomic_load(const volatile T *valuePtr) noexcept;
/** \copydoc core_util_atomic_load_u8 */
template<typename T> T core_util_atomic_load(const T *valuePtr) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> void core_util_atomic_store(volatile T *valuePtr, mstd::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> void core_util_atomic_store(T *valuePtr, mstd::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_exchange_u8 */
template<typename T> T core_util_atomic_exchange(volatile T *valuePtr, mstd::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_cas_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong(volatile T *ptr, mstd::type_identity_t<T> *expectedCurrentValue, mstd::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak(volatile T *ptr, mstd::type_identity_t<T> *expectedCurrentValue, mstd::type_identity_t<T> desiredValue) noexcept;
/** \copydoc core_util_atomic_fetch_add_u8 */
template<typename T> T core_util_atomic_fetch_add(volatile T *valuePtr, mstd::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_atomic_fetch_sub_u8 */
template<typename T> T core_util_atomic_fetch_sub(volatile T *valuePtr, mstd::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_atomic_fetch_and_u8 */
template<typename T> T core_util_atomic_fetch_and(volatile T *valuePtr, mstd::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_atomic_fetch_or_u8 */
template<typename T> T core_util_atomic_fetch_or(volatile T *valuePtr, mstd::type_identity_t<T> arg) noexcept;
/** \copydoc core_util_atomic_fetch_xor_u8 */
template<typename T> T core_util_atomic_fetch_xor(volatile T *valuePtr, mstd::type_identity_t<T> arg) noexcept;

/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> T core_util_atomic_load_explicit(const T *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> void core_util_atomic_store_explicit(volatile T *valuePtr, mstd::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> void core_util_atomic_store_explicit(T *valuePtr, mstd::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_exchange_explicit_u8 */
template<typename T> T core_util_atomic_exchange_explicit(volatile T *valuePtr, mstd::type_identity_t<T> desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_cas_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, mstd::type_identity_t<T> *expectedCurrentValue, mstd::type_identity_t<T> desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
template<typename T> bool core_util_atomic_compare_exchange_weak_explicit(volatile T *ptr, mstd::type_identity_t<T> *expectedCurrentValue, mstd::type_identity_t<T> desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
template<typename T> T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, mstd::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
template<typename T> T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, mstd::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_fetch_and_explicit_u8 */
template<typename T> T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, mstd::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_fetch_or_explicit_u8 */
template<typename T> T core_util_atomic_fetch_or_explicit(volatile T *valuePtr, mstd::type_identity_t<T> arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_fetch_xor_explicit_u8 */
template<typename T> T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, mstd::type_identity_t<T> arg, mbed_memory_order order) noexcept;

/** \copydoc core_util_atomic_load_u8 */
template<typename T> inline T *core_util_atomic_load(T *const volatile *valuePtr) noexcept;
/** \copydoc core_util_atomic_load_u8 */
template<typename T> inline T *core_util_atomic_load(T *const *valuePtr) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> inline void core_util_atomic_store(T *volatile *valuePtr, mstd::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_store_u8 */
template<typename T> inline void core_util_atomic_store(T **valuePtr, mstd::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_exchange_u8 */
template<typename T> inline T *core_util_atomic_exchange(T *volatile *valuePtr, mstd::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_cas_u8 */
template<typename T> inline bool core_util_atomic_compare_exchange_strong(T *volatile *ptr, mstd::type_identity_t<T> **expectedCurrentValue, mstd::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_u8 */
template<typename T> inline bool core_util_atomic_compare_exchange_weak(T *volatile *ptr, mstd::type_identity_t<T> **expectedCurrentValue, mstd::type_identity_t<T> *desiredValue) noexcept;
/** \copydoc core_util_atomic_fetch_add_u8 */
template<typename T> inline T *core_util_atomic_fetch_add(T *volatile *valuePtr, ptrdiff_t arg) noexcept;
/** \copydoc core_util_atomic_fetch_sub_u8 */
template<typename T> inline T *core_util_atomic_fetch_sub(T *volatile *valuePtr, ptrdiff_t arg) noexcept;

/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> inline T *core_util_atomic_load_explicit(T *const volatile *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_load_explicit_u8 */
template<typename T> inline T *core_util_atomic_load_explicit(T *const *valuePtr, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> inline void core_util_atomic_store_explicit(T *volatile *valuePtr, mstd::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_store_explicit_u8 */
template<typename T> inline void core_util_atomic_store_explicit(T **valuePtr, mstd::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_exchange_explicit_u8 */
template<typename T> inline T *core_util_atomic_exchange_explicit(T *volatile *valuePtr, mstd::type_identity_t<T> *desiredValue, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_cas_explicit_u8 */
template<typename T> inline bool core_util_atomic_compare_exchange_strong_explicit(T *volatile *ptr, mstd::type_identity_t<T> **expectedCurrentValue, mstd::type_identity_t<T> *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_compare_exchange_weak_explicit_u8 */
template<typename T> inline bool core_util_atomic_compare_exchange_weak_explicit(T *volatile *ptr, mstd::type_identity_t<T> **expectedCurrentValue, mstd::type_identity_t<T> *desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept;
/** \copydoc core_util_atomic_fetch_add_explicit_u8 */
template<typename T> inline T *core_util_atomic_fetch_add_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;
/** \copydoc core_util_atomic_fetch_sub_explicit_u8 */
template<typename T> inline T *core_util_atomic_fetch_sub_explicit(T *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order) noexcept;

#endif // __cplusplus

/**@}*/

/**@}*/

/* Hide the implementation away */
#include "platform/internal/mbed_atomic_impl.h"

#endif // __MBED_UTIL_ATOMIC_H__
Atomic decrement.
MBED_FORCEINLINE int64_t core_util_atomic_exchange_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_cas_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE void core_util_atomic_store_bool(volatile bool *valuePtr, bool desiredValue)
uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t delta)
Atomic increment.
MBED_FORCEINLINE uint8_t core_util_atomic_load_explicit_u8(const volatile uint8_t *valuePtr, mbed_memory_order order)
Atomic load with explicit ordering.
uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg)
Atomic bitwise inclusive or.
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order)
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_add_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
void * core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta)
Atomic increment.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
T core_util_atomic_load_explicit(const volatile T *valuePtr, mbed_memory_order order) noexcept
uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
Atomic exchange.
MBED_FORCEINLINE int32_t core_util_atomic_load_explicit_s32(const volatile int32_t *valuePtr, mbed_memory_order order)
bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_s32(volatile int32_t *valuePtr, int32_t arg)
Atomic add.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
T core_util_atomic_exchange(volatile T *valuePtr, mstd::type_identity_t< T > desiredValue) noexcept
Atomic exchange.
MBED_FORCEINLINE void * core_util_atomic_fetch_add_explicit_ptr(void *volatile *valuePtr, ptrdiff_t arg, mbed_memory_order order)
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_and_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
MBED_FORCEINLINE void core_util_atomic_store_s32(volatile int32_t *valuePtr, int32_t desiredValue)
uint64_t core_util_atomic_fetch_sub_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic subtract.
MBED_FORCEINLINE int64_t core_util_atomic_fetch_sub_explicit_s64(volatile int64_t *valuePtr, int64_t arg, mbed_memory_order order)
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_add_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order)
Atomic add.
T core_util_atomic_fetch_xor(volatile T *valuePtr, mstd::type_identity_t< T > arg) noexcept
Atomic bitwise exclusive or.
MBED_FORCEINLINE int16_t core_util_atomic_load_s16(const volatile int16_t *valuePtr)
T core_util_atomic_fetch_add(volatile T *valuePtr, mstd::type_identity_t< T > arg) noexcept
Atomic add.
MBED_FORCEINLINE void * core_util_atomic_fetch_sub_ptr(void *volatile *valuePtr, ptrdiff_t arg)
Atomic subtract.
uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic subtract.
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_sub_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
MBED_FORCEINLINE uint16_t core_util_atomic_load_u16(const volatile uint16_t *valuePtr)
uint64_t core_util_atomic_fetch_xor_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic bitwise exclusive or.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg)
Atomic bitwise exclusive or.
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_add_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_cas_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE int64_t core_util_atomic_fetch_add_s64(volatile int64_t *valuePtr, int64_t arg)
Atomic add.
uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t delta)
Atomic decrement.
MBED_FORCEINLINE int16_t core_util_atomic_decr_s16(volatile int16_t *valuePtr, int16_t delta)
Atomic decrement.
MBED_FORCEINLINE int8_t core_util_atomic_load_explicit_s8(const volatile int8_t *valuePtr, mbed_memory_order order)
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_or_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
uint64_t core_util_atomic_exchange_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
Atomic exchange.
uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t delta)
Atomic increment.
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_xor_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
MBED_FORCEINLINE int32_t core_util_atomic_fetch_add_explicit_s32(volatile int32_t *valuePtr, int32_t arg, mbed_memory_order order)
MBED_FORCEINLINE uint8_t core_util_atomic_fetch_xor_explicit_u8(volatile uint8_t *valuePtr, uint8_t arg, mbed_memory_order order)
Atomic bitwise exclusive or.
bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic add.
bool core_util_atomic_cas_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE void * core_util_atomic_load_ptr(void *const volatile *valuePtr)
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
MBED_FORCEINLINE int32_t core_util_atomic_decr_s32(volatile int32_t *valuePtr, int32_t delta)
Atomic decrement.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
Atomic compare and set.
bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE void core_util_atomic_store_u32(volatile uint32_t *valuePtr, uint32_t desiredValue)
MBED_FORCEINLINE uint32_t core_util_atomic_fetch_or_explicit_u32(volatile uint32_t *valuePtr, uint32_t arg, mbed_memory_order order)
MBED_FORCEINLINE int16_t core_util_atomic_load_explicit_s16(const volatile int16_t *valuePtr, mbed_memory_order order)
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_add_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order)
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_s8(volatile int8_t *valuePtr, int8_t arg)
Atomic subtract.
MBED_FORCEINLINE int8_t core_util_atomic_exchange_s8(volatile int8_t *valuePtr, int8_t desiredValue)
Atomic exchange.
MBED_FORCEINLINE void * core_util_atomic_load_explicit_ptr(void *const volatile *valuePtr, mbed_memory_order order)
MBED_FORCEINLINE void core_util_atomic_store_ptr(void *volatile *valuePtr, void *desiredValue)
MBED_FORCEINLINE void core_util_atomic_flag_clear_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
Atomic clear.
MBED_FORCEINLINE int32_t core_util_atomic_exchange_s32(volatile int32_t *valuePtr, int32_t desiredValue)
Atomic exchange.
uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic bitwise and.
uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t delta)
Atomic decrement.
mbed_memory_order
Memory order constraints for atomic operations.
Definition: mbed_atomic.h:51
T core_util_atomic_fetch_add_explicit(volatile T *valuePtr, mstd::type_identity_t< T > arg, mbed_memory_order order) noexcept
MBED_FORCEINLINE uint32_t core_util_atomic_load_explicit_u32(const volatile uint32_t *valuePtr, mbed_memory_order order)
uint64_t core_util_atomic_fetch_or_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic bitwise inclusive or.
T core_util_atomic_load(const volatile T *valuePtr) noexcept
MBED_FORCEINLINE int32_t core_util_atomic_incr_s32(volatile int32_t *valuePtr, int32_t delta)
Atomic increment.
bool core_util_atomic_compare_exchange_weak_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
T core_util_atomic_fetch_xor_explicit(volatile T *valuePtr, mstd::type_identity_t< T > arg, mbed_memory_order order) noexcept
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
Atomic compare and set.
MBED_FORCEINLINE int8_t core_util_atomic_exchange_explicit_s8(volatile int8_t *valuePtr, int8_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE uint64_t core_util_atomic_exchange_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
T core_util_atomic_fetch_or(volatile T *valuePtr, mstd::type_identity_t< T > arg) noexcept
Atomic bitwise inclusive or.
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_s8(volatile int8_t *valuePtr, int8_t arg)
Atomic add.
MBED_FORCEINLINE void core_util_atomic_store_explicit_bool(volatile bool *valuePtr, bool desiredValue, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_load_bool(const volatile bool *valuePtr)
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u64(volatile uint64_t *ptr, uint64_t *expectedCurrentValue, uint64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
bool core_util_atomic_compare_exchange_strong_explicit(volatile T *ptr, mstd::type_identity_t< T > *expectedCurrentValue, mstd::type_identity_t< T > desiredValue, mbed_memory_order success, mbed_memory_order failure) noexcept
MBED_FORCEINLINE int64_t core_util_atomic_exchange_s64(volatile int64_t *valuePtr, int64_t desiredValue)
Atomic exchange.
MBED_FORCEINLINE bool core_util_atomic_flag_test_and_set_explicit(volatile core_util_atomic_flag *flagPtr, mbed_memory_order order)
Atomic test and set.
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_and_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg)
Atomic bitwise and.
MBED_FORCEINLINE uint8_t core_util_atomic_exchange_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order)
Atomic exchange.
MBED_FORCEINLINE void core_util_atomic_store_explicit_u8(volatile uint8_t *valuePtr, uint8_t desiredValue, mbed_memory_order order)
Atomic store with explicit ordering.
MBED_FORCEINLINE void core_util_atomic_store_explicit_ptr(void *volatile *valuePtr, void *desiredValue, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_cas_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue)
Atomic compare and set.
uint64_t core_util_atomic_fetch_add_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic add.
uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg)
Atomic subtract.
uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg)
Atomic bitwise exclusive or.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s16(volatile int16_t *ptr, int16_t *expectedCurrentValue, int16_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
uint64_t core_util_atomic_fetch_and_u64(volatile uint64_t *valuePtr, uint64_t arg)
Atomic bitwise and.
MBED_FORCEINLINE int16_t core_util_atomic_exchange_s16(volatile int16_t *valuePtr, int16_t desiredValue)
Atomic exchange.
T core_util_atomic_fetch_and(volatile T *valuePtr, mstd::type_identity_t< T > arg) noexcept
Atomic bitwise and.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue)
Atomic compare and set.
uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg)
Atomic bitwise inclusive or.
uint64_t core_util_atomic_decr_u64(volatile uint64_t *valuePtr, uint64_t delta)
Atomic decrement.
T core_util_atomic_exchange_explicit(volatile T *valuePtr, mstd::type_identity_t< T > desiredValue, mbed_memory_order order) noexcept
MBED_FORCEINLINE int8_t core_util_atomic_fetch_add_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order)
MBED_FORCEINLINE void core_util_atomic_store_explicit_u64(volatile uint64_t *valuePtr, uint64_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
T core_util_atomic_fetch_sub(volatile T *valuePtr, mstd::type_identity_t< T > arg) noexcept
Atomic subtract.
MBED_FORCEINLINE bool core_util_atomic_cas_explicit_s8(volatile int8_t *ptr, int8_t *expectedCurrentValue, int8_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
MBED_FORCEINLINE int64_t core_util_atomic_incr_s64(volatile int64_t *valuePtr, int64_t delta)
Atomic increment.
MBED_FORCEINLINE void core_util_atomic_store_explicit_u16(volatile uint16_t *valuePtr, uint16_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE int8_t core_util_atomic_incr_s8(volatile int8_t *valuePtr, int8_t delta)
Atomic increment.
uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg)
Atomic bitwise inclusive or.
T core_util_atomic_fetch_and_explicit(volatile T *valuePtr, mstd::type_identity_t< T > arg, mbed_memory_order order) noexcept
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_s16(volatile int16_t *valuePtr, int16_t arg)
Atomic add.
void * core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue)
Atomic exchange.
MBED_FORCEINLINE void core_util_atomic_store_explicit_s64(volatile int64_t *valuePtr, int64_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE uint32_t core_util_atomic_load_u32(const volatile uint32_t *valuePtr)
MBED_FORCEINLINE int8_t core_util_atomic_decr_s8(volatile int8_t *valuePtr, int8_t delta)
Atomic decrement.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_bool(volatile bool *ptr, bool *expectedCurrentValue, bool desiredValue, mbed_memory_order success, mbed_memory_order failure)
void core_util_atomic_store_explicit(volatile T *valuePtr, mstd::type_identity_t< T > desiredValue, mbed_memory_order order) noexcept
T core_util_atomic_fetch_sub_explicit(volatile T *valuePtr, mstd::type_identity_t< T > arg, mbed_memory_order order) noexcept
MBED_FORCEINLINE void core_util_atomic_store_s16(volatile int16_t *valuePtr, int16_t desiredValue)
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_s64(volatile int64_t *ptr, int64_t *expectedCurrentValue, int64_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t desiredValue)
Atomic exchange.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_s32(volatile int32_t *ptr, int32_t *expectedCurrentValue, int32_t desiredValue)
Atomic compare and set.
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_and_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order)
MBED_FORCEINLINE void core_util_atomic_store_explicit_s16(volatile int16_t *valuePtr, int16_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE int16_t core_util_atomic_fetch_add_explicit_s16(volatile int16_t *valuePtr, int16_t arg, mbed_memory_order order)
MBED_FORCEINLINE int8_t core_util_atomic_load_s8(const volatile int8_t *valuePtr)
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_sub_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order)
uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t delta)
Atomic increment.
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
Atomic compare and set.
MBED_FORCEINLINE uint64_t core_util_atomic_load_explicit_u64(const volatile uint64_t *valuePtr, mbed_memory_order order)
MBED_FORCEINLINE void core_util_atomic_store_u8(volatile uint8_t *valuePtr, uint8_t desiredValue)
Atomic store.
MBED_FORCEINLINE int32_t core_util_atomic_exchange_explicit_s32(volatile int32_t *valuePtr, int32_t desiredValue, mbed_memory_order order)
MBED_FORCEINLINE int16_t core_util_atomic_incr_s16(volatile int16_t *valuePtr, int16_t delta)
Atomic increment.
MBED_FORCEINLINE uint64_t core_util_atomic_fetch_sub_explicit_u64(volatile uint64_t *valuePtr, uint64_t arg, mbed_memory_order order)
MBED_FORCEINLINE int32_t core_util_atomic_fetch_sub_s32(volatile int32_t *valuePtr, int32_t arg)
Atomic subtract.
bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue)
Atomic compare and set.
uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg)
Atomic add.
MBED_FORCEINLINE int8_t core_util_atomic_fetch_sub_explicit_s8(volatile int8_t *valuePtr, int8_t arg, mbed_memory_order order)
MBED_FORCEINLINE bool core_util_atomic_compare_exchange_weak_explicit_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue, mbed_memory_order success, mbed_memory_order failure)
MBED_FORCEINLINE void core_util_atomic_flag_clear(volatile core_util_atomic_flag *flagPtr)
Atomic clear.
MBED_FORCEINLINE uint16_t core_util_atomic_fetch_or_explicit_u16(volatile uint16_t *valuePtr, uint16_t arg, mbed_memory_order order)
#define MBED_UNUSED
MBED_UNUSED Declare a function argument to be unused, suppressing compiler warnings.
#define MBED_FORCEINLINE
MBED_FORCEINLINE Declare a function that must always be inlined.
A lock-free, primitive atomic flag.
Definition: mbed_atomic.h:118