/*
* Copyright (c) 2019, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "platform/mbed_assert.h"
#include "platform/mbed_atomic.h"
#include "platform/mbed_critical.h"

/* Inline bool implementations in the header use uint8_t versions to manipulate the bool */
static_assert(sizeof(bool) == sizeof(uint8_t), "Surely bool is a byte");

/* Inline implementations in the header use uint32_t versions to manipulate pointers */
static_assert(sizeof(void *) == sizeof(uint32_t), "Alas, pointers must be 32-bit");

#define DO_MBED_LOCKED_OP(name, OP, retValue, T, fn_suffix)             \
T core_util_atomic_##name##_##fn_suffix(volatile T *valuePtr, T arg)    \
{                                                                       \
    T oldValue, newValue;                                               \
    core_util_critical_section_enter();                                 \
    oldValue = *valuePtr;                                               \
    newValue = OP;                                                      \
    *valuePtr = newValue;                                               \
    core_util_critical_section_exit();                                  \
    return retValue;                                                    \
}

#define DO_MBED_LOCKED_CAS_OP(T, fn_suffix)                                                      \
bool core_util_atomic_cas_##fn_suffix(volatile T *ptr, T *expectedCurrentValue, T desiredValue)  \
{                                                                                                \
    bool success;                                                                                \
    T currentValue;                                                                              \
    core_util_critical_section_enter();                                                          \
    currentValue = *ptr;                                                                         \
    if (currentValue == *expectedCurrentValue) {                                                 \
        *ptr = desiredValue;                                                                     \
        success = true;                                                                          \
    } else {                                                                                     \
        *expectedCurrentValue = currentValue;                                                    \
        success = false;                                                                         \
    }                                                                                            \
    core_util_critical_section_exit();                                                           \
    return success;                                                                              \
}                                                                                                \
                                                                                                 \
bool core_util_atomic_compare_exchange_weak_##fn_suffix(volatile T *ptr,                         \
                                                        T *expectedCurrentValue, T desiredValue) \
{                                                                                                \
    return core_util_atomic_cas_##fn_suffix(ptr, expectedCurrentValue, desiredValue);            \
}
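
/*
 * Usage sketch (illustrative, not part of this file): callers typically wrap
 * the strong CAS in a retry loop. On failure *expectedCurrentValue is updated
 * with the value actually observed, so no separate re-read is needed.
 * `counter` here is a hypothetical volatile uint32_t:
 *
 *     uint32_t expected = core_util_atomic_load_u32(&counter);
 *     while (!core_util_atomic_cas_u32(&counter, &expected, expected + 1)) {
 *         // `expected` now holds the current value; just retry
 *     }
 */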
#if MBED_EXCLUSIVE_ACCESS
/* These are the C99 external definitions for the inline functions */
/* We maintain external definitions rather than using "static inline" for backwards binary compatibility
* and to give the compiler plenty of leeway to choose to not inline in both C and C++ modes
*/
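
/*
 * Sketch of the C99 inline pattern relied on here (simplified, not the actual
 * header text): mbed_atomic.h defines the body as a plain `inline` function,
 * for example
 *
 *     inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr,
 *                                                 uint8_t newValue) { ... }
 *
 * and each `extern inline` declaration below makes this translation unit emit
 * the single externally visible definition, so binaries built against older
 * releases can still resolve the symbol at link time.
 */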
extern inline bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr);
extern inline uint8_t core_util_atomic_exchange_u8(volatile uint8_t *valuePtr, uint8_t newValue);
extern inline uint16_t core_util_atomic_exchange_u16(volatile uint16_t *valuePtr, uint16_t newValue);
extern inline uint32_t core_util_atomic_exchange_u32(volatile uint32_t *valuePtr, uint32_t newValue);
extern inline uint8_t core_util_atomic_incr_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_incr_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_incr_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_decr_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_decr_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_decr_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_add_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_add_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_add_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_sub_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_sub_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_sub_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_and_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_and_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_and_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_or_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_or_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_or_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline uint8_t core_util_atomic_fetch_xor_u8(volatile uint8_t *valuePtr, uint8_t arg);
extern inline uint16_t core_util_atomic_fetch_xor_u16(volatile uint16_t *valuePtr, uint16_t arg);
extern inline uint32_t core_util_atomic_fetch_xor_u32(volatile uint32_t *valuePtr, uint32_t arg);
extern inline bool core_util_atomic_cas_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
extern inline bool core_util_atomic_cas_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
extern inline bool core_util_atomic_cas_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u8(volatile uint8_t *ptr, uint8_t *expectedCurrentValue, uint8_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u16(volatile uint16_t *ptr, uint16_t *expectedCurrentValue, uint16_t desiredValue);
extern inline bool core_util_atomic_compare_exchange_weak_u32(volatile uint32_t *ptr, uint32_t *expectedCurrentValue, uint32_t desiredValue);
#else
bool core_util_atomic_flag_test_and_set(volatile core_util_atomic_flag *flagPtr)
{
    core_util_critical_section_enter();
    uint8_t currentValue = flagPtr->_flag;
    flagPtr->_flag = true;
    core_util_critical_section_exit();
    return currentValue;
}
#endif
/* No architecture we support has LDREXD/STREXD, so must always disable IRQs for 64-bit operations */
uint64_t core_util_atomic_load_u64(const volatile uint64_t *valuePtr)
{
    core_util_critical_section_enter();
    uint64_t currentValue = *valuePtr;
    core_util_critical_section_exit();
    return currentValue;
}

void core_util_atomic_store_u64(volatile uint64_t *valuePtr, uint64_t desiredValue)
{
    core_util_critical_section_enter();
    *valuePtr = desiredValue;
    core_util_critical_section_exit();
}
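
/*
 * Usage sketch (illustrative; `timestamp` is hypothetical): without 64-bit
 * exclusive access, even a plain read of a shared uint64_t must go through
 * these helpers to avoid tearing between the two 32-bit halves:
 *
 *     volatile uint64_t timestamp;
 *     uint64_t now = core_util_atomic_load_u64(&timestamp);
 *     core_util_atomic_store_u64(&timestamp, now + 1);
 */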

/* Now the locked operations, for whichever sizes we don't have lock-free implementations for */
#if MBED_EXCLUSIVE_ACCESS
/* Just need 64-bit locked operations */
#define DO_MBED_LOCKED_OPS(name, OP, retValue) \
    DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64)

#define DO_MBED_LOCKED_CAS_OPS() \
    DO_MBED_LOCKED_CAS_OP(uint64_t, u64)
#else
/* All the operations are locked */
#define DO_MBED_LOCKED_OPS(name, OP, retValue)              \
    DO_MBED_LOCKED_OP(name, OP, retValue, uint8_t,  u8)     \
    DO_MBED_LOCKED_OP(name, OP, retValue, uint16_t, u16)    \
    DO_MBED_LOCKED_OP(name, OP, retValue, uint32_t, u32)    \
    DO_MBED_LOCKED_OP(name, OP, retValue, uint64_t, u64)

#define DO_MBED_LOCKED_CAS_OPS()            \
    DO_MBED_LOCKED_CAS_OP(uint8_t,  u8)     \
    DO_MBED_LOCKED_CAS_OP(uint16_t, u16)    \
    DO_MBED_LOCKED_CAS_OP(uint32_t, u32)    \
    DO_MBED_LOCKED_CAS_OP(uint64_t, u64)
#endif
// *INDENT-OFF*
DO_MBED_LOCKED_OPS(exchange, arg, oldValue)
DO_MBED_LOCKED_OPS(incr, oldValue + arg, newValue)
DO_MBED_LOCKED_OPS(decr, oldValue - arg, newValue)
DO_MBED_LOCKED_OPS(fetch_add, oldValue + arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_sub, oldValue - arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_and, oldValue & arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_or, oldValue | arg, oldValue)
DO_MBED_LOCKED_OPS(fetch_xor, oldValue ^ arg, oldValue)
DO_MBED_LOCKED_CAS_OPS()
// *INDENT-ON*
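
/*
 * Note the retValue argument in the instantiations above: incr/decr return
 * the *new* value (newValue), while the C11-style fetch_* operations return
 * the value *before* the operation (oldValue). Illustration (`v` is a
 * hypothetical variable):
 *
 *     volatile uint64_t v = 5;
 *     core_util_atomic_incr_u64(&v, 1);      // returns 6, v == 6
 *     core_util_atomic_fetch_add_u64(&v, 1); // returns 6, v == 7
 */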
/* Similar functions for s32 etc are static inline, but these are extern inline for legacy binary compatibility */
extern inline void *core_util_atomic_exchange_ptr(void *volatile *valuePtr, void *desiredValue);
extern inline void *core_util_atomic_incr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline void *core_util_atomic_decr_ptr(void *volatile *valuePtr, ptrdiff_t delta);
extern inline bool core_util_atomic_cas_ptr(void *volatile *ptr, void **expectedCurrentValue, void *desiredValue);