Control Surface main
MIDI Control Surface library for Arduino
Loading...
Searching...
No Matches
Atomic.hpp
Go to the documentation of this file.
1#pragma once
2
3#include <atomic>
4
5#include <Settings/NamespaceSettings.hpp>
6
7#if defined(ARDUINO_ARCH_RP2040)
8#include <hardware/sync.h>
9#elif defined(ARDUINO_ARCH_MBED)
10#include <platform/mbed_critical.h>
11#endif
12
14
15#ifndef ARDUINO
16
17#define CS_USE_REAL_ATOMIC 1
18
19#elif defined(ARDUINO_ARCH_RP2040)
20
21class ScopedInterruptDisabler {
22 public:
23 ScopedInterruptDisabler() : state {save_and_disable_interrupts()} {}
24 ~ScopedInterruptDisabler() { restore_interrupts(state); }
25 ScopedInterruptDisabler(const ScopedInterruptDisabler &) = delete;
26 ScopedInterruptDisabler &
27 operator=(const ScopedInterruptDisabler &) = delete;
28 ScopedInterruptDisabler(ScopedInterruptDisabler &&) = delete;
29 ScopedInterruptDisabler &operator=(ScopedInterruptDisabler &&) = delete;
30
31 private:
32 uint32_t state;
33};
34
35#elif defined(ARDUINO_ARCH_MBED)
36
38class ScopedInterruptDisabler {
39 public:
40 ScopedInterruptDisabler() { core_util_critical_section_enter(); }
41 ~ScopedInterruptDisabler() { core_util_critical_section_exit(); }
42 ScopedInterruptDisabler(const ScopedInterruptDisabler &) = delete;
43 ScopedInterruptDisabler &
44 operator=(const ScopedInterruptDisabler &) = delete;
45 ScopedInterruptDisabler(ScopedInterruptDisabler &&) = delete;
46 ScopedInterruptDisabler &operator=(ScopedInterruptDisabler &&) = delete;
47};
48
49#elif defined(TEENSYDUINO)
50
51// Not necessary (assuming we only support the higher end boards)
52
53#else
54#error "Unknown platform, I don't know how to disable and restore interrupts"
55#endif
56
// Some boards with a Cortex-M3, Cortex-M4 or Cortex-M7 have exclusive LD/STREX
// instructions, so they can implement read-modify-write operations without
// disabling interrupts, which turns out to be faster.

// Note: the architecture macros are spelled ARDUINO_ARCH_* (see the #if/#elif
// chain above). The previous condition tested DARDUINO_ARCH_* (a stray "D"
// from a -D compiler flag), which could never match, so the nRF52840 and
// GIGA boards silently fell back to the interrupt-disabling implementation.
#if defined(ARDUINO_ARCH_NRF52840) || defined(ARDUINO_ARCH_MBED_GIGA) ||      \
    defined(TEENSYDUINO)
#define CS_USE_ATOMIC_RMW 1
#else
#define CS_USE_ATOMIC_RMW 0
#endif
67
68// #define CS_USE_REAL_ATOMIC
69#ifdef CS_USE_REAL_ATOMIC
70template <class T>
71using interrupt_atomic = std::atomic<T>;
72#else
73
80template <class T>
82 public:
83 interrupt_atomic() noexcept = default;
84 explicit interrupt_atomic(T t) noexcept : value {t} {}
85
86 [[gnu::always_inline]] static void
87 after_load_fence(std::memory_order o) noexcept {
88 switch (o) {
89 case std::memory_order_consume: // fallthrough
90 case std::memory_order_acq_rel: // fallthrough
91 case std::memory_order_acquire:
92 std::atomic_signal_fence(std::memory_order_acquire);
93 break;
94 case std::memory_order_seq_cst:
95 std::atomic_signal_fence(std::memory_order_seq_cst);
96 break;
97 case std::memory_order_relaxed: // fallthrough
98 case std::memory_order_release:
99 // no fence needed
100 break;
101 default:;
102 }
103 }
104
105 [[gnu::always_inline]] static void
106 before_store_fence(std::memory_order o) noexcept {
107 switch (o) {
108 case std::memory_order_consume: // fallthrough
109 case std::memory_order_acq_rel: // fallthrough
110 case std::memory_order_acquire:
111 std::atomic_signal_fence(std::memory_order_acquire);
112 break;
113 case std::memory_order_seq_cst:
114 std::atomic_signal_fence(std::memory_order_seq_cst);
115 break;
116 case std::memory_order_relaxed: // fallthrough
117 case std::memory_order_release:
118 // no fence needed
119 break;
120 default:;
121 }
122 }
123
124 [[gnu::always_inline]] T load(std::memory_order o) const {
125 if (o == std::memory_order_seq_cst)
126 std::atomic_signal_fence(std::memory_order_seq_cst);
127 auto t = value.load(std::memory_order_relaxed);
129 return t;
130 }
131
132 [[gnu::always_inline]] void store(T t, std::memory_order o) noexcept {
134 value.store(t, std::memory_order_relaxed);
135 if (o == std::memory_order_seq_cst)
136 std::atomic_signal_fence(std::memory_order_seq_cst);
137 }
138
139#if CS_USE_ATOMIC_RMW
140 [[gnu::always_inline]] T exchange(T arg, std::memory_order o) {
142 auto t = value.exchange(arg, std::memory_order_relaxed);
144 return t;
145 }
146
147 [[gnu::always_inline]] bool
148 compare_exchange_strong(T &expected, T desired,
149 std::memory_order o) noexcept {
151 bool success = value.compare_exchange_strong(expected, desired,
152 std::memory_order_relaxed);
154 return success;
155 }
156
157 [[gnu::always_inline]] bool
158 compare_exchange_weak(T &expected, T desired,
159 std::memory_order o) noexcept {
161 bool success = value.compare_exchange_weak(expected, desired,
162 std::memory_order_relaxed);
164 return success;
165 }
166
167 [[gnu::always_inline]] T fetch_add(T arg, std::memory_order o) {
169 auto t = value.fetch_add(arg, std::memory_order_relaxed);
171 return t;
172 }
173
174 [[gnu::always_inline]] T fetch_sub(T arg, std::memory_order o) {
176 auto t = value.fetch_sub(arg, std::memory_order_relaxed);
178 return t;
179 }
180#else
181 [[gnu::always_inline]] T exchange(T arg, std::memory_order o) {
182 ScopedInterruptDisabler disable_interrupts;
184 auto t = value.load(std::memory_order_relaxed);
185 value.store(arg, std::memory_order_relaxed);
187 return t;
188 }
189
190 [[gnu::always_inline]] bool
191 compare_exchange_strong(T &expected, T desired,
192 std::memory_order o) noexcept {
193 ScopedInterruptDisabler disable_interrupts;
194 if (o == std::memory_order_seq_cst)
195 std::atomic_signal_fence(std::memory_order_seq_cst);
196 auto t = value.load(std::memory_order_relaxed);
197 bool success = t == expected;
198 if (success) {
200 value.store(desired, std::memory_order_relaxed);
201 } else {
202 expected = t;
203 }
205 return success;
206 }
207
208 [[gnu::always_inline]] bool
209 compare_exchange_weak(T &expected, T desired,
210 std::memory_order o) noexcept {
211 return compare_exchange_strong(expected, desired, o);
212 }
213
214 [[gnu::always_inline]] T fetch_add(T arg, std::memory_order o) {
215 ScopedInterruptDisabler disable_interrupts;
216 auto t = load(o);
217 store(t + arg, o);
218 return t;
219 }
220
221 [[gnu::always_inline]] T fetch_sub(T arg, std::memory_order o) {
222 ScopedInterruptDisabler disable_interrupts;
223 auto t = load(o);
224 store(t - arg, o);
225 return t;
226 }
227#endif
228
229 private:
230 std::atomic<T> value;
231};
232
233#endif
234
#define END_CS_NAMESPACE
#define BEGIN_CS_NAMESPACE
Wrapper that provides atomic access to variables shared between the main program and interrupt handlers.
Definition Atomic.hpp:81
std::atomic< T > value
Definition Atomic.hpp:230
T load(std::memory_order o) const
Definition Atomic.hpp:124
T fetch_add(T arg, std::memory_order o)
Definition Atomic.hpp:167
T exchange(T arg, std::memory_order o)
Definition Atomic.hpp:140
interrupt_atomic() noexcept=default
T fetch_sub(T arg, std::memory_order o)
Definition Atomic.hpp:174
bool compare_exchange_weak(T &expected, T desired, std::memory_order o) noexcept
Definition Atomic.hpp:158
static void after_load_fence(std::memory_order o) noexcept
Definition Atomic.hpp:87
bool compare_exchange_strong(T &expected, T desired, std::memory_order o) noexcept
Definition Atomic.hpp:148
static void before_store_fence(std::memory_order o) noexcept
Definition Atomic.hpp:106
void store(T t, std::memory_order o) noexcept
Definition Atomic.hpp:132
uint32_t state
Definition Atomic.hpp:32