/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "fmutex.h"

#ifdef MC_ON
// NOLINTNEXTLINE(C_RULE_ID_DEFINE_MULTILINE)
#define FAIL_WITH_MESSAGE(m) \
    ASSERT(0);               \
    return
#define LOG_MESSAGE(l, m)
#define HELPERS_TO_UNSIGNED(m) m
#else
#include "utils/logger.h"
#include "utils/type_helpers.h"
#define LOG_MESSAGE(l, m) LOG(l, COMMON) << (m)        // NOLINT(cppcoreguidelines-macro-usage)
#define FAIL_WITH_MESSAGE(m) LOG_MESSAGE(FATAL, m)     // NOLINT(cppcoreguidelines-macro-usage)
#define HELPERS_TO_UNSIGNED(m) helpers::ToUnsigned(m)  // NOLINT(cppcoreguidelines-macro-usage)
namespace panda::os::unix::memory::futex {
#endif

// This flag is set to true in case of a deadlock with daemon threads (only daemon threads
// are left unfinished and they are all in the IS_BLOCKED state). In this case we should terminate
// those threads, ignoring failures in the lock structures' destructors.
static ATOMIC(bool) g_deadlockFlag = false;

#ifdef MC_ON
// GenMC does not support syscalls (futex)
static ATOMIC_INT g_futexSignal;

static inline void FutexWait(ATOMIC_INT *m, int v)
{
    // Atomic with acquire order reason: mutex synchronization
    int s = ATOMIC_LOAD(&g_futexSignal, memory_order_acquire);
    // Atomic with acquire order reason: mutex synchronization
    if (ATOMIC_LOAD(m, memory_order_acquire) != v) {
        return;
    }
    // Atomic with acquire order reason: mutex synchronization
    while (ATOMIC_LOAD(&g_futexSignal, memory_order_acquire) == s) {
    }
}

static inline void FutexWake()
{
    // Atomic with release order reason: mutex synchronization
    ATOMIC_FETCH_ADD(&g_futexSignal, 1, memory_order_release);
}
#else
// futex() is defined in the header, as it is still included in different places
#endif

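// Returns true when lock-structure consistency checks should be skipped because a deadlock
// with daemon threads has been reported via MutexIgnoreChecksOnDeadlock().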
bool MutexDoNotCheckOnDeadlock()
{
    return g_deadlockFlag;
}

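// Records that a deadlock with daemon threads was detected, so subsequent mutex destruction
// skips the "still locked / has waiters" checks.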
void MutexIgnoreChecksOnDeadlock()
{
    g_deadlockFlag = true;
}

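// Returns the address of state_and_waiters_ as a plain int*, which is the word passed to the
// futex() syscall below.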
int *GetStateAddr(struct fmutex *const m)
{
    return reinterpret_cast<int *>(&m->state_and_waiters_);
}

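// The waiters count lives in the upper bits of state_and_waiters_ (above WAITER_SHIFT);
// these helpers adjust it by WAITER_INCREMENT without touching the held bit.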
void IncrementWaiters(struct fmutex *m)
{
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_FETCH_ADD(&m->state_and_waiters_, WAITER_INCREMENT, memory_order_relaxed);
}

void DecrementWaiters(struct fmutex *m)
{
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_FETCH_SUB(&m->state_and_waiters_, WAITER_INCREMENT, memory_order_relaxed);
}

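// Returns the number of waiters currently recorded in state_and_waiters_.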
int32_t GetWaiters(struct fmutex *const m)
{
    // Atomic with relaxed order reason: mutex synchronization
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    return static_cast<uint32_t>(ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed)) >> WAITER_SHIFT;
}

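// Returns true if the mutex is currently owned by the given thread.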
bool IsHeld(struct fmutex *const m, THREAD_ID thread)
{
    // Atomic with relaxed order reason: mutex synchronization
    return ATOMIC_LOAD(&m->exclusive_owner_, memory_order_relaxed) == thread;
}

// Spin for small arguments and yield for longer ones.
static void BackOff(uint32_t i)
{
    static constexpr uint32_t SPIN_MAX = 10;
    if (i <= SPIN_MAX) {
        volatile uint32_t x = 0;  // Volatile to make sure loop is not optimized out.
        const uint32_t spinCount = 10 * i;
        for (uint32_t spin = 0; spin < spinCount; spin++) {
            ++x;
        }
    } else {
#ifdef MC_ON
#else
        thread::ThreadYield();
#endif
    }
}

// Briefly spin, waiting for the held bit in *addr to be cleared.
// Return true if the lock was observed free, false if we gave up after maxIter iterations.
static inline bool WaitBrieflyFor(ATOMIC_INT *addr)
{
    // We probably don't want to do a syscall (context switch) while in WaitBrieflyFor
    static constexpr uint32_t MAX_BACK_OFF = 10;
    static constexpr uint32_t maxIter = 50;
    for (uint32_t i = 1; i <= maxIter; i++) {
        BackOff(MIN(i, MAX_BACK_OFF));
        // Atomic with relaxed order reason: mutex synchronization
        int state = ATOMIC_LOAD(addr, memory_order_relaxed);
        if ((HELPERS_TO_UNSIGNED(state) & HELPERS_TO_UNSIGNED(HELD_MASK)) == 0) {
            return true;
        }
    }
    return false;
}

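// Puts the mutex into its initial unlocked state (no owner, no waiters, non-recursive).
//
// A minimal usage sketch (illustrative only, assuming the fmutex API as declared in fmutex.h):
//
//   struct fmutex m;
//   MutexInit(&m);
//   if (MutexLock(&m, false)) {
//       // critical section
//       MutexUnlock(&m);
//   }
//   MutexDestroy(&m);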
void MutexInit(struct fmutex *m)
{
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->exclusive_owner_, 0, memory_order_relaxed);
    m->recursiveCount = 0;
    m->recursive_mutex_ = false;
    // Atomic with release order reason: mutex synchronization
    ATOMIC_STORE(&m->state_and_waiters_, 0, memory_order_release);
}

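// Verifies that the mutex is neither held nor has waiters before it goes away. On
// non-PANDA_TARGET_MOBILE builds the checks are skipped when a deadlock with daemon threads
// has been reported (see g_deadlockFlag).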
void MutexDestroy(struct fmutex *m)
{
#ifndef PANDA_TARGET_MOBILE
    // We can ignore these checks on devices because we use zygote and do not destroy runtime.
    if (!MutexDoNotCheckOnDeadlock()) {
#endif  // PANDA_TARGET_MOBILE
        // Atomic with relaxed order reason: mutex synchronization
        if (ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed) != 0) {
            FAIL_WITH_MESSAGE("Mutex destruction failed; state_and_waiters_ is non zero!");
            // Atomic with relaxed order reason: mutex synchronization
        } else if (ATOMIC_LOAD(&m->exclusive_owner_, memory_order_relaxed) != 0) {
            FAIL_WITH_MESSAGE("Mutex destruction failed; mutex has an owner!");
        }
#ifndef PANDA_TARGET_MOBILE
    } else {
        LOG_MESSAGE(WARNING, "Deadlock detected, ignoring Mutex");
    }
#endif  // PANDA_TARGET_MOBILE
}

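// Acquires the mutex. Fast path: a weak CAS that sets HELD_MASK while the state shows the lock
// free. Slow path: spin briefly via WaitBrieflyFor(); if the lock is still held, register as a
// waiter and block on the futex word until woken, then retry. With trylock == true the function
// returns false instead of waiting. For a recursive mutex a re-entrant lock by the owning thread
// only increments recursiveCount.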
bool MutexLock(struct fmutex *m, bool trylock)
{
    if (current_tid == 0) {
        current_tid = GET_CURRENT_THREAD;
    }
    if (m->recursive_mutex_) {
        if (IsHeld(m, current_tid)) {
            m->recursiveCount++;
            return true;
        }
    }

    ASSERT(!IsHeld(m, current_tid));
    bool done = false;
    while (!done) {
        // Atomic with relaxed order reason: mutex synchronization
        auto curState = ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed);
        if (LIKELY((HELPERS_TO_UNSIGNED(curState) & HELPERS_TO_UNSIGNED(HELD_MASK)) == 0)) {
            // Lock not held, try acquiring it.
            auto newState = HELPERS_TO_UNSIGNED(curState) | HELPERS_TO_UNSIGNED(HELD_MASK);
            done =
                ATOMIC_CAS_WEAK(&m->state_and_waiters_, curState, newState, memory_order_acquire, memory_order_relaxed);
        } else {
            if (trylock) {
                return false;
            }
            // Failed to acquire, wait for unlock
            // NOLINTNEXTLINE(hicpp-signed-bitwise)
            if (!WaitBrieflyFor(&m->state_and_waiters_)) {
                // WaitBrieflyFor failed, go to futex wait
                // Increment waiters count.
                IncrementWaiters(m);
                // Update curState to be equal to current expected state_and_waiters_.
                curState += WAITER_INCREMENT;
                // Retry wait until lock not held. In heavy contention situations curState check can fail
                // immediately due to repeatedly decrementing and incrementing waiters.
                // NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                while ((HELPERS_TO_UNSIGNED(curState) & HELPERS_TO_UNSIGNED(HELD_MASK)) != 0) {
#ifdef MC_ON
                    FutexWait(&m->state_and_waiters_, curState);
#else
                    // NOLINTNEXTLINE(hicpp-signed-bitwise), NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                    if (futex(GetStateAddr(m), FUTEX_WAIT_PRIVATE, curState, nullptr, nullptr, 0) != 0) {
                        // NOLINTNEXTLINE(C_RULE_ID_FUNCTION_NESTING_LEVEL)
                        if ((errno != EAGAIN) && (errno != EINTR)) {
                            LOG(FATAL, COMMON) << "Futex wait failed!";
                        }
                    }
#endif
                    // Atomic with relaxed order reason: mutex synchronization
                    curState = ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed);
                }
                DecrementWaiters(m);
            }
        }
    }
    // Mutex is held now
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT((HELPERS_TO_UNSIGNED(ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed)) &
            HELPERS_TO_UNSIGNED(HELD_MASK)) != 0);
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(ATOMIC_LOAD(&m->exclusive_owner_, memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->exclusive_owner_, current_tid, memory_order_relaxed);
    m->recursiveCount++;
    ASSERT(m->recursiveCount == 1);  // should be 1 here, there's a separate path for recursive mutex above
    return true;
}

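// Tries to acquire the mutex without blocking: up to maxIter rounds of trylock, each followed
// by a brief spin while the lock looks held. Returns false if the lock could not be taken.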
bool MutexTryLockWithSpinning(struct fmutex *m)
{
    const int maxIter = 10;
    for (int i = 0; i < maxIter; i++) {
        if (MutexLock(m, true)) {
            return true;
        }
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        if (!WaitBrieflyFor(&m->state_and_waiters_)) {
            // WaitBrieflyFor failed, which means the lock is still held
            return false;
        }
    }
    return false;
}

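// Releases the mutex held by the current thread. For a recursive mutex the lock is only released
// once recursiveCount drops to zero. Clears the owner, then clears HELD_MASK with a release CAS;
// if the resulting state is non-zero (waiters remain), one waiter is woken via the futex word.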
void MutexUnlock(struct fmutex *m)
{
    if (current_tid == 0) {
        current_tid = GET_CURRENT_THREAD;
    }
    if (!IsHeld(m, current_tid)) {
        FAIL_WITH_MESSAGE("Trying to unlock mutex which is not held by current thread");
    }
    m->recursiveCount--;
    if (m->recursive_mutex_) {
        if (m->recursiveCount > 0) {
            return;
        }
    }

    ASSERT(m->recursiveCount == 0);  // should be 0 here, there's a separate path for recursive mutex above
    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    auto curState = ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed);
    // Retry CAS until success
    while (!done) {
        auto newState = HELPERS_TO_UNSIGNED(curState) & ~HELPERS_TO_UNSIGNED(HELD_MASK);  // State without holding bit
        if ((HELPERS_TO_UNSIGNED(curState) & HELPERS_TO_UNSIGNED(HELD_MASK)) == 0) {
            FAIL_WITH_MESSAGE("Mutex unlock got unexpected state, mutex is unlocked?");
        }
        // Reset exclusive owner before changing state to avoid check failures if other thread sees UNLOCKED
        // Atomic with relaxed order reason: mutex synchronization
        ATOMIC_STORE(&m->exclusive_owner_, 0, memory_order_relaxed);
        // curState should be updated with fetched value on fail
        done = ATOMIC_CAS_WEAK(&m->state_and_waiters_, curState, newState, memory_order_release, memory_order_relaxed);
        if (LIKELY(done)) {
            // If we had waiters, we need to do futex call
            if (UNLIKELY(newState != 0)) {
#ifdef MC_ON
                FutexWake();
#else
                // NOLINTNEXTLINE(hicpp-signed-bitwise)
                futex(GetStateAddr(m), FUTEX_WAKE_PRIVATE, WAKE_ONE, nullptr, nullptr, 0);
#endif
            }
        }
    }
}

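// Marks the mutex as held on behalf of another thread. Intended for uncontended use only:
// the state must be 0 on entry, and no waiter handling is performed.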
void MutexLockForOther(struct fmutex *m, THREAD_ID thread)
{
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->state_and_waiters_, HELD_MASK, memory_order_relaxed);
    m->recursiveCount = 1;
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->exclusive_owner_, thread, memory_order_relaxed);
}

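// Counterpart to MutexLockForOther(): releases a mutex that was locked on behalf of the given
// thread. Fails if that thread does not own the mutex; asserts that there are no waiters.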
void MutexUnlockForOther(struct fmutex *m, THREAD_ID thread)
{
    if (!IsHeld(m, thread)) {
        FAIL_WITH_MESSAGE("Unlocking for thread which doesn't own this mutex");
    }
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(ATOMIC_LOAD(&m->state_and_waiters_, memory_order_relaxed) == HELD_MASK);
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->state_and_waiters_, 0, memory_order_relaxed);
    m->recursiveCount = 0;
    // Atomic with relaxed order reason: mutex synchronization
    ATOMIC_STORE(&m->exclusive_owner_, 0, memory_order_relaxed);
}

#ifdef MC_ON
#else
}  // namespace panda::os::unix::memory::futex
#endif