/**
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mutex.h"
#include "fmutex.h"
#include "utils/logger.h"
#include "utils/type_helpers.h"

#include <cstring>
#include <cerrno>
#include <ctime>

#include <sched.h>

namespace panda::os::unix::memory::futex {

// Avoid repeatedly calling GetCurrentThreadId by caching the tid in a thread-local variable.
thread_local thread::ThreadId current_tid {0};

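// A tid cached before fork() is stale in the child process (the child runs on a new
// thread with a different tid), so PostFork() refreshes the cache there.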
void PostFork()
{
    current_tid = os::thread::GetCurrentThreadId();
}

// Spin for small arguments and yield for larger ones.
static void BackOff(uint32_t i)
{
    static constexpr uint32_t SPIN_MAX = 10;
    if (i <= SPIN_MAX) {
        volatile uint32_t x = 0;  // Volatile to make sure the loop is not optimized out.
        const uint32_t spin_count = 10 * i;
        for (uint32_t spin = 0; spin < spin_count; spin++) {
            ++x;
        }
    } else {
        thread::ThreadYield();
    }
}
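
// The busy-wait grows linearly with the argument (10 * i dummy increments for i <= SPIN_MAX),
// so early retries stay cheap; a caller passing i > SPIN_MAX gives up the CPU with a yield
// instead of burning more cycles.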

// Wait until pred is true or until the iteration limit is reached.
// Returns true if the predicate test succeeded, false if we gave up waiting.
template <typename Pred>
static inline bool WaitBrieflyFor(std::atomic_int *addr, Pred pred)
{
    // We don't want to make a syscall (and switch context) while in WaitBrieflyFor.
    static constexpr uint32_t MAX_BACK_OFF = 10;
    static constexpr uint32_t MAX_ITER = 50;
    for (uint32_t i = 1; i <= MAX_ITER; i++) {
        BackOff(std::min(i, MAX_BACK_OFF));
        // Atomic with relaxed order reason: mutex synchronization
        if (pred(addr->load(std::memory_order_relaxed))) {
            return true;
        }
    }
    return false;
}
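
// WaitBrieflyFor is the spinning half of the two-phase waiting used throughout this file:
// spin briefly in user space hoping the state changes, and only fall back to sleeping in
// the kernel with FUTEX_WAIT when it does not. A minimal sketch of the pattern, using the
// RWLock members defined below:
//
//   if (!WaitBrieflyFor(&state_, [](int32_t s) { return s == UNLOCKED; })) {
//       // Slow path: sleep until another thread changes state_ and wakes us.
//       futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0);
//   }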

// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init)
Mutex::Mutex()
{
    MutexInit(&mutex_);
}

Mutex::~Mutex()
{
    MutexDestroy(&mutex_);
}

void Mutex::Lock()
{
    MutexLock(&mutex_, false);
}

bool Mutex::TryLock()
{
    return MutexLock(&mutex_, true);
}

bool Mutex::TryLockWithSpinning()
{
    return MutexTryLockWithSpinning(&mutex_);
}

void Mutex::Unlock()
{
    MutexUnlock(&mutex_);
}

void Mutex::LockForOther(thread::ThreadId thread)
{
    MutexLockForOther(&mutex_, thread);
}

void Mutex::UnlockForOther(thread::ThreadId thread)
{
    MutexUnlockForOther(&mutex_, thread);
}

RWLock::~RWLock()
{
#ifndef PANDA_TARGET_MOBILE
    if (!Mutex::DoNotCheckOnDeadlock()) {
#endif  // PANDA_TARGET_MOBILE
        // Atomic with relaxed order reason: mutex synchronization
        if (state_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; state_ is non zero!";
            // Atomic with relaxed order reason: mutex synchronization
        } else if (exclusive_owner_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has an owner!";
            // Atomic with relaxed order reason: mutex synchronization
        } else if (waiters_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "RWLock destruction failed; RWLock has waiters!";
        }
#ifndef PANDA_TARGET_MOBILE
    } else {
        LOG(WARNING, COMMON) << "Deadlock detected, ignoring RWLock";
    }
#endif  // PANDA_TARGET_MOBILE
}

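// state_ encodes the whole lock mode in one futex word: UNLOCKED (0) when free, WRITE_LOCKED
// while a writer holds it, and a positive reader count (stepped by READ_INCREMENT) while
// readers hold it. The read paths test `state >= UNLOCKED` because any non-negative state
// admits one more reader.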
void RWLock::WriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    while (!done) {
        // Atomic with relaxed order reason: mutex synchronization
        auto cur_state = state_.load(std::memory_order_relaxed);
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, we can acquire the write lock
            // Do CAS in case another thread beats us to it and acquires a read lock first
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // Wait until RWLock is unlocked
            if (!WaitBrieflyFor(&state_, [](int32_t state) { return state == UNLOCKED; })) {
                // WaitBrieflyFor failed, go to futex wait
                // Increment the waiters count.
                IncrementWaiters();
                // Retry waiting until the lock is released. With more than one reader, a failed
                // cur_state check does not mean the lock is unlocked.
                while (cur_state != UNLOCKED) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise)
                    if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                        if ((errno != EAGAIN) && (errno != EINTR)) {
                            LOG(FATAL, COMMON) << "Futex wait failed!";
                        }
                    }
                    // Atomic with relaxed order reason: mutex synchronization
                    cur_state = state_.load(std::memory_order_relaxed);
                }
                DecrementWaiters();
            }
        }
    }
    // RWLock is held now
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
}

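// Slow path for read acquisition: called when a writer holds the lock (the uncontended
// fast path is not in this file and presumably lives in the header).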
void RWLock::HandleReadLockWait(int32_t cur_state)
{
    // Wait until the RWLock's write lock is released
    if (!WaitBrieflyFor(&state_, [](int32_t state) { return state >= UNLOCKED; })) {
        // WaitBrieflyFor failed, go to futex wait
        IncrementWaiters();
        // Retry waiting until the write lock is no longer held.
        while (cur_state == WRITE_LOCKED) {
            // NOLINTNEXTLINE(hicpp-signed-bitwise)
            if (futex(GetStateAddr(), FUTEX_WAIT_PRIVATE, cur_state, nullptr, nullptr, 0) != 0) {
                if ((errno != EAGAIN) && (errno != EINTR)) {
                    LOG(FATAL, COMMON) << "Futex wait failed!";
                }
            }
            // Atomic with relaxed order reason: mutex synchronization
            cur_state = state_.load(std::memory_order_relaxed);
        }
        DecrementWaiters();
    }
}

bool RWLock::TryReadLock()
{
    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (cur_state >= UNLOCKED) {
            auto new_state = cur_state + READ_INCREMENT;
            // cur_state is updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, new_state, std::memory_order_acquire);
        } else {
            // RWLock is write-held, trylock failed.
            return false;
        }
    }
    ASSERT(!HasExclusiveHolder());
    return true;
}

bool RWLock::TryWriteLock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_state = state_.load(std::memory_order_relaxed);
    while (!done) {
        if (LIKELY(cur_state == UNLOCKED)) {
            // Unlocked, we can acquire the write lock
            // Do CAS in case another thread beats us to it and acquires a read lock first
            // cur_state is updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, WRITE_LOCKED, std::memory_order_acquire);
        } else {
            // RWLock is held, trylock failed.
            return false;
        }
    }
    // RWLock is held now
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(state_.load(std::memory_order_relaxed) == WRITE_LOCKED);
    // Atomic with relaxed order reason: mutex synchronization
    ASSERT(exclusive_owner_.load(std::memory_order_relaxed) == 0);
    // Atomic with relaxed order reason: mutex synchronization
    exclusive_owner_.store(current_tid, std::memory_order_relaxed);
    return true;
}

void RWLock::WriteUnlock()
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    ASSERT(IsExclusiveHeld(current_tid));

    bool done = false;
    // Atomic with relaxed order reason: mutex synchronization
    int32_t cur_state = state_.load(std::memory_order_relaxed);
    // CAS is weak and may fail spuriously, so do it in a loop
    while (!done) {
        if (LIKELY(cur_state == WRITE_LOCKED)) {
            // Reset the exclusive owner before changing state, to avoid check failures if
            // another thread sees UNLOCKED
            // Atomic with relaxed order reason: mutex synchronization
            exclusive_owner_.store(0, std::memory_order_relaxed);
            // Change state to unlocked with a release store.
            // The waiters_ load must not be reordered before the state_ store, so both are seq_cst.
            // cur_state is updated with the fetched value on failure
            done = state_.compare_exchange_weak(cur_state, UNLOCKED, std::memory_order_seq_cst);
            if (LIKELY(done)) {
                // We are doing a write unlock; all waiters could be readers, so we need to wake them all.
                // Atomic with seq_cst order reason: mutex synchronization
                if (waiters_.load(std::memory_order_seq_cst) > 0) {
                    // NOLINTNEXTLINE(hicpp-signed-bitwise)
                    futex(GetStateAddr(), FUTEX_WAKE_PRIVATE, WAKE_ALL, nullptr, nullptr, 0);
                }
            }
        } else {
            LOG(FATAL, COMMON) << "RWLock WriteUnlock got unexpected state, RWLock is not writelocked?";
        }
    }
}
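
// Why the seq_cst pair above matters: if the waiters_ load could be reordered before the
// unlock store, this thread might read waiters_ == 0, a waiter could then register itself
// and go to sleep while the lock still looks held, and the wake would never be issued,
// leaving that waiter blocked forever.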

ConditionVariable::~ConditionVariable()
{
#ifndef PANDA_TARGET_MOBILE
    if (!Mutex::DoNotCheckOnDeadlock()) {
#endif  // PANDA_TARGET_MOBILE
        // Atomic with relaxed order reason: mutex synchronization
        if (waiters_.load(std::memory_order_relaxed) != 0) {
            LOG(FATAL, COMMON) << "CondVar destruction failed; waiters_ is non zero!";
        }
#ifndef PANDA_TARGET_MOBILE
    } else {
        LOG(WARNING, COMMON) << "Deadlock detected, ignoring CondVar";
    }
#endif  // PANDA_TARGET_MOBILE
}

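// cond_ is a sequence counter that closes the classic lost-wakeup window: Wait() reads it
// while the mutex is still held and hands that value to FUTEX_WAIT, so a Signal that
// increments cond_ between the unlock and the futex call makes the kernel-side value check
// fail (EAGAIN) instead of putting the thread to sleep. A minimal usage sketch, with
// hypothetical caller names:
//
//   mutex.Lock();
//   while (!ready) {      // re-check the predicate: wakeups may be spurious
//       cv.Wait(&mutex);
//   }
//   mutex.Unlock();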
void ConditionVariable::Wait(Mutex *mutex)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar Wait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar
    Mutex *old_mutex = nullptr;
    // Atomic with relaxed order reason: mutex synchronization
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure and the old value is still nullptr,
        // or we must make sure the stored mutex pointer equals the provided one
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar Wait failed; mutex_ptr_ doesn't equal to provided mutex";
        }
    }

    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->GetRecursiveCount();
    mutex->SetRecursiveCount(1);
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();
    // NOLINTNEXTLINE(hicpp-signed-bitwise)
    if (futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, nullptr, nullptr, 0) != 0) {
        if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->SetRecursiveCount(old_count);
    mutex->DecrementWaiters();
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_sub(1, std::memory_order_relaxed);
}

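// Converts a (milliseconds, nanoseconds) pair to a timespec, carrying nanosecond overflow
// into seconds. For example, ConvertTime(1500, 250000) yields {tv_sec = 1,
// tv_nsec = 500250000}: 1500 ms splits into 1 s + 500 ms, and 500 ms is 500000000 ns,
// plus the extra 250000 ns.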
struct timespec ConvertTime(uint64_t ms, uint64_t ns)
{
    struct timespec time = {0, 0};
    const int64_t MILLISECONDS_PER_SEC = 1000;
    const int64_t NANOSECONDS_PER_MILLISEC = 1000000;
    const int64_t NANOSECONDS_PER_SEC = 1000000000;
    auto seconds = static_cast<time_t>(ms / MILLISECONDS_PER_SEC);
    auto nanoseconds = static_cast<time_t>((ms % MILLISECONDS_PER_SEC) * NANOSECONDS_PER_MILLISEC + ns);
    time.tv_sec += seconds;
    time.tv_nsec += nanoseconds;
    if (time.tv_nsec >= NANOSECONDS_PER_SEC) {
        time.tv_nsec -= NANOSECONDS_PER_SEC;
        time.tv_sec++;
    }
    return time;
}

bool ConditionVariable::TimedWait(Mutex *mutex, uint64_t ms, uint64_t ns, bool is_absolute)
{
    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    if (!mutex->IsHeld(current_tid)) {
        LOG(FATAL, COMMON) << "CondVar Wait failed; provided mutex is not held by current thread";
    }

    // It's undefined behavior to call Wait with different mutexes on the same condvar
    Mutex *old_mutex = nullptr;
    // Atomic with relaxed order reason: mutex synchronization
    while (!mutex_ptr_.compare_exchange_weak(old_mutex, mutex, std::memory_order_relaxed)) {
        // CAS failed: either it was a spurious failure and the old value is still nullptr,
        // or we must make sure the stored mutex pointer equals the provided one
        if (old_mutex != mutex && old_mutex != nullptr) {
            LOG(FATAL, COMMON) << "CondVar Wait failed; mutex_ptr_ doesn't equal to provided mutex";
        }
    }

    bool timeout = false;
    struct timespec time = ConvertTime(ms, ns);
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_add(1, std::memory_order_relaxed);
    mutex->IncrementWaiters();
    auto old_count = mutex->GetRecursiveCount();
    mutex->SetRecursiveCount(1);
    // Atomic with relaxed order reason: mutex synchronization
    auto cur_cond = cond_.load(std::memory_order_relaxed);
    mutex->Unlock();
    int futex_call_res = 0;
    if (is_absolute) {
        // FUTEX_WAIT_BITSET uses absolute time
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int WAIT_BITSET = FUTEX_WAIT_BITSET_PRIVATE;
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        static constexpr int MATCH_ANY = FUTEX_BITSET_MATCH_ANY;
        futex_call_res = futex(GetCondAddr(), WAIT_BITSET, cur_cond, &time, nullptr, MATCH_ANY);
    } else {
        // FUTEX_WAIT uses relative time
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex_call_res = futex(GetCondAddr(), FUTEX_WAIT_PRIVATE, cur_cond, &time, nullptr, 0);
    }
    if (futex_call_res != 0) {
        if (errno == ETIMEDOUT) {
            timeout = true;
        } else if ((errno != EAGAIN) && (errno != EINTR)) {
            LOG(FATAL, COMMON) << "Futex wait failed!";
        }
    }
    mutex->Lock();
    mutex->SetRecursiveCount(old_count);
    mutex->DecrementWaiters();
    // Atomic with relaxed order reason: mutex synchronization
    waiters_.fetch_sub(1, std::memory_order_relaxed);
    return timeout;
}

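// Wakes up to to_wake waiters on this condvar. If the calling thread holds the associated
// mutex, waiters are requeued onto the mutex's futex queue with FUTEX_REQUEUE rather than
// woken: a woken thread would immediately block on the still-held mutex anyway, so
// requeueing avoids that thundering herd.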
void ConditionVariable::SignalCount(int32_t to_wake)
{
    // Atomic with relaxed order reason: mutex synchronization
    if (waiters_.load(std::memory_order_relaxed) == 0) {
        // No waiters, do nothing
        return;
    }

    if (current_tid == 0) {
        current_tid = os::thread::GetCurrentThreadId();
    }
    // Atomic with relaxed order reason: mutex synchronization
    auto mutex = mutex_ptr_.load(std::memory_order_relaxed);
    // If this condvar has waiters, mutex_ptr_ should be set
    ASSERT(mutex != nullptr);
    // Atomic with relaxed order reason: mutex synchronization
    cond_.fetch_add(1, std::memory_order_relaxed);
    if (mutex->IsHeld(current_tid)) {
        // This thread owns the mutex: requeue waiters to the mutex's wait queue. For
        // FUTEX_REQUEUE the timeout argument slot carries the requeue limit, hence the cast.
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        bool success = futex(GetCondAddr(), FUTEX_REQUEUE_PRIVATE, 0, reinterpret_cast<const timespec *>(to_wake),
                             mutex->GetStateAddr(), 0) != -1;
        if (!success) {
            LOG(FATAL, COMMON) << "Futex requeue failed!";
        }
    } else {
        // Mutex is not held by this thread, do a plain wake
        // NOLINTNEXTLINE(hicpp-signed-bitwise)
        futex(GetCondAddr(), FUTEX_WAKE_PRIVATE, to_wake, nullptr, nullptr, 0);
    }
}

}  // namespace panda::os::unix::memory::futex