/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef META_BASE_ATOMICS_H
#define META_BASE_ATOMICS_H

#include <stdint.h>

#include <core/namespace.h>
#if defined(_MSC_VER) && defined(WIN32)
#include <intrin.h>
#endif

CORE_BEGIN_NAMESPACE();
/*
 * Implementation of InterlockedIncrement/InterlockedDecrement (int32_t atomics).
 * Bare minimum to implement thread-safe reference counters.
 */

#if defined(_MSC_VER) && defined(WIN32)
// On Windows and Visual Studio, we just forward to the matching compiler intrinsics.
inline int32_t AtomicIncrement(volatile int32_t* a)
{
    return ::_InterlockedIncrement((long*)a);
}
inline int32_t AtomicDecrement(volatile int32_t* a)
{
    return ::_InterlockedDecrement((long*)a);
}
inline int32_t AtomicRead(const volatile int32_t* a)
{
    return ::_InterlockedExchangeAdd((long*)a, 0);
}
// Increments *a only if it is non-zero. Returns the previous value; 0 means no increment was done.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a)
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        v = ::_InterlockedCompareExchange((long*)a, v + 1, v);
        if (v == temp) {
            return temp;
        }
    }
    return v;
}

// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY in places where lock contention times are expected to be
// trivial. It also does not ensure fairness, but that is most likely enough for our reference counting purposes,
// and it is non-recursive, so a thread can only lock it once.
class SpinLock {
public:
    void Lock()
    {
        while (_InterlockedCompareExchange(&lock_, taken_, free_) == taken_) {
        }
    }
    void Unlock()
    {
        _InterlockedExchange(&lock_, free_);
    }

private:
    long lock_ = 0;
    static constexpr long taken_ = 1;
    static constexpr long free_ = 0;
};
#elif defined(__has_builtin) && __has_builtin(__atomic_add_fetch) && __has_builtin(__atomic_load_n) && \
    __has_builtin(__atomic_compare_exchange_n)
/* GCC built-in atomics, also supported by Clang. */
inline int32_t AtomicIncrement(volatile int32_t* a)
{
    return __atomic_add_fetch(a, 1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicDecrement(volatile int32_t* a)
{
    return __atomic_add_fetch(a, -1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicRead(const volatile int32_t* a)
{
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
}
// Increments *a only if it is non-zero. Returns the previous value; 0 means no increment was done.
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a)
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        if (__atomic_compare_exchange_n(a, &v, temp + 1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            return temp;
        }
    }
    return v;
}
// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY in places where lock contention times are expected to be
// trivial. It also does not ensure fairness, but that is most likely enough for our reference counting purposes,
// and it is non-recursive, so a thread can only lock it once.
class SpinLock {
public:
    void Lock()
    {
        long taken = 1;
        long expect = 0;
        while (!__atomic_compare_exchange(&lock_, &expect, &taken, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            expect = 0;
        }
    }
    void Unlock()
    {
        long free = 0;
        __atomic_store(&lock_, &free, __ATOMIC_SEQ_CST);
    }

private:
    long lock_ = 0;
};

#else
#error Compiler / Platform specific atomic methods not implemented!
#endif
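
// Usage sketch (illustrative only): the helpers above are typically combined into an
// intrusive, thread-safe reference counter roughly like the one below, where
// AtomicIncrementIfNotZero covers promoting a weak reference to a strong one.
// RefCountedThing and its members are hypothetical names, not part of this header.
//
//     class RefCountedThing {
//     public:
//         void Ref()
//         {
//             AtomicIncrement(&count_);
//         }
//         void Unref()
//         {
//             if (AtomicDecrement(&count_) == 0) {
//                 delete this;
//             }
//         }
//         // Succeeds only while the object is still alive (count was non-zero).
//         bool TryRef()
//         {
//             return AtomicIncrementIfNotZero(&count_) != 0;
//         }
//
//     private:
//         volatile int32_t count_ { 1 };
//     };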

/**
 * @brief Scoped helper to lock and unlock spin locks.
 */
class ScopedSpinLock {
public:
    ScopedSpinLock(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
    ScopedSpinLock(ScopedSpinLock&&) = delete;
    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;

    explicit ScopedSpinLock(SpinLock& l) : lock_(l)
    {
        lock_.Lock();
    }
    ~ScopedSpinLock()
    {
        lock_.Unlock();
    }

private:
    SpinLock& lock_;
};

CORE_END_NAMESPACE();
#endif // META_BASE_ATOMICS_H
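
// Usage sketch (illustrative only; BumpCounter and the globals below are hypothetical
// names, not part of this header). ScopedSpinLock is meant to guard short critical sections:
//
//     SpinLock g_lock;
//     int32_t g_value = 0;
//
//     void BumpCounter()
//     {
//         ScopedSpinLock guard(g_lock); // locks g_lock here...
//         ++g_value;                    // ...keep the critical section short...
//     }                                 // ...and unlocks automatically when guard goes out of scope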