/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef META_BASE_ATOMICS_H
#define META_BASE_ATOMICS_H

#include <stdint.h>

#include <core/namespace.h>
#if defined(_MSC_VER) && defined(WIN32)
#include <intrin.h>
#endif

CORE_BEGIN_NAMESPACE();
/*
 * Implementation of InterlockedIncrement/InterlockedDecrement (int32_t atomics).
 * The bare minimum needed to implement thread-safe reference counters.
 */

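/*
 * Example (illustrative sketch only, not part of this header's API): a minimal intrusive
 * reference counter built on the primitives declared below. The names used here
 * (RefCountedSample, Ref, Unref, TryRef, count_) are hypothetical.
 *
 *   class RefCountedSample {
 *   public:
 *       void Ref()
 *       {
 *           AtomicIncrement(&count_);
 *       }
 *       void Unref()
 *       {
 *           // Destroy the object when the last reference is released.
 *           if (AtomicDecrement(&count_) == 0) {
 *               delete this;
 *           }
 *       }
 *       // Take a reference only if the object is still alive (count > 0),
 *       // e.g. when promoting a weak reference to a strong one.
 *       bool TryRef()
 *       {
 *           return AtomicIncrementIfNotZero(&count_) != 0;
 *       }
 *
 *   private:
 *       volatile int32_t count_ { 1 }; // the creator holds the first reference
 *   };
 */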
#if defined(_MSC_VER) && defined(WIN32)
// On Windows with Visual Studio we simply forward to the matching compiler intrinsics.
inline int32_t AtomicIncrement(volatile int32_t* a)
{
    return ::_InterlockedIncrement((long*)a);
}
inline int32_t AtomicDecrement(volatile int32_t* a)
{
    return ::_InterlockedDecrement((long*)a);
}
inline int32_t AtomicRead(const volatile int32_t* a)
{
    // Atomically adding zero yields the current value without modifying it.
    return ::_InterlockedExchangeAdd((long*)a, 0);
}
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a)
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        // Try to replace the current non-zero value with value + 1; retry if another thread changed it first.
        v = ::_InterlockedCompareExchange((long*)a, v + 1, v);
        if (v == temp) {
            return temp;
        }
    }
    return v;
}

// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY where lock contention is expected to be negligible.
// It also does not ensure fairness, but it is most likely sufficient for our reference-counting purposes.
// It is non-recursive, so a thread may only lock it once at a time.
class SpinLock {
public:
    void Lock()
    {
        while (_InterlockedCompareExchange(&lock_, taken_, free_) == taken_) {
        }
    }
    void Unlock()
    {
        _InterlockedExchange(&lock_, free_);
    }

private:
    long lock_ = 0;
    static constexpr long taken_ = 1;
    static constexpr long free_ = 0;
};
#elif defined(__has_builtin) && __has_builtin(__atomic_add_fetch) && __has_builtin(__atomic_load_n) && \
    __has_builtin(__atomic_compare_exchange_n)
/* GCC built-in atomics, also supported by Clang */
inline int32_t AtomicIncrement(volatile int32_t* a)
{
    return __atomic_add_fetch(a, 1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicDecrement(volatile int32_t* a)
{
    return __atomic_add_fetch(a, -1, __ATOMIC_ACQ_REL);
}
inline int32_t AtomicRead(const volatile int32_t* a)
{
    return __atomic_load_n(a, __ATOMIC_ACQUIRE);
}
inline int32_t AtomicIncrementIfNotZero(volatile int32_t* a)
{
    int32_t v = AtomicRead(a);
    while (v) {
        int32_t temp = v;
        // Try to replace the current non-zero value with value + 1; on failure v is refreshed with the latest value.
        if (__atomic_compare_exchange_n(a, &v, temp + 1, false, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
            return temp;
        }
    }
    return v;
}
// Trivial spinlock implemented with atomics.
// NOTE: this does NOT yield while waiting, so use it ONLY where lock contention is expected to be negligible.
// It also does not ensure fairness, but it is most likely sufficient for our reference-counting purposes.
// It is non-recursive, so a thread may only lock it once at a time.
class SpinLock {
public:
    void Lock()
    {
        long taken = 1;
        long expect = 0;
        while (!__atomic_compare_exchange(&lock_, &expect, &taken, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
            expect = 0;
        }
    }
    void Unlock()
    {
        long free = 0;
        __atomic_store(&lock_, &free, __ATOMIC_SEQ_CST);
    }

private:
    long lock_ = 0;
};

#else
#error Compiler/platform-specific atomic methods not implemented!
#endif

/**
 * @brief Scoped helper to lock and unlock spin locks.
 */
class ScopedSpinLock {
public:
    ScopedSpinLock(const ScopedSpinLock&) = delete;
    ScopedSpinLock& operator=(const ScopedSpinLock&) = delete;
    ScopedSpinLock(ScopedSpinLock&&) = delete;
    ScopedSpinLock& operator=(ScopedSpinLock&&) = delete;

    explicit ScopedSpinLock(SpinLock& l) : lock_(l)
    {
        lock_.Lock();
    }
    ~ScopedSpinLock()
    {
        lock_.Unlock();
    }

private:
    SpinLock& lock_;
};
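/*
 * Example (illustrative sketch only): guarding a small critical section with ScopedSpinLock.
 * The surrounding names (counterLock, sharedCounter, IncrementShared) are hypothetical.
 *
 *   SpinLock counterLock;
 *   int sharedCounter = 0;
 *
 *   void IncrementShared()
 *   {
 *       ScopedSpinLock guard(counterLock); // locked here, unlocked when guard goes out of scope
 *       ++sharedCounter;
 *   }
 */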

CORE_END_NAMESPACE();
#endif