/*
 * Copyright (c) 2024 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <uv.h>
#include "async_lock.h"
#include "async_lock_manager.h"
#include "helper/error_helper.h"
#include "helper/napi_helper.h"
#include "helper/object_helper.h"
#include "tools/log.h"

namespace Commonlibrary::Concurrent::LocksModule {
using namespace Commonlibrary::Concurrent::Common::Helper;

AsyncLock::AsyncLock(const std::string &lockName)
{
    lockName_ = lockName;
    anonymousLockId_ = 0;
}

AsyncLock::AsyncLock(uint32_t lockId)
{
    lockName_ = "";
    anonymousLockId_ = lockId;
}

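// Creates a promise for the lock request. If options.isAvailable is set and the lock cannot be
// acquired immediately, the promise is rejected; otherwise the request is queued with its timeout
// and the pending queue is processed.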
napi_value AsyncLock::LockAsync(napi_env env, napi_ref cb, LockMode mode, const LockOptions &options)
{
    napi_value promise;
    napi_deferred deferred;
    napi_create_promise(env, &deferred, &promise);
    LockRequest *lockRequest =
        new LockRequest(this, AsyncLockManager::GetCurrentTid(env), env, cb, mode, options, deferred);
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    if (!CanAcquireLock(lockRequest) && options.isAvailable) {
        napi_value err;
        NAPI_CALL(env, napi_create_string_utf8(env, "The lock is acquired", NAPI_AUTO_LENGTH, &err));
        napi_reject_deferred(env, deferred, err);
        // The request was never queued, so release it here to avoid leaking it.
        delete lockRequest;
    } else {
        lockRequest->OnQueued(options.timeoutMillis);
        pendingList_.push_back(lockRequest);
        ProcessPendingLockRequestUnsafe(env, lockRequest);
    }
    return promise;
}

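// Called when a satisfied request has finished: removes it from heldList_, resets the lock status
// if no holders remain, and hands the lock over to the next pending request(s).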
void AsyncLock::CleanUpLockRequestOnCompletion(LockRequest* lockRequest)
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    auto it = std::find(heldList_.begin(), heldList_.end(), lockRequest);
    if (it == heldList_.end()) {
        HILOG_FATAL("Lock is not found");
        return;
    }
    heldList_.erase(it);
    // There may still be other shared lock requests in heldList_. If so, we must not change the status.
    if (heldList_.empty()) {
        lockStatus_ = LOCK_MODE_UNLOCK;
    }
    napi_env env = lockRequest->GetEnv();
    delete lockRequest;
    ProcessPendingLockRequestUnsafe(env);
}

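// Returns true if the request was still pending (it is removed and the caller should handle the
// timeout); returns false if the lock was granted before the timeout could be processed.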
bool AsyncLock::CleanUpLockRequestOnTimeout(LockRequest* lockRequest)
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    auto it = std::find(pendingList_.begin(), pendingList_.end(), lockRequest);
    if (it == pendingList_.end()) {
        // the lock got held while we were waiting on the mutex, no-op
        return false;
    }
    // we won the race, need to remove the request from the queue and handle the timeout event
    pendingList_.erase(it);
    return true;
}

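// Moves the request from the pending queue to heldList_ and invokes its callback, asynchronously or
// synchronously depending on isAsync. The caller holds asyncLockMutex_; it is released while the
// callback is invoked and re-acquired before returning.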
template<bool isAsync>
void AsyncLock::ProcessLockRequest(LockRequest* lockRequest)
{
    lockRequest->OnSatisfied();
    heldList_.push_back(lockRequest);
    pendingList_.pop_front();
    asyncLockMutex_.unlock();
    if constexpr (isAsync) {
        lockRequest->CallCallbackAsync();
    } else {
        lockRequest->CallCallback();
    }
    asyncLockMutex_.lock();
}

void AsyncLock::ProcessPendingLockRequest(napi_env env, LockRequest* syncLockRequest)
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    ProcessPendingLockRequestUnsafe(env, syncLockRequest);
}

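// Must be called with asyncLockMutex_ held. Grants the lock to the next pending request; consecutive
// shared requests at the head of the queue are granted together. When nothing is pending and the
// reference count is zero, the lock schedules its own destruction.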
void AsyncLock::ProcessPendingLockRequestUnsafe(napi_env env, LockRequest* syncLockRequest)
{
    if (pendingList_.empty()) {
        if (refCount_ == 0) {
            // No more refs to the lock. We need to delete the instance but we cannot do it right now
            // because asyncLockMutex_ is acquired. Do it asynchronously.
            AsyncDestroy(env);
        }
        return;
    }
    LockRequest *lockRequest = pendingList_.front();
    if (!CanAcquireLock(lockRequest)) {
        return;
    }
    lockStatus_ = lockRequest->GetMode();
    if (lockStatus_ == LOCK_MODE_SHARED) {
        do {
            if (syncLockRequest == lockRequest) {
                ProcessLockRequest<false>(lockRequest);
            } else {
                ProcessLockRequest<true>(lockRequest);
            }
            if (pendingList_.empty()) {
                break;
            }
            lockRequest = pendingList_.front();
        } while (lockRequest->GetMode() == LOCK_MODE_SHARED);
    } else {
        ProcessLockRequest<true>(lockRequest);
    }
}

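// A request can be granted when no request holds the lock, when both the request and the current
// holders are shared, or when the status has already been reset to LOCK_MODE_UNLOCK.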
bool AsyncLock::CanAcquireLock(LockRequest *lockRequest)
{
    if (heldList_.empty()) {
        return true;
    }
    if (lockRequest->GetMode() == LOCK_MODE_SHARED && lockStatus_ == LOCK_MODE_SHARED) {
        return true;
    }
    if (lockStatus_ == LOCK_MODE_UNLOCK) {
        return true;
    }
    return false;
}

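// Fills the given JS objects with LockInfo entries (indexed from 0) for the held and pending requests.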
napi_status AsyncLock::FillLockState(napi_env env, napi_value held, napi_value pending)
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    uint32_t idx = 0;
    for (LockRequest *rq : heldList_) {
        napi_value info = CreateLockInfo(env, rq);
        bool pendingException = false;
        napi_is_exception_pending(env, &pendingException);
        if (pendingException) {
            return napi_pending_exception;
        }
        napi_value index;
        napi_create_int32(env, idx, &index);
        napi_status status = napi_set_property(env, held, index, info);
        if (status != napi_ok) {
            return status;
        }
        ++idx;
    }
    idx = 0;
    for (LockRequest *rq : pendingList_) {
        napi_value info = CreateLockInfo(env, rq);
        bool pendingException = false;
        napi_is_exception_pending(env, &pendingException);
        if (pendingException) {
            return napi_pending_exception;
        }
        napi_value index;
        napi_create_int32(env, idx, &index);
        napi_status status = napi_set_property(env, pending, index, info);
        if (status != napi_ok) {
            return status;
        }
        ++idx;
    }
    return napi_ok;
}

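// Builds a JS object holding the lock name, the request mode, and the requester tid (exposed as contextId).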
napi_value AsyncLock::CreateLockInfo(napi_env env, const LockRequest *rq)
{
    napi_value info;
    NAPI_CALL(env, napi_create_object(env, &info));
    napi_value name;
    NAPI_CALL(env, napi_create_string_utf8(env, lockName_.c_str(), NAPI_AUTO_LENGTH, &name));
    napi_value mode;
    NAPI_CALL(env, napi_create_int32(env, rq->GetMode(), &mode));
    napi_value tid;
    NAPI_CALL(env, napi_create_int32(env, rq->GetTid(), &tid));

    napi_property_descriptor properties[] = {
        DECLARE_NAPI_PROPERTY("name", name),
        DECLARE_NAPI_PROPERTY("mode", mode),
        DECLARE_NAPI_PROPERTY("contextId", tid),
    };
    NAPI_CALL(env, napi_define_properties(env, info, sizeof(properties) / sizeof(properties[0]), properties));
    return info;
}

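// Schedules asynchronous deletion of this lock via napi async work: the instance cannot be deleted
// synchronously because asyncLockMutex_ is still held by the caller.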
void AsyncLock::AsyncDestroy(napi_env env)
{
    napi_value resourceName;
    napi_create_string_utf8(env, "AsyncLock::AsyncDestroyCallback", NAPI_AUTO_LENGTH, &resourceName);
    auto *data = new std::pair<AsyncLock *, napi_async_work>();
    data->first = this;
    napi_async_work &work = data->second;
    napi_status status = napi_create_async_work(
        env, nullptr, resourceName, [](napi_env, void *) {}, AsyncDestroyCallback, data, &work);
    if (status != napi_ok) {
        HILOG_FATAL("Internal error: cannot create async work");
        // The work was not created; do not queue an uninitialized handle.
        delete data;
        return;
    }
    status = napi_queue_async_work(env, work);
    if (status != napi_ok) {
        HILOG_FATAL("Internal error: cannot queue async work");
    }
}

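// Completion callback of the destroy work: deletes the AsyncLock instance and the async work itself.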
void AsyncLock::AsyncDestroyCallback(napi_env env, napi_status, void *data)
{
    auto *lockAndWork = reinterpret_cast<std::pair<AsyncLock *, napi_async_work> *>(data);
    delete lockAndWork->first;
    napi_delete_async_work(env, lockAndWork->second);
    delete lockAndWork;
}

uint32_t AsyncLock::IncRefCount()
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    return ++refCount_;
}

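// Decrements the reference count and returns the new value; may delete this instance (see below).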
uint32_t AsyncLock::DecRefCount()
{
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    uint32_t count = --refCount_;
    if (count == 0) {
        // No refs to the instance. We can delete it right now if there are no more lock requests.
        // In case there are some lock requests, the last processed lock request will delete the instance.
        if (pendingList_.empty() && heldList_.empty()) {
            lock.unlock();
            delete this;
            return 0;
        }
    }
    return count;
}

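// Returns creation info (tid and creation stack trace) for every request currently holding the lock.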
std::vector<RequestCreationInfo> AsyncLock::GetSatisfiedRequestInfos()
{
    std::vector<RequestCreationInfo> result;
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    for (auto *request : heldList_) {
        result.push_back(RequestCreationInfo { request->GetTid(), request->GetCreationStacktrace() });
    }
    return result;
}

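// Returns creation info (tid and creation stack trace) for every request still waiting for the lock.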
std::vector<RequestCreationInfo> AsyncLock::GetPendingRequestInfos()
{
    std::vector<RequestCreationInfo> result;
    std::unique_lock<std::mutex> lock(asyncLockMutex_);
    for (auto *request : pendingList_) {
        result.push_back(RequestCreationInfo { request->GetTid(), request->GetCreationStacktrace() });
    }
    return result;
}

} // namespace Commonlibrary::Concurrent::LocksModule