/*
 * Copyright (c) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define LOG_TAG "KvSyncManager"
#include "kvstore_sync_manager.h"
#include <algorithm>  // for std::max
#include "log_print.h"

namespace OHOS {
namespace DistributedKv {
KvStoreSyncManager::KvStoreSyncManager() {}
KvStoreSyncManager::~KvStoreSyncManager() {}

Status KvStoreSyncManager::AddSyncOperation(uintptr_t syncId, uint32_t delayMs, const SyncFunc &syncFunc,
                                            const SyncEnd &syncEnd)
{
    if (syncId == 0 || syncFunc == nullptr) {
        return Status::INVALID_ARGUMENT;
    }
    uint32_t opSeq = ++syncOpSeq_;
    SyncEnd endFunc;
    if (syncEnd != nullptr) {
        // Wrap the caller's completion callback so the operation is removed from the
        // corresponding syncing list before the caller is notified.
        endFunc = [opSeq, delayMs, syncEnd, this](const std::map<std::string, DistributedDB::DBStatus> &devices) {
            RemoveSyncingOp(opSeq, (delayMs == 0) ? realtimeSyncingOps_ : delaySyncingOps_);
            syncEnd(devices);
        };
    }

    auto beginTime = std::chrono::steady_clock::now() + std::chrono::milliseconds(delayMs);
    KvSyncOperation syncOp{ syncId, opSeq, delayMs, syncFunc, endFunc, beginTime };
    if (delayMs == 0) {
        // Real-time sync: execute immediately instead of queuing.
        if (endFunc != nullptr) {
            std::lock_guard<std::mutex> lock(syncOpsMutex_);
            realtimeSyncingOps_.push_back(syncOp);
        }
        auto status = syncFunc(endFunc);
        if (status != Status::SUCCESS) {
            RemoveSyncingOp(opSeq, realtimeSyncingOps_);
        }
        return status;
    }

    // Delayed sync: queue it and (re)arm the timer if this is the only pending operation
    // or it becomes due earlier than the currently scheduled wake-up (allowing for the
    // expire-time tolerance).
    std::lock_guard<std::mutex> lock(syncOpsMutex_);
    scheduleSyncOps_.emplace(beginTime, syncOp);
    ZLOGD("add op %u delay %u count %zu.", opSeq, delayMs, scheduleSyncOps_.size());
    if ((scheduleSyncOps_.size() == 1) ||
        (nextScheduleTime_ > beginTime + std::chrono::milliseconds(GetExpireTimeRange(delayMs)))) {
        AddTimer(beginTime);
    }
    return Status::SUCCESS;
}
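
// Usage sketch (illustrative only, not part of the build): a caller could queue a
// delayed sync roughly as follows. The 500 ms delay, the store pointer used as the
// sync id, and the lambda bodies are assumptions for this example; the SyncFunc/SyncEnd
// shapes are inferred from the signatures used in this file and are assumed to be
// aliases declared in kvstore_sync_manager.h.
//
//     KvStoreSyncManager::SyncEnd onEnd = [](const std::map<std::string, DistributedDB::DBStatus> &devices) {
//         // inspect the per-device DBStatus results here
//     };
//     KvStoreSyncManager::SyncFunc doSync = [](const KvStoreSyncManager::SyncEnd &end) -> Status {
//         // start the underlying DistributedDB sync, passing `end` as its completion callback
//         return Status::SUCCESS;
//     };
//     syncManager.AddSyncOperation(reinterpret_cast<uintptr_t>(store.get()), 500, doSync, onEnd);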

uint32_t KvStoreSyncManager::GetExpireTimeRange(uint32_t delayMs) const
{
    // Tolerance window used to batch nearby delayed operations: proportional to the
    // delay, but never smaller than half of SYNC_MIN_DELAY_MS.
    uint32_t range = delayMs / DELAY_TIME_RANGE_DIVISOR;
    return std::max(range, SYNC_MIN_DELAY_MS >> 1);
}
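
// Worked example for the window above, using hypothetical constant values (the real
// DELAY_TIME_RANGE_DIVISOR and SYNC_MIN_DELAY_MS are declared in kvstore_sync_manager.h):
// with DELAY_TIME_RANGE_DIVISOR = 4 and SYNC_MIN_DELAY_MS = 1000, a 10000 ms delay
// yields a 2500 ms tolerance window, while a 200 ms delay is clamped up to 500 ms
// (half of SYNC_MIN_DELAY_MS).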

Status KvStoreSyncManager::RemoveSyncOperation(uintptr_t syncId)
{
    // Remove every queued or in-flight operation that belongs to the given syncId.
    auto pred = [syncId](const KvSyncOperation &op) -> bool { return syncId == op.syncId; };
    std::lock_guard<std::mutex> lock(syncOpsMutex_);
    uint32_t count = DoRemoveSyncingOp(pred, realtimeSyncingOps_);
    count += DoRemoveSyncingOp(pred, delaySyncingOps_);
    auto &syncOps = scheduleSyncOps_;
    for (auto it = syncOps.begin(); it != syncOps.end();) {
        if (pred(it->second)) {
            count++;
            it = syncOps.erase(it);
        } else {
            ++it;
        }
    }
    return (count > 0) ? Status::SUCCESS : Status::ERROR;
}

uint32_t KvStoreSyncManager::DoRemoveSyncingOp(OpPred pred, std::list<KvSyncOperation> &syncingOps)
{
    uint32_t count = 0;
    for (auto it = syncingOps.begin(); it != syncingOps.end();) {
        if (pred(*it)) {
            count++;
            it = syncingOps.erase(it);
        } else {
            ++it;
        }
    }
    return count;
}

Status KvStoreSyncManager::RemoveSyncingOp(uint32_t opSeq, std::list<KvSyncOperation> &syncingOps)
{
    ZLOGD("remove op %u", opSeq);
    auto pred = [opSeq](const KvSyncOperation &op) -> bool { return opSeq == op.opSeq; };
    std::lock_guard<std::mutex> lock(syncOpsMutex_);
    uint32_t count = DoRemoveSyncingOp(pred, syncingOps);
    return (count == 1) ? Status::SUCCESS : Status::ERROR;
}

void KvStoreSyncManager::AddTimer(const TimePoint &expireTime)
{
    ZLOGD("time %lld", static_cast<long long>(expireTime.time_since_epoch().count()));
    // Remember the next wake-up and schedule it on the executor pool.
    nextScheduleTime_ = expireTime;
    executors_->Schedule(
        expireTime - std::chrono::steady_clock::now(),
        [time = expireTime, this]() {
            Schedule(time);
        });
}

bool KvStoreSyncManager::GetTimeoutSyncOps(const TimePoint &currentTime, std::list<KvSyncOperation> &syncOps)
{
    // Collects the delayed operations that are due at currentTime. Returns true when
    // scheduling should be postponed because a real-time sync started very recently.
    std::lock_guard<std::mutex> lock(syncOpsMutex_);
    if ((!realtimeSyncingOps_.empty()) && (!scheduleSyncOps_.empty())) {
        // The latest real-time sync began less than REALTIME_PRIOR_SYNCING_MS ago,
        // so give it priority and postpone the delayed operations.
        auto priorSyncingTime = std::chrono::milliseconds(REALTIME_PRIOR_SYNCING_MS);
        if (currentTime < realtimeSyncingOps_.rbegin()->beginTime + priorSyncingTime) {
            return true;
        }
    }
    // scheduleSyncOps_ is ordered by expire time, so stop at the first entry that is
    // still outside its tolerance window.
    for (auto it = scheduleSyncOps_.begin(); it != scheduleSyncOps_.end();) {
        const auto &expireTime = it->first;
        const auto &op = it->second;
        // currentTime is still earlier than expireTime minus the expire-time range.
        if (currentTime + std::chrono::milliseconds(GetExpireTimeRange(op.delayMs)) < expireTime) {
            break;
        }
        syncOps.push_back(op);
        if (op.syncEnd != nullptr) {
            delaySyncingOps_.push_back(op);
        }
        it = scheduleSyncOps_.erase(it);
    }
    return false;
}

void KvStoreSyncManager::DoCheckSyncingTimeout(std::list<KvSyncOperation> &syncingOps)
{
    // Drop in-flight operations whose completion callback has not arrived within SYNCING_TIMEOUT_MS.
    auto syncingTimeoutPred = [](const KvSyncOperation &op) -> bool {
        return op.beginTime + std::chrono::milliseconds(SYNCING_TIMEOUT_MS) < std::chrono::steady_clock::now();
    };
    uint32_t count = DoRemoveSyncingOp(syncingTimeoutPred, syncingOps);
    if (count > 0) {
        ZLOGI("remove %u syncing ops by timeout", count);
    }
}

void KvStoreSyncManager::Schedule(const TimePoint &time)
{
    ZLOGD("timeout %lld", static_cast<long long>(time.time_since_epoch().count()));
    std::list<KvSyncOperation> syncOps;
    bool delaySchedule = GetTimeoutSyncOps(time, syncOps);

    // Run the due operations outside the lock.
    for (const auto &op : syncOps) {
        op.syncFunc(op.syncEnd);
    }

    std::lock_guard<std::mutex> lock(syncOpsMutex_);
    DoCheckSyncingTimeout(realtimeSyncingOps_);
    DoCheckSyncingTimeout(delaySyncingOps_);
    if (!scheduleSyncOps_.empty()) {
        auto nextTime = scheduleSyncOps_.begin()->first;
        if (delaySchedule) {
            // A real-time sync has priority; retry after the minimum delay instead of
            // at the next scheduled expire time.
            nextTime = std::chrono::steady_clock::now() + std::chrono::milliseconds(SYNC_MIN_DELAY_MS);
        }
        AddTimer(nextTime);
    }
}

void KvStoreSyncManager::SetThreadPool(std::shared_ptr<ExecutorPool> executors)
{
    executors_ = executors;
}
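
// Wiring sketch (illustrative only): the owning service is expected to provide an
// executor pool before any delayed operation is added, because AddTimer() schedules
// its wake-ups on executors_. Where the pool comes from is an assumption here; only
// SetThreadPool() itself is defined in this file.
//
//     std::shared_ptr<ExecutorPool> pool = GetExecutorPoolFromService();  // hypothetical helper
//     syncManager.SetThreadPool(pool);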
} // namespace DistributedKv
} // namespace OHOS