/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "stack_data_repeater.h"
#include "hook_common.h"

using namespace OHOS::Developtools::NativeDaemon;

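// StackDataRepeater is a bounded, mutex-guarded producer/consumer queue of
// RawStack samples, backed by a small cache of reusable RawStack objects.
// A minimal producer-side sketch; the capacity and flag values below are
// illustrative assumptions, not taken from the real callers:
//
//     StackDataRepeater repeater(4096);                // queue capacity
//     RawStackPtr sample = repeater.GetRawStack();     // reuse a cached object
//     ...fill sample...
//     repeater.PutRawStack(sample, false);             // may block when full
//     repeater.Close();                                // wakes all waiters
//
// The constructor below pre-fills the cache with CACHE_DATA_SIZE reusable objects.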
StackDataRepeater::StackDataRepeater(size_t maxSize)
{
    maxSize_ = maxSize;
    closed_ = false;
    reducedStackCount_ = 0;
    for (int index = 0; index < CACHE_DATA_SIZE; ++index) {
        rawDataCacheQueue_.emplace_back(std::make_shared<RawStack>());
    }
}

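// The destructor closes the repeater so any threads blocked in Put/Take wake up.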
StackDataRepeater::~StackDataRepeater()
{
    Close();
}

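// Returns the number of queued samples, read under the queue lock.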
size_t StackDataRepeater::Size()
{
    std::unique_lock<std::mutex> lock(mutex_);
    return rawDataQueue_.size();
}

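// Reopens a previously closed repeater; data drained by Close() is not restored.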
void StackDataRepeater::Reset()
{
    std::unique_lock<std::mutex> lock(mutex_);
    closed_ = false;
}

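// Drains the queue, marks the repeater closed, and wakes every thread waiting
// on either condition variable so producers and consumers can exit.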
void StackDataRepeater::Close()
{
    {
        std::unique_lock<std::mutex> lock(mutex_);
        rawDataQueue_.clear();
        closed_ = true;
    }
    // Log the counter in decimal; PRIx64 would print an unprefixed hex value.
    PROFILER_LOG_INFO(LOG_CORE, "StackDataRepeater Close, reducedStackCount_ : %" PRIu64 " ", reducedStackCount_);
    slotCondVar_.notify_all();
    itemCondVar_.notify_all();
}

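// Hands out a RawStack from the object cache, or allocates a fresh one when
// the cache is empty; pairs with ReturnRawStack() below.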
RawStackPtr StackDataRepeater::GetRawStack()
{
    std::unique_lock<std::mutex> lock(cacheMutex_);
    if (!rawDataCacheQueue_.empty()) {
        RawStackPtr rawStack = rawDataCacheQueue_.back();
        rawDataCacheQueue_.pop_back();
        return rawStack;
    }
    return std::make_shared<RawStack>();
}

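// Recycles a RawStack into the cache after resetting it; objects beyond the
// cache capacity are dropped and freed by the shared_ptr.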
void StackDataRepeater::ReturnRawStack(RawStackPtr rawStack)
{
    std::unique_lock<std::mutex> lock(cacheMutex_);
    if (rawDataCacheQueue_.size() < CACHE_DATA_SIZE) { // strict bound: <= would let the cache overgrow by one
        rawStack->Reset();
        rawDataCacheQueue_.push_back(rawStack);
    }
}

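// Pushes the first batchCount entries of rawDataArray into the queue as one
// batch, blocking while the queue is full; returns false once closed.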
bool StackDataRepeater::PutRawStackArray(std::array<std::shared_ptr<RawStack>, CACHE_ARRAY_SIZE>& rawDataArray,
                                         uint32_t batchCount)
{
    std::unique_lock<std::mutex> lock(mutex_);
    // std::array::empty() is a compile-time constant (always false here), so
    // test batchCount to detect an empty batch.
    if ((batchCount == 0) && (rawDataQueue_.size() > 0)) {
        PROFILER_LOG_INFO(LOG_CORE, "PutRawStackArray: no need to put an empty batch if the queue has data");
        return true;
    }
    while (rawDataQueue_.size() >= maxSize_ && !closed_) {
        slotCondVar_.wait(lock);
    }
    if (closed_) {
        return false;
    }
    for (uint32_t i = 0; i < batchCount; i++) {
        rawDataQueue_.push_back(rawDataArray[i]);
    }
    lock.unlock();
    itemCondVar_.notify_one();
    return true;
}

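// Queues one sample, blocking while the queue is full. In the fast path a
// matching malloc/free pair cancels out: the free is not queued and the
// pending malloc is flagged so it is skipped later.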
bool StackDataRepeater::PutRawStack(const RawStackPtr& rawData, bool isRecordAccurately)
{
    bool needInsert = true;
    std::unique_lock<std::mutex> lock(mutex_);

    if ((rawData == nullptr) && (rawDataQueue_.size() > 0)) {
        PROFILER_LOG_INFO(LOG_CORE, "no need to put nullptr if queue has data, rawDataQueue_.size() = %zu",
                          rawDataQueue_.size());
        return true;
    }
    while (rawDataQueue_.size() >= maxSize_ && !closed_) {
        slotCondVar_.wait(lock);
    }
    if (closed_) {
        return false;
    }

    if (__builtin_expect((rawData != nullptr) && !isRecordAccurately, true)) {
        if (rawData->stackConext->type == FREE_MSG) {
            auto temp = mallocMap_.find(rawData->stackConext->addr);
            // matched : the malloc and free cancel out, so both will be ignored
            // unmatched : send the free's data anyway
            if (temp != mallocMap_.end()) {
                temp->second->reportFlag = false; // will be ignored later
                mallocMap_.erase(rawData->stackConext->addr);
                needInsert = false;
            }
        } else if (rawData->stackConext->type == MALLOC_MSG) {
            mallocMap_.insert(std::pair<void*, std::shared_ptr<RawStack>>(rawData->stackConext->addr, rawData));
        }
        if (needInsert) {
            rawDataQueue_.push_back(rawData);
        }
    } else {
        rawDataQueue_.push_back(rawData);
    }

    lock.unlock();
    itemCondVar_.notify_one();
    return true;
}

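// Takes up to batchCount samples from the queue into batchRawStack and returns
// the last one taken (nullptr on close or timeout). With a statistics interval
// set, the wait is bounded by `during` milliseconds and a timeout is reported
// through isTimeOut; otherwise it blocks until data arrives or the repeater is
// closed. Once the backlog reaches SPEED_UP_THRESHOLD, malloc samples are
// flagged for reduced stack processing.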
RawStackPtr StackDataRepeater::TakeRawData(uint32_t during, clockid_t clockId, uint32_t batchCount,
                                           RawStackPtr batchRawStack[], uint32_t statInterval, bool& isTimeOut)
{
    uint32_t rawDataQueueSize = 0;
    std::unique_lock<std::mutex> lock(mutex_);
    if (statInterval > 0 &&
        !itemCondVar_.wait_for(lock, std::chrono::milliseconds(during), [&] { return !rawDataQueue_.empty(); })) {
        if (rawDataQueue_.empty() && !closed_) {
            isTimeOut = true;
            lock.unlock();
            slotCondVar_.notify_one();
            return nullptr;
        }
    } else {
        while (rawDataQueue_.empty() && !closed_) {
            itemCondVar_.wait(lock);
        }
    }
    if (closed_) {
        return nullptr;
    }
    RawStackPtr result = nullptr;
    rawDataQueueSize = rawDataQueue_.size();
    uint32_t resultSize = rawDataQueueSize > batchCount ? batchCount : rawDataQueueSize;
    bool needReduceStack = rawDataQueueSize >= SPEED_UP_THRESHOLD;
    for (uint32_t i = 0; i < resultSize; i++) {
        result = rawDataQueue_.front();
        rawDataQueue_.pop_front();
        batchRawStack[i] = result;
        if ((result != nullptr) && (result->stackConext != nullptr) && (result->stackConext->type == MALLOC_MSG)) {
            mallocMap_.erase(result->stackConext->addr);
            if (needReduceStack) {
                result->reduceStackFlag = true;
                reducedStackCount_++;
            }
        }
    }

    lock.unlock();
    slotCondVar_.notify_one();
    return result;
}
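
// A minimal consumer-side sketch; parameter values here are illustrative
// assumptions, not taken from the real callers:
//
//     RawStackPtr batch[CACHE_ARRAY_SIZE] = {};
//     bool timedOut = false;
//     // Take at most one sample, waiting up to 50 ms since statInterval > 0.
//     RawStackPtr last = repeater.TakeRawData(50, CLOCK_MONOTONIC, 1, batch, 1, timedOut);
//     if (last != nullptr) {
//         ...process batch[0], then recycle it via ReturnRawStack()...
//     }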