/*
 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2023. All rights reserved.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 * Description: FlowController implementation
 */
#include "flow_controller.h"

#include <algorithm>
#include <cinttypes>
#include <set>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <regex>

#include "file_utils.h"
#include "ftrace_field_parser.h"
#include "ftrace_fs_ops.h"
#include "logging.h"
#include "parameters.h"

namespace {
using namespace OHOS::Developtools::Profiler;
#ifndef PAGE_SIZE
constexpr uint32_t PAGE_SIZE = 4096;
#endif
constexpr int KB_PER_PAGE = PAGE_SIZE / 1024;
constexpr uint32_t BYTE_PER_KB = 1024;
constexpr uint32_t MAX_FLUSH_INTERVAL = 1800 * 1000;
constexpr uint32_t MAX_FLUSH_THRESHOLD = 128 * 1024 * 1024;
constexpr uint32_t MAX_TRACE_PERIOD_MS = 720 * 1000;
constexpr uint32_t MAX_BUFFER_SIZE_KB = 64 * 1024;     // 64 MB
constexpr uint32_t HM_MAX_BUFFER_SIZE_KB = 512 * 1024; // 512 MB
constexpr uint32_t MIN_BUFFER_SIZE_KB = 1024;          // 1 MB
constexpr uint32_t DEFAULT_TRACE_PERIOD_MS = 250;      // 250 ms
constexpr uint32_t MAX_BLOCK_SIZE_PAGES = 4096;        // 16 MB
constexpr uint32_t MIN_BLOCK_SIZE_PAGES = 256;         // 1 MB
constexpr uint32_t PARSE_CMDLINE_COUNT = 1000;
const std::set<std::string> g_availableClocks = { "boot", "global", "local", "mono" };
constexpr uint32_t SAVED_CMDLINE_SIZE_SMALL = 1024; // saved_cmdlines_size when CPU count is less than 8
constexpr uint32_t SAVED_CMDLINE_SIZE_LARGE = 4096; // saved_cmdlines_size when CPU count is 8 or more
constexpr int OCTA_CORE_CPU = 8;                    // 8-core threshold
constexpr unsigned int RMQ_ENTRY_ALIGN_MASK = (1U << 2) - 1;
const std::string TRACE_PROPERTY = "debug.hitrace.tags.enableflags";
const std::string BGSRV_PROPERTY = "5456538433239656448";
} // namespace

FTRACE_NS_BEGIN
FlowController::FlowController()
{
    ftraceParser_ = std::make_unique<FtraceParser>();
    ksymsParser_ = std::make_unique<KernelSymbolsParser>();
    ftraceSupported_ = FtraceFsOps::GetInstance().GetFtraceRoot().size() > 0;
    traceCollector_ = OHOS::HiviewDFX::UCollectClient::TraceCollector::Create();
}

FlowController::~FlowController(void)
{
    PROFILER_LOG_INFO(LOG_CORE, "FlowController destroy!");
}

int FlowController::SetWriter(const WriterStructPtr& writer)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_TRUE(resultWriter_ == nullptr, 0, "writer already set!");

    CHECK_NOTNULL(writer, -1, "writer null!");
    auto transporter = std::make_unique<ResultTransporter>("Transporter", writer);
    CHECK_NOTNULL(transporter, -1, "create ResultTransporter FAILED!");

    // get CPU core numbers
    int nprocs = static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
    CHECK_TRUE(nprocs > 0, -1, "get processor number failed!");
    platformCpuNum_ = nprocs;

    // init FtraceParser
    CHECK_NOTNULL(ftraceParser_, -1, "FtraceParser create FAILED!");
    CHECK_TRUE(ftraceParser_->Init(), -1, "ftrace parser init failed!");

    // init KernelSymbolsParser
    CHECK_NOTNULL(ksymsParser_, -1, "KernelSymbolsParser create FAILED!");
    ksymsParser_->Parse(FtraceFsOps::GetInstance().GetKernelSymbols());

    CHECK_TRUE(AddPlatformEventsToParser(), -1, "add platform events to parser failed!");
    // disable all trace events
    DisableAllCategories();

    resultWriter_ = writer;
    tansporter_ = std::move(transporter);
    return 0;
}

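// Create one FtraceDataReader per data source: on an HM kernel a single raw trace stream is
// exposed, otherwise one trace_pipe_raw reader is created per CPU. When fakePath_ is set
// (test mode), pre-recorded raw files plus the saved cmdline/tgid dumps under that path are
// used instead of the live tracefs nodes.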
bool FlowController::CreateRawDataReaders()
{
    if (FtraceFsOps::GetInstance().IsHmKernel()) {
        auto reader = std::make_unique<FtraceDataReader>(FtraceFsOps::GetInstance().GetHmRawTracePath());
        CHECK_NOTNULL(reader, false, "create hm raw trace reader FAILED!");
        ftraceReaders_.emplace_back(std::move(reader));
        return true;
    }

    for (int i = 0; i < platformCpuNum_; i++) {
        auto rawPath = FtraceFsOps::GetInstance().GetRawTracePath(i);
        if (!fakePath_.empty()) {
            rawPath = fakePath_ + "test_raw_" + std::to_string(i);
            CHECK_NOTNULL(ftraceParser_, false, "create FtraceParser FAILED!");
            ftraceParser_->ParseSavedCmdlines(FileUtils::ReadFile(fakePath_ + "test_comm"));
            ftraceParser_->ParseSavedTgid(FileUtils::ReadFile(fakePath_ + "test_tgid"));
        }
        auto reader = std::make_unique<FtraceDataReader>(rawPath);
        CHECK_NOTNULL(reader, false, "create reader %d FAILED!", i);
        ftraceReaders_.emplace_back(std::move(reader));
    }
    return true;
}

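// The configured buffer_size_kb is translated into per-CPU blocks of whole pages: with 4 KB
// pages, MIN_BLOCK_SIZE_PAGES (256) and MAX_BLOCK_SIZE_PAGES (4096) clamp each CPU's block
// to between 1 MB and 16 MB. On an HM kernel the whole budget is kept in a single block
// because there is only one raw trace stream to drain.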
bool FlowController::CreatePagedMemoryPool()
{
    PROFILER_LOG_INFO(LOG_CORE, "create memory pool, buffer_size_kb = %u", bufferSizeKb_);
    if (KB_PER_PAGE == 0 || platformCpuNum_ == 0) {
        return false;
    }
    size_t bufferSizePages = bufferSizeKb_ / KB_PER_PAGE;
    size_t pagesPerBlock = bufferSizePages / static_cast<size_t>(platformCpuNum_);
    if (pagesPerBlock < MIN_BLOCK_SIZE_PAGES) {
        pagesPerBlock = MIN_BLOCK_SIZE_PAGES;
    }
    if (pagesPerBlock > MAX_BLOCK_SIZE_PAGES) {
        pagesPerBlock = MAX_BLOCK_SIZE_PAGES;
    }

    if (FtraceFsOps::GetInstance().IsHmKernel()) {
        memPool_ = std::make_unique<PagedMemPool>(bufferSizePages, 1);
    } else {
        memPool_ = std::make_unique<PagedMemPool>(pagesPerBlock, platformCpuNum_);
    }
    CHECK_NOTNULL(memPool_, false, "create PagedMemPool FAILED!");
    return true;
}

bool FlowController::CreateRawDataBuffers()
{
    int num = platformCpuNum_;
    if (FtraceFsOps::GetInstance().IsHmKernel()) {
        num = 1;
    }
    for (int i = 0; i < num; i++) {
        using u8ptr = std::unique_ptr<uint8_t>::pointer;
        auto buffer = std::shared_ptr<uint8_t>(reinterpret_cast<u8ptr>(memPool_->Allocate()),
            [&](u8ptr block) { this->memPool_->Recycle(block); });
        CHECK_NOTNULL(buffer, false, "create buffer %d failed!", i);
        ftraceBuffers_.push_back(buffer);
    }
    return true;
}

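// Delay-parse mode buffers raw pages in an anonymous temporary file: mkstemp() creates a
// unique file under /data/local/tmp and the path is unlink()ed right away, so the data only
// lives as long as the FILE handle and is reclaimed automatically once it is closed.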
bool FlowController::CreateRawDataCaches()
{
    char fileName[] = "/data/local/tmp/ftrace_rawdata.XXXXXX";
    int fd = mkstemp(fileName);
    CHECK_TRUE(fd >= 0, false, "Create temp file failed!");
    rawDataFile_ = std::shared_ptr<FILE>(fdopen(fd, "wb+"), [](FILE* fp) {
        if (fp != nullptr) {
            fclose(fp);
        }
    });
    CHECK_NOTNULL(rawDataFile_, false, "open temp raw data cache failed!");
    unlink(fileName);
    return true;
}

bool FlowController::ParseBasicData()
{
    CHECK_NOTNULL(resultWriter_, false, "%s: resultWriter_ nullptr", __func__);
    // get clock times
    if (getClockTimes_) {
        if (resultWriter_->isProtobufSerialize) {
            auto traceResult = std::make_unique<TracePluginResult>();
            CHECK_TRUE(ReportClockTimes(traceResult), false, "parse clock times FAILED!");
            CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), false, "report clock times FAILED!");
        } else {
            auto ctx = resultWriter_->startReport(resultWriter_);
            CHECK_NOTNULL(ctx, false, "%s: get RandomWriteCtx FAILED!", __func__);
            auto traceResult = std::make_unique<ProtoEncoder::TracePluginResult>(ctx);
            CHECK_TRUE(ReportClockTimes(traceResult), false, "parse clock times FAILED!");
            int32_t msgSize = traceResult->Finish();
            resultWriter_->finishReport(resultWriter_, msgSize);
            tansporter_->Report(static_cast<size_t>(msgSize));
        }
    }

    // parse kernel symbols
    if (parseKsyms_) {
        if (resultWriter_->isProtobufSerialize) {
            auto traceResult = std::make_unique<TracePluginResult>();
            CHECK_TRUE(ParseKernelSymbols(traceResult), false, "parse kernel symbols FAILED!");
            CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), false, "report kernel symbols FAILED!");
        } else {
            auto ctx = resultWriter_->startReport(resultWriter_);
            CHECK_NOTNULL(ctx, false, "%s: get RandomWriteCtx FAILED!", __func__);
            auto traceResult = std::make_unique<ProtoEncoder::TracePluginResult>(ctx);
            CHECK_TRUE(ParseKernelSymbols(traceResult), false, "parse kernel symbols FAILED!");
            int32_t msgSize = traceResult->Finish();
            resultWriter_->finishReport(resultWriter_, msgSize);
            tansporter_->Report(static_cast<size_t>(msgSize));
        }
    }
    if (resultWriter_->isProtobufSerialize) {
        auto traceResult = std::make_unique<TracePluginResult>();
        CHECK_TRUE(ParsePerCpuStatus(traceResult, TRACE_START), false, "parse TRACE_START stats failed!");
        CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), false, "report TRACE_START stats failed!");
    } else {
        auto ctx = resultWriter_->startReport(resultWriter_);
        CHECK_NOTNULL(ctx, false, "%s: get RandomWriteCtx FAILED!", __func__);
        auto traceResult = std::make_unique<ProtoEncoder::TracePluginResult>(ctx);
        CHECK_TRUE(ParsePerCpuStatus(traceResult, TRACE_START), false, "parse TRACE_START stats failed!");
        int32_t msgSize = traceResult->Finish();
        resultWriter_->finishReport(resultWriter_, msgSize);
        tansporter_->Report(static_cast<size_t>(msgSize));
    }
    return true;
}

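// Build the argument string handed to hiview's TraceCollector::OpenRecording().
// The format produced below is "tags:<cat1>,<cat2>... clockType:<clock> bufferSize:<kb>",
// e.g. a config with categories {sched, freq}, clock "boot" and a 20480 KB buffer would
// yield "tags:sched,freq clockType:boot bufferSize:20480" (illustrative values only).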
std::string FlowController::ReloadTraceArgs()
{
    std::string args;
    for (size_t i = 0; i < traceCategories_.size(); i++) {
        if (i == 0) {
            args += ("tags:" + traceCategories_[i]);
        } else {
            args += ("," + traceCategories_[i]);
        }
    }

    if (traceClock_.size() > 0) {
        args += (" clockType:" + traceClock_);
    }

    if (bufferSizeKb_ > 0) {
        args += (" bufferSize:" + std::to_string(bufferSizeKb_));
    }
    PROFILER_LOG_INFO(LOG_CORE, "trace args: %s", args.c_str());
    return args;
}

int FlowController::StartCapture(void)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(ftraceParser_, -1, "create FtraceParser FAILED!");
    CHECK_NOTNULL(ksymsParser_, -1, "create KernelSymbolsParser FAILED!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");
    CHECK_NOTNULL(traceCollector_, -1, "create TraceCollector FAILED!");
    CHECK_NOTNULL(resultWriter_, -1, "%s: resultWriter_ nullptr", __func__);

    CHECK_TRUE(ParseBasicData(), -1, "parse basic data failed!");

    // create memory pool, and raw data readers, buffers, caches.
    CHECK_TRUE(CreatePagedMemoryPool(), -1, "create paged memory pool failed!");
    CHECK_TRUE(CreateRawDataReaders(), -1, "create raw data readers failed!");
    CHECK_TRUE(CreateRawDataBuffers(), -1, "create raw data buffers failed!");

    // clear old trace
    FtraceFsOps::GetInstance().ClearTraceBuffer();
    // recover the hitrace
    std::string param = OHOS::system::GetParameter(TRACE_PROPERTY, "");
    if (param != "0" && param != BGSRV_PROPERTY) {
        traceCollector_->Recover();
    }

    uint32_t savedCmdlinesSize = platformCpuNum_ < OCTA_CORE_CPU ? SAVED_CMDLINE_SIZE_SMALL : SAVED_CMDLINE_SIZE_LARGE;
    if (!FtraceFsOps::GetInstance().SetSavedCmdLinesSize(savedCmdlinesSize)) {
        PROFILER_LOG_ERROR(LOG_CORE, "SetSavedCmdLinesSize %u fail.", savedCmdlinesSize);
    }

    // enable additional record options
    FtraceFsOps::GetInstance().SetRecordCmdOption(true);
    FtraceFsOps::GetInstance().SetRecordTgidOption(true);

    // start ftrace event data polling thread
    keepRunning_ = true;

    if (parseMode_ == TracePluginConfig_ParseMode_NORMAL) {
        pollThread_ = std::thread([this] { this->CaptureWorkOnNomalMode(); });
    } else if (parseMode_ == TracePluginConfig_ParseMode_DELAY_PARSE) {
        CHECK_TRUE(CreateRawDataCaches(), -1, "create raw data caches failed!");
        pollThread_ = std::thread([this] { this->CaptureWorkOnDelayMode(); });
    } else {
        PROFILER_LOG_ERROR(LOG_CORE, "ParseMode is an illegal parameter!");
        return -1;
    }

    // set trace_clock and enable all tag categories with hiview::TraceCollector
    auto openRet = traceCollector_->OpenRecording(ReloadTraceArgs());
    if (openRet.retCode != OHOS::HiviewDFX::UCollect::UcError::SUCCESS) {
        PROFILER_LOG_ERROR(LOG_CORE, "Enable tag categories failed, trace error code is %d!", openRet.retCode);
        return -1;
    }
    EnableTraceEvents();
    return 0;
}

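// Normal-mode polling loop: every tracePeriodMs_ the thread drains each CPU's trace_pipe_raw
// into its pre-allocated buffer (consuming the kernel ring buffer), refreshes the saved
// cmdline table, then parses and submits the buffered pages. isReportBasicData_ lets callers
// request a re-send of clock/symbol/stats data between iterations.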
void FlowController::CaptureWorkOnNomalModeInner()
{
    pthread_setname_np(pthread_self(), "TraceReader");
    PROFILER_LOG_INFO(LOG_CORE, "FlowController::CaptureWorkOnNomalMode start!");
    auto tracePeriod = std::chrono::milliseconds(tracePeriodMs_);
    std::vector<long> rawDataBytes(platformCpuNum_, 0);
    while (keepRunning_) {
        std::this_thread::sleep_for(tracePeriod);
        // read data from percpu trace_pipe_raw, consume kernel ring buffers
        for (size_t i = 0; i < rawDataBytes.size(); i++) {
            if (flushCacheData_ && !keepRunning_) {
                PROFILER_LOG_INFO(LOG_CORE, "flushCacheData_ is true, return");
                return;
            }
            long nbytes = ReadEventData(i);
            rawDataBytes[i] = nbytes;
        }
        // parse ftrace metadata
        ftraceParser_->ParseSavedCmdlines(FtraceFsOps::GetInstance().GetSavedCmdLines());
        // parse ftrace percpu event data
        for (size_t i = 0; i < rawDataBytes.size(); i++) {
            if (flushCacheData_ && !keepRunning_) {
                PROFILER_LOG_INFO(LOG_CORE, "flushCacheData_ is true, return");
                return;
            }
            if (rawDataBytes[i] == 0) {
                PROFILER_LOG_INFO(LOG_CORE, "Raw data from CPU%zu is 0 bytes.", i);
                continue;
            }
            if (!ParseEventDataOnNomalMode(i, rawDataBytes[i])) {
                PROFILER_LOG_ERROR(LOG_CORE, "%s:ParseEventData failed!", __func__);
            }
        }
        if (isReportBasicData_.load()) {
            ParseBasicData();
            isReportBasicData_ = false;
        }
    }
    tansporter_->Flush();
    PROFILER_LOG_DEBUG(LOG_CORE, "FlowController::CaptureWorkOnNomalMode done!");
}

long FlowController::HmReadEventData()
{
    auto buffer = ftraceBuffers_[0].get();
    auto reader = ftraceReaders_[0].get();
    auto bufferSize = static_cast<long>(memPool_->GetBlockSize());

    long nbytes = 0;
    long used = 0;
    long rest = bufferSize;
    while ((nbytes = reader->Read(&buffer[used], rest)) > 0 && used < bufferSize) {
        used += nbytes;
        rest -= nbytes;
    }
    if (used == bufferSize) {
        PROFILER_LOG_WARN(LOG_CORE, "hm trace raw data may be overwritten. current buffer size = %u.",
            (unsigned int)bufferSize);
    }
    return used;
}

void FlowController::HmCaptureWorkOnNomalModeInner()
{
    pthread_setname_np(pthread_self(), "HmTraceReader");
    PROFILER_LOG_INFO(LOG_CORE, "FlowController::HmCaptureWorkOnNomalMode start!");
    auto tracePeriod = std::chrono::milliseconds(tracePeriodMs_);
    while (keepRunning_) {
        std::this_thread::sleep_for(tracePeriod);
        if (flushCacheData_ && !keepRunning_) {
            PROFILER_LOG_INFO(LOG_CORE, "flushCacheData_ is true, return");
            return;
        }
        long rawDataBytes = HmReadEventData();
        ftraceParser_->ParseSavedCmdlines(FtraceFsOps::GetInstance().GetSavedCmdLines());
        if (flushCacheData_ && !keepRunning_) {
            PROFILER_LOG_INFO(LOG_CORE, "flushCacheData_ is true, return");
            return;
        }
        if (rawDataBytes == 0) {
            PROFILER_LOG_INFO(LOG_CORE, "HM raw data is 0 bytes.");
            continue;
        }
        if (!HmParseEventDataOnNomalMode(rawDataBytes)) {
            PROFILER_LOG_ERROR(LOG_CORE, "HmParseEventData failed!");
        }
    }
    tansporter_->Flush();
    PROFILER_LOG_INFO(LOG_CORE, "FlowController::HmCaptureWorkOnNomalMode done!");
}

void FlowController::CaptureWorkOnNomalMode()
{
    if (FtraceFsOps::GetInstance().IsHmKernel()) {
        HmCaptureWorkOnNomalModeInner();
    } else {
        CaptureWorkOnNomalModeInner();
    }
}

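// Delay-parse mode: raw pages are only cached while tracing runs and are parsed once the
// capture stops. Each drained chunk is appended to the temporary cache file as a record of
// [cpuIdx (1 byte)][nbytes (long)][nbytes of raw page data]; the saved cmdline table is
// refreshed only every PARSE_CMDLINE_COUNT iterations to keep the loop cheap.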
void FlowController::CaptureWorkOnDelayMode()
{
    pthread_setname_np(pthread_self(), "TraceReader");
    PROFILER_LOG_INFO(LOG_CORE, "FlowController::CaptureWorkOnDelayMode start!");

    auto tracePeriod = std::chrono::milliseconds(tracePeriodMs_);
    int writeDataCount = 0;
    while (keepRunning_) {
        std::this_thread::sleep_for(tracePeriod);

        // read data from percpu trace_pipe_raw, consume kernel ring buffers
        for (int cpuIdx = 0; cpuIdx < platformCpuNum_; cpuIdx++) {
            if (flushCacheData_ && !keepRunning_) {
                PROFILER_LOG_INFO(LOG_CORE, "flushCacheData_ is true, return");
                return;
            }
            long nbytes = ReadEventData(cpuIdx);
            if (nbytes == 0) {
                PROFILER_LOG_INFO(LOG_CORE, "Raw data from CPU%d is 0 bytes.", cpuIdx);
                continue;
            }
            fwrite(&cpuIdx, sizeof(uint8_t), 1, rawDataFile_.get());
            fwrite(&nbytes, sizeof(long), 1, rawDataFile_.get());
            fwrite(ftraceBuffers_[cpuIdx].get(), sizeof(uint8_t), nbytes, rawDataFile_.get());
        }
        writeDataCount++;
        if (writeDataCount == PARSE_CMDLINE_COUNT) {
            // parse ftrace metadata
            ftraceParser_->ParseSavedCmdlines(FtraceFsOps::GetInstance().GetSavedCmdLines());
            writeDataCount = 0;
        }
    }

    CHECK_TRUE(ParseEventDataOnDelayMode(), NO_RETVAL, "ParseEventData failed!");
    tansporter_->Flush();
    PROFILER_LOG_DEBUG(LOG_CORE, "FlowController::CaptureWorkOnDelayMode done!");
}

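// Total on-queue footprint of one RmqEntry: the fixed header plus the payload rounded up to
// 4-byte alignment via RMQ_ENTRY_ALIGN_MASK (0x3). For example, a 5-byte payload occupies
// (5 + 3) & ~3 = 8 payload bytes.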
static inline int RmqEntryTotalSize(unsigned int size)
{
    return sizeof(struct RmqEntry) + ((size + RMQ_ENTRY_ALIGN_MASK) & (~RMQ_ENTRY_ALIGN_MASK));
}

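// Walk one page of HM raw trace data: the page starts with an RmqConsumerData header
// (core id, base timestamp, payload length) followed by RmqEntry records whose timestamps
// are stored as offsets from that base. Entries whose event type has no registered
// SubEventParser context are skipped, and a zero-sized entry terminates the page early.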
template <typename T, typename E>
bool FlowController::HmParseEventData(T* traceResult, uint8_t*& data, E* ftraceEvent)
{
    struct RmqConsumerData* rmqData = reinterpret_cast<struct RmqConsumerData*>(data);
    uint64_t timeStampBase = rmqData->timeStamp;
    auto cpuDetailMsg = traceResult->add_ftrace_cpu_detail();
    struct RmqEntry* event;
    cpuDetailMsg->set_cpu(rmqData->coreId);
    cpuDetailMsg->set_overwrite(0);
    auto curPtr = rmqData->data;
    auto endPtr = rmqData->data + rmqData->length;
    while (curPtr < endPtr) {
        event = reinterpret_cast<struct RmqEntry*>(curPtr);
        unsigned int evtSize = event->size;
        if (evtSize == 0U) {
            break;
        }
        struct HmTraceHeader* header = reinterpret_cast<struct HmTraceHeader*>(event->data);
        auto parseEventCtx = SubEventParser<E>::GetInstance().GetParseEventCtx(header->commonType);
        if (parseEventCtx == nullptr) {
            curPtr += RmqEntryTotalSize(evtSize);
            continue;
        }
        ftraceEvent = cpuDetailMsg->add_event();
        ftraceEvent->set_timestamp(event->timeStampOffset + timeStampBase);
        if (!ftraceParser_->HmParseFtraceEvent(*ftraceEvent, reinterpret_cast<uint8_t*>(header), evtSize,
            parseEventCtx)) {
            PROFILER_LOG_ERROR(LOG_CORE, "hm parse event failed!");
        }
        curPtr += RmqEntryTotalSize(evtSize);
    }
    data += PAGE_SIZE;
    return true;
}

bool FlowController::HmParseEventDataOnNomalMode(long dataSize)
{
    CHECK_NOTNULL(resultWriter_, false, "%s: resultWriter_ nullptr", __func__);
    auto buffer = ftraceBuffers_[0].get();
    auto endPtr = buffer + dataSize;

    for (auto data = buffer; data < endPtr;) {
        if (resultWriter_->isProtobufSerialize) {
            auto traceResult = std::make_unique<TracePluginResult>();
            FtraceEvent* event = nullptr;
            CHECK_TRUE(HmParseEventData(traceResult.get(), data, event), false, "hm parse raw data failed!");
            CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), false, "report hm raw event failed!");
        } else {
            auto ctx = resultWriter_->startReport(resultWriter_);
            CHECK_NOTNULL(ctx, false, "%s: get RandomWriteCtx FAILED!", __func__);
            static ProtoEncoder::MessagePool msgPool;
            static ProtoEncoder::TracePluginResult traceResult;
            msgPool.Reset();
            traceResult.Reset(ctx, &msgPool);
            ProtoEncoder::FtraceEvent* event = nullptr;
            CHECK_TRUE(HmParseEventData(&traceResult, data, event), false, "hm parse raw data failed!");
            int32_t msgSize = traceResult.Finish();
            resultWriter_->finishReport(resultWriter_, msgSize);
            tansporter_->Report(static_cast<size_t>(msgSize));
        }
    }

    return true;
}

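// Drain one CPU's trace_pipe_raw into its buffer. Reads are expected to arrive in whole
// ring-buffer pages, hence the PAGE_SIZE alignment check on the running offset; filling the
// buffer completely means the kernel side may be dropping data, so the log suggests
// enlarging buffer_size_kb.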
long FlowController::ReadEventData(int cpuid)
{
    auto buffer = ftraceBuffers_[cpuid].get();
    auto reader = ftraceReaders_[cpuid].get();
    auto bufferSize = static_cast<long>(memPool_->GetBlockSize());

    long nbytes = 0;
    long used = 0;
    long rest = bufferSize;
    while ((nbytes = reader->Read(&buffer[used], rest)) > 0 && used < bufferSize) {
        CHECK_TRUE(used % PAGE_SIZE == 0, used, "used invalid!");
        used += nbytes;
        rest -= nbytes;
    }

    if (used == bufferSize) {
        PROFILER_LOG_INFO(LOG_CORE,
            "used(%ld) equals bufferSize(%ld), please expand buffer_size_kb, otherwise the kernel may lose data\n",
            used, bufferSize);
    }
    return used;
}

bool FlowController::ParseEventData(int cpuid, uint8_t* page)
{
    if (resultWriter_->isProtobufSerialize) {
        auto traceResult = std::make_unique<TracePluginResult>();
        FtraceEvent* event = nullptr; // Used to distinguish between SubEventParser instance types.
        CHECK_TRUE(ParseFtraceEvent(traceResult.get(), cpuid, page, event), false, "parse raw event for cpu-%d failed!",
            cpuid);
        CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), false, "report raw event for cpu-%d failed!", cpuid);
    } else {
        auto ctx = resultWriter_->startReport(resultWriter_);
        CHECK_NOTNULL(ctx, false, "%s: get RandomWriteCtx FAILED!", __func__);
        static ProtoEncoder::MessagePool msgPool;
        static ProtoEncoder::TracePluginResult traceResult;
        msgPool.Reset();
        traceResult.Reset(ctx, &msgPool);
        ProtoEncoder::FtraceEvent* event = nullptr; // Used to distinguish between SubEventParser instance types.
        CHECK_TRUE(ParseFtraceEvent(&traceResult, cpuid, page, event), false, "parse raw event for cpu-%d failed!",
            cpuid);
        int32_t msgSize = traceResult.Finish();
        resultWriter_->finishReport(resultWriter_, msgSize);
        tansporter_->Report(static_cast<size_t>(msgSize));
    }
    return true;
}

bool FlowController::ParseEventDataOnNomalMode(int cpuid, long dataSize)
{
    CHECK_NOTNULL(resultWriter_, false, "%s: resultWriter_ nullptr", __func__);
    auto buffer = ftraceBuffers_[cpuid].get();
    auto endPtr = buffer + dataSize;
    for (auto page = buffer; page < endPtr; page += PAGE_SIZE) {
        if (!ParseEventData(cpuid, page)) {
            PROFILER_LOG_ERROR(LOG_CORE, "%s:ParseEventData for cpu-%d failed!", __func__, cpuid);
        }
    }
    return true;
}

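// Replay the delay-mode cache: rewind the temporary file and read back the
// [cpuId][dataBytes][raw pages] records written by CaptureWorkOnDelayMode(), parsing each
// cached page exactly as normal mode would.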
bool FlowController::ParseEventDataOnDelayMode()
{
    CHECK_TRUE(fseek(rawDataFile_.get(), 0, SEEK_SET) == 0, false, "fseek failed!");
    while (!feof(rawDataFile_.get())) {
        uint8_t cpuId = 0;
        long dataBytes = 0;
        if (fread(&cpuId, sizeof(uint8_t), 1, rawDataFile_.get()) != 1 ||
            fread(&dataBytes, sizeof(long), 1, rawDataFile_.get()) != 1) {
            break; // end of cached records
        }
        for (long i = 0; i < dataBytes; i += PAGE_SIZE) {
            uint8_t page[PAGE_SIZE] = {0};
            if (fread(page, sizeof(uint8_t), PAGE_SIZE, rawDataFile_.get()) == 0) {
                break;
            }
            if (!ParseEventData(cpuId, page)) {
                PROFILER_LOG_ERROR(LOG_CORE, "%s:ParseEventData for cpu-%d failed!", __func__, cpuId);
            }
        }
    }
    return true;
}

int FlowController::StopCapture(void)
{
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");

    CHECK_TRUE(requestEvents_.size() != 0 || traceApps_.size() != 0 || traceCategories_.size() != 0, -1,
        "StopCapture: no ftrace event, hitrace app or hitrace category is set");

    // disable ftrace event switches
    DisableTraceEvents();

    // stop ftrace event data polling thread
    keepRunning_ = false;
    if (pollThread_.joinable()) {
        PROFILER_LOG_INFO(LOG_CORE, "join thread start!\n");
        pollThread_.join();
        PROFILER_LOG_INFO(LOG_CORE, "join thread done!\n");
    }
    // parse per cpu stats
    if (resultWriter_->isProtobufSerialize) {
        auto traceResult = std::make_unique<TracePluginResult>();
        CHECK_TRUE(ParsePerCpuStatus(traceResult, TRACE_END), -1, "parse TRACE_END stats FAILED!");
        CHECK_TRUE(tansporter_->Submit(std::move(traceResult)), -1, "report TRACE_END stats FAILED!");
    } else {
        auto ctx = resultWriter_->startReport(resultWriter_);
        CHECK_NOTNULL(ctx, -1, "%s: get RandomWriteCtx FAILED!", __func__);
        auto traceResult = std::make_unique<ProtoEncoder::TracePluginResult>(ctx);
        CHECK_TRUE(ParsePerCpuStatus(traceResult, TRACE_END), -1, "parse TRACE_END stats FAILED!");
        int32_t msgSize = traceResult->Finish();
        resultWriter_->finishReport(resultWriter_, msgSize);
        tansporter_->Report(static_cast<size_t>(msgSize));
    }

    // disable userspace trace triggers;
    // trace cmd will read the trace buffer, so we do this after the polling thread exits.
    traceCollector_->Recover();
    tansporter_->Flush();

    // release resources
    ftraceReaders_.clear(); // release ftrace data readers
    ftraceBuffers_.clear(); // release ftrace event read buffers
    memPool_.reset();       // release memory pool
    return 0;
}

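// Emit an FtraceCpuStatsMsg for the given stage (TRACE_START or TRACE_END): the current
// trace clock plus, for each CPU, the ring-buffer statistics (entries, overrun,
// commit overrun, bytes, timestamps, dropped and read events) reported by
// FtraceFsOps::GetPerCpuStats() and decoded by FtraceParser::ParsePerCpuStatus().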
template <typename T> bool FlowController::ParsePerCpuStatus(T& tracePluginResult, int stage)
{
    CHECK_NOTNULL(tracePluginResult, false, "create TracePluginResult FAILED!");

    auto cpuStatsMsg = tracePluginResult->add_ftrace_cpu_stats();
    if (stage == TRACE_START) {
        cpuStatsMsg->set_status(FtraceCpuStatsMsg_Status_TRACE_START);
    } else {
        cpuStatsMsg->set_status(FtraceCpuStatsMsg_Status_TRACE_END);
    }

    std::string traceClock = FtraceFsOps::GetInstance().GetTraceClock();
    if (traceClock.size() > 0) {
        cpuStatsMsg->set_trace_clock(traceClock);
    }

    for (int i = 0; i < platformCpuNum_; i++) {
        PROFILER_LOG_INFO(LOG_CORE, "[%d] ParsePerCpuStatus %d!", i, stage);
        PerCpuStats stats = {};
        stats.cpuIndex = i;
        ftraceParser_->ParsePerCpuStatus(stats, FtraceFsOps::GetInstance().GetPerCpuStats(i));
        auto perCpuMsg = cpuStatsMsg->add_per_cpu_stats();
        perCpuMsg->set_cpu(stats.cpuIndex);
        perCpuMsg->set_entries(stats.entries);
        perCpuMsg->set_overrun(stats.overrun);
        perCpuMsg->set_commit_overrun(stats.commitOverrun);
        perCpuMsg->set_bytes(stats.bytes);
        perCpuMsg->set_oldest_event_ts(stats.oldestEventTs);
        perCpuMsg->set_now_ts(stats.nowTs);
        perCpuMsg->set_dropped_events(stats.droppedEvents);
        perCpuMsg->set_read_events(stats.readEvents);
    }

    return true;
}

template <typename T> bool FlowController::ReportClockTimes(T& tracePluginResult)
{
    CHECK_NOTNULL(tracePluginResult, false, "create TracePluginResult FAILED!");

    std::map<clockid_t, ClockDetailMsg::ClockId> clocksMap = {
        {CLOCK_REALTIME, ClockDetailMsg::REALTIME},
        {CLOCK_REALTIME_COARSE, ClockDetailMsg::REALTIME_COARSE},
        {CLOCK_MONOTONIC, ClockDetailMsg::MONOTONIC},
        {CLOCK_MONOTONIC_COARSE, ClockDetailMsg::MONOTONIC_COARSE},
        {CLOCK_MONOTONIC_RAW, ClockDetailMsg::MONOTONIC_RAW},
        {CLOCK_BOOTTIME, ClockDetailMsg::BOOTTIME},
    };
    for (auto& entry : clocksMap) {
        struct timespec ts = {};
        clock_gettime(entry.first, &ts);
        auto clockMsg = tracePluginResult->add_clocks_detail();
        CHECK_NOTNULL(clockMsg, false, "add clock_detail failed for %d!", entry.first);
        clockMsg->set_id(entry.second);
        auto timeMsg = clockMsg->mutable_time();
        timeMsg->set_tv_sec(ts.tv_sec);
        timeMsg->set_tv_nsec(ts.tv_nsec);

        struct timespec tsResolution = {};
        clock_getres(entry.first, &tsResolution);
        auto resolutionMsg = clockMsg->mutable_resolution();
        resolutionMsg->set_tv_sec(tsResolution.tv_sec);
        resolutionMsg->set_tv_nsec(tsResolution.tv_nsec);
    }
    return true;
}

template <typename T> bool FlowController::ParseKernelSymbols(T& tracePluginResult)
{
    CHECK_NOTNULL(tracePluginResult, false, "create TracePluginResult FAILED!");

    ksymsParser_->Accept([&tracePluginResult](const KernelSymbol& symbol) {
        auto symbolDetail = tracePluginResult->add_symbols_detail();
        symbolDetail->set_symbol_addr(symbol.addr);
        symbolDetail->set_symbol_name(symbol.name);
    });
    PROFILER_LOG_INFO(LOG_CORE, "parse kernel symbol message done!");
    return true;
}

template <typename T, typename E>
bool FlowController::ParseFtraceEvent(T* tracePluginResult, int cpuid, uint8_t page[], E* ftraceEvent)
{
    CHECK_NOTNULL(tracePluginResult, false, "create TracePluginResult FAILED!");

    auto cpudetail = tracePluginResult->add_ftrace_cpu_detail();
    cpudetail->set_cpu(static_cast<uint32_t>(cpuid));

    CHECK_TRUE(ftraceParser_->ParsePage(*cpudetail, page, PAGE_SIZE, ftraceEvent), false, "parse page failed!");
    return true;
}

bool FlowController::AddPlatformEventsToParser(void)
{
    CHECK_TRUE(ftraceSupported_, false, "current kernel does not support ftrace!");

    PROFILER_LOG_INFO(LOG_CORE, "Add platform events to parser start!");
    for (auto& typeName : FtraceFsOps::GetInstance().GetPlatformEvents()) {
        std::string type = typeName.first;
        std::string name = typeName.second;
        if (ftraceParser_->SetupEvent(type, name)) {
            supportedEvents_.push_back(typeName);
        }
    }
    PROFILER_LOG_INFO(LOG_CORE, "Add platform events to parser done, events: %zu!", supportedEvents_.size());
    return true;
}

int FlowController::LoadConfig(const uint8_t configData[], uint32_t size)
{
    CHECK_TRUE(size > 0, -1, "config data size is zero!");
    CHECK_NOTNULL(configData, -1, "config data is null!");
    CHECK_TRUE(ftraceSupported_, -1, "current kernel does not support ftrace!");
    CHECK_NOTNULL(tansporter_, -1, "create ResultTransporter FAILED!");

    TracePluginConfig traceConfig;
    CHECK_TRUE(traceConfig.ParseFromArray(configData, size), -1, "parse %u bytes configData failed!", size);

    // sort and save user requested trace events
    std::set<std::string> events(traceConfig.ftrace_events().begin(), traceConfig.ftrace_events().end());
    for (const auto& ftraceEvent : events) {
        requestEvents_.push_back(ftraceEvent);
    }

    traceApps_.assign(traceConfig.hitrace_apps().begin(), traceConfig.hitrace_apps().end());
    traceCategories_.assign(traceConfig.hitrace_categories().begin(), traceConfig.hitrace_categories().end());

    CHECK_TRUE(requestEvents_.size() != 0 || traceApps_.size() != 0 || traceCategories_.size() != 0, -1,
        "LoadConfig: no ftrace event, hitrace app or hitrace category is set");

    // setup trace clock
    if (g_availableClocks.count(traceConfig.clock()) > 0) {
        traceClock_ = traceConfig.clock();
        FtraceFsOps::GetInstance().SetTraceClock(traceConfig.clock());
    }

    // setup parse kernel symbol option
    parseKsyms_ = traceConfig.parse_ksyms();
    parseMode_ = traceConfig.parse_mode();
    // setup trace buffer size
    SetupTraceBufferSize(traceConfig.buffer_size_kb());

    // setup transporter flush params
    SetupTransporterFlushParams(traceConfig.flush_interval_ms(), traceConfig.flush_threshold_kb());

    // generate raw data file names
    GenerateRawDataFileNames(traceConfig.raw_data_prefix());

    // setup trace period param
    SetupTraceReadPeriod(traceConfig.trace_period_ms());
    flushCacheData_ = traceConfig.discard_cache_data();
    hitraceTime_ = traceConfig.hitrace_time();
    return 0;
}

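// Clamp the requested trace buffer size to [MIN_BUFFER_SIZE_KB, MAX_BUFFER_SIZE_KB]
// (HM_MAX_BUFFER_SIZE_KB on an HM kernel), then round down to a whole number of pages.
// With KB_PER_PAGE = 4, a request of e.g. 2050 KB becomes 2050 / 4 * 4 = 2048 KB
// (illustrative value).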
void FlowController::SetupTraceBufferSize(uint32_t sizeKb)
{
    uint32_t maxBufferSizeKb = MAX_BUFFER_SIZE_KB;
    if (FtraceFsOps::GetInstance().IsHmKernel()) {
        maxBufferSizeKb = HM_MAX_BUFFER_SIZE_KB;
    }
    if (sizeKb < MIN_BUFFER_SIZE_KB) {
        bufferSizeKb_ = MIN_BUFFER_SIZE_KB;
    } else if (sizeKb > maxBufferSizeKb) {
        bufferSizeKb_ = maxBufferSizeKb;
    } else {
        bufferSizeKb_ = sizeKb / KB_PER_PAGE * KB_PER_PAGE;
    }
}

void FlowController::SetupTransporterFlushParams(uint32_t flushInterval, uint32_t flushThresholdKb)
{
    if (flushInterval > 0 && flushInterval <= MAX_FLUSH_INTERVAL) {
        tansporter_->SetFlushInterval(flushInterval);
    }
    if (flushThresholdKb > 0 && flushThresholdKb <= MAX_FLUSH_THRESHOLD) {
        tansporter_->SetFlushThreshold(flushThresholdKb * BYTE_PER_KB);
    }
}

void FlowController::GenerateRawDataFileNames(const std::string& prefix)
{
    if (prefix.size() > 0) {
        for (int i = 0; i < platformCpuNum_; i++) {
            std::string path = prefix + std::to_string(i);
            rawDataDumpPath_.push_back(path);
        }
    }
}

void FlowController::SetupTraceReadPeriod(uint32_t tracePeriod)
{
    if (tracePeriod > 0 && tracePeriod <= MAX_TRACE_PERIOD_MS) {
        tracePeriodMs_ = tracePeriod;
    } else {
        tracePeriodMs_ = DEFAULT_TRACE_PERIOD_MS;
    }
}

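// Turn on the ftrace switches for every supported event that the user requested. Config
// entries use the "type/name" form (e.g. "sched/sched_switch", illustrative); events
// reported under the "power_kernel" type are matched against the "power" prefix in the user
// config before the original type/name pair is enabled and recorded in enabledEvents_ for
// later teardown.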
void FlowController::EnableTraceEvents(void)
{
    std::unordered_set<std::string> userEventSet(requestEvents_.begin(), requestEvents_.end());
    for (auto& event : supportedEvents_) {
        std::string type = event.first;
        std::string name = event.second;
        std::string fmtType = type;
        if (type == "power_kernel") {
            fmtType = "power";
        }
        if (userEventSet.count(fmtType + "/" + name)) { // user config format
            if (FtraceFsOps::GetInstance().EnableEvent(type, name)) {
                FtraceFsOps::GetInstance().AppendSetEvent(type, name);
                enabledEvents_.push_back(event);
            }
        }
    }
    FtraceFsOps::GetInstance().EnableTracing();
}

void FlowController::DisableTraceEvents(void)
{
    FtraceFsOps::GetInstance().DisableTracing();
    for (auto& event : enabledEvents_) {
        std::string type = event.first;
        std::string name = event.second;
        FtraceFsOps::GetInstance().DisableEvent(type, name);
    }
    enabledEvents_.clear();
}

void FlowController::DisableAllCategories(void)
{
    for (auto& event : supportedEvents_) {
        std::string type = event.first;
        std::string name = event.second;
        FtraceFsOps::GetInstance().DisableCategories(type);
    }
}

void FlowController::SetReportBasicData(bool isReportBasicData)
{
    isReportBasicData_ = isReportBasicData;
}
FTRACE_NS_END