1 /*
2 * Copyright (c) Huawei Technologies Co., Ltd. 2022. All rights reserved.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
15
16 #include <cerrno>
17 #include <chrono>
18 #include <csignal>
19 #include <cstdio>
20 #include <cstdlib>
21 #include <cstring>
22 #include <ctime>
23 #include <iomanip>
24 #include <iostream>
25 #include <memory>
26 #include <mutex>
27
28 #include <fcntl.h>
29 #include <unistd.h>
30 #include <linux/bpf.h>
31 #include <linux/perf_event.h>
32 #include <sys/resource.h>
33
34 #include "bpf.h"
35 #include "elf_file.h"
36 #include "libbpf_logger.h"
37 #include "bpf_controller.h"
38
namespace {
// File-scope sink for libbpf diagnostics; created in ConfigLIBBPFLogger().
std::unique_ptr<LIBBPFLogger> libbpfLogger {nullptr};
// Path of the musl dynamic loader; used to attach the dlopen uretprobe.
const std::string THIRD_PARTY_MUSL_ADDR = "/system/lib/ld-musl-aarch64.so.1";
// Byte offsets of the st_value field inside Elf32_Sym / Elf64_Sym entries,
// used by GetSymOffset() when scanning .dynsym.
constexpr int32_t SYM_32_VALUE_OFFSET = 4;
constexpr int32_t SYM_64_VALUE_OFFSET = 8;
// Field width for hex call-chain addresses in the Dump*Event printers.
constexpr int32_t WIDE_SIXTEEN = 16;
// Upper bound for the configured stack depth.
// NOTE(review): INT_MAX needs <climits>; currently relied on transitively — confirm.
constexpr uint32_t MAX_SIZE = INT_MAX;
} // namespace
47
LIBBPFPrintFunc(enum libbpf_print_level level, const char *format, va_list args)48 int BPFController::LIBBPFPrintFunc(enum libbpf_print_level level, const char *format, va_list args)
49 {
50 if (libbpfLogger) {
51 return libbpfLogger->Printf(level, format, args);
52 }
53 return 0;
54 }
55
// Tear down in reverse order of setup: stop the loop flag, release the ring
// buffer, the ips buffer and the skeleton, then stop receivers and log reader.
BPFController::~BPFController()
{
    Stop();
    if (rb_) {
        // release bpf ringbuffer
        ring_buffer__free(rb_);
        rb_ = nullptr;
    }
    if (ips_) {
        delete[] ips_;
        ips_ = nullptr;
    }
    if (skel_) {
        hiebpf_bpf__destroy(skel_);
        skel_ = nullptr;
    }

    // NOTE(review): receivers are stopped after the ring buffer is freed;
    // Start() stops them before returning, so this is a second, idempotent stop.
    for (size_t k = 0; k < receivers_.size(); ++k) {
        receivers_[k]->Stop();
    }
    if (bpfLogReader_) {
        bpfLogReader_->Stop();
    }
}
80
// Factory: allocates a controller, verifies the user configuration and runs
// the full BPF setup. Returns nullptr if any step fails.
std::unique_ptr<BPFController> BPFController::MakeUnique(const BPFConfig& config)
{
    std::unique_ptr<BPFController> bpfctlr {new(std::nothrow) BPFController {config}};
    CHECK_NOTNULL(bpfctlr, nullptr, "failed to instantiate BPFController");
    HHLOGI(true, "BPFController instantiated");

    CHECK_TRUE(bpfctlr->VerifyConfigurations() == 0, nullptr, "failed to verify config");
    HHLOGI(true, "BPFConfig verified");

    CHECK_TRUE(bpfctlr->SetUpBPF() == 0, nullptr, "failed to set up BPF");
    HHLOGI(true, "BPF setup done");

    return bpfctlr;
}
95
VerifyDumpEvents(const __u32 nr)96 static inline int VerifyDumpEvents(const __u32 nr)
97 {
98 CHECK_TRUE(nr <= BPFController::DUMP_EVENTS_LIMIT, -1, "dump events exceeds limit");
99 return 0;
100 }
101
VerifyTraceDuration(const __u32 duration)102 static inline int VerifyTraceDuration(const __u32 duration)
103 {
104 CHECK_TRUE(duration <= BPFController::TRACE_DURATION_LIMIT, -1, "trace duration exceeds limit");
105 return 0;
106 }
107
VerifyMaxStackDepth(const __u32 depth)108 static inline int VerifyMaxStackDepth(const __u32 depth)
109 {
110 CHECK_TRUE(depth <= MAX_STACK_LIMIT, -1, "max stack depth exceeds limit");
111 return 0;
112 }
113
VerifySelectEventGroups(const std::set<HiebpfEventGroup> &selectEventGroups)114 int BPFController::VerifySelectEventGroups(const std::set<HiebpfEventGroup> &selectEventGroups)
115 {
116 CHECK_TRUE(!selectEventGroups.empty(), -1, "VerifySelectEventGroups() failed: event group list is empty");
117 selectEventGroups_ = selectEventGroups;
118 return 0;
119 }
120
VerifyConfigurations()121 int BPFController::VerifyConfigurations()
122 {
123 CHECK_TRUE(VerifySelectEventGroups(config_.selectEventGroups_) == 0, -1, "VerifySelectEventGroups fail");
124 HHLOGI(true, "VerifySelectEventGroups() done");
125 CHECK_TRUE(VerifyDumpEvents(config_.dumpEvents_) == 0, -1,
126 "VerifyDumpEvents() failed: dump events = %u", config_.dumpEvents_);
127 HHLOGI(true, "VerifyDumpEents() done");
128 CHECK_TRUE(VerifyTraceDuration(config_.traceDuration_) == 0, -1,
129 "VerifyTraceDuration() failed: duration = %u", config_.traceDuration_);
130 HHLOGI(true, "VerifyTraceDuration() done");
131 CHECK_TRUE(VerifyMaxStackDepth(config_.maxStackDepth_) == 0, -1,
132 "VerifyMaxStackDepth() failed: max stack depth = %u", config_.maxStackDepth_);
133 HHLOGI(true, "VerifyMaxStackDepth() done");
134 return 0;
135 }
136
// Open the BPF skeleton, allocate the unwind buffer, create the output data
// file, filter/load the selected BPF programs and configure them.
// Returns 0 on success, -1 or a libbpf error code on failure.
int BPFController::SetUpBPF()
{
    CHECK_TRUE(ConfigLIBBPFLogger() == 0, -1, "failed to configure LIBBPF logger");
    HHLOGI(true, "ConfigLIBBPFLogger() done");

    // enable libbpf strict mode (was mislabelled "deubug level")
    libbpf_set_strict_mode(LIBBPF_STRICT_ALL);
    // raise RLIMIT_MEMLOCK so BPF objects can be created; result is ignored
    struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
    setrlimit(RLIMIT_MEMLOCK, &r);

    skel_ = hiebpf_bpf__open();
    int err = libbpf_get_error(skel_);
    CHECK_TRUE(!err, err, "failed to open BPF skeleton: %s", strerror(-err));
    HHLOGI(true, "BPF skeleton opened");
    // MAX_SIZE is INT_MAX: bounds the ips_ allocation below
    if (config_.maxStackDepth_ > MAX_SIZE) {
        HHLOGE(true, "SetUpBPF maxStackDepth exceeds max size");
        return -1;
    }
    if (config_.unwindStack_) {
        // one __u64 slot per stack frame
        ips_ = new(std::nothrow) __u64[config_.maxStackDepth_];
        CHECK_NOTNULL(ips_, -1, "failed to allocate memory for ips");
    }
    HHLOGI(true, "allocate ips buffer done");
    dataFile_ = HiebpfDataFile::MakeShared(config_.cmd_, config_.outputFile_);
    CHECK_NOTNULL(dataFile_, -1, "failed to make hiebpf data file");
    CHECK_TRUE(FilterProgByEvents() == 0, -1, "failed to load BPF objects");
    HHLOGI(true, "make HiebpfDataFile done");
    // written before hiebpf_bpf__load(); presumably rodata is sealed at load
    skel_->rodata->g_stack_limit = config_.maxStackDepth_;
    err = hiebpf_bpf__load(skel_);
    CHECK_TRUE(!err, err, "failed to load BPF skeleton: %s", strerror(-err));
    CHECK_TRUE(ConfigureBPF() == 0, -1, "failed to configure BPF");
    HHLOGI(true, "BPF configuration done");

    return 0;
}
173
FilterProgByEvents()174 int BPFController::FilterProgByEvents()
175 {
176 // check each one hiebpf_bpf.progs in hiebpf.skel.h
177 // hiebpf_bpf.progs is autoload by default
178 FilterFsGroup();
179
180 if (selectEventGroups_.find(MEM_GROUP_ALL) == selectEventGroups_.end()) {
181 bpf_program__set_autoload(skel_->progs.__do_fault_entry, false);
182 bpf_program__set_autoload(skel_->progs.__do_fault_exit, false);
183
184 bpf_program__set_autoload(skel_->progs.do_swap_page_entry, false);
185 bpf_program__set_autoload(skel_->progs.do_swap_page_exit, false);
186
187 bpf_program__set_autoload(skel_->progs.do_wp_page_entry, false);
188 bpf_program__set_autoload(skel_->progs.do_wp_page_exit, false);
189 }
190 if (selectEventGroups_.find(BIO_GROUP_ALL) == selectEventGroups_.end()) {
191 bpf_program__set_autoload(skel_->progs.block_issue, false);
192 bpf_program__set_autoload(skel_->progs.blk_update_request, false);
193 } else {
194 dataFile_->WriteKernelSymbol();
195 }
196 return 0;
197 }
198
FilterFsGroup()199 void BPFController::FilterFsGroup()
200 {
201 if (selectEventGroups_.find(FS_GROUP_ALL) != selectEventGroups_.end()) {
202 return;
203 }
204 if (skel_ == nullptr) {
205 return;
206 }
207 if (selectEventGroups_.find(FS_GROUP_OPEN) == selectEventGroups_.end()) {
208 bpf_program__set_autoload(skel_->progs.do_sys_openat2_entry, false);
209 bpf_program__set_autoload(skel_->progs.do_sys_openat2_exit, false);
210 }
211 if (selectEventGroups_.find(FS_GROUP_READ) == selectEventGroups_.end()) {
212 bpf_program__set_autoload(skel_->progs.do_readv_entry, false);
213 bpf_program__set_autoload(skel_->progs.do_readv_exit, false);
214
215 bpf_program__set_autoload(skel_->progs.do_preadv_entry, false);
216 bpf_program__set_autoload(skel_->progs.do_preadv_exit, false);
217
218 bpf_program__set_autoload(skel_->progs.ksys_read_entry, false);
219 bpf_program__set_autoload(skel_->progs.ksys_read_exit, false);
220
221 bpf_program__set_autoload(skel_->progs.ksys_pread64_entry, false);
222 bpf_program__set_autoload(skel_->progs.ksys_pread64_exit, false);
223 }
224 if (selectEventGroups_.find(FS_GROUP_WRITE) == selectEventGroups_.end()) {
225 bpf_program__set_autoload(skel_->progs.do_writev_entry, false);
226 bpf_program__set_autoload(skel_->progs.do_writev_exit, false);
227
228 bpf_program__set_autoload(skel_->progs.__arm64_sys_pwritev_entry, false);
229 bpf_program__set_autoload(skel_->progs.__arm64_sys_pwritev_exit, false);
230 bpf_program__set_autoload(skel_->progs.__arm64_sys_pwritev2_entry, false);
231 bpf_program__set_autoload(skel_->progs.__arm64_sys_pwritev2_exit, false);
232 bpf_program__set_autoload(skel_->progs.__arm64_compat_sys_pwritev_entry, false);
233 bpf_program__set_autoload(skel_->progs.__arm64_compat_sys_pwritev_exit, false);
234 bpf_program__set_autoload(skel_->progs.__arm64_compat_sys_pwritev2_entry, false);
235 bpf_program__set_autoload(skel_->progs.__arm64_compat_sys_pwritev2_exit, false);
236
237 bpf_program__set_autoload(skel_->progs.ksys_write_entry, false);
238 bpf_program__set_autoload(skel_->progs.ksys_write_exit, false);
239
240 bpf_program__set_autoload(skel_->progs.ksys_pwrite64_entry, false);
241 bpf_program__set_autoload(skel_->progs.ksys_pwrite64_exit, false);
242 }
243 if (selectEventGroups_.find(FS_GROUP_CLOSE) == selectEventGroups_.end()) {
244 bpf_program__set_autoload(skel_->progs.__close_fd_entry, false);
245 bpf_program__set_autoload(skel_->progs.__close_fd_exit, false);
246 }
247 }
248
InitTracerPid(const int fd, bool excludeTracer)249 static int InitTracerPid(const int fd, bool excludeTracer)
250 {
251 int32_t pid = -1;
252 if (excludeTracer) {
253 /* we write the tracer pid into BPF map to notify BPF progs
254 * to exclude the tracer itself
255 */
256 pid = static_cast<int32_t>(getpid());
257 CHECK_TRUE(pid >= 0, -1, "failed to get current pid");
258 }
259 constexpr __u32 pididx {TRACER_PID_INDEX};
260 int err = bpf_map_update_elem(fd, &pididx, &pid, BPF_ANY);
261 CHECK_TRUE(!err, -1, "failed to update tracer pid %d in config_var_map", pid);
262 return 0;
263 }
264
InitBPFLogLevel(const int fd, const __u32 level)265 static inline int InitBPFLogLevel(const int fd, const __u32 level)
266 {
267 if (level == BPF_LOG_NONE) {
268 HHLOGD(true, "bpf log level is NONE!");
269 return 0;
270 }
271 constexpr __u32 levelidx {BPF_LOG_LEVEL_INDEX};
272 int err = bpf_map_update_elem(fd, &levelidx, &level, BPF_ANY);
273 CHECK_TRUE(!err, -1, "failed to set bpf log level in config_var_map");
274 return 0;
275 }
276
InitUnwindFlag(const int fd, bool unwind)277 static inline int InitUnwindFlag(const int fd, bool unwind)
278 {
279 constexpr __u32 uflagidx {UNWIND_FLAG_INDEX};
280 __u32 uflag {0};
281 if (unwind) {
282 uflag = 1;
283 }
284 int err = bpf_map_update_elem(fd, &uflagidx, &uflag, BPF_ANY);
285 CHECK_TRUE(!err, -1, "failed to set unwind stack flag in config_var_map");
286 return 0;
287 }
288
// Push the runtime configuration (tracer pid, BPF log level, unwind flag)
// into the config_var_map BPF map. Returns 0 on success, -1 on failure.
int BPFController::InitBPFVariables() const
{
    int fd = bpf_map__fd(skel_->maps.config_var_map);
    CHECK_TRUE(fd >= 0, -1, "failed to get fd of config_var_map");
    // NOTE(review): this "done" message is logged before the updates below run
    HHLOGI(true, "InitBPFVariables() done");
    CHECK_TRUE(InitTracerPid(fd, config_.excludeTracer_) == 0, -1,
        "failed to init tracer pid in config_var_map");
    HHLOGI(true, "InitTracerPid() done");
    CHECK_TRUE(InitBPFLogLevel(fd, config_.BPFLogLevel_) == 0, -1,
        "failed to init BPF log level in config_var_map");
    HHLOGI(true, "InitBPFLogLevel() done");
    CHECK_TRUE(InitUnwindFlag(fd, config_.unwindStack_) == 0, -1,
        "failed to init unwind stack flag in config_var_map");
    HHLOGI(true, "InitUnwindFlag() done");
    return 0;
}
305
FillTargetPidMap() const306 int BPFController::FillTargetPidMap() const
307 {
308 int fd = bpf_map__fd(skel_->maps.target_pid_map);
309 CHECK_TRUE(fd >= 0, -1, "failed to get fd of target_pid_map");
310 int index {0};
311 uint32_t val {1}; // target_pid_Map[0] = 1 means tracing all processes
312 int err {0};
313 int numPids {config_.targetPids_.size()};
314 HHLOGD(true, "target pid num = %d", numPids);
315 if (numPids == 0) {
316 // no target pid specified, trace all processes
317 err = bpf_map_update_elem(fd, &index, &val, BPF_ANY);
318 CHECK_TRUE(!err, -1, "failed to set target pid = %u", val);
319 return 0;
320 }
321 if (numPids > MAX_TARGET_PIDS) {
322 HHLOGW(true, "BPFController WARN: number of target pids exceeds the maximum limit");
323 numPids = MAX_TARGET_PIDS;
324 }
325 for (index = 1; index <= numPids; ++index) {
326 val = static_cast<uint32_t>(config_.targetPids_[index - 1]);
327 HHLOGD(true, "target pid = %u", val);
328 std::cout << "target pid = " << val << std::endl;
329 err = bpf_map_update_elem(fd, &index, &val, BPF_ANY);
330 CHECK_TRUE(!err, -1, "failed to set target pid = %d", val);
331 }
332 return 0;
333 }
334
// Start the reader that drains BPF-side log output into BPFLogFile_.
// Only compiled in when one of the BPF_LOGGER_* levels is defined; a
// BPF_LOG_NONE configuration skips the reader entirely.
inline int BPFController::ConfigBPFLogger()
{
    if (config_.BPFLogLevel_ == BPF_LOG_NONE) {
        HHLOGD(true, "bpf log level is NONE!");
        return 0;
    }
#if defined(BPF_LOGGER_DEBUG) || defined(BPF_LOGGER_INFO) || defined(BPF_LOGGER_WARN) || \
    defined(BPF_LOGGER_ERROR) || defined(BPF_LOGGER_FATAL)
    bpfLogReader_ = BPFLogReader::MakeUnique(config_.BPFLogFile_);
    CHECK_NOTNULL(bpfLogReader_, -1, "failed to initialize BPFLogReader");
#endif
    return 0;
}
348
// Install the libbpf print callback and, unless the level is LIBBPF_NONE,
// create the file-scope logger it forwards to. The callback is registered
// unconditionally; it no-ops while libbpfLogger stays null.
inline int BPFController::ConfigLIBBPFLogger() const
{
    // set up libbpf print callback
    HHLOGI(true, "libbpf logger: file = %s, level = %d", config_.LIBBPFLogFile_.c_str(), config_.LIBBPFLogLevel_);
    libbpf_set_print(BPFController::LIBBPFPrintFunc);
    if (config_.LIBBPFLogLevel_ == LIBBPF_NONE) {
        HHLOGD(true, "libbpf log level is NONE!");
        return 0;
    }
    libbpfLogger = LIBBPFLogger::MakeUnique(config_.LIBBPFLogFile_, config_.LIBBPFLogLevel_);
    CHECK_NOTNULL(libbpfLogger, -1, "libbpfLogger is nullptr");
    return 0;
}
362
ConfigReceivers()363 int BPFController::ConfigReceivers()
364 {
365 if (config_.dumpEvents_ == 0) {
366 rb_ = ring_buffer__new(
367 bpf_map__fd(skel_->maps.bpf_ringbuf_map),
368 BPFController::HandleEvent,
369 this, nullptr);
370 int err = libbpf_get_error(rb_);
371 CHECK_TRUE(!err, err, "failed to make BPF ring buffer: %s", strerror(-err));
372 if (config_.pipelines_ == 0) {
373 config_.pipelines_ = MIN_PIPELINES_LIMIT;
374 }
375 for (__u32 cnt = config_.pipelines_; cnt != 0; --cnt) {
376 receivers_.push_back(BPFEventReceiver::MakeShared(dataFile_));
377 }
378 CHECK_TRUE(receivers_.size() == config_.pipelines_, -1, "failed to make BPF event receivers");
379 last_ = 0;
380 } else {
381 rb_ = ring_buffer__new(
382 bpf_map__fd(skel_->maps.bpf_ringbuf_map),
383 BPFController::DumpEvent,
384 this, nullptr);
385 int err = libbpf_get_error(rb_);
386 CHECK_TRUE(!err, err, "failed to make BPF ring buffer: %s", strerror(-err));
387 }
388 return 0;
389 }
390
// Resolve the file offset of `symbol` inside the ELF at `path`: walk the
// .dynsym entries, match the name against .dynstr, read st_value, then map
// the virtual address to a file offset via the .text section header.
// Returns 0 on any failure.
uint64_t BPFController::GetSymOffset(const std::string &path, const std::string &symbol)
{
    CHECK_TRUE(access(path.c_str(), F_OK) == 0, 0, "the file does not exist");
    using namespace OHOS::Developtools::Hiebpf;
    std::unique_ptr<ElfFile> elfFile = ElfFile::MakeUnique(path);
    CHECK_NOTNULL(elfFile, 0, "ELF file open failed");
    const std::string dynsym {".dynsym"};
    CHECK_TRUE(elfFile->shdrs_.find(dynsym) != elfFile->shdrs_.end(), 0, "section dynsym failed to obtain data");
    const auto &sym = elfFile->shdrs_[dynsym];
    const uint8_t *symData = elfFile->GetSectionData(sym->secIndex_);
    CHECK_NOTNULL(symData, 0, "symData is NULL!");

    const std::string dynstr {".dynstr"};
    CHECK_TRUE(elfFile->shdrs_.find(dynstr) != elfFile->shdrs_.end(), 0, "section dynstr failed to obtain data");
    const auto &str = elfFile->shdrs_[dynstr];
    const uint8_t *strData = elfFile->GetSectionData(str->secIndex_);
    CHECK_NOTNULL(strData, 0, "strData is NULL!");

    uint32_t st_name = 0;   // st_name sits at offset 0 of each symbol entry
    uint64_t stepLength = 0; // byte offset of the current entry within .dynsym
    uint64_t vaddr = 0;
    while (stepLength < sym->secSize_) {
        int ret = memcpy_s(&st_name, sizeof(uint32_t), symData + stepLength, sizeof(uint32_t));
        CHECK_TRUE(ret == EOK, 0, "failed to memcpy symData");
        // NOTE(review): st_name is not bounds-checked against the .dynstr
        // size before being used as an index — confirm inputs are trusted
        auto name = const_cast<uint8_t*>(strData + st_name);
        if (name != nullptr && std::string(reinterpret_cast<char*>(name)).compare(symbol) == 0) {
            // entry size distinguishes Elf64_Sym from Elf32_Sym layouts
            int32_t valueOffset = sym->secEntrySize_ == sizeof(Elf64_Sym) ? SYM_64_VALUE_OFFSET : SYM_32_VALUE_OFFSET;
            int32_t valueSize = valueOffset == SYM_64_VALUE_OFFSET ? sizeof(uint64_t) : sizeof(uint32_t);
            CHECK_TRUE(stepLength + static_cast<uint64_t>(valueOffset) < sym->secSize_, 0, "invalid symbol offset");
            ret = memcpy_s(&vaddr, sizeof(uint64_t), symData + stepLength + valueOffset, valueSize);
            CHECK_TRUE(ret == EOK, 0, "failed to memcpy symData");
            break;
        }
        stepLength += sym->secEntrySize_;
    }
    CHECK_TRUE(vaddr != 0, 0, "get vaddr failed");

    const std::string text {".text"};
    CHECK_TRUE(elfFile->shdrs_.find(text) != elfFile->shdrs_.end(), 0, "section text failed to obtain data");
    const auto &textPtr = elfFile->shdrs_[text];
    // translate virtual address -> file offset using the .text mapping
    return vaddr - textPtr->secVaddr_ + textPtr->fileOffset_;
}
433
ConfigDlopenBPFProg()434 int32_t BPFController::ConfigDlopenBPFProg()
435 {
436 uint64_t symOffset = GetSymOffset(THIRD_PARTY_MUSL_ADDR, "dlopen");
437 CHECK_TRUE(symOffset != 0, -1, "get symOffset failed");
438 skel_->links.uretprobe_dlopen = bpf_program__attach_uprobe(skel_->progs.uretprobe_dlopen,
439 true,
440 -1,
441 THIRD_PARTY_MUSL_ADDR.c_str(),
442 symOffset);
443 CHECK_TRUE(skel_->links.uretprobe_dlopen, -1, "failed to attach uretprobe_dlopen");
444 return 0;
445 }
446
// Configure the loaded skeleton: fill the config and target-pid maps, start
// loggers/receivers and attach the dlopen uretprobe. Returns 0 on success.
int BPFController::ConfigureBPF()
{
    CHECK_TRUE(InitBPFVariables() == 0, -1, "failed to fill config_var_map");
    HHLOGI(true, "InitBPFVariables() done");
    CHECK_TRUE(FillTargetPidMap() == 0, -1, "failed to fill target_pid_map");
    HHLOGI(true, "FillTargetPidMap() done");
    CHECK_TRUE(ConfigBPFLogger() == 0, -1, "failed to configure BPF logger");
    HHLOGI(true, "ConfigBPFLogger() done");
    CHECK_TRUE(ConfigReceivers() == 0, -1, "failed to configure BPF ringbuffer");
    HHLOGI(true, "ConfigReceivers() done");
    CHECK_TRUE(ConfigDlopenBPFProg() == 0, -1, "failed to configure user BPF prog");
    return 0;
}
460
// Attach all BPF programs and run the event loop until Stop() is called,
// the trace duration elapses, or a libbpf error occurs. Stops receivers and
// the log reader before returning. Returns 0 on success, -1 on error.
int BPFController::Start()
{
#if defined(BPF_LOGGER_DEBUG) || defined(BPF_LOGGER_INFO) || defined(BPF_LOGGER_WARN) || \
    defined(BPF_LOGGER_ERROR) || defined(BPF_LOGGER_FATAL)
    CHECK_TRUE(StartBPFLogReader() == 0, -1, "failed to start BPF log reader");
#endif
    HHLOGI(true, "BPF log reader started");
    CHECK_TRUE(StartReceivers() == 0, -1, "failed to start receivers");
    HHLOGI(true, "receivers started");
    // activate events
    int err = hiebpf_bpf__attach(skel_);
    CHECK_TRUE(!err, -1, "failed to attach bpf object: %s", strerror(-err));
    HHLOGI(true, "BPF events activated");

    const auto endTime = std::chrono::steady_clock::now() + std::chrono::seconds(config_.traceDuration_);
    while (!loopStop_) {
        // poll the ring buffer once; non-zero means a libbpf failure
        if (BPFEventLoopOnce() != 0) {
            printf("libbpf error occured, hiebpf exit\n");
            err = -1;
            break;
        }
        // duration check happens after each poll, so the loop may overrun
        // the deadline by up to one poll interval
        if (std::chrono::steady_clock::now() >= endTime) {
            printf("timeout(%us), hiebpf exit\n", config_.traceDuration_);
            break;
        }
    }
    // receivers_ must stop after BPFEventLoopOnce();
    for (size_t k = 0; k < receivers_.size(); ++k) {
        receivers_[k]->Stop();
    }
    if (bpfLogReader_) {
        bpfLogReader_->Stop();
    }

    HHLOGI(true, "hiebpf stopped");

    return err;
}
499
// Ask the event loop in Start() to exit after its current iteration.
// Only sets a flag, so it is cheap and safe to call more than once.
void BPFController::Stop()
{
    loopStop_ = true;
}
504
HandleEvent(void *ctx, void *data, size_t dataSize)505 int BPFController::HandleEvent(void *ctx, void *data, size_t dataSize)
506 {
507 // get the next running receiver
508 BPFController *bpfctlr = static_cast<BPFController *>(ctx);
509 auto wrecv = bpfctlr->NextActiveReceiver();
510 auto receiver = wrecv.lock();
511 if (receiver == nullptr) {
512 HHLOGF(true, "all receivers have stopped, will stop BPF event loop");
513 bpfctlr->Stop();
514 return -1;
515 }
516
517 // move data and notify receiver
518 int ret = receiver->Put(data, dataSize);
519 HHLOGE((ret < 0), "event lost: failed to move data to receiver"); // try other receivers ?
520 HHLOGF(
521 (0 <= ret and ret < static_cast<int>(dataSize)),
522 "incomplete data movement: this should never happen");
523 return ret;
524 }
525
DumpOpenat2Args(const struct fstrace_cmplt_event_t &cmpltEvent)526 static int DumpOpenat2Args(const struct fstrace_cmplt_event_t &cmpltEvent)
527 {
528 std::cout << "\nArgs:";
529 std::cout << "\n dfd = " << cmpltEvent.start_event.openat2_args.dfd;
530 std::cout << "\n filename = " << cmpltEvent.start_event.openat2_args.filename;
531 std::cout << "\n how = " << cmpltEvent.start_event.openat2_args.how;
532 return 0;
533 }
534
DumpReadvArgs(const struct fstrace_cmplt_event_t &cmpltEvent)535 static int DumpReadvArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
536 {
537 std::cout << "\nArgs:";
538 std::cout << "\n fd = " << cmpltEvent.start_event.readv_args.fd;
539 std::cout << "\n vec = " << cmpltEvent.start_event.readv_args.vec;
540 std::cout << "\n vlen = " << cmpltEvent.start_event.readv_args.vlen;
541 std::cout << "\n flags = " << cmpltEvent.start_event.readv_args.flags;
542 return 0;
543 }
544
DumpPreadvArgs(const struct fstrace_cmplt_event_t &cmpltEvent)545 static int DumpPreadvArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
546 {
547 std::cout << "\nArgs:";
548 std::cout << "\n fd = " << cmpltEvent.start_event.preadv_args.fd;
549 std::cout << "\n vec = " << cmpltEvent.start_event.preadv_args.vec;
550 std::cout << "\n vlen = " << cmpltEvent.start_event.preadv_args.vlen;
551 std::cout << "\n pos = " << cmpltEvent.start_event.preadv_args.pos;
552 std::cout << "\n flags = " << cmpltEvent.start_event.preadv_args.flags;
553 return 0;
554 }
555
DumpReadArgs(const struct fstrace_cmplt_event_t &cmpltEvent)556 static int DumpReadArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
557 {
558 std::cout << "\nArgs:";
559 std::cout << "\n fd = " << cmpltEvent.start_event.read_args.fd;
560 std::cout << "\n buf = " << cmpltEvent.start_event.read_args.buf;
561 std::cout << "\n count = " << cmpltEvent.start_event.read_args.count;
562 return 0;
563 }
564
DumpPread64Args(const struct fstrace_cmplt_event_t &cmpltEvent)565 static int DumpPread64Args(const struct fstrace_cmplt_event_t &cmpltEvent)
566 {
567 std::cout << "\nArgs:";
568 std::cout << "\n fd = " << cmpltEvent.start_event.pread64_args.fd;
569 std::cout << "\n buf = " << cmpltEvent.start_event.pread64_args.buf;
570 std::cout << "\n count = " << cmpltEvent.start_event.pread64_args.count;
571 std::cout << "\n pos = " << cmpltEvent.start_event.pread64_args.pos;
572 return 0;
573 }
574
DumpWritevArgs(const struct fstrace_cmplt_event_t &cmpltEvent)575 static int DumpWritevArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
576 {
577 std::cout << "\nArgs:";
578 std::cout << "\n fd = " << cmpltEvent.start_event.writev_args.fd;
579 std::cout << "\n vec = " << cmpltEvent.start_event.writev_args.vec;
580 std::cout << "\n vlen = " << cmpltEvent.start_event.writev_args.vlen;
581 std::cout << "\n flags = " << cmpltEvent.start_event.writev_args.flags;
582 return 0;
583 }
584
DumpPwritevArgs(const struct fstrace_cmplt_event_t &cmpltEvent)585 static int DumpPwritevArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
586 {
587 std::cout << "\nArgs:";
588 std::cout << "\n fd = " << cmpltEvent.start_event.pwritev_args.fd;
589 std::cout << "\n vec = " << cmpltEvent.start_event.pwritev_args.vec;
590 std::cout << "\n vlen = " << cmpltEvent.start_event.pwritev_args.vlen;
591 std::cout << "\n pos = " << cmpltEvent.start_event.pwritev_args.pos;
592 std::cout << "\n flags = " << cmpltEvent.start_event.pwritev_args.flags;
593 return 0;
594 }
595
DumpWriteArgs(const struct fstrace_cmplt_event_t &cmpltEvent)596 static int DumpWriteArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
597 {
598 std::cout << "\nArgs:";
599 std::cout << "\n fd = " << cmpltEvent.start_event.write_args.fd;
600 std::cout << "\n buf = " << cmpltEvent.start_event.write_args.buf;
601 std::cout << "\n count = " << cmpltEvent.start_event.write_args.count;
602 return 0;
603 }
604
DumpPwrite64Args(const struct fstrace_cmplt_event_t &cmpltEvent)605 static int DumpPwrite64Args(const struct fstrace_cmplt_event_t &cmpltEvent)
606 {
607 std::cout << "\nArgs:";
608 std::cout << "\n fd = " << cmpltEvent.start_event.pwrite64_args.fd;
609 std::cout << "\n buf = " << cmpltEvent.start_event.pwrite64_args.buf;
610 std::cout << "\n count = " << cmpltEvent.start_event.pwrite64_args.count;
611 std::cout << "\n pos = " << cmpltEvent.start_event.pwrite64_args.pos;
612 return 0;
613 }
614
DumpCloseArgs(const struct fstrace_cmplt_event_t &cmpltEvent)615 static int DumpCloseArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
616 {
617 std::cout << "\nArgs:";
618 std::cout << "\n files = " << cmpltEvent.start_event.close_args.files;
619 std::cout << "\n fd = " << cmpltEvent.start_event.close_args.fd;
620 return 0;
621 }
622
DumpTypeAndArgs(const struct fstrace_cmplt_event_t &cmpltEvent)623 static int DumpTypeAndArgs(const struct fstrace_cmplt_event_t &cmpltEvent)
624 {
625 std::cout << "\nevent type: ";
626 switch (cmpltEvent.start_event.type) {
627 case SYS_OPENAT2: std::cout << "openat2"; return DumpOpenat2Args(cmpltEvent);
628
629 case SYS_READV: std::cout << "readv"; return DumpReadvArgs(cmpltEvent);
630 case SYS_PREADV: std::cout << "preadv"; return DumpPreadvArgs(cmpltEvent);
631 case SYS_READ: std::cout << "read"; return DumpReadArgs(cmpltEvent);
632 case SYS_PREAD64: std::cout << "pread64"; return DumpPread64Args(cmpltEvent);
633
634 case SYS_WRITEV: std::cout << "writev"; return DumpWritevArgs(cmpltEvent);
635 case SYS_PWRITEV: std::cout << "pwritev"; return DumpPwritevArgs(cmpltEvent);
636 case SYS_WRITE: std::cout << "write"; return DumpWriteArgs(cmpltEvent);
637 case SYS_PWRITE64: std::cout << "pwrite64"; return DumpPwrite64Args(cmpltEvent);
638
639 case SYS_CLOSE: std::cout << "close"; return DumpCloseArgs(cmpltEvent);
640 }
641 HHLOGE(true, "unreognized fstrace event type = %d", cmpltEvent.start_event.type);
642 return -1;
643 }
644
// Pretty-print one complete fstrace event to stdout.
// NOTE(review): "size dismatch" in the error text is a typo ("mismatch").
int BPFController::DumpFSTraceEvent(BPFController *bpfctlr, void *data, size_t dataSize)
{
    if (dataSize != sizeof(fstrace_cmplt_event_t)) {
        std::cout << "DumpFSTraceEvent ERROR: size dismatch:"
                  << " data size = " << dataSize
                  << " fstrace event size = " << sizeof(fstrace_cmplt_event_t)
                  << std::endl;
        return -1;
    }
    struct fstrace_cmplt_event_t cmpltEvent {};
    if (memcpy_s(&cmpltEvent, sizeof(fstrace_cmplt_event_t), data, dataSize) != EOK) {
        std::cout << "failed to copy data to fstrace_cmplt_event_t" << std::endl;
        return -1;
    }
    std::cout << "\nFSTrace Event:"
              << "\ndata size: " << dataSize;
    // return value intentionally ignored: dump what we can either way
    DumpTypeAndArgs(cmpltEvent);
    // NOTE(review): the first "ips:" label prints nips (the frame count);
    // the second introduces the address list
    std::cout << "\nretval: " << cmpltEvent.retval
              << "\nstart time: " << cmpltEvent.start_event.stime
              << "\nexit time: " << cmpltEvent.ctime
              << "\npid: " << cmpltEvent.pid
              << "\ntgid: " << cmpltEvent.tgid
              << "\ncomm: " << cmpltEvent.comm
              << "\nips: " << cmpltEvent.nips
              << "\nips:"
              << std::setw(WIDE_SIXTEEN) << std::hex;
    for (uint32_t i = 0; i < cmpltEvent.nips; ++i) {
        std::cout << "\n " << cmpltEvent.ips[i];
    }
    std::cout << std::dec << std::endl;
    return 0;
}
677
// Pretty-print one complete page-fault trace event to stdout.
int BPFController::DumpPFTraceEvent(BPFController *bpfctlr, void *data, size_t dataSize)
{
    if (dataSize != sizeof(pftrace_cmplt_event_t)) {
        std::cout << "DumpPFTraceEvent ERROR: size dismatch:"
                  << " data size = " << dataSize
                  << " pftrace event size = " << sizeof(pftrace_cmplt_event_t)
                  << std::endl;
        return -1;
    }
    struct pftrace_cmplt_event_t cmpltEvent {};
    if (memcpy_s(&cmpltEvent, sizeof(pftrace_cmplt_event_t), data, dataSize) != EOK) {
        std::cout << "failed to copy data to pftrace_cmplt_event_t" << std::endl;
        return -1;
    }
    std::cout << "PFTrace Event:"
              << "\ndata size: " << dataSize
              << "\nevent type: ";
    // unknown types fall through to printing the raw numeric value
    switch (cmpltEvent.start_event.type) {
        case PF_COPY_ON_WRITE: std::cout << "Copy On Write"; break;
        case PF_FAKE_ZERO_PAGE: std::cout << "Zero FAKE Page"; break;
        case PF_FILE_BACKED_IN: std::cout << "File Backed In"; break;
        case PF_PAGE_CACHE_HIT: std::cout << "Page Cache Hit"; break;
        case PF_SWAP_FROM_DISK: std::cout << "Swap From Disk"; break;
        case PF_SWAP_FROM_ZRAM: std::cout << "Swap From Zram"; break;
        case PF_ZERO_FILL_PAGE: std::cout << "Zero Fill Page"; break;
        default: std::cout << cmpltEvent.start_event.type;
    }
    std::cout << "\naddress: " << cmpltEvent.start_event.addr
              << "\nsize: " << cmpltEvent.size
              << "\nstart time: " << cmpltEvent.start_event.stime
              << "\nexit time: " << cmpltEvent.ctime
              << "\npid: " << cmpltEvent.pid
              << "\ntgid: " << cmpltEvent.tgid
              << "\ncomm: " << cmpltEvent.comm
              << "\nips: " << cmpltEvent.nips
              << std::setw(WIDE_SIXTEEN) << std::hex;
    for (uint32_t i = 0; i < cmpltEvent.nips; ++i) {
        std::cout << "\n " << cmpltEvent.ips[i];
    }
    std::cout << std::dec << std::endl;
    return 0;
}
720
// Pretty-print one complete block-IO trace event to stdout.
int BPFController::DumpBIOTraceEvent(BPFController *bpfctlr, void *data, size_t dataSize)
{
    if (dataSize != sizeof(biotrace_cmplt_event_t)) {
        std::cout << "DumpBIOTraceEvent ERROR: size dismatch:"
                  << " data size = " << dataSize
                  << " biotrace event size = " << sizeof(biotrace_cmplt_event_t)
                  << std::endl;
        return -1;
    }
    struct biotrace_cmplt_event_t cmpltEvent {};
    if (memcpy_s(&cmpltEvent, sizeof(biotrace_cmplt_event_t), data, dataSize) != EOK) {
        std::cout << "failed to copy data to biotrace_cmplt_event_t" << std::endl;
        return -1;
    }
    std::cout << "BIOTrace Event:"
              << "\ndata size: " << dataSize
              << "\nevent type: ";
    // unknown types fall through to printing the raw numeric value
    switch (cmpltEvent.start_event.type) {
        case BIO_DATA_READ: std::cout << "DATA_READ"; break;
        case BIO_DATA_WRITE: std::cout << "DATA_WRITE"; break;
        case BIO_METADATA_READ: std::cout << "METADATA_READ"; break;
        case BIO_METADATA_WRITE: std::cout << "METADATA_WRITE"; break;
        case BIO_PAGE_IN: std::cout << "PAGE_IN"; break;
        case BIO_PAGE_OUT: std::cout << "PAGE_OUT"; break;
        default: std::cout << cmpltEvent.start_event.type;
    }

    std::cout << "\nstart time: " << cmpltEvent.start_event.stime
              << "\nexit time: " << cmpltEvent.ctime
              << "\npid: " << cmpltEvent.start_event.pid
              << "\ntgid: " << cmpltEvent.start_event.tgid
              << "\ncomm: " << cmpltEvent.start_event.comm
              << "\nprio: " << cmpltEvent.prio
              << "\nsize: " << cmpltEvent.start_event.size
              << "\nblkcnt: " << cmpltEvent.blkcnt
              << "\nips: " << cmpltEvent.nips
              << std::setw(WIDE_SIXTEEN) << std::hex;
    for (uint32_t i = 0; i < cmpltEvent.nips; ++i) {
        std::cout << "\n " << cmpltEvent.ips[i];
    }
    std::cout << std::dec << std::endl;
    return 0;
}
764
// Pretty-print one complete string (filename) trace event to stdout.
int BPFController::DumpSTRTraceEvent(void *data, size_t dataSize)
{
    if (dataSize != sizeof(strtrace_cmplt_event_t)) {
        std::cout << "DumpSTRTraceEvent ERROR: size dismatch:"
                  << " data size = " << dataSize
                  << " strtrace event size = " << sizeof(strtrace_cmplt_event_t)
                  << std::endl;
        return -1;
    }
    struct strtrace_cmplt_event_t cmpltEvent {};
    if (memcpy_s(&cmpltEvent, sizeof(strtrace_cmplt_event_t), data, dataSize) != EOK) {
        std::cout << "failed to copy data to strtrace_cmplt_event_t" << std::endl;
        return -1;
    }
    std::cout << "STRTrace Event:"
              << "\ndata size: " << dataSize
              << "\ntracer: " << cmpltEvent.start_event.stracer
              << "\ntype: " << cmpltEvent.start_event.type
              << "\naddress: " << cmpltEvent.start_event.addr
              << "\nstart time: " << cmpltEvent.start_event.stime
              << "\npid: " << cmpltEvent.pid
              << "\ntgid: " << cmpltEvent.tgid
              << "\nfilename len: " << cmpltEvent.len
              << "\nfilename: " << cmpltEvent.filename
              << std::endl;
    return 0;
}
792
DumpEvent(void *ctx, void *data, size_t dataSize)793 int BPFController::DumpEvent(void *ctx, void *data, size_t dataSize)
794 {
795 const __u32 *tracer = static_cast<const __u32 *>(data);
796 BPFController *bpfctlr = static_cast<BPFController *>(ctx);
797 if (bpfctlr->config_.dumpEvents_) {
798 --bpfctlr->config_.dumpEvents_;
799 static __u32 counter {0};
800 std::cout << "\ncounter = " << ++counter;
801 switch (*tracer) {
802 case FSTRACE: return DumpFSTraceEvent(bpfctlr, data, dataSize);
803 case PFTRACE: return DumpPFTraceEvent(bpfctlr, data, dataSize);
804 case BIOTRACE: return DumpBIOTraceEvent(bpfctlr, data, dataSize);
805 case STRTRACE: return DumpSTRTraceEvent(data, dataSize);
806 }
807 std::cout << "DumpEvent ERROR: bad tracer type = " << (*tracer) << std::endl;
808 }
809 return 0;
810 }
811
NextActiveReceiver()812 std::weak_ptr<BPFEventReceiver> BPFController::NextActiveReceiver()
813 {
814 __u32 next = last_ + 1;
815 __u32 total = receivers_.size();
816 for (;;) {
817 if (next >= total) {
818 next -= total;
819 }
820 if (receivers_[next]->Running() or next == last_) {
821 break;
822 }
823 ++next;
824 }
825 if (receivers_[next]->Running()) {
826 last_ = next;
827 return receivers_[last_];
828 }
829 return std::weak_ptr<BPFEventReceiver>();
830 }