1/* 2 * Copyright (c) Huawei Technologies Co., Ltd. 2021-2022. All rights reserved. 3 * Licensed under the Apache License, Version 2.0 (the "License"); 4 * you may not use this file except in compliance with the License. 5 * You may obtain a copy of the License at 6 * 7 * http://www.apache.org/licenses/LICENSE-2.0 8 * 9 * Unless required by applicable law or agreed to in writing, software 10 * distributed under the License is distributed on an "AS IS" BASIS, 11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 * See the License for the specific language governing permissions and 13 * limitations under the License. 14 */ 15 16#include <atomic> 17#include <climits> 18#include <dlfcn.h> 19#include <fcntl.h> 20#include <malloc.h> 21#include <string> 22#include <sys/time.h> 23#include <pthread.h> 24#include <sys/prctl.h> 25#include <unordered_map> 26#include <unordered_set> 27#include "dfx_regs_get.h" 28#include "c/executor_task.h" 29#include "common.h" 30#include "hook_common.h" 31#include "hook_socket_client.h" 32#include "logging.h" 33#include "musl_preinit_common.h" 34#include "parameter.h" 35#include "stack_writer.h" 36#include "runtime_stack_range.h" 37#include "get_thread_id.h" 38#include "hook_client.h" 39#include <sys/mman.h> 40#include "sampling.h" 41#include "hitrace/trace.h" 42 43using namespace OHOS::HiviewDFX; 44using namespace OHOS::Developtools::NativeDaemon; 45 46static pthread_key_t g_disableHookFlag; 47static pthread_key_t g_hookTid; 48static pthread_key_t g_updateThreadNameCount; 49namespace { 50static std::atomic<uint64_t> g_mallocTimes = 0; 51 52enum class MISC_TYPE : uint32_t { 53 JS_STACK_DATA = 1, 54}; 55 56#ifdef PERFORMANCE_DEBUG 57static std::atomic<uint64_t> g_timeCost = 0; 58static std::atomic<uint64_t> g_dataCounts = 0; 59constexpr int PRINT_INTERVAL = 5000; 60constexpr uint64_t S_TO_NS = 1000 * 1000 * 1000; 61#endif 62 63using OHOS::Developtools::NativeDaemon::buildArchType; 64static 
std::shared_ptr<HookSocketClient> g_hookClient {nullptr}; 65static Sampling g_sampler; 66std::recursive_timed_mutex g_ClientMutex; 67std::mutex g_tagMapMutex; 68std::atomic<const MallocDispatchType*> g_dispatch {nullptr}; 69constexpr int UPDATE_THEARD_NAME = 1000; 70static pid_t g_hookPid = 0; 71static ClientConfig g_ClientConfig = {0}; 72static uint32_t g_maxSize = INT_MAX; 73static std::unordered_map<std::string, uint32_t> g_memTagMap; 74constexpr int PID_STR_SIZE = 4; 75constexpr int STATUS_LINE_SIZE = 512; 76constexpr int PID_NAMESPACE_ID = 1; // 1: pid is 1 after pid namespace used 77constexpr int FD_PATH_LENGTH = 64; 78constexpr int MIN_SAMPLER_INTERVAL = 1; 79constexpr int FIRST_HASH = 16; 80constexpr int SECOND_HASH = 13; 81constexpr int THRESHOLD = 256; 82constexpr int DIVIDE_VAL = 64; 83//5: fp mode is used, response_library_mode maximum stack depth 84#if defined(__aarch64__) 85constexpr int RESPONSE_LIBRARY_MODE_DEPTH = 5; 86constexpr int TEMP_IP = 100; 87#endif 88static bool g_isPidChanged = false; 89static struct mallinfo2 g_miStart = {0}; 90std::vector<std::pair<uint64_t, uint64_t>> g_filterStaLibRange; 91constexpr int MAX_BITPOOL_SIZE = 1000 * 1024; 92struct Bitpool { 93 std::atomic<uint64_t> slot; 94}; 95Bitpool* g_addressChecker = nullptr; 96 97inline static uint32_t AddrHash(uint32_t h) 98{ 99 h ^= h >> FIRST_HASH; 100 h *= 0x85ebca6b; 101 h ^= h >> SECOND_HASH; 102 h *= 0xc2b2ae35; 103 h ^= h >> FIRST_HASH; 104 return h; 105} 106 107inline void Addr2Bitpool(void* addr) 108{ 109 if (!g_addressChecker) { 110 return; 111 } 112 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL); 113 g_addressChecker[val / DIVIDE_VAL].slot |= (0x1 << (val % DIVIDE_VAL)); 114} 115 116inline bool IsAddrExist(void* addr) 117{ 118 if (!g_addressChecker) { 119 return true; 120 } 121 uint32_t val = AddrHash(static_cast<uint32_t>(reinterpret_cast<uint64_t>(addr))) % (MAX_BITPOOL_SIZE * DIVIDE_VAL); 122 if 
(g_addressChecker[val / DIVIDE_VAL].slot.load() & (0x1 << (val % DIVIDE_VAL))) { 123 return true; 124 } 125 return false; 126} 127 128const MallocDispatchType* GetDispatch() 129{ 130 return g_dispatch.load(std::memory_order_relaxed); 131} 132 133bool InititalizeIPC() 134{ 135 return true; 136} 137void FinalizeIPC() {} 138 139int ConvertPid(char* buf, size_t len) 140{ 141 UNUSED_PARAMETER(len); 142 int count = 0; 143 char pidBuf[11] = {0}; /* 11: 32 bits to the maximum length of a string */ 144 char *str = buf; 145 while (*str != '\0') { 146 if ((*str >= '0') && (*str <= '9') && (static_cast<unsigned long>(count) < sizeof(pidBuf) - 1)) { 147 pidBuf[count] = *str; 148 count++; 149 str++; 150 continue; 151 } 152 153 if (count > 0) { 154 break; 155 } 156 str++; 157 } 158 return atoi(pidBuf); 159} 160 161pid_t GetRealPid(void) 162{ 163 const char *path = "/proc/self/status"; 164 char buf[STATUS_LINE_SIZE] = {0}; 165 FILE *fp = fopen(path, "r"); 166 CHECK_NOTNULL(fp, -1, "fopen fail"); 167 while (!feof(fp)) { 168 if (fgets(buf, STATUS_LINE_SIZE, fp) == nullptr) { 169 fclose(fp); 170 return -1; 171 } 172 if (strncmp(buf, "Pid:", PID_STR_SIZE) == 0) { 173 break; 174 } 175 } 176 (void)fclose(fp); 177 return static_cast<pid_t>(ConvertPid(buf, sizeof(buf))); 178} 179} // namespace 180 181pid_t inline __attribute__((always_inline)) GetCurThreadId() 182{ 183 if (pthread_getspecific(g_hookTid) == nullptr) { 184 pthread_setspecific(g_hookTid, reinterpret_cast<void *>(GetThreadId())); 185 } 186 return reinterpret_cast<long>((pthread_getspecific(g_hookTid))); 187} 188 189bool inline __attribute__((always_inline)) UpdateThreadName(std::shared_ptr<HookSocketClient>& client) 190{ 191 long updateCount = reinterpret_cast<long>(pthread_getspecific(g_updateThreadNameCount)); 192 bool ret = true; 193 if (updateCount == 0) { 194 StackRawData tnameData = {{{{0}}}}; 195 tnameData.tid = static_cast<uint32_t>(GetCurThreadId()); 196 tnameData.type = THREAD_NAME_MSG; 197 prctl(PR_GET_NAME, 
tnameData.name); 198 ret = client->SendStackWithPayload(&tnameData, 199 sizeof(BaseStackRawData) + strlen(tnameData.name) + 1, nullptr, 0); 200 if (!ret) { 201 return ret; 202 } 203 } 204 pthread_setspecific(g_updateThreadNameCount, 205 reinterpret_cast<void *>(updateCount == UPDATE_THEARD_NAME ? 0 : updateCount + 1)); 206 return ret; 207} 208 209uint32_t inline __attribute__((always_inline)) GetTagId(std::shared_ptr<HookSocketClient>& client, const char* tagName) 210{ 211 if (tagName == nullptr || strlen(tagName) > PATH_MAX) { 212 return 0; 213 } 214 uint32_t tagId = 0; 215 bool isNewTag = false; 216 std::unique_lock<std::mutex> lock(g_tagMapMutex); 217 auto it = g_memTagMap.find(tagName); 218 if (it == g_memTagMap.end()) { 219 isNewTag = true; 220 tagId = g_memTagMap.size() + 1; 221 g_memTagMap[tagName] = tagId; 222 } else { 223 tagId = it->second; 224 } 225 lock.unlock(); 226 if (isNewTag) { 227 StackRawData tagData = {{{{0}}}}; 228 tagData.type = MEMORY_TAG; 229 tagData.tagId = tagId; 230 strcpy_s(tagData.name, PATH_MAX + 1, tagName); 231 if (client != nullptr) { 232 client->SendStackWithPayload(&tagData, sizeof(BaseStackRawData) + strlen(tagName) + 1, nullptr, 0); 233 } 234 } 235 return tagId; 236} 237 238static bool IsPidChanged(void); 239 240void* MallocHookStart(void* disableHookCallback) 241{ 242 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex); 243 g_addressChecker = new Bitpool [MAX_BITPOOL_SIZE] {{0}}; 244 g_miStart = mallinfo2(); 245 COMMON::PrintMallinfoLog("before hook(byte) => ", g_miStart); 246 g_mallocTimes = 0; 247 g_hookClient.reset(); 248 if (g_hookClient != nullptr) { 249 return nullptr; 250 } else { 251 g_ClientConfig.Reset(); 252 g_sampler.Reset(); 253 g_hookClient = std::make_shared<HookSocketClient>(g_hookPid, &g_ClientConfig, &g_sampler, 254 reinterpret_cast<void (*)()>(disableHookCallback)); 255 } 256 return nullptr; 257} 258 259bool ohos_malloc_hook_on_start(void (*disableHookCallback)()) 260{ 261 pthread_t threadStart; 
262 if (pthread_create(&threadStart, nullptr, MallocHookStart, 263 reinterpret_cast<void *>(disableHookCallback))) { 264 return false; 265 } 266 pthread_detach(threadStart); 267 g_hookPid = GetRealPid(); 268 pthread_key_create(&g_disableHookFlag, nullptr); 269 pthread_setspecific(g_disableHookFlag, nullptr); 270 pthread_key_create(&g_hookTid, nullptr); 271 pthread_setspecific(g_hookTid, nullptr); 272 pthread_key_create(&g_updateThreadNameCount, nullptr); 273 pthread_setspecific(g_updateThreadNameCount, reinterpret_cast<void *>(0)); 274 GetMainThreadRuntimeStackRange(g_filterStaLibRange); 275 constexpr int paramBufferLen = 128; 276 char paramOutBuf[paramBufferLen] = {0}; 277 int ret = GetParameter("persist.hiviewdfx.profiler.mem.filter", "", paramOutBuf, paramBufferLen); 278 if (ret > 0) { 279 int min = 0; 280 int max = 0; 281 if (sscanf_s(paramOutBuf, "%d,%d", &min, &max) == 2) { // 2: two parameters. 282 g_maxSize = max > 0 ? static_cast<uint32_t>(max) : INT_MAX; 283 g_ClientConfig.filterSize = min > 0 ? 
min : 0; 284 } 285 } 286 return true; 287} 288 289void* ohos_release_on_end(void*) 290{ 291 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex); 292 delete [] g_addressChecker; 293 g_addressChecker = nullptr; 294 g_hookClient = nullptr; 295 pthread_key_delete(g_disableHookFlag); 296 pthread_key_delete(g_hookTid); 297 pthread_key_delete(g_updateThreadNameCount); 298 g_ClientConfig.Reset(); 299 return nullptr; 300} 301 302bool ohos_malloc_hook_on_end(void) 303{ 304 { 305 std::lock_guard<std::recursive_timed_mutex> guard(g_ClientMutex); 306 if (g_hookClient != nullptr) { 307 if (g_hookClient->GetNmdType() == 1) { 308 g_hookClient->SendNmdInfo(); 309 } 310 g_hookClient->SendEndMsg(); 311 g_hookClient->Flush(); 312 } 313 } 314 pthread_t threadEnd; 315 if (pthread_create(&threadEnd, nullptr, ohos_release_on_end, nullptr)) { 316 return false; 317 } 318 pthread_detach(threadEnd); 319 return true; 320} 321 322bool FilterStandardSoIp(uint64_t ip) 323{ 324 for (auto [soBegin, soEnd_]: g_filterStaLibRange) { 325 if (ip >= soBegin && ip < soEnd_) { 326 return true; 327 } 328 } 329 return false; 330} 331 332#if defined(__aarch64__) 333static int inline __attribute__((always_inline)) FpUnwind(int maxDepth, uint64_t* ip, int stackSize, 334 const char* startPtr, const char* endPtr) 335{ 336 void** startfp = (void**)__builtin_frame_address(0); 337 void** fp = startfp; 338 int depth = 0; 339 int count = 0; 340 uint64_t tempIp = 0; 341 while (depth < maxDepth) { 342 if (fp < (void**)startPtr || (fp + 1) >= (void**)endPtr) { 343 break; 344 } 345 void** nextFp = (void**)*fp; 346 if (nextFp <= fp) { 347 break; 348 } 349 if (((nextFp - startfp) * sizeof(void*)) > static_cast<unsigned long>(stackSize)) { 350 break; 351 } 352 fp = nextFp; 353 tempIp = *(reinterpret_cast<unsigned long*>(fp + 1)); 354 if (tempIp <= TEMP_IP) { 355 break; 356 } 357 if (g_ClientConfig.responseLibraryMode) { 358 if (++count >= RESPONSE_LIBRARY_MODE_DEPTH || !FilterStandardSoIp(tempIp)) { 359 break; 
360 } 361 } else { 362 ip[depth++] = tempIp; 363 } 364 } 365 if (g_ClientConfig.responseLibraryMode) { 366 ip[0] = tempIp; 367 depth = 1; 368 } 369 return depth; 370} 371 372uint64_t getJsChainId() 373{ 374 if (g_ClientConfig.arktsConfig.jsStackReport > 0) { 375 OHOS::HiviewDFX::HiTraceId hitraceId = OHOS::HiviewDFX::HiTraceChain::GetId(); 376 if (hitraceId.IsValid()) { 377 return hitraceId.GetChainId(); 378 } 379 } 380 return 0; 381} 382#endif 383 384void* hook_malloc(void* (*fn)(size_t), size_t size) 385{ 386 void* ret = nullptr; 387 if (fn) { 388 ret = fn(size); 389 } 390 if (g_ClientConfig.mallocDisable || IsPidChanged()) { 391 return ret; 392 } 393 if (!ohos_set_filter_size(size, ret)) { 394 return ret; 395 } 396 397#ifdef PERFORMANCE_DEBUG 398 struct timespec start = {}; 399 clock_gettime(CLOCK_REALTIME, &start); 400#endif 401 402 if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) { //0 not sampling 403#ifdef PERFORMANCE_DEBUG 404 g_mallocTimes++; 405 struct timespec end = {}; 406 clock_gettime(CLOCK_REALTIME, &end); 407 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 408 if (g_mallocTimes % PRINT_INTERVAL == 0) { 409 PROFILER_LOG_ERROR(LOG_CORE, 410 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 411 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 412 } 413#endif 414 return ret; 415 } 416 417 std::weak_ptr<HookSocketClient> weakClient = g_hookClient; 418 auto holder = weakClient.lock(); 419 if (holder == nullptr) { 420 return ret; 421 } 422 if (!UpdateThreadName(holder)) { 423 return ret; 424 } 425 StackRawData rawdata = {{{{0}}}}; 426 const char* stackptr = nullptr; 427 const char* stackendptr = nullptr; 428 int stackSize = 0; 429 int fpStackDepth = 0; 430 clock_gettime(g_ClientConfig.clockId, &rawdata.ts); 431 432 if (g_ClientConfig.fpunwind) { 433#ifdef 
__aarch64__ 434 void* stackAddr = nullptr; 435 size_t coroutineStackSize = 0; 436 if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) { 437 stackSize = static_cast<int>(coroutineStackSize); 438 stackptr = reinterpret_cast<const char*>(stackAddr); 439 stackendptr = stackptr + coroutineStackSize; 440 } else { 441 stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0)); 442 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 443 stackSize = stackendptr - stackptr; 444 } 445 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr); 446 stackSize = 0; 447 rawdata.jsChainId = getJsChainId(); 448#endif 449 } else { 450 unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs)); 451 GetLocalRegs(regs); 452 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]); 453 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 454 stackSize = stackendptr - stackptr; 455 if (stackendptr == nullptr) { 456 stackSize = 0; 457 } 458 } 459 rawdata.type = MALLOC_MSG; 460 rawdata.pid = static_cast<uint32_t>(g_hookPid); 461 rawdata.tid = static_cast<uint32_t>(GetCurThreadId()); 462 rawdata.mallocSize = size; 463 rawdata.addr = ret; 464 if (g_ClientConfig.sampleInterval >= THRESHOLD) { 465 Addr2Bitpool(ret); 466 } 467 int realSize = 0; 468 if (g_ClientConfig.fpunwind) { 469 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t)); 470 } else { 471 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs); 472 } 473 holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize); 474 g_mallocTimes++; 475#ifdef PERFORMANCE_DEBUG 476 struct timespec end = {}; 477 clock_gettime(CLOCK_REALTIME, &end); 478 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 479 g_dataCounts += stackSize; 480 if (g_mallocTimes % PRINT_INTERVAL == 0) { 481 
PROFILER_LOG_ERROR(LOG_CORE, 482 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 483 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 484 } 485#endif 486 return ret; 487} 488 489void* hook_aligned_alloc(void* (*fn)(size_t, size_t), size_t align, size_t len) 490{ 491 void* ret = nullptr; 492 if (fn) { 493 ret = fn(align, len); 494 } 495 if (g_ClientConfig.mallocDisable || IsPidChanged()) { 496 return ret; 497 } 498 if (!ohos_set_filter_size(len, ret)) { 499 return ret; 500 } 501 502#ifdef PERFORMANCE_DEBUG 503 struct timespec start = {}; 504 clock_gettime(CLOCK_REALTIME, &start); 505#endif 506 507 if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(len) == 0) { //0 not sampling 508#ifdef PERFORMANCE_DEBUG 509 g_mallocTimes++; 510 struct timespec end = {}; 511 clock_gettime(CLOCK_REALTIME, &end); 512 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 513 if (g_mallocTimes % PRINT_INTERVAL == 0) { 514 PROFILER_LOG_ERROR(LOG_CORE, 515 "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 516 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 517 } 518#endif 519 return ret; 520 } 521 522 std::weak_ptr<HookSocketClient> weakClient = g_hookClient; 523 auto holder = weakClient.lock(); 524 if (holder == nullptr) { 525 return ret; 526 } 527 if (!UpdateThreadName(holder)) { 528 return ret; 529 } 530 StackRawData rawdata = {{{{0}}}}; 531 const char* stackptr = nullptr; 532 const char* stackendptr = nullptr; 533 int stackSize = 0; 534 int fpStackDepth = 0; 535 clock_gettime(g_ClientConfig.clockId, &rawdata.ts); 536 537 if (g_ClientConfig.fpunwind) { 538#ifdef __aarch64__ 539 void* stackAddr = nullptr; 540 size_t coroutineStackSize = 0; 541 if (ffrt_get_current_coroutine_stack(&stackAddr, 
&coroutineStackSize)) { 542 stackSize = static_cast<int>(coroutineStackSize); 543 stackptr = reinterpret_cast<const char*>(stackAddr); 544 stackendptr = stackptr + coroutineStackSize; 545 } else { 546 stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0)); 547 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 548 stackSize = stackendptr - stackptr; 549 } 550 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr); 551 stackSize = 0; 552 rawdata.jsChainId = getJsChainId(); 553#endif 554 } else { 555 unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs)); 556 GetLocalRegs(regs); 557 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]); 558 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 559 stackSize = stackendptr - stackptr; 560 if (stackendptr == nullptr) { 561 stackSize = 0; 562 } 563 } 564 rawdata.type = MALLOC_MSG; 565 rawdata.pid = static_cast<uint32_t>(g_hookPid); 566 rawdata.tid = static_cast<uint32_t>(GetCurThreadId()); 567 rawdata.mallocSize = len; 568 rawdata.addr = ret; 569 if (g_ClientConfig.sampleInterval >= THRESHOLD) { 570 Addr2Bitpool(ret); 571 } 572 int realSize = 0; 573 if (g_ClientConfig.fpunwind) { 574 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t)); 575 } else { 576 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs); 577 } 578 holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize); 579 g_mallocTimes++; 580#ifdef PERFORMANCE_DEBUG 581 struct timespec end = {}; 582 clock_gettime(CLOCK_REALTIME, &end); 583 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 584 g_dataCounts += stackSize; 585 if (g_mallocTimes % PRINT_INTERVAL == 0) { 586 PROFILER_LOG_ERROR(LOG_CORE, 587 "g_aligned_allocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 588 
g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 589 } 590#endif 591 return ret; 592} 593 594void* hook_valloc(void* (*fn)(size_t), size_t size) 595{ 596 void* pRet = nullptr; 597 if (fn) { 598 pRet = fn(size); 599 } 600 return pRet; 601} 602 603void* hook_calloc(void* (*fn)(size_t, size_t), size_t number, size_t size) 604{ 605 void* pRet = nullptr; 606 if (fn) { 607 pRet = fn(number, size); 608 } 609 if (g_ClientConfig.mallocDisable || IsPidChanged()) { 610 return pRet; 611 } 612 if (!ohos_set_filter_size(number * size, pRet)) { 613 return pRet; 614 } 615 616#ifdef PERFORMANCE_DEBUG 617 struct timespec start = {}; 618 clock_gettime(CLOCK_REALTIME, &start); 619#endif 620 621 if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size * number) == 0) { 622#ifdef PERFORMANCE_DEBUG 623 g_mallocTimes++; 624 struct timespec end = {}; 625 clock_gettime(CLOCK_REALTIME, &end); 626 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 627 if (g_mallocTimes % PRINT_INTERVAL == 0) { 628 PROFILER_LOG_ERROR(LOG_CORE, 629 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 630 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 631 } 632#endif 633 return pRet; 634 } 635 StackRawData rawdata = {{{{0}}}}; 636 const char* stackptr = nullptr; 637 const char* stackendptr = nullptr; 638 int stackSize = 0; 639 int fpStackDepth = 0; 640 clock_gettime(g_ClientConfig.clockId, &rawdata.ts); 641 642 if (g_ClientConfig.fpunwind) { 643#ifdef __aarch64__ 644 void* stackAddr = nullptr; 645 size_t coroutineStackSize = 0; 646 if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) { 647 stackSize = static_cast<int>(coroutineStackSize); 648 stackptr = reinterpret_cast<const char*>(stackAddr); 649 stackendptr = stackptr + coroutineStackSize; 650 } else { 651 
stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0)); 652 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 653 stackSize = stackendptr - stackptr; 654 } 655 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr); 656 stackSize = 0; 657 rawdata.jsChainId = getJsChainId(); 658#endif 659 } else { 660 unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs)); 661 GetLocalRegs(regs); 662 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]); 663 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 664 stackSize = stackendptr - stackptr; 665 if (stackendptr == nullptr) { 666 stackSize = 0; 667 } 668 } 669 670 rawdata.type = MALLOC_MSG; 671 rawdata.pid = static_cast<uint32_t>(g_hookPid); 672 rawdata.tid = static_cast<uint32_t>(GetCurThreadId()); 673 rawdata.mallocSize = number * size; 674 rawdata.addr = pRet; 675 if (g_ClientConfig.sampleInterval >= THRESHOLD) { 676 Addr2Bitpool(pRet); 677 } 678 std::weak_ptr<HookSocketClient> weakClient = g_hookClient; 679 auto holder = weakClient.lock(); 680 if (holder != nullptr) { 681 int realSize = 0; 682 if (g_ClientConfig.fpunwind) { 683 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t)); 684 } else { 685 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs); 686 } 687 holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize); 688 } 689 g_mallocTimes++; 690#ifdef PERFORMANCE_DEBUG 691 struct timespec end = {}; 692 clock_gettime(CLOCK_REALTIME, &end); 693 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 694 if (g_mallocTimes % PRINT_INTERVAL == 0) { 695 PROFILER_LOG_ERROR(LOG_CORE, 696 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 697 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / 
g_mallocTimes.load()); 698 } 699#endif 700 return pRet; 701} 702 703void* hook_memalign(void* (*fn)(size_t, size_t), size_t align, size_t bytes) 704{ 705 void* pRet = nullptr; 706 if (fn) { 707 pRet = fn(align, bytes); 708 } 709 return pRet; 710} 711 712void* hook_realloc(void* (*fn)(void*, size_t), void* ptr, size_t size) 713{ 714 void* pRet = nullptr; 715 if (fn) { 716 pRet = fn(ptr, size); 717 } 718 if (g_ClientConfig.mallocDisable || IsPidChanged()) { 719 return pRet; 720 } 721 if (!ohos_set_filter_size(size, pRet)) { 722 return pRet; 723 } 724 725#ifdef PERFORMANCE_DEBUG 726 struct timespec start = {}; 727 clock_gettime(CLOCK_REALTIME, &start); 728#endif 729 730 if (g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL && g_sampler.StartSampling(size) == 0) { 731#ifdef PERFORMANCE_DEBUG 732 g_mallocTimes++; 733 struct timespec end = {}; 734 clock_gettime(CLOCK_REALTIME, &end); 735 g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec); 736 if (g_mallocTimes % PRINT_INTERVAL == 0) { 737 PROFILER_LOG_ERROR(LOG_CORE, 738 "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n", 739 g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load()); 740 } 741#endif 742 return pRet; 743 } 744 StackRawData rawdata = {{{{0}}}}; 745 StackRawData freeData = {{{{0}}}}; 746 const char* stackptr = nullptr; 747 const char* stackendptr = nullptr; 748 int stackSize = 0; 749 int fpStackDepth = 0; 750 clock_gettime(g_ClientConfig.clockId, &rawdata.ts); 751 752 if (g_ClientConfig.fpunwind) { 753#ifdef __aarch64__ 754 void* stackAddr = nullptr; 755 size_t coroutineStackSize = 0; 756 if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) { 757 stackSize = static_cast<int>(coroutineStackSize); 758 stackptr = reinterpret_cast<const char*>(stackAddr); 759 stackendptr = stackptr + coroutineStackSize; 760 } else { 761 stackptr = reinterpret_cast<const 
char*>(__builtin_frame_address(0)); 762 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 763 stackSize = stackendptr - stackptr; 764 } 765 fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr); 766 stackSize = 0; 767 if (g_ClientConfig.freeStackData) { 768 (void)memcpy_s(freeData.ip, sizeof(freeData.ip) / sizeof(uint64_t), 769 rawdata.ip, sizeof(rawdata.ip) / sizeof(uint64_t)); 770 } 771#endif 772 } else { 773 unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs)); 774 GetLocalRegs(regs); 775 stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]); 776 GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer 777 stackSize = stackendptr - stackptr; 778 if (stackendptr == nullptr) { 779 stackSize = 0; 780 } 781 if (g_ClientConfig.freeStackData) { 782 (void)memcpy_s(freeData.regs, sizeof(freeData.regs) / sizeof(char), 783 rawdata.regs, sizeof(rawdata.regs) / sizeof(char)); 784 } 785 } 786 787 rawdata.type = MALLOC_MSG; 788 rawdata.pid = static_cast<uint32_t>(g_hookPid); 789 rawdata.tid = static_cast<uint32_t>(GetCurThreadId()); 790 rawdata.mallocSize = size; 791 rawdata.addr = pRet; 792 if (g_ClientConfig.sampleInterval >= THRESHOLD) { 793 Addr2Bitpool(pRet); 794 } 795 std::weak_ptr<HookSocketClient> weakClient = g_hookClient; 796 auto holder = weakClient.lock(); 797 if (holder != nullptr) { 798 int realSize = 0; 799 int freeRealSize = 0; 800 freeData.type = FREE_MSG; 801 freeData.pid = rawdata.pid; 802 freeData.tid = rawdata.tid; 803 freeData.mallocSize = 0; 804 freeData.addr = ptr; 805 freeData.ts = rawdata.ts; 806 if (g_ClientConfig.fpunwind) { 807 realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t)); 808 freeRealSize = sizeof(BaseStackRawData); 809 } else { 810 realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs); 811 freeRealSize = realSize; 812 } 813 
        holder->SendStackWithPayload(&freeData, freeRealSize, nullptr, 0); // 0: Don't unwind the freeData
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    // Accumulate the per-call hook cost and print an aggregate line every PRINT_INTERVAL calls.
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return pRet;
}

// malloc_usable_size() interceptor: forwards to the real implementation.
// Returns 0 when the dispatch function pointer is null.
size_t hook_malloc_usable_size(size_t (*fn)(void*), void* ptr)
{
    size_t ret = 0;
    if (fn) {
        ret = fn(ptr);
    }

    return ret;
}

// free() interceptor. Always performs the real free via free_func; additionally
// reports the event to the profiler daemon unless hooking is disabled or the
// process id changed (IsPidChanged). In statistics mode only the freed address
// is sent; otherwise a FREE_MSG record is sent with an optional call stack,
// captured either by fp unwind (aarch64) or by saving registers plus the raw
// stack bytes for offline unwinding.
void hook_free(void (*free_func)(void*), void* p)
{
    if (g_ClientConfig.statisticsInterval > 0) {
        // Statistics mode: report only the freed pointer, no stack payload.
        if (!free_func) {
            return;
        }
        if (g_ClientConfig.mallocDisable || IsPidChanged()) {
            free_func(p);
            return;
        }
        std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
        auto holder = weakClient.lock();
        if ((holder != nullptr) && p) {
            // NOTE(review): sends sizeof(uint64_t) bytes starting at &p; on a
            // 32-bit build this reads 4 bytes past the pointer — confirm the
            // receiver expects a 64-bit address slot.
            holder->SendStackWithPayload(&p, sizeof(uint64_t), nullptr, 0);
        }
        free_func(p);
#ifdef PERFORMANCE_DEBUG
        // NOTE(review): 'start' is not declared on this early path (it is only
        // declared further below); this branch likely fails to compile when
        // PERFORMANCE_DEBUG is defined — verify.
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return;
    }
    // Record the free timestamp before running the real free.
    struct timespec freeTime = {};
    clock_gettime(g_ClientConfig.clockId, &freeTime);
    if (free_func) {
        free_func(p);
    }
    if (g_ClientConfig.mallocDisable || IsPidChanged()) {
        return;
    }
    if (g_ClientConfig.sampleInterval >= THRESHOLD) {
        // Large sampling interval: only report frees whose address was recorded
        // in the allocation-side bitpool filter (see IsAddrExist).
        if (!IsAddrExist(p)) {
            return;
        }
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    rawdata.ts = freeTime;

    if (g_ClientConfig.freeStackData) {
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            // Prefer the ffrt coroutine stack when running on one; otherwise
            // unwind the native thread stack from the current frame address.
            void* stackAddr = nullptr;
            size_t coroutineStackSize = 0;
            if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
                stackSize = static_cast<int>(coroutineStackSize);
                stackptr = reinterpret_cast<const char*>(stackAddr);
                stackendptr = stackptr + coroutineStackSize;
            } else {
                stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
                GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
                stackSize = stackendptr - stackptr;
            }
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
            stackSize = 0; // frames already captured in rawdata.ip; no raw stack payload
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            // Register path: capture registers plus the raw stack bytes so the
            // daemon can unwind offline.
            unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
            GetLocalRegs(regs);
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            if (stackendptr == nullptr) {
                stackSize = 0;
            }
        }
    }

    rawdata.type = FREE_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = 0;
    rawdata.addr = p;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder != nullptr) {
        // Trim the record to the populated portion: fp frames or register set.
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
}

// Sends an MMAP_FILE_TYPE record carrying the mapped file's path for a
// file-backed mmap, together with the offset and the PROT_EXEC/MAP_FIXED bits.
// Silently drops the record if the path cannot be copied into the record.
inline void SendMmapFileRawData(int prot, int flags, off_t offset, const std::string& filePath,
    const StackRawData& rawdata, std::shared_ptr<HookSocketClient>& holder)
{
    StackRawData curRawdata = {{{{0}}}};
    curRawdata.addr = rawdata.addr;
    curRawdata.pid = static_cast<uint32_t>(g_hookPid);
    curRawdata.mallocSize = rawdata.mallocSize;
    curRawdata.mmapArgs.offset = offset;
    curRawdata.type = OHOS::Developtools::NativeDaemon::MMAP_FILE_TYPE;
    if (prot & PROT_EXEC) {
        curRawdata.mmapArgs.flags |= PROT_EXEC;
    }
    size_t len = strlen(filePath.c_str()) + 1; // include the terminating NUL
    // assumes curRawdata.name holds at least PATH_MAX + 1 bytes — TODO confirm
    if (strncpy_s(curRawdata.name, PATH_MAX + 1, filePath.c_str(), len) != EOK) {
        return;
    }
    if (flags & MAP_FIXED) {
        curRawdata.mmapArgs.flags |= MAP_FIXED;
    }
    holder->SendStackWithPayload(&curRawdata, sizeof(BaseStackRawData) + len, nullptr, 0);
}

// mmap() interceptor. Performs the real mmap, then reports an MMAP_MSG (or
// MMAP_FILE_PAGE_MSG for file-backed mappings, including the resolved file
// name) with an optional call stack. Anonymous mappings may be skipped by the
// sampler when a sampling interval is configured.
void* hook_mmap(void*(*fn)(void*, size_t, int, int, int, off_t),
    void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    void* ret = nullptr;
    if (fn) {
        ret = fn(addr, length, prot, flags, fd,
            offset);
    }
    if (g_ClientConfig.mmapDisable || IsPidChanged()) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    // Anonymous mapping (no fd, zero offset): let the sampler decide whether
    // this event is recorded; a zero return from StartSampling means "skip".
    if ((fd < 0 && offset == 0) && g_ClientConfig.sampleInterval > MIN_SAMPLER_INTERVAL
        && g_sampler.StartSampling(length) == 0) {
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
        return ret;
    }
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int stackSize = 0;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
        // Prefer the ffrt coroutine stack when running on one; otherwise
        // unwind the native thread stack from the current frame address.
        void* stackAddr = nullptr;
        size_t coroutineStackSize = 0;
        if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
            stackSize = static_cast<int>(coroutineStackSize);
            stackptr = reinterpret_cast<const char*>(stackAddr);
            stackendptr = stackptr + coroutineStackSize;
        } else {
            stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
        }
        fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
        stackSize = 0; // frames already captured in rawdata.ip; no raw stack payload
        rawdata.jsChainId = getJsChainId();
#endif
    } else {
        // Register path: capture registers plus the raw stack bytes so the
        // daemon can unwind offline.
        unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
        GetLocalRegs(regs);
        stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
        GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
        stackSize = stackendptr - stackptr;
        if (stackendptr == nullptr) {
            stackSize = 0;
        }
    }

    rawdata.type = MMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = ret;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder == nullptr) {
        return ret;
    }
    if (fd >= 0) {
        // File-backed mapping: resolve the file name behind the fd via
        // /proc/self/fd, report it, and tag the record with the base name.
        rawdata.type = MMAP_FILE_PAGE_MSG;
        char path[FD_PATH_LENGTH] = {0};
        char fileName[PATH_MAX + 1] = {0};
        (void)snprintf_s(path, FD_PATH_LENGTH, FD_PATH_LENGTH - 1, "/proc/self/fd/%d", fd);
        ssize_t len = readlink(path, fileName, sizeof(fileName) - 1);
        if (len != -1) {
            fileName[len] = '\0';
            SendMmapFileRawData(prot, flags, offset, fileName, rawdata, holder);
            char* p = strrchr(fileName, '/');
            if (p != nullptr) {
                rawdata.tagId = GetTagId(holder, &fileName[p - fileName + 1]);
            } else {
                rawdata.tagId = GetTagId(holder, fileName);
            }
        }
    }
    if (!UpdateThreadName(holder)) {
        return ret;
    }
    // Trim the record to the populated portion: fp frames or register set.
    int realSize = 0;
    if (g_ClientConfig.fpunwind) {
        realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
    } else {
        realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
    }
    holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}

// munmap() interceptor. Performs the real munmap and reports a MUNMAP_MSG with
// an optional call stack (per the munmapStackData / fpunwind configuration).
int hook_munmap(int(*fn)(void*, size_t), void* addr, size_t length)
{
    int ret = -1;
    // Timestamp is taken before the real munmap runs.
    struct timespec unmapTime = {};
    clock_gettime(g_ClientConfig.clockId, &unmapTime);
    if (fn) {
        ret = fn(addr, length);
    }
    if (g_ClientConfig.mmapDisable || IsPidChanged()) {
        return ret;
    }

#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif

    int stackSize = 0;
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int fpStackDepth = 0;
    rawdata.ts = unmapTime;
    if (g_ClientConfig.munmapStackData) {
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            // Prefer the ffrt coroutine stack when running on one; otherwise
            // unwind the native thread stack from the current frame address.
            void* stackAddr = nullptr;
            size_t coroutineStackSize = 0;
            if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
                stackSize = static_cast<int>(coroutineStackSize);
                stackptr = reinterpret_cast<const char*>(stackAddr);
                stackendptr = stackptr + coroutineStackSize;
            } else {
                stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
                GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
                stackSize = stackendptr - stackptr;
            }
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
            stackSize = 0;
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
            GetLocalRegs(regs);
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid,
                GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            if (stackendptr == nullptr) {
                stackSize = 0;
            }
        }
    }

    rawdata.type = MUNMAP_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = length;
    rawdata.addr = addr;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    if (holder != nullptr) {
        // Trim the record to the populated portion: fp frames or register set.
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
    return ret;
}

// prctl() interceptor. Only PR_SET_VMA / PR_SET_VMA_ANON_NAME calls are
// reported: the anonymous-VMA name (arg5), range start (arg3) and size (arg4)
// are sent as a PR_SET_VMA_MSG so the daemon can label anonymous mappings.
int hook_prctl(int(*fn)(int, ...),
    int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5)
{
    int ret = -1;
    if (fn) {
        ret = fn(option, arg2, arg3, arg4, arg5);
    }
    // arg5 carries the VMA name pointer; nothing to report without it.
    if (reinterpret_cast<char*>(arg5) == nullptr || IsPidChanged()) {
        return ret;
    }
    if (option == PR_SET_VMA && arg2 == PR_SET_VMA_ANON_NAME) {
#ifdef PERFORMANCE_DEBUG
        struct timespec start = {};
        clock_gettime(CLOCK_REALTIME, &start);
#endif
        StackRawData rawdata = {{{{0}}}};
        clock_gettime(g_ClientConfig.clockId, &rawdata.ts);
        rawdata.type = PR_SET_VMA_MSG;
        rawdata.pid = static_cast<uint32_t>(g_hookPid);
        rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
        rawdata.mallocSize = arg4;
        rawdata.addr = reinterpret_cast<void*>(arg3);
        size_t tagLen = strlen(reinterpret_cast<char*>(arg5)) + 1; // include the terminating NUL
        if (memcpy_s(rawdata.name, sizeof(rawdata.name), reinterpret_cast<char*>(arg5), tagLen) != EOK) {
            HILOG_BASE_ERROR(LOG_CORE, "memcpy_s tag failed");
        }
        rawdata.name[sizeof(rawdata.name) - 1] = '\0'; // force termination even on truncation
        std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
        auto holder = weakClient.lock();
        if (holder != nullptr) {
            holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData) + tagLen, nullptr, 0);
        }
#ifdef PERFORMANCE_DEBUG
        g_mallocTimes++;
        struct timespec end = {};
        clock_gettime(CLOCK_REALTIME, &end);
        g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
        if (g_mallocTimes % PRINT_INTERVAL == 0) {
            PROFILER_LOG_ERROR(LOG_CORE,
                "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
                g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
        }
#endif
    }
    return ret;
}

// memtrace interceptor: reports an explicitly tracked memory range as
// MEMORY_USING_MSG (claimed, with optional call stack and a tag resolved from
// 'tag') or MEMORY_UNUSING_MSG (released, no stack).
void hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
{
    if (!g_ClientConfig.memtraceEnable || IsPidChanged()) {
        return;
    }
#ifdef PERFORMANCE_DEBUG
    struct timespec start = {};
    clock_gettime(CLOCK_REALTIME, &start);
#endif
    int stackSize = 0;
    StackRawData rawdata = {{{{0}}}};
    const char* stackptr = nullptr;
    const char* stackendptr = nullptr;
    int fpStackDepth = 0;
    clock_gettime(g_ClientConfig.clockId, &rawdata.ts);

    if (isUsing) {
        // Only "using" events carry a call stack.
        if (g_ClientConfig.fpunwind) {
#ifdef __aarch64__
            // Prefer the ffrt coroutine stack when running on one; otherwise
            // unwind the native thread stack from the current frame address.
            void* stackAddr = nullptr;
            size_t coroutineStackSize = 0;
            if (ffrt_get_current_coroutine_stack(&stackAddr, &coroutineStackSize)) {
                stackSize = static_cast<int>(coroutineStackSize);
                stackptr = reinterpret_cast<const char*>(stackAddr);
                stackendptr = stackptr + coroutineStackSize;
            } else {
                stackptr = reinterpret_cast<const char*>(__builtin_frame_address(0));
                GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
                stackSize = stackendptr - stackptr;
            }
            fpStackDepth = FpUnwind(g_ClientConfig.maxStackDepth, rawdata.ip, stackSize, stackptr, stackendptr);
            stackSize = 0;
            rawdata.jsChainId = getJsChainId();
#endif
        } else {
            unsigned long* regs = reinterpret_cast<unsigned long*>(&(rawdata.regs));
            GetLocalRegs(regs);
            stackptr = reinterpret_cast<const char*>(regs[RegisterGetSP(buildArchType)]);
            GetRuntimeStackEnd(stackptr, &stackendptr, g_hookPid, GetCurThreadId()); // stack end pointer
            stackSize = stackendptr - stackptr;
            if (stackendptr == nullptr) {
                stackSize = 0;
            }
        }
    }
    rawdata.type = isUsing ? MEMORY_USING_MSG : MEMORY_UNUSING_MSG;
    rawdata.pid = static_cast<uint32_t>(g_hookPid);
    rawdata.tid = static_cast<uint32_t>(GetCurThreadId());
    rawdata.mallocSize = size;
    rawdata.addr = addr;
    std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
    auto holder = weakClient.lock();
    rawdata.tagId = isUsing ?
        GetTagId(holder, tag) : 0;
    if (holder != nullptr) {
        // Trim the record to the populated portion: fp frames or register set.
        int realSize = 0;
        if (g_ClientConfig.fpunwind) {
            realSize = sizeof(BaseStackRawData) + (fpStackDepth * sizeof(uint64_t));
        } else {
            realSize = sizeof(BaseStackRawData) + sizeof(rawdata.regs);
        }
        holder->SendStackWithPayload(&rawdata, realSize, stackptr, stackSize);
    }
#ifdef PERFORMANCE_DEBUG
    g_mallocTimes++;
    struct timespec end = {};
    clock_gettime(CLOCK_REALTIME, &end);
    g_timeCost += (end.tv_sec - start.tv_sec) * S_TO_NS + (end.tv_nsec - start.tv_nsec);
    if (g_mallocTimes % PRINT_INTERVAL == 0) {
        PROFILER_LOG_ERROR(LOG_CORE,
            "g_mallocTimes %" PRIu64" cost time = %" PRIu64" copy data bytes = %" PRIu64" mean cost = %" PRIu64"\n",
            g_mallocTimes.load(), g_timeCost.load(), g_dataCounts.load(), g_timeCost.load() / g_mallocTimes.load());
    }
#endif
}

// Hook-framework entry point (presumably invoked by the musl preinit hook
// machinery — confirm): records the real dispatch table and brings up the IPC
// channel to the profiler daemon.
bool ohos_malloc_hook_initialize(const MallocDispatchType*malloc_dispatch, bool*, const char*)
{
    g_dispatch.store(malloc_dispatch);
    InititalizeIPC();
    return true;
}

// Tears down the IPC channel set up by ohos_malloc_hook_initialize.
void ohos_malloc_hook_finalize(void)
{
    FinalizeIPC();
}

// The ohos_malloc_hook_* shims below all follow the same pattern: disable the
// hook flag for the current thread (to avoid re-entrant interception from
// allocations done inside the hook itself), delegate to the hook_* worker with
// the real function from the dispatch table, then re-enable the flag.

void* ohos_malloc_hook_malloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_malloc(GetDispatch()->malloc, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_realloc(void* ptr, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_realloc(GetDispatch()->realloc, ptr, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_calloc(size_t number, size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_calloc(GetDispatch()->calloc, number, size);
    __set_hook_flag(true);
    return ret;
}

void* ohos_malloc_hook_valloc(size_t size)
{
    __set_hook_flag(false);
    void* ret = hook_valloc(GetDispatch()->valloc, size);
    __set_hook_flag(true);
    return ret;
}

void ohos_malloc_hook_free(void* p)
{
    __set_hook_flag(false);
    hook_free(GetDispatch()->free, p);
    __set_hook_flag(true);
}

size_t ohos_malloc_hook_malloc_usable_size(void* mem)
{
    __set_hook_flag(false);
    size_t ret = hook_malloc_usable_size(GetDispatch()->malloc_usable_size, mem);
    __set_hook_flag(true);
    return ret;
}

// The hook counts as "enabled" for this thread when the thread-specific
// disable flag is unset (nullptr).
bool ohos_malloc_hook_get_hook_flag(void)
{
    return pthread_getspecific(g_disableHookFlag) == nullptr;
}

// Sets (flag == true) or clears the per-thread hook-enable state and returns
// the previous state.
bool ohos_malloc_hook_set_hook_flag(bool flag)
{
    bool oldFlag = ohos_malloc_hook_get_hook_flag();
    if (flag) {
        pthread_setspecific(g_disableHookFlag, nullptr);
    } else {
        pthread_setspecific(g_disableHookFlag, reinterpret_cast<void *>(1));
    }
    return oldFlag;
}

void* ohos_malloc_hook_mmap(void* addr, size_t length, int prot, int flags, int fd, off_t offset)
{
    __set_hook_flag(false);
    void* ret = hook_mmap(GetDispatch()->mmap, addr, length, prot, flags, fd, offset);
    __set_hook_flag(true);
    return ret;
}

int ohos_malloc_hook_munmap(void* addr, size_t length)
{
    __set_hook_flag(false);
    int ret = hook_munmap(GetDispatch()->munmap, addr, length);
    __set_hook_flag(true);
    return ret;
}

void ohos_malloc_hook_memtrace(void* addr, size_t size, const char* tag, bool isUsing)
{
    __set_hook_flag(false);
    hook_memtrace(addr, size, tag, isUsing);
    __set_hook_flag(true);
}

void* ohos_malloc_hook_aligned_alloc(size_t align, size_t len)
{
    __set_hook_flag(false);
    void* ret = hook_aligned_alloc(GetDispatch()->aligned_alloc, align, len);
    __set_hook_flag(true);
    return ret;
}

int ohos_malloc_hook_prctl(int option, unsigned long arg2, unsigned long arg3,
    unsigned long arg4, unsigned long arg5)
{
    __set_hook_flag(false);
    int ret = hook_prctl((GetDispatch()->prctl), option, arg2, arg3, arg4, arg5);
    __set_hook_flag(true);
    return ret;
}

// Returns true when 'size' lies inside the configured reporting window
// [filterSize, g_maxSize]; a negative filterSize rejects every size.
// NOTE(review): the 'ret' parameter is unused here — confirm it is required by
// the hook ABI before removing it.
bool ohos_set_filter_size(size_t size, void* ret)
{
    if (g_ClientConfig.filterSize < 0 || size < static_cast<size_t>(g_ClientConfig.filterSize) || size > g_maxSize) {
        return false;
    }
    return true;
}

// Returns true once the current process id no longer matches the pid captured
// at hook setup (e.g. after fork); the result is sticky via g_isPidChanged so
// subsequent calls short-circuit.
static bool IsPidChanged(void)
{
    if (g_isPidChanged) {
        return true;
    }
    int pid = getprocpid();
    // hap app after pid namespace used
    if (pid == PID_NAMESPACE_ID) {
        return false;
    } else {
        // native app & sa service
        g_isPidChanged = (g_hookPid != pid);
    }
    return g_isPidChanged;
}

// Forwards auxiliary hook data to the daemon. Currently only JS_STACK_DATA is
// handled: the payload is sent as a JS_STACK_MSG keyed by 'id' (jsChainId).
// Returns false for unknown types or when the socket client is gone.
bool ohos_malloc_hook_send_hook_misc_data(uint64_t id, const char* stackPtr, size_t stackSize, uint32_t type)
{
    if (type == static_cast<uint32_t>(MISC_TYPE::JS_STACK_DATA)) {
        StackRawData rawdata = {{{{0}}}};
        rawdata.jsChainId = id;
        rawdata.type = JS_STACK_MSG;
        std::weak_ptr<HookSocketClient> weakClient = g_hookClient;
        auto holder = weakClient.lock();
        if (holder != nullptr) {
            return holder->SendStackWithPayload(&rawdata, sizeof(BaseStackRawData), stackPtr, stackSize);
        }
    }
    return false;
}

// Exposes the ArkTS-related part of the client configuration to callers.
void* ohos_malloc_hook_get_hook_config()
{
    return &g_ClientConfig.arktsConfig;
}