1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 // Platform-specific code for POSIX goes here. This is not a platform on its
6 // own, but contains the parts which are the same across the POSIX platforms
7 // Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
8 
9 #include <errno.h>
10 #include <limits.h>
11 #include <pthread.h>
12 #if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
13 #include <pthread_np.h>  // for pthread_set_name_np
14 #endif
15 #include <fcntl.h>
16 #include <sched.h>  // for sched_yield
17 #include <stdio.h>
18 #include <sys/mman.h>
19 #include <sys/stat.h>
20 #include <sys/time.h>
21 #include <sys/types.h>
22 #include <time.h>
23 #include <unistd.h>
24 #if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
25     defined(__NetBSD__) || defined(__OpenBSD__)
26 #include <sys/sysctl.h>  // for sysctl
27 #endif
28 
29 #if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
30 #define LOG_TAG "v8"
31 #include <android/log.h>
32 #endif
33 
34 #include <cmath>
35 #include <cstdlib>
36 
37 #include "src/base/platform/platform-posix.h"
38 
39 #include "src/base/lazy-instance.h"
40 #include "src/base/macros.h"
41 #include "src/base/platform/platform.h"
42 #include "src/base/platform/time.h"
43 #include "src/base/utils/random-number-generator.h"
44 
45 #ifdef V8_FAST_TLS_SUPPORTED
46 #include <atomic>
47 #endif
48 
49 #if V8_OS_DARWIN || V8_OS_LINUX
50 #include <dlfcn.h>  // for dlsym
51 #endif
52 
53 #if V8_OS_DARWIN
54 #include <mach/mach.h>
55 #endif
56 
57 #if V8_OS_LINUX
58 #include <sys/prctl.h>  // for prctl
59 #endif
60 
61 #if defined(V8_OS_FUCHSIA)
62 #include <zircon/process.h>
63 #else
64 #include <sys/resource.h>
65 #endif
66 
67 #if !defined(_AIX) && !defined(V8_OS_FUCHSIA)
68 #include <sys/syscall.h>
69 #endif
70 
71 #if V8_OS_FREEBSD || V8_OS_DARWIN || V8_OS_OPENBSD || V8_OS_SOLARIS
72 #define MAP_ANONYMOUS MAP_ANON
73 #endif
74 
75 #if defined(V8_OS_SOLARIS)
76 #if (defined(_POSIX_C_SOURCE) && _POSIX_C_SOURCE > 2) || defined(__EXTENSIONS__)
77 extern "C" int madvise(caddr_t, size_t, int);
78 #else
79 extern int madvise(caddr_t, size_t, int);
80 #endif
81 #endif
82 
83 #ifndef MADV_FREE
84 #define MADV_FREE MADV_DONTNEED
85 #endif
86 
87 #if defined(V8_LIBC_GLIBC)
88 extern "C" void* __libc_stack_end;
89 #endif
90 
91 namespace v8 {
92 namespace base {
93 
94 namespace {
95 
// 0 is never a valid thread id.
const pthread_t kNoThread = static_cast<pthread_t>(0);

// When true, OS::Abort() crashes immediately (IMMEDIATE_CRASH) instead of
// calling abort(). Set once via PosixInitializeCommon().
bool g_hard_abort = false;

// File name reported by OS::GetGCFakeMMapFile(); set once via
// PosixInitializeCommon(). May stay nullptr.
const char* g_gc_fake_mmap = nullptr;

// Lazily-created, leaked process-wide RNG used for random mmap hint
// addresses, plus the mutex serializing access to it.
DEFINE_LAZY_LEAKY_OBJECT_GETTER(RandomNumberGenerator,
                                GetPlatformRandomNumberGenerator)
static LazyMutex rng_mutex = LAZY_MUTEX_INITIALIZER;
106 
#if !V8_OS_FUCHSIA
#if V8_OS_DARWIN
// kMmapFd is used to pass vm_alloc flags to tag the region with the user
// defined tag 255 This helps identify V8-allocated regions in memory analysis
// tools like vmmap(1).
const int kMmapFd = VM_MAKE_TAG(255);
#else   // !V8_OS_DARWIN
// On non-Darwin POSIX, anonymous mappings pass fd -1 together with
// MAP_ANONYMOUS.
const int kMmapFd = -1;
#endif  // !V8_OS_DARWIN

#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
// During snapshot generation in cross builds, sysconf() runs on the Intel
// host and returns host page size, while the snapshot needs to use the
// target page size.
constexpr int kAppleArmPageSize = 1 << 14;  // 16 KiB pages on Apple silicon.
#endif

// File offset passed to mmap() for anonymous mappings; always 0.
const int kMmapFdOffset = 0;
125 
126 // TODO(v8:10026): Add the right permission flag to make executable pages
127 // guarded.
GetProtectionFromMemoryPermission(OS::MemoryPermission access)128 int GetProtectionFromMemoryPermission(OS::MemoryPermission access) {
129   switch (access) {
130     case OS::MemoryPermission::kNoAccess:
131     case OS::MemoryPermission::kNoAccessWillJitLater:
132       return PROT_NONE;
133     case OS::MemoryPermission::kRead:
134       return PROT_READ;
135     case OS::MemoryPermission::kReadWrite:
136       return PROT_READ | PROT_WRITE;
137     case OS::MemoryPermission::kReadWriteExecute:
138       return PROT_READ | PROT_WRITE | PROT_EXEC;
139     case OS::MemoryPermission::kReadExecute:
140       return PROT_READ | PROT_EXEC;
141   }
142   UNREACHABLE();
143 }
144 
// Whether a mapping is shared between processes or private to this process.
enum class PageType { kShared, kPrivate };

// Translates an OS::MemoryPermission plus sharing mode into mmap() flags.
int GetFlagsForMemoryPermission(OS::MemoryPermission access,
                                PageType page_type) {
  int flags = MAP_ANONYMOUS;
  flags |= (page_type == PageType::kShared) ? MAP_SHARED : MAP_PRIVATE;
  if (access == OS::MemoryPermission::kNoAccess) {
    // Inaccessible reservations should not count against the commit charge /
    // swap reservation on platforms that support that.
#if !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
    flags |= MAP_NORESERVE;
#endif  // !V8_OS_AIX && !V8_OS_FREEBSD && !V8_OS_QNX
#if V8_OS_QNX
    // QNX: allocate backing store lazily.
    flags |= MAP_LAZY;
#endif  // V8_OS_QNX
  }
#if V8_OS_DARWIN
  // MAP_JIT is required to obtain writable and executable pages when the
  // hardened runtime/memory protection is enabled, which is optional (via code
  // signing) on Intel-based Macs but mandatory on Apple silicon ones. See also
  // https://developer.apple.com/documentation/apple-silicon/porting-just-in-time-compilers-to-apple-silicon.
  if (access == OS::MemoryPermission::kNoAccessWillJitLater) {
    flags |= MAP_JIT;
  }
#endif  // V8_OS_DARWIN
  return flags;
}
170 
// mmap()s `size` bytes of anonymous memory at (or near) `hint` with the
// requested protection and sharing mode. Returns nullptr on failure.
void* Allocate(void* hint, size_t size, OS::MemoryPermission access,
               PageType page_type) {
  int prot = GetProtectionFromMemoryPermission(access);
  int flags = GetFlagsForMemoryPermission(access, page_type);
  void* result = mmap(hint, size, prot, flags, kMmapFd, kMmapFdOffset);
  if (result == MAP_FAILED) return nullptr;

#if V8_ENABLE_PRIVATE_MAPPING_FORK_OPTIMIZATION
  // This is advisory, so we ignore errors.
  madvise(result, size, MADV_DONTFORK);
#endif

#if ENABLE_HUGEPAGE
  // Ask the kernel to back the page-aligned interior of the mapping with
  // huge pages; also advisory, errors are ignored.
  if (result != nullptr && size >= kHugePageSize) {
    const uintptr_t huge_start =
        RoundUp(reinterpret_cast<uintptr_t>(result), kHugePageSize);
    const uintptr_t huge_end =
        RoundDown(reinterpret_cast<uintptr_t>(result) + size, kHugePageSize);
    if (huge_end > huge_start) {
      // Bail out in case the aligned addresses do not provide a block of at
      // least kHugePageSize size.
      madvise(reinterpret_cast<void*>(huge_start), huge_end - huge_start,
              MADV_HUGEPAGE);
    }
  }
#endif

  return result;
}
200 
201 #endif  // !V8_OS_FUCHSIA
202 
203 }  // namespace
204 
#if V8_OS_LINUX || V8_OS_FREEBSD
#ifdef __arm__

// Returns whether this ARM build targets the hard-float calling convention
// (FP arguments passed in VFP registers). Decided entirely at compile time
// from compiler-defined ABI macros; no runtime detection happens here.
bool OS::ArmUsingHardFloat() {
  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
  // We use these as well as a couple of other defines to statically determine
  // what FP ABI used.
  // GCC versions 4.4 and below don't support hard-fp.
  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
  // __ARM_PCS_VFP.

#define GCC_VERSION \
  (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__)
#if GCC_VERSION >= 40600 && !defined(__clang__)
#if defined(__ARM_PCS_VFP)
  return true;
#else
  return false;
#endif

#elif GCC_VERSION < 40500 && !defined(__clang__)
  return false;

#else
#if defined(__ARM_PCS_VFP)
  return true;
#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
    !defined(__VFP_FP__)
  return false;
#else
#error \
    "Your version of compiler does not report the FP ABI compiled for."     \
       "Please report it on this issue"                                        \
       "http://code.google.com/p/v8/issues/detail?id=2140"

#endif
#endif
#undef GCC_VERSION
}

#endif  // def __arm__
#endif
248 
PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap)249 void PosixInitializeCommon(bool hard_abort, const char* const gc_fake_mmap) {
250   g_hard_abort = hard_abort;
251   g_gc_fake_mmap = gc_fake_mmap;
252 }
253 
#if !V8_OS_FUCHSIA
// Generic POSIX OS::Initialize; Fuchsia is excluded here and provides its
// own implementation elsewhere.
void OS::Initialize(bool hard_abort, const char* const gc_fake_mmap) {
  PosixInitializeCommon(hard_abort, gc_fake_mmap);
}
#endif  // !V8_OS_FUCHSIA
259 
// Required stack alignment (in bytes) for activation frames, chosen per
// target architecture at compile time.
int OS::ActivationFrameAlignment() {
#if V8_TARGET_ARCH_ARM
  // On EABI ARM targets this is required for fp correctness in the
  // runtime system.
  return 8;
#elif V8_TARGET_ARCH_MIPS
  return 8;
#elif V8_TARGET_ARCH_S390
  return 8;
#else
  // Otherwise we just assume 16 byte alignment, i.e.:
  // - With gcc 4.4 the tree vectorization optimizer can generate code
  //   that requires 16 byte alignment such as movdqa on x86.
  // - Mac OS X, PPC and Solaris (64-bit) activation frames must
  //   be 16 byte-aligned;  see "Mac OS X ABI Function Call Guide"
  return 16;
#endif
}
278 
// static
// Page size used for OS::Allocate granularity.
size_t OS::AllocatePageSize() {
#if defined(V8_TARGET_OS_MACOS) && V8_HOST_ARCH_ARM64
  // Cross builds must use the target's 16 KiB page size, not whatever
  // sysconf() reports on the build host (see kAppleArmPageSize).
  return kAppleArmPageSize;
#else
  // Cached after the first call; function-local static init is thread-safe.
  static size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  return page_size;
#endif
}
288 
// static
// Granularity at which permissions can be changed / pages committed.
size_t OS::CommitPageSize() {
  // Commit and allocate page size are the same on posix.
  return OS::AllocatePageSize();
}
294 
295 // static
SetRandomMmapSeed(int64_t seed)296 void OS::SetRandomMmapSeed(int64_t seed) {
297   if (seed) {
298     MutexGuard guard(rng_mutex.Pointer());
299     GetPlatformRandomNumberGenerator()->SetSeed(seed);
300   }
301 }
302 
// static
// Produces a randomized hint address for mmap(), masked per architecture so
// the hint falls inside the usable virtual address range. The hint is
// advisory only: no MAP_FIXED is used, so the kernel may place the mapping
// elsewhere.
void* OS::GetRandomMmapAddr() {
  uintptr_t raw_addr;
  {
    MutexGuard guard(rng_mutex.Pointer());
    GetPlatformRandomNumberGenerator()->NextBytes(&raw_addr, sizeof(raw_addr));
  }
#if V8_HOST_ARCH_ARM64
#if defined(V8_TARGET_OS_MACOS)
  DCHECK_EQ(1 << 14, AllocatePageSize());
#endif
  // Keep the address page-aligned, AArch64 supports 4K, 16K and 64K
  // configurations.
  raw_addr = RoundDown(raw_addr, AllocatePageSize());
#endif
#if defined(V8_USE_ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
    defined(THREAD_SANITIZER) || defined(LEAK_SANITIZER)
  // If random hint addresses interfere with address ranges hard coded in
  // sanitizers, bad things happen. This address range is copied from TSAN
  // source but works with all tools.
  // See crbug.com/539863.
  raw_addr &= 0x007fffff0000ULL;
  raw_addr += 0x7e8000000000ULL;
#else
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
  // the hint address to 46 bits to give the kernel a fighting chance of
  // fulfilling our placement request.
  raw_addr &= uint64_t{0x3FFFFFFFF000};
#elif V8_TARGET_ARCH_PPC64
#if V8_OS_AIX
  // AIX: 64 bits of virtual addressing, but we limit address range to:
  //   a) minimize Segment Lookaside Buffer (SLB) misses and
  raw_addr &= uint64_t{0x3FFFF000};
  // Use extra address space to isolate the mmap regions.
  raw_addr += uint64_t{0x400000000000};
#elif V8_TARGET_BIG_ENDIAN
  // Big-endian Linux: 42 bits of virtual addressing.
  raw_addr &= uint64_t{0x03FFFFFFF000};
#else
  // Little-endian Linux: 46 bits of virtual addressing.
  raw_addr &= uint64_t{0x3FFFFFFF0000};
#endif
#elif V8_TARGET_ARCH_S390X
  // Linux on Z uses bits 22-32 for Region Indexing, which translates to 42 bits
  // of virtual addressing.  Truncate to 40 bits to allow kernel chance to
  // fulfill request.
  raw_addr &= uint64_t{0xFFFFFFF000};
#elif V8_TARGET_ARCH_S390
  // 31 bits of virtual addressing.  Truncate to 29 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= 0x1FFFF000;
#elif V8_TARGET_ARCH_MIPS64
  // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= uint64_t{0xFFFFFF0000};
#elif V8_TARGET_ARCH_RISCV64
  // TODO(RISCV): We need more information from the kernel to correctly mask
  // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
  raw_addr &= uint64_t{0xFFFFFF0000};
#elif V8_TARGET_ARCH_LOONG64
  // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
  // to fulfill request.
  raw_addr &= uint64_t{0xFFFFFF0000};
#else
  // Fallback for remaining (32-bit) targets.
  raw_addr &= 0x3FFFF000;

#ifdef __sun
  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
  // half of the top half of the address space (that is, the third quarter).
  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
  // system will not fail to mmap() because something else happens to already
  // be mapped at our random address. We deliberately set the hint high enough
  // to get well above the system's break (that is, the heap); Solaris and
  // illumos will try the hint and if that fails allocate as if there were
  // no hint at all. The high hint prevents the break from getting hemmed in
  // at low values, ceding half of the address space to the system heap.
  raw_addr += 0x80000000;
#elif V8_OS_AIX
  // The range 0x30000000 - 0xD0000000 is available on AIX;
  // choose the upper range.
  raw_addr += 0x90000000;
#else
  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
  // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
  // 10.6 and 10.7.
  raw_addr += 0x20000000;
#endif
#endif
#endif
  return reinterpret_cast<void*>(raw_addr);
}
395 
396 // TODO(bbudge) Move Cygwin and Fuchsia stuff into platform-specific files.
397 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
398 // static
Allocate(void* hint, size_t size, size_t alignment, MemoryPermission access)399 void* OS::Allocate(void* hint, size_t size, size_t alignment,
400                    MemoryPermission access) {
401   size_t page_size = AllocatePageSize();
402   DCHECK_EQ(0, size % page_size);
403   DCHECK_EQ(0, alignment % page_size);
404   hint = AlignedAddress(hint, alignment);
405   // Add the maximum misalignment so we are guaranteed an aligned base address.
406   size_t request_size = size + (alignment - page_size);
407   request_size = RoundUp(request_size, OS::AllocatePageSize());
408   void* result = base::Allocate(hint, request_size, access, PageType::kPrivate);
409   if (result == nullptr) return nullptr;
410 
411   // Unmap memory allocated before the aligned base address.
412   uint8_t* base = static_cast<uint8_t*>(result);
413   uint8_t* aligned_base = reinterpret_cast<uint8_t*>(
414       RoundUp(reinterpret_cast<uintptr_t>(base), alignment));
415   if (aligned_base != base) {
416     DCHECK_LT(base, aligned_base);
417     size_t prefix_size = static_cast<size_t>(aligned_base - base);
418     Free(base, prefix_size);
419     request_size -= prefix_size;
420   }
421   // Unmap memory allocated after the potentially unaligned end.
422   if (size != request_size) {
423     DCHECK_LT(size, request_size);
424     size_t suffix_size = request_size - size;
425     Free(aligned_base + size, suffix_size);
426     request_size -= suffix_size;
427   }
428 
429   DCHECK_EQ(size, request_size);
430   return static_cast<void*>(aligned_base);
431 }
432 
// static
// Allocates `size` bytes of anonymous memory shareable across fork()ed
// processes (MAP_SHARED). Returns nullptr on failure.
void* OS::AllocateShared(size_t size, MemoryPermission access) {
  DCHECK_EQ(0, size % AllocatePageSize());
  return base::Allocate(nullptr, size, access, PageType::kShared);
}
438 
// static
// Unmaps pages previously obtained from OS::Allocate. Address and size must
// be allocate-page aligned; munmap() failure is fatal (CHECK).
void OS::Free(void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % AllocatePageSize());
  DCHECK_EQ(0, size % AllocatePageSize());
  CHECK_EQ(0, munmap(address, size));
}
445 
446 // macOS specific implementation in platform-macos.cc.
447 #if !defined(V8_OS_MACOS)
448 // static
AllocateShared(void* hint, size_t size, MemoryPermission access, PlatformSharedMemoryHandle handle, uint64_t offset)449 void* OS::AllocateShared(void* hint, size_t size, MemoryPermission access,
450                          PlatformSharedMemoryHandle handle, uint64_t offset) {
451   DCHECK_EQ(0, size % AllocatePageSize());
452   int prot = GetProtectionFromMemoryPermission(access);
453   int fd = FileDescriptorFromSharedMemoryHandle(handle);
454   void* result = mmap(hint, size, prot, MAP_SHARED, fd, offset);
455   if (result == MAP_FAILED) return nullptr;
456   return result;
457 }
458 #endif  // !defined(V8_OS_MACOS)
459 
// static
// Unmaps a shared mapping created by OS::AllocateShared; failure is fatal.
void OS::FreeShared(void* address, size_t size) {
  DCHECK_EQ(0, size % AllocatePageSize());
  CHECK_EQ(0, munmap(address, size));
}
465 
// static
// Releases a commit-page-aligned sub-range of a mapping back to the OS.
void OS::Release(void* address, size_t size) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
  CHECK_EQ(0, munmap(address, size));
}
472 
// static
// Changes the protection of a committed range. Returns false if the
// underlying mprotect()/madvise() calls fail. On Darwin this also maintains
// the MADV_FREE_REUSABLE/MADV_FREE_REUSE accounting pair.
bool OS::SetPermissions(void* address, size_t size, MemoryPermission access) {
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());

  int prot = GetProtectionFromMemoryPermission(access);
  int ret = mprotect(address, size, prot);

  // MacOS 11.2 on Apple Silicon refuses to switch permissions from
  // rwx to none. Just use madvise instead.
#if defined(V8_OS_DARWIN)
  if (ret != 0 && access == OS::MemoryPermission::kNoAccess) {
    ret = madvise(address, size, MADV_FREE_REUSABLE);
    return ret == 0;
  }
#endif

  if (ret == 0 && access == OS::MemoryPermission::kNoAccess) {
    // This is advisory; ignore errors and continue execution.
    USE(DiscardSystemPages(address, size));
  }

// For accounting purposes, we want to call MADV_FREE_REUSE on macOS after
// changing permissions away from OS::MemoryPermission::kNoAccess. Since this
// state is not kept at this layer, we always call this if access != kNoAccess.
// The cost is a syscall that effectively no-ops.
// TODO(erikchen): Fix this to only call MADV_FREE_REUSE when necessary.
// https://crbug.com/823915
#if defined(V8_OS_DARWIN)
  if (access != OS::MemoryPermission::kNoAccess)
    madvise(address, size, MADV_FREE_REUSE);
#endif

  return ret == 0;
}
508 
// static
// Advises the kernel that the pages may be dropped (contents need not be
// preserved); the address range stays mapped. Best effort: returns false if
// the advisory call failed.
bool OS::DiscardSystemPages(void* address, size_t size) {
  // Roughly based on PartitionAlloc's DiscardSystemPagesInternal
  // (base/allocator/partition_allocator/page_allocator_internals_posix.h)
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
  DCHECK_EQ(0, size % CommitPageSize());
#if defined(V8_OS_DARWIN)
  // On OSX, MADV_FREE_REUSABLE has comparable behavior to MADV_FREE, but also
  // marks the pages with the reusable bit, which allows both Activity Monitor
  // and memory-infra to correctly track the pages.
  int ret = madvise(address, size, MADV_FREE_REUSABLE);
  if (ret) {
    // MADV_FREE_REUSABLE sometimes fails, so fall back to MADV_DONTNEED.
    ret = madvise(address, size, MADV_DONTNEED);
  }
#elif defined(_AIX) || defined(V8_OS_SOLARIS)
  int ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_FREE);
  if (ret != 0 && errno == ENOSYS)
    return true;  // madvise is not available on all systems.
  if (ret != 0 && errno == EINVAL)
    ret = madvise(reinterpret_cast<caddr_t>(address), size, MADV_DONTNEED);
#else
  int ret = madvise(address, size, MADV_DONTNEED);
#endif
  return ret == 0;
}
535 
536 #if !defined(_AIX)
537 // See AIX version for details.
538 // static
DecommitPages(void* address, size_t size)539 bool OS::DecommitPages(void* address, size_t size) {
540   DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
541   DCHECK_EQ(0, size % CommitPageSize());
542   // From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
543   // "If a MAP_FIXED request is successful, then any previous mappings [...] for
544   // those whole pages containing any part of the address range [pa,pa+len)
545   // shall be removed, as if by an appropriate call to munmap(), before the new
546   // mapping is established." As a consequence, the memory will be
547   // zero-initialized on next access.
548   void* ptr = mmap(address, size, PROT_NONE,
549                    MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
550   return ptr == address;
551 }
552 #endif  // !defined(_AIX)
553 
// static
// POSIX platforms can always reserve address space (PROT_NONE mappings).
bool OS::CanReserveAddressSpace() { return true; }
556 
557 // static
CreateAddressSpaceReservation( void* hint, size_t size, size_t alignment, MemoryPermission max_permission)558 Optional<AddressSpaceReservation> OS::CreateAddressSpaceReservation(
559     void* hint, size_t size, size_t alignment,
560     MemoryPermission max_permission) {
561   // On POSIX, address space reservations are backed by private memory mappings.
562   MemoryPermission permission = MemoryPermission::kNoAccess;
563   if (max_permission == MemoryPermission::kReadWriteExecute) {
564     permission = MemoryPermission::kNoAccessWillJitLater;
565   }
566 
567   void* reservation = Allocate(hint, size, alignment, permission);
568   if (!reservation && permission == MemoryPermission::kNoAccessWillJitLater) {
569     // Retry without MAP_JIT, for example in case we are running on an old OS X.
570     permission = MemoryPermission::kNoAccess;
571     reservation = Allocate(hint, size, alignment, permission);
572   }
573 
574   if (!reservation) return {};
575 
576   return AddressSpaceReservation(reservation, size);
577 }
578 
// static
// Releases an entire reservation created by CreateAddressSpaceReservation.
void OS::FreeAddressSpaceReservation(AddressSpaceReservation reservation) {
  Free(reservation.base(), reservation.size());
}
583 
584 // macOS specific implementation in platform-macos.cc.
585 #if !defined(V8_OS_MACOS)
// static
// Need to disable CFI_ICALL due to the indirect call to memfd_create.
DISABLE_CFI_ICALL
// Generic POSIX fallback: creating shared-memory handles for testing is not
// supported here (macOS has its own implementation, see above).
PlatformSharedMemoryHandle OS::CreateSharedMemoryHandleForTesting(size_t size) {
  return kInvalidSharedMemoryHandle;
}
592 
// static
// Closes the file descriptor backing a (valid) shared-memory handle;
// close() failure is fatal.
void OS::DestroySharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  DCHECK_NE(kInvalidSharedMemoryHandle, handle);
  int fd = FileDescriptorFromSharedMemoryHandle(handle);
  CHECK_EQ(0, close(fd));
}
599 #endif  // !defined(V8_OS_MACOS)
600 
// static
// Whether the OS commits pages lazily (on first touch) rather than eagerly.
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_DARWIN
  return true;
#else
  // TODO(bbudge) Return true for all POSIX platforms.
  return false;
#endif
}
610 #endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
611 
// Returns the fake mmap file name configured via OS::Initialize (may be
// nullptr).
const char* OS::GetGCFakeMMapFile() {
  return g_gc_fake_mmap;
}
615 
616 
// Blocks the calling thread for (at least) the given interval.
void OS::Sleep(TimeDelta interval) {
  usleep(static_cast<useconds_t>(interval.InMicroseconds()));
}
620 
621 
// Terminates the process abnormally. In hard-abort mode (see OS::Initialize)
// this crashes immediately via IMMEDIATE_CRASH instead of calling abort().
void OS::Abort() {
  if (g_hard_abort) {
    IMMEDIATE_CRASH();
  }
  // Redirect to std abort to signal abnormal program termination.
  abort();
}
629 
630 
// Emits the host architecture's software-breakpoint instruction inline, so
// a debugger stops exactly at the call site.
void OS::DebugBreak() {
#if V8_HOST_ARCH_ARM
  asm("bkpt 0");
#elif V8_HOST_ARCH_ARM64
  asm("brk 0");
#elif V8_HOST_ARCH_MIPS
  asm("break");
#elif V8_HOST_ARCH_MIPS64
  asm("break");
#elif V8_HOST_ARCH_LOONG64
  asm("break 0");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
  asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
  asm("int $3");
#elif V8_HOST_ARCH_X64
  asm("int $3");
#elif V8_HOST_ARCH_S390
  // Software breakpoint instruction is 0x0001
  asm volatile(".word 0x0001");
#elif V8_HOST_ARCH_RISCV64
  asm("ebreak");
#else
#error Unsupported host architecture.
#endif
}
657 
658 
// Pairs a FILE* with its mmap()ed contents; the destructor (defined below)
// unmaps the memory and closes the file.
class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
 public:
  PosixMemoryMappedFile(FILE* file, void* memory, size_t size)
      : file_(file), memory_(memory), size_(size) {}
  ~PosixMemoryMappedFile() final;
  void* memory() const final { return memory_; }
  size_t size() const final { return size_; }

 private:
  FILE* const file_;    // Owned; closed in the destructor.
  void* const memory_;  // May be nullptr for an empty file.
  size_t const size_;
};
672 
673 
674 // static
open(const char* name, FileMode mode)675 OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
676                                                  FileMode mode) {
677   const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
678   struct stat statbuf;
679   // Make sure path exists and is not a directory.
680   if (stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) {
681     if (FILE* file = fopen(name, fopen_mode)) {
682       if (fseek(file, 0, SEEK_END) == 0) {
683         long size = ftell(file);  // NOLINT(runtime/int)
684         if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
685         if (size > 0) {
686           int prot = PROT_READ;
687           int flags = MAP_PRIVATE;
688           if (mode == FileMode::kReadWrite) {
689             prot |= PROT_WRITE;
690             flags = MAP_SHARED;
691           }
692           void* const memory =
693               mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
694           if (memory != MAP_FAILED) {
695             return new PosixMemoryMappedFile(file, memory, size);
696           }
697         }
698       }
699       fclose(file);
700     }
701   }
702   return nullptr;
703 }
704 
705 // static
create(const char* name, size_t size, void* initial)706 OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name,
707                                                    size_t size, void* initial) {
708   if (FILE* file = fopen(name, "w+")) {
709     if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
710     size_t result = fwrite(initial, 1, size, file);
711     if (result == size && !ferror(file)) {
712       void* memory = mmap(OS::GetRandomMmapAddr(), result,
713                           PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
714       if (memory != MAP_FAILED) {
715         return new PosixMemoryMappedFile(file, memory, result);
716       }
717     }
718     fclose(file);
719   }
720   return nullptr;
721 }
722 
723 
// Unmaps the contents (if any) and closes the underlying file. OS::Free
// requires a page-multiple size, hence the RoundUp.
PosixMemoryMappedFile::~PosixMemoryMappedFile() {
  if (memory_) OS::Free(memory_, RoundUp(size_, OS::AllocatePageSize()));
  fclose(file_);
}
728 
729 
// Returns the current process id as an int.
int OS::GetCurrentProcessId() {
  return static_cast<int>(getpid());
}
733 
734 
// Returns a numeric id for the calling thread using the platform's native
// thread-id source (falling back to the pthread_t value where nothing
// better exists).
int OS::GetCurrentThreadId() {
#if V8_OS_DARWIN || (V8_OS_ANDROID && defined(__APPLE__))
  return static_cast<int>(pthread_mach_thread_np(pthread_self()));
#elif V8_OS_LINUX
  return static_cast<int>(syscall(__NR_gettid));
#elif V8_OS_ANDROID
  return static_cast<int>(gettid());
#elif V8_OS_AIX
  return static_cast<int>(thread_self());
#elif V8_OS_FUCHSIA
  return static_cast<int>(zx_thread_self());
#elif V8_OS_SOLARIS
  return static_cast<int>(pthread_self());
#else
  // Fallback: reinterpret the (opaque) pthread_t as an integer.
  return static_cast<int>(reinterpret_cast<intptr_t>(pthread_self()));
#endif
}
752 
// Terminates the process with the given exit code, flushing stdio first
// since _exit() skips normal shutdown.
void OS::ExitProcess(int exit_code) {
  // Use _exit instead of exit to avoid races between isolate
  // threads and static destructors.
  fflush(stdout);
  fflush(stderr);
  _exit(exit_code);
}
760 
761 // ----------------------------------------------------------------------------
762 // POSIX date/time support.
763 //
764 
#if !defined(V8_OS_FUCHSIA)
// Reports this process's accumulated user-mode CPU time via getrusage().
// On success fills *secs/*usecs and returns 0; returns -1 on failure.
int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
  struct rusage usage;

  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
  *secs = static_cast<uint32_t>(usage.ru_utime.tv_sec);
  *usecs = static_cast<uint32_t>(usage.ru_utime.tv_usec);
  return 0;
}
#endif
775 
// Current wall-clock time in milliseconds since the epoch, as a double
// (the representation JavaScript Date uses).
double OS::TimeCurrentMillis() {
  return Time::Now().ToJsTime();
}
779 
DaylightSavingsOffset(double time)780 double PosixTimezoneCache::DaylightSavingsOffset(double time) {
781   if (std::isnan(time)) return std::numeric_limits<double>::quiet_NaN();
782   time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
783   struct tm tm;
784   struct tm* t = localtime_r(&tv, &tm);
785   if (nullptr == t) return std::numeric_limits<double>::quiet_NaN();
786   return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
787 }
788 
789 
// Returns the thread-local errno value from the last failed libc call.
int OS::GetLastError() {
  return errno;
}
793 
794 
795 // ----------------------------------------------------------------------------
796 // POSIX stdio support.
797 //
798 
FOpen(const char* path, const char* mode)799 FILE* OS::FOpen(const char* path, const char* mode) {
800   FILE* file = fopen(path, mode);
801   if (file == nullptr) return nullptr;
802   struct stat file_stat;
803   if (fstat(fileno(file), &file_stat) != 0) {
804     fclose(file);
805     return nullptr;
806   }
807   bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
808   if (is_regular_file) return file;
809   fclose(file);
810   return nullptr;
811 }
812 
813 
// Deletes the file or (empty) directory at `path`; true on success.
bool OS::Remove(const char* path) {
  return (remove(path) == 0);
}
817 
// The POSIX path separator.
char OS::DirectorySeparator() { return '/'; }
819 
// True iff `ch` is the platform's path separator.
bool OS::isDirectorySeparator(const char ch) {
  return ch == DirectorySeparator();
}
823 
824 
// Opens an anonymous temporary file (deleted automatically when closed).
FILE* OS::OpenTemporaryFile() {
  return tmpfile();
}
828 
// Log files are opened read/write, truncating any existing contents.
const char* const OS::LogFileOpenMode = "w+";
830 
// printf-style output to the platform log stream (see VPrint).
void OS::Print(const char* format, ...) {
  va_list args;
  va_start(args, format);
  VPrint(format, args);
  va_end(args);
}
837 
838 
// va_list variant of OS::Print; on Android (without stdout logging) output
// is routed to the system log instead of stdout.
void OS::VPrint(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vprintf(format, args);
#endif
}
846 
847 
// printf-style output to an arbitrary stream (see VFPrint).
void OS::FPrint(FILE* out, const char* format, ...) {
  va_list args;
  va_start(args, format);
  VFPrint(out, format, args);
  va_end(args);
}
854 
855 
// va_list variant of OS::FPrint; on Android (without stdout logging) the
// `out` stream is ignored and output goes to the system log.
void OS::VFPrint(FILE* out, const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
#else
  vfprintf(out, format, args);
#endif
}
863 
864 
PrintError(const char* format, ...)865 void OS::PrintError(const char* format, ...) {
866   va_list args;
867   va_start(args, format);
868   VPrintError(format, args);
869   va_end(args);
870 }
871 
872 
// va_list variant of PrintError(). Writes to stderr, or to the Android log
// at ERROR priority when building for Android without V8_ANDROID_LOG_STDOUT.
void OS::VPrintError(const char* format, va_list args) {
#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
#else
  vfprintf(stderr, format, args);
#endif
}
880 
881 
SNPrintF(char* str, int length, const char* format, ...)882 int OS::SNPrintF(char* str, int length, const char* format, ...) {
883   va_list args;
884   va_start(args, format);
885   int result = VSNPrintF(str, length, format, args);
886   va_end(args);
887   return result;
888 }
889 
890 
VSNPrintF(char* str, int length, const char* format, va_list args)891 int OS::VSNPrintF(char* str,
892                   int length,
893                   const char* format,
894                   va_list args) {
895   int n = vsnprintf(str, length, format, args);
896   if (n < 0 || n >= length) {
897     // If the length is zero, the assignment fails.
898     if (length > 0)
899       str[length - 1] = '\0';
900     return -1;
901   } else {
902     return n;
903   }
904 }
905 
906 
907 // ----------------------------------------------------------------------------
908 // POSIX string support.
909 //
910 
// Copies at most |n| characters from |src| into |dest| via strncpy().
// NOTE(review): |length| (presumably the capacity of |dest|) is not
// consulted here, and strncpy() does not NUL-terminate when
// strlen(src) >= n — callers appear to be responsible for both; confirm
// at call sites.
void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
  strncpy(dest, src, n);
}
914 
915 // ----------------------------------------------------------------------------
916 // POSIX Address space reservation support.
917 //
918 
919 #if !V8_OS_CYGWIN && !V8_OS_FUCHSIA
920 
// Carves a page-aligned sub-reservation out of this reservation. On POSIX
// the whole region is already mmap'ed, so no new mapping is created; the
// sub-reservation simply aliases [address, address + size).
// NOTE(review): |max_permission| is unused in this POSIX implementation —
// presumably enforced elsewhere or on other platforms; confirm.
Optional<AddressSpaceReservation> AddressSpaceReservation::CreateSubReservation(
    void* address, size_t size, OS::MemoryPermission max_permission) {
  DCHECK(Contains(address, size));
  DCHECK_EQ(0, size % OS::AllocatePageSize());
  DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % OS::AllocatePageSize());

  return AddressSpaceReservation(address, size);
}
929 
// Releases a sub-reservation created by CreateSubReservation(). Since no
// separate mapping was made for it, there is nothing to unmap.
bool AddressSpaceReservation::FreeSubReservation(
    AddressSpaceReservation reservation) {
  // Nothing to do.
  // Pages allocated inside the reservation must've already been freed.
  return true;
}
936 
// "Allocates" pages inside the reservation by changing their protection.
bool AddressSpaceReservation::Allocate(void* address, size_t size,
                                       OS::MemoryPermission access) {
  // The region is already mmap'ed, so it just has to be made accessible now.
  DCHECK(Contains(address, size));
  if (access == OS::MemoryPermission::kNoAccess) {
    // Nothing to do. We don't want to call SetPermissions with kNoAccess here
    // as that will for example mark the pages as discardable, which is
    // probably not desired here.
    return true;
  }
  return OS::SetPermissions(address, size, access);
}
949 
// Frees pages inside the reservation by decommitting them; the address
// range itself remains part of the reservation.
bool AddressSpaceReservation::Free(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DecommitPages(address, size);
}
954 
955 // macOS specific implementation in platform-macos.cc.
956 #if !defined(V8_OS_MACOS)
// Maps the shared memory object behind |handle| (starting at |offset|) into
// the reservation at the fixed address |address| with the requested access.
bool AddressSpaceReservation::AllocateShared(void* address, size_t size,
                                             OS::MemoryPermission access,
                                             PlatformSharedMemoryHandle handle,
                                             uint64_t offset) {
  DCHECK(Contains(address, size));
  int prot = GetProtectionFromMemoryPermission(access);
  int fd = FileDescriptorFromSharedMemoryHandle(handle);
  // MAP_FIXED is safe here: the DCHECK above guarantees the target range
  // lies inside this reservation.
  return mmap(address, size, prot, MAP_SHARED | MAP_FIXED, fd, offset) !=
         MAP_FAILED;
}
967 #endif  // !defined(V8_OS_MACOS)
968 
// Removes a shared mapping by overwriting it with an inaccessible private
// anonymous mapping (MAP_FIXED), which keeps the address range reserved.
bool AddressSpaceReservation::FreeShared(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return mmap(address, size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE,
              -1, 0) == address;
}
974 
// Forwards to OS::SetPermissions() after checking the range lies inside
// this reservation.
bool AddressSpaceReservation::SetPermissions(void* address, size_t size,
                                             OS::MemoryPermission access) {
  DCHECK(Contains(address, size));
  return OS::SetPermissions(address, size, access);
}
980 
// Forwards to OS::DiscardSystemPages() after checking the range lies inside
// this reservation.
bool AddressSpaceReservation::DiscardSystemPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DiscardSystemPages(address, size);
}
985 
// Forwards to OS::DecommitPages() after checking the range lies inside
// this reservation.
bool AddressSpaceReservation::DecommitPages(void* address, size_t size) {
  DCHECK(Contains(address, size));
  return OS::DecommitPages(address, size);
}
990 
991 #endif  // !V8_OS_CYGWIN && !V8_OS_FUCHSIA
992 
993 // ----------------------------------------------------------------------------
994 // POSIX thread support.
995 //
996 
// Per-thread bookkeeping for the pthread-based Thread implementation.
class Thread::PlatformData {
 public:
  PlatformData() : thread_(kNoThread) {}
  pthread_t thread_;  // Thread handle for pthread.
  // Synchronizes thread creation: Start() holds this across pthread_create()
  // so that ThreadEntry() can wait until thread_ has been written.
  Mutex thread_creation_mutex_;
};
1004 
// Constructs a (not yet started) thread with the given options. A non-zero
// requested stack size is clamped up to the platform's PTHREAD_STACK_MIN.
Thread::Thread(const Options& options)
    : data_(new PlatformData),
      stack_size_(options.stack_size()),
      start_semaphore_(nullptr) {
  const int min_stack_size = static_cast<int>(PTHREAD_STACK_MIN);
  if (stack_size_ > 0) stack_size_ = std::max(stack_size_, min_stack_size);
  set_name(options.name());
}
1013 
1014 
~Thread()1015 Thread::~Thread() {
1016   delete data_;
1017 }
1018 
1019 
// Best-effort: names the *current* thread using whichever platform API is
// available; silently a no-op on platforms with none of the variants below.
static void SetThreadName(const char* name) {
#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
  pthread_set_name_np(pthread_self(), name);
#elif V8_OS_NETBSD
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
  pthread_setname_np(pthread_self(), "%s", name);
#elif V8_OS_DARWIN
  // pthread_setname_np is only available in 10.6 or later, so test
  // for it at runtime.
  int (*dynamic_pthread_setname_np)(const char*);
  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
    dlsym(RTLD_DEFAULT, "pthread_setname_np");
  if (dynamic_pthread_setname_np == nullptr) return;

  // Mac OS X does not expose the length limit of the name, so hardcode it.
  static const int kMaxNameLength = 63;
  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
  dynamic_pthread_setname_np(name);
#elif defined(PR_SET_NAME)
  prctl(PR_SET_NAME,
        reinterpret_cast<unsigned long>(name),  // NOLINT
        0, 0, 0);
#endif
}
1044 
1045 
// pthread entry trampoline: |arg| is the Thread*. Names the OS thread and
// then hands control to the Thread's run logic.
static void* ThreadEntry(void* arg) {
  Thread* thread = reinterpret_cast<Thread*>(arg);
  // We take the lock here to make sure that pthread_create finished first since
  // we don't know which thread will run first (the original thread or the new
  // one).
  { MutexGuard lock_guard(&thread->data()->thread_creation_mutex_); }
  SetThreadName(thread->name());
  DCHECK_NE(thread->data()->thread_, kNoThread);
  thread->NotifyStartedAndRun();
  return nullptr;
}
1057 
1058 
// Copies |name| into the fixed-size name_ buffer, truncating if necessary
// and always NUL-terminating the result.
void Thread::set_name(const char* name) {
  strncpy(name_, name, sizeof(name_) - 1);
  name_[sizeof(name_) - 1] = '\0';
}
1063 
Start()1064 bool Thread::Start() {
1065   int result;
1066   pthread_attr_t attr;
1067   memset(&attr, 0, sizeof(attr));
1068   result = pthread_attr_init(&attr);
1069   if (result != 0) return false;
1070   size_t stack_size = stack_size_;
1071   if (stack_size == 0) {
1072 #if V8_OS_DARWIN
1073     // Default on Mac OS X is 512kB -- bump up to 1MB
1074     stack_size = 1 * 1024 * 1024;
1075 #elif V8_OS_AIX
1076     // Default on AIX is 96kB -- bump up to 2MB
1077     stack_size = 2 * 1024 * 1024;
1078 #endif
1079   }
1080   if (stack_size > 0) {
1081     result = pthread_attr_setstacksize(&attr, stack_size);
1082     if (result != 0) return pthread_attr_destroy(&attr), false;
1083   }
1084   {
1085     MutexGuard lock_guard(&data_->thread_creation_mutex_);
1086     result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
1087     if (result != 0 || data_->thread_ == kNoThread) {
1088       return pthread_attr_destroy(&attr), false;
1089     }
1090   }
1091   result = pthread_attr_destroy(&attr);
1092   return result == 0;
1093 }
1094 
Join()1095 void Thread::Join() { pthread_join(data_->thread_, nullptr); }
1096 
// Converts a pthread_key_t into V8's portable LocalStorageKey.
static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
#if V8_OS_CYGWIN
  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
  // because pthread_key_t is a pointer type on Cygwin. This will probably not
  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
  return static_cast<Thread::LocalStorageKey>(ptr_key);
#else
  return static_cast<Thread::LocalStorageKey>(pthread_key);
#endif
}
1109 
1110 
// Inverse of PthreadKeyToLocalKey(): recovers the pthread_key_t from V8's
// portable LocalStorageKey.
static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
#if V8_OS_CYGWIN
  // Two-step cast because pthread_key_t is a pointer type on Cygwin.
  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
  intptr_t ptr_key = static_cast<intptr_t>(local_key);
  return reinterpret_cast<pthread_key_t>(ptr_key);
#else
  return static_cast<pthread_key_t>(local_key);
#endif
}
1120 
1121 
1122 #ifdef V8_FAST_TLS_SUPPORTED
1123 
// Set (with release ordering) once InitializeTlsBaseOffset() has run;
// read with acquire ordering in CreateThreadLocalKey().
static std::atomic<bool> tls_base_offset_initialized{false};
// Computed from the Darwin kernel version in InitializeTlsBaseOffset().
// NOTE(review): presumably consumed by the fast-TLS access path defined
// elsewhere; confirm.
intptr_t kMacTlsBaseOffset = 0;
1126 
// It's safe to do the initialization more than once, but it has to be
// done at least once.
// Determines kMacTlsBaseOffset for the running Darwin kernel by parsing the
// major version component out of the KERN_OSRELEASE sysctl string, then
// publishes completion via tls_base_offset_initialized (release store).
// Aborts via FATAL() if the kernel version cannot be queried.
static void InitializeTlsBaseOffset() {
  const size_t kBufferSize = 128;
  char buffer[kBufferSize];
  size_t buffer_size = kBufferSize;
  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
  if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) {
    FATAL("V8 failed to get kernel version");
  }
  // The buffer now contains a string of the form XX.YY.ZZ, where
  // XX is the major kernel version component.
  // Make sure the buffer is 0-terminated.
  buffer[kBufferSize - 1] = '\0';
  // Cut the string at the first '.' so strtol() only sees the major
  // component. Guard against a release string without any '.': the previous
  // code dereferenced the strchr() result unconditionally, which would be a
  // null-pointer dereference for a malformed string.
  char* period_pos = strchr(buffer, '.');
  if (period_pos != nullptr) *period_pos = '\0';
  int kernel_version_major = static_cast<int>(strtol(buffer, nullptr, 10));
  // The constants below are taken from pthreads.s from the XNU kernel
  // sources archive at www.opensource.apple.com.
  if (kernel_version_major < 11) {
    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
    // same offsets.
#if V8_HOST_ARCH_IA32
    kMacTlsBaseOffset = 0x48;
#else
    kMacTlsBaseOffset = 0x60;
#endif
  } else {
    // 11.x.x (Lion) changed the offset.
    kMacTlsBaseOffset = 0;
  }

  tls_base_offset_initialized.store(true, std::memory_order_release);
}
1161 
1162 
// Sanity check for fast TLS: stores a marker value through the regular
// setter, reads it back through GetExistingThreadLocal(), and aborts the
// process if the two disagree. Leaves the slot reset to nullptr.
static void CheckFastTls(Thread::LocalStorageKey key) {
  void* expected = reinterpret_cast<void*>(0x1234CAFE);
  Thread::SetThreadLocal(key, expected);
  void* actual = Thread::GetExistingThreadLocal(key);
  if (expected != actual) {
    FATAL("V8 failed to initialize fast TLS on current kernel");
  }
  Thread::SetThreadLocal(key, nullptr);
}
1172 
1173 #endif  // V8_FAST_TLS_SUPPORTED
1174 
1175 
// Creates a new thread-local storage key. The first call in the process
// (when fast TLS is compiled in) also initializes and then verifies the
// fast TLS base offset.
Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
#ifdef V8_FAST_TLS_SUPPORTED
  bool check_fast_tls = false;
  if (!tls_base_offset_initialized.load(std::memory_order_acquire)) {
    check_fast_tls = true;
    InitializeTlsBaseOffset();
  }
#endif
  pthread_key_t key;
  int result = pthread_key_create(&key, nullptr);
  DCHECK_EQ(0, result);
  USE(result);
  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
#ifdef V8_FAST_TLS_SUPPORTED
  // If we just initialized fast TLS support, make sure it works.
  if (check_fast_tls) CheckFastTls(local_key);
#endif
  return local_key;
}
1195 
1196 
DeleteThreadLocalKey(LocalStorageKey key)1197 void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
1198   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1199   int result = pthread_key_delete(pthread_key);
1200   DCHECK_EQ(0, result);
1201   USE(result);
1202 }
1203 
1204 
GetThreadLocal(LocalStorageKey key)1205 void* Thread::GetThreadLocal(LocalStorageKey key) {
1206   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1207   return pthread_getspecific(pthread_key);
1208 }
1209 
1210 
SetThreadLocal(LocalStorageKey key, void* value)1211 void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
1212   pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
1213   int result = pthread_setspecific(pthread_key, value);
1214   DCHECK_EQ(0, result);
1215   USE(result);
1216 }
1217 
1218 // pthread_getattr_np used below is non portable (hence the _np suffix). We
1219 // keep this version in POSIX as most Linux-compatible derivatives will
1220 // support it. MacOS and FreeBSD are different here.
1221 #if !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) && !defined(_AIX) && \
1222     !defined(V8_OS_SOLARIS)
1223 
1224 // static
// Returns the "start" of the current thread's stack, computed as
// stack base + stack size from the non-portable pthread_getattr_np().
Stack::StackSlot Stack::GetStackStart() {
  pthread_attr_t attr;
  int error = pthread_getattr_np(pthread_self(), &attr);
  if (!error) {
    void* base;
    size_t size;
    error = pthread_attr_getstack(&attr, &base, &size);
    CHECK(!error);
    pthread_attr_destroy(&attr);
    return reinterpret_cast<uint8_t*>(base) + size;
  }

#if defined(V8_LIBC_GLIBC)
  // pthread_getattr_np can fail for the main thread. In this case
  // just like NaCl we rely on the __libc_stack_end to give us
  // the start of the stack.
  // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
  return __libc_stack_end;
#else
  return nullptr;
#endif  // !defined(V8_LIBC_GLIBC)
}
1247 
1248 #endif  // !defined(V8_OS_FREEBSD) && !defined(V8_OS_DARWIN) &&
1249         // !defined(_AIX) && !defined(V8_OS_SOLARIS)
1250 
1251 // static
// Returns the address of the current function's stack frame via the
// compiler builtin __builtin_frame_address(0).
Stack::StackSlot Stack::GetCurrentStackPosition() {
  return __builtin_frame_address(0);
}
1255 
1256 #undef LOG_TAG
1257 #undef MAP_ANONYMOUS
1258 #undef MADV_FREE
1259 
1260 }  // namespace base
1261 }  // namespace v8
1262