// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_UTILS_UTILS_H_
#define V8_UTILS_UTILS_H_

#include <limits.h>
#include <stdlib.h>
#include <string.h>

#include <cmath>
#include <string>
#include <type_traits>

#include "src/base/bits.h"
#include "src/base/compiler-specific.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/safe_conversions.h"
#include "src/base/v8-fallthrough.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/utils/allocation.h"

#if defined(V8_USE_SIPHASH)
#include "src/third_party/siphash/halfsiphash.h"
#endif

#if defined(V8_OS_AIX)
#include <fenv.h>  // NOLINT(build/c++11)
#endif

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// General helper functions

template <typename T>
static T ArithmeticShiftRight(T x, int shift) {
  DCHECK_LE(0, shift);
  if (x < 0) {
    // Right shift of signed values is implementation-defined. Simulate a
    // true arithmetic right shift by adding leading sign bits.
    using UnsignedT = typename std::make_unsigned<T>::type;
    UnsignedT mask = ~(static_cast<UnsignedT>(~0) >> shift);
    return (static_cast<UnsignedT>(x) >> shift) | mask;
  } else {
    return x >> shift;
  }
}
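
// Illustrative usage (values assume a two's-complement int32_t):
//
//   ArithmeticShiftRight<int32_t>(-8, 2);  // == -2 (sign bits shifted in)
//   ArithmeticShiftRight<int32_t>(8, 2);   // == 2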

// Returns the maximum of the two parameters according to JavaScript semantics.
template <typename T>
T JSMax(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return x;
  return x > y ? x : y;
}

// Returns the minimum of the two parameters according to JavaScript semantics.
template <typename T>
T JSMin(T x, T y) {
  if (std::isnan(x)) return x;
  if (std::isnan(y)) return y;
  if (std::signbit(x) < std::signbit(y)) return y;
  return x > y ? y : x;
}
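
// Illustrative cases, mirroring JavaScript's Math.max / Math.min:
//
//   JSMax(+0.0, -0.0);  // == +0.0
//   JSMin(+0.0, -0.0);  // == -0.0
//   JSMax(1.0, std::numeric_limits<double>::quiet_NaN());  // NaN propagates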

// Returns the absolute value of its argument.
template <typename T,
          typename = typename std::enable_if<std::is_signed<T>::value>::type>
typename std::make_unsigned<T>::type Abs(T a) {
  // This is a branch-free implementation of the absolute value function and is
  // described in Warren's "Hacker's Delight", chapter 2. It avoids undefined
  // behavior with the arithmetic negation operation on signed values as well.
  using unsignedT = typename std::make_unsigned<T>::type;
  unsignedT x = static_cast<unsignedT>(a);
  unsignedT y = static_cast<unsignedT>(a >> (sizeof(T) * 8 - 1));
  return (x ^ y) - y;
}
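
// Illustrative usage (note the unsigned return type, which is what makes
// Abs(INT_MIN) well defined):
//
//   Abs(int32_t{-5});                          // == 5u
//   Abs(std::numeric_limits<int32_t>::min());  // == 2147483648u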

inline double Modulo(double x, double y) {
#if defined(V8_OS_WIN)
  // Workaround MS fmod bugs. ECMA-262 says:
  // dividend is finite and divisor is an infinity => result equals dividend
  // dividend is a zero and divisor is nonzero finite => result equals dividend
  if (!(std::isfinite(x) && (!std::isfinite(y) && !std::isnan(y))) &&
      !(x == 0 && (y != 0 && std::isfinite(y)))) {
    double result = fmod(x, y);
    // Workaround MS bug in VS CRT in some OS versions, https://crbug.com/915045
    // fmod(-17, +/-1) should equal -0.0 but now returns 0.0.
    if (x < 0 && result == 0) result = -0.0;
    x = result;
  }
  return x;
#elif defined(V8_OS_AIX)
  // AIX raises an underflow exception for (Number.MIN_VALUE % Number.MAX_VALUE)
  feclearexcept(FE_ALL_EXCEPT);
  double result = std::fmod(x, y);
  int exception = fetestexcept(FE_UNDERFLOW);
  return (exception ? x : result);
#else
  return std::fmod(x, y);
#endif
}
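
// Illustrative cases following the ECMA-262 rules quoted above:
//
//   Modulo(5.5, 2.0);                                      // == 1.5
//   Modulo(5.0, std::numeric_limits<double>::infinity());  // == 5.0
//   Modulo(-0.0, 3.0);                                     // == -0.0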

template <typename T>
T SaturateAdd(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a > 0 && b > 0) {
      if (a > std::numeric_limits<T>::max() - b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b < 0) {
      if (a < std::numeric_limits<T>::min() - b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a > std::numeric_limits<T>::max() - b) {
      return std::numeric_limits<T>::max();
    }
  }
  return a + b;
}

template <typename T>
T SaturateSub(T a, T b) {
  if (std::is_signed<T>::value) {
    if (a >= 0 && b < 0) {
      if (a > std::numeric_limits<T>::max() + b) {
        return std::numeric_limits<T>::max();
      }
    } else if (a < 0 && b > 0) {
      if (a < std::numeric_limits<T>::min() + b) {
        return std::numeric_limits<T>::min();
      }
    }
  } else {
    CHECK(std::is_unsigned<T>::value);
    if (a < b) {
      return static_cast<T>(0);
    }
  }
  return a - b;
}
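
// Illustrative usage: results clamp to the representable range of T instead of
// wrapping around.
//
//   SaturateAdd<int8_t>(120, 10);    // == 127
//   SaturateAdd<uint8_t>(250, 10);   // == 255
//   SaturateSub<uint8_t>(3, 10);     // == 0
//   SaturateSub<int8_t>(-120, 10);   // == -128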

template <typename T>
T SaturateRoundingQMul(T a, T b) {
  // Saturating rounding multiplication for Q-format numbers. See
  // https://en.wikipedia.org/wiki/Q_(number_format) for a description.
  // Specifically this supports Q7, Q15, and Q31. This follows the
  // implementation in simulator-logic-arm64.cc (sqrdmulh) to avoid overflow
  // when a == b == int32 min.
  static_assert(std::is_integral<T>::value, "only integral types");

  constexpr int size_in_bits = sizeof(T) * 8;
  int round_const = 1 << (size_in_bits - 2);
  // Widen before multiplying so that Q31 inputs cannot overflow int.
  int64_t product = static_cast<int64_t>(a) * b;
  product += round_const;
  product >>= (size_in_bits - 1);
  return base::saturated_cast<T>(product);
}
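
// Worked Q15 example (T = int16_t, so a value v represents v / 2^15):
//
//   SaturateRoundingQMul<int16_t>(16384, 16384);    // == 8192 (0.5 * 0.5 == 0.25)
//   SaturateRoundingQMul<int16_t>(-32768, -32768);  // == 32767 (saturates at Q15 max)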

// Multiply two numbers, returning a result that is twice as wide, no overflow.
// Put Wide first so we can use function template argument deduction for Narrow,
// and callers can provide only Wide.
template <typename Wide, typename Narrow>
Wide MultiplyLong(Narrow a, Narrow b) {
  static_assert(
      std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
      "only integral types");
  static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
                "both must have same signedness");
  static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");

  return static_cast<Wide>(a) * static_cast<Wide>(b);
}

// Add two numbers, returning a result that is twice as wide, no overflow.
// Put Wide first so we can use function template argument deduction for Narrow,
// and callers can provide only Wide.
template <typename Wide, typename Narrow>
Wide AddLong(Narrow a, Narrow b) {
  static_assert(
      std::is_integral<Narrow>::value && std::is_integral<Wide>::value,
      "only integral types");
  static_assert(std::is_signed<Narrow>::value == std::is_signed<Wide>::value,
                "both must have same signedness");
  static_assert(sizeof(Narrow) * 2 == sizeof(Wide), "only twice as long");

  return static_cast<Wide>(a) + static_cast<Wide>(b);
}
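
// Illustrative usage: only Wide is spelled out; Narrow is deduced from the
// arguments.
//
//   MultiplyLong<int64_t>(int32_t{1 << 30}, int32_t{4});  // == int64_t{1} << 32
//   AddLong<uint16_t>(uint8_t{200}, uint8_t{100});        // == 300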

template <typename T>
inline T RoundingAverageUnsigned(T a, T b) {
  static_assert(std::is_unsigned<T>::value, "Only for unsigned types");
  static_assert(sizeof(T) < sizeof(uint64_t), "Must be smaller than uint64_t");
  return (static_cast<uint64_t>(a) + static_cast<uint64_t>(b) + 1) >> 1;
}
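
// Illustrative usage: halves round up, and the sum cannot overflow because it
// is computed in 64 bits.
//
//   RoundingAverageUnsigned<uint8_t>(1, 2);      // == 2
//   RoundingAverageUnsigned<uint8_t>(255, 255);  // == 255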

// Helper macros for defining a contiguous sequence of field offset constants.
// Example: (backslashes at the ends of respective lines of this multi-line
// macro definition are omitted here to please the compiler)
//
// #define MAP_FIELDS(V)
//   V(kField1Offset, kTaggedSize)
//   V(kField2Offset, kIntSize)
//   V(kField3Offset, kIntSize)
//   V(kField4Offset, kSystemPointerSize)
//   V(kSize, 0)
//
// DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, MAP_FIELDS)
//
#define DEFINE_ONE_FIELD_OFFSET(Name, Size, ...) \
  Name, Name##End = Name + (Size)-1,

#define DEFINE_FIELD_OFFSET_CONSTANTS(StartOffset, LIST_MACRO) \
  enum {                                                       \
    LIST_MACRO##_StartOffset = StartOffset - 1,                \
    LIST_MACRO(DEFINE_ONE_FIELD_OFFSET)                        \
  };

// Size of the field defined by DEFINE_FIELD_OFFSET_CONSTANTS
#define FIELD_SIZE(Name) (Name##End + 1 - Name)
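
// For the MAP_FIELDS example above, the macro expands (roughly) to:
//
//   enum {
//     MAP_FIELDS_StartOffset = HeapObject::kHeaderSize - 1,
//     kField1Offset, kField1OffsetEnd = kField1Offset + kTaggedSize - 1,
//     kField2Offset, kField2OffsetEnd = kField2Offset + kIntSize - 1,
//     ...
//   };
//
// so each field starts right after the previous one ends, and
// FIELD_SIZE(kField1Offset) == kTaggedSize.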

// Compare two offsets with static cast
#define STATIC_ASSERT_FIELD_OFFSETS_EQUAL(Offset1, Offset2) \
  STATIC_ASSERT(static_cast<int>(Offset1) == Offset2)

// ----------------------------------------------------------------------------
// Hash function.

static const uint64_t kZeroHashSeed = 0;

// Thomas Wang, Integer Hash Functions.
// http://www.concentric.net/~Ttwang/tech/inthash.htm
inline uint32_t ComputeUnseededHash(uint32_t key) {
  uint32_t hash = key;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1;
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11);
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;
}

inline uint32_t ComputeLongHash(uint64_t key) {
  uint64_t hash = key;
  hash = ~hash + (hash << 18);  // hash = (hash << 18) - hash - 1;
  hash = hash ^ (hash >> 31);
  hash = hash * 21;  // hash = (hash + (hash << 2)) + (hash << 4);
  hash = hash ^ (hash >> 11);
  hash = hash + (hash << 6);
  hash = hash ^ (hash >> 22);
  return static_cast<uint32_t>(hash & 0x3fffffff);
}

inline uint32_t ComputeSeededHash(uint32_t key, uint64_t seed) {
#ifdef V8_USE_SIPHASH
  return halfsiphash(key, seed);
#else
  return ComputeLongHash(static_cast<uint64_t>(key) ^ seed);
#endif  // V8_USE_SIPHASH
}

inline uint32_t ComputePointerHash(void* ptr) {
  return ComputeUnseededHash(
      static_cast<uint32_t>(reinterpret_cast<intptr_t>(ptr)));
}

inline uint32_t ComputeAddressHash(Address address) {
  return ComputeUnseededHash(static_cast<uint32_t>(address & 0xFFFFFFFFul));
}
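
// Illustrative usage: each helper returns a hash constrained to 30 bits
// (masked with 0x3fffffff).
//
//   uint32_t h1 = ComputeUnseededHash(42u);
//   uint32_t h2 = ComputeSeededHash(42u, kZeroHashSeed);  // seed-dependent
//   uint32_t h3 = ComputeLongHash(uint64_t{1} << 40);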

// ----------------------------------------------------------------------------
// Miscellaneous

// Memory offset for lower and higher bits in a 64-bit integer.
#if defined(V8_TARGET_LITTLE_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 0;
static const int kInt64UpperHalfMemoryOffset = 4;
#elif defined(V8_TARGET_BIG_ENDIAN)
static const int kInt64LowerHalfMemoryOffset = 4;
static const int kInt64UpperHalfMemoryOffset = 0;
#endif  // V8_TARGET_LITTLE_ENDIAN

// A pointer that can only be set once and doesn't allow NULL values.
template <typename T>
class SetOncePointer {
 public:
  SetOncePointer() = default;

  bool is_set() const { return pointer_ != nullptr; }

  T* get() const {
    DCHECK_NOT_NULL(pointer_);
    return pointer_;
  }

  void set(T* value) {
    DCHECK(pointer_ == nullptr && value != nullptr);
    pointer_ = value;
  }

  SetOncePointer& operator=(T* value) {
    set(value);
    return *this;
  }

  bool operator==(std::nullptr_t) const { return pointer_ == nullptr; }
  bool operator!=(std::nullptr_t) const { return pointer_ != nullptr; }

 private:
  T* pointer_ = nullptr;
};
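
// Illustrative usage (debug builds DCHECK against resetting or null values):
//
//   int object = 0;
//   SetOncePointer<int> p;
//   p.is_set();   // false
//   p = &object;  // first, non-null assignment is OK
//   p.get();      // == &object
//   // Assigning again, or assigning nullptr, would fail a DCHECK.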

// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline bool CompareCharsEqualUnsigned(const lchar* lhs, const rchar* rhs,
                                      size_t chars) {
  STATIC_ASSERT(std::is_unsigned<lchar>::value);
  STATIC_ASSERT(std::is_unsigned<rchar>::value);
  if (sizeof(*lhs) == sizeof(*rhs)) {
    // memcmp compares byte-by-byte, but for equality it doesn't matter whether
    // two-byte char comparison is little- or big-endian.
    return memcmp(lhs, rhs, chars * sizeof(*lhs)) == 0;
  }
  for (const lchar* limit = lhs + chars; lhs < limit; ++lhs, ++rhs) {
    if (*lhs != *rhs) return false;
  }
  return true;
}

template <typename lchar, typename rchar>
inline bool CompareCharsEqual(const lchar* lhs, const rchar* rhs,
                              size_t chars) {
  using ulchar = typename std::make_unsigned<lchar>::type;
  using urchar = typename std::make_unsigned<rchar>::type;
  return CompareCharsEqualUnsigned(reinterpret_cast<const ulchar*>(lhs),
                                   reinterpret_cast<const urchar*>(rhs), chars);
}

// Compare 8bit/16bit chars to 8bit/16bit chars.
template <typename lchar, typename rchar>
inline int CompareCharsUnsigned(const lchar* lhs, const rchar* rhs,
                                size_t chars) {
  STATIC_ASSERT(std::is_unsigned<lchar>::value);
  STATIC_ASSERT(std::is_unsigned<rchar>::value);
  if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
    // memcmp compares byte-by-byte, yielding wrong results for two-byte
    // strings on little-endian systems, so only use it for 8-bit chars.
    return memcmp(lhs, rhs, chars);
  }
  for (const lchar* limit = lhs + chars; lhs < limit; ++lhs, ++rhs) {
    int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
    if (r != 0) return r;
  }
  return 0;
}

template <typename lchar, typename rchar>
inline int CompareChars(const lchar* lhs, const rchar* rhs, size_t chars) {
  using ulchar = typename std::make_unsigned<lchar>::type;
  using urchar = typename std::make_unsigned<rchar>::type;
  return CompareCharsUnsigned(reinterpret_cast<const ulchar*>(lhs),
                              reinterpret_cast<const urchar*>(rhs), chars);
}
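
// Illustrative usage: 8-bit and 16-bit character sequences can be mixed.
//
//   const char one_byte[] = {'a', 'b', 'c'};
//   const uint16_t two_byte[] = {'a', 'b', 'c'};
//   CompareCharsEqual(one_byte, two_byte, 3);  // true
//   CompareChars(one_byte, two_byte, 3);       // == 0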

// Calculate 10^exponent.
inline int TenToThe(int exponent) {
  DCHECK_LE(exponent, 9);
  DCHECK_GE(exponent, 1);
  int answer = 10;
  for (int i = 1; i < exponent; i++) answer *= 10;
  return answer;
}

// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, uint32_t x) {
  return static_cast<int32_t>(x << (31 - msb)) >> (lsb + 31 - msb);
}
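
// Illustrative usage: msb and lsb are inclusive bit indices.
//
//   unsigned_bitextract_32(7, 4, 0xABCD);  // == 0xC (bits [7:4])
//   signed_bitextract_32(3, 0, 0xF);       // == -1 (bits [3:0], sign-extended)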

// Check number width.
inline bool is_intn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < 64));
  int64_t limit = static_cast<int64_t>(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(int64_t x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return !(x >> n);
}

template <class T>
inline T truncate_to_intn(T x, unsigned n) {
  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
  return (x & ((static_cast<T>(1) << n) - 1));
}

// clang-format off
#define INT_1_TO_63_LIST(V)                                   \
  V(1) V(2) V(3) V(4) V(5) V(6) V(7) V(8) V(9) V(10)          \
  V(11) V(12) V(13) V(14) V(15) V(16) V(17) V(18) V(19) V(20) \
  V(21) V(22) V(23) V(24) V(25) V(26) V(27) V(28) V(29) V(30) \
  V(31) V(32) V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40) \
  V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48) V(49) V(50) \
  V(51) V(52) V(53) V(54) V(55) V(56) V(57) V(58) V(59) V(60) \
  V(61) V(62) V(63)
// clang-format on

#define DECLARE_IS_INT_N(N) \
  inline bool is_int##N(int64_t x) { return is_intn(x, N); }
#define DECLARE_IS_UINT_N(N)    \
  template <class T>            \
  inline bool is_uint##N(T x) { \
    return is_uintn(x, N);      \
  }
#define DECLARE_TRUNCATE_TO_INT_N(N) \
  template <class T>                 \
  inline T truncate_to_int##N(T x) { \
    return truncate_to_intn(x, N);   \
  }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
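
// The INT_1_TO_63_LIST expansion defines is_int1..is_int63,
// is_uint1..is_uint63 and truncate_to_int1..truncate_to_int63. For example:
//
//   is_int8(127);             // true
//   is_int8(128);             // false
//   is_uint4(15);             // true
//   truncate_to_int8(0x1ff);  // == 0xff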

// clang-format off
#define INT_0_TO_127_LIST(V)                                          \
V(0)   V(1)   V(2)   V(3)   V(4)   V(5)   V(6)   V(7)   V(8)   V(9)   \
V(10)  V(11)  V(12)  V(13)  V(14)  V(15)  V(16)  V(17)  V(18)  V(19)  \
V(20)  V(21)  V(22)  V(23)  V(24)  V(25)  V(26)  V(27)  V(28)  V(29)  \
V(30)  V(31)  V(32)  V(33)  V(34)  V(35)  V(36)  V(37)  V(38)  V(39)  \
V(40)  V(41)  V(42)  V(43)  V(44)  V(45)  V(46)  V(47)  V(48)  V(49)  \
V(50)  V(51)  V(52)  V(53)  V(54)  V(55)  V(56)  V(57)  V(58)  V(59)  \
V(60)  V(61)  V(62)  V(63)  V(64)  V(65)  V(66)  V(67)  V(68)  V(69)  \
V(70)  V(71)  V(72)  V(73)  V(74)  V(75)  V(76)  V(77)  V(78)  V(79)  \
V(80)  V(81)  V(82)  V(83)  V(84)  V(85)  V(86)  V(87)  V(88)  V(89)  \
V(90)  V(91)  V(92)  V(93)  V(94)  V(95)  V(96)  V(97)  V(98)  V(99)  \
V(100) V(101) V(102) V(103) V(104) V(105) V(106) V(107) V(108) V(109) \
V(110) V(111) V(112) V(113) V(114) V(115) V(116) V(117) V(118) V(119) \
V(120) V(121) V(122) V(123) V(124) V(125) V(126) V(127)
// clang-format on

class FeedbackSlot {
 public:
  FeedbackSlot() : id_(kInvalidSlot) {}
  explicit FeedbackSlot(int id) : id_(id) {}

  int ToInt() const { return id_; }

  static FeedbackSlot Invalid() { return FeedbackSlot(); }
  bool IsInvalid() const { return id_ == kInvalidSlot; }

  bool operator==(FeedbackSlot that) const { return this->id_ == that.id_; }
  bool operator!=(FeedbackSlot that) const { return !(*this == that); }

  friend size_t hash_value(FeedbackSlot slot) { return slot.ToInt(); }
  V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream& os,
                                                    FeedbackSlot);

  FeedbackSlot WithOffset(int offset) const {
    return FeedbackSlot(id_ + offset);
  }

 private:
  static const int kInvalidSlot = -1;

  int id_;
};

V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);

class BytecodeOffset {
 public:
  explicit constexpr BytecodeOffset(int id) : id_(id) {}
  constexpr int ToInt() const { return id_; }

  static constexpr BytecodeOffset None() { return BytecodeOffset(kNoneId); }

  // Special bailout id support for deopting into the {JSConstructStub} stub.
  // The following hard-coded deoptimization points are supported by the stub:
  //  - {ConstructStubCreate} maps to {construct_stub_create_deopt_pc_offset}.
  //  - {ConstructStubInvoke} maps to {construct_stub_invoke_deopt_pc_offset}.
  static BytecodeOffset ConstructStubCreate() { return BytecodeOffset(1); }
  static BytecodeOffset ConstructStubInvoke() { return BytecodeOffset(2); }
  bool IsValidForConstructStub() const {
    return id_ == ConstructStubCreate().ToInt() ||
           id_ == ConstructStubInvoke().ToInt();
  }

  constexpr bool IsNone() const { return id_ == kNoneId; }
  bool operator==(const BytecodeOffset& other) const {
    return id_ == other.id_;
  }
  bool operator!=(const BytecodeOffset& other) const {
    return id_ != other.id_;
  }
  friend size_t hash_value(BytecodeOffset);
  V8_EXPORT_PRIVATE friend std::ostream& operator<<(std::ostream&,
                                                    BytecodeOffset);

 private:
  friend class Builtins;

  static const int kNoneId = -1;

  // Using 0 could disguise errors.
  // Builtin continuations bailout ids start here. If you need to add a
  // non-builtin BytecodeOffset, add it before this id so that this id has the
  // highest number.
  static const int kFirstBuiltinContinuationId = 1;

  int id_;
};

// ----------------------------------------------------------------------------
// I/O support.

// Our version of printf().
V8_EXPORT_PRIVATE void PRINTF_FORMAT(1, 2) PrintF(const char* format, ...);
V8_EXPORT_PRIVATE void PRINTF_FORMAT(2, 3)
    PrintF(FILE* out, const char* format, ...);

// Prepends the current process ID to the output.
void PRINTF_FORMAT(1, 2) PrintPID(const char* format, ...);

// Prepends the current process ID and given isolate pointer to the output.
void PRINTF_FORMAT(2, 3) PrintIsolate(void* isolate, const char* format, ...);

// Read a line of characters after printing the prompt to stdout. The resulting
// char* needs to be disposed of with DeleteArray by the caller.
char* ReadLine(const char* prompt);

// Write size chars from str to the file given by filename.
// The file is overwritten. Returns the number of chars written.
int WriteChars(const char* filename, const char* str, int size,
               bool verbose = true);

// Write size bytes to the file given by filename.
// The file is overwritten. Returns the number of bytes written.
int WriteBytes(const char* filename, const byte* bytes, int size,
               bool verbose = true);

// Simple support to read a file into std::string.
// On return, *exists tells whether the file existed.
V8_EXPORT_PRIVATE std::string ReadFile(const char* filename, bool* exists,
                                       bool verbose = true);
V8_EXPORT_PRIVATE std::string ReadFile(FILE* file, bool* exists,
                                       bool verbose = true);

bool DoubleToBoolean(double d);

template <typename Char>
bool TryAddIndexChar(uint32_t* index, Char c);

enum ToIndexMode { kToArrayIndex, kToIntegerIndex };

// {index_t} is meant to be {uint32_t} or {size_t}.
template <typename Stream, typename index_t,
          enum ToIndexMode mode = kToArrayIndex>
bool StringToIndex(Stream* stream, index_t* index);

// Returns the current stack top. Works correctly with ASAN and SafeStack.
// GetCurrentStackPosition() should not be inlined, because it works on stack
// frames: if it were inlined into a function with a huge stack frame, it would
// return an address significantly above the actual current stack position.
V8_EXPORT_PRIVATE V8_NOINLINE uintptr_t GetCurrentStackPosition();

static inline uint16_t ByteReverse16(uint16_t value) {
#if V8_HAS_BUILTIN_BSWAP16
  return __builtin_bswap16(value);
#else
  return value << 8 | (value >> 8 & 0x00FF);
#endif
}

static inline uint32_t ByteReverse32(uint32_t value) {
#if V8_HAS_BUILTIN_BSWAP32
  return __builtin_bswap32(value);
#else
  return value << 24 | ((value << 8) & 0x00FF0000) |
         ((value >> 8) & 0x0000FF00) | ((value >> 24) & 0x000000FF);
#endif
}

static inline uint64_t ByteReverse64(uint64_t value) {
#if V8_HAS_BUILTIN_BSWAP64
  return __builtin_bswap64(value);
#else
  size_t bits_of_v = sizeof(value) * kBitsPerByte;
  return value << (bits_of_v - 8) |
         ((value << (bits_of_v - 24)) & 0x00FF000000000000) |
         ((value << (bits_of_v - 40)) & 0x0000FF0000000000) |
         ((value << (bits_of_v - 56)) & 0x000000FF00000000) |
         ((value >> (bits_of_v - 56)) & 0x00000000FF000000) |
         ((value >> (bits_of_v - 40)) & 0x0000000000FF0000) |
         ((value >> (bits_of_v - 24)) & 0x000000000000FF00) |
         ((value >> (bits_of_v - 8)) & 0x00000000000000FF);
#endif
}

template <typename V>
static inline V ByteReverse(V value) {
  size_t size_of_v = sizeof(value);
  switch (size_of_v) {
    case 1:
      return value;
    case 2:
      return static_cast<V>(ByteReverse16(static_cast<uint16_t>(value)));
    case 4:
      return static_cast<V>(ByteReverse32(static_cast<uint32_t>(value)));
    case 8:
      return static_cast<V>(ByteReverse64(static_cast<uint64_t>(value)));
    default:
      UNREACHABLE();
  }
}
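
// Illustrative usage:
//
//   ByteReverse16(uint16_t{0x1234});            // == 0x3412
//   ByteReverse32(0x12345678u);                 // == 0x78563412
//   ByteReverse(uint64_t{0x0102030405060708});  // == 0x0807060504030201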

#if V8_OS_AIX
// glibc on AIX has a bug when using ceil, trunc or nearbyint:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=97086
template <typename T>
T FpOpWorkaround(T input, T value) {
  if (/*if -*/ std::signbit(input) && value == 0.0 &&
      /*if +*/ !std::signbit(value)) {
    return -0.0;
  }
  return value;
}
#endif

V8_EXPORT_PRIVATE bool PassesFilter(base::Vector<const char> name,
                                    base::Vector<const char> filter);

// Zap the specified area with a specific byte pattern. This currently defaults
// to int3 on x64 and ia32. On other architectures this will produce unspecified
// instruction sequences.
// TODO(jgruber): Better support for other architectures.
V8_INLINE void ZapCode(Address addr, size_t size_in_bytes) {
  static constexpr int kZapByte = 0xCC;
  std::memset(reinterpret_cast<void*>(addr), kZapByte, size_in_bytes);
}

inline bool RoundUpToPageSize(size_t byte_length, size_t page_size,
                              size_t max_allowed_byte_length, size_t* pages) {
  size_t bytes_wanted = RoundUp(byte_length, page_size);
  if (bytes_wanted > max_allowed_byte_length) {
    return false;
  }
  *pages = bytes_wanted / page_size;
  return true;
}
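
// Illustrative usage (returns false instead of rounding past the limit):
//
//   size_t pages = 0;
//   RoundUpToPageSize(10000, 4096, 1u << 20, &pages);     // true, pages == 3
//   RoundUpToPageSize(2u << 20, 4096, 1u << 20, &pages);  // false: exceeds limit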

}  // namespace internal
}  // namespace v8

#endif  // V8_UTILS_UTILS_H_