// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PLATFORM_H_
#define V8_V8_PLATFORM_H_

#include <math.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>  // For abort.

#include <memory>
#include <string>

#include "v8-source-location.h"  // NOLINT(build/include_directory)
#include "v8config.h"  // NOLINT(build/include_directory)

namespace v8 {

class Isolate;

// Valid priorities supported by the task scheduling infrastructure.
enum class TaskPriority : uint8_t {
  /**
   * Best effort tasks are not critical for performance of the application. The
   * platform implementation should preempt such tasks if higher priority tasks
   * arrive.
   */
  kBestEffort,
  /**
   * User visible tasks are long running background tasks that will
   * improve performance and memory usage of the application upon completion.
   * Example: background compilation and garbage collection.
   */
  kUserVisible,
  /**
   * User blocking tasks are highest priority tasks that block the execution
   * thread (e.g. major garbage collection). They must be finished as soon as
   * possible.
   */
  kUserBlocking,
};

/**
 * A Task represents a unit of work.
 */
class Task {
 public:
  virtual ~Task() = default;

  virtual void Run() = 0;
};
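
// For illustration only (not part of the V8 API): a minimal Task subclass
// that wraps an arbitrary callable (requires <functional>). The name
// LambdaTask is hypothetical.
//
//   class LambdaTask : public Task {
//    public:
//     explicit LambdaTask(std::function<void()> fn) : fn_(std::move(fn)) {}
//     void Run() override { fn_(); }  // Invoked once by the scheduler.
//
//    private:
//     std::function<void()> fn_;
//   };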

/**
 * An IdleTask represents a unit of work to be performed in idle time.
 * The Run method is invoked with an argument that specifies the deadline in
 * seconds returned by MonotonicallyIncreasingTime().
 * The idle task is expected to complete by this deadline.
 */
class IdleTask {
 public:
  virtual ~IdleTask() = default;
  virtual void Run(double deadline_in_seconds) = 0;
};
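
// For illustration only (not part of the V8 API): an IdleTask sketch that
// performs small chunks of work until the deadline passes. |platform_| and
// DoSomeWork() are hypothetical stand-ins for embedder code.
//
//   class CleanupIdleTask : public IdleTask {
//    public:
//     void Run(double deadline_in_seconds) override {
//       // Compare against the same clock that produced the deadline.
//       while (platform_->MonotonicallyIncreasingTime() <
//              deadline_in_seconds) {
//         if (!DoSomeWork()) return;  // No work left; finish early.
//       }
//     }
//
//    private:
//     Platform* platform_;  // Hypothetical; set by the embedder.
//   };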

/**
 * A TaskRunner allows scheduling of tasks. The TaskRunner may still be used to
 * post tasks after the isolate gets destroyed, but these tasks may not get
 * executed anymore. All tasks posted to a given TaskRunner will be invoked in
 * sequence. Tasks can be posted from any thread.
 */
class TaskRunner {
 public:
  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostTask(std::unique_ptr<Task> task) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableTasksEnabled()| is true.
   */
  virtual void PostNonNestableTask(std::unique_ptr<Task> task) {}

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|.
   */
  virtual void PostDelayedTask(std::unique_ptr<Task> task,
                               double delay_in_seconds) = 0;

  /**
   * Schedules a task to be invoked by this TaskRunner. The task is scheduled
   * after the given number of seconds |delay_in_seconds|. The TaskRunner
   * implementation takes ownership of |task|. The |task| cannot be nested
   * within other task executions.
   *
   * Tasks which shouldn't be interleaved with JS execution must be posted with
   * |PostNonNestableTask| or |PostNonNestableDelayedTask|. This is because the
   * embedder may process tasks in a callback which is called during JS
   * execution.
   *
   * In particular, tasks which execute JS must be non-nestable, since JS
   * execution is not allowed to nest.
   *
   * Requires that |TaskRunner::NonNestableDelayedTasksEnabled()| is true.
   */
  virtual void PostNonNestableDelayedTask(std::unique_ptr<Task> task,
                                          double delay_in_seconds) {}

  /**
   * Schedules an idle task to be invoked by this TaskRunner. The task is
   * scheduled when the embedder is idle. Requires that
   * |TaskRunner::IdleTasksEnabled()| is true. Idle tasks may be reordered
   * relative to other task types and may be starved for an arbitrarily long
   * time if no idle time is available. The TaskRunner implementation takes
   * ownership of |task|.
   */
  virtual void PostIdleTask(std::unique_ptr<IdleTask> task) = 0;

  /**
   * Returns true if idle tasks are enabled for this TaskRunner.
   */
  virtual bool IdleTasksEnabled() = 0;

  /**
   * Returns true if non-nestable tasks are enabled for this TaskRunner.
   */
  virtual bool NonNestableTasksEnabled() const { return false; }

  /**
   * Returns true if non-nestable delayed tasks are enabled for this
   * TaskRunner.
   */
  virtual bool NonNestableDelayedTasksEnabled() const { return false; }

  TaskRunner() = default;
  virtual ~TaskRunner() = default;

  TaskRunner(const TaskRunner&) = delete;
  TaskRunner& operator=(const TaskRunner&) = delete;
};
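
// For illustration only (not part of the V8 API): posting work to the
// foreground TaskRunner. LambdaTask is the hypothetical helper sketched
// above; |platform| and |isolate| come from the embedder.
//
//   std::shared_ptr<TaskRunner> runner =
//       platform->GetForegroundTaskRunner(isolate);
//   runner->PostTask(std::make_unique<LambdaTask>([] { /* work */ }));
//   // Runs no earlier than 2 seconds from now.
//   runner->PostDelayedTask(std::make_unique<LambdaTask>([] { /* work */ }),
//                           2.0);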

/**
 * Delegate that's passed to Job's worker task, providing an entry point to
 * communicate with the scheduler.
 */
class JobDelegate {
 public:
  /**
   * Returns true if the worker task *must* return from Run() on the current
   * thread ASAP. Workers should periodically invoke ShouldYield() (or
   * YieldIfNeeded()) as often as is reasonable.
   * After this method has returned true, ShouldYield() must not be called
   * again.
   */
  virtual bool ShouldYield() = 0;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Returns a task_id unique among threads currently running this job, such
   * that GetTaskId() < worker count. To achieve this, the same task_id may be
   * reused by a different thread after a worker_task returns.
   */
  virtual uint8_t GetTaskId() = 0;

  /**
   * Returns true if the current task is called from the thread currently
   * running JobHandle::Join().
   */
  virtual bool IsJoiningThread() const = 0;
};

/**
 * Handle returned when posting a Job. Provides methods to control execution of
 * the posted Job.
 */
class JobHandle {
 public:
  virtual ~JobHandle() = default;

  /**
   * Notifies the scheduler that max concurrency was increased, and the number
   * of workers should be adjusted accordingly. See Platform::PostJob() for
   * more details.
   */
  virtual void NotifyConcurrencyIncrease() = 0;

  /**
   * Contributes to the job on this thread. Doesn't return until all tasks have
   * completed and max concurrency becomes 0. When Join() is called and max
   * concurrency reaches 0, it should not increase again. This also promotes
   * this Job's priority to be at least as high as the calling thread's
   * priority.
   */
  virtual void Join() = 0;

  /**
   * Forces all existing workers to yield ASAP. Waits until they have all
   * returned from the Job's callback before returning.
   */
  virtual void Cancel() = 0;

  /**
   * Forces all existing workers to yield ASAP but doesn't wait for them.
   * Warning: this is dangerous if the Job's callback is bound to or has access
   * to state which may be deleted after this call.
   */
  virtual void CancelAndDetach() = 0;

  /**
   * Returns true if there's any work pending or any worker running.
   */
  virtual bool IsActive() = 0;

  /**
   * Returns true if associated with a Job and other methods may be called.
   * Returns false after Join() or Cancel() was called. This may return true
   * even if no workers are running and IsCompleted() returns true.
   */
  virtual bool IsValid() = 0;

  /**
   * Returns true if job priority can be changed.
   */
  virtual bool UpdatePriorityEnabled() const { return false; }

  /**
   * Updates this Job's priority.
   */
  virtual void UpdatePriority(TaskPriority new_priority) {}
};

/**
 * A JobTask represents work to run in parallel from Platform::PostJob().
 */
class JobTask {
 public:
  virtual ~JobTask() = default;

  virtual void Run(JobDelegate* delegate) = 0;

  /**
   * Controls the maximum number of threads calling Run() concurrently, given
   * the number of threads currently assigned to this job and executing Run().
   * Run() is only invoked if the number of threads previously running Run()
   * was less than the value returned. In general, this should return the
   * latest number of incomplete work items (smallest unit of work) left to
   * process, including items that are currently in progress. |worker_count|
   * is the number of threads currently assigned to this job, which some
   * callers may need in order to determine their return value. Since
   * GetMaxConcurrency() is a leaf function, it must not call back any
   * JobHandle methods.
   */
  virtual size_t GetMaxConcurrency(size_t worker_count) const = 0;
};
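
// For illustration only (not part of the V8 API): a JobTask that reports its
// remaining work through an atomic counter, as GetMaxConcurrency() requires
// (requires <atomic>). TryProcessOneItem() is a hypothetical thread-safe
// helper; only the concurrency-accounting pattern is the point here.
//
//   class CounterJobTask : public JobTask {
//    public:
//     void Run(JobDelegate* delegate) override {
//       while (!delegate->ShouldYield() && TryProcessOneItem()) {
//         remaining_.fetch_sub(1, std::memory_order_relaxed);
//       }
//     }
//     size_t GetMaxConcurrency(size_t /* worker_count */) const override {
//       return remaining_.load(std::memory_order_relaxed);
//     }
//
//    private:
//     std::atomic<size_t> remaining_{0};
//   };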

/**
 * A "blocking call" refers to any call that causes the calling thread to wait
 * off-CPU. It includes but is not limited to calls that wait on synchronous
 * file I/O operations: read or write a file from disk, interact with a pipe or
 * a socket, rename or delete a file, enumerate files in a directory, etc.
 * Acquiring a low contention lock is not considered a blocking call.
 */

/**
 * BlockingType indicates the likelihood that a blocking call will actually
 * block.
 */
enum class BlockingType {
  // The call might block (e.g. file I/O that might hit in memory cache).
  kMayBlock,
  // The call will definitely block (e.g. cache already checked and now pinging
  // server synchronously).
  kWillBlock
};

/**
 * This class is instantiated with CreateBlockingScope() in every scope where a
 * blocking call is made and serves as a precise annotation of the scope that
 * may/will block. May be implemented by an embedder to adjust the thread count.
 * CPU usage should be minimal within that scope. ScopedBlockingCalls can be
 * nested.
 */
class ScopedBlockingCall {
 public:
  virtual ~ScopedBlockingCall() = default;
};
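
// For illustration only (not part of the V8 API): annotating a blocking read.
// |platform| and ReadFileSync() are hypothetical stand-ins. Note the default
// Platform::CreateBlockingScope() may return nullptr.
//
//   {
//     std::unique_ptr<ScopedBlockingCall> scope =
//         platform->CreateBlockingScope(BlockingType::kWillBlock);
//     ReadFileSync(path);  // The scope covers exactly the blocking call.
//   }  // Scope ends; the embedder may shrink its worker pool again.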

/**
 * The interface represents complex arguments to trace events.
 */
class ConvertableToTraceFormat {
 public:
  virtual ~ConvertableToTraceFormat() = default;

  /**
   * Append the class info to the provided |out| string. The appended
   * data must be a valid JSON object. Strings must be properly quoted and
   * escaped. There is no processing applied to the content after it is
   * appended.
   */
  virtual void AppendAsTraceFormat(std::string* out) const = 0;
};
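
// For illustration only (not part of the V8 API): a trace argument that
// serializes itself as a single JSON object, as AppendAsTraceFormat()
// requires. The class and its field are hypothetical.
//
//   class HeapStatsArg : public ConvertableToTraceFormat {
//    public:
//     void AppendAsTraceFormat(std::string* out) const override {
//       // Emits, e.g., {"used_bytes":1024,"phase":"mark"} -- valid JSON
//       // with quoted string values.
//       out->append("{\"used_bytes\":");
//       out->append(std::to_string(used_bytes_));
//       out->append(",\"phase\":\"mark\"}");
//     }
//
//    private:
//     size_t used_bytes_ = 1024;
//   };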

/**
 * V8 Tracing controller.
 *
 * Can be implemented by an embedder to record trace events from V8.
 *
 * Will become obsolete in Perfetto SDK build (v8_use_perfetto = true).
 */
class TracingController {
 public:
  virtual ~TracingController() = default;

  // In Perfetto mode, trace events are written using Perfetto's Track Event
  // API directly without going through the embedder. However, it is still
  // possible to observe tracing being enabled and disabled.
#if !defined(V8_USE_PERFETTO)
  /**
   * Called by TRACE_EVENT* macros, don't call this directly.
   * The name parameter is a category group for example:
   * TRACE_EVENT0("v8,parse", "V8.Parse")
   * The pointer returned points to a value with zero or more of the bits
   * defined in CategoryGroupEnabledFlags.
   **/
  virtual const uint8_t* GetCategoryGroupEnabled(const char* name) {
    static uint8_t no = 0;
    return &no;
  }

  /**
   * Adds a trace event to the platform tracing system. These function calls
   * are usually the result of a TRACE_* macro from trace_event_common.h when
   * tracing and the category of the particular trace are enabled. It is not
   * advisable to call these functions on their own; they are really only meant
   * to be used by the trace macros. The returned handle can be used by
   * UpdateTraceEventDuration to update the duration of COMPLETE events.
   */
  virtual uint64_t AddTraceEvent(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags) {
    return 0;
  }
  virtual uint64_t AddTraceEventWithTimestamp(
      char phase, const uint8_t* category_enabled_flag, const char* name,
      const char* scope, uint64_t id, uint64_t bind_id, int32_t num_args,
      const char** arg_names, const uint8_t* arg_types,
      const uint64_t* arg_values,
      std::unique_ptr<ConvertableToTraceFormat>* arg_convertables,
      unsigned int flags, int64_t timestamp) {
    return 0;
  }

  /**
   * Sets the duration field of a COMPLETE trace event. It must be called with
   * the handle returned from AddTraceEvent().
   **/
  virtual void UpdateTraceEventDuration(const uint8_t* category_enabled_flag,
                                        const char* name, uint64_t handle) {}
#endif  // !defined(V8_USE_PERFETTO)

  class TraceStateObserver {
   public:
    virtual ~TraceStateObserver() = default;
    virtual void OnTraceEnabled() = 0;
    virtual void OnTraceDisabled() = 0;
  };

  /**
   * Adds tracing state change observer.
   * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
   */
  virtual void AddTraceStateObserver(TraceStateObserver*) {}

  /**
   * Removes tracing state change observer.
   * Does nothing in Perfetto SDK build (v8_use_perfetto = true).
   */
  virtual void RemoveTraceStateObserver(TraceStateObserver*) {}
};

/**
 * A V8 memory page allocator.
 *
 * Can be implemented by an embedder to manage large host OS allocations.
 */
class PageAllocator {
 public:
  virtual ~PageAllocator() = default;

  /**
   * Gets the page granularity for AllocatePages and FreePages. Addresses and
   * lengths for those calls should be multiples of AllocatePageSize().
   */
  virtual size_t AllocatePageSize() = 0;

  /**
   * Gets the page granularity for SetPermissions and ReleasePages. Addresses
   * and lengths for those calls should be multiples of CommitPageSize().
   */
  virtual size_t CommitPageSize() = 0;

  /**
   * Sets the random seed so that GetRandomMmapAddr() will generate repeatable
   * sequences of random mmap addresses.
   */
  virtual void SetRandomMmapSeed(int64_t seed) = 0;

  /**
   * Returns a randomized address, suitable for memory allocation under ASLR.
   * The address will be aligned to AllocatePageSize.
   */
  virtual void* GetRandomMmapAddr() = 0;

  /**
   * Memory permissions.
   */
  enum Permission {
    kNoAccess,
    kRead,
    kReadWrite,
    kReadWriteExecute,
    kReadExecute,
    // Set this when reserving memory that will later require kReadWriteExecute
    // permissions. The resulting behavior is platform-specific, currently
    // this is used to set the MAP_JIT flag on Apple Silicon.
    // TODO(jkummerow): Remove this when Wasm has a platform-independent
    // w^x implementation.
    // TODO(saelo): Remove this once all JIT pages are allocated through the
    // VirtualAddressSpace API.
    kNoAccessWillJitLater
  };

  /**
   * Allocates memory in range with the given alignment and permission.
   */
  virtual void* AllocatePages(void* address, size_t length, size_t alignment,
                              Permission permissions) = 0;

  /**
   * Frees memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool FreePages(void* address, size_t length) = 0;

  /**
   * Releases memory in a range that was allocated by a call to AllocatePages.
   */
  virtual bool ReleasePages(void* address, size_t length,
                            size_t new_length) = 0;

  /**
   * Sets permissions on pages in an allocated range.
   */
  virtual bool SetPermissions(void* address, size_t length,
                              Permission permissions) = 0;

  /**
   * Recommits discarded pages in the given range with given permissions.
   * Discarded pages must be recommitted with their original permissions
   * before they are used again.
   */
  virtual bool RecommitPages(void* address, size_t length,
                             Permission permissions) {
    // TODO(v8:12797): make it pure once it's implemented on Chromium side.
    return false;
  }

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be operating system page-aligned. The next write to this
   * memory area brings the memory transparently back. This should be treated
   * as a hint to the OS that the pages are no longer needed. It does not
   * guarantee that the pages will be discarded immediately or at all.
   */
  virtual bool DiscardSystemPages(void* address, size_t size) { return true; }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The
   * address range stays reserved and can be accessed again later by changing
   * its permissions. However, in that case the memory content is guaranteed to
   * be zero-initialized again. The memory must have been previously allocated
   * by a call to AllocatePages. Returns true on success, false otherwise.
   */
  virtual bool DecommitPages(void* address, size_t size) = 0;

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemoryMapping {
   public:
    // Implementations are expected to free the shared memory mapping in the
    // destructor.
    virtual ~SharedMemoryMapping() = default;
    virtual void* GetMemory() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   */
  class SharedMemory {
   public:
    // Implementations are expected to free the shared memory in the
    // destructor.
    virtual ~SharedMemory() = default;
    virtual std::unique_ptr<SharedMemoryMapping> RemapTo(
        void* new_address) const = 0;
    virtual void* GetMemory() const = 0;
    virtual size_t GetSize() const = 0;
  };

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Reserves pages at a fixed address, returning whether the reservation is
   * possible. The reserved memory is detached from the PageAllocator and so
   * should not be freed by it. It's intended for use with
   * SharedMemory::RemapTo, where ~SharedMemoryMapping would free the memory.
   */
  virtual bool ReserveForSharedMemoryMapping(void* address, size_t size) {
    return false;
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * Allocates shared memory pages. Not all PageAllocators need to support this
   * and so this method need not be overridden.
   * Allocates a new read-only shared memory region of size |length| and copies
   * the memory at |original_address| into it.
   */
  virtual std::unique_ptr<SharedMemory> AllocateSharedPages(
      size_t length, const void* original_address) {
    return {};
  }

  /**
   * INTERNAL ONLY: This interface has not been stabilised and may change
   * without notice from one release to another without being deprecated first.
   *
   * If not overridden and changed to return true, V8 will not attempt to call
   * AllocateSharedPages or RemapSharedPages. If overridden, AllocateSharedPages
   * and RemapSharedPages must also be overridden.
   */
  virtual bool CanAllocateSharedPages() { return false; }
};
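
// For illustration only (not part of the V8 API): a typical reserve, commit,
// use, free cycle against a PageAllocator. |platform| is assumed to provide
// the allocator; error handling is elided and memset needs <string.h>.
//
//   PageAllocator* allocator = platform->GetPageAllocator();
//   size_t size = allocator->AllocatePageSize();  // One allocation granule.
//   void* region =
//       allocator->AllocatePages(allocator->GetRandomMmapAddr(), size, size,
//                                PageAllocator::kNoAccess);
//   // Commit with read/write access before first use.
//   allocator->SetPermissions(region, size, PageAllocator::kReadWrite);
//   memset(region, 0, size);
//   allocator->FreePages(region, size);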

// Opaque type representing a handle to a shared memory region.
using PlatformSharedMemoryHandle = intptr_t;
static constexpr PlatformSharedMemoryHandle kInvalidSharedMemoryHandle = -1;

// Conversion routines from the platform-dependent shared memory identifiers
// into the opaque PlatformSharedMemoryHandle type. These use the underlying
// types (e.g. unsigned int) instead of the typedef'd ones (e.g. mach_port_t)
// to avoid pulling in large OS header files into this header file. Instead,
// the users of these routines are expected to include the respective OS
// headers in addition to this one.
#if V8_OS_DARWIN
// Convert between a shared memory handle and a mach_port_t referencing a
// memory entry object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromMachMemoryEntry(
    unsigned int port) {
  return static_cast<PlatformSharedMemoryHandle>(port);
}
inline unsigned int MachMemoryEntryFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<unsigned int>(handle);
}
#elif V8_OS_FUCHSIA
// Convert between a shared memory handle and a zx_handle_t to a VMO.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromVMO(uint32_t handle) {
  return static_cast<PlatformSharedMemoryHandle>(handle);
}
inline uint32_t VMOFromSharedMemoryHandle(PlatformSharedMemoryHandle handle) {
  return static_cast<uint32_t>(handle);
}
#elif V8_OS_WIN
// Convert between a shared memory handle and a Windows HANDLE to a file
// mapping object.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileMapping(
    void* handle) {
  return reinterpret_cast<PlatformSharedMemoryHandle>(handle);
}
inline void* FileMappingFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return reinterpret_cast<void*>(handle);
}
#else
// Convert between a shared memory handle and a file descriptor.
inline PlatformSharedMemoryHandle SharedMemoryHandleFromFileDescriptor(int fd) {
  return static_cast<PlatformSharedMemoryHandle>(fd);
}
inline int FileDescriptorFromSharedMemoryHandle(
    PlatformSharedMemoryHandle handle) {
  return static_cast<int>(handle);
}
#endif
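
// For illustration only (not part of the V8 API): on Linux and other POSIX
// systems (the #else branch above), wrapping a file descriptor from
// shm_open() and recovering it later. Requires <sys/mman.h>, <fcntl.h> and
// <unistd.h>; error handling is elided.
//
//   int fd = shm_open("/v8-example", O_RDWR | O_CREAT, 0600);
//   PlatformSharedMemoryHandle handle =
//       SharedMemoryHandleFromFileDescriptor(fd);
//   // ... pass |handle| through platform-independent code ...
//   close(FileDescriptorFromSharedMemoryHandle(handle));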

/**
 * Possible permissions for memory pages.
 */
enum class PagePermissions {
  kNoAccess,
  kRead,
  kReadWrite,
  kReadWriteExecute,
  kReadExecute,
};

/**
 * Class to manage a virtual memory address space.
 *
 * This class represents a contiguous region of virtual address space in which
 * sub-spaces and (private or shared) memory pages can be allocated, freed, and
 * modified. This interface is meant to eventually replace the PageAllocator
 * interface, and can be used as an alternative in the meantime.
 *
 * This API is not yet stable and may change without notice!
 */
class VirtualAddressSpace {
 public:
  using Address = uintptr_t;

  VirtualAddressSpace(size_t page_size, size_t allocation_granularity,
                      Address base, size_t size,
                      PagePermissions max_page_permissions)
      : page_size_(page_size),
        allocation_granularity_(allocation_granularity),
        base_(base),
        size_(size),
        max_page_permissions_(max_page_permissions) {}

  virtual ~VirtualAddressSpace() = default;

  /**
   * The page size used inside this space. Guaranteed to be a power of two.
   * Used as granularity for all page-related operations except for allocation,
   * which uses the allocation_granularity(), see below.
   *
   * \returns the page size in bytes.
   */
  size_t page_size() const { return page_size_; }

  /**
   * The granularity of page allocations and, by extension, of subspace
   * allocations. This is guaranteed to be a power of two and a multiple of the
   * page_size(). In practice, this is equal to the page size on most OSes, but
   * on Windows it is usually 64KB, while the page size is 4KB.
   *
   * \returns the allocation granularity in bytes.
   */
  size_t allocation_granularity() const { return allocation_granularity_; }

  /**
   * The base address of the address space managed by this instance.
   *
   * \returns the base address of this address space.
   */
  Address base() const { return base_; }

  /**
   * The size of the address space managed by this instance.
   *
   * \returns the size of this address space in bytes.
   */
  size_t size() const { return size_; }

  /**
   * The maximum page permissions that pages allocated inside this space can
   * obtain.
   *
   * \returns the maximum page permissions.
   */
  PagePermissions max_page_permissions() const { return max_page_permissions_; }

  /**
   * Sets the random seed so that RandomPageAddress() will generate
   * repeatable sequences of random addresses.
   *
   * \param seed The seed for the PRNG.
   */
  virtual void SetRandomSeed(int64_t seed) = 0;

  /**
   * Returns a random address inside this address space, suitable for page
   * allocation hints.
   *
   * \returns a random address aligned to allocation_granularity().
   */
  virtual Address RandomPageAddress() = 0;

  /**
   * Allocates private memory pages with the given alignment and permissions.
   *
   * \param hint If nonzero, an attempt is made to place the allocation at the
   * given address first. If that fails, the allocation is placed elsewhere,
   * possibly nearby, but that is not guaranteed. Specifying zero for the hint
   * always causes this function to choose a random address. The hint, if
   * specified, must be aligned to the specified alignment.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the allocation in bytes. Must be a
   * multiple of the allocation_granularity() and should be a power of two.
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  static constexpr Address kNoHint = 0;
  virtual V8_WARN_UNUSED_RESULT Address
  AllocatePages(Address hint, size_t size, size_t alignment,
                PagePermissions permissions) = 0;

  /**
   * Frees previously allocated pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocatePages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocatePages when the pages were allocated.
   */
  virtual void FreePages(Address address, size_t size) = 0;

  /**
   * Sets permissions of all allocated pages in the given range.
   *
   * This operation can fail due to OOM, in which case false is returned. If
   * the operation fails for a reason other than OOM, this function will
   * terminate the process as this implies a bug in the client.
   *
   * \param address The start address of the range. Must be aligned to
   * page_size().
   *
   * \param size The size in bytes of the range. Must be a multiple
   * of page_size().
   *
   * \param permissions The new permissions for the range.
   *
   * \returns true on success, false on OOM.
   */
  virtual V8_WARN_UNUSED_RESULT bool SetPagePermissions(
      Address address, size_t size, PagePermissions permissions) = 0;

  /**
   * Creates a guard region at the specified address.
   *
   * Guard regions are guaranteed to cause a fault when accessed and generally
   * do not count towards any memory consumption limits. Further, allocating
   * guard regions in subspaces usually cannot fail if the region does not
   * overlap with another region, subspace, or page allocation.
   *
   * \param address The start address of the guard region. Must be aligned to
   * the allocation_granularity().
   *
   * \param size The size of the guard region in bytes. Must be a multiple of
   * the allocation_granularity().
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool AllocateGuardRegion(Address address,
                                                         size_t size) = 0;

  /**
   * Frees an existing guard region.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the guard region to free. This address
   * must have previously been used as address parameter in a successful
   * invocation of AllocateGuardRegion.
   *
   * \param size The size in bytes of the guard region to free. This must match
   * the size passed to AllocateGuardRegion when the region was created.
   */
  virtual void FreeGuardRegion(Address address, size_t size) = 0;

  /**
   * Allocates shared memory pages with the given permissions.
   *
   * \param hint Placement hint. See AllocatePages.
   *
   * \param size The size of the allocation in bytes. Must be a multiple of the
   * allocation_granularity().
   *
   * \param permissions The page permissions of the newly allocated pages.
   *
   * \param handle A platform-specific handle to a shared memory object. See
   * the SharedMemoryHandleFromX routines above for ways to obtain these.
   *
   * \param offset The offset in the shared memory object at which the mapping
   * should start. Must be a multiple of the allocation_granularity().
   *
   * \returns the start address of the allocated pages on success, zero on
   * failure.
   */
  virtual V8_WARN_UNUSED_RESULT Address
  AllocateSharedPages(Address hint, size_t size, PagePermissions permissions,
                      PlatformSharedMemoryHandle handle, uint64_t offset) = 0;

  /**
   * Frees previously allocated shared pages.
   *
   * This function will terminate the process on failure as this implies a bug
   * in the client. As such, there is no return value.
   *
   * \param address The start address of the pages to free. This address must
   * have been obtained through a call to AllocateSharedPages.
   *
   * \param size The size in bytes of the region to free. This must match the
   * size passed to AllocateSharedPages when the pages were allocated.
   */
  virtual void FreeSharedPages(Address address, size_t size) = 0;

  /**
   * Whether this instance can allocate subspaces or not.
   *
   * \returns true if subspaces can be allocated, false if not.
   */
  virtual bool CanAllocateSubspaces() = 0;

  /**
   * Allocates a subspace.
   *
   * The address space of a subspace stays reserved in the parent space for the
   * lifetime of the subspace. As such, it is guaranteed that page allocations
   * on the parent space cannot end up inside a subspace.
   *
   * \param hint Hints where the subspace should be allocated. See
   * AllocatePages() for more details.
   *
   * \param size The size in bytes of the subspace. Must be a multiple of the
   * allocation_granularity().
   *
   * \param alignment The alignment of the subspace in bytes. Must be a multiple
   * of the allocation_granularity() and should be a power of two.
   *
   * \param max_page_permissions The maximum permissions that pages allocated in
   * the subspace can obtain.
   *
   * \returns a new subspace or nullptr on failure.
   */
  virtual std::unique_ptr<VirtualAddressSpace> AllocateSubspace(
      Address hint, size_t size, size_t alignment,
      PagePermissions max_page_permissions) = 0;

  //
  // TODO(v8) maybe refactor the methods below before stabilizing the API. For
  // example by combining them into some form of page operation method that
  // takes a command enum as parameter.
  //

  /**
   * Recommits discarded pages in the given range with given permissions.
   * Discarded pages must be recommitted with their original permissions
   * before they are used again.
   *
   * \param address The start address of the range. Must be aligned to
   * page_size().
   *
   * \param size The size in bytes of the range. Must be a multiple
   * of page_size().
   *
   * \param permissions The permissions for the range that the pages must have.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool RecommitPages(
      Address address, size_t size, PagePermissions permissions) = 0;

  /**
   * Frees memory in the given [address, address + size) range. address and
   * size should be aligned to the page_size(). The next write to this memory
   * area brings the memory transparently back. This should be treated as a
   * hint to the OS that the pages are no longer needed. It does not guarantee
   * that the pages will be discarded immediately or at all.
   *
   * \returns true on success, false otherwise. Since this method is only a
   * hint, a successful invocation does not imply that pages have been removed.
   */
  virtual V8_WARN_UNUSED_RESULT bool DiscardSystemPages(Address address,
                                                        size_t size) {
    return true;
  }

  /**
   * Decommits any wired memory pages in the given range, allowing the OS to
   * reclaim them, and marks the region as inaccessible (kNoAccess). The
   * address range stays reserved and can be accessed again later by changing
   * its permissions. However, in that case the memory content is guaranteed to
   * be zero-initialized again. The memory must have been previously allocated
   * by a call to AllocatePages.
   *
   * \returns true on success, false otherwise.
   */
  virtual V8_WARN_UNUSED_RESULT bool DecommitPages(Address address,
                                                   size_t size) = 0;

 private:
  const size_t page_size_;
  const size_t allocation_granularity_;
  const Address base_;
  const size_t size_;
  const PagePermissions max_page_permissions_;
};
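
// For illustration only (not part of the V8 API): allocating pages in a
// VirtualAddressSpace, making them writable, then freeing them. |space| is
// assumed to be an embedder-provided implementation of the interface.
//
//   const size_t granule = space->allocation_granularity();
//   VirtualAddressSpace::Address addr = space->AllocatePages(
//       VirtualAddressSpace::kNoHint, granule, granule,
//       PagePermissions::kNoAccess);
//   if (addr && space->SetPagePermissions(addr, granule,
//                                         PagePermissions::kReadWrite)) {
//     // ... use the memory ...
//   }
//   if (addr) space->FreePages(addr, granule);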

/**
 * V8 Allocator used for allocating zone backings.
 */
class ZoneBackingAllocator {
 public:
  using MallocFn = void* (*)(size_t);
  using FreeFn = void (*)(void*);

  virtual MallocFn GetMallocFn() const { return ::malloc; }
  virtual FreeFn GetFreeFn() const { return ::free; }
};
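
// For illustration only (not part of the V8 API): routing zone backings
// through a tracking allocator. TrackedMalloc() and TrackedFree() are
// hypothetical; note that MallocFn/FreeFn are plain function pointers, so
// capturing lambdas do not fit.
//
//   class TrackingZoneBackingAllocator final : public ZoneBackingAllocator {
//    public:
//     MallocFn GetMallocFn() const override { return &TrackedMalloc; }
//     FreeFn GetFreeFn() const override { return &TrackedFree; }
//   };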

/**
 * Observer used by V8 to notify the embedder about entering/leaving sections
 * with high throughput of malloc/free operations.
 */
class HighAllocationThroughputObserver {
 public:
  virtual void EnterSection() {}
  virtual void LeaveSection() {}
};

/**
 * V8 Platform abstraction layer.
 *
 * The embedder has to provide an implementation of this interface before
 * initializing the rest of V8.
 */
class Platform {
 public:
  virtual ~Platform() = default;

  /**
   * Allows the embedder to manage memory page allocations.
   * Returning nullptr will cause V8 to use the default page allocator.
   */
  virtual PageAllocator* GetPageAllocator() = 0;

  /**
   * Allows the embedder to specify a custom allocator used for zones.
   */
  virtual ZoneBackingAllocator* GetZoneBackingAllocator() {
    static ZoneBackingAllocator default_allocator;
    return &default_allocator;
  }

  /**
   * Enables the embedder to respond in cases where V8 can't allocate large
   * blocks of memory. V8 retries the failed allocation once after calling this
   * method. On success, execution continues; otherwise V8 exits with a fatal
   * error.
   * Embedder overrides of this function must NOT call back into V8.
   */
  virtual void OnCriticalMemoryPressure() {}

  /**
   * Gets the max number of worker threads that may be used to execute
   * concurrent work scheduled for any single TaskPriority by
   * Call(BlockingTask)OnWorkerThread() or PostJob(). This can be used to
   * estimate the number of tasks a work package should be split into. A return
   * value of 0 means that there are no worker threads available. Note that a
   * value of 0 won't prohibit V8 from posting tasks using |CallOnWorkerThread|.
   */
  virtual int NumberOfWorkerThreads() = 0;

  /**
   * Returns a TaskRunner which can be used to post a task on the foreground
   * thread. The TaskRunner's NonNestableTasksEnabled() must be true. This
   * function should only be called from a foreground thread.
   */
  virtual std::shared_ptr<v8::TaskRunner> GetForegroundTaskRunner(
      Isolate* isolate) = 0;

  /**
   * Schedules a task to be invoked on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallOnWorkerThread().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * PostTaskOnWorkerThreadImpl().
   */
  virtual void CallOnWorkerThread(std::unique_ptr<Task> task) {
    PostTaskOnWorkerThreadImpl(TaskPriority::kUserVisible, std::move(task),
                               SourceLocation::Current());
  }

  /**
   * Schedules a task that blocks the main thread to be invoked with
   * high priority on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallBlockingTaskOnWorkerThread().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * PostTaskOnWorkerThreadImpl().
   */
  virtual void CallBlockingTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a high
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked with low priority on a worker thread.
   * Embedders should override PostTaskOnWorkerThreadImpl() instead of
   * CallLowPriorityTaskOnWorkerThread().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * PostTaskOnWorkerThreadImpl().
   */
  virtual void CallLowPriorityTaskOnWorkerThread(std::unique_ptr<Task> task) {
    // Embedders may optionally override this to process these tasks in a low
    // priority pool.
    CallOnWorkerThread(std::move(task));
  }

  /**
   * Schedules a task to be invoked on a worker thread after |delay_in_seconds|
   * expires.
   * Embedders should override PostDelayedTaskOnWorkerThreadImpl() instead of
   * CallDelayedOnWorkerThread().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * PostDelayedTaskOnWorkerThreadImpl().
   */
  virtual void CallDelayedOnWorkerThread(std::unique_ptr<Task> task,
                                         double delay_in_seconds) {
    PostDelayedTaskOnWorkerThreadImpl(TaskPriority::kUserVisible,
                                      std::move(task), delay_in_seconds,
                                      SourceLocation::Current());
  }

  /**
   * Returns true if idle tasks are enabled for the given |isolate|.
   */
  virtual bool IdleTasksEnabled(Isolate* isolate) { return false; }

  /**
   * Posts |job_task| to run in parallel. Returns a JobHandle associated with
   * the Job, which can be joined or canceled.
   * This avoids degenerate cases:
   * - Calling CallOnWorkerThread() for each work item, causing significant
   *   overhead.
   * - Fixed number of CallOnWorkerThread() calls that split the work and might
   *   run for a long time. This is problematic when many components post
   *   "num cores" tasks and all expect to use all the cores. In these cases,
   *   the scheduler lacks context to be fair to multiple same-priority requests
   *   and/or ability to request lower priority work to yield when high priority
   *   work comes in.
   * A canonical implementation of |job_task| looks like:
   * class MyJobTask : public JobTask {
   *  public:
   *   MyJobTask(...) : worker_queue_(...) {}
   *   // JobTask:
   *   void Run(JobDelegate* delegate) override {
   *     while (!delegate->ShouldYield()) {
   *       // Smallest unit of work.
   *       auto work_item = worker_queue_.TakeWorkItem(); // Thread safe.
   *       if (!work_item) return;
   *       ProcessWork(work_item);
   *     }
   *   }
   *
   *   size_t GetMaxConcurrency(size_t worker_count) const override {
   *     return worker_queue_.GetSize(); // Thread safe.
   *   }
   * };
   * auto handle = PostJob(TaskPriority::kUserVisible,
   *                       std::make_unique<MyJobTask>(...));
   * handle->Join();
   *
   * PostJob() and methods of the returned JobHandle/JobDelegate must never be
   * called while holding a lock that could be acquired by JobTask::Run or
   * JobTask::GetMaxConcurrency -- that could result in a deadlock. This is
   * because [1] JobTask::GetMaxConcurrency may be invoked while holding
   * internal lock (A), hence JobTask::GetMaxConcurrency can only use a lock (B)
   * if that lock is *never* held while calling back into JobHandle from any
   * thread (A=>B/B=>A deadlock) and [2] JobTask::Run or
   * JobTask::GetMaxConcurrency may be invoked synchronously from JobHandle
   * (B=>JobHandle::foo=>B deadlock).
   * Embedders should override CreateJobImpl() instead of PostJob().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * CreateJobImpl().
   */
  virtual std::unique_ptr<JobHandle> PostJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) {
    auto handle = CreateJob(priority, std::move(job_task));
    handle->NotifyConcurrencyIncrease();
    return handle;
  }

  /**
   * Creates and returns a JobHandle associated with a Job. Unlike PostJob(),
   * this doesn't immediately schedule |worker_task| to run; the Job is then
   * scheduled by calling either NotifyConcurrencyIncrease() or Join().
   *
   * A sufficient CreateJob() implementation that uses the default Job provided
   * in libplatform looks like:
   *  std::unique_ptr<JobHandle> CreateJob(
   *      TaskPriority priority, std::unique_ptr<JobTask> job_task) override {
   *    return v8::platform::NewDefaultJobHandle(
   *        this, priority, std::move(job_task), NumberOfWorkerThreads());
   *  }
   *
   * Embedders should override CreateJobImpl() instead of CreateJob().
   * TODO(chromium:1424158): Make non-virtual once embedders are migrated to
   * CreateJobImpl().
   */
  virtual std::unique_ptr<JobHandle> CreateJob(
      TaskPriority priority, std::unique_ptr<JobTask> job_task) {
    return CreateJobImpl(priority, std::move(job_task),
                         SourceLocation::Current());
  }

  /**
   * Instantiates a ScopedBlockingCall to annotate a scope that may/will block.
   */
  virtual std::unique_ptr<ScopedBlockingCall> CreateBlockingScope(
      BlockingType blocking_type) {
    return nullptr;
  }

  /**
   * Monotonically increasing time in seconds from an arbitrary fixed point in
   * the past. This function is expected to return at least
   * millisecond-precision values. For this reason,
   * it is recommended that the fixed point be no further in the past than
   * the epoch.
   **/
  virtual double MonotonicallyIncreasingTime() = 0;

  /**
   * Current wall-clock time in milliseconds since epoch. Use
   * CurrentClockTimeMillisecondsHighResolution() when higher precision is
   * required.
   */
  virtual int64_t CurrentClockTimeMilliseconds() {
    return floor(CurrentClockTimeMillis());
  }

  /**
   * This function is deprecated and will be deleted. Use either
   * CurrentClockTimeMilliseconds() or
   * CurrentClockTimeMillisecondsHighResolution().
   */
  virtual double CurrentClockTimeMillis() = 0;

  /**
   * Same as CurrentClockTimeMilliseconds(), but with more precision.
   */
  virtual double CurrentClockTimeMillisecondsHighResolution() {
    return CurrentClockTimeMillis();
  }

  typedef void (*StackTracePrinter)();

  /**
   * Returns a function pointer that prints a stack trace of the current stack
   * on invocation. Returning nullptr disables printing of the stack trace.
   */
  virtual StackTracePrinter GetStackTracePrinter() { return nullptr; }

  /**
   * Returns an instance of a v8::TracingController. This must be non-nullptr.
   */
  virtual TracingController* GetTracingController() = 0;

  /**
   * Tells the embedder to generate and upload a crashdump during an unexpected
   * but non-critical scenario.
   */
  virtual void DumpWithoutCrashing() {}

  /**
   * Allows the embedder to observe sections with high throughput allocation
   * operations.
   */
  virtual HighAllocationThroughputObserver*
  GetHighAllocationThroughputObserver() {
    static HighAllocationThroughputObserver default_observer;
    return &default_observer;
  }

 protected:
  /**
   * Default implementation of current wall-clock time in milliseconds
   * since epoch. Useful for implementing |CurrentClockTimeMillis| if
   * nothing special is needed.
   */
  V8_EXPORT static double SystemClockTimeMillis();

  /**
   * Creates and returns a JobHandle associated with a Job.
   * TODO(chromium:1424158): Make pure virtual once embedders implement it.
   */
  virtual std::unique_ptr<JobHandle> CreateJobImpl(
      TaskPriority priority, std::unique_ptr<JobTask> job_task,
      const SourceLocation& location) {
    return nullptr;
  }

  /**
   * Schedules a task with |priority| to be invoked on a worker thread.
   * TODO(chromium:1424158): Make pure virtual once embedders implement it.
   */
  virtual void PostTaskOnWorkerThreadImpl(TaskPriority priority,
                                          std::unique_ptr<Task> task,
                                          const SourceLocation& location) {}

  /**
   * Schedules a task with |priority| to be invoked on a worker thread after
   * |delay_in_seconds| expires.
   * TODO(chromium:1424158): Make pure virtual once embedders implement it.
   */
  virtual void PostDelayedTaskOnWorkerThreadImpl(
      TaskPriority priority, std::unique_ptr<Task> task,
      double delay_in_seconds, const SourceLocation& location) {}
};
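
// For illustration only (not part of the V8 API): the rough shape of a
// minimal embedder Platform, overriding just the pure virtual methods. Real
// embedders usually start from v8::platform::NewDefaultPlatform() in
// libplatform instead. Requires <chrono>; |foreground_runner_| is a
// hypothetical embedder-provided TaskRunner.
//
//   class MinimalPlatform final : public Platform {
//    public:
//     PageAllocator* GetPageAllocator() override { return nullptr; }
//     int NumberOfWorkerThreads() override { return 0; }
//     std::shared_ptr<TaskRunner> GetForegroundTaskRunner(Isolate*) override {
//       return foreground_runner_;
//     }
//     double MonotonicallyIncreasingTime() override {
//       return std::chrono::duration<double>(
//                  std::chrono::steady_clock::now().time_since_epoch())
//           .count();
//     }
//     double CurrentClockTimeMillis() override {
//       return SystemClockTimeMillis();
//     }
//     TracingController* GetTracingController() override { return &tracing_; }
//
//    private:
//     std::shared_ptr<TaskRunner> foreground_runner_;
//     TracingController tracing_;
//   };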

}  // namespace v8

#endif  // V8_V8_PLATFORM_H_