xref: /third_party/node/deps/v8/include/v8-profiler.h (revision 1cb0ef41)
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_V8_PROFILER_H_
#define V8_V8_PROFILER_H_

#include <limits.h>

#include <memory>
#include <unordered_set>
#include <vector>

#include "v8-local-handle.h"       // NOLINT(build/include_directory)
#include "v8-message.h"            // NOLINT(build/include_directory)
#include "v8-persistent-handle.h"  // NOLINT(build/include_directory)

/**
 * Profiler support for the V8 JavaScript engine.
 */
namespace v8 {

enum class EmbedderStateTag : uint8_t;
class HeapGraphNode;
struct HeapStatsUpdate;
class Object;
enum StateTag : int;

using NativeObject = void*;
using SnapshotObjectId = uint32_t;
using ProfilerId = uint32_t;

struct CpuProfileDeoptFrame {
  int script_id;
  size_t position;
};

namespace internal {
class CpuProfile;
}  // namespace internal

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptFrame>;
#endif

namespace v8 {

struct V8_EXPORT CpuProfileDeoptInfo {
  /** A pointer to a static string owned by v8. */
  const char* deopt_reason;
  std::vector<CpuProfileDeoptFrame> stack;
};

}  // namespace v8

#ifdef V8_OS_WIN
template class V8_EXPORT std::vector<v8::CpuProfileDeoptInfo>;
#endif

namespace v8 {

/**
 * CpuProfileNode represents a node in a call graph.
 */
class V8_EXPORT CpuProfileNode {
 public:
  struct LineTick {
    /** The 1-based number of the source line where the function originates. */
    int line;

    /** The count of samples associated with the source line. */
    unsigned int hit_count;
  };

  // An annotation hinting at the source of a CpuProfileNode.
  enum SourceType {
    // User-supplied script with associated resource information.
    kScript = 0,
    // Native scripts and provided builtins.
    kBuiltin = 1,
    // Callbacks into native code.
    kCallback = 2,
    // VM-internal functions or state.
    kInternal = 3,
    // A node that failed to symbolize.
    kUnresolved = 4,
  };

  /** Returns the function name (empty string for anonymous functions). */
  Local<String> GetFunctionName() const;

  /**
   * Returns the function name (empty string for anonymous functions).
   * Ownership of the string is *not* passed to the caller. It stays valid
   * until the profile is deleted. The function is thread-safe.
   */
  const char* GetFunctionNameStr() const;

  /** Returns the id of the script where the function is located. */
  int GetScriptId() const;

  /** Returns the resource name of the script where the function originates. */
  Local<String> GetScriptResourceName() const;

  /**
   * Returns the resource name of the script where the function originates.
   * Ownership of the string is *not* passed to the caller. It stays valid
   * until the profile is deleted. The function is thread-safe.
   */
  const char* GetScriptResourceNameStr() const;

  /**
   * Returns true if the script from which the function originates is flagged
   * as being shared cross-origin.
   */
  bool IsScriptSharedCrossOrigin() const;

  /**
   * Returns the 1-based number of the line where the function originates, or
   * kNoLineNumberInfo if no line number information is available.
   */
  int GetLineNumber() const;

  /**
   * Returns the 1-based number of the column where the function originates,
   * or kNoColumnNumberInfo if no column number information is available.
   */
  int GetColumnNumber() const;

  /**
   * Returns the number of the function's source lines that have collected
   * samples.
   */
  unsigned int GetHitLineCount() const;

  /**
   * Returns the set of source lines that have collected samples.
   * The caller allocates the buffer and is responsible for releasing it.
   * Returns true if all available entries were copied, otherwise false.
   * The function copies nothing if the buffer is not large enough.
   */
  bool GetLineTicks(LineTick* entries, unsigned int length) const;

  /**
   * Returns the bailout reason for the function if optimization was disabled
   * for it.
   */
  const char* GetBailoutReason() const;

  /**
   * Returns the count of samples in which the function was currently
   * executing.
   */
  unsigned GetHitCount() const;

  /** Returns the id of the node. The id is unique within the tree. */
  unsigned GetNodeId() const;

  /**
   * Gets the type of the source from which the node was captured.
   */
  SourceType GetSourceType() const;

  /** Returns the count of the node's child nodes. */
  int GetChildrenCount() const;

  /** Retrieves a child node by index. */
  const CpuProfileNode* GetChild(int index) const;

  /** Retrieves the parent node, or null if this is the root. */
  const CpuProfileNode* GetParent() const;

  /** Retrieves deopt infos for the node. */
  const std::vector<CpuProfileDeoptInfo>& GetDeoptInfos() const;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
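// Illustrative sketch (not part of the header): walking the call tree rooted
// at a CpuProfileNode and printing one line per node. `PrintCallTree` is a
// hypothetical helper; printf assumes <cstdio>.
//
//   void PrintCallTree(const v8::CpuProfileNode* node, int depth) {
//     printf("%*s%s (self hits: %u)\n", depth * 2, "",
//            node->GetFunctionNameStr(), node->GetHitCount());
//     for (int i = 0; i < node->GetChildrenCount(); ++i) {
//       PrintCallTree(node->GetChild(i), depth + 1);
//     }
//   }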

/**
 * An interface for exporting data from V8, using the "push" model.
 */
class V8_EXPORT OutputStream {
 public:
  enum WriteResult {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~OutputStream() = default;
  /** Notifies about the end of the stream. */
  virtual void EndOfStream() = 0;
  /** Gets the preferred output chunk size. Called only once. */
  virtual int GetChunkSize() { return 1024; }
  /**
   * Writes the next chunk of snapshot data into the stream. Writing
   * can be stopped by returning kAbort as the function result. EndOfStream
   * will not be called if writing was aborted.
   */
  virtual WriteResult WriteAsciiChunk(char* data, int size) = 0;
  /**
   * Writes the next chunk of heap stats data into the stream. Writing
   * can be stopped by returning kAbort as the function result. EndOfStream
   * will not be called if writing was aborted.
   */
  virtual WriteResult WriteHeapStatsChunk(HeapStatsUpdate* data, int count) {
    return kAbort;
  }
};
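// Illustrative sketch (not part of the header): a minimal OutputStream that
// accumulates serialized JSON into a std::string. The `StringOutputStream`
// name is hypothetical; <string> is assumed to be included.
//
//   class StringOutputStream : public v8::OutputStream {
//    public:
//     void EndOfStream() override {}
//     WriteResult WriteAsciiChunk(char* data, int size) override {
//       json_.append(data, static_cast<size_t>(size));
//       return kContinue;
//     }
//     const std::string& json() const { return json_; }
//
//    private:
//     std::string json_;
//   };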

/**
 * CpuProfile contains a CPU profile in the form of a top-down call tree
 * (from main() down to functions that do all the work).
 */
class V8_EXPORT CpuProfile {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };
  /** Returns the CPU profile title. */
  Local<String> GetTitle() const;

  /** Returns the root node of the top down call tree. */
  const CpuProfileNode* GetTopDownRoot() const;

  /**
   * Returns the number of samples recorded. Samples are not recorded unless
   * the |record_samples| parameter of CpuProfiler::StartProfiling is true.
   */
  int GetSamplesCount() const;

  /**
   * Returns the profile node corresponding to the top frame of the sample at
   * the given index.
   */
  const CpuProfileNode* GetSample(int index) const;

  /**
   * Returns the timestamp of the sample. The timestamp is the number of
   * microseconds since some unspecified starting point.
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetSampleTimestamp(int index) const;

  /**
   * Returns the time when the profile recording was started (in microseconds
   * since some unspecified starting point).
   */
  int64_t GetStartTime() const;

  /**
   * Returns the state of the VM when the sample was captured.
   */
  StateTag GetSampleState(int index) const;

  /**
   * Returns the state of the embedder when the sample was captured.
   */
  EmbedderStateTag GetSampleEmbedderState(int index) const;

  /**
   * Returns the time when the profile recording was stopped (in microseconds
   * since some unspecified starting point).
   * The point is equal to the starting point used by GetStartTime.
   */
  int64_t GetEndTime() const;

  /**
   * Deletes the profile and removes it from CpuProfiler's list.
   * All pointers to nodes previously returned become invalid.
   */
  void Delete();

  /**
   * Prepares a serialized representation of the profile. The result
   * is written into the stream provided in chunks of specified size.
   *
   * For the JSON format, the profile contents are represented as an object
   * with the following structure:
   *
   *  {
   *    nodes: [nodes array],
   *    startTime: number,
   *    endTime: number,
   *    samples: [strings array],
   *    timeDeltas: [numbers array]
   *  }
   *
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
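// Illustrative sketch (not part of the header): serializing a finished
// profile with the hypothetical StringOutputStream shown above.
//
//   StringOutputStream stream;
//   profile->Serialize(&stream, v8::CpuProfile::kJSON);
//   // stream.json() now holds the serialized profile document.
//   profile->Delete();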

enum CpuProfilingMode {
  // In the resulting CpuProfile tree, intermediate nodes in a stack trace
  // (from the root to a leaf) will have line numbers that point to the start
  // line of the function, rather than the line of the callsite of the child.
  kLeafNodeLineNumbers,
  // In the resulting CpuProfile tree, nodes are separated based on the line
  // number of their callsite in their parent.
  kCallerLineNumbers,
};

// Determines how names are derived for sampled functions.
enum CpuProfilingNamingMode {
  // Use the immediate name of functions at compilation time.
  kStandardNaming,
  // Use more verbose naming for functions without names, inferred from scope
  // where possible.
  kDebugNaming,
};

enum CpuProfilingLoggingMode {
  // Enables logging when a profile is active, and disables logging when all
  // profiles are detached.
  kLazyLogging,
  // Enables logging for the lifetime of the CpuProfiler. Calls to
  // StartRecording are faster, at the expense of runtime overhead.
  kEagerLogging,
};

// Enum for returning the profiling status. Once StartProfiling is called,
// we want to return to clients whether the profiling was able to start
// correctly, or return a descriptive error.
enum class CpuProfilingStatus {
  kStarted,
  kAlreadyStarted,
  kErrorTooManyProfilers
};

/**
 * Result from StartProfiling, consisting of the profiling status and the id
 * of the started profiler, or 0 if the profiler was not started.
 */
struct CpuProfilingResult {
  const ProfilerId id;
  const CpuProfilingStatus status;
};

/**
 * Delegate notified when the maximum number of samples has been reached and
 * further samples are being discarded.
 */
class V8_EXPORT DiscardedSamplesDelegate {
 public:
  DiscardedSamplesDelegate() = default;

  virtual ~DiscardedSamplesDelegate() = default;
  virtual void Notify() = 0;

  ProfilerId GetId() const { return profiler_id_; }

 private:
  friend internal::CpuProfile;

  void SetId(ProfilerId id) { profiler_id_ = id; }

  ProfilerId profiler_id_;
};
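// Illustrative sketch (not part of the header): a delegate that logs a
// warning when samples are discarded. The class name and the use of fprintf
// are assumptions made for the example.
//
//   class LoggingDiscardDelegate : public v8::DiscardedSamplesDelegate {
//    public:
//     void Notify() override {
//       fprintf(stderr, "CPU profile %u reached its sample limit\n", GetId());
//     }
//   };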

/**
 * Optional profiling attributes.
 */
class V8_EXPORT CpuProfilingOptions {
 public:
  // Indicates that the sample buffer size should not be explicitly limited.
  static const unsigned kNoSampleLimit = UINT_MAX;

  /**
   * \param mode Type of computation of stack frame line numbers.
   * \param max_samples The maximum number of samples that should be recorded
   *                    by the profiler. Samples obtained after this limit
   *                    will be discarded.
   * \param sampling_interval_us controls the profile-specific target
   *                             sampling interval. The provided sampling
   *                             interval will be snapped to the next lowest
   *                             non-zero multiple of the profiler's sampling
   *                             interval, set via SetSamplingInterval(). If
   *                             zero, the sampling interval will be equal to
   *                             the profiler's sampling interval.
   * \param filter_context If specified, profiles will only contain frames
   *                       using this context. Other frames will be elided.
   */
  CpuProfilingOptions(
      CpuProfilingMode mode = kLeafNodeLineNumbers,
      unsigned max_samples = kNoSampleLimit, int sampling_interval_us = 0,
      MaybeLocal<Context> filter_context = MaybeLocal<Context>());

  CpuProfilingMode mode() const { return mode_; }
  unsigned max_samples() const { return max_samples_; }
  int sampling_interval_us() const { return sampling_interval_us_; }

 private:
  friend class internal::CpuProfile;

  bool has_filter_context() const { return !filter_context_.IsEmpty(); }
  void* raw_filter_context() const;

  CpuProfilingMode mode_;
  unsigned max_samples_;
  int sampling_interval_us_;
  CopyablePersistentTraits<Context>::CopyablePersistent filter_context_;
};
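// Illustrative sketch (not part of the header): options that attribute nodes
// to their callsite lines, cap the profile at 10000 samples, and request a
// roughly 500us sampling interval. The values are arbitrary example choices.
//
//   v8::CpuProfilingOptions options(v8::kCallerLineNumbers,
//                                   /*max_samples=*/10000,
//                                   /*sampling_interval_us=*/500);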

/**
 * Interface for controlling CPU profiling. An instance of the profiler can
 * be created using the v8::CpuProfiler::New method.
 */
class V8_EXPORT CpuProfiler {
 public:
  /**
   * Creates a new CPU profiler for the |isolate|. The isolate must be
   * initialized. The profiler object must be disposed after use by calling
   * the |Dispose| method.
   */
  static CpuProfiler* New(Isolate* isolate,
                          CpuProfilingNamingMode = kDebugNaming,
                          CpuProfilingLoggingMode = kLazyLogging);

  /**
   * Synchronously collects a current stack sample in all profilers attached
   * to the |isolate|. The call does not affect the number of ticks recorded
   * for the current top node.
   */
  static void CollectSample(Isolate* isolate);

  /**
   * Disposes the CPU profiler object.
   */
  void Dispose();

  /**
   * Changes the default CPU profiler sampling interval to the specified
   * number of microseconds. The default interval is 1000us. This method must
   * be called when there are no profiles being recorded.
   */
  void SetSamplingInterval(int us);

  /**
   * Sets whether or not the profiler should prioritize consistency of sample
   * periodicity on Windows. Disabling this can greatly reduce CPU usage, but
   * may result in greater variance in sample timings from the platform's
   * scheduler. Defaults to enabled. This method must be called when there are
   * no profiles being recorded.
   */
  void SetUsePreciseSampling(bool);

  /**
   * Starts collecting a CPU profile. Several profiles may be collected at
   * once. Generates an anonymous profile, without a String identifier.
   */
  CpuProfilingResult Start(
      CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts collecting a CPU profile. The title may be an empty string.
   * Several profiles may be collected at once. Attempts to start collecting
   * several profiles with the same title are silently ignored.
   */
  CpuProfilingResult Start(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingResult Start(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);

  /**
   * The same as Start above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingResult Start(Local<String> title, bool record_samples = false);

  /**
   * Starts collecting a CPU profile. The title may be an empty string.
   * Several profiles may be collected at once. Attempts to start collecting
   * several profiles with the same title are silently ignored.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingOptions options,
      std::unique_ptr<DiscardedSamplesDelegate> delegate = nullptr);

  /**
   * Starts profiling with the same semantics as above, except with expanded
   * parameters.
   *
   * |record_samples| parameter controls whether individual samples should
   * be recorded in addition to the aggregated tree.
   *
   * |max_samples| controls the maximum number of samples that should be
   * recorded by the profiler. Samples obtained after this limit will be
   * discarded.
   */
  CpuProfilingStatus StartProfiling(
      Local<String> title, CpuProfilingMode mode, bool record_samples = false,
      unsigned max_samples = CpuProfilingOptions::kNoSampleLimit);

  /**
   * The same as StartProfiling above, but the CpuProfilingMode defaults to
   * kLeafNodeLineNumbers mode, which was the previous default behavior of the
   * profiler.
   */
  CpuProfilingStatus StartProfiling(Local<String> title,
                                    bool record_samples = false);

  /**
   * Stops collecting the CPU profile with the given id and returns it.
   */
  CpuProfile* Stop(ProfilerId id);

  /**
   * Stops collecting the CPU profile with the given title and returns it.
   * If the given title is empty, finishes the last profile started.
   */
  CpuProfile* StopProfiling(Local<String> title);

  /**
   * Generates more detailed source positions for code objects. This yields
   * better results when mapping profiling samples to script source.
   */
  static void UseDetailedSourcePositionsForProfiling(Isolate* isolate);

 private:
  CpuProfiler();
  ~CpuProfiler();
  CpuProfiler(const CpuProfiler&);
  CpuProfiler& operator=(const CpuProfiler&);
};
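// Illustrative sketch (not part of the header): a typical profiler lifetime,
// assuming a valid `isolate` with an entered context. Error handling is
// omitted for brevity.
//
//   v8::CpuProfiler* profiler = v8::CpuProfiler::New(isolate);
//   v8::Local<v8::String> title =
//       v8::String::NewFromUtf8Literal(isolate, "my-profile");
//   profiler->StartProfiling(title, /*record_samples=*/true);
//   // ... run the JavaScript workload to be measured ...
//   v8::CpuProfile* profile = profiler->StopProfiling(title);
//   // Inspect or Serialize() the profile, then release it.
//   profile->Delete();
//   profiler->Dispose();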

/**
 * HeapGraphEdge represents a directed connection between heap
 * graph nodes: from retainers to retained nodes.
 */
class V8_EXPORT HeapGraphEdge {
 public:
  enum Type {
    kContextVariable = 0,  // A variable from a function context.
    kElement = 1,          // An element of an array.
    kProperty = 2,         // A named object property.
    kInternal = 3,         // A link that can't be accessed from JS,
                           // thus, its name isn't a real property name
                           // (e.g. parts of a ConsString).
    kHidden = 4,           // A link that is needed for proper sizes
                           // calculation, but may be hidden from user.
    kShortcut = 5,         // A link that must not be followed during
                           // sizes calculation.
    kWeak = 6              // A weak reference (ignored by the GC).
  };

  /** Returns the edge type (see HeapGraphEdge::Type). */
  Type GetType() const;

  /**
   * Returns the edge name. This can be a variable name, an element index, or
   * a property name.
   */
  Local<Value> GetName() const;

  /** Returns the origin node. */
  const HeapGraphNode* GetFromNode() const;

  /** Returns the destination node. */
  const HeapGraphNode* GetToNode() const;
};


/**
 * HeapGraphNode represents a node in a heap graph.
 */
class V8_EXPORT HeapGraphNode {
 public:
  enum Type {
    kHidden = 0,         // Hidden node, may be filtered when shown to user.
    kArray = 1,          // An array of elements.
    kString = 2,         // A string.
    kObject = 3,         // A JS object (except for arrays and strings).
    kCode = 4,           // Compiled code.
    kClosure = 5,        // Function closure.
    kRegExp = 6,         // RegExp.
    kHeapNumber = 7,     // Number stored in the heap.
    kNative = 8,         // Native object (not from V8 heap).
    kSynthetic = 9,      // Synthetic object, usually used for grouping
                         // snapshot items together.
    kConsString = 10,    // Concatenated string. A pair of pointers to strings.
    kSlicedString = 11,  // Sliced string. A fragment of another string.
    kSymbol = 12,        // A Symbol (ES6).
    kBigInt = 13         // BigInt.
  };

  /** Returns the node type (see HeapGraphNode::Type). */
  Type GetType() const;

  /**
   * Returns the node name. Depending on the node's type this can be the name
   * of the constructor (for objects), the name of the function (for
   * closures), the string value, or an empty string (for compiled code).
   */
  Local<String> GetName() const;

  /**
   * Returns the node id. For the same heap object, the id remains the same
   * across all snapshots.
   */
  SnapshotObjectId GetId() const;

  /** Returns the node's own size, in bytes. */
  size_t GetShallowSize() const;

  /** Returns the count of the node's child nodes. */
  int GetChildrenCount() const;

  /** Retrieves a child by index. */
  const HeapGraphEdge* GetChild(int index) const;
};

/**
 * HeapSnapshots record the state of the JS heap at some moment.
 */
class V8_EXPORT HeapSnapshot {
 public:
  enum SerializationFormat {
    kJSON = 0  // See format description near 'Serialize' method.
  };

  /** Returns the root node of the heap graph. */
  const HeapGraphNode* GetRoot() const;

  /** Returns a node by its id. */
  const HeapGraphNode* GetNodeById(SnapshotObjectId id) const;

  /** Returns the total node count in the snapshot. */
  int GetNodesCount() const;

  /** Returns a node by index. */
  const HeapGraphNode* GetNode(int index) const;

  /** Returns the maximum seen JS object id. */
  SnapshotObjectId GetMaxSnapshotJSObjectId() const;

  /**
   * Deletes the snapshot and removes it from HeapProfiler's list.
   * All pointers to nodes, edges and paths previously returned become
   * invalid.
   */
  void Delete();

  /**
   * Prepares a serialized representation of the snapshot. The result
   * is written into the stream provided in chunks of specified size.
   * The total length of the serialized snapshot is unknown in
   * advance; it can be roughly equal to the JS heap size (that is,
   * it can be really big - tens of megabytes).
   *
   * For the JSON format, heap contents are represented as an object
   * with the following structure:
   *
   *  {
   *    snapshot: {
   *      title: "...",
   *      uid: nnn,
   *      meta: { meta-info },
   *      node_count: nnn,
   *      edge_count: nnn
   *    },
   *    nodes: [nodes array],
   *    edges: [edges array],
   *    strings: [strings array]
   *  }
   *
   * Nodes reference strings, other nodes, and edges by their indexes
   * in corresponding arrays.
   */
  void Serialize(OutputStream* stream,
                 SerializationFormat format = kJSON) const;
};
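// Illustrative sketch (not part of the header): writing a snapshot to disk
// through a minimal OutputStream. `FileOutputStream` is hypothetical and
// <cstdio> is assumed.
//
//   class FileOutputStream : public v8::OutputStream {
//    public:
//     explicit FileOutputStream(FILE* file) : file_(file) {}
//     void EndOfStream() override { fflush(file_); }
//     WriteResult WriteAsciiChunk(char* data, int size) override {
//       fwrite(data, 1, static_cast<size_t>(size), file_);
//       return kContinue;
//     }
//
//    private:
//     FILE* file_;
//   };
//
//   FILE* file = fopen("heap.heapsnapshot", "w");
//   FileOutputStream stream(file);
//   snapshot->Serialize(&stream, v8::HeapSnapshot::kJSON);
//   fclose(file);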


/**
 * An interface for reporting progress and controlling long-running
 * activities.
 */
class V8_EXPORT ActivityControl {
 public:
  enum ControlOption {
    kContinue = 0,
    kAbort = 1
  };
  virtual ~ActivityControl() = default;
  /**
   * Notifies about the current progress. The activity can be stopped by
   * returning kAbort as the callback result.
   */
  virtual ControlOption ReportProgressValue(uint32_t done, uint32_t total) = 0;
};

/**
 * AllocationProfile is a sampled profile of allocations done by the program.
 * This is structured as a call-graph.
 */
class V8_EXPORT AllocationProfile {
 public:
  struct Allocation {
    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;
  };

  /**
   * Represents a node in the call-graph.
   */
  struct Node {
    /**
     * Name of the function. May be empty for anonymous functions or if the
     * script corresponding to this function has been unloaded.
     */
    Local<String> name;

    /**
     * Name of the script containing the function. May be empty if the script
     * name is not available, or if the script has been unloaded.
     */
    Local<String> script_name;

    /**
     * id of the script where the function is located. May be equal to
     * v8::UnboundScript::kNoScriptId in cases where the script doesn't exist.
     */
    int script_id;

    /**
     * Start position of the function in the script.
     */
    int start_position;

    /**
     * 1-indexed line number where the function starts. May be
     * kNoLineNumberInfo if no line number information is available.
     */
    int line_number;

    /**
     * 1-indexed column number where the function starts. May be
     * kNoColumnNumberInfo if no column number information is available.
     */
    int column_number;

    /**
     * Unique id of the node.
     */
    uint32_t node_id;

    /**
     * List of callees called from this node for which we have sampled
     * allocations. The lifetime of the children is scoped to the containing
     * AllocationProfile.
     */
    std::vector<Node*> children;

    /**
     * List of self allocations done by this node in the call-graph.
     */
    std::vector<Allocation> allocations;
  };

  /**
   * Represents a single sample recorded for an allocation.
   */
  struct Sample {
    /**
     * id of the node in the profile tree.
     */
    uint32_t node_id;

    /**
     * Size of the sampled allocation object.
     */
    size_t size;

    /**
     * The number of objects of such size that were sampled.
     */
    unsigned int count;

    /**
     * Unique time-ordered id of the allocation sample. Can be used to track
     * what samples were added or removed between two snapshots.
     */
    uint64_t sample_id;
  };

  /**
   * Returns the root node of the call-graph. The root node corresponds to an
   * empty JS call-stack. The lifetime of the returned Node* is scoped to the
   * containing AllocationProfile.
   */
  virtual Node* GetRootNode() = 0;
  virtual const std::vector<Sample>& GetSamples() = 0;

  virtual ~AllocationProfile() = default;

  static const int kNoLineNumberInfo = Message::kNoLineNumberInfo;
  static const int kNoColumnNumberInfo = Message::kNoColumnInfo;
};
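// Illustrative sketch (not part of the header): summing the sampled bytes
// attributed to a node and its callees. `TotalSampledBytes` is a
// hypothetical helper.
//
//   size_t TotalSampledBytes(const v8::AllocationProfile::Node* node) {
//     size_t total = 0;
//     for (const auto& allocation : node->allocations) {
//       total += allocation.size * allocation.count;
//     }
//     for (const auto* child : node->children) {
//       total += TotalSampledBytes(child);
//     }
//     return total;
//   }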

/**
 * An object graph consisting of embedder objects and V8 objects.
 * Edges of the graph are strong references between the objects.
 * The embedder can build this graph during heap snapshot generation
 * to include the embedder objects in the heap snapshot.
 * Usage:
 * 1) Define a derived class of EmbedderGraph::Node for embedder objects.
 * 2) Set the build embedder graph callback on the heap profiler using
 *    HeapProfiler::AddBuildEmbedderGraphCallback.
 * 3) In the callback use graph->AddEdge(node1, node2) to add an edge from
 *    node1 to node2.
 * 4) To represent references from/to a V8 object, construct V8 nodes using
 *    graph->V8Node(value).
 * A sketch of this flow is shown after the class definition below.
 */
class V8_EXPORT EmbedderGraph {
 public:
  class Node {
   public:
    /**
     * Detachedness specifies whether an object is attached or detached from
     * the main application state. While unknown in general, there may be
     * objects that specifically know their state. V8 passes this information
     * along in the snapshot. Users of the snapshot may use it to annotate the
     * object graph.
     */
    enum class Detachedness : uint8_t {
      kUnknown = 0,
      kAttached = 1,
      kDetached = 2,
    };

    Node() = default;
    virtual ~Node() = default;
    virtual const char* Name() = 0;
    virtual size_t SizeInBytes() = 0;
    /**
     * The corresponding V8 wrapper node if not null.
     * During heap snapshot generation the embedder node and the V8 wrapper
     * node will be merged into one node to simplify retaining paths.
     */
    virtual Node* WrapperNode() { return nullptr; }
    virtual bool IsRootNode() { return false; }
    /** Must return true for non-V8 nodes. */
    virtual bool IsEmbedderNode() { return true; }
    /**
     * Optional name prefix. It is used in Chrome for tagging detached nodes.
     */
    virtual const char* NamePrefix() { return nullptr; }

    /**
     * Returns the NativeObject that can be used for querying the
     * |HeapSnapshot|.
     */
    virtual NativeObject GetNativeObject() { return nullptr; }

    /**
     * Detachedness state of a given object. While unknown in general, there
     * may be objects that specifically know their state. V8 passes this
     * information along in the snapshot. Users of the snapshot may use it to
     * annotate the object graph.
     */
    virtual Detachedness GetDetachedness() { return Detachedness::kUnknown; }

    Node(const Node&) = delete;
    Node& operator=(const Node&) = delete;
  };

  /**
   * Returns a node corresponding to the given V8 value. Ownership is not
   * transferred. The result pointer is valid while the graph is alive.
   */
  virtual Node* V8Node(const v8::Local<v8::Value>& value) = 0;

  /**
   * Adds the given node to the graph and takes ownership of the node.
   * Returns a raw pointer to the node that is valid while the graph is alive.
   */
  virtual Node* AddNode(std::unique_ptr<Node> node) = 0;

  /**
   * Adds an edge that represents a strong reference from the given
   * node |from| to the given node |to|. The nodes must be added to the graph
   * before calling this function.
   *
   * If name is nullptr, the edge will have auto-increment indexes, otherwise
   * it will be named accordingly.
   */
  virtual void AddEdge(Node* from, Node* to, const char* name = nullptr) = 0;

  virtual ~EmbedderGraph() = default;
};
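// Illustrative sketch (not part of the header) of the usage steps above: a
// minimal embedder node plus a BuildEmbedderGraphCallback. `MyEmbedderObject`
// and its `GetWrapper()` accessor are hypothetical embedder-side types.
//
//   class EmbedderRootNode final : public v8::EmbedderGraph::Node {
//    public:
//     const char* Name() override { return "EmbedderRoot"; }
//     size_t SizeInBytes() override { return 0; }
//     bool IsRootNode() override { return true; }
//   };
//
//   void BuildEmbedderGraph(v8::Isolate* isolate, v8::EmbedderGraph* graph,
//                           void* data) {
//     auto* owner = static_cast<MyEmbedderObject*>(data);
//     v8::EmbedderGraph::Node* root =
//         graph->AddNode(std::make_unique<EmbedderRootNode>());
//     graph->AddEdge(root, graph->V8Node(owner->GetWrapper(isolate)),
//                    "wrapped");
//   }
//
//   // Registered via:
//   // heap_profiler->AddBuildEmbedderGraphCallback(BuildEmbedderGraph, obj);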

/**
 * Interface for controlling heap profiling. An instance of the profiler can
 * be retrieved using v8::Isolate::GetHeapProfiler.
 */
class V8_EXPORT HeapProfiler {
 public:
  enum SamplingFlags {
    kSamplingNoFlags = 0,
    kSamplingForceGC = 1 << 0,
  };

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the embedder object graph. The callback should use graph->AddEdge(..) to
   * add references between the objects.
   * The callback must not trigger garbage collection in V8.
   */
  typedef void (*BuildEmbedderGraphCallback)(v8::Isolate* isolate,
                                             v8::EmbedderGraph* graph,
                                             void* data);

  /**
   * Callback function invoked during heap snapshot generation to retrieve
   * the detachedness state of an object referenced by a TracedReference.
   *
   * The callback takes Local<Value> as a parameter to allow the embedder to
   * unpack the TracedReference into a Local and reuse that Local for different
   * purposes.
   */
  using GetDetachednessCallback = EmbedderGraph::Node::Detachedness (*)(
      v8::Isolate* isolate, const v8::Local<v8::Value>& v8_value,
      uint16_t class_id, void* data);

  /** Returns the number of snapshots taken. */
  int GetSnapshotCount();

  /** Returns a snapshot by index. */
  const HeapSnapshot* GetHeapSnapshot(int index);

  /**
   * Returns the SnapshotObjectId for a heap object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(Local<Value> value);

  /**
   * Returns the SnapshotObjectId for a native object referenced by |value| if
   * it has been seen by the heap profiler, kUnknownObjectId otherwise.
   */
  SnapshotObjectId GetObjectId(NativeObject value);

  /**
   * Returns the heap object with the given SnapshotObjectId if the object is
   * alive; otherwise an empty handle is returned.
   */
  Local<Value> FindObjectById(SnapshotObjectId id);

  /**
   * Clears the internal map from SnapshotObjectId to heap object. New objects
   * will not be added to it unless a heap snapshot is taken or heap object
   * tracking is kicked off.
   */
  void ClearObjectIds();

  /**
   * A constant for an invalid SnapshotObjectId. GetObjectId will return it in
   * case the heap profiler cannot find an id for the object passed as a
   * parameter. HeapSnapshot::GetNodeById will always return NULL for such an
   * id.
   */
  static const SnapshotObjectId kUnknownObjectId = 0;

  /**
   * Callback interface for retrieving user friendly names of global objects.
   */
  class ObjectNameResolver {
   public:
    /**
     * Returns the name to be used in the heap snapshot for the given node.
     * The returned string must stay alive until snapshot collection is
     * completed.
     */
    virtual const char* GetName(Local<Object> object) = 0;

   protected:
    virtual ~ObjectNameResolver() = default;
  };

  /**
   * Takes a heap snapshot and returns it.
   */
  const HeapSnapshot* TakeHeapSnapshot(
      ActivityControl* control = nullptr,
      ObjectNameResolver* global_object_name_resolver = nullptr,
      bool treat_global_objects_as_roots = true,
      bool capture_numeric_value = false);

  /**
   * Starts tracking of heap object population statistics. After calling
   * this method, all heap object relocations done by the garbage collector
   * are registered.
   *
   * The |track_allocations| parameter controls whether the stack trace of
   * each allocation in the heap will be recorded and reported as part of
   * the HeapSnapshot.
   */
  void StartTrackingHeapObjects(bool track_allocations = false);

  /**
   * Adds a new time interval entry to the aggregated statistics array. The
   * time interval entry contains information on the current heap objects
   * population size. The method also updates aggregated statistics and
   * reports updates for all previous time intervals via the OutputStream
   * object. Updates on each time interval are provided as a stream of the
   * HeapStatsUpdate structure instances.
   * If |timestamp_us| is supplied, the timestamp of the new entry will be
   * written into it. The return value of the function is the last seen heap
   * object Id.
   *
   * StartTrackingHeapObjects must be called before the first call to this
   * method.
   */
  SnapshotObjectId GetHeapStats(OutputStream* stream,
                                int64_t* timestamp_us = nullptr);

  /**
   * Stops tracking of heap object population statistics and cleans up all
   * collected data. StartTrackingHeapObjects must be called again prior to
   * calling GetHeapStats next time.
   */
  void StopTrackingHeapObjects();

  /**
   * Starts gathering a sampling heap profile. A sampling heap profile is
   * similar to tcmalloc's heap profiler and Go's mprof. It samples object
   * allocations and builds an online 'sampling' heap profile. At any point in
   * time, this profile is expected to be a representative sample of objects
   * currently live in the system. Each sampled allocation includes the stack
   * trace at the time of allocation, which makes this really useful for memory
   * leak detection.
   *
   * This mechanism is intended to be cheap enough that it can be used in
   * production with minimal performance overhead.
   *
   * Allocations are sampled using a randomized Poisson process. On average, one
   * allocation will be sampled every |sample_interval| bytes allocated. The
   * |stack_depth| parameter controls the maximum number of stack frames to be
   * captured on each allocation.
   *
   * NOTE: This is a proof-of-concept at this point. Right now we only sample
   * newspace allocations. Support for paged space allocation (e.g. pre-tenured
   * objects, large objects, code objects, etc.) and native allocations
   * doesn't exist yet, but is anticipated in the future.
   *
   * Objects allocated before the sampling is started will not be included in
   * the profile.
   *
   * Returns false if a sampling heap profiler is already running.
   */
  bool StartSamplingHeapProfiler(uint64_t sample_interval = 512 * 1024,
                                 int stack_depth = 16,
                                 SamplingFlags flags = kSamplingNoFlags);

  /**
   * Stops the sampling heap profiler and discards the current profile.
   */
  void StopSamplingHeapProfiler();

  /**
   * Returns the sampled profile of allocations made (and still live) since
   * StartSamplingHeapProfiler was called. Ownership of the pointer is
   * transferred to the caller. Returns nullptr if the sampling heap profiler
   * is not active.
   */
  AllocationProfile* GetAllocationProfile();

  /**
   * Deletes all snapshots taken. All previously returned pointers to
   * snapshots and their contents become invalid after this call.
   */
  void DeleteAllHeapSnapshots();

  void AddBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                     void* data);
  void RemoveBuildEmbedderGraphCallback(BuildEmbedderGraphCallback callback,
                                        void* data);

  void SetGetDetachednessCallback(GetDetachednessCallback callback, void* data);

  /**
   * Default value of the persistent handle class ID. Must not be used to
   * define a class. Can be used to reset the class of a persistent handle.
   */
  static const uint16_t kPersistentHandleNoClassId = 0;

 private:
  HeapProfiler();
  ~HeapProfiler();
  HeapProfiler(const HeapProfiler&);
  HeapProfiler& operator=(const HeapProfiler&);
};
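// Illustrative sketch (not part of the header): running the sampling heap
// profiler over a workload, assuming a valid `isolate`. The interval and
// stack depth are arbitrary example values.
//
//   v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
//   heap_profiler->StartSamplingHeapProfiler(/*sample_interval=*/64 * 1024,
//                                            /*stack_depth=*/32);
//   // ... run the workload whose allocations should be sampled ...
//   std::unique_ptr<v8::AllocationProfile> profile(
//       heap_profiler->GetAllocationProfile());
//   heap_profiler->StopSamplingHeapProfiler();
//   // profile->GetRootNode() can now be walked, e.g. with the
//   // TotalSampledBytes() sketch above.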

/**
 * A struct for exporting HeapStats data from V8, using the "push" model.
 * See HeapProfiler::GetHeapStats.
 */
struct HeapStatsUpdate {
  HeapStatsUpdate(uint32_t index, uint32_t count, uint32_t size)
    : index(index), count(count), size(size) { }
  uint32_t index;  // Index of the time interval that was changed.
  uint32_t count;  // New value of count field for the interval with this index.
  uint32_t size;  // New value of size field for the interval with this index.
};

#define CODE_EVENTS_LIST(V) \
  V(Builtin)                \
  V(Callback)               \
  V(Eval)                   \
  V(Function)               \
  V(InterpretedFunction)    \
  V(Handler)                \
  V(BytecodeHandler)        \
  V(LazyCompile)            \
  V(RegExp)                 \
  V(Script)                 \
  V(Stub)                   \
  V(Relocation)

/**
 * Note that this enum may be extended in the future. Please include a default
 * case if this enum is used in a switch statement.
 */
enum CodeEventType {
  kUnknownType = 0
#define V(Name) , k##Name##Type
  CODE_EVENTS_LIST(V)
#undef V
};
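// Illustrative sketch (not part of the header): handling CodeEventType in a
// switch with a default case, as requested above, since new enumerators may
// be added over time. `DescribeCodeEvent` is a hypothetical helper.
//
//   const char* DescribeCodeEvent(v8::CodeEventType type) {
//     switch (type) {
//       case v8::kFunctionType:
//         return "function";
//       case v8::kRelocationType:
//         return "relocation";
//       default:
//         return v8::CodeEvent::GetCodeEventTypeName(type);
//     }
//   }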

/**
 * Representation of a code creation event.
 */
class V8_EXPORT CodeEvent {
 public:
  uintptr_t GetCodeStartAddress();
  size_t GetCodeSize();
  Local<String> GetFunctionName();
  Local<String> GetScriptName();
  int GetScriptLine();
  int GetScriptColumn();
  /**
   * NOTE (mmarchini): We can't allocate objects in the heap when we collect
   * existing code, and both the code type and the comment are not stored in
   * the heap, so we return those as const char*.
   */
  CodeEventType GetCodeType();
  const char* GetComment();

  static const char* GetCodeEventTypeName(CodeEventType code_event_type);

  uintptr_t GetPreviousCodeStartAddress();
};

/**
 * Interface to listen to code creation and code relocation events.
 */
class V8_EXPORT CodeEventHandler {
 public:
  /**
   * Creates a new listener for the |isolate|. The isolate must be
   * initialized. The listener object must be destroyed after use.
   * Multiple listeners can be created for the same isolate.
   */
  explicit CodeEventHandler(Isolate* isolate);
  virtual ~CodeEventHandler();

  /**
   * Handle is called every time a code object is created or moved. Information
   * about each code event will be available through the `code_event`
   * parameter.
   *
   * When the CodeEventType is kRelocationType, the code for this CodeEvent has
   * moved from `GetPreviousCodeStartAddress()` to `GetCodeStartAddress()`.
   */
  virtual void Handle(CodeEvent* code_event) = 0;

  /**
   * Call `Enable()` to start listening to code creation and code relocation
   * events. These events will be handled by `Handle()`.
   */
  void Enable();

  /**
   * Call `Disable()` to stop listening to code creation and code relocation
   * events.
   */
  void Disable();

 private:
  CodeEventHandler();
  CodeEventHandler(const CodeEventHandler&);
  CodeEventHandler& operator=(const CodeEventHandler&);
  void* internal_listener_;
};
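// Illustrative sketch (not part of the header): a listener that logs code
// creation and relocation events for a valid `isolate`. printf assumes
// <cstdio>.
//
//   class LoggingCodeEventHandler : public v8::CodeEventHandler {
//    public:
//     explicit LoggingCodeEventHandler(v8::Isolate* isolate)
//         : v8::CodeEventHandler(isolate) {}
//     void Handle(v8::CodeEvent* event) override {
//       printf("%s code at %p, size %zu\n",
//              v8::CodeEvent::GetCodeEventTypeName(event->GetCodeType()),
//              reinterpret_cast<void*>(event->GetCodeStartAddress()),
//              event->GetCodeSize());
//     }
//   };
//
//   LoggingCodeEventHandler handler(isolate);
//   handler.Enable();
//   // ... run code; Handle() is invoked for creation/relocation events ...
//   handler.Disable();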

}  // namespace v8


#endif  // V8_V8_PROFILER_H_
