// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/profiler/cpu-profiler.h"

#include <unordered_map>
#include <utility>

#include "include/v8-locker.h"
#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
#include "src/execution/v8threads.h"
#include "src/execution/vm-state-inl.h"
#include "src/libsampler/sampler.h"
#include "src/logging/counters.h"
#include "src/logging/log.h"
#include "src/profiler/cpu-profiler-inl.h"
#include "src/profiler/profiler-stats.h"
#include "src/profiler/symbolizer.h"
#include "src/utils/locked-queue-inl.h"

#if V8_ENABLE_WEBASSEMBLY
#include "src/wasm/wasm-engine.h"
#endif  // V8_ENABLE_WEBASSEMBLY

namespace v8 {
namespace internal {

static const int kProfilerStackSize = 64 * KB;

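// CpuSampler is the sampler::Sampler implementation used by the CPU
// profiler. SampleStack bails out if the isolate is not locked by the
// profiled thread (when Lockers are in use) or if the tick buffer is full;
// otherwise it records the current register state into a TickSample slot in
// the processor's buffer.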
class CpuSampler : public sampler::Sampler {
 public:
  CpuSampler(Isolate* isolate, SamplingEventsProcessor* processor)
      : sampler::Sampler(reinterpret_cast<v8::Isolate*>(isolate)),
        processor_(processor),
        perThreadData_(isolate->FindPerThreadDataForThisThread()) {}

  void SampleStack(const v8::RegisterState& regs) override {
    Isolate* isolate = reinterpret_cast<Isolate*>(this->isolate());
    if (isolate->was_locker_ever_used() &&
        (!isolate->thread_manager()->IsLockedByThread(
             perThreadData_->thread_id()) ||
         perThreadData_->thread_state() != nullptr)) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kIsolateNotLocked);
      return;
    }
    TickSample* sample = processor_->StartTickSample();
    if (sample == nullptr) {
      ProfilerStats::Instance()->AddReason(
          ProfilerStats::Reason::kTickBufferFull);
      return;
    }
    // Every bailout up until here resulted in a dropped sample. From now on,
    // the sample is created in the buffer.
    sample->Init(isolate, regs, TickSample::kIncludeCEntryFrame,
                 /* update_stats */ true,
                 /* use_simulator_reg_state */ true, processor_->period());
    if (is_counting_samples_ && !sample->timestamp.IsNull()) {
      if (sample->state == JS) ++js_sample_count_;
      if (sample->state == EXTERNAL) ++external_sample_count_;
    }
    processor_->FinishTickSample();
  }

 private:
  SamplingEventsProcessor* processor_;
  Isolate::PerIsolateThreadData* perThreadData_;
};

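// ProfilingScope keeps the isolate in "profiling" mode for its lifetime: it
// bumps the per-isolate profiler count, registers the listener as a code
// event listener, and logs already-existing code so the profile starts with
// a populated code map.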
ProfilingScope::ProfilingScope(Isolate* isolate, ProfilerListener* listener)
    : isolate_(isolate), listener_(listener) {
  size_t profiler_count = isolate_->num_cpu_profilers();
  profiler_count++;
  isolate_->set_num_cpu_profilers(profiler_count);
  isolate_->SetIsProfiling(true);
#if V8_ENABLE_WEBASSEMBLY
  wasm::GetWasmEngine()->EnableCodeLogging(isolate_);
#endif  // V8_ENABLE_WEBASSEMBLY

  Logger* logger = isolate_->logger();
  logger->AddCodeEventListener(listener_);
  // Populate the ProfilerCodeObserver with the initial functions and
  // callbacks on the heap.
  DCHECK(isolate_->heap()->HasBeenSetUp());

  if (!FLAG_prof_browser_mode) {
    logger->LogCodeObjects();
  }
  logger->LogCompiledFunctions();
  logger->LogAccessorCallbacks();
}

ProfilingScope::~ProfilingScope() {
  isolate_->logger()->RemoveCodeEventListener(listener_);

  size_t profiler_count = isolate_->num_cpu_profilers();
  DCHECK_GT(profiler_count, 0);
  profiler_count--;
  isolate_->set_num_cpu_profilers(profiler_count);
  if (profiler_count == 0) isolate_->SetIsProfiling(false);
}

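// The events processor runs on its own thread ("v8:ProfEvntProc") and owns
// the queues that connect code events and tick samples to the symbolizer.
// Exactly one processor may be attached to a ProfilerCodeObserver at a time.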
ProfilerEventsProcessor::ProfilerEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles)
    : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
      symbolizer_(symbolizer),
      code_observer_(code_observer),
      profiles_(profiles),
      last_code_event_id_(0),
      last_processed_code_event_id_(0),
      isolate_(isolate) {
  DCHECK(!code_observer_->processor());
  code_observer_->set_processor(this);
}

SamplingEventsProcessor::SamplingEventsProcessor(
    Isolate* isolate, Symbolizer* symbolizer,
    ProfilerCodeObserver* code_observer, CpuProfilesCollection* profiles,
    base::TimeDelta period, bool use_precise_sampling)
    : ProfilerEventsProcessor(isolate, symbolizer, code_observer, profiles),
      sampler_(new CpuSampler(isolate, this)),
      period_(period),
      use_precise_sampling_(use_precise_sampling) {
  sampler_->Start();
}

SamplingEventsProcessor::~SamplingEventsProcessor() { sampler_->Stop(); }

ProfilerEventsProcessor::~ProfilerEventsProcessor() {
  DCHECK_EQ(code_observer_->processor(), this);
  code_observer_->clear_processor();
}

void ProfilerEventsProcessor::Enqueue(const CodeEventsContainer& event) {
  event.generic.order = ++last_code_event_id_;
  events_buffer_.Enqueue(event);
}

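// Records a synthetic sample at the point of deoptimization. The register
// state is reconstructed from the current C entry frame pointer and the
// fp-to-sp delta reported by the deoptimizer.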
void ProfilerEventsProcessor::AddDeoptStack(Address from, int fp_to_sp_delta) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  Address fp = isolate_->c_entry_fp(isolate_->thread_local_top());
  regs.sp = reinterpret_cast<void*>(fp - fp_to_sp_delta);
  regs.fp = reinterpret_cast<void*>(fp);
  regs.pc = reinterpret_cast<void*>(from);
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, false,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}

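// Takes a sample of the current stack from within the VM (as opposed to the
// asynchronous CpuSampler) and enqueues it on the VM tick queue. Used, for
// example, when profiling starts and for CpuProfiler::CollectSample().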
void ProfilerEventsProcessor::AddCurrentStack(bool update_stats) {
  TickSampleEventRecord record(last_code_event_id_);
  RegisterState regs;
  StackFrameIterator it(isolate_);
  if (!it.done()) {
    StackFrame* frame = it.frame();
    regs.sp = reinterpret_cast<void*>(frame->sp());
    regs.fp = reinterpret_cast<void*>(frame->fp());
    regs.pc = reinterpret_cast<void*>(frame->pc());
  }
  record.sample.Init(isolate_, regs, TickSample::kSkipCEntryFrame, update_stats,
                     false);
  ticks_from_vm_buffer_.Enqueue(record);
}

void ProfilerEventsProcessor::AddSample(TickSample sample) {
  TickSampleEventRecord record(last_code_event_id_);
  record.sample = sample;
  ticks_from_vm_buffer_.Enqueue(record);
}

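// Stops the processor thread and waits for it to finish. Clearing running_
// makes the run loop exit, and the condition variable is notified so a
// processor sleeping between samples wakes up immediately.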
void ProfilerEventsProcessor::StopSynchronously() {
  bool expected = true;
  if (!running_.compare_exchange_strong(expected, false,
                                        std::memory_order_relaxed))
    return;
  {
    base::MutexGuard guard(&running_mutex_);
    running_cond_.NotifyOne();
  }
  Join();
}


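// Dequeues a single code event and applies it. Native-context moves only
// update addresses in the profiles being collected; all other events update
// the shared code map via the ProfilerCodeObserver.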
bool ProfilerEventsProcessor::ProcessCodeEvent() {
  CodeEventsContainer record;
  if (events_buffer_.Dequeue(&record)) {
    if (record.generic.type == CodeEventRecord::Type::kNativeContextMove) {
      NativeContextMoveEventRecord& nc_record =
          record.NativeContextMoveEventRecord_;
      profiles_->UpdateNativeContextAddressForCurrentProfiles(
          nc_record.from_address, nc_record.to_address);
    } else {
      code_observer_->CodeEventHandlerInternal(record);
    }
    last_processed_code_event_id_ = record.generic.order;
    return true;
  }
  return false;
}

void ProfilerEventsProcessor::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  switch (evt_rec.generic.type) {
    case CodeEventRecord::Type::kCodeCreation:
    case CodeEventRecord::Type::kCodeMove:
    case CodeEventRecord::Type::kCodeDisableOpt:
    case CodeEventRecord::Type::kCodeDelete:
    case CodeEventRecord::Type::kNativeContextMove:
      Enqueue(evt_rec);
      break;
    case CodeEventRecord::Type::kCodeDeopt: {
      const CodeDeoptEventRecord* rec = &evt_rec.CodeDeoptEventRecord_;
      Address pc = rec->pc;
      int fp_to_sp_delta = rec->fp_to_sp_delta;
      Enqueue(evt_rec);
      AddDeoptStack(pc, fp_to_sp_delta);
      break;
    }
    case CodeEventRecord::Type::kNoEvent:
    case CodeEventRecord::Type::kReportBuiltin:
      UNREACHABLE();
  }
}

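// Symbolizes one tick sample against the current code map and adds the
// resulting stack trace to all profiles that are currently being recorded.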
void SamplingEventsProcessor::SymbolizeAndAddToProfiles(
    const TickSampleEventRecord* record) {
  const TickSample& tick_sample = record->sample;
  Symbolizer::SymbolizedSample symbolized =
      symbolizer_->SymbolizeTickSample(tick_sample);
  profiles_->AddPathToCurrentProfiles(
      tick_sample.timestamp, symbolized.stack_trace, symbolized.src_line,
      tick_sample.update_stats_, tick_sample.sampling_interval_,
      tick_sample.state, tick_sample.embedder_state,
      reinterpret_cast<Address>(tick_sample.context),
      reinterpret_cast<Address>(tick_sample.embedder_context));
}

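// Processes at most one sample, keeping samples and code events in order: a
// sample is only symbolized once every code event that precedes it (by its
// order id) has been applied to the code map. Samples generated by the VM
// itself take precedence over samples from the sampler's ring buffer.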
ProfilerEventsProcessor::SampleProcessingResult
SamplingEventsProcessor::ProcessOneSample() {
  TickSampleEventRecord record1;
  if (ticks_from_vm_buffer_.Peek(&record1) &&
      (record1.order == last_processed_code_event_id_)) {
    TickSampleEventRecord record;
    ticks_from_vm_buffer_.Dequeue(&record);
    SymbolizeAndAddToProfiles(&record);
    return OneSampleProcessed;
  }

  const TickSampleEventRecord* record = ticks_buffer_.Peek();
  if (record == nullptr) {
    if (ticks_from_vm_buffer_.IsEmpty()) return NoSamplesInQueue;
    return FoundSampleForNextCodeEvent;
  }
  if (record->order != last_processed_code_event_id_) {
    return FoundSampleForNextCodeEvent;
  }
  SymbolizeAndAddToProfiles(record);
  ticks_buffer_.Remove();
  return OneSampleProcessed;
}

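// Main loop of the processor thread. Each iteration drains pending samples
// and code events, waits until the next sampling time (busy-waiting on
// Windows when precise sampling of short intervals is requested), and then
// triggers the sampler. On shutdown, any remaining events are flushed.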
void SamplingEventsProcessor::Run() {
  base::MutexGuard guard(&running_mutex_);
  while (running_.load(std::memory_order_relaxed)) {
    base::TimeTicks nextSampleTime = base::TimeTicks::Now() + period_;
    base::TimeTicks now;
    SampleProcessingResult result;
    // Keep processing existing events until we need to take the next sample
    // or the ticks buffer is empty.
    do {
      result = ProcessOneSample();
      if (result == FoundSampleForNextCodeEvent) {
        // All ticks of the current last_processed_code_event_id_ are
        // processed, proceed to the next code event.
        ProcessCodeEvent();
      }
      now = base::TimeTicks::Now();
    } while (result != NoSamplesInQueue && now < nextSampleTime);

    if (nextSampleTime > now) {
#if V8_OS_WIN
      if (use_precise_sampling_ &&
          nextSampleTime - now < base::TimeDelta::FromMilliseconds(100)) {
        // Do not use Sleep on Windows as it is very imprecise, with up to 16ms
        // jitter, which is unacceptable for short profile intervals.
        while (base::TimeTicks::Now() < nextSampleTime) {
        }
      } else  // NOLINT
#else
      USE(use_precise_sampling_);
#endif  // V8_OS_WIN
      {
        // Allow another thread to interrupt the delay between samples in the
        // event of profiler shutdown.
        while (now < nextSampleTime &&
               running_cond_.WaitFor(&running_mutex_, nextSampleTime - now)) {
          // If true was returned, we got interrupted before the timeout
          // elapsed. If this was not due to a change in running state, a
          // spurious wakeup occurred (thus we should continue to wait).
          if (!running_.load(std::memory_order_relaxed)) {
            break;
          }
          now = base::TimeTicks::Now();
        }
      }
    }

    // Schedule next sample.
    sampler_->DoSample();
  }

  // Process remaining tick events.
  do {
    SampleProcessingResult result;
    do {
      result = ProcessOneSample();
    } while (result == OneSampleProcessed);
  } while (ProcessCodeEvent());
}

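// Changing the sampling interval requires restarting the processor thread so
// that the run loop picks up the new period.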
void SamplingEventsProcessor::SetSamplingInterval(base::TimeDelta period) {
  if (period_ == period) return;
  StopSynchronously();

  period_ = period;
  running_.store(true, std::memory_order_relaxed);

  StartSynchronously();
}

void* SamplingEventsProcessor::operator new(size_t size) {
  return AlignedAlloc(size, alignof(SamplingEventsProcessor));
}

void SamplingEventsProcessor::operator delete(void* ptr) { AlignedFree(ptr); }

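// The code observer owns the code entry storage and the code map. Code
// events are forwarded to the active processor when one is attached;
// otherwise (e.g. eager logging with no active profile) they are applied to
// the code map directly.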
ProfilerCodeObserver::ProfilerCodeObserver(Isolate* isolate,
                                           CodeEntryStorage& storage)
    : isolate_(isolate),
      code_entries_(storage),
      code_map_(storage),
      weak_code_registry_(isolate),
      processor_(nullptr) {
  CreateEntriesForRuntimeCallStats();
  LogBuiltins();
}

void ProfilerCodeObserver::ClearCodeMap() {
  weak_code_registry_.Clear();
  code_map_.Clear();
}

void ProfilerCodeObserver::CodeEventHandler(
    const CodeEventsContainer& evt_rec) {
  if (processor_) {
    processor_->CodeEventHandler(evt_rec);
    return;
  }
  CodeEventHandlerInternal(evt_rec);
}

size_t ProfilerCodeObserver::GetEstimatedMemoryUsage() const {
  // To avoid a race condition on the code map, the estimate is only computed
  // while no processor thread is attached (i.e. in kEagerLogging mode).
  if (!processor_) {
    return sizeof(*this) + code_map_.GetEstimatedMemoryUsage() +
           code_entries_.strings().GetStringSize();
  }
  return 0;
}

void ProfilerCodeObserver::CodeEventHandlerInternal(
    const CodeEventsContainer& evt_rec) {
  CodeEventsContainer record = evt_rec;
  switch (evt_rec.generic.type) {
#define PROFILER_TYPE_CASE(type, clss)        \
  case CodeEventRecord::Type::type:           \
    record.clss##_.UpdateCodeMap(&code_map_); \
    break;

    CODE_EVENTS_TYPE_LIST(PROFILER_TYPE_CASE)

#undef PROFILER_TYPE_CASE
    default:
      break;
  }
}

void ProfilerCodeObserver::CreateEntriesForRuntimeCallStats() {
#ifdef V8_RUNTIME_CALL_STATS
  RuntimeCallStats* rcs = isolate_->counters()->runtime_call_stats();
  for (int i = 0; i < RuntimeCallStats::kNumberOfCounters; ++i) {
    RuntimeCallCounter* counter = rcs->GetCounter(i);
    DCHECK(counter->name());
    auto entry = code_entries_.Create(CodeEventListener::FUNCTION_TAG,
                                      counter->name(), "native V8Runtime");
    code_map_.AddCode(reinterpret_cast<Address>(counter), entry, 1);
  }
#endif  // V8_RUNTIME_CALL_STATS
}

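// Registers a code map entry for every builtin so that samples whose pc lies
// inside builtin code can be attributed to the corresponding Builtin.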
void ProfilerCodeObserver::LogBuiltins() {
  Builtins* builtins = isolate_->builtins();
  DCHECK(builtins->is_initialized());
  for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
       ++builtin) {
    CodeEventsContainer evt_rec(CodeEventRecord::Type::kReportBuiltin);
    ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
    Code code = FromCodeT(builtins->code(builtin));
    rec->instruction_start = code.InstructionStart();
    rec->instruction_size = code.InstructionSize();
    rec->builtin = builtin;
    CodeEventHandlerInternal(evt_rec);
  }
}

int CpuProfiler::GetProfilesCount() {
  // The count of profiles doesn't depend on a security token.
  return static_cast<int>(profiles_->profiles()->size());
}


CpuProfile* CpuProfiler::GetProfile(int index) {
  return profiles_->profiles()->at(index).get();
}


void CpuProfiler::DeleteAllProfiles() {
  if (is_profiling_) StopProcessor();
  ResetProfiles();
}


void CpuProfiler::DeleteProfile(CpuProfile* profile) {
  profiles_->RemoveProfile(profile);
  if (profiles_->profiles()->empty() && !is_profiling_) {
    // If this was the last profile, clean up all accessory data as well.
    ResetProfiles();
  }
}

namespace {

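// Process-wide registry of CpuProfiler instances, keyed by isolate. It lets
// the static entry points (CollectSample, GetAllProfilersMemorySize) reach
// every profiler attached to a given isolate.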
class CpuProfilersManager {
 public:
  void AddProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    profilers_.emplace(isolate, profiler);
  }

  void RemoveProfiler(Isolate* isolate, CpuProfiler* profiler) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      if (it->second != profiler) continue;
      profilers_.erase(it);
      return;
    }
    UNREACHABLE();
  }

  void CallCollectSample(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      it->second->CollectSample();
    }
  }

  size_t GetAllProfilersMemorySize(Isolate* isolate) {
    base::MutexGuard lock(&mutex_);
    size_t estimated_memory = 0;
    auto range = profilers_.equal_range(isolate);
    for (auto it = range.first; it != range.second; ++it) {
      estimated_memory += it->second->GetEstimatedMemoryUsage();
    }
    return estimated_memory;
  }

 private:
  std::unordered_multimap<Isolate*, CpuProfiler*> profilers_;
  base::Mutex mutex_;
};

DEFINE_LAZY_LEAKY_OBJECT_GETTER(CpuProfilersManager, GetProfilersManager)

}  // namespace

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode)
    : CpuProfiler(isolate, naming_mode, logging_mode,
                  new CpuProfilesCollection(isolate), nullptr, nullptr,
                  new ProfilerCodeObserver(isolate, code_entries_)) {}

CpuProfiler::CpuProfiler(Isolate* isolate, CpuProfilingNamingMode naming_mode,
                         CpuProfilingLoggingMode logging_mode,
                         CpuProfilesCollection* test_profiles,
                         Symbolizer* test_symbolizer,
                         ProfilerEventsProcessor* test_processor,
                         ProfilerCodeObserver* test_code_observer)
    : isolate_(isolate),
      naming_mode_(naming_mode),
      logging_mode_(logging_mode),
      base_sampling_interval_(base::TimeDelta::FromMicroseconds(
          FLAG_cpu_profiler_sampling_interval)),
      code_observer_(test_code_observer),
      profiles_(test_profiles),
      symbolizer_(test_symbolizer),
      processor_(test_processor),
      is_profiling_(false) {
  profiles_->set_cpu_profiler(this);
  GetProfilersManager()->AddProfiler(isolate, this);

  if (logging_mode == kEagerLogging) EnableLogging();
}

CpuProfiler::~CpuProfiler() {
  DCHECK(!is_profiling_);
  GetProfilersManager()->RemoveProfiler(isolate_, this);

  DisableLogging();
  profiles_.reset();

  // We don't currently expect any references to refcounted strings to be
  // maintained with zero profiles after the code map is cleared.
  DCHECK(code_entries_.strings().empty());
}

void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
  DCHECK(!is_profiling_);
  base_sampling_interval_ = value;
}

void CpuProfiler::set_use_precise_sampling(bool value) {
  DCHECK(!is_profiling_);
  use_precise_sampling_ = value;
}

void CpuProfiler::ResetProfiles() {
  profiles_.reset(new CpuProfilesCollection(isolate_));
  profiles_->set_cpu_profiler(this);
}

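// Starts code-event logging. With kEagerLogging this happens as soon as the
// profiler is constructed; with kLazyLogging it is deferred until profiling
// actually starts.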
void CpuProfiler::EnableLogging() {
  if (profiling_scope_) return;

  if (!profiler_listener_) {
    profiler_listener_.reset(new ProfilerListener(
        isolate_, code_observer_.get(), *code_observer_->code_entries(),
        *code_observer_->weak_code_registry(), naming_mode_));
  }
  profiling_scope_.reset(
      new ProfilingScope(isolate_, profiler_listener_.get()));
}

void CpuProfiler::DisableLogging() {
  if (!profiling_scope_) return;

  DCHECK(profiler_listener_);
  profiling_scope_.reset();
  profiler_listener_.reset();
  code_observer_->ClearCodeMap();
}

base::TimeDelta CpuProfiler::ComputeSamplingInterval() const {
  return profiles_->GetCommonSamplingInterval();
}

void CpuProfiler::AdjustSamplingInterval() {
  if (!processor_) return;

  base::TimeDelta base_interval = ComputeSamplingInterval();
  processor_->SetSamplingInterval(base_interval);
}

// static
void CpuProfiler::CollectSample(Isolate* isolate) {
  GetProfilersManager()->CallCollectSample(isolate);
}

void CpuProfiler::CollectSample() {
  if (processor_) {
    processor_->AddCurrentStack();
  }
}

// static
size_t CpuProfiler::GetAllProfilersMemorySize(Isolate* isolate) {
  return GetProfilersManager()->GetAllProfilersMemorySize(isolate);
}

size_t CpuProfiler::GetEstimatedMemoryUsage() const {
  return code_observer_->GetEstimatedMemoryUsage();
}

CpuProfilingResult CpuProfiler::StartProfiling(
    CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(nullptr, options, std::move(delegate));
}

CpuProfilingResult CpuProfiler::StartProfiling(
    const char* title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  CpuProfilingResult result =
      profiles_->StartProfiling(title, options, std::move(delegate));

  // TODO(nicodubus): Revisit logic for if we want to do anything different for
  // kAlreadyStarted
  if (result.status == CpuProfilingStatus::kStarted ||
      result.status == CpuProfilingStatus::kAlreadyStarted) {
    TRACE_EVENT0("v8", "CpuProfiler::StartProfiling");
    AdjustSamplingInterval();
    StartProcessorIfNotStarted();
  }

  return result;
}

CpuProfilingResult CpuProfiler::StartProfiling(
    String title, CpuProfilingOptions options,
    std::unique_ptr<DiscardedSamplesDelegate> delegate) {
  return StartProfiling(profiles_->GetName(title), options,
                        std::move(delegate));
}

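// Lazily creates the symbolizer and the sampling processor thread on the
// first StartProfiling call. If a processor already exists, only a sample of
// the current stack is recorded so the new profile starts with the frames
// that are live right now.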
void CpuProfiler::StartProcessorIfNotStarted() {
  if (processor_) {
    processor_->AddCurrentStack();
    return;
  }

  if (!profiling_scope_) {
    DCHECK_EQ(logging_mode_, kLazyLogging);
    EnableLogging();
  }

  if (!symbolizer_) {
    symbolizer_ = std::make_unique<Symbolizer>(code_observer_->code_map());
  }

  base::TimeDelta sampling_interval = ComputeSamplingInterval();
  processor_.reset(new SamplingEventsProcessor(
      isolate_, symbolizer_.get(), code_observer_.get(), profiles_.get(),
      sampling_interval, use_precise_sampling_));
  is_profiling_ = true;

  // Enable stack sampling.
  processor_->AddCurrentStack();
  processor_->StartSynchronously();
}

CpuProfile* CpuProfiler::StopProfiling(const char* title) {
  CpuProfile* profile = profiles_->Lookup(title);
  if (profile) {
    return StopProfiling(profile->id());
  }
  return nullptr;
}

CpuProfile* CpuProfiler::StopProfiling(ProfilerId id) {
  if (!is_profiling_) return nullptr;
  const bool last_profile = profiles_->IsLastProfileLeft(id);
  if (last_profile) StopProcessor();

  CpuProfile* profile = profiles_->StopProfiling(id);

  AdjustSamplingInterval();

  DCHECK(profiling_scope_);
  if (last_profile && logging_mode_ == kLazyLogging) {
    DisableLogging();
  }

  return profile;
}

CpuProfile* CpuProfiler::StopProfiling(String title) {
  return StopProfiling(profiles_->GetName(title));
}

void CpuProfiler::StopProcessor() {
  is_profiling_ = false;
  processor_->StopSynchronously();
  processor_.reset();
}
}  // namespace internal
}  // namespace v8