// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/memory-reducer.h"

#include "src/flags/flags.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/incremental-marking.h"
#include "src/init/v8.h"
#include "src/utils/utils.h"

namespace v8 {
namespace internal {

const int MemoryReducer::kLongDelayMs = 8000;
const int MemoryReducer::kShortDelayMs = 500;
const int MemoryReducer::kWatchdogDelayMs = 100000;
const int MemoryReducer::kMaxNumberOfGCs = 3;
const double MemoryReducer::kCommittedMemoryFactor = 1.1;
const size_t MemoryReducer::kCommittedMemoryDelta = 10 * MB;
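
// Rough intuition for the constants above (the class comment in
// memory-reducer.h remains the authoritative specification):
// - kLongDelayMs/kShortDelayMs: timer delays between GC attempts; long while
//   waiting to start a cycle, short between consecutive GCs of one cycle.
// - kWatchdogDelayMs: if no GC has happened for this long, one is forced even
//   when the allocation heuristics would not request it.
// - kMaxNumberOfGCs: cap on consecutive memory-reducing GCs per cycle.
// - kCommittedMemoryFactor/kCommittedMemoryDelta: a new cycle starts only
//   after committed old-generation memory has grown by this factor and delta
//   relative to the previous run (see the kDone case in Step below).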

MemoryReducer::MemoryReducer(Heap* heap)
    : heap_(heap),
      taskrunner_(V8::GetCurrentPlatform()->GetForegroundTaskRunner(
          reinterpret_cast<v8::Isolate*>(heap->isolate()))),
      state_(kDone, 0, 0.0, 0.0, 0),
      js_calls_counter_(0),
      js_calls_sample_time_ms_(0.0) {}

MemoryReducer::TimerTask::TimerTask(MemoryReducer* memory_reducer)
    : CancelableTask(memory_reducer->heap()->isolate()),
      memory_reducer_(memory_reducer) {}

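// Runs periodically while the reducer is in the kWait state: samples the
// allocation rate and feeds a kTimer event into the state machine, which
// decides whether to start a memory-reducing incremental GC.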
void MemoryReducer::TimerTask::RunInternal() {
  Heap* heap = memory_reducer_->heap();
  Event event;
  double time_ms = heap->MonotonicallyIncreasingTimeInMs();
  heap->tracer()->SampleAllocation(time_ms, heap->NewSpaceAllocationCounter(),
                                   heap->OldGenerationAllocationCounter(),
                                   heap->EmbedderAllocationCounter());
  bool low_allocation_rate = heap->HasLowAllocationRate();
  bool optimize_for_memory = heap->ShouldOptimizeForMemoryUsage();
  if (FLAG_trace_gc_verbose) {
    heap->isolate()->PrintWithTimestamp(
        "Memory reducer: %s, %s\n",
        low_allocation_rate ? "low alloc" : "high alloc",
        optimize_for_memory ? "background" : "foreground");
  }
  event.type = kTimer;
  event.time_ms = time_ms;
  // The memory reducer will start incremental marking if
  // 1) the mutator is likely idle: the allocation rate is low, or
  // 2) the mutator is in the background: the optimize-for-memory flag is set.
  event.should_start_incremental_gc =
      low_allocation_rate || optimize_for_memory;
  event.can_start_incremental_gc =
      heap->incremental_marking()->IsStopped() &&
      (heap->incremental_marking()->CanBeActivated() || optimize_for_memory);
  event.committed_memory = heap->CommittedOldGenerationMemory();
  memory_reducer_->NotifyTimer(event);
}

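// Handles a kTimer event: steps the state machine and either starts an
// incremental GC (kRun), keeps waiting and re-arms the timer (kWait), or
// lets the cycle end (kDone).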
void MemoryReducer::NotifyTimer(const Event& event) {
  DCHECK_EQ(kTimer, event.type);
  DCHECK_EQ(kWait, state_.action);
  state_ = Step(state_, event);
  if (state_.action == kRun) {
    DCHECK(heap()->incremental_marking()->IsStopped());
    DCHECK(FLAG_incremental_marking);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp("Memory reducer: started GC #%d\n",
                                            state_.started_gcs);
    }
    heap()->StartIdleIncrementalMarking(
        GarbageCollectionReason::kMemoryReducer,
        kGCCallbackFlagCollectAllExternalMemory);
  } else if (state_.action == kWait) {
    if (!heap()->incremental_marking()->IsStopped() &&
        heap()->ShouldOptimizeForMemoryUsage()) {
      // Make progress with pending incremental marking if memory usage has
      // higher priority than latency. This is important for background tabs
      // that do not send idle notifications.
      const int kIncrementalMarkingDelayMs = 500;
      double deadline = heap()->MonotonicallyIncreasingTimeInMs() +
                        kIncrementalMarkingDelayMs;
      heap()->incremental_marking()->AdvanceWithDeadline(
          deadline, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
          StepOrigin::kTask);
      heap()->FinalizeIncrementalMarkingIfComplete(
          GarbageCollectionReason::kFinalizeMarkingViaTask);
    }
    // Re-schedule the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: waiting for %.f ms\n",
          state_.next_gc_start_ms - event.time_ms);
    }
  }
}

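// Handles a kMarkCompact event (a mark-compact GC has finished): steps the
// state machine and, when entering kWait, arms the timer for the next
// memory-reducing GC attempt.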
void MemoryReducer::NotifyMarkCompact(const Event& event) {
  DCHECK_EQ(kMarkCompact, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
  }
  if (old_action == kRun) {
    if (FLAG_trace_gc_verbose) {
      heap()->isolate()->PrintWithTimestamp(
          "Memory reducer: finished GC #%d (%s)\n", state_.started_gcs,
          state_.action == kWait ? "will do more" : "done");
    }
  }
}

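// Handles a kPossibleGarbage event (a hint that garbage may be available):
// from kDone this starts a new cycle by entering kWait and arming the timer;
// in the other states it is a no-op.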
void MemoryReducer::NotifyPossibleGarbage(const Event& event) {
  DCHECK_EQ(kPossibleGarbage, event.type);
  Action old_action = state_.action;
  state_ = Step(state_, event);
  if (old_action != kWait && state_.action == kWait) {
    // If we are transitioning to the WAIT state, start the timer.
    ScheduleTimer(state_.next_gc_start_ms - event.time_ms);
  }
}

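// Returns true if the last GC happened more than kWatchdogDelayMs ago, i.e.
// a GC should be forced even though the heuristics would not request one.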
bool MemoryReducer::WatchdogGC(const State& state, const Event& event) {
  return state.last_gc_time_ms != 0 &&
         event.time_ms > state.last_gc_time_ms + kWatchdogDelayMs;
}

// For the specification of this function, see the comment on the
// MemoryReducer class.
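// A rough summary of the transitions implemented below (the header comment
// remains authoritative):
//   kDone -> kWait  on kPossibleGarbage, or on kMarkCompact once committed
//                   memory has grown enough since the last run.
//   kWait -> kRun   on kTimer, if an incremental GC can and should start (or
//                   the watchdog fires) and the scheduled start time has
//                   passed; -> kDone once kMaxNumberOfGCs have been started.
//   kRun  -> kWait  on kMarkCompact, if more GCs are allowed and likely to
//                   collect more memory; otherwise -> kDone.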
MemoryReducer::State MemoryReducer::Step(const State& state,
                                         const Event& event) {
  if (!FLAG_incremental_marking || !FLAG_memory_reducer) {
    return State(kDone, 0, 0, state.last_gc_time_ms, 0);
  }
  switch (state.action) {
    case kDone:
      if (event.type == kTimer) {
        return state;
      } else if (event.type == kMarkCompact) {
        if (event.committed_memory <
            std::max(
                static_cast<size_t>(state.committed_memory_at_last_run *
                                    kCommittedMemoryFactor),
                state.committed_memory_at_last_run + kCommittedMemoryDelta)) {
          return state;
        } else {
          return State(kWait, 0, event.time_ms + kLongDelayMs, event.time_ms,
                       0);
        }
      } else {
        DCHECK_EQ(kPossibleGarbage, event.type);
        // Only mark-compact events advance last_gc_time_ms, so keep the old
        // value here.
        return State(kWait, 0, event.time_ms + kLongDelayMs,
                     state.last_gc_time_ms, 0);
      }
    case kWait:
      switch (event.type) {
        case kPossibleGarbage:
          return state;
        case kTimer:
          if (state.started_gcs >= kMaxNumberOfGCs) {
            return State(kDone, kMaxNumberOfGCs, 0.0, state.last_gc_time_ms,
                         event.committed_memory);
          } else if (event.can_start_incremental_gc &&
                     (event.should_start_incremental_gc ||
                      WatchdogGC(state, event))) {
            if (state.next_gc_start_ms <= event.time_ms) {
              return State(kRun, state.started_gcs + 1, 0.0,
                           state.last_gc_time_ms, 0);
            } else {
              return state;
            }
          } else {
            return State(kWait, state.started_gcs,
                         event.time_ms + kLongDelayMs, state.last_gc_time_ms,
                         0);
          }
        case kMarkCompact:
          return State(kWait, state.started_gcs, event.time_ms + kLongDelayMs,
                       event.time_ms, 0);
      }
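    // kRun: a memory-reducing GC is in progress; only its completion
    // (kMarkCompact) advances the state machine.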
    case kRun:
      if (event.type != kMarkCompact) {
        return state;
      } else {
        if (state.started_gcs < kMaxNumberOfGCs &&
            (event.next_gc_likely_to_collect_more || state.started_gcs == 1)) {
          return State(kWait, state.started_gcs, event.time_ms + kShortDelayMs,
                       event.time_ms, 0);
        } else {
          return State(kDone, kMaxNumberOfGCs, 0.0, event.time_ms,
                       event.committed_memory);
        }
      }
  }
  UNREACHABLE();
}

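// Posts a TimerTask to run after |delay_ms| plus some slack. Note that
// TaskRunner::PostDelayedTask takes its delay in seconds, hence the division
// by 1000.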
void MemoryReducer::ScheduleTimer(double delay_ms) {
  DCHECK_LT(0, delay_ms);
  if (heap()->IsTearingDown()) return;
  // Leave some room for precision error in the task scheduler.
  const double kSlackMs = 100;
  taskrunner_->PostDelayedTask(std::make_unique<MemoryReducer::TimerTask>(this),
                               (delay_ms + kSlackMs) / 1000.0);
}

void MemoryReducer::TearDown() { state_ = State(kDone, 0, 0.0, 0.0, 0); }

}  // namespace internal
}  // namespace v8