// Copyright 2019 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/objects/osr-optimized-code-cache.h"

#include "src/execution/isolate-inl.h"
#include "src/objects/code.h"
#include "src/objects/maybe-object.h"
#include "src/objects/shared-function-info.h"

namespace v8 {
namespace internal {

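// The empty cache is the canonical empty WeakFixedArray; Clear() resets a
// context's cache back to it.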
// static
Handle<OSROptimizedCodeCache> OSROptimizedCodeCache::Empty(Isolate* isolate) {
  return Handle<OSROptimizedCodeCache>::cast(
      isolate->factory()->empty_weak_fixed_array());
}

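// Caches `code` for `shared` at `osr_offset` in `native_context`'s cache.
// Reuses a cleared slot when one exists, grows the backing store otherwise,
// and once kMaxLength is reached evicts an existing entry instead.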
// static
void OSROptimizedCodeCache::Insert(Isolate* isolate,
                                   Handle<NativeContext> native_context,
                                   Handle<SharedFunctionInfo> shared,
                                   Handle<CodeT> code,
                                   BytecodeOffset osr_offset) {
  DCHECK(!osr_offset.IsNone());
  DCHECK(!isolate->serializer_enabled());
  DCHECK(CodeKindIsOptimizedJSFunction(code->kind()));

  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);

  if (shared->osr_code_cache_state() == kNotCached) {
    DCHECK_EQ(osr_cache->FindEntry(*shared, osr_offset), -1);
  } else if (osr_cache->FindEntry(*shared, osr_offset) != -1) {
    return;  // Already cached for a different JSFunction.
  }

  STATIC_ASSERT(kEntryLength == 3);
  int entry = -1;
  for (int index = 0; index < osr_cache->length(); index += kEntryLength) {
    if (osr_cache->Get(index + kSharedOffset)->IsCleared() ||
        osr_cache->Get(index + kCachedCodeOffset)->IsCleared()) {
      entry = index;
      break;
    }
  }

  if (entry == -1) {
    if (osr_cache->length() + kEntryLength <= kMaxLength) {
      entry = GrowOSRCache(isolate, native_context, &osr_cache);
    } else {
      // We have reached max capacity and cannot grow further. Reuse an
      // existing entry.
      // TODO(mythria): We could use a better replacement policy (e.g. LRU),
      // but since we don't expect this to be a common case, for now we simply
      // replace the first entry.
      osr_cache->ClearEntry(0, isolate);
      entry = 0;
    }
  }

  osr_cache->InitializeEntry(entry, *shared, *code, osr_offset);
}

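// Drops all entries by resetting the context's cache to the empty cache.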
void OSROptimizedCodeCache::Clear(Isolate* isolate,
                                  NativeContext native_context) {
  native_context.set_osr_code_cache(*OSROptimizedCodeCache::Empty(isolate));
}

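// Moves all valid entries to the front of the cache and, when enough slots
// have been cleared, shrinks the backing store (see NeedsTrimming).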
void OSROptimizedCodeCache::Compact(Isolate* isolate,
                                    Handle<NativeContext> native_context) {
  Handle<OSROptimizedCodeCache> osr_cache(native_context->osr_code_cache(),
                                          isolate);

  // Move the valid entries to the front of the cache so that it can be
  // trimmed to a smaller backing store if needed.
  int curr_valid_index = 0;
  for (int curr_index = 0; curr_index < osr_cache->length();
       curr_index += kEntryLength) {
    if (osr_cache->Get(curr_index + kSharedOffset)->IsCleared() ||
        osr_cache->Get(curr_index + kCachedCodeOffset)->IsCleared()) {
      continue;
    }
    if (curr_valid_index != curr_index) {
      osr_cache->MoveEntry(curr_index, curr_valid_index, isolate);
    }
    curr_valid_index += kEntryLength;
  }

  if (!NeedsTrimming(curr_valid_index, osr_cache->length())) return;

  Handle<OSROptimizedCodeCache> new_osr_cache =
      Handle<OSROptimizedCodeCache>::cast(isolate->factory()->NewWeakFixedArray(
          CapacityForLength(curr_valid_index), AllocationType::kOld));
  DCHECK_LT(new_osr_cache->length(), osr_cache->length());
  {
    DisallowGarbageCollection no_gc;
    new_osr_cache->CopyElements(isolate, 0, *osr_cache, 0,
                                new_osr_cache->length(),
                                new_osr_cache->GetWriteBarrierMode(no_gc));
  }
  native_context->set_osr_code_cache(*new_osr_cache);
}

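// Returns the cached code for (`shared`, `osr_offset`), or a null CodeT if
// there is none. Eagerly clears the entry if its weak code slot has already
// been collected.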
CodeT OSROptimizedCodeCache::TryGet(SharedFunctionInfo shared,
                                    BytecodeOffset osr_offset,
                                    Isolate* isolate) {
  DisallowGarbageCollection no_gc;
  int index = FindEntry(shared, osr_offset);
  if (index == -1) return {};

  CodeT code = GetCodeFromEntry(index);
  if (code.is_null()) {
    ClearEntry(index, isolate);
    return {};
  }

  DCHECK(code.is_optimized_code() && !code.marked_for_deoptimization());
  return code;
}

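// Clears every entry whose code object has been marked for deoptimization.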
void OSROptimizedCodeCache::EvictDeoptimizedCode(Isolate* isolate) {
  // This is called from DeoptimizeMarkedCodeForContext, which operates on raw
  // pointers, hence the DisallowGarbageCollection scope here.
  DisallowGarbageCollection no_gc;
  for (int index = 0; index < length(); index += kEntryLength) {
    MaybeObject code_entry = Get(index + kCachedCodeOffset);
    HeapObject heap_object;
    if (!code_entry->GetHeapObject(&heap_object)) continue;

    CodeT code = CodeT::cast(heap_object);
    DCHECK(code.is_optimized_code());
    if (!code.marked_for_deoptimization()) continue;

    ClearEntry(index, isolate);
  }
}

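// Collects the osr offsets of all cached entries for `shared`. The SFI's
// cache state lets us return early after the first hit when only a single
// entry exists.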
std::vector<BytecodeOffset> OSROptimizedCodeCache::OsrOffsetsFor(
    SharedFunctionInfo shared) {
  DisallowGarbageCollection gc;

  const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
  if (state == kNotCached) return {};

  std::vector<BytecodeOffset> offsets;
  for (int index = 0; index < length(); index += kEntryLength) {
    if (GetSFIFromEntry(index) != shared) continue;
    offsets.emplace_back(GetBytecodeOffsetFromEntry(index));
    if (state == kCachedOnce) return offsets;
  }

  return offsets;
}

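// Returns the osr offset of the first cached entry for `shared`, if any.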
base::Optional<BytecodeOffset> OSROptimizedCodeCache::FirstOsrOffsetFor(
    SharedFunctionInfo shared) {
  DisallowGarbageCollection gc;

  const OSRCodeCacheStateOfSFI state = shared.osr_code_cache_state();
  if (state == kNotCached) return {};

  for (int index = 0; index < length(); index += kEntryLength) {
    if (GetSFIFromEntry(index) != shared) continue;
    return GetBytecodeOffsetFromEntry(index);
  }

  return {};
}

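// Grows the backing store (see CapacityForLength), clears the fresh slots,
// and returns the index of the first new entry.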
int OSROptimizedCodeCache::GrowOSRCache(
    Isolate* isolate, Handle<NativeContext> native_context,
    Handle<OSROptimizedCodeCache>* osr_cache) {
  int old_length = (*osr_cache)->length();
  int grow_by = CapacityForLength(old_length) - old_length;
  DCHECK_GT(grow_by, kEntryLength);
  *osr_cache = Handle<OSROptimizedCodeCache>::cast(
      isolate->factory()->CopyWeakFixedArrayAndGrow(*osr_cache, grow_by));
  for (int i = old_length; i < (*osr_cache)->length(); i++) {
    (*osr_cache)->Set(i, HeapObjectReference::ClearedValue(isolate));
  }
  native_context->set_osr_code_cache(**osr_cache);

  return old_length;
}

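// Reads the weak code slot of the entry at `index`; returns a null CodeT if
// the slot has been cleared.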
CodeT OSROptimizedCodeCache::GetCodeFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  HeapObject code_entry;
  Get(index + OSRCodeCacheConstants::kCachedCodeOffset)
      ->GetHeapObject(&code_entry);
  if (code_entry.is_null()) return CodeT();
  return CodeT::cast(code_entry);
}

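// Reads the weak SharedFunctionInfo slot of the entry at `index`; returns a
// null SharedFunctionInfo if the slot has been cleared.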
SharedFunctionInfo OSROptimizedCodeCache::GetSFIFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  HeapObject sfi_entry;
  Get(index + OSRCodeCacheConstants::kSharedOffset)->GetHeapObject(&sfi_entry);
  return sfi_entry.is_null() ? SharedFunctionInfo()
                             : SharedFunctionInfo::cast(sfi_entry);
}

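// Reads the osr offset, stored as a Smi, from the entry at `index`.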
BytecodeOffset OSROptimizedCodeCache::GetBytecodeOffsetFromEntry(int index) {
  DCHECK_LE(index + OSRCodeCacheConstants::kEntryLength, length());
  DCHECK_EQ(index % kEntryLength, 0);
  Smi osr_offset_entry;
  Get(index + kOsrIdOffset)->ToSmi(&osr_offset_entry);
  return BytecodeOffset(osr_offset_entry.value());
}

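// Returns the start index of the entry for (`shared`, `osr_offset`), or -1
// if there is none.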
int OSROptimizedCodeCache::FindEntry(SharedFunctionInfo shared,
                                     BytecodeOffset osr_offset) {
  DisallowGarbageCollection no_gc;
  DCHECK(!osr_offset.IsNone());
  for (int index = 0; index < length(); index += kEntryLength) {
    if (GetSFIFromEntry(index) != shared) continue;
    if (GetBytecodeOffsetFromEntry(index) != osr_offset) continue;
    return index;
  }
  return -1;
}

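// Clears all three slots of the entry at `index` and downgrades the SFI's
// cache state (kCachedMultiple -> kCachedOnce -> kNotCached) accordingly.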
void OSROptimizedCodeCache::ClearEntry(int index, Isolate* isolate) {
  SharedFunctionInfo shared = GetSFIFromEntry(index);
  DCHECK_GT(shared.osr_code_cache_state(), kNotCached);
  if (V8_LIKELY(shared.osr_code_cache_state() == kCachedOnce)) {
    shared.set_osr_code_cache_state(kNotCached);
  } else if (shared.osr_code_cache_state() == kCachedMultiple) {
    int osr_code_cache_count = 0;
    for (int i = 0; i < length(); i += kEntryLength) {
      if (GetSFIFromEntry(i) == shared) {
        osr_code_cache_count++;
      }
    }
    if (osr_code_cache_count == 2) {
      shared.set_osr_code_cache_state(kCachedOnce);
    }
  }
  HeapObjectReference cleared_value =
      HeapObjectReference::ClearedValue(isolate);
  Set(index + OSRCodeCacheConstants::kSharedOffset, cleared_value);
  Set(index + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
  Set(index + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}

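// Writes weak references to `shared` and `code` plus the osr offset into the
// entry at `entry`, and upgrades the SFI's cache state
// (kNotCached -> kCachedOnce -> kCachedMultiple).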
void OSROptimizedCodeCache::InitializeEntry(int entry,
                                            SharedFunctionInfo shared,
                                            CodeT code,
                                            BytecodeOffset osr_offset) {
  Set(entry + OSRCodeCacheConstants::kSharedOffset,
      HeapObjectReference::Weak(shared));
  HeapObjectReference weak_code_entry = HeapObjectReference::Weak(code);
  Set(entry + OSRCodeCacheConstants::kCachedCodeOffset, weak_code_entry);
  Set(entry + OSRCodeCacheConstants::kOsrIdOffset,
      MaybeObject::FromSmi(Smi::FromInt(osr_offset.ToInt())));
  if (V8_LIKELY(shared.osr_code_cache_state() == kNotCached)) {
    shared.set_osr_code_cache_state(kCachedOnce);
  } else if (shared.osr_code_cache_state() == kCachedOnce) {
    shared.set_osr_code_cache_state(kCachedMultiple);
  }
}

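// Copies the entry at `src` into `dst` and clears `src`. Used by Compact().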
void OSROptimizedCodeCache::MoveEntry(int src, int dst, Isolate* isolate) {
  Set(dst + OSRCodeCacheConstants::kSharedOffset,
      Get(src + OSRCodeCacheConstants::kSharedOffset));
  Set(dst + OSRCodeCacheConstants::kCachedCodeOffset,
      Get(src + OSRCodeCacheConstants::kCachedCodeOffset));
  Set(dst + OSRCodeCacheConstants::kOsrIdOffset,
      Get(src + OSRCodeCacheConstants::kOsrIdOffset));
  HeapObjectReference cleared_value =
      HeapObjectReference::ClearedValue(isolate);
  Set(src + OSRCodeCacheConstants::kSharedOffset, cleared_value);
  Set(src + OSRCodeCacheConstants::kCachedCodeOffset, cleared_value);
  Set(src + OSRCodeCacheConstants::kOsrIdOffset, cleared_value);
}

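// Returns the capacity to use when (re)sizing from `curr_length`: double the
// current length, clamped to kMaxLength.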
int OSROptimizedCodeCache::CapacityForLength(int curr_length) {
  // TODO(mythria): This is a randomly chosen heuristic and is not based on any
  // data. We may have to tune this later.
  if (curr_length == 0) return kInitialLength;
  if (curr_length * 2 > kMaxLength) return kMaxLength;
  return curr_length * 2;
}

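// The cache is trimmed once it has grown past its initial size and less than
// a third of its slots hold valid entries.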
bool OSROptimizedCodeCache::NeedsTrimming(int num_valid_entries,
                                          int curr_length) {
  return curr_length > kInitialLength && curr_length > num_valid_entries * 3;
}

MaybeObject OSROptimizedCodeCache::RawGetForTesting(int index) const {
  return WeakFixedArray::Get(index);
}

void OSROptimizedCodeCache::RawSetForTesting(int index, MaybeObject value) {
  WeakFixedArray::Set(index, value);
}

}  // namespace internal
}  // namespace v8