1// Copyright 2020 the V8 project authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "src/objects/string-table.h"
6
7#include <atomic>
8
9#include "src/base/atomicops.h"
10#include "src/base/macros.h"
11#include "src/common/assert-scope.h"
12#include "src/common/globals.h"
13#include "src/common/ptr-compr-inl.h"
14#include "src/execution/isolate-utils-inl.h"
15#include "src/heap/safepoint.h"
16#include "src/objects/internal-index.h"
17#include "src/objects/object-list-macros.h"
18#include "src/objects/slots-inl.h"
19#include "src/objects/slots.h"
20#include "src/objects/string-inl.h"
21#include "src/objects/string-table-inl.h"
22#include "src/snapshot/deserializer.h"
23#include "src/utils/allocation.h"
24#include "src/utils/ostreams.h"
25
26namespace v8 {
27namespace internal {
28
29namespace {
30
// A table may only shrink when it is at most 1/kStringTableMaxEmptyFactor
// full (see ComputeStringTableCapacityWithShrink).
// Note: `static` is redundant inside an anonymous namespace, so it is omitted.
constexpr int kStringTableMaxEmptyFactor = 4;
// Lower bound for the capacity; the table is created at and never shrinks
// below this size.
constexpr int kStringTableMinCapacity = 2048;
33
// Returns true if a table with the given geometry can absorb
// |number_of_additional_elements| more entries while keeping at least 50% of
// the required size free, with deleted entries occupying at most half of the
// remaining free space.
bool StringTableHasSufficientCapacityToAdd(int capacity, int number_of_elements,
                                           int number_of_deleted_elements,
                                           int number_of_additional_elements) {
  const int required = number_of_elements + number_of_additional_elements;
  // The table must not become completely full.
  if (required >= capacity) return false;
  // At most 50% of the free elements may be deleted elements.
  if (number_of_deleted_elements > (capacity - required) / 2) return false;
  // 50% of the new element count must still be free after the addition.
  return required + required / 2 <= capacity;
}
48
49int ComputeStringTableCapacity(int at_least_space_for) {
50  // Add 50% slack to make slot collisions sufficiently unlikely.
51  // See matching computation in StringTableHasSufficientCapacityToAdd().
52  int raw_capacity = at_least_space_for + (at_least_space_for >> 1);
53  int capacity = base::bits::RoundUpToPowerOfTwo32(raw_capacity);
54  return std::max(capacity, kStringTableMinCapacity);
55}
56
57int ComputeStringTableCapacityWithShrink(int current_capacity,
58                                         int at_least_room_for) {
59  // Only shrink if the table is very empty to avoid performance penalty.
60  DCHECK_GE(current_capacity, kStringTableMinCapacity);
61  if (at_least_room_for > (current_capacity / kStringTableMaxEmptyFactor))
62    return current_capacity;
63
64  // Recalculate the smaller capacity actually needed.
65  int new_capacity = ComputeStringTableCapacity(at_least_room_for);
66  DCHECK_GE(new_capacity, at_least_room_for);
67  // Don't go lower than room for {kStringTableMinCapacity} elements.
68  if (new_capacity < kStringTableMinCapacity) return current_capacity;
69  return new_capacity;
70}
71
72template <typename IsolateT, typename StringTableKey>
73bool KeyIsMatch(IsolateT* isolate, StringTableKey* key, String string) {
74  if (string.hash() != key->hash()) return false;
75  if (string.length() != key->length()) return false;
76  return key->IsMatch(isolate, string);
77}
78
79}  // namespace
80
// Data holds the actual data of the string table, including capacity and number
// of elements.
//
// It is a variable sized structure, with a "header" followed directly in memory
// by the elements themselves. These are accessed as offsets from the elements_
// field, which itself provides storage for the first element.
//
// The elements themselves are stored as an open-addressed hash table, with
// quadratic probing and Smi 0 and Smi 1 as the empty and deleted sentinels,
// respectively.
class StringTable::Data {
 public:
  // Allocates a new table with all slots initialized to the empty sentinel.
  static std::unique_ptr<Data> New(int capacity);
  // Allocates a new table of |capacity| and rehashes all live elements of
  // |data| into it. |data| is kept alive as previous_data_ of the new table,
  // since concurrent lock-free readers may still hold a pointer to it.
  static std::unique_ptr<Data> Resize(PtrComprCageBase cage_base,
                                      std::unique_ptr<Data> data, int capacity);

  // Returns the off-heap slot holding the element at |index|.
  OffHeapObjectSlot slot(InternalIndex index) const {
    return OffHeapObjectSlot(&elements_[index.as_uint32()]);
  }

  // Acquire-load of the element at |index|; pairs with the release store in
  // Set() so that lock-free readers observe fully written entries.
  Object Get(PtrComprCageBase cage_base, InternalIndex index) const {
    return slot(index).Acquire_Load(cage_base);
  }

  // Release-store of |entry| at |index|.
  void Set(InternalIndex index, String entry) {
    slot(index).Release_Store(entry);
  }

  // Bookkeeping after writing a new element into a previously empty slot.
  void ElementAdded() {
    DCHECK_LT(number_of_elements_ + 1, capacity());
    DCHECK(StringTableHasSufficientCapacityToAdd(
        capacity(), number_of_elements(), number_of_deleted_elements(), 1));

    number_of_elements_++;
  }
  // Bookkeeping after writing a new element over a deleted sentinel.
  void DeletedElementOverwritten() {
    DCHECK_LT(number_of_elements_ + 1, capacity());
    DCHECK(StringTableHasSufficientCapacityToAdd(
        capacity(), number_of_elements(), number_of_deleted_elements() - 1, 1));

    number_of_elements_++;
    number_of_deleted_elements_--;
  }
  // Bookkeeping after |count| elements were replaced by deleted sentinels.
  void ElementsRemoved(int count) {
    DCHECK_LE(count, number_of_elements_);
    number_of_elements_ -= count;
    number_of_deleted_elements_ += count;
  }

  // Placement new that allocates trailing storage for |capacity| elements;
  // plain new is deleted because a capacity is always required.
  void* operator new(size_t size, int capacity);
  void* operator new(size_t size) = delete;
  void operator delete(void* description);

  int capacity() const { return capacity_; }
  int number_of_elements() const { return number_of_elements_; }
  int number_of_deleted_elements() const { return number_of_deleted_elements_; }

  // Returns the entry whose string matches |key|, or NotFound.
  template <typename IsolateT, typename StringTableKey>
  InternalIndex FindEntry(IsolateT* isolate, StringTableKey* key,
                          uint32_t hash) const;

  // Returns a free (empty or deleted) entry on the probe chain of |hash|.
  InternalIndex FindInsertionEntry(PtrComprCageBase cage_base,
                                   uint32_t hash) const;

  // Returns the entry matching |key| if present, otherwise a free entry where
  // the key could be inserted.
  template <typename IsolateT, typename StringTableKey>
  InternalIndex FindEntryOrInsertionEntry(IsolateT* isolate,
                                          StringTableKey* key,
                                          uint32_t hash) const;

  // Helper method for StringTable::TryStringToIndexOrLookupExisting.
  template <typename Char>
  static Address TryStringToIndexOrLookupExisting(Isolate* isolate,
                                                  String string, String source,
                                                  size_t start);

  // Visits every slot (including sentinels) as a GC root.
  void IterateElements(RootVisitor* visitor);

  Data* PreviousData() { return previous_data_.get(); }
  void DropPreviousData() { previous_data_.reset(); }

  void Print(PtrComprCageBase cage_base) const;
  size_t GetCurrentMemoryUsage() const;

 private:
  explicit Data(int capacity);

  // Returns probe entry.
  inline static InternalIndex FirstProbe(uint32_t hash, uint32_t size) {
    return InternalIndex(hash & (size - 1));
  }

  inline static InternalIndex NextProbe(InternalIndex last, uint32_t number,
                                        uint32_t size) {
    return InternalIndex((last.as_uint32() + number) & (size - 1));
  }

 private:
  // Chain of older (pre-resize) tables, kept alive for concurrent readers
  // until dropped during GC (see DropPreviousData).
  std::unique_ptr<Data> previous_data_;
  int number_of_elements_;
  int number_of_deleted_elements_;
  const int capacity_;
  // Storage for the first element; the remaining capacity_ - 1 elements are
  // allocated directly after this field by the placement operator new. Must
  // remain the last member (see the static asserts in operator new).
  Tagged_t elements_[1];
};
184
// Allocates storage for a Data header plus a trailing array of |capacity|
// tagged elements, aligned for Data.
void* StringTable::Data::operator new(size_t size, int capacity) {
  // Make sure the size given is the size of the Data structure.
  DCHECK_EQ(size, sizeof(StringTable::Data));
  // Make sure that the elements_ array is at the end of Data, with no padding,
  // so that subsequent elements can be accessed as offsets from elements_.
  STATIC_ASSERT(offsetof(StringTable::Data, elements_) ==
                sizeof(StringTable::Data) - sizeof(Tagged_t));
  // Make sure that elements_ is aligned when StringTable::Data is aligned.
  STATIC_ASSERT(
      (alignof(StringTable::Data) + offsetof(StringTable::Data, elements_)) %
          kTaggedSize ==
      0);

  // Subtract 1 from capacity, as the member elements_ already supplies the
  // storage for the first element.
  return AlignedAlloc(size + (capacity - 1) * sizeof(Tagged_t),
                      alignof(StringTable::Data));
}
203
void StringTable::Data::operator delete(void* table) {
  // The storage came from AlignedAlloc in the placement operator new above,
  // so it must be released with AlignedFree.
  AlignedFree(table);
}
205
// Sums the memory of this table and all chained previous tables: for each,
// the header plus the dynamically sized element array (capacity_ - 1 extra
// slots beyond the one embedded in the header).
size_t StringTable::Data::GetCurrentMemoryUsage() const {
  size_t usage = 0;
  for (const Data* table = this; table != nullptr;
       table = table->previous_data_.get()) {
    usage += sizeof(*table) + (table->capacity_ - 1) * sizeof(Tagged_t);
  }
  return usage;
}
213
// Initializes an empty table: every slot is set to the empty sentinel so
// that probe loops always terminate at an empty slot.
StringTable::Data::Data(int capacity)
    : previous_data_(nullptr),
      number_of_elements_(0),
      number_of_deleted_elements_(0),
      capacity_(capacity) {
  OffHeapObjectSlot first_slot = slot(InternalIndex(0));
  MemsetTagged(first_slot, empty_element(), capacity);
}
222
// Creates a fresh, empty table. The placement operator new allocates the
// trailing element storage; the constructor initializes the slots.
std::unique_ptr<StringTable::Data> StringTable::Data::New(int capacity) {
  Data* data = new (capacity) Data(capacity);
  return std::unique_ptr<Data>(data);
}
226
// Builds a new table of |capacity| containing all live elements of |data|,
// rehashed to their new slots. Deleted sentinels are not carried over.
std::unique_ptr<StringTable::Data> StringTable::Data::Resize(
    PtrComprCageBase cage_base, std::unique_ptr<Data> data, int capacity) {
  std::unique_ptr<Data> new_data(new (capacity) Data(capacity));

  DCHECK_LT(data->number_of_elements(), new_data->capacity());
  DCHECK(StringTableHasSufficientCapacityToAdd(
      new_data->capacity(), new_data->number_of_elements(),
      new_data->number_of_deleted_elements(), data->number_of_elements()));

  // Rehash the elements.
  for (InternalIndex i : InternalIndex::Range(data->capacity())) {
    Object element = data->Get(cage_base, i);
    if (element == empty_element() || element == deleted_element()) continue;
    String string = String::cast(element);
    uint32_t hash = string.hash();
    InternalIndex insertion_index =
        new_data->FindInsertionEntry(cage_base, hash);
    new_data->Set(insertion_index, string);
  }
  new_data->number_of_elements_ = data->number_of_elements();

  // Keep the old table alive: concurrent lock-free readers may still hold a
  // pointer to it. It is dropped later via DropPreviousData (during GC).
  new_data->previous_data_ = std::move(data);
  return new_data;
}
251
// Probes for an entry matching |key|. Quadratic probing; EnsureCapacity
// guarantees the table is never full, so an empty slot (a definitive miss)
// is always reachable.
template <typename IsolateT, typename StringTableKey>
InternalIndex StringTable::Data::FindEntry(IsolateT* isolate,
                                           StringTableKey* key,
                                           uint32_t hash) const {
  InternalIndex entry = FirstProbe(hash, capacity_);
  uint32_t probe_count = 1;
  while (true) {
    // TODO(leszeks): Consider delaying the decompression until after the
    // comparisons against empty/deleted.
    Object element = Get(isolate, entry);
    if (element == empty_element()) return InternalIndex::NotFound();
    // Skip deleted sentinels; compare live strings against the key.
    if (element != deleted_element() &&
        KeyIsMatch(isolate, key, String::cast(element))) {
      return entry;
    }
    entry = NextProbe(entry, probe_count++, capacity_);
  }
}
269
// Probes for a free (empty or deleted) slot on the probe chain of |hash|.
// EnsureCapacity guarantees the table is never full, so one always exists.
InternalIndex StringTable::Data::FindInsertionEntry(PtrComprCageBase cage_base,
                                                    uint32_t hash) const {
  InternalIndex entry = FirstProbe(hash, capacity_);
  uint32_t probe_count = 1;
  while (true) {
    // TODO(leszeks): Consider delaying the decompression until after the
    // comparisons against empty/deleted.
    Object element = Get(cage_base, entry);
    if (element == empty_element() || element == deleted_element()) {
      return entry;
    }
    entry = NextProbe(entry, probe_count++, capacity_);
  }
}
283
// Probes for an entry matching |key|; if none exists, returns the slot where
// it should be inserted (preferring the first deleted sentinel encountered,
// so deleted slots get reused).
template <typename IsolateT, typename StringTableKey>
InternalIndex StringTable::Data::FindEntryOrInsertionEntry(
    IsolateT* isolate, StringTableKey* key, uint32_t hash) const {
  InternalIndex first_deleted = InternalIndex::NotFound();
  InternalIndex entry = FirstProbe(hash, capacity_);
  uint32_t probe_count = 1;
  while (true) {
    // TODO(leszeks): Consider delaying the decompression until after the
    // comparisons against empty/deleted.
    Object element = Get(isolate, entry);
    if (element == empty_element()) {
      // Definitive miss: insert at the earliest deleted slot seen, if any,
      // otherwise at this empty slot.
      return first_deleted.is_not_found() ? entry : first_deleted;
    }
    if (element == deleted_element()) {
      // A deleted slot is an insertion candidate, but the search must go on
      // in case the matching entry lies further along the probe chain.
      if (first_deleted.is_not_found()) first_deleted = entry;
    } else if (KeyIsMatch(isolate, key, String::cast(element))) {
      return entry;
    }
    entry = NextProbe(entry, probe_count++, capacity_);
  }
}
312
313void StringTable::Data::IterateElements(RootVisitor* visitor) {
314  OffHeapObjectSlot first_slot = slot(InternalIndex(0));
315  OffHeapObjectSlot end_slot = slot(InternalIndex(capacity_));
316  visitor->VisitRootPointers(Root::kStringTable, nullptr, first_slot, end_slot);
317}
318
// Dumps every slot (including empty/deleted sentinels) to stdout, for
// debugging.
void StringTable::Data::Print(PtrComprCageBase cage_base) const {
  OFStream os(stdout);
  os << "StringTable {" << std::endl;
  for (InternalIndex index : InternalIndex::Range(capacity_)) {
    os << "  " << index.as_uint32() << ": " << Brief(Get(cage_base, index))
       << std::endl;
  }
  os << "}" << std::endl;
}
328
// The table starts at the minimum capacity and grows on demand in
// EnsureCapacity(). The Data pointer is stored raw inside an atomic so that
// readers can load it without locking; this StringTable owns the pointer and
// frees it (plus any chained previous tables) in the destructor.
StringTable::StringTable(Isolate* isolate)
    : data_(Data::New(kStringTableMinCapacity).release()), isolate_(isolate) {}

StringTable::~StringTable() { delete data_; }
333
int StringTable::Capacity() const {
  // Acquire pairs with the release store in EnsureCapacity, so the loaded
  // table is fully initialized.
  const Data* data = data_.load(std::memory_order_acquire);
  return data->capacity();
}
int StringTable::NumberOfElements() const {
  // The element count is only mutated while write_mutex_ is held, so taking
  // the mutex here makes the relaxed load race-free.
  base::MutexGuard table_write_guard(&write_mutex_);
  return data_.load(std::memory_order_relaxed)->number_of_elements();
}
343
// InternalizedStringKey carries a string/internalized-string object as key.
class InternalizedStringKey final : public StringTableKey {
 public:
  // |string| must be flat. Its hash is computed eagerly so that table lookups
  // can compare hashes without touching the string contents.
  explicit InternalizedStringKey(Handle<String> string)
      : StringTableKey(0, string->length()), string_(string) {
    // When sharing the string table, it's possible that another thread already
    // internalized the key, in which case StringTable::LookupKey will perform a
    // redundant lookup and return the already internalized copy.
    DCHECK_IMPLIES(!FLAG_shared_string_table, !string->IsInternalizedString());
    DCHECK(string->IsFlat());
    // Make sure hash_field is computed.
    string->EnsureHash();
    set_raw_hash_field(string->raw_hash_field());
  }

  // Deep equality against a table entry; hash and length equality have
  // already been checked by the caller (see KeyIsMatch).
  bool IsMatch(Isolate* isolate, String string) {
    DCHECK(!SharedStringAccessGuardIfNeeded::IsNeeded(string));
    return string_->SlowEquals(string);
  }

  // Called on a table miss, before the write lock is taken in LookupKey:
  // performs any allocation needed for insertion (e.g. an internalized copy
  // of the string) so that no allocation happens under the lock.
  void PrepareForInsertion(Isolate* isolate) {
    StringTransitionStrategy strategy =
        isolate->factory()->ComputeInternalizationStrategyForString(
            string_, &maybe_internalized_map_);
    switch (strategy) {
      case StringTransitionStrategy::kCopy:
        break;
      case StringTransitionStrategy::kInPlace:
        // In-place transition will be done in GetHandleForInsertion, when we
        // are sure that we are going to insert the string into the table.
        return;
      case StringTransitionStrategy::kAlreadyTransitioned:
        // We can see already internalized strings here only when sharing the
        // string table and allowing concurrent internalization.
        DCHECK(FLAG_shared_string_table);
        return;
    }

    // Copying the string here is always threadsafe, as no instance type
    // requiring a copy can transition any further.
    StringShape shape(*string_);
    // External strings get special treatment, to avoid copying their
    // contents as long as they are not uncached.
    if (shape.IsExternalOneByte() && !shape.IsUncachedExternal()) {
      // TODO(syg): External strings not yet supported.
      DCHECK(!FLAG_shared_string_table);
      string_ =
          isolate->factory()->InternalizeExternalString<ExternalOneByteString>(
              string_);
    } else if (shape.IsExternalTwoByte() && !shape.IsUncachedExternal()) {
      // TODO(syg): External strings not yet supported.
      DCHECK(!FLAG_shared_string_table);
      string_ =
          isolate->factory()->InternalizeExternalString<ExternalTwoByteString>(
              string_);
    } else {
      // Otherwise allocate a new internalized string.
      string_ = isolate->factory()->NewInternalizedStringImpl(
          string_, string_->length(), string_->raw_hash_field());
    }
  }

  // Returns the string that should be written into the table. Only called
  // under the write lock, after a confirmed miss.
  Handle<String> GetHandleForInsertion() {
    Handle<Map> internalized_map;
    // When preparing the string, the strategy was to in-place migrate it.
    if (maybe_internalized_map_.ToHandle(&internalized_map)) {
      // It is always safe to overwrite the map. The only transition possible
      // is another thread migrated the string to internalized already.
      // Migrations to thin are impossible, as we only call this method on table
      // misses inside the critical section.
      string_->set_map_no_write_barrier(*internalized_map);
      DCHECK(string_->IsInternalizedString());
      return string_;
    }
    // We prepared an internalized copy for the string or the string was already
    // internalized.
    // In theory we could have created a copy of a SeqString in young generation
    // that has been promoted to old space by now. In that case we could
    // in-place migrate the original string instead of internalizing the copy
    // and migrating the original string to a ThinString. This scenario doesn't
    // seem to be common enough to justify re-computing the strategy here.
    return string_;
  }

 private:
  // The key string; replaced by its internalized copy in
  // PrepareForInsertion when a copy is required.
  Handle<String> string_;
  // Set by PrepareForInsertion when the string can be internalized in place.
  MaybeHandle<Map> maybe_internalized_map_;
};
432
// Returns the canonical internalized string equal to |string|, inserting it
// into the table if not already present.
Handle<String> StringTable::LookupString(Isolate* isolate,
                                         Handle<String> string) {
  // When sharing the string table, internalization is allowed to be concurrent
  // from multiple Isolates, assuming that:
  //
  //  - All in-place internalizable strings (i.e. old-generation flat strings)
  //    and internalized strings are in the shared heap.
  //  - LookupKey supports concurrent access (see comment below).
  //
  // These assumptions guarantee the following properties:
  //
  //  - String::Flatten is not threadsafe but is only called on non-shared
  //    strings, since non-flat strings are not shared.
  //
  //  - String::ComputeAndSetHash is threadsafe on flat strings. This is safe
  //    because the characters are immutable and the same hash will be
  //    computed. The hash field is set with relaxed memory order. A thread that
  //    doesn't see the hash may do redundant work but will not be incorrect.
  //
  //  - In-place internalizable strings do not incur a copy regardless of string
  //    table sharing. The map mutation is threadsafe even with relaxed memory
  //    order, because for concurrent table lookups, the "losing" thread will be
  //    correctly ordered by LookupKey's write mutex and see the updated map
  //    during the re-lookup.
  //
  // For lookup misses, the internalized string map is the same map in RO space
  // regardless of which thread is doing the lookup.
  //
  // For lookup hits, String::MakeThin is threadsafe and spinlocks on
  // migrating into a ThinString.

  string = String::Flatten(isolate, string);
  if (string->IsInternalizedString()) return string;

  InternalizedStringKey key(string);
  Handle<String> result = LookupKey(isolate, &key);

  // Unless the string was internalized in place (see
  // InternalizedStringKey::GetHandleForInsertion) or concurrently by another
  // thread, redirect it to the canonical copy via a ThinString so that later
  // lookups are cheap.
  if (!string->IsInternalizedString()) {
    string->MakeThin(isolate, *result);
  }

  return result;
}
476
// Returns the canonical table entry for |key|, inserting a new string (via
// the key's PrepareForInsertion/GetHandleForInsertion) on a miss.
template <typename StringTableKey, typename IsolateT>
Handle<String> StringTable::LookupKey(IsolateT* isolate, StringTableKey* key) {
  // String table lookups are allowed to be concurrent, assuming that:
  //
  //   - The Heap access is allowed to be concurrent (using LocalHeap or
  //     similar),
  //   - All writes to the string table are guarded by the Isolate string table
  //     mutex,
  //   - Resizes of the string table first copies the old contents to the new
  //     table, and only then sets the new string table pointer to the new
  //     table,
  //   - Only GCs can remove elements from the string table.
  //
  // These assumptions allow us to make the following statement:
  //
  //   "Reads are allowed when not holding the lock, as long as false negatives
  //    (misses) are ok. We will never get a false positive (hit of an entry no
  //    longer in the table)"
  //
  // This is because we _know_ that if we find an entry in the string table, any
  // entry will also be in all reallocations of that table. This is required
  // for strong consistency of internalized string equality implying reference
  // equality.
  //
  // We therefore try to optimistically read from the string table without
  // taking the lock (both here and in the NoAllocate version of the lookup),
  // and on a miss we take the lock and try to write the entry, with a second
  // read lookup in case the non-locked read missed a write.
  //
  // One complication is allocation -- we don't want to allocate while holding
  // the string table lock. This applies to both allocation of new strings, and
  // re-allocation of the string table on resize. So, we optimistically allocate
  // (without copying values) outside the lock, and potentially discard the
  // allocation if another write also did an allocation. This assumes that
  // writes are rarer than reads.

  // Load the current string table data, in case another thread updates the
  // data while we're reading.
  const Data* current_data = data_.load(std::memory_order_acquire);

  // First try to find the string in the table. This is safe to do even if the
  // table is now reallocated; we won't find a stale entry in the old table
  // because the new table won't delete its corresponding entry until the
  // string is dead, in which case it will die in this table too and worst
  // case we'll have a false miss.
  InternalIndex entry = current_data->FindEntry(isolate, key, key->hash());
  if (entry.is_found()) {
    Handle<String> result(String::cast(current_data->Get(isolate, entry)),
                          isolate);
    DCHECK_IMPLIES(FLAG_shared_string_table, result->InSharedHeap());
    return result;
  }

  // No entry found, so adding new string. Any allocation the key needs (e.g.
  // an internalized copy) happens here, before the lock is taken.
  key->PrepareForInsertion(isolate);
  {
    base::MutexGuard table_write_guard(&write_mutex_);

    Data* data = EnsureCapacity(isolate, 1);

    // Check one last time if the key is present in the table, in case it was
    // added after the check.
    entry = data->FindEntryOrInsertionEntry(isolate, key, key->hash());

    Object element = data->Get(isolate, entry);
    if (element == empty_element()) {
      // This entry is empty, so write it and register that we added an
      // element.
      Handle<String> new_string = key->GetHandleForInsertion();
      DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
      data->Set(entry, *new_string);
      data->ElementAdded();
      return new_string;
    } else if (element == deleted_element()) {
      // This entry was deleted, so overwrite it and register that we
      // overwrote a deleted element.
      Handle<String> new_string = key->GetHandleForInsertion();
      DCHECK_IMPLIES(FLAG_shared_string_table, new_string->IsShared());
      data->Set(entry, *new_string);
      data->DeletedElementOverwritten();
      return new_string;
    } else {
      // Another thread won the race and inserted the key first; return the
      // existing string as a handle.
      return handle(String::cast(element), isolate);
    }
  }
}
564
// Explicit instantiations of LookupKey for every key/isolate combination
// used by other translation units.
template Handle<String> StringTable::LookupKey(Isolate* isolate,
                                               OneByteStringKey* key);
template Handle<String> StringTable::LookupKey(Isolate* isolate,
                                               TwoByteStringKey* key);
template Handle<String> StringTable::LookupKey(Isolate* isolate,
                                               SeqOneByteSubStringKey* key);
template Handle<String> StringTable::LookupKey(Isolate* isolate,
                                               SeqTwoByteSubStringKey* key);

template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
                                               OneByteStringKey* key);
template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
                                               TwoByteStringKey* key);

template Handle<String> StringTable::LookupKey(Isolate* isolate,
                                               StringTableInsertionKey* key);
template Handle<String> StringTable::LookupKey(LocalIsolate* isolate,
                                               StringTableInsertionKey* key);
583
// Returns the (possibly reallocated) table with room for more elements,
// growing or shrinking it first as needed. Callers must hold write_mutex_.
// NOTE(review): all call sites pass additional_elements == 1 and the checks
// below hard-code 1; confirm before adding callers with larger values.
StringTable::Data* StringTable::EnsureCapacity(PtrComprCageBase cage_base,
                                               int additional_elements) {
  // This call is only allowed while the write mutex is held.
  write_mutex_.AssertHeld();

  // This load can be relaxed as the table pointer can only be modified while
  // the lock is held.
  Data* data = data_.load(std::memory_order_relaxed);

  // Grow or shrink table if needed. We first try to shrink the table, if it
  // is sufficiently empty; otherwise we make sure to grow it so that it has
  // enough space.
  int current_capacity = data->capacity();
  int current_nof = data->number_of_elements();
  int capacity_after_shrinking =
      ComputeStringTableCapacityWithShrink(current_capacity, current_nof + 1);

  int new_capacity = -1;
  if (capacity_after_shrinking < current_capacity) {
    DCHECK(StringTableHasSufficientCapacityToAdd(capacity_after_shrinking,
                                                 current_nof, 0, 1));
    new_capacity = capacity_after_shrinking;
  } else if (!StringTableHasSufficientCapacityToAdd(
                 current_capacity, current_nof,
                 data->number_of_deleted_elements(), 1)) {
    new_capacity = ComputeStringTableCapacity(current_nof + 1);
  }

  if (new_capacity != -1) {
    std::unique_ptr<Data> new_data =
        Data::Resize(cage_base, std::unique_ptr<Data>(data), new_capacity);
    // `new_data` is the new owner of `data`.
    DCHECK_EQ(new_data->PreviousData(), data);
    // Release-store the new data pointer as `data_`, so that it can be
    // acquire-loaded by other threads. This string table becomes the owner of
    // the pointer.
    data = new_data.release();
    data_.store(data, std::memory_order_release);
  }

  return data;
}
626
// static
// Attempts to interpret |string| as a cached array index, or to find an
// already-internalized copy of it in the string table. |source| holds the
// actual characters (|string| unwrapped by the caller), starting at |start|.
// Returns a Smi sentinel when no existing entry can be used.
template <typename Char>
Address StringTable::Data::TryStringToIndexOrLookupExisting(Isolate* isolate,
                                                            String string,
                                                            String source,
                                                            size_t start) {
  // TODO(leszeks): This method doesn't really belong on StringTable::Data.
  // Ideally it would be a free function in an anonymous namespace, but that
  // causes issues around method and class visibility.

  DisallowGarbageCollection no_gc;
  uint64_t seed = HashSeed(isolate);

  int length = string.length();

  std::unique_ptr<Char[]> buffer;
  const Char* chars;

  SharedStringAccessGuardIfNeeded access_guard(isolate);
  if (source.IsConsString(isolate)) {
    DCHECK(!source.IsFlat(isolate));
    // A non-flat cons string has no contiguous characters; flatten it into a
    // temporary buffer first.
    buffer.reset(new Char[length]);
    String::WriteToFlat(source, buffer.get(), 0, length, isolate, access_guard);
    chars = buffer.get();
  } else {
    chars = source.GetChars<Char>(isolate, no_gc, access_guard) + start;
  }
  // TODO(verwaest): Internalize to one-byte when possible.
  SequentialStringKey<Char> key(base::Vector<const Char>(chars, length), seed);

  // String could be an array index.
  uint32_t raw_hash_field = key.raw_hash_field();

  if (Name::ContainsCachedArrayIndex(raw_hash_field)) {
    return Smi::FromInt(String::ArrayIndexValueBits::decode(raw_hash_field))
        .ptr();
  }

  if (Name::IsIntegerIndex(raw_hash_field)) {
    // It is an index, but it's not cached.
    return Smi::FromInt(ResultSentinel::kUnsupported).ptr();
  }

  Data* string_table_data =
      isolate->string_table()->data_.load(std::memory_order_acquire);

  InternalIndex entry = string_table_data->FindEntry(isolate, &key, key.hash());
  if (entry.is_not_found()) {
    // A string that's not an array index, and not in the string table,
    // cannot have been used as a property name before.
    return Smi::FromInt(ResultSentinel::kNotFound).ptr();
  }

  String internalized = String::cast(string_table_data->Get(isolate, entry));
  // `string` can already be internalized here, if another thread internalized
  // it. If we found an entry in the string table and `string` is not
  // internalized, there is no way that it can transition to internalized
  // later on. So a last check here is sufficient.
  if (!string.IsInternalizedString()) {
    string.MakeThin(isolate, internalized);
  } else {
    DCHECK(FLAG_shared_string_table);
  }
  return internalized.ptr();
}
692
// static
// Entry point (from generated code / runtime) that unwraps string
// indirections and dispatches to the templated Data helper by character
// width.
Address StringTable::TryStringToIndexOrLookupExisting(Isolate* isolate,
                                                      Address raw_string) {
  String string = String::cast(Object(raw_string));
  if (string.IsInternalizedString()) {
    // string could be internalized, if the string table is shared and another
    // thread internalized it.
    DCHECK(FLAG_shared_string_table);
    return raw_string;
  }

  // Valid array indices are >= 0, so they cannot be mixed up with any of
  // the result sentinels, which are negative.
  STATIC_ASSERT(
      !String::ArrayIndexValueBits::is_valid(ResultSentinel::kUnsupported));
  STATIC_ASSERT(
      !String::ArrayIndexValueBits::is_valid(ResultSentinel::kNotFound));

  // Unwrap sliced/cons/thin indirections to reach the string whose
  // characters will actually be read.
  size_t start = 0;
  String source = string;
  if (source.IsSlicedString()) {
    SlicedString sliced = SlicedString::cast(source);
    start = sliced.offset();
    source = sliced.parent();
  } else if (source.IsConsString() && source.IsFlat()) {
    source = ConsString::cast(source).first();
  }
  if (source.IsThinString()) {
    source = ThinString::cast(source).actual();
    // A ThinString points at an internalized copy (see the MakeThin calls in
    // this file); it is the answer unless the lengths differ, which can
    // happen when `source` came from unwrapping a sliced string above.
    if (string.length() == source.length()) {
      return source.ptr();
    }
  }

  if (source.IsOneByteRepresentation()) {
    return StringTable::Data::TryStringToIndexOrLookupExisting<uint8_t>(
        isolate, string, source, start);
  }
  return StringTable::Data::TryStringToIndexOrLookupExisting<uint16_t>(
      isolate, string, source, start);
}
734
void StringTable::Print(PtrComprCageBase cage_base) const {
  // Acquire-load so a fully initialized table is printed.
  const Data* data = data_.load(std::memory_order_acquire);
  data->Print(cage_base);
}
738
size_t StringTable::GetCurrentMemoryUsage() const {
  // Count this header object plus the table data (which itself includes any
  // previous tables still chained alive).
  const Data* data = data_.load(std::memory_order_acquire);
  return sizeof(*this) + data->GetCurrentMemoryUsage();
}
743
void StringTable::IterateElements(RootVisitor* visitor) {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  Data* data = data_.load(std::memory_order_relaxed);
  data->IterateElements(visitor);
}
750
void StringTable::DropOldData() {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
  Data* data = data_.load(std::memory_order_relaxed);
  data->DropPreviousData();
}
758
void StringTable::NotifyElementsRemoved(int count) {
  // This should only happen during garbage collection when background threads
  // are paused, so the load can be relaxed.
  isolate_->heap()->safepoint()->AssertActive();
  DCHECK_NE(isolate_->heap()->gc_state(), Heap::NOT_IN_GC);
  Data* data = data_.load(std::memory_order_relaxed);
  data->ElementsRemoved(count);
}
766
767void StringTable::UpdateCountersIfOwnedBy(Isolate* isolate) {
768  DCHECK_EQ(isolate->string_table(), this);
769  if (!isolate->OwnsStringTable()) return;
770  isolate->counters()->string_table_capacity()->Set(Capacity());
771  isolate->counters()->number_of_symbols()->Set(NumberOfElements());
772}
773
774}  // namespace internal
775}  // namespace v8
776