Lines matching defs:page (V8's read-only heap space implementation)
99 // Do not use the platform page allocator when sharing a pointer compression
100 // cage, as the Isolate's page allocator is a BoundedPageAllocator tied to the
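The comment at file lines 99-100 is about an allocator choice: when isolates share a pointer compression cage, read-only pages must come from the Isolate's cage-bounded BoundedPageAllocator rather than the process-wide platform allocator, so every page address stays inside the compressible range. A minimal sketch of that selection follows; the types and ChooseReadOnlyPageAllocator are hypothetical stand-ins, not V8's declarations.

    // Hypothetical stand-ins for v8::PageAllocator and the cage-bounded
    // allocator; the real classes are richer and live in V8 proper.
    struct PageAllocator {
      virtual ~PageAllocator() = default;
    };
    struct BoundedPageAllocator : PageAllocator {};  // restricted to the cage

    struct Isolate {
      BoundedPageAllocator* page_allocator() { return &cage_allocator_; }
      BoundedPageAllocator cage_allocator_;
    };

    // Stand-in for the process-wide platform allocator.
    PageAllocator* GetPlatformPageAllocator() {
      static PageAllocator platform;
      return &platform;
    }

    // With a shared cage, read-only pages must be allocated through the
    // Isolate's BoundedPageAllocator so their addresses remain expressible
    // as 32-bit offsets from the cage base.
    PageAllocator* ChooseReadOnlyPageAllocator(Isolate* isolate,
                                               bool sharing_cage) {
      return sharing_cage ? isolate->page_allocator()
                          : GetPlatformPageAllocator();
    }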
146 const ReadOnlyPage* page = pages_[i];
154 reinterpret_cast<void*>(new_address), page->size());
165 new_stats.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
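File lines 146-165 come from a routine that copies each read-only page to a new address and credits its allocated bytes to the copy in a fresh stats object, so the accounting follows the relocated pages. A simplified sketch of that copy-and-reaccount step; ReadOnlyPage, AllocationStats, and RelocatePages here are reduced stand-ins.

    #include <cstddef>
    #include <cstring>
    #include <vector>

    // Reduced stand-ins; V8's ReadOnlyPage and AllocationStats track more.
    struct ReadOnlyPage {
      size_t size() const { return size_; }
      size_t allocated_bytes() const { return allocated_bytes_; }
      size_t size_;
      size_t allocated_bytes_;
    };

    struct AllocationStats {
      void IncreaseAllocatedBytes(size_t bytes, const ReadOnlyPage* /*page*/) {
        total_ += bytes;  // the real version also keeps per-page counts
      }
      size_t total_ = 0;
    };

    // Copy each page to a caller-provided destination buffer and account its
    // bytes against the new copy, mirroring the loop at file lines 146-165.
    void RelocatePages(const std::vector<ReadOnlyPage*>& pages,
                       char* new_base, AllocationStats* new_stats) {
      char* new_address = new_base;  // must be large enough for all pages
      for (const ReadOnlyPage* page : pages) {
        auto* new_page = reinterpret_cast<ReadOnlyPage*>(
            std::memcpy(new_address, page, page->size()));
        new_stats->IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
        new_address += page->size();
      }
    }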
226 for (const ReadOnlyPage* page : pages) {
227 size_t size = RoundUp(page->size(), page_allocator->AllocatePageSize());
228 // 1. Allocate some new memory for a shared copy of the page and copy the
229 // original contents into it. Doesn't need to be V8 page aligned, since
231 auto shared_memory = page_allocator->AllocateSharedPages(size, page);
235 // 2. Copy the contents of the original page into the shared page.
241 Tagged_t compressed_address = CompressTagged(page->address());
245 // shared page rather than the original.
246 stats_.IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
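The loop at file lines 226-246 performs three steps per page: round the page size up to the allocator's granularity (the shared copy need not be V8-page aligned), allocate shared memory seeded from the original page and copy the contents over, then record the page's compressed address and account its bytes against the shared copy instead of the original. The AllocateSharedPages and CompressTagged calls are taken from the listing above; every type below, and the truncating body given to CompressTagged, are simplified assumptions.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    using Address = uintptr_t;
    using Tagged_t = uint32_t;  // compressed, cage-relative pointer

    inline size_t RoundUp(size_t size, size_t granularity) {
      return (size + granularity - 1) & ~(granularity - 1);
    }

    // Assumption: inside a shared cage, compressing a tagged address amounts
    // to keeping its low 32 bits. This is a sketch, not V8's literal code.
    inline Tagged_t CompressTagged(Address address) {
      return static_cast<Tagged_t>(address);
    }

    struct ReadOnlyPage {
      Address address() const { return reinterpret_cast<Address>(this); }
      size_t size() const { return size_; }
      size_t allocated_bytes() const { return allocated_bytes_; }
      size_t size_;
      size_t allocated_bytes_;
    };

    struct AllocationStats {
      void IncreaseAllocatedBytes(size_t bytes, const ReadOnlyPage* /*page*/) {
        total_ += bytes;
      }
      size_t total_ = 0;
    };

    // Stand-in allocator: the real AllocateSharedPages returns a SharedMemory
    // handle backed by OS shared memory; this stub just hands out bytes.
    struct PageAllocator {
      size_t AllocatePageSize() const { return 4096; }  // illustrative value
      void* AllocateSharedPages(size_t size, const void* /*hint*/) {
        return ::operator new(size);
      }
    };

    void ShareReadOnlyPages(const std::vector<ReadOnlyPage*>& pages,
                            PageAllocator* page_allocator,
                            AllocationStats* stats,
                            std::vector<Tagged_t>* compressed_addresses) {
      for (const ReadOnlyPage* page : pages) {
        // 1. Size the shared mapping in the allocator's granularity; it does
        //    not need to be V8-page aligned, as the comment above notes.
        size_t size =
            RoundUp(page->size(), page_allocator->AllocatePageSize());
        void* shared_memory = page_allocator->AllocateSharedPages(size, page);

        // 2. Copy the contents of the original page into the shared copy.
        auto* new_page = reinterpret_cast<ReadOnlyPage*>(
            std::memcpy(shared_memory, page, page->size()));

        // 3. Remember the page's compressed address and account its bytes
        //    against the shared copy rather than the original.
        compressed_addresses->push_back(CompressTagged(page->address()));
        stats->IncreaseAllocatedBytes(page->allocated_bytes(), new_page);
      }
    }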
355 // page allocator manually.
369 // Each page may have a small free space that is not tracked by a free list.
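File line 369 records a design fact: the read-only space bump-allocates and keeps no free list, so the unused tail of each page is untracked slack. A tiny sketch of how that slack could be computed from the per-page counters; the names are hypothetical.

    #include <cstddef>
    #include <vector>

    struct ReadOnlyPage {
      size_t area_size() const { return area_size_; }              // usable bytes
      size_t allocated_bytes() const { return allocated_bytes_; }  // bump-allocated
      size_t area_size_;
      size_t allocated_bytes_;
    };

    // With bump-pointer allocation and no free list, the only free memory is
    // the untouched tail of each page: area_size() - allocated_bytes().
    size_t UntrackedFreeSpace(const std::vector<ReadOnlyPage*>& pages) {
      size_t slack = 0;
      for (const ReadOnlyPage* page : pages)
        slack += page->area_size() - page->allocated_bytes();
      return slack;
    }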
517 for (BasicMemoryChunk* page : pages_) {
519 CHECK_NULL(page->owner());
521 CHECK_EQ(page->owner(), this);
524 if (page == Page::FromAllocationAreaAddress(top_)) {
527 ReadOnlySpaceObjectIterator it(isolate->heap(), this, page);
528 Address end_of_previous_object = page->area_start();
529 Address top = page->area_end();
550 CHECK(!page->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION));
551 CHECK(!page->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
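File lines 517-551 are from a verification pass: each page's owner() is checked (null in one configuration, this space in the other), objects are walked from area_start() toward area_end() with every object starting at or after the end of its predecessor, and new-space promotion flags must never be set on a read-only page. A condensed sketch of the ordering check; the types and the ObjectsOnPage iterator are stand-ins for ReadOnlySpaceObjectIterator and friends.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    struct HeapObject {
      Address address() const { return address_; }
      size_t Size() const { return size_; }
      Address address_;
      size_t size_;
    };

    struct ReadOnlyPage {
      Address area_start() const { return area_start_; }
      Address area_end() const { return area_end_; }
      Address area_start_;
      Address area_end_;
    };

    // Stand-in for ReadOnlySpaceObjectIterator: yields the objects on one
    // page in address order.
    std::vector<HeapObject> ObjectsOnPage(const ReadOnlyPage* /*page*/) {
      return {};  // stub; the real iterator decodes objects in place
    }

    // Mirrors the shape of the loop at file lines 517-551: every object must
    // begin at or after the end of its predecessor and end within the page.
    void VerifyPageObjects(const ReadOnlyPage* page) {
      Address end_of_previous_object = page->area_start();
      const Address top = page->area_end();
      for (const HeapObject& object : ObjectsOnPage(page)) {
        assert(object.address() >= end_of_previous_object);
        end_of_previous_object = object.address() + object.Size();
        assert(end_of_previous_object <= top);
      }
    }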
564 for (BasicMemoryChunk* page : pages_) {
565 total_capacity += page->area_size();
566 ReadOnlySpaceObjectIterator it(heap, this, page);
573 total_allocated += page->allocated_bytes();
576 DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
577 DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
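File lines 564-577 verify counters: capacity is the sum of area_size() over all pages, each page's allocated_bytes() must agree exactly with the accounting stats, and the bytes found by actually walking objects may only be smaller (the untracked slack from line 369 accounts for the gap). A compact sketch with stand-in types.

    #include <cassert>
    #include <cstddef>
    #include <unordered_map>
    #include <vector>

    struct ReadOnlyPage {
      size_t area_size() const { return area_size_; }
      size_t allocated_bytes() const { return allocated_bytes_; }
      size_t area_size_;
      size_t allocated_bytes_;
    };

    struct AllocationStats {
      size_t AllocatedOnPage(const ReadOnlyPage* page) const {
        auto it = per_page_.find(page);
        return it == per_page_.end() ? 0 : it->second;
      }
      std::unordered_map<const ReadOnlyPage*, size_t> per_page_;
    };

    // Stand-in: the real check sums object sizes via an object iterator.
    size_t BytesOfLiveObjectsOnPage(const ReadOnlyPage* /*page*/) { return 0; }

    // Mirrors the checks around file lines 564-577: walking objects can only
    // undercount allocated_bytes() (untracked slack), while the page counter
    // and the accounting stats must agree exactly.
    void VerifyCounters(const std::vector<ReadOnlyPage*>& pages,
                        const AllocationStats& accounting_stats) {
      size_t total_capacity = 0;
      size_t total_allocated = 0;
      for (const ReadOnlyPage* page : pages) {
        total_capacity += page->area_size();
        size_t real_allocated = BytesOfLiveObjectsOnPage(page);
        total_allocated += page->allocated_bytes();
        assert(real_allocated <= accounting_stats.AllocatedOnPage(page));
        assert(page->allocated_bytes() ==
               accounting_stats.AllocatedOnPage(page));
      }
      (void)total_capacity;   // the source's final totals check is not shown
      (void)total_allocated;
    }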
605 ReadOnlyPage* page = pages_.back();
606 heap()->incremental_marking()->marking_state()->bitmap(page)->ClearRange(
607 page->AddressToMarkbitIndex(top_), page->AddressToMarkbitIndex(limit_));
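File lines 605-607 clear the marking bitmap over [top_, limit_), the unused tail of the linear allocation area on the last page. The sketch below uses a common markbit scheme, one bit per tagged word indexed from the page start; the exact word size (kTaggedSizeLog2 here) is an assumption, not V8's literal constant.

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    // Assumption: one mark bit per 8-byte tagged word; with pointer
    // compression the tagged word is 4 bytes instead.
    constexpr int kTaggedSizeLog2 = 3;

    struct Bitmap {
      // Clears bits [start_index, end_index); a real implementation works a
      // word at a time rather than bit by bit.
      void ClearRange(size_t start_index, size_t end_index) {
        for (size_t i = start_index; i < end_index; ++i)
          bits_[i / 64] &= ~(uint64_t{1} << (i % 64));
      }
      std::vector<uint64_t> bits_;  // sized to cover the page elsewhere
    };

    struct ReadOnlyPage {
      size_t AddressToMarkbitIndex(Address address) const {
        return (address - page_start_) >> kTaggedSizeLog2;
      }
      Address page_start_;
      Bitmap bitmap_;
    };

    // Mirrors file lines 605-607: drop stale mark bits covering the unused
    // [top, limit) tail of the last page before the space is sealed.
    void ClearUnusedMarkbits(ReadOnlyPage* page, Address top, Address limit) {
      page->bitmap_.ClearRange(page->AddressToMarkbitIndex(top),
                               page->AddressToMarkbitIndex(limit));
    }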
735 PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
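File line 735 logs a page shrink, old end to new end. A sketch of what such a shrink plausibly involves, releasing the committed tail past the last used byte, rounded to the commit granularity; ShrinkPage and FreePagesInRange are hypothetical names.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    using Address = uintptr_t;

    inline Address RoundUpAddress(Address value, size_t granularity) {
      Address g = static_cast<Address>(granularity);
      return (value + g - 1) & ~(g - 1);
    }

    struct ReadOnlyPage {
      Address area_end_;
    };

    // Hypothetical stand-in for the allocator call that decommits the tail.
    void FreePagesInRange(Address /*start*/, Address /*end*/) {}

    // Plausible shape of the shrink that file line 735 logs: keep everything
    // up to the last used byte (rounded to the commit granularity) and give
    // the rest of the committed tail back to the OS.
    void ShrinkPage(ReadOnlyPage* page, Address used_end,
                    size_t commit_page_size) {
      Address new_end = RoundUpAddress(used_end, commit_page_size);
      if (new_end >= page->area_end_) return;  // nothing to release
      std::printf("Shrinking page %p: end %p -> %p\n",
                  static_cast<void*>(page),
                  reinterpret_cast<void*>(page->area_end_),
                  reinterpret_cast<void*>(new_end));
      FreePagesInRange(new_end, page->area_end_);
      page->area_end_ = new_end;
    }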
782 for (ReadOnlyPage* page : artifacts->pages()) {
783 pages_.push_back(page);
784 accounting_stats_.IncreaseAllocatedBytes(page->allocated_bytes(), page);
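File lines 782-784 are the consuming side of sharing: the space adopts each page published in the artifacts and mirrors its byte count into its own accounting, with no copying. A short self-contained sketch with the same reduced stand-ins as above.

    #include <cstddef>
    #include <vector>

    struct ReadOnlyPage {
      size_t allocated_bytes() const { return allocated_bytes_; }
      size_t allocated_bytes_;
    };

    struct AllocationStats {
      void IncreaseAllocatedBytes(size_t bytes, const ReadOnlyPage* /*page*/) {
        total_ += bytes;
      }
      size_t total_ = 0;
    };

    // Mirrors file lines 782-784: adopt the already-shared pages and credit
    // their byte counts to this space's stats; no page contents are copied.
    void AttachSharedPages(const std::vector<ReadOnlyPage*>& artifact_pages,
                           std::vector<ReadOnlyPage*>* pages,
                           AllocationStats* accounting_stats) {
      for (ReadOnlyPage* page : artifact_pages) {
        pages->push_back(page);
        accounting_stats->IncreaseAllocatedBytes(page->allocated_bytes(),
                                                 page);
      }
    }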