Lines Matching refs:page

136   void VerifyMarkingOnPage(const Page* page, Address start, Address end);
149 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
154 LiveObjectRange<kAllLiveObjects>(page, bitmap(page))) {
166   CHECK(bitmap(page)->AllBitsSetInRange(
167             page->AddressToMarkbitIndex(current),
168             page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
169         bitmap(page)->AllBitsClearInRange(
170             page->AddressToMarkbitIndex(current + kTaggedSize * 2),
171             page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
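
The CHECK at 166-171 encodes the verifier's core invariant: in the gap between the end of one live object and the position where the next live object must start, the mark bits have to be uniform, either all set (one contiguous black area) or all clear (the all-clear branch starts kTaggedSize * 2 past `current`, apparently to skip the bits belonging to the object itself). A minimal standalone sketch of that invariant, with std::vector<bool> standing in for V8's mark bitmap and AllBitsEqualInRange as a hypothetical helper:

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical helper: true if every bit in [begin, end) equals `value`.
bool AllBitsEqualInRange(const std::vector<bool>& bitmap, std::size_t begin,
                         std::size_t end, bool value) {
  for (std::size_t i = begin; i < end; ++i) {
    if (bitmap[i] != value) return false;
  }
  return true;
}

// The gap between two live objects must be uniformly marked or unmarked.
void VerifyGapIsUniform(const std::vector<bool>& bitmap, std::size_t gap_begin,
                        std::size_t gap_end) {
  assert(AllBitsEqualInRange(bitmap, gap_begin, gap_end, true) ||
         AllBitsEqualInRange(bitmap, gap_begin, gap_end, false));
}
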
179 // The bottom position is at the start of its page. This allows us to use
180 // page->area_start() as the start of the range on all pages.
186 Page* page = *(it++);
187 Address limit = it != range.end() ? page->area_end() : end;
188 CHECK(limit == end || !page->Contains(end));
189 VerifyMarkingOnPage(page, page->area_start(), limit);
374 Page* page = *(it++);
375 Address current = page->area_start();
376 Address limit = it != range.end() ? page->area_end() : space->top();
377 CHECK(limit == space->top() || !page->Contains(space->top()));
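
Both loops above (186-189 and 374-377) follow the same pattern for walking an address range page by page: every page contributes [area_start(), area_end()), except the last one, whose limit is clamped to the overall end of the range; the comment at 179-180 explains why area_start() is safe as the lower bound everywhere. A sketch of the pattern under simplified assumptions (PageSpan is a stand-in type, not V8's Page):

#include <cstddef>
#include <cstdint>
#include <vector>

struct PageSpan {
  std::uintptr_t area_start;
  std::uintptr_t area_end;
};

// Visit each page's payload, clamping the last page to the range's `end`.
template <typename Visitor>
void VisitRangeOnPages(const std::vector<PageSpan>& pages, std::uintptr_t end,
                       Visitor visit) {
  for (std::size_t i = 0; i < pages.size(); ++i) {
    const bool is_last = (i + 1 == pages.size());
    const std::uintptr_t limit = is_last ? end : pages[i].area_end;
    visit(pages[i].area_start, limit);
  }
}

The same clamping shows up again at 5507-5511, there with Contains() guarding both ends of the range.
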
736 void MarkCompactCollector::EnsurePageIsSwept(Page* page) {
737 sweeper()->EnsurePageIsSwept(page);
761 // Time to take for a single area (=payload of page). Used as soon as there
777 // and a goal for a single page.
811 // We use two conditions to decide whether a page qualifies as an evacuation
813 // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
814 //   between live bytes and capacity of this page (= area).
822 // Pairs of (live_bytes_in_page, page).
908 // - fragmentation of (n+1)-th page does not exceed the specified limit.
923 "compaction-selection-page: space=%s free_bytes_page=%zu "
1716 Page* page = Page::FromHeapObject(object);
1717 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1718 page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1720 page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1857 static void Move(Page* page) {
1860 page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1861 page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1864 page->heap()->new_space()->from_space().RemovePage(page);
1865 Page* new_page = Page::ConvertNewToOld(page);
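
The two Move() variants at 1857-1865 promote an entire page without copying any objects: NEW_TO_NEW re-links the page inside the new space and tags it with PAGE_NEW_NEW_PROMOTION, while NEW_TO_OLD unlinks it from from-space and converts the page to an old-space page in place. A simplified model of the distinction (SimPage and its fields are stand-ins, not V8 types):

// Neither variant copies objects; only page ownership or flags change.
enum class PromotionMode { kNewToNew, kNewToOld };

struct SimPage {
  bool in_new_space = true;
  bool new_new_promotion_flag = false;
};

void MoveWholePage(SimPage& page, PromotionMode mode) {
  if (mode == PromotionMode::kNewToNew) {
    page.new_new_promotion_flag = true;  // later phases must treat it specially
  } else {
    page.in_new_space = false;  // becomes an old-space page in place
  }
}
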
2488 // finished as it will reset page flags that share the same bitmap as
3224 // on another page.
3560 return "page-new-to-old";
3564 return "page-new-to-new";
3626 // |saved_live_bytes| returns the live bytes of the page that was processed.
3673 "evacuation[%p]: page=%p new_space=%d "
3785 // Aborted compaction page. Actual processing happens on the main
3924 for (Page* page : new_space_evacuation_pages_) {
3925 intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
3928 if (ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kYes)) {
3929 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
3930 DCHECK_EQ(heap()->old_space(), page->owner());
3931 // The move added page->allocated_bytes to the old space, but we are
3932 // going to sweep the page and add page->live_byte_count.
3933     heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
3934                                                 page);
3936 evacuation_items.emplace_back(ParallelWorkItem{}, page);
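
The comment at 3931-3934 describes a double-counting hazard: moving the page credits page->allocated_bytes to the old space, but the subsequent sweep will add page->live_byte_count again, so the initial credit is backed out up front. A worked sketch of the bookkeeping (illustrative names, not V8's accounting API):

#include <cstddef>

struct SpaceStats {
  std::size_t allocated_bytes = 0;
  void Increase(std::size_t n) { allocated_bytes += n; }
  void Decrease(std::size_t n) { allocated_bytes -= n; }
};

void MovePageAccounting(SpaceStats& old_space,
                        std::size_t page_allocated_bytes,
                        std::size_t page_live_bytes) {
  old_space.Increase(page_allocated_bytes);  // side effect of moving the page
  old_space.Decrease(page_allocated_bytes);  // back it out (see comment above)
  old_space.Increase(page_live_bytes);       // what sweeping will contribute
}
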
3941 for (Page* page : old_space_evacuation_pages_) {
3942 if (!FLAG_compact_with_stack || page->owner_identity() == CODE_SPACE) {
3943 ReportAbortedEvacuationCandidateDueToFlags(page->area_start(), page);
3946 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3952 for (Page* page : old_space_evacuation_pages_) {
3953 if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) continue;
3955 live_bytes += non_atomic_marking_state()->live_bytes(page);
3956 evacuation_items.emplace_back(ParallelWorkItem{}, page);
4012 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
4014 LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
4142 // to still have access to page headers.
4355 // Slots can point to "to" space if the page has been moved, or if the
4358 // In case the page has been moved, check markbits to determine liveness
4707 Address failed_start, Page* page) {
4710 std::make_pair(failed_start, page));
4714 Address failed_start, Page* page) {
4717 std::make_pair(failed_start, page));
4725 Address failed_start, Page* page) {
4726 page->SetFlag(Page::COMPACTION_WAS_ABORTED);
4727 // Aborted compaction page. We have to record slots here, since we
4731 RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(), failed_start,
4733 RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
4737 if (failed_start > page->area_start()) {
4739 InvalidatedSlotsCleanup::OldToNew(page);
4740 old_to_new_cleanup.Free(page->area_start(), failed_start);
4744 LiveObjectVisitor::RecomputeLiveBytes(page, marking_state);
4748 page, marking_state, &record_visitor, LiveObjectVisitor::kKeepMarking);
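
Lines 4725-4748 handle a compaction abort: objects below failed_start were already evacuated, so the recorded old-to-new slots (and invalidated-slot entries) covering [page->address(), failed_start) are dropped, while the surviving tail keeps its slots, has its live bytes recomputed, and is revisited with kKeepMarking. A hedged sketch with a flat slot list standing in for V8's RememberedSet:

#include <algorithm>
#include <cstdint>
#include <vector>

// Drop all recorded slots in [begin, end); stand-in for RemoveRange().
void RemoveRange(std::vector<std::uintptr_t>& slots, std::uintptr_t begin,
                 std::uintptr_t end) {
  slots.erase(
      std::remove_if(slots.begin(), slots.end(),
                     [=](std::uintptr_t s) { return s >= begin && s < end; }),
      slots.end());
}

void OnAbortedEvacuation(std::vector<std::uintptr_t>& old_to_new_slots,
                         std::uintptr_t page_start,
                         std::uintptr_t failed_start) {
  // Everything below failed_start already moved away; its slots are stale.
  RemoveRange(old_to_new_slots, page_start, failed_start);
}
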
4771 // After clearing the evacuation candidate flag the page is again in a
4804 // Loop needs to support deletion if live bytes == 0 for a page.
4815 // One unused page is kept, all further are released before sweeping them.
4819 PrintIsolate(isolate(), "sweeping: released page: %p",
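
The comments at 4804 and 4815 describe the post-sweep page policy: the loop must tolerate unlinking pages whose live bytes dropped to zero, and of those empty pages exactly one is kept for reuse while the rest are released. A sketch of that keep-one policy (illustrative container and callback, not V8's sweeper):

#include <cstddef>
#include <list>

struct SweptPage {
  std::size_t live_bytes;
};

void ReleaseEmptyPages(std::list<SweptPage*>& pages, SweptPage*& cached_page,
                       void (*release)(SweptPage*)) {
  for (auto it = pages.begin(); it != pages.end();) {
    SweptPage* p = *it;
    if (p->live_bytes != 0) {
      ++it;
      continue;
    }
    it = pages.erase(it);  // the loop supports deletion, as noted above
    if (cached_page == nullptr) {
      cached_page = p;  // one unused page is kept around
    } else {
      release(p);  // all further empty pages are released
    }
  }
}
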
5507 for (Page* page : PageRange(space_start, space_end)) {
5509 page->Contains(space_start) ? space_start : page->area_start();
5510 Address end = page->Contains(space_end) ? space_end : page->area_end();
5511 items->emplace_back(CreateToSpaceUpdatingItem(page, start, end));
5630 // has to be in a to-space page.
5745 // Create items for each page.
5994 for (Page* page : new_space_evacuation_pages_) {
5995 intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
5999 ShouldMovePage(page, live_bytes_on_page, AlwaysPromoteYoung::kNo)) {
6000 if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
6001 EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
6003 EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
6006 evacuation_items.emplace_back(ParallelWorkItem{}, page);
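
This last loop (5994-6006) is the young-generation counterpart of the promotion loop at 3924-3936, but with AlwaysPromoteYoung::kNo the destination depends on the page's age: a page flagged NEW_SPACE_BELOW_AGE_MARK (its objects already survived a scavenge) is promoted NEW_TO_OLD, any other movable page goes NEW_TO_NEW. The decision reduces to two booleans (simplified stand-in names):

enum class PageMove { kNone, kNewToOld, kNewToNew };

// Mirrors the if/else at 5999-6003.
PageMove ClassifyNewSpacePage(bool should_move_page, bool below_age_mark) {
  if (!should_move_page) return PageMove::kNone;
  return below_age_mark ? PageMove::kNewToOld : PageMove::kNewToNew;
}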