Lines matching refs:hpage
890 static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp, int node,
893 *hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
894 if (unlikely(!*hpage)) {
899 folio_prep_large_rmappable((struct folio *)*hpage);
1058 static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
1066 if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
1069 folio = page_folio(*hpage);
1072 *hpage = NULL;
1075 count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
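
The lines at 890-899 and 1058-1075 are khugepaged's allocation path: a helper allocates an HPAGE_PMD_ORDER page on the chosen node, and alloc_charge_hpage() charges it to the memcg, clearing *hpage on failure. A condensed sketch of how those fragments fit together is below; the gfp mask, node selection, and SCAN_* return values are assumptions layered around the calls shown in the listing, not part of it.

	/*
	 * Condensed sketch of the allocation/charge path referenced above.
	 * The gfp mask and target node are simplified assumptions; only the
	 * calls that appear in the listing are taken as given.
	 */
	static bool hpage_collapse_alloc_page(struct page **hpage, gfp_t gfp,
					      int node, nodemask_t *nmask)
	{
		*hpage = __alloc_pages(gfp, HPAGE_PMD_ORDER, node, nmask);
		if (unlikely(!*hpage))
			return false;		/* allocation failed */

		folio_prep_large_rmappable((struct folio *)*hpage);
		return true;
	}

	static int alloc_charge_hpage(struct page **hpage, struct mm_struct *mm,
				      struct collapse_control *cc)
	{
		gfp_t gfp = GFP_TRANSHUGE;	/* assumption: the real code derives this from cc */
		int node = numa_node_id();	/* assumption: the real code picks the target node from cc */
		struct folio *folio;

		if (!hpage_collapse_alloc_page(hpage, gfp, node, &cc->alloc_nmask))
			return SCAN_ALLOC_HUGE_PAGE_FAIL;

		folio = page_folio(*hpage);
		if (unlikely(mem_cgroup_charge(folio, mm, gfp))) {
			folio_put(folio);
			*hpage = NULL;
			return SCAN_CGROUP_CHARGE_FAIL;
		}

		count_memcg_page_event(*hpage, THP_COLLAPSE_ALLOC);
		return SCAN_SUCCEED;
	}
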
1088 struct page *hpage;
1104 result = alloc_charge_hpage(&hpage, mm, cc);
1200 result = __collapse_huge_page_copy(pte, hpage, pmd, _pmd,
1213 __SetPageUptodate(hpage);
1216 _pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
1221 page_add_new_anon_rmap(hpage, vma, address);
1222 lru_cache_add_inactive_or_unevictable(hpage, vma);
1228 hpage = NULL;
1234 if (hpage)
1235 put_page(hpage);
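
Lines 1088-1235 track hpage through the anonymous collapse path: the freshly allocated page is filled by __collapse_huge_page_copy(), marked uptodate, turned into a huge PMD, added to the anon rmap and the LRU, and the local pointer is then cleared so that the error path's put_page() only runs when the collapse failed. A heavily condensed, fragment-style sketch of that tail follows; the locking, the pgtable deposit, and the surrounding declarations are assumptions reconstructed around the hpage calls in the listing.

	/*
	 * Fragment sketch of the end of the anon collapse; declarations,
	 * labels and locking around it are assumptions, only the hpage
	 * related calls come from the listing above.
	 */
	__SetPageUptodate(hpage);

	_pmd = mk_huge_pmd(hpage, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	spin_lock(pmd_ptl);
	page_add_new_anon_rmap(hpage, vma, address);
	lru_cache_add_inactive_or_unevictable(hpage, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	hpage = NULL;			/* ownership moved into the page table */
	result = SCAN_SUCCEED;
	...
	if (hpage)
		put_page(hpage);	/* only reached when the collapse failed */
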
1436 /* hpage must be locked, and mmap_lock must be held */
1438 pmd_t *pmdp, struct page *hpage)
1447 VM_BUG_ON(!PageTransHuge(hpage));
1450 if (do_set_pmd(&vmf, hpage))
1453 get_page(hpage);
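
Lines 1436-1453 cover the small helper that maps an already existing compound page as a huge PMD by reusing the fault path's do_set_pmd(). A sketch of the whole helper is below; the vm_fault initialisation is an assumption, the checks and calls come from the listing.

	/* Sketch of set_huge_pmd(); the vm_fault fields are an assumption. */
	static int set_huge_pmd(struct vm_area_struct *vma, unsigned long addr,
				pmd_t *pmdp, struct page *hpage)
	{
		struct vm_fault vmf = {
			.vma = vma,
			.address = addr,
			.flags = 0,
			.pmd = pmdp,
		};

		/* hpage must be locked, and mmap_lock must be held */
		mmap_assert_locked(vma->vm_mm);
		VM_BUG_ON(!PageTransHuge(hpage));

		if (do_set_pmd(&vmf, hpage))
			return SCAN_FAIL;	/* fault path refused the mapping */

		get_page(hpage);		/* reference now held by the mapping */
		return SCAN_SUCCEED;
	}
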
1476 struct page *hpage;
1509 hpage = find_lock_page(vma->vm_file->f_mapping,
1511 if (!hpage)
1514 if (!PageHead(hpage)) {
1519 if (compound_order(hpage) != HPAGE_PMD_ORDER) {
1564 * page table, but the new page will not be a subpage of hpage.
1566 if (hpage + i != page)
1581 * So page lock of hpage does not protect from it, so we must not drop
1605 * page lock stops more PTEs of the hpage being faulted in, but
1614 if (hpage + i != page)
1633 page_ref_sub(hpage, nr_ptes);
1634 add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
1658 ? set_huge_pmd(vma, haddr, pmd, hpage)
1664 page_ref_sub(hpage, nr_ptes);
1665 add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
1674 unlock_page(hpage);
1675 put_page(hpage);
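
The block from 1476 to 1675 is the retract path that replaces an existing PTE-mapped huge page with a single PMD: the head page is looked up and locked, every PTE in the range is checked to map exactly the matching subpage of hpage (the two `hpage + i != page` tests), and once the small mappings are gone the page's refcount and the mm's file counters are lowered by the number of PTEs dropped. An illustrative sketch of that per-PTE check is below; apart from hpage, HPAGE_PMD_NR and the two accounting calls, the names and labels are assumptions.

	/*
	 * Illustrative sketch of the subpage check; start_pte, haddr,
	 * nr_ptes and the abort label are assumptions.
	 */
	for (i = 0, addr = haddr, pte = start_pte;
	     i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
		pte_t ptent = ptep_get(pte);
		struct page *page;

		if (pte_none(ptent))
			continue;
		page = vm_normal_page(vma, addr, ptent);
		/* each present PTE must map the i-th subpage of hpage */
		if (!page || hpage + i != page)
			goto abort;
		nr_ptes++;
	}

	/* once the PTEs are cleared, drop their references and accounting */
	page_ref_sub(hpage, nr_ptes);
	add_mm_counter(mm, mm_counter_file(hpage), -nr_ptes);
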
1793 struct page *hpage;
1807 result = alloc_charge_hpage(&hpage, mm, cc);
1811 __SetPageLocked(hpage);
1813 __SetPageSwapBacked(hpage);
1814 hpage->index = start;
1815 hpage->mapping = mapping;
2050 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2053 if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR), page) > 0) {
2060 clear_highpage(hpage + (index % HPAGE_PMD_NR));
2123 nr = thp_nr_pages(hpage);
2125 __mod_lruvec_page_state(hpage, NR_SHMEM_THPS, nr);
2127 __mod_lruvec_page_state(hpage, NR_FILE_THPS, nr);
2130 __mod_lruvec_page_state(hpage, NR_FILE_PAGES, nr_none);
2132 __mod_lruvec_page_state(hpage, NR_SHMEM, nr_none);
2136 * Mark hpage as uptodate before inserting it into the page cache so
2139 folio = page_folio(hpage);
2149 xas_store(&xas, hpage);
2160 unlock_page(hpage);
2205 hpage->mapping = NULL;
2207 unlock_page(hpage);
2208 put_page(hpage);
2211 trace_mm_khugepaged_collapse_file(mm, hpage, index, is_shmem, addr, file, nr, result);
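
Finally, lines 1793-2211 follow hpage through the file/shmem collapse: it is allocated and charged, marked locked and swap-backed, given its index and mapping, filled by zeroing holes and copying present pages with the machine-check-safe helper, accounted as a file or shmem THP, marked uptodate and stored into the page cache xarray; on failure its mapping is cleared and it is unlocked and released. Below is a sketch of the copy step only (lines 2050-2060); the loop over the isolated source pages and the rollback label are assumptions, the per-subpage indexing comes from the listing.

	/*
	 * Sketch of the copy step: zero-fill holes, copy present pages with
	 * the machine-check-safe helper, abort the collapse on a copy error.
	 * pagelist, start, end and the rollback label are assumptions.
	 */
	index = start;
	list_for_each_entry(page, &pagelist, lru) {
		while (index < page->index) {
			clear_highpage(hpage + (index % HPAGE_PMD_NR));
			index++;
		}
		if (copy_mc_highpage(hpage + (page->index % HPAGE_PMD_NR),
				     page) > 0) {
			result = SCAN_COPY_MC;	/* uncorrectable memory error */
			goto rollback;
		}
		index++;
	}
	while (index < end) {
		clear_highpage(hpage + (index % HPAGE_PMD_NR));
		index++;
	}
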