Lines Matching defs:page
34 #include <asm/page.h>
56 * Minimum page order among possible hugepage sizes, set to a proper value
76 * Serializes faults on the same logical page. This is used to
82 static inline bool PageHugeFreed(struct page *head)
87 static inline void SetPageHugeFreed(struct page *head)
92 static inline void ClearPageHugeFreed(struct page *head)
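The listing only captures the helper signatures at lines 82-92. In this revision (before the hugetlb page-flag rework) the "freed" state is most likely kept in an otherwise-unused word of one of the compound page's tail pages; the sketch below assumes it lives in page_private() of the fifth subpage, which is an assumption rather than something the matches confirm:

/* Sketch: record "sits on a hugetlb free list" in a spare tail-page word. */
static inline bool PageHugeFreed(struct page *head)
{
	return page_private(head + 4) == -1UL;
}

static inline void SetPageHugeFreed(struct page *head)
{
	set_page_private(head + 4, -1UL);
}

static inline void ClearPageHugeFreed(struct page *head)
{
	set_page_private(head + 4, 0);
}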
198 * Return the number of global page reservations that must be dropped.
498 * Add the huge page range represented by [f, t) to the reserve
512 * 1 page will only require at most 1 entry.
737 * the reserve map region for a page. The huge page itself was freed
738 * and removed from the page cache. This routine will adjust the subpool
796 * Convert the address within this vma to the page offset within
797 * the mapping, in pagecache page units; huge pages here.
815 * cases this will be same size as used by the page table entries.
826 * Return the page size being used by the MMU to back a VMA. In the majority
827 * of cases, the page size used by the kernel matches the MMU size. On
859 * after the page is instantiated. A private mapping has a region map
1013 * allocated page will go into page cache and is regarded as
1068 static void enqueue_huge_page(struct hstate *h, struct page *page)
1070 int nid = page_to_nid(page);
1071 list_move(&page->lru, &h->hugepage_freelists[nid]);
1074 SetPageHugeFreed(page);
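Only the lines containing "page" survive the match filter, so enqueue_huge_page() appears above without its counter updates. A reconstruction of the likely full helper, with the assumed elided lines marked:

static void enqueue_huge_page(struct hstate *h, struct page *page)
{
	int nid = page_to_nid(page);

	list_move(&page->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;			/* assumed elided line */
	h->free_huge_pages_node[nid]++;		/* assumed elided line */
	SetPageHugeFreed(page);
}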
1077 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
1079 struct page *page;
1082 list_for_each_entry(page, &h->hugepage_freelists[nid], lru) {
1083 if (nocma && is_migrate_cma_page(page))
1086 if (PageHWPoison(page))
1089 list_move(&page->lru, &h->hugepage_activelist);
1090 set_page_refcounted(page);
1091 ClearPageHugeFreed(page);
1094 return page;
1100 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
1114 struct page *page;
1126 page = dequeue_huge_page_node_exact(h, node);
1127 if (page)
1128 return page;
1136 static struct page *dequeue_huge_page_vma(struct hstate *h,
1141 struct page *page;
1149 * have no page reserves. This check ensures that reservations are
1162 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
1163 if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
1164 SetPagePrivate(page);
1169 return page;
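In dequeue_huge_page_vma(), the check at line 1163 guards more than the SetPagePrivate() call shown at 1164; the companion bookkeeping is presumably the per-hstate reserve counter. A sketch of that branch (the counter decrement is an assumption):

	page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
	if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
		SetPagePrivate(page);	/* page was allocated against a reservation */
		h->resv_huge_pages--;	/* assumed elided bookkeeping */
	}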
1177 * We may have allocated or freed a huge page based on a different
1199 * allocate a persistent huge page for the pool and advance the
1218 * node ["this node"] from which to free a huge page. Advance the
1219 * next node id whether or not we find a free huge page to free so
1247 static void destroy_compound_gigantic_page(struct page *page,
1252 struct page *p = page + 1;
1254 atomic_set(compound_mapcount_ptr(page), 0);
1255 atomic_set(compound_pincount_ptr(page), 0);
1257 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1262 set_compound_order(page, 0);
1263 page[1].compound_nr = 0;
1264 __ClearPageHead(page);
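destroy_compound_gigantic_page() is shown without the body of its tail-page loop at line 1257. It presumably detaches every tail page from the head and gives each one its own reference count back, roughly:

	for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
		clear_compound_head(p);		/* tail no longer points at the head */
		set_page_refcounted(p);		/* each base page becomes independently freeable */
	}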
1267 static void free_gigantic_page(struct page *page, unsigned int order)
1270 * If the page isn't allocated using the cma allocator,
1274 if (cma_release(hugetlb_cma[page_to_nid(page)], page, 1 << order))
1278 free_contig_range(page_to_pfn(page), 1 << order);
1282 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1291 struct page *page;
1295 page = cma_alloc(hugetlb_cma[nid], nr_pages,
1297 if (page)
1298 return page;
1306 page = cma_alloc(hugetlb_cma[node], nr_pages,
1308 if (page)
1309 return page;
1319 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1327 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1332 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1333 static inline void destroy_compound_gigantic_page(struct page *page,
1337 static void update_and_free_page(struct hstate *h, struct page *page)
1340 struct page *subpage = page;
1346 h->nr_huge_pages_node[page_to_nid(page)]--;
1348 i++, subpage = mem_map_next(subpage, page, i)) {
1354 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1355 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page_rsvd(page), page);
1360 * page dtor. This is needed in case someone takes an additional
1361 * temporary ref to the page, and freeing is delayed until they drop
1366 * page destroy_compound_gigantic_page will turn the compound page
1373 set_page_refcounted(page);
1375 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1381 destroy_compound_gigantic_page(page, huge_page_order(h));
1382 free_gigantic_page(page, huge_page_order(h));
1385 set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
1386 __free_pages(page, huge_page_order(h));
1407 bool page_huge_active(struct page *page)
1409 return PageHeadHuge(page) && PagePrivate(&page[1]);
1412 /* never called for tail page */
1413 void set_page_huge_active(struct page *page)
1415 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1416 SetPagePrivate(&page[1]);
1419 static void clear_page_huge_active(struct page *page)
1421 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1422 ClearPagePrivate(&page[1]);
1426 * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1429 static inline bool PageHugeTemporary(struct page *page)
1431 if (!PageHuge(page))
1434 return (unsigned long)page[2].mapping == -1U;
1437 static inline void SetPageHugeTemporary(struct page *page)
1439 page[2].mapping = (void *)-1U;
1442 static inline void ClearPageHugeTemporary(struct page *page)
1444 page[2].mapping = NULL;
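The helpers at lines 1407-1444 overload fields of the compound page's tail pages to carry hugetlb-private state. A rough map of that encoding, as suggested by the matches above (the location of the "freed" marker is an assumption, and later kernels fold all of this into dedicated hugetlb page flags):

/*
 *   page[1]  PG_private flag          -> hugepage is "active" (page_huge_active)
 *   page[2].mapping == (void *)-1U    -> hugepage is "temporary" (PageHugeTemporary)
 *   spare word in another tail page   -> hugepage sits on a free list (PageHugeFreed)
 */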
1447 static void __free_huge_page(struct page *page)
1451 * compound page destructor.
1453 struct hstate *h = page_hstate(page);
1454 int nid = page_to_nid(page);
1456 (struct hugepage_subpool *)page_private(page);
1459 VM_BUG_ON_PAGE(page_count(page), page);
1460 VM_BUG_ON_PAGE(page_mapcount(page), page);
1462 set_page_private(page, 0);
1463 page->mapping = NULL;
1464 restore_reserve = PagePrivate(page);
1465 ClearPagePrivate(page);
1468 * If PagePrivate() was set on page, page allocation consumed a
1469 * reservation. If the page was associated with a subpool, there
1470 * would have been a page reserved in the subpool before allocation
1473 * remove the reserved page from the subpool.
1479 * after page is free. Therefore, force restore_reserve
1487 clear_page_huge_active(page);
1489 pages_per_huge_page(h), page);
1491 pages_per_huge_page(h), page);
1495 if (PageHugeTemporary(page)) {
1496 list_del(&page->lru);
1497 ClearPageHugeTemporary(page);
1498 update_and_free_page(h, page);
1500 /* remove the page from active list */
1501 list_del(&page->lru);
1502 update_and_free_page(h, page);
1506 arch_clear_hugepage_flags(page);
1507 enqueue_huge_page(h, page);
1518 * be freed and frees them one-by-one. As the page->mapping pointer is
1527 struct page *page;
1532 page = container_of((struct address_space **)node,
1533 struct page, mapping);
1535 __free_huge_page(page);
1540 void free_huge_page(struct page *page)
1551 if (llist_add((struct llist_node *)&page->mapping,
1557 __free_huge_page(page);
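Lines 1540-1557 show free_huge_page() pushing the page onto a lock-free list by overlaying a llist_node on page->mapping, with __free_huge_page() doing the real work and the worker at lines 1527-1535 draining the list. A sketch of the whole deferral pattern; the hpage_freelist and free_hpage_work names do not appear in the matches and are assumptions:

static void free_hpage_workfn(struct work_struct *work);	/* drains the list, lines 1527-1535 */
static LLIST_HEAD(hpage_freelist);
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);

void free_huge_page(struct page *page)
{
	/*
	 * hugetlb_lock cannot be taken from non-task context (e.g. softirq),
	 * so hand the page to a workqueue in that case.
	 */
	if (!in_task()) {
		/*
		 * llist_add() returns true only when the list was empty, so
		 * the work item is scheduled exactly once per batch.
		 */
		if (llist_add((struct llist_node *)&page->mapping,
			      &hpage_freelist))
			schedule_work(&free_hpage_work);
		return;
	}

	__free_huge_page(page);
}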
1560 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1562 INIT_LIST_HEAD(&page->lru);
1563 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1564 set_hugetlb_cgroup(page, NULL);
1565 set_hugetlb_cgroup_rsvd(page, NULL);
1569 ClearPageHugeFreed(page);
1573 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1577 struct page *p = page + 1;
1580 set_compound_order(page, order);
1581 __ClearPageReserved(page);
1582 __SetPageHead(page);
1583 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1590 * PG_reserved set on a tail page (despite the head page not
1593 * on the head page when they need to know if put_page() is needed
1598 set_compound_head(p, page);
1600 atomic_set(compound_mapcount_ptr(page), -1);
1601 atomic_set(compound_pincount_ptr(page), 0);
1609 int PageHuge(struct page *page)
1611 if (!PageCompound(page))
1614 page = compound_head(page);
1615 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1620 * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1623 int PageHeadHuge(struct page *page_head)
1634 * Upon entry, the page is locked which means that page_mapping() is
1638 struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
1651 pgoff_t hugetlb_basepage_index(struct page *page)
1653 struct page *page_head = compound_head(page);
1658 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1660 compound_idx = page - page_head;
1665 static struct page *alloc_buddy_huge_page(struct hstate *h,
1670 struct page *page;
1674 * By default we always try hard to allocate the page with
1676 * a loop (to adjust global huge page counts) and previous allocation
1687 page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1688 if (page)
1694 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a page this
1698 if (node_alloc_noretry && page && !alloc_try_hard)
1702 * If we tried hard to get a page but failed, set bit so that
1706 if (node_alloc_noretry && !page && alloc_try_hard)
1709 return page;
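alloc_buddy_huge_page() appears above without the code that builds gfp_mask. Judging from the comments at lines 1674-1706, it toggles __GFP_RETRY_MAYFAIL per node based on the node_alloc_noretry bitmap; a sketch of that likely preamble (treat the exact flag set as an assumption):

	int order = huge_page_order(h);
	bool alloc_try_hard = true;

	/* Back off from "try hard" on nodes where a try-hard allocation already failed. */
	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
		alloc_try_hard = false;

	gfp_mask |= __GFP_COMP | __GFP_NOWARN;
	if (alloc_try_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();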
1713 * Common helper to allocate a fresh hugetlb page. All specific allocators
1716 static struct page *alloc_fresh_huge_page(struct hstate *h,
1720 struct page *page;
1723 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1725 page = alloc_buddy_huge_page(h, gfp_mask,
1727 if (!page)
1731 prep_compound_gigantic_page(page, huge_page_order(h));
1732 prep_new_huge_page(h, page, page_to_nid(page));
1734 return page;
1738 * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1744 struct page *page;
1749 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed,
1751 if (page)
1755 if (!page)
1758 put_page(page); /* free it into the hugepage allocator */
1764 * Free huge page from pool from next node to free.
1782 struct page *page =
1784 struct page, lru);
1785 list_del(&page->lru);
1792 update_and_free_page(h, page);
1808 * 0: successfully dissolved free hugepages or the page is not a
1811 int dissolve_free_huge_page(struct page *page)
1817 if (!PageHuge(page))
1821 if (!PageHuge(page)) {
1826 if (!page_count(page)) {
1827 struct page *head = compound_head(page);
1834 * We should make sure that the page is already on the free list
1844 * to successfully dissolve the page if we do a
1847 * for increasing the success rate of dissolving page.
1853 * Move PageHWPoison flag from head page to the raw error page,
1854 * which makes any subpages other than the error page reusable.
1856 if (PageHWPoison(head) && page != head) {
1857 SetPageHWPoison(page);
1883 struct page *page;
1890 page = pfn_to_page(pfn);
1891 rc = dissolve_free_huge_page(page);
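Lines 1883-1891 belong to dissolve_free_huge_pages(), which the filter reduces to its locals and the call at 1891. The surrounding loop presumably steps through the pfn range at the minimum hugepage order described by the comment at line 56 and stops at the first failure. A sketch, assuming the usual start_pfn/end_pfn parameters and a minimum_order variable (both names are assumptions):

	unsigned long pfn;
	struct page *page;
	int rc = 0;

	if (!hugepages_supported())
		return rc;

	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
		page = pfn_to_page(pfn);
		rc = dissolve_free_huge_page(page);
		if (rc)
			break;
	}

	return rc;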
1900 * Allocates a fresh surplus page from the page allocator.
1902 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1905 struct page *page = NULL;
1915 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1916 if (!page)
1922 * Double check that and simply deallocate the new page
1924 * temporary page to work around the nasty free_huge_page
1928 SetPageHugeTemporary(page);
1930 put_page(page);
1934 h->surplus_huge_pages_node[page_to_nid(page)]++;
1940 return page;
1943 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1946 struct page *page;
1951 page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask, NULL);
1952 if (!page)
1959 SetPageHugeTemporary(page);
1961 return page;
1965 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1968 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1971 struct page *page;
1978 page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1981 return page;
1984 /* page migration callback function */
1985 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1990 struct page *page;
1992 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1993 if (page) {
1995 return page;
2004 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
2009 struct page *page;
2015 page = alloc_huge_page_nodemask(h, node, nodemask, gfp_mask);
2018 return page;
2029 struct page *page, *tmp;
2048 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
2050 if (!page) {
2054 list_add(&page->lru, &surplus_list);
2089 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
2093 * This page is now managed by the hugetlb allocator and has
2096 put_page_testzero(page);
2097 VM_BUG_ON_PAGE(page_count(page), page);
2098 enqueue_huge_page(h, page);
2104 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
2105 put_page(page);
2168 * are used by the huge page allocation routines to manage reservations.
2170 * vma_needs_reservation is called to determine if the huge page at addr
2174 * the huge page has been allocated, vma_commit_reservation is called
2175 * to add the page to the reservation map. If the page allocation fails,
2186 * be restored when a newly allocated huge page must be freed. It is
2214 * 1 page, and that adding to resv map a 1 page entry can only
2293 * specific error paths, a huge page was allocated (via alloc_huge_page)
2294 * and is about to be freed. If a reservation for the page existed,
2296 * in the newly allocated page. When the page is freed via free_huge_page,
2304 struct page *page)
2306 if (unlikely(PagePrivate(page))) {
2315 * as though the reservation for this page was
2317 * faulting in the page at a later time. This
2318 * is better than inconsistent global huge page
2321 ClearPagePrivate(page);
2329 ClearPagePrivate(page);
2335 struct page *alloc_huge_page(struct vm_area_struct *vma,
2340 struct page *page;
2350 * has a reservation for the page to be allocated. A return
2399 * glb_chg is passed to indicate whether or not a page must be taken
2403 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2404 if (!page) {
2406 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2407 if (!page)
2411 SetPagePrivate(page);
2414 list_add(&page->lru, &h->hugepage_activelist);
2417 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2419 * hugetlb_cgroup pointer on the page.
2423 h_cg, page);
2428 set_page_private(page, (unsigned long)spool);
2433 * The page was added to the reservation map between
2437 * in hugetlb_reserve_pages for the same page. Also,
2447 pages_per_huge_page(h), page);
2449 return page;
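alloc_huge_page() (lines 2335-2449) is the allocation entry point used at fault and mmap time; the matches show its ingredients but not their order. A rough outline of the sequence they imply; helper names that do not appear in the matches (the subpool and cgroup charge steps) are assumptions based on the usual hugetlb flow:

/*
 * alloc_huge_page(), roughly:
 *   1. vma_needs_reservation()           - does (vma, addr) already hold a reserve?
 *   2. charge the subpool                - unless the existing reserve already covers it
 *   3. charge hugetlb_cgroup             - both the fault charge and the reservation charge
 *   4. dequeue_huge_page_vma()           - prefer a page from the free lists
 *   5. alloc_buddy_huge_page_with_mpol() - otherwise allocate a fresh surplus page
 *   6. set_page_private(page, spool), commit the reservation, return the page
 * The error paths unwind these steps in reverse order.
 */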
2479 * Use the beginning of the huge page to store the
2507 struct page *page = virt_to_page(m);
2511 WARN_ON(page_count(page) != 1);
2512 prep_compound_gigantic_page(page, huge_page_order(h));
2513 WARN_ON(PageReserved(page));
2514 prep_new_huge_page(h, page, page_to_nid(page));
2515 put_page(page); /* free it into the hugepage allocator */
2522 adjust_managed_page_count(page, pages_per_huge_page(h));
2568 pr_warn("HugeTLB: allocating %lu of page size %s failed. Only allocated %lu hugepages.\n",
2599 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2614 struct page *page, *next;
2616 list_for_each_entry_safe(page, next, freel, lru) {
2619 if (PageHighMem(page))
2621 list_del(&page->lru);
2622 update_and_free_page(h, page);
2624 h->free_huge_pages_node[page_to_nid(page)]--;
2688 * Changing node specific huge page count may require a corresponding
2708 * page range allocation.
2728 * to convert a surplus huge page to a normal huge page. That is
2741 * page, free_huge_page will handle it by freeing the page
2905 * huge page alloc/free.
3207 * If we did not parse a default huge page size, set
3221 pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
3256 /* Overwritten by architectures with more huge page sizes */
3292 * be the first huge page command line option in which case it implicitly
3341 * A specific huge page size can only be specified once with hugepagesz.
3420 * page size is gigantic (>= MAX_ORDER), then the pages must be
3628 * When cpuset is configured, it breaks the strict hugetlb page
3631 * the reservation is not checked against page availability for the
3633 * with lack of free htlb page in cpuset that the task is in.
3640 * we fall back to check against current free page availability as
3745 * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3756 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3762 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3765 entry = huge_pte_wrprotect(mk_huge_pte(page,
3770 entry = arch_make_huge_pte(entry, vma, page, writable);
3815 struct page *ptepage;
3858 * Check here before taking page table lock, and once again
3895 * No need to notify as we are downgrading page
3897 * to a new page.
3924 struct page *ref_page)
3931 struct page *page;
3943 * to huge page.
3985 page = pte_page(pte);
3987 * If a reference page is supplied, it is because a specific
3988 * page is being unmapped, not a range. Ensure the page we
3989 * are about to unmap is the actual page of interest.
3992 if (page != ref_page) {
3997 * Mark the VMA as having unmapped its page so that
4007 set_page_dirty(page);
4010 page_remove_rmap(page, true);
4013 tlb_remove_page_size(tlb, page, huge_page_size(h));
4015 * Bail out after unmapping reference page if supplied
4028 * dropped and the last reference to the shared PMDs page might be
4032 * huge_pmd_unshare() relies on the exact page_count for the PMD page to
4033 * detect sharing, so we cannot defer the release of the page either.
4042 unsigned long end, struct page *ref_page)
4048 * test will fail on a vma being torn down, and not grab a page table
4060 unsigned long end, struct page *ref_page)
4085 * mapping it owns the reserve page for. The intention is to unmap the page
4090 struct page *page, unsigned long address)
4099 * from page cache lookup which is in HPAGE_SIZE units.
4120 * VMA is using the same page so check and skip such VMAs.
4126 * Unmap the page from other VMAs without their own reserves.
4128 * areas. This is because a future no-page fault on this VMA
4129 * could insert a zeroed page instead of the data existing
4134 address + huge_page_size(h), page);
4140 * Hugetlb_cow() should be called with page lock of the original hugepage held.
4142 * cannot race with other handlers or page migration.
4147 struct page *pagecache_page, spinlock_t *ptl)
4151 struct page *old_page, *new_page;
4161 /* If no-one else is actually using this page, avoid the copy
4162 * and just make the page writable */
4171 * perform a COW due to a shared page count, attempt to satisfy
4173 * page is used to determine if the reserve at this address was
4185 * Drop page table lock as buddy allocator may be called. It will
4195 * huge page pool. To guarantee the original mappers
4196 * reliability, unmap the page from child processes. The child
4230 * race occurs while re-acquiring page table
4258 * Retake the page table lock to check for racing updates
4259 * before the page tables are altered
4274 /* Make the old page be freed below */
4289 /* Return the pagecache page at a given address within a VMA */
4290 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
4303 * Return whether there is a pagecache page to back given address within VMA.
4311 struct page *page;
4316 page = find_get_page(mapping, idx);
4317 if (page)
4318 put_page(page);
4319 return page != NULL;
4322 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
4327 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
4331 ClearPagePrivate(page);
4334 * set page dirty so that it will not be removed from cache/file
4337 set_page_dirty(page);
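huge_add_to_page_cache() at lines 4322-4337 is shown without its error handling or the inode block accounting. A reconstruction of the likely full function; the i_blocks update is an assumption based on how hugetlbfs normally accounts pages:

int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);

	if (err)
		return err;
	ClearPagePrivate(page);

	/*
	 * Mark the page dirty so generic reclaim will not drop it from the
	 * page cache behind hugetlbfs's back.
	 */
	set_page_dirty(page);

	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);

	return 0;
}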
4354 struct page *page;
4382 page = find_lock_page(mapping, idx);
4383 if (!page) {
4385 * Check for page in userfault range
4412 page = alloc_huge_page(vma, haddr, 0);
4413 if (IS_ERR(page)) {
4417 * tasks from racing to fault in the same page which
4420 * does a clear then write of pte's under page table
4422 * notice the clear pte and try to allocate a page
4433 ret = vmf_error(PTR_ERR(page));
4436 clear_huge_page(page, address, pages_per_huge_page(h));
4437 __SetPageUptodate(page);
4441 int err = huge_add_to_page_cache(page, mapping, idx);
4443 put_page(page);
4449 lock_page(page);
4462 if (unlikely(PageHWPoison(page))) {
4471 * pending reservations for this page now. This will ensure that
4490 ClearPagePrivate(page);
4491 hugepage_add_new_anon_rmap(page, vma, haddr);
4493 page_dup_rmap(page, true);
4494 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
4501 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
4512 set_page_huge_active(page);
4514 unlock_page(page);
4523 unlock_page(page);
4524 restore_reserve_on_error(h, vma, haddr, page);
4525 put_page(page);
4561 struct page *page = NULL;
4562 struct page *pagecache_page = NULL;
4606 * the same page in the page cache.
4625 * an active hugepage in pagecache. This goto expects the 2nd page
4634 * reservations for this page now. This will ensure that any
4637 * page now as it is used to determine if a reservation has been
4660 * hugetlb_cow() requires page locks of pte_page(entry) and
4662 * when page != pagecache_page or !pagecache_page.
4664 page = pte_page(entry);
4665 if (page != pagecache_page)
4666 if (!trylock_page(page)) {
4671 get_page(page);
4686 if (page != pagecache_page)
4687 unlock_page(page);
4688 put_page(page);
4700 * Generally it's safe to hold a refcount while waiting for a page lock. But
4701 * here we only wait to defer the next page fault and avoid a busy loop, and
4702 * the page is not used after it is unlocked before returning from the current
4703 * page fault. So we are safe from accessing a freed page, even if we wait
4707 wait_on_page_locked(page);
4720 struct page **pagep)
4730 struct page *page;
4733 /* If a page already exists, then it's UFFDIO_COPY for
4742 page = alloc_huge_page(dst_vma, dst_addr, 0);
4743 if (IS_ERR(page)) {
4748 ret = copy_huge_page_from_user(page,
4755 *pagep = page;
4756 /* don't free the page */
4760 page = *pagep;
4766 * preceding stores to the page contents become visible before
4769 __SetPageUptodate(page);
4775 * If shared, add to page cache
4789 ret = huge_add_to_page_cache(page, mapping, idx);
4799 * to leave any page mapped (as page_mapped()) beyond the end
4802 * page in the radix tree in the vm_shared case beyond the end
4816 page_dup_rmap(page, true);
4818 ClearPagePrivate(page);
4819 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4822 _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4837 set_page_huge_active(page);
4839 unlock_page(page);
4846 unlock_page(page);
4848 put_page(page);
4853 struct page **pages, struct vm_area_struct **vmas,
4867 struct page *page;
4881 * first, for the page indexing below to work.
4883 * Note that page table lock is not held when pte is null.
4965 page = pte_page(huge_ptep_get(pte));
4983 pages[i] = mem_map_offset(page, pfn_offset);
4987 * that the huge page is present in the page tables. If
4988 * the huge page is present, then the tail pages must
4989 * also be present. The ptl prevents the head page and
4991 * page must be available at this point, unless the page
5013 * of this compound page.
5109 * may have cleared our pud entry and done put_page on the page table:
5111 * and that page table be reused and filled with junk. If we actually
5112 * did unshare a page of pmds, flush the range corresponding to the pud.
5120 * page table protection not changing it to point to a new page.
5150 * attempt will be made for VM_NORESERVE to allocate a page
5228 * the reservation was and the page cache tells how much of
5346 * page table page.
5362 * check on proper vm_flags and page table alignment
5397 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
5403 * sharing is possible. For hugetlbfs, this prevents removal of any page
5405 * are setting up sharing based on existing page table entries (mappings).
5462 * unmap huge page backed by shared pte.
5464 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
5466 * decrementing the ref count. If count == 1, the pte page is not shared.
5468 * Called with page table lock held and i_mmap_rwsem held in write mode.
5470 * returns: 1 successfully unmapped a shared pte page
5471 * 0 the underlying pte page is not shared, or it is the last user
5490 * processing addresses in increments of huge page size (PMD_SIZE
5492 * Update address to the 'last page' in the cleared area so that
5493 * the calling loop can move to the first page past this area.
5549 * huge_pte_offset() - Walk the page table to resolve the hugepage
5552 * Return: Pointer to page table entry (PUD or PMD) for
5554 * size @sz doesn't match the hugepage size at this level of the page
5591 struct page * __weak
5598 struct page * __weak
5606 struct page * __weak
5611 struct page *page = NULL;
5628 page = pte_page(pte) +
5633 * huge pmd (head) page is present in the page tables. The ptl
5634 * prevents the head page and tail pages from being rearranged
5635 * in any way. So this page must be available at this point,
5636 * unless the page refcount overflowed:
5638 if (WARN_ON_ONCE(!try_grab_page(page, flags))) {
5639 page = NULL;
5655 return page;
5658 struct page * __weak
5668 struct page * __weak
5677 int isolate_hugetlb(struct page *page, struct list_head *list)
5682 if (!PageHeadHuge(page) || !page_huge_active(page) ||
5683 !get_page_unless_zero(page)) {
5687 clear_page_huge_active(page);
5688 list_move_tail(&page->lru, list);
5694 void putback_active_hugepage(struct page *page)
5696 VM_BUG_ON_PAGE(!PageHead(page), page);
5698 set_page_huge_active(page);
5699 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5701 put_page(page);
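isolate_hugetlb() at lines 5677-5688 is shown without its locking or failure path; presumably the whole check runs under hugetlb_lock and reports -EBUSY when the page cannot be isolated. A sketch:

int isolate_hugetlb(struct page *page, struct list_head *list)
{
	int ret = 0;

	spin_lock(&hugetlb_lock);
	if (!PageHeadHuge(page) || !page_huge_active(page) ||
	    !get_page_unless_zero(page)) {
		ret = -EBUSY;
		goto unlock;
	}
	clear_page_huge_active(page);
	list_move_tail(&page->lru, list);
unlock:
	spin_unlock(&hugetlb_lock);
	return ret;
}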
5704 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5712 * transfer temporary state of the new huge page. This is