Lines matching references to "page" in mm/gup.c

33 static inline void sanity_check_pinned_pages(struct page **pages,
42 * stick around until the page is freed.
47 * THP we can assume that either the given page (PTE-mapped THP) or
48 * the head page (PMD-mapped THP) should be PageAnonExclusive(). If
52 struct page *page = *pages;
53 struct folio *folio = page_folio(page);
55 if (is_zero_page(page) ||
59 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page), page);
62 VM_BUG_ON_PAGE(!PageAnonExclusive(&folio->page) &&
63 !PageAnonExclusive(page), page);
71 static inline struct folio *try_get_folio(struct page *page, int refs)
76 folio = page_folio(page);
86 * holding a reference on a folio that has nothing to do with the page
88 * So now that the folio is stable, recheck that the page still
91 if (unlikely(page_folio(page) != folio)) {
92 if (!put_devmap_managed_page_refs(&folio->page, refs))
102 * @page: pointer to page to be grabbed
118 * FOLL_PIN on single-page folios: folio's refcount will be incremented by
121 * Return: The folio containing @page (with refcount appropriately
126 struct folio *try_grab_folio(struct page *page, int refs, unsigned int flags)
133 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
137 return try_get_folio(page, refs);
142 * Don't take a pin on the zero page - it's not going anywhere
145 if (is_zero_page(page))
146 return page_folio(page);
148 folio = try_get_folio(page, refs);
159 if (!put_devmap_managed_page_refs(&folio->page, refs))
201 if (!put_devmap_managed_page_refs(&folio->page, refs))
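The try_grab_folio()/gup_put_folio() lines above implement the FOLL_PIN refcount bias that the rest of the kernel can later query. A minimal sketch, assuming only the public folio_maybe_dma_pinned() helper from linux/mm.h (the demo_* name is made up):

#include <linux/mm.h>

/*
 * The biased refcount left behind by a FOLL_PIN grab is what
 * folio_maybe_dma_pinned() inspects.  False positives are possible for
 * heavily referenced small folios, so treat the result as a hint.
 */
static bool demo_folio_probably_pinned(struct page *page)
{
        return folio_maybe_dma_pinned(page_folio(page));
}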
206 * try_grab_page() - elevate a page's refcount by a flag-dependent amount
207 * @page: pointer to page to be grabbed
213 * FOLL_PIN or FOLL_GET behavior, when incrementing the page's refcount.
222 * -ENOMEM FOLL_GET or FOLL_PIN was set, but the page could not
225 int __must_check try_grab_page(struct page *page, unsigned int flags)
227 struct folio *folio = page_folio(page);
232 if (unlikely(!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)))
239 * Don't take a pin on the zero page - it's not going anywhere
242 if (is_zero_page(page))
247 * increment the normal page refcount field at least once,
248 * so that the page really is pinned.
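try_grab_page(), shown above, elevates the refcount according to FOLL_GET or FOLL_PIN, and the grab must be dropped with the matching release. A hypothetical helper, for illustration only and not part of this file:

#include <linux/mm.h>

/*
 * Illustration: a FOLL_PIN grab is released with unpin_user_page(),
 * a FOLL_GET grab with put_page().
 */
static void demo_release_grabbed_page(struct page *page, unsigned int flags)
{
        if (flags & FOLL_PIN)
                unpin_user_page(page);
        else if (flags & FOLL_GET)
                put_page(page);
}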
264 * unpin_user_page() - release a dma-pinned page
265 * @page: pointer to page to be released
272 void unpin_user_page(struct page *page)
274 sanity_check_pinned_pages(&page, 1);
275 gup_put_folio(page_folio(page), 1, FOLL_PIN);
293 * page refcount field at least once, so that the page really is
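unpin_user_page() is the release half of the FOLL_PIN API; callers outside this file normally pair it with one of the pin_user_pages*() entry points. A minimal driver-style sketch (the demo_* name is hypothetical, error handling trimmed):

#include <linux/errno.h>
#include <linux/mm.h>

/* Pin one user page for short-lived DMA, then drop the FOLL_PIN reference. */
static int demo_pin_one_user_page(unsigned long user_addr)
{
        struct page *page;
        int ret;

        ret = pin_user_pages_fast(user_addr, 1, FOLL_WRITE, &page);
        if (ret != 1)
                return ret < 0 ? ret : -EFAULT;

        /* ... program and complete DMA against the pinned page ... */

        unpin_user_page(page);
        return 0;
}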
306 static inline struct folio *gup_folio_range_next(struct page *start,
309 struct page *next = nth_page(start, i);
321 static inline struct folio *gup_folio_next(struct page **list,
342 * "gup-pinned page" refers to a page that has had one of the get_user_pages()
343 * variants called on that page.
345 * For each page in the @pages array, make that page (or its head page, if a
346 * compound page) dirty, if @make_dirty is true, and if the page was previously
358 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
378 * 1) This code sees the page as already dirty, so it
382 * However, now the page is going to get written back,
385 * on to call TestClearPageDirty(), and write the page
388 * 2) This code sees the page as clean, so it calls
389 * set_page_dirty(). The page stays dirty, despite being
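unpin_user_pages_dirty_lock() folds the set_page_dirty_lock() and unpin steps described above into one call. A usage sketch, under the assumption that the kernel wrote into every page of the batch:

#include <linux/mm.h>

/*
 * Release a batch of FOLL_PIN pages the kernel wrote to.  make_dirty == true
 * has the helper dirty each (head) page before unpinning, replacing an
 * open-coded set_page_dirty_lock() + unpin_user_page() loop.
 */
static void demo_put_written_pages(struct page **pages, unsigned long npages)
{
        unpin_user_pages_dirty_lock(pages, npages, true);
}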
405 * gup-pinned page range
407 * @page: the starting page of a range maybe marked dirty, and definitely released.
411 * "gup-pinned page range" refers to a range of pages that has had one of the
412 * pin_user_pages() variants called on that page.
414 * For the page ranges defined by [page .. page+npages], make that range (or
415 * its head pages, if a compound page) dirty, if @make_dirty is true, and if the
416 * page range was previously listed as clean.
424 void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages,
432 folio = gup_folio_range_next(page, npages, i, &nr);
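unpin_user_page_range_dirty_lock() does the same for a physically contiguous run of pinned pages, so no pages[] array is needed. A sketch, assuming @first begins such a contiguous pinned range:

#include <linux/mm.h>

/* Dirty and unpin npages of a contiguous pinned range in a single call. */
static void demo_put_written_range(struct page *first, unsigned long npages)
{
        unpin_user_page_range_dirty_lock(first, npages, true);
}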
443 static void unpin_user_pages_lockless(struct page **pages, unsigned long npages)
465 * For each page in the @pages array, release the page using unpin_user_page().
469 void unpin_user_pages(struct page **pages, unsigned long npages)
503 static struct page *no_page_table(struct vm_area_struct *vma,
509 * page tables. Return error instead of NULL to skip handle_mm_fault,
537 /* Proper page table entry exists, but no corresponding struct page */
542 static inline bool can_follow_write_pte(pte_t pte, struct page *page,
546 /* If the pte is writable, we can write to the page. */
567 * See can_change_pte_writable(): we broke COW and could map the page
568 * writable if we have an exclusive anonymous page ...
570 if (!page || !PageAnon(page) || !PageAnonExclusive(page))
579 static struct page *follow_page_pte(struct vm_area_struct *vma,
584 struct page *page;
603 page = vm_normal_page(vma, address, pte);
610 !can_follow_write_pte(pte, page, vma, flags)) {
611 page = NULL;
615 if (!page && pte_devmap(pte) && (flags & (FOLL_GET | FOLL_PIN))) {
623 page = pte_page(pte);
626 } else if (unlikely(!page)) {
629 page = ERR_PTR(-EFAULT);
634 page = pte_page(pte);
637 page = ERR_PTR(ret);
642 if (!pte_write(pte) && gup_must_unshare(vma, flags, page)) {
643 page = ERR_PTR(-EMLINK);
647 VM_BUG_ON_PAGE((flags & FOLL_PIN) && PageAnon(page) &&
648 !PageAnonExclusive(page), page);
651 ret = try_grab_page(page, flags);
653 page = ERR_PTR(ret);
658 * We need to make the page accessible if and only if we are going
663 ret = arch_make_page_accessible(page);
665 unpin_user_page(page);
666 page = ERR_PTR(ret);
672 !pte_dirty(pte) && !PageDirty(page))
673 set_page_dirty(page);
679 mark_page_accessed(page);
683 return page;
691 static struct page *follow_pmd_mask(struct vm_area_struct *vma,
698 struct page *page;
709 page = follow_devmap_pmd(vma, address, pmd, flags, &ctx->pgmap);
711 if (page)
712 return page;
732 /* If pmd was left empty, stuff a page table in there quickly */
736 page = follow_trans_huge_pmd(vma, address, pmd, flags);
739 return page;
742 static struct page *follow_pud_mask(struct vm_area_struct *vma,
749 struct page *page;
757 page = follow_devmap_pud(vma, address, pud, flags, &ctx->pgmap);
759 if (page)
760 return page;
768 static struct page *follow_p4d_mask(struct vm_area_struct *vma,
786 * follow_page_mask - look up a page descriptor from a user-virtual address
798 * When getting an anonymous page and the caller has to trigger unsharing
799 * of a shared anonymous page first, -EMLINK is returned. The caller should
803 * On output, the @ctx->page_mask is set according to the size of the page.
805 * Return: the mapped (struct page *), %NULL if no mapping exists, or
807 * by a page descriptor (see also vm_normal_page()).
809 static struct page *follow_page_mask(struct vm_area_struct *vma,
820 * special hugetlb page table walking code. This eliminates the
835 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
839 struct page *page;
851 page = follow_page_mask(vma, address, foll_flags, &ctx);
854 return page;
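follow_page() is the thin wrapper over follow_page_mask() that in-kernel walkers such as KSM use. A lookup sketch, assuming the caller already holds the mmap lock for vma->vm_mm (the demo_* name is made up):

#include <linux/err.h>
#include <linux/mm.h>

/*
 * Look up the page mapped at @addr in @vma and take a FOLL_GET reference.
 * The caller holds vma->vm_mm's mmap lock and later drops the reference
 * with put_page(); follow_page() may return NULL or an ERR_PTR().
 */
static struct page *demo_lookup_mapped_page(struct vm_area_struct *vma,
                                            unsigned long addr)
{
        struct page *page = follow_page(vma, addr, FOLL_GET);

        return IS_ERR_OR_NULL(page) ? NULL : page;
}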
859 struct page **page)
894 if (!page)
896 *page = vm_normal_page(*vma, address, entry);
897 if (!*page) {
900 *page = pte_page(entry);
902 ret = try_grab_page(*page, gup_flags);
961 * mmap lock in the page fault handler. Sanity check this.
969 * what has happened - we've just fully completed a page
1006 * 4. The GUP caller, now done with the page, unpins it and sets it dirty
1154 * __get_user_pages walks a process's page tables and takes a reference to
1155 * each struct page that each user address corresponds to at a given
1156 * instant. That is, it takes the page that would be accessed if a user
1159 * This does not guarantee that the page exists in the user mappings when
1161 * page there in some cases (eg. if mmapped pagecache has been invalidated
1162 * and subsequently re-faulted). However it does guarantee that the page
1163 * won't be freed completely. And mostly callers simply care that the page
1168 * If @gup_flags & FOLL_WRITE == 0, the page must not be written to. If
1169 * the page is written to, set_page_dirty (or set_page_dirty_lock, as
1170 * appropriate) must be called after the page is finished with, and
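The contract spelled out above (dirty the page after writing to it, before dropping the reference) looks like this from the caller's side. A sketch assuming the four-argument get_user_pages() in this version of the file, i.e. no vmas parameter; the demo_* helper is hypothetical:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/string.h>

/*
 * Fault in one page of the current task, write a byte through a kernel
 * mapping, then follow the rule above: set_page_dirty_lock() before the
 * final put_page().
 */
static int demo_poke_user_page(unsigned long user_addr, u8 value)
{
        struct page *page;
        void *kaddr;
        long got;

        mmap_read_lock(current->mm);
        got = get_user_pages(user_addr, 1, FOLL_WRITE, &page);
        mmap_read_unlock(current->mm);
        if (got != 1)
                return got < 0 ? (int)got : -EFAULT;

        kaddr = kmap_local_page(page);
        memset(kaddr + offset_in_page(user_addr), value, 1);
        kunmap_local(kaddr);

        set_page_dirty_lock(page);      /* the page was written to */
        put_page(page);                 /* drop the FOLL_GET reference */
        return 0;
}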
1186 unsigned int gup_flags, struct page **pages,
1201 struct page *page;
1211 pages ? &page : NULL);
1237 page = follow_page_mask(vma, start, foll_flags, &ctx);
1238 if (!page || PTR_ERR(page) == -EMLINK) {
1240 PTR_ERR(page) == -EMLINK, locked);
1254 } else if (PTR_ERR(page) == -EEXIST) {
1256 * Proper page table entry exists, but no corresponding
1257 * struct page. If the caller expects **pages to be
1259 * for this page.
1262 ret = PTR_ERR(page);
1265 } else if (IS_ERR(page)) {
1266 ret = PTR_ERR(page);
1275 struct page *subpage;
1283 * NOTE: here the page may not be the head page
1295 folio = try_grab_folio(page, page_increm - 1,
1299 * Release the 1st page ref if the
1302 gup_put_folio(page_folio(page), 1,
1310 subpage = nth_page(page, j);
1351 * fixup_user_fault() - manually resolve a user page fault
1369 * get_user_pages() only guarantees to update these in the struct page.
1372 * access permission to the page because they are maintained in software. On
1467 struct page **pages,
1673 * faultin_vma_page_range() - populate (prefault) page tables inside the
1687 * vma->vm_mm->mmap_lock must be held. The range must be page-aligned and
1705 * FOLL_TOUCH: Mark page accessed and thereby young; will also mark
1706 * the page dirty with FOLL_WRITE -- which doesn't make a
1707 * difference with !FOLL_FORCE, because the page is writable
1708 * in the page table.
1710 * a poisoned page.
1792 unsigned long nr_pages, struct page **pages,
1892 * sub-page granularity (e.g. arm64 MTE). This function should be used when
1903 * Attempt faulting in at page granularity first for page table
1998 * get_dump_page() - pin user page in memory while writing it to core dump
2001 * Returns struct page pointer of user page pinned for dump,
2012 struct page *get_dump_page(unsigned long addr)
2014 struct page *page;
2018 ret = __get_user_pages_locked(current->mm, addr, 1, &page, &locked,
2020 return (ret == 1) ? page : NULL;
2031 struct page **pages)
2082 struct page **pages)
2092 * Migration will fail if the page is pinned, so convert
2093 * the pin on the source page to a normal reference.
2099 if (migrate_device_coherent_page(&folio->page)) {
2112 * page won't be freed if it's migrating.
2162 struct page **pages)
2177 struct page **pages)
2190 struct page **pages,
2222 static bool is_valid_gup_args(struct page **pages, int *locked,
2295 * get_user_pages_remote walks a process's page tables and takes a reference
2296 * to each struct page that each user address corresponds to at a given
2297 * instant. That is, it takes the page that would be accessed if a user
2300 * This does not guarantee that the page exists in the user mappings when
2302 * page there in some cases (eg. if mmapped pagecache has been invalidated
2303 * and subsequently re-faulted). However it does guarantee that the page
2304 * won't be freed completely. And mostly callers simply care that the page
2309 * If gup_flags & FOLL_WRITE == 0, the page must not be written to. If the page
2311 * be called after the page is finished with, and before put_page is called.
2328 unsigned int gup_flags, struct page **pages,
2346 unsigned int gup_flags, struct page **pages,
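A caller-side sketch for get_user_pages_remote(), assuming this file's six-argument form (mm, start, nr_pages, gup_flags, pages, locked); passing a NULL locked pointer keeps the mmap lock held across the call, and the demo_* helper is hypothetical:

#include <linux/mm.h>
#include <linux/sched/mm.h>

/*
 * Grab one page of another process's address space.  The caller holds a
 * reference on @mm (e.g. from get_task_mm()) and releases the page with
 * put_page().  With pages[] supplied and FOLL_PIN not set, GUP takes a
 * FOLL_GET reference.
 */
static struct page *demo_grab_remote_page(struct mm_struct *mm,
                                          unsigned long addr)
{
        struct page *page;
        long got;

        if (mmap_read_lock_killable(mm))
                return NULL;
        got = get_user_pages_remote(mm, addr, 1, 0, &page, NULL);
        mmap_read_unlock(mm);

        return got == 1 ? page : NULL;
}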
2368 unsigned int gup_flags, struct page **pages)
2396 struct page **pages, unsigned int gup_flags)
2412 * get_user_pages_fast attempts to pin user pages by walking the page
2414 * protected from page table pages being freed from under it, and should
2418 * rely on IPIs from the TLB flushing code blocking before the page table
2422 * Another way to achieve this is to batch up page table containing pages
2432 * free pages containing page tables or TLB flushing requires IPI broadcast.
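From the caller's side the fast variant needs no mmap lock at all. A sketch using get_user_pages_fast(), whose references are plain FOLL_GET ones released with put_page() (the demo_* name is made up):

#include <linux/mm.h>

/* Grab up to @npages user pages without taking the mmap lock. */
static int demo_gup_fast(unsigned long user_addr, int npages,
                         struct page **pages)
{
        int got = get_user_pages_fast(user_addr, npages, FOLL_WRITE, pages);

        if (got < 0)
                return got;

        /* ... use pages[0 .. got - 1] ... */

        while (got--)
                put_page(pages[got]);
        return 0;
}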
2448 * This call assumes the caller has pinned the folio, that the lowest page table
2522 struct page **pages)
2525 struct page *page = pages[--(*nr)];
2527 ClearPageReferenced(page);
2529 unpin_user_page(page);
2531 put_page(page);
2540 * To pin the page, fast-gup needs to do below in order:
2541 * (1) pin the page (by prefetching pte), then (2) check pte not changed.
2544 * with fast-gup, we need to do (1) clear pte, then (2) check whether page
2550 * walking a pgtable page that is being freed (pte is still valid but pmd
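A simplified sketch of that (1) pin, (2) re-check ordering, relying on this file's internal try_grab_folio()/gup_put_folio(); it omits the devmap, writability, unshare and anon-exclusive checks that the real gup_pte_range() also performs:

/*
 * Simplified ordering sketch:
 * (1) take the reference, (2) re-read the PTE and back out if it changed.
 */
static bool demo_try_pin_pte(pte_t *ptep, unsigned int flags,
                             struct page **pagep)
{
        pte_t pte = ptep_get_lockless(ptep);
        struct page *page;
        struct folio *folio;

        if (!pte_present(pte))
                return false;

        page = pte_page(pte);
        folio = try_grab_folio(page, 1, flags);         /* step (1) */
        if (!folio)
                return false;

        /* step (2): the PTE must still be the one we pinned through */
        if (unlikely(pte_val(ptep_get(ptep)) != pte_val(pte))) {
                gup_put_folio(folio, 1, flags);
                return false;
        }

        *pagep = page;
        return true;
}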
2557 struct page **pages, int *nr)
2568 struct page *page;
2597 page = pte_page(pte);
2599 folio = try_grab_folio(page, 1, flags);
2619 if (!pte_write(pte) && gup_must_unshare(NULL, flags, page)) {
2625 * We need to make the page accessible if and only if we are
2631 ret = arch_make_page_accessible(page);
2638 pages[*nr] = page;
2657 * For a futex to be placed on a THP tail page, get_futex_key requires a
2663 struct page **pages, int *nr)
2672 struct page **pages, int *nr)
2678 struct page *page = pfn_to_page(pfn);
2686 if (!(flags & FOLL_PCI_P2PDMA) && is_pci_p2pdma_page(page)) {
2691 SetPageReferenced(page);
2692 pages[*nr] = page;
2693 if (unlikely(try_grab_page(page, flags))) {
2707 struct page **pages, int *nr)
2725 struct page **pages, int *nr)
2743 struct page **pages, int *nr)
2751 struct page **pages, int *nr)
2758 static int record_subpages(struct page *page, unsigned long addr,
2759 unsigned long end, struct page **pages)
2764 pages[nr] = nth_page(page, nr);
2779 struct page **pages, int *nr)
2782 struct page *page;
2799 page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT);
2800 refs = record_subpages(page, addr, end, pages + *nr);
2802 folio = try_grab_folio(page, refs, flags);
2816 if (!pte_write(pte) && gup_must_unshare(NULL, flags, &folio->page)) {
2828 struct page **pages, int *nr)
2846 struct page **pages, int *nr)
2854 struct page **pages, int *nr)
2856 struct page *page;
2870 page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT);
2871 refs = record_subpages(page, addr, end, pages + *nr);
2873 folio = try_grab_folio(page, refs, flags);
2886 if (!pmd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2898 struct page **pages, int *nr)
2900 struct page *page;
2914 page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT);
2915 refs = record_subpages(page, addr, end, pages + *nr);
2917 folio = try_grab_folio(page, refs, flags);
2931 if (!pud_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2943 struct page **pages, int *nr)
2946 struct page *page;
2954 page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT);
2955 refs = record_subpages(page, addr, end, pages + *nr);
2957 folio = try_grab_folio(page, refs, flags);
2966 if (!pgd_write(orig) && gup_must_unshare(NULL, flags, &folio->page)) {
2982 unsigned int flags, struct page **pages, int *nr)
3021 unsigned int flags, struct page **pages, int *nr)
3049 unsigned int flags, struct page **pages, int *nr)
3074 unsigned int flags, struct page **pages, int *nr)
3100 unsigned int flags, struct page **pages, int *nr)
3119 struct page **pages)
3139 * With interrupts disabled, we block page table pages from being freed
3168 struct page **pages)
3233 * access can get ambiguous page results. If you call this function without
3237 unsigned int gup_flags, struct page **pages)
3241 * because gup fast is always a "pin with a +1 page refcount" request.
3271 unsigned int gup_flags, struct page **pages)
3276 * FOLL_GET, because gup fast is always a "pin with a +1 page refcount"
3305 unsigned int gup_flags, struct page **pages)
3338 unsigned int gup_flags, struct page **pages,
3371 unsigned int gup_flags, struct page **pages)
3391 struct page **pages, unsigned int gup_flags)