Lines matching refs: pages
83 * So now that the head page is stable, recheck that the pages still
236 * that such pages can be separately tracked and uniquely handled. In
246 * unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages
247 * @pages: array of pages to be maybe marked dirty, and definitely released.
248 * @npages: number of pages in the @pages array.
249 * @make_dirty: whether to mark the pages dirty
254 * For each page in the @pages array, make that page (or its head page, if a
256 * listed as clean. In any case, releases all pages using unpin_user_page(),
267 void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
273 * TODO: this can be optimized for huge pages: if a series of pages is
279 unpin_user_pages(pages, npages);
284 struct page *page = compound_head(pages[index]);
313 * unpin_user_pages() - release an array of gup-pinned pages.
314 * @pages: array of pages to be marked dirty and released.
315 * @npages: number of pages in the @pages array.
317 * For each page in the @pages array, release the page using unpin_user_page().
321 void unpin_user_pages(struct page **pages, unsigned long npages)
326 * If this WARN_ON() fires, then the system *might* be leaking pages (by
333 * TODO: this can be optimized for huge pages: if a series of pages is
338 unpin_user_page(pages[index]);
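
A minimal usage sketch for the two release helpers listed above, unpin_user_pages_dirty_lock() and unpin_user_pages(): a driver completion path that drops FOLL_PIN pins, dirtying the pages only if the device actually wrote to them. The helper and variable names (my_release_pinned_pages, wrote_to_pages) are hypothetical, not part of gup.c.

    #include <linux/mm.h>

    /*
     * Drop pins taken earlier with a FOLL_PIN variant (pin_user_pages*()).
     * If the device wrote into the pages, mark them dirty while releasing;
     * otherwise just release them.
     */
    static void my_release_pinned_pages(struct page **pages,
                                        unsigned long npages,
                                        bool wrote_to_pages)
    {
            if (wrote_to_pages)
                    unpin_user_pages_dirty_lock(pages, npages, true);
            else
                    unpin_user_pages(pages, npages);
    }
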
348 * has touched so far, we don't want to allocate unnecessary pages or
454 * Only return device mapping pages in the FOLL_GET or FOLL_PIN
465 /* Avoid special (like zero) pages in core dumps */
535 lru_add_drain(); /* push cached pages to LRU */
747 * When getting pages from ZONE_DEVICE memory, the @ctx->pgmap caches
819 /* user gate pages are read-only */
872 /* mlock all present pages, but do not fault in new pages */
942 * Anon pages in shared mappings are surprising: now
968 * __get_user_pages() - pin user pages in memory
971 * @nr_pages: number of pages from start to pin
973 * @pages: array that receives pointers to the pages pinned.
975 * only intends to ensure the pages are faulted in.
980 * Returns either number of pages pinned (which may be less than the
984 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
985 * -- If nr_pages is >0, and some pages were pinned, returns the number of
986 * pages pinned. Again, this may be less than nr_pages.
989 * The caller is responsible for releasing returned @pages, via put_page().
1029 unsigned int gup_flags, struct page **pages,
1041 VM_BUG_ON(!!pages != !!(gup_flags & (FOLL_GET | FOLL_PIN)));
1062 pages ? &pages[i] : NULL);
1074 i = follow_hugetlb_page(mm, vma, pages, vmas,
1092 * If we have a pending SIGKILL, don't keep faulting pages and
1128 if (pages) {
1129 pages[i] = page;
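
The return-value rules documented above for __get_user_pages() (partial pins are possible, -errno only when nothing was pinned, release via put_page()) also apply to the public get_user_pages() wrapper. A hedged sketch of a caller honoring them, assuming the current get_user_pages() signature; my_gup_example and nr_want are illustrative names only.

    #include <linux/mm.h>
    #include <linux/sched.h>
    #include <linux/slab.h>

    static long my_gup_example(unsigned long uaddr, unsigned long nr_want)
    {
            struct page **pages;
            long pinned, i;

            pages = kcalloc(nr_want, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            mmap_read_lock(current->mm);
            pinned = get_user_pages(uaddr, nr_want, FOLL_WRITE, pages, NULL);
            mmap_read_unlock(current->mm);

            if (pinned < 0) {
                    /* Nothing was pinned at all. */
                    kfree(pages);
                    return pinned;
            }

            /* 0 <= pinned <= nr_want: use pages[0..pinned-1] here ... */

            for (i = 0; i < pinned; i++)
                    put_page(pages[i]);
            kfree(pages);
            return pinned;
    }
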
1256 struct page **pages,
1276 * is to set FOLL_GET if the caller wants pages[] filled in (but has
1280 * FOLL_PIN always expects pages to be non-null, but no need to assert
1283 if (pages && !(flags & FOLL_PIN))
1289 ret = __get_user_pages(mm, start, nr_pages, flags, pages,
1318 * For the prefault case (!pages) we only update counts.
1320 if (likely(pages))
1321 pages += ret;
1350 pages, NULL, locked);
1366 if (likely(pages))
1367 pages++;
1382 * populate_vma_page_range() - populate a range of pages in the vma.
1388 * This takes care of mlocking the pages too if VM_LOCKED is set.
1390 * Return either number of pages pinned in the vma, or a negative error
1441 * __mm_populate - populate and/or mlock pages within a range of address space.
1459 * We want to fault in pages for [nstart; end) address range.
1480 * Now fault in a range of pages. populate_vma_page_range()
1481 * double checks the vma flags, so that it won't mlock pages
1501 unsigned long nr_pages, struct page **pages,
1527 if (pages) {
1528 pages[i] = virt_to_page(start);
1529 if (pages[i])
1530 get_page(pages[i]);
1600 struct page **pages,
1619 head = compound_head(pages[i]);
1652 * If list is empty, and no isolation errors, means that all pages are
1663 unpin_user_pages(pages, nr_pages);
1666 put_page(pages[i]);
1677 /* We unpinned pages before migration, pin them again */
1678 ret = __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1686 * check again because pages were unpinned, and we also might have
1687 * had isolation errors and need more pages to migrate.
1695 struct page **pages,
1710 struct page **pages,
1719 if (!pages)
1732 rc = __get_user_pages_locked(mm, start, nr_pages, pages,
1741 unpin_user_pages(pages, rc);
1744 put_page(pages[i]);
1749 rc = check_and_migrate_cma_pages(mm, start, rc, pages,
1763 struct page **pages,
1767 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1794 unsigned int gup_flags, struct page **pages,
1811 return __gup_longterm_locked(mm, start, nr_pages, pages,
1816 return __get_user_pages_locked(mm, start, nr_pages, pages, vmas,
1822 * get_user_pages_remote() - pin user pages in memory
1825 * @nr_pages: number of pages from start to pin
1827 * @pages: array that receives pointers to the pages pinned.
1829 * only intends to ensure the pages are faulted in.
1836 * Returns either number of pages pinned (which may be less than the
1840 * -- If nr_pages is >0, but no pages were pinned, returns -errno.
1841 * -- If nr_pages is >0, and some pages were pinned, returns the number of
1842 * pages pinned. Again, this may be less than nr_pages.
1844 * The caller is responsible for releasing returned @pages, via put_page().
1870 * via the user virtual addresses. The pages may be submitted for
1883 unsigned int gup_flags, struct page **pages,
1890 pages, vmas, locked);
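
A hedged sketch of the remote case documented above: grabbing one page of a foreign mm, access_process_vm()-style, while honoring the @locked convention (if GUP had to drop mmap_lock it reports that through *locked, and the caller must not unlock again). my_get_remote_page is an illustrative name; the caller is assumed to already hold a reference on @mm.

    #include <linux/err.h>
    #include <linux/mm.h>

    static struct page *my_get_remote_page(struct mm_struct *mm,
                                           unsigned long addr)
    {
            struct page *page = NULL;
            int locked = 1;
            long ret;

            if (mmap_read_lock_killable(mm))
                    return ERR_PTR(-EINTR);

            ret = get_user_pages_remote(mm, addr, 1, FOLL_WRITE, &page,
                                        NULL, &locked);
            if (locked)
                    mmap_read_unlock(mm);

            if (ret < 1)
                    return ret < 0 ? ERR_PTR(ret) : ERR_PTR(-EFAULT);

            return page;    /* released later with put_page() */
    }
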
1897 unsigned int gup_flags, struct page **pages,
1905 unsigned int gup_flags, struct page **pages,
1913 * get_user_pages() - pin user pages in memory
1915 * @nr_pages: number of pages from start to pin
1917 * @pages: array that receives pointers to the pages pinned.
1919 * only intends to ensure the pages are faulted in.
1929 unsigned int gup_flags, struct page **pages,
1936 pages, vmas, gup_flags | FOLL_TOUCH);
1945 * get_user_pages(mm, ..., pages, NULL);
1953 * get_user_pages_locked(mm, ..., pages, &locked);
1958 * @nr_pages: number of pages from start to pin
1960 * @pages: array that receives pointers to the pages pinned.
1962 * only intends to ensure the pages are faulted in.
1973 unsigned int gup_flags, struct page **pages,
1992 pages, NULL, locked,
2001 * get_user_pages(mm, ..., pages, NULL);
2006 * get_user_pages_unlocked(mm, ..., pages);
2013 struct page **pages, unsigned int gup_flags)
2029 ret = __get_user_pages_locked(mm, start, nr_pages, pages, NULL,
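
The two comment fragments above (around source lines 1945-1953 and 2001-2006) sketch the conversion these helpers enable. A hedged before/after of the same pin written with get_user_pages_locked() and with get_user_pages_unlocked(), using the current signatures (no mm argument); the my_* names are illustrative.

    #include <linux/mm.h>
    #include <linux/sched.h>

    /* "Locked" form: GUP may drop and retake mmap_lock across faults,
     * so check @locked before unlocking. */
    static long my_gup_locked(unsigned long addr, unsigned long n,
                              struct page **pages)
    {
            int locked = 1;
            long ret;

            mmap_read_lock(current->mm);
            ret = get_user_pages_locked(addr, n, FOLL_WRITE, pages, &locked);
            if (locked)
                    mmap_read_unlock(current->mm);
            return ret;
    }

    /* "Unlocked" form: the locking is hidden entirely. */
    static long my_gup_unlocked(unsigned long addr, unsigned long n,
                                struct page **pages)
    {
            return get_user_pages_unlocked(addr, n, pages, FOLL_WRITE);
    }
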
2040 * get_user_pages_fast attempts to pin user pages by walking the page
2042 * protected from page table pages being freed from under it, and should
2047 * pages are freed. This is unsuitable for architectures that do not need
2050 * Another way to achieve this is to batch up page table containing pages
2052 * pages. Disabling interrupts will allow the fast_gup walker to both block
2060 * free pages containing page tables or TLB flushing requires IPI broadcast.
2129 struct page **pages)
2132 struct page *page = pages[--(*nr)];
2164 struct page **pages, int *nr)
2191 undo_dev_pagemap(nr, nr_start, flags, pages);
2226 pages[*nr] = page;
2247 * get_user_pages_fast_only implementation that can pin pages. Thus it's still
2252 struct page **pages, int *nr)
2261 struct page **pages, int *nr)
2271 undo_dev_pagemap(nr, nr_start, flags, pages);
2275 pages[*nr] = page;
2277 undo_dev_pagemap(nr, nr_start, flags, pages);
2291 struct page **pages, int *nr)
2297 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2301 undo_dev_pagemap(nr, nr_start, flags, pages);
2309 struct page **pages, int *nr)
2315 if (!__gup_device_huge(fault_pfn, addr, end, flags, pages, nr))
2319 undo_dev_pagemap(nr, nr_start, flags, pages);
2327 struct page **pages, int *nr)
2335 struct page **pages, int *nr)
2343 unsigned long end, struct page **pages)
2348 pages[nr++] = page++;
2363 struct page **pages, int *nr)
2384 refs = record_subpages(page, addr, end, pages + *nr);
2402 struct page **pages, int *nr)
2411 if (!gup_hugepte(ptep, sz, addr, end, flags, pages, nr))
2420 struct page **pages, int *nr)
2428 struct page **pages, int *nr)
2440 pages, nr);
2444 refs = record_subpages(page, addr, end, pages + *nr);
2462 struct page **pages, int *nr)
2474 pages, nr);
2478 refs = record_subpages(page, addr, end, pages + *nr);
2496 struct page **pages, int *nr)
2507 refs = record_subpages(page, addr, end, pages + *nr);
2524 unsigned int flags, struct page **pages, int *nr)
2548 pages, nr))
2557 PMD_SHIFT, next, flags, pages, nr))
2559 } else if (!gup_pte_range(pmd, pmdp, addr, next, flags, pages, nr))
2567 unsigned int flags, struct page **pages, int *nr)
2581 pages, nr))
2585 PUD_SHIFT, next, flags, pages, nr))
2587 } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr))
2595 unsigned int flags, struct page **pages, int *nr)
2610 P4D_SHIFT, next, flags, pages, nr))
2612 } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr))
2620 unsigned int flags, struct page **pages, int *nr)
2634 pages, nr))
2638 PGDIR_SHIFT, next, flags, pages, nr))
2640 } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr))
2646 unsigned int flags, struct page **pages, int *nr)
2663 unsigned int gup_flags, struct page **pages)
2675 pages, NULL, gup_flags);
2679 pages, gup_flags);
2688 struct page **pages)
2708 * With interrupts disabled, we block page table pages from being freed
2716 gup_pgd_range(start, end, gup_flags, pages, &nr_pinned);
2720 * When pinning pages for DMA there could be a concurrent write protect
2725 unpin_user_pages(pages, nr_pinned);
2735 struct page **pages)
2759 nr_pinned = lockless_pages_from_mm(start, end, gup_flags, pages);
2763 /* Slow path: try to get the remaining pages with get_user_pages */
2765 pages += nr_pinned;
2767 pages);
2770 * The caller has to unpin the pages we already pinned so
2781 * get_user_pages_fast_only() - pin user pages in memory
2783 * @nr_pages: number of pages from start to pin
2785 * @pages: array that receives pointers to the pages pinned.
2791 * number of pages pinned, 0 if no pages were pinned.
2794 * pages pinned.
2801 unsigned int gup_flags, struct page **pages)
2814 pages);
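
A sketch of the kind of opportunistic caller get_user_pages_fast_only() is intended for: a path that must not fault, sleep, or touch mmap_lock (roughly the pattern of KVM's fast hva-to-pfn lookup). my_try_get_present_page is an illustrative name.

    #include <linux/mm.h>

    /* Returns a referenced page only if it is already present with the
     * required permissions; NULL means "fall back to a slower path". */
    static struct page *my_try_get_present_page(unsigned long addr, bool write)
    {
            unsigned int flags = write ? FOLL_WRITE : 0;
            struct page *page;

            if (get_user_pages_fast_only(addr, 1, flags, &page) != 1)
                    return NULL;

            return page;    /* drop with put_page() when done */
    }
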
2830 * get_user_pages_fast() - pin user pages in memory
2832 * @nr_pages: number of pages from start to pin
2834 * @pages: array that receives pointers to the pages pinned.
2837 * Attempt to pin user pages in memory without taking mm->mmap_lock.
2841 * Returns number of pages pinned. This may be fewer than the number requested.
2842 * If nr_pages is 0 or negative, returns 0. If no pages were pinned, returns
2846 unsigned int gup_flags, struct page **pages)
2858 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
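
A hedged example of the typical get_user_pages_fast() caller: pin the pages spanning a user buffer without taking mmap_lock in the caller (the function falls back to the slow path internally when needed). FOLL_WRITE is used because data will be written into the buffer; references taken this way are dropped with put_page(). my_pin_buffer_fast and max_pages are illustrative.

    #include <linux/mm.h>

    static int my_pin_buffer_fast(unsigned long uaddr, size_t len,
                                  struct page **pages, int max_pages)
    {
            unsigned long first, last;
            int nr;

            if (!len)
                    return 0;

            first = uaddr >> PAGE_SHIFT;
            last = (uaddr + len - 1) >> PAGE_SHIFT;
            nr = last - first + 1;
            if (nr > max_pages)
                    return -EINVAL;

            /* Returns the number pinned (possibly fewer), or -errno. */
            return get_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
    }
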
2863 * pin_user_pages_fast() - pin user pages in memory without taking locks
2866 * @nr_pages: number of pages from start to pin
2868 * @pages: array that receives pointers to the pages pinned.
2875 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2879 unsigned int gup_flags, struct page **pages)
2886 return internal_get_user_pages_fast(start, nr_pages, gup_flags, pages);
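
The FOLL_PIN counterpart of the previous sketch, for pages a device will DMA into: pin_user_pages_fast() applies FOLL_PIN itself, and pins taken here must be dropped with unpin_user_page()/unpin_user_pages_dirty_lock(), never put_page(). my_pin_dma_pages is an illustrative name.

    #include <linux/mm.h>

    static int my_pin_dma_pages(unsigned long uaddr, int nr,
                                struct page **pages)
    {
            /* FOLL_WRITE: the device will write into these pages. */
            return pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);
    }
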
2897 unsigned int gup_flags, struct page **pages)
2913 pages);
2927 * pin_user_pages_remote() - pin pages of a remote process
2931 * @nr_pages: number of pages from start to pin
2933 * @pages: array that receives pointers to the pages pinned.
2935 * only intends to ensure the pages are faulted in.
2946 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2951 unsigned int gup_flags, struct page **pages,
2960 pages, vmas, locked);
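
A hedged sketch of remote pinning as documented above, in the style of a vfio-like user of pin_user_pages_remote(): pin one page of a foreign mm for device access, with @locked left NULL so mmap_lock stays held across the call. The caller is assumed to hold a reference on @mm; my_pin_remote_page is an illustrative name.

    #include <linux/err.h>
    #include <linux/mm.h>

    static struct page *my_pin_remote_page(struct mm_struct *mm,
                                           unsigned long addr, bool write)
    {
            unsigned int flags = write ? FOLL_WRITE : 0;
            struct page *page = NULL;
            long ret;

            mmap_read_lock(mm);
            ret = pin_user_pages_remote(mm, addr, 1, flags | FOLL_LONGTERM,
                                        &page, NULL, NULL);
            mmap_read_unlock(mm);

            /* Drop later with unpin_user_page(), not put_page(). */
            return ret == 1 ? page : ERR_PTR(ret < 0 ? ret : -EFAULT);
    }
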
2965 * pin_user_pages() - pin user pages in memory for use by other devices
2968 * @nr_pages: number of pages from start to pin
2970 * @pages: array that receives pointers to the pages pinned.
2972 * only intends to ensure the pages are faulted in.
2979 * FOLL_PIN means that the pages must be released via unpin_user_page(). Please
2983 unsigned int gup_flags, struct page **pages,
2992 pages, vmas, gup_flags);
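
A hedged sketch of a long-term registration with pin_user_pages(), io_uring/RDMA style: in this version of the code the caller holds mmap_lock for read around the call, passes FOLL_LONGTERM so that CMA/DAX placement is checked (see check_and_migrate_cma_pages() above), and later releases the pins with unpin_user_pages(). my_register_user_buffer is an illustrative name.

    #include <linux/mm.h>
    #include <linux/sched.h>

    static long my_register_user_buffer(unsigned long uaddr, unsigned long nr,
                                        struct page **pages)
    {
            long pinned;

            mmap_read_lock(current->mm);
            pinned = pin_user_pages(uaddr, nr, FOLL_WRITE | FOLL_LONGTERM,
                                    pages, NULL);
            mmap_read_unlock(current->mm);

            return pinned;  /* number pinned (possibly partial) or -errno */
    }
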
3002 struct page **pages, unsigned int gup_flags)
3009 return get_user_pages_unlocked(start, nr_pages, pages, gup_flags);
3019 unsigned int gup_flags, struct page **pages,
3037 pages, NULL, locked,