/kernel/linux/linux-6.6/drivers/iommu/iommufd/
  pages.c
      69  comment: "... allocation can hold about 26M of 4k pages and 13G of 2M pages in an ..."
     163  iopt_pages_add_npinned(struct iopt_pages *pages, size_t npages)
     167  iopt_pages_add_npinned(): rc = check_add_overflow(pages->npinned, npages, &pages->npinned);
     169  iopt_pages_add_npinned(): WARN_ON(rc || pages->npinned > pages->npages);
     172  iopt_pages_sub_npinned(struct iopt_pages *pages, size_t npages)
     176  iopt_pages_sub_npinned(): rc = check_sub_overflow(pages->npinned, npages, &pages...
     181  iopt_pages_err_unpin(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **page_list)
     249  iopt_pages_find_domain_area(struct iopt_pages *pages, unsigned long index)
     575  pages_to_xarray(struct xarray *xa, unsigned long start_index, unsigned long last_index, struct page **pages)
     615  batch_from_pages(struct pfn_batch *batch, struct page **pages, size_t npages)
     625  batch_unpin(struct pfn_batch *batch, struct iopt_pages *pages, unsigned int first_page_off, size_t npages)
     708  pfn_reader_user_init(struct pfn_reader_user *user, struct iopt_pages *pages)
     721  pfn_reader_user_destroy(struct pfn_reader_user *user, struct iopt_pages *pages)
     736  pfn_reader_user_pin(struct pfn_reader_user *user, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)
     804  incr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
     822  decr_user_locked_vm(struct iopt_pages *pages, unsigned long npages)
     830  update_mm_locked_vm(struct iopt_pages *pages, unsigned long npages, bool inc, struct pfn_reader_user *user)
     857  do_update_pinned(struct iopt_pages *pages, unsigned long npages, bool inc, struct pfn_reader_user *user)
     886  update_unpinned(struct iopt_pages *pages)
     903  pfn_reader_user_update_pinned(struct pfn_reader_user *user, struct iopt_pages *pages)
     937  struct member: struct iopt_pages *pages;
     961  pfn_reader_unpin() local: struct iopt_pages *pages = pfns->pages;
    1062  pfn_reader_init(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)
    1093  pfn_reader_release_pins() local: struct iopt_pages *pages = pfns->pages;
    1113  pfn_reader_destroy() local: struct iopt_pages *pages = pfns->pages;
    1121  pfn_reader_first(struct pfn_reader *pfns, struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)
    1144  iopt_alloc_pages() local: struct iopt_pages *pages;
    1183  iopt_release_pages() local: struct iopt_pages *pages = container_of(kref, struct iopt_pages, kref);
    1197  iopt_area_unpin_domain(struct pfn_batch *batch, struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long start_index, unsigned long last_index, unsigned long *unmapped_end_index, unsigned long real_last_index)
    1257  __iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long last_index)
    1309  iopt_area_unfill_partial_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain, unsigned long end_index)
    1342  iopt_area_unfill_domain(struct iopt_area *area, struct iopt_pages *pages, struct iommu_domain *domain)
    1408  iopt_area_fill_domains(struct iopt_area *area, struct iopt_pages *pages)
    1492  iopt_area_unfill_domains(struct iopt_area *area, struct iopt_pages *pages)
    1519  iopt_pages_unpin_xarray(struct pfn_batch *batch, struct iopt_pages *pages, unsigned long start_index, unsigned long end_index)
    1542  iopt_pages_unfill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index)
    1590  iopt_pages_fill_from_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages)
    1610  iopt_pages_fill_from_domain(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages)
    1632  iopt_pages_fill_from_mm(struct iopt_pages *pages, struct pfn_reader_user *user, unsigned long start_index, unsigned long last_index, struct page **out_pages)
    1671  iopt_pages_fill_xarray(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, struct page **out_pages)
    1743  iopt_pages_rw_slow(struct iopt_pages *pages, unsigned long start_index, unsigned long last_index, unsigned long offset, void *data, unsigned long length, unsigned int flags)
    1784  iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index, unsigned long offset, void *data, unsigned long length, unsigned int flags)
    1831  iopt_pages_rw_access(struct iopt_pages *pages, unsigned long start_byte, void *data, unsigned long length, unsigned int flags)
    1886  iopt_pages_get_exact_access(struct iopt_pages *pages, unsigned long index, unsigned long last)
    1919  iopt_area_add_access() local: struct iopt_pages *pages = area->pages;
    1974  iopt_area_remove_access() local: struct iopt_pages *pages = area->pages;
    ...
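Note: the npinned accounting matched at lines 163-176 relies on the kernel's check_add_overflow()/check_sub_overflow() helpers from <linux/overflow.h>. A minimal hedged sketch of the pattern, with an invented counter struct rather than iommufd's real types:

    #include <linux/overflow.h>
    #include <linux/bug.h>

    struct pin_counter {
            size_t npinned;         /* pages currently pinned */
            size_t npages;          /* total pages backing the object */
    };

    /* Add npages to the pinned count, warning on wrap-around or overshoot. */
    static void pin_counter_add(struct pin_counter *pc, size_t npages)
    {
            bool overflow;

            /* check_add_overflow() stores the sum and returns true on wrap. */
            overflow = check_add_overflow(pc->npinned, npages, &pc->npinned);
            WARN_ON(overflow || pc->npinned > pc->npages);
    }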
  io_pagetable.c
      23  struct member: struct iopt_pages *pages;
      42  iopt_area_contig_init(): if (!iter->area->pages) {
      65  iopt_area_contig_next(): !iter->area->pages) {
     187  comment: "The area takes a slice of the pages from start_bytes to start_byte + length"
     189  iopt_insert_area(struct io_pagetable *iopt, struct iopt_area *area, struct iopt_pages *pages, unsigned long iova, unsigned long start_byte, unsigned long length, int iommu_prot)
     196  iopt_insert_area(): if ((iommu_prot & IOMMU_WRITE) && !pages->writable)
     212  iopt_insert_area(): if (WARN_ON(area->pages_node.last >= pages->npages))
     216  comment in iopt_insert_area(): "The area is inserted with a NULL pages indicating it is not fully ..."
     263  iopt_alloc_area_pages(): (uintptr_t)elm->pages->uptr + elm->start_byte, length);
     278  comment in iopt_alloc_area_pages(): "Areas are created with a NULL pages s..."
     487  iopt_unmap_iova_range() local: struct iopt_pages *pages;
     715  iopt_unfill_domain() local: struct iopt_pages *pages = area->pages;
     734  iopt_unfill_domain() local: struct iopt_pages *pages = area->pages;
     768  iopt_fill_domain() local: struct iopt_pages *pages = area->pages;
     793  iopt_fill_domain() local: struct iopt_pages *pages = area->pages;
    1003  iopt_area_split() local: struct iopt_pages *pages = area->pages;
    ...
/kernel/linux/linux-5.10/net/ceph/
  pagevec.c
      13  ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
      19  ceph_put_page_vector(): set_page_dirty_lock(pages[i]);
      20  ceph_put_page_vector(): put_page(pages[i]);
      22  ceph_put_page_vector(): kvfree(pages);
      26  ceph_release_page_vector(struct page **pages, int num_pages)
      31  ceph_release_page_vector(): __free_pages(pages[i], 0);
      32  ceph_release_page_vector(): kfree(pages);
      37  comment: "allocate a vector new pages"
      41  ceph_alloc_page_vector() local: struct page **pages;
      61  ceph_copy_user_to_page_vector(struct page **pages, const void __user *data, loff_t off, size_t len)
      87  ceph_copy_to_page_vector(struct page **pages, const void *data, loff_t off, size_t len)
     110  ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len)
     137  ceph_zero_page_vector_range(int off, int len, struct page **pages)
    ...
/kernel/linux/linux-6.6/net/ceph/
  pagevec.c
      13  ceph_put_page_vector(struct page **pages, int num_pages, bool dirty)
      19  ceph_put_page_vector(): set_page_dirty_lock(pages[i]);
      20  ceph_put_page_vector(): put_page(pages[i]);
      22  ceph_put_page_vector(): kvfree(pages);
      26  ceph_release_page_vector(struct page **pages, int num_pages)
      31  ceph_release_page_vector(): __free_pages(pages[i], 0);
      32  ceph_release_page_vector(): kfree(pages);
      37  comment: "allocate a vector new pages"
      41  ceph_alloc_page_vector() local: struct page **pages;
      61  ceph_copy_user_to_page_vector(struct page **pages, const void __user *data, loff_t off, size_t len)
      87  ceph_copy_to_page_vector(struct page **pages, const void *data, loff_t off, size_t len)
     110  ceph_copy_from_page_vector(struct page **pages, void *data, loff_t off, size_t len)
     137  ceph_zero_page_vector_range(int off, int len, struct page **pages)
    ...
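Note: both trees export the same page-vector helpers, declared in include/linux/ceph/libceph.h. A minimal hedged sketch of a caller (the buffer and length are placeholders invented for the example, not a real Ceph client path):

    #include <linux/ceph/libceph.h>

    /* Allocate a page vector, copy a kernel buffer into it, then free it. */
    static int demo_page_vector(const void *buf, size_t len)
    {
            int num_pages = calc_pages_for(0, len);     /* offset 0 */
            struct page **pages;

            pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
            if (IS_ERR(pages))
                    return PTR_ERR(pages);

            ceph_copy_to_page_vector(pages, buf, 0, len);

            /* Frees each page and then the vector itself. */
            ceph_release_page_vector(pages, num_pages);
            return 0;
    }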
/kernel/linux/linux-5.10/mm/
  percpu-vm.c
      22  doc: "pcpu_get_pages - get temp pages array"
      29  doc: "Pointer to temp pages array on success."
      33  pcpu_get_pages() local: static struct page **pages;
      34  pcpu_get_pages(): size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
      38  pcpu_get_pages(): if (!pages)
      39  pcpu_get_pages(): pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
      40  pcpu_get_pages(): return pages;
      44  doc: "pcpu_free_pages - free pages which were allocated for @chunk"
      45  doc: "@chunk: chunk pages were allocated for"
      53  pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
      81  pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp)
     152  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
     192  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)
     213  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
     278  pcpu_populate_chunk() local: struct page **pages;
     311  pcpu_depopulate_chunk() local: struct page **pages;
    ...
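Note: pcpu_get_pages() keeps one lazily allocated scratch array for the whole lifetime of the kernel, which is safe only because percpu chunk management is serialized (the file documents pcpu_alloc_mutex as its context). A hedged generic sketch of the same pattern, with invented names and an assumed fixed bound:

    #include <linux/slab.h>

    #define SCRATCH_ENTRIES 64      /* assumed upper bound for the example */

    /*
     * Return a shared scratch array, allocating it on first use.
     * Callers must hold a lock that serializes all users, because the
     * array and its contents are shared between them.
     */
    static struct page **get_scratch_pages(void)
    {
            static struct page **pages;

            if (!pages)
                    pages = kcalloc(SCRATCH_ENTRIES, sizeof(*pages), GFP_KERNEL);
            return pages;   /* NULL on allocation failure */
    }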
  gup.c
      83  comment in try_get_compound_head(): "So now that the head page is stable, recheck that the pages still ..."
     236  comment: "... that such pages can be separately tracked and uniquely handled. In ..."
     246  doc: "unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages"
     247  doc: "@pages: array of pages to be maybe marked dirty, and definitely released."
     248  doc: "@npages: number of pages in the @pages array."
     249  doc: "@make_dirty: whether to mark the pages dirty"
     254  doc: "For each page in the @pages array, make that page (or its head page, if a ..."
     256  doc: "listed as clean. In any case, releases all pages usin..."
     267  unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty)
     321  unpin_user_pages(struct page **pages, unsigned long npages)
    1027  __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    1253  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int flags)
    1500  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, int *locked, unsigned int foll_flags)
    1597  check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags)
    1692  check_and_migrate_cma_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags)
    1707  __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int gup_flags)
    1760  __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, struct vm_area_struct **vmas, unsigned int flags)
    1792  __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    1881  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    1895  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    1903  __get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    1928  get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
    1972  get_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    2012  get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags)
    2127  undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages)
    2162  gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2250  gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2259  __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2289  __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2307  __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2325  __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2333  __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2342  record_subpages(struct page *page, unsigned long addr, unsigned long end, struct page **pages)
    2361  gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2400  gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2418  gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2426  gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2460  gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2494  gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2523  gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2566  gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2594  gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2619  gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2645  gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2662  __gup_longterm_unlocked(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    2685  lockless_pages_from_mm(unsigned long start, unsigned long end, unsigned int gup_flags, struct page **pages)
    2732  internal_get_user_pages_fast(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages)
    2800  get_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    2845  get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    2878  pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    2896  pin_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    2949  pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas, int *locked)
    2982  pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, struct vm_area_struct **vmas)
    3001  pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags)
    3018  pin_user_pages_locked(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    ...
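Note: the pin_user_pages*() and unpin_user_pages_dirty_lock() pairing listed above is the FOLL_PIN API for long-lived page references. A minimal hedged sketch of a typical DMA-style caller (the user address and page count are placeholders; error handling is reduced to the essentials):

    #include <linux/mm.h>

    /* Pin user memory for device writes, then release and dirty it. */
    static int demo_pin_user_buffer(unsigned long uaddr, int nr_pages)
    {
            struct page **pages;
            int pinned;

            pages = kvcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return -ENOMEM;

            /* FOLL_WRITE because a device would write into these pages. */
            pinned = pin_user_pages_fast(uaddr, nr_pages, FOLL_WRITE, pages);
            if (pinned < 0) {
                    kvfree(pages);
                    return pinned;
            }

            /* ... hand pages[0..pinned) to hardware here ... */

            /* Release the pins; 'true' marks each page dirty. */
            unpin_user_pages_dirty_lock(pages, pinned, true);
            kvfree(pages);
            return 0;
    }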
  gup_benchmark.c
      24  put_back_pages(unsigned int cmd, struct page **pages, ...)
      33  put_back_pages(): put_page(pages[i]);
      39  put_back_pages(): unpin_user_pages(pages, nr_pages);
      44  verify_dma_pinned(unsigned int cmd, struct page **pages, ...)
      55  verify_dma_pinned(): page = pages[i];
      57  verify_dma_pinned(): "pages[%lu] is NOT dma-pinned\n", i)) {
      73  __gup_benchmark_ioctl() local: struct page **pages;
      82  __gup_benchmark_ioctl(): pages = kvcalloc(nr_pages, sizeof(void *), GFP_KERNEL);
      83  __gup_benchmark_ioctl(): if (!pages)
    ...
/kernel/linux/linux-6.6/mm/
  percpu-vm.c
      23  doc: "pcpu_get_pages - get temp pages array"
      30  doc: "Pointer to temp pages array on success."
      34  pcpu_get_pages() local: static struct page **pages;
      35  pcpu_get_pages(): size_t pages_size = pcpu_nr_units * pcpu_unit_pages * sizeof(pages[0]);
      39  pcpu_get_pages(): if (!pages)
      40  pcpu_get_pages(): pages = pcpu_mem_zalloc(pages_size, GFP_KERNEL);
      41  pcpu_get_pages(): return pages;
      45  doc: "pcpu_free_pages - free pages which were allocated for @chunk"
      46  doc: "@chunk: chunk pages were allocated for"
      54  pcpu_free_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
      82  pcpu_alloc_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end, gfp_t gfp)
     153  pcpu_unmap_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
     193  __pcpu_map_pages(unsigned long addr, struct page **pages, int nr_pages)
     214  pcpu_map_pages(struct pcpu_chunk *chunk, struct page **pages, int page_start, int page_end)
     279  pcpu_populate_chunk() local: struct page **pages;
     315  pcpu_depopulate_chunk() local: struct page **pages;
    ...
  gup_test.c
      10  put_back_pages(unsigned int cmd, struct page **pages, ...)
      19  put_back_pages(): put_page(pages[i]);
      25  put_back_pages(): unpin_user_pages(pages, nr_pages);
      29  put_back_pages(): unpin_user_pages(pages, nr_pages);
      32  put_back_pages(): put_page(pages[i]);
      39  verify_dma_pinned(unsigned int cmd, struct page **pages, ...)
      50  verify_dma_pinned(): folio = page_folio(pages[i]);
      53  verify_dma_pinned(): "pages[%lu] is NOT dma-pinned\n", i)) {
      59  verify_dma_pinned(): "pages[%lu] is NOT pinnable but pinned\n",
      69  dump_pages_test(struct gup_test *gup, struct page **pages, ...)
     106  __gup_test_ioctl() local: struct page **pages;
     227  pin_longterm_test_start() local: struct page **pages;
    ...
  gup.c
      33  sanity_check_pinned_pages(struct page **pages, ...)
      40  comment: "We only pin anonymous pages if they are exclusive. Once pinned, we ..."
      44  comment: "We'd like to verify that our pinned anonymous pages are still mapped ..."
      51  sanity_check_pinned_pages(): for (; npages; npages--, pages++) {
      52  sanity_check_pinned_pages(): struct page *page = *pages;
     269  comment: "... that such pages can be separately tracked and uniquely handled. In ..."
     337  doc: "unpin_user_pages_dirty_lock() - release and optionally dirty gup-pinned pages"
     338  doc: "@pages: array of pages to be maybe marked dirty, and definitely released."
     339  doc: "@npages: number of pages i..."
     358  unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, bool make_dirty)
     443  unpin_user_pages_lockless(struct page **pages, unsigned long npages)
     469  unpin_user_pages(struct page **pages, unsigned long npages)
    1184  __get_user_pages(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    1464  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int flags)
    1791  __get_user_pages_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int foll_flags)
    2028  collect_longterm_unpinnable_pages(struct list_head *movable_page_list, unsigned long nr_pages, struct page **pages)
    2079  migrate_longterm_unpinnable_pages(struct list_head *movable_page_list, unsigned long nr_pages, struct page **pages)
    2161  check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages)
    2176  check_and_migrate_movable_pages(unsigned long nr_pages, struct page **pages)
    2187  __gup_longterm_locked(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, struct page **pages, int *locked, unsigned int gup_flags)
    2222  is_valid_gup_args(struct page **pages, int *locked, unsigned int *gup_flags_p, unsigned int to_set)
    2326  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    2344  get_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    2367  get_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages)
    2395  get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags)
    2520  undo_dev_pagemap(int *nr, int nr_start, unsigned int flags, struct page **pages)
    2555  gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2661  gup_pte_range(pmd_t pmd, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2670  __gup_device_huge(unsigned long pfn, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2705  __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2723  __gup_device_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2741  __gup_device_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2749  __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2758  record_subpages(struct page *page, unsigned long addr, unsigned long end, struct page **pages)
    2777  gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2826  gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2844  gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned int pdshift, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2852  gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2896  gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2941  gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    2981  gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    3020  gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    3048  gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    3073  gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    3099  gup_pgd_range(unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr)
    3116  lockless_pages_from_mm(unsigned long start, unsigned long end, unsigned int gup_flags, struct page **pages)
    3165  internal_get_user_pages_fast(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages)
    3236  get_user_pages_fast_only(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    3270  get_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    3304  pin_user_pages_fast(unsigned long start, int nr_pages, unsigned int gup_flags, struct page **pages)
    3336  pin_user_pages_remote(struct mm_struct *mm, unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages, int *locked)
    3370  pin_user_pages(unsigned long start, unsigned long nr_pages, unsigned int gup_flags, struct page **pages)
    3390  pin_user_pages_unlocked(unsigned long start, unsigned long nr_pages, struct page **pages, unsigned int gup_flags)
    ...
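Note: record_subpages() at line 2758 fills the output array with one entry per 4KB step of a huge mapping. A hedged reimplementation of the idea (not the kernel's exact body; names are invented):

    #include <linux/mm.h>

    /*
     * Fill *pages with one struct page pointer per PAGE_SIZE step of
     * [addr, end), starting from the given subpage of a huge page.
     * Returns the number of entries written.
     */
    static int demo_record_subpages(struct page *subpage, unsigned long addr,
                                    unsigned long end, struct page **pages)
    {
            int nr;

            for (nr = 0; addr != end; addr += PAGE_SIZE, nr++)
                    pages[nr] = nth_page(subpage, nr);  /* safe on sparse memmaps */
            return nr;
    }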
/kernel/linux/linux-5.10/fs/isofs/
  compress.c
      37  comment: "... to one zisofs block. Store the data in the @pages array with @pcount ..."
      40  zisofs_uncompress_block(struct inode *inode, loff_t block_start, loff_t block_end, int pcount, struct page **pages, unsigned poffset, int *errp)
      68  zisofs_uncompress_block(): if (!pages[i])
      70  zisofs_uncompress_block(): memset(page_address(pages[i]), 0, PAGE_SIZE);
      71  zisofs_uncompress_block(): flush_dcache_page(pages[i]);
      72  zisofs_uncompress_block(): SetPageUptodate(pages[i]);
     122  zisofs_uncompress_block(): if (pages[curpage]) {
     123  zisofs_uncompress_block(): stream.next_out = page_address(pages[curpage])
     175  zisofs_uncompress_block(): if (pages[curpage]) {
     176  zisofs_uncompress_block(): flush_dcache_page(pages[curpag...
     201  zisofs_fill_pages(struct inode *inode, int full_page, int pcount, struct page **pages)
     309  zisofs_readpage() local: struct page **pages;
    ...
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/
  huge_gem_object.c
      11  huge_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
      18  huge_free_pages(): for_each_sgt_page(page, sgt_iter, pages) {
      24  huge_free_pages(): sg_free_table(pages);
      25  huge_free_pages(): kfree(pages);
      34  huge_get_pages() local: struct sg_table *pages;
      37  huge_get_pages(): pages = kmalloc(sizeof(*pages), GFP);
      38  huge_get_pages(): if (!pages)
      41  huge_get_pages(): if (sg_alloc_table(pages, npages, GFP)) {
      42  huge_get_pages(): kfree(pages);
      81  huge_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
    ...
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/
  huge_gem_object.c
      11  huge_free_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
      18  huge_free_pages(): for_each_sgt_page(page, sgt_iter, pages) {
      24  huge_free_pages(): sg_free_table(pages);
      25  huge_free_pages(): kfree(pages);
      34  huge_get_pages() local: struct sg_table *pages;
      41  huge_get_pages(): pages = kmalloc(sizeof(*pages), GFP);
      42  huge_get_pages(): if (!pages)
      45  huge_get_pages(): if (sg_alloc_table(pages, npages, GFP)) {
      46  huge_get_pages(): kfree(pages);
      85  huge_put_pages(struct drm_i915_gem_object *obj, struct sg_table *pages)
    ...
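Note: these selftests build their sg_table entry by entry; when a driver already holds a struct page ** array, the scatterlist API offers sg_alloc_table_from_pages() for the same job. A hedged sketch (the wrapper name is invented):

    #include <linux/scatterlist.h>

    /* Wrap an existing page array in a scatter-gather table. */
    static int demo_pages_to_sgt(struct sg_table *sgt, struct page **pages,
                                 unsigned int n_pages)
    {
            /*
             * Coalesces physically contiguous pages into fewer entries;
             * offset 0, total length n_pages * PAGE_SIZE.
             */
            return sg_alloc_table_from_pages(sgt, pages, n_pages, 0,
                                             (unsigned long)n_pages << PAGE_SHIFT,
                                             GFP_KERNEL);
    }

A matching teardown would call sg_free_table(sgt) once the table is no longer mapped.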
/kernel/linux/linux-5.10/include/drm/ttm/
  ttm_set_memory.h
      40  ttm_set_pages_array_wb(struct page **pages, int addrinarray): return set_pages_array_wb(pages, addrinarray);
      45  ttm_set_pages_array_wc(struct page **pages, int addrinarray): return set_pages_array_wc(pages, addrinarray);
      50  ttm_set_pages_array_uc(struct page **pages, int addrinarray): return set_pages_array_uc(pages, addrinarray);
      78  ttm_set_pages_array_wb(struct page **pages, int addrinarray): unmap_page_from_agp(pages[i]);
      87  ttm_set_pages_array_wc(struct page **pages, int addrinarray): map_page_into_agp(pages[...
      96  ttm_set_pages_array_uc(struct page **pages, int addrinarray)
     116  ttm_set_pages_array_wb(struct page **pages, int addrinarray)
     121  ttm_set_pages_array_wc(struct page **pages, int addrinarray)
     126  ttm_set_pages_array_uc(struct page **pages, int addrinarray)
    ...
/kernel/linux/linux-6.6/fs/isofs/
  compress.c
      37  comment: "... to one zisofs block. Store the data in the @pages array with @pcount ..."
      40  zisofs_uncompress_block(struct inode *inode, loff_t block_start, loff_t block_end, int pcount, struct page **pages, unsigned poffset, int *errp)
      68  zisofs_uncompress_block(): if (!pages[i])
      70  zisofs_uncompress_block(): memzero_page(pages[i], 0, PAGE_SIZE);
      71  zisofs_uncompress_block(): SetPageUptodate(pages[i]);
     121  zisofs_uncompress_block(): if (pages[curpage]) {
     122  zisofs_uncompress_block(): stream.next_out = kmap_local_page(pages[curpage])
     174  zisofs_uncompress_block(): if (pages[curpage]) {
     175  zisofs_uncompress_block(): flush_dcache_page(pages[curpage]);
     176  zisofs_uncompress_block(): SetPageUptodate(pages[curpag...
     206  zisofs_fill_pages(struct inode *inode, int full_page, int pcount, struct page **pages)
     313  zisofs_read_folio() local: struct page **pages;
    ...
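Note: zisofs_uncompress_block() drives the kernel's built-in zlib inflate (the caller supplies the workspace via z_stream.workspace). A minimal hedged sketch of that API on flat buffers, with invented names and simplified error handling, not zisofs's actual streaming loop:

    #include <linux/zlib.h>
    #include <linux/vmalloc.h>

    /* Inflate src[0..src_len) into dst; *dst_len is capacity in, bytes out. */
    static int demo_zlib_inflate(void *dst, size_t *dst_len,
                                 const void *src, size_t src_len)
    {
            z_stream strm = {};
            int zerr, ret = 0;

            strm.workspace = vmalloc(zlib_inflate_workspacesize());
            if (!strm.workspace)
                    return -ENOMEM;

            if (zlib_inflateInit(&strm) != Z_OK) {
                    ret = -EIO;
                    goto out;
            }

            strm.next_in = (void *)src;
            strm.avail_in = src_len;
            strm.next_out = dst;
            strm.avail_out = *dst_len;

            /* Single-shot decompress; expect the full stream to fit. */
            zerr = zlib_inflate(&strm, Z_FINISH);
            if (zerr != Z_STREAM_END)
                    ret = -EIO;
            *dst_len = strm.total_out;

            zlib_inflateEnd(&strm);
    out:
            vfree(strm.workspace);
            return ret;
    }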
/kernel/linux/linux-6.6/fs/erofs/
  pcpubuf.c
       6  comment: "... per-CPU virtual memory (in pages) in advance to store such inplace I/O ..."
      15  struct member: struct page **pages;
      64  erofs_pcpubuf_growsize() locals: struct page **pages, **oldpages;
      67  erofs_pcpubuf_growsize(): pages = kmalloc_array(nrpages, sizeof(*pages), GFP_KERNEL);
      68  erofs_pcpubuf_growsize(): if (!pages) {
      74  erofs_pcpubuf_growsize(): pages[i] = erofs_allocpage(&pagepool, GFP_KERNEL);
      75  erofs_pcpubuf_growsize(): if (!pages[i]) {
      77  erofs_pcpubuf_growsize(): oldpages = pages;
      81  erofs_pcpubuf_growsize(): ptr = vmap(pages, nrpage...
    ...
/kernel/linux/linux-5.10/drivers/gpu/drm/vkms/
  vkms_gem.c
      37  vkms_gem_free_object(): WARN_ON(gem->pages);
      61  vkms_gem_fault(): if (obj->pages) {
      62  vkms_gem_fault(): get_page(obj->pages[page_offset]);
      63  vkms_gem_fault(): vmf->page = obj->pages[page_offset];
     155  _get_pages(): if (!vkms_obj->pages) {
     156  _get_pages() local: struct page **pages = drm_gem_get_pages(gem_obj);
     158  _get_pages(): if (IS_ERR(pages))
     159  _get_pages(): return pages;
     161  _get_pages(): if (cmpxchg(&vkms_obj->pages, NULL, pages))
     201  vkms_gem_vmap() local: struct page **pages = _get_pages(vkms_obj);
    ...
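Note: _get_pages() installs the page array with cmpxchg() so that two racing faulters agree on a single array without a lock. A hedged sketch of that idiom (the object type is invented; the allocator stands in for drm_gem_get_pages()):

    #include <linux/atomic.h>
    #include <linux/err.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct demo_obj {
            struct page **pages;    /* NULL until first use */
            unsigned int npages;
    };

    /* Return the object's page array, installing it race-free on first call. */
    static struct page **demo_get_pages(struct demo_obj *obj)
    {
            if (!obj->pages) {
                    struct page **pages =
                            kvmalloc_array(obj->npages, sizeof(*pages), GFP_KERNEL);

                    if (!pages)
                            return ERR_PTR(-ENOMEM);

                    /* Lost the race? Keep the winner's array, drop ours. */
                    if (cmpxchg(&obj->pages, NULL, pages))
                            kvfree(pages);
            }
            return obj->pages;
    }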
/kernel/linux/linux-5.10/kernel/dma/
  remap.c
      15  dma_common_find_pages(): return area->pages;
      19  comment: "Remaps an array of PAGE_SIZE pages into another vm_area."
      22  dma_common_pages_remap(struct page **pages, size_t size, ...)
      27  dma_common_pages_remap(): vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, ...
      30  dma_common_pages_remap(): find_vm_area(vaddr)->pages = pages;
      42  dma_common_contiguous_remap() local: struct page **pages;
      46  dma_common_contiguous_remap(): pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
      47  dma_common_contiguous_remap(): if (!pages)
      50  dma_common_contiguous_remap(): pages[...
    ...
/kernel/linux/linux-6.6/kernel/dma/
  remap.c
      15  dma_common_find_pages(): return area->pages;
      19  comment: "Remaps an array of PAGE_SIZE pages into another vm_area."
      22  dma_common_pages_remap(struct page **pages, size_t size, ...)
      27  dma_common_pages_remap(): vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, ...
      30  dma_common_pages_remap(): find_vm_area(vaddr)->pages = pages;
      42  dma_common_contiguous_remap() local: struct page **pages;
      46  dma_common_contiguous_remap(): pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
      47  dma_common_contiguous_remap(): if (!pages)
      50  dma_common_contiguous_remap(): pages[...
    ...
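Note: dma_common_contiguous_remap() builds a temporary page array describing one contiguous allocation and then vmap()s it. A hedged sketch of the same technique outside the DMA layer (the function name is invented):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Give a physically contiguous region a new virtual mapping. */
    static void *demo_remap_contiguous(struct page *first, size_t size,
                                       pgprot_t prot)
    {
            size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            struct page **pages;
            void *vaddr;
            size_t i;

            pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;

            /* Contiguous memory: page i is simply first + i. */
            for (i = 0; i < count; i++)
                    pages[i] = nth_page(first, i);

            vaddr = vmap(pages, count, VM_MAP, prot);
            kvfree(pages);  /* the mapping is built during the call */
            return vaddr;   /* release with vunmap() */
    }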
/kernel/linux/linux-5.10/drivers/gpu/drm/xen/
  xen_drm_front_gem.c
      30  struct member: struct page **pages;
      49  gem_alloc_pages_array(): xen_obj->pages = kvmalloc_array(xen_obj->num_pages, ...
      51  gem_alloc_pages_array(): return !xen_obj->pages ? -ENOMEM : 0;
      56  gem_free_pages_array(): kvfree(xen_obj->pages);
      57  gem_free_pages_array(): xen_obj->pages = NULL;
      93  comment in gem_create(): "only allocate array of pointers to pages"
     100  comment in gem_create(): "allocate ballooned pages which will be used to map ..."
     104  gem_create(): xen_obj->pages);
     106  gem_create(): DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", ...
     116  comment in gem_create(): "need to allocate backing pages no..."
    ...
/kernel/linux/linux-6.6/drivers/gpu/drm/xen/
  xen_drm_front_gem.c
      29  struct member: struct page **pages;
      48  gem_alloc_pages_array(): xen_obj->pages = kvmalloc_array(xen_obj->num_pages, ...
      50  gem_alloc_pages_array(): return !xen_obj->pages ? -ENOMEM : 0;
      55  gem_free_pages_array(): kvfree(xen_obj->pages);
      56  gem_free_pages_array(): xen_obj->pages = NULL;
      87  comment in xen_drm_front_gem_object_mmap(): "touch the memory. Insert pages now, so both CPU and GPU are happy."
      89  comment in xen_drm_front_gem_object_mmap(): "FIXME: as we insert all the pages now then no .fault handler must ..."
      92  xen_drm_front_gem_object_mmap(): ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
      94  xen_drm_front_gem_object_mmap(): DRM_ERROR("Failed to map pages into vma: %d\n", ret);
     148  comment in gem_create(): "only allocate array of pointers to pages"
    ...
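Note: vm_map_pages() at line 92 inserts an entire page array into a VMA in one call, which is why the 6.6 driver needs no .fault handler. A hedged sketch of an mmap path using it (the object type and handler name are invented):

    #include <linux/mm.h>
    #include <linux/printk.h>

    struct demo_buf {
            struct page **pages;
            unsigned long num_pages;
    };

    /* mmap handler body: map every backing page up front, not on fault. */
    static int demo_mmap(struct demo_buf *buf, struct vm_area_struct *vma)
    {
            int ret;

            /* Fails if the VMA is too small for the whole buffer. */
            ret = vm_map_pages(vma, buf->pages, buf->num_pages);
            if (ret)
                    pr_err("failed to map pages into vma: %d\n", ret);
            return ret;
    }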
/kernel/linux/linux-5.10/tools/testing/selftests/kvm/include/
  perf_test_util.h
      45  struct member: uint64_t pages;
      70  guest_code() local: uint64_t pages;
      77  guest_code(): pages = vcpu_args->pages;
      80  guest_code(): for (i = 0; i < pages; i++) {
      97  create_vm() local: uint64_t pages = DEFAULT_GUEST_PHY_PAGES;
     100  comment in create_vm(): "Account for a few pages per-vCPU for stacks"
     101  create_vm(): pages += DEFAULT_STACK_PGS * vcpus;
     109  create_vm(): pages += (2 * pages) / PTES_PER_4K_P...
    ...
/kernel/linux/linux-5.10/drivers/staging/media/ipu3/
  ipu3-dmamap.c
      20  imgu_dmamap_free_buffer(struct page **pages, ...)
      26  imgu_dmamap_free_buffer(): __free_page(pages[count]);
      27  imgu_dmamap_free_buffer(): kvfree(pages);
      36  imgu_dmamap_alloc_buffer() local: struct page **pages;
      42  imgu_dmamap_alloc_buffer(): pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
      44  imgu_dmamap_alloc_buffer(): if (!pages)
      72  imgu_dmamap_alloc_buffer(): imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
      77  imgu_dmamap_alloc_buffer(): pages[i++] = page++;
      80  imgu_dmamap_alloc_buffer(): return pages;
     100  imgu_dmamap_alloc() local: struct page **pages;
    ...
/kernel/linux/linux-6.6/drivers/staging/media/ipu3/
  ipu3-dmamap.c
      20  imgu_dmamap_free_buffer(struct page **pages, ...)
      26  imgu_dmamap_free_buffer(): __free_page(pages[count]);
      27  imgu_dmamap_free_buffer(): kvfree(pages);
      36  imgu_dmamap_alloc_buffer() local: struct page **pages;
      42  imgu_dmamap_alloc_buffer(): pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
      44  imgu_dmamap_alloc_buffer(): if (!pages)
      72  imgu_dmamap_alloc_buffer(): imgu_dmamap_free_buffer(pages, i << PAGE_SHIFT);
      77  imgu_dmamap_alloc_buffer(): pages[i++] = page++;
      80  imgu_dmamap_alloc_buffer(): return pages;
     100  imgu_dmamap_alloc() local: struct page **pages;
    ...
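Note: imgu_dmamap_alloc_buffer() shows the standard pattern of filling a page array one allocation at a time and unwinding everything already allocated on failure. A hedged generic version of the pattern (names invented):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Allocate 'count' independent pages, or nothing at all on failure. */
    static struct page **demo_alloc_page_array(unsigned int count)
    {
            struct page **pages;
            unsigned int i;

            pages = kvmalloc_array(count, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;

            for (i = 0; i < count; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i]) {
                            /* Unwind: free every page allocated so far. */
                            while (i--)
                                    __free_page(pages[i]);
                            kvfree(pages);
                            return NULL;
                    }
            }
            return pages;
    }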
/kernel/linux/linux-5.10/drivers/xen/
  xlate_mmu.c
      47  comment: "Break down the pages in 4KB chunk and call fn for each gfn"
      48  xen_for_each_gfn(struct page **pages, unsigned nr_gfn, ...)
      57  xen_for_each_gfn(): page = pages[i / XEN_PFN_PER_PAGE];
      71  struct member: struct page **pages;
      99  remap_pte_fn(): struct page *page = info->pages[info->index++];
     143  xen_xlate_remap_gfn_array(struct vm_area_struct *vma, unsigned long addr, xen_pfn_t *gfn, int nr, int *err_ptr, pgprot_t prot, unsigned domid, struct page **pages)
     163  xen_xlate_remap_gfn_array(): data.pages = pages;
     183  xen_xlate_unmap_gfn_range(struct vm_area_struct *vma, int nr, struct page **pages)
     186  xen_xlate_unmap_gfn_range(): xen_for_each_gfn(pages, n...
     217  xen_xlate_map_ballooned_pages() local: struct page **pages;
     267  struct member: struct page **pages;
    ...
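Note: xen_for_each_gfn() walks each struct page in XEN_PFN_PER_PAGE chunks because the kernel page size may be larger than Xen's fixed 4KB frame. A hedged sketch of the indexing only (names, constants, and the callback type are simplified stand-ins, not the driver's real helpers):

    #include <linux/mm.h>

    #define DEMO_XEN_PAGE_SHIFT     12      /* Xen frames are always 4KB */
    #define DEMO_PFN_PER_PAGE       (PAGE_SIZE >> DEMO_XEN_PAGE_SHIFT)

    typedef void (*demo_gfn_fn_t)(unsigned long frame, void *data);

    /* Invoke fn once per 4KB frame backed by the page array. */
    static void demo_for_each_frame(struct page **pages, unsigned int nr,
                                    demo_gfn_fn_t fn, void *data)
    {
            unsigned int i;

            for (i = 0; i < nr; i++) {
                    /* One struct page covers DEMO_PFN_PER_PAGE Xen frames. */
                    struct page *page = pages[i / DEMO_PFN_PER_PAGE];
                    unsigned long pfn = page_to_pfn(page);

                    fn((pfn << (PAGE_SHIFT - DEMO_XEN_PAGE_SHIFT)) +
                       (i % DEMO_PFN_PER_PAGE), data);
            }
    }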