/kernel/linux/linux-6.6/tools/testing/selftests/mm/
hmm-tests.c |
    178: hmm_dmirror_cmd(int fd, unsigned long request, struct hmm_buffer *buffer, unsigned long npages)   [hmm_dmirror_cmd() argument]
    181: unsigned long npages)   [in hmm_dmirror_cmd()]
    189: cmd.npages = npages;   [in hmm_dmirror_cmd()]
    268: hmm_migrate_sys_to_dev(int fd, struct hmm_buffer *buffer, unsigned long npages)   [hmm_migrate_sys_to_dev() argument]
    270: unsigned long npages)   [in hmm_migrate_sys_to_dev()]
    272: return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_DEV, buffer, npages);   [in hmm_migrate_sys_to_dev()]
    275: hmm_migrate_dev_to_sys(int fd, struct hmm_buffer *buffer, unsigned long npages)   [hmm_migrate_dev_to_sys() argument]
    277: unsigned long npages)   [in hmm_migrate_dev_to_sys()]
    279: return hmm_dmirror_cmd(fd, HMM_DMIRROR_MIGRATE_TO_SYS, buffer, npages);   [in hmm_migrate_dev_to_sys()]
    302: npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;   [in TEST_F()]
    303: ASSERT_NE(npages, ...   [in TEST_F()]
    1873: gup_test_exec(int gup_fd, unsigned long addr, int cmd, int npages, int size, int flags)   [gup_test_exec() argument]
    local "unsigned long npages;" in TEST_F() at lines 295, 359, 418, 466, 532, 610, 687, 791, 848, 903, 961, 1009, 1065, 1117, 1152, 1244, 1303, 1365, 1427, 1469, 1571, 1646, 1721, 1775, 1828, 1902, 1990
    [all...]
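The recurring idiom in these selftests is turning a byte count into a page count by rounding up to the page size and shifting by the page shift. A minimal user-space sketch of that computation, assuming a hypothetical buffer size and a locally defined ALIGN() macro (neither is copied verbatim from the file):

    #include <stdio.h>
    #include <unistd.h>

    /* Round x up to the next multiple of a (a must be a power of two). */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long page_size = (unsigned long)sysconf(_SC_PAGESIZE);
        unsigned long page_shift = (unsigned long)__builtin_ctzl(page_size);
        unsigned long buffer_size = 1UL << 20;   /* stand-in for HMM_BUFFER_SIZE */

        /* Same shape as the selftest: npages = ALIGN(size, page_size) >> page_shift */
        unsigned long npages = ALIGN(buffer_size, page_size) >> page_shift;
        unsigned long size = npages << page_shift;

        printf("npages=%lu size=%lu\n", npages, size);
        return 0;
    }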
/kernel/linux/linux-5.10/tools/testing/selftests/vm/ |
hmm-tests.c |
    120: hmm_dmirror_cmd(int fd, unsigned long request, struct hmm_buffer *buffer, unsigned long npages)   [hmm_dmirror_cmd() argument]
    123: unsigned long npages)   [in hmm_dmirror_cmd()]
    131: cmd.npages = npages;   [in hmm_dmirror_cmd()]
    230: npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift;   [in TEST_F()]
    231: ASSERT_NE(npages, 0);   [in TEST_F()]
    232: size = npages << self->page_shift;   [in TEST_F()]
    265: ret = hmm_dmirror_cmd(self->fd, HMM_DMIRROR_READ, buffer, npages);   [in TEST_F()]
    267: ASSERT_EQ(buffer->cpages, npages);   [in TEST_F()]
    local "unsigned long npages;" in TEST_F() at lines 223, 287, 346, 394, 460, 538, 615, 671, 730, 785, 843, 891, 950, 985, 1073, 1127, 1189, 1251, 1293, 1388, 1463
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/selftests/ |
scatterlist.c |
    53: unsigned int npages = npages_fn(n, pt->st.nents, rnd);   [expect_pfn_sg() local]
    61: if (sg->length != npages * PAGE_SIZE) {   [in expect_pfn_sg()]
    63: __func__, who, npages * PAGE_SIZE, sg->length);   [in expect_pfn_sg()]
    70: pfn += npages;   [in expect_pfn_sg()]
    207: page_contiguous(struct page *first, struct page *last, unsigned long npages)   [page_contiguous() argument]
    209: unsigned long npages)   [in page_contiguous()]
    211: return first + npages == last;   [in page_contiguous()]
    238: unsigned long npages = npages_fn(n, count, rnd);   [alloc_table() local]
    242: pfn_to_page(pfn + npages),   [in alloc_table()]
    243: npages)) {   [in alloc_table()]
    250: sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, ...   [in alloc_table()]
    288: const npages_fn_t *npages;   [igt_sg_alloc() local]
    330: const npages_fn_t *npages;   [igt_sg_trim() local]
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/selftests/ |
scatterlist.c |
    53: unsigned int npages = npages_fn(n, pt->st.nents, rnd);   [expect_pfn_sg() local]
    61: if (sg->length != npages * PAGE_SIZE) {   [in expect_pfn_sg()]
    63: __func__, who, npages * PAGE_SIZE, sg->length);   [in expect_pfn_sg()]
    70: pfn += npages;   [in expect_pfn_sg()]
    207: page_contiguous(struct page *first, struct page *last, unsigned long npages)   [page_contiguous() argument]
    209: unsigned long npages)   [in page_contiguous()]
    211: return first + npages == last;   [in page_contiguous()]
    242: unsigned long npages = npages_fn(n, count, rnd);   [alloc_table() local]
    246: pfn_to_page(pfn + npages),   [in alloc_table()]
    247: npages)) {   [in alloc_table()]
    254: sg_set_page(sg, pfn_to_page(pfn), npages * PAGE_SIZE, ...   [in alloc_table()]
    292: const npages_fn_t *npages;   [igt_sg_alloc() local]
    334: const npages_fn_t *npages;   [igt_sg_trim() local]
    [all...]
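In both kernel versions indexed above, each scatterlist entry must cover npages physically contiguous pages, and contiguity is checked with plain pointer arithmetic on struct page (first + npages == last). A rough user-space analogue of that check using page-frame numbers (the function name and values are illustrative, not from the file):

    #include <stdbool.h>
    #include <stdio.h>

    /* A run of 'npages' pages starting at pfn 'first' ends exactly at 'last'. */
    static bool pfn_run_contiguous(unsigned long first, unsigned long last,
                                   unsigned long npages)
    {
        return first + npages == last;
    }

    int main(void)
    {
        /* e.g. pfns 100..103 can be described by one 4-page scatterlist entry */
        printf("%d\n", pfn_run_contiguous(100, 104, 4));   /* prints 1 */
        printf("%d\n", pfn_run_contiguous(100, 105, 4));   /* prints 0 */
        return 0;
    }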
/kernel/linux/linux-6.6/lib/ |
kunit_iov_iter.c |
    47: iov_kunit_create_buffer(struct kunit *test, struct page ***ppages, size_t npages)   [iov_kunit_create_buffer() argument]
    49: size_t npages)   [in iov_kunit_create_buffer()]
    55: pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);   [in iov_kunit_create_buffer()]
    59: got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);   [in iov_kunit_create_buffer()]
    60: if (got != npages) {   [in iov_kunit_create_buffer()]
    62: KUNIT_ASSERT_EQ(test, got, npages);   [in iov_kunit_create_buffer()]
    65: buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);   [in iov_kunit_create_buffer()]
    109: npages = bufsize / PAGE_SIZE;   [in iov_kunit_copy_to_kvec()]
    111: scratch = iov_kunit_create_buffer(test, &spages, npages);   [in iov_kunit_copy_to_kvec()]
    115: buffer = iov_kunit_create_buffer(test, &bpages, npages);   [in iov_kunit_copy_to_kvec()]
    216: iov_kunit_load_bvec(struct kunit *test, struct iov_iter *iter, int dir, struct bio_vec *bvec, unsigned int bvmax, struct page **pages, size_t npages, size_t bufsize, const struct bvec_test_range *pr)   [iov_kunit_load_bvec() argument]
    373: iov_kunit_load_xarray(struct kunit *test, struct iov_iter *iter, int dir, struct xarray *xarray, struct page **pages, size_t npages)   [iov_kunit_load_xarray() argument]
    local "size_t bufsize, npages, size, copied;" at lines 105 (iov_kunit_copy_to_kvec), 155 (iov_kunit_copy_from_kvec), 264 (iov_kunit_copy_to_bvec), 318 (iov_kunit_copy_from_bvec), 411 (iov_kunit_copy_to_xarray), 469 (iov_kunit_copy_from_xarray)
    local "size_t bufsize, size = 0, npages;" at lines 533 (iov_kunit_extract_pages_kvec), 612 (iov_kunit_extract_pages_bvec), 690 (iov_kunit_extract_pages_xarray)
    [all...]
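The buffer helper indexed above bulk-allocates npages individual pages and maps them into one virtually contiguous test buffer. A condensed kernel-side sketch of that pattern, assuming a KUnit context; the helper name is illustrative and error unwinding on a short bulk allocation is omitted for brevity:

    #include <kunit/test.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *create_test_buffer(struct kunit *test, struct page ***ppages,
                                    size_t npages)
    {
        struct page **pages;
        unsigned long got;
        void *buffer;

        /* Page-pointer array owned by the test case, freed automatically. */
        pages = kunit_kcalloc(test, npages, sizeof(struct page *), GFP_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, pages);
        *ppages = pages;

        /* Bulk-allocate npages order-0 pages into the array. */
        got = alloc_pages_bulk_array(GFP_KERNEL, npages, pages);
        KUNIT_ASSERT_EQ(test, got, (unsigned long)npages);

        /* Map them contiguously; VM_MAP_PUT_PAGES releases the pages on vfree(). */
        buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);
        return buffer;
    }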
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/ |
user_pages.c |
    29: hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm, u32 nlocked, u32 npages)   [hfi1_can_pin_pages() argument]
    30: u32 nlocked, u32 npages)   [in hfi1_can_pin_pages()]
    47: if (atomic64_read(&mm->pinned_vm) + npages > ulimit_pages)   [in hfi1_can_pin_pages()]
    66: if (nlocked + npages > (ulimit_pages / usr_ctxts / 4))   [in hfi1_can_pin_pages()]
    74: if (nlocked + npages > cache_limit_pages)   [in hfi1_can_pin_pages()]
    80: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,   [hfi1_acquire_user_pages() argument]
    86: ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);   [in hfi1_acquire_user_pages()]
    95: hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty)   [hfi1_release_user_pages() argument]
    96: size_t npages, bool dirty)   [in hfi1_release_user_pages()]
    98: unpin_user_pages_dirty_lock(p, npages, dirty);   [in hfi1_release_user_pages()]
    101: atomic64_sub(npages, &mm->pinned_vm);   [in hfi1_release_user_pages()]
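The acquire and release helpers above keep mm->pinned_vm in step with the number of pages pinned through pin_user_pages_fast(). A simplified kernel-side sketch of that pairing; the function names here are illustrative, only the core calls mirror the file:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>
    #include <linux/atomic.h>

    static int pin_user_buffer(struct mm_struct *mm, unsigned long vaddr,
                               size_t npages, bool writable, struct page **pages)
    {
        unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
        int ret;

        ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
        if (ret < 0)
            return ret;

        /* Account every successfully pinned page against the mm. */
        atomic64_add(ret, &mm->pinned_vm);
        return ret;
    }

    static void unpin_user_buffer(struct mm_struct *mm, struct page **pages,
                                  size_t npages, bool dirty)
    {
        unpin_user_pages_dirty_lock(pages, npages, dirty);
        if (mm)
            atomic64_sub(npages, &mm->pinned_vm);
    }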
pin_system.c |
    20: unsigned int npages;   [struct member]
    55: static u32 sdma_cache_evict(struct hfi1_user_sdma_pkt_q *pq, u32 npages)   [sdma_cache_evict() argument]
    60: evict_data.target = npages;   [in sdma_cache_evict()]
    65: unpin_vector_pages(struct mm_struct *mm, struct page **pages, unsigned int start, unsigned int npages)   [unpin_vector_pages() argument]
    66: unsigned int start, unsigned int npages)   [in unpin_vector_pages()]
    68: hfi1_release_user_pages(mm, pages + start, npages, false);   [in unpin_vector_pages()]
    79: if (node->npages) {   [in free_system_node()]
    81: node->npages);   [in free_system_node()]
    82: atomic_sub(node->npages, &node->pq->n_locked);   [in free_system_node()]
    114: pin_system_pages(struct user_sdma_request *req, uintptr_t start_address, size_t length, struct sdma_mmu_node *node, int npages)   [pin_system_pages() argument]
    116: struct sdma_mmu_node *node, int npages)   [in pin_system_pages()]
    122: pages = kcalloc(npages, sizeof(...   [in pin_system_pages()]
    [all...]
user_exp_rcv.c |
    16: static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
    20: u16 pageidx, unsigned int npages);
    126: * @npages: No of pages to unpin.   [kernel-doc comment]
    132: unpin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf, struct tid_rb_node *node, unsigned int idx, unsigned int npages, bool mapped)   [unpin_rcv_pages() argument]
    136: unsigned int npages,   [in unpin_rcv_pages()]
    145: node->npages * PAGE_SIZE, DMA_FROM_DEVICE);   [in unpin_rcv_pages()]
    152: hfi1_release_user_pages(mm, pages, npages, mapped);   [in unpin_rcv_pages()]
    153: fd->tid_n_pinned -= npages;   [in unpin_rcv_pages()]
    162: unsigned int npages = tidbuf->npages;   [pin_rcv_pages() local]
    167: if (npages > f...   [in pin_rcv_pages()]
    565: find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)   [find_phys_blocks() argument]
    683: u16 npages, pageidx, setidx = start + idx;   [program_rcvarray() local]
    723: set_rcvarray_entry(struct hfi1_filedata *fd, struct tid_user_buf *tbuf, u32 rcventry, struct tid_group *grp, u16 pageidx, unsigned int npages)   [set_rcvarray_entry() argument]
    [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/ |
pagealloc.c |
    52: s32 npages;   [struct member]
    198: mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot)   [mlx5_cmd_query_pages() argument]
    199: s32 *npages, int boot)   [in mlx5_cmd_query_pages()]
    215: *npages = MLX5_GET(query_pages_out, out, num_pages);   [in mlx5_cmd_query_pages()]
    353: static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,   [give_pages() argument]
    366: inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);   [in give_pages()]
    374: for (i = 0; i < npages; i++) {   [in give_pages()]
    381: dev->priv.fw_pages_alloc_failed += (npages - i);   [in give_pages()]
    393: MLX5_SET(manage_pages_in, in, input_num_entries, npages);   [in give_pages()]
    407: mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",   [in give_pages()]
    408: func_id, npages, err);   [in give_pages()]
    440: int npages = 0;   [release_all_pages() local]
    464: fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, u32 npages)   [fwp_fill_manage_pages_out() argument]
    490: u32 npages;   [reclaim_pages_cmd() local]
    517: reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int *nclaimed, bool event, bool ec_function)   [reclaim_pages() argument]
    621: s32 npages;   [req_pages_handler() local]
    653: s32 npages;   [mlx5_satisfy_startup_pages() local]
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/ttm/ |
ttm_page_alloc.c |
    67: * @npages: Number of pages in pool.   [kernel-doc comment]
    74: unsigned npages;   [struct member]
    247: static void ttm_pages_put(struct page *pages[], unsigned npages,   [ttm_pages_put() argument]
    253: if (ttm_set_pages_array_wb(pages, npages))   [in ttm_pages_put()]
    254: pr_err("Failed to set %d pages to wb!\n", npages);   [in ttm_pages_put()]
    257: for (i = 0; i < npages; ++i) {   [in ttm_pages_put()]
    269: pool->npages -= freed_pages;   [in ttm_pool_update_free_locked()]
    423: count += (pool->npages << pool->order);   [in ttm_pool_shrink_count()]
    495: unsigned npages = 1 << order;   [ttm_alloc_new_pages() local]
    537: for (j = 0; j < npages; ...   [in ttm_alloc_new_pages()]
    708: ttm_put_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate)   [ttm_put_pages() argument]
    826: ttm_get_pages(struct page **pages, unsigned npages, int flags, enum ttm_caching_state cstate)   [ttm_get_pages() argument]
    [all...]
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/ |
mock_dmabuf.c |
    21: err = sg_alloc_table(st, mock->npages, GFP_KERNEL);   [in mock_map_dma_buf()]
    26: for (i = 0; i < mock->npages; i++) {   [in mock_map_dma_buf()]
    58: for (i = 0; i < mock->npages; i++)   [in mock_dmabuf_release()]
    68: return vm_map_ram(mock->pages, mock->npages, 0);   [in mock_dmabuf_vmap()]
    75: vm_unmap_ram(vaddr, mock->npages);   [in mock_dmabuf_vunmap()]
    92: static struct dma_buf *mock_dmabuf(int npages)   [mock_dmabuf() argument]
    99: mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),   [in mock_dmabuf()]
    104: mock->npages = npages;   [in mock_dmabuf()]
    105: for (i = 0; i < npages; ...   [in mock_dmabuf()]
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/i915/gem/selftests/ |
mock_dmabuf.c |
    21: err = sg_alloc_table(st, mock->npages, GFP_KERNEL);   [in mock_map_dma_buf()]
    26: for (i = 0; i < mock->npages; i++) {   [in mock_map_dma_buf()]
    58: for (i = 0; i < mock->npages; i++)   [in mock_dmabuf_release()]
    69: vaddr = vm_map_ram(mock->pages, mock->npages, 0);   [in mock_dmabuf_vmap()]
    81: vm_unmap_ram(map->vaddr, mock->npages);   [in mock_dmabuf_vunmap()]
    98: static struct dma_buf *mock_dmabuf(int npages)   [mock_dmabuf() argument]
    105: mock = kmalloc(sizeof(*mock) + npages * sizeof(struct page *),   [in mock_dmabuf()]
    110: mock->npages = npages;   [in mock_dmabuf()]
    111: for (i = 0; i < npages; ...   [in mock_dmabuf()]
    [all...]
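mock_dmabuf() sizes a single allocation to hold its bookkeeping struct plus an npages-long array of page pointers. A hedged sketch of that layout using the current struct_size()/flexible-array idiom; the struct and helper below are illustrative stand-ins, not the driver's exact definitions:

    #include <linux/slab.h>
    #include <linux/gfp.h>
    #include <linux/mm_types.h>
    #include <linux/overflow.h>

    struct mock_pages {
        int npages;
        struct page *pages[];          /* flexible array, npages entries */
    };

    static struct mock_pages *mock_pages_alloc(int npages)
    {
        struct mock_pages *mock;
        int i;

        /* struct_size() is the overflow-safe spelling of
         * sizeof(*mock) + npages * sizeof(struct page *). */
        mock = kmalloc(struct_size(mock, pages, npages), GFP_KERNEL);
        if (!mock)
            return NULL;

        mock->npages = npages;
        for (i = 0; i < npages; i++) {
            mock->pages[i] = alloc_page(GFP_KERNEL);
            if (!mock->pages[i])
                goto err;
        }
        return mock;

    err:
        while (i--)
            __free_page(mock->pages[i]);
        kfree(mock);
        return NULL;
    }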
/kernel/linux/linux-6.6/fs/netfs/ |
iterator.c |
    44: unsigned int npages = 0;   [netfs_extract_user_iter() local]
    66: while (count && npages < max_pages) {   [in netfs_extract_user_iter()]
    68: max_pages - npages, extraction_flags,   [in netfs_extract_user_iter()]
    84: if (npages + cur_npages > max_pages) {   [in netfs_extract_user_iter()]
    86: npages + cur_npages, max_pages);   [in netfs_extract_user_iter()]
    92: bvec_set_page(bv + npages + i, *pages++, len - offset, offset);   [in netfs_extract_user_iter()]
    97: npages += cur_npages;   [in netfs_extract_user_iter()]
    100: iov_iter_bvec(new, orig->data_source, bv, npages, orig_len - count);   [in netfs_extract_user_iter()]
    101: return npages;   [in netfs_extract_user_iter()]
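netfs_extract_user_iter() extracts user pages and repacks them into a bio_vec array, then points an ITER_BVEC iterator at that array. A reduced sketch of the repacking step, assuming already-pinned, fully-used pages; the function name and the ITER_SOURCE direction are illustrative choices, not the file's exact code:

    #include <linux/bvec.h>
    #include <linux/uio.h>
    #include <linux/mm.h>
    #include <linux/minmax.h>

    /* Wrap 'npages' pinned pages carrying 'len' bytes into a bvec iterator. */
    static void build_bvec_iter(struct iov_iter *iter, struct bio_vec *bv,
                                struct page **pages, size_t npages, size_t len)
    {
        size_t remaining = len, i;

        for (i = 0; i < npages; i++) {
            size_t part = min_t(size_t, remaining, PAGE_SIZE);

            /* bvec_set_page(bv, page, len, offset) fills one segment. */
            bvec_set_page(&bv[i], pages[i], part, 0);
            remaining -= part;
        }
        iov_iter_bvec(iter, ITER_SOURCE, bv, npages, len);
    }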
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ |
pagealloc.c |
    52: s32 npages;   [struct member]
    179: mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, s32 *npages, int boot)   [mlx5_cmd_query_pages() argument]
    180: s32 *npages, int boot)   [in mlx5_cmd_query_pages()]
    196: *npages = MLX5_GET(query_pages_out, out, num_pages);   [in mlx5_cmd_query_pages()]
    334: static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,   [give_pages() argument]
    345: inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);   [in give_pages()]
    353: for (i = 0; i < npages; i++) {   [in give_pages()]
    370: MLX5_SET(manage_pages_in, in, input_num_entries, npages);   [in give_pages()]
    375: mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",   [in give_pages()]
    376: func_id, npages, err);   [in give_pages()]
    380: dev->priv.fw_pages += npages;   [in give_pages()]
    408: int npages = 0;   [release_all_pages() local]
    433: fwp_fill_manage_pages_out(struct fw_page *fwp, u32 *out, u32 index, u32 npages)   [fwp_fill_manage_pages_out() argument]
    459: u32 npages;   [reclaim_pages_cmd() local]
    486: reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int *nclaimed, bool ec_function)   [reclaim_pages() argument]
    580: s32 npages;   [req_pages_handler() local]
    612: s32 npages;   [mlx5_satisfy_startup_pages() local]
    [all...]
/kernel/linux/linux-5.10/arch/sparc/kernel/ |
iommu.c |
    156: alloc_npages(struct device *dev, struct iommu *iommu, unsigned long npages)   [alloc_npages() argument]
    158: unsigned long npages)   [in alloc_npages()]
    162: entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,   [in alloc_npages()]
    204: int npages, nid;   [dma_4u_alloc_coherent() local]
    233: npages = size >> IO_PAGE_SHIFT;   [in dma_4u_alloc_coherent()]
    235: while (npages--) {   [in dma_4u_alloc_coherent()]
    251: unsigned long order, npages;   [dma_4u_free_coherent() local]
    253: npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;   [in dma_4u_free_coherent()]
    256: iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);   [in dma_4u_free_coherent()]
    271: unsigned long flags, npages, oaddr;   [dma_4u_map_page() local]
    283: npages ...   [in dma_4u_map_page()]
    320: strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, enum dma_data_direction direction)   [strbuf_flush() argument]
    393: unsigned long flags, npages, ctx, i;   [dma_4u_unmap_page() local]
    478: unsigned long paddr, npages, entry, out_entry = 0, slen;   [dma_4u_map_sg() local]
    558: unsigned long vaddr, npages, entry, j;   [dma_4u_map_sg() local]
    629: unsigned long npages, entry;   [dma_4u_unmap_sg() local]
    665: unsigned long flags, ctx, npages;   [dma_4u_sync_single_for_cpu() local]
    703: unsigned long flags, ctx, npages, i;   [dma_4u_sync_sg_for_cpu() local]
    [all...]
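Throughout the sparc IOMMU code the number of IOMMU pages for a mapping is derived by aligning the end of the byte range up to an IO page boundary, truncating the start down, and shifting. A user-space sketch of that arithmetic for an arbitrary (address, length) pair; the 8 KiB IO page (IO_PAGE_SHIFT = 13) is an assumption for the example, not taken from the file:

    #include <stdio.h>

    #define IO_PAGE_SHIFT    13UL                      /* assumed for the example */
    #define IO_PAGE_SIZE     (1UL << IO_PAGE_SHIFT)
    #define IO_PAGE_MASK     (~(IO_PAGE_SIZE - 1))
    #define IO_PAGE_ALIGN(x) (((x) + IO_PAGE_SIZE - 1) & IO_PAGE_MASK)

    /* Count the IO pages touched by the byte range [addr, addr + len). */
    static unsigned long io_pages_spanned(unsigned long addr, unsigned long len)
    {
        return (IO_PAGE_ALIGN(addr + len) - (addr & IO_PAGE_MASK)) >> IO_PAGE_SHIFT;
    }

    int main(void)
    {
        printf("%lu\n", io_pages_spanned(0x2000, 0x2000));   /* fits in 1 page  */
        printf("%lu\n", io_pages_spanned(0x3fff, 2));        /* straddles: 2    */
        return 0;
    }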
pci_sun4v.c |
    60: unsigned long npages; /* Number of pages in list. */   [struct member]
    74: p->npages = 0;   [in iommu_batch_start()]
    91: unsigned long npages = p->npages;   [iommu_batch_flush() local]
    100: while (npages != 0) {   [in iommu_batch_flush()]
    104: npages,   [in iommu_batch_flush()]
    112: npages, prot, __pa(pglist),   [in iommu_batch_flush()]
    117: index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),   [in iommu_batch_flush()]
    135: npages -= num;   [in iommu_batch_flush()]
    140: p->npages ...   [in iommu_batch_flush()]
    185: unsigned long flags, order, first_page, npages, n;   [dma_4v_alloc_coherent() local]
    295: dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, dma_addr_t dvma, unsigned long iotsb_num, unsigned long entry, unsigned long npages)   [dma_4v_iommu_demap() argument]
    329: unsigned long order, npages, entry;   [dma_4v_free_coherent() local]
    363: unsigned long flags, npages, oaddr;   [dma_4v_map_page() local]
    436: unsigned long npages;   [dma_4v_unmap_page() local]
    523: unsigned long paddr, npages, entry, out_entry = 0, slen;   [dma_4v_map_sg() local]
    606: unsigned long vaddr, npages;   [dma_4v_map_sg() local]
    650: unsigned long npages;   [dma_4v_unmap_sg() local]
    [all...]
/kernel/linux/linux-6.6/arch/sparc/kernel/ |
iommu.c |
    156: alloc_npages(struct device *dev, struct iommu *iommu, unsigned long npages)   [alloc_npages() argument]
    158: unsigned long npages)   [in alloc_npages()]
    162: entry = iommu_tbl_range_alloc(dev, &iommu->tbl, npages, NULL,   [in alloc_npages()]
    204: int npages, nid;   [dma_4u_alloc_coherent() local]
    233: npages = size >> IO_PAGE_SHIFT;   [in dma_4u_alloc_coherent()]
    235: while (npages--) {   [in dma_4u_alloc_coherent()]
    251: unsigned long order, npages;   [dma_4u_free_coherent() local]
    253: npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;   [in dma_4u_free_coherent()]
    256: iommu_tbl_range_free(&iommu->tbl, dvma, npages, IOMMU_ERROR_CODE);   [in dma_4u_free_coherent()]
    271: unsigned long flags, npages, oaddr;   [dma_4u_map_page() local]
    283: npages ...   [in dma_4u_map_page()]
    320: strbuf_flush(struct strbuf *strbuf, struct iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, enum dma_data_direction direction)   [strbuf_flush() argument]
    393: unsigned long flags, npages, ctx, i;   [dma_4u_unmap_page() local]
    478: unsigned long paddr, npages, entry, out_entry = 0, slen;   [dma_4u_map_sg() local]
    557: unsigned long vaddr, npages, entry, j;   [dma_4u_map_sg() local]
    627: unsigned long npages, entry;   [dma_4u_unmap_sg() local]
    663: unsigned long flags, ctx, npages;   [dma_4u_sync_single_for_cpu() local]
    701: unsigned long flags, ctx, npages, i;   [dma_4u_sync_sg_for_cpu() local]
    [all...]
pci_sun4v.c |
    61: unsigned long npages; /* Number of pages in list. */   [struct member]
    75: p->npages = 0;   [in iommu_batch_start()]
    92: unsigned long npages = p->npages;   [iommu_batch_flush() local]
    101: while (npages != 0) {   [in iommu_batch_flush()]
    105: npages,   [in iommu_batch_flush()]
    113: npages, prot, __pa(pglist),   [in iommu_batch_flush()]
    118: index_count = HV_PCI_IOTSB_INDEX_COUNT(npages, entry),   [in iommu_batch_flush()]
    136: npages -= num;   [in iommu_batch_flush()]
    141: p->npages ...   [in iommu_batch_flush()]
    186: unsigned long flags, order, first_page, npages, n;   [dma_4v_alloc_coherent() local]
    296: dma_4v_iommu_demap(struct device *dev, unsigned long devhandle, dma_addr_t dvma, unsigned long iotsb_num, unsigned long entry, unsigned long npages)   [dma_4v_iommu_demap() argument]
    330: unsigned long order, npages, entry;   [dma_4v_free_coherent() local]
    364: unsigned long flags, npages, oaddr;   [dma_4v_map_page() local]
    437: unsigned long npages;   [dma_4v_unmap_page() local]
    524: unsigned long paddr, npages, entry, out_entry = 0, slen;   [dma_4v_map_sg() local]
    606: unsigned long vaddr, npages;   [dma_4v_map_sg() local]
    649: unsigned long npages;   [dma_4v_unmap_sg() local]
    [all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/ |
user_pages.c |
    71: hfi1_can_pin_pages(struct hfi1_devdata *dd, struct mm_struct *mm, u32 nlocked, u32 npages)   [hfi1_can_pin_pages() argument]
    72: u32 nlocked, u32 npages)   [in hfi1_can_pin_pages()]
    97: if (pinned + npages >= ulimit && !can_lock)   [in hfi1_can_pin_pages()]
    100: return ((nlocked + npages) <= size) || can_lock;   [in hfi1_can_pin_pages()]
    103: int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t npages,   [hfi1_acquire_user_pages() argument]
    109: ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);   [in hfi1_acquire_user_pages()]
    118: hfi1_release_user_pages(struct mm_struct *mm, struct page **p, size_t npages, bool dirty)   [hfi1_release_user_pages() argument]
    119: size_t npages, bool dirty)   [in hfi1_release_user_pages()]
    121: unpin_user_pages_dirty_lock(p, npages, dirty);   [in hfi1_release_user_pages()]
    124: atomic64_sub(npages, &mm->pinned_vm);   [in hfi1_release_user_pages()]
user_exp_rcv.c |
    58: static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
    62: u16 pageidx, unsigned int npages);
    169: * @npages - No of pages to unpin.   [kernel-doc comment]
    175: unpin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf, struct tid_rb_node *node, unsigned int idx, unsigned int npages, bool mapped)   [unpin_rcv_pages() argument]
    179: unsigned int npages,   [in unpin_rcv_pages()]
    188: node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);   [in unpin_rcv_pages()]
    195: hfi1_release_user_pages(mm, pages, npages, mapped);   [in unpin_rcv_pages()]
    196: fd->tid_n_pinned -= npages;   [in unpin_rcv_pages()]
    205: unsigned int npages = tidbuf->npages;   [pin_rcv_pages() local]
    210: if (npages > f...   [in pin_rcv_pages()]
    610: find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)   [find_phys_blocks() argument]
    729: u16 npages, pageidx, setidx = start + idx;   [program_rcvarray() local]
    770: set_rcvarray_entry(struct hfi1_filedata *fd, struct tid_user_buf *tbuf, u32 rcventry, struct tid_group *grp, u16 pageidx, unsigned int npages)   [set_rcvarray_entry() argument]
    [all...]
/kernel/linux/linux-5.10/arch/powerpc/kernel/ |
iommu.c |
    174: iommu_range_alloc(struct device *dev, struct iommu_table *tbl, unsigned long npages, unsigned long *handle, unsigned long mask, unsigned int align_order)   [iommu_range_alloc() argument]
    176: unsigned long npages,   [in iommu_range_alloc()]
    183: int largealloc = npages > 15;   [in iommu_range_alloc()]
    195: if (unlikely(npages == 0)) {   [in iommu_range_alloc()]
    249: n = iommu_area_alloc(tbl->it_map, limit, start, npages, tbl->it_offset,   [in iommu_range_alloc()]
    276: end = n + npages;   [in iommu_range_alloc()]
    297: iommu_alloc(struct device *dev, struct iommu_table *tbl, void *page, unsigned int npages, enum dma_data_direction direction, unsigned long mask, unsigned int align_order, unsigned long attrs)   [iommu_alloc() argument]
    298: void *page, unsigned int npages,   [in iommu_alloc()]
    307: entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);   [in iommu_alloc()]
    316: build_fail = tbl->it_ops->set(tbl, entry, npages,   [in iommu_alloc()]
    326: __iommu_free(tbl, ret, npages);   [in iommu_alloc()]
    340: iommu_free_check(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages)   [iommu_free_check() argument]
    341: unsigned int npages)   [in iommu_free_check()]
    387: __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages)   [__iommu_free() argument]
    409: iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr, unsigned int npages)   [iommu_free() argument]
    451: unsigned long vaddr, npages, entry, slen;   [ppc_iommu_map_sg() local]
    551: unsigned long vaddr, npages;   [ppc_iommu_map_sg() local]
    580: unsigned int npages;   [ppc_iommu_unmap_sg() local]
    806: unsigned int npages, align;   [iommu_map_page() local]
    841: unsigned int npages;   [iommu_unmap_page() local]
    989: iommu_tce_check_ioba(unsigned long page_shift, unsigned long offset, unsigned long size, unsigned long ioba, unsigned long npages)   [iommu_tce_check_ioba() argument]
    [all...]
/kernel/linux/linux-6.6/drivers/gpu/drm/amd/amdkfd/ |
kfd_migrate.c |
    49: svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages,   [svm_migrate_gart_map() argument]
    65: num_bytes = npages * 8;   [in svm_migrate_gart_map()]
    93: amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr);   [in svm_migrate_gart_map()]
    106: * @npages: number of pages to copy   [kernel-doc comment]
    111: * vram address uses direct mapping of vram pages, which must have npages   [comment]
    124: svm_migrate_copy_memory_gart(struct amdgpu_device *adev, dma_addr_t *sys, uint64_t *vram, uint64_t npages, enum MIGRATION_COPY_DIR direction, struct dma_fence **mfence)   [svm_migrate_copy_memory_gart() argument]
    125: uint64_t *vram, uint64_t npages,   [in svm_migrate_copy_memory_gart()]
    138: while (npages) {   [in svm_migrate_copy_memory_gart()]
    139: size = min(GTT_MAX_PAGES, npages);   [in svm_migrate_copy_memory_gart()]
    164: npages -= size;   [in svm_migrate_copy_memory_gart()]
    165: if (npages) {   [in svm_migrate_copy_memory_gart()]
    294: uint64_t npages = migrate->cpages;   [svm_migrate_copy_to_vram() local]
    399: uint64_t npages = (end - start) >> PAGE_SHIFT;   [svm_migrate_vma_to_vram() local]
    566: svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch, uint64_t npages)   [svm_migrate_copy_to_ram() argument]
    676: uint64_t npages = (end - start) >> PAGE_SHIFT;   [svm_migrate_vma_to_ram() local]
    [all...]
/kernel/linux/linux-6.6/drivers/vfio/ |
iova_bitmap.c |
    45: unsigned long npages;   [struct member]
    164: unsigned long npages;   [iova_bitmap_get() local]
    174: npages = DIV_ROUND_UP((bitmap->mapped_total_index -   [in iova_bitmap_get()]
    188: npages = min(npages + !!offset_in_page(addr),   [in iova_bitmap_get()]
    191: ret = pin_user_pages_fast((unsigned long)addr, npages,   [in iova_bitmap_get()]
    196: mapped->npages = (unsigned long)ret;   [in iova_bitmap_get()]
    210: * Unpins the bitmap user pages and clears @npages   [kernel-doc comment]
    218: if (mapped->npages) {   [in iova_bitmap_put()]
    219: unpin_user_pages(mapped->pages, mapped->npages);   [in iova_bitmap_put()]
    [all...]
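iova_bitmap_get() pins only as many user pages as are needed to cover the part of the bitmap still to be written, adds one page when the bitmap address is not page-aligned, and clamps the result to a maximum. A small user-space sketch of that counting logic; the constants, names, and exact byte conversion are illustrative, not the driver's:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    static unsigned long bitmap_pages_needed(unsigned long remaining_bits,
                                             unsigned long user_addr,
                                             unsigned long max_pages)
    {
        unsigned long bytes = DIV_ROUND_UP(remaining_bits, 8);   /* bits -> bytes  */
        unsigned long npages = DIV_ROUND_UP(bytes, PAGE_SIZE);   /* bytes -> pages */

        if (user_addr & (PAGE_SIZE - 1))
            npages++;                      /* unaligned start spills into one more */
        return npages < max_pages ? npages : max_pages;
    }

    int main(void)
    {
        /* 1 Mibit of dirty-tracking bits starting mid-page: 32 + 1 pages */
        printf("%lu\n", bitmap_pages_needed(1UL << 20, 0x1010, 512));
        return 0;
    }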
/kernel/linux/linux-6.6/mm/ |
migrate_device.c |
    28: migrate->dst[migrate->npages] = 0;   [in migrate_vma_collect_skip()]
    29: migrate->src[migrate->npages++] = 0;   [in migrate_vma_collect_skip()]
    48: migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE;   [in migrate_vma_collect_hole()]
    49: migrate->dst[migrate->npages] = 0;   [in migrate_vma_collect_hole()]
    50: migrate->npages++;   [in migrate_vma_collect_hole()]
    265: migrate->dst[migrate->npages] = 0;   [in migrate_vma_collect_pmd()]
    266: migrate->src[migrate->npages++] = mpfn;   [in migrate_vma_collect_pmd()]
    311: migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);   [in migrate_vma_collect()]
    357: migrate_device_unmap(unsigned long *src_pfns, unsigned long npages, struct page *fault_page)   [migrate_device_unmap() argument]
    358: unsigned long npages,   [in migrate_device_unmap()]
    367: for (i = 0; i < npages; ...   [in migrate_device_unmap()]
    681: __migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages, struct migrate_vma *migrate)   [__migrate_device_pages() argument]
    780: migrate_device_pages(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages)   [migrate_device_pages() argument]
    811: migrate_device_finalize(unsigned long *src_pfns, unsigned long *dst_pfns, unsigned long npages)   [migrate_device_finalize() argument]
    894: migrate_device_range(unsigned long *src_pfns, unsigned long start, unsigned long npages)   [migrate_device_range() argument]
    [all...]
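The collection callbacks above all append to two parallel arrays, src[] and dst[], through a running npages counter: src[] receives the encoded candidate pfn (or a 0 / MIGRATE_PFN_MIGRATE marker) and dst[] starts out zeroed, to be filled in once a destination page exists. A stripped-down sketch of that bookkeeping; the struct below is an illustrative stand-in for struct migrate_vma, not its real layout:

    #include <stdio.h>

    struct collect_state {
        unsigned long src[16];
        unsigned long dst[16];
        unsigned long npages;
    };

    /* Record one candidate page (pfn already encoded by the caller); 0 = skip. */
    static void collect_one(struct collect_state *st, unsigned long encoded_pfn)
    {
        st->dst[st->npages] = 0;               /* destination chosen later      */
        st->src[st->npages++] = encoded_pfn;   /* source entry, advance counter */
    }

    int main(void)
    {
        struct collect_state st = { .npages = 0 };

        collect_one(&st, 0);        /* a hole or unsuitable page: skipped        */
        collect_one(&st, 0x1234);   /* an encoded pfn selected for migration     */
        printf("collected %lu entries\n", st.npages);
        return 0;
    }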
/kernel/linux/linux-5.10/drivers/infiniband/core/ |
umem.c |
    151: unsigned long npages;   [ib_umem_get() local]
    191: npages = ib_umem_num_pages(umem);   [in ib_umem_get()]
    192: if (npages == 0 || npages > UINT_MAX) {   [in ib_umem_get()]
    199: new_pinned = atomic64_add_return(npages, &mm->pinned_vm);   [in ib_umem_get()]
    201: atomic64_sub(npages, &mm->pinned_vm);   [in ib_umem_get()]
    211: while (npages) {   [in ib_umem_get()]
    214: min_t(unsigned long, npages,   [in ib_umem_get()]
    222: npages -= ret;   [in ib_umem_get()]
    225: ib_dma_max_seg_size(device), sg, npages,   [in ib_umem_get()]
    [all...]
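ib_umem_get() charges the whole pinned range against mm->pinned_vm up front and rolls the charge back if it would exceed the RLIMIT_MEMLOCK allowance. A condensed kernel-side sketch of that accounting pattern; the function name is illustrative and the surrounding setup of the real function is omitted:

    #include <linux/mm.h>
    #include <linux/sched/mm.h>
    #include <linux/sched/signal.h>
    #include <linux/capability.h>

    static int charge_pinned_pages(struct mm_struct *mm, unsigned long npages)
    {
        unsigned long lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
        unsigned long new_pinned;

        /* Optimistically add the charge, then check it against the limit. */
        new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
        if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
            /* Over the limit: undo the charge and fail the pin. */
            atomic64_sub(npages, &mm->pinned_vm);
            return -ENOMEM;
        }
        return 0;
    }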