Searched refs:pages_mapped (Results 1 - 7 of 7) sorted by relevance
/kernel/linux/linux-5.10/arch/parisc/kernel/
  pci-dma.c (all matches in pcxl_free_range):
    309  unsigned int pages_mapped = size >> PAGE_SHIFT;   [local]
    312  mask >>= BITS_PER_LONG - pages_mapped;
    314  DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
    315          res_idx, size, pages_mapped, mask);
    319  if(pages_mapped <= 8) {
    321  } else if(pages_mapped <= 16) {
    323  } else if(pages_mapped <= 32) {
    330  pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
    331  pcxl_used_bytes -= ((pages_mapped >>
    [all...]
/kernel/linux/linux-6.6/arch/parisc/kernel/
  pci-dma.c (all matches in pcxl_free_range):
    309  unsigned int pages_mapped = size >> PAGE_SHIFT;   [local]
    312  mask >>= BITS_PER_LONG - pages_mapped;
    314  DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n", in pcxl_free_range()
    315          res_idx, size, pages_mapped, mask);
    319  if(pages_mapped <= 8) {
    321  } else if(pages_mapped <= 16) {
    323  } else if(pages_mapped <= 32) {
    330  pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
    331  pcxl_used_bytes -= ((pages_mapped >>
    [all...]
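Both parisc trees free a DMA range by turning pages_mapped into a bit mask, one bit per page: an all-ones word shifted right by BITS_PER_LONG - pages_mapped keeps exactly the low pages_mapped bits, which are then cleared from the resource map. A minimal standalone sketch of that derivation follows; the all-ones starting value and the 64-bit BITS_PER_LONG are assumptions for illustration, not taken from pci-dma.c.

    /* Sketch: derive a one-bit-per-page mask the way line 312 does.
     * Assumes 0 < pages_mapped <= BITS_PER_LONG so the shift stays
     * defined; BITS_PER_LONG = 64 is an assumption here. */
    #include <stdio.h>

    #define BITS_PER_LONG 64

    static unsigned long pages_mask(unsigned int pages_mapped)
    {
        unsigned long mask = ~0UL;               /* all bits set */
        mask >>= BITS_PER_LONG - pages_mapped;   /* keep low pages_mapped bits */
        return mask;
    }

    int main(void)
    {
        /* 12 pages -> 0xfff: twelve resource-map bits to clear */
        printf("mask for 12 pages: 0x%lx\n", pages_mask(12));
        return 0;
    }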
/kernel/linux/linux-5.10/drivers/parisc/
  ccio-dma.c (all matches in ccio_free_range):
    425  * @pages_mapped: The requested number of pages to be freed from the
    431  ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)   [argument]
    436  BUG_ON(pages_mapped == 0);
    437  BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
    438  BUG_ON(pages_mapped > BITS_PER_LONG);
    440  DBG_RES("%s(): res_idx: %d pages_mapped %d\n",
    441          __func__, res_idx, pages_mapped);
    444  ioc->used_pages -= pages_mapped;
    447  if(pages_mapped <= 8) {
    450  unsigned long mask = ~(~0UL >> pages_mapped);
    [all...]
/kernel/linux/linux-6.6/drivers/parisc/
  ccio-dma.c (all matches in ccio_free_range):
    415  * @pages_mapped: The requested number of pages to be freed from the
    421  ccio_free_range(struct ioc *ioc, dma_addr_t iova, unsigned long pages_mapped)   [argument]
    426  BUG_ON(pages_mapped == 0);
    427  BUG_ON((pages_mapped * IOVP_SIZE) > DMA_CHUNK_SIZE);
    428  BUG_ON(pages_mapped > BITS_PER_LONG);
    430  DBG_RES("%s(): res_idx: %d pages_mapped %lu\n",
    431          __func__, res_idx, pages_mapped);
    434  ioc->used_pages -= pages_mapped;
    437  if(pages_mapped <= 8) {
    440  unsigned long mask = ~(~0UL >> pages_mapped);
    [all...]
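ccio_free_range() builds the complementary, high-aligned mask: ~0UL >> pages_mapped clears the top pages_mapped bits, and inverting the result sets exactly those top bits, matching a resource bitmap whose bits are allocated from the most significant end; the BUG_ON()s above bound the count to one word. A standalone sketch of just that expression (not kernel code, and it assumes 0 < pages_mapped < 64 so both shifts stay defined):

    /* Sketch: the high-bit mask trick from the ccio_free_range() match. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long pages_mapped = 12;
        unsigned long mask = ~(~0UL >> pages_mapped);  /* top 12 bits set */
        printf("mask for %lu pages: 0x%016lx\n", pages_mapped, mask);
        return 0;
    }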
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/
  umr.c (all matches in mlx5r_umr_update_xlt):
    699  size_t pages_mapped = 0;   [local]
    742  for (pages_mapped = 0;
    743       pages_mapped < pages_to_map && !err;
    744       pages_mapped += pages_iter, idx += pages_iter) {
    745  npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
    754  if (pages_mapped + pages_iter >= pages_to_map)
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
  mr.c (all matches in mlx5_ib_update_xlt):
    1036  size_t pages_mapped = 0;   [local]
    1113  for (pages_mapped = 0;
    1114       pages_mapped < pages_to_map && !err;
    1115       pages_mapped += pages_iter, idx += pages_iter) {
    1116  npages = min_t(int, pages_iter, pages_to_map - pages_mapped);
    1134  if (pages_mapped + pages_iter >= pages_to_map) {
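Both mlx5 versions drive the same batching loop: translation entries are posted in chunks of pages_iter, min_t() clamps the final partial chunk, and the !err test stops the loop on the first failure. A standalone sketch of the pattern follows; process_batch() and the sizes are hypothetical stand-ins, not the driver's API.

    /* Sketch: walk pages_to_map entries in fixed-size batches,
     * clamping the last batch, stopping on the first error. */
    #include <stddef.h>
    #include <stdio.h>

    static int process_batch(size_t idx, size_t npages)
    {
        printf("batch at %zu: %zu pages\n", idx, npages);
        return 0;  /* 0 = success, mirroring the kernel convention */
    }

    int main(void)
    {
        size_t pages_to_map = 1000, pages_iter = 256, idx = 0;
        size_t pages_mapped, npages;
        int err = 0;

        for (pages_mapped = 0;
             pages_mapped < pages_to_map && !err;
             pages_mapped += pages_iter, idx += pages_iter) {
            /* last batch may be smaller than pages_iter */
            npages = pages_to_map - pages_mapped < pages_iter
                         ? pages_to_map - pages_mapped : pages_iter;
            err = process_batch(idx, npages);
        }
        return err;
    }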
/kernel/linux/linux-6.6/net/ipv4/
  tcp.c (all matches in tcp_zerocopy_vm_insert_batch):
    2004  unsigned int pages_mapped;   [local]
    2009  pages_mapped = pages_to_map - (unsigned int)pages_remaining;
    2010  bytes_mapped = PAGE_SIZE * pages_mapped;
    2021  return tcp_zerocopy_vm_insert_batch_error(vma, pages + pages_mapped,
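The TCP zerocopy path works backwards from a partial insert: given how many pages were requested and how many remain unmapped, it derives pages_mapped and the mapped byte count, then hands the unmapped tail (pages + pages_mapped) to the error path for retry. A minimal sketch of that arithmetic; the PAGE_SIZE value is an assumption for illustration.

    /* Sketch: recover mapped-page/byte counts from a leftover count,
     * as lines 2009-2010 do. PAGE_SIZE = 4096 is assumed. */
    #include <stdio.h>

    #define PAGE_SIZE 4096UL

    int main(void)
    {
        unsigned long pages_to_map = 32, pages_remaining = 5;
        unsigned int pages_mapped = pages_to_map - (unsigned int)pages_remaining;
        unsigned long bytes_mapped = PAGE_SIZE * pages_mapped;

        printf("%u pages (%lu bytes) mapped; retry from page %u\n",
               pages_mapped, bytes_mapped, pages_mapped);
        return 0;
    }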
Completed in 13 milliseconds