Lines Matching refs:bcnt

553 			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
573 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
595 *bytes_mapped += min_t(u32, new_mappings, bcnt);
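
Lines 553-595 fall in pagefault_real_mr() on the mlx5 ODP fault path: bcnt is the length in bytes of the faulting range that starts at user_va, it is passed straight to ib_umem_odp_map_dma_and_lock() at 573, and at 595 the bytes credited to *bytes_mapped are clamped to bcnt so the caller is never told more was mapped than it asked for. Below is a minimal user-space model of that clamp; bytes_mapped_delta() is a hypothetical helper and the new_mappings formula (bytes covered by the np mapped pages, counted from user_va) is an assumption for illustration.

/*
 * Simplified user-space model of the *bytes_mapped accounting at line 595.
 * "np" stands in for the page count returned by ib_umem_odp_map_dma_and_lock()
 * and the new_mappings formula is an assumption: bytes covered by the mapped
 * pages, measured from user_va.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t bytes_mapped_delta(uint64_t user_va, size_t bcnt,
				   int np, int page_shift)
{
	uint64_t page_size = 1ULL << page_shift;
	uint64_t new_mappings = ((uint64_t)np << page_shift) -
				(user_va & (page_size - 1));

	/* Never credit more than the bcnt that was actually requested. */
	return new_mappings < bcnt ? (uint32_t)new_mappings : (uint32_t)bcnt;
}

int main(void)
{
	/* Fault 100 bytes at an unaligned VA; suppose two 4 KiB pages map. */
	printf("%u\n", bytes_mapped_delta(0x10000100ULL, 100, 2, 12));
	return 0;
}
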
606 size_t bcnt, u32 *bytes_mapped, u32 flags)
608 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
616 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
620 while (bcnt) {
643 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
654 bcnt -= len;
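
Lines 606-654 are pagefault_implicit_mr(), where the faulting range is spread across the implicit MR's fixed-size children: end_idx at 608 is the index of the last child touched, the check at 616 rejects a bcnt that would run past mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE, and the while (bcnt) loop at 620 clips each iteration's length to the end of the current child umem (643) before advancing and subtracting it from bcnt (654). A toy model of that chunking loop follows; fault_range() and the 32 MiB child size are illustrative assumptions, not necessarily the driver's MLX5_IMR_MTT_SIZE.

/*
 * Simplified model of the while (bcnt) loop at lines 620-654: split a virtual
 * range across fixed-size child regions.  The child size is assumed for
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define CHILD_SHIFT 25			/* assumed child size: 32 MiB */
#define CHILD_SIZE  (1ULL << CHILD_SHIFT)

static void fault_range(uint64_t user_va, uint64_t bcnt)
{
	while (bcnt) {
		uint64_t idx = user_va >> CHILD_SHIFT;
		uint64_t child_end = (idx + 1) << CHILD_SHIFT;
		/* Clip to the end of the child, like the min_t() at 643. */
		uint64_t len = (user_va + bcnt < child_end ?
				user_va + bcnt : child_end) - user_va;

		printf("child %llu: va=%#llx len=%llu\n",
		       (unsigned long long)idx,
		       (unsigned long long)user_va,
		       (unsigned long long)len);

		user_va += len;
		bcnt -= len;		/* line 654 */
	}
}

int main(void)
{
	/* A range that straddles two children. */
	fault_range(CHILD_SIZE - 4096, 8192);
	return 0;
}
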
690 static int pagefault_dmabuf_mr(struct mlx5_ib_mr *mr, size_t bcnt,
723 *bytes_mapped += bcnt;
730 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
737 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
746 return pagefault_dmabuf_mr(mr, bcnt, bytes_mapped, flags);
755 ib_umem_end(odp) - user_va < bcnt))
757 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
760 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
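
Lines 690-760 show the dispatch. pagefault_dmabuf_mr() (690, 723) handles dmabuf-backed MRs and simply credits the full bcnt to *bytes_mapped. pagefault_mr() (737) is the common entry point: the comment at 730 documents -EFAULT as the case where the io_virt/bcnt range does not sit inside the MR, 746 hands dmabuf MRs off, 755 bounds-checks bcnt against ib_umem_end() for a regular ODP umem before calling pagefault_real_mr() at 757, and the implicit case falls through to pagefault_implicit_mr() at 760. A compilable sketch of just the bcnt checks; struct fake_mr and fault_mr() are hypothetical stand-ins, not the driver's types.

/*
 * Sketch of the dispatch around lines 737-760, reduced to the bcnt checks.
 * The struct below is an illustrative stand-in for struct mlx5_ib_mr.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct fake_mr {
	bool	 is_dmabuf;
	bool	 is_implicit;
	uint64_t iova;		/* MR start in the user's address space */
	uint64_t umem_addr;	/* start of the backing umem */
	uint64_t umem_end;	/* one past the last valid byte */
};

static int fault_mr(const struct fake_mr *mr, uint64_t io_virt, size_t bcnt)
{
	if (io_virt < mr->iova)
		return -EFAULT;
	if (mr->is_dmabuf)
		return 0;	/* dmabuf: whole bcnt is reported mapped */
	if (!mr->is_implicit) {
		uint64_t user_va = io_virt - mr->iova + mr->umem_addr;

		/* Mirrors the check at line 755: bcnt must fit in the umem. */
		if (user_va >= mr->umem_end || mr->umem_end - user_va < bcnt)
			return -EFAULT;
		return 0;	/* regular ODP umem path */
	}
	return 0;		/* implicit ODP path */
}

int main(void)
{
	const struct fake_mr mr = {
		.iova = 0x10000, .umem_addr = 0x7f0000000000ULL,
		.umem_end = 0x7f0000000000ULL + 65536,
	};

	/* A 4 KiB fault inside the MR succeeds; one near the end fails. */
	printf("%d %d\n", fault_mr(&mr, 0x10000, 4096),
	       fault_mr(&mr, 0x10000 + 65536 - 100, 4096));
	return 0;
}
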
788 size_t bcnt;
815 u64 io_virt, size_t bcnt,
828 bcnt -= *bytes_committed;
840 *bytes_mapped += bcnt;
862 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
903 for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) {
917 frame->bcnt = min_t(size_t, bcnt,
923 bcnt -= frame->bcnt;
940 bcnt = frame->bcnt;
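
The size_t bcnt at 788 is a field of the frame structure used to queue deferred work, and lines 815-940 are the handler that resolves one data segment's fault: bytes the hardware already reported as committed are deducted from bcnt up front (828), an mkey that is not ODP-tracked is treated as fully faulted so the whole bcnt is added to *bytes_mapped (840), a plain MR goes to pagefault_mr() (862), and an indirect mkey is expanded by slicing bcnt across its descriptors (903-923), each slice stored in a frame whose key/io_virt/bcnt are later popped and resubmitted (940). Below is a toy model of that slicing walk; struct klm and expand_indirect() are illustrative stand-ins for the driver's descriptor and frame types.

/*
 * Toy model of the descriptor walk at lines 903-923: slice a request of bcnt
 * bytes, starting at a byte offset into an indirect mkey, across that mkey's
 * descriptors.  Names are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

struct klm {
	uint64_t va;		/* start of the region this descriptor maps */
	uint32_t bcount;	/* bytes mapped by this descriptor */
};

static void expand_indirect(const struct klm *klm, int ndescs,
			    uint64_t offset, size_t bcnt)
{
	for (int i = 0; bcnt && i < ndescs; i++) {
		if (offset >= klm[i].bcount) {	/* skip fully-consumed descs */
			offset -= klm[i].bcount;
			continue;
		}
		size_t len = klm[i].bcount - offset;
		if (len > bcnt)
			len = bcnt;		/* the min_t() at line 917 */

		/* In the driver this becomes a frame pushed on a stack. */
		printf("frame: va=%#llx len=%zu\n",
		       (unsigned long long)(klm[i].va + offset), len);

		bcnt -= len;			/* line 923 */
		offset = 0;
	}
}

int main(void)
{
	const struct klm descs[] = {
		{ .va = 0x1000, .bcount = 4096 },
		{ .va = 0x9000, .bcount = 4096 },
	};

	/* 6000 bytes starting 1000 bytes into the first descriptor. */
	expand_indirect(descs, 2, 1000, 6000);
	return 0;
}
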
991 size_t bcnt;
1006 bcnt = byte_count & ~MLX5_INLINE_SEG;
1009 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
1010 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
1017 if (receive_queue && bcnt == 0 &&
1023 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
1028 if (bcnt == 0)
1029 bcnt = 1U << 31;
1031 if (inline_segment || bcnt <= pfault->bytes_committed) {
1033 min_t(size_t, bcnt,
1039 io_virt, bcnt,
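
Lines 991-1039 parse the WQE's data segments. The local bcnt at 991 is the segment's byte_count with the inline flag masked off (1006); an inline segment keeps only the MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK bits and the WQE pointer is advanced past the inline payload (1009-1010); on a receive queue a zero-length segment can end the walk (1017); non-inline bytes beyond what was already committed are accumulated into *total_wqe_bytes (1023); a byte_count of zero designates a 2 GiB (1U << 31) segment (1028-1029); and segments wholly covered by pfault->bytes_committed are skipped, the rest being submitted with the remaining bcnt (1031-1039). A toy decoder for the byte_count word follows; the constant values it uses for the inline flag and the inline length mask are assumptions for illustration, not the kernel's definitions.

/*
 * Toy decoder for a data-segment byte_count word, mirroring lines 1006-1029.
 * The bit positions below are assumed for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INLINE_SEG			0x80000000u	/* assumed inline flag */
#define INLINE_SEG_BYTE_COUNT_MASK	0x3ffu		/* assumed length mask */

static size_t decode_bcnt(uint32_t byte_count, bool *inline_seg)
{
	size_t bcnt = byte_count & ~INLINE_SEG;		/* line 1006 */

	*inline_seg = byte_count & INLINE_SEG;
	if (*inline_seg)
		bcnt &= INLINE_SEG_BYTE_COUNT_MASK;	/* line 1009 */

	/* Lines 1028-1029: a zero length designates a 2 GiB segment. */
	if (bcnt == 0)
		bcnt = 1U << 31;
	return bcnt;
}

int main(void)
{
	bool inl;

	printf("%zu\n", decode_bcnt(0x80000000u | 48, &inl)); /* inline, 48 B */
	printf("%zu\n", decode_bcnt(0, &inl));                 /* 2 GiB */
	return 0;
}
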