Lines matching refs: bcnt

Every match below is from the mlx5 on-demand-paging (ODP) page-fault path in drivers/infiniband/hw/mlx5/odp.c; the leading number on each match is its line number in that file, and bcnt is the byte count of the range being faulted.

671 			     u64 user_va, size_t bcnt, u32 *bytes_mapped,
691 np = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask, fault);
713 *bytes_mapped += min_t(u32, new_mappings, bcnt);
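
In pagefault_real_mr() (line 671), bcnt is the length in bytes of the faulting range. ib_umem_odp_map_dma_and_lock() (line 691) returns the number of pages it mapped, and line 713 converts that page count back to bytes and clamps it to bcnt, so the caller is never told that more was mapped than it asked for. A minimal userspace sketch of that accounting, assuming np is non-negative; the names report_mapped_bytes and page_shift are illustrative, not the kernel's:

#include <stddef.h>
#include <stdint.h>

/* Simplified model of line 713: np pages were just mapped starting at
 * the page containing user_va, but the caller only asked for bcnt
 * bytes, so the reported amount is clamped to the request. */
static void report_mapped_bytes(uint64_t user_va, size_t bcnt,
                                long np, unsigned int page_shift,
                                uint32_t *bytes_mapped)
{
        uint64_t page_size = (uint64_t)1 << page_shift;
        /* Bytes covered by the np mapped pages, minus the offset of
         * user_va into its first page. */
        uint64_t new_mappings = ((uint64_t)np << page_shift) -
                                (user_va & (page_size - 1));

        if (bytes_mapped)
                *bytes_mapped += new_mappings < bcnt ?
                                 (uint32_t)new_mappings : (uint32_t)bcnt;
}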
724 size_t bcnt, u32 *bytes_mapped, u32 flags)
726 unsigned long end_idx = (user_va + bcnt - 1) >> MLX5_IMR_MTT_SHIFT;
734 mlx5_imr_ksm_entries * MLX5_IMR_MTT_SIZE - user_va < bcnt))
738 while (bcnt) {
756 len = min_t(u64, user_va + bcnt, ib_umem_end(umem_odp)) -
764 bcnt -= len;
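
pagefault_implicit_mr() (line 724) services an implicit (whole-address-space) MR by carving the request into per-child chunks: line 726 computes the index of the last child MTT touched, line 734 rejects requests that run past the implicit MR, and the while (bcnt) loop (line 738) faults one child at a time, with line 756 limiting each step to the end of the current child's umem and line 764 consuming that many bytes. A standalone sketch of the carving loop, with CHILD_SHIFT as a stand-in for MLX5_IMR_MTT_SHIFT (the real value differs) and fault_child() standing in for the per-child fault:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define CHILD_SHIFT 24                    /* stand-in for MLX5_IMR_MTT_SHIFT */
#define CHILD_SIZE  (1ULL << CHILD_SHIFT) /* stand-in for MLX5_IMR_MTT_SIZE */

static int fault_child(unsigned long idx, uint64_t va, uint64_t len)
{
        printf("child %lu: va=0x%llx len=%llu\n", idx,
               (unsigned long long)va, (unsigned long long)len);
        return 0;
}

/* Simplified model of lines 738-764: split [user_va, user_va + bcnt)
 * into chunks that never cross a CHILD_SIZE boundary. */
static int fault_range(uint64_t user_va, size_t bcnt)
{
        while (bcnt) {
                unsigned long idx = user_va >> CHILD_SHIFT;
                uint64_t child_end = (uint64_t)(idx + 1) << CHILD_SHIFT;
                /* Fault up to the end of this child or the end of the
                 * request, whichever comes first (cf. line 756). */
                uint64_t end = user_va + bcnt < child_end ?
                               user_va + bcnt : child_end;
                uint64_t len = end - user_va;
                int err = fault_child(idx, user_va, len);

                if (err)
                        return err;
                bcnt -= len;            /* line 764 */
                user_va += len;
        }
        return 0;
}

int main(void)
{
        return fault_range(CHILD_SIZE - 4096, 8192); /* spans two children */
}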
802 * -EFAULT: The io_virt->bcnt is not within the MR, it covers pages that are
809 static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt,
825 ib_umem_end(odp) - user_va < bcnt))
827 return pagefault_real_mr(mr, odp, user_va, bcnt, bytes_mapped,
830 return pagefault_implicit_mr(mr, odp, io_virt, bcnt, bytes_mapped,
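
pagefault_mr() (line 809) is the dispatcher: after translating io_virt into a user virtual address, line 825 verifies that the whole range fits inside the MR's umem and returns -EFAULT otherwise, which is the failure mode documented in the comment at line 802; valid ranges go to pagefault_real_mr() (line 827) or, for implicit MRs, to pagefault_implicit_mr() (line 830). A sketch of the overflow-safe containment check, with illustrative names:

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Simplified model of lines 809-830: the range [va, va + bcnt) must lie
 * inside [umem_start, umem_end), and the check must not overflow. */
static bool range_in_umem(uint64_t va, size_t bcnt,
                          uint64_t umem_start, uint64_t umem_end)
{
        if (va < umem_start || va >= umem_end)
                return false;
        /* Written as "end - va < bcnt" (cf. line 825) instead of
         * "va + bcnt > end" so a huge bcnt cannot wrap around. */
        return umem_end - va >= bcnt;
}

static int pagefault_range(uint64_t va, size_t bcnt,
                           uint64_t umem_start, uint64_t umem_end)
{
        if (!range_in_umem(va, bcnt, umem_start, umem_end))
                return -EFAULT; /* the -EFAULT case described at line 802 */
        /* ... fault and map the pages here ... */
        return 0;
}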
852 size_t bcnt;
893 u64 io_virt, size_t bcnt,
909 bcnt -= *bytes_committed;
919 *bytes_mapped += bcnt;
938 ret = pagefault_mr(mr, io_virt, bcnt, bytes_mapped, 0);
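
pagefault_single_data_segment() (line 893) resolves one scatter entry by key. Line 909 first discounts the bytes a previous pass already committed; if the key does not resolve to an ODP MR, the remainder is simply reported as mapped (line 919, since non-ODP memory never faults), otherwise pagefault_mr() is invoked for it (line 938). A sketch of that decision, assuming (as the callers arrange) that bcnt exceeds *bytes_committed on entry; resolve_segment and fault_fn are hypothetical names:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical callback standing in for pagefault_mr() (line 938). */
typedef int (*fault_fn)(uint64_t io_virt, size_t bcnt, uint32_t *bytes_mapped);

/* Simplified model of lines 909-938.  Assumes bcnt > *bytes_committed
 * on entry, which the caller arranges by skipping fully committed
 * segments (see line 1104 below). */
static int resolve_segment(uint64_t io_virt, size_t bcnt,
                           uint32_t *bytes_committed, int key_is_odp,
                           fault_fn fault, uint32_t *bytes_mapped)
{
        io_virt += *bytes_committed;
        bcnt -= *bytes_committed;       /* line 909 */

        if (!key_is_odp) {
                /* A non-ODP key in the SGL never faults: report its
                 * whole remainder as mapped (cf. line 919). */
                if (bytes_mapped)
                        *bytes_mapped += bcnt;
                return 0;
        }
        return fault(io_virt, bcnt, bytes_mapped);      /* line 938 */
}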
981 for (i = 0; bcnt && i < ndescs; i++, pklm++) {
995 frame->bcnt = min_t(size_t, bcnt,
1001 bcnt -= frame->bcnt;
1018 bcnt = frame->bcnt;
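
For indirect mkeys (KLM/KSM), pagefault_single_data_segment() walks the descriptor list iteratively rather than recursively: struct pf_frame (whose bcnt member is at line 852) records one pending sub-range, the loop at line 981 skips descriptors below the starting offset and pushes a frame per overlapped descriptor with its bcnt clamped to what remains (lines 995 and 1001), and line 1018 pops a frame to continue resolution with its saved byte count. A self-contained sketch of that push/pop traversal; struct klm, struct frame, and push_frames() here are simplified stand-ins for the driver's types:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for the driver's descriptor and pf_frame. */
struct klm {
        uint32_t key;
        uint32_t bcount;        /* bytes this entry covers */
        uint64_t va;
};

struct frame {
        uint32_t key;
        uint64_t io_virt;
        size_t bcnt;
        struct frame *next;
};

/* Simplified model of lines 981-1001: skip descriptors wholly below the
 * starting offset, then push one frame per descriptor overlapped by
 * [offset, offset + bcnt), clamping each frame's bcnt to what is left. */
static struct frame *push_frames(const struct klm *pklm, int ndescs,
                                 uint64_t offset, size_t bcnt)
{
        struct frame *head = NULL;
        int i;

        for (i = 0; bcnt && i < ndescs; i++, pklm++) {
                if (offset >= pklm->bcount) {
                        offset -= pklm->bcount;
                        continue;
                }

                struct frame *f = calloc(1, sizeof(*f));
                if (!f)
                        return head;    /* error handling elided */
                f->key = pklm->key;
                f->io_virt = pklm->va + offset;
                f->bcnt = bcnt < pklm->bcount - offset ?
                          bcnt : pklm->bcount - offset; /* cf. line 995 */
                f->next = head;
                head = f;

                bcnt -= f->bcnt;        /* line 1001 */
                offset = 0;
        }
        return head;
}

int main(void)
{
        struct klm descs[] = { { 1, 4096, 0x1000 }, { 2, 8192, 0x9000 } };
        struct frame *f = push_frames(descs, 2, 2048, 6144);

        /* Pop frames the way the driver re-enters its loop at line 1018. */
        while (f) {
                struct frame *next = f->next;
                printf("key=%u io_virt=0x%llx bcnt=%zu\n", f->key,
                       (unsigned long long)f->io_virt, f->bcnt);
                free(f);
                f = next;
        }
        return 0;
}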
1065 size_t bcnt;
1080 bcnt = byte_count & ~MLX5_INLINE_SEG;
1083 bcnt = bcnt & MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK;
1084 wqe += ALIGN(sizeof(struct mlx5_wqe_inline_seg) + bcnt,
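
pagefault_data_segments() (line 1065) parses the data segments of the faulting WQE. Line 1080 strips the inline flag from the segment's byte count; for inline segments, line 1083 additionally masks the count down to the bits the inline header defines, and line 1084 advances the WQE cursor past the 16-byte-aligned inline payload. A sketch of that decode; the flag and mask values are stand-ins (the real MLX5_INLINE_SEG and MLX5_WQE_INLINE_SEG_BYTE_COUNT_MASK may differ), as is the header-size parameter:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define INLINE_SEG_FLAG  0x80000000u /* stand-in for MLX5_INLINE_SEG */
#define INLINE_BCNT_MASK 0x3ffu      /* stand-in for the inline count mask */
#define ALIGN16(x)       (((x) + 15u) & ~15u)

struct seg_info {
        bool inline_segment;
        size_t bcnt;
        size_t advance;         /* bytes to advance the wqe cursor */
};

/* Simplified model of lines 1080-1084: split a raw byte_count word into
 * the inline flag and the byte count, and compute how far the WQE
 * cursor moves for this segment. */
static struct seg_info decode_byte_count(uint32_t byte_count,
                                         size_t inline_hdr_size,
                                         size_t data_seg_size)
{
        struct seg_info s;

        s.inline_segment = byte_count & INLINE_SEG_FLAG;
        s.bcnt = byte_count & ~INLINE_SEG_FLAG;         /* line 1080 */
        if (s.inline_segment) {
                s.bcnt &= INLINE_BCNT_MASK;             /* line 1083 */
                /* Inline data lives in the WQE itself, padded to 16 B. */
                s.advance = ALIGN16(inline_hdr_size + s.bcnt); /* line 1084 */
        } else {
                s.advance = data_seg_size;
        }
        return s;
}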
1091 if (receive_queue && bcnt == 0 && key == MLX5_INVALID_LKEY &&
1096 *total_wqe_bytes += bcnt - min_t(size_t, bcnt,
1101 if (bcnt == 0)
1102 bcnt = 1U << 31;
1104 if (inline_segment || bcnt <= pfault->bytes_committed) {
1106 min_t(size_t, bcnt,
1112 io_virt, bcnt,
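
The remaining matches are the per-segment bookkeeping: a zero-length segment with the invalid lkey ends a receive WQE's scatter list (line 1091); line 1096 counts only the not-yet-committed part of each segment toward *total_wqe_bytes; a zero bcnt on a non-inline segment means the maximum transfer, so line 1102 substitutes 2 GiB (1U << 31); and segments fully covered by bytes_committed are skipped, merely draining the counter (line 1104), while anything left over is handed to pagefault_single_data_segment() (line 1112). A sketch of the skip-or-fault decision:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified model of lines 1101-1112: a zero byte count means the
 * 2 GiB maximum, and a segment already covered by bytes_committed is
 * skipped, consuming its share of the committed counter. */
static int handle_segment(size_t bcnt, int inline_segment,
                          uint32_t *bytes_committed)
{
        if (bcnt == 0)          /* zero length = 2 GiB (line 1102) */
                bcnt = 1U << 31;

        if (inline_segment || bcnt <= *bytes_committed) {
                /* Already resolved by a previous pass; just account
                 * for it (cf. line 1104). */
                *bytes_committed -= bcnt < *bytes_committed ?
                                    (uint32_t)bcnt : *bytes_committed;
                return 0;       /* skip: nothing left to fault */
        }
        return 1;               /* fault the remainder (line 1112) */
}

int main(void)
{
        uint32_t committed = 4096;

        /* The first 4096-byte segment is fully committed and skipped ... */
        printf("seg1 fault=%d committed=%u\n",
               handle_segment(4096, 0, &committed), committed);
        /* ... the next one must be faulted. */
        printf("seg2 fault=%d committed=%u\n",
               handle_segment(8192, 0, &committed), committed);
        return 0;
}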