Searched refs:umrwr (Results 1 - 2 of 2) sorted by relevance
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/
wr.c
    323   const struct mlx5_umr_wr *umrwr = umr_wr(wr);  [in set_reg_umr_segment(), local]
    327   if (!umrwr->ignore_free_state) {  [in set_reg_umr_segment()]
    336   umr->xlt_octowords = cpu_to_be16(get_xlt_octo(umrwr->xlt_size));  [in set_reg_umr_segment()]
    338   u64 offset = get_xlt_octo(umrwr->offset);  [in set_reg_umr_segment()]
    405   const struct mlx5_umr_wr *umrwr = umr_wr(wr);  [in set_reg_mkey_segment(), local]
    412   !!(umrwr->access_flags & IB_ACCESS_REMOTE_ATOMIC));  [in set_reg_mkey_segment()]
    414   !!(umrwr->access_flags & IB_ACCESS_REMOTE_WRITE));  [in set_reg_mkey_segment()]
    415   MLX5_SET(mkc, seg, rr, !!(umrwr->access_flags & IB_ACCESS_REMOTE_READ));  [in set_reg_mkey_segment()]
    416   MLX5_SET(mkc, seg, lw, !!(umrwr->access_flags & IB_ACCESS_LOCAL_WRITE));  [in set_reg_mkey_segment()]
    420   !!(umrwr ...  [in set_reg_mkey_segment()]
    [all ...]
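The wr.c hits above are the two helpers that translate a struct mlx5_umr_wr into on-the-wire UMR segments: set_reg_umr_segment() packs the translation-table size and offset (xlt_octowords, offset), while set_reg_mkey_segment() copies the requested IB access flags into one-bit mkey-context fields, collapsing each flag to 0/1 with the !!(flags & BIT) idiom before MLX5_SET() writes it. The stand-alone sketch below only illustrates that flag-to-boolean idiom; the constants, the mkey_ctx struct and the pack_mkey() helper are illustrative stand-ins, not the kernel's MLX5_SET() machinery or the real IB_ACCESS_* values.

/*
 * Sketch of the flag-translation idiom visible in set_reg_mkey_segment():
 * each access bit is collapsed to 0/1 with "!!" before being written into
 * a one-bit descriptor field.  All names and values here are stand-ins.
 */
#include <stdint.h>
#include <stdio.h>

#define ACCESS_LOCAL_WRITE   (1u << 0)  /* stand-in for IB_ACCESS_LOCAL_WRITE   */
#define ACCESS_REMOTE_WRITE  (1u << 1)  /* stand-in for IB_ACCESS_REMOTE_WRITE  */
#define ACCESS_REMOTE_READ   (1u << 2)  /* stand-in for IB_ACCESS_REMOTE_READ   */
#define ACCESS_REMOTE_ATOMIC (1u << 3)  /* stand-in for IB_ACCESS_REMOTE_ATOMIC */

struct mkey_ctx {        /* toy stand-in for the mkey context segment */
	uint8_t a;       /* remote atomic */
	uint8_t rw;      /* remote write  */
	uint8_t rr;      /* remote read   */
	uint8_t lw;      /* local write   */
};

static void pack_mkey(struct mkey_ctx *seg, unsigned int access_flags)
{
	/* Same shape as the MLX5_SET(mkc, seg, ...) calls in wr.c. */
	seg->a  = !!(access_flags & ACCESS_REMOTE_ATOMIC);
	seg->rw = !!(access_flags & ACCESS_REMOTE_WRITE);
	seg->rr = !!(access_flags & ACCESS_REMOTE_READ);
	seg->lw = !!(access_flags & ACCESS_LOCAL_WRITE);
}

int main(void)
{
	struct mkey_ctx seg = { 0 };

	pack_mkey(&seg, ACCESS_LOCAL_WRITE | ACCESS_REMOTE_READ);
	printf("a=%u rw=%u rr=%u lw=%u\n", seg.a, seg.rw, seg.rr, seg.lw);
	return 0;
}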
mr.c
    943    struct mlx5_umr_wr *umrwr)  [in mlx5_ib_post_send_wait()]
    951    umrwr->wr.wr_cqe = &umr_context.cqe;  [in mlx5_ib_post_send_wait()]
    954    err = ib_post_send(umrc->qp, &umrwr->wr, &bad);  [in mlx5_ib_post_send_wait()]
    1479   struct mlx5_umr_wr umrwr = {};  [in mlx5_mr_cache_invalidate(), local]
    1484   umrwr.wr.send_flags = MLX5_IB_SEND_UMR_DISABLE_MR |  [in mlx5_mr_cache_invalidate()]
    1486   umrwr.wr.opcode = MLX5_IB_WR_UMR;  [in mlx5_mr_cache_invalidate()]
    1487   umrwr.pd = mr->dev->umrc.pd;  [in mlx5_mr_cache_invalidate()]
    1488   umrwr.mkey = mr->mmkey.key;  [in mlx5_mr_cache_invalidate()]
    1489   umrwr.ignore_free_state = 1;  [in mlx5_mr_cache_invalidate()]
    1491   return mlx5_ib_post_send_wait(mr->dev, &umrwr);  [in mlx5_mr_cache_invalidate()]
    942    mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev, struct mlx5_umr_wr *umrwr)  [mlx5_ib_post_send_wait() argument]
    1498   struct mlx5_umr_wr umrwr = {};  [in rereg_umr(), local]
    [all ...]
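The mr.c hits above show the other side of the interface: callers such as mlx5_mr_cache_invalidate() and rereg_umr() build a zero-initialized struct mlx5_umr_wr on the stack, fill in the opcode (MLX5_IB_WR_UMR), send flags, pd, mkey and ignore_free_state, and hand it to mlx5_ib_post_send_wait(), which attaches a completion CQE and posts the request with ib_post_send(). The sketch below mirrors only that fill-then-post-and-wait shape with toy types; every name and value is a stand-in, and the second send flag elided at hit line 1485 is deliberately left out.

/*
 * Sketch of the call pattern in the mr.c hits: zero-initialize a work
 * request, fill it field by field, then hand it to a synchronous
 * post-and-wait helper that attaches a completion context first.
 * All types, names and values are illustrative stand-ins.
 */
#include <stdio.h>
#include <string.h>

struct toy_wr {                  /* stand-in for struct mlx5_umr_wr */
	unsigned int opcode;
	unsigned int send_flags;
	unsigned int mkey;
	int ignore_free_state;
	void *completion;        /* stand-in for wr.wr_cqe */
};

#define TOY_WR_UMR          0x25u     /* stand-in for MLX5_IB_WR_UMR */
#define TOY_UMR_DISABLE_MR  (1u << 0) /* stand-in for MLX5_IB_SEND_UMR_DISABLE_MR */

/* Stand-in for mlx5_ib_post_send_wait(): attach a completion context,
 * post the request, then block until it completes. */
static int post_send_wait(struct toy_wr *wr)
{
	static int done;             /* pretend completion object */

	wr->completion = &done;      /* like umrwr->wr.wr_cqe = &umr_context.cqe */
	printf("posting opcode=%#x flags=%#x mkey=%#x ignore_free_state=%d\n",
	       wr->opcode, wr->send_flags, wr->mkey, wr->ignore_free_state);
	/* a real driver would ib_post_send() here and wait for the CQE */
	return 0;
}

int main(void)
{
	struct toy_wr umrwr;

	memset(&umrwr, 0, sizeof(umrwr));   /* like "struct mlx5_umr_wr umrwr = {};" */
	umrwr.send_flags = TOY_UMR_DISABLE_MR;
	umrwr.opcode = TOY_WR_UMR;
	umrwr.mkey = 0x1234;
	umrwr.ignore_free_state = 1;

	return post_send_wait(&umrwr);
}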
Completed in 5 milliseconds