Lines Matching refs:imr
98 struct mlx5_ib_mr *imr, int flags)
105 pklm->key = cpu_to_be32(imr->dev->null_mkey);
131 lockdep_assert_held(&to_ib_umem_odp(imr->umem)->umem_mutex);
132 lockdep_assert_held(&imr->dev->odp_srcu);
135 struct mlx5_ib_mr *mtt = xa_load(&imr->implicit_children, idx);
142 pklm->key = cpu_to_be32(imr->dev->null_mkey);
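The matches at 98-142 come from the KLM population path: each slot of the parent's translation table either carries the key of a child found in imr->implicit_children, or is backed by the device-wide null_mkey when no child exists yet. A minimal sketch of that lookup-or-null pattern, with stand-in types (struct klm_entry and struct child_mr are not the driver's definitions):

#include <linux/types.h>
#include <linux/xarray.h>
#include <asm/byteorder.h>

struct klm_entry {
        __be32 key;             /* stand-in for the KLM key field */
};

struct child_mr {
        u32 lkey;               /* stand-in for a child's mkey */
};

static void fill_klm_entry(struct xarray *children, unsigned long idx,
                           u32 null_mkey, struct klm_entry *pklm)
{
        struct child_mr *child = xa_load(children, idx);

        if (child)
                /* A populated slot exposes the child's own key. */
                pklm->key = cpu_to_be32(child->lkey);
        else
                /* An empty slot points at the HCA-wide null mkey. */
                pklm->key = cpu_to_be32(null_mkey);
}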
215 struct mlx5_ib_mr *imr = mr->parent;
216 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
239 if (atomic_dec_and_test(&imr->num_deferred_work))
240 wake_up(&imr->q_deferred_work);
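215-240 sit in the child tear-down path; together with the wait_event() calls at 607 and 631 they form a counted deferred-work handshake: every queued destruction holds a count on the parent, and the final decrement wakes whoever is blocked in mlx5_ib_free_implicit_mr(). A sketch of that pairing with illustrative names:

#include <linux/atomic.h>
#include <linux/wait.h>

struct parent_mr {
        atomic_t num_deferred_work;
        wait_queue_head_t q_deferred_work;
};

/* Called once per completed deferred destruction (cf. 239-240). */
static void deferred_work_done(struct parent_mr *p)
{
        if (atomic_dec_and_test(&p->num_deferred_work))
                wake_up(&p->q_deferred_work);
}

/* Called by the destroyer before the parent may be freed (cf. 607, 631). */
static void wait_deferred_work(struct parent_mr *p)
{
        wait_event(p->q_deferred_work,
                   !atomic_read(&p->num_deferred_work));
}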
265 struct mlx5_ib_mr *imr = mr->parent;
267 xa_lock(&imr->implicit_children);
272 if (__xa_cmpxchg(&imr->implicit_children, idx, mr, NULL, GFP_ATOMIC) !=
276 atomic_inc(&imr->num_deferred_work);
281 xa_unlock(&imr->implicit_children);
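265-281 detach an unused child from the parent under the xarray spinlock: __xa_cmpxchg() clears the slot only if it still holds this particular child, and a successful detach takes a deferred-work count before the destruction is queued. Roughly, with simplified types of my own:

#include <linux/atomic.h>
#include <linux/xarray.h>

struct parent_mr {
        atomic_t num_deferred_work;
        struct xarray implicit_children;
};

/*
 * Detach @child at @idx only if it is still the entry stored there;
 * returns true when this caller now owns its deferred destruction.
 */
static bool detach_child(struct parent_mr *p, unsigned long idx, void *child)
{
        bool detached = false;

        xa_lock(&p->implicit_children);
        if (__xa_cmpxchg(&p->implicit_children, idx, child, NULL,
                         GFP_ATOMIC) == child) {
                atomic_inc(&p->num_deferred_work);
                detached = true;
        }
        xa_unlock(&p->implicit_children);
        return detached;
}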
465 static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr,
473 odp = ib_umem_odp_alloc_child(to_ib_umem_odp(imr->umem),
479 ret = mr = mlx5_mr_cache_alloc(imr->dev, MLX5_IMR_MTT_CACHE_ENTRY,
480 imr->access_flags);
484 mr->ibmr.pd = imr->ibmr.pd;
489 mr->parent = imr;
506 ret = xa_cmpxchg(&imr->implicit_children, idx, NULL, mr,
520 mlx5_ib_dbg(imr->dev, "key %x mr %p\n", mr->mmkey.key, mr);
524 mlx5_mr_cache_free(imr->dev, mr);
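implicit_get_child_mr() (465-524) builds a child MR and publishes it with xa_cmpxchg(NULL -> mr); losing the race means another fault already installed a child at that index, so the fresh one is released and the existing one returned. A hedged sketch of that insert-or-reuse pattern; alloc_child() and free_child() are hypothetical stand-ins for the cache alloc/free calls above:

#include <linux/err.h>
#include <linux/xarray.h>

struct child_mr;                                     /* opaque for this sketch */

struct child_mr *alloc_child(unsigned long idx);     /* hypothetical */
void free_child(struct child_mr *child);             /* hypothetical */

static struct child_mr *get_or_insert_child(struct xarray *children,
                                            unsigned long idx)
{
        struct child_mr *child, *old;

        child = alloc_child(idx);
        if (IS_ERR(child))
                return child;

        /* Publish only if the slot is still empty (cf. 506). */
        old = xa_cmpxchg(children, idx, NULL, child, GFP_KERNEL);
        if (old) {
                free_child(child);
                if (xa_is_err(old))
                        return ERR_PTR(xa_err(old));
                /* Another fault won the race; reuse its child. */
                return old;
        }
        return child;
}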
536 struct mlx5_ib_mr *imr;
543 imr = mlx5_mr_cache_alloc(dev, MLX5_IMR_KSM_CACHE_ENTRY, access_flags);
544 if (IS_ERR(imr)) {
545 err = PTR_ERR(imr);
549 imr->ibmr.pd = &pd->ibpd;
550 imr->mmkey.iova = 0;
551 imr->umem = &umem_odp->umem;
552 imr->ibmr.lkey = imr->mmkey.key;
553 imr->ibmr.rkey = imr->mmkey.key;
554 imr->umem = &umem_odp->umem;
555 imr->is_odp_implicit = true;
556 atomic_set(&imr->num_deferred_work, 0);
557 init_waitqueue_head(&imr->q_deferred_work);
558 xa_init(&imr->implicit_children);
560 err = mlx5_ib_update_xlt(imr, 0,
569 err = xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key),
570 &imr->mmkey, GFP_KERNEL));
574 mlx5_ib_dbg(dev, "key %x mr %p\n", imr->mmkey.key, imr);
575 return imr;
578 mlx5_mr_cache_free(dev, imr);
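mlx5_ib_alloc_implicit_mr() (536-578) sets up the parent's bookkeeping: the deferred-work counter and waitqueue, the empty children xarray, and the mkey published into dev->odp_mkeys, where xa_store()'s return value is reduced to an errno with xa_err(). A compressed sketch of that initialization, with invented field grouping:

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/xarray.h>

struct parent_mr {
        atomic_t num_deferred_work;
        wait_queue_head_t q_deferred_work;
        struct xarray implicit_children;
};

static int init_and_publish(struct parent_mr *p, struct xarray *odp_mkeys,
                            unsigned long base_key)
{
        atomic_set(&p->num_deferred_work, 0);
        init_waitqueue_head(&p->q_deferred_work);
        xa_init(&p->implicit_children);

        /*
         * xa_store() returns the displaced entry or an error entry;
         * xa_err() turns that into 0 / -errno (cf. 569-570).
         */
        return xa_err(xa_store(odp_mkeys, base_key, p, GFP_KERNEL));
}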
584 void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
586 struct ib_umem_odp *odp_imr = to_ib_umem_odp(imr->umem);
587 struct mlx5_ib_dev *dev = imr->dev;
595 xa_erase(&dev->odp_mkeys, mlx5_base_mkey(imr->mmkey.key));
598 * the imr or any children. The page fault path can only reach the
599 * children xarray via the imr.
607 wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
611 * pagefault_mr() on this imr. It is already forbidden to call
620 xa_lock(&imr->implicit_children);
621 xa_for_each (&imr->implicit_children, idx, mtt) {
622 __xa_erase(&imr->implicit_children, idx);
625 xa_unlock(&imr->implicit_children);
631 wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
634 * Fence the imr before we destroy the children. This allows us to
635 * skip updating the XLT of the imr during destroy of the child mkey
636 * the imr points to.
638 mlx5_mr_cache_invalidate(imr);
643 mlx5_mr_cache_free(dev, imr);
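mlx5_ib_free_implicit_mr() (584-643) unpublishes the parent mkey, waits out deferred work, detaches every child under the xarray lock, waits again, and only then fences the parent and destroys the children. A sketch of the child-drain step at 620-625; the destroy-list linkage is an assumption for illustration:

#include <linux/list.h>
#include <linux/xarray.h>

struct child_mr {
        struct list_head destroy_elm;   /* illustrative linkage */
};

static void drain_children(struct xarray *children,
                           struct list_head *destroy_list)
{
        struct child_mr *child;
        unsigned long idx;

        xa_lock(children);
        xa_for_each(children, idx, child) {
                /* Detach under the lock, destroy after it is dropped. */
                __xa_erase(children, idx);
                list_add_tail(&child->destroy_elm, destroy_list);
        }
        xa_unlock(children);
}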
722 static int pagefault_implicit_mr(struct mlx5_ib_mr *imr,
744 mtt = xa_load(&imr->implicit_children, idx);
746 mtt = implicit_get_child_mr(imr, idx);
789 err = mlx5_ib_update_xlt(imr, upd_start_idx, upd_len, 0,
794 mlx5_ib_err(imr->dev, "Failed to update PAS\n");
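pagefault_implicit_mr() (722-794) resolves a fault by walking the child index range: each index is looked up in implicit_children and created on demand, and the newly populated span is then pushed to the parent with a single mlx5_ib_update_xlt() call. A rough sketch of that walk; child_shift, fault_child() and update_parent_xlt() are illustrative assumptions, not the driver's interface:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/xarray.h>

struct child_mr;

struct child_mr *get_or_insert_child(struct xarray *children,
                                     unsigned long idx);            /* see sketch above */
int fault_child(struct child_mr *child, u64 io_virt, size_t bcnt);  /* hypothetical */
int update_parent_xlt(unsigned long first_idx, size_t nentries);    /* hypothetical */

static int fault_parent_range(struct xarray *children, u64 io_virt,
                              size_t bcnt, unsigned int child_shift)
{
        unsigned long first = io_virt >> child_shift;
        unsigned long last = (io_virt + bcnt - 1) >> child_shift;
        unsigned long upd_first = ULONG_MAX, upd_last = 0;
        unsigned long idx;
        int err;

        for (idx = first; idx <= last; idx++) {
                struct child_mr *child = xa_load(children, idx);

                if (!child) {
                        child = get_or_insert_child(children, idx);
                        if (IS_ERR(child))
                                return PTR_ERR(child);
                        /* Remember the span of newly created children. */
                        if (upd_first == ULONG_MAX)
                                upd_first = idx;
                        upd_last = idx;
                }
                err = fault_child(child, io_virt, bcnt);
                if (err)
                        return err;
        }

        /* One XLT update over the newly populated span (cf. 789). */
        if (upd_first != ULONG_MAX)
                return update_parent_xlt(upd_first, upd_last - upd_first + 1);
        return 0;
}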