Lines Matching refs:mr

126 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
128 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
130 return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
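The three matches at 126, 128 and 130 span the whole of destroy_mkey(); reconstructed (only the braces and the blank line are assumed, the rest comes from the matched lines), the helper checks that an ODP-tracked mkey has already been dropped from the odp_mkeys xarray before destroying it in firmware:

    static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
    {
            /* An ODP-tracked mkey must already be gone from odp_mkeys */
            WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));

            return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key);
    }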
138 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
749 struct mlx5_ib_mr *mr;
752 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
753 if (!mr)
763 err = create_cache_mkey(ent, &mr->mmkey.key);
768 kfree(mr);
772 mr->mmkey.key = pop_stored_mkey(ent);
776 mr->mmkey.cache_ent = ent;
777 mr->mmkey.type = MLX5_MKEY_MR;
778 init_waitqueue_head(&mr->mmkey.wait);
779 return mr;
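The matches at 749-779 come from the cache allocation helper (_mlx5_mr_cache_alloc()). Because the listing only keeps lines that mention mr, the control flow below is a hedged sketch: the empty-cache test, the xarray locking and the cache-usage accounting are assumptions, while every line that touches mr is taken verbatim from the matches.

    struct mlx5_ib_mr *mr;
    int err;

    mr = kzalloc(sizeof(*mr), GFP_KERNEL);
    if (!mr)
            return ERR_PTR(-ENOMEM);                 /* error value assumed */

    if (!ent->stored) {                              /* assumption: no mkeys parked in this entry */
            /* Cache miss: create a fresh mkey matching the entry's layout */
            err = create_cache_mkey(ent, &mr->mmkey.key);
            if (err) {
                    kfree(mr);
                    return ERR_PTR(err);
            }
    } else {
            /* Cache hit: reuse an mkey stored earlier */
            mr->mmkey.key = pop_stored_mkey(ent);
    }

    mr->mmkey.cache_ent = ent;
    mr->mmkey.type = MLX5_MKEY_MR;
    init_waitqueue_head(&mr->mmkey.wait);
    return mr;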
1068 struct mlx5_ib_mr *mr;
1073 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1074 if (!mr)
1090 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1095 mr->mmkey.type = MLX5_MKEY_MR;
1096 mr->ibmr.lkey = mr->mmkey.key;
1097 mr->ibmr.rkey = mr->mmkey.key;
1098 mr->umem = NULL;
1100 return &mr->ibmr;
1106 kfree(mr);
1129 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
1132 mr->ibmr.lkey = mr->mmkey.key;
1133 mr->ibmr.rkey = mr->mmkey.key;
1134 mr->ibmr.length = length;
1135 mr->ibmr.device = &dev->ib_dev;
1136 mr->ibmr.iova = iova;
1137 mr->access_flags = access_flags;
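The matches at 1129-1137 are essentially the complete set_mr_fields() helper; only the tail of the signature (the length, access_flags and iova parameters) and the braces are assumed in this reconstruction. It mirrors the hardware mkey into the caller-visible ib_mr:

    static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
                              u64 length, int access_flags, u64 iova)
    {
            mr->ibmr.lkey = mr->mmkey.key;    /* lkey and rkey share the same mkey */
            mr->ibmr.rkey = mr->mmkey.key;
            mr->ibmr.length = length;
            mr->ibmr.device = &dev->ib_dev;
            mr->ibmr.iova = iova;
            mr->access_flags = access_flags;
    }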
1160 struct mlx5_ib_mr *mr;
1181 mr = reg_create(pd, umem, iova, access_flags, page_size, false);
1183 if (IS_ERR(mr))
1184 return mr;
1185 mr->mmkey.rb_key = rb_key;
1186 return mr;
1189 mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
1190 if (IS_ERR(mr))
1191 return mr;
1193 mr->ibmr.pd = pd;
1194 mr->umem = umem;
1195 mr->page_shift = order_base_2(page_size);
1196 set_mr_fields(dev, mr, umem->length, access_flags, iova);
1198 return mr;
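The matches at 1160-1198 show the two outcomes inside alloc_cacheable_mr(): if no cache entry can serve the request the MR is created directly with reg_create() and tagged with its rb_key, otherwise it is drawn from the cache and only the caller-visible fields are filled in. A condensed sketch of that branch (the entry lookup and the exact "no usable entry" test are assumptions):

    /* entry lookup assumed, e.g. via mkey_cache_ent_from_rb_key(dev, rb_key) */
    if (!ent /* or the entry cannot serve this request; exact test assumed */) {
            mr = reg_create(pd, umem, iova, access_flags, page_size, false);
            if (IS_ERR(mr))
                    return mr;
            mr->mmkey.rb_key = rb_key;
            return mr;
    }

    mr = _mlx5_mr_cache_alloc(dev, ent, access_flags);
    if (IS_ERR(mr))
            return mr;

    mr->ibmr.pd = pd;
    mr->umem = umem;
    mr->page_shift = order_base_2(page_size);
    set_mr_fields(dev, mr, umem->length, access_flags, iova);
    return mr;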
1210 struct mlx5_ib_mr *mr;
1220 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1221 if (!mr)
1224 mr->ibmr.pd = pd;
1225 mr->access_flags = access_flags;
1226 mr->page_shift = order_base_2(page_size);
1243 mlx5_ib_populate_pas(umem, 1UL << mr->page_shift, pas,
1262 get_octo_len(iova, umem->length, mr->page_shift));
1263 MLX5_SET(mkc, mkc, log_page_size, mr->page_shift);
1268 get_octo_len(iova, umem->length, mr->page_shift));
1271 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1276 mr->mmkey.type = MLX5_MKEY_MR;
1277 mr->mmkey.ndescs = get_octo_len(iova, umem->length, mr->page_shift);
1278 mr->umem = umem;
1279 set_mr_fields(dev, mr, umem->length, access_flags, iova);
1282 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1284 return mr;
1289 kfree(mr);
1298 struct mlx5_ib_mr *mr;
1303 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1304 if (!mr)
1320 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1326 set_mr_fields(dev, mr, length, acc, start_addr);
1328 return &mr->ibmr;
1334 kfree(mr);
1392 struct mlx5_ib_mr *mr = NULL;
1398 mr = alloc_cacheable_mr(pd, umem, iova, access_flags);
1404 mr = reg_create(pd, umem, iova, access_flags, page_size, true);
1407 if (IS_ERR(mr)) {
1409 return ERR_CAST(mr);
1412 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1422 err = mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ENABLE);
1424 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1428 return &mr->ibmr;
1437 struct mlx5_ib_mr *mr;
1452 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), access_flags);
1453 if (IS_ERR(mr))
1454 return ERR_CAST(mr);
1455 return &mr->ibmr;
1467 mr = alloc_cacheable_mr(pd, &odp->umem, iova, access_flags);
1468 if (IS_ERR(mr)) {
1470 return ERR_CAST(mr);
1472 xa_init(&mr->implicit_children);
1474 odp->private = mr;
1475 err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1479 err = mlx5_ib_init_odp_mr(mr);
1482 return &mr->ibmr;
1485 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1514 struct mlx5_ib_mr *mr = umem_dmabuf->private;
1521 mlx5r_umr_update_mr_pas(mr, MLX5_IB_UPD_XLT_ZAP);
1536 struct mlx5_ib_mr *mr = NULL;
1561 mr = alloc_cacheable_mr(pd, &umem_dmabuf->umem, virt_addr,
1563 if (IS_ERR(mr)) {
1565 return ERR_CAST(mr);
1568 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1570 atomic_add(ib_umem_num_pages(mr->umem), &dev->mdev->priv.reg_pages);
1571 umem_dmabuf->private = mr;
1572 err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
1576 err = mlx5_ib_init_dmabuf_mr(mr);
1579 return &mr->ibmr;
1582 mlx5_ib_dereg_mr(&mr->ibmr, NULL);
1603 static bool can_use_umr_rereg_pas(struct mlx5_ib_mr *mr,
1608 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1611 if (!mr->mmkey.cache_ent)
1620 return (mr->mmkey.cache_ent->rb_key.ndescs) >=
1624 static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd,
1628 struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
1630 struct ib_umem *old_umem = mr->umem;
1638 err = mlx5r_umr_revoke_mr(mr);
1643 mr->ibmr.pd = pd;
1647 mr->access_flags = access_flags;
1651 mr->ibmr.iova = iova;
1652 mr->ibmr.length = new_umem->length;
1653 mr->page_shift = order_base_2(page_size);
1654 mr->umem = new_umem;
1655 err = mlx5r_umr_update_mr_pas(mr, upd_flags);
1661 mr->umem = old_umem;
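The matches at 1624-1661 outline umr_rereg_pas(): the existing mkey is first revoked over UMR, the MR is repointed at the new PD, umem and iova, and the page lists are re-uploaded; on failure the old umem is put back so deregistration stays consistent. A hedged sketch (the upd_flags computation, the DMA-page accounting and the exact unwind order are assumptions):

    struct ib_umem *old_umem = mr->umem;
    int err;

    /* Put the mkey into a state where it will accept a new translation */
    err = mlx5r_umr_revoke_mr(mr);
    if (err)
            return err;

    mr->ibmr.pd = pd;
    mr->access_flags = access_flags;
    mr->ibmr.iova = iova;
    mr->ibmr.length = new_umem->length;
    mr->page_shift = order_base_2(page_size);
    mr->umem = new_umem;

    err = mlx5r_umr_update_mr_pas(mr, upd_flags);   /* upd_flags built from rereg flags, assumed */
    if (err) {
            /* Restore the old umem so a later dereg releases the right one */
            mr->umem = old_umem;
            return err;
    }
    return 0;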
1677 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1692 new_access_flags = mr->access_flags;
1700 if (can_use_umr_rereg_access(dev, mr->access_flags,
1702 err = mlx5r_umr_rereg_pd_access(mr, new_pd,
1709 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1716 err = mlx5r_umr_revoke_mr(mr);
1719 umem = mr->umem;
1720 mr->umem = NULL;
1723 return create_real_mr(new_pd, umem, mr->ibmr.iova,
1731 if (!mr->umem || is_odp_mr(mr) || is_dmabuf_mr(mr))
1735 can_use_umr_rereg_access(dev, mr->access_flags, new_access_flags)) {
1745 if (can_use_umr_rereg_pas(mr, new_umem, new_access_flags, iova,
1747 err = umr_rereg_pas(mr, new_pd, new_access_flags, flags,
1769 struct mlx5_ib_mr *mr,
1786 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1787 if (!mr->descs_alloc)
1790 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1792 mr->desc_map = dma_map_single(ddev, mr->descs, size, DMA_TO_DEVICE);
1793 if (dma_mapping_error(ddev, mr->desc_map)) {
1800 kfree(mr->descs_alloc);
1806 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1808 if (!mr->umem && mr->descs) {
1809 struct ib_device *device = mr->ibmr.device;
1810 int size = mr->max_descs * mr->desc_size;
1813 dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
1815 kfree(mr->descs_alloc);
1816 mr->descs = NULL;
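The matches at 1806-1816 give most of mlx5_free_priv_descs(); the reconstruction below assumes the to_mdev() conversion, the DMA direction and the closing braces, and it only releases the descriptor array for kernel fast-reg MRs that have no umem:

    static void
    mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
    {
            if (!mr->umem && mr->descs) {
                    struct ib_device *device = mr->ibmr.device;
                    int size = mr->max_descs * mr->desc_size;
                    struct mlx5_ib_dev *dev = to_mdev(device);      /* assumed */

                    dma_unmap_single(&dev->mdev->pdev->dev, mr->desc_map, size,
                                     DMA_TO_DEVICE);                /* direction assumed */
                    kfree(mr->descs_alloc);
                    mr->descs = NULL;
            }
    }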
1821 struct mlx5_ib_mr *mr)
1827 if (mr->mmkey.cache_ent) {
1828 xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
1829 mr->mmkey.cache_ent->in_use--;
1834 ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
1836 if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {
1841 mr->mmkey.cache_ent = ent;
1842 xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
1848 ent = mlx5r_cache_create_ent_locked(dev, mr->mmkey.rb_key, false);
1853 mr->mmkey.cache_ent = ent;
1854 xa_lock_irq(&mr->mmkey.cache_ent->mkeys);
1857 ret = push_mkey_locked(mr->mmkey.cache_ent, false,
1858 xa_mk_value(mr->mmkey.key));
1859 xa_unlock_irq(&mr->mmkey.cache_ent->mkeys);
1865 struct mlx5_ib_mr *mr = to_mmr(ibmr);
1870 * Any async use of the mr must hold the refcount, once the refcount
1875 refcount_read(&mr->mmkey.usecount) != 0 &&
1876 xa_erase(&mr_to_mdev(mr)->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)))
1877 mlx5r_deref_wait_odp_mkey(&mr->mmkey);
1880 xa_cmpxchg(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1881 mr->sig, NULL, GFP_KERNEL);
1883 if (mr->mtt_mr) {
1884 rc = mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
1887 mr->mtt_mr = NULL;
1889 if (mr->klm_mr) {
1890 rc = mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
1893 mr->klm_mr = NULL;
1897 mr->sig->psv_memory.psv_idx))
1899 mr->sig->psv_memory.psv_idx);
1900 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1902 mr->sig->psv_wire.psv_idx);
1903 kfree(mr->sig);
1904 mr->sig = NULL;
1908 if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
1909 if (mlx5r_umr_revoke_mr(mr) ||
1910 cache_ent_find_and_store(dev, mr))
1911 mr->mmkey.cache_ent = NULL;
1913 if (!mr->mmkey.cache_ent) {
1914 rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
1919 if (mr->umem) {
1920 bool is_odp = is_odp_mr(mr);
1923 atomic_sub(ib_umem_num_pages(mr->umem),
1925 ib_umem_release(mr->umem);
1927 mlx5_ib_free_odp_mr(mr);
1930 if (!mr->mmkey.cache_ent)
1931 mlx5_free_priv_descs(mr);
1933 kfree(mr);
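The matches at 1908-1914 capture the central decision in mlx5_ib_dereg_mr(): a UMR-capable MR is revoked and its mkey pushed back into the cache for reuse; only when that fails, or the MR never had a cache entry, is the mkey destroyed in firmware. In sketch form (only the error handling after destroy_mkey() is assumed):

    /* Stop DMA; try to park a reusable mkey back in the cache */
    if (mr->umem && mlx5r_umr_can_load_pas(dev, mr->umem->length))
            if (mlx5r_umr_revoke_mr(mr) ||
                cache_ent_find_and_store(dev, mr))
                    mr->mmkey.cache_ent = NULL;

    /* Not cached (or caching failed): tear the mkey down in HW */
    if (!mr->mmkey.cache_ent) {
            rc = destroy_mkey(to_mdev(mr->ibmr.device), mr);
            if (rc)
                    return rc;                       /* error handling assumed */
    }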
1954 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1961 mr->access_mode = access_mode;
1962 mr->desc_size = desc_size;
1963 mr->max_descs = ndescs;
1965 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1971 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1975 mr->mmkey.type = MLX5_MKEY_MR;
1976 mr->ibmr.lkey = mr->mmkey.key;
1977 mr->ibmr.rkey = mr->mmkey.key;
1982 mlx5_free_priv_descs(mr);
1993 struct mlx5_ib_mr *mr;
1997 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1998 if (!mr)
2001 mr->ibmr.pd = pd;
2002 mr->ibmr.device = pd->device;
2013 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
2018 mr->umem = NULL;
2021 return mr;
2026 kfree(mr);
2030 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2033 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
2038 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2041 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
2045 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
2054 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
2055 if (!mr->sig)
2063 mr->sig->psv_memory.psv_idx = psv_index[0];
2064 mr->sig->psv_wire.psv_idx = psv_index[1];
2066 mr->sig->sig_status_checked = true;
2067 mr->sig->sig_err_exists = false;
2069 ++mr->sig->sigerr_count;
2070 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2073 if (IS_ERR(mr->klm_mr)) {
2074 err = PTR_ERR(mr->klm_mr);
2077 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
2080 if (IS_ERR(mr->mtt_mr)) {
2081 err = PTR_ERR(mr->mtt_mr);
2090 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
2095 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
2096 mr->sig, GFP_KERNEL));
2102 destroy_mkey(dev, mr);
2103 mlx5_free_priv_descs(mr);
2105 mlx5_ib_dereg_mr(&mr->mtt_mr->ibmr, NULL);
2106 mr->mtt_mr = NULL;
2108 mlx5_ib_dereg_mr(&mr->klm_mr->ibmr, NULL);
2109 mr->klm_mr = NULL;
2111 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
2113 mr->sig->psv_memory.psv_idx);
2114 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
2116 mr->sig->psv_wire.psv_idx);
2118 kfree(mr->sig);
2130 struct mlx5_ib_mr *mr;
2134 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
2135 if (!mr)
2144 mr->ibmr.device = pd->device;
2145 mr->umem = NULL;
2149 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
2152 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
2155 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
2159 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
2168 return &mr->ibmr;
2173 kfree(mr);
2329 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2333 mr->meta_length = 0;
2336 mr->mmkey.ndescs = 1;
2339 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2340 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2343 mr->meta_ndescs = 1;
2348 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2349 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2351 ibmr->length = mr->data_length + mr->meta_length;
2358 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2367 struct mlx5_klm *klms = mr->descs;
2369 u32 lkey = mr->ibmr.pd->local_dma_lkey;
2372 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2373 mr->ibmr.length = 0;
2376 if (unlikely(i >= mr->max_descs))
2381 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2389 mr->mmkey.ndescs = i;
2390 mr->data_length = mr->ibmr.length;
2396 if (unlikely(i + j >= mr->max_descs))
2403 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2410 mr->meta_ndescs = j;
2411 mr->meta_length = mr->ibmr.length - mr->data_length;
2419 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2422 if (unlikely(mr->mmkey.ndescs == mr->max_descs))
2425 descs = mr->descs;
2426 descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
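The matches at 2419-2426 are effectively the whole mlx5_set_page() callback used during ib_map_mr_sg(): each DMA address handed in by the core is appended to the descriptor array as a big-endian entry with the read/write enable bits set. Reconstructed (the signature and the __be64 *descs declaration are assumed):

    static int mlx5_set_page(struct ib_mr *ibmr, u64 addr)
    {
            struct mlx5_ib_mr *mr = to_mmr(ibmr);
            __be64 *descs;

            if (unlikely(mr->mmkey.ndescs == mr->max_descs))
                    return -ENOMEM;

            descs = mr->descs;
            descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

            return 0;
    }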
2433 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2436 if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs))
2439 descs = mr->descs;
2440 descs[mr->mmkey.ndescs + mr->meta_ndescs++] =
2452 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2453 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2517 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2518 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2550 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2556 mr->mmkey.ndescs = 0;
2557 mr->data_length = 0;
2558 mr->data_iova = 0;
2559 mr->meta_ndescs = 0;
2560 mr->pi_iova = 0;
2580 pi_mr = mr->mtt_mr;
2587 pi_mr = mr->klm_mr;
2597 mr->pi_mr = pi_mr;
2601 ibmr->sig_attrs->meta_length = mr->meta_length;
2609 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2612 mr->mmkey.ndescs = 0;
2614 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2615 mr->desc_size * mr->max_descs,
2618 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2619 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2625 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2626 mr->desc_size * mr->max_descs,
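The closing matches at 2609-2626 come from mlx5_ib_map_mr_sg(): the descriptor buffer is synced for CPU access, populated either as KLMs or as page descriptors depending on mr->access_mode, and synced back for the device. A hedged sketch of that bracket; the ib_sg_to_pages() branch and the final return are assumptions based on the usual map_mr_sg pattern:

    int n;

    mr->mmkey.ndescs = 0;

    ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
                               mr->desc_size * mr->max_descs,
                               DMA_TO_DEVICE);

    if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
            n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
                                   NULL);
    else
            n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
                               mlx5_set_page);       /* branch assumed */

    ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
                                  mr->desc_size * mr->max_descs,
                                  DMA_TO_DEVICE);
    return n;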