Lines matching refs:mr in the mlx5_ib memory-region (MR) code; the leading number on each line is its line number in the source file.
109 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
110 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
119 static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
121 WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key)));
123 return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
126 static inline bool mlx5_ib_pas_fits_in_mr(struct mlx5_ib_mr *mr, u64 start,
129 return ((u64)1 << mr->order) * MLX5_ADAPTER_PAGE_SIZE >=
135 struct mlx5_ib_mr *mr =
137 struct mlx5_ib_dev *dev = mr->dev;
138 struct mlx5_cache_ent *ent = mr->cache_ent;
142 mlx5_ib_warn(dev, "async reg mr failed. status %d\n", status);
143 kfree(mr);
152 mr->mmkey.type = MLX5_MKEY_MR;
153 mr->mmkey.key |= mlx5_idx_to_mkey(
154 MLX5_GET(create_mkey_out, mr->out, mkey_index));
159 list_add_tail(&mr->list, &ent->head);
170 struct mlx5_ib_mr *mr;
172 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
173 if (!mr)
175 mr->order = ent->order;
176 mr->cache_ent = ent;
177 mr->dev = ent->dev;
187 return mr;
194 struct mlx5_ib_mr *mr;
206 mr = alloc_cache_mr(ent, mkc);
207 if (!mr) {
215 kfree(mr);
220 err = mlx5_ib_create_mkey_cb(ent->dev, &mr->mmkey,
222 mr->out, sizeof(mr->out),
223 &mr->cb_work);
229 kfree(mr);
242 struct mlx5_ib_mr *mr;
252 mr = alloc_cache_mr(ent, mkc);
253 if (!mr) {
258 err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
262 mr->mmkey.type = MLX5_MKEY_MR;
268 return mr;
270 kfree(mr);
278 struct mlx5_ib_mr *mr;
283 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
284 list_del(&mr->list);
288 mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
289 kfree(mr);
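/*
 * Hedged sketch of the synchronous cache-MR lifecycle visible in the
 * fragments above (roughly lines 242-289): allocate the wrapper struct,
 * create the hardware mkey, and on the teardown side pop an entry off the
 * cache free list and destroy it.  The helper names and the in/inlen mkey
 * input buffer are illustrative; only the calls that appear in the listing
 * are taken as given, and the cache-entry lock held around the list
 * manipulation in the real code is omitted here.
 */
static struct mlx5_ib_mr *sketch_create_cache_mr(struct mlx5_cache_ent *ent,
						 void *in, int inlen)
{
	struct mlx5_ib_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->order = ent->order;
	mr->cache_ent = ent;
	mr->dev = ent->dev;

	err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen);
	if (err) {
		kfree(mr);			/* nothing else to undo yet */
		return ERR_PTR(err);
	}

	mr->mmkey.type = MLX5_MKEY_MR;
	return mr;
}

static void sketch_remove_cache_mr(struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr;

	/* pop the oldest free MR and release both the mkey and the wrapper */
	mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
	list_del(&mr->list);
	mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey);
	kfree(mr);
}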
567 struct mlx5_ib_mr *mr;
583 mr = create_cache_mr(ent);
584 if (IS_ERR(mr))
585 return mr;
587 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
588 list_del(&mr->list);
593 mr->access_flags = access_flags;
594 return mr;
601 struct mlx5_ib_mr *mr = NULL;
611 mr = list_first_entry(&ent->head, struct mlx5_ib_mr,
613 list_del(&mr->list);
623 if (!mr)
626 return mr;
629 static void detach_mr_from_cache(struct mlx5_ib_mr *mr)
631 struct mlx5_cache_ent *ent = mr->cache_ent;
633 mr->cache_ent = NULL;
639 void mlx5_mr_cache_free(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
641 struct mlx5_cache_ent *ent = mr->cache_ent;
646 if (mlx5_mr_cache_invalidate(mr)) {
647 detach_mr_from_cache(mr);
648 destroy_mkey(dev, mr);
649 kfree(mr);
654 list_add_tail(&mr->list, &ent->head);
665 struct mlx5_ib_mr *mr;
675 mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
676 list_move(&mr->list, &del_list);
680 mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
683 list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
684 list_del(&mr->list);
685 kfree(mr);
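/*
 * Sketch of the bulk teardown pattern in the fragments above (clean_keys,
 * ~665-685): entries are first moved onto a private del_list, the hardware
 * mkeys are destroyed, and only then are the wrappers freed with
 * list_for_each_entry_safe().  Locking around ent->head is omitted here.
 */
static void sketch_clean_keys(struct mlx5_ib_dev *dev,
			      struct mlx5_cache_ent *ent)
{
	struct mlx5_ib_mr *mr, *tmp_mr;
	LIST_HEAD(del_list);

	while (!list_empty(&ent->head)) {
		mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list);
		list_move(&mr->list, &del_list);
		mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey);
	}

	/* _safe variant because each wrapper is deleted while iterating */
	list_for_each_entry_safe(mr, tmp_mr, &del_list, list) {
		list_del(&mr->list);
		kfree(mr);
	}
}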
815 struct mlx5_ib_mr *mr;
820 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
821 if (!mr)
836 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
841 mr->mmkey.type = MLX5_MKEY_MR;
842 mr->ibmr.lkey = mr->mmkey.key;
843 mr->ibmr.rkey = mr->mmkey.key;
844 mr->umem = NULL;
846 return &mr->ibmr;
852 kfree(mr);
989 struct mlx5_ib_mr *mr;
998 mr = get_cache_mr(ent);
999 if (!mr) {
1000 mr = create_cache_mr(ent);
1001 if (IS_ERR(mr))
1002 return mr;
1005 mr->ibmr.pd = pd;
1006 mr->umem = umem;
1007 mr->access_flags = access_flags;
1008 mr->desc_size = sizeof(struct mlx5_mtt);
1009 mr->mmkey.iova = virt_addr;
1010 mr->mmkey.size = len;
1011 mr->mmkey.pd = to_mpd(pd)->pdn;
1013 return mr;
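/*
 * alloc_mr_from_cache() above (~989-1013) is the fast path for user
 * registrations: an MR wrapper is taken from the per-order cache entry via
 * get_cache_mr(); a fresh one is created only if that entry's free list is
 * empty, and the PD, iova, length and access flags are then stamped onto
 * the reused mkey.  The cached mkey is pointed at the new pages later with
 * a UMR (see the mlx5_ib_update_xlt() fragments below).
 */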
1020 int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
1023 struct mlx5_ib_dev *dev = mr->dev;
1086 if (mr->umem->is_odp) {
1088 struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
1106 wr.pd = mr->ibmr.pd;
1107 wr.mkey = mr->mmkey.key;
1108 wr.length = mr->mmkey.size;
1109 wr.virt_addr = mr->mmkey.iova;
1110 wr.access_flags = mr->access_flags;
1119 if (mr->umem->is_odp) {
1120 mlx5_odp_populate_xlt(xlt, idx, npages, mr, flags);
1122 __mlx5_ib_populate_pas(dev, mr->umem, page_shift, idx,
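/*
 * In mlx5_ib_update_xlt() above (~1020-1122) the translation buffer (xlt)
 * is filled from one of two sources before being pushed to the HCA with a
 * UMR work request: ODP MRs are populated by the page-fault machinery
 * (mlx5_odp_populate_xlt()), all other MRs directly from the pinned umem
 * pages (__mlx5_ib_populate_pas()).  The work request itself carries the
 * mkey, length, iova and access flags shown at lines 1106-1110.
 */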
1176 struct mlx5_ib_mr *mr;
1184 mr = ibmr ? to_mmr(ibmr) : kzalloc(sizeof(*mr), GFP_KERNEL);
1185 if (!mr)
1188 mr->ibmr.pd = pd;
1189 mr->access_flags = access_flags;
1230 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1235 mr->mmkey.type = MLX5_MKEY_MR;
1236 mr->desc_size = sizeof(struct mlx5_mtt);
1237 mr->dev = dev;
1240 mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
1242 return mr;
1249 kfree(mr);
1254 static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr,
1257 mr->ibmr.lkey = mr->mmkey.key;
1258 mr->ibmr.rkey = mr->mmkey.key;
1259 mr->ibmr.length = length;
1260 mr->access_flags = access_flags;
1268 struct mlx5_ib_mr *mr;
1273 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1274 if (!mr)
1290 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1296 set_mr_fields(dev, mr, length, acc);
1298 return &mr->ibmr;
1304 kfree(mr);
1362 struct mlx5_ib_mr *mr = NULL;
1390 mr = mlx5_ib_alloc_implicit_mr(to_mpd(pd), udata, access_flags);
1391 if (IS_ERR(mr))
1392 return ERR_CAST(mr);
1393 return &mr->ibmr;
1403 mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
1405 if (IS_ERR(mr))
1406 mr = NULL;
1409 if (!mr) {
1411 mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
1416 if (IS_ERR(mr)) {
1417 err = PTR_ERR(mr);
1421 mlx5_ib_dbg(dev, "mkey 0x%x\n", mr->mmkey.key);
1423 mr->umem = umem;
1424 mr->npages = npages;
1425 atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
1426 set_mr_fields(dev, mr, length, access_flags);
1436 err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
1439 dereg_mr(dev, mr);
1444 if (is_odp_mr(mr)) {
1445 to_ib_umem_odp(mr->umem)->private = mr;
1446 init_waitqueue_head(&mr->q_deferred_work);
1447 atomic_set(&mr->num_deferred_work, 0);
1449 mlx5_base_mkey(mr->mmkey.key), &mr->mmkey,
1452 dereg_mr(dev, mr);
1456 err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
1458 dereg_mr(dev, mr);
1463 return &mr->ibmr;
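/*
 * Sketch of the tail of the user-MR registration path above (~1423-1463):
 * record the umem, account the pinned pages, and for ODP MRs publish the
 * mkey so the page-fault handler can look it up.  The xa_store()
 * destination matches the xa_load() of dev->odp_mkeys at line 121; the
 * GFP flag and the condensed error handling are assumptions.
 */
	mr->umem = umem;
	mr->npages = npages;
	atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
	set_mr_fields(dev, mr, length, access_flags);

	if (is_odp_mr(mr)) {
		to_ib_umem_odp(mr->umem)->private = mr;
		init_waitqueue_head(&mr->q_deferred_work);
		atomic_set(&mr->num_deferred_work, 0);
		err = xa_err(xa_store(&dev->odp_mkeys,
				      mlx5_base_mkey(mr->mmkey.key),
				      &mr->mmkey, GFP_KERNEL));
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}

		err = mlx5_ib_init_odp_mr(mr, xlt_with_umr);
		if (err) {
			dereg_mr(dev, mr);
			return ERR_PTR(err);
		}
	}

	return &mr->ibmr;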
1471 * @mr: The MR to fence
1477 int mlx5_mr_cache_invalidate(struct mlx5_ib_mr *mr)
1481 if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
1487 umrwr.pd = mr->dev->umrc.pd;
1488 umrwr.mkey = mr->mmkey.key;
1491 return mlx5_ib_post_send_wait(mr->dev, &umrwr);
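/*
 * Condensed sketch of the UMR fence above (mlx5_mr_cache_invalidate,
 * ~1477-1491): when the device is in internal error the mkey is already
 * dead and nothing is posted; otherwise a UMR work request that revokes
 * the mkey is posted on the driver's internal UMR QP and waited for.  The
 * 'struct mlx5_umr_wr' type and the revoke send_flags are assumptions
 * recalled from the driver, not shown in the matches above.
 */
	struct mlx5_umr_wr umrwr = {};

	if (mr->dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return 0;		/* HW is gone; treat the fence as done */

	/* send_flags selecting the "disable/revoke mkey" operation go here */
	umrwr.pd = mr->dev->umrc.pd;	/* PD of the internal UMR QP */
	umrwr.mkey = mr->mmkey.key;

	return mlx5_ib_post_send_wait(mr->dev, &umrwr);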
1494 static int rereg_umr(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1504 umrwr.mkey = mr->mmkey.key;
1522 struct mlx5_ib_mr *mr = to_mmr(ib_mr);
1526 mr->access_flags;
1538 if (!mr->umem)
1541 if (is_odp_mr(mr))
1548 addr = mr->umem->address;
1549 len = mr->umem->length;
1558 atomic_sub(mr->npages, &dev->mdev->priv.reg_pages);
1559 mr->npages = 0;
1560 ib_umem_release(mr->umem);
1561 mr->umem = NULL;
1563 err = mr_umem_get(dev, addr, len, access_flags, &mr->umem,
1567 mr->npages = ncont;
1568 atomic_add(mr->npages, &dev->mdev->priv.reg_pages);
1571 if (!mlx5_ib_can_reconfig_with_umr(dev, mr->access_flags,
1575 !mlx5_ib_pas_fits_in_mr(mr, addr, len))) {
1579 if (mr->cache_ent)
1580 detach_mr_from_cache(mr);
1581 err = destroy_mkey(dev, mr);
1585 mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
1588 if (IS_ERR(mr)) {
1589 err = PTR_ERR(mr);
1590 mr = to_mmr(ib_mr);
1597 mr->ibmr.pd = pd;
1598 mr->access_flags = access_flags;
1599 mr->mmkey.iova = addr;
1600 mr->mmkey.size = len;
1601 mr->mmkey.pd = to_mpd(pd)->pdn;
1609 err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
1612 err = rereg_umr(pd, mr, access_flags, flags);
1619 set_mr_fields(dev, mr, len, access_flags);
1624 ib_umem_release(mr->umem);
1625 mr->umem = NULL;
1627 clean_mr(dev, mr);
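/*
 * The rereg path above (mlx5_ib_rereg_user_mr, ~1522-1627) takes one of two
 * routes: if the requested access flags cannot be applied with a UMR
 * (mlx5_ib_can_reconfig_with_umr()) or the new range no longer fits the
 * existing translation (mlx5_ib_pas_fits_in_mr()), the old mkey is detached
 * from the cache, destroyed and recreated via reg_create(); otherwise the
 * existing mkey is patched in place with mlx5_ib_update_xlt() and/or
 * rereg_umr().
 */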
1633 struct mlx5_ib_mr *mr,
1643 mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
1644 if (!mr->descs_alloc)
1647 mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);
1649 mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
1651 if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
1658 kfree(mr->descs_alloc);
1664 mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
1666 if (mr->descs) {
1667 struct ib_device *device = mr->ibmr.device;
1668 int size = mr->max_descs * mr->desc_size;
1670 dma_unmap_single(device->dev.parent, mr->desc_map,
1672 kfree(mr->descs_alloc);
1673 mr->descs = NULL;
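/*
 * Sketch of the private-descriptor buffer management above
 * (mlx5_alloc_priv_descs / mlx5_free_priv_descs, ~1643-1673): the array is
 * over-allocated so it can be aligned to MLX5_UMR_ALIGN, then mapped for
 * streaming DMA; the free side unmaps and releases the original, unaligned
 * allocation.  The wrapper signature, 'add_size' (the alignment slack) and
 * the DMA_TO_DEVICE direction are assumptions for illustration.
 */
static int sketch_alloc_priv_descs(struct ib_device *device,
				   struct mlx5_ib_mr *mr,
				   int size, int add_size)
{
	mr->descs_alloc = kzalloc(size + add_size, GFP_KERNEL);
	if (!mr->descs_alloc)
		return -ENOMEM;

	/* keep the raw pointer for kfree(); hand out the aligned one */
	mr->descs = PTR_ALIGN(mr->descs_alloc, MLX5_UMR_ALIGN);

	mr->desc_map = dma_map_single(device->dev.parent, mr->descs,
				      size, DMA_TO_DEVICE);
	if (dma_mapping_error(device->dev.parent, mr->desc_map)) {
		kfree(mr->descs_alloc);
		return -ENOMEM;
	}

	return 0;
}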
1677 static void clean_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1679 if (mr->sig) {
1681 mr->sig->psv_memory.psv_idx))
1683 mr->sig->psv_memory.psv_idx);
1685 mr->sig->psv_wire.psv_idx))
1687 mr->sig->psv_wire.psv_idx);
1688 xa_erase(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key));
1689 kfree(mr->sig);
1690 mr->sig = NULL;
1693 if (!mr->cache_ent) {
1694 destroy_mkey(dev, mr);
1695 mlx5_free_priv_descs(mr);
1699 static void dereg_mr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
1701 int npages = mr->npages;
1702 struct ib_umem *umem = mr->umem;
1705 if (is_odp_mr(mr))
1706 mlx5_ib_fence_odp_mr(mr);
1708 clean_mr(dev, mr);
1710 if (mr->cache_ent)
1711 mlx5_mr_cache_free(dev, mr);
1713 kfree(mr);
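/*
 * Teardown ordering visible in clean_mr()/dereg_mr() above (~1677-1713):
 * DMA is quiesced first (ODP MRs are fenced via mlx5_ib_fence_odp_mr()),
 * clean_mr() then releases the signature PSVs and sig context if present
 * and destroys the mkey and private descriptors for non-cached MRs, and
 * the wrapper is finally either parked back in the MR cache
 * (mlx5_mr_cache_free()) or kfree()d.  The npages/umem locals are captured
 * up front so the pinned memory can still be released after the wrapper is
 * gone.
 */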
1756 static int _mlx5_alloc_mkey_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1763 mr->access_mode = access_mode;
1764 mr->desc_size = desc_size;
1765 mr->max_descs = ndescs;
1767 err = mlx5_alloc_priv_descs(pd->device, mr, ndescs, desc_size);
1773 err = mlx5_ib_create_mkey(dev, &mr->mmkey, in, inlen);
1777 mr->mmkey.type = MLX5_MKEY_MR;
1778 mr->ibmr.lkey = mr->mmkey.key;
1779 mr->ibmr.rkey = mr->mmkey.key;
1784 mlx5_free_priv_descs(mr);
1795 struct mlx5_ib_mr *mr;
1799 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1800 if (!mr)
1803 mr->ibmr.pd = pd;
1804 mr->ibmr.device = pd->device;
1815 err = _mlx5_alloc_mkey_descs(pd, mr, ndescs, desc_size, page_shift,
1820 mr->umem = NULL;
1823 return mr;
1828 kfree(mr);
1832 static int mlx5_alloc_mem_reg_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1835 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_mtt),
1840 static int mlx5_alloc_sg_gaps_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1843 return _mlx5_alloc_mkey_descs(pd, mr, ndescs, sizeof(struct mlx5_klm),
1847 static int mlx5_alloc_integrity_descs(struct ib_pd *pd, struct mlx5_ib_mr *mr,
1856 mr->sig = kzalloc(sizeof(*mr->sig), GFP_KERNEL);
1857 if (!mr->sig)
1865 mr->sig->psv_memory.psv_idx = psv_index[0];
1866 mr->sig->psv_wire.psv_idx = psv_index[1];
1868 mr->sig->sig_status_checked = true;
1869 mr->sig->sig_err_exists = false;
1871 ++mr->sig->sigerr_count;
1872 mr->klm_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1875 if (IS_ERR(mr->klm_mr)) {
1876 err = PTR_ERR(mr->klm_mr);
1879 mr->mtt_mr = mlx5_ib_alloc_pi_mr(pd, max_num_sg, max_num_meta_sg,
1882 if (IS_ERR(mr->mtt_mr)) {
1883 err = PTR_ERR(mr->mtt_mr);
1892 err = _mlx5_alloc_mkey_descs(pd, mr, 4, sizeof(struct mlx5_klm), 0,
1897 err = xa_err(xa_store(&dev->sig_mrs, mlx5_base_mkey(mr->mmkey.key),
1898 mr->sig, GFP_KERNEL));
1904 destroy_mkey(dev, mr);
1905 mlx5_free_priv_descs(mr);
1907 dereg_mr(to_mdev(mr->mtt_mr->ibmr.device), mr->mtt_mr);
1908 mr->mtt_mr = NULL;
1910 dereg_mr(to_mdev(mr->klm_mr->ibmr.device), mr->klm_mr);
1911 mr->klm_mr = NULL;
1913 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_memory.psv_idx))
1915 mr->sig->psv_memory.psv_idx);
1916 if (mlx5_core_destroy_psv(dev->mdev, mr->sig->psv_wire.psv_idx))
1918 mr->sig->psv_wire.psv_idx);
1920 kfree(mr->sig);
1932 struct mlx5_ib_mr *mr;
1936 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1937 if (!mr)
1946 mr->ibmr.device = pd->device;
1947 mr->umem = NULL;
1951 err = mlx5_alloc_mem_reg_descs(pd, mr, ndescs, in, inlen);
1954 err = mlx5_alloc_sg_gaps_descs(pd, mr, ndescs, in, inlen);
1957 err = mlx5_alloc_integrity_descs(pd, mr, max_num_sg,
1961 mlx5_ib_warn(dev, "Invalid mr type %d\n", mr_type);
1970 return &mr->ibmr;
1975 kfree(mr);
2136 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2140 mr->meta_length = 0;
2143 mr->ndescs = 1;
2146 mr->data_length = sg_dma_len(data_sg) - sg_offset;
2147 mr->data_iova = sg_dma_address(data_sg) + sg_offset;
2150 mr->meta_ndescs = 1;
2155 mr->meta_length = sg_dma_len(meta_sg) - sg_offset;
2156 mr->pi_iova = sg_dma_address(meta_sg) + sg_offset;
2158 ibmr->length = mr->data_length + mr->meta_length;
2165 mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr,
2174 struct mlx5_klm *klms = mr->descs;
2176 u32 lkey = mr->ibmr.pd->local_dma_lkey;
2179 mr->ibmr.iova = sg_dma_address(sg) + sg_offset;
2180 mr->ibmr.length = 0;
2183 if (unlikely(i >= mr->max_descs))
2188 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2196 mr->ndescs = i;
2197 mr->data_length = mr->ibmr.length;
2203 if (unlikely(i + j >= mr->max_descs))
2210 mr->ibmr.length += sg_dma_len(sg) - sg_offset;
2217 mr->meta_ndescs = j;
2218 mr->meta_length = mr->ibmr.length - mr->data_length;
2226 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2229 if (unlikely(mr->ndescs == mr->max_descs))
2232 descs = mr->descs;
2233 descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);
2240 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2243 if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs))
2246 descs = mr->descs;
2247 descs[mr->ndescs + mr->meta_ndescs++] =
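/*
 * Sketch of the set_page callback pattern above (~2226-2247), invoked by
 * ib_map_mr_sg() once per page: bounds-check against max_descs, then store
 * the page address big-endian in the descriptor array with the read/write
 * enable bits set.  The function name and -ENOMEM return are illustrative;
 * the body follows the fragments at lines 2229-2233.
 */
static int sketch_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	__be64 *descs;

	if (unlikely(mr->ndescs == mr->max_descs))
		return -ENOMEM;		/* descriptor array is full */

	descs = mr->descs;
	descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR);

	return 0;
}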
2259 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2260 struct mlx5_ib_mr *pi_mr = mr->mtt_mr;
2324 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2325 struct mlx5_ib_mr *pi_mr = mr->klm_mr;
2357 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2363 mr->ndescs = 0;
2364 mr->data_length = 0;
2365 mr->data_iova = 0;
2366 mr->meta_ndescs = 0;
2367 mr->pi_iova = 0;
2387 pi_mr = mr->mtt_mr;
2394 pi_mr = mr->klm_mr;
2404 mr->pi_mr = pi_mr;
2408 ibmr->sig_attrs->meta_length = mr->meta_length;
2416 struct mlx5_ib_mr *mr = to_mmr(ibmr);
2419 mr->ndescs = 0;
2421 ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
2422 mr->desc_size * mr->max_descs,
2425 if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
2426 n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
2432 ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
2433 mr->desc_size * mr->max_descs,
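/*
 * Sketch of the mapping flow above (~2416-2433), the mlx5 ib_map_mr_sg()
 * implementation: the descriptor buffer is synced for CPU access, populated
 * either as KLMs (mlx5_ib_sg_to_klms(), used for the KLM access mode) or
 * page by page through the generic helper, and then synced back for the
 * device.  The ib_sg_to_pages()/mlx5_set_page else-branch and the
 * DMA_TO_DEVICE direction are assumptions based on the standard verbs
 * contract, not on the matches above.
 */
	mr->ndescs = 0;

	ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map,
				   mr->desc_size * mr->max_descs,
				   DMA_TO_DEVICE);

	if (mr->access_mode == MLX5_MKC_ACCESS_MODE_KLMS)
		n = mlx5_ib_sg_to_klms(mr, sg, sg_nents, sg_offset, NULL, 0,
				       NULL);
	else
		n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset,
				   mlx5_set_page);

	ib_dma_sync_single_for_device(ibmr->device, mr->desc_map,
				      mr->desc_size * mr->max_descs,
				      DMA_TO_DEVICE);

	return n;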