Lines matching refs: mr. Each entry below is a source line number followed by the matched line; the search appears to cover the memory-region code of the Linux mlx4 InfiniBand driver (drivers/infiniband/hw/mlx4/mr.c).

60 struct mlx4_ib_mr *mr;
63 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
64 if (!mr)
68 ~0ull, convert_access(acc), 0, 0, &mr->mmr);
72 err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
76 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
77 mr->umem = NULL;
79 return &mr->ibmr;
82 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
85 kfree(mr);
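
The matches above (lines 60-85) trace the DMA MR path: allocate the wrapper struct, create a hardware MR spanning the whole address space (start 0, length ~0ull), enable it, and publish the HW key as both lkey and rkey. A sketch assembling them into the surrounding function; the mlx4_mr_alloc() call and the error-label names are filled in from the upstream driver, so treat them as assumptions rather than matched lines:

struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)
{
        struct mlx4_ib_mr *mr;
        int err;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Region spans the entire address space: start 0, length ~0ull. */
        err = mlx4_mr_alloc(to_mdev(pd->device)->dev, to_mpd(pd)->pdn, 0,
                            ~0ull, convert_access(acc), 0, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_mr_enable(to_mdev(pd->device)->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;        /* no user memory backs a DMA MR */

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

The (void) cast on mlx4_mr_free() in the unwind path signals that its return value is deliberately ignored: the function is already failing with the original error.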
411 struct mlx4_ib_mr *mr;
416 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
417 if (!mr)
420 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
421 if (IS_ERR(mr->umem)) {
422 err = PTR_ERR(mr->umem);
426 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);
429 convert_access(access_flags), n, shift, &mr->mmr);
433 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
437 err = mlx4_mr_enable(dev->dev, &mr->mmr);
441 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
442 mr->ibmr.page_size = 1U << shift;
444 return &mr->ibmr;
447 (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);
450 ib_umem_release(mr->umem);
453 kfree(mr);
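
Lines 411-453 are the user MR registration path: pin the user pages with mlx4_get_umem_mr(), pick an optimal MTT page shift for the pinned range, allocate the hardware MR, write the MTT entries, then enable the region. A sketch of the whole function; the signature, the mlx4_mr_alloc() call, and the unwind labels are assumptions reconstructed from the upstream driver:

struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
                                  u64 virt_addr, int access_flags,
                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int shift;
        int err;
        int n;

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        /* Pin the user address range. */
        mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags);
        if (IS_ERR(mr->umem)) {
                err = PTR_ERR(mr->umem);
                goto err_free;
        }

        /* Choose the largest MTT page size the range allows. */
        shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, virt_addr, length,
                            convert_access(access_flags), n, shift, &mr->mmr);
        if (err)
                goto err_umem;

        err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);
        if (err)
                goto err_mr;

        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_mr;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.page_size = 1U << shift;

        return &mr->ibmr;

err_mr:
        (void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);

err_umem:
        ib_umem_release(mr->umem);

err_free:
        kfree(mr);
        return ERR_PTR(err);
}

The unwind order mirrors construction in reverse: free the HW MR, release the pinned umem, then free the wrapper.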
458 struct ib_mr *mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags, u64 start,
463 struct mlx4_ib_dev *dev = to_mdev(mr->device);
464 struct mlx4_ib_mr *mmr = to_mmr(mr);
505 mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
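
Lines 458-505 come from mlx4_ib_rereg_user_mr(), which modifies an existing MR in place. The match at line 505 sits on the IB_MR_REREG_TRANS branch, where the old pinned memory is dropped and a fresh umem is pinned. A minimal sketch of just that branch; the surrounding MPT-entry rewrite is elided and the release_mpt_entry label is an assumption:

        if (flags & IB_MR_REREG_TRANS) {
                /* Drop the old pinned pages and pin the new range. */
                ib_umem_release(mmr->umem);
                mmr->umem = mlx4_get_umem_mr(mr->device, start, length,
                                             mr_access_flags);
                if (IS_ERR(mmr->umem)) {
                        err = PTR_ERR(mmr->umem);
                        /* Clear the pointer so dereg won't double-release. */
                        mmr->umem = NULL;
                        goto release_mpt_entry;
                }
        }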
550 struct mlx4_ib_mr *mr,
560 mr->page_map_size = roundup(max_pages * sizeof(u64),
564 mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
565 if (!mr->pages)
568 mr->page_map = dma_map_single(device->dev.parent, mr->pages,
569 mr->page_map_size, DMA_TO_DEVICE);
571 if (dma_mapping_error(device->dev.parent, mr->page_map)) {
579 free_page((unsigned long)mr->pages);
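
Lines 550-579 cover mlx4_alloc_priv_pages(), which backs a fast-registration MR's page list with a single zeroed page and DMA-maps it. Reconstructed sketch; the MLX4_MR_PAGES_ALIGN rounding constant (on an unmatched continuation line) and the error label are assumptions from the upstream driver:

static int
mlx4_alloc_priv_pages(struct ib_device *device,
                      struct mlx4_ib_mr *mr,
                      int max_pages)
{
        int ret;

        /* Align the list size to the device's DMA requirements. */
        mr->page_map_size = roundup(max_pages * sizeof(u64),
                                    MLX4_MR_PAGES_ALIGN);

        /* One full page, so the list can never cross a page boundary. */
        mr->pages = (__be64 *)get_zeroed_page(GFP_KERNEL);
        if (!mr->pages)
                return -ENOMEM;

        mr->page_map = dma_map_single(device->dev.parent, mr->pages,
                                      mr->page_map_size, DMA_TO_DEVICE);
        if (dma_mapping_error(device->dev.parent, mr->page_map)) {
                ret = -ENOMEM;
                goto err;
        }

        return 0;

err:
        free_page((unsigned long)mr->pages);
        return ret;
}

Allocating a whole page rather than exactly page_map_size guarantees the DMA-mapped list cannot straddle a page boundary.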
584 mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
586 if (mr->pages) {
587 struct ib_device *device = mr->ibmr.device;
589 dma_unmap_single(device->dev.parent, mr->page_map,
590 mr->page_map_size, DMA_TO_DEVICE);
591 free_page((unsigned long)mr->pages);
592 mr->pages = NULL;
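
Lines 584-592 are the matching teardown, mlx4_free_priv_pages(): unmap and free the page list, then NULL the pointer so a second call is harmless. The sketch below only adds the static void line and braces around the matched body:

static void
mlx4_free_priv_pages(struct mlx4_ib_mr *mr)
{
        if (mr->pages) {
                struct ib_device *device = mr->ibmr.device;

                dma_unmap_single(device->dev.parent, mr->page_map,
                                 mr->page_map_size, DMA_TO_DEVICE);
                free_page((unsigned long)mr->pages);
                mr->pages = NULL;
        }
}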
598 struct mlx4_ib_mr *mr = to_mmr(ibmr);
601 mlx4_free_priv_pages(mr);
603 ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
606 if (mr->umem)
607 ib_umem_release(mr->umem);
608 kfree(mr);
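
Lines 598-608 form mlx4_ib_dereg_mr(): free the private page list, release the hardware MR, then drop the umem (if this was a user MR) and the wrapper. A sketch; the exact signature (the ib_udata argument comes and goes across kernel versions) and the early-return handling are assumptions:

int mlx4_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int ret;

        mlx4_free_priv_pages(mr);

        ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr);
        if (ret)
                return ret;
        if (mr->umem)
                ib_umem_release(mr->umem);
        kfree(mr);

        return 0;
}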
648 struct mlx4_ib_mr *mr;
655 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
656 if (!mr)
660 max_num_sg, 0, &mr->mmr);
664 err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
668 mr->max_pages = max_num_sg;
669 err = mlx4_mr_enable(dev->dev, &mr->mmr);
673 mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
674 mr->umem = NULL;
676 return &mr->ibmr;
679 mr->ibmr.device = pd->device;
680 mlx4_free_priv_pages(mr);
682 (void) mlx4_mr_free(dev->dev, &mr->mmr);
684 kfree(mr);
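
Lines 648-684 are mlx4_ib_alloc_mr(), the fast-registration MR constructor: a hardware MR sized for max_num_sg MTT entries plus the private page list from mlx4_alloc_priv_pages(). Sketch with the signature, the mr_type/max_num_sg validation, and the label names assumed from the upstream driver:

struct ib_mr *mlx4_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
                               u32 max_num_sg)
{
        struct mlx4_ib_dev *dev = to_mdev(pd->device);
        struct mlx4_ib_mr *mr;
        int err;

        if (mr_type != IB_MR_TYPE_MEM_REG ||
            max_num_sg > MLX4_MAX_FAST_REG_PAGES)
                return ERR_PTR(-EINVAL);

        mr = kzalloc(sizeof(*mr), GFP_KERNEL);
        if (!mr)
                return ERR_PTR(-ENOMEM);

        err = mlx4_mr_alloc(dev->dev, to_mpd(pd)->pdn, 0, 0, 0,
                            max_num_sg, 0, &mr->mmr);
        if (err)
                goto err_free;

        err = mlx4_alloc_priv_pages(pd->device, mr, max_num_sg);
        if (err)
                goto err_free_mr;

        mr->max_pages = max_num_sg;
        err = mlx4_mr_enable(dev->dev, &mr->mmr);
        if (err)
                goto err_free_pl;

        mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
        mr->umem = NULL;

        return &mr->ibmr;

err_free_pl:
        mr->ibmr.device = pd->device;
        mlx4_free_priv_pages(mr);
err_free_mr:
        (void) mlx4_mr_free(dev->dev, &mr->mmr);
err_free:
        kfree(mr);
        return ERR_PTR(err);
}

The assignment at line 679 looks odd but is deliberate: the IB core normally fills in ibmr.device only after this function returns successfully, and mlx4_free_priv_pages() dereferences it, so the error path has to populate it by hand first.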
690 struct mlx4_ib_mr *mr = to_mmr(ibmr);
692 if (unlikely(mr->npages == mr->max_pages))
695 mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);
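
Lines 690-695 are mlx4_set_page(), the per-page callback handed to ib_sg_to_pages(): it appends each page address, tagged with MLX4_MTT_FLAG_PRESENT, into the DMA-mapped list in big-endian form. Sketch of the full callback; the static int line and the return statements are assumed:

static int mlx4_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);

        /* Refuse to overflow the preallocated page list. */
        if (unlikely(mr->npages == mr->max_pages))
                return -ENOMEM;

        mr->pages[mr->npages++] = cpu_to_be64(addr | MLX4_MTT_FLAG_PRESENT);

        return 0;
}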
703 struct mlx4_ib_mr *mr = to_mmr(ibmr);
706 mr->npages = 0;
708 ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
709 mr->page_map_size, DMA_TO_DEVICE);
713 ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
714 mr->page_map_size, DMA_TO_DEVICE);
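
Lines 703-714 close the loop in mlx4_ib_map_mr_sg(): reset npages, sync the page list for CPU access, let ib_sg_to_pages() fill it via mlx4_set_page(), then sync it back for the device. The ib_sg_to_pages() call itself contains no bare identifier mr, which is presumably why it is absent from the matches; the sketch below reinstates it, with the signature and call reconstructed (assumptions, not matched lines):

int mlx4_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                      unsigned int *sg_offset)
{
        struct mlx4_ib_mr *mr = to_mmr(ibmr);
        int rc;

        mr->npages = 0;

        ib_dma_sync_single_for_cpu(ibmr->device, mr->page_map,
                                   mr->page_map_size, DMA_TO_DEVICE);

        rc = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, mlx4_set_page);

        ib_dma_sync_single_for_device(ibmr->device, mr->page_map,
                                      mr->page_map_size, DMA_TO_DEVICE);

        return rc;
}

The two syncs bracket the CPU writes so the device never observes a half-updated page list.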