Lines Matching refs:mtt

25 struct erdma_mtt *mtt = mem->mtt;
28 *addr0 = mtt->buf_dma;
32 *addr0 = mtt->buf[0];
33 memcpy(addr1, mtt->buf + 1, MTT_SIZE(mem->mtt_nents - 1));
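
The member accesses scattered through this listing (buf, buf_dma, size, continuous, sglist, nsg, level, low_level, mtt_nents, page_cnt) outline the data structures involved. The sketch below is a reconstruction for orientation only, not the literal definitions from the driver's header: the real layout may differ (the continuous and scattered members could well share a union), struct erdma_mem and struct erdma_dev are trimmed to what this listing touches, and MTT_SIZE() is assumed to give the byte size of n 64-bit entries. The later sketches build on these definitions and includes.

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

/* Reconstructed from the member accesses in this listing; not verbatim. */
struct erdma_mtt {
        u64 *buf;                      /* table entries (DMA addresses) */
        size_t size;                   /* byte size of buf */
        bool continuous;               /* true: one DMA-contiguous buffer */

        /* used when continuous */
        dma_addr_t buf_dma;            /* mapping of buf (dma_map_single) */

        /* used when scattered */
        struct scatterlist *sglist;    /* pages backing buf */
        u32 nsg;                       /* mapped sg entries */
        u32 level;                     /* depth of the indirection chain */

        struct erdma_mtt *low_level;   /* next lower table, NULL at bottom */
};

struct erdma_mem {                     /* trimmed to the members seen here */
        struct erdma_mtt *mtt;
        u32 mtt_nents;                 /* valid entries in mtt->buf */
        u32 page_cnt;
        /* assumed members, used by the fill sketch below: */
        struct ib_umem *umem;
        unsigned long page_size;
};

struct erdma_dev {                     /* trimmed likewise */
        struct ib_device ibdev;
        struct pci_dev *pdev;
};

/* Assumed helper macro: byte size of n 64-bit MTT entries. */
#define MTT_SIZE(n)     ((n) * sizeof(u64))
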
143 if (mr->mem.mtt->continuous) {
144 req.phy_addr[0] = mr->mem.mtt->buf_dma;
147 req.phy_addr[0] = sg_dma_address(mr->mem.mtt->sglist);
148 mtt_level = mr->mem.mtt->level;
151 memcpy(req.phy_addr, mr->mem.mtt->buf,
174 if (!mr->mem.mtt->continuous && mr->mem.mtt->level > 1) {
220 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf[0]);
221 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf[0]);
226 req.qbuf_addr_l = lower_32_bits(mem->mtt->buf_dma);
227 req.qbuf_addr_h = upper_32_bits(mem->mtt->buf_dma);
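
Lines 143-174 and 220-227 show how a finished MTT is described to the device when registering an MR or creating a queue: a continuous table is passed as its single DMA handle, a scattered table as the DMA address of its top-level sg list plus the chain depth, and a table small enough to fit inline is copied straight into the command (line 174 additionally flags requests whose table is scattered and deeper than one level). The sketch below condenses that decision; struct erdma_fake_reg_req, its fields, the fits_inline criterion and the helper name are placeholders, not the driver's real command layout.

/* Illustrative only: this struct and its fields are placeholders. */
struct erdma_fake_reg_req {
        u64 phy_addr[4];               /* inline entries or table address */
        u32 mtt_level;
        u32 qbuf_addr_l;
        u32 qbuf_addr_h;
};

static void fill_mtt_into_cmd(struct erdma_mem *mem,
                              struct erdma_fake_reg_req *req,
                              bool fits_inline /* assumed criterion */)
{
        struct erdma_mtt *mtt = mem->mtt;

        if (fits_inline) {
                /* Small table: copy the entries straight into the command
                 * (cf. line 151); assumes mtt_nents fits the inline array.
                 */
                memcpy(req->phy_addr, mtt->buf, MTT_SIZE(mem->mtt_nents));
                req->mtt_level = 0;
        } else if (mtt->continuous) {
                /* One flat, DMA-contiguous table (cf. lines 143-144). */
                req->phy_addr[0] = mtt->buf_dma;
                req->mtt_level = 1;
        } else {
                /* Multi-level table: hand over the top level's first sg
                 * mapping and tell the device how deep the chain is
                 * (cf. lines 147-148).
                 */
                req->phy_addr[0] = sg_dma_address(mtt->sglist);
                req->mtt_level = mtt->level;
        }

        /* Queue buffers split the chosen address into 32-bit halves
         * (cf. lines 220-227).
         */
        req->qbuf_addr_l = lower_32_bits(req->phy_addr[0]);
        req->qbuf_addr_h = upper_32_bits(req->phy_addr[0]);
}
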
543 struct erdma_mtt *mtt = mem->mtt;
547 while (mtt->low_level)
548 mtt = mtt->low_level;
551 mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
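
Lines 543-551 populate the page-address table once the chain exists: descend the low_level pointers to the bottom table, then write one DMA address per block of the user memory region. A minimal sketch building on the types above; the function name and the mem->umem / mem->page_size members are assumptions, since they are not among the matched lines.

#include <rdma/ib_umem.h>

static void erdma_fill_bottom_mtt(struct erdma_mem *mem)
{
        struct erdma_mtt *mtt = mem->mtt;
        struct ib_block_iter biter;
        u32 idx = 0;

        /* Only the lowest table holds page addresses; every level above
         * it points at the table below (see lines 600-607).
         */
        while (mtt->low_level)
                mtt = mtt->low_level;

        rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
                mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
}
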
557 struct erdma_mtt *mtt;
559 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
560 if (!mtt)
563 mtt->size = size;
564 mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
565 if (!mtt->buf)
568 mtt->continuous = true;
569 mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
571 if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
574 return mtt;
577 kfree(mtt->buf);
580 kfree(mtt);
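
Lines 557-580 build a "continuous" MTT: all entries live in one kzalloc'd buffer that the device reaches through a single streaming DMA mapping. The sketch below fills in the lines that did not match; the function name (chosen by analogy with erdma_create_scatter_mtt), the error labels, the return values and the DMA_TO_DEVICE direction on the map are assumptions (the matching unmap at line 588 does use DMA_TO_DEVICE).

static struct erdma_mtt *erdma_create_cont_mtt(struct erdma_dev *dev,
                                               size_t size)
{
        struct erdma_mtt *mtt;

        mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->size = size;
        mtt->buf = kzalloc(mtt->size, GFP_KERNEL);
        if (!mtt->buf)
                goto err_free_mtt;

        mtt->continuous = true;
        /* Direction assumed: the device only reads the table. */
        mtt->buf_dma = dma_map_single(&dev->pdev->dev, mtt->buf, mtt->size,
                                      DMA_TO_DEVICE);
        if (dma_mapping_error(&dev->pdev->dev, mtt->buf_dma))
                goto err_free_mtt_buf;

        return mtt;

err_free_mtt_buf:
        kfree(mtt->buf);
err_free_mtt:
        kfree(mtt);
        return ERR_PTR(-ENOMEM);
}

This is the simple case; tables too large to sit in one kmalloc'd, DMA-contiguous buffer presumably fall back to the scattered variant built below.
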
586 struct erdma_mtt *mtt)
588 dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
589 vfree(mtt->sglist);
593 struct erdma_mtt *mtt)
595 erdma_destroy_mtt_buf_sg(dev, mtt);
596 vfree(mtt->buf);
597 kfree(mtt);
600 static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
607 mtt->buf[idx++] = sg_dma_address(sg);
610 static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
613 void *buf = mtt->buf;
621 npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
639 mtt->sglist = sglist;
640 mtt->nsg = nsg;
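
Of erdma_create_mtt_buf_sg (lines 610-640) only the signature, the buffer pointer, the page count and the two result assignments match. The sketch fills the gap with the standard way to make a vmalloc'd buffer visible to a device: look up each backing page with vmalloc_to_page(), describe it in a scatterlist, and map the whole list; the loop body and the error handling are assumptions.

static int erdma_create_mtt_buf_sg(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
        struct scatterlist *sglist;
        void *buf = mtt->buf;
        u32 npages, i, nsg;
        struct page *pg;

        npages = DIV_ROUND_UP(mtt->size, PAGE_SIZE);
        sglist = vzalloc(npages * sizeof(*sglist));
        if (!sglist)
                return -ENOMEM;

        sg_init_table(sglist, npages);
        for (i = 0; i < npages; i++) {
                /* mtt->buf comes from vzalloc(), so each backing page has
                 * to be looked up individually.
                 */
                pg = vmalloc_to_page(buf);
                if (!pg)
                        goto err_free_sglist;
                sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
                buf += PAGE_SIZE;
        }

        nsg = dma_map_sg(&dev->pdev->dev, sglist, npages, DMA_TO_DEVICE);
        if (!nsg)
                goto err_free_sglist;

        mtt->sglist = sglist;
        mtt->nsg = nsg;

        return 0;

err_free_sglist:
        vfree(sglist);
        return -EINVAL;
}
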
652 struct erdma_mtt *mtt;
655 mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
656 if (!mtt)
659 mtt->size = ALIGN(size, PAGE_SIZE);
660 mtt->buf = vzalloc(mtt->size);
661 mtt->continuous = false;
662 if (!mtt->buf)
665 ret = erdma_create_mtt_buf_sg(dev, mtt);
669 ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
670 mtt->size, mtt->nsg);
672 return mtt;
675 vfree(mtt->buf);
678 kfree(mtt);
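
Lines 652-678 wrap that into a "scattered" MTT: the entry buffer comes from vzalloc(), rounded up to whole pages, and nsg records how many mapped sg entries describe it. Error labels and return values below are assumptions.

static struct erdma_mtt *erdma_create_scatter_mtt(struct erdma_dev *dev,
                                                  size_t size)
{
        struct erdma_mtt *mtt;
        int ret = -ENOMEM;

        mtt = kzalloc(sizeof(*mtt), GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->size = ALIGN(size, PAGE_SIZE);
        mtt->buf = vzalloc(mtt->size);
        mtt->continuous = false;
        if (!mtt->buf)
                goto err_free_mtt;

        ret = erdma_create_mtt_buf_sg(dev, mtt);
        if (ret)
                goto err_free_mtt_buf;

        ibdev_dbg(&dev->ibdev, "create scatter mtt, size:%lu, nsg:%u\n",
                  mtt->size, mtt->nsg);

        return mtt;

err_free_mtt_buf:
        vfree(mtt->buf);
err_free_mtt:
        kfree(mtt);
        return ERR_PTR(ret);
}
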
686 struct erdma_mtt *mtt, *tmp_mtt;
698 mtt = erdma_create_scatter_mtt(dev, size);
699 if (IS_ERR(mtt))
700 return mtt;
703 /* converge the mtt table. */
704 while (mtt->nsg != 1 && level <= 3) {
705 tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
710 erdma_init_middle_mtt(tmp_mtt, mtt);
711 tmp_mtt->low_level = mtt;
712 mtt = tmp_mtt;
721 mtt->level = level;
722 ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
723 mtt->level, mtt->sglist[0].dma_address);
725 return mtt;
727 while (mtt) {
728 tmp_mtt = mtt->low_level;
729 erdma_destroy_scatter_mtt(dev, mtt);
730 mtt = tmp_mtt;
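
Lines 600-607 and 686-730 stack levels: each middle level is itself a scattered MTT whose entries are the DMA addresses of the sg segments one level down, and levels are added until the top level fits in a single sg entry or a depth limit of three is hit. Both helpers are sketched below; the for_each_sg loop, the level bookkeeping, the post-loop failure check and the force_continuous handling are assumptions, and erdma_destroy_scatter_mtt is only declared here (it is sketched with the destroy matches further down).

static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
                                      struct erdma_mtt *mtt);

static void erdma_init_middle_mtt(struct erdma_mtt *mtt,
                                  struct erdma_mtt *low_mtt)
{
        struct scatterlist *sg;
        u32 idx = 0, i;

        /* Each upper-level entry is the DMA address of one mapped sg
         * segment of the level below (cf. line 607).
         */
        for_each_sg(low_mtt->sglist, sg, low_mtt->nsg, i)
                mtt->buf[idx++] = sg_dma_address(sg);
}

static struct erdma_mtt *erdma_create_mtt(struct erdma_dev *dev, size_t size,
                                          bool force_continuous)
{
        struct erdma_mtt *mtt, *tmp_mtt;
        int ret, level;

        if (force_continuous)
                return erdma_create_cont_mtt(dev, size);

        mtt = erdma_create_scatter_mtt(dev, size);
        if (IS_ERR(mtt))
                return mtt;
        level = 1;

        /* converge the mtt table: while the current top level still needs
         * more than one sg entry, build another (smaller) level above it.
         */
        while (mtt->nsg != 1 && level <= 3) {
                tmp_mtt = erdma_create_scatter_mtt(dev, MTT_SIZE(mtt->nsg));
                if (IS_ERR(tmp_mtt)) {
                        ret = PTR_ERR(tmp_mtt);
                        goto err_free_mtt;
                }
                erdma_init_middle_mtt(tmp_mtt, mtt);
                tmp_mtt->low_level = mtt;
                mtt = tmp_mtt;
                level++;
        }

        /* Assumed: not converged within the depth limit -> give up. */
        if (mtt->nsg != 1) {
                ret = -ENOMEM;
                goto err_free_mtt;
        }

        mtt->level = level;
        ibdev_dbg(&dev->ibdev, "top mtt: level:%d, dma_addr 0x%llx\n",
                  mtt->level, mtt->sglist[0].dma_address);

        return mtt;

err_free_mtt:
        while (mtt) {
                tmp_mtt = mtt->low_level;
                erdma_destroy_scatter_mtt(dev, mtt);
                mtt = tmp_mtt;
        }
        return ERR_PTR(ret);
}

Capping the chain at a few levels presumably keeps the device-side lookup bounded while each added level shrinks the table by a full page's worth of entries.
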
736 static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
740 if (mtt->continuous) {
741 dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
743 kfree(mtt->buf);
744 kfree(mtt);
746 while (mtt) {
747 tmp_mtt = mtt->low_level;
748 erdma_destroy_scatter_mtt(dev, mtt);
749 mtt = tmp_mtt;
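
Lines 586-597 and 736-749 are the teardown mirror image: a scattered level unmaps its sg list and frees both the list and the vzalloc'd entry buffer, while erdma_destroy_mtt either unmaps the one continuous buffer or walks the low_level chain destroying one level at a time. Only the branch structure around the matched lines is assumed below.

static void erdma_destroy_mtt_buf_sg(struct erdma_dev *dev,
                                     struct erdma_mtt *mtt)
{
        dma_unmap_sg(&dev->pdev->dev, mtt->sglist, mtt->nsg, DMA_TO_DEVICE);
        vfree(mtt->sglist);
}

static void erdma_destroy_scatter_mtt(struct erdma_dev *dev,
                                      struct erdma_mtt *mtt)
{
        erdma_destroy_mtt_buf_sg(dev, mtt);
        vfree(mtt->buf);
        kfree(mtt);
}

static void erdma_destroy_mtt(struct erdma_dev *dev, struct erdma_mtt *mtt)
{
        struct erdma_mtt *tmp_mtt;

        if (mtt->continuous) {
                dma_unmap_single(&dev->pdev->dev, mtt->buf_dma, mtt->size,
                                 DMA_TO_DEVICE);
                kfree(mtt->buf);
                kfree(mtt);
        } else {
                while (mtt) {
                        tmp_mtt = mtt->low_level;
                        erdma_destroy_scatter_mtt(dev, mtt);
                        mtt = tmp_mtt;
                }
        }
}
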
773 mem->mtt = erdma_create_mtt(dev, MTT_SIZE(mem->page_cnt),
775 if (IS_ERR(mem->mtt)) {
776 ret = PTR_ERR(mem->mtt);
795 if (mem->mtt)
796 erdma_destroy_mtt(dev, mem->mtt);
1092 mr->mem.mtt = erdma_create_mtt(dev, MTT_SIZE(max_num_sg), true);
1093 if (IS_ERR(mr->mem.mtt)) {
1094 ret = PTR_ERR(mr->mem.mtt);
1105 erdma_destroy_mtt(dev, mr->mem.mtt);
1124 mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
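
Lines 1092-1124 cover the fast-registration path: alloc_mr creates a continuous MTT sized for max_num_sg up front (and destroys it on failure), and map_mr_sg later fills mtt->buf one address per page; lines 773-796 show the same create/destroy pairing for user MRs and queue buffers. Only the assignment at line 1124 is among the matches, so the sketch below wires it into the generic ib_sg_to_pages() set_page callback, which is the usual way such per-page writers are driven; struct erdma_mr, to_emr() and the bound check are assumed reductions of the driver's types.

/* Assumed reduction of the driver's MR type. */
struct erdma_mr {
        struct ib_mr ibmr;
        struct erdma_mem mem;
};

static inline struct erdma_mr *to_emr(struct ib_mr *ibmr)
{
        return container_of(ibmr, struct erdma_mr, ibmr);
}

/* set_page callback: store one page address per call (cf. line 1124). */
static int erdma_set_page(struct ib_mr *ibmr, u64 addr)
{
        struct erdma_mr *mr = to_emr(ibmr);

        /* Bound check assumed: never write past the preallocated table. */
        if (mr->mem.mtt_nents >= mr->mem.page_cnt)
                return -1;

        mr->mem.mtt->buf[mr->mem.mtt_nents] = addr;
        mr->mem.mtt_nents++;

        return 0;
}

int erdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
                    unsigned int *sg_offset)
{
        struct erdma_mr *mr = to_emr(ibmr);

        mr->mem.mtt_nents = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, erdma_set_page);
}
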