Lines Matching defs:mhp
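
All of these hits sit in the memory-registration path of the iw_cxgb4 RDMA driver (the symbols match drivers/infiniband/hw/cxgb4/mem.c in the mainline kernel tree; the file name is an inference from the symbols, not stated by the listing). Each entry is the source line number followed by the matching line only; everything between the hits is elided.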

377 static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
381 mhp->attr.state = 1;
382 mhp->attr.stag = stag;
384 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
385 mhp->ibmr.length = mhp->attr.len;
386 mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
387 pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
388 return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
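
finish_mem_reg() publishes a completed registration: it marks the region valid (attr.state = 1), records the stag, and mirrors the stag into the lkey/rkey that verbs consumers present. The mmid used as the xarray index is the stag with its low 8 key bits shifted off; that derivation is elided here, but the same computation is visible at line 710 below (mmid = mhp->attr.stag >> 8).
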
392 struct c4iw_mr *mhp, int shift)
397 ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid,
398 FW_RI_STAG_NSMR, mhp->attr.len ?
399 mhp->attr.perms : 0,
400 mhp->attr.mw_bind_enable, mhp->attr.zbva,
401 mhp->attr.va_fbo, mhp->attr.len ?
402 mhp->attr.len : -1, shift - 12,
403 mhp->attr.pbl_size, mhp->attr.pbl_addr, NULL,
404 mhp->wr_waitp);
408 ret = finish_mem_reg(mhp, stag);
410 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
411 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
412 mhp->dereg_skb = NULL;
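
register_mem() programs the adapter's TPT (translation protection table) entry for the region and then calls finish_mem_reg(). The two conditionals on attr.len handle zero-length regions, which are written with no permissions and a length of -1. If finish_mem_reg() fails, the entry is torn down with dereg_mem() and dereg_skb is set to NULL because dereg_mem() has consumed that skb; the caller's error path must not free it again.
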
417 static int alloc_pbl(struct c4iw_mr *mhp, int npages)
419 mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
422 if (!mhp->attr.pbl_addr)
425 mhp->attr.pbl_size = npages;
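
alloc_pbl() reserves the region's physical buffer list in adapter memory. A minimal sketch of the whole function, reconstructed around the matched lines (the npages << 3 sizing is an assumption, consistent with the pbl_size << 3 frees at lines 586 and 666):

    static int alloc_pbl(struct c4iw_mr *mhp, int npages)
    {
        /* each PBL entry is a 64-bit DMA address, hence << 3 */
        mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev,
                                                npages << 3);
        if (!mhp->attr.pbl_addr)
            return -ENOMEM;
        mhp->attr.pbl_size = npages;
        return 0;
    }
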
434 struct c4iw_mr *mhp;
442 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
443 if (!mhp)
445 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
446 if (!mhp->wr_waitp) {
450 c4iw_init_wr_wait(mhp->wr_waitp);
452 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
453 if (!mhp->dereg_skb) {
458 mhp->rhp = rhp;
459 mhp->attr.pdid = php->pdid;
460 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
461 mhp->attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND;
462 mhp->attr.zbva = 0;
463 mhp->attr.va_fbo = 0;
464 mhp->attr.page_size = 0;
465 mhp->attr.len = ~0ULL;
466 mhp->attr.pbl_size = 0;
469 FW_RI_STAG_NSMR, mhp->attr.perms,
470 mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0,
471 NULL, mhp->wr_waitp);
475 ret = finish_mem_reg(mhp, stag);
478 return &mhp->ibmr;
480 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
481 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
483 kfree_skb(mhp->dereg_skb);
485 c4iw_put_wr_wait(mhp->wr_waitp);
487 kfree(mhp);
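
This block is c4iw_get_dma_mr(), which registers a single region covering all of memory (va_fbo = 0, len = ~0ULL) with no PBL, so write_tpt_entry() is called directly at lines 468-471 with zeroed PBL fields rather than going through register_mem(). The failure lines at 480-487 unwind the earlier allocations in strict reverse order: TPT entry, dereg_skb, wait object, then mhp itself.
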
500 struct c4iw_mr *mhp;
516 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
517 if (!mhp)
519 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
520 if (!mhp->wr_waitp)
523 mhp->dereg_skb = alloc_skb(SGE_MAX_WR_LEN, GFP_KERNEL);
524 if (!mhp->dereg_skb)
527 mhp->rhp = rhp;
529 mhp->umem = ib_umem_get(pd->device, start, length, acc);
530 if (IS_ERR(mhp->umem))
535 n = ib_umem_num_dma_blocks(mhp->umem, 1 << shift);
536 err = alloc_pbl(mhp, n);
548 rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
551 err = write_pbl(&mhp->rhp->rdev, pages,
552 mhp->attr.pbl_addr + (n << 3), i,
553 mhp->wr_waitp);
562 err = write_pbl(&mhp->rhp->rdev, pages,
563 mhp->attr.pbl_addr + (n << 3), i,
564 mhp->wr_waitp);
571 mhp->attr.pdid = php->pdid;
572 mhp->attr.zbva = 0;
573 mhp->attr.perms = c4iw_ib_to_tpt_access(acc);
574 mhp->attr.va_fbo = virt;
575 mhp->attr.page_size = shift - 12;
576 mhp->attr.len = length;
578 err = register_mem(rhp, php, mhp, shift);
582 return &mhp->ibmr;
585 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
586 mhp->attr.pbl_size << 3);
588 ib_umem_release(mhp->umem);
590 kfree_skb(mhp->dereg_skb);
592 c4iw_put_wr_wait(mhp->wr_waitp);
594 kfree(mhp);
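
c4iw_reg_user_mr() pins the user range with ib_umem_get(), sizes the PBL from the number of DMA blocks, and streams the block addresses into adapter memory before filling attr and calling register_mem(). write_pbl() appears twice because addresses are staged through a fixed-size local pages[] array: one call flushes full chunks inside the iteration, the other flushes the tail. A hedged reconstruction of the elided loop (the chunk size, the byte-swapping, and the pbl_done label are assumptions consistent with the matched lines):

    i = n = 0;
    rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
        pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
        if (i == PAGE_SIZE / sizeof(*pages)) {
            /* flush one full chunk; n counts entries already written */
            err = write_pbl(&mhp->rhp->rdev, pages,
                            mhp->attr.pbl_addr + (n << 3), i,
                            mhp->wr_waitp);
            if (err)
                goto pbl_done;  /* hypothetical label */
            n += i;
            i = 0;
        }
    }
    if (i)
        /* flush the remainder */
        err = write_pbl(&mhp->rhp->rdev, pages,
                        mhp->attr.pbl_addr + (n << 3), i,
                        mhp->wr_waitp);

Only once the PBL is fully written are the attr fields at lines 571-576 filled in and the stag made visible via register_mem().
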
603 struct c4iw_mr *mhp;
617 mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
618 if (!mhp) {
623 mhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
624 if (!mhp->wr_waitp) {
628 c4iw_init_wr_wait(mhp->wr_waitp);
630 mhp->mpl = dma_alloc_coherent(&rhp->rdev.lldi.pdev->dev,
631 length, &mhp->mpl_addr, GFP_KERNEL);
632 if (!mhp->mpl) {
636 mhp->max_mpl_len = length;
638 mhp->rhp = rhp;
639 ret = alloc_pbl(mhp, max_num_sg);
642 mhp->attr.pbl_size = max_num_sg;
644 mhp->attr.pbl_size, mhp->attr.pbl_addr,
645 mhp->wr_waitp);
648 mhp->attr.pdid = php->pdid;
649 mhp->attr.type = FW_RI_STAG_NSMR;
650 mhp->attr.stag = stag;
651 mhp->attr.state = 0;
653 mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
654 if (xa_insert_irq(&rhp->mrs, mmid, mhp, GFP_KERNEL)) {
659 pr_debug("mmid 0x%x mhp %p stag 0x%x\n", mmid, mhp, stag);
660 return &mhp->ibmr;
662 dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
663 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
665 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
666 mhp->attr.pbl_size << 3);
668 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
669 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
671 c4iw_put_wr_wait(mhp->wr_waitp);
673 kfree(mhp);
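
c4iw_alloc_mr() backs fast-register MRs: mpl is a DMA-coherent staging array for page addresses, later filled by c4iw_map_mr_sg(), while the PBL itself lives in adapter memory. The region is created with attr.state = 0, i.e. invalid; a fast-register work request must supply the pages and validate it before it can be used.
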
680 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
682 if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
685 mhp->mpl[mhp->mpl_len++] = addr;
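
c4iw_set_page() is the per-page callback handed to the core's ib_sg_to_pages(): it appends one page address to the mpl staging array, bounds-checked against pbl_size (the elided branch presumably returns -ENOMEM when the array is full).
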
693 struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
695 mhp->mpl_len = 0;
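
Resetting mpl_len is the only setup c4iw_map_mr_sg() needs before delegating to the core. A sketch of the likely surrounding function, assuming the standard ib_sg_to_pages() pattern used by most RDMA drivers:

    static int c4iw_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
                              int sg_nents, unsigned int *sg_offset)
    {
        struct c4iw_mr *mhp = to_c4iw_mr(ibmr);

        /* restart the page list; c4iw_set_page() appends from slot 0 */
        mhp->mpl_len = 0;

        return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, c4iw_set_page);
    }
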
703 struct c4iw_mr *mhp;
708 mhp = to_c4iw_mr(ib_mr);
709 rhp = mhp->rhp;
710 mmid = mhp->attr.stag >> 8;
712 if (mhp->mpl)
713 dma_free_coherent(&mhp->rhp->rdev.lldi.pdev->dev,
714 mhp->max_mpl_len, mhp->mpl, mhp->mpl_addr);
715 dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
716 mhp->attr.pbl_addr, mhp->dereg_skb, mhp->wr_waitp);
717 if (mhp->attr.pbl_size)
718 c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
719 mhp->attr.pbl_size << 3);
720 if (mhp->kva)
721 kfree((void *) (unsigned long) mhp->kva);
722 ib_umem_release(mhp->umem);
723 pr_debug("mmid 0x%x ptr %p\n", mmid, mhp);
724 c4iw_put_wr_wait(mhp->wr_waitp);
725 kfree(mhp);
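
c4iw_dereg_mr() is the mirror of the three allocation paths above, releasing resources in reverse order: the mpl DMA buffer, the adapter TPT entry, the PBL, any kernel VA, the umem pin, the wait object, and finally mhp itself. The MR is presumably also erased from the mrs xarray in one of the elided lines, since mmid is recomputed at line 710 and the insert at line 388 would otherwise leak the entry.
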
731 struct c4iw_mr *mhp;
735 mhp = xa_load(&rhp->mrs, rkey >> 8);
736 if (mhp)
737 mhp->attr.state = 0;
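
c4iw_invalidate_mr() flips attr.state back to 0 for whichever MR owns the given rkey; rkey >> 8 recovers the same mmid index that finish_mem_reg() inserted under. The region stays allocated but is unusable until a subsequent fast-register operation revalidates it.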