Lines Matching refs:tun_qp (definitions and uses of tun_qp in the mlx4 IB MAD tunneling code)
519 struct mlx4_ib_demux_pv_qp *tun_qp;
543 tun_qp = &tun_ctx->qp[0];
545 tun_qp = &tun_ctx->qp[1];
572 src_qp = tun_qp->qp;
593 spin_lock(&tun_qp->tx_lock);
594 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
598 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
599 spin_unlock(&tun_qp->tx_lock);
603 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
604 if (tun_qp->tx_ring[tun_tx_ix].ah)
605 rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
606 tun_qp->tx_ring[tun_tx_ix].ah = ah;
608 tun_qp->tx_ring[tun_tx_ix].buf.map,
650 tun_qp->tx_ring[tun_tx_ix].buf.map,
654 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
673 spin_lock(&tun_qp->tx_lock);
674 tun_qp->tx_ix_tail++;
675 spin_unlock(&tun_qp->tx_lock);
676 tun_qp->tx_ring[tun_tx_ix].ah = NULL;
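The tx_ix_head/tx_ix_tail lines above (594-599, 673-675) implement a simple head/tail ring over the MLX4_NUM_TUNNEL_BUFS send slots: the sender bumps the head under tx_lock to reserve a slot, and the completion path (or the send error path) bumps the tail to release it, with the slot index taken from the low bits of the counter. A minimal user-space sketch of that indexing scheme follows; the ring size here is illustrative, and the & (size - 1) mask only works because the size is a power of two.

#include <stdio.h>

#define NUM_BUFS 16    /* illustrative; the driver uses MLX4_NUM_TUNNEL_BUFS */

struct tx_ring {
        unsigned int head;      /* bumped when a send slot is reserved */
        unsigned int tail;      /* bumped when that send completes or fails */
};

/* Reserve a slot, as done under tun_qp->tx_lock; -1 means the ring is full
 * (the driver returns -EAGAIN in that case). */
static int reserve_tx_slot(struct tx_ring *r)
{
        if (r->head - r->tail >= NUM_BUFS - 1)
                return -1;
        return (int)((++r->head) & (NUM_BUFS - 1));
}

/* Release the oldest outstanding slot, as the completion handler
 * (or the send error path at 673-675) does. */
static void release_tx_slot(struct tx_ring *r)
{
        r->tail++;
}

int main(void)
{
        struct tx_ring r = { 0, 0 };
        int ix = reserve_tx_slot(&r);

        printf("reserved slot %d\n", ix);
        release_tx_slot(&r);
        return 0;
}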
1315 struct mlx4_ib_demux_pv_qp *tun_qp,
1323 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1326 sg_list.addr = tun_qp->ring[index].map;
1334 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1335 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1337 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
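Lines 1315-1337 are the receive side: each ring buffer is described by a single scatter/gather entry, tagged with a wr_id that encodes its index, synced for the device, and handed to ib_post_recv(). A hedged, kernel-style sketch of that pattern; the function name and parameters are illustrative, while the driver takes the address and length from tun_qp->ring[index] and the lkey from ctx->pd->local_dma_lkey.

#include <rdma/ib_verbs.h>

/* Sketch only: post one receive buffer described by a single SGE. */
static int demo_post_one_recv(struct ib_qp *qp, u64 dma_addr, u32 len,
                              u32 lkey, u64 wr_id)
{
        struct ib_sge sge = {
                .addr   = dma_addr,
                .length = len,
                .lkey   = lkey,
        };
        struct ib_recv_wr wr = {
                .wr_id   = wr_id,
                .sg_list = &sge,
                .num_sge = 1,
        };
        const struct ib_recv_wr *bad_wr;

        return ib_post_recv(qp, &wr, &bad_wr);
}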
1480 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1482 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1509 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
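The wr_id posted at line 1334 packs the proxy QP type into high bits above the buffer index, which is how lines 1480-1482 later recover both the demux_pv_qp slot (MLX4_TUN_WRID_QPN) and the ring entry (wr_id masked with MLX4_NUM_TUNNEL_BUFS - 1) from a work completion. A small stand-alone sketch of that packing; the DEMO_* shift and mask values are illustrative only, the real ones are the MLX4_TUN_* macros in mlx4_ib.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout: low bits carry the buffer index, two bits above
 * them the proxy QP type. */
#define DEMO_NUM_BUFS        16u   /* stand-in for MLX4_NUM_TUNNEL_BUFS, power of two */
#define DEMO_QPN_SHIFT       32
#define DEMO_SET_WRID_QPN(q) (((uint64_t)((q) & 0x3)) << DEMO_QPN_SHIFT)
#define DEMO_WRID_QPN(w)     (((w) >> DEMO_QPN_SHIFT) & 0x3)

int main(void)
{
        uint64_t wr_id = 5u | DEMO_SET_WRID_QPN(1);  /* buffer 5, proxy QP type 1 */

        printf("qp index = %u, buffer index = %u\n",
               (unsigned int)DEMO_WRID_QPN(wr_id),
               (unsigned int)(wr_id & (DEMO_NUM_BUFS - 1)));
        return 0;
}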
1606 struct mlx4_ib_demux_pv_qp *tun_qp;
1613 tun_qp = &ctx->qp[qp_type];
1615 tun_qp->ring = kcalloc(nmbr_bufs,
1618 if (!tun_qp->ring)
1621 tun_qp->tx_ring = kcalloc(nmbr_bufs,
1624 if (!tun_qp->tx_ring) {
1625 kfree(tun_qp->ring);
1626 tun_qp->ring = NULL;
1639 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1640 if (!tun_qp->ring[i].addr)
1642 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1643 tun_qp->ring[i].addr,
1646 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1647 kfree(tun_qp->ring[i].addr);
1653 tun_qp->tx_ring[i].buf.addr =
1655 if (!tun_qp->tx_ring[i].buf.addr)
1657 tun_qp->tx_ring[i].buf.map =
1659 tun_qp->tx_ring[i].buf.addr,
1663 tun_qp->tx_ring[i].buf.map)) {
1664 kfree(tun_qp->tx_ring[i].buf.addr);
1667 tun_qp->tx_ring[i].ah = NULL;
1669 spin_lock_init(&tun_qp->tx_lock);
1670 tun_qp->tx_ix_head = 0;
1671 tun_qp->tx_ix_tail = 0;
1672 tun_qp->proxy_qpt = qp_type;
1679 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1681 kfree(tun_qp->tx_ring[i].buf.addr);
1687 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1689 kfree(tun_qp->ring[i].addr);
1691 kfree(tun_qp->tx_ring);
1692 tun_qp->tx_ring = NULL;
1693 kfree(tun_qp->ring);
1694 tun_qp->ring = NULL;
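Lines 1606-1694 allocate the receive ring and the send ring: two arrays sized nmbr_bufs, then one kmalloc plus ib_dma_map_single per buffer, with ib_dma_mapping_error checked after each mapping. On failure the error labels unwind in reverse order: free the send buffers set up so far, then every receive buffer, then the two arrays (the same order the teardown at 1702-1733 uses). A user-space sketch of that allocate-then-unwind shape, with malloc standing in for kmalloc plus the DMA mapping; all names and sizes here are illustrative.

#include <stdlib.h>

#define NBUF 8    /* illustrative; the driver uses nmbr_bufs */

struct demo_buf { void *addr; };

/* Allocate NBUF receive and NBUF send buffers; on any failure undo exactly
 * what was already done, mirroring the reverse-order unwind above. */
static int demo_alloc_bufs(struct demo_buf **rx_out, struct demo_buf **tx_out)
{
        struct demo_buf *rx, *tx;
        int i, j;

        rx = calloc(NBUF, sizeof(*rx));
        if (!rx)
                return -1;
        tx = calloc(NBUF, sizeof(*tx));
        if (!tx)
                goto err_rx_array;

        for (i = 0; i < NBUF; i++) {
                rx[i].addr = malloc(256);
                if (!rx[i].addr)
                        goto err_rx_bufs;
        }
        for (i = 0; i < NBUF; i++) {
                tx[i].addr = malloc(256);
                if (!tx[i].addr)
                        goto err_tx_bufs;
        }
        *rx_out = rx;
        *tx_out = tx;
        return 0;

err_tx_bufs:
        for (j = 0; j < i; j++)     /* free the send buffers set up so far */
                free(tx[j].addr);
        i = NBUF;                   /* then fall through and free all receive buffers */
err_rx_bufs:
        for (j = 0; j < i; j++)
                free(rx[j].addr);
        free(tx);
err_rx_array:
        free(rx);
        return -1;
}

int main(void)
{
        struct demo_buf *rx, *tx;

        return demo_alloc_bufs(&rx, &tx) ? 1 : 0;
}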
1702 struct mlx4_ib_demux_pv_qp *tun_qp;
1709 tun_qp = &ctx->qp[qp_type];
1720 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1722 kfree(tun_qp->ring[i].addr);
1726 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1728 kfree(tun_qp->tx_ring[i].buf.addr);
1729 if (tun_qp->tx_ring[i].ah)
1730 rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
1732 kfree(tun_qp->tx_ring);
1733 kfree(tun_qp->ring);
1739 struct mlx4_ib_demux_pv_qp *tun_qp;
1746 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1751 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1759 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1761 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1763 spin_lock(&tun_qp->tx_lock);
1764 tun_qp->tx_ix_tail++;
1765 spin_unlock(&tun_qp->tx_lock);
1776 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1778 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1780 spin_lock(&tun_qp->tx_lock);
1781 tun_qp->tx_ix_tail++;
1782 spin_unlock(&tun_qp->tx_lock);
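Lines 1739-1782 are the tunnel completion worker: it drains the CQ with ib_poll_cq(), reposts receive buffers on IB_WC_RECV, and on IB_WC_SEND (or on a failed send completion) destroys the slot's address handle and advances tx_ix_tail so the ring entry can be reused. A kernel-style sketch of that loop shape only, not the driver function; demo_drain_cq is an illustrative name.

#include <rdma/ib_verbs.h>

/* Sketch: drain a CQ one work completion at a time and dispatch on opcode. */
static void demo_drain_cq(struct ib_cq *cq)
{
        struct ib_wc wc;

        while (ib_poll_cq(cq, 1, &wc) == 1) {
                if (wc.status != IB_WC_SUCCESS) {
                        /* if this was a send, release its ring slot here */
                        continue;
                }
                switch (wc.opcode) {
                case IB_WC_RECV:
                        /* repost the buffer named by wc.wr_id, then demultiplex the MAD */
                        break;
                case IB_WC_SEND:
                        /* destroy the slot's AH, bump tx_ix_tail under tx_lock */
                        break;
                default:
                        break;
                }
        }
}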
1801 struct mlx4_ib_demux_pv_qp *tun_qp;
1810 tun_qp = &ctx->qp[qp_type];
1836 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1837 if (IS_ERR(tun_qp->qp)) {
1838 ret = PTR_ERR(tun_qp->qp);
1839 tun_qp->qp = NULL;
1857 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1864 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1872 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1880 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1890 ib_destroy_qp(tun_qp->qp);
1891 tun_qp->qp = NULL;
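Lines 1801-1891 create the proxy/tunnel QP and walk it through the standard UD bring-up: ib_create_qp(), then ib_modify_qp() to INIT (the mask the driver wraps in its qp_attr_mask_INIT macro, visible at 1857), to RTR, and to RTS with a starting sq_psn. A hedged sketch of that sequence; the function name, the zero pkey_index, and the port argument are illustrative, while the driver derives the real pkey index per port and uses IB_QP1_QKEY for these proxy QPs.

#include <linux/string.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>    /* IB_QP1_QKEY */

/* Sketch of the INIT -> RTR -> RTS transitions performed at 1857-1872. */
static int demo_bring_up_ud_qp(struct ib_qp *qp, u8 port)
{
        struct ib_qp_attr attr;
        int ret;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state   = IB_QPS_INIT;
        attr.pkey_index = 0;             /* placeholder */
        attr.qkey       = IB_QP1_QKEY;   /* GSI qkey used for the proxy QPs */
        attr.port_num   = port;
        ret = ib_modify_qp(qp, &attr,
                           IB_QP_STATE | IB_QP_PKEY_INDEX |
                           IB_QP_QKEY | IB_QP_PORT);
        if (ret)
                return ret;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTR;
        ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
        if (ret)
                return ret;

        memset(&attr, 0, sizeof(attr));
        attr.qp_state = IB_QPS_RTS;
        attr.sq_psn   = 0;
        return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}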