Lines matching defs:tun_qp (uses of the tunnel QP context, struct mlx4_ib_demux_pv_qp, in the mlx4_ib MAD tunnelling code)
518 struct mlx4_ib_demux_pv_qp *tun_qp;
542 tun_qp = &tun_ctx->qp[0];
544 tun_qp = &tun_ctx->qp[1];
571 src_qp = tun_qp->qp;
592 spin_lock(&tun_qp->tx_lock);
593 if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
597 tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
598 spin_unlock(&tun_qp->tx_lock);
602 tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
603 if (tun_qp->tx_ring[tun_tx_ix].ah)
604 rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah, 0);
605 tun_qp->tx_ring[tun_tx_ix].ah = ah;
607 tun_qp->tx_ring[tun_tx_ix].buf.map,
649 tun_qp->tx_ring[tun_tx_ix].buf.map,
653 list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
672 spin_lock(&tun_qp->tx_lock);
673 tun_qp->tx_ix_tail++;
674 spin_unlock(&tun_qp->tx_lock);
675 tun_qp->tx_ring[tun_tx_ix].ah = NULL;
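The tx_ix_head/tx_ix_tail pair seen above is a free-running producer/consumer counter over a power-of-two ring: the raw difference of the counters gives the number of sends in flight, and only the low bits select a slot. A minimal sketch of that accounting with illustrative names (RING_SIZE stands in for MLX4_NUM_TUNNEL_BUFS; serialization is left to the caller's lock, as in the driver):

#define RING_SIZE 16	/* must be a power of two */

struct tx_ring {
	unsigned int head;	/* bumped by the sender (producer) */
	unsigned int tail;	/* bumped when a send completes (consumer) */
};

/* Claim a slot, or return -1 if all RING_SIZE slots are in flight.
 * head - tail stays correct even after the counters wrap. */
static int tx_ring_get_slot(struct tx_ring *r)
{
	if (r->head - r->tail >= RING_SIZE)
		return -1;
	return (++r->head) & (RING_SIZE - 1);
}

/* Retire the oldest in-flight slot once its completion is seen. */
static void tx_ring_put_slot(struct tx_ring *r)
{
	r->tail++;
}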
1313 struct mlx4_ib_demux_pv_qp *tun_qp,
1321 size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1324 sg_list.addr = tun_qp->ring[index].map;
1332 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1333 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1335 return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
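mlx4_ib_post_pv_qp_buf() (lines 1313-1335) reposts one pre-mapped ring buffer as a single-SGE receive work request, with the slot index and proxy QP type encoded into wr_id so the completion handler can find the buffer again. A reduced sketch of that call pattern; the DMA address, lkey and wr_id cookie are placeholders supplied by the caller, and the const-qualified bad-WR pointer assumes a recent kernel's ib_post_recv() signature:

#include <rdma/ib_verbs.h>

/* Repost a pre-mapped 'size'-byte buffer on 'qp' as one receive WR. */
static int repost_recv_buf(struct ib_qp *qp, u64 dma_addr, u32 lkey,
			   u32 size, u64 wr_id_cookie)
{
	struct ib_sge sge = {
		.addr   = dma_addr,
		.length = size,
		.lkey   = lkey,
	};
	struct ib_recv_wr wr = {
		.wr_id   = wr_id_cookie,	/* slot index + QP type */
		.sg_list = &sge,
		.num_sge = 1,
	};
	const struct ib_recv_wr *bad_wr;

	return ib_post_recv(qp, &wr, &bad_wr);
}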
1478 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1480 struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1507 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
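MLX4_TUN_SET_WRID_QPN() and MLX4_TUN_WRID_QPN() pack and unpack the proxy QP type in the 64-bit wr_id, alongside the buffer index in the low bits, which is how lines 1478-1480 recover both the ring and the slot from a completion alone. An illustrative encoding in the same spirit; the bit positions and the TUN_ names below are assumptions for the sketch, not copied from the driver:

#include <linux/types.h>

#define TUN_NUM_BUFS	16			/* stands in for MLX4_NUM_TUNNEL_BUFS */
#define TUN_QPN_SHIFT	32			/* assumed position of the QP-type field */
#define TUN_RECV_BIT	(1ULL << 34)		/* assumed "this was a receive" flag */

#define TUN_SET_WRID_QPN(qpt)	(((u64)((qpt) & 0x3)) << TUN_QPN_SHIFT)
#define TUN_WRID_QPN(wrid)	((u32)(((wrid) >> TUN_QPN_SHIFT) & 0x3))
#define TUN_WRID_INDEX(wrid)	((u32)((wrid) & (TUN_NUM_BUFS - 1)))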
1603 struct mlx4_ib_demux_pv_qp *tun_qp;
1610 tun_qp = &ctx->qp[qp_type];
1612 tun_qp->ring = kcalloc(nmbr_bufs,
1615 if (!tun_qp->ring)
1618 tun_qp->tx_ring = kcalloc(nmbr_bufs,
1621 if (!tun_qp->tx_ring) {
1622 kfree(tun_qp->ring);
1623 tun_qp->ring = NULL;
1636 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1637 if (!tun_qp->ring[i].addr)
1639 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1640 tun_qp->ring[i].addr,
1643 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1644 kfree(tun_qp->ring[i].addr);
1650 tun_qp->tx_ring[i].buf.addr =
1652 if (!tun_qp->tx_ring[i].buf.addr)
1654 tun_qp->tx_ring[i].buf.map =
1656 tun_qp->tx_ring[i].buf.addr,
1660 tun_qp->tx_ring[i].buf.map)) {
1661 kfree(tun_qp->tx_ring[i].buf.addr);
1664 tun_qp->tx_ring[i].ah = NULL;
1666 spin_lock_init(&tun_qp->tx_lock);
1667 tun_qp->tx_ix_head = 0;
1668 tun_qp->tx_ix_tail = 0;
1669 tun_qp->proxy_qpt = qp_type;
1676 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1678 kfree(tun_qp->tx_ring[i].buf.addr);
1684 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1686 kfree(tun_qp->ring[i].addr);
1688 kfree(tun_qp->tx_ring);
1689 tun_qp->tx_ring = NULL;
1690 kfree(tun_qp->ring);
1691 tun_qp->ring = NULL;
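The allocation path above (lines 1636-1691) follows the usual kmalloc() + ib_dma_map_single() + ib_dma_mapping_error() idiom, and on failure walks backwards unmapping and freeing everything done so far; the free path in the next group repeats that unwind for every buffer. A condensed sketch of the idiom for the receive ring; the struct and function names are placeholders, not the driver's:

#include <linux/slab.h>
#include <rdma/ib_verbs.h>

struct rx_buf {
	void *addr;
	u64   map;
};

/* Allocate and DMA-map 'n' receive buffers of 'size' bytes each,
 * unwinding on the first failure. */
static int alloc_rx_bufs(struct ib_device *dev, struct rx_buf *bufs,
			 int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i].addr = kmalloc(size, GFP_KERNEL);
		if (!bufs[i].addr)
			goto err;
		bufs[i].map = ib_dma_map_single(dev, bufs[i].addr, size,
						DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(dev, bufs[i].map)) {
			kfree(bufs[i].addr);
			goto err;
		}
	}
	return 0;

err:
	while (--i >= 0) {
		ib_dma_unmap_single(dev, bufs[i].map, size, DMA_FROM_DEVICE);
		kfree(bufs[i].addr);
	}
	return -ENOMEM;
}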
1699 struct mlx4_ib_demux_pv_qp *tun_qp;
1706 tun_qp = &ctx->qp[qp_type];
1717 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1719 kfree(tun_qp->ring[i].addr);
1723 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1725 kfree(tun_qp->tx_ring[i].buf.addr);
1726 if (tun_qp->tx_ring[i].ah)
1727 rdma_destroy_ah(tun_qp->tx_ring[i].ah, 0);
1729 kfree(tun_qp->tx_ring);
1730 kfree(tun_qp->ring);
1736 struct mlx4_ib_demux_pv_qp *tun_qp;
1743 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1748 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1756 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1758 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1760 spin_lock(&tun_qp->tx_lock);
1761 tun_qp->tx_ix_tail++;
1762 spin_unlock(&tun_qp->tx_lock);
1773 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1775 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1777 spin_lock(&tun_qp->tx_lock);
1778 tun_qp->tx_ix_tail++;
1779 spin_unlock(&tun_qp->tx_lock);
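The completion-handling loop above (lines 1736-1779) drains the tunnel CQ with ib_poll_cq() one entry at a time: successful receives are processed and reposted via mlx4_ib_post_pv_qp_buf(), successful sends have their address handle destroyed and tx_ix_tail advanced, and failed sends release their slot the same way. A stripped-down shape of that loop, with the driver-specific work left as comments:

#include <rdma/ib_verbs.h>

static void drain_tunnel_cq(struct ib_cq *cq)
{
	struct ib_wc wc;

	/* Poll until the CQ is empty; ib_poll_cq() returns how many
	 * completions were written into 'wc'. */
	while (ib_poll_cq(cq, 1, &wc) == 1) {
		if (wc.status != IB_WC_SUCCESS) {
			/* a failed send still has its AH destroyed and
			 * tx_ix_tail advanced so the slot is reusable */
			continue;
		}
		switch (wc.opcode) {
		case IB_WC_RECV:
			/* demultiplex the tunnelled MAD, then repost the buffer */
			break;
		case IB_WC_SEND:
			/* destroy the per-send AH and advance tx_ix_tail */
			break;
		default:
			break;
		}
	}
}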
1798 struct mlx4_ib_demux_pv_qp *tun_qp;
1807 tun_qp = &ctx->qp[qp_type];
1833 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1834 if (IS_ERR(tun_qp->qp)) {
1835 ret = PTR_ERR(tun_qp->qp);
1836 tun_qp->qp = NULL;
1854 ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1861 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1869 ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1877 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1887 ib_destroy_qp(tun_qp->qp);
1888 tun_qp->qp = NULL;
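The last group is the QP bring-up: the tunnel QP is created, driven through INIT, RTR and RTS with ib_modify_qp() (lines 1854-1869), pre-loaded with receive buffers via mlx4_ib_post_pv_qp_buf(), and destroyed again if any step fails. A minimal UD-style transition sequence in the same shape; the attribute masks here are the generic ones a UD QP requires and are not copied from the driver's qp_attr_mask_INIT:

#include <linux/string.h>
#include <rdma/ib_verbs.h>

/* Drive a UD QP from RESET to RTS; 'port', 'pkey_index' and 'qkey'
 * come from the caller's configuration. */
static int bring_up_ud_qp(struct ib_qp *qp, u8 port, u16 pkey_index, u32 qkey)
{
	struct ib_qp_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state   = IB_QPS_INIT;
	attr.port_num   = port;
	attr.pkey_index = pkey_index;
	attr.qkey       = qkey;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				      IB_QP_PORT | IB_QP_QKEY);
	if (ret)
		return ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTR;
	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret)
		return ret;

	memset(&attr, 0, sizeof(attr));
	attr.qp_state = IB_QPS_RTS;
	attr.sq_psn   = 0;
	return ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
}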