Lines Matching defs:recv_cq (mlx4 InfiniBand driver, drivers/infiniband/hw/mlx4/qp.c)
53 struct mlx4_ib_cq *recv_cq);
55 struct mlx4_ib_cq *recv_cq);
926 to_mcq(init_attr->recv_cq));
936 mcq = to_mcq(init_attr->recv_cq);
939 to_mcq(init_attr->recv_cq));
1200 to_mcq(init_attr->recv_cq));
1210 mcq = to_mcq(init_attr->recv_cq);
1213 to_mcq(init_attr->recv_cq));
1267 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1268 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1270 if (send_cq == recv_cq) {
1272 __acquire(&recv_cq->lock);
1273 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1275 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1277 spin_lock(&recv_cq->lock);
1282 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1283 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1285 if (send_cq == recv_cq) {
1286 __release(&recv_cq->lock);
1288 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1289 spin_unlock(&recv_cq->lock);
1293 spin_unlock(&recv_cq->lock);
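The two helpers matched above (mlx4_ib_lock_cqs at 1267, mlx4_ib_unlock_cqs at 1282) take both of a QP's CQ locks. Because the send and receive queues may share one CQ, and because two different QPs can reference the same pair of CQs in opposite roles, the driver always locks the CQ with the lower cqn first; spin_lock_nested(..., SINGLE_DEPTH_NESTING) tells lockdep the nesting is intentional, and the __acquire/__release annotations keep sparse's lock bookkeeping balanced in the aliased (send_cq == recv_cq) case. Below is a minimal userspace sketch of the same ordering discipline; the names (struct cq, cq_lock_pair, cq_unlock_pair) are invented for illustration, and pthread mutexes stand in for spinlocks.

#include <pthread.h>

/* Toy CQ: the unique cqn defines a global lock order. */
struct cq {
	int cqn;
	pthread_mutex_t lock;
};

static void cq_lock_pair(struct cq *send_cq, struct cq *recv_cq)
{
	if (send_cq == recv_cq) {
		/* Send and recv share one CQ: a single lock suffices. */
		pthread_mutex_lock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		/* Always lock the lower-numbered CQ first, so any two
		 * threads locking the same pair nest the same way and
		 * ABBA deadlock is impossible. */
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

static void cq_unlock_pair(struct cq *send_cq, struct cq *recv_cq)
{
	/* Unlock order does not affect deadlock avoidance. */
	if (send_cq != recv_cq)
		pthread_mutex_unlock(&recv_cq->lock);
	pthread_mutex_unlock(&send_cq->lock);
}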
1316 struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
1321 *recv_cq = *send_cq;
1325 *recv_cq = *send_cq;
1328 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
1331 *recv_cq;
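get_cqs() (matched at 1316-1331) resolves which two CQs a QP actually uses: XRC QPs have no receive queue of their own, so *recv_cq is simply aliased to *send_cq, while for ordinary QPs the pointers come from ibqp.send_cq/ibqp.recv_cq (or, when the source is a WQ rather than a QP, from the WQ's single CQ, as the MLX4_IB_QP_SRC ternary at 1328 shows). Callers can then test send_cq != recv_cq to avoid locking or cleaning the same CQ twice. A simplified, hypothetical rendering of that selection, with invented names and the QP/WQ source discriminator omitted:

/* Invented types for illustration only. */
struct cq;

enum qp_kind { QP_REGULAR, QP_XRC_INI, QP_XRC_TGT };

struct qp {
	enum qp_kind kind;
	struct cq *send_cq;
	struct cq *recv_cq;	/* unused by XRC QPs */
};

static void get_cq_pair(struct qp *qp, struct cq **send_cq,
			struct cq **recv_cq)
{
	switch (qp->kind) {
	case QP_XRC_INI:
	case QP_XRC_TGT:
		/* No independent receive queue: alias both pointers so
		 * later send_cq != recv_cq checks skip the second CQ. */
		*send_cq = qp->send_cq;
		*recv_cq = *send_cq;
		break;
	default:
		*send_cq = qp->send_cq;
		*recv_cq = qp->recv_cq;
		break;
	}
}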
1369 struct mlx4_ib_cq *send_cq, *recv_cq;
1400 get_cqs(qp, src, &send_cq, &recv_cq);
1403 mlx4_ib_lock_cqs(send_cq, recv_cq);
1410 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1412 if (send_cq != recv_cq)
1418 mlx4_ib_unlock_cqs(send_cq, recv_cq);
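The destroy path (1369-1418) ties these pieces together: it looks up the CQ pair, takes both locks in canonical order, scrubs completions belonging to the dying QP's QPN out of the receive CQ via __mlx4_ib_cq_clean() (the real call also passes the SRQ, if any, so its entries can be returned), repeats for the send CQ only when it is distinct, and unlocks. A hedged sketch of that sequence, reusing the invented helpers from the sketches above and a hypothetical cq_clean() standing in for __mlx4_ib_cq_clean():

struct cq;
struct qp;

/* Declarations for the helpers sketched earlier (invented names). */
void get_cq_pair(struct qp *qp, struct cq **send_cq, struct cq **recv_cq);
void cq_lock_pair(struct cq *send_cq, struct cq *recv_cq);
void cq_unlock_pair(struct cq *send_cq, struct cq *recv_cq);
void cq_clean(struct cq *cq, unsigned int qpn);	/* hypothetical */

void destroy_qp_scrub(struct qp *qp, unsigned int qpn)
{
	struct cq *send_cq, *recv_cq;

	get_cq_pair(qp, &send_cq, &recv_cq);
	cq_lock_pair(send_cq, recv_cq);

	/* Remove stale completions for this QPN so none can be
	 * reported after the QPN is freed and recycled; clean each
	 * CQ exactly once. */
	cq_clean(recv_cq, qpn);
	if (send_cq != recv_cq)
		cq_clean(send_cq, qpn);

	cq_unlock_pair(send_cq, recv_cq);
}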
1533 init_attr->recv_cq = init_attr->send_cq;
2088 struct mlx4_ib_cq *send_cq, *recv_cq;
2323 get_cqs(qp, src_type, &send_cq, &recv_cq);
2326 recv_cq = send_cq;
2329 context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
2545 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2547 if (send_cq != recv_cq)
3837 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
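The match at 3837 is the post-recv overflow test: mlx4_wq_overflow() compares the ring occupancy (head minus tail, in wrap-safe unsigned arithmetic) plus the requests queued so far against max_post, and only if the queue looks full does it retake the receive CQ's lock and re-read, because the consumer index advances while that CQ is being polled. A self-contained userspace sketch of the same check, with invented names and a pthread mutex in place of the CQ spinlock:

#include <stdbool.h>
#include <pthread.h>

struct cq { pthread_mutex_t lock; };

struct wq {
	unsigned int head;	/* producer index, bumped per posted WR */
	unsigned int tail;	/* consumer index, advanced by CQ polling */
	unsigned int max_post;	/* ring capacity */
};

/* Returns true if posting nreq more WRs would overflow the ring.
 * head - tail stays correct across wraparound because both are
 * free-running unsigned counters. */
static bool wq_overflow(struct wq *wq, unsigned int nreq, struct cq *cq)
{
	unsigned int cur = wq->head - wq->tail;

	if (cur + nreq < wq->max_post)
		return false;

	/* Possibly full: re-read under the CQ lock, since tail only
	 * moves while that lock is held by the polling code. */
	pthread_mutex_lock(&cq->lock);
	cur = wq->head - wq->tail;
	pthread_mutex_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}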
4138 ib_qp_init_attr.recv_cq = init_attr->cq;
4139 ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
4448 struct ib_cq *cq = qp->recv_cq;
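The final match at 4448 fetches qp->recv_cq in what appears to be the receive-queue drain path, where the driver waits until every outstanding receive WR has completed (typically flushed in error once the QP has been moved to the error state). A rough userspace analogue using libibverbs, shown only to illustrate the idea: post a sentinel WR, then poll the recv CQ until its completion surfaces. A real drain must first transition the QP to IBV_QPS_ERR and must not consume completions that belong to other users of the CQ; both concerns are glossed over here.

#include <stdint.h>
#include <infiniband/verbs.h>

/* Sentinel-based RQ drain sketch (simplified; error handling trimmed,
 * and completions for other WRs are silently discarded, which a real
 * implementation must not do). */
static int drain_rq(struct ibv_qp *qp)
{
	const uint64_t SENTINEL = 0xdeadbeefULL;	/* arbitrary marker */
	struct ibv_recv_wr wr = { .wr_id = SENTINEL }, *bad;
	struct ibv_wc wc;
	int n;

	if (ibv_post_recv(qp, &wr, &bad))
		return -1;

	do {
		n = ibv_poll_cq(qp->recv_cq, 1, &wc);
		if (n < 0)
			return -1;
	} while (n == 0 || wc.wr_id != SENTINEL);

	return 0;	/* all receives before the sentinel have completed */
}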