Lines Matching defs:recv_cq
53 struct mlx4_ib_cq *recv_cq);
55 struct mlx4_ib_cq *recv_cq);
961 to_mcq(init_attr->recv_cq));
971 mcq = to_mcq(init_attr->recv_cq);
974 to_mcq(init_attr->recv_cq));
1235 to_mcq(init_attr->recv_cq));
1245 mcq = to_mcq(init_attr->recv_cq);
1248 to_mcq(init_attr->recv_cq));
1302 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1303 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
1305 if (send_cq == recv_cq) {
1307 __acquire(&recv_cq->lock);
1308 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1310 spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
1312 spin_lock(&recv_cq->lock);
1317 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
1318 __releases(&send_cq->lock) __releases(&recv_cq->lock)
1320 if (send_cq == recv_cq) {
1321 __release(&recv_cq->lock);
1323 } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
1324 spin_unlock(&recv_cq->lock);
1328 spin_unlock(&recv_cq->lock);
1351 struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)
1356 *recv_cq = *send_cq;
1360 *recv_cq = *send_cq;
1363 *recv_cq = (src == MLX4_IB_QP_SRC) ? to_mcq(qp->ibqp.recv_cq) :
1366 *recv_cq;
1404 struct mlx4_ib_cq *send_cq, *recv_cq;
1435 get_cqs(qp, src, &send_cq, &recv_cq);
1438 mlx4_ib_lock_cqs(send_cq, recv_cq);
1445 __mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
1447 if (send_cq != recv_cq)
1453 mlx4_ib_unlock_cqs(send_cq, recv_cq);
1568 init_attr->recv_cq = init_attr->send_cq;
2121 struct mlx4_ib_cq *send_cq, *recv_cq;
2356 get_cqs(qp, src_type, &send_cq, &recv_cq);
2359 recv_cq = send_cq;
2362 context->cqn_recv = cpu_to_be32(recv_cq->mcq.cqn);
2578 mlx4_ib_cq_clean(recv_cq, qp->mqp.qpn,
2580 if (send_cq != recv_cq)
3870 if (mlx4_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
4173 ib_qp_init_attr.recv_cq = init_attr->cq;
4174 ib_qp_init_attr.send_cq = ib_qp_init_attr.recv_cq; /* Dummy CQ */
4483 struct ib_cq *cq = qp->recv_cq;
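
The recurring pattern in the matches above is the CQ lock-ordering discipline of mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs() (source lines 1302-1328) and its use on the QP destroy path, where stale completions for the QP are purged from the receive CQ (and from the send CQ when it differs) while both locks are held (source lines 1435-1453). The sketch below is a standalone, user-space illustration of that discipline only: pthread mutexes stand in for the kernel spinlocks, and the demo_* names and the demo_cq struct are invented for this example, not part of the driver.

/*
 * Illustration of the fixed lock order visible in the listing: when a QP's
 * send and receive CQs differ, both locks are always taken lowest-CQ-number
 * first and released in reverse, so concurrent paths that touch the same
 * pair of CQs can never deadlock. Illustrative names only.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_cq {
	unsigned int cqn;        /* analogous to mcq.cqn          */
	pthread_mutex_t lock;    /* analogous to the CQ spinlock  */
};

static void demo_lock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
{
	if (send_cq == recv_cq) {
		/* Shared CQ: only one lock to take. */
		pthread_mutex_lock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		/* Lower CQ number first (the kernel takes the second
		 * lock with spin_lock_nested(..., SINGLE_DEPTH_NESTING)). */
		pthread_mutex_lock(&send_cq->lock);
		pthread_mutex_lock(&recv_cq->lock);
	} else {
		pthread_mutex_lock(&recv_cq->lock);
		pthread_mutex_lock(&send_cq->lock);
	}
}

static void demo_unlock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
{
	/* Release in the reverse of the acquisition order. */
	if (send_cq == recv_cq) {
		pthread_mutex_unlock(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		pthread_mutex_unlock(&recv_cq->lock);
		pthread_mutex_unlock(&send_cq->lock);
	} else {
		pthread_mutex_unlock(&send_cq->lock);
		pthread_mutex_unlock(&recv_cq->lock);
	}
}

int main(void)
{
	struct demo_cq a = { .cqn = 7, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct demo_cq b = { .cqn = 9, .lock = PTHREAD_MUTEX_INITIALIZER };

	/* Rough shape of the destroy path in the listing: lock both CQs,
	 * purge the QP's stale entries from each, then unlock. */
	demo_lock_cqs(&a, &b);
	printf("both CQ locks held, %u taken before %u\n",
	       a.cqn < b.cqn ? a.cqn : b.cqn,
	       a.cqn < b.cqn ? b.cqn : a.cqn);
	demo_unlock_cqs(&a, &b);
	return 0;
}

Ordering by CQ number gives a single global order across all CQs, so two QPs that share the same pair of CQs in opposite send/recv roles still acquire them in the same sequence. In the real driver the __acquires/__releases and __acquire/__release annotations on lines 1303-1321 exist only to keep sparse consistent about the aliased lock in the send_cq == recv_cq case, and spin_lock_nested() keeps lockdep from flagging the deliberate second acquisition of a lock of the same class.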