/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_qp.c
    207  add_qp_to_list(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_cq *send_cq, struct ib_cq *recv_cq)    add_qp_to_list() argument
    209  struct ib_cq *send_cq, struct ib_cq *recv_cq)    in add_qp_to_list()
    214  hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;    in add_qp_to_list()
    245  add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,    in hns_roce_qp_store()
   1206  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
   1207  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
   1209  if (unlikely(send_cq == NULL && recv_cq == NULL)) {
   1210  __acquire(&send_cq->lock);
   1212  } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
   1213  spin_lock_irq(&send_cq->lock);
  [all...]
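
The hns entry above (and the mthca entries later in this listing) funnel through the same helper shape: take a QP's send and receive CQ locks together without deadlocking. Below is a minimal sketch of the pattern, assuming a simplified struct demo_cq; the NULL-tolerant branches mirror hns_roce_lock_cqs() and the cqn-ordered branch mirrors mthca_lock_cqs().

    #include <linux/spinlock.h>
    #include <linux/types.h>

    /* Hypothetical stand-in for hns_roce_cq/mthca_cq: only the two
     * fields the locking pattern needs. */
    struct demo_cq {
            spinlock_t lock;
            u32 cqn;                /* CQ number: gives a stable lock order */
    };

    /* Lock both CQs of a QP: tolerate NULL CQs, take one lock when send
     * and recv share a CQ, otherwise lock in ascending cqn order so two
     * CPUs locking the same pair in opposite roles cannot deadlock.
     * __acquire()/__acquires() are no-ops at runtime and only keep
     * sparse's lock-balance checking happy. */
    static void demo_lock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
            __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
    {
            if (!send_cq && !recv_cq) {
                    __acquire(&send_cq->lock);
                    __acquire(&recv_cq->lock);
            } else if (!recv_cq) {
                    spin_lock_irq(&send_cq->lock);
                    __acquire(&recv_cq->lock);
            } else if (!send_cq) {
                    spin_lock_irq(&recv_cq->lock);
                    __acquire(&send_cq->lock);
            } else if (send_cq == recv_cq) {
                    spin_lock_irq(&send_cq->lock);
                    __acquire(&recv_cq->lock);
            } else if (send_cq->cqn < recv_cq->cqn) {
                    spin_lock_irq(&send_cq->lock);
                    spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
            } else {
                    spin_lock_irq(&recv_cq->lock);
                    spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
            }
    }

spin_lock_nested() is needed on the second lock because lockdep would otherwise flag two locks of the same lock class as recursive locking.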
/kernel/linux/linux-6.6/drivers/infiniband/core/ |
uverbs_std_types_qp.c
     95  struct ib_cq *send_cq = NULL;    in UVERBS_METHOD_QP_CREATE()  local
    166  /* send_cq is optional */    in UVERBS_METHOD_QP_CREATE()
    168  send_cq = uverbs_attr_get_obj(attrs,    in UVERBS_METHOD_QP_CREATE()
    170  if (IS_ERR(send_cq))    in UVERBS_METHOD_QP_CREATE()
    171  return PTR_ERR(send_cq);    in UVERBS_METHOD_QP_CREATE()
    175  send_cq = uverbs_attr_get_obj(attrs,    in UVERBS_METHOD_QP_CREATE()
    177  if (IS_ERR(send_cq))    in UVERBS_METHOD_QP_CREATE()
    178  return PTR_ERR(send_cq);    in UVERBS_METHOD_QP_CREATE()
    234  attr.send_cq = send_cq;    in UVERBS_METHOD_QP_CREATE()
  [all...]
/kernel/linux/linux-5.10/drivers/infiniband/core/ |
uverbs_std_types_qp.c
     95  struct ib_cq *send_cq = NULL;    in UVERBS_METHOD_QP_CREATE()  local
    166  /* send_cq is optional */    in UVERBS_METHOD_QP_CREATE()
    168  send_cq = uverbs_attr_get_obj(attrs,    in UVERBS_METHOD_QP_CREATE()
    170  if (IS_ERR(send_cq))    in UVERBS_METHOD_QP_CREATE()
    171  return PTR_ERR(send_cq);    in UVERBS_METHOD_QP_CREATE()
    175  send_cq = uverbs_attr_get_obj(attrs,    in UVERBS_METHOD_QP_CREATE()
    177  if (IS_ERR(send_cq))    in UVERBS_METHOD_QP_CREATE()
    178  return PTR_ERR(send_cq);    in UVERBS_METHOD_QP_CREATE()
    234  attr.send_cq = send_cq;    in UVERBS_METHOD_QP_CREATE()
  [all...]
core_priv.h
    345  qp->send_cq = attr->send_cq;    in _ib_create_qp()
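
Both kernel versions of uverbs_std_types_qp.c above lean on the ERR_PTR convention: uverbs_attr_get_obj() returns either a valid object pointer or an errno encoded in the pointer itself, which is what lets send_cq stay optional without a separate status out-parameter. A minimal sketch of the convention, with demo_get_obj()/demo_create() as hypothetical names:

    #include <linux/err.h>
    #include <linux/types.h>

    struct demo_obj { int id; };                    /* stand-in for a CQ */
    static struct demo_obj demo_cq_obj = { .id = 1 };

    /* Hypothetical lookup: errno values travel inside the returned
     * pointer via ERR_PTR(), mirroring uverbs_attr_get_obj(). */
    static struct demo_obj *demo_get_obj(bool present)
    {
            return present ? &demo_cq_obj : ERR_PTR(-ENOENT);
    }

    static int demo_create(bool has_send_cq)
    {
            struct demo_obj *send_cq = NULL;        /* send_cq is optional */

            if (has_send_cq) {
                    send_cq = demo_get_obj(true);
                    if (IS_ERR(send_cq))
                            return PTR_ERR(send_cq); /* decode back to -errno */
            }
            /* ... wire send_cq into the QP init attributes ... */
            return 0;
    }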
/kernel/linux/linux-5.10/drivers/infiniband/ulp/ipoib/ |
ipoib_verbs.c
    187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,    in ipoib_transport_dev_init()
    189  if (IS_ERR(priv->send_cq)) {    in ipoib_transport_dev_init()
    197  init_attr.send_cq = priv->send_cq;    in ipoib_transport_dev_init()
    218  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))    in ipoib_transport_dev_init()
    244  ib_destroy_cq(priv->send_cq);    in ipoib_transport_dev_init()
    266  ib_destroy_cq(priv->send_cq);    in ipoib_transport_dev_cleanup()
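
The ipoib matches show the full consumer lifecycle for a send CQ: create it with a completion handler, arm it with ib_req_notify_cq() so the handler fires on the next completion, hand it to the QP via init_attr.send_cq, and destroy it on every unwind path. A sketch of that wiring, assuming `ca` is a valid ib_device; the demo_* names are illustrative, not ipoib's:

    #include <rdma/ib_verbs.h>

    /* Completion callback: runs when the armed CQ sees its next
     * completion.  Real drivers typically schedule NAPI or a work item
     * here and drain the CQ outside hard-irq context. */
    static void demo_tx_completion(struct ib_cq *cq, void *ctx)
    {
    }

    static struct ib_cq *demo_create_send_cq(struct ib_device *ca,
                                             void *ctx, int depth)
    {
            struct ib_cq_init_attr cq_attr = { .cqe = depth };
            struct ib_cq *cq;

            cq = ib_create_cq(ca, demo_tx_completion, NULL, ctx, &cq_attr);
            if (IS_ERR(cq))
                    return cq;

            /* Arm the CQ: request one callback on the next completion,
             * as the ib_req_notify_cq(..., IB_CQ_NEXT_COMP) match above
             * does. */
            if (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP)) {
                    ib_destroy_cq(cq);
                    return ERR_PTR(-EIO);
            }
            return cq;
    }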
/kernel/linux/linux-6.6/drivers/infiniband/ulp/ipoib/ |
ipoib_verbs.c
    187  priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,    in ipoib_transport_dev_init()
    189  if (IS_ERR(priv->send_cq)) {    in ipoib_transport_dev_init()
    197  init_attr.send_cq = priv->send_cq;    in ipoib_transport_dev_init()
    218  if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP))    in ipoib_transport_dev_init()
    244  ib_destroy_cq(priv->send_cq);    in ipoib_transport_dev_init()
    266  ib_destroy_cq(priv->send_cq);    in ipoib_transport_dev_cleanup()
/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/ |
hns_roce_qp.c
    182  struct ib_cq *scq = init_attr->send_cq;    in get_least_load_bankid_for_qp()
    259  add_qp_to_list(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_cq *send_cq, struct ib_cq *recv_cq)    add_qp_to_list() argument
    261  struct ib_cq *send_cq, struct ib_cq *recv_cq)    in add_qp_to_list()
    266  hr_send_cq = send_cq ? to_hr_cq(send_cq) : NULL;    in add_qp_to_list()
    297  add_qp_to_list(hr_dev, hr_qp, init_attr->send_cq,    in hns_roce_qp_store()
   1373  void hns_roce_lock_cqs(struct hns_roce_cq *send_cq, struct hns_roce_cq *recv_cq)
   1374  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
   1376  if (unlikely(send_cq == NULL && recv_cq == NULL)) {
   1377  __acquire(&send_cq->lock);
   1379  } else if (unlikely(send_cq != NULL && recv_cq == NULL)) {
  [all...]
/kernel/linux/linux-6.6/drivers/infiniband/hw/mana/ |
qp.c
    264  struct mana_ib_cq *send_cq =    in mana_ib_create_qp_raw()  local
    265  container_of(attr->send_cq, struct mana_ib_cq, ibcq);    in mana_ib_create_qp_raw()
    353  cq_spec.gdma_region = send_cq->gdma_region;    in mana_ib_create_qp_raw()
    354  cq_spec.queue_size = send_cq->cqe * COMP_ENTRY_SIZE;    in mana_ib_create_qp_raw()
    369  send_cq->gdma_region = GDMA_INVALID_DMA_REGION;    in mana_ib_create_qp_raw()
    372  send_cq->id = cq_spec.queue_index;    in mana_ib_create_qp_raw()
    376  qp->tx_object, qp->sq_id, send_cq->id);    in mana_ib_create_qp_raw()
    379  resp.cqid = send_cq->id;    in mana_ib_create_qp_raw()
/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/ |
mthca_qp.c
    735  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);    in __mthca_modify_qp()
    837  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)    in __mthca_modify_qp()
    838  mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);    in __mthca_modify_qp()
   1159  mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_qp_common() argument
   1161  struct mthca_cq *send_cq,    in mthca_alloc_qp_common()
   1288  mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_qp() argument
   1290  struct mthca_cq *send_cq,    in mthca_alloc_qp()
   1318  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,    in mthca_alloc_qp()
   1333  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
   1334  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
   1336  if (send_cq == recv_cq) {
   1337  spin_lock_irq(&send_cq->lock);
   1363  mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, int port, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_sqp() argument
   1445  struct mthca_cq *send_cq;    in mthca_free_qp()  local
  [all...]
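
The lock helper needs a mirror-image unlock whose __releases() annotations balance the __acquires() pairs; that is the mthca_unlock_cqs()/hns_roce_unlock_cqs() half of the pattern. A sketch of the counterpart to demo_lock_cqs() from the hns entry earlier, reusing struct demo_cq:

    /* Unlock both CQs.  Whichever CQ took spin_lock_irq() must be the
     * one that runs spin_unlock_irq(), so interrupts stay disabled
     * until the final lock drops. */
    static void demo_unlock_cqs(struct demo_cq *send_cq, struct demo_cq *recv_cq)
            __releases(&send_cq->lock) __releases(&recv_cq->lock)
    {
            if (!send_cq && !recv_cq) {
                    __release(&send_cq->lock);
                    __release(&recv_cq->lock);
            } else if (!recv_cq) {
                    __release(&recv_cq->lock);
                    spin_unlock_irq(&send_cq->lock);
            } else if (!send_cq) {
                    __release(&send_cq->lock);
                    spin_unlock_irq(&recv_cq->lock);
            } else if (send_cq == recv_cq) {
                    __release(&recv_cq->lock);
                    spin_unlock_irq(&send_cq->lock);
            } else if (send_cq->cqn < recv_cq->cqn) {
                    spin_unlock(&recv_cq->lock);
                    spin_unlock_irq(&send_cq->lock);
            } else {
                    spin_unlock(&send_cq->lock);
                    spin_unlock_irq(&recv_cq->lock);
            }
    }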
/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/ |
mthca_qp.c
    735  qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);    in __mthca_modify_qp()
    837  if (qp->ibqp.send_cq != qp->ibqp.recv_cq)    in __mthca_modify_qp()
    838  mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);    in __mthca_modify_qp()
   1162  mthca_alloc_qp_common(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_qp_common() argument
   1164  struct mthca_cq *send_cq,    in mthca_alloc_qp_common()
   1291  mthca_alloc_qp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_qp_type type, enum ib_sig_type send_policy, struct ib_qp_cap *cap, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_qp() argument
   1293  struct mthca_cq *send_cq,    in mthca_alloc_qp()
   1321  err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,    in mthca_alloc_qp()
   1336  static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
   1337  __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
   1339  if (send_cq == recv_cq) {
   1340  spin_lock_irq(&send_cq->lock);
   1366  mthca_alloc_sqp(struct mthca_dev *dev, struct mthca_pd *pd, struct mthca_cq *send_cq, struct mthca_cq *recv_cq, enum ib_sig_type send_policy, struct ib_qp_cap *cap, int qpn, u32 port, struct mthca_qp *qp, struct ib_udata *udata)    mthca_alloc_sqp() argument
   1448  struct mthca_cq *send_cq;    in mthca_free_qp()  local
  [all...]
/kernel/linux/linux-5.10/include/rdma/ |
rdmavt_qp.h
    805  struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);    in rvt_send_cq()
    915  * @send_cq - The cq for send
    920  static inline u32 ib_cq_tail(struct ib_cq *send_cq)    ib_cq_tail() argument
    922  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);    in ib_cq_tail()
    924  return ibcq_to_rvtcq(send_cq)->ip ?    in ib_cq_tail()
    926  ibcq_to_rvtcq(send_cq)->kqueue->tail;    in ib_cq_tail()
    931  * @send_cq - The cq for send
    936  static inline u32 ib_cq_head(struct ib_cq *send_cq)    ib_cq_head() argument
    938  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);    in ib_cq_head()
    940  return ibcq_to_rvtcq(send_cq)    in ib_cq_head()
  [all...]
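
rdmavt embeds the core ib_cq inside its own rvt_cq, so ibcq_to_rvtcq() is a container_of() accessor; ib_cq_tail()/ib_cq_head() then read from the user-mmapped queue when cq->ip is set, else from the kernel queue. A simplified sketch with illustrative demo_* types; the real code reads the shared user queue through RDMA_READ_UAPI_ATOMIC(), approximated here with READ_ONCE():

    #include <linux/kernel.h>       /* container_of() */
    #include <linux/types.h>

    struct demo_ib_cq { int cqe; };         /* stand-in for ib_cq */
    struct demo_cq_wc { u32 head; u32 tail; };

    struct demo_rvt_cq {
            struct demo_ib_cq ibcq;         /* must stay embedded */
            void *ip;                       /* non-NULL: user-mmapped CQ */
            struct demo_cq_wc *queue;       /* shared-with-user view */
            struct demo_cq_wc *kqueue;      /* kernel-only view */
    };

    static inline struct demo_rvt_cq *demo_ibcq_to_rvtcq(struct demo_ib_cq *ibcq)
    {
            return container_of(ibcq, struct demo_rvt_cq, ibcq);
    }

    /* Mirrors the shape of ib_cq_tail() above: read the tail from
     * whichever queue (user-mapped or kernel) this CQ actually uses. */
    static inline u32 demo_cq_tail(struct demo_ib_cq *send_cq)
    {
            struct demo_rvt_cq *cq = demo_ibcq_to_rvtcq(send_cq);

            return cq->ip ? READ_ONCE(cq->queue->tail) : cq->kqueue->tail;
    }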
/kernel/linux/linux-6.6/include/rdma/ |
rdmavt_qp.h
    805  struct rvt_cq *cq = ibcq_to_rvtcq(qp->ibqp.send_cq);    in rvt_send_cq()
    915  * @send_cq - The cq for send
    920  static inline u32 ib_cq_tail(struct ib_cq *send_cq)    ib_cq_tail() argument
    922  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);    in ib_cq_tail()
    924  return ibcq_to_rvtcq(send_cq)->ip ?    in ib_cq_tail()
    926  ibcq_to_rvtcq(send_cq)->kqueue->tail;    in ib_cq_tail()
    931  * @send_cq - The cq for send
    936  static inline u32 ib_cq_head(struct ib_cq *send_cq)    ib_cq_head() argument
    938  struct rvt_cq *cq = ibcq_to_rvtcq(send_cq);    in ib_cq_head()
    940  return ibcq_to_rvtcq(send_cq)    in ib_cq_head()
  [all...]
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/ |
qp.c
     88  struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
    827  static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
    829  static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
   1299  static int get_sq_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq)    get_sq_ts_format() argument
   1303  return get_ts_format(dev, send_cq, fr_supported(ts_cap),    in get_sq_ts_format()
   1307  static int get_qp_ts_format(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *send_cq,    get_qp_ts_format() argument
   1316  send_cq ? get_ts_format(dev, send_cq, fr_sup, rt_sup) :    in get_qp_ts_format()
   1593  to_mcq(init_attr->send_cq));    in create_raw_packet_qp()
   1934  scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);    in configure_requester_scat_cqe()
   2080  struct mlx5_ib_cq *send_cq;    in create_dci()  local
   2241  struct mlx5_ib_cq *send_cq;    in create_user_qp()  local
   2442  struct mlx5_ib_cq *send_cq;    in create_kernel_qp()  local
   2623  get_cqs(enum ib_qp_type qp_type, struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)    get_cqs() argument
   2661  struct mlx5_ib_cq *send_cq, *recv_cq;    in destroy_qp_common()  local
   4151  struct mlx5_ib_cq *send_cq, *recv_cq;    in __mlx5_ib_modify_qp()  local
  [all...]
gsi.c
     50  struct ib_cq *gsi_cq = mqp->ibqp.send_cq;    in generate_completions()
    141  hw_init_attr.send_cq = gsi->cq;    in mlx5_ib_create_gsi()
    205  .send_cq = gsi->cq,    in create_gsi_ud_qp()
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
qp.c
     52  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
     54  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
    727  if (init_attr->send_cq || init_attr->cap.max_send_wr) {    in _mlx4_ib_create_qp_rss()
    925  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),    in create_rq()
    934  mcq = to_mcq(init_attr->send_cq);    in create_rq()
    938  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),    in create_rq()
   1199  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),    in create_qp_common()
   1208  mcq = to_mcq(init_attr->send_cq);    in create_qp_common()
   1212  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),    in create_qp_common()
   1267  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
   1315  get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)    get_cqs() argument
   1369  struct mlx4_ib_cq *send_cq, *recv_cq;    in destroy_qp_common()  local
   2088  struct mlx4_ib_cq *send_cq, *recv_cq;    in __mlx4_ib_modify_qp()  local
  [all...]
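
The create_rq()/create_qp_common() matches bracket their list updates with mlx4_ib_lock_cqs()/mlx4_ib_unlock_cqs(): a QP is linked onto its CQs' QP lists only while both locks are held, so a concurrent poller or mlx4_ib_cq_clean() never sees a half-linked QP. A sketch of that step (hypothetical demo_* types; assumes the demo_cq from the locking sketch earlier grew a `struct list_head qp_list` member):

    #include <linux/list.h>

    struct demo_qp {
            struct list_head cq_send_entry;
            struct list_head cq_recv_entry;
    };

    /* Call only with both CQ locks held (see demo_lock_cqs() above).
     * When send and recv share one CQ, link the QP just once. */
    static void demo_add_qp_to_cqs(struct demo_qp *qp,
                                   struct demo_cq *send_cq,
                                   struct demo_cq *recv_cq)
    {
            list_add_tail(&qp->cq_send_entry, &send_cq->qp_list);
            if (recv_cq != send_cq)
                    list_add_tail(&qp->cq_recv_entry, &recv_cq->qp_list);
    }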
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/ |
qp.c
     52  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq,
     54  static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq,
    762  if (init_attr->send_cq || init_attr->cap.max_send_wr) {    in _mlx4_ib_create_qp_rss()
    960  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),    in create_rq()
    969  mcq = to_mcq(init_attr->send_cq);    in create_rq()
    973  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),    in create_rq()
   1234  mlx4_ib_lock_cqs(to_mcq(init_attr->send_cq),    in create_qp_common()
   1243  mcq = to_mcq(init_attr->send_cq);    in create_qp_common()
   1247  mlx4_ib_unlock_cqs(to_mcq(init_attr->send_cq),    in create_qp_common()
   1302  static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
   1350  get_cqs(struct mlx4_ib_qp *qp, enum mlx4_ib_source_type src, struct mlx4_ib_cq **send_cq, struct mlx4_ib_cq **recv_cq)    get_cqs() argument
   1404  struct mlx4_ib_cq *send_cq, *recv_cq;    in destroy_qp_common()  local
   2121  struct mlx4_ib_cq *send_cq, *recv_cq;    in __mlx4_ib_modify_qp()  local
  [all...]
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
qp.c
     75  struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
    744  static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
    746  static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
   1784  scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);    in configure_requester_scat_cqe()
   1929  struct mlx5_ib_cq *send_cq;    in create_user_qp()  local
   2047  if (init_attr->send_cq)    in create_user_qp()
   2048  MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);    in create_user_qp()
   2085  get_cqs(qp->type, init_attr->send_cq, init_attr->recv_cq,    in create_user_qp()
   2086  &send_cq, &recv_cq);    in create_user_qp()
   2088  mlx5_ib_lock_cqs(send_cq, recv_cq);    in create_user_qp()
   2119  struct mlx5_ib_cq *send_cq;    in create_kernel_qp()  local
   2296  get_cqs(enum ib_qp_type qp_type, struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq, struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)    get_cqs() argument
   2334  struct mlx5_ib_cq *send_cq, *recv_cq;    in destroy_qp_common()  local
   3819  struct mlx5_ib_cq *send_cq, *recv_cq;    in __mlx5_ib_modify_qp()  local
  [all...]
gsi.c
     50  struct ib_cq *gsi_cq = mqp->ibqp.send_cq;    in generate_completions()
    143  hw_init_attr.send_cq = gsi->cq;    in mlx5_ib_create_gsi()
    162  gsi->rx_qp->send_cq = hw_init_attr.send_cq;    in mlx5_ib_create_gsi()
    227  .send_cq = gsi->cq,    in create_gsi_ud_qp()
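
get_cqs() in both mlx5 qp.c versions resolves the caller's ib_cq pointers into driver CQs according to QP type: XRC targets own no CQs, XRC initiators only a send CQ, and everything else potentially both. A simplified sketch of that shape (the real function also covers mlx5-internal types such as the UMR QP; demo_* names are illustrative):

    #include <rdma/ib_verbs.h>

    struct demo_drv_cq { struct ib_cq ibcq; };  /* stand-in for mlx5_ib_cq */

    static inline struct demo_drv_cq *demo_to_cq(struct ib_cq *ibcq)
    {
            return container_of(ibcq, struct demo_drv_cq, ibcq);
    }

    static void demo_get_cqs(enum ib_qp_type qp_type,
                             struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
                             struct demo_drv_cq **send_cq,
                             struct demo_drv_cq **recv_cq)
    {
            switch (qp_type) {
            case IB_QPT_XRC_TGT:            /* no CQs at all */
                    *send_cq = NULL;
                    *recv_cq = NULL;
                    break;
            case IB_QPT_XRC_INI:            /* send side only */
                    *send_cq = ib_send_cq ? demo_to_cq(ib_send_cq) : NULL;
                    *recv_cq = NULL;
                    break;
            default:                        /* RC/UC/UD/...: both sides */
                    *send_cq = ib_send_cq ? demo_to_cq(ib_send_cq) : NULL;
                    *recv_cq = ib_recv_cq ? demo_to_cq(ib_recv_cq) : NULL;
                    break;
            }
    }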
/kernel/linux/linux-6.6/net/sunrpc/xprtrdma/ |
verbs.c
    341  if (ep->re_attr.send_cq)    in rpcrdma_ep_destroy()
    342  ib_free_cq(ep->re_attr.send_cq);    in rpcrdma_ep_destroy()
    343  ep->re_attr.send_cq = NULL;    in rpcrdma_ep_destroy()
    410  ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,    in rpcrdma_ep_create()
    413  if (IS_ERR(ep->re_attr.send_cq)) {    in rpcrdma_ep_create()
    414  rc = PTR_ERR(ep->re_attr.send_cq);    in rpcrdma_ep_create()
    415  ep->re_attr.send_cq = NULL;    in rpcrdma_ep_create()
    613  sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;    in rpcrdma_sendctx_create()
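
Both xprtrdma versions show a tidy CQ lifecycle: allocate with ib_alloc_cq_any() (letting the core pick a completion vector), NULL the pointer on failure so later teardown cannot free a stale value, and free-then-NULL on destroy. A sketch under assumed names; the IB_POLL_WORKQUEUE context here is an assumption, not necessarily what rpcrdma selects:

    #include <rdma/ib_verbs.h>

    static int demo_ep_create(struct ib_device *device, void *ctx,
                              int depth, struct ib_qp_init_attr *attr)
    {
            attr->send_cq = ib_alloc_cq_any(device, ctx, depth,
                                            IB_POLL_WORKQUEUE);
            if (IS_ERR(attr->send_cq)) {
                    int rc = PTR_ERR(attr->send_cq);

                    attr->send_cq = NULL;   /* teardown can't double-free */
                    return rc;
            }
            return 0;
    }

    static void demo_ep_destroy(struct ib_qp_init_attr *attr)
    {
            if (attr->send_cq)
                    ib_free_cq(attr->send_cq);
            attr->send_cq = NULL;
    }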
/kernel/linux/linux-6.6/fs/smb/server/ |
transport_rdma.c
    104  struct ib_cq *send_cq;    member
    446  if (t->send_cq)    in free_transport()
    447  ib_free_cq(t->send_cq);    in free_transport()
   1884  t->send_cq = ib_alloc_cq(t->cm_id->device, t,    in smb_direct_create_qpair()
   1887  if (IS_ERR(t->send_cq)) {    in smb_direct_create_qpair()
   1889  ret = PTR_ERR(t->send_cq);    in smb_direct_create_qpair()
   1890  t->send_cq = NULL;    in smb_direct_create_qpair()
   1909  qp_attr.send_cq = t->send_cq;    in smb_direct_create_qpair()
   1944  if (t->send_cq) {    in smb_direct_create_qpair()
  [all...]
/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/ |
verbs.c
    369  if (ep->re_attr.send_cq)    in rpcrdma_ep_destroy()
    370  ib_free_cq(ep->re_attr.send_cq);    in rpcrdma_ep_destroy()
    371  ep->re_attr.send_cq = NULL;    in rpcrdma_ep_destroy()
    447  ep->re_attr.send_cq = ib_alloc_cq_any(device, r_xprt,    in rpcrdma_ep_create()
    450  if (IS_ERR(ep->re_attr.send_cq)) {    in rpcrdma_ep_create()
    451  rc = PTR_ERR(ep->re_attr.send_cq);    in rpcrdma_ep_create()
    452  ep->re_attr.send_cq = NULL;    in rpcrdma_ep_create()
/kernel/linux/linux-5.10/drivers/net/ethernet/ibm/ehea/ |
ehea_main.c
    195  arr[i++].fwh = pr->send_cq->fw_handle;    in ehea_update_firmware_handles()
    800  struct ehea_cq *send_cq = pr->send_cq;    in ehea_proc_cqes()  local
    809  cqe = ehea_poll_cq(send_cq);    in ehea_proc_cqes()
    811  ehea_inc_cq(send_cq);    in ehea_proc_cqes()
    851  cqe = ehea_poll_cq(send_cq);    in ehea_proc_cqes()
    854  ehea_update_feca(send_cq, cqe_counter);    in ehea_proc_cqes()
    889  ehea_reset_cq_ep(pr->send_cq);    in ehea_poll()
    891  ehea_reset_cq_n1(pr->send_cq);    in ehea_poll()
    894  cqe_skb = ehea_poll_cq(pr->send_cq);    in ehea_poll()
  [all...]
/kernel/linux/linux-6.6/drivers/net/ethernet/ibm/ehea/ |
ehea_main.c
    199  arr[i++].fwh = pr->send_cq->fw_handle;    in ehea_update_firmware_handles()
    804  struct ehea_cq *send_cq = pr->send_cq;    in ehea_proc_cqes()  local
    813  cqe = ehea_poll_cq(send_cq);    in ehea_proc_cqes()
    815  ehea_inc_cq(send_cq);    in ehea_proc_cqes()
    855  cqe = ehea_poll_cq(send_cq);    in ehea_proc_cqes()
    858  ehea_update_feca(send_cq, cqe_counter);    in ehea_proc_cqes()
    893  ehea_reset_cq_ep(pr->send_cq);    in ehea_poll()
    895  ehea_reset_cq_n1(pr->send_cq);    in ehea_poll()
    898  cqe_skb = ehea_poll_cq(pr->send_cq);    in ehea_poll()
  [all...]
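
ehea_proc_cqes() above is this driver's flavor of the standard send-completion drain: poll the CQ up to a budget, reclaim TX resources per completion, update the firmware event counter (ehea_update_feca()), then re-arm. ehea does this through its own firmware calls; the verbs-style analogue of the same loop, sketched with the core ib_poll_cq()/ib_req_notify_cq() API, looks roughly like this:

    #include <rdma/ib_verbs.h>

    static int demo_drain_send_cq(struct ib_cq *send_cq, int budget)
    {
            struct ib_wc wc;
            int done = 0;

    again:
            while (done < budget && ib_poll_cq(send_cq, 1, &wc) > 0) {
                    /* reclaim the TX buffer/skb tied to wc.wr_id here */
                    done++;
            }

            /* Re-arm, and re-poll if completions raced the re-arm:
             * IB_CQ_REPORT_MISSED_EVENTS makes the call return > 0 when
             * the CQ is non-empty at arming time. */
            if (done < budget &&
                ib_req_notify_cq(send_cq, IB_CQ_NEXT_COMP |
                                          IB_CQ_REPORT_MISSED_EVENTS) > 0)
                    goto again;

            return done;
    }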
/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/ |
pvrdma_qp.c
     58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,    get_cqs() argument
     61  *send_cq = to_vcq(qp->ibqp.send_cq);    in get_cqs()
    366  cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;    in pvrdma_create_qp()
   1050  init_attr->send_cq = qp->ibqp.send_cq;    in pvrdma_query_qp()
/kernel/linux/linux-6.6/drivers/infiniband/hw/vmw_pvrdma/ |
pvrdma_qp.c
     58  static inline void get_cqs(struct pvrdma_qp *qp, struct pvrdma_cq **send_cq,    get_cqs() argument
     61  *send_cq = to_vcq(qp->ibqp.send_cq);    in get_cqs()
    359  cmd->send_cq_handle = to_vcq(init_attr->send_cq)->cq_handle;    in pvrdma_create_qp()
   1034  init_attr->send_cq = qp->ibqp.send_cq;    in pvrdma_query_qp()