Lines Matching defs:iwqp

739 struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;
741 refcount_inc(&iwqp->refcnt);
746 struct irdma_qp *iwqp = to_iwqp(ibqp);
747 struct irdma_device *iwdev = iwqp->iwdev;
752 if (!refcount_dec_and_test(&iwqp->refcnt)) {
757 qp_num = iwqp->ibqp.qp_num;
760 complete(&iwqp->free_qp);
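
The two fragments above (739-741 and 746-760) are the QP reference helpers. The pattern: each QP carries a refcount_t, and whoever drops the last reference unpublishes the QP from the qp_table and signals a completion so the destroy path can finish teardown. Below is a sketch of the surrounding functions; only the iwqp-matching lines shown above are verbatim, while the locals and the qptable_lock critical section are filled in from the upstream irdma driver and should be read as an assumption.

	void irdma_qp_add_ref(struct ib_qp *ibqp)
	{
		struct irdma_qp *iwqp = (struct irdma_qp *)ibqp;

		refcount_inc(&iwqp->refcnt);
	}

	void irdma_qp_rem_ref(struct ib_qp *ibqp)
	{
		struct irdma_qp *iwqp = to_iwqp(ibqp);
		struct irdma_device *iwdev = iwqp->iwdev;
		unsigned long flags;
		u32 qp_num;

		spin_lock_irqsave(&iwdev->rf->qptable_lock, flags);
		if (!refcount_dec_and_test(&iwqp->refcnt)) {
			spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
			return;
		}

		qp_num = iwqp->ibqp.qp_num;
		iwdev->rf->qp_table[qp_num] = NULL;	/* last ref: unpublish the QP */
		spin_unlock_irqrestore(&iwdev->rf->qptable_lock, flags);
		complete(&iwqp->free_qp);		/* unblock the destroy path */
	}
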
898 struct irdma_qp *iwqp;
900 iwqp = qp->qp_uk.back_qp;
901 irdma_next_iw_state(iwqp, next_state, 0, term, term_len);
911 struct irdma_qp *iwqp;
916 iwqp = qp->qp_uk.back_qp;
917 spin_lock_irqsave(&iwqp->lock, flags);
918 if (iwqp->hte_added) {
919 iwqp->hte_added = 0;
924 spin_unlock_irqrestore(&iwqp->lock, flags);
929 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
930 irdma_cm_disconn(iwqp);
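
Lines 911-930 belong to the terminate-done handler. Note the lock discipline: hte_added is tested and cleared under iwqp->lock, and the saved value is handed to irdma_next_iw_state() only after the lock is dropped, so the transition to IRDMA_QP_STATE_ERROR and the CM disconnect run without holding the QP spinlock. A sketch of the whole function, with the lines the matcher skipped (the first_time/term_flags bookkeeping) filled in from the upstream driver as an assumption:

	void irdma_terminate_done(struct irdma_sc_qp *qp, int timeout_occurred)
	{
		struct irdma_qp *iwqp;
		u8 hte = 0;
		bool first_time;
		unsigned long flags;

		iwqp = qp->qp_uk.back_qp;
		spin_lock_irqsave(&iwqp->lock, flags);
		if (iwqp->hte_added) {
			iwqp->hte_added = 0;
			hte = 1;	/* QP must also leave the hash table */
		}
		first_time = !(qp->term_flags & IRDMA_TERM_DONE);
		qp->term_flags |= IRDMA_TERM_DONE;
		spin_unlock_irqrestore(&iwqp->lock, flags);
		if (first_time) {
			if (!timeout_occurred)
				irdma_terminate_del_timer(qp);

			irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, hte, 0, 0);
			irdma_cm_disconn(iwqp);
		}
	}
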
936 struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
937 struct irdma_sc_qp *qp = &iwqp->sc_qp;
940 irdma_qp_rem_ref(&iwqp->ibqp);
949 struct irdma_qp *iwqp;
951 iwqp = qp->qp_uk.back_qp;
952 irdma_qp_add_ref(&iwqp->ibqp);
953 timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
954 iwqp->terminate_timer.expires = jiffies + HZ;
956 add_timer(&iwqp->terminate_timer);
965 struct irdma_qp *iwqp;
968 iwqp = qp->qp_uk.back_qp;
969 ret = del_timer(&iwqp->terminate_timer);
971 irdma_qp_rem_ref(&iwqp->ibqp);
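
Lines 936-971 show the terminate-timer lifecycle, and they tie back to the reference helpers at 739-760: irdma_terminate_start_timer() takes a QP reference before arming a one-second timer, the timeout handler drops it after running, and irdma_terminate_del_timer() drops it only when del_timer() reports it cancelled a still-pending timer. Exactly one reference is thus released on either path, and the QP cannot be freed while the timer is in flight. Reconstructed below; the lines not listed above are inferred from the upstream driver.

	static void irdma_terminate_timeout(struct timer_list *t)
	{
		struct irdma_qp *iwqp = from_timer(iwqp, t, terminate_timer);
		struct irdma_sc_qp *qp = &iwqp->sc_qp;

		irdma_terminate_done(qp, 1);	/* 1 = timeout occurred */
		irdma_qp_rem_ref(&iwqp->ibqp);	/* drop the ref taken at start */
	}

	void irdma_terminate_start_timer(struct irdma_sc_qp *qp)
	{
		struct irdma_qp *iwqp;

		iwqp = qp->qp_uk.back_qp;
		irdma_qp_add_ref(&iwqp->ibqp);	/* keep the QP alive while armed */
		timer_setup(&iwqp->terminate_timer, irdma_terminate_timeout, 0);
		iwqp->terminate_timer.expires = jiffies + HZ;

		add_timer(&iwqp->terminate_timer);
	}

	void irdma_terminate_del_timer(struct irdma_sc_qp *qp)
	{
		struct irdma_qp *iwqp;
		int ret;

		iwqp = qp->qp_uk.back_qp;
		ret = del_timer(&iwqp->terminate_timer);
		if (ret)	/* timer still pending: its ref was never dropped */
			irdma_qp_rem_ref(&iwqp->ibqp);
	}
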
1141 * @iwqp: qp ptr (user or kernel)
1143 void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
1145 struct irdma_device *iwdev = iwqp->iwdev;
1147 u32 qp_num = iwqp->ibqp.qp_num;
1149 irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
1150 irdma_dealloc_push_page(rf, &iwqp->sc_qp);
1151 if (iwqp->sc_qp.vsi) {
1152 irdma_qp_rem_qos(&iwqp->sc_qp);
1153 iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
1154 iwqp->sc_qp.user_pri);
1159 dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
1160 iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
1161 iwqp->q2_ctx_mem.va = NULL;
1162 dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
1163 iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
1164 iwqp->kqp.dma_mem.va = NULL;
1165 kfree(iwqp->kqp.sq_wrid_mem);
1166 kfree(iwqp->kqp.rq_wrid_mem);
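
Lines 1141-1166 are the QP resource teardown, and the order is deliberate: IEQ state and the push page go first, QoS/work-scheduler state is removed only when the QP was bound to a VSI, the QP number is returned to the allocator, and finally the coherent DMA areas (Q2/context memory and the kernel-QP queue memory) are released with dma_free_coherent() with their virtual addresses cleared so a stray second call is detectable. Sketch of the full function; the rf local and the QP-number release are inferred from the upstream driver and marked as such.

	void irdma_free_qp_rsrc(struct irdma_qp *iwqp)
	{
		struct irdma_device *iwdev = iwqp->iwdev;
		struct irdma_pci_f *rf = iwdev->rf;	/* assumed local */
		u32 qp_num = iwqp->ibqp.qp_num;

		irdma_ieq_cleanup_qp(iwdev->vsi.ieq, &iwqp->sc_qp);
		irdma_dealloc_push_page(rf, &iwqp->sc_qp);
		if (iwqp->sc_qp.vsi) {
			irdma_qp_rem_qos(&iwqp->sc_qp);
			iwqp->sc_qp.dev->ws_remove(iwqp->sc_qp.vsi,
						   iwqp->sc_qp.user_pri);
		}

		/* assumed: the lowest QP numbers are reserved, never recycled */
		if (qp_num > 2)
			irdma_free_rsrc(rf, rf->allocated_qps, qp_num);
		dma_free_coherent(rf->sc_dev.hw->device, iwqp->q2_ctx_mem.size,
				  iwqp->q2_ctx_mem.va, iwqp->q2_ctx_mem.pa);
		iwqp->q2_ctx_mem.va = NULL;	/* guard against double free */
		dma_free_coherent(rf->sc_dev.hw->device, iwqp->kqp.dma_mem.size,
				  iwqp->kqp.dma_mem.va, iwqp->kqp.dma_mem.pa);
		iwqp->kqp.dma_mem.va = NULL;
		kfree(iwqp->kqp.sq_wrid_mem);
		kfree(iwqp->kqp.rq_wrid_mem);
	}
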
1200 struct irdma_qp *iwqp;
1203 iwqp = cqp_info->in.u.qp_modify.qp->qp_uk.back_qp;
1204 atomic_dec(&iwqp->hw_mod_qp_pend);
1205 wake_up(&iwqp->mod_qp_waitq);
1211 * @iwqp: qp ptr (user or kernel)
1215 int irdma_hw_modify_qp(struct irdma_device *iwdev, struct irdma_qp *iwqp,
1230 atomic_inc(&iwqp->hw_mod_qp_pend);
1237 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
1253 irdma_send_reset(iwqp->cm_node);
1255 iwqp->sc_qp.term_flags = IRDMA_TERM_DONE;
1259 irdma_gen_ae(rf, &iwqp->sc_qp, &ae_info, false);
1271 cqp_info->in.u.qp_modify.qp = &iwqp->sc_qp;
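
Lines 1200-1205 and 1211-1271 work as a pair: when irdma_hw_modify_qp() posts a CQP modify without waiting, it increments hw_mod_qp_pend and points the request at the callback above, which decrements the counter and wakes mod_qp_waitq once the firmware completes. The error path visible at 1253-1259 sends a CM reset or generates an asynchronous event when a modify fails with terminate flags involved. A teardown path can then drain outstanding modifies before freeing the QP; below is a minimal sketch of that wait, assuming only the field names visible above. The helper name is hypothetical, and the surrounding destroy logic is not part of this listing.

	/* Hypothetical helper: block until every async QP modify completes. */
	static void irdma_drain_mod_qp(struct irdma_qp *iwqp)
	{
		wait_event(iwqp->mod_qp_waitq,
			   !atomic_read(&iwqp->hw_mod_qp_pend));
	}
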
1417 struct irdma_qp *iwqp;
1442 iwqp = cm_node->iwqp;
1445 return &iwqp->sc_qp;
2357 void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
2361 if (!iwqp->ibqp.event_handler)
2375 ibevent.device = iwqp->ibqp.device;
2376 ibevent.element.qp = &iwqp->ibqp;
2377 iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
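
Lines 2357-2377 deliver affiliated asynchronous events: if the consumer registered no event_handler the function bails out early; otherwise an ib_event is filled with the device, the QP, and an ib_event_type translated from the driver's event code, and the handler runs with the consumer's qp_context. Sketch with the translation switch (skipped by the matcher) filled in from the upstream driver; the exact case list varies across kernel versions, so treat it as an assumption.

	void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
	{
		struct ib_event ibevent;

		if (!iwqp->ibqp.event_handler)
			return;	/* consumer did not register for events */

		switch (event) {
		case IRDMA_QP_EVENT_CATASTROPHIC:
			ibevent.event = IB_EVENT_QP_FATAL;
			break;
		case IRDMA_QP_EVENT_ACCESS_ERR:
			ibevent.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		case IRDMA_QP_EVENT_REQ_ERR:
			ibevent.event = IB_EVENT_QP_REQ_ERR;
			break;
		}
		ibevent.device = iwqp->ibqp.device;
		ibevent.element.qp = &iwqp->ibqp;
		iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
	}
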
2451 void irdma_generate_flush_completions(struct irdma_qp *iwqp)
2453 struct irdma_qp_uk *qp = &iwqp->sc_qp.qp_uk;
2463 spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
2464 if (irdma_cq_empty(iwqp->iwscq)) {
2467 spin_lock_irqsave(&iwqp->lock, flags2);
2471 spin_unlock_irqrestore(&iwqp->lock, flags2);
2472 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2491 ibdev_dbg(iwqp->iwscq->ibcq.device,
2494 list_add_tail(&cmpl->list, &iwqp->iwscq->cmpl_generated);
2497 spin_unlock_irqrestore(&iwqp->lock, flags2);
2498 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2500 irdma_comp_handler(iwqp->iwscq);
2502 spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
2503 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
2507 spin_lock_irqsave(&iwqp->iwrcq->lock, flags1);
2508 if (irdma_cq_empty(iwqp->iwrcq)) {
2511 spin_lock_irqsave(&iwqp->lock, flags2);
2515 spin_unlock_irqrestore(&iwqp->lock, flags2);
2516 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2528 ibdev_dbg(iwqp->iwrcq->ibcq.device,
2532 list_add_tail(&cmpl->list, &iwqp->iwrcq->cmpl_generated);
2536 spin_unlock_irqrestore(&iwqp->lock, flags2);
2537 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2539 irdma_comp_handler(iwqp->iwrcq);
2541 spin_unlock_irqrestore(&iwqp->iwrcq->lock, flags1);
2542 mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
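
Lines 2451-2542 are the software flush path, executed once for the send CQ and once for the receive CQ. The lock order is fixed, CQ lock first and QP lock second, and completions are fabricated only while the CQ is empty; otherwise the whole job is pushed back onto cleanup_wq with mod_delayed_work() and retried after a delay. A condensed sketch of the send-side half, assuming the helper names visible above; the per-WQE loop that builds each flush completion is abbreviated, the delay constant IRDMA_FLUSH_DELAY_MS comes from the upstream driver rather than this listing, and the receive side at 2507-2542 mirrors the same shape.

	spin_lock_irqsave(&iwqp->iwscq->lock, flags1);
	if (irdma_cq_empty(iwqp->iwscq)) {
		unsigned long flags2;

		spin_lock_irqsave(&iwqp->lock, flags2);	/* CQ lock, then QP lock */
		/* ... walk the pending SQ WQEs, build a software flush-error
		 * completion per WQE, list_add_tail() each onto
		 * iwscq->cmpl_generated; bail out with both locks released
		 * (lines 2471-2472) if an allocation fails ... */
		spin_unlock_irqrestore(&iwqp->lock, flags2);
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		irdma_comp_handler(iwqp->iwscq);	/* notify outside both locks */
	} else {
		/* CQ not drained yet: retry later from the cleanup workqueue */
		spin_unlock_irqrestore(&iwqp->iwscq->lock, flags1);
		mod_delayed_work(iwqp->iwdev->cleanup_wq, &iwqp->dwork_flush,
				 msecs_to_jiffies(IRDMA_FLUSH_DELAY_MS));
	}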