Lines Matching defs:iwqp
221 struct irdma_qp *iwqp = NULL;
247 iwqp = rf->qp_table[info->qp_cq_id];
248 if (!iwqp) {
260 irdma_qp_add_ref(&iwqp->ibqp);
262 qp = &iwqp->sc_qp;
263 spin_lock_irqsave(&iwqp->lock, flags);
264 iwqp->hw_tcp_state = info->tcp_state;
265 iwqp->hw_iwarp_state = info->iwarp_state;
267 iwqp->last_aeq = info->ae_id;
268 spin_unlock_irqrestore(&iwqp->lock, flags);
269 ctx_info = &iwqp->ctx_info;
278 cm_node = iwqp->cm_node;
283 iwqp->rts_ae_rcvd = 1;
284 wake_up_interruptible(&iwqp->waitq);
290 if (atomic_inc_return(&iwqp->close_timer_started) == 1) {
291 iwqp->hw_tcp_state = IRDMA_TCP_STATE_CLOSE_WAIT;
292 if (iwqp->hw_tcp_state == IRDMA_TCP_STATE_CLOSE_WAIT &&
293 iwqp->ibqp_state == IB_QPS_RTS) {
294 irdma_next_iw_state(iwqp,
297 irdma_cm_disconn(iwqp);
299 irdma_schedule_cm_timer(iwqp->cm_node,
300 (struct irdma_puda_buf *)iwqp,
309 irdma_cm_disconn(iwqp);
313 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0,
315 irdma_cm_disconn(iwqp);
318 if (atomic_read(&iwqp->close_timer_started))
320 irdma_cm_disconn(iwqp);
323 if (iwqp->iwdev->vsi.tc_change_pending) {
325 wake_up(&iwqp->iwdev->suspend_wq);
327 if (iwqp->suspend_pending) {
328 iwqp->suspend_pending = false;
329 wake_up(&iwqp->iwdev->suspend_wq);
400 irdma_sc_qp_setctx_roce(&iwqp->sc_qp, iwqp->host_ctx.va,
404 irdma_cm_disconn(iwqp);
412 irdma_sc_qp_setctx(&iwqp->sc_qp, iwqp->host_ctx.va,
415 if (iwqp->hw_iwarp_state != IRDMA_QP_STATE_RTS &&
416 iwqp->hw_iwarp_state != IRDMA_QP_STATE_TERMINATE) {
417 irdma_next_iw_state(iwqp, IRDMA_QP_STATE_ERROR, 1, 0, 0);
418 irdma_cm_disconn(iwqp);
425 irdma_qp_rem_ref(&iwqp->ibqp);
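The cluster above (kernel lines 221-425, the driver's asynchronous-event handler, irdma_process_aeq()) follows a common AEQ pattern: look the QP up by the ID carried in the event, bail out if it has already been destroyed, take a reference so the QP cannot vanish mid-dispatch, record the hardware TCP/iWARP state under the QP lock, branch on the AE code (RTS wake-ups, close timers, disconnects, suspend completion), and drop the reference on the way out. A minimal user-space sketch of that lookup/ref/lock/dispatch/unref shape, where qp_entry, qp_table and handle_async_event() are hypothetical stand-ins for the irdma structures, not driver symbols:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define QP_TABLE_SIZE 64

struct qp_entry {
	atomic_int refcnt;
	pthread_mutex_t lock;
	int hw_tcp_state;	/* cached copy of hardware state */
	int last_ae;		/* last asynchronous event id    */
};

static struct qp_entry *qp_table[QP_TABLE_SIZE];

static void qp_put(struct qp_entry *qp)
{
	atomic_fetch_sub(&qp->refcnt, 1);	/* real driver frees at zero */
}

static void handle_async_event(unsigned int qp_id, int tcp_state, int ae_id)
{
	struct qp_entry *qp;

	if (qp_id >= QP_TABLE_SIZE || !(qp = qp_table[qp_id]))
		return;				/* stale event: QP already gone */

	atomic_fetch_add(&qp->refcnt, 1);	/* pin the QP across dispatch */

	pthread_mutex_lock(&qp->lock);
	qp->hw_tcp_state = tcp_state;		/* cache HW state under the lock */
	qp->last_ae = ae_id;
	pthread_mutex_unlock(&qp->lock);

	switch (ae_id) {			/* per-event dispatch, as above */
	default:
		printf("qp %u: ae 0x%x\n", qp_id, ae_id);
		break;
	}

	qp_put(qp);				/* drop the pin taken above */
}

int main(void)
{
	static struct qp_entry qp = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_init(&qp.refcnt, 1);		/* table owns the base reference */
	qp_table[7] = &qp;
	handle_async_event(7, 1, 0x102);	/* deliver one fake event */
	return 0;
}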
2160 * @iwqp: iwarp qp to modify
2166 void irdma_next_iw_state(struct irdma_qp *iwqp, u8 state, u8 del_hash, u8 term,
2183 if (iwqp->sc_qp.term_flags && state == IRDMA_QP_STATE_ERROR)
2185 iwqp->hw_iwarp_state = state;
2186 irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0);
2187 iwqp->iwarp_state = info.next_iwarp_state;
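irdma_next_iw_state() is the small transition helper those AE branches call: it builds a one-shot modify description, special-cases an ERROR transition while a terminate exchange is in flight (the term_flags test at 2183; the body of that branch is not shown in the matches, and the sketch below assumes it flags a TCP-connection reset in the modify request), pushes the state to hardware via irdma_hw_modify_qp(), and caches what was requested. A compilable sketch of that guard-then-modify idiom, with struct sketch_qp, struct modify_info and hw_modify() as illustrative stand-ins:

#include <stdbool.h>
#include <stdio.h>

enum qp_state { QP_STATE_RTS = 1, QP_STATE_TERMINATE, QP_STATE_ERROR };

struct modify_info {
	enum qp_state next_state;
	bool reset_tcp_conn;		/* tear the connection down too */
};

struct sketch_qp {
	enum qp_state hw_state;		/* state last pushed to hardware */
	enum qp_state sw_state;		/* driver's cached copy          */
	bool term_in_progress;		/* mirrors sc_qp.term_flags      */
};

static void hw_modify(struct sketch_qp *qp, const struct modify_info *info)
{
	/* real driver: irdma_hw_modify_qp(iwqp->iwdev, iwqp, &info, 0); */
	printf("modify -> %d (reset_tcp=%d)\n", info->next_state,
	       info->reset_tcp_conn);
}

static void next_state(struct sketch_qp *qp, enum qp_state state)
{
	struct modify_info info = { .next_state = state };

	/* Assumption: an ERROR move during an active terminate also
	 * resets the TCP connection instead of leaving it half-open. */
	if (qp->term_in_progress && state == QP_STATE_ERROR)
		info.reset_tcp_conn = true;

	qp->hw_state = state;		/* record before the async modify */
	hw_modify(qp, &info);
	qp->sw_state = info.next_state;	/* cache what was requested */
}

int main(void)
{
	struct sketch_qp qp = { .hw_state = QP_STATE_RTS };

	qp.term_in_progress = true;
	next_state(&qp, QP_STATE_ERROR);
	return 0;
}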
2549 struct irdma_qp *iwqp;
2555 iwqp = qp->qp_uk.back_qp;
2570 ibdev_err(&iwqp->iwdev->ibdev, "Flush QP[%d] failed, SQ has more work",
2572 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2592 struct irdma_qp *iwqp = qp->qp_uk.back_qp;
2659 ibdev_err(&iwqp->iwdev->ibdev, "fatal QP event: SQ in error but not flushed, qp: %d",
2660 iwqp->ibqp.qp_num);
2662 irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC);
2677 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state,
2678 iwqp->ibqp_state, iwqp->last_aeq, iwqp->hw_iwarp_state,
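The matches around 2549-2678 are the failure-verification side of a flush: the QP is recovered from the low-level handle via qp->qp_uk.back_qp, and if the send queue still reports work after the flush attempt, the driver logs the condition and raises a catastrophic event on the QP so consumers tear it down. A small sketch of that check-and-escalate shape, where sq_drained() and report_fatal() are hypothetical stand-ins for the ring check and irdma_ib_qp_event():

#include <stdbool.h>
#include <stdio.h>

struct sketch_qp {
	int qp_num;
	unsigned int sq_pending;	/* outstanding SQ work requests */
};

static bool sq_drained(const struct sketch_qp *qp)
{
	return qp->sq_pending == 0;
}

static void report_fatal(struct sketch_qp *qp)
{
	/* real driver: irdma_ib_qp_event(iwqp, IRDMA_QP_EVENT_CATASTROPHIC) */
	fprintf(stderr, "qp %d: catastrophic event\n", qp->qp_num);
}

static void verify_flush(struct sketch_qp *qp)
{
	if (sq_drained(qp))
		return;
	fprintf(stderr, "flush qp %d failed, SQ has more work\n", qp->qp_num);
	report_fatal(qp);		/* let consumers tear the QP down */
}

int main(void)
{
	struct sketch_qp qp = { .qp_num = 3, .sq_pending = 2 };

	verify_flush(&qp);		/* SQ not drained: escalates */
	return 0;
}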
2717 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask)
2720 struct irdma_pci_f *rf = iwqp->iwdev->rf;
2721 u8 flush_code = iwqp->sc_qp.flush_code;
2739 iwqp->sc_qp.flush_sq = false;
2741 iwqp->sc_qp.flush_rq = false;
2744 if (info.sq && iwqp->sc_qp.sq_flush_code)
2746 if (info.rq && iwqp->sc_qp.rq_flush_code)
2749 if (!iwqp->user_mode)
2750 queue_delayed_work(iwqp->iwdev->cleanup_wq,
2751 &iwqp->dwork_flush,
2756 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info,
2758 iwqp->flush_issued = true;
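irdma_flush_wqes() latches the per-queue flush requests (sc_qp.flush_sq / sc_qp.flush_rq) into a one-shot info structure, clears them so a second caller does not flush the same queue twice, schedules delayed drain work on iwdev->cleanup_wq for kernel-mode QPs, issues the hardware flush, and finally marks flush_issued so later completions see a flushed QP. A compilable sketch of that latch-clear-defer sequence, with queue_flush_work() standing in for the queue_delayed_work() call and the hardware flush reduced to a print:

#include <stdbool.h>
#include <stdio.h>

#define FLUSH_SQ 0x1
#define FLUSH_RQ 0x2

struct flush_info {
	bool sq, rq;
};

struct sketch_qp {
	bool flush_sq, flush_rq;	/* pending requests from the AE path */
	bool user_mode;
	bool flush_issued;
};

static void queue_flush_work(const struct flush_info *info)
{
	/* real driver: queue_delayed_work(iwdev->cleanup_wq, &dwork_flush, ...) */
	printf("deferred drain: sq=%d rq=%d\n", info->sq, info->rq);
}

static void flush_wqes(struct sketch_qp *qp, unsigned int mask)
{
	struct flush_info info = {
		.sq = (mask & FLUSH_SQ) || qp->flush_sq,
		.rq = (mask & FLUSH_RQ) || qp->flush_rq,
	};

	qp->flush_sq = false;		/* consume the one-shot requests */
	qp->flush_rq = false;

	if (!qp->user_mode)
		queue_flush_work(&info);	/* kernel QP: drain CQEs later */

	/* real driver: (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, ...) */
	printf("hw flush: sq=%d rq=%d\n", info.sq, info.rq);

	qp->flush_issued = true;	/* later completions see a flushed QP */
}

int main(void)
{
	struct sketch_qp qp = { .flush_rq = true };

	flush_wqes(&qp, FLUSH_SQ);	/* SQ from the mask, RQ from the latch */
	return 0;
}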