Lines matching refs:qp (drivers/infiniband/sw/rxe/rxe_comp.c, the soft-RoCE completer)
114 struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
116 if (qp->valid) {
117 qp->comp.timeout = 1;
118 rxe_run_task(&qp->comp.task, 1);
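The first hits are the body of the retransmit-timer callback: when the RC retransmit timer fires on a still-valid QP, it latches a timeout flag for the completer and schedules the completer task. A condensed sketch (the callback shape follows the kernel timer_list convention; surrounding lines are paraphrased, not verbatim):

    static void retransmit_timer(struct timer_list *t)
    {
        struct rxe_qp *qp = from_timer(qp, t, retrans_timer);

        if (qp->valid) {
            qp->comp.timeout = 1;             /* consumed by rxe_completer() */
            rxe_run_task(&qp->comp.task, 1);  /* 1 = defer to the tasklet */
        }
    }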
122 void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
126 skb_queue_tail(&qp->resp_pkts, skb);
128 must_sched = skb_queue_len(&qp->resp_pkts) > 1;
132 rxe_run_task(&qp->comp.task, must_sched);
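rxe_comp_queue_pkt() is the hand-off from the receive path: the response skb is queued on the QP, and the completer task runs inline unless responses are already backing up, in which case it must be scheduled instead. A sketch of the control flow (a stats-counter update in the real function is elided):

    void rxe_comp_queue_pkt(struct rxe_qp *qp, struct sk_buff *skb)
    {
        int must_sched;

        skb_queue_tail(&qp->resp_pkts, skb);

        /* a second queued packet means the completer is already
         * busy, so defer to the tasklet rather than run it in
         * the caller's context
         */
        must_sched = skb_queue_len(&qp->resp_pkts) > 1;
        rxe_run_task(&qp->comp.task, must_sched);
    }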
135 static inline enum comp_state get_wqe(struct rxe_qp *qp,
144 wqe = queue_head(qp->sq.queue);
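get_wqe() pairs the incoming response (if any) with the oldest send WQE at the head of qp->sq.queue and classifies it by wqe->state. Condensed from the driver, with the return values paraphrased:

    wqe = queue_head(qp->sq.queue);
    *wqe_p = wqe;

    /* no WQE, or the requester has not started on it yet */
    if (!wqe || wqe->state == wqe_state_posted)
        return pkt ? COMPST_DONE : COMPST_EXIT;

    /* WQE does not require an ack */
    if (wqe->state == wqe_state_done)
        return COMPST_COMP_WQE;

    /* WQE caused an error */
    if (wqe->state == wqe_state_error)
        return COMPST_ERROR;

    /* we have a WQE, continue processing */
    return COMPST_CHECK_PSN;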
163 static inline void reset_retry_counters(struct rxe_qp *qp)
165 qp->comp.retry_cnt = qp->attr.retry_cnt;
166 qp->comp.rnr_retry = qp->attr.rnr_retry;
167 qp->comp.started_retry = 0;
170 static inline enum comp_state check_psn(struct rxe_qp *qp,
185 reset_retry_counters(qp);
193 diff = psn_compare(pkt->psn, qp->comp.psn);
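reset_retry_counters() (163-167, shown in full above) is the helper that check_psn() and check_ack() call whenever a response makes forward progress: it reloads retry_cnt and rnr_retry from the negotiated QP attributes and clears the started_retry latch, so only an unbroken run of failures can exhaust the retry budget. check_psn() itself orders the packet PSN against both the oldest WQE and the expected response PSN; a condensed sketch of the second comparison (the fragment at 193), paraphrased from the rxe state machine:

    /* compare the response to the expected response PSN */
    diff = psn_compare(pkt->psn, qp->comp.psn);
    if (diff < 0) {
        /* most likely a retried packet: complete the WQE it
         * finishes, otherwise ignore it
         */
        return (pkt->psn == wqe->last_psn) ?
            COMPST_COMP_ACK : COMPST_DONE;
    } else if (diff > 0 && (wqe->mask & WR_ATOMIC_OR_READ_MASK)) {
        /* a read/atomic response went missing; leave it to
         * the retransmit timeout
         */
        return COMPST_DONE;
    } else {
        return COMPST_CHECK_ACK;
    }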
209 static inline enum comp_state check_ack(struct rxe_qp *qp,
215 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
218 switch (qp->comp.opcode) {
267 reset_retry_counters(qp);
279 reset_retry_counters(qp);
286 reset_retry_counters(qp);
299 if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
302 qp->comp.psn = pkt->psn;
303 if (qp->req.wait_psn) {
304 qp->req.wait_psn = 0;
305 rxe_run_task(&qp->req.task, 0);
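check_ack() is the big classifier. It first checks that the response opcode legally follows the previous one recorded in qp->comp.opcode (the switch at 218), then resets the retry counters for every successfully matched response class (267, 279, 286). The fragment at 299-305 sits in the NAK handling: a PSN-sequence-error NAK implicitly acks every PSN before it, so the completer advances qp->comp.psn and wakes a requester parked on wait_psn before going into retry. A condensed sketch of that arm (not verbatim):

    case AETH_NAK:
        switch (syn) {
        case AETH_NAK_PSN_SEQ_ERROR:
            /* a NAK implicitly acks all PSNs before it */
            if (psn_compare(pkt->psn, qp->comp.psn) > 0) {
                qp->comp.psn = pkt->psn;
                if (qp->req.wait_psn) {
                    qp->req.wait_psn = 0;
                    rxe_run_task(&qp->req.task, 0);
                }
            }
            return COMPST_ERROR_RETRY;
        ...
        }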
340 static inline enum comp_state do_read(struct rxe_qp *qp,
346 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
358 static inline enum comp_state do_atomic(struct rxe_qp *qp,
366 ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
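do_read() and do_atomic() are near-twins: each lands the response payload in the requester's local buffers through copy_data() with IB_ACCESS_LOCAL_WRITE permission on qp->pd (the full read payload in one case, the 8-byte original value returned by the atomic in the other), and failure maps to COMPST_ERROR. A sketch of the atomic flavor, condensed, with field and helper names as in the rxe sources of this era:

    u64 atomic_orig = atmack_orig(pkt);   /* value carried by the ATOMIC ACK */

    ret = copy_data(qp->pd, IB_ACCESS_LOCAL_WRITE,
                    &wqe->dma, &atomic_orig,
                    sizeof(u64), to_mem_obj, NULL);

    return ret ? COMPST_ERROR : COMPST_COMP_ACK;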
375 static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
380 if (!qp->is_user) {
390 wc->qp = &qp->ibqp;
401 uwc->qp_num = qp->ibqp.qp_num;
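make_send_cqe() writes the completion in one of two layouts: kernel consumers get a struct ib_wc, which can carry the ib_qp pointer directly (390), while userspace consumers get the uverbs struct ib_uverbs_wc, which only has room for the QP number (401). Condensed (opcode and byte-count handling elided):

    memset(cqe, 0, sizeof(*cqe));

    if (!qp->is_user) {
        struct ib_wc *wc = &cqe->ibwc;

        wc->wr_id = wqe->wr.wr_id;
        wc->status = wqe->status;
        wc->qp = &qp->ibqp;              /* kernel CQE: pointer */
    } else {
        struct ib_uverbs_wc *uwc = &cqe->uibwc;

        uwc->wr_id = wqe->wr.wr_id;
        uwc->status = wqe->status;
        uwc->qp_num = qp->ibqp.qp_num;   /* user CQE: QPN only */
    }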
413 static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
415 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
418 if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
421 make_send_cqe(qp, wqe, &cqe);
422 advance_consumer(qp->sq.queue);
423 rxe_cq_post(qp->scq, &cqe, 0);
425 advance_consumer(qp->sq.queue);
437 if (qp->req.wait_fence) {
438 qp->req.wait_fence = 0;
439 rxe_run_task(&qp->req.task, 0);
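do_complete() retires the head WQE. The test at 418 decides whether anyone gets to see a CQE: always on IB_SIGNAL_ALL_WR QPs, otherwise only for explicitly signaled WQEs or failures; the consumer index advances either way. Retiring a WQE is also what lets a fenced requester resume. Condensed:

    if ((qp->sq_sig_type == IB_SIGNAL_ALL_WR) ||
        (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
        wqe->status != IB_WC_SUCCESS) {
        make_send_cqe(qp, wqe, &cqe);
        advance_consumer(qp->sq.queue);
        rxe_cq_post(qp->scq, &cqe, 0);
    } else {
        /* unsignaled success: retire silently */
        advance_consumer(qp->sq.queue);
    }

    /* we completed something, so let the requester run
     * again if it was trying to fence
     */
    if (qp->req.wait_fence) {
        qp->req.wait_fence = 0;
        rxe_run_task(&qp->req.task, 0);
    }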
443 static inline enum comp_state complete_ack(struct rxe_qp *qp,
451 atomic_inc(&qp->req.rd_atomic);
452 if (qp->req.need_rd_atomic) {
453 qp->comp.timeout_retry = 0;
454 qp->req.need_rd_atomic = 0;
455 rxe_run_task(&qp->req.task, 0);
459 if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
461 spin_lock_irqsave(&qp->state_lock, flags);
462 if ((qp->req.state == QP_STATE_DRAIN) &&
463 (qp->comp.psn == qp->req.psn)) {
464 qp->req.state = QP_STATE_DRAINED;
465 spin_unlock_irqrestore(&qp->state_lock, flags);
467 if (qp->ibqp.event_handler) {
470 ev.device = qp->ibqp.device;
471 ev.element.qp = &qp->ibqp;
473 qp->ibqp.event_handler(&ev,
474 qp->ibqp.qp_context);
477 spin_unlock_irqrestore(&qp->state_lock, flags);
481 do_complete(qp, wqe);
483 if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
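complete_ack() has two visible jobs. Completing a read or atomic returns its rd_atomic credit (451) and immediately kicks a requester that had stalled waiting for one (need_rd_atomic). It also implements SQD support: when the QP is draining and the last outstanding PSN has been acked (comp.psn == req.psn), the QP moves to QP_STATE_DRAINED under state_lock and IB_EVENT_SQ_DRAINED is delivered to the consumer. A condensed sketch of the drain transition:

    if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
        /* state_lock is shared with the requester side */
        spin_lock_irqsave(&qp->state_lock, flags);
        if ((qp->req.state == QP_STATE_DRAIN) &&
            (qp->comp.psn == qp->req.psn)) {
            qp->req.state = QP_STATE_DRAINED;
            spin_unlock_irqrestore(&qp->state_lock, flags);

            if (qp->ibqp.event_handler) {
                struct ib_event ev;

                ev.device = qp->ibqp.device;
                ev.element.qp = &qp->ibqp;
                ev.event = IB_EVENT_SQ_DRAINED;
                qp->ibqp.event_handler(&ev,
                                       qp->ibqp.qp_context);
            }
        } else {
            spin_unlock_irqrestore(&qp->state_lock, flags);
        }
    }

    do_complete(qp, wqe);

The final test at 483 then lets only responses at or ahead of comp.psn go on to update the stored completion PSN; anything older is already accounted for.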
489 static inline enum comp_state complete_wqe(struct rxe_qp *qp,
494 if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
495 qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
496 qp->comp.opcode = -1;
499 if (qp->req.wait_psn) {
500 qp->req.wait_psn = 0;
501 rxe_run_task(&qp->req.task, 1);
505 do_complete(qp, wqe);
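complete_wqe() covers WQEs that finish without their final ACK in hand: if the WQE's last_psn is still at or beyond comp.psn, the expected PSN jumps past the whole WQE and the opcode history is cleared (-1 meaning no previous response to check against). Condensed (the guard on wqe_state_pending is paraphrased from the surrounding code):

    if (pkt && wqe->state == wqe_state_pending) {
        if (psn_compare(wqe->last_psn, qp->comp.psn) >= 0) {
            qp->comp.psn = (wqe->last_psn + 1) & BTH_PSN_MASK;
            qp->comp.opcode = -1;   /* no opcode-sequence history */
        }

        if (qp->req.wait_psn) {
            qp->req.wait_psn = 0;
            rxe_run_task(&qp->req.task, 1);
        }
    }

    do_complete(qp, wqe);
    return COMPST_GET_WQE;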
510 static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify)
515 while ((skb = skb_dequeue(&qp->resp_pkts))) {
516 rxe_drop_ref(qp);
520 while ((wqe = queue_head(qp->sq.queue))) {
523 do_complete(qp, wqe);
525 advance_consumer(qp->sq.queue);
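rxe_drain_resp_pkts() is the cleanup path: every queued response skb is freed (each dropping the QP reference taken when it was queued), and the send queue is flushed, either completing each WQE with IB_WC_WR_FLUSH_ERR when notify is set or just advancing past it. Condensed:

    while ((skb = skb_dequeue(&qp->resp_pkts))) {
        rxe_drop_ref(qp);   /* ref taken when the pkt was queued */
        kfree_skb(skb);
    }

    while ((wqe = queue_head(qp->sq.queue))) {
        if (notify) {
            wqe->status = IB_WC_WR_FLUSH_ERR;
            do_complete(qp, wqe);
        } else {
            advance_consumer(qp->sq.queue);
        }
    }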
532 struct rxe_qp *qp = (struct rxe_qp *)arg;
533 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
539 rxe_add_ref(qp);
541 if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
542 qp->req.state == QP_STATE_RESET) {
543 rxe_drain_resp_pkts(qp, qp->valid &&
544 qp->req.state == QP_STATE_ERROR);
548 if (qp->comp.timeout) {
549 qp->comp.timeout_retry = 1;
550 qp->comp.timeout = 0;
552 qp->comp.timeout_retry = 0;
555 if (qp->req.need_retry)
561 pr_debug("qp#%d state = %s\n", qp_num(qp),
565 skb = skb_dequeue(&qp->resp_pkts);
568 qp->comp.timeout_retry = 0;
574 state = get_wqe(qp, pkt, &wqe);
578 state = check_psn(qp, pkt, wqe);
582 state = check_ack(qp, pkt, wqe);
586 state = do_read(qp, pkt, wqe);
590 state = do_atomic(qp, pkt, wqe);
602 state = complete_ack(qp, pkt, wqe);
606 state = complete_wqe(qp, pkt, wqe);
611 qp->comp.opcode = -1;
613 qp->comp.opcode = pkt->opcode;
615 if (psn_compare(pkt->psn, qp->comp.psn) >= 0)
616 qp->comp.psn = (pkt->psn + 1) & BTH_PSN_MASK;
618 if (qp->req.wait_psn) {
619 qp->req.wait_psn = 0;
620 rxe_run_task(&qp->req.task, 1);
628 rxe_drop_ref(pkt->qp);
635 if (qp->comp.timeout_retry && wqe) {
648 if ((qp_type(qp) == IB_QPT_RC) &&
649 (qp->req.state == QP_STATE_READY) &&
650 (psn_compare(qp->req.psn, qp->comp.psn) > 0) &&
651 qp->qp_timeout_jiffies)
652 mod_timer(&qp->retrans_timer,
653 jiffies + qp->qp_timeout_jiffies);
672 if (qp->comp.started_retry &&
673 !qp->comp.timeout_retry) {
675 rxe_drop_ref(pkt->qp);
683 if (qp->comp.retry_cnt > 0) {
684 if (qp->comp.retry_cnt != 7)
685 qp->comp.retry_cnt--;
691 if (psn_compare(qp->req.psn,
692 qp->comp.psn) > 0) {
698 qp->req.need_retry = 1;
699 qp->comp.started_retry = 1;
700 rxe_run_task(&qp->req.task, 0);
704 rxe_drop_ref(pkt->qp);
719 if (qp->comp.rnr_retry > 0) {
720 if (qp->comp.rnr_retry != 7)
721 qp->comp.rnr_retry--;
723 qp->req.need_retry = 1;
724 pr_debug("qp#%d set rnr nak timer\n",
725 qp_num(qp));
726 mod_timer(&qp->rnr_nak_timer,
729 rxe_drop_ref(pkt->qp);
743 do_complete(qp, wqe);
744 rxe_qp_error(qp);
747 rxe_drop_ref(pkt->qp);
761 rxe_drop_ref(qp);
769 rxe_drop_ref(qp);
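rxe_completer() ties these fragments together. It takes a QP reference for the duration of the call (539), drains and bails out on invalid/ERROR/RESET QPs (541-544), converts a pending timer expiry into timeout_retry (548-552), and exits early if a retry is already pending (555). It then loops: dequeue a response (565) and step it through get_wqe/check_psn/check_ack (574-582), the payload helpers (586-590), and the completion states (602-606), updating comp.opcode and comp.psn as it goes (611-616) and dropping each packet's QP reference once consumed (628). On exit with unacked PSNs still outstanding on an RC QP, the retransmit timer is re-armed (648-653); timeout retries decrement retry_cnt unless it is 7, the "infinite" encoding (683-685), and RNR NAKs do the same for rnr_retry while arming the RNR NAK timer (719-726); an exhausted budget completes the WQE in error and moves the QP to the error state (743-744). Both return paths drop the reference taken on entry (761, 769). A heavily condensed skeleton of the loop (state names match the fragments above; everything else is paraphrase, not the verbatim function):

    int rxe_completer(void *arg)
    {
        struct rxe_qp *qp = (struct rxe_qp *)arg;
        struct rxe_send_wqe *wqe = NULL;
        struct rxe_pkt_info *pkt = NULL;
        struct sk_buff *skb = NULL;
        enum comp_state state;

        rxe_add_ref(qp);

        if (!qp->valid || qp->req.state == QP_STATE_ERROR ||
            qp->req.state == QP_STATE_RESET) {
            rxe_drain_resp_pkts(qp, qp->valid &&
                                qp->req.state == QP_STATE_ERROR);
            goto exit;
        }

        if (qp->comp.timeout) {    /* set by retransmit_timer() */
            qp->comp.timeout_retry = 1;
            qp->comp.timeout = 0;
        } else {
            qp->comp.timeout_retry = 0;
        }

        if (qp->req.need_retry)
            goto exit;

        state = COMPST_GET_ACK;

        while (1) {
            switch (state) {
            case COMPST_GET_ACK:
                skb = skb_dequeue(&qp->resp_pkts);
                if (skb) {
                    pkt = SKB_TO_PKT(skb);
                    qp->comp.timeout_retry = 0;
                }
                state = COMPST_GET_WQE;
                break;
            case COMPST_GET_WQE:
                state = get_wqe(qp, pkt, &wqe);
                break;
            case COMPST_CHECK_PSN:
                state = check_psn(qp, pkt, wqe);
                break;
            case COMPST_CHECK_ACK:
                state = check_ack(qp, pkt, wqe);
                break;
            /* COMPST_READ / _ATOMIC / _COMP_ACK / _COMP_WQE,
             * the retry, RNR and error arms, and the per-packet
             * skb/ref bookkeeping are elided in this sketch
             */
            default:
                goto exit;
            }
        }

    exit:
        rxe_drop_ref(qp);
        return -EAGAIN;   /* "run me again" convention, paraphrased */
    }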