Lines Matching refs:qp (requester path of the soft-RoCE driver, drivers/infiniband/sw/rxe/rxe_req.c)

14 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
17 static inline void retry_first_write_send(struct rxe_qp *qp,
24 int to_send = (wqe->dma.resid > qp->mtu) ?
25 qp->mtu : wqe->dma.resid;
27 qp->req.opcode = next_opcode(qp, wqe,
37 wqe->iova += qp->mtu;
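
The retry_first_write_send fragments above replay the already-acknowledged packets of the first retried send/write WQE: each iteration consumes at most one MTU of payload and, for RDMA writes, advances the remote iova by one MTU. A minimal standalone sketch of that per-packet bookkeeping, using made-up stand-in types rather than the driver's struct rxe_send_wqe:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's WQE/DMA state, not the rxe structs. */
struct fake_wqe {
	uint32_t resid;		/* payload bytes still to send */
	uint64_t iova;		/* remote address for RDMA writes */
};

/* Replay npsn already-acknowledged packets: each consumes at most one MTU. */
static void replay_first_wqe(struct fake_wqe *wqe, uint32_t mtu, int npsn, int is_write)
{
	for (int i = 0; i < npsn; i++) {
		uint32_t to_send = (wqe->resid > mtu) ? mtu : wqe->resid;

		wqe->resid -= to_send;
		if (is_write)
			wqe->iova += mtu;	/* advance remote address by a full MTU per packet */
	}
}

int main(void)
{
	struct fake_wqe w = { .resid = 10000, .iova = 0x1000 };

	replay_first_wqe(&w, 4096, 2, 1);	/* two packets already acked */
	printf("resid=%u iova=0x%llx\n", w.resid, (unsigned long long)w.iova);
	return 0;
}
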
41 static void req_retry(struct rxe_qp *qp)
49 qp->req.wqe_index = consumer_index(qp->sq.queue);
50 qp->req.psn = qp->comp.psn;
51 qp->req.opcode = -1;
53 for (wqe_index = consumer_index(qp->sq.queue);
54 wqe_index != producer_index(qp->sq.queue);
55 wqe_index = next_index(qp->sq.queue, wqe_index)) {
56 wqe = addr_from_index(qp->sq.queue, wqe_index);
57 mask = wr_opcode_mask(wqe->wr.opcode, qp);
81 npsn = (qp->comp.psn - wqe->first_psn) &
83 retry_first_write_send(qp, wqe, mask, npsn);
88 qp->mtu;
89 wqe->iova += npsn * qp->mtu;
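
req_retry rewinds the requester to the completer's PSN (qp->req.psn = qp->comp.psn) and, for the first partially-acknowledged WQE, measures how many packets were already acked as a 24-bit PSN difference. A small sketch of that wraparound arithmetic; BTH_PSN_MASK is the 24-bit PSN mask, the helper name is invented:

#include <stdint.h>
#include <stdio.h>

#define BTH_PSN_MASK 0xffffff	/* PSNs are 24-bit and wrap */

/* Packets of the first WQE already acked: (comp.psn - first_psn) mod 2^24. */
static uint32_t acked_pkts(uint32_t comp_psn, uint32_t first_psn)
{
	return (comp_psn - first_psn) & BTH_PSN_MASK;
}

int main(void)
{
	/* Works across the 24-bit wrap: first_psn near the top, comp.psn wrapped past zero. */
	printf("%u\n", acked_pkts(0x000005, 0xfffffe));	/* 7 */
	printf("%u\n", acked_pkts(0x000100, 0x0000f0));	/* 16 */
	return 0;
}

For a retried RDMA READ the same idea is applied in bytes instead of PSNs: the number of packets already transferred is derived from (length - resid) / mtu, and the iova is advanced by npsn * qp->mtu, as in the last two fragments above.
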
99 struct rxe_qp *qp = from_timer(qp, t, rnr_nak_timer);
101 pr_debug("qp#%d rnr nak timer fired\n", qp_num(qp));
102 rxe_run_task(&qp->req.task, 1);
105 static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp)
107 struct rxe_send_wqe *wqe = queue_head(qp->sq.queue);
110 if (unlikely(qp->req.state == QP_STATE_DRAIN)) {
114 spin_lock_irqsave(&qp->state_lock, flags);
116 if (qp->req.state != QP_STATE_DRAIN) {
118 spin_unlock_irqrestore(&qp->state_lock,
123 if (wqe && ((qp->req.wqe_index !=
124 consumer_index(qp->sq.queue)) ||
127 spin_unlock_irqrestore(&qp->state_lock,
132 qp->req.state = QP_STATE_DRAINED;
133 spin_unlock_irqrestore(&qp->state_lock, flags);
135 if (qp->ibqp.event_handler) {
138 ev.device = qp->ibqp.device;
139 ev.element.qp = &qp->ibqp;
141 qp->ibqp.event_handler(&ev,
142 qp->ibqp.qp_context);
147 if (qp->req.wqe_index == producer_index(qp->sq.queue))
150 wqe = addr_from_index(qp->sq.queue, qp->req.wqe_index);
152 if (unlikely((qp->req.state == QP_STATE_DRAIN ||
153 qp->req.state == QP_STATE_DRAINED) &&
158 (qp->req.wqe_index != consumer_index(qp->sq.queue)))) {
159 qp->req.wait_fence = 1;
163 wqe->mask = wr_opcode_mask(wqe->wr.opcode, qp);
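
req_next_wqe walks a circular send queue with three cursors: consumer_index (oldest WQE not yet completed), producer_index (one past the newest posted WQE), and qp->req.wqe_index, the requester's own position in between; when the cursor reaches the producer there is nothing left to transmit. A toy ring-index model of that layout; the helpers and queue depth here are illustrative, not the rxe queue API:

#include <stdio.h>

#define QDEPTH 8	/* power of two, as in the real queue */

/* Advance a ring index, wrapping at the queue depth. */
static unsigned next_index(unsigned idx)
{
	return (idx + 1) & (QDEPTH - 1);
}

int main(void)
{
	unsigned consumer = 6;	/* oldest un-completed WQE */
	unsigned producer = 2;	/* one past the newest posted WQE (wrapped) */
	unsigned req = 7;	/* requester cursor: next WQE to put on the wire */

	/* Nothing left to transmit once the cursor catches the producer. */
	while (req != producer) {
		printf("transmit wqe at slot %u\n", req);
		req = next_index(req);
	}

	/* Slots from consumer up to req are on the wire but not yet completed. */
	printf("outstanding: consumer=%u .. req=%u\n", consumer, req);
	return 0;
}
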
167 static int next_opcode_rc(struct rxe_qp *qp, u32 opcode, int fits)
171 if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
172 qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
182 if (qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_FIRST ||
183 qp->req.opcode == IB_OPCODE_RC_RDMA_WRITE_MIDDLE)
193 if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
194 qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
204 if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
205 qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
224 if (qp->req.opcode == IB_OPCODE_RC_SEND_FIRST ||
225 qp->req.opcode == IB_OPCODE_RC_SEND_MIDDLE)
239 static int next_opcode_uc(struct rxe_qp *qp, u32 opcode, int fits)
243 if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
244 qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
254 if (qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_FIRST ||
255 qp->req.opcode == IB_OPCODE_UC_RDMA_WRITE_MIDDLE)
265 if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
266 qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
276 if (qp->req.opcode == IB_OPCODE_UC_SEND_FIRST ||
277 qp->req.opcode == IB_OPCODE_UC_SEND_MIDDLE)
290 static int next_opcode(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
293 int fits = (wqe->dma.resid <= qp->mtu);
295 switch (qp_type(qp)) {
297 return next_opcode_rc(qp, opcode, fits);
300 return next_opcode_uc(qp, opcode, fits);
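
next_opcode_rc and next_opcode_uc pick the wire opcode for the current fragment of a message: if the previous packet of this WQE was a FIRST or MIDDLE, the next one must be MIDDLE or LAST; otherwise it is FIRST or ONLY, with fits = (wqe->dma.resid <= qp->mtu) choosing between the single-packet and multi-packet forms. A reduced sketch of that selection for a generic send; the enum values are placeholders for the IB_OPCODE_* constants:

#include <stdio.h>

/* Placeholder fragment positions; the driver uses IB_OPCODE_* values instead. */
enum frag { FIRST, MIDDLE, LAST, ONLY };

/*
 * prev: fragment position of the previous packet of this WQE (-1 if none),
 * fits: non-zero when the remaining payload fits in a single MTU.
 */
static enum frag next_frag(int prev, int fits)
{
	if (prev == FIRST || prev == MIDDLE)
		return fits ? LAST : MIDDLE;
	return fits ? ONLY : FIRST;
}

int main(void)
{
	static const char *name[] = { "FIRST", "MIDDLE", "LAST", "ONLY" };
	int prev = -1;
	unsigned resid = 9000, mtu = 4096;

	while (resid) {
		unsigned payload = resid > mtu ? mtu : resid;
		enum frag f = next_frag(prev, resid <= mtu);

		printf("%-6s payload=%u\n", name[f], payload);	/* FIRST, MIDDLE, LAST */
		prev = f;
		resid -= payload;
	}
	return 0;
}
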
321 static inline int check_init_depth(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
328 qp->req.need_rd_atomic = 1;
329 depth = atomic_dec_return(&qp->req.rd_atomic);
332 qp->req.need_rd_atomic = 0;
337 atomic_inc(&qp->req.rd_atomic);
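
check_init_depth throttles outstanding RDMA READ/ATOMIC requests against the QP's initiator depth: it decrements an atomic credit counter and, if the result went negative, restores the credit and tells the caller to back off until a response frees one. A user-space sketch of the same take-or-undo pattern with C11 atomics (the kernel uses atomic_dec_return/atomic_inc on qp->req.rd_atomic):

#include <stdatomic.h>
#include <stdio.h>

/* Credits = max outstanding READ/ATOMIC operations (the QP's initiator depth). */
static atomic_int rd_atomic = 2;

/* Returns 0 if a credit was taken, -1 if the caller must wait for a completion. */
static int take_rd_atomic_credit(void)
{
	/* fetch_sub returns the old value; old - 1 is what atomic_dec_return would give. */
	int depth = atomic_fetch_sub(&rd_atomic, 1) - 1;

	if (depth >= 0)
		return 0;

	atomic_fetch_add(&rd_atomic, 1);	/* went negative: undo and back off */
	return -1;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("request %d -> %s\n", i,
		       take_rd_atomic_credit() ? "wait" : "issued");
	return 0;
}
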
341 static inline int get_mtu(struct rxe_qp *qp)
343 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
345 if ((qp_type(qp) == IB_QPT_RC) || (qp_type(qp) == IB_QPT_UC))
346 return qp->mtu;
351 static struct sk_buff *init_req_packet(struct rxe_qp *qp,
356 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
374 pkt->qp = qp;
375 pkt->psn = qp->req.psn;
397 qp->attr.dest_qp_num;
400 (qp->req.noack_pkts++ > RXE_MAX_PKT_PER_ACK));
402 qp->req.noack_pkts = 0;
433 if (qp->ibqp.qp_num == 1)
437 deth_set_sqp(pkt, qp->ibqp.qp_num);
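
init_req_packet decides, among other things, when to set the BTH ack-request bit: on the last packet of a message, or once more than RXE_MAX_PKT_PER_ACK packets have gone out without requesting an ack, after which qp->req.noack_pkts is reset. A compact sketch of that coalescing rule; the constant's value here is illustrative:

#include <stdio.h>

#define MAX_PKT_PER_ACK 64	/* illustrative; the driver uses RXE_MAX_PKT_PER_ACK */

static unsigned noack_pkts;	/* packets sent since the last ack request */

/* Request an ack on end-of-message packets or after a long run of unacked packets. */
static int want_ack(int end_of_msg)
{
	int ack_req = end_of_msg || (noack_pkts++ > MAX_PKT_PER_ACK);

	if (ack_req)
		noack_pkts = 0;
	return ack_req;
}

int main(void)
{
	int acks = 0;

	for (int i = 0; i < 200; i++)
		acks += want_ack(0);	/* a long stream of middle packets */
	acks += want_ack(1);		/* last packet always requests an ack */

	printf("ack requested on %d of 201 packets\n", acks);
	return 0;
}
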
443 static int fill_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
447 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
466 err = copy_data(qp->pd, 0, &wqe->dma,
487 static void update_wqe_state(struct rxe_qp *qp,
492 if (qp_type(qp) == IB_QPT_RC)
499 static void update_wqe_psn(struct rxe_qp *qp,
505 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu;
512 wqe->first_psn = qp->req.psn;
513 wqe->last_psn = (qp->req.psn + num_pkt - 1) & BTH_PSN_MASK;
517 qp->req.psn = (wqe->first_psn + num_pkt) & BTH_PSN_MASK;
519 qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
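
update_wqe_psn stamps the WQE's PSN range when its first packet goes out: num_pkt counts the packets still needed (current payload plus remaining resid, rounded up to MTUs), first_psn/last_psn bracket that range, and the QP PSN then advances by one per packet sent, or by the whole expected response window for an RDMA READ. The same arithmetic worked as a standalone sketch:

#include <stdio.h>

#define BTH_PSN_MASK 0xffffff

int main(void)
{
	unsigned mtu = 1024, req_psn = 0x10;
	unsigned resid_after = 2500;	/* bytes left after this packet */
	unsigned payload = 1024;	/* bytes carried by this packet */

	/* Packets still needed, counting the current one. */
	unsigned num_pkt = (resid_after + payload + mtu - 1) / mtu;	/* = 4 */

	unsigned first_psn = req_psn;
	unsigned last_psn = (req_psn + num_pkt - 1) & BTH_PSN_MASK;

	printf("first=0x%x last=0x%x\n", first_psn, last_psn);

	/* Non-read requests advance the QP PSN one packet at a time ... */
	printf("next psn (write/send) = 0x%x\n", (req_psn + 1) & BTH_PSN_MASK);
	/* ... while an RDMA READ reserves PSNs for the whole expected response. */
	printf("next psn (read)       = 0x%x\n", (first_psn + num_pkt) & BTH_PSN_MASK);
	return 0;
}
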
523 struct rxe_qp *qp,
530 *rollback_psn = qp->req.psn;
534 struct rxe_qp *qp,
541 qp->req.psn = rollback_psn;
544 static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe,
547 qp->req.opcode = pkt->opcode;
550 qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index);
552 qp->need_req_skb = 0;
554 if (qp->qp_timeout_jiffies && !timer_pending(&qp->retrans_timer))
555 mod_timer(&qp->retrans_timer,
556 jiffies + qp->qp_timeout_jiffies);
561 struct rxe_qp *qp = (struct rxe_qp *)arg;
573 rxe_add_ref(qp);
576 if (unlikely(!qp->valid || qp->req.state == QP_STATE_ERROR))
579 if (unlikely(qp->req.state == QP_STATE_RESET)) {
580 qp->req.wqe_index = consumer_index(qp->sq.queue);
581 qp->req.opcode = -1;
582 qp->req.need_rd_atomic = 0;
583 qp->req.wait_psn = 0;
584 qp->req.need_retry = 0;
588 if (unlikely(qp->req.need_retry)) {
589 req_retry(qp);
590 qp->req.need_retry = 0;
593 wqe = req_next_wqe(qp);
599 struct rxe_dev *rxe = to_rdev(qp->ibqp.device);
629 qp->sq_sig_type == IB_SIGNAL_ALL_WR)
630 rxe_run_task(&qp->comp.task, 1);
631 qp->req.wqe_index = next_index(qp->sq.queue,
632 qp->req.wqe_index);
636 if (unlikely(qp_type(qp) == IB_QPT_RC &&
637 psn_compare(qp->req.psn, (qp->comp.psn +
639 qp->req.wait_psn = 1;
644 if (unlikely(atomic_read(&qp->skb_out) >
646 qp->need_req_skb = 1;
650 opcode = next_opcode(qp, wqe, wqe->wr.opcode);
658 if (check_init_depth(qp, wqe))
662 mtu = get_mtu(qp);
665 if (qp_type(qp) == IB_QPT_UD) {
673 wqe->first_psn = qp->req.psn;
674 wqe->last_psn = qp->req.psn;
675 qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK;
676 qp->req.opcode = IB_OPCODE_UD_SEND_ONLY;
677 qp->req.wqe_index = next_index(qp->sq.queue,
678 qp->req.wqe_index);
681 __rxe_do_task(&qp->comp.task);
682 rxe_drop_ref(qp);
688 skb = init_req_packet(qp, wqe, opcode, payload, &pkt);
690 pr_err("qp#%d Failed allocating skb\n", qp_num(qp));
694 if (fill_packet(qp, wqe, &pkt, skb, payload)) {
695 pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
706 save_state(wqe, qp, &rollback_wqe, &rollback_psn);
707 update_wqe_state(qp, wqe, &pkt);
708 update_wqe_psn(qp, wqe, &pkt, payload);
709 ret = rxe_xmit_packet(qp, &pkt, skb);
711 qp->need_req_skb = 1;
713 rollback_state(wqe, qp, &rollback_wqe, rollback_psn);
716 rxe_run_task(&qp->req.task, 1);
723 update_state(qp, wqe, &pkt, payload);
730 __rxe_do_task(&qp->comp.task);
733 rxe_drop_ref(qp);
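
The tail of rxe_requester shows the transmit step: save_state captures the WQE fields and PSN, update_wqe_state/update_wqe_psn speculatively assume the packet will go out, and if rxe_xmit_packet fails, rollback_state restores the saved values and the task is rescheduled. A minimal sketch of that save/update/rollback pattern around a fallible transmit; the xmit stub is invented for illustration:

#include <stdio.h>

#define BTH_PSN_MASK 0xffffff

struct req_state {
	unsigned psn;		/* next PSN to use */
	unsigned wqe_state;	/* simplified stand-in for the rolled-back WQE fields */
};

/* Invented stand-in for rxe_xmit_packet(): fails when no skb/room is available. */
static int xmit(int have_room)
{
	return have_room ? 0 : -1;
}

static int send_one(struct req_state *st, int have_room)
{
	struct req_state saved = *st;	/* save_state() */

	/* update_wqe_state()/update_wqe_psn(): assume the packet will make it out. */
	st->wqe_state = 1;
	st->psn = (st->psn + 1) & BTH_PSN_MASK;

	if (xmit(have_room)) {
		*st = saved;	/* rollback_state(): undo the speculative update */
		return -1;	/* caller requeues the task and retries later */
	}
	return 0;
}

int main(void)
{
	struct req_state st = { .psn = 0x20, .wqe_state = 0 };
	int rc;

	rc = send_one(&st, 0);	/* transmit fails: state rolled back */
	printf("xmit fail: rc=%d psn=0x%x\n", rc, st.psn);

	rc = send_one(&st, 1);	/* transmit succeeds: PSN advances */
	printf("xmit ok:   rc=%d psn=0x%x\n", rc, st.psn);
	return 0;
}
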