Lines matching refs: qp
53 #include "qp.h"
58 struct rvt_ack_entry *find_prev_entry(struct rvt_qp *qp, u32 psn, u8 *prev,
60 __must_hold(&qp->s_lock)
66 for (i = qp->r_head_ack_queue; ; i = p) {
67 if (i == qp->s_tail_ack_queue)
72 p = rvt_size_atomic(ib_to_rvt(qp->ibqp.device));
73 if (p == qp->r_head_ack_queue) {
77 e = &qp->s_ack_queue[p];
83 if (p == qp->s_tail_ack_queue &&
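Annotation: find_prev_entry() walks the responder's ack queue backwards from r_head_ack_queue toward s_tail_ack_queue, wrapping the index at zero, until it finds the entry whose PSN range covers the requested PSN. A minimal user-space model of that reverse walk over a circular array (names and the queue size are illustrative, not the driver's; the driver also compares PSNs modulo 2^24, which this sketch elides):

    #include <stdint.h>

    #define QUEUE_SIZE 5   /* illustrative; the driver sizes this from rvt_size_atomic() + 1 */

    struct entry { uint32_t psn; uint32_t lpsn; };

    /* Walk backwards from head toward tail, wrapping 0 -> QUEUE_SIZE - 1. */
    static struct entry *find_prev(struct entry *q, unsigned head, unsigned tail,
                                   uint32_t psn)
    {
        for (unsigned i = head; ; ) {
            if (i == tail)
                return NULL;               /* reached the oldest outstanding entry */
            unsigned p = i ? i - 1 : QUEUE_SIZE - 1;
            if (p == head)
                return NULL;               /* wrapped all the way around */
            struct entry *e = &q[p];
            if (psn >= e->psn && psn <= e->lpsn)
                return e;                  /* PSN falls inside this request */
            i = p;
        }
    }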
101 * @qp: a pointer to the QP
109 static int make_rc_ack(struct hfi1_ibdev *dev, struct rvt_qp *qp,
117 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
119 u32 pmtu = qp->pmtu;
120 struct hfi1_qp_priv *qpriv = qp->priv;
123 u8 next = qp->s_tail_ack_queue;
126 trace_hfi1_rsp_make_rc_ack(qp, 0);
127 lockdep_assert_held(&qp->s_lock);
129 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
139 switch (qp->s_ack_state) {
142 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
157 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
159 qp->s_acked_ack_queue == qp->s_tail_ack_queue)
160 qp->s_acked_ack_queue = next;
161 qp->s_tail_ack_queue = next;
162 trace_hfi1_rsp_make_rc_ack(qp, e->psn);
167 if (qp->r_head_ack_queue == qp->s_tail_ack_queue) {
168 if (qp->s_flags & RVT_S_ACK_PENDING)
173 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
176 hfi1_tid_rdma_ack_interlock(qp, e)) {
189 if (qp->s_acked_ack_queue ==
190 qp->s_tail_ack_queue)
191 qp->s_acked_ack_queue =
192 qp->r_head_ack_queue;
193 qp->s_tail_ack_queue = qp->r_head_ack_queue;
200 qp->s_ack_rdma_sge.sge = e->rdma_sge;
201 qp->s_ack_rdma_sge.num_sge = 1;
202 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
205 qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
207 qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
210 ohdr->u.aeth = rvt_compute_aeth(qp);
212 qp->s_ack_rdma_psn = e->psn;
213 bth2 = mask_psn(qp->s_ack_rdma_psn++);
226 qp->s_ack_state = TID_OP(WRITE_RESP);
227 qp->s_ack_rdma_psn = mask_psn(e->psn + req->cur_seg);
238 if (qp->s_acked_ack_queue ==
239 qp->s_tail_ack_queue)
240 qp->s_acked_ack_queue =
241 qp->r_head_ack_queue;
242 qp->s_tail_ack_queue = qp->r_head_ack_queue;
249 qp->s_ack_rdma_sge.sge = e->rdma_sge;
250 qp->s_ack_rdma_sge.num_sge = 1;
251 qp->s_ack_state = TID_OP(READ_RESP);
257 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
258 ohdr->u.at.aeth = rvt_compute_aeth(qp);
264 trace_hfi1_tid_write_rsp_make_rc_ack(qp);
265 bth0 = qp->s_ack_state << 24;
269 qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
272 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
273 ps->s_txreq->mr = qp->s_ack_rdma_sge.sge.mr;
276 len = qp->s_ack_rdma_sge.sge.sge_length;
281 ohdr->u.aeth = rvt_compute_aeth(qp);
283 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
284 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
287 bth0 = qp->s_ack_state << 24;
288 bth2 = mask_psn(qp->s_ack_rdma_psn++);
312 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
321 qp->s_tail_ack_queue == qpriv->r_tid_alloc &&
327 bth2 = mask_psn(qp->s_ack_rdma_psn);
328 hdrlen = hfi1_build_tid_rdma_write_resp(qp, e, ohdr, &bth1,
335 bth0 = qp->s_ack_state << 24;
336 qp->s_ack_rdma_psn++;
337 trace_hfi1_tid_req_make_rc_ack_write(qp, 0, e->opcode, e->psn,
344 qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE);
349 e = &qp->s_ack_queue[qp->s_tail_ack_queue];
350 ps->s_txreq->ss = &qp->s_ack_rdma_sge;
351 delta = hfi1_build_tid_rdma_read_resp(qp, e, ohdr, &bth0,
360 * Increment qp->s_tail_ack_queue through s_ack_state
363 qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
377 qp->s_ack_state = OP(SEND_ONLY);
379 if (qp->s_nak_state)
381 cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
382 (qp->s_nak_state <<
385 ohdr->u.aeth = rvt_compute_aeth(qp);
389 bth2 = mask_psn(qp->s_ack_psn);
390 qp->s_flags &= ~RVT_S_ACK_PENDING;
394 qp->s_rdma_ack_cnt++;
398 hfi1_make_ruc_header(qp, ohdr, bth0, bth1, bth2, middle, ps);
401 spin_unlock_irqrestore(&qp->s_lock, ps->flags);
402 spin_lock_irqsave(&qp->r_lock, ps->flags);
403 spin_lock(&qp->s_lock);
404 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
405 spin_unlock(&qp->s_lock);
406 spin_unlock_irqrestore(&qp->r_lock, ps->flags);
407 spin_lock_irqsave(&qp->s_lock, ps->flags);
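Annotation: the error path at lines 401-407 has to call rvt_error_qp() with both r_lock and s_lock held, and the lock order is r_lock before s_lock, so the code drops s_lock first and reacquires both in order before restoring the caller's expected state. A hedged pthread model of the same drop-and-reacquire dance (illustrative; the kernel uses irq-safe spinlocks, not mutexes):

    #include <pthread.h>

    /* Lock order: r_lock before s_lock. We arrive holding only s_lock,
     * so drop it and retake both in the documented order. */
    static void error_path(pthread_mutex_t *r_lock, pthread_mutex_t *s_lock)
    {
        pthread_mutex_unlock(s_lock);    /* give up the inner lock */
        pthread_mutex_lock(r_lock);      /* outer lock first */
        pthread_mutex_lock(s_lock);      /* inner lock second */
        /* ... flush the QP while holding both ... */
        pthread_mutex_unlock(s_lock);
        pthread_mutex_unlock(r_lock);
        pthread_mutex_lock(s_lock);      /* caller expects s_lock held on return */
    }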
409 qp->s_ack_state = OP(ACKNOWLEDGE);
415 qp->s_flags &= ~(RVT_S_RESP_PENDING
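Annotation: nearly every branch in make_rc_ack() and hfi1_make_rc_req() goes through the 24-bit PSN helpers (mask_psn(), cmp_psn(), delta_psn()). PSNs live in a 24-bit circular space, so ordering is decided by a sign-extended 24-bit difference. A self-contained model of that arithmetic (assuming the standard 24-bit IB PSN; the driver's exact helpers live in its headers):

    #include <stdint.h>
    #include <assert.h>

    #define PSN_MASK  0xFFFFFFu   /* IB PSNs are 24 bits wide */
    #define PSN_SHIFT 8           /* shift the 24-bit value into the int32 sign position */

    static inline uint32_t mask_psn(uint32_t a) { return a & PSN_MASK; }

    /* Sign-extended 24-bit difference: <0 if a is "before" b, 0 if equal, >0 if
     * after; correct across the 0xFFFFFF -> 0 wrap as long as |a - b| < 2^23.
     * (The cast-then-arithmetic-shift is the usual compiler idiom here.) */
    static inline int32_t cmp_psn(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << PSN_SHIFT)) >> PSN_SHIFT;
    }

    int main(void)
    {
        assert(cmp_psn(0x000001, 0xFFFFFF) > 0);  /* 1 comes just after the wrap */
        assert(cmp_psn(0xFFFFFF, 0x000001) < 0);
        assert(cmp_psn(mask_psn(0x1000005), 0x000005) == 0);
        return 0;
    }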
423 * @qp: a pointer to the QP
429 int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
431 struct hfi1_qp_priv *priv = qp->priv;
432 struct hfi1_ibdev *dev = to_idev(qp->ibqp.device);
442 u32 bth1 = qp->remote_qpn | (HFI1_CAP_IS_KSET(OPFN) << IB_BTHE_E_SHIFT);
443 u32 pmtu = qp->pmtu;
450 trace_hfi1_sender_make_rc_req(qp);
451 lockdep_assert_held(&qp->s_lock);
452 ps->s_txreq = get_txreq(ps->dev, qp);
459 if (rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)
466 if ((rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
467 (hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))))
474 if ((qp->s_flags & RVT_S_RESP_PENDING) &&
475 make_rc_ack(dev, qp, ohdr, ps))
478 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
479 if (!(ib_rvt_state_ops[qp->state] & RVT_FLUSH_SEND))
482 if (qp->s_last == READ_ONCE(qp->s_head))
486 qp->s_flags |= RVT_S_WAIT_DMA;
489 clear_ahg(qp);
490 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
497 if (qp->s_flags & (RVT_S_WAIT_RNR | RVT_S_WAIT_ACK | HFI1_S_WAIT_HALT))
500 if (cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) {
501 if (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) {
502 qp->s_flags |= RVT_S_WAIT_PSN;
505 qp->s_sending_psn = qp->s_psn;
506 qp->s_sending_hpsn = qp->s_psn - 1;
510 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
512 switch (qp->s_state) {
514 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK))
524 if (qp->s_cur == qp->s_tail) {
526 if (qp->s_tail == READ_ONCE(qp->s_head)) {
527 clear_ahg(qp);
537 qp->s_num_rd_atomic &&
539 priv->pending_tid_r_segs < qp->s_num_rd_atomic)) {
540 qp->s_flags |= RVT_S_WAIT_FENCE;
552 if (qp->s_last != qp->s_cur)
554 if (++qp->s_cur == qp->s_size)
555 qp->s_cur = 0;
556 if (++qp->s_tail == qp->s_size)
557 qp->s_tail = 0;
561 qp,
565 rvt_send_complete(qp, wqe,
569 atomic_dec(&qp->local_ops_pending);
574 qp->s_psn = wqe->psn;
582 ss = &qp->s_sge;
583 bth2 = mask_psn(qp->s_psn);
590 hfi1_tid_rdma_wqe_interlock(qp, wqe))
598 if (!rvt_rc_credit_avail(qp, wqe))
601 qp->s_state = OP(SEND_FIRST);
606 qp->s_state = OP(SEND_ONLY);
608 qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
613 qp->s_state = OP(SEND_ONLY_WITH_INVALIDATE);
622 if (++qp->s_cur == qp->s_size)
623 qp->s_cur = 0;
627 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
628 qp->s_lsn++;
632 if (!rvt_rc_credit_avail(qp, wqe))
643 qp->s_state = OP(RDMA_WRITE_FIRST);
648 qp->s_state = OP(RDMA_WRITE_ONLY);
650 qp->s_state =
659 if (++qp->s_cur == qp->s_size)
660 qp->s_cur = 0;
672 if (!(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
673 qp->s_lsn++;
676 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr,
681 priv->s_tid_cur = qp->s_cur;
683 priv->s_tid_tail = qp->s_cur;
690 __w = rvt_get_swqe_ptr(qp, priv->s_tid_cur);
723 priv->s_tid_tail = qp->s_cur;
727 priv->s_tid_cur = qp->s_cur;
739 if (priv->s_tid_tail == qp->s_cur &&
745 priv->s_tid_head = qp->s_cur;
761 trace_hfi1_tid_write_sender_make_req(qp, newreq);
762 trace_hfi1_tid_req_make_req_write(qp, newreq,
766 if (++qp->s_cur == qp->s_size)
767 qp->s_cur = 0;
775 if (qp->s_num_rd_atomic >=
776 qp->s_max_rd_atomic) {
777 qp->s_flags |= RVT_S_WAIT_RDMAR;
780 qp->s_num_rd_atomic++;
781 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
782 qp->s_lsn++;
789 qp->s_state = OP(RDMA_READ_REQUEST);
794 if (++qp->s_cur == qp->s_size)
795 qp->s_cur = 0;
799 trace_hfi1_tid_read_sender_make_req(qp, newreq);
802 trace_hfi1_tid_req_make_req_read(qp, newreq,
806 delta = cmp_psn(qp->s_psn, wqe->psn);
813 * but the qp->s_state is set to OP(RDMA_READ_REQUEST)
818 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
819 qp->s_flags |= RVT_S_WAIT_RDMAR;
833 qp->s_sge.sge = wqe->sg_list[0];
834 qp->s_sge.sg_list = wqe->sg_list + 1;
835 qp->s_sge.num_sge = wqe->wr.num_sge;
836 qp->s_sge.total_len = wqe->length;
837 qp->s_len = wqe->length;
851 req->s_next_psn = qp->s_psn;
855 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr,
862 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
863 qp->s_lsn++;
868 ++qp->s_cur == qp->s_size)
869 qp->s_cur = 0;
878 if (qp->s_num_rd_atomic >=
879 qp->s_max_rd_atomic) {
880 qp->s_flags |= RVT_S_WAIT_RDMAR;
883 qp->s_num_rd_atomic++;
886 if (newreq && !(qp->s_flags & RVT_S_UNLIMITED_CREDIT))
887 qp->s_lsn++;
890 qp->s_state = OP(COMPARE_SWAP);
896 qp->s_state = OP(FETCH_ADD);
909 if (++qp->s_cur == qp->s_size)
910 qp->s_cur = 0;
917 qp->s_sge.sge = wqe->sg_list[0];
918 qp->s_sge.sg_list = wqe->sg_list + 1;
919 qp->s_sge.num_sge = wqe->wr.num_sge;
920 qp->s_sge.total_len = wqe->length;
921 qp->s_len = wqe->length;
924 qp->s_tail++;
925 if (qp->s_tail >= qp->s_size)
926 qp->s_tail = 0;
930 qp->s_psn = wqe->lpsn + 1;
932 qp->s_psn = req->s_next_psn;
934 qp->s_psn++;
939 * qp->s_state is normally set to the opcode of the
947 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
950 qp->s_state = OP(SEND_MIDDLE);
953 bth2 = mask_psn(qp->s_psn++);
954 ss = &qp->s_sge;
955 len = qp->s_len;
962 qp->s_state = OP(SEND_LAST);
964 qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
969 qp->s_state = OP(SEND_LAST_WITH_INVALIDATE);
977 qp->s_cur++;
978 if (qp->s_cur >= qp->s_size)
979 qp->s_cur = 0;
984 * qp->s_state is normally set to the opcode of the
992 qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu);
995 qp->s_state = OP(RDMA_WRITE_MIDDLE);
998 bth2 = mask_psn(qp->s_psn++);
999 ss = &qp->s_sge;
1000 len = qp->s_len;
1007 qp->s_state = OP(RDMA_WRITE_LAST);
1009 qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
1017 qp->s_cur++;
1018 if (qp->s_cur >= qp->s_size)
1019 qp->s_cur = 0;
1024 * qp->s_state is normally set to the opcode of the
1032 len = (delta_psn(qp->s_psn, wqe->psn)) * pmtu;
1039 qp->s_state = OP(RDMA_READ_REQUEST);
1041 bth2 = mask_psn(qp->s_psn) | IB_BTH_REQ_ACK;
1042 qp->s_psn = wqe->lpsn + 1;
1045 qp->s_cur++;
1046 if (qp->s_cur == qp->s_size)
1047 qp->s_cur = 0;
1060 req->comp_seg = delta_psn(qp->s_psn, wqe->psn);
1064 bth2 = mask_psn(qp->s_psn);
1065 hwords += hfi1_build_tid_rdma_write_req(qp, wqe, ohdr, &bth1,
1067 qp->s_psn = wqe->lpsn + 1;
1069 qp->s_state = TID_OP(WRITE_REQ);
1071 priv->s_tid_cur = qp->s_cur;
1072 if (++qp->s_cur == qp->s_size)
1073 qp->s_cur = 0;
1074 trace_hfi1_tid_req_make_req_write(qp, 0, wqe->wr.opcode,
1085 * Back down. The field qp->s_psn has been set to the psn with
1089 req->cur_seg = delta_psn(qp->s_psn, wqe->psn) / priv->pkts_ps;
1098 hfi1_tid_rdma_restart_req(qp, wqe, &bth2);
1105 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1107 hfi1_trdma_send_complete(qp, wqe, IB_WC_LOC_QP_OP_ERR);
1126 ++qp->s_cur == qp->s_size)
1127 qp->s_cur = 0;
1128 qp->s_psn = req->s_next_psn;
1129 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1134 delta = cmp_psn(qp->s_psn, wqe->psn);
1137 * of a new request, we need to change the qp->s_state so that
1141 qp->s_cur == qp->s_tail) {
1142 qp->s_state = OP(RDMA_READ_REQUEST);
1143 if (delta == 0 || qp->s_cur == qp->s_tail)
1150 if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) {
1151 qp->s_flags |= RVT_S_WAIT_RDMAR;
1159 delta = hfi1_build_tid_rdma_read_req(qp, wqe, ohdr, &bth1,
1169 ++qp->s_cur == qp->s_size)
1170 qp->s_cur = 0;
1171 qp->s_psn = req->s_next_psn;
1172 trace_hfi1_tid_req_make_req_read(qp, 0, wqe->wr.opcode,
1176 qp->s_sending_hpsn = bth2;
1181 if (qp->s_flags & RVT_S_SEND_ONE) {
1182 qp->s_flags &= ~RVT_S_SEND_ONE;
1183 qp->s_flags |= RVT_S_WAIT_ACK;
1186 qp->s_len -= len;
1192 qp,
1194 bth0 | (qp->s_state << 24),
1211 qp->s_flags &= ~RVT_S_BUSY;
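Annotation: hfi1_make_rc_req() advances several cursors over one circular send queue: s_head is where new WQEs are posted, s_cur is the next WQE to put on the wire, s_tail trails the oldest request still being sent, and s_acked/s_last trail acknowledgment and completion. Every advance uses the same wrap idiom seen throughout the listing. A toy model of the cursor discipline (field names mirror the driver; the struct itself is illustrative):

    #include <stdint.h>

    struct send_queue {
        uint32_t s_size;    /* ring capacity */
        uint32_t s_head;    /* producer: next free slot */
        uint32_t s_cur;     /* next WQE to build a packet from */
        uint32_t s_tail;    /* oldest WQE not yet fully sent */
        uint32_t s_acked;   /* oldest WQE not yet acked */
        uint32_t s_last;    /* oldest WQE not yet completed to the user */
    };

    /* The wrap idiom used for every cursor in this file. */
    static inline uint32_t ring_next(uint32_t i, uint32_t size)
    {
        return (++i == size) ? 0 : i;
    }

    /* Queue is drained when the consumer catches the producer. */
    static inline int ring_empty(const struct send_queue *sq)
    {
        return sq->s_tail == sq->s_head;   /* the driver reads s_head with READ_ONCE() */
    }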
1221 static inline void hfi1_make_bth_aeth(struct rvt_qp *qp,
1225 if (qp->r_nak_state)
1226 ohdr->u.aeth = cpu_to_be32((qp->r_msn & IB_MSN_MASK) |
1227 (qp->r_nak_state <<
1230 ohdr->u.aeth = rvt_compute_aeth(qp);
1233 ohdr->bth[1] = cpu_to_be32(bth1 | qp->remote_qpn);
1234 ohdr->bth[2] = cpu_to_be32(mask_psn(qp->r_ack_psn));
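Annotation: hfi1_make_bth_aeth() packs the 32-bit AETH: the low 24 bits carry the MSN and the top 8 bits carry either a credit count (via rvt_compute_aeth()) or a NAK code. A hedged sketch of the packing, using htonl() in place of the kernel's cpu_to_be32() and masks per the IBTA AETH layout:

    #include <stdint.h>
    #include <arpa/inet.h>   /* htonl, standing in for cpu_to_be32 */

    #define IB_MSN_MASK          0xFFFFFFu  /* low 24 bits: message sequence number */
    #define IB_AETH_CREDIT_SHIFT 24         /* top 8 bits: credit count or NAK code */

    static uint32_t make_aeth(uint32_t msn, uint8_t nak_or_credit)
    {
        return htonl((msn & IB_MSN_MASK) |
                     ((uint32_t)nak_or_credit << IB_AETH_CREDIT_SHIFT));
    }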
1239 struct rvt_qp *qp = packet->qp;
1243 spin_lock_irqsave(&qp->s_lock, flags);
1244 if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK))
1248 qp->s_flags |= RVT_S_ACK_PENDING | RVT_S_RESP_PENDING;
1249 qp->s_nak_state = qp->r_nak_state;
1250 qp->s_ack_psn = qp->r_ack_psn;
1252 qp->s_flags |= RVT_S_ECN;
1255 hfi1_schedule_send(qp);
1257 spin_unlock_irqrestore(&qp->s_lock, flags);
1266 struct rvt_qp *qp = packet->qp;
1280 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH)) {
1282 rdma_ah_read_grh(&qp->remote_ah_attr),
1291 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1294 (rdma_ah_get_sl(&qp->remote_ah_attr) & IB_SL_MASK) <<
1298 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr), 9B),
1299 ppd->lid | rdma_ah_get_path_bits(&qp->remote_ah_attr));
1302 if (qp->s_mig_state == IB_MIG_MIGRATED)
1310 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1319 struct rvt_qp *qp = packet->qp;
1337 if (unlikely(rdma_ah_get_ah_flags(&qp->remote_ah_attr) & IB_AH_GRH) &&
1338 hfi1_check_mcast(rdma_ah_get_dlid(&qp->remote_ah_attr))) {
1340 rdma_ah_read_grh(&qp->remote_ah_attr),
1348 pkey = hfi1_get_pkey(ibp, qp->s_pkey_index);
1354 (rdma_ah_get_path_bits(&qp->remote_ah_attr) &
1356 opa_get_lid(rdma_ah_get_dlid(&qp->remote_ah_attr),
1361 if (qp->s_mig_state == IB_MIG_MIGRATED)
1363 hfi1_make_bth_aeth(qp, ohdr, bth0, bth1);
1380 * @qp: a pointer to the QP
1389 struct rvt_qp *qp = packet->qp;
1391 struct hfi1_qp_priv *priv = qp->priv;
1393 u8 sc5 = ibp->sl_to_sc[rdma_ah_get_sl(&qp->remote_ah_attr)];
1402 qp->r_adefered = 0;
1405 if (qp->s_flags & RVT_S_RESP_PENDING) {
1411 if (qp->s_rdma_ack_cnt) {
1425 pbc = create_pbc(ppd, pbc_flags, qp->srate_mbps,
1438 trace_ack_output_ibhdr(dd_from_ibdev(qp->ibqp.device),
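Annotation: hfi1_send_rc_ack() only emits a standalone ACK packet when the send engine has nothing response-related queued: if a response is already pending (RVT_S_RESP_PENDING, line 1405) or RDMA read response packets are still in flight (s_rdma_ack_cnt, line 1411), the ACK is folded into the normal send path instead. A sketch of that gate (the flag bit value is illustrative; the driver defines its own):

    #include <stdbool.h>
    #include <stdint.h>

    #define RVT_S_RESP_PENDING (1u << 0)   /* illustrative bit position */

    /* Fold the ACK into the send engine when responses are already queued there. */
    static bool must_queue_ack(uint32_t s_flags, uint32_t s_rdma_ack_cnt)
    {
        return (s_flags & RVT_S_RESP_PENDING) || s_rdma_ack_cnt != 0;
    }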
1450 * update_num_rd_atomic - update the qp->s_num_rd_atomic
1451 * @qp: the QP
1455 * This is called from reset_psn() to update qp->s_num_rd_atomic
1459 static void update_num_rd_atomic(struct rvt_qp *qp, u32 psn,
1467 qp->s_num_rd_atomic++;
1470 struct hfi1_qp_priv *priv = qp->priv;
1478 qp->s_num_rd_atomic += req->ack_pending;
1479 trace_hfi1_tid_req_update_num_rd_atomic(qp, 0,
1486 qp->s_num_rd_atomic += req->total_segs;
1493 * @qp: the QP
1500 static void reset_psn(struct rvt_qp *qp, u32 psn)
1502 u32 n = qp->s_acked;
1503 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, n);
1505 struct hfi1_qp_priv *priv = qp->priv;
1507 lockdep_assert_held(&qp->s_lock);
1508 qp->s_cur = n;
1511 qp->s_num_rd_atomic = 0;
1518 qp->s_state = OP(SEND_LAST);
1521 update_num_rd_atomic(qp, psn, wqe);
1527 if (++n == qp->s_size)
1529 if (n == qp->s_tail)
1531 wqe = rvt_get_swqe_ptr(qp, n);
1535 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1538 qp->s_cur = n;
1544 qp->s_state = OP(SEND_LAST);
1548 update_num_rd_atomic(qp, psn, wqe);
1560 qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
1565 qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
1569 qp->s_state = TID_OP(WRITE_RESP);
1573 qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
1577 qp->s_state = TID_OP(READ_RESP);
1585 qp->s_state = OP(SEND_LAST);
1589 qp->s_psn = psn;
1595 if ((cmp_psn(qp->s_psn, qp->s_sending_hpsn) <= 0) &&
1596 (cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0))
1597 qp->s_flags |= RVT_S_WAIT_PSN;
1598 qp->s_flags &= ~HFI1_S_AHG_VALID;
1599 trace_hfi1_sender_reset_psn(qp);
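Annotation: reset_psn() repositions the sender for a retry: starting from s_acked it walks forward until it finds the WQE whose [psn, lpsn] range contains the retry PSN, points s_cur at it, and picks an s_state that makes the next call to the packet builder resume mid-request. A sketch of the containment scan (ring fields illustrative; cmp_psn is the 24-bit compare modeled earlier):

    #include <stdint.h>

    struct swqe { uint32_t psn, lpsn; };

    static int32_t cmp_psn(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << 8)) >> 8;   /* sign-extended 24-bit difference */
    }

    /* Return the index of the WQE whose PSN range contains 'psn', scanning
     * from 'acked' toward 'tail' around a ring of 'size' entries. */
    static uint32_t find_wqe_for_psn(const struct swqe *q, uint32_t acked,
                                     uint32_t tail, uint32_t size, uint32_t psn)
    {
        uint32_t n = acked;
        while (n != tail && cmp_psn(psn, q[n].lpsn) > 0) {
            if (++n == size)
                n = 0;
        }
        return n;   /* caller sets s_cur = n and resumes inside this request */
    }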
1606 void hfi1_restart_rc(struct rvt_qp *qp, u32 psn, int wait)
1608 struct hfi1_qp_priv *priv = qp->priv;
1609 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1612 lockdep_assert_held(&qp->r_lock);
1613 lockdep_assert_held(&qp->s_lock);
1614 trace_hfi1_sender_restart_rc(qp);
1615 if (qp->s_retry == 0) {
1616 if (qp->s_mig_state == IB_MIG_ARMED) {
1617 hfi1_migrate_qp(qp);
1618 qp->s_retry = qp->s_retry_cnt;
1619 } else if (qp->s_last == qp->s_acked) {
1626 to_iport(qp->ibqp.device, qp->port_num);
1632 opfn_conn_reply(qp, priv->opfn.curr);
1633 wqe = do_rc_completion(qp, wqe, ibp);
1634 qp->s_flags &= ~RVT_S_WAIT_ACK;
1636 trace_hfi1_tid_write_sender_restart_rc(qp, 0);
1642 hfi1_kern_clear_hw_flow(priv->rcd, qp);
1645 hfi1_trdma_send_complete(qp, wqe,
1647 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
1654 qp->s_retry--;
1657 ibp = to_iport(qp->ibqp.device, qp->port_num);
1662 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
1664 qp->s_flags &= ~(RVT_S_WAIT_FENCE | RVT_S_WAIT_RDMAR |
1668 qp->s_flags |= RVT_S_SEND_ONE;
1669 reset_psn(qp, psn);
1673 * Set qp->s_sending_psn to the next PSN after the given one.
1677 static void reset_sending_psn(struct rvt_qp *qp, u32 psn)
1680 u32 n = qp->s_last;
1682 lockdep_assert_held(&qp->s_lock);
1685 wqe = rvt_get_swqe_ptr(qp, n);
1690 qp->s_sending_psn = wqe->lpsn + 1;
1692 qp->s_sending_psn = psn + 1;
1695 if (++n == qp->s_size)
1697 if (n == qp->s_tail)
1704 * @qp: the QP
1714 void hfi1_rc_verbs_aborted(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1729 qp->s_flags |= RVT_S_SEND_ONE;
1735 void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
1738 struct hfi1_qp_priv *priv = qp->priv;
1744 lockdep_assert_held(&qp->s_lock);
1745 if (!(ib_rvt_state_ops[qp->state] & RVT_SEND_OR_FLUSH_OR_RECV_OK))
1754 WARN_ON(!qp->s_rdma_ack_cnt);
1755 qp->s_rdma_ack_cnt--;
1767 reset_sending_psn(qp, psn);
1782 wqe = rvt_get_swqe_ptr(qp, tail);
1786 tail = qp->s_size - 1;
1791 head = qp->s_tail;
1792 tail = qp->s_acked;
1802 !(qp->s_flags &
1804 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1806 rvt_add_retry_timer_ext(qp, priv->timeout_shift);
1808 rvt_add_retry_timer(qp);
1817 (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK)) {
1823 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1827 hfi1_add_tid_retry_timer(qp);
1830 while (qp->s_last != qp->s_acked) {
1831 wqe = rvt_get_swqe_ptr(qp, qp->s_last);
1832 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) >= 0 &&
1833 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
1835 trdma_clean_swqe(qp, wqe);
1836 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1837 rvt_qp_complete_swqe(qp,
1846 trace_hfi1_sendcomplete(qp, psn);
1847 if (qp->s_flags & RVT_S_WAIT_PSN &&
1848 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1849 qp->s_flags &= ~RVT_S_WAIT_PSN;
1850 qp->s_sending_psn = qp->s_psn;
1851 qp->s_sending_hpsn = qp->s_psn - 1;
1852 hfi1_schedule_send(qp);
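Annotation: the completion loop at the end of hfi1_rc_send_complete() (lines 1830-1837) enforces a subtle rule: even an acked WQE cannot be completed to the user while any of its packets may still sit in the hardware's in-flight window [s_sending_psn, s_sending_hpsn]. A sketch of that gate, mirroring the condition at lines 1832-1833:

    #include <stdbool.h>
    #include <stdint.h>

    static int32_t cmp_psn(uint32_t a, uint32_t b)
    {
        return ((int32_t)((a - b) << 8)) >> 8;   /* 24-bit circular compare */
    }

    /* True if the WQE ending at 'lpsn' might still have packets in the send
     * DMA window, so its completion must be deferred. */
    static bool still_being_sent(uint32_t lpsn, uint32_t sending_psn,
                                 uint32_t sending_hpsn)
    {
        return cmp_psn(lpsn, sending_psn) >= 0 &&
               cmp_psn(sending_psn, sending_hpsn) <= 0;
    }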
1856 static inline void update_last_psn(struct rvt_qp *qp, u32 psn)
1858 qp->s_last_psn = psn;
1866 struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
1870 struct hfi1_qp_priv *priv = qp->priv;
1872 lockdep_assert_held(&qp->s_lock);
1878 trace_hfi1_rc_completion(qp, wqe->lpsn);
1879 if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
1880 cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
1881 trdma_clean_swqe(qp, wqe);
1882 trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
1883 rvt_qp_complete_swqe(qp,
1897 u8 sl = rdma_ah_get_sl(&qp->remote_ah_attr);
1902 engine = qp_to_sdma_engine(qp, sc5);
1907 qp->s_retry = qp->s_retry_cnt;
1917 update_last_psn(qp, wqe->lpsn);
1924 if (qp->s_acked == qp->s_cur) {
1925 if (++qp->s_cur >= qp->s_size)
1926 qp->s_cur = 0;
1927 qp->s_acked = qp->s_cur;
1928 wqe = rvt_get_swqe_ptr(qp, qp->s_cur);
1929 if (qp->s_acked != qp->s_tail) {
1930 qp->s_state = OP(SEND_LAST);
1931 qp->s_psn = wqe->psn;
1934 if (++qp->s_acked >= qp->s_size)
1935 qp->s_acked = 0;
1936 if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur)
1937 qp->s_draining = 0;
1938 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
1942 hfi1_schedule_send(qp);
1947 static void set_restart_qp(struct rvt_qp *qp, struct hfi1_ctxtdata *rcd)
1950 if (!(qp->r_flags & RVT_R_RDMAR_SEQ)) {
1951 qp->r_flags |= RVT_R_RDMAR_SEQ;
1952 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
1953 if (list_empty(&qp->rspwait)) {
1954 qp->r_flags |= RVT_R_RSP_SEND;
1955 rvt_get_qp(qp);
1956 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
1962 * update_qp_retry_state - Update qp retry state.
1963 * @qp: the QP
1968 * This function is called to update the qp retry state upon
1969 * receiving a TID WRITE RESP after the qp is scheduled to retry
1972 static void update_qp_retry_state(struct rvt_qp *qp, u32 psn, u32 spsn,
1975 struct hfi1_qp_priv *qpriv = qp->priv;
1977 qp->s_psn = psn + 1;
1985 qp->s_cur = qpriv->s_tid_cur + 1;
1986 if (qp->s_cur >= qp->s_size)
1987 qp->s_cur = 0;
1988 qp->s_state = TID_OP(WRITE_REQ);
1990 qp->s_cur = qpriv->s_tid_cur;
1991 qp->s_state = TID_OP(WRITE_RESP);
1997 * @qp: the QP the ACK came in on
2006 int do_rc_ack(struct rvt_qp *qp, u32 aeth, u32 psn, int opcode,
2011 struct hfi1_qp_priv *qpriv = qp->priv;
2018 lockdep_assert_held(&qp->s_lock);
2028 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2065 (delta_psn(psn, qp->s_last_psn) != 1))) {
2066 set_restart_qp(qp, rcd);
2079 opfn_conn_reply(qp, val);
2081 if (qp->s_num_rd_atomic &&
2085 qp->s_num_rd_atomic--;
2087 if ((qp->s_flags & RVT_S_WAIT_FENCE) &&
2088 !qp->s_num_rd_atomic) {
2089 qp->s_flags &= ~(RVT_S_WAIT_FENCE |
2091 hfi1_schedule_send(qp);
2092 } else if (qp->s_flags & RVT_S_WAIT_RDMAR) {
2093 qp->s_flags &= ~(RVT_S_WAIT_RDMAR |
2095 hfi1_schedule_send(qp);
2106 wqe = do_rc_completion(qp, wqe, ibp);
2107 if (qp->s_acked == qp->s_tail)
2111 trace_hfi1_rc_ack_do(qp, aeth, psn, wqe);
2112 trace_hfi1_sender_do_rc_ack(qp);
2118 rvt_mod_retry_timer_ext(qp,
2121 rvt_stop_rc_timers(qp);
2122 } else if (qp->s_acked != qp->s_tail) {
2126 __w = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
2146 if (cmp_psn(psn, qp->s_last_psn + 1)) {
2147 set_restart_qp(qp, rcd);
2154 if (qp->s_cur != qp->s_tail &&
2155 cmp_psn(qp->s_psn, psn) <= 0)
2156 update_qp_retry_state(qp, psn,
2160 rvt_mod_retry_timer(qp);
2162 rvt_stop_rc_timers(qp);
2168 rvt_mod_retry_timer(qp);
2174 if (cmp_psn(qp->s_psn, psn) <= 0)
2175 reset_psn(qp, psn + 1);
2179 rvt_stop_rc_timers(qp);
2180 if (cmp_psn(qp->s_psn, psn) <= 0) {
2181 qp->s_state = OP(SEND_LAST);
2182 qp->s_psn = psn + 1;
2185 if (qp->s_flags & RVT_S_WAIT_ACK) {
2186 qp->s_flags &= ~RVT_S_WAIT_ACK;
2187 hfi1_schedule_send(qp);
2189 rvt_get_credit(qp, aeth);
2190 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2191 qp->s_retry = qp->s_retry_cnt;
2201 update_last_psn(qp, psn);
2206 if (qp->s_acked == qp->s_tail)
2208 if (qp->s_flags & RVT_S_WAIT_RNR)
2210 rdi = ib_to_rvt(qp->ibqp.device);
2213 if (qp->s_rnr_retry == 0) {
2217 if (qp->s_rnr_retry_cnt < 7 && qp->s_rnr_retry_cnt > 0)
2218 qp->s_rnr_retry--;
2228 reset_psn(qp, qp->s_last_psn + 1);
2230 update_last_psn(qp, psn - 1);
2231 reset_psn(qp, psn);
2234 ibp->rvp.n_rc_resends += delta_psn(qp->s_psn, psn);
2235 qp->s_flags &= ~(RVT_S_WAIT_SSN_CREDIT | RVT_S_WAIT_ACK);
2236 rvt_stop_rc_timers(qp);
2237 rvt_add_rnr_timer(qp, aeth);
2241 if (qp->s_acked == qp->s_tail)
2244 update_last_psn(qp, psn - 1);
2255 hfi1_restart_rc(qp, psn, 0);
2256 hfi1_schedule_send(qp);
2273 if (qp->s_last == qp->s_acked) {
2275 hfi1_kern_read_tid_flow_free(qp);
2277 hfi1_trdma_send_complete(qp, wqe, status);
2278 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2286 qp->s_retry = qp->s_retry_cnt;
2287 qp->s_rnr_retry = qp->s_rnr_retry_cnt;
2297 rvt_stop_rc_timers(qp);
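Annotation: do_rc_ack() classifies the incoming AETH by its top three syndrome bits: 0 is a positive ACK (with a credit count in the bits below), 1 is an RNR NAK (timer value in the syndrome), and 3 is a hard NAK (PSN sequence error, remote access error, and so on). A sketch of that decode; the shift value follows the IBTA AETH syndrome layout and should be treated as an assumption here:

    #include <stdint.h>

    #define AETH_NAK_SHIFT 29   /* top 3 syndrome bits of the 32-bit AETH */

    enum ack_kind { ACK_POSITIVE, ACK_RNR_NAK, ACK_NAK, ACK_RESERVED };

    static enum ack_kind classify_aeth(uint32_t aeth)
    {
        switch (aeth >> AETH_NAK_SHIFT) {
        case 0:  return ACK_POSITIVE;  /* credit count lives in the bits below */
        case 1:  return ACK_RNR_NAK;   /* receiver not ready: back off and retry */
        case 3:  return ACK_NAK;       /* hard NAK: sequence or access error */
        default: return ACK_RESERVED;
        }
    }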
2305 static void rdma_seq_err(struct rvt_qp *qp, struct hfi1_ibport *ibp, u32 psn,
2310 lockdep_assert_held(&qp->s_lock);
2312 rvt_stop_rc_timers(qp);
2314 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2323 wqe = do_rc_completion(qp, wqe, ibp);
2327 qp->r_flags |= RVT_R_RDMAR_SEQ;
2328 hfi1_restart_rc(qp, qp->s_last_psn + 1, 0);
2329 if (list_empty(&qp->rspwait)) {
2330 qp->r_flags |= RVT_R_RSP_SEND;
2331 rvt_get_qp(qp);
2332 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2349 struct rvt_qp *qp = packet->qp;
2359 u32 pmtu = qp->pmtu;
2365 spin_lock_irqsave(&qp->s_lock, flags);
2366 trace_hfi1_ack(qp, psn);
2369 if (cmp_psn(psn, READ_ONCE(qp->s_next_psn)) >= 0)
2373 diff = cmp_psn(psn, qp->s_last_psn);
2379 rvt_get_credit(qp, aeth);
2388 if (qp->r_flags & RVT_R_RDMAR_SEQ) {
2389 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
2391 qp->r_flags &= ~RVT_R_RDMAR_SEQ;
2394 if (unlikely(qp->s_acked == qp->s_tail))
2396 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2408 if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) ||
2411 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2419 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2425 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2432 if (unlikely(pmtu >= qp->s_rdma_read_len))
2437 * 4.096 usec. * (1 << qp->timeout)
2439 rvt_mod_retry_timer(qp);
2440 if (qp->s_flags & RVT_S_WAIT_ACK) {
2441 qp->s_flags &= ~RVT_S_WAIT_ACK;
2442 hfi1_schedule_send(qp);
2446 qp->s_retry = qp->s_retry_cnt;
2452 qp->s_rdma_read_len -= pmtu;
2453 update_last_psn(qp, psn);
2454 spin_unlock_irqrestore(&qp->s_lock, flags);
2455 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2461 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
2474 wqe = rvt_get_swqe_ptr(qp, qp->s_acked);
2475 qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge,
2481 if (unlikely(cmp_psn(psn, qp->s_last_psn + 1)))
2493 if (unlikely(tlen != qp->s_rdma_read_len))
2496 rvt_copy_sge(qp, &qp->s_rdma_read_sge,
2498 WARN_ON(qp->s_rdma_read_sge.num_sge);
2499 (void)do_rc_ack(qp, aeth, psn,
2510 rdma_seq_err(qp, ibp, psn, rcd);
2516 if (qp->s_last == qp->s_acked) {
2517 rvt_send_complete(qp, wqe, status);
2518 rvt_error_qp(qp, IB_WC_WR_FLUSH_ERR);
2521 spin_unlock_irqrestore(&qp->s_lock, flags);
2526 static inline void rc_cancel_ack(struct rvt_qp *qp)
2528 qp->r_adefered = 0;
2529 if (list_empty(&qp->rspwait))
2531 list_del_init(&qp->rspwait);
2532 qp->r_flags &= ~RVT_R_RSP_NAK;
2533 rvt_put_qp(qp);
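Annotation: rc_cancel_ack() undoes a deferred-ack registration. The QP was parked on the receive context's wait list with a reference held (rvt_get_qp() in set_restart_qp() and rdma_seq_err() above), so cancelling must both unlink it and drop that reference. A sketch of the pairing (illustrative types; the kernel uses a list_head and an atomic refcount):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct qp_model {
        atomic_int refcount;
        bool on_wait_list;   /* stands in for !list_empty(&qp->rspwait) */
    };

    /* Defer: take a reference before parking the QP on the wait list. */
    static void defer_ack(struct qp_model *qp)
    {
        if (!qp->on_wait_list) {
            atomic_fetch_add(&qp->refcount, 1);   /* rvt_get_qp() */
            qp->on_wait_list = true;
        }
    }

    /* Cancel: unlink first, then drop the reference the deferral took. */
    static void cancel_ack(struct qp_model *qp)
    {
        if (!qp->on_wait_list)
            return;
        qp->on_wait_list = false;
        atomic_fetch_sub(&qp->refcount, 1);       /* rvt_put_qp() */
    }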
2540 * @qp: the QP for this packet
2552 struct rvt_qp *qp, u32 opcode, u32 psn,
2562 trace_hfi1_rcv_error(qp, psn);
2569 if (!qp->r_nak_state) {
2571 qp->r_nak_state = IB_NAK_PSN_ERROR;
2573 qp->r_ack_psn = qp->r_psn;
2579 rc_defered_ack(rcd, qp);
2604 spin_lock_irqsave(&qp->s_lock, flags);
2606 e = find_prev_entry(qp, psn, &prev, &mra, &old_req);
2629 offset = delta_psn(psn, e->psn) * qp->pmtu;
2639 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey,
2651 if (qp->s_acked_ack_queue == qp->s_tail_ack_queue)
2652 qp->s_acked_ack_queue = prev;
2653 qp->s_tail_ack_queue = prev;
2666 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2667 qp->s_acked_ack_queue = prev;
2668 qp->s_tail_ack_queue = prev;
2683 if (mra == qp->r_head_ack_queue) {
2684 spin_unlock_irqrestore(&qp->s_lock, flags);
2685 qp->r_nak_state = 0;
2686 qp->r_ack_psn = qp->r_psn - 1;
2694 if (qp->s_tail_ack_queue == qp->s_acked_ack_queue)
2695 qp->s_acked_ack_queue = mra;
2696 qp->s_tail_ack_queue = mra;
2699 qp->s_ack_state = OP(ACKNOWLEDGE);
2700 qp->s_flags |= RVT_S_RESP_PENDING;
2701 qp->r_nak_state = 0;
2702 hfi1_schedule_send(qp);
2705 spin_unlock_irqrestore(&qp->s_lock, flags);
2809 struct rvt_qp *qp = packet->qp;
2810 struct hfi1_qp_priv *qpriv = qp->priv;
2818 u32 pmtu = qp->pmtu;
2827 lockdep_assert_held(&qp->r_lock);
2832 fecn = process_ecn(qp, packet);
2833 opfn_trigger_conn_request(qp, be32_to_cpu(ohdr->bth[1]));
2848 diff = delta_psn(psn, qp->r_psn);
2850 if (rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd))
2856 switch (qp->r_state) {
2891 if (qp->state == IB_QPS_RTR && !(qp->r_flags & RVT_R_COMM_EST))
2892 rvt_comm_est(qp);
2897 ret = rvt_get_rwqe(qp, false);
2902 qp->r_rcv_len = 0;
2915 qp->r_rcv_len += pmtu;
2916 if (unlikely(qp->r_rcv_len > qp->r_len))
2918 rvt_copy_sge(qp, &qp->r_sge, data, pmtu, true, false);
2923 ret = rvt_get_rwqe(qp, true);
2933 ret = rvt_get_rwqe(qp, false);
2938 qp->r_rcv_len = 0;
2952 if (rvt_invalidate_rkey(qp, rkey))
2958 copy_last = rvt_is_user_qp(qp);
2971 wc.byte_len = tlen + qp->r_rcv_len;
2972 if (unlikely(wc.byte_len > qp->r_len))
2974 rvt_copy_sge(qp, &qp->r_sge, data, tlen, true, copy_last);
2975 rvt_put_ss(&qp->r_sge);
2976 qp->r_msn++;
2977 if (!__test_and_clear_bit(RVT_R_WRID_VALID, &qp->r_aflags))
2979 wc.wr_id = qp->r_wr_id;
2986 wc.qp = &qp->ibqp;
2987 wc.src_qp = qp->remote_qpn;
2988 wc.slid = rdma_ah_get_dlid(&qp->remote_ah_attr) & U16_MAX;
3000 wc.sl = rdma_ah_get_sl(&qp->remote_ah_attr);
3007 rvt_recv_cq(qp, &wc, ib_bth_is_solicited(ohdr));
3011 copy_last = rvt_is_user_qp(qp);
3015 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
3019 qp->r_len = be32_to_cpu(reth->length);
3020 qp->r_rcv_len = 0;
3021 qp->r_sge.sg_list = NULL;
3022 if (qp->r_len != 0) {
3028 ok = rvt_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr,
3032 qp->r_sge.num_sge = 1;
3034 qp->r_sge.num_sge = 0;
3035 qp->r_sge.sge.mr = NULL;
3036 qp->r_sge.sge.vaddr = NULL;
3037 qp->r_sge.sge.length = 0;
3038 qp->r_sge.sge.sge_length = 0;
3044 ret = rvt_get_rwqe(qp, true);
3049 rvt_put_ss(&qp->r_sge);
3061 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
3063 next = qp->r_head_ack_queue + 1;
3065 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3067 spin_lock_irqsave(&qp->s_lock, flags);
3068 if (unlikely(next == qp->s_acked_ack_queue)) {
3069 if (!qp->s_ack_queue[next].sent)
3071 update_ack_queue(qp, next);
3073 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3083 ok = rvt_rkey_ok(qp, &e->rdma_sge, len, vaddr,
3091 qp->r_psn += rvt_div_mtu(qp, len - 1);
3101 e->lpsn = qp->r_psn;
3107 qp->r_msn++;
3108 qp->r_psn++;
3109 qp->r_state = opcode;
3110 qp->r_nak_state = 0;
3111 qp->r_head_ack_queue = next;
3112 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3115 qp->s_flags |= RVT_S_RESP_PENDING;
3117 qp->s_flags |= RVT_S_ECN;
3118 hfi1_schedule_send(qp);
3120 spin_unlock_irqrestore(&qp->s_lock, flags);
3136 if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
3139 next = qp->r_head_ack_queue + 1;
3140 if (next > rvt_size_atomic(ib_to_rvt(qp->ibqp.device)))
3142 spin_lock_irqsave(&qp->s_lock, flags);
3143 if (unlikely(next == qp->s_acked_ack_queue)) {
3144 if (!qp->s_ack_queue[next].sent)
3146 update_ack_queue(qp, next);
3148 e = &qp->s_ack_queue[qp->r_head_ack_queue];
3152 opfn_conn_response(qp, e, ateth);
3159 if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64),
3164 maddr = (atomic64_t *)qp->r_sge.sge.vaddr;
3168 (u64)cmpxchg((u64 *)qp->r_sge.sge.vaddr,
3171 rvt_put_mr(qp->r_sge.sge.mr);
3172 qp->r_sge.num_sge = 0;
3178 qp->r_msn++;
3179 qp->r_psn++;
3180 qp->r_state = opcode;
3181 qp->r_nak_state = 0;
3182 qp->r_head_ack_queue = next;
3183 qpriv->r_tid_alloc = qp->r_head_ack_queue;
3186 qp->s_flags |= RVT_S_RESP_PENDING;
3188 qp->s_flags |= RVT_S_ECN;
3189 hfi1_schedule_send(qp);
3191 spin_unlock_irqrestore(&qp->s_lock, flags);
3199 qp->r_psn++;
3200 qp->r_state = opcode;
3201 qp->r_ack_psn = psn;
3202 qp->r_nak_state = 0;
3206 qp->r_adefered >= HFI1_PSN_CREDIT) {
3207 rc_cancel_ack(qp);
3210 qp->r_adefered++;
3211 rc_defered_ack(rcd, qp);
3216 qp->r_nak_state = qp->r_min_rnr_timer | IB_RNR_NAK;
3217 qp->r_ack_psn = qp->r_psn;
3219 rc_defered_ack(rcd, qp);
3223 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3224 qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR;
3225 qp->r_ack_psn = qp->r_psn;
3227 rc_defered_ack(rcd, qp);
3231 spin_unlock_irqrestore(&qp->s_lock, flags);
3233 rvt_rc_error(qp, IB_WC_LOC_QP_OP_ERR);
3234 qp->r_nak_state = IB_NAK_INVALID_REQUEST;
3235 qp->r_ack_psn = qp->r_psn;
3237 rc_defered_ack(rcd, qp);
3241 spin_unlock_irqrestore(&qp->s_lock, flags);
3243 rvt_rc_error(qp, IB_WC_LOC_PROT_ERR);
3244 qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR;
3245 qp->r_ack_psn = qp->r_psn;
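Annotation: the atomic arm of hfi1_rc_rcv() (lines 3164-3172 above) executes the request against responder memory with a real atomic instruction: cmpxchg() for COMPARE_SWAP, an atomic add for FETCH_ADD, returning the original value in either case so it can be carried back in the atomic ACK. A self-contained C11 model (assuming a 64-bit target; the kernel operates on the user buffer via cmpxchg()/atomic64 helpers):

    #include <stdatomic.h>
    #include <stdint.h>

    /* IB COMPARE_SWAP: if *addr == compare, store swap; always return the old value. */
    static uint64_t ib_compare_swap(_Atomic uint64_t *addr, uint64_t compare,
                                    uint64_t swap)
    {
        uint64_t expected = compare;
        atomic_compare_exchange_strong(addr, &expected, swap);
        return expected;   /* holds the prior value whether or not the swap happened */
    }

    /* IB FETCH_ADD: add and return the value that was there before the add. */
    static uint64_t ib_fetch_add(_Atomic uint64_t *addr, uint64_t add)
    {
        return atomic_fetch_add(addr, add);
    }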
3253 struct rvt_qp *qp)
3268 diff = delta_psn(psn, qp->r_psn);
3269 if (!qp->r_nak_state && diff >= 0) {
3271 qp->r_nak_state = IB_NAK_PSN_ERROR;
3273 qp->r_ack_psn = qp->r_psn;
3282 rc_defered_ack(rcd, qp);