Lines matching references to psn
130 struct rvt_qp *qp, u32 psn, int diff, bool fecn);
349 qpriv->flow_state.psn = 0;
806 fs->psn = 0;
833 fs->psn = 0;
1522 flow->flow_state.spsn = qpriv->flow_state.psn;
1526 qpriv->flow_state.psn += flow->npkts;
1682 u32 psn, u16 *fidx)
1692 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
1693 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
1717 /* This is the IB psn used to send the request */
1800 trace_hfi1_tid_req_build_read_req(qp, 0, wqe->wr.opcode, wqe->psn,
1839 if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
1875 /* Calculate the next segment start psn. */
1893 u32 bth0, u32 psn, u64 vaddr, u32 len)
1951 flow->flow_state.ib_spsn = psn;
1965 e->psn = psn;
1966 e->lpsn = psn + flow->npkts - 1;
1978 req->r_flow_psn = e->psn;
1980 trace_hfi1_tid_req_rcv_read_req(qp, 0, e->opcode, e->psn, e->lpsn,
1987 struct rvt_qp *qp, u32 psn, int diff)
1999 trace_hfi1_rsp_tid_rcv_error(qp, psn);
2000 trace_hfi1_tid_rdma_rcv_err(qp, 0, psn, diff);
2015 e = find_prev_entry(qp, psn, &prev, NULL, &old_req);
2021 req->r_flow_psn = psn;
2022 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
2037 if (psn != e->psn || len != req->total_len)
2062 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn,
2133 delta_psn(psn, fstate->resp_ib_psn),
2136 delta_psn(psn, fstate->resp_ib_psn);
2146 req->cur_seg = delta_psn(psn, e->psn);
2162 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn,
2195  * It's possible to receive a retry psn that is earlier than an RNR NAK
2196  * psn. In this case, the RNR NAK state should be cleared.
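The ordering check described in this comment relies on the wraparound-safe PSN comparison (cmp_psn, mask_psn) used throughout this listing. Below is a minimal, self-contained sketch of that idiom; the 31-bit PSN space (PSN_MASK/PSN_SHIFT) is an assumption modeled on the hfi1 helpers, not a copy of them, and the retry/RNR NAK values are illustrative.

#include <stdio.h>
#include <stdint.h>

#define PSN_MASK  0x7FFFFFFFu   /* assumed 31-bit PSN space */
#define PSN_SHIFT 1             /* sign-extend from bit 30 */

static uint32_t mask_psn(uint32_t a)
{
	return a & PSN_MASK;
}

/* < 0 if a is "before" b, 0 if equal, > 0 if "after", modulo wraparound */
static int cmp_psn(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << PSN_SHIFT)) >> PSN_SHIFT;
}

int main(void)
{
	uint32_t rnr_nak_psn = 5;                 /* hypothetical saved RNR NAK psn */
	uint32_t retry_psn = mask_psn(PSN_MASK);  /* a retry just before the wrap */

	/* As in the comment above: a retry psn that compares earlier than
	 * the RNR NAK psn means the RNR NAK state should be cleared. */
	if (cmp_psn(retry_psn, rnr_nak_psn) < 0)
		printf("retry psn %u is earlier than rnr nak psn %u\n",
		       retry_psn, rnr_nak_psn);
	return 0;
}

The shift-and-sign-extend makes the comparison circular: a psn just below the wrap point compares as earlier than a small psn, which is exactly the retry-versus-RNR-NAK case the comment describes.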
2238 u32 bth0, psn, len, rkey;
2250 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2251 trace_hfi1_rsp_rcv_tid_read_req(qp, psn);
2266 diff = delta_psn(psn, qp->r_psn);
2268 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
2295 if (tid_rdma_rcv_read_request(qp, e, packet, ohdr, bth0, psn, vaddr,
2307 qp->r_psn += e->lpsn - e->psn + 1;
2415 find_tid_request(struct rvt_qp *qp, u32 psn, enum ib_wr_opcode opcode)
2427 if (cmp_psn(psn, wqe->psn) >= 0 &&
2428 cmp_psn(psn, wqe->lpsn) <= 0) {
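find_tid_request() locates the request whose inclusive [psn, lpsn] window contains an incoming PSN, using the same two cmp_psn tests as the ib_spsn/ib_lpsn check near the top of this listing. A self-contained sketch of that membership test follows; the psn_range table and find_range() are hypothetical, and only the comparison idiom mirrors the listed code.

#include <stdint.h>

#define PSN_SHIFT 1	/* assumed 31-bit PSN space, as in the sketch above */

static int cmp_psn(uint32_t a, uint32_t b)
{
	return ((int32_t)((a - b) << PSN_SHIFT)) >> PSN_SHIFT;
}

struct psn_range {
	uint32_t spsn;	/* first PSN of the request */
	uint32_t lpsn;	/* last PSN of the request */
};

/* Return the index of the range containing psn, or -1 if none does. */
static int find_range(const struct psn_range *r, int n, uint32_t psn)
{
	int i;

	for (i = 0; i < n; i++)
		if (cmp_psn(psn, r[i].spsn) >= 0 &&
		    cmp_psn(psn, r[i].lpsn) <= 0)
			return i;
	return -1;
}

int main(void)
{
	const struct psn_range reqs[] = { { 10, 19 }, { 20, 34 } };

	/* psn 25 falls inside the second request's [20, 34] window */
	return find_range(reqs, 2, 25) == 1 ? 0 : 1;
}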
2529 req->e.swqe->psn, req->e.swqe->lpsn,
2657 u8 rte, u32 psn, u32 ibpsn)
2678 /* If the psn is outside the valid range, drop the packet */
2741 trace_hfi1_tid_req_read_kdeth_eflags(qp, 0, wqe->wr.opcode, wqe->psn,
2761 diff = cmp_psn(psn,
2787 if (cmp_psn(fpsn, psn) == 0) {
2794 mask_psn(psn + 1);
2859 u32 qp_num, psn, ibpsn;
2884 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
2924 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2946 trace_hfi1_eflags_err_write(qp, rcv_type, rte, psn);
2947 trace_hfi1_rsp_handle_kdeth_eflags(qp, psn);
2949 trace_hfi1_tid_req_handle_kdeth_eflags(qp, 0, e->opcode, e->psn,
2974 diff = cmp_psn(psn,
2987 if (psn == full_flow_psn(flow,
2991 mask_psn(psn + 1);
3064 wqe->psn, wqe->lpsn,
3132 trace_hfi1_tid_req_restart_req(qp, 0, wqe->wr.opcode, wqe->psn,
3335 wqe->lpsn = wqe->psn;
3357 wqe->psn, wqe->lpsn,
3510 trace_hfi1_tid_req_write_alloc_res(qp, 0, e->opcode, e->psn,
3548 if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
3602 qp->r_psn = e->psn + req->alloc_seg;
3668 u32 bth0, psn, len, rkey, num_segs;
3679 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
3680 trace_hfi1_rsp_rcv_tid_write_req(qp, psn);
3693 diff = delta_psn(psn, qp->r_psn);
3695 tid_rdma_rcv_err(packet, ohdr, qp, psn, diff, fecn);
3753 e->psn = psn;
3767 req->r_flow_psn = e->psn;
3785 trace_hfi1_tid_req_rcv_write_req(qp, 0, e->opcode, e->psn, e->lpsn,
3848 trace_hfi1_tid_req_build_write_resp(qp, 0, e->opcode, e->psn, e->lpsn,
4048 u32 opcode, aeth, psn, flow_psn, i, tidlen = 0, pktlen;
4053 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4060 if (cmp_psn(psn, qp->s_next_psn) >= 0)
4064 if (unlikely(cmp_psn(psn, qp->s_last_psn) <= 0))
4076 if (cmp_psn(psn, qp->s_last_psn + 1) != 0)
4101 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4104 trace_hfi1_ack(qp, psn);
4118 flow->flow_state.resp_ib_psn = psn;
4155 trace_hfi1_tid_req_rcv_write_resp(qp, 0, wqe->wr.opcode, wqe->psn,
4161 if (!cmp_psn(psn, wqe->psn)) {
4162 req->r_last_acked = mask_psn(wqe->psn - 1);
4280 u32 psn, next;
4285 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4296 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
4299 if (cmp_psn(psn, flow->flow_state.r_next_psn))
4302 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4323 len += delta_psn(psn,
4341 priv->r_next_psn_kdeth = mask_psn(psn + 1);
4346 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4360 trace_hfi1_rsp_rcv_tid_write_data(qp, psn);
4361 trace_hfi1_tid_req_rcv_write_data(qp, 0, e->opcode, e->psn, e->lpsn,
4420 static bool hfi1_tid_rdma_is_resync_psn(u32 psn)
4422 return (bool)((psn & HFI1_KDETH_BTH_SEQ_MASK) ==
4505 u32 aeth, psn, req_psn, ack_psn, flpsn, resync_psn, ack_kpsn;
4511 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4517 trace_hfi1_rcv_tid_ack(qp, aeth, psn, req_psn, resync_psn);
4521 cmp_psn(psn, qpriv->s_resync_psn))
4525 if (hfi1_tid_rdma_is_resync_psn(psn))
4528 ack_kpsn = psn;
4543 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4549 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4560 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4579 trace_hfi1_tid_req_rcv_tid_ack(qp, 0, wqe->wr.opcode, wqe->psn,
4585 if (!hfi1_tid_rdma_is_resync_psn(psn)) {
4626 psn = mask_psn(psn + 1);
4627 generation = psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4648 req->r_ack_psn = psn;
4725 if (cmp_psn(psn, flpsn) > 0)
4824 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4880 u32 psn, generation, idx, gen_next;
4885 psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4887 generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT;
4915 fs->psn = 0;
4936 trace_hfi1_tid_req_rcv_resync(qp, 0, e->opcode, e->psn,
4953 flow->flow_state.spsn = fs->psn;
4959 fs->psn += flow->npkts;
5070 trace_hfi1_tid_req_make_tid_pkt(qp, 0, wqe->wr.opcode, wqe->psn,
5109 wqe->psn, wqe->lpsn, req);
5231 trace_hfi1_rsp_make_tid_ack(qp, e->psn);
5232 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5306 trace_hfi1_tid_req_make_tid_ack(qp, 0, e->opcode, e->psn, e->lpsn,
5503 struct rvt_qp *qp, u32 psn, int diff, bool fecn)
5507 tid_rdma_rcv_error(packet, ohdr, qp, psn, diff);
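Several entries above split a PSN into a generation and a per-flow sequence (psn >> HFI1_KDETH_BTH_SEQ_SHIFT, psn & HFI1_KDETH_BTH_SEQ_MASK) and rely on mask_psn(psn + 1) rolling over into the next generation. The sketch below illustrates that layout; the 11-bit sequence width is an assumption modeled on the HFI1_KDETH_BTH_SEQ_* constants, and make_full_psn() is a hypothetical stand-in for the full_flow_psn() referenced in the listing.

#include <stdio.h>
#include <stdint.h>

#define KDETH_SEQ_SHIFT 11                            /* assumed sequence width */
#define KDETH_SEQ_MASK  ((1u << KDETH_SEQ_SHIFT) - 1)
#define PSN_MASK        0x7FFFFFFFu                   /* assumed 31-bit PSN space */

static uint32_t mask_psn(uint32_t a)
{
	return a & PSN_MASK;
}

/* Rebuild a full PSN from a generation and a flow sequence. */
static uint32_t make_full_psn(uint32_t generation, uint32_t seq)
{
	return mask_psn((generation << KDETH_SEQ_SHIFT) | (seq & KDETH_SEQ_MASK));
}

int main(void)
{
	uint32_t psn = make_full_psn(3, KDETH_SEQ_MASK); /* last sequence of gen 3 */
	uint32_t next = mask_psn(psn + 1);

	/* Incrementing past the last in-generation sequence carries into
	 * the generation bits, matching
	 * "generation = mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT" above. */
	printf("gen %u seq %u -> gen %u seq %u\n",
	       psn >> KDETH_SEQ_SHIFT, psn & KDETH_SEQ_MASK,
	       next >> KDETH_SEQ_SHIFT, next & KDETH_SEQ_MASK);
	return 0;
}

With an 11-bit sequence, this carry is why the listing computes the next generation as mask_psn(psn + 1) >> HFI1_KDETH_BTH_SEQ_SHIFT when a flow wraps.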