Lines Matching refs:req

111 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
112 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
115 struct tid_rdma_request *req);
253 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
254 * during retry, it will lead to req->cur_seg = 0, which is exactly
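
The comment fragment at 253-254 reasons about the arithmetic used to recompute the current segment index on a retry. As a rough user-space model only (not the driver code): if priv->pkts_ps is taken to mean "packets per segment", a retry that starts at the request's first packet gives a zero PSN delta and therefore cur_seg = 0, as the comment describes.

    #include <stdio.h>

    int main(void)
    {
        unsigned int wqe_psn = 0x100;    /* first PSN of the request (made-up value) */
        unsigned int retry_psn = 0x100;  /* PSN the retry starts from */
        unsigned int pkts_ps = 4;        /* assumed meaning: packets per segment */

        /* Zero PSN delta divided by pkts_ps yields segment index 0. */
        unsigned int cur_seg = ((retry_psn - wqe_psn) & 0xffffff) / pkts_ps;

        printf("cur_seg = %u\n", cur_seg);
        return 0;
    }
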
892 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
895 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
929 trace_hfi1_tid_pageset(flow->req->qp, setcount,
1027 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1030 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1084 struct tid_rdma_request *req = flow->req;
1086 u32 length = flow->req->seg_len;
1090 while (length && req->isge < ss->num_sge) {
1097 if (++req->isge < ss->num_sge)
1098 *sge = ss->sg_list[req->isge - 1];
1110 flow->length = flow->req->seg_len - length;
1111 *last = req->isge == ss->num_sge ? false : true;
1121 dd = flow->req->rcd->dd;
1137 struct hfi1_devdata *dd = flow->req->rcd->dd;
1166 * segment. All segments are of length flow->req->seg_len.
1176 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1185 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1213 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1232 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1286 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1296 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1301 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1340 flow->req->qp, flow->tidcnt - 1,
1363 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1392 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1409 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1416 * @req: TID RDMA request for which the segment/flow is being set up
1433 * @req points to struct tid_rdma_request of which the segments are a part. The
1434 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1435 * req->flow_idx is the index of the flow which has been prepared in this
1436 * invocation of the function. With flow = &req->flows[req->flow_idx],
1445 * For the queuing, caller must hold the flow->req->qp s_lock from the send
1454 int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
1456 __must_hold(&req->qp->s_lock)
1458 struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1459 struct hfi1_ctxtdata *rcd = req->rcd;
1460 struct hfi1_qp_priv *qpriv = req->qp->priv;
1463 u16 clear_tail = req->clear_tail;
1465 lockdep_assert_held(&req->qp->s_lock);
1472 if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
1473 CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
1474 req->n_flows)
1483 hfi1_wait_kmem(flow->req->qp);
1488 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1521 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1527 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1530 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
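
The kernel-doc fragments at 1433-1445 and the hfi1_kern_exp_rcv_setup() lines above show the per-request flow ring being gated with CIRC_SPACE()/CIRC_CNT() and setup_head advanced under a power-of-two mask (1472-1474, 1527). A minimal user-space model of that index arithmetic; the CIRC_CNT/CIRC_SPACE definitions follow <linux/circ_buf.h>, and MAX_FLOWS here is a stand-in ring size, not necessarily the driver's value.

    #include <stdio.h>

    /* Definitions matching <linux/circ_buf.h>; size must be a power of two. */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    #define MAX_FLOWS 32    /* stand-in ring size */

    int main(void)
    {
        unsigned int setup_head = 0, clear_tail = 0;

        /* Producer (segment setup): only claim a slot when space remains. */
        if (CIRC_SPACE(setup_head, clear_tail, MAX_FLOWS))
            setup_head = (setup_head + 1) & (MAX_FLOWS - 1);

        /* Consumer (segment clear): only retire a slot that is populated. */
        if (CIRC_CNT(setup_head, clear_tail, MAX_FLOWS))
            clear_tail = (clear_tail + 1) & (MAX_FLOWS - 1);

        printf("flows in flight: %u\n",
               CIRC_CNT(setup_head, clear_tail, MAX_FLOWS));
        return 0;
    }
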
1546 int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
1547 __must_hold(&req->qp->s_lock)
1549 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1550 struct hfi1_ctxtdata *rcd = req->rcd;
1555 lockdep_assert_held(&req->qp->s_lock);
1557 if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
1573 req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
1575 if (fqp == req->qp) {
1589 void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
1590 __must_hold(&req->qp->s_lock)
1593 while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
1594 if (hfi1_kern_exp_rcv_clear(req))
1601 * @req: the TID RDMA request to be cleaned
1603 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
1605 kfree(req->flows);
1606 req->flows = NULL;
1624 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
1630 if (likely(req->flows))
1633 req->rcd->numa_id);
1638 flows[i].req = req;
1643 req->flows = flows;
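
hfi1_kern_exp_rcv_alloc_flows() (1624-1643 above) allocates the per-request flows[] array lazily and points each flow back at its owning request (flows[i].req = req). A hedged user-space sketch of that pattern, with calloc() standing in for the driver's node-aware kernel allocator and the structures reduced to only the fields the fragments show.

    #include <stdlib.h>

    #define MAX_FLOWS 32                    /* stand-in ring size */

    struct tid_rdma_request;

    struct tid_rdma_flow {
        struct tid_rdma_request *req;       /* back-pointer set at allocation */
        /* per-flow state elided */
    };

    struct tid_rdma_request {
        struct tid_rdma_flow *flows;        /* NULL until first use */
        /* per-request state elided */
    };

    /* Allocate once, on first use; every flow remembers its request. */
    static int alloc_flows_model(struct tid_rdma_request *req)
    {
        struct tid_rdma_flow *flows;
        int i;

        if (req->flows)                     /* already allocated: nothing to do */
            return 0;

        flows = calloc(MAX_FLOWS, sizeof(*flows));
        if (!flows)
            return -1;

        for (i = 0; i < MAX_FLOWS; i++)
            flows[i].req = req;

        req->flows = flows;
        return 0;
    }

    int main(void)
    {
        struct tid_rdma_request req = { 0 };

        return alloc_flows_model(&req);
    }
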
1648 struct tid_rdma_request *req)
1662 req->qp = qp;
1663 req->rcd = qpriv->rcd;
1674 static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
1680 head = req->setup_head;
1681 tail = req->clear_tail;
1684 flow = &req->flows[tail];
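
find_flow_ib() (1674-1684 above) walks the occupied part of the ring from clear_tail toward setup_head and, judging by its callers (it is handed a PSN from the packet header around 3049), appears to return the flow that covers a given IB PSN. The standalone sketch below models such a lookup under that assumption; the field names (ib_spsn, npkts) follow other fragments in this listing, but the struct layout and the plain, non-wrapping PSN comparison are simplifications.

    #include <stddef.h>

    #define MAX_FLOWS 32    /* stand-in; must be a power of two */
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    struct flow_model {
        unsigned int ib_spsn;   /* first IB PSN covered by the flow */
        unsigned int npkts;     /* number of packets in the flow */
    };

    /* Scan tail..head and return the flow whose PSN range contains psn. */
    static struct flow_model *find_flow_model(struct flow_model *flows,
                                              unsigned int head,
                                              unsigned int tail,
                                              unsigned int psn,
                                              unsigned int *fidx)
    {
        unsigned int idx;

        for (idx = tail; CIRC_CNT(head, idx, MAX_FLOWS);
             idx = (idx + 1) & (MAX_FLOWS - 1)) {
            struct flow_model *flow = &flows[idx];

            if (psn >= flow->ib_spsn &&
                psn < flow->ib_spsn + flow->npkts) {
                if (fidx)
                    *fidx = idx;
                return flow;
            }
        }
        return NULL;    /* no flow in the ring covers this PSN */
    }

    int main(void)
    {
        struct flow_model flows[MAX_FLOWS] = {
            [0] = { .ib_spsn = 100, .npkts = 8 },
            [1] = { .ib_spsn = 108, .npkts = 8 },
        };
        unsigned int fidx = 0;

        /* PSN 110 falls inside the second flow (index 1). */
        return find_flow_model(flows, 2, 0, 110, &fidx) ? (int)fidx : -1;
    }
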
1700 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1701 struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1702 struct rvt_qp *qp = req->qp;
1712 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1742 req->cur_seg * req->seg_len + flow->sent);
1763 req->cur_seg++;
1765 req->ack_pending++;
1766 req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
1786 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1794 wqe->lpsn, req);
1800 if (req->state == TID_REQUEST_SYNC) {
1804 hfi1_kern_clear_hw_flow(req->rcd, qp);
1806 req->state = TID_REQUEST_ACTIVE;
1811 * have been allocated before. In this case, req->flow_idx should
1812 * fall behind req->setup_head.
1814 if (req->flow_idx == req->setup_head) {
1816 if (req->state == TID_REQUEST_RESEND) {
1822 restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1824 req->isge = 0;
1825 req->state = TID_REQUEST_ACTIVE;
1833 req->state = TID_REQUEST_SYNC;
1842 * The following call will advance req->setup_head after
1845 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
1846 req->state = TID_REQUEST_QUEUED;
1856 /* req->flow_idx should only be one slot behind req->setup_head */
1857 flow = &req->flows[req->flow_idx];
1863 flow->flow_state.ib_spsn = req->s_next_psn;
1869 req->s_next_psn += flow->npkts;
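
The comments at 1811-1812 and 1856 above state the invariant for the read-request build path: a freshly prepared flow leaves req->flow_idx exactly one slot behind req->setup_head, and flow_idx catches up once that flow has been turned into a request packet. A small model of that producer/consumer relationship; the index names follow the fragments, while MAX_FLOWS and the arithmetic helper are stand-ins.

    #include <assert.h>

    #define MAX_FLOWS 32
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    int main(void)
    {
        unsigned int setup_head = 0, flow_idx = 0;

        /* Segment setup (hfi1_kern_exp_rcv_setup) advances setup_head... */
        setup_head = (setup_head + 1) & (MAX_FLOWS - 1);

        /* ...so the freshly prepared flow sits at flow_idx, one slot behind. */
        assert(CIRC_CNT(setup_head, flow_idx, MAX_FLOWS) == 1);

        /* Building the read request consumes that flow and catches up. */
        flow_idx = (flow_idx + 1) & (MAX_FLOWS - 1);
        assert(flow_idx == setup_head);

        return 0;
    }
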
1889 struct tid_rdma_request *req;
1893 req = ack_to_tid_req(e);
1896 flow = &req->flows[req->setup_head];
1929 req->clear_tail = req->setup_head;
1947 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1949 req->flow_idx = req->setup_head;
1952 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1962 req->n_flows = qpriv->tid_rdma.local.max_read;
1963 req->state = TID_REQUEST_ACTIVE;
1964 req->cur_seg = 0;
1965 req->comp_seg = 0;
1966 req->ack_seg = 0;
1967 req->isge = 0;
1968 req->seg_len = qpriv->tid_rdma.local.max_len;
1969 req->total_len = len;
1970 req->total_segs = 1;
1971 req->r_flow_psn = e->psn;
1974 req);
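
The block at 1962-1971 above resets the per-request bookkeeping when a new TID RDMA READ request is received. Below is a reduced model of that state with my reading of what each counter tracks; the field meanings are inferred from the surrounding fragments rather than quoted from the driver, and the enum value is a stand-in.

    /* Reduced model of the tid_rdma_request bookkeeping (field roles inferred). */
    struct tid_req_model {
        unsigned int n_flows;     /* max segments that may be in flight */
        unsigned int cur_seg;     /* segment currently being built/sent */
        unsigned int comp_seg;    /* segments fully completed */
        unsigned int ack_seg;     /* segments acknowledged by the peer */
        unsigned int isge;        /* current index into the SGE list */
        unsigned int seg_len;     /* bytes carried per segment */
        unsigned int total_len;   /* total request length in bytes */
        unsigned int total_segs;  /* total_len split into seg_len pieces */
        unsigned int r_flow_psn;  /* PSN associated with the request */
        int state;                /* e.g. TID_REQUEST_ACTIVE */
    };

    /* Mirror the reset performed when a new READ request arrives. */
    static void init_read_req_model(struct tid_req_model *req,
                                    unsigned int max_read,
                                    unsigned int max_len,
                                    unsigned int len,
                                    unsigned int psn)
    {
        req->n_flows = max_read;
        req->cur_seg = 0;
        req->comp_seg = 0;
        req->ack_seg = 0;
        req->isge = 0;
        req->seg_len = max_len;
        req->total_len = len;
        req->total_segs = 1;      /* a single segment, as at line 1970 */
        req->r_flow_psn = psn;
        req->state = 1;           /* stand-in for TID_REQUEST_ACTIVE */
    }

    int main(void)
    {
        struct tid_req_model req;

        init_read_req_model(&req, 4, 8192, 8192, 0x10);
        return (int)req.total_segs - 1;   /* 0 on the expected reset */
    }
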
1987 struct tid_rdma_request *req;
2013 req = ack_to_tid_req(e);
2014 req->r_flow_psn = psn;
2015 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
2030 if (psn != e->psn || len != req->total_len)
2048 * req->clear_tail is advanced). However, when an earlier
2070 if (req->state == TID_REQUEST_RESEND) {
2071 req->state = TID_REQUEST_RESEND_ACTIVE;
2072 } else if (req->state == TID_REQUEST_INIT_RESEND) {
2073 req->state = TID_REQUEST_INIT;
2084 if (old_req || req->state == TID_REQUEST_INIT ||
2085 (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
2092 req = ack_to_tid_req(e);
2094 req->state == TID_REQUEST_INIT)
2095 req->state = TID_REQUEST_INIT_RESEND;
2111 if (req->clear_tail == req->setup_head)
2119 if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
2120 fstate = &req->flows[req->clear_tail].flow_state;
2122 CIRC_CNT(req->flow_idx, req->clear_tail,
2124 req->flow_idx =
2125 CIRC_ADD(req->clear_tail,
2137 if (CIRC_CNT(req->setup_head, req->flow_idx,
2139 req->cur_seg = delta_psn(psn, e->psn);
2140 req->state = TID_REQUEST_RESEND_ACTIVE;
2154 req = ack_to_tid_req(e);
2156 e->lpsn, req);
2158 req->cur_seg == req->comp_seg ||
2159 req->state == TID_REQUEST_INIT ||
2160 req->state == TID_REQUEST_INIT_RESEND) {
2161 if (req->state == TID_REQUEST_INIT)
2162 req->state = TID_REQUEST_INIT_RESEND;
2166 CIRC_CNT(req->flow_idx,
2167 req->clear_tail,
2169 req->flow_idx = req->clear_tail;
2170 req->state = TID_REQUEST_RESEND;
2171 req->cur_seg = req->comp_seg;
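
The retry handling above uses delta_psn()/mask_psn() (line 2139, and again around 3062 further down) to turn a possibly wrapped packet sequence number back into a segment offset. A standalone model of the usual 24-bit PSN arithmetic; the definitions follow the common convention (mask to 24 bits, sign-extend differences) and are not copied from the driver.

    #include <stdint.h>
    #include <stdio.h>

    #define PSN_MASK 0xFFFFFFu              /* IB PSNs are 24 bits wide */

    static uint32_t mask_psn(uint32_t a)
    {
        return a & PSN_MASK;
    }

    /* Signed 24-bit difference, so comparisons survive PSN wraparound. */
    static int32_t delta_psn(uint32_t a, uint32_t b)
    {
        return (int32_t)((a - b) << 8) >> 8;
    }

    int main(void)
    {
        uint32_t e_psn = mask_psn(0xFFFFFE);    /* request starts near the wrap */
        uint32_t psn = mask_psn(0x000000);      /* retried PSN, after the wrap */

        /* Two packets past the start, despite the 24-bit wrap; prints 2. */
        printf("delta = %d\n", delta_psn(psn, e_psn));
        return 0;
    }
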
2342 struct tid_rdma_request *req = &epriv->tid_req;
2344 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2359 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2391 req->clear_tail = (req->clear_tail + 1) &
2412 struct tid_rdma_request *req = NULL;
2423 req = wqe_to_tid_req(wqe);
2430 return req;
2448 struct tid_rdma_request *req;
2463 req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2464 if (unlikely(!req))
2467 flow = &req->flows[req->clear_tail];
2494 len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
2506 req->ack_pending--;
2521 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2522 req->e.swqe->psn, req->e.swqe->lpsn,
2523 req);
2524 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2527 hfi1_kern_exp_rcv_clear(req);
2533 if (++req->comp_seg >= req->total_segs) {
2535 req->state = TID_REQUEST_COMPLETE;
2543 if ((req->state == TID_REQUEST_SYNC &&
2544 req->comp_seg == req->cur_seg) ||
2548 if (req->state == TID_REQUEST_SYNC)
2549 req->state = TID_REQUEST_ACTIVE;
2576 struct tid_rdma_request *req;
2584 req = wqe_to_tid_req(wqe);
2585 hfi1_kern_exp_rcv_clear_all(req);
2625 struct tid_rdma_request *req;
2630 req = wqe_to_tid_req(wqe);
2631 flow = &req->flows[req->clear_tail];
2657 struct tid_rdma_request *req;
2733 req = wqe_to_tid_req(wqe);
2735 wqe->lpsn, req);
2749 flow = &req->flows[req->clear_tail];
2751 req->clear_tail,
2857 struct tid_rdma_request *req;
2934 req = ack_to_tid_req(e);
2935 if (req->comp_seg == req->cur_seg)
2937 flow = &req->flows[req->clear_tail];
2942 e->lpsn, req);
2943 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
3040 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3049 flow = find_flow_ib(req, *bth2, &fidx);
3056 req);
3060 fidx = req->acked_tail;
3061 flow = &req->flows[fidx];
3062 *bth2 = mask_psn(req->r_ack_psn);
3098 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3118 req->flow_idx = fidx;
3120 req->clear_tail = fidx;
3124 wqe->lpsn, req);
3125 req->state = TID_REQUEST_ACTIVE;
3131 for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
3133 req->flows[fidx].sent = 0;
3134 req->flows[fidx].pkt = 0;
3135 req->flows[fidx].tid_idx = 0;
3136 req->flows[fidx].tid_offset = 0;
3137 req->flows[fidx].resync_npkts = 0;
3145 req = wqe_to_tid_req(wqe);
3146 req->cur_seg = req->ack_seg;
3147 fidx = req->acked_tail;
3148 /* Pull req->clear_tail back */
3149 req->clear_tail = fidx;
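
The retry fragments at 3145-3149 above ("Pull req->clear_tail back") rewind the ring so that retransmission restarts from the last acknowledged segment: cur_seg is pulled back to ack_seg and clear_tail back to acked_tail. A minimal model of that rewind, assuming the cursor roles as read from the fragments.

    #include <assert.h>

    #define MAX_FLOWS 32
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    struct ring_model {
        unsigned int setup_head;    /* next slot to be programmed */
        unsigned int clear_tail;    /* oldest slot still carrying data */
        unsigned int acked_tail;    /* oldest slot not yet acknowledged */
        unsigned int cur_seg;       /* segment currently being (re)sent */
        unsigned int ack_seg;       /* segments acknowledged so far */
    };

    /* On retry, resume from the last acknowledged point in the ring. */
    static void rewind_on_retry(struct ring_model *r)
    {
        r->cur_seg = r->ack_seg;
        r->clear_tail = r->acked_tail;
    }

    int main(void)
    {
        struct ring_model r = {
            .setup_head = 5, .clear_tail = 4, .acked_tail = 2,
            .cur_seg = 4, .ack_seg = 2,
        };

        rewind_on_retry(&r);

        /* Everything between acked_tail and setup_head is now resendable. */
        assert(r.clear_tail == r.acked_tail);
        assert(CIRC_CNT(r.setup_head, r.clear_tail, MAX_FLOWS) == 3);
        return 0;
    }
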
3206 struct tid_rdma_request *req;
3221 req = wqe_to_tid_req(prev);
3222 if (req->ack_seg != req->total_segs)
3239 req = wqe_to_tid_req(prev);
3240 if (req->ack_seg != req->total_segs)
3359 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3368 req->n_flows = remote->max_write;
3369 req->state = TID_REQUEST_ACTIVE;
3448 * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3450 * [request: qp->s_tail_ack_queue, segment: req->cur_seg]
3454 struct tid_rdma_request *req;
3497 req = ack_to_tid_req(e);
3499 e->lpsn, req);
3501 if (req->alloc_seg >= req->total_segs)
3530 npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3542 * If overtaking req->acked_tail, send an RNR NAK. Because the
3548 if (!CIRC_SPACE(req->setup_head, req->acked_tail,
3557 ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
3564 req->alloc_seg++;
3590 qp->r_psn = e->psn + req->alloc_seg;
3655 struct tid_rdma_request *req;
3708 req = ack_to_tid_req(e);
3716 req->state = TID_REQUEST_INIT;
3730 (req->setup_head != req->clear_tail ||
3731 req->clear_tail != req->acked_tail))
3745 req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3746 req->state = TID_REQUEST_INIT;
3747 req->cur_seg = 0;
3748 req->comp_seg = 0;
3749 req->ack_seg = 0;
3750 req->alloc_seg = 0;
3751 req->isge = 0;
3752 req->seg_len = qpriv->tid_rdma.local.max_len;
3753 req->total_len = len;
3754 req->total_segs = num_segs;
3755 req->r_flow_psn = e->psn;
3756 req->ss.sge = e->rdma_sge;
3757 req->ss.num_sge = 1;
3759 req->flow_idx = req->setup_head;
3760 req->clear_tail = req->setup_head;
3761 req->acked_tail = req->setup_head;
3774 req);
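
When a TID RDMA WRITE request is received (3745-3761 above), all ring cursors are initialised to the same slot: flow_idx, clear_tail and acked_tail all start at setup_head, so every occupancy count is zero. A tiny model that checks this starting condition and the additivity the later CIRC_CNT() arithmetic relies on while the cursors stay ordered; cursor roles are again inferred and the ring size is a stand-in.

    #include <assert.h>

    #define MAX_FLOWS 32
    #define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

    int main(void)
    {
        unsigned int setup_head = 7;    /* arbitrary starting slot */
        unsigned int flow_idx = setup_head;
        unsigned int clear_tail = setup_head;
        unsigned int acked_tail = setup_head;

        /* A freshly initialised request has nothing queued at any stage. */
        assert(CIRC_CNT(setup_head, flow_idx, MAX_FLOWS) == 0);
        assert(CIRC_CNT(setup_head, clear_tail, MAX_FLOWS) == 0);
        assert(CIRC_CNT(clear_tail, acked_tail, MAX_FLOWS) == 0);

        /* As cursors advance in order, the counts add up end to end. */
        setup_head = (setup_head + 3) & (MAX_FLOWS - 1);
        clear_tail = (clear_tail + 1) & (MAX_FLOWS - 1);
        assert(CIRC_CNT(setup_head, clear_tail, MAX_FLOWS) +
               CIRC_CNT(clear_tail, acked_tail, MAX_FLOWS) ==
               CIRC_CNT(setup_head, acked_tail, MAX_FLOWS));
        return 0;
    }
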
3829 struct tid_rdma_request *req = &epriv->tid_req;
3837 req);
3840 flow = &req->flows[req->flow_idx];
3841 switch (req->state) {
3850 if (req->cur_seg >= req->alloc_seg)
3860 req->state = TID_REQUEST_ACTIVE;
3861 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3862 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3868 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3869 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3870 if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
3871 req->state = TID_REQUEST_ACTIVE;
3879 req->cur_seg++;
3993 struct tid_rdma_request *req =
3996 hfi1_kern_exp_rcv_clear_all(req);
4033 struct tid_rdma_request *req;
4073 req = wqe_to_tid_req(wqe);
4079 if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
4094 flow = &req->flows[req->setup_head];
4107 flow->length = min_t(u32, req->seg_len,
4108 (wqe->length - (req->comp_seg * req->seg_len)));
4121 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4123 req->comp_seg++;
4144 wqe->lpsn, req);
4150 req->r_last_acked = mask_psn(wqe->psn - 1);
4152 req->acked_tail = req->setup_head;
4156 req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
4157 req->state = TID_REQUEST_ACTIVE;
4167 req->comp_seg == req->total_segs) {
4197 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4198 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4200 struct rvt_qp *qp = req->qp;
4219 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4242 rvt_div_round_up_mtu(qp, req->seg_len) >
4244 req->state = TID_REQUEST_SYNC;
4264 struct tid_rdma_request *req;
4282 req = ack_to_tid_req(e);
4283 flow = &req->flows[req->clear_tail];
4310 len = req->comp_seg * req->seg_len;
4314 if (unlikely(req->total_len - len < pmtu))
4324 ss.total_len = req->total_len;
4335 hfi1_kern_exp_rcv_clear(req);
4338 req->comp_seg++;
4350 req);
4376 if (req->cur_seg < req->total_segs ||
4385 hfi1_mod_tid_reap_timer(req->qp);
4387 hfi1_stop_tid_reap_timer(req->qp);
4420 struct tid_rdma_request *req = ack_to_tid_req(e);
4421 struct tid_rdma_flow *flow = &req->flows[iflow];
4491 struct tid_rdma_request *req;
4530 req = wqe_to_tid_req(wqe);
4532 wqe->lpsn, req);
4533 flow = &req->flows[req->acked_tail];
4534 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4543 req->ack_seg < req->cur_seg) {
4544 req->ack_seg++;
4546 req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
4547 req->r_last_acked = flow->flow_state.resp_ib_psn;
4549 wqe->lpsn, req);
4550 if (req->ack_seg == req->total_segs) {
4551 req->state = TID_REQUEST_COMPLETE;
4561 req = wqe_to_tid_req(wqe);
4563 flow = &req->flows[req->acked_tail];
4564 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4568 wqe->lpsn, req);
4576 req->ack_seg < req->cur_seg)
4599 req->ack_seg == req->total_segs) ||
4605 if (req->ack_seg == req->comp_seg) {
4624 req = wqe_to_tid_req(wqe);
4625 flow = &req->flows[req->acked_tail];
4636 req->r_ack_psn = psn;
4653 rptr = req;
4695 req->cur_seg = req->ack_seg;
4709 if (!req->flows)
4711 flow = &req->flows[req->acked_tail];
4715 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4717 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4718 req->cur_seg = req->ack_seg;
4792 struct tid_rdma_request *req;
4810 req = wqe_to_tid_req(wqe);
4811 trace_hfi1_tid_req_tid_retry_timeout(/* req */
4812 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4837 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4838 struct tid_rdma_flow *flow = &req->flows[fidx];
4865 struct tid_rdma_request *req;
4923 req = ack_to_tid_req(e);
4925 e->lpsn, req);
4928 for (flow_idx = req->clear_tail;
4929 CIRC_CNT(req->setup_head, flow_idx,
4935 flow = &req->flows[flow_idx];
5005 struct tid_rdma_request *req = ack_to_tid_req(e);
5019 (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
5057 req = wqe_to_tid_req(wqe);
5059 wqe->lpsn, req);
5090 req = wqe_to_tid_req(wqe);
5093 if (!req->comp_seg || req->cur_seg == req->comp_seg)
5097 wqe->psn, wqe->lpsn, req);
5103 req->clear_tail = CIRC_NEXT(req->clear_tail,
5105 if (++req->cur_seg < req->total_segs) {
5106 if (!CIRC_CNT(req->setup_head, req->clear_tail,
5125 req = wqe_to_tid_req(wqe);
5127 if (!req->comp_seg) {
5131 req = wqe_to_tid_req(wqe);
5135 CIRC_PREV(req->setup_head,
5186 struct tid_rdma_request *req, *nreq;
5197 req = ack_to_tid_req(e);
5211 if (!req->ack_seg || req->ack_seg == req->total_segs)
5216 req = ack_to_tid_req(e);
5221 req);
5227 req->ack_seg == req->comp_seg)
5237 req->ack_seg +=
5239 CIRC_CNT(req->clear_tail, req->acked_tail,
5242 req->acked_tail = req->clear_tail;
5245 * req->clear_tail points to the segment currently being
5249 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5250 if (req->ack_seg != req->total_segs)
5252 req->state = TID_REQUEST_COMPLETE;
5266 req = ack_to_tid_req(e);
5271 * req could be pointing at the previous ack queue entry
5277 full_flow_psn(&req->flows[flow],
5278 req->flows[flow].flow_state.lpsn)) > 0))) {
5281 * requests. Therefore, we NAK with the req->acked_tail
5283 * this point as the req->clear_tail segment for the
5287 req = ack_to_tid_req(e);
5288 flow = req->acked_tail;
5289 } else if (req->ack_seg == req->total_segs &&
5295 req);
5456 struct tid_rdma_request *req;
5468 req = ack_to_tid_req(prev);
5469 if (req->ack_seg != req->total_segs) {