Lines matching references to "req"

111 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req);
112 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
115 struct tid_rdma_request *req);
253 * Therefore, when priv->pkts_ps is used to calculate req->cur_seg
254 * during retry, it will lead to req->cur_seg = 0, which is exactly
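The two comment fragments above (source lines 253-254) describe a retry-path calculation in which priv->pkts_ps, the packets-per-segment count, divides a PSN offset to recover req->cur_seg; the listing cuts the sentence short, but the point being made is that integer division yields 0 whenever the retry restarts inside the first segment. A minimal sketch of that arithmetic follows, with hypothetical names and values chosen for illustration (this is a model, not the driver's code):

/*
 * Illustrative model only: recovering a segment index from a PSN offset
 * by integer division.  "pkts_ps" is the assumed packets-per-segment
 * value; the numbers are examples, not taken from the driver.
 */
#include <stdio.h>

static unsigned int cur_seg_from_psn_delta(unsigned int delta_psn,
                                           unsigned int pkts_ps)
{
        /* Integer division: a delta smaller than one full segment -> 0. */
        return delta_psn / pkts_ps;
}

int main(void)
{
        unsigned int pkts_ps = 64;  /* e.g. 256 KiB segment at 4 KiB MTU */

        /* A retry restarting inside the first segment gives cur_seg = 0. */
        printf("%u\n", cur_seg_from_psn_delta(10, pkts_ps));  /* 0 */
        printf("%u\n", cur_seg_from_psn_delta(64, pkts_ps));  /* 1 */
        return 0;
}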
898 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
901 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
935 trace_hfi1_tid_pageset(flow->req->qp, setcount,
1034 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1037 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
1091 struct tid_rdma_request *req = flow->req;
1093 u32 length = flow->req->seg_len;
1097 while (length && req->isge < ss->num_sge) {
1104 if (++req->isge < ss->num_sge)
1105 *sge = ss->sg_list[req->isge - 1];
1117 flow->length = flow->req->seg_len - length;
1118 *last = req->isge != ss->num_sge;
1128 dd = flow->req->rcd->dd;
1144 struct hfi1_devdata *dd = flow->req->rcd->dd;
1173 * segment. All segments are of length flow->req->seg_len.
1183 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1192 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1220 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1293 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1308 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1347 flow->req->qp, flow->tidcnt - 1,
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1416 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1423 * @req: TID RDMA request for which the segment/flow is being set up
1440 * @req points to struct tid_rdma_request of which the segments are a part. The
1441 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1442 * req->flow_idx is the index of the flow which has been prepared in this
1443 * invocation of function call. With flow = &req->flows[req->flow_idx],
1452 * For the queuing, caller must hold the flow->req->qp s_lock from the send
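Source lines 1423-1452 above are fragments of the kernel-doc for hfi1_kern_exp_rcv_setup(): one flow per segment is prepared in req->flows[], and the function body (lines 1465-1534) guards and advances the ring with CIRC_SPACE()/CIRC_CNT() and a power-of-two MAX_FLOWS mask, with req->setup_head as the producer cursor and req->clear_tail as the consumer cursor (hfi1_kern_exp_rcv_clear() at line 1556 advances the tail, and hfi1_kern_exp_rcv_clear_all() at line 1600 drains the ring until CIRC_CNT() is zero). Below is a self-contained model of that index arithmetic; the macro bodies follow the kernel's <linux/circ_buf.h>, and the MAX_FLOWS value here is an assumption for the example only:

/*
 * Illustrative model of the flow-ring indices seen throughout this listing:
 * setup_head is the producer, clear_tail the consumer, MAX_FLOWS a power
 * of two so indices wrap with a simple mask.
 */
#include <assert.h>

#define MAX_FLOWS 32 /* assumed power of two, for illustration */

#define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
#define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

struct flow_ring {
        unsigned short setup_head;      /* next flow slot to be programmed */
        unsigned short clear_tail;      /* oldest flow still holding TIDs */
};

static int ring_setup_one(struct flow_ring *r)
{
        if (!CIRC_SPACE(r->setup_head, r->clear_tail, MAX_FLOWS))
                return -1;                              /* ring full */
        r->setup_head = (r->setup_head + 1) & (MAX_FLOWS - 1);
        return 0;
}

static int ring_clear_one(struct flow_ring *r)
{
        if (!CIRC_CNT(r->setup_head, r->clear_tail, MAX_FLOWS))
                return -1;                              /* nothing to clear */
        r->clear_tail = (r->clear_tail + 1) & (MAX_FLOWS - 1);
        return 0;
}

int main(void)
{
        struct flow_ring r = { 0, 0 };
        int ret;

        ret = ring_setup_one(&r);               /* program one flow */
        assert(ret == 0);
        assert(CIRC_CNT(r.setup_head, r.clear_tail, MAX_FLOWS) == 1);

        ret = ring_clear_one(&r);               /* release it again */
        assert(ret == 0);
        ret = ring_clear_one(&r);               /* ring is now empty */
        assert(ret == -1);
        return 0;
}

The other per-request cursors seen in the listing (req->flow_idx, req->acked_tail) move through the same ring with the same mask; the in-function comment at source line 1863 ("req->flow_idx should only be one slot behind req->setup_head") is one consequence of this arrangement.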
1461 int hfi1_kern_exp_rcv_setup(struct tid_rdma_request *req,
1463 __must_hold(&req->qp->s_lock)
1465 struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1466 struct hfi1_ctxtdata *rcd = req->rcd;
1467 struct hfi1_qp_priv *qpriv = req->qp->priv;
1470 u16 clear_tail = req->clear_tail;
1472 lockdep_assert_held(&req->qp->s_lock);
1479 if (!CIRC_SPACE(req->setup_head, clear_tail, MAX_FLOWS) ||
1480 CIRC_CNT(req->setup_head, clear_tail, MAX_FLOWS) >=
1481 req->n_flows)
1490 hfi1_wait_kmem(flow->req->qp);
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1534 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1553 int hfi1_kern_exp_rcv_clear(struct tid_rdma_request *req)
1554 __must_hold(&req->qp->s_lock)
1556 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1557 struct hfi1_ctxtdata *rcd = req->rcd;
1562 lockdep_assert_held(&req->qp->s_lock);
1564 if (!CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS))
1580 req->clear_tail = (req->clear_tail + 1) & (MAX_FLOWS - 1);
1582 if (fqp == req->qp) {
1596 void hfi1_kern_exp_rcv_clear_all(struct tid_rdma_request *req)
1597 __must_hold(&req->qp->s_lock)
1600 while (CIRC_CNT(req->setup_head, req->clear_tail, MAX_FLOWS)) {
1601 if (hfi1_kern_exp_rcv_clear(req))
1608 * @req: the tid rdma request to be cleaned
1610 static void hfi1_kern_exp_rcv_free_flows(struct tid_rdma_request *req)
1612 kfree(req->flows);
1613 req->flows = NULL;
1631 static int hfi1_kern_exp_rcv_alloc_flows(struct tid_rdma_request *req,
1637 if (likely(req->flows))
1640 req->rcd->numa_id);
1645 flows[i].req = req;
1650 req->flows = flows;
1655 struct tid_rdma_request *req)
1669 req->qp = qp;
1670 req->rcd = qpriv->rcd;
1681 static struct tid_rdma_flow *find_flow_ib(struct tid_rdma_request *req,
1687 head = req->setup_head;
1688 tail = req->clear_tail;
1691 flow = &req->flows[tail];
1707 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1708 struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1709 struct rvt_qp *qp = req->qp;
1719 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1749 req->cur_seg * req->seg_len + flow->sent);
1770 req->cur_seg++;
1772 req->ack_pending++;
1773 req->flow_idx = (req->flow_idx + 1) & (MAX_FLOWS - 1);
1793 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
1801 wqe->lpsn, req);
1807 if (req->state == TID_REQUEST_SYNC) {
1811 hfi1_kern_clear_hw_flow(req->rcd, qp);
1813 req->state = TID_REQUEST_ACTIVE;
1818 * have been allocated before. In this case, req->flow_idx should
1819 * fall behind req->setup_head.
1821 if (req->flow_idx == req->setup_head) {
1823 if (req->state == TID_REQUEST_RESEND) {
1829 restart_sge(&qp->s_sge, wqe, req->s_next_psn,
1831 req->isge = 0;
1832 req->state = TID_REQUEST_ACTIVE;
1840 req->state = TID_REQUEST_SYNC;
1849 * The following call will advance req->setup_head after
1852 if (hfi1_kern_exp_rcv_setup(req, &qp->s_sge, &last)) {
1853 req->state = TID_REQUEST_QUEUED;
1863 /* req->flow_idx should only be one slot behind req->setup_head */
1864 flow = &req->flows[req->flow_idx];
1870 flow->flow_state.ib_spsn = req->s_next_psn;
1876 req->s_next_psn += flow->npkts;
1896 struct tid_rdma_request *req;
1900 req = ack_to_tid_req(e);
1903 flow = &req->flows[req->setup_head];
1936 req->clear_tail = req->setup_head;
1954 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1956 req->flow_idx = req->setup_head;
1959 req->setup_head = (req->setup_head + 1) & (MAX_FLOWS - 1);
1969 req->n_flows = qpriv->tid_rdma.local.max_read;
1970 req->state = TID_REQUEST_ACTIVE;
1971 req->cur_seg = 0;
1972 req->comp_seg = 0;
1973 req->ack_seg = 0;
1974 req->isge = 0;
1975 req->seg_len = qpriv->tid_rdma.local.max_len;
1976 req->total_len = len;
1977 req->total_segs = 1;
1978 req->r_flow_psn = e->psn;
1981 req);
1994 struct tid_rdma_request *req;
2020 req = ack_to_tid_req(e);
2021 req->r_flow_psn = psn;
2022 trace_hfi1_tid_req_rcv_err(qp, 0, e->opcode, e->psn, e->lpsn, req);
2037 if (psn != e->psn || len != req->total_len)
2055 * req->clear_tail is advanced). However, when an earlier
2077 if (req->state == TID_REQUEST_RESEND) {
2078 req->state = TID_REQUEST_RESEND_ACTIVE;
2079 } else if (req->state == TID_REQUEST_INIT_RESEND) {
2080 req->state = TID_REQUEST_INIT;
2091 if (old_req || req->state == TID_REQUEST_INIT ||
2092 (req->state == TID_REQUEST_SYNC && !req->cur_seg)) {
2099 req = ack_to_tid_req(e);
2101 req->state == TID_REQUEST_INIT)
2102 req->state = TID_REQUEST_INIT_RESEND;
2118 if (req->clear_tail == req->setup_head)
2126 if (CIRC_CNT(req->flow_idx, req->clear_tail, MAX_FLOWS)) {
2127 fstate = &req->flows[req->clear_tail].flow_state;
2129 CIRC_CNT(req->flow_idx, req->clear_tail,
2131 req->flow_idx =
2132 CIRC_ADD(req->clear_tail,
2144 if (CIRC_CNT(req->setup_head, req->flow_idx,
2146 req->cur_seg = delta_psn(psn, e->psn);
2147 req->state = TID_REQUEST_RESEND_ACTIVE;
2161 req = ack_to_tid_req(e);
2163 e->lpsn, req);
2165 req->cur_seg == req->comp_seg ||
2166 req->state == TID_REQUEST_INIT ||
2167 req->state == TID_REQUEST_INIT_RESEND) {
2168 if (req->state == TID_REQUEST_INIT)
2169 req->state = TID_REQUEST_INIT_RESEND;
2173 CIRC_CNT(req->flow_idx,
2174 req->clear_tail,
2176 req->flow_idx = req->clear_tail;
2177 req->state = TID_REQUEST_RESEND;
2178 req->cur_seg = req->comp_seg;
2349 struct tid_rdma_request *req = &epriv->tid_req;
2351 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2366 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2398 req->clear_tail = (req->clear_tail + 1) &
2419 struct tid_rdma_request *req = NULL;
2430 req = wqe_to_tid_req(wqe);
2437 return req;
2455 struct tid_rdma_request *req;
2470 req = find_tid_request(qp, ipsn, IB_WR_TID_RDMA_READ);
2471 if (unlikely(!req))
2474 flow = &req->flows[req->clear_tail];
2501 len = restart_sge(&ss, req->e.swqe, ipsn, pmtu);
2513 req->ack_pending--;
2528 trace_hfi1_tid_req_rcv_read_resp(qp, 0, req->e.swqe->wr.opcode,
2529 req->e.swqe->psn, req->e.swqe->lpsn,
2530 req);
2531 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2534 hfi1_kern_exp_rcv_clear(req);
2540 if (++req->comp_seg >= req->total_segs) {
2542 req->state = TID_REQUEST_COMPLETE;
2550 if ((req->state == TID_REQUEST_SYNC &&
2551 req->comp_seg == req->cur_seg) ||
2555 if (req->state == TID_REQUEST_SYNC)
2556 req->state = TID_REQUEST_ACTIVE;
2583 struct tid_rdma_request *req;
2591 req = wqe_to_tid_req(wqe);
2592 hfi1_kern_exp_rcv_clear_all(req);
2632 struct tid_rdma_request *req;
2637 req = wqe_to_tid_req(wqe);
2638 flow = &req->flows[req->clear_tail];
2664 struct tid_rdma_request *req;
2740 req = wqe_to_tid_req(wqe);
2742 wqe->lpsn, req);
2756 flow = &req->flows[req->clear_tail];
2758 req->clear_tail,
2865 struct tid_rdma_request *req;
2942 req = ack_to_tid_req(e);
2943 if (req->comp_seg == req->cur_seg)
2945 flow = &req->flows[req->clear_tail];
2950 e->lpsn, req);
2951 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
3049 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3058 flow = find_flow_ib(req, *bth2, &fidx);
3065 req);
3069 fidx = req->acked_tail;
3070 flow = &req->flows[fidx];
3071 *bth2 = mask_psn(req->r_ack_psn);
3107 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3127 req->flow_idx = fidx;
3129 req->clear_tail = fidx;
3133 wqe->lpsn, req);
3134 req->state = TID_REQUEST_ACTIVE;
3140 for (; CIRC_CNT(req->setup_head, fidx, MAX_FLOWS);
3142 req->flows[fidx].sent = 0;
3143 req->flows[fidx].pkt = 0;
3144 req->flows[fidx].tid_idx = 0;
3145 req->flows[fidx].tid_offset = 0;
3146 req->flows[fidx].resync_npkts = 0;
3154 req = wqe_to_tid_req(wqe);
3155 req->cur_seg = req->ack_seg;
3156 fidx = req->acked_tail;
3157 /* Pull req->clear_tail back */
3158 req->clear_tail = fidx;
3215 struct tid_rdma_request *req;
3230 req = wqe_to_tid_req(prev);
3231 if (req->ack_seg != req->total_segs)
3249 req = wqe_to_tid_req(prev);
3250 if (req->ack_seg != req->total_segs)
3371 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
3380 req->n_flows = remote->max_write;
3381 req->state = TID_REQUEST_ACTIVE;
3460 * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3462 * [request: qp->s_tail_ack_queue, segment:req->cur_seg]
3466 struct tid_rdma_request *req;
3509 req = ack_to_tid_req(e);
3511 e->lpsn, req);
3513 if (req->alloc_seg >= req->total_segs)
3542 npkts = rvt_div_round_up_mtu(qp, req->seg_len);
3554 * If overtaking req->acked_tail, send an RNR NAK. Because the
3560 if (!CIRC_SPACE(req->setup_head, req->acked_tail,
3569 ret = hfi1_kern_exp_rcv_setup(req, &req->ss, &last);
3576 req->alloc_seg++;
3602 qp->r_psn = e->psn + req->alloc_seg;
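In the write-allocation fragments that end here, source line 3542 sizes a segment in packets with rvt_div_round_up_mtu(qp, req->seg_len), a round-up division of the segment length by the QP MTU, and line 1876 earlier shows the IB PSN advancing by flow->npkts per flow. A small sketch of that arithmetic, using assumed numbers (256 KiB segment, 4 KiB MTU); the helper below only mimics the round-up division, it is not the rdmavt implementation:

/*
 * Minimal sketch of the packets-per-segment arithmetic: round-up division
 * of the segment length by the QP MTU.  Values are assumptions chosen for
 * illustration.
 */
#include <stdio.h>

static unsigned int div_round_up(unsigned int len, unsigned int pmtu)
{
        return (len + pmtu - 1) / pmtu;
}

int main(void)
{
        unsigned int seg_len = 256 * 1024;      /* assumed segment length */
        unsigned int pmtu = 4096;               /* 4 KiB MTU */

        /* 256 KiB / 4 KiB = 64 packets, so one flow spans 64 PSNs. */
        printf("npkts = %u\n", div_round_up(seg_len, pmtu));       /* 64 */
        printf("npkts = %u\n", div_round_up(seg_len + 1, pmtu));   /* 65 */
        return 0;
}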
3667 struct tid_rdma_request *req;
3720 req = ack_to_tid_req(e);
3728 req->state = TID_REQUEST_INIT;
3742 (req->setup_head != req->clear_tail ||
3743 req->clear_tail != req->acked_tail))
3757 req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3758 req->state = TID_REQUEST_INIT;
3759 req->cur_seg = 0;
3760 req->comp_seg = 0;
3761 req->ack_seg = 0;
3762 req->alloc_seg = 0;
3763 req->isge = 0;
3764 req->seg_len = qpriv->tid_rdma.local.max_len;
3765 req->total_len = len;
3766 req->total_segs = num_segs;
3767 req->r_flow_psn = e->psn;
3768 req->ss.sge = e->rdma_sge;
3769 req->ss.num_sge = 1;
3771 req->flow_idx = req->setup_head;
3772 req->clear_tail = req->setup_head;
3773 req->acked_tail = req->setup_head;
3786 req);
3841 struct tid_rdma_request *req = &epriv->tid_req;
3849 req);
3852 flow = &req->flows[req->flow_idx];
3853 switch (req->state) {
3862 if (req->cur_seg >= req->alloc_seg)
3872 req->state = TID_REQUEST_ACTIVE;
3873 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3874 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3880 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3881 req->flow_idx = CIRC_NEXT(req->flow_idx, MAX_FLOWS);
3882 if (!CIRC_CNT(req->setup_head, req->flow_idx, MAX_FLOWS))
3883 req->state = TID_REQUEST_ACTIVE;
3891 req->cur_seg++;
4005 struct tid_rdma_request *req =
4008 hfi1_kern_exp_rcv_clear_all(req);
4045 struct tid_rdma_request *req;
4085 req = wqe_to_tid_req(wqe);
4091 if (!CIRC_SPACE(req->setup_head, req->acked_tail, MAX_FLOWS))
4106 flow = &req->flows[req->setup_head];
4119 flow->length = min_t(u32, req->seg_len,
4120 (wqe->length - (req->comp_seg * req->seg_len)));
4133 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4135 req->comp_seg++;
4156 wqe->lpsn, req);
4162 req->r_last_acked = mask_psn(wqe->psn - 1);
4164 req->acked_tail = req->setup_head;
4168 req->setup_head = CIRC_NEXT(req->setup_head, MAX_FLOWS);
4169 req->state = TID_REQUEST_ACTIVE;
4179 req->comp_seg == req->total_segs) {
4209 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4210 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4212 struct rvt_qp *qp = req->qp;
4231 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4254 rvt_div_round_up_mtu(qp, req->seg_len) >
4256 req->state = TID_REQUEST_SYNC;
4276 struct tid_rdma_request *req;
4294 req = ack_to_tid_req(e);
4295 flow = &req->flows[req->clear_tail];
4322 len = req->comp_seg * req->seg_len;
4326 if (unlikely(req->total_len - len < pmtu))
4336 ss.total_len = req->total_len;
4347 hfi1_kern_exp_rcv_clear(req);
4350 req->comp_seg++;
4362 req);
4388 if (req->cur_seg < req->total_segs ||
4397 hfi1_mod_tid_reap_timer(req->qp);
4399 hfi1_stop_tid_reap_timer(req->qp);
4432 struct tid_rdma_request *req = ack_to_tid_req(e);
4433 struct tid_rdma_flow *flow = &req->flows[iflow];
4503 struct tid_rdma_request *req;
4542 req = wqe_to_tid_req(wqe);
4544 wqe->lpsn, req);
4545 flow = &req->flows[req->acked_tail];
4546 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4555 req->ack_seg < req->cur_seg) {
4556 req->ack_seg++;
4558 req->acked_tail = CIRC_NEXT(req->acked_tail, MAX_FLOWS);
4559 req->r_last_acked = flow->flow_state.resp_ib_psn;
4561 wqe->lpsn, req);
4562 if (req->ack_seg == req->total_segs) {
4563 req->state = TID_REQUEST_COMPLETE;
4573 req = wqe_to_tid_req(wqe);
4575 flow = &req->flows[req->acked_tail];
4576 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4580 wqe->lpsn, req);
4588 req->ack_seg < req->cur_seg)
4611 req->ack_seg == req->total_segs) ||
4617 if (req->ack_seg == req->comp_seg) {
4636 req = wqe_to_tid_req(wqe);
4637 flow = &req->flows[req->acked_tail];
4648 req->r_ack_psn = psn;
4665 rptr = req;
4707 req->cur_seg = req->ack_seg;
4721 if (!req->flows)
4723 flow = &req->flows[req->acked_tail];
4727 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail,
4729 req->r_ack_psn = mask_psn(be32_to_cpu(ohdr->bth[2]));
4730 req->cur_seg = req->ack_seg;
4804 struct tid_rdma_request *req;
4822 req = wqe_to_tid_req(wqe);
4823 trace_hfi1_tid_req_tid_retry_timeout(/* req */
4824 qp, 0, wqe->wr.opcode, wqe->psn, wqe->lpsn, req);
4849 struct tid_rdma_request *req = wqe_to_tid_req(wqe);
4850 struct tid_rdma_flow *flow = &req->flows[fidx];
4877 struct tid_rdma_request *req;
4935 req = ack_to_tid_req(e);
4937 e->lpsn, req);
4940 for (flow_idx = req->clear_tail;
4941 CIRC_CNT(req->setup_head, flow_idx,
4947 flow = &req->flows[flow_idx];
5017 struct tid_rdma_request *req = ack_to_tid_req(e);
5031 (e->opcode == TID_OP(WRITE_REQ) && req->cur_seg < req->alloc_seg &&
5069 req = wqe_to_tid_req(wqe);
5071 wqe->lpsn, req);
5102 req = wqe_to_tid_req(wqe);
5105 if (!req->comp_seg || req->cur_seg == req->comp_seg)
5109 wqe->psn, wqe->lpsn, req);
5115 req->clear_tail = CIRC_NEXT(req->clear_tail,
5117 if (++req->cur_seg < req->total_segs) {
5118 if (!CIRC_CNT(req->setup_head, req->clear_tail,
5137 req = wqe_to_tid_req(wqe);
5139 if (!req->comp_seg) {
5143 req = wqe_to_tid_req(wqe);
5147 CIRC_PREV(req->setup_head,
5198 struct tid_rdma_request *req, *nreq;
5209 req = ack_to_tid_req(e);
5223 if (!req->ack_seg || req->ack_seg == req->total_segs)
5228 req = ack_to_tid_req(e);
5233 req);
5239 req->ack_seg == req->comp_seg)
5249 req->ack_seg +=
5251 CIRC_CNT(req->clear_tail, req->acked_tail,
5254 req->acked_tail = req->clear_tail;
5257 * req->clear_tail points to the segment currently being
5261 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5262 if (req->ack_seg != req->total_segs)
5264 req->state = TID_REQUEST_COMPLETE;
5278 req = ack_to_tid_req(e);
5283 * req could be pointing at the previous ack queue entry
5289 full_flow_psn(&req->flows[flow],
5290 req->flows[flow].flow_state.lpsn)) > 0))) {
5293 * requests. Therefore, we NAK with the req->acked_tail
5295 * this point as the req->clear_tail segment for the
5299 req = ack_to_tid_req(e);
5300 flow = req->acked_tail;
5301 } else if (req->ack_seg == req->total_segs &&
5307 req);
5468 struct tid_rdma_request *req;
5480 req = ack_to_tid_req(prev);
5481 if (req->ack_seg != req->total_segs) {