Lines Matching defs:flow
37 /* Maximum number of packets within a flow generation. */
134 struct tid_rdma_flow *flow,
525 * This should be done after the hardware flow and
695 * kern_reserve_flow - allocate a hardware flow
697 * @last - the index of the preferred flow. Use RXE_NUM_TID_FLOWS to
701 * flow for use in receiving KDETH data packets. If a preferred flow is
702 * specified the function will attempt to reserve that flow again, if
716 /* Attempt to reserve the preferred flow index */
783 /* The QP already has an allocated flow */
797 /* Generation received in a RESYNC overrides default flow generation */
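The kern_reserve_flow() comments above (file lines 695-797) describe picking a hardware flow from a small fixed pool, optionally retrying a preferred index first. The following standalone sketch only models that selection policy; NUM_TID_FLOWS, struct flow_pool, and reserve_flow() are made-up names, and the real driver tracks reservations per receive context rather than in a plain bitmask.

/* Minimal userspace model of preferred-index flow reservation.  The pool
 * size and bitmask scheme mirror the idea in the kern_reserve_flow()
 * comment; none of this is the actual hfi1 code.
 */
#include <stdio.h>

#define NUM_TID_FLOWS 32                    /* assumed pool size */

struct flow_pool {
	unsigned long used;                 /* one bit per hardware flow */
};

/* Try @last first (pass NUM_TID_FLOWS for "don't care"), else scan. */
static int reserve_flow(struct flow_pool *p, int last)
{
	int i;

	if (last < NUM_TID_FLOWS && !(p->used & (1UL << last))) {
		p->used |= 1UL << last;
		return last;
	}
	for (i = 0; i < NUM_TID_FLOWS; i++) {
		if (!(p->used & (1UL << i))) {
			p->used |= 1UL << i;
			return i;
		}
	}
	return -1;                          /* no hardware flow available */
}

int main(void)
{
	struct flow_pool pool = { 0 };

	printf("don't care: %d\n", reserve_flow(&pool, NUM_TID_FLOWS));
	printf("preferred 5: %d\n", reserve_flow(&pool, 5));
	return 0;
}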
875 static u32 tid_rdma_find_phys_blocks_4k(struct tid_rdma_flow *flow,
892 trace_hfi1_tid_flow_page(flow->req->qp, flow, 0, 0, 0, vaddr);
895 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 0, 0,
929 trace_hfi1_tid_pageset(flow->req->qp, setcount,
1013 static u32 tid_rdma_find_phys_blocks_8k(struct tid_rdma_flow *flow,
1027 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 0, v0);
1030 trace_hfi1_tid_flow_page(flow->req->qp, flow, i, 1, 1, v1);
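tid_rdma_find_phys_blocks_4k/8k (file lines 875-1030 above) walk the pinned page list and coalesce physically contiguous pages into pagesets. The sketch below models only that coalescing step over an array of simulated physical addresses; the real helpers additionally split sets to power-of-two sizes and handle the 4K/8K receive entry formats, which are omitted here, and struct pageset/find_phys_blocks() are illustrative names.

/* Model of coalescing pages into physically contiguous pagesets.
 * Addresses are simulated; the real code works on struct page pointers.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct pageset {
	int idx;                            /* first page in the set */
	int count;                          /* number of contiguous pages */
};

static unsigned int find_phys_blocks(const unsigned long *phys, int npages,
				     struct pageset *sets)
{
	unsigned int nsets = 0;
	int i;

	if (!npages)
		return 0;

	sets[nsets].idx = 0;
	sets[nsets].count = 1;
	for (i = 1; i < npages; i++) {
		if (phys[i] == phys[i - 1] + PAGE_SIZE) {
			sets[nsets].count++;        /* still contiguous */
		} else {
			nsets++;                    /* start a new pageset */
			sets[nsets].idx = i;
			sets[nsets].count = 1;
		}
	}
	return nsets + 1;
}

int main(void)
{
	unsigned long phys[] = { 0x1000, 0x2000, 0x3000, 0x9000, 0xa000 };
	struct pageset sets[5];
	unsigned int i, n = find_phys_blocks(phys, 5, sets);

	for (i = 0; i < n; i++)
		printf("set %u: idx %d, %d pages\n", i, sets[i].idx, sets[i].count);
	return 0;
}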
1080 static u32 kern_find_pages(struct tid_rdma_flow *flow,
1084 struct tid_rdma_request *req = flow->req;
1086 u32 length = flow->req->seg_len;
1110 flow->length = flow->req->seg_len - length;
1115 static void dma_unmap_flow(struct tid_rdma_flow *flow)
1121 dd = flow->req->rcd->dd;
1122 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1134 static int dma_map_flow(struct tid_rdma_flow *flow, struct page **pages)
1137 struct hfi1_devdata *dd = flow->req->rcd->dd;
1140 for (i = 0, pset = &flow->pagesets[0]; i < flow->npagesets;
1150 dma_unmap_flow(flow);
1159 static inline bool dma_mapped(struct tid_rdma_flow *flow)
1161 return !!flow->pagesets[0].mapped;
1166 * segment. All segments are of length flow->req->seg_len.
1168 static int kern_get_phys_blocks(struct tid_rdma_flow *flow,
1175 if (flow->npagesets) {
1176 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head,
1177 flow);
1178 if (!dma_mapped(flow))
1179 return dma_map_flow(flow, pages);
1183 npages = kern_find_pages(flow, pages, ss, last);
1185 if (flow->req->qp->pmtu == enum_to_mtu(OPA_MTU_4096))
1186 flow->npagesets =
1187 tid_rdma_find_phys_blocks_4k(flow, pages, npages,
1188 flow->pagesets);
1190 flow->npagesets =
1191 tid_rdma_find_phys_blocks_8k(flow, pages, npages,
1192 flow->pagesets);
1194 return dma_map_flow(flow, pages);
1197 static inline void kern_add_tid_node(struct tid_rdma_flow *flow,
1201 struct kern_tid_node *node = &flow->tnode[flow->tnode_cnt++];
1203 WARN_ON_ONCE(flow->tnode_cnt >=
1213 trace_hfi1_tid_node_add(flow->req->qp, s, flow->tnode_cnt - 1,
1230 static int kern_alloc_tids(struct tid_rdma_flow *flow)
1232 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1238 flow->tnode_cnt = 0;
1239 ngroups = flow->npagesets / dd->rcv_entries.group_size;
1245 kern_add_tid_node(flow, rcd, "complete groups", group,
1253 if (pageidx >= flow->npagesets)
1259 use = min_t(u32, flow->npagesets - pageidx,
1261 kern_add_tid_node(flow, rcd, "used groups", used, use);
1264 if (pageidx >= flow->npagesets)
1280 use = min_t(u32, flow->npagesets - pageidx, group->size);
1281 kern_add_tid_node(flow, rcd, "complete continue", group, use);
1283 if (pageidx >= flow->npagesets)
1286 trace_hfi1_msg_alloc_tids(flow->req->qp, " insufficient tids: needed ",
1287 (u64)flow->npagesets);
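The kern_alloc_tids()/kern_add_tid_node() fragments above hint at a staged RcvArray group allocation: whole free groups first ("complete groups"), then spare entries in partially used groups ("used groups"), then part of one more free group ("complete continue"), otherwise failing with insufficient TIDs. The sketch below is a counting-only model of that ordering; GROUP_SIZE, struct ctx, and alloc_tids() are invented, and the real code manipulates per-context group lists and records the chosen nodes.

/* Counting-only model of the three-stage TID group allocation suggested
 * by the trace strings above.  No RcvArray entries are programmed.
 */
#include <stdio.h>

#define GROUP_SIZE 8                        /* assumed RcvArray group size */

struct ctx {
	int free_groups;                    /* completely free groups */
	int used_group_room;                /* spare entries in partly used groups */
};

/* Return 0 if @npagesets pagesets can be covered, -1 if TIDs are insufficient. */
static int alloc_tids(const struct ctx *c, int npagesets)
{
	int whole = npagesets / GROUP_SIZE;     /* "complete groups" */
	int covered;

	if (whole > c->free_groups)
		whole = c->free_groups;
	covered = whole * GROUP_SIZE;

	if (covered < npagesets) {              /* "used groups" */
		int take = npagesets - covered;

		if (take > c->used_group_room)
			take = c->used_group_room;
		covered += take;
	}
	if (covered < npagesets && c->free_groups > whole)
		covered = npagesets;            /* "complete continue" */

	return covered >= npagesets ? 0 : -1;
}

int main(void)
{
	struct ctx c = { .free_groups = 2, .used_group_room = 3 };

	/* 2 whole groups (16) + 3 spare entries covers 19, but not 20 */
	printf("19 pagesets: %d\n", alloc_tids(&c, 19));
	printf("20 pagesets: %d\n", alloc_tids(&c, 20));
	return 0;
}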
1293 static void kern_program_rcv_group(struct tid_rdma_flow *flow, int grp_num,
1296 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1298 struct kern_tid_node *node = &flow->tnode[grp_num];
1301 u32 pmtu_pg = flow->req->qp->pmtu >> PAGE_SHIFT;
1312 pset = &flow->pagesets[(*pset_idx)++];
1335 flow->tid_entry[flow->tidcnt++] =
1340 flow->req->qp, flow->tidcnt - 1,
1341 flow->tid_entry[flow->tidcnt - 1]);
1344 flow->npkts += (npages + pmtu_pg - 1) >> ilog2(pmtu_pg);
1361 static void kern_unprogram_rcv_group(struct tid_rdma_flow *flow, int grp_num)
1363 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1365 struct kern_tid_node *node = &flow->tnode[grp_num];
1392 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1400 static void kern_program_rcvarray(struct tid_rdma_flow *flow)
1405 flow->npkts = 0;
1406 flow->tidcnt = 0;
1407 for (i = 0; i < flow->tnode_cnt; i++)
1408 kern_program_rcv_group(flow, i, &pset_idx);
1409 trace_hfi1_tid_flow_alloc(flow->req->qp, flow->req->setup_head, flow);
1413 * hfi1_kern_exp_rcv_setup() - set up TIDs and flow for one segment of a
1416 * @req: TID RDMA request for which the segment/flow is being set up
1421 * (1) finds a free flow entry in the flow circular buffer
1430 * (7) It also manages queuing the QP when TID/flow resources are not
1435 * req->flow_idx is the index of the flow which has been prepared in this
1436 * invocation of the function. With flow = &req->flows[req->flow_idx],
1437 * flow->tid_entry contains the TID array which the sender can use for TID RDMA
1438 * sends and flow->npkts contains number of packets required to send the
1445 * For the queuing, the caller must hold the flow->req->qp s_lock from the send
1449 * The function returns -EAGAIN if a sufficient number of TID/flow resources to
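Per the hfi1_kern_exp_rcv_setup() description above, each segment gets one flow from a per-request circular buffer and the function returns -EAGAIN so the QP can be queued and retried when TID/flow resources run out. The standalone sketch below models just that ring bookkeeping; MAX_FLOWS, struct request, and the helper names are assumptions, and the real setup_head/clear_tail indices live in struct tid_rdma_request.

/* Model of the per-request flow circular buffer: setup_head indexes the
 * next flow to prepare, clear_tail the next one to release.  -EAGAIN
 * stands in for "queue the QP and try again later".
 */
#include <stdio.h>
#include <errno.h>

#define MAX_FLOWS 2                         /* assumed ring size (power of 2) */
#define CIRC_NEXT(i) (((i) + 1) & (MAX_FLOWS - 1))

struct request {
	int setup_head;
	int clear_tail;
	int nflows;                         /* flows currently in the ring */
};

static int exp_rcv_setup(struct request *req)
{
	if (req->nflows == MAX_FLOWS)       /* ring full: caller must wait */
		return -EAGAIN;
	/* ...TID allocation and RcvArray programming would happen here... */
	req->setup_head = CIRC_NEXT(req->setup_head);
	req->nflows++;
	return 0;
}

static void exp_rcv_clear(struct request *req)
{
	if (!req->nflows)
		return;
	req->clear_tail = CIRC_NEXT(req->clear_tail);
	req->nflows--;
}

int main(void)
{
	struct request req = { 0 };

	/* prints 0 0 then -EAGAIN once the ring is full */
	printf("%d %d %d\n", exp_rcv_setup(&req), exp_rcv_setup(&req),
	       exp_rcv_setup(&req));
	exp_rcv_clear(&req);
	printf("%d\n", exp_rcv_setup(&req));    /* 0 after a segment is freed */
	return 0;
}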
1458 struct tid_rdma_flow *flow = &req->flows[req->setup_head];
1467 * We return an error if either (a) we don't have space in the flow
1482 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
1483 hfi1_wait_kmem(flow->req->qp);
1488 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1496 if (kern_alloc_tids(flow))
1500 * tidarray and enable the HW flow
1502 kern_program_rcvarray(flow);
1505 * Setup the flow state with relevant information.
1508 * The flow is set up here as this is the most accurate time and place
1509 * to do so. Doing so at a later time runs the risk of the flow data in
1512 memset(&flow->flow_state, 0x0, sizeof(flow->flow_state));
1513 flow->idx = qpriv->flow_state.index;
1514 flow->flow_state.generation = qpriv->flow_state.generation;
1515 flow->flow_state.spsn = qpriv->flow_state.psn;
1516 flow->flow_state.lpsn = flow->flow_state.spsn + flow->npkts - 1;
1517 flow->flow_state.r_next_psn =
1518 full_flow_psn(flow, flow->flow_state.spsn);
1519 qpriv->flow_state.psn += flow->npkts;
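File lines 1512-1519 above carve the segment's KDETH PSN range out of the per-QP counter: spsn is taken from qpriv->flow_state.psn, lpsn is spsn + npkts - 1, full_flow_psn() folds in the generation, and the counter then advances by npkts. The sketch below reproduces that arithmetic with an assumed 11-bit sequence field standing in for HFI1_KDETH_BTH_SEQ_MASK; it is illustrative, not the driver's definition.

/* Model of carving a flow's KDETH PSN range and packing generation +
 * sequence the way full_flow_psn() does.  SEQ_SHIFT/SEQ_MASK are assumed.
 */
#include <stdio.h>

#define SEQ_SHIFT 11
#define SEQ_MASK  ((1u << SEQ_SHIFT) - 1)   /* low bits: sequence number */

struct flow_state {
	unsigned int generation;
	unsigned int spsn, lpsn;            /* KDETH sequence numbers */
};

static unsigned int full_flow_psn(const struct flow_state *fs, unsigned int psn)
{
	return (fs->generation << SEQ_SHIFT) | (psn & SEQ_MASK);
}

int main(void)
{
	unsigned int qp_psn = 100;          /* per-QP running counter */
	unsigned int npkts = 7;             /* packets in this segment */
	struct flow_state fs = { .generation = 3 };

	fs.spsn = qp_psn;                   /* first PSN of the flow */
	fs.lpsn = fs.spsn + npkts - 1;      /* last PSN of the flow */
	qp_psn += npkts;                    /* advance the QP counter */

	printf("spsn %u lpsn %u r_next_psn 0x%x\n",
	       fs.spsn, fs.lpsn, full_flow_psn(&fs, fs.spsn));
	return 0;
}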
1521 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1530 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1535 static void hfi1_tid_rdma_reset_flow(struct tid_rdma_flow *flow)
1537 flow->npagesets = 0;
1542 * release the flow and TID HW/SW resources for that segment. The segments for a
1549 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
1556 /* Exit if we have nothing in the flow circular buffer */
1562 for (i = 0; i < flow->tnode_cnt; i++)
1563 kern_unprogram_rcv_group(flow, i);
1565 flow->tnode_cnt = 0;
1570 dma_unmap_flow(flow);
1572 hfi1_tid_rdma_reset_flow(flow);
1600 * hfi1_kern_exp_rcv_free_flows - free previously allocated flow information
1678 struct tid_rdma_flow *flow;
1684 flow = &req->flows[tail];
1685 if (cmp_psn(psn, flow->flow_state.ib_spsn) >= 0 &&
1686 cmp_psn(psn, flow->flow_state.ib_lpsn) <= 0) {
1689 return flow;
1701 struct tid_rdma_flow *flow = &req->flows[req->flow_idx];
1711 *bth2 = mask_psn(flow->flow_state.ib_spsn + flow->pkt);
1712 trace_hfi1_tid_flow_build_read_pkt(qp, req->flow_idx, flow);
1715 req_addr = &flow->tid_entry[flow->tid_idx];
1716 req_len = sizeof(*flow->tid_entry) *
1717 (flow->tidcnt - flow->tid_idx);
1742 req->cur_seg * req->seg_len + flow->sent);
1746 cpu_to_be32((flow->flow_state.generation <<
1748 ((flow->flow_state.spsn + flow->pkt) &
1752 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
1762 flow->sent += *len;
1787 struct tid_rdma_flow *flow = NULL;
1797 * segments before freeing the flow.
1837 /* Allocate the flow if not yet */
1857 flow = &req->flows[req->flow_idx];
1858 flow->pkt = 0;
1859 flow->tid_idx = 0;
1860 flow->sent = 0;
1862 /* Set the first and last IB PSN for the flow in use. */
1863 flow->flow_state.ib_spsn = req->s_next_psn;
1864 flow->flow_state.ib_lpsn =
1865 flow->flow_state.ib_spsn + flow->npkts - 1;
1869 req->s_next_psn += flow->npkts;
1890 struct tid_rdma_flow *flow;
1896 flow = &req->flows[req->setup_head];
1900 if (pktlen > sizeof(flow->tid_entry))
1902 memcpy(flow->tid_entry, packet->ebuf, pktlen);
1903 flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
1909 flow->npkts = rvt_div_round_up_mtu(qp, len);
1910 for (i = 0; i < flow->tidcnt; i++) {
1912 flow->tid_entry[i]);
1913 tlen = EXP_TID_GET(flow->tid_entry[i], LEN);
1928 /* Empty the flow array */
1930 flow->pkt = 0;
1931 flow->tid_idx = 0;
1932 flow->tid_offset = 0;
1933 flow->sent = 0;
1934 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.r_req.tid_flow_qp);
1935 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
1938 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
1939 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
1940 flow->length = len;
1942 flow->flow_state.lpsn = flow->flow_state.spsn +
1943 flow->npkts - 1;
1944 flow->flow_state.ib_spsn = psn;
1945 flow->flow_state.ib_lpsn = flow->flow_state.ib_spsn + flow->npkts - 1;
1947 trace_hfi1_tid_flow_rcv_read_req(qp, req->setup_head, flow);
1948 /* Set the initial flow index to the current flow. */
1959 e->lpsn = psn + flow->npkts - 1;
2047 * == false) and the TID flow may be unusable (the
2051 * Consequently, we need to update the TID flow info every time
2344 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
2345 u32 tidentry = flow->tid_entry[flow->tid_idx];
2353 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
2354 flow->sent += *len;
2355 next_offset = flow->tid_offset + *len;
2356 last_pkt = (flow->sent >= flow->length);
2358 trace_hfi1_tid_entry_build_read_resp(qp, flow->tid_idx, tidentry);
2359 trace_hfi1_tid_flow_build_read_resp(qp, req->clear_tail, flow);
2373 KDETH_SET(resp->kdeth0, OFFSET, flow->tid_offset / om);
2379 resp->verbs_psn = cpu_to_be32(mask_psn(flow->flow_state.ib_spsn +
2380 flow->pkt));
2383 *bth1 = flow->tid_qpn;
2384 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
2386 (flow->flow_state.generation <<
2390 /* Advance to next flow */
2395 flow->tid_offset = 0;
2396 flow->tid_idx++;
2398 flow->tid_offset = next_offset;
2441 * 4. Free the TID flow resources.
2449 struct tid_rdma_flow *flow;
2467 flow = &req->flows[req->clear_tail];
2469 if (cmp_psn(ipsn, flow->flow_state.ib_lpsn)) {
2470 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2472 if (cmp_psn(kpsn, flow->flow_state.r_next_psn))
2474 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2505 flow->flow_state.r_next_psn = mask_psn(kpsn + 1);
2524 trace_hfi1_tid_flow_rcv_read_resp(qp, req->clear_tail, flow);
2539 * Clear the hw flow under two conditions:
2591 /* Free flow */
2626 struct tid_rdma_flow *flow;
2631 flow = &req->flows[req->clear_tail];
2632 hfi1_restart_rc(qp, flow->flow_state.ib_spsn, 0);
2658 struct tid_rdma_flow *flow;
2744 * After that, the flow is *not* reprogrammed and the
2749 flow = &req->flows[req->clear_tail];
2752 flow);
2755 flow->flow_state.r_next_psn);
2778 fpsn = full_flow_psn(flow,
2779 flow->flow_state.lpsn);
2786 flow->flow_state.r_next_psn =
2792 flow->idx);
2793 flow->flow_state.r_next_psn = last_psn;
2808 * Since the TID flow is able to ride through
2858 struct tid_rdma_flow *flow;
2937 flow = &req->flows[req->clear_tail];
2943 trace_hfi1_tid_flow_handle_kdeth_eflags(qp, req->clear_tail, flow);
2951 flow->flow_state.r_next_psn =
2953 flow->idx);
2955 flow->flow_state.r_next_psn;
2967 flow->flow_state.r_next_psn);
2979 if (psn == full_flow_psn(flow,
2980 flow->flow_state.lpsn))
2982 flow->flow_state.r_next_psn =
2985 flow->flow_state.r_next_psn;
3025 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
3034 * find the proper flow, set the flow index to that flow,
3035 * and reset the flow information.
3041 struct tid_rdma_flow *flow;
3049 flow = find_flow_ib(req, *bth2, &fidx);
3050 if (!flow) {
3052 qp, "!!!!!! Could not find flow to restart: bth2 ",
3061 flow = &req->flows[fidx];
3066 delta_pkts = delta_psn(*bth2, flow->flow_state.ib_spsn);
3069 full_flow_psn(flow,
3070 flow->flow_state.spsn));
3072 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3073 diff = delta_pkts + flow->resync_npkts;
3075 flow->sent = 0;
3076 flow->pkt = 0;
3077 flow->tid_idx = 0;
3078 flow->tid_offset = 0;
3080 for (tididx = 0; tididx < flow->tidcnt; tididx++) {
3081 u32 tidentry = flow->tid_entry[tididx], tidlen,
3084 flow->tid_offset = 0;
3088 flow->pkt += npkts;
3089 flow->sent += (npkts == tidnpkts ? tidlen :
3091 flow->tid_offset += npkts * qp->pmtu;
3099 flow->sent, 0);
3101 * Packet PSN is based on flow_state.spsn + flow->pkt. However,
3104 * flow and the SGE has been sufficiently advanced, we have to
3105 * adjust flow->pkt in order to calculate the correct PSN.
3107 flow->pkt -= flow->resync_npkts;
3110 if (flow->tid_offset ==
3111 EXP_TID_GET(flow->tid_entry[tididx], LEN) * PAGE_SIZE) {
3113 flow->tid_offset = 0;
3115 flow->tid_idx = tididx;
3122 trace_hfi1_tid_flow_restart_req(qp, fidx, flow);
3164 * First, clear the flow to help prevent any delayed packets from
3365 * Set the number of flows to be used based on negotiated
3393 * Heuristic for computing the RNR timeout when waiting on the flow
3395 * a flow will be available, we assume that if a QP is at position N in
3396 * the flow queue it has to wait approximately (N + 1) * (number of
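The heuristic described above (file lines 3393-3396) scales the RNR wait with the QP's position in the flow queue: roughly (N + 1) times the cost of one waiter's worth of segments. The sketch below only evaluates that proportionality with invented per-packet timing and segment sizes; the driver additionally rounds the estimate to an IB RNR timer code, which is not shown.

/* Model of the (N + 1)-proportional RNR timeout heuristic.  The per-packet
 * time, segment size, and MTU are made-up inputs for illustration.
 */
#include <stdio.h>

/* Estimated wait in microseconds for a QP at position @pos in the flow
 * queue, assuming each waiter ahead of it moves @segs segments of
 * @seg_len bytes in @mtu-sized packets taking @usec_per_pkt each.
 */
static unsigned long rnr_timeout_us(unsigned int pos, unsigned int segs,
				    unsigned int seg_len, unsigned int mtu,
				    unsigned int usec_per_pkt)
{
	unsigned int pkts_per_seg = (seg_len + mtu - 1) / mtu;

	return (unsigned long)(pos + 1) * segs * pkts_per_seg * usec_per_pkt;
}

int main(void)
{
	/* third in line (pos 2), 6 segments of 256 KiB at 4 KiB MTU, 2 us/pkt */
	printf("%lu us\n", rnr_timeout_us(2, 6, 256 * 1024, 4096, 2));
	return 0;
}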
3485 /* If all data has been received, clear the flow */
3519 /* Allocate flow if we don't have one */
3574 * Schedule an RNR NAK to be sent if (a) flow or rcv array allocation
3831 struct tid_rdma_flow *flow = NULL;
3840 flow = &req->flows[req->flow_idx];
3861 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3868 trace_hfi1_tid_flow_build_write_resp(qp, req->flow_idx, flow);
3876 flow->flow_state.resp_ib_psn = bth2;
3877 resp_addr = (void *)flow->tid_entry;
3878 resp_len = sizeof(*flow->tid_entry) * flow->tidcnt;
3908 cpu_to_be32((flow->flow_state.generation <<
3910 (flow->flow_state.spsn &
3914 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
3989 * HW flow and RcvArray resources.
4034 struct tid_rdma_flow *flow;
4094 flow = &req->flows[req->setup_head];
4095 flow->pkt = 0;
4096 flow->tid_idx = 0;
4097 flow->tid_offset = 0;
4098 flow->sent = 0;
4099 flow->resync_npkts = 0;
4100 flow->tid_qpn = be32_to_cpu(ohdr->u.tid_rdma.w_rsp.tid_flow_qp);
4101 flow->idx = (flow->tid_qpn >> TID_RDMA_DESTQP_FLOW_SHIFT) &
4104 flow->flow_state.generation = flow_psn >> HFI1_KDETH_BTH_SEQ_SHIFT;
4105 flow->flow_state.spsn = flow_psn & HFI1_KDETH_BTH_SEQ_MASK;
4106 flow->flow_state.resp_ib_psn = psn;
4107 flow->length = min_t(u32, req->seg_len,
4110 flow->npkts = rvt_div_round_up_mtu(qp, flow->length);
4111 flow->flow_state.lpsn = flow->flow_state.spsn +
4112 flow->npkts - 1;
4115 if (pktlen > sizeof(flow->tid_entry)) {
4119 memcpy(flow->tid_entry, packet->ebuf, pktlen);
4120 flow->tidcnt = pktlen / sizeof(*flow->tid_entry);
4121 trace_hfi1_tid_flow_rcv_write_resp(qp, req->setup_head, flow);
4129 for (i = 0; i < flow->tidcnt; i++) {
4131 qp, i, flow->tid_entry[i]);
4132 if (!EXP_TID_GET(flow->tid_entry[i], LEN)) {
4136 tidlen += EXP_TID_GET(flow->tid_entry[i], LEN);
4138 if (tidlen * PAGE_SIZE < flow->length) {
4147 * flow index to the current flow.
4151 /* Set acked flow index to head index */
4198 struct tid_rdma_flow *flow = &req->flows[req->clear_tail];
4202 u32 tidentry = flow->tid_entry[flow->tid_idx];
4213 *len = min_t(u32, qp->pmtu, tidlen - flow->tid_offset);
4214 flow->sent += *len;
4215 next_offset = flow->tid_offset + *len;
4216 last_pkt = (flow->tid_idx == (flow->tidcnt - 1) &&
4217 next_offset >= tidlen) || (flow->sent >= flow->length);
4218 trace_hfi1_tid_entry_build_write_data(qp, flow->tid_idx, tidentry);
4219 trace_hfi1_tid_flow_build_write_data(qp, req->clear_tail, flow);
4229 KDETH_SET(wd->kdeth0, OFFSET, flow->tid_offset / om);
4234 *bth1 = flow->tid_qpn;
4235 *bth2 = mask_psn(((flow->flow_state.spsn + flow->pkt++) &
4237 (flow->flow_state.generation <<
4241 if (flow->flow_state.lpsn + 1 +
4249 flow->tid_offset = 0;
4250 flow->tid_idx++;
4252 flow->tid_offset = next_offset;
4265 struct tid_rdma_flow *flow;
4283 flow = &req->flows[req->clear_tail];
4284 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.lpsn))) {
4285 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4287 if (cmp_psn(psn, flow->flow_state.r_next_psn))
4290 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4312 full_flow_psn(flow, flow->flow_state.spsn)) *
4334 flow->flow_state.r_next_psn = mask_psn(psn + 1);
4337 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4342 * Release the flow if one of the following conditions has been met:
4393 priv->r_next_psn_kdeth = flow->flow_state.r_next_psn;
4402 priv->s_nak_psn = flow->flow_state.r_next_psn;
4421 struct tid_rdma_flow *flow = &req->flows[iflow];
4442 *bth2 = full_flow_psn(flow, flow->flow_state.lpsn);
4448 ((flow->idx & TID_RDMA_DESTQP_FLOW_MASK) <<
4454 cpu_to_be32(flow->flow_state.resp_ib_psn);
4492 struct tid_rdma_flow *flow;
4533 flow = &req->flows[req->acked_tail];
4534 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4537 if (cmp_psn(psn, full_flow_psn(flow, flow->flow_state.spsn)) < 0 ||
4538 cmp_psn(req_psn, flow->flow_state.resp_ib_psn) < 0)
4542 full_flow_psn(flow, flow->flow_state.lpsn)) >= 0 &&
4547 req->r_last_acked = flow->flow_state.resp_ib_psn;
4563 flow = &req->flows[req->acked_tail];
4564 trace_hfi1_tid_flow_rcv_tid_ack(qp, req->acked_tail, flow);
4625 flow = &req->flows[req->acked_tail];
4630 * default number of packets. flow->resync_npkts is used
4635 fpsn = full_flow_psn(flow, flow->flow_state.spsn);
4638 * If resync_psn points to the last flow PSN for a
4643 if (flow->flow_state.generation !=
4646 flow->resync_npkts +=
4663 flow = &rptr->flows[fidx];
4664 gen = flow->flow_state.generation;
4666 flow->flow_state.spsn !=
4669 lpsn = flow->flow_state.lpsn;
4670 lpsn = full_flow_psn(flow, lpsn);
4671 flow->npkts =
4675 flow->flow_state.generation =
4677 flow->flow_state.spsn = spsn;
4678 flow->flow_state.lpsn =
4679 flow->flow_state.spsn +
4680 flow->npkts - 1;
4681 flow->pkt = 0;
4682 spsn += flow->npkts;
4683 resync_psn += flow->npkts;
4686 flow);
4711 flow = &req->flows[req->acked_tail];
4712 flpsn = full_flow_psn(flow, flow->flow_state.lpsn);
4716 flow);
4838 struct tid_rdma_flow *flow = &req->flows[fidx];
4848 generation = kern_flow_generation_next(flow->flow_state.generation);
4866 struct tid_rdma_flow *flow;
4894 * If we don't have a flow, save the generation so it can be
4895 * applied when a new flow is allocated
4899 /* Reprogram the QP flow with new generation */
4906 * sync point and the flow has/will be reprogrammed
4912 * Reset all TID flow information with the new generation.
4935 flow = &req->flows[flow_idx];
4936 lpsn = full_flow_psn(flow,
4937 flow->flow_state.lpsn);
4938 next = flow->flow_state.r_next_psn;
4939 flow->npkts = delta_psn(lpsn, next - 1);
4940 flow->flow_state.generation = fs->generation;
4941 flow->flow_state.spsn = fs->psn;
4942 flow->flow_state.lpsn =
4943 flow->flow_state.spsn + flow->npkts - 1;
4944 flow->flow_state.r_next_psn =
4945 full_flow_psn(flow,
4946 flow->flow_state.spsn);
4947 fs->psn += flow->npkts;
4949 flow);
5102 /* move pointer to next flow */
5185 u16 flow;
5233 * into the flow array is used. The distance between it
5249 flow = CIRC_PREV(req->acked_tail, MAX_FLOWS);
5277 full_flow_psn(&req->flows[flow],
5278 req->flows[flow].flow_state.lpsn)) > 0))) {
5288 flow = req->acked_tail;
5296 hwords += hfi1_build_tid_rdma_write_ack(qp, e, ohdr, flow, &bth1,
5483 * progress is to read the HW flow state.
5506 struct tid_rdma_flow *flow,
5517 flow->flow_state.r_next_psn =
5518 read_r_next_psn(dd, rcd->ctxt, flow->idx);