Lines Matching refs:rcd
133 struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
199 p->jkey = priv->rcd->jkey;
203 p->urg = is_urg_masked(priv->rcd);
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
305 rcd->jkey = TID_RDMA_JKEY;
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
307 return hfi1_alloc_ctxt_rcv_groups(rcd);
334 return dd->rcd[ctxt];
343 qpriv->rcd = qp_to_rcd(rdi, qp);
368 struct hfi1_devdata *dd = qpriv->rcd->dd;
451 * the rcd wait queue to be fetched under the exp_lock and
459 * return head of rcd wait list
468 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
470 __must_hold(&rcd->exp_lock)
474 lockdep_assert_held(&rcd->exp_lock);
485 * kernel_tid_waiters - determine rcd wait
486 * @rcd: the receive context
502 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
504 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
510 lockdep_assert_held(&rcd->exp_lock);
511 fqp = first_qp(rcd, queue);
528 * Must hold the qp s_lock and the rcd exp_lock.
533 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
535 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
540 lockdep_assert_held(&rcd->exp_lock);
551 * @rcd: the receive context
554 * The qp is inserted at the tail of the rcd
559 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
561 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
566 lockdep_assert_held(&rcd->exp_lock);
571 rcd->dd->verbs_dev.n_tidwait++;
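
The helpers matched above (first_qp(), kernel_tid_waiters(), dequeue_tid_waiter(), queue_qp_for_tid_wait()) implement a FIFO wait discipline under rcd->exp_lock: peek at the head of a per-context wait list, back off when another QP is ahead, append new waiters at the tail, and remove a waiter once it has been granted the resource. A minimal userspace sketch of that discipline, assuming hypothetical types (struct waiter, struct tid_queue) in place of rvt_qp and the rcd queues, and omitting the locking and lockdep assertions of the real code:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-in for an rvt_qp waiting for TID resources */
struct waiter {
	int qpn;
	struct waiter *next;
};

/* hypothetical stand-in for rcd->flow_queue / rcd->rarr_queue */
struct tid_queue {
	struct waiter *head;
	struct waiter *tail;
};

/* like first_qp(): peek at the head of the wait list without removing it */
static struct waiter *queue_first(struct tid_queue *q)
{
	return q->head;
}

/* like kernel_tid_waiters(): true means another waiter is ahead, back off */
static bool must_wait(struct tid_queue *q, struct waiter *w)
{
	struct waiter *f = queue_first(q);

	return f && f != w;
}

/* like queue_qp_for_tid_wait(): append at the tail, FIFO order */
static void queue_waiter(struct tid_queue *q, struct waiter *w)
{
	w->next = NULL;
	if (q->tail)
		q->tail->next = w;
	else
		q->head = w;
	q->tail = w;
}

/* like dequeue_tid_waiter(): drop the head once it has its resource */
static void dequeue_waiter(struct tid_queue *q)
{
	if (!q->head)
		return;
	q->head = q->head->next;
	if (!q->head)
		q->tail = NULL;
}

int main(void)
{
	struct tid_queue q = { NULL, NULL };
	struct waiter a = { 1, NULL }, b = { 2, NULL };

	queue_waiter(&q, &a);
	queue_waiter(&q, &b);
	printf("b must wait: %d\n", must_wait(&q, &b)); /* 1: a is ahead of b */
	dequeue_waiter(&q);                              /* a got its resource */
	printf("b must wait: %d\n", must_wait(&q, &b)); /* 0: b is now the head */
	return 0;
}

The sketch only models the queue ordering; the driver's helpers do additional QP bookkeeping under the same locks.
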
674 spin_lock(&priv->rcd->exp_lock);
681 spin_unlock(&priv->rcd->exp_lock);
689 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
690 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
696 * @rcd - the context to use for allocation
711 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
712 __must_hold(&rcd->exp_lock)
718 !test_and_set_bit(last, &rcd->flow_mask))
721 nr = ffz(rcd->flow_mask);
723 (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
726 set_bit(nr, &rcd->flow_mask);
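
The kern_reserve_flow() matches above show the flow-index reservation policy: try the QP's previous index first (test_and_set_bit() on rcd->flow_mask), otherwise take the first clear bit (ffz()) and give up once every bit in the mask is set. A userspace sketch of the same bitmask walk, with plain C replacing the kernel bit helpers and a MAX_FLOWS bound assumed from the width of the mask:

#include <limits.h>
#include <stdio.h>

/* assumed bound: one bit per flow in a single unsigned long mask */
#define MAX_FLOWS ((int)(sizeof(unsigned long) * CHAR_BIT))

/* plain-C equivalent of the kernel's ffz(): index of the first zero bit */
static int first_zero_bit(unsigned long mask)
{
	for (int i = 0; i < MAX_FLOWS; i++)
		if (!(mask & (1UL << i)))
			return i;
	return -1;
}

/* mirrors the policy in kern_reserve_flow(): last index first, then first free bit */
static int reserve_flow(unsigned long *flow_mask, int last)
{
	int nr;

	if (last >= 0 && last < MAX_FLOWS &&
	    !(*flow_mask & (1UL << last))) {
		*flow_mask |= 1UL << last;	/* test_and_set_bit() fast path */
		return last;
	}

	nr = first_zero_bit(*flow_mask);
	if (nr < 0)
		return -1;			/* every flow is in use */
	*flow_mask |= 1UL << nr;		/* set_bit() */
	return nr;
}

int main(void)
{
	unsigned long mask = 0;

	printf("%d\n", reserve_flow(&mask, -1));	/* 0 */
	printf("%d\n", reserve_flow(&mask, -1));	/* 1 */
	printf("%d\n", reserve_flow(&mask, 0));		/* 0 is taken, so 2 */
	return 0;
}

reserve_flow() returns -1 on exhaustion where the driver returns an error code; in the driver the whole walk is serialized by rcd->exp_lock.
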
730 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
745 write_uctxt_csr(rcd->dd, rcd->ctxt,
749 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
750 __must_hold(&rcd->exp_lock)
752 u32 generation = rcd->flows[flow_idx].generation;
754 kern_set_hw_flow(rcd, generation, flow_idx);
767 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
768 __must_hold(&rcd->exp_lock)
770 rcd->flows[flow_idx].generation =
771 kern_flow_generation_next(rcd->flows[flow_idx].generation);
772 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
775 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
787 spin_lock_irqsave(&rcd->exp_lock, flags);
788 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
791 ret = kern_reserve_flow(rcd, fs->last_index);
799 rcd->flows[fs->index].generation = fs->generation;
800 fs->generation = kern_setup_hw_flow(rcd, fs->index);
802 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
804 fqp = first_qp(rcd, &rcd->flow_queue);
805 spin_unlock_irqrestore(&rcd->exp_lock, flags);
810 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
811 spin_unlock_irqrestore(&rcd->exp_lock, flags);
815 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
824 spin_lock_irqsave(&rcd->exp_lock, flags);
825 kern_clear_hw_flow(rcd, fs->index);
826 clear_bit(fs->index, &rcd->flow_mask);
832 fqp = first_qp(rcd, &rcd->flow_queue);
833 spin_unlock_irqrestore(&rcd->exp_lock, flags);
843 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
848 rcd->flows[i].generation = mask_generation(prandom_u32());
849 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
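
Taken together, the generation handling matched above (kern_setup_hw_flow(), kern_clear_hw_flow(), hfi1_kern_init_ctxt_generations()) suggests that each flow's generation starts at a masked random value, that the hardware is parked on KERN_GENERATION_RESERVED while the flow is idle, and that the generation is advanced past the reserved value whenever the flow is recycled. A plausible sketch of that advance, with GEN_MASK/GEN_RESERVED as assumed stand-ins for mask_generation() and KERN_GENERATION_RESERVED:

#include <stdio.h>

/* assumed stand-ins for mask_generation() and KERN_GENERATION_RESERVED */
#define GEN_MASK	0xffU
#define GEN_RESERVED	GEN_MASK

/* advance the generation, skipping the reserved value */
static unsigned int generation_next(unsigned int gen)
{
	gen = (gen + 1) & GEN_MASK;
	if (gen == GEN_RESERVED)
		gen = (gen + 1) & GEN_MASK;
	return gen;
}

int main(void)
{
	printf("%#x\n", generation_next(3));			/* 0x4 */
	printf("%#x\n", generation_next(GEN_RESERVED - 1));	/* skips 0xff, wraps to 0 */
	return 0;
}

Skipping the reserved value presumably keeps a live flow from ever colliding with the idle marker the hardware was programmed with.
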
1121 dd = flow->req->rcd->dd;
1137 struct hfi1_devdata *dd = flow->req->rcd->dd;
1198 struct hfi1_ctxtdata *rcd, char *s,
1206 dd_dev_err(rcd->dd,
1232 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1233 struct hfi1_devdata *dd = rcd->dd;
1244 list_for_each_entry(group, &rcd->tid_group_list.list, list) {
1245 kern_add_tid_node(flow, rcd, "complete groups", group,
1258 list_for_each_entry(used, &rcd->tid_used_list.list, list) {
1261 kern_add_tid_node(flow, rcd, "used groups", used, use);
1273 if (group && &group->list == &rcd->tid_group_list.list)
1275 group = list_prepare_entry(group, &rcd->tid_group_list.list,
1277 if (list_is_last(&group->list, &rcd->tid_group_list.list))
1281 kern_add_tid_node(flow, rcd, "complete continue", group, use);
1296 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1297 struct hfi1_devdata *dd = rcd->dd;
1321 rcventry -= rcd->expected_base;
1349 tid_group_move(grp, &rcd->tid_used_list,
1350 &rcd->tid_full_list);
1352 tid_group_move(grp, &rcd->tid_group_list,
1353 &rcd->tid_used_list);
1363 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1364 struct hfi1_devdata *dd = rcd->dd;
1385 tid_group_move(grp, &rcd->tid_full_list,
1386 &rcd->tid_used_list);
1388 tid_group_move(grp, &rcd->tid_used_list,
1389 &rcd->tid_group_list);
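
The tid_group_move() matches above shuffle RcvArray groups between three per-context lists as TID entries are programmed and released: tid_group_list (all entries free), tid_used_list (partially used) and tid_full_list (exhausted). A small sketch of that lifecycle, using a hypothetical enum and counters in place of the real list_heads:

#include <stdio.h>

/* hypothetical model of the three per-context lists */
enum group_list { GRP_FREE, GRP_USED, GRP_FULL };

struct tid_group {
	int size;		/* RcvArray entries in the group */
	int used;		/* entries currently programmed */
	enum group_list list;	/* which list the group sits on */
};

/* programming one more entry, cf. the tid_group_move() calls above */
static void group_program(struct tid_group *g)
{
	if (g->used >= g->size)
		return;
	g->used++;
	g->list = (g->used == g->size) ? GRP_FULL : GRP_USED;
}

/* unprogramming one entry walks the group back toward the free list */
static void group_unprogram(struct tid_group *g)
{
	if (!g->used)
		return;
	g->used--;
	g->list = g->used ? GRP_USED : GRP_FREE;
}

int main(void)
{
	struct tid_group g = { 2, 0, GRP_FREE };

	group_program(&g);	/* tid_group_list -> tid_used_list */
	group_program(&g);	/* tid_used_list  -> tid_full_list */
	group_unprogram(&g);	/* tid_full_list  -> tid_used_list */
	printf("used=%d list=%d\n", g.used, g.list);	/* used=1 list=1 (GRP_USED) */
	return 0;
}
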
1392 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1393 struct hfi1_devdata *dd = rcd->dd;
1434 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1459 struct hfi1_ctxtdata *rcd = req->rcd;
1487 spin_lock_irqsave(&rcd->exp_lock, flags);
1488 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1521 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1523 fqp = first_qp(rcd, &rcd->rarr_queue);
1524 spin_unlock_irqrestore(&rcd->exp_lock, flags);
1530 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1531 spin_unlock_irqrestore(&rcd->exp_lock, flags);
1550 struct hfi1_ctxtdata *rcd = req->rcd;
1560 spin_lock_irqsave(&rcd->exp_lock, flags);
1567 fqp = first_qp(rcd, &rcd->rarr_queue);
1568 spin_unlock_irqrestore(&rcd->exp_lock, flags);
1633 req->rcd->numa_id);
1663 req->rcd = qpriv->rcd;
1754 qpriv->rcd->ctxt);
1804 hfi1_kern_clear_hw_flow(req->rcd, qp);
1838 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
1983 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
2000 rc_defered_ack(rcd, qp);
2223 struct hfi1_ctxtdata *rcd = packet->rcd;
2328 rc_defered_ack(rcd, qp);
2447 struct hfi1_ctxtdata *rcd = packet->rcd;
2470 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2529 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2546 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2592 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2622 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
2636 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2648 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2653 struct hfi1_pportdata *ppd = rcd->ppd;
2700 restart_tid_rdma_read_req(rcd, qp,
2710 &rcd->qp_wait_list);
2791 last_psn = read_r_next_psn(dd, rcd->ctxt,
2800 restart_tid_rdma_read_req(rcd, qp,
2837 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2868 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
2916 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2952 read_r_next_psn(dd, rcd->ctxt,
3169 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3277 struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3421 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
3456 struct hfi1_ctxtdata *rcd = qpriv->rcd;
3488 hfi1_kern_clear_hw_flow(rcd, qp);
3514 hfi1_kern_clear_hw_flow(rcd, qp);
3521 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3525 &rcd->flow_queue);
3559 to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3629 rc_defered_ack(rcd, qp);
3647 struct hfi1_ctxtdata *rcd = packet->rcd;
3814 rc_defered_ack(rcd, qp);
3916 qpriv->rcd->ctxt);
3991 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4031 struct hfi1_ctxtdata *rcd = packet->rcd;
4089 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4261 struct hfi1_ctxtdata *rcd = priv->rcd;
4285 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4337 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4450 qpriv->rcd->ctxt);
4862 struct hfi1_ctxtdata *rcd = qpriv->rcd;
4891 spin_lock(&rcd->exp_lock);
4900 rcd->flows[fs->index].generation = generation;
4901 fs->generation = kern_setup_hw_flow(rcd, fs->index);
4956 spin_unlock(&rcd->exp_lock);
5505 struct hfi1_ctxtdata *rcd,
5515 struct hfi1_devdata *dd = rcd->dd;
5518 read_r_next_psn(dd, rcd->ctxt, flow->idx);