Lines Matching defs:rcd

133 				   struct hfi1_ctxtdata *rcd,
197 p->qp = (RVT_KDETH_QP_PREFIX << 16) | priv->rcd->ctxt;
199 p->jkey = priv->rcd->jkey;
203 p->urg = is_urg_masked(priv->rcd);
298 int hfi1_kern_exp_rcv_init(struct hfi1_ctxtdata *rcd, int reinit)
305 rcd->jkey = TID_RDMA_JKEY;
306 hfi1_set_ctxt_jkey(rcd->dd, rcd, rcd->jkey);
307 return hfi1_alloc_ctxt_rcv_groups(rcd);
335 return dd->rcd[ctxt];
344 qpriv->rcd = qp_to_rcd(rdi, qp);
369 struct hfi1_devdata *dd = qpriv->rcd->dd;
452 * the rcd wait queue to be fetched under the exp_lock and
460 * return head of rcd wait list
469 static struct rvt_qp *first_qp(struct hfi1_ctxtdata *rcd,
471 __must_hold(&rcd->exp_lock)
475 lockdep_assert_held(&rcd->exp_lock);
486 * kernel_tid_waiters - determine rcd wait
487 * @rcd: the receive context
504 static bool kernel_tid_waiters(struct hfi1_ctxtdata *rcd,
506 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
512 lockdep_assert_held(&rcd->exp_lock);
513 fqp = first_qp(rcd, queue);
522 * @rcd: the receive context
532 * Must hold the qp s_lock and the rcd exp_lock.
537 static void dequeue_tid_waiter(struct hfi1_ctxtdata *rcd,
539 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
544 lockdep_assert_held(&rcd->exp_lock);
555 * @rcd: the receive context
559 * The qp is inserted at the tail of the rcd
564 static void queue_qp_for_tid_wait(struct hfi1_ctxtdata *rcd,
566 __must_hold(&rcd->exp_lock) __must_hold(&qp->s_lock)
571 lockdep_assert_held(&rcd->exp_lock);
576 rcd->dd->verbs_dev.n_tidwait++;
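
The fragments at lines 452-576 above outline the per-context TID wait queues: a QP that cannot get TID resources is appended to the tail of an rcd queue (queue_qp_for_tid_wait) while holding rcd->exp_lock and the QP's s_lock, and first_qp() later fetches the head so the oldest waiter is serviced first. Below is a minimal user-space sketch of that tail-enqueue / head-fetch discipline; the structure and function names are illustrative stand-ins, not the driver's, and a pthread mutex plays the role of the exp_lock spinlock. The driver splits peeking (first_qp) and removal (dequeue_tid_waiter) into separate steps, both checked with lockdep_assert_held(); the sketch collapses them for brevity.

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for an rcd wait queue and the QPs parked on it. */
struct fake_qp {
	int id;
	struct fake_qp *next;		/* link while the QP sits on a wait queue */
};

struct wait_queue {
	pthread_mutex_t lock;		/* plays the role of rcd->exp_lock */
	struct fake_qp *head, *tail;
	unsigned long n_waits;		/* like the n_tidwait counter */
};

/* Append at the tail: newcomers wait behind QPs already queued. */
static void queue_qp_for_wait(struct wait_queue *q, struct fake_qp *qp)
{
	pthread_mutex_lock(&q->lock);
	qp->next = NULL;
	if (q->tail)
		q->tail->next = qp;
	else
		q->head = qp;
	q->tail = qp;
	q->n_waits++;
	pthread_mutex_unlock(&q->lock);
}

/* Fetch (and here also remove) the head: the oldest waiter is resumed first. */
static struct fake_qp *first_waiting_qp(struct wait_queue *q)
{
	struct fake_qp *qp;

	pthread_mutex_lock(&q->lock);
	qp = q->head;
	if (qp) {
		q->head = qp->next;
		if (!q->head)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return qp;
}

int main(void)
{
	struct wait_queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_qp a = { .id = 1 }, b = { .id = 2 };
	struct fake_qp *qp;

	queue_qp_for_wait(&q, &a);
	queue_qp_for_wait(&q, &b);
	while ((qp = first_waiting_qp(&q)))
		printf("waking qp %d\n", qp->id);
	return 0;
}
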
679 spin_lock(&priv->rcd->exp_lock);
686 spin_unlock(&priv->rcd->exp_lock);
694 _tid_rdma_flush_wait(qp, &priv->rcd->flow_queue);
695 _tid_rdma_flush_wait(qp, &priv->rcd->rarr_queue);
701 * @rcd: the context to use for allocation
716 static int kern_reserve_flow(struct hfi1_ctxtdata *rcd, int last)
717 __must_hold(&rcd->exp_lock)
723 !test_and_set_bit(last, &rcd->flow_mask))
726 nr = ffz(rcd->flow_mask);
728 (sizeof(rcd->flow_mask) * BITS_PER_BYTE));
731 set_bit(nr, &rcd->flow_mask);
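
kern_reserve_flow() (lines 716-731 above) hands out hardware flow indices from the rcd->flow_mask bitmap: it first retries the caller's previous index with test_and_set_bit(), and otherwise takes the lowest clear bit found by ffz(), bounded by the bitmap width. A user-space sketch of the same bitmap discipline follows; NUM_FLOWS and the function names are illustrative, and the plain bit operations stand in for the kernel's atomic test_and_set_bit()/set_bit().

#include <stdio.h>

#define NUM_FLOWS 32		/* illustrative; not the driver's constant */

/* Return the reserved flow index, or -1 if all flows are busy.
 * 'last' is the index this requester used previously (or -1).
 */
static int reserve_flow(unsigned long *flow_mask, int last)
{
	int nr;

	/* Prefer the previously used index if it is still free. */
	if (last >= 0 && last < NUM_FLOWS &&
	    !(*flow_mask & (1UL << last))) {
		*flow_mask |= 1UL << last;
		return last;
	}

	/* Otherwise take the lowest clear bit, if any. */
	for (nr = 0; nr < NUM_FLOWS; nr++) {
		if (!(*flow_mask & (1UL << nr))) {
			*flow_mask |= 1UL << nr;
			return nr;
		}
	}
	return -1;
}

int main(void)
{
	unsigned long mask = 0;
	int a = reserve_flow(&mask, -1);	/* -> 0 */
	int b = reserve_flow(&mask, -1);	/* -> 1 */

	mask &= ~(1UL << a);			/* like clear_bit() on release */
	printf("%d %d %d\n", a, b, reserve_flow(&mask, a)); /* reuses 'a' */
	return 0;
}
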
735 static void kern_set_hw_flow(struct hfi1_ctxtdata *rcd, u32 generation,
750 write_uctxt_csr(rcd->dd, rcd->ctxt,
754 static u32 kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
755 __must_hold(&rcd->exp_lock)
757 u32 generation = rcd->flows[flow_idx].generation;
759 kern_set_hw_flow(rcd, generation, flow_idx);
772 static void kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, u32 flow_idx)
773 __must_hold(&rcd->exp_lock)
775 rcd->flows[flow_idx].generation =
776 kern_flow_generation_next(rcd->flows[flow_idx].generation);
777 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, flow_idx);
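
kern_clear_hw_flow() (lines 772-777 above) retires a flow by advancing its stored generation with kern_flow_generation_next() and programming KERN_GENERATION_RESERVED into the hardware slot, while kern_setup_hw_flow() writes the stored generation back when the flow is handed to a new QP. Below is a plausible sketch of such a "next generation" helper, assuming the generation is a masked counter that must skip the reserved value; the mask width, the reserved constant, and the exact skip behaviour are assumptions, not taken from the driver.

#include <stdio.h>

#define GEN_MASK		0xff	/* illustrative field width */
#define GENERATION_RESERVED	0	/* illustrative reserved value */

static unsigned int mask_gen(unsigned int g)
{
	return g & GEN_MASK;
}

/* Advance the generation, skipping the value reserved for "no flow". */
static unsigned int generation_next(unsigned int gen)
{
	unsigned int next = mask_gen(gen + 1);

	if (next == GENERATION_RESERVED)
		next = mask_gen(next + 1);
	return next;
}

int main(void)
{
	/* 5 -> 6; 0xff wraps past the reserved value 0 to 1. */
	printf("%u %u\n", generation_next(5), generation_next(GEN_MASK));
	return 0;
}
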
780 int hfi1_kern_setup_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
792 spin_lock_irqsave(&rcd->exp_lock, flags);
793 if (kernel_tid_waiters(rcd, &rcd->flow_queue, qp))
796 ret = kern_reserve_flow(rcd, fs->last_index);
804 rcd->flows[fs->index].generation = fs->generation;
805 fs->generation = kern_setup_hw_flow(rcd, fs->index);
807 dequeue_tid_waiter(rcd, &rcd->flow_queue, qp);
809 fqp = first_qp(rcd, &rcd->flow_queue);
810 spin_unlock_irqrestore(&rcd->exp_lock, flags);
815 queue_qp_for_tid_wait(rcd, &rcd->flow_queue, qp);
816 spin_unlock_irqrestore(&rcd->exp_lock, flags);
820 void hfi1_kern_clear_hw_flow(struct hfi1_ctxtdata *rcd, struct rvt_qp *qp)
829 spin_lock_irqsave(&rcd->exp_lock, flags);
830 kern_clear_hw_flow(rcd, fs->index);
831 clear_bit(fs->index, &rcd->flow_mask);
837 fqp = first_qp(rcd, &rcd->flow_queue);
838 spin_unlock_irqrestore(&rcd->exp_lock, flags);
848 void hfi1_kern_init_ctxt_generations(struct hfi1_ctxtdata *rcd)
853 rcd->flows[i].generation = mask_generation(get_random_u32());
854 kern_set_hw_flow(rcd, KERN_GENERATION_RESERVED, i);
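
hfi1_kern_init_ctxt_generations() (lines 848-854 above) seeds every flow of a context with a random starting generation and leaves the hardware slot programmed with KERN_GENERATION_RESERVED until a QP claims it. A small sketch of that initialization loop, reusing the illustrative constants from the previous sketch; the flow count, the per-flow structure, and rand() as a stand-in for get_random_u32() are all assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NUM_FLOWS		32	/* illustrative */
#define GEN_MASK		0xff	/* illustrative */
#define GENERATION_RESERVED	0	/* illustrative */

struct flow_state {
	unsigned int generation;	/* software-side generation */
	unsigned int hw_generation;	/* what the hardware slot holds */
};

/* Give each flow a random starting generation; the hardware slot stays
 * "reserved" until the flow is handed to a QP (hfi1_kern_setup_hw_flow()).
 */
static void init_ctxt_generations(struct flow_state *flows)
{
	int i;

	for (i = 0; i < NUM_FLOWS; i++) {
		flows[i].generation = (unsigned int)rand() & GEN_MASK;
		flows[i].hw_generation = GENERATION_RESERVED;
	}
}

int main(void)
{
	struct flow_state flows[NUM_FLOWS];

	srand((unsigned int)time(NULL));
	init_ctxt_generations(flows);
	printf("flow 0 starts at generation %u\n", flows[0].generation);
	return 0;
}
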
1128 dd = flow->req->rcd->dd;
1144 struct hfi1_devdata *dd = flow->req->rcd->dd;
1205 struct hfi1_ctxtdata *rcd, char *s,
1213 dd_dev_err(rcd->dd,
1239 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1240 struct hfi1_devdata *dd = rcd->dd;
1251 list_for_each_entry(group, &rcd->tid_group_list.list, list) {
1252 kern_add_tid_node(flow, rcd, "complete groups", group,
1265 list_for_each_entry(used, &rcd->tid_used_list.list, list) {
1268 kern_add_tid_node(flow, rcd, "used groups", used, use);
1280 if (group && &group->list == &rcd->tid_group_list.list)
1282 group = list_prepare_entry(group, &rcd->tid_group_list.list,
1284 if (list_is_last(&group->list, &rcd->tid_group_list.list))
1288 kern_add_tid_node(flow, rcd, "complete continue", group, use);
1303 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1304 struct hfi1_devdata *dd = rcd->dd;
1328 rcventry -= rcd->expected_base;
1356 tid_group_move(grp, &rcd->tid_used_list,
1357 &rcd->tid_full_list);
1359 tid_group_move(grp, &rcd->tid_group_list,
1360 &rcd->tid_used_list);
1370 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1371 struct hfi1_devdata *dd = rcd->dd;
1392 tid_group_move(grp, &rcd->tid_full_list,
1393 &rcd->tid_used_list);
1395 tid_group_move(grp, &rcd->tid_used_list,
1396 &rcd->tid_group_list);
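
Lines 1251-1396 above show RcvArray TID-entry groups cycling through three per-context lists as entries are programmed and later freed: rcd->tid_group_list (all entries free), rcd->tid_used_list (partially used) and rcd->tid_full_list (exhausted), with tid_group_move() shifting a group forward on allocation and back on release. The sketch below mimics that three-list bookkeeping; the structures and helpers are illustrative stand-ins, and only the three list roles are taken from the fragments above.

#include <stdio.h>

/* Illustrative TID entry group: 'used' counts programmed entries of 'size'. */
struct tid_grp {
	int size, used;
	struct tid_grp *next;
};

struct grp_list {
	const char *name;
	struct tid_grp *head, *tail;
};

/* Append a group at the tail of a list. */
static void grp_add_tail(struct tid_grp *grp, struct grp_list *l)
{
	grp->next = NULL;
	if (l->tail)
		l->tail->next = grp;
	else
		l->head = grp;
	l->tail = grp;
}

/* Detach the group at the head of a list. */
static struct tid_grp *grp_pop(struct grp_list *l)
{
	struct tid_grp *grp = l->head;

	if (grp) {
		l->head = grp->next;
		if (!l->head)
			l->tail = NULL;
	}
	return grp;
}

int main(void)
{
	struct grp_list grp_l = { "tid_group_list" };	/* all entries free */
	struct grp_list used_l = { "tid_used_list" };	/* partially used */
	struct grp_list full_l = { "tid_full_list" };	/* no free entries */
	struct tid_grp g = { .size = 8 };

	grp_add_tail(&g, &grp_l);

	/* Programming entries moves the group forward ... */
	g.used = 3;
	grp_add_tail(grp_pop(&grp_l), &used_l);
	g.used = g.size;
	grp_add_tail(grp_pop(&used_l), &full_l);

	/* ... and freeing them moves it back toward the free list. */
	g.used = 0;
	grp_add_tail(grp_pop(&full_l), &grp_l);

	printf("group ends on %s with %d/%d entries used\n",
	       grp_l.name, g.used, g.size);
	return 0;
}
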
1399 struct hfi1_ctxtdata *rcd = flow->req->rcd;
1400 struct hfi1_devdata *dd = rcd->dd;
1441 * function uses qp, rcd and seg_len members of @req. In the absence of errors,
1466 struct hfi1_ctxtdata *rcd = req->rcd;
1494 spin_lock_irqsave(&rcd->exp_lock, flags);
1495 if (kernel_tid_waiters(rcd, &rcd->rarr_queue, flow->req->qp))
1528 dequeue_tid_waiter(rcd, &rcd->rarr_queue, flow->req->qp);
1530 fqp = first_qp(rcd, &rcd->rarr_queue);
1531 spin_unlock_irqrestore(&rcd->exp_lock, flags);
1537 queue_qp_for_tid_wait(rcd, &rcd->rarr_queue, flow->req->qp);
1538 spin_unlock_irqrestore(&rcd->exp_lock, flags);
1557 struct hfi1_ctxtdata *rcd = req->rcd;
1567 spin_lock_irqsave(&rcd->exp_lock, flags);
1574 fqp = first_qp(rcd, &rcd->rarr_queue);
1575 spin_unlock_irqrestore(&rcd->exp_lock, flags);
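
The allocate/clear paths at lines 1466-1575 above (and the flow paths at lines 780-838) share one locking shape: take rcd->exp_lock, touch the shared TID state, dequeue the current QP if it was waiting, look up the next waiter with first_qp() while still holding the lock, and only then drop it (the fragments show the unlock on the very next line). What happens to that fetched QP afterwards is not visible in this listing, so the resume step below is purely illustrative; all names are stand-ins and a pthread mutex again replaces the spinlock.

#include <pthread.h>
#include <stdio.h>

/* Illustrative shared TID state guarded by one lock (the exp_lock role). */
struct tid_state {
	pthread_mutex_t lock;
	unsigned long busy_mask;	/* resources currently handed out */
	int next_waiter;		/* -1 when nobody is queued */
};

/* Hypothetical resume step: whatever the caller does with the QP fetched
 * by first_qp() once the lock has been dropped.
 */
static void resume_waiter(int waiter)
{
	printf("resuming waiter %d outside the lock\n", waiter);
}

/* Release a resource, pick the next waiter under the lock, act on it after. */
static void release_and_wake(struct tid_state *s, int idx)
{
	int waiter;

	pthread_mutex_lock(&s->lock);
	s->busy_mask &= ~(1UL << idx);		/* like clear_bit() */
	waiter = s->next_waiter;		/* like first_qp() */
	s->next_waiter = -1;
	pthread_mutex_unlock(&s->lock);

	if (waiter >= 0)
		resume_waiter(waiter);		/* done without the lock held */
}

int main(void)
{
	struct tid_state s = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.busy_mask = 1UL << 0,
		.next_waiter = 7,
	};

	release_and_wake(&s, 0);
	return 0;
}
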
1640 req->rcd->numa_id);
1670 req->rcd = qpriv->rcd;
1761 qpriv->rcd->ctxt);
1811 hfi1_kern_clear_hw_flow(req->rcd, qp);
1845 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
1990 struct hfi1_ctxtdata *rcd = ((struct hfi1_qp_priv *)qp->priv)->rcd;
2007 rc_defered_ack(rcd, qp);
2230 struct hfi1_ctxtdata *rcd = packet->rcd;
2335 rc_defered_ack(rcd, qp);
2454 struct hfi1_ctxtdata *rcd = packet->rcd;
2477 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
2536 if (!do_rc_ack(qp, aeth, ipsn, opcode, 0, rcd))
2553 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2599 hfi1_kern_clear_hw_flow(priv->rcd, qp);
2629 static void restart_tid_rdma_read_req(struct hfi1_ctxtdata *rcd,
2643 list_add_tail(&qp->rspwait, &rcd->qp_wait_list);
2655 static bool handle_read_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2660 struct hfi1_pportdata *ppd = rcd->ppd;
2707 restart_tid_rdma_read_req(rcd, qp,
2717 &rcd->qp_wait_list);
2798 last_psn = read_r_next_psn(dd, rcd->ctxt,
2807 restart_tid_rdma_read_req(rcd, qp,
2845 bool hfi1_handle_kdeth_eflags(struct hfi1_ctxtdata *rcd,
2876 trace_input_ibhdr(rcd->dd, packet, !!(rhf_dc_info(packet->rhf)));
2924 ret = handle_read_kdeth_eflags(rcd, packet, rcv_type, rte, psn,
2960 read_r_next_psn(dd, rcd->ctxt,
3178 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3289 struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3433 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
3468 struct hfi1_ctxtdata *rcd = qpriv->rcd;
3500 hfi1_kern_clear_hw_flow(rcd, qp);
3526 hfi1_kern_clear_hw_flow(rcd, qp);
3533 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3537 &rcd->flow_queue);
3571 to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3641 rc_defered_ack(rcd, qp);
3659 struct hfi1_ctxtdata *rcd = packet->rcd;
3826 rc_defered_ack(rcd, qp);
3928 qpriv->rcd->ctxt);
4003 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
4043 struct hfi1_ctxtdata *rcd = packet->rcd;
4101 if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd))
4273 struct hfi1_ctxtdata *rcd = priv->rcd;
4297 update_r_next_psn_fecn(packet, priv, rcd, flow, fecn);
4349 rcd->flows[flow->idx].psn = psn & HFI1_KDETH_BTH_SEQ_MASK;
4462 qpriv->rcd->ctxt);
4874 struct hfi1_ctxtdata *rcd = qpriv->rcd;
4903 spin_lock(&rcd->exp_lock);
4912 rcd->flows[fs->index].generation = generation;
4913 fs->generation = kern_setup_hw_flow(rcd, fs->index);
4968 spin_unlock(&rcd->exp_lock);
5517 struct hfi1_ctxtdata *rcd,
5527 struct hfi1_devdata *dd = rcd->dd;
5530 read_r_next_psn(dd, rcd->ctxt, flow->idx);