Lines Matching refs:qpriv
340 struct hfi1_qp_priv *qpriv = qp->priv;
343 qpriv->rcd = qp_to_rcd(rdi, qp);
345 spin_lock_init(&qpriv->opfn.lock);
346 INIT_WORK(&qpriv->opfn.opfn_work, opfn_send_conn_request);
347 INIT_WORK(&qpriv->tid_rdma.trigger_work, tid_rdma_trigger_resume);
348 qpriv->flow_state.psn = 0;
349 qpriv->flow_state.index = RXE_NUM_TID_FLOWS;
350 qpriv->flow_state.last_index = RXE_NUM_TID_FLOWS;
351 qpriv->flow_state.generation = KERN_GENERATION_RESERVED;
352 qpriv->s_state = TID_OP(WRITE_RESP);
353 qpriv->s_tid_cur = HFI1_QP_WQE_INVALID;
354 qpriv->s_tid_head = HFI1_QP_WQE_INVALID;
355 qpriv->s_tid_tail = HFI1_QP_WQE_INVALID;
356 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
357 qpriv->r_tid_head = HFI1_QP_WQE_INVALID;
358 qpriv->r_tid_tail = HFI1_QP_WQE_INVALID;
359 qpriv->r_tid_ack = HFI1_QP_WQE_INVALID;
360 qpriv->r_tid_alloc = HFI1_QP_WQE_INVALID;
361 atomic_set(&qpriv->n_requests, 0);
362 atomic_set(&qpriv->n_tid_requests, 0);
363 timer_setup(&qpriv->s_tid_timer, hfi1_tid_timeout, 0);
364 timer_setup(&qpriv->s_tid_retry_timer, hfi1_tid_retry_timeout, 0);
365 INIT_LIST_HEAD(&qpriv->tid_wait);
368 struct hfi1_devdata *dd = qpriv->rcd->dd;
370 qpriv->pages = kzalloc_node(TID_RDMA_MAX_PAGES *
371 sizeof(*qpriv->pages),
373 if (!qpriv->pages)
414 struct hfi1_qp_priv *qpriv = qp->priv;
432 cancel_work_sync(&qpriv->opfn.opfn_work);
433 kfree(qpriv->pages);
434 qpriv->pages = NULL;
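
The refs at 340-373 trace the QP private-data init path, and 414-434 its teardown. A minimal sketch of that pairing, under invented names (my_qp_priv, my_work_fn, my_timeout) and assuming a kernel-module context; this illustrates the pattern, not the hfi1 code itself:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_qp_priv {
        struct work_struct work;
        struct timer_list timer;
        struct page **pages;
    };

    static void my_work_fn(struct work_struct *w)
    {
        /* deferred send work would run here */
    }

    static void my_timeout(struct timer_list *t)
    {
        /* recover the enclosing private struct from the timer */
        struct my_qp_priv *priv = from_timer(priv, t, timer);

        (void)priv;
    }

    static int my_priv_init(struct my_qp_priv *priv, int node,
                            unsigned int npages)
    {
        INIT_WORK(&priv->work, my_work_fn);
        timer_setup(&priv->timer, my_timeout, 0);
        priv->pages = kzalloc_node(npages * sizeof(*priv->pages),
                                   GFP_KERNEL, node);
        return priv->pages ? 0 : -ENOMEM;
    }

    static void my_priv_free(struct my_qp_priv *priv)
    {
        /* quiesce async contexts before freeing what they touch */
        del_timer_sync(&priv->timer);
        cancel_work_sync(&priv->work);
        kfree(priv->pages);
        priv->pages = NULL;
    }

Teardown mirrors init in reverse, which is why ref 432 cancels the work before ref 433 frees the page array it may touch.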
777 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
778 struct tid_flow_state *fs = &qpriv->flow_state;
817 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
818 struct tid_flow_state *fs = &qpriv->flow_state;
1460 struct hfi1_qp_priv *qpriv = req->qp->priv;
1482 if (kern_get_phys_blocks(flow, qpriv->pages, ss, last)) {
1510 * qpriv getting out of sync.
1513 flow->idx = qpriv->flow_state.index;
1514 flow->flow_state.generation = qpriv->flow_state.generation;
1515 flow->flow_state.spsn = qpriv->flow_state.psn;
1519 qpriv->flow_state.psn += flow->npkts;
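
Refs 1513-1519 show each programmed flow snapshotting the QP's current index, generation, and PSN as its starting state, after which the shared counter advances past the segment. A hedged sketch of that handout, with illustrative struct and field names:

    #include <linux/types.h>

    struct qp_flow_src { u32 index, generation, psn; };
    struct seg_flow    { u32 idx, generation, spsn; };

    static void assign_flow_psns(struct qp_flow_src *fs,
                                 struct seg_flow *flow, u32 npkts)
    {
        flow->idx = fs->index;            /* hardware flow in use */
        flow->generation = fs->generation;
        flow->spsn = fs->psn;             /* this segment starts here */
        fs->psn += npkts;                 /* next segment follows on */
    }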
1650 struct hfi1_qp_priv *qpriv = qp->priv;
1663 req->rcd = qpriv->rcd;
1703 struct hfi1_qp_priv *qpriv = qp->priv;
1737 remote = rcu_dereference(qpriv->tid_rdma.remote);
1751 cpu_to_be32(qpriv->tid_rdma.local.qp |
1754 qpriv->rcd->ctxt);
1767 qpriv->pending_tid_r_segs++;
1785 struct hfi1_qp_priv *qpriv = qp->priv;
1801 if (qpriv->pending_tid_r_segs)
1805 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
1832 if ((qpriv->flow_state.psn + npkts) > MAX_TID_FLOW_PSN - 1) {
1838 if (hfi1_kern_setup_hw_flow(qpriv->rcd, qp))
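
Ref 1832 tests whether the next segment would run past the flow PSN space; when it would, ref 1838 sets up a fresh hardware flow (new generation) via hfi1_kern_setup_hw_flow(). A sketch of the exhaustion test; MAX_TID_FLOW_PSN bounds the KDETH flow-PSN field in hfi1, and the width used below is illustrative, not the hfi1 value:

    #include <linux/bits.h>
    #include <linux/types.h>

    #define EX_MAX_FLOW_PSN BIT(11)    /* hypothetical field width */

    static bool flow_psn_exhausted(u32 psn, u32 npkts)
    {
        /* the segment's last PSN may not exceed the field maximum */
        return psn + npkts > EX_MAX_FLOW_PSN - 1;
    }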
1888 struct hfi1_qp_priv *qpriv = qp->priv;
1962 req->n_flows = qpriv->tid_rdma.local.max_read;
1968 req->seg_len = qpriv->tid_rdma.local.max_len;
1985 struct hfi1_qp_priv *qpriv = qp->priv;
2121 qpriv->pending_tid_w_segs -=
2128 qpriv->pending_tid_w_segs +=
2165 qpriv->pending_tid_w_segs -=
2173 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
2191 if (qpriv->rnr_nak_state) {
2193 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
2230 struct hfi1_qp_priv *qpriv = qp->priv;
2256 if (!len || len & ~PAGE_MASK || len > qpriv->tid_rdma.local.max_len)
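
The test at ref 2256 rejects a request length that is zero, not a whole number of pages, or larger than the negotiated per-segment maximum. Since ~PAGE_MASK == PAGE_SIZE - 1, "len & ~PAGE_MASK" is the page-alignment check. The same predicate in standalone form:

    #include <linux/mm.h>       /* PAGE_MASK */
    #include <linux/types.h>

    static bool tid_len_ok(u64 len, u64 max_len)
    {
        /* nonzero, page-multiple, and within the negotiated limit */
        return len && !(len & ~PAGE_MASK) && len <= max_len;
    }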
2306 * queue, qpriv->r_tid_alloc follows qp->r_head_ack_queue. It is ok to
2310 qpriv->r_tid_alloc = qp->r_head_ack_queue;
2343 struct hfi1_qp_priv *qpriv = qp->priv;
2362 remote = rcu_dereference(qpriv->tid_rdma.remote);
2853 struct hfi1_qp_priv *qpriv;
2927 qpriv = qp->priv;
2928 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID ||
2929 qpriv->r_tid_tail == qpriv->r_tid_head)
2931 e = &qp->s_ack_queue[qpriv->r_tid_tail];
2949 if (!(qpriv->s_flags & HFI1_R_TID_SW_PSN)) {
2950 qpriv->s_flags |= HFI1_R_TID_SW_PSN;
2954 qpriv->r_next_psn_kdeth =
2973 qpriv->s_nak_state = 0;
2984 qpriv->r_next_psn_kdeth =
3022 if (!qpriv->s_nak_state) {
3023 qpriv->s_nak_state = IB_NAK_PSN_ERROR;
3025 qpriv->s_nak_psn = mask_psn(flow->flow_state.r_next_psn);
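
Refs 3022-3025 latch a NAK exactly once: the reason and PSN are recorded only when no NAK is already pending, so the first error is the one reported. A hedged sketch; IB_NAK_PSN_ERROR is the IBTA "PSN sequence error" syndrome, defined locally here for self-containment:

    #include <linux/types.h>

    #define EX_NAK_PSN_ERROR 0x60

    static void latch_psn_nak(u8 *nak_state, u32 *nak_psn, u32 r_next_psn)
    {
        if (!*nak_state) {
            *nak_state = EX_NAK_PSN_ERROR;
            *nak_psn = r_next_psn;   /* PSN the requester must resend */
        }
    }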
3042 struct hfi1_qp_priv *qpriv = qp->priv;
3098 rvt_skip_sge(&qpriv->tid_ss, (req->cur_seg * req->seg_len) +
3129 i = qpriv->s_tid_tail;
3139 if (i == qpriv->s_tid_cur)
3157 struct hfi1_qp_priv *qpriv = qp->priv;
3167 fs = &qpriv->flow_state;
3169 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
3272 struct hfi1_qp_priv *qpriv = (struct hfi1_qp_priv *)qp->priv;
3277 struct hfi1_pportdata *ppd = qpriv->rcd->ppd;
3282 if (qpriv->hdr_type != HFI1_PKT_TYPE_9B)
3286 remote = rcu_dereference(qpriv->tid_rdma.remote);
3326 qpriv->tid_r_reqs++;
3330 atomic_inc(&qpriv->n_requests);
3358 struct hfi1_qp_priv *qpriv = qp->priv;
3363 remote = rcu_dereference(qpriv->tid_rdma.remote);
3403 static u32 position_in_queue(struct hfi1_qp_priv *qpriv,
3406 return qpriv->tid_enqueue - queue->dequeue;
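
Ref 3406 computes a queue position as enqueue minus dequeue. Both are free-running unsigned counters, so the subtraction stays correct across wraparound; for example, enqueue = 2 after the counter wraps and dequeue = 0xfffffffe gives 2 - 0xfffffffe == 4 (mod 2^32):

    #include <linux/types.h>

    static u32 queue_position(u32 enqueue, u32 dequeue)
    {
        /* wrap-safe distance between two monotonic counters */
        return enqueue - dequeue;
    }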
3416 struct hfi1_qp_priv *qpriv = qp->priv;
3421 bytes_per_us = active_egress_rate(qpriv->rcd->ppd) / 8;
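
Ref 3421 divides a link rate by 8 to get bytes per microsecond. That is consistent with active_egress_rate() reporting Mbit/s, since 1 Mbit/s equals 1 bit/us; a 100 Gb/s link is 100000 Mbit/s, hence 12500 bytes/us. The unit is an assumption read off this usage:

    #include <linux/types.h>

    static u32 rate_to_bytes_per_us(u32 rate_mbps)
    {
        /* Mbit/s == bits/us, so bytes/us is rate / 8 */
        return rate_mbps / 8;
    }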
3448 * [request: qpriv->r_tid_alloc, segment: req->alloc_seg]
3455 struct hfi1_qp_priv *qpriv = qp->priv;
3456 struct hfi1_ctxtdata *rcd = qpriv->rcd;
3457 struct tid_rdma_params *local = &qpriv->tid_rdma.local;
3480 if (qpriv->rnr_nak_state == TID_RNR_NAK_SEND)
3484 if (qpriv->r_tid_alloc == qpriv->r_tid_head) {
3486 if (qpriv->flow_state.index < RXE_NUM_TID_FLOWS &&
3487 !qpriv->alloc_w_segs) {
3489 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3494 e = &qp->s_ack_queue[qpriv->r_tid_alloc];
3505 if (qpriv->alloc_w_segs >= local->max_write)
3509 if (qpriv->sync_pt && qpriv->alloc_w_segs)
3513 if (qpriv->sync_pt && !qpriv->alloc_w_segs) {
3515 qpriv->sync_pt = false;
3516 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
3520 if (qpriv->flow_state.index >= RXE_NUM_TID_FLOWS) {
3521 ret = hfi1_kern_setup_hw_flow(qpriv->rcd, qp);
3524 position_in_queue(qpriv,
3536 if (qpriv->flow_state.psn + npkts > MAX_TID_FLOW_PSN - 1) {
3537 qpriv->sync_pt = true;
3559 to_seg = position_in_queue(qpriv, &rcd->rarr_queue);
3563 qpriv->alloc_w_segs++;
3568 if (++qpriv->r_tid_alloc >
3570 qpriv->r_tid_alloc = 0;
3597 qp->r_head_ack_queue = qpriv->r_tid_alloc + 1;
3600 qpriv->r_tid_head = qp->r_head_ack_queue;
3616 * qpriv->rnr_nak_state is used to determine when the scheduled RNR NAK
3622 qpriv->rnr_nak_state = TID_RNR_NAK_SEND;
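
Refs 3484-3622 belong to one allocation routine: an allocation cursor walks the circular ack queue, stops when the segment budget or the flow PSN space runs out, and marks a sync point so outstanding segments drain before a new generation starts. A simplified, self-contained model of that loop shape; all names, limits, and the decision to break at the sync point are illustrative:

    #include <linux/types.h>

    #define EX_QSIZE     8
    #define EX_MAX_SEGS  4
    #define EX_PSN_SPACE 2048

    struct ex_alloc {
        u32 cursor, head;     /* indices into the circular ack queue */
        u32 outstanding;      /* segments currently programmed */
        u32 next_psn;
        bool sync_pending;    /* drain fully, then restart PSN space */
    };

    static void ex_alloc_resources(struct ex_alloc *s, u32 npkts)
    {
        while (s->cursor != s->head) {
            if (s->outstanding >= EX_MAX_SEGS)
                break;                  /* budget spent; retry later */
            if (s->sync_pending && s->outstanding)
                break;                  /* must drain before restarting */
            if (s->sync_pending) {      /* drained: new generation */
                s->sync_pending = false;
                s->next_psn = 0;
            }
            if (s->next_psn + npkts > EX_PSN_SPACE - 1) {
                s->sync_pending = true; /* cf. refs 3536-3537 */
                break;                  /* resume after the drain */
            }
            s->next_psn += npkts;       /* segment allocated */
            s->outstanding++;
            if (++s->cursor >= EX_QSIZE)
                s->cursor = 0;          /* queue wraps */
        }
    }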
3654 struct hfi1_qp_priv *qpriv = qp->priv;
3680 num_segs = DIV_ROUND_UP(len, qpriv->tid_rdma.local.max_len);
3692 if (qpriv->rnr_nak_state)
3711 if (qpriv->rnr_nak_state) {
3714 qpriv->rnr_nak_state = TID_RNR_NAK_INIT;
3745 req->n_flows = min_t(u16, num_segs, qpriv->tid_rdma.local.max_write);
3752 req->seg_len = qpriv->tid_rdma.local.max_len;
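
Refs 3680 and 3745-3752 size a write request: the payload is cut into max_len-sized segments, and the number of concurrent flows is capped by the negotiated max_write. Worked example: a 1 MiB request with a 256 KiB max_len gives DIV_ROUND_UP(1M, 256K) = 4 segments; with max_write = 2, only two flows run at once and the rest queue behind them:

    #include <linux/kernel.h>   /* DIV_ROUND_UP, min_t */

    static u16 write_req_flows(u32 len, u32 max_len, u16 max_write)
    {
        u32 num_segs = DIV_ROUND_UP(len, max_len);

        return min_t(u16, num_segs, max_write);
    }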
3776 if (qpriv->r_tid_tail == HFI1_QP_WQE_INVALID) {
3777 qpriv->r_tid_tail = qp->r_head_ack_queue;
3778 } else if (qpriv->r_tid_tail == qpriv->r_tid_head) {
3781 e = &qp->s_ack_queue[qpriv->r_tid_tail];
3786 if (qpriv->r_tid_tail == qpriv->r_tid_ack)
3787 qpriv->r_tid_ack = qp->r_head_ack_queue;
3788 qpriv->r_tid_tail = qp->r_head_ack_queue;
3793 qpriv->r_tid_head = qp->r_head_ack_queue;
3830 struct hfi1_qp_priv *qpriv = qp->priv;
3857 if (qpriv->rnr_nak_state == TID_RNR_NAK_SENT)
3902 remote = rcu_dereference(qpriv->tid_rdma.remote);
3913 cpu_to_be32(qpriv->tid_rdma.local.qp |
3916 qpriv->rcd->ctxt);
3921 qpriv->pending_tid_w_segs++;
3928 struct hfi1_qp_priv *qpriv = qp->priv;
3931 if (!(qpriv->s_flags & HFI1_R_TID_RSC_TIMER)) {
3932 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3933 qpriv->s_tid_timer.expires = jiffies +
3934 qpriv->tid_timer_timeout_jiffies;
3935 add_timer(&qpriv->s_tid_timer);
3941 struct hfi1_qp_priv *qpriv = qp->priv;
3944 qpriv->s_flags |= HFI1_R_TID_RSC_TIMER;
3945 mod_timer(&qpriv->s_tid_timer, jiffies +
3946 qpriv->tid_timer_timeout_jiffies);
3951 struct hfi1_qp_priv *qpriv = qp->priv;
3955 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
3956 rval = del_timer(&qpriv->s_tid_timer);
3957 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
3964 struct hfi1_qp_priv *qpriv = qp->priv;
3966 del_timer_sync(&qpriv->s_tid_timer);
3967 qpriv->s_flags &= ~HFI1_R_TID_RSC_TIMER;
3972 struct hfi1_qp_priv *qpriv = from_timer(qpriv, t, s_tid_timer);
3973 struct rvt_qp *qp = qpriv->owner;
3980 if (qpriv->s_flags & HFI1_R_TID_RSC_TIMER) {
3985 (u64)qpriv->tid_timer_timeout_jiffies);
3991 hfi1_kern_clear_hw_flow(qpriv->rcd, qp);
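
Refs 3928-3991 form the resource-timer cluster: a bit in the QP's flag word records whether the timer is armed, so "start" is idempotent, "stop" only deletes an armed timer, and both run under the same lock, while the sync teardown at ref 3966 waits out a running handler. A hedged sketch of that flag-guarded pattern under illustrative names:

    #include <linux/bits.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>

    #define EX_TIMER_ARMED BIT(0)

    struct ex_timed {
        struct timer_list timer;
        unsigned long flags;
        unsigned long timeout_jiffies;
    };

    static void ex_timer_start(struct ex_timed *t)
    {
        if (!(t->flags & EX_TIMER_ARMED)) {
            t->flags |= EX_TIMER_ARMED;
            t->timer.expires = jiffies + t->timeout_jiffies;
            add_timer(&t->timer);
        }
    }

    static int ex_timer_stop(struct ex_timed *t)
    {
        int rval = 0;

        if (t->flags & EX_TIMER_ARMED) {
            rval = del_timer(&t->timer);  /* may race with expiry */
            t->flags &= ~EX_TIMER_ARMED;
        }
        return rval;
    }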
4030 struct hfi1_qp_priv *qpriv = qp->priv;
4069 wqe = rvt_get_swqe_ptr(qp, qpriv->s_tid_cur);
4166 if (qpriv->s_tid_cur != qpriv->s_tid_head &&
4168 for (i = qpriv->s_tid_cur + 1; ; i++) {
4172 if (i == qpriv->s_tid_head)
4177 qpriv->s_tid_cur = i;
4201 struct hfi1_qp_priv *qpriv = qp->priv;
4222 remote = rcu_dereference(qpriv->tid_rdma.remote);
4418 struct hfi1_qp_priv *qpriv = qp->priv;
4419 struct tid_flow_state *fs = &qpriv->flow_state;
4425 remote = rcu_dereference(qpriv->tid_rdma.remote);
4431 if (qpriv->resync) {
4435 } else if (qpriv->s_nak_state) {
4436 *bth2 = mask_psn(qpriv->s_nak_psn);
4439 (qpriv->s_nak_state <<
4447 cpu_to_be32(qpriv->tid_rdma.local.qp |
4450 qpriv->rcd->ctxt);
4456 if (qpriv->resync) {
4463 if (hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1)) {
4465 cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4473 qpriv->r_next_psn_kdeth_save =
4474 qpriv->r_next_psn_kdeth - 1;
4476 cpu_to_be32(qpriv->r_next_psn_kdeth_save);
4477 qpriv->r_next_psn_kdeth = mask_psn(*bth2 + 1);
4479 qpriv->resync = false;
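
Refs 4463-4479 handle entry into the reserved resync PSN range: the last real KDETH PSN is stashed in r_next_psn_kdeth_save so it can still be reported, and the tracked PSN resumes just past the resync PSN carried in the header. A sketch of that save-and-jump step; names are illustrative and mask_psn() is modeled as a 24-bit mask:

    #include <linux/types.h>

    static void ex_enter_resync(u32 *r_next_psn, u32 *saved, u32 resync_psn)
    {
        *saved = *r_next_psn - 1;                /* last PSN received */
        *r_next_psn = (resync_psn + 1) & 0xffffff;
    }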
4489 struct hfi1_qp_priv *qpriv = qp->priv;
4509 cmp_psn(psn, qpriv->s_resync_psn))
4556 atomic_dec(&qpriv->n_tid_requests);
4571 if (qpriv->s_flags & RVT_S_WAIT_ACK)
4572 qpriv->s_flags &= ~RVT_S_WAIT_ACK;
4595 qpriv->s_flags &= ~RVT_S_SEND_ONE;
4598 if ((qp->s_acked == qpriv->s_tid_tail &&
4601 qpriv->s_state = TID_OP(WRITE_DATA_LAST);
4606 qpriv->s_state = TID_OP(WRITE_DATA);
4688 if (++last_acked == qpriv->s_tid_cur + 1)
4696 qpriv->s_tid_tail = qp->s_acked;
4697 qpriv->s_state = TID_OP(WRITE_REQ);
4701 qpriv->s_retry = qp->s_retry_cnt;
4719 qpriv->s_tid_tail = qp->s_acked;
4720 qpriv->s_state = TID_OP(WRITE_REQ);
4721 qpriv->s_retry = qp->s_retry_cnt;
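
Refs 4696-4721 show the rewind-on-retry step appearing twice: the TID write state machine resumes from the oldest unacked WQE (qp->s_acked), restarts at the WRITE_REQ state, and reloads the retry budget. A sketch with an illustrative structure:

    #include <linux/types.h>

    struct ex_sender {
        u32 s_tid_tail;   /* oldest TID request still in flight */
        u32 s_state;      /* next opcode the state machine emits */
        u32 s_retry;      /* retries left */
    };

    static void ex_tid_rewind(struct ex_sender *s, u32 s_acked,
                              u32 write_req_op, u32 retry_cnt)
    {
        s->s_tid_tail = s_acked;    /* back up to the oldest unacked WQE */
        s->s_state = write_req_op;  /* re-issue the TID RDMA WRITE REQ */
        s->s_retry = retry_cnt;     /* fresh retry budget */
    }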
4835 struct hfi1_qp_priv *qpriv = qp->priv;
4842 remote = rcu_dereference(qpriv->tid_rdma.remote);
4850 qpriv->s_resync_psn = *bth2;
4861 struct hfi1_qp_priv *qpriv = qp->priv;
4862 struct hfi1_ctxtdata *rcd = qpriv->rcd;
4867 struct tid_flow_state *fs = &qpriv->flow_state;
4888 if (qpriv->resync)
4908 qpriv->s_flags &= ~HFI1_R_TID_SW_PSN;
4916 for (idx = qpriv->r_tid_tail; ; idx++) {
4957 qpriv->resync = true;
4959 qpriv->s_nak_state = 0;
5179 struct hfi1_qp_priv *qpriv = qp->priv;
5196 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5206 * to pull back qpriv->r_tid_ack, not the segment
5210 if (qpriv->resync) {
5212 qpriv->r_tid_ack = !qpriv->r_tid_ack ?
5214 qpriv->r_tid_ack - 1;
5215 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5226 if (!qpriv->s_nak_state && !qpriv->resync &&
5254 next = qpriv->r_tid_ack + 1;
5257 qpriv->r_tid_ack = next;
5265 e = &qp->s_ack_queue[qpriv->r_tid_ack];
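
The index arithmetic at refs 5212-5214 and 5254-5257 treats the ack queue as circular: stepping back from slot 0 lands on the last slot, and stepping past the last slot wraps to 0. In standalone form, with "size" standing in for the device's ack-queue depth:

    #include <linux/types.h>

    static u32 ex_ackq_prev(u32 idx, u32 size)
    {
        return idx ? idx - 1 : size - 1;
    }

    static u32 ex_ackq_next(u32 idx, u32 size)
    {
        return idx + 1 >= size ? 0 : idx + 1;
    }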
5270 * At this point qpriv->r_tid_ack == qpriv->r_tid_tail but e and
5273 if (qpriv->s_nak_state ||
5274 (qpriv->resync &&
5275 !hfi1_tid_rdma_is_resync_psn(qpriv->r_next_psn_kdeth - 1) &&
5276 (cmp_psn(qpriv->r_next_psn_kdeth - 1,
5282 * segment for the request at qpriv->r_tid_ack (same at
5284 * qpriv->r_tid_tail request)
5286 e = &qp->s_ack_queue[qpriv->r_tid_ack];
5290 qpriv->s_flags & HFI1_R_TID_WAIT_INTERLCK)
5291 qpriv->s_flags &= ~HFI1_R_TID_WAIT_INTERLCK;
5299 qpriv->s_flags &= ~RVT_S_ACK_PENDING;
5301 ps->s_txreq->sde = qpriv->s_sde;
5314 qpriv->s_flags &= ~RVT_S_ACK_PENDING;