
Search for refs:s_acked (results 1 - 16 of 16), sorted by relevance

/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
qib_rc.c
252 rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in qib_make_rc_req()
737 u32 n = qp->s_acked; in reset_psn()
822 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in qib_restart_rc()
829 } else if (qp->s_last == qp->s_acked) { in qib_restart_rc()
912 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && in qib_rc_send_complete()
917 while (qp->s_last != qp->s_acked) { in qib_rc_send_complete()
976 if (qp->s_acked == qp->s_cur) { in do_rc_completion()
979 qp->s_acked = qp->s_cur; in do_rc_completion()
981 if (qp->s_acked != qp->s_tail) { in do_rc_completion()
986 if (++qp->s_acked > in do_rc_completion()
[all...]
qib_qp.c
448 qp->s_last, qp->s_acked, qp->s_cur, in qib_qp_iter_print()
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/
qib_rc.c
253 rvt_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in qib_make_rc_req()
738 u32 n = qp->s_acked; in reset_psn()
823 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in qib_restart_rc()
830 } else if (qp->s_last == qp->s_acked) { in qib_restart_rc()
913 if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && in qib_rc_send_complete()
918 while (qp->s_last != qp->s_acked) { in qib_rc_send_complete()
977 if (qp->s_acked == qp->s_cur) { in do_rc_completion()
980 qp->s_acked = qp->s_cur; in do_rc_completion()
982 if (qp->s_acked != qp->s_tail) { in do_rc_completion()
987 if (++qp->s_acked > in do_rc_completion()
[all...]
qib_qp.c
448 qp->s_last, qp->s_acked, qp->s_cur, in qib_qp_iter_print()
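
The qib hits above all treat s_acked as one cursor into the RC send queue, compared against s_last, s_cur and s_tail and advanced with wraparound in do_rc_completion(). Below is a minimal, self-contained sketch of that cursor layout: struct toy_sq and its helpers are hypothetical stand-ins for struct rvt_qp, only the s_* field names and the "last un-ACK'ed entry" comment come from the hits, and the descriptions of the other cursors are assumptions.

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical stand-in for the send-queue cursors of struct rvt_qp
 * (the real definition is in rdmavt_qp.h, see the hits further down).
 * The send queue is a circular array of work requests; s_acked trails
 * the newer cursors and marks the "last un-ACK'ed entry".
 */
struct toy_sq {
	uint32_t s_size;   /* ring capacity */
	uint32_t s_head;   /* assumed: next free slot for a newly posted request */
	uint32_t s_tail;   /* assumed: next request to be (re)sent */
	uint32_t s_cur;    /* assumed: request currently being worked on */
	uint32_t s_acked;  /* last un-ACK'ed entry (comment from rdmavt_qp.h) */
	uint32_t s_last;   /* assumed: oldest request not yet completed to the user */
};

/*
 * Circular advance of a cursor, mirroring the truncated
 * "if (++qp->s_acked >" pattern in the do_rc_completion() hits;
 * that it wraps at s_size is an assumption.
 */
static uint32_t toy_advance(const struct toy_sq *sq, uint32_t idx)
{
	return (idx + 1 >= sq->s_size) ? 0 : idx + 1;
}

/*
 * When every cursor points at the same slot there is no outstanding
 * work.  The qp_idle() hits below compare s_last, s_acked and s_cur;
 * extending the chain to s_tail and s_head is an assumption.
 */
static bool toy_sq_idle(const struct toy_sq *sq)
{
	return sq->s_last == sq->s_acked &&
	       sq->s_acked == sq->s_cur &&
	       sq->s_cur == sq->s_tail &&
	       sq->s_tail == sq->s_head;
}
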
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/
rc.c
491 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in hfi1_make_rc_req()
1502 u32 n = qp->s_acked; in reset_psn()
1609 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_restart_rc()
1619 } else if (qp->s_last == qp->s_acked) { in hfi1_restart_rc()
1792 tail = qp->s_acked; in hfi1_rc_send_complete()
1823 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_send_complete()
1830 while (qp->s_last != qp->s_acked) { in hfi1_rc_send_complete()
1924 if (qp->s_acked == qp->s_cur) { in do_rc_completion()
1927 qp->s_acked = qp->s_cur; in do_rc_completion()
1929 if (qp->s_acked ! in do_rc_completion()
[all...]
tid_rdma.c
2418 for (i = qp->s_acked; i != end;) {
2559 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail in hfi1_rc_rcv_tid_rdma_read_resp()
2561 * qp->s_acked here. Putting the qp into error state will safely flush in hfi1_rc_rcv_tid_rdma_read_resp()
2564 if (qp->s_last == qp->s_acked) in hfi1_rc_rcv_tid_rdma_read_resp()
2574 u32 n = qp->s_acked;
2682 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in __must_hold()
2722 if (qp->s_acked == qp->s_tail) in __must_hold()
2726 if (qp->s_acked == qp->s_tail) in __must_hold()
3171 for (i = qp->s_acked; i != qp->s_head;) { in hfi1_qp_kern_exp_rcv_clear_all()
3235 if (qp->s_acked ! in hfi1_tid_rdma_wqe_interlock()
[all...]
qp.c
628 qp->s_last == qp->s_acked && in qp_idle()
629 qp->s_acked == qp->s_cur && in qp_idle()
675 qp->s_last, qp->s_acked, qp->s_cur, in qp_iter_print()
trace_tid.h
48 "s_head %u s_acked %u s_last %u s_psn 0x%x " \
877 __field(u32, s_acked)
895 __entry->s_acked = qp->s_acked;
915 __entry->s_acked,
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/
rc.c
450 hfi1_trdma_send_complete(qp, wqe, qp->s_last != qp->s_acked ? in hfi1_make_rc_req()
1460 u32 n = qp->s_acked; in reset_psn()
1567 struct rvt_swqe *wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_restart_rc()
1577 } else if (qp->s_last == qp->s_acked) { in hfi1_restart_rc()
1750 tail = qp->s_acked; in hfi1_rc_send_complete()
1781 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in hfi1_rc_send_complete()
1788 while (qp->s_last != qp->s_acked) { in hfi1_rc_send_complete()
1882 if (qp->s_acked == qp->s_cur) { in do_rc_completion()
1885 qp->s_acked = qp->s_cur; in do_rc_completion()
1887 if (qp->s_acked ! in do_rc_completion()
[all...]
tid_rdma.c
2425 for (i = qp->s_acked; i != end;) {
2566 * state. However, if the wqe queue is empty (qp->s_acked == qp->s_tail in hfi1_rc_rcv_tid_rdma_read_resp()
2568 * qp->s_acked here. Putting the qp into error state will safely flush in hfi1_rc_rcv_tid_rdma_read_resp()
2571 if (qp->s_last == qp->s_acked) in hfi1_rc_rcv_tid_rdma_read_resp()
2581 u32 n = qp->s_acked;
2689 wqe = rvt_get_swqe_ptr(qp, qp->s_acked); in __must_hold()
2729 if (qp->s_acked == qp->s_tail) in __must_hold()
2733 if (qp->s_acked == qp->s_tail) in __must_hold()
3180 for (i = qp->s_acked; i != qp->s_head;) { in hfi1_qp_kern_exp_rcv_clear_all()
3245 if (qp->s_acked ! in hfi1_tid_rdma_wqe_interlock()
[all...]
qp.c
587 qp->s_last == qp->s_acked && in qp_idle()
588 qp->s_acked == qp->s_cur && in qp_idle()
634 qp->s_last, qp->s_acked, qp->s_cur, in qp_iter_print()
trace_tid.h
48 "s_head %u s_acked %u s_last %u s_psn 0x%x " \
877 __field(u32, s_acked)
895 __entry->s_acked = qp->s_acked;
915 __entry->s_acked,
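
Both rc.c versions above retire completed requests with a "while (qp->s_last != qp->s_acked)" walk. The following is a minimal sketch of that idea, assuming the loop simply advances s_last with wraparound until it catches up with s_acked; the toy_* names are hypothetical and the wrap-at-size rule is an assumption.

#include <stdint.h>

/* Hypothetical circular advance over a ring of `size` entries. */
static uint32_t toy_next(uint32_t idx, uint32_t size)
{
	return (idx + 1 >= size) ? 0 : idx + 1;
}

/*
 * Sketch of the walk in qib_rc_send_complete()/hfi1_rc_send_complete():
 * every request older than s_acked has been ACKed, so s_last may retire
 * entries until it reaches s_acked.  Returns the new s_last.
 */
static uint32_t toy_retire_acked(uint32_t s_last, uint32_t s_acked, uint32_t size)
{
	while (s_last != s_acked) {
		/* a real driver would generate a work completion for the
		 * request at s_last before moving past it */
		s_last = toy_next(s_last, size);
	}
	return s_last;
}
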
/kernel/linux/linux-5.10/include/rdma/
rdmavt_qp.h
420 u32 s_acked; /* last un-ACK'ed entry */ (member)
/kernel/linux/linux-6.6/include/rdma/
rdmavt_qp.h
420 u32 s_acked; /* last un-ACK'ed entry */ (member)
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/
qp.c
893 qp->s_acked = 0; in rvt_init_qp()
2002 qp->s_acked, qp->s_last); in rvt_qp_is_avail()
2843 if (qp->s_acked == old_last) in rvt_send_complete()
2844 qp->s_acked = last; in rvt_send_complete()
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/
qp.c
854 qp->s_acked = 0; in rvt_init_qp()
1950 qp->s_acked, qp->s_last); in rvt_qp_is_avail()
2792 if (qp->s_acked == old_last) in rvt_send_complete()
2793 qp->s_acked = last; in rvt_send_complete()
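
The rvt_send_complete() hits directly above show that when the request at old_last is completed, s_acked is pulled forward along with s_last so it never points at a retired slot. Here is a minimal sketch of that pattern; only the s_acked update is visible in the hits, so applying the same drag to s_cur and s_tail is an assumption, and struct toy_cursors is a hypothetical stand-in for struct rvt_qp.

#include <stdint.h>

/* Hypothetical stand-in for the rvt_qp send-queue cursors. */
struct toy_cursors {
	uint32_t s_last, s_acked, s_cur, s_tail;
};

/*
 * Sketch of the rvt_send_complete() pattern: the request at old_last has
 * just been completed, s_last moves to `last`, and any cursor still
 * parked on old_last is dragged along so nothing references a freed slot.
 */
static void toy_send_complete(struct toy_cursors *c, uint32_t old_last, uint32_t last)
{
	c->s_last = last;
	if (c->s_acked == old_last)  /* mirrors "if (qp->s_acked == old_last)" */
		c->s_acked = last;
	if (c->s_cur == old_last)    /* assumption: same treatment for s_cur */
		c->s_cur = last;
	if (c->s_tail == old_last)   /* assumption: same treatment for s_tail */
		c->s_tail = last;
}
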
