/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/  (matches for "qhp")
  qp.c
      87  static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
      90          spin_lock_irqsave(&qhp->lock, flag);
      91          qhp->attr.state = state;
      92          spin_unlock_irqrestore(&qhp->lock, flag);
     690  static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
     693          qhp->sq_sig_all;
     695          qhp->sq_sig_all;
     706          wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
     707                  qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
     708          build_rdma_write_cmpl(&qhp ...
     759  build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)
     912  ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
     929  ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
     979  complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
    1018  complete_sq_drain_wrs(struct c4iw_qp *qhp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr)
    1035  complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
    1068  complete_rq_drain_wrs(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
    1084  struct c4iw_qp *qhp;    (local in c4iw_post_send())
    1265  struct c4iw_qp *qhp;    (local in c4iw_post_receive())
    1560  post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, gfp_t gfp)
    1596  __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, struct c4iw_cq *schp)
    1660  flush_qp(struct c4iw_qp *qhp)
    1692  rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
    1749  rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
    1832  c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal)
    2074  struct c4iw_qp *qhp;    (local in c4iw_destroy_qp())
    2116  struct c4iw_qp *qhp;    (local in c4iw_create_qp())
    2372  struct c4iw_qp *qhp;    (local in c4iw_ib_modify_qp())
    2467  struct c4iw_qp *qhp = to_c4iw_qp(ibqp);    (local in c4iw_ib_query_qp())
    [all...]
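  The set_state() hits at qp.c:87-92 show the locking convention this file uses for QP state changes: qhp->attr.state is only written under qhp->lock with interrupts saved. The sketch below mirrors that pattern and is only an illustration; struct my_qp and enum my_qp_state are simplified stand-ins for the driver's c4iw_qp and c4iw_qp_state, not the real definitions.

    #include <linux/spinlock.h>

    /* Simplified stand-ins for the driver's c4iw_qp / c4iw_qp_state. */
    enum my_qp_state { MY_QP_IDLE, MY_QP_RTS, MY_QP_ERROR };

    struct my_qp {
            spinlock_t lock;        /* plays the role of qhp->lock */
            struct {
                    enum my_qp_state state;
            } attr;
    };

    /*
     * Same shape as set_state(): the state word is written only while the
     * per-QP spinlock is held with interrupts saved, so readers in process
     * and IRQ context see a consistent value.
     */
    static void my_set_state(struct my_qp *qhp, enum my_qp_state state)
    {
            unsigned long flag;

            spin_lock_irqsave(&qhp->lock, flag);
            qhp->attr.state = state;
            spin_unlock_irqrestore(&qhp->lock, flag);
    }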
  ev.c
      88          struct c4iw_qp *qhp,    (post_qp_event() argument)
      98      if (qhp->attr.state == C4IW_QP_STATE_RTS) {    (in post_qp_event())
     100          c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
     109      event.element.qp = &qhp->ibqp;
     110      if (qhp->ibqp.event_handler)
     111          (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
     123      struct c4iw_qp *qhp;    (local in c4iw_ev_dispatch())
     127      qhp ...    (in c4iw_ev_dispatch())
      87  post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp *qhp, struct t4_cqe *err_cqe, enum ib_event_type ib_event)
    [all...]
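  The post_qp_event() hits at ev.c:98-111 outline how an async error is surfaced: if the QP is still in RTS it is moved along via c4iw_modify_qp(), then an ib_event is filled in and handed to the handler the verbs consumer registered on the QP. The sketch below condenses only that dispatch step; it uses the in-kernel ib_qp/ib_event definitions from <rdma/ib_verbs.h> but is not the driver's function, and the caller chooses the event type (for example IB_EVENT_QP_FATAL).

    #include <rdma/ib_verbs.h>

    /*
     * Condensed version of the dispatch step at ev.c:109-111: build an
     * ib_event that points at the affected QP and pass it to whatever
     * event handler the consumer registered, together with its context.
     * The state handling, CQ events and locking done by post_qp_event()
     * are deliberately omitted here.
     */
    static void report_qp_event(struct ib_qp *ibqp, enum ib_event_type type)
    {
            struct ib_event event = {};

            event.device = ibqp->device;
            event.element.qp = ibqp;
            event.event = type;

            if (ibqp->event_handler)
                    ibqp->event_handler(&event, ibqp->qp_context);
    }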
  cq.c
     238  int c4iw_flush_sq(struct c4iw_qp *qhp)
     241          struct t4_wq *wq = &qhp->wq;
     242          struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
     338          struct c4iw_qp *qhp;    (local in c4iw_flush_hw_cq())
     351          qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
     356          if (qhp == NULL)
     359          if (flush_qhp != qhp) {
     360                  spin_lock(&qhp->lock);
     362          if (qhp->wq.flushed == 1)
     386          if (!qhp ...
     754  __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq)
     924          struct c4iw_qp *qhp = NULL;    (local in c4iw_poll_cq_one())
    1170  c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
    [all...]
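  The c4iw_flush_hw_cq() hits (cq.c:338-386) show the flush path resolving which QP owns a hardware CQE via get_qhp() and the CQE's QPID, skipping QPs that are gone or already flushed, and taking qhp->lock before touching the work queue. The sketch below mirrors that lookup-then-lock shape with invented stand-in types (my_dev, my_qp) and a plain xarray; it is not the driver's get_qhp() or flush logic.

    #include <linux/spinlock.h>
    #include <linux/types.h>
    #include <linux/xarray.h>

    /* Simplified stand-ins for c4iw_dev / c4iw_qp. */
    struct my_qp {
            spinlock_t lock;
            bool flushed;
    };

    struct my_dev {
            struct xarray qps;      /* QPID -> struct my_qp * */
    };

    /*
     * Same shape as the cq.c flush path: look up the owner of a CQE by its
     * QPID, bail out if the QP has been destroyed or was already flushed,
     * otherwise do the flush work under the QP's own lock.
     */
    static void flush_cqe_owner(struct my_dev *dev, u32 qpid)
    {
            struct my_qp *qhp = xa_load(&dev->qps, qpid);

            if (!qhp)
                    return;                 /* QP already freed */

            spin_lock(&qhp->lock);
            if (!qhp->flushed) {
                    /* ... move outstanding WQEs to the software queue ... */
                    qhp->flushed = true;
            }
            spin_unlock(&qhp->lock);
    }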
  restrack.c
     140          struct c4iw_qp *qhp = to_c4iw_qp(ibqp);    (local in c4iw_fill_res_qp_entry())
     147          if (qhp->ucontext)
     155          spin_lock_irq(&qhp->lock);
     156          wq = qhp->wq;
     161          first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
     167          last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
     171          spin_unlock_irq(&qhp->lock);
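  The restrack.c hits (lines 140-171) show c4iw_fill_res_qp_entry() snapshotting what it wants to report while holding qhp->lock with IRQs disabled (the struct copy at line 156 plus the first and last software SQ entries), so the netlink formatting can happen after the lock is dropped. Below is a small sketch of that snapshot-under-lock idea; the types (my_qp, my_swsqe), queue depth and index math are invented for illustration and do not match the driver's t4_wq layout.

    #include <linux/spinlock.h>
    #include <linux/types.h>

    #define SQ_DEPTH 64                     /* illustrative queue depth */

    /* Simplified stand-ins for the driver's t4_swsqe / t4_wq state. */
    struct my_swsqe {
            u64 wr_id;
            int opcode;
    };

    struct my_qp {
            spinlock_t lock;
            u16 cidx, pidx;
            struct my_swsqe sw_sq[SQ_DEPTH];
    };

    /*
     * Snapshot-under-lock, as in c4iw_fill_res_qp_entry(): copy the queue
     * indices and the oldest/newest software SQ entries while holding the
     * QP lock with IRQs off, then let the caller format its report without
     * the lock held.
     */
    static void snapshot_sq(struct my_qp *qhp, u16 *cidx, u16 *pidx,
                            struct my_swsqe *first, struct my_swsqe *last)
    {
            spin_lock_irq(&qhp->lock);
            *cidx = qhp->cidx;
            *pidx = qhp->pidx;
            *first = qhp->sw_sq[qhp->cidx % SQ_DEPTH];
            *last = qhp->sw_sq[(qhp->pidx + SQ_DEPTH - 1) % SQ_DEPTH];
            spin_unlock_irq(&qhp->lock);
    }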
  iw_cxgb4.h
     604          struct c4iw_qp *qhp,
    1023  int c4iw_flush_sq(struct c4iw_qp *qhp);
    1025  u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
    1026  int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
    1050  void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);
/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/  (matches for "qhp")
  qp.c
      87  static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
      90          spin_lock_irqsave(&qhp->lock, flag);
      91          qhp->attr.state = state;
      92          spin_unlock_irqrestore(&qhp->lock, flag);
     690  static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
     693          qhp->sq_sig_all;
     695          qhp->sq_sig_all;
     706          wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
     707                  qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
     708          build_rdma_write_cmpl(&qhp ...
     759  build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe, const struct ib_recv_wr *wr, u8 *len16)
     912  ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
     929  ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
     979  complete_sq_drain_wr(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
    1018  complete_sq_drain_wrs(struct c4iw_qp *qhp, const struct ib_send_wr *wr, const struct ib_send_wr **bad_wr)
    1035  complete_rq_drain_wr(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
    1068  complete_rq_drain_wrs(struct c4iw_qp *qhp, const struct ib_recv_wr *wr)
    1084  struct c4iw_qp *qhp;    (local in c4iw_post_send())
    1265  struct c4iw_qp *qhp;    (local in c4iw_post_receive())
    1560  post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe, gfp_t gfp)
    1596  __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp, struct c4iw_cq *schp)
    1660  flush_qp(struct c4iw_qp *qhp)
    1692  rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp, struct c4iw_ep *ep)
    1749  rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
    1832  c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp, enum c4iw_qp_attr_mask mask, struct c4iw_qp_attributes *attrs, int internal)
    2073  struct c4iw_qp *qhp;    (local in c4iw_destroy_qp())
    2114  struct c4iw_qp *qhp = to_c4iw_qp(qp);    (local in c4iw_create_qp())
    2360  struct c4iw_qp *qhp;    (local in c4iw_ib_modify_qp())
    2458  struct c4iw_qp *qhp = to_c4iw_qp(ibqp);    (local in c4iw_ib_query_qp())
    [all...]
  ev.c
      88          struct c4iw_qp *qhp,    (post_qp_event() argument)
      98      if (qhp->attr.state == C4IW_QP_STATE_RTS) {    (in post_qp_event())
     100          c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE,
     109      event.element.qp = &qhp->ibqp;
     110      if (qhp->ibqp.event_handler)
     111          (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context);
     123      struct c4iw_qp *qhp;    (local in c4iw_ev_dispatch())
     127      qhp ...    (in c4iw_ev_dispatch())
      87  post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp *qhp, struct t4_cqe *err_cqe, enum ib_event_type ib_event)
    [all...]
  cq.c
     238  int c4iw_flush_sq(struct c4iw_qp *qhp)
     241          struct t4_wq *wq = &qhp->wq;
     242          struct c4iw_cq *chp = to_c4iw_cq(qhp->ibqp.send_cq);
     338          struct c4iw_qp *qhp;    (local in c4iw_flush_hw_cq())
     351          qhp = get_qhp(chp->rhp, CQE_QPID(hw_cqe));
     356          if (qhp == NULL)
     359          if (flush_qhp != qhp) {
     360                  spin_lock(&qhp->lock);
     362          if (qhp->wq.flushed == 1)
     386          if (!qhp ...
     754  __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp, struct ib_wc *wc, struct c4iw_srq *srq)
     924          struct c4iw_qp *qhp = NULL;    (local in c4iw_poll_cq_one())
    1176  c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx)
    [all...]
  restrack.c
     140          struct c4iw_qp *qhp = to_c4iw_qp(ibqp);    (local in c4iw_fill_res_qp_entry())
     147          if (qhp->ucontext)
     155          spin_lock_irq(&qhp->lock);
     156          wq = qhp->wq;
     161          first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
     167          last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
     171          spin_unlock_irq(&qhp->lock);
  iw_cxgb4.h
     598          struct c4iw_qp *qhp,
    1009  int c4iw_flush_sq(struct c4iw_qp *qhp);
    1011  u16 c4iw_rqes_posted(struct c4iw_qp *qhp);
    1012  int c4iw_post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe);
    1036  void c4iw_flush_srqidx(struct c4iw_qp *qhp, u32 srqidx);