/kernel/linux/linux-5.10/drivers/infiniband/hw/qib/
    (QP-related hits; each entry lists the matching functions, with source line numbers in parentheses)

    qib_rc.c: qib_make_rc_ack() (62), qib_make_rc_req() (215), qib_send_rc_ack() (597),
        reset_psn() (735), qib_restart_rc() (820), reset_sending_psn() (856),
        qib_rc_send_complete() (881), update_last_psn() (940), do_rc_completion() (950),
        do_rc_ack() (1006), rdma_seq_err() (1223), qib_rc_rcv_resp() (1267),
        qib_rc_rcv_error() (1490), qib_update_ack_queue() (1687), qib_rc_rcv() (1711)

    qib_uc.c: qib_make_uc_req() (48), qib_uc_rcv() (237)

    qib_ruc.c: qib_migrate_qp() (44), qib_ruc_check_hdr() (82), qib_make_ruc_header() (206),
        _qib_do_send() (250), qib_do_send() (263)
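The qib_ruc.c hit shows the path-migration helper switching a connected QP over to its alternate path. Below is a sketch of qib_migrate_qp() (qib_ruc.c:44) reassembled from that fragment; the first half is taken from the hit, while the event dispatch at the end is the usual IB_EVENT_PATH_MIG pattern and is an assumption to verify against the file.

    // Sketch reassembled from the qib_ruc.c fragment above (linux-5.10).
    #include <rdma/ib_verbs.h>
    #include <rdma/rdmavt_qp.h>

    void qib_migrate_qp(struct rvt_qp *qp)
    {
            struct ib_event ev;

            /* Promote the alternate path to the primary path. */
            qp->s_mig_state = IB_MIG_MIGRATED;
            qp->remote_ah_attr = qp->alt_ah_attr;
            qp->port_num = rdma_ah_get_port_num(&qp->alt_ah_attr);
            qp->s_pkey_index = qp->s_alt_pkey_index;

            /* Notify the consumer that the QP has migrated (assumed tail). */
            ev.device = qp->ibqp.device;
            ev.element.qp = &qp->ibqp;
            ev.event = IB_EVENT_PATH_MIG;
            qp->ibqp.event_handler(&ev, qp->ibqp.qp_context);
    }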
/kernel/linux/linux-6.6/drivers/infiniband/hw/qib/

    qib_rc.c: qib_make_rc_ack() (62), qib_make_rc_req() (216), qib_send_rc_ack() (598),
        reset_psn() (736), qib_restart_rc() (821), reset_sending_psn() (857),
        qib_rc_send_complete() (882), update_last_psn() (941), do_rc_completion() (951),
        do_rc_ack() (1007), rdma_seq_err() (1224), qib_rc_rcv_resp() (1269),
        qib_rc_rcv_error() (1493), qib_update_ack_queue() (1690), qib_rc_rcv() (1714)

    qib_uc.c: qib_make_uc_req() (49), qib_uc_rcv() (238)

    qib_ruc.c: qib_migrate_qp() (44), qib_ruc_check_hdr() (82), qib_make_ruc_header() (206),
        _qib_do_send() (250), qib_do_send() (263)
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/

    rxe_qp.c: alloc_rd_atomic_resources() (103), free_rd_atomic_resources() (115),
        cleanup_rd_atomic_resources() (135), rxe_qp_init_misc() (148), rxe_init_sq() (186),
        rxe_qp_init_req() (235), rxe_init_rq() (279), rxe_qp_init_resp() (324),
        rxe_qp_from_init() (349), rxe_qp_to_init() (414), rxe_qp_chk_attr() (439),
        rxe_qp_reset() (514), rxe_qp_error() (557), rxe_qp_sqd() (571), __qp_chk_state() (584),
        rxe_qp_from_attr() (617), rxe_qp_to_attr() (759), rxe_qp_chk_destroy() (794),
        rxe_qp_do_cleanup() (811), rxe_qp_cleanup() (877)

    rxe_comp.c: retransmit_timer() (117), rxe_comp_queue_pkt() (130), get_wqe() (146),
        reset_retry_counters() (174), check_psn() (181), check_ack() (224), do_read() (363),
        do_atomic() (383), make_send_cqe() (402), do_complete() (449),
        comp_check_sq_drain_done() (483), complete_ack() (508), complete_wqe() (532),
        drain_resp_pkts() (554), flush_send_wqe() (566), flush_send_queue() (594),
        free_pkt() (617), reset_retry_timer() (633), rxe_completer() (647)
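The rxe_comp.c hits show that both an arriving response packet and the retransmit timer funnel into the same completer task. Below is the timer handler reassembled from the fragment in the linux-6.6 listing; driver-private headers (rxe.h and friends) are elided.

    // Reassembled from the rxe_comp.c fragment above (linux-6.6): the timer only
    // flags a timeout and schedules the completer task; the actual retry decision
    // is made later in rxe_completer().
    static void retransmit_timer(struct timer_list *t)
    {
            struct rxe_qp *qp = from_timer(qp, t, retrans_timer);
            unsigned long flags;

            rxe_dbg_qp(qp, "retransmit timer fired\n");

            spin_lock_irqsave(&qp->state_lock, flags);
            if (qp->valid) {
                    qp->comp.timeout = 1;
                    rxe_sched_task(&qp->comp.task);
            }
            spin_unlock_irqrestore(&qp->state_lock, flags);
    }

Incoming responses take the same path: per the fragment, rxe_comp_queue_pkt() appends the skb to qp->resp_pkts and schedules the completer task.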
    rxe_req.c: retry_first_write_send() (17), req_retry() (38), rnr_nak_timer() (101),
        req_check_sq_drain_done() (116), __req_next_wqe() (162), req_next_wqe() (175),
        rxe_wqe_is_fenced() (206), next_opcode_rc() (224), next_opcode_uc() (303),
        next_opcode() (354), check_init_depth() (384), get_mtu() (404), init_req_packet() (414),
        finish_packet() (498), update_wqe_state() (541), update_wqe_psn() (553),
        save_state() (576), rollback_state() (588), update_state() (600),
        rxe_do_local_ops() (615), rxe_requester() (667)
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/

    rxe_qp.c: alloc_rd_atomic_resources() (98), free_rd_atomic_resources() (110),
        free_rd_atomic_resource() (125), cleanup_rd_atomic_resources() (136),
        rxe_qp_init_misc() (149), rxe_qp_init_req() (202), rxe_qp_init_resp() (264),
        rxe_qp_from_init() (308), rxe_qp_to_init() (363), rxe_qp_chk_attr() (391),
        rxe_qp_reset() (477), rxe_qp_drain() (534), rxe_qp_error() (549),
        rxe_qp_from_attr() (566), rxe_qp_to_attr() (723), rxe_qp_destroy() (759),
        rxe_qp_do_cleanup() (786), rxe_qp_cleanup() (825)
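The rxe_qp.c fragment shows how the responder's RDMA-read/atomic resource array is sized and torn down. Below is a sketch reassembled from that fragment using the linux-5.10 signatures; free_rd_atomic_resource() is the helper named in the listing (line 125) and is assumed to drop any per-slot references. Driver-private rxe headers are elided.

    // Reassembled from the rxe_qp.c fragment above (linux-5.10). The responder
    // keeps a kcalloc'd ring of struct resp_res, one slot per outstanding inbound
    // RDMA-read/atomic request.
    #include <linux/slab.h>

    static int alloc_rd_atomic_resources(struct rxe_qp *qp, unsigned int n)
    {
            qp->resp.res_head = 0;
            qp->resp.res_tail = 0;
            qp->resp.resources = kcalloc(n, sizeof(struct resp_res), GFP_KERNEL);

            if (!qp->resp.resources)
                    return -ENOMEM;

            return 0;
    }

    static void free_rd_atomic_resources(struct rxe_qp *qp)
    {
            if (qp->resp.resources) {
                    int i;

                    /* One slot per outstanding inbound read/atomic request. */
                    for (i = 0; i < qp->attr.max_dest_rd_atomic; i++) {
                            struct resp_res *res = &qp->resp.resources[i];

                            free_rd_atomic_resource(qp, res);
                    }
                    kfree(qp->resp.resources);
                    qp->resp.resources = NULL;
            }
    }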
    rxe_comp.c: retransmit_timer() (114), rxe_comp_queue_pkt() (122), get_wqe() (135),
        reset_retry_counters() (163), check_psn() (170), check_ack() (209), do_read() (340),
        do_atomic() (358), make_send_cqe() (375), do_complete() (413), complete_ack() (443),
        complete_wqe() (489), rxe_drain_resp_pkts() (510), rxe_completer() (532)

    rxe_resp.c: rxe_resp_queue_pkt() (80), get_req() (93), check_psn() (117), check_op_seq() (161),
        check_op_valid() (251), get_srq_wqe() (289), check_resource() (331), check_length() (375),
        check_rkey() (390), send_data_in() (480), write_data_in() (494), process_atomic() (518),
        prepare_ack_packet() (558), read_reply() (630), execute() (757), do_complete() (810),
        send_ack() (931), send_atomic_ack() (953), acknowledge() (993), cleanup() (1009),
        find_resource() (1028), duplicate_request() (1047), do_class_ac_error() (1131),
        do_class_d1e_error() (1141), rxe_drain_req_pkts() (1175), rxe_responder() (1193)

    rxe_req.c: retry_first_write_send() (17), req_retry() (41), rnr_nak_timer() (99),
        req_next_wqe() (105), next_opcode_rc() (167), next_opcode_uc() (239), next_opcode() (290),
        check_init_depth() (321), get_mtu() (341), init_req_packet() (351), fill_packet() (443),
        update_wqe_state() (487), update_wqe_psn() (499), save_state() (522), rollback_state() (533),
        update_state() (544), rxe_requester() (561)
/kernel/linux/linux-5.10/drivers/infiniband/hw/hfi1/

    qp.c: flush_tx_list() (164), flush_iowait() (172), hfi1_check_modify_qp() (202),
        qp_set_16b() (244), hfi1_modify_qp() (264), hfi1_setup_wqe() (305),
        _hfi1_schedule_send() (364), qp_pio_drain() (381), hfi1_schedule_send() (407),
        hfi1_qp_schedule() (420), hfi1_qp_wakeup() (437), hfi1_qp_unbusy() (452),
        iowait_sleep() (485), iowait_wakeup() (536), iowait_sdma_drained() (544),
        hfi1_init_priority() (563), qp_to_sdma_engine() (580), qp_to_send_context() (605),
        qp_idle() (625), qp_iter_print() (642), qp_priv_alloc() (709), qp_priv_free() (739),
        flush_qp_waiters() (773), stop_send_queue() (780), quiesce_qp() (789),
        notify_qp_reset() (800), hfi1_migrate_qp() (815), mtu_from_qp() (840),
        get_pmtu_from_attr() (862), notify_error_qp() (882), hfi1_qp_iter_cb() (920)

    rc.c: find_prev_entry() (58), make_rc_ack() (109), hfi1_make_rc_req() (429),
        hfi1_make_bth_aeth() (1221), hfi1_queue_rc_ack() (1239), hfi1_make_rc_ack_9B() (1266),
        hfi1_make_rc_ack_16B() (1319), hfi1_send_rc_ack() (1389), update_num_rd_atomic() (1459),
        reset_psn() (1500), hfi1_restart_rc() (1606), reset_sending_psn() (1677),
        hfi1_rc_verbs_aborted() (1714), hfi1_rc_send_complete() (1735), update_last_psn() (1856),
        do_rc_completion() (1866), set_restart_qp() (1947), update_qp_retry_state() (1972),
        do_rc_ack() (2006), rdma_seq_err() (2305), rc_rcv_resp() (2349), rc_cancel_ack() (2526),
        rc_rcv_error() (2551), hfi1_rc_rcv() (2809), hfi1_rc_hdrerr() (3250)

    uc.c: hfi1_make_uc_req() (63), hfi1_uc_rcv() (310)
/kernel/linux/linux-6.6/drivers/infiniband/hw/hfi1/

    qp.c: flush_tx_list() (122), flush_iowait() (130), hfi1_check_modify_qp() (160),
        qp_set_16b() (202), hfi1_modify_qp() (222), hfi1_setup_wqe() (263),
        _hfi1_schedule_send() (323), qp_pio_drain() (340), hfi1_schedule_send() (366),
        hfi1_qp_schedule() (379), hfi1_qp_wakeup() (396), hfi1_qp_unbusy() (411),
        iowait_sleep() (444), iowait_wakeup() (495), iowait_sdma_drained() (503),
        hfi1_init_priority() (522), qp_to_sdma_engine() (539), qp_to_send_context() (564),
        qp_idle() (584), qp_iter_print() (601), qp_priv_alloc() (668), qp_priv_free() (698),
        flush_qp_waiters() (732), stop_send_queue() (739), quiesce_qp() (748),
        notify_qp_reset() (759), hfi1_migrate_qp() (774), mtu_from_qp() (799),
        get_pmtu_from_attr() (821), notify_error_qp() (841), hfi1_qp_iter_cb() (879)

    rc.c: find_prev_entry() (16), make_rc_ack() (67), hfi1_make_rc_req() (388),
        hfi1_make_bth_aeth() (1180), hfi1_queue_rc_ack() (1198), hfi1_make_rc_ack_9B() (1225),
        hfi1_make_rc_ack_16B() (1278), hfi1_send_rc_ack() (1347), update_num_rd_atomic() (1417),
        reset_psn() (1458), hfi1_restart_rc() (1564), reset_sending_psn() (1635),
        hfi1_rc_verbs_aborted() (1672), hfi1_rc_send_complete() (1693), update_last_psn() (1814),
        do_rc_completion() (1824), set_restart_qp() (1905), update_qp_retry_state() (1930),
        do_rc_ack() (1964), rdma_seq_err() (2263), rc_rcv_resp() (2307), rc_cancel_ack() (2484),
        rc_rcv_error() (2510), hfi1_rc_rcv() (2768), hfi1_rc_hdrerr() (3209)

    uc.c: hfi1_make_uc_req() (22), hfi1_uc_rcv() (264)
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/

    siw_qp.c: siw_qp_llp_data_ready() (95), siw_qp_llp_close() (126), siw_qp_readq_init() (200),
        siw_qp_enable_crc() (226), siw_qp_mpa_rts() (260), siw_init_terminate() (364),
        siw_send_terminate() (387), siw_qp_modify_nonstate() (612),
        siw_qp_nextstate_from_idle() (634), siw_qp_nextstate_from_rts() (703),
        siw_qp_nextstate_from_term() (764), siw_qp_nextstate_from_close() (781),
        siw_qp_modify() (825), siw_activate_tx_from_sq() (881), siw_activate_tx() (977),
        siw_sqe_complete() (1060), siw_rqe_complete() (1118), siw_sq_flush() (1188),
        siw_rq_flush() (1264), siw_qp_add() (1311), siw_free_qp() (1326)
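The siw_qp.c fragment shows the softiwarp socket data-ready callback gating receive work on the QP state: RX must not be suspended, the state lock must be available for read, and the QP must be in RTS. The sketch below keeps only what the fragment shows; the elided socket-read step and the sk_callback_lock handling are marked as comments rather than reconstructed. Driver-private siw headers are elided.

    // Reassembled from the siw_qp_llp_data_ready() fragment above (linux-5.10).
    static void siw_qp_llp_data_ready(struct sock *sk)
    {
            struct siw_qp *qp;

            /* (listing elides: take sk->sk_callback_lock, bail out early if needed) */
            qp = sk_to_qp(sk);

            if (likely(!qp->rx_stream.rx_suspend &&
                       down_read_trylock(&qp->state_lock))) {
                    read_descriptor_t rd_desc = { .arg.data = qp, .count = 1 };

                    if (likely(qp->attrs.state == SIW_QP_STATE_RTS))
                            /* (listing elides: consume the socket payload via
                             * tcp_read_sock() with rd_desc and the siw RX callback) */
                            ;

                    up_read(&qp->state_lock);
            } else {
                    siw_dbg_qp(qp, "unable to process RX, suspend: %d\n",
                               qp->rx_stream.rx_suspend);
            }
            /* (listing elides: release sk->sk_callback_lock) */
    }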
/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/

    siw_qp.c: siw_qp_llp_data_ready() (96), siw_qp_llp_close() (129), siw_qp_readq_init() (203),
        siw_qp_enable_crc() (229), siw_qp_mpa_rts() (263), siw_init_terminate() (367),
        siw_send_terminate() (390), siw_qp_modify_nonstate() (615),
        siw_qp_nextstate_from_idle() (637), siw_qp_nextstate_from_rts() (706),
        siw_qp_nextstate_from_term() (767), siw_qp_nextstate_from_close() (784),
        siw_qp_modify() (828), siw_activate_tx_from_sq() (884), siw_activate_tx() (980),
        siw_sqe_complete() (1063), siw_rqe_complete() (1121), siw_sq_flush() (1191),
        siw_rq_flush() (1267), siw_qp_add() (1314), siw_free_qp() (1329)
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/

    qp.c: rvt_free_qp_cb() (464), rvt_clear_mr_refs() (619), rvt_qp_sends_has_lkey() (679),
        rvt_qp_acks_has_lkey() (703), rvt_qp_mr_clean() (728), rvt_remove_qp() (769),
        rvt_init_qp() (862), rvt_reset_qp() (969), free_ud_wq_attr() (1014),
        alloc_ud_wq_attr() (1034), rvt_create_qp() (1070), rvt_error_qp() (1342),
        rvt_insert_qp() (1428), rvt_modify_qp() (1462), rvt_destroy_qp() (1731),
        rvt_query_qp() (1773), rvt_post_recv() (1836), rvt_qp_valid_operation() (1914),
        rvt_qp_is_avail() (1961), rvt_post_one_wr() (2011), rvt_post_send() (2197),
        init_sge() (2318), rvt_get_rwqe() (2394), rvt_comm_est() (2501), rvt_rc_error() (2515),
        rvt_add_retry_timer_ext() (2558), rvt_add_rnr_timer() (2577), rvt_stop_rc_timers() (2595),
        rvt_stop_rnr_timer() (2614), rvt_del_timers_sync() (2628), rvt_rc_timeout() (2640),
        rvt_rc_rnr_retry() (2668), rvt_qp_iter_init() (2700), rvt_qp_iter() (2799),
        rvt_send_complete() (2829), rvt_copy_sge() (2863), rvt_ruc_loopback() (2957)
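rdmavt exposes a QP iterator whose callback signature, void (*cb)(struct rvt_qp *qp, u64 v), is visible in the rvt_qp_iter()/rvt_qp_iter_init() hits above; hfi1's hfi1_qp_iter_cb() is one consumer. Below is a hypothetical callback as an illustration. Only the iterator signature comes from the listing; the counter-through-u64 trick, the IB_QPS_ERR check, and the header paths are assumptions.

    // Hypothetical use of rvt_qp_iter() (qp.c:2799 above): count QPs in the
    // error state by passing a counter through the u64 cookie.
    #include <linux/atomic.h>
    #include <rdma/rdma_vt.h>
    #include <rdma/rdmavt_qp.h>

    static void count_error_qps_cb(struct rvt_qp *qp, u64 v)
    {
            atomic_t *nerr = (atomic_t *)(uintptr_t)v;

            if (qp->state == IB_QPS_ERR)
                    atomic_inc(nerr);
    }

    static int count_error_qps(struct rvt_dev_info *rdi)
    {
            atomic_t nerr = ATOMIC_INIT(0);

            /* Walks every QP known to the rdmavt device and invokes the callback. */
            rvt_qp_iter(rdi, (u64)(uintptr_t)&nerr, count_error_qps_cb);

            return atomic_read(&nerr);
    }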
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/

    qp.c: rvt_free_qp_cb() (423), rvt_clear_mr_refs() (579), rvt_qp_sends_has_lkey() (639),
        rvt_qp_acks_has_lkey() (663), rvt_qp_mr_clean() (688), rvt_remove_qp() (729),
        rvt_init_qp() (823), rvt_reset_qp() (931), free_ud_wq_attr() (977),
        alloc_ud_wq_attr() (997), rvt_create_qp() (1032), rvt_error_qp() (1286),
        rvt_insert_qp() (1372), rvt_modify_qp() (1406), rvt_destroy_qp() (1679),
        rvt_query_qp() (1721), rvt_post_recv() (1784), rvt_qp_valid_operation() (1862),
        rvt_qp_is_avail() (1909), rvt_post_one_wr() (1960), rvt_post_send() (2146),
        init_sge() (2267), rvt_get_rwqe() (2343), rvt_comm_est() (2450), rvt_rc_error() (2464),
        rvt_add_retry_timer_ext() (2507), rvt_add_rnr_timer() (2526), rvt_stop_rc_timers() (2544),
        rvt_stop_rnr_timer() (2563), rvt_del_timers_sync() (2577), rvt_rc_timeout() (2589),
        rvt_rc_rnr_retry() (2617), rvt_qp_iter_init() (2649), rvt_qp_iter() (2748),
        rvt_send_complete() (2778), rvt_copy_sge() (2812), rvt_ruc_loopback() (2906)
/kernel/linux/linux-5.10/drivers/ntb/

    ntb_transport.c: QP_TO_MW() macro (272), debugfs_read() (468), ntb_transport_setup_qp_mw() (618),
        ntb_transport_isr() (687), ntb_transport_setup_qp_peer_msi() (697),
        ntb_transport_setup_qp_msi() (724), ntb_qp_link_context_reset() (914),
        ntb_qp_link_down_reset() (937), ntb_qp_link_cleanup() (944),
        ntb_qp_link_cleanup_work() (960), ntb_qp_link_down() (972),
        ntb_transport_link_cleanup() (979), ntb_transport_link_work() (1106),
        ntb_qp_link_work() (1133), ntb_transport_init_queue() (1168), ntb_transport_free() (1413),
        ntb_complete_rxc() (1446), ntb_rx_copy_callback() (1496), ntb_async_rx_submit() (1532),
        ntb_async_rx() (1599), ntb_process_rxc() (1623), ntb_transport_rxc_db() (1697),
        ntb_tx_copy_callback() (1738), ntb_async_tx_submit() (1808), ntb_async_tx() (1867),
        ntb_process_tx() (1903), ntb_send_link_down() (1930), ntb_transport_create_queue() (1991),
        ntb_transport_free_queue() (2115), ntb_transport_rx_remove() (2207),
        ntb_transport_rx_enqueue() (2240), ntb_transport_tx_enqueue() (2282),
        ntb_transport_link_up() (2324), ntb_transport_link_down() (2344),
        ntb_transport_link_query() (2372), ntb_transport_qp_num() (2389),
        ntb_transport_max_size() (2406), ntb_transport_tx_free_entry() (2429),
        ntb_transport_doorbell_callback() (2441)
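The exported helpers listed above (create/free queue, rx/tx enqueue, link up/down/query, max size) form the client-facing side of ntb_transport. Below is a minimal client sketch. The rx/tx enqueue and link/max-size signatures come from the listing; the ntb_queue_handlers layout and the ntb_transport_create_queue() signature are assumptions from memory of that API and should be checked against include/linux/ntb_transport.h.

    // Minimal ntb_transport client sketch; error handling is abbreviated.
    #include <linux/ntb_transport.h>
    #include <linux/slab.h>

    static void demo_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
                                void *data, int len)
    {
            /* "data" is the cookie we posted below; consume it, then repost. */
            ntb_transport_rx_enqueue(qp, data, data, ntb_transport_max_size(qp));
    }

    static void demo_tx_handler(struct ntb_transport_qp *qp, void *qp_data,
                                void *data, int len)
    {
            kfree(data);            /* transmit buffer is ours again */
    }

    static void demo_event_handler(void *qp_data, int status)
    {
            /* Link up/down notification for this queue pair. */
    }

    static const struct ntb_queue_handlers demo_handlers = {
            .rx_handler     = demo_rx_handler,
            .tx_handler     = demo_tx_handler,
            .event_handler  = demo_event_handler,
    };

    static int demo_open_qp(struct device *client_dev, void *ctx,
                            struct ntb_transport_qp **out)
    {
            struct ntb_transport_qp *qp;
            void *rx_buf;

            qp = ntb_transport_create_queue(ctx, client_dev, &demo_handlers);
            if (!qp)
                    return -EINVAL;

            /* Prime the receive side before enabling the link. */
            rx_buf = kmalloc(ntb_transport_max_size(qp), GFP_KERNEL);
            if (!rx_buf) {
                    ntb_transport_free_queue(qp);
                    return -ENOMEM;
            }
            ntb_transport_rx_enqueue(qp, rx_buf, rx_buf, ntb_transport_max_size(qp));

            ntb_transport_link_up(qp);
            *out = qp;
            return 0;
    }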
/kernel/linux/linux-6.6/drivers/ntb/

    ntb_transport.c: QP_TO_MW() macro (272), debugfs_read() (466), ntb_transport_setup_qp_mw() (616),
        ntb_transport_isr() (685), ntb_transport_setup_qp_peer_msi() (695),
        ntb_transport_setup_qp_msi() (722), ntb_qp_link_context_reset() (912),
        ntb_qp_link_down_reset() (935), ntb_qp_link_cleanup() (942),
        ntb_qp_link_cleanup_work() (958), ntb_qp_link_down() (970),
        ntb_transport_link_cleanup() (977), ntb_transport_link_work() (1104),
        ntb_qp_link_work() (1131), ntb_transport_init_queue() (1166), ntb_transport_free() (1411),
        ntb_complete_rxc() (1444), ntb_rx_copy_callback() (1494), ntb_async_rx_submit() (1530),
        ntb_async_rx() (1597), ntb_process_rxc() (1621), ntb_transport_rxc_db() (1695),
        ntb_tx_copy_callback() (1736), ntb_async_tx_submit() (1806), ntb_async_tx() (1865),
        ntb_process_tx() (1901), ntb_send_link_down() (1928), ntb_transport_create_queue() (1989),
        ntb_transport_free_queue() (2113), ntb_transport_rx_remove() (2205),
        ntb_transport_rx_enqueue() (2238), ntb_transport_tx_enqueue() (2280),
        ntb_transport_link_up() (2322), ntb_transport_link_down() (2342),
        ntb_transport_link_query() (2370), ntb_transport_qp_num() (2387),
        ntb_transport_max_size() (2404), ntb_transport_tx_free_entry() (2427),
        ntb_transport_doorbell_callback() (2439)