/kernel/linux/linux-5.10/drivers/infiniband/core/

mad.c
      59   struct ib_mad_qp_info *qp_info,   in create_mad_addr_info()
      63   struct ib_device *dev = qp_info->port_priv->device;   in create_mad_addr_info()
      64   u8 pnum = qp_info->port_priv->port_num;   in create_mad_addr_info()
     102   static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
     364   if (!port_priv->qp_info[qpn].qp) {   in ib_register_mad_agent()
     387   mad_agent_priv->qp_info = &port_priv->qp_info[qpn];   in ib_register_mad_agent()
     394   mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;   in ib_register_mad_agent()
     503   port_priv = mad_agent_priv->qp_info->port_priv;   in unregister_mad_agent()
     614   size_t mad_size = port_mad_size(mad_agent_priv->qp_info   in handle_outgoing_dr_smp()
      58   create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry)   create_mad_addr_info() argument
    1003   struct ib_mad_qp_info *qp_info;   ib_send_mad() local
    1625   validate_mad(const struct ib_mad_hdr *mad_hdr, const struct ib_mad_qp_info *qp_info, bool opa)   validate_mad() argument
    1867   handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, int port_num, struct ib_mad_private *recv, struct ib_mad_private *response)   handle_ib_smi() argument
    1954   handle_opa_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, int port_num, struct ib_mad_private *recv, struct ib_mad_private *response)   handle_opa_smi() argument
    2010   handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, int port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa)   handle_smi() argument
    2033   struct ib_mad_qp_info *qp_info;   ib_mad_recv_done() local
    2282   struct ib_mad_qp_info *qp_info;   ib_mad_send_done() local
    2345   mark_sends_for_retry(struct ib_mad_qp_info *qp_info)   mark_sends_for_retry() argument
    2366   struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;   ib_mad_send_error() local
    2683   ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad)   ib_mad_post_receive_mads() argument
    2758   cleanup_recv_queue(struct ib_mad_qp_info *qp_info)   cleanup_recv_queue() argument
    2876   struct ib_mad_qp_info *qp_info = qp_context;   qp_event_handler() local
    2884   init_mad_queue(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *mad_queue)   init_mad_queue() argument
    2893   init_mad_qp(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info)   init_mad_qp() argument
    2902   create_mad_qp(struct ib_mad_qp_info *qp_info, enum ib_qp_type qp_type)   create_mad_qp() argument
    2937   destroy_mad_qp(struct ib_mad_qp_info *qp_info)   destroy_mad_qp() argument
    [all...]

mad_priv.h
      94   struct ib_mad_qp_info *qp_info;   member
     115   struct ib_mad_qp_info *qp_info;   member
     184   struct ib_mad_qp_info *qp_info;   member
     209   struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];   member

mad_rmpp.c
      99   flush_workqueue(agent->qp_info->port_priv->wq);   in ib_cancel_rmpp_recvs()
     396   return max(agent->qp_info->recv_queue.max_active >> 3, 1);   in window_size()
     431   bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,   in get_mad_len()
     432   rmpp_recv->agent->qp_info->port_priv->port_num);   in get_mad_len()
     463   queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,   in complete_rmpp()
     556   queue_delayed_work(agent->qp_info->port_priv->wq,   in start_rmpp()
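Taken together, the mad_priv.h and mad.c hits outline how the MAD core wires these structures up: each port keeps an array of IB_MAD_QPS_CORE qp_info slots, each slot points back at its owning port, and ib_register_mad_agent() binds an agent to one slot by QP number. Below is a minimal C sketch of that shape, reconstructed from the hits above; the field sets are incomplete and every name other than qp_info, port_priv and the agent binding is a simplified assumption, not the actual mad_priv.h definition.

/*
 * Illustrative sketch only: a trimmed-down view of how the MAD layer's
 * qp_info pointers fit together, reconstructed from the hits above.
 */
#define MAD_QPS_CORE 2                   /* QP0 (SMI) and QP1 (GSI) */

struct mad_port_private;                 /* one instance per device port */

struct mad_qp_info {
	struct mad_port_private *port_priv;  /* back-pointer to the owning port */
	void *qp;                            /* the QP used for this MAD QP slot */
	/* ... send_queue / recv_queue bookkeeping elided ... */
};

struct mad_port_private {
	struct mad_qp_info qp_info[MAD_QPS_CORE];  /* cf. mad_priv.h line 209 */
};

struct mad_agent_private {
	struct mad_qp_info *qp_info;         /* points into port_priv->qp_info[qpn] */
};

/* Mirrors the ib_register_mad_agent() hits: bind an agent to one core QP. */
static inline void bind_agent(struct mad_agent_private *agent,
			      struct mad_port_private *port_priv, int qpn)
{
	agent->qp_info = &port_priv->qp_info[qpn];
}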
/kernel/linux/linux-6.6/drivers/infiniband/core/

mad.c
      59   struct ib_mad_qp_info *qp_info,   in create_mad_addr_info()
      97   static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
     358   if (!port_priv->qp_info[qpn].qp) {   in ib_register_mad_agent()
     381   mad_agent_priv->qp_info = &port_priv->qp_info[qpn];   in ib_register_mad_agent()
     388   mad_agent_priv->agent.qp = port_priv->qp_info[qpn].qp;   in ib_register_mad_agent()
     497   port_priv = mad_agent_priv->qp_info->port_priv;   in unregister_mad_agent()
     608   size_t mad_size = port_mad_size(mad_agent_priv->qp_info->port_priv);   in handle_outgoing_dr_smp()
     611   bool opa = rdma_cap_opa_mad(mad_agent_priv->qp_info->port_priv->device,   in handle_outgoing_dr_smp()
     612   mad_agent_priv->qp_info   in handle_outgoing_dr_smp()
      58   create_mad_addr_info(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_qp_info *qp_info, struct trace_event_raw_ib_mad_send_template *entry)   create_mad_addr_info() argument
     995   struct ib_mad_qp_info *qp_info;   ib_send_mad() local
    1614   validate_mad(const struct ib_mad_hdr *mad_hdr, const struct ib_mad_qp_info *qp_info, bool opa)   validate_mad() argument
    1856   handle_ib_smi(const struct ib_mad_port_private *port_priv, const struct ib_mad_qp_info *qp_info, const struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response)   handle_ib_smi() argument
    1943   handle_opa_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response)   handle_opa_smi() argument
    1999   handle_smi(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info, struct ib_wc *wc, u32 port_num, struct ib_mad_private *recv, struct ib_mad_private *response, bool opa)   handle_smi() argument
    2022   struct ib_mad_qp_info *qp_info;   ib_mad_recv_done() local
    2272   struct ib_mad_qp_info *qp_info;   ib_mad_send_done() local
    2335   mark_sends_for_retry(struct ib_mad_qp_info *qp_info)   mark_sends_for_retry() argument
    2356   struct ib_mad_qp_info *qp_info = mad_list->mad_queue->qp_info;   ib_mad_send_error() local
    2668   ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info, struct ib_mad_private *mad)   ib_mad_post_receive_mads() argument
    2743   cleanup_recv_queue(struct ib_mad_qp_info *qp_info)   cleanup_recv_queue() argument
    2861   struct ib_mad_qp_info *qp_info = qp_context;   qp_event_handler() local
    2869   init_mad_queue(struct ib_mad_qp_info *qp_info, struct ib_mad_queue *mad_queue)   init_mad_queue() argument
    2878   init_mad_qp(struct ib_mad_port_private *port_priv, struct ib_mad_qp_info *qp_info)   init_mad_qp() argument
    2887   create_mad_qp(struct ib_mad_qp_info *qp_info, enum ib_qp_type qp_type)   create_mad_qp() argument
    2922   destroy_mad_qp(struct ib_mad_qp_info *qp_info)   destroy_mad_qp() argument
    [all...]

mad_priv.h
      94   struct ib_mad_qp_info *qp_info;   member
     115   struct ib_mad_qp_info *qp_info;   member
     183   struct ib_mad_qp_info *qp_info;   member
     208   struct ib_mad_qp_info qp_info[IB_MAD_QPS_CORE];   member

mad_rmpp.c
      99   flush_workqueue(agent->qp_info->port_priv->wq);   in ib_cancel_rmpp_recvs()
     396   return max(agent->qp_info->recv_queue.max_active >> 3, 1);   in window_size()
     431   bool opa = rdma_cap_opa_mad(rmpp_recv->agent->qp_info->port_priv->device,   in get_mad_len()
     432   rmpp_recv->agent->qp_info->port_priv->port_num);   in get_mad_len()
     463   queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,   in complete_rmpp()
     556   queue_delayed_work(agent->qp_info->port_priv->wq,   in start_rmpp()
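The mad_rmpp.c hits (identical in both kernel versions) show that the RMPP code never owns a workqueue of its own; it reaches the per-port workqueue through agent->qp_info->port_priv->wq and schedules or flushes work there. A hedged sketch of that pattern follows, using the real queue_delayed_work()/flush_workqueue()/msecs_to_jiffies() APIs but simplified, illustrative struct definitions (the demo_* names and the cleanup_work member are assumptions, not the mad_rmpp.c types).

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_port_priv {
	struct workqueue_struct *wq;         /* per-port workqueue */
};

struct demo_qp_info {
	struct demo_port_priv *port_priv;
};

struct demo_agent {
	struct demo_qp_info *qp_info;
};

struct demo_rmpp_recv {
	struct demo_agent *agent;
	struct delayed_work cleanup_work;    /* hypothetical member name */
};

/* cf. complete_rmpp()/start_rmpp(): defer cleanup on the port's workqueue */
static void demo_schedule_cleanup(struct demo_rmpp_recv *rmpp_recv,
				  unsigned long timeout_ms)
{
	queue_delayed_work(rmpp_recv->agent->qp_info->port_priv->wq,
			   &rmpp_recv->cleanup_work,
			   msecs_to_jiffies(timeout_ms));
}

/* cf. ib_cancel_rmpp_recvs(): drain anything still pending on that queue */
static void demo_drain(struct demo_agent *agent)
{
	flush_workqueue(agent->qp_info->port_priv->wq);
}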
/kernel/linux/linux-5.10/include/trace/events/

ib_mad.h
      19   struct ib_mad_qp_info *qp_info,
      25   struct ib_mad_qp_info *qp_info),
      26   TP_ARGS(wr, qp_info),
      58   __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
      84   create_mad_addr_info(wr, qp_info, __entry);
     110   struct ib_mad_qp_info *qp_info),
     111   TP_ARGS(wr, qp_info));
     114   struct ib_mad_qp_info *qp_info),
     115   TP_ARGS(wr, qp_info));
     118   struct ib_mad_qp_info *qp_info),
    [all...]
/kernel/linux/linux-6.6/include/trace/events/

ib_mad.h
      19   struct ib_mad_qp_info *qp_info,
      25   struct ib_mad_qp_info *qp_info),
      26   TP_ARGS(wr, qp_info),
      57   __entry->qp_num = wr->mad_agent_priv->qp_info->qp->qp_num;
      83   create_mad_addr_info(wr, qp_info, __entry);
     109   struct ib_mad_qp_info *qp_info),
     110   TP_ARGS(wr, qp_info));
     113   struct ib_mad_qp_info *qp_info),
     114   TP_ARGS(wr, qp_info));
     117   struct ib_mad_qp_info *qp_info),
    [all...]
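The ib_mad.h hits come from a trace event class whose TP_PROTO carries the qp_info alongside the send work request and whose fast-assign path records the QP number via wr->mad_agent_priv->qp_info->qp->qp_num. Below is a sketch of a comparable, but hypothetical, tracepoint header; the demo_* names and the recorded field are illustrative stand-ins, not the real ib_mad trace events.

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo_mad

#if !defined(_TRACE_DEMO_MAD_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_MAD_H

#include <linux/tracepoint.h>

struct demo_qp_info;                      /* stand-in for ib_mad_qp_info */
struct demo_send_wr { u32 qp_num; };      /* stand-in for the send WR */

DECLARE_EVENT_CLASS(demo_mad_send_template,
	TP_PROTO(struct demo_send_wr *wr, struct demo_qp_info *qp_info),
	TP_ARGS(wr, qp_info),

	TP_STRUCT__entry(
		__field(u32, qp_num)
	),

	TP_fast_assign(
		/* the real events reach this via wr->mad_agent_priv->qp_info->qp */
		__entry->qp_num = wr->qp_num;
	),

	TP_printk("qp_num %u", __entry->qp_num)
);

DEFINE_EVENT(demo_mad_send_template, demo_mad_send_done,
	TP_PROTO(struct demo_send_wr *wr, struct demo_qp_info *qp_info),
	TP_ARGS(wr, qp_info));

#endif /* _TRACE_DEMO_MAD_H */

/* This part must be outside the include guard. */
#include <trace/define_trace.h>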
/kernel/linux/linux-5.10/drivers/infiniband/hw/i40iw/

i40iw_utils.c
    1184   struct i40iw_create_qp_info *qp_info;   in i40iw_cqp_qp_create_cmd() local
    1192   qp_info = &cqp_request->info.in.u.qp_create.info;   in i40iw_cqp_qp_create_cmd()
    1194   memset(qp_info, 0, sizeof(*qp_info));   in i40iw_cqp_qp_create_cmd()
    1196   qp_info->cq_num_valid = true;   in i40iw_cqp_qp_create_cmd()
    1197   qp_info->next_iwarp_state = I40IW_QP_STATE_RTS;   in i40iw_cqp_qp_create_cmd()

i40iw_verbs.c
     524   struct i40iw_create_qp_info *qp_info;   in i40iw_create_qp() local
     686   qp_info = &cqp_request->info.in.u.qp_create.info;   in i40iw_create_qp()
     688   memset(qp_info, 0, sizeof(*qp_info));   in i40iw_create_qp()
     690   qp_info->cq_num_valid = true;   in i40iw_create_qp()
     691   qp_info->next_iwarp_state = I40IW_QP_STATE_IDLE;   in i40iw_create_qp()
/kernel/linux/linux-5.10/drivers/scsi/bnx2i/

bnx2i.h
     576   * struct qp_info - QP (share queue region) atrributes structure
     624   * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure
     628   struct qp_info {   struct
     760   struct qp_info qp;

bnx2i_hwi.c
    1960   struct qp_info *qp;   in bnx2i_process_new_cqes()
/kernel/linux/linux-6.6/drivers/scsi/bnx2i/

bnx2i.h
     576   * struct qp_info - QP (share queue region) atrributes structure
     624   * and receive asynchoronous / scsi sense info (RQ). 'qp_info' structure
     628   struct qp_info {   struct
     760   struct qp_info qp;

bnx2i_hwi.c
    1960   struct qp_info *qp;   in bnx2i_process_new_cqes()
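Here qp_info is an unrelated bnx2i type: the quoted kernel-doc fragment describes it as the per-connection shared queue region holding the send queue (SQ) used to post commands, the completion queue (CQ) the chip completes into, and the receive queue (RQ) for asynchronous events and SCSI sense data. A rough C sketch of that container follows; every field name and the queue descriptor type are assumptions for illustration, not the actual bnx2i.h layout.

#include <stdint.h>

struct demo_queue_region {
	void     *first_entry;   /* start of the DMA-able ring */
	void     *last_entry;    /* end of the ring */
	uint32_t  entry_size;    /* size of one ring element */
	uint32_t  num_entries;   /* ring depth */
};

struct demo_qp_info {
	struct demo_queue_region sq;   /* work requests posted by the driver */
	struct demo_queue_region cq;   /* completions consumed in bnx2i_process_new_cqes() */
	struct demo_queue_region rq;   /* async events and SCSI sense info */
};

/* Per-connection state embeds the whole region, cf. "struct qp_info qp;" */
struct demo_conn {
	struct demo_qp_info qp;
};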
/kernel/linux/linux-6.6/drivers/infiniband/hw/irdma/

utils.c
    1082   struct irdma_create_qp_info *qp_info;   in irdma_cqp_qp_create_cmd() local
    1090   qp_info = &cqp_request->info.in.u.qp_create.info;   in irdma_cqp_qp_create_cmd()
    1091   memset(qp_info, 0, sizeof(*qp_info));   in irdma_cqp_qp_create_cmd()
    1092   qp_info->cq_num_valid = true;   in irdma_cqp_qp_create_cmd()
    1093   qp_info->next_iwarp_state = IRDMA_QP_STATE_RTS;   in irdma_cqp_qp_create_cmd()

verbs.c
     736   struct irdma_create_qp_info *qp_info;   in irdma_cqp_create_qp_cmd() local
     744   qp_info = &cqp_request->info.in.u.qp_create.info;   in irdma_cqp_create_qp_cmd()
     745   memset(qp_info, 0, sizeof(*qp_info));   in irdma_cqp_create_qp_cmd()
     746   qp_info->mac_valid = true;   in irdma_cqp_create_qp_cmd()
     747   qp_info->cq_num_valid = true;   in irdma_cqp_create_qp_cmd()
     748   qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;   in irdma_cqp_create_qp_cmd()
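In the i40iw driver and its irdma successor, qp_info is again something different: the parameter block of a control-QP (CQP) "create QP" command. Both drivers zero the info block embedded in the CQP request, then set only the relevant validity flags (cq_num_valid, plus mac_valid in irdma) and the first iWARP state before posting it. The sketch below mirrors that fill pattern; the demo_* types and the posting stub are hypothetical stand-ins for the driver-internal cqp_request machinery.

#include <string.h>
#include <stdbool.h>

enum demo_qp_state { DEMO_QP_STATE_IDLE, DEMO_QP_STATE_RTS };

struct demo_create_qp_info {
	bool mac_valid;                      /* irdma only, per the verbs.c hit */
	bool cq_num_valid;
	enum demo_qp_state next_iwarp_state;
};

struct demo_cqp_request {
	struct {
		struct {
			union {
				struct { struct demo_create_qp_info info; } qp_create;
			} u;
		} in;
	} info;
};

/* Stand-in for handing the request to the control QP / firmware. */
static int demo_post_cqp_request(struct demo_cqp_request *req)
{
	(void)req;
	return 0;
}

static int demo_cqp_qp_create_cmd(struct demo_cqp_request *cqp_request)
{
	struct demo_create_qp_info *qp_info;

	/* same shape as: qp_info = &cqp_request->info.in.u.qp_create.info; */
	qp_info = &cqp_request->info.in.u.qp_create.info;
	memset(qp_info, 0, sizeof(*qp_info));
	qp_info->cq_num_valid = true;
	qp_info->next_iwarp_state = DEMO_QP_STATE_RTS;

	return demo_post_cqp_request(cqp_request);
}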
/kernel/linux/linux-6.6/drivers/crypto/hisilicon/

qm.c
    2469   struct hisi_qp_info qp_info;   in hisi_qm_uacce_ioctl() local
    2489   if (copy_from_user(&qp_info, (void __user *)arg,   in hisi_qm_uacce_ioctl()
    2493   qp_info.sqe_size = qp->qm->sqe_size;   in hisi_qm_uacce_ioctl()
    2494   qp_info.sq_depth = qp->sq_depth;   in hisi_qm_uacce_ioctl()
    2495   qp_info.cq_depth = qp->cq_depth;   in hisi_qm_uacce_ioctl()
    2497   if (copy_to_user((void __user *)arg, &qp_info,   in hisi_qm_uacce_ioctl()
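In the HiSilicon QM driver, qp_info is a small ioctl payload: hisi_qm_uacce_ioctl() copies the caller's struct in, overwrites the queue geometry (SQE size, SQ depth, CQ depth) with the kernel-side values, and copies it back out. A minimal sketch of that round-trip follows; the struct layout and the surrounding ioctl plumbing are simplified assumptions, while copy_from_user()/copy_to_user() are the actual helpers used by the quoted lines.

#include <linux/uaccess.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_qp_info {
	u16 sqe_size;   /* size of one submission queue element */
	u16 sq_depth;   /* submission queue depth */
	u16 cq_depth;   /* completion queue depth */
};

struct demo_qp {
	u16 sqe_size;
	u16 sq_depth;
	u16 cq_depth;
};

static long demo_fill_qp_info(struct demo_qp *qp, unsigned long arg)
{
	struct demo_qp_info qp_info;

	if (copy_from_user(&qp_info, (void __user *)arg, sizeof(qp_info)))
		return -EFAULT;

	/* report the kernel-side geometry back to userspace */
	qp_info.sqe_size = qp->sqe_size;
	qp_info.sq_depth = qp->sq_depth;
	qp_info.cq_depth = qp->cq_depth;

	if (copy_to_user((void __user *)arg, &qp_info, sizeof(qp_info)))
		return -EFAULT;

	return 0;
}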