/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/ |
mthca_cq.c | 174 cqe_sw() - returns NULL while MTHCA_CQ_ENTRY_OWNER_HW is set in cqe->owner, 181 next_cqe_sw() - checks the entry at cq->cons_index & cq->ibcq.cqe, 184 set_cqe_hw() - hands the entry back by setting cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW, 191 dump_cqe(), 264 is_recv_cqe(), 276 mthca_cq_clean(), 366 mthca_free_cq_buf(), 372 handle_error_cqe(), 485 mthca_poll_one() |
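The mthca hits above revolve around one convention: a CQE belongs to hardware while an owner flag is set, software polls entries in consumer-index order, and each consumed entry is handed back by re-setting the flag. A minimal userspace sketch of that handshake (struct layout, flag value, and ring size are invented for illustration, not the mthca definitions):

```c
#include <stddef.h>
#include <stdint.h>

#define CQ_ENTRY_OWNER_HW 0x80   /* hypothetical flag; mthca uses MTHCA_CQ_ENTRY_OWNER_HW */
#define CQ_SIZE           256    /* power of two so "& (CQ_SIZE - 1)" wraps the index */

struct demo_cqe {
	uint8_t  owner;              /* ownership flag toggled between HW and SW */
	uint8_t  opcode;
	uint32_t byte_cnt;
};

struct demo_cq {
	struct demo_cqe queue[CQ_SIZE];
	uint32_t cons_index;         /* free-running consumer counter */
};

/* Return the next entry only if software owns it, mirroring cqe_sw()/next_cqe_sw(). */
static struct demo_cqe *next_cqe_sw(struct demo_cq *cq)
{
	struct demo_cqe *cqe = &cq->queue[cq->cons_index & (CQ_SIZE - 1)];

	return (cqe->owner & CQ_ENTRY_OWNER_HW) ? NULL : cqe;
}

/* Hand a consumed entry back to hardware, mirroring set_cqe_hw(). */
static void set_cqe_hw(struct demo_cqe *cqe)
{
	cqe->owner = CQ_ENTRY_OWNER_HW;
}

/* Poll-loop skeleton: consume entries until hardware owns the next one. */
static int poll_cq(struct demo_cq *cq)
{
	struct demo_cqe *cqe;
	int polled = 0;

	while ((cqe = next_cqe_sw(cq)) != NULL) {
		/* ... translate *cqe into a work completion here ... */
		set_cqe_hw(cqe);
		cq->cons_index++;
		polled++;
	}
	return polled;
}
```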
/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/ |
mthca_cq.c | same hits as the linux-5.10 copy above: 174 cqe_sw(), 181 next_cqe_sw(), 184 set_cqe_hw(), 191 dump_cqe(), 264 is_recv_cqe(), 276 mthca_cq_clean(), 366 mthca_free_cq_buf(), 372 handle_error_cqe(), 485 mthca_poll_one() |
/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/ |
rxe_cq.c | 11 rxe_cq_chk_attr() - rejects cqe <= 0, cqe > rxe->attr.max_cqe, and on resize cqe < the number of completions already queued (reported via rxe_dbg_dev()/rxe_dbg_cq()), 42 rxe_cq_from_init(), 72 rxe_cq_resize_queue(), 88 rxe_cq_post() |
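The rxe_cq_chk_attr() fragments show the three sanity checks applied to a requested CQ depth: it must be positive, must not exceed the device's advertised maximum, and on resize must not be smaller than the number of completions currently queued. A hedged sketch of the same checks (the structures and the queue_count() helper are stand-ins, not the rxe types):

```c
#include <errno.h>

/* Hypothetical stand-ins for the device and CQ state. */
struct demo_dev { int max_cqe; };
struct demo_cq  { int used; };                      /* completions currently queued */

static int queue_count(const struct demo_cq *cq) { return cq->used; }

/*
 * Validate a requested CQ depth in the spirit of rxe_cq_chk_attr():
 * reject non-positive depths, depths above the device limit, and
 * (when resizing an existing CQ) depths below the current occupancy.
 */
static int cq_chk_attr(const struct demo_dev *dev, const struct demo_cq *cq, int cqe)
{
	if (cqe <= 0)
		return -EINVAL;

	if (cqe > dev->max_cqe)
		return -EINVAL;

	if (cq && cqe < queue_count(cq))
		return -EINVAL;

	return 0;
}
```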
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/ |
cq.c | 81 get_sw_cqe() - fetches the entry at n & cq->ibcq.cqe (using the second half of a 64-byte entry for the ownership check) and returns it only when the owner bit matches the wrap parity !!(n & (cq->ibcq.cqe + 1)), 133 mlx4_ib_free_cq_buf() - frees (cqe + 1) * buf->entry_size bytes, 138 mlx4_ib_get_cq_umem() - pins cqe * cqe_size bytes of user memory with ib_umem_get(), 356 mlx4_ib_cq_resize_copy_cqes(), 500 dump_cqe(), 510 mlx4_ib_handle_error_cqe(), 580 use_tunnel_data(), 660 mlx4_ib_poll_one(), 919 __mlx4_ib_cq_clean() |
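get_sw_cqe() here uses a different ownership scheme from mthca: hardware toggles an owner bit each time it laps the ring, and software compares that bit against the wrap parity of its own consumer counter instead of writing ownership back. A simplified sketch of that comparison (field names, bit value, and ring size are illustrative):

```c
#include <stdbool.h>
#include <stdint.h>

#define CQ_SIZE       256            /* power of two; index mask is CQ_SIZE - 1 */
#define CQE_OWNER_BIT 0x80           /* hypothetical owner bit inside the opcode byte */

struct demo_cqe {
	uint8_t  owner_sr_opcode;    /* hardware flips the owner bit here on each lap */
	uint32_t byte_cnt;
};

/*
 * An entry is software-owned when its owner bit matches the wrap parity of
 * the consumer counter n: bit CQ_SIZE of n flips once per trip around the
 * ring, so a stale entry from the previous lap is rejected without any
 * write-back from software.
 */
static struct demo_cqe *get_sw_cqe(struct demo_cqe *ring, uint32_t n)
{
	struct demo_cqe *cqe = &ring[n & (CQ_SIZE - 1)];
	bool hw_bit = cqe->owner_sr_opcode & CQE_OWNER_BIT;
	bool parity = n & CQ_SIZE;

	return (hw_bit ^ parity) ? NULL : cqe;
}
```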
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/ |
cq.c | same hits as the linux-5.10 copy above: 81 get_sw_cqe(), 133 mlx4_ib_free_cq_buf(), 138 mlx4_ib_get_cq_umem(), 356 mlx4_ib_cq_resize_copy_cqes(), 500 dump_cqe(), 510 mlx4_ib_handle_error_cqe(), 580 use_tunnel_data(), 660 mlx4_ib_poll_one(), 919 __mlx4_ib_cq_clean() |
/kernel/linux/linux-5.10/drivers/infiniband/sw/siw/ |
siw_cq.c | 50 siw_reap_cqe() - takes the entry at cq->cq_get % cq->num_cqe and, if READ_ONCE(cqe->flags) has SIW_WQE_VALID set, copies it into the ib_wc: wr_id from cqe->id, byte_len from cqe->bytes, invalidate_rkey from cqe->inval_stag when SIW_WQE_REM_INVAL is set, qp from cqe->base_qp, opcode via map_wc_opcode[], status via map_cqe_status[] |
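siw_reap_cqe() is the consumer side of a purely software CQ: the next slot is cq_get modulo the queue depth, a flags word says whether the slot holds a valid completion, and its fields are copied out into the caller's work-completion structure before the slot is released. A reduced sketch of that pattern (structures and the flag value are made up; the kernel version additionally uses READ_ONCE()/WRITE_ONCE() and a spinlock):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define WQE_VALID 0x1                  /* hypothetical "slot holds a completion" flag */
#define NUM_CQE   128

struct demo_cqe {
	uint64_t id;                   /* caller's wr_id */
	uint32_t bytes;
	uint8_t  opcode;
	uint8_t  flags;                /* producer sets WQE_VALID last */
};

struct demo_wc {
	uint64_t wr_id;
	uint32_t byte_len;
	uint8_t  opcode;
};

struct demo_cq {
	struct demo_cqe queue[NUM_CQE];
	uint32_t cq_get;               /* consumer position */
};

/* Copy one valid completion out and release the slot back to the producer. */
static bool reap_cqe(struct demo_cq *cq, struct demo_wc *wc)
{
	struct demo_cqe *cqe = &cq->queue[cq->cq_get % NUM_CQE];

	if (!(cqe->flags & WQE_VALID))
		return false;          /* nothing ready */

	memset(wc, 0, sizeof(*wc));
	wc->wr_id    = cqe->id;
	wc->byte_len = cqe->bytes;
	wc->opcode   = cqe->opcode;

	cqe->flags = 0;                /* slot is free again */
	cq->cq_get++;
	return true;
}
```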
/kernel/linux/linux-6.6/drivers/infiniband/sw/siw/ |
siw_cq.c | same hit as the linux-5.10 copy above: 50 siw_reap_cqe() |
/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/ |
rxe_cq.c | 11 rxe_cq_chk_attr() - same three depth checks as the linux-6.6 copy above, reported with pr_warn(), 57 rxe_cq_from_init(), 90 rxe_cq_resize_queue(), 105 rxe_cq_post() |
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/ |
en_rx.c | 63 forward declarations of mlx5e_handle_rx_cqe() / mlx5e_handle_rx_cqe_mpwrq(), 114 mlx5e_cqes_update_owner() - stamps cqe->op_own over a run of entries in two loops so the run can wrap the ring, 638 mlx5e_poll_ico_cq(), 773 mlx5e_lro_update_tcp_hdr(), 789 mlx5e_lro_update_hdr(), 843 mlx5e_skb_set_hash(), 968 mlx5e_handle_csum(), 1039 mlx5e_build_rx_skb(), 1094 mlx5e_complete_rx_cqe(), 1136 mlx5e_skb_from_cqe_linear(), 1172 mlx5e_skb_from_cqe_nonlinear(), 1215 trigger_report(), 1226 mlx5e_handle_rx_cqe(), 1276 mlx5e_handle_rx_cqe_rep(), 1334 mlx5e_handle_rx_cqe_mpwrq_rep(), 1494 mlx5e_handle_rx_cqe_mpwrq(), 1557 mlx5e_poll_rx_cq(), 1611 mlx5i_complete_rx_cqe(), 1697 mlx5i_handle_rx_cqe(), 1741 mlx5e_ipsec_handle_rx_cqe() |
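Among these hits, mlx5e_cqes_update_owner() is the one whose fragments are most complete: it stamps the owner/phase byte on a run of consecutive CQEs, handling the case where the run crosses the end of the ring. A generic sketch of a wrap-aware owner-stamp loop (field names and ring size are placeholders; flipping the phase for the wrapped portion is how phase-based rings typically behave, stated here as an assumption):

```c
#include <stdint.h>

#define CQ_SIZE 256                           /* power-of-two ring */

struct demo_cqe { uint8_t op_own; };

/*
 * Give n consecutive entries starting at consumer counter cc back to
 * hardware by stamping their owner/phase byte.  Entries that fall past the
 * end of the ring belong to the next lap and carry the opposite phase.
 * Precondition: n <= CQ_SIZE.
 */
static void cqes_update_owner(struct demo_cqe *ring, uint32_t cc, int n)
{
	uint32_t ci = cc & (CQ_SIZE - 1);
	uint8_t phase = (cc / CQ_SIZE) & 1;          /* current lap's phase bit */
	uint32_t ci_top = (ci + (uint32_t)n < CQ_SIZE) ? ci + (uint32_t)n : CQ_SIZE;

	for (; ci < ci_top; ci++, n--)
		ring[ci].op_own = phase;

	if (n > 0) {                                 /* run wrapped: flip the phase */
		phase = !phase;
		for (ci = 0; n > 0; ci++, n--)
			ring[ci].op_own = phase;
	}
}
```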
/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/ |
pvrdma_cq.c | 83 pvrdma_req_notify_cq(), 105 pvrdma_create_cq() - entries comes from attr->cqe, is stored in cq->ibcq.cqe and cmd->cqe, and is finally replaced by the device's answer resp->cqe, 285 _pvrdma_flush_cqe(), 326 pvrdma_poll_one() |
/kernel/linux/linux-6.6/drivers/infiniband/hw/vmw_pvrdma/ |
pvrdma_cq.c | same hits as the linux-5.10 copy above: 83 pvrdma_req_notify_cq(), 105 pvrdma_create_cq(), 288 _pvrdma_flush_cqe(), 329 pvrdma_poll_one() |
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/ |
en_rx.c | 65-73 forward declarations (mpwrq skb constructors and the basic/mpwrq/SHAMPO rx handlers), 89 mlx5e_read_enhanced_title_slot(), 138 mlx5e_cqes_update_owner(), 194 mlx5e_decompress_enhanced_cqe(), 999 mlx5e_poll_ico_cq(), 1146 mlx5e_lro_update_tcp_hdr(), 1162 mlx5e_lro_update_hdr(), 1262 mlx5e_shampo_update_fin_psh_flags(), 1274 mlx5e_shampo_update_ipv4_tcp_hdr(), 1298 mlx5e_shampo_update_ipv6_tcp_hdr(), 1319 mlx5e_shampo_update_hdr(), 1352 mlx5e_skb_set_hash(), 1477 mlx5e_handle_csum(), 1548 mlx5e_build_rx_skb(), 1607 mlx5e_shampo_complete_rx_cqe(), 1628 mlx5e_complete_rx_cqe(), 1661 mlx5e_fill_mxbuf(), 1672 mlx5e_skb_from_cqe_linear(), 1720 mlx5e_skb_from_cqe_nonlinear(), 1804 trigger_report(), 1816 mlx5e_handle_rx_err_cqe(), 1822 mlx5e_handle_rx_cqe(), 1866 mlx5e_handle_rx_cqe_rep(), 1909 mlx5e_handle_rx_cqe_mpwrq_rep(), 1994 mlx5e_skb_from_cqe_mpwrq_nonlinear(), 2139 mlx5e_skb_from_cqe_mpwrq_linear(), 2197 mlx5e_skb_from_cqe_shampo(), 2260 mlx5e_shampo_flush_skb(), 2297 mlx5e_handle_rx_cqe_mpwrq_shampo(), 2382 mlx5e_handle_rx_cqe_mpwrq(), 2445 mlx5e_rx_cq_process_enhanced_cqe_comp(), 2494 mlx5e_rx_cq_process_basic_cqe_comp(), 2557 mlx5i_complete_rx_cqe(), 2642 mlx5i_handle_rx_cqe(), 2732 mlx5e_trap_handle_rx_cqe() |
/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlxsw/ |
pci_hw.h | 111 mlxsw_pci_cqe_##name##_get()/_set() wrapper macro - dispatches on enum mlxsw_pci_cqe_v and calls the v0, v1 or v2 accessor generated for the field, 153 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16), 164 MLXSW_ITEM32(pci, cqe, wqe_counter, ...), 281 mlxsw_pci_cqe2_mirror_cong_get(), 338 mlxsw_pci_cqe2_time_stamp_get(), 346 mlxsw_pci_cqe2_time_stamp_sec_get(), 353 mlxsw_pci_cqe2_time_stamp_nsec_get() |
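pci_hw.h copes with three on-the-wire CQE layouts (v0, v1, v2) by generating one accessor per layout and a wrapper that dispatches on the CQE version at run time. A compressed illustration of that pattern (the field, its offsets, and the function names are invented; the real header generates all of this with MLXSW_ITEM32() and a token-pasting macro):

```c
#include <stdint.h>
#include <string.h>

enum cqe_version { CQE_V0, CQE_V1, CQE_V2 };

/* Per-version decoders: the same logical field may sit at different offsets. */
static uint16_t cqe0_system_port_get(const char *cqe)
{
	uint16_t v; memcpy(&v, cqe + 0, sizeof(v)); return v;
}

static uint16_t cqe1_system_port_get(const char *cqe)
{
	uint16_t v; memcpy(&v, cqe + 2, sizeof(v)); return v;
}

static uint16_t cqe2_system_port_get(const char *cqe)
{
	uint16_t v; memcpy(&v, cqe + 4, sizeof(v)); return v;
}

/* Run-time dispatch on the CQE version, as the generated wrappers do. */
static uint16_t cqe_system_port_get(enum cqe_version v, const char *cqe)
{
	switch (v) {
	case CQE_V0: return cqe0_system_port_get(cqe);
	case CQE_V1: return cqe1_system_port_get(cqe);
	case CQE_V2: return cqe2_system_port_get(cqe);
	}
	return 0;
}
```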
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/ |
cq.c | 186 insert_recv_cqe() - zeroes a local t4_cqe, sets header to CQE_STATUS_V(T4_ERR_SWFLUSH), bits_type_ts to the CQ generation bit and, for SRQs, the absolute RQE index, then appends it to cq->sw_queue[cq->sw_pidx], 220 insert_sq_cqe(), 422 cqe_completes_wr(), 445 c4iw_count_rcqes(), 544 poll_cq(), 757 __c4iw_poll_cq_one() |
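insert_recv_cqe()/insert_sq_cqe() show how a driver fabricates "flush" completions in software when a QP dies: it builds a CQE carrying a software-flush status and appends it to a shadow software queue that the poll path drains alongside the hardware queue. A bare-bones sketch of that idea (status value and queue layout are illustrative, not the cxgb4 definitions):

```c
#include <stdint.h>
#include <string.h>

#define SW_CQ_DEPTH     64
#define STATUS_SW_FLUSH 0x7f           /* hypothetical "flushed in software" status */

struct demo_cqe {
	uint8_t  status;
	uint64_t wr_id;                /* which outstanding request this flushes */
};

struct demo_cq {
	struct demo_cqe sw_queue[SW_CQ_DEPTH];
	uint32_t sw_pidx;              /* software producer index */
	uint32_t sw_in_use;
};

/* Queue a fabricated flush completion for an unfinished work request. */
static int insert_flush_cqe(struct demo_cq *cq, uint64_t wr_id)
{
	struct demo_cqe cqe;

	if (cq->sw_in_use == SW_CQ_DEPTH)
		return -1;             /* shadow queue full */

	memset(&cqe, 0, sizeof(cqe));
	cqe.status = STATUS_SW_FLUSH;
	cqe.wr_id  = wr_id;

	cq->sw_queue[cq->sw_pidx] = cqe;
	cq->sw_pidx = (cq->sw_pidx + 1) % SW_CQ_DEPTH;
	cq->sw_in_use++;
	return 0;
}
```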
/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/ |
cq.c | same hits as the linux-5.10 copy above: 186 insert_recv_cqe(), 220 insert_sq_cqe(), 422 cqe_completes_wr(), 445 c4iw_count_rcqes(), 544 poll_cq(), 757 __c4iw_poll_cq_one() |
/kernel/linux/linux-5.10/tools/io_uring/ |
io_uring-cp.c | 126 copy_file() - waits for a completion with io_uring_wait_cqe() (or just peeks with io_uring_peek_cqe() when it should not block), looks up the request via io_uring_cqe_get_data(), retries the request when cqe->res == -EAGAIN, prints strerror(-cqe->res) for other failures, and marks the entry consumed with io_uring_cqe_seen() |
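This hit is the userspace side of the same idea: completions are reaped from the ring's CQ either by blocking (io_uring_wait_cqe) or opportunistically (io_uring_peek_cqe), and each one must be marked consumed with io_uring_cqe_seen so its slot is returned to the kernel. A trimmed sketch of that reap loop using the public liburing calls (the resubmit-on-EAGAIN path of the original tool is omitted):

```c
#include <errno.h>
#include <liburing.h>
#include <stdio.h>

/* Reap up to `want` completions from `ring`; block only for the first one. */
static int reap_completions(struct io_uring *ring, int want)
{
	struct io_uring_cqe *cqe;
	int reaped = 0, ret;

	while (reaped < want) {
		if (reaped == 0)
			ret = io_uring_wait_cqe(ring, &cqe);   /* block for at least one */
		else
			ret = io_uring_peek_cqe(ring, &cqe);   /* then drain what is ready */

		if (ret == -EAGAIN)
			break;                                 /* nothing more right now */
		if (ret < 0)
			return ret;

		if (cqe->res < 0)
			fprintf(stderr, "request failed: %d\n", cqe->res);
		else
			/* cqe->res is the byte count; user_data identifies the request */
			(void)io_uring_cqe_get_data(cqe);

		io_uring_cqe_seen(ring, cqe);                  /* release the CQE slot */
		reaped++;
	}
	return reaped;
}
```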
/kernel/linux/linux-5.10/drivers/net/ethernet/marvell/octeontx2/nic/ |
otx2_txrx.c | 81 otx2_snd_pkt_handler() - reads the nix_send_comp_s from cqe->comp, 166 otx2_set_rxhash() - hash comes from cqe->hdr.flow_tag, 188 otx2_free_rcv_seg() - walks cqe->sg over (cqe->parse.desc_sizem1 + 1) * 16 bytes, 207 otx2_check_rcv_errors() - inspects cqe->parse, 272 otx2_rcv_pkt_handler(), 305 otx2_rx_napi_handler(), 362 otx2_tx_napi_handler(), 908 otx2_cleanup_rx_cqes(), 935 otx2_cleanup_tx_cqes() |
/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlxsw/ |
pci_hw.h | 114 mlxsw_pci_cqe_##name##_get()/_set() wrapper macro - same version dispatch as the linux-6.6 copy above, 156 MLXSW_ITEM32(pci, cqe, system_port, 0x00, 0, 16), 167 MLXSW_ITEM32(pci, cqe, wqe_counter, ...) |
/kernel/linux/linux-6.6/drivers/infiniband/hw/erdma/ |
erdma_cq.c | 11 get_next_valid_cqe() - reads the owner bit of the entry at kern_cq.ci (big-endian, via READ_ONCE()) and returns the entry only when owner ^ !!(cq->kern_cq.ci & cq->depth) is true, 115 erdma_poll_one_cqe() - extracts qpn, wqe_idx and the header word from the big-endian CQE and fills wc->byte_len |
/kernel/linux/linux-5.10/drivers/infiniband/sw/rdmavt/ |
cq.c | 96 rvt_cq_enter() - wrap check of head against cq->ibcq.cqe, 209 rvt_create_cq() - entries comes from attr->cqe and ends up in cq->ibcq.cqe (ib_create_cq() initializes the rest of cq->ibcq), 380 rvt_resize_cq() - rejects cqe < 1 or cqe > rdi->dparms.props.max_cqe and sizes the new queue as (cqe + 1) entries of ib_uverbs_wc or ib_wc |
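rvt_resize_cq() allocates the new queue as (cqe + 1) entries: the extra slot lets a head/tail circular buffer distinguish "full" from "empty" without a separate element count. A small sketch of that sizing and the associated bounds check (the types are simplified stand-ins for ib_wc / ib_uverbs_wc):

```c
#include <stdint.h>
#include <stdlib.h>

struct demo_wc { uint64_t wr_id; uint32_t byte_len; };  /* stand-in completion entry */

struct demo_cq_ring {
	uint32_t head;                 /* producer index */
	uint32_t tail;                 /* consumer index */
	struct demo_wc queue[];        /* cqe + 1 slots */
};

/*
 * Allocate a CQ ring for `cqe` completions.  One slot is left unused so that
 * head == tail means empty and (head + 1) % (cqe + 1) == tail means full.
 * The driver returns -EINVAL for out-of-range depths; this sketch returns NULL.
 */
static struct demo_cq_ring *alloc_cq_ring(int cqe, int max_cqe)
{
	struct demo_cq_ring *ring;
	size_t sz;

	if (cqe < 1 || cqe > max_cqe)
		return NULL;

	sz = sizeof(*ring) + sizeof(struct demo_wc) * ((size_t)cqe + 1);
	ring = calloc(1, sz);
	return ring;
}
```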
/kernel/linux/linux-6.6/drivers/infiniband/sw/rdmavt/ |
cq.c | same hits as the linux-5.10 copy above: 54 rvt_cq_enter(), 167 rvt_create_cq(), 338 rvt_resize_cq() |
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/ |
qplib_fp.c | 1447 __clean_cq() - zeroes cqe->qp_handle in cq_req and cq_res_rc entries that still point at the QP being destroyed, 2136 __flush_sq(), 2176 __flush_rq(), 2339 bnxt_qplib_cq_process_req(), 2446 bnxt_qplib_cq_process_res_rc(), 2527 bnxt_qplib_cq_process_res_ud(), 2630 bnxt_qplib_cq_process_res_raweth_qp1(), 2723 bnxt_qplib_cq_process_terminal(), 2833 bnxt_qplib_process_flush_list(), 2856 bnxt_qplib_poll_cq() |
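__clean_cq() shows another cleanup strategy: instead of removing CQEs that refer to a QP being destroyed, the driver walks the CQ and zeroes each matching entry's qp_handle, so the poll path can simply skip entries whose handle is 0. A condensed sketch of that walk (the handle field and a flat ring are simplifications):

```c
#include <stdint.h>

#define CQ_DEPTH 128

struct demo_cqe {
	uint64_t qp_handle;            /* owner QP of this completion; 0 means ignore */
	uint32_t status;
};

/* Neutralize every pending completion that still points at a dying QP. */
static void clean_cq(struct demo_cqe *ring, uint64_t qp_handle)
{
	for (int i = 0; i < CQ_DEPTH; i++) {
		if (ring[i].qp_handle == qp_handle)
			ring[i].qp_handle = 0;
	}
}
```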
/kernel/linux/linux-6.6/drivers/infiniband/hw/bnxt_re/ |
qplib_fp.c | same hits as the linux-5.10 copy above: 1474 __clean_cq(), 2218 __flush_sq(), 2258 __flush_rq(), 2421 bnxt_qplib_cq_process_req(), 2528 bnxt_qplib_cq_process_res_rc(), 2607 bnxt_qplib_cq_process_res_ud(), 2709 bnxt_qplib_cq_process_res_raweth_qp1(), 2800 bnxt_qplib_cq_process_terminal(), 2910 bnxt_qplib_process_flush_list(), 2933 bnxt_qplib_poll_cq() |
/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/ |
cq.c | 81 get_sw_cqe() - uses the 64-byte CQE (the second half of a 128-byte entry when cqe_sz is 128) and accepts it when the owner bit in op_own matches the wrap parity !!(n & (cq->ibcq.cqe + 1)), 117 handle_good_req() - switches on be32_to_cpu(cqe->sop_drop_qpn) >> 24 and sets wc->byte_len from cqe->byte_cnt, 166 handle_responder(), 270 dump_cqe(), 279 mlx5_handle_error_cqe(), 362 get_sig_err_item(), 459 mlx5_poll_one(), 869 init_cq_frag_buf(), 1074 __mlx5_ib_cq_clean() |
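mlx5_handle_error_cqe() (like its mthca and mlx4 counterparts above) does one job: translate the syndrome byte of an error CQE into a generic work-completion status, dumping the raw entry and falling back to a catch-all for anything unexpected. A schematic version of that translation (syndrome values and status names are invented, not the mlx5 definitions):

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical hardware syndromes and generic completion statuses. */
enum demo_syndrome  { SYND_LOCAL_LENGTH = 1, SYND_LOCAL_PROT = 4,
		      SYND_WR_FLUSHED = 5, SYND_REMOTE_ACCESS = 0x13 };
enum demo_wc_status { WC_LOC_LEN_ERR, WC_LOC_PROT_ERR, WC_WR_FLUSH_ERR,
		      WC_REM_ACCESS_ERR, WC_GENERAL_ERR };

struct demo_err_cqe { uint8_t syndrome; uint32_t raw[4]; };

static enum demo_wc_status handle_error_cqe(const struct demo_err_cqe *cqe)
{
	switch (cqe->syndrome) {
	case SYND_LOCAL_LENGTH:  return WC_LOC_LEN_ERR;
	case SYND_LOCAL_PROT:    return WC_LOC_PROT_ERR;
	case SYND_WR_FLUSHED:    return WC_WR_FLUSH_ERR;   /* QP in error; WRs flushed */
	case SYND_REMOTE_ACCESS: return WC_REM_ACCESS_ERR;
	default:
		/* Unknown syndrome: dump the raw entry, report a generic error. */
		fprintf(stderr, "unexpected CQE syndrome 0x%02x: %08x %08x %08x %08x\n",
			cqe->syndrome, cqe->raw[0], cqe->raw[1], cqe->raw[2], cqe->raw[3]);
		return WC_GENERAL_ERR;
	}
}
```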
/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/ |
cq.c | same hits as the linux-6.6 copy above: 81 get_sw_cqe(), 117 handle_good_req(), 166 handle_responder(), 270 dump_cqe(), 276 mlx5_handle_error_cqe(), 356 get_sig_err_item(), 453 mlx5_poll_one(), 844 init_cq_frag_buf(), 1049 __mlx5_ib_cq_clean() |