/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx4/

cq.c
    104  void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)   in mlx4_cq_completion() argument
    110  cqn & (dev->caps.num_cqs - 1));   in mlx4_cq_completion()
    114  mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);   in mlx4_cq_completion()
    126  void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)   in mlx4_cq_event() argument
    132  cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));   in mlx4_cq_event()
    136  mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);   in mlx4_cq_event()
    184  err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);   in mlx4_cq_modify()
    210  err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);   in mlx4_cq_resize()
    217  int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)   in __mlx4_cq_alloc_icm() argument
    223  *cqn ...   in __mlx4_cq_alloc_icm()
    244  mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)   mlx4_cq_alloc_icm() argument
    264  __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)   __mlx4_cq_free_icm() argument
    274  mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)   mlx4_cq_free_icm() argument
    [all...]
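
The lookup pattern in the mlx4_cq_completion()/mlx4_cq_event() hits above masks the hardware-reported CQN with (num_cqs - 1), which maps it onto a table slot because num_cqs is a power of two. A minimal userspace sketch of that indexing idea follows; it uses a plain array in place of the kernel radix tree, and every name in it is illustrative rather than taken from the driver.

/*
 * Sketch of the CQN-to-slot masking seen in mlx4_cq_completion(); assumes the
 * table size is a power of two, as the driver's (num_cqs - 1) mask implies.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

#define NUM_CQS 256u                          /* must be a power of two */

struct demo_cq {
    uint32_t cqn;
};

static struct demo_cq *cq_table[NUM_CQS];

static struct demo_cq *demo_cq_lookup(uint32_t cqn)
{
    /* keep only the low log2(NUM_CQS) bits of the 24-bit CQN */
    return cq_table[cqn & (NUM_CQS - 1)];
}

int main(void)
{
    struct demo_cq cq = { .cqn = 0x123456 };

    cq_table[cq.cqn & (NUM_CQS - 1)] = &cq;

    struct demo_cq *found = demo_cq_lookup(cq.cqn);
    if (!found) {
        /* mirrors the "Completion event for bogus CQ" debug path */
        fprintf(stderr, "bogus CQ %08x\n", cq.cqn);
        return EXIT_FAILURE;
    }
    printf("CQ %08x lives in slot %u\n", found->cqn, found->cqn & (NUM_CQS - 1));
    return EXIT_SUCCESS;
}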

en_resources.c
    41   int is_tx, int rss, int qpn, int cqn,   in mlx4_en_fill_qp_context()
    72   context->cqn_send = cpu_to_be32(cqn);   in mlx4_en_fill_qp_context()
    73   context->cqn_recv = cpu_to_be32(cqn);   in mlx4_en_fill_qp_context()
    40   mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context)   mlx4_en_fill_qp_context() argument

/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx4/

cq.c
    104  void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)   in mlx4_cq_completion() argument
    110  cqn & (dev->caps.num_cqs - 1));   in mlx4_cq_completion()
    114  mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);   in mlx4_cq_completion()
    126  void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)   in mlx4_cq_event() argument
    132  cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));   in mlx4_cq_event()
    136  mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);   in mlx4_cq_event()
    184  err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 1);   in mlx4_cq_modify()
    210  err = mlx4_MODIFY_CQ(dev, mailbox, cq->cqn, 0);   in mlx4_cq_resize()
    217  int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn)   in __mlx4_cq_alloc_icm() argument
    223  *cqn ...   in __mlx4_cq_alloc_icm()
    244  mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn, u8 usage)   mlx4_cq_alloc_icm() argument
    264  __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)   __mlx4_cq_free_icm() argument
    274  mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn)   mlx4_cq_free_icm() argument
    [all...]

en_resources.c
    41   int is_tx, int rss, int qpn, int cqn,   in mlx4_en_fill_qp_context()
    72   context->cqn_send = cpu_to_be32(cqn);   in mlx4_en_fill_qp_context()
    73   context->cqn_recv = cpu_to_be32(cqn);   in mlx4_en_fill_qp_context()
    40   mlx4_en_fill_qp_context(struct mlx4_en_priv *priv, int size, int stride, int is_tx, int rss, int qpn, int cqn, int user_prio, struct mlx4_qp_context *context)   mlx4_en_fill_qp_context() argument

/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/

hns_roce_cq.c
    76   hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;   in alloc_cqn()
    83   static inline u8 get_cq_bankid(unsigned long cqn)   in get_cq_bankid() argument
    86   return (u8)(cqn & GENMASK(1, 0));   in get_cq_bankid()
    89   static void free_cqn(struct hns_roce_dev *hr_dev, unsigned long cqn)   in free_cqn() argument
    94   bank = &cq_table->bank[get_cq_bankid(cqn)];   in free_cqn()
    96   ida_free(&bank->ida, cqn >> CQ_BANKID_SHIFT);   in free_cqn()
    120  hr_cq->cqn);   in hns_roce_create_cqc()
    124  hr_cq->cqn, ret);   in hns_roce_create_cqc()
    147  ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);   in alloc_cqc()
    150  hr_cq->cqn, re ...   in alloc_cqc()
    449  hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)   hns_roce_cq_completion() argument
    468  hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)   hns_roce_cq_event() argument
    [all...]
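
In the alloc_cqn()/get_cq_bankid() hits above, the 6.6 hns_roce driver packs a bank id into the low bits of the CQN and keeps the per-bank IDA value in the remaining bits. The sketch below re-creates that encoding in userspace; the shift of 2 and the four-bank count are inferred from the GENMASK(1, 0) mask in the snippet, and all helper names are illustrative, not from the driver.

/*
 * Sketch of the CQN bank encoding: low bits = bank id, upper bits = per-bank
 * id handed out by an IDA.  GENMASK(1, 0) in the hit implies 4 banks.
 */
#include <stdio.h>
#include <stdint.h>

#define CQ_BANKID_SHIFT 2u
#define CQ_BANKID_MASK  0x3u              /* equivalent of GENMASK(1, 0) */

static uint32_t encode_cqn(uint32_t per_bank_id, uint8_t bankid)
{
    return (per_bank_id << CQ_BANKID_SHIFT) | (bankid & CQ_BANKID_MASK);
}

static uint8_t cqn_to_bankid(uint32_t cqn)
{
    return (uint8_t)(cqn & CQ_BANKID_MASK);
}

static uint32_t cqn_to_per_bank_id(uint32_t cqn)
{
    return cqn >> CQ_BANKID_SHIFT;
}

int main(void)
{
    uint32_t cqn = encode_cqn(37, 3);     /* bank 3, 37th id in that bank */

    printf("cqn=%u bank=%u id=%u\n", cqn,
           (unsigned)cqn_to_bankid(cqn), cqn_to_per_bank_id(cqn));
    return 0;
}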

/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/

hns_roce_cq.c
    59   ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);   in alloc_cqc()
    66   ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);   in alloc_cqc()
    69   hr_cq->cqn, ret);   in alloc_cqc()
    73   ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));   in alloc_cqc()
    89   ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,   in alloc_cqc()
    95   hr_cq->cqn, ret);   in alloc_cqc()
    108  xa_erase(&cq_table->array, hr_cq->cqn);   in alloc_cqc()
    111  hns_roce_table_put(hr_dev, &cq_table->table, hr_cq->cqn);   in alloc_cqc()
    114  hns_roce_bitmap_free(&cq_table->bitmap, hr_cq->cqn, BITMAP_NO_RR);   in alloc_cqc()
    124  ret = hns_roce_cmd_mbox(hr_dev, 0, 0, hr_cq->cqn,   in free_cqc()
    348  hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn)   hns_roce_cq_completion() argument
    367  hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type)   hns_roce_cq_event() argument
    [all...]
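
The 5.10 alloc_cqc() hits show a chain of acquisitions keyed by the CQN (bitmap slot, HEM table entry, xarray slot, mailbox command) that is unwound in reverse order when a later step fails. The sketch below shows only that goto-based unwinding shape with stand-in resources; none of the helper names come from the driver.

/*
 * Sketch of reverse-order error unwinding: each acquired resource is released
 * in the opposite order on failure, as in the alloc_cqc() hits above.
 */
#include <stdio.h>

static int acquire_a(void) { puts("acquire A"); return 0; }
static int acquire_b(void) { puts("acquire B"); return 0; }
static int acquire_c(void) { puts("acquire C"); return -1; } /* simulated failure */
static void release_b(void) { puts("release B"); }
static void release_a(void) { puts("release A"); }

static int alloc_demo(void)
{
    int ret;

    ret = acquire_a();                /* e.g. allocate the CQN */
    if (ret)
        return ret;

    ret = acquire_b();                /* e.g. get the context table entry */
    if (ret)
        goto err_put_a;

    ret = acquire_c();                /* e.g. publish in the lookup structure */
    if (ret)
        goto err_put_b;

    return 0;

err_put_b:
    release_b();
err_put_a:
    release_a();
    return ret;
}

int main(void)
{
    return alloc_demo() ? 1 : 0;
}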

hns_roce_hw_v2_dfx.c
    9    int hns_roce_v2_query_cqc_info(struct hns_roce_dev *hr_dev, u32 cqn,   in hns_roce_v2_query_cqc_info() argument
    21   ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, cqn, 0,   in hns_roce_v2_query_cqc_info()

hns_roce_srq.c
    81   u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)   in alloc_srqc()
    136  hr_dev->hw->write_srqc(hr_dev, srq, pdn, xrcd, cqn, mailbox->buf,   in alloc_srqc()
    293  u32 cqn;   in hns_roce_create_srq() local
    338  cqn = ib_srq_has_cq(init_attr->srq_type) ?   in hns_roce_create_srq()
    339  to_hr_cq(init_attr->ext.cq)->cqn : 0;   in hns_roce_create_srq()
    342  ret = alloc_srqc(hr_dev, srq, to_hr_pd(ib_srq->pd)->pdn, cqn, 0, 0);   in hns_roce_create_srq()
    80   alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq, u32 pdn, u32 cqn, u16 xrcd, u64 db_rec_addr)   alloc_srqc() argument

/kernel/linux/linux-5.10/drivers/net/ethernet/mellanox/mlx5/core/

cq.c
    107  cq->cqn = MLX5_GET(create_cq_out, out, cqn);   in mlx5_core_create_cq()
    134  cq->cqn);   in mlx5_core_create_cq()
    145  MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);   in mlx5_core_create_cq()
    163  MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);   in mlx5_core_destroy_cq()
    183  MLX5_SET(query_cq_in, in, cqn, cq->cqn);   in mlx5_core_query_cq()
    207  MLX5_SET(modify_cq_in, in, cqn, c ...   in mlx5_core_modify_cq_moderation()
    [all...]

eq.c
    112  static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)   in mlx5_eq_cq_get() argument
    118  cq = radix_tree_lookup(&table->tree, cqn);   in mlx5_eq_cq_get()
    135  u32 cqn = -1;   in mlx5_eq_comp_int() local
    149  cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;   in mlx5_eq_comp_int()
    151  cq = mlx5_eq_cq_get(eq, cqn);   in mlx5_eq_comp_int()
    158  "Completion event for bogus CQ 0x%x\n", cqn);   in mlx5_eq_comp_int()
    168  if (cqn != -1)   in mlx5_eq_comp_int()
    425  err = radix_tree_insert(&table->tree, cq->cqn, cq);   in mlx5_eq_add_cq()
    437  tmp = radix_tree_delete(&table->tree, cq->cqn);   in mlx5_eq_del_cq()
    517  u32 cqn;   cq_err_event_notifier() local
    [all...]
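
The mlx5_eq_comp_int() hit reads the CQN out of a big-endian 32-bit EQE field and keeps only the low 24 bits. A userspace sketch of that extraction follows; ntohl() stands in for be32_to_cpu(), and the one-field struct is a simplification rather than the real EQE layout.

/*
 * Sketch of 24-bit CQN extraction from a big-endian completion EQE field,
 * mirroring: cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_comp_eqe {
    uint32_t cqn_be;                      /* big-endian as seen on the wire */
};

static uint32_t eqe_to_cqn(const struct demo_comp_eqe *eqe)
{
    return ntohl(eqe->cqn_be) & 0xffffff; /* CQN is only 24 bits wide */
}

int main(void)
{
    struct demo_comp_eqe eqe = { .cqn_be = htonl(0xAB123456u) };

    /* the top byte (0xAB) is masked off, leaving the 24-bit CQN 0x123456 */
    printf("cqn = 0x%06x\n", eqe_to_cqn(&eqe));
    return 0;
}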

/kernel/linux/linux-5.10/drivers/infiniband/hw/mthca/

mthca_cq.c
    76   __be32 cqn;   member
    211  mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,   in update_cons_index()
    217  void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)   in mthca_cq_completion() argument
    221  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_completion()
    224  mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);   in mthca_cq_completion()
    233  void mthca_cq_event(struct mthca_dev *dev, u32 cqn,   in mthca_cq_event() argument
    241  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_event()
    248  mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);   in mthca_cq_event()
    297  qpn, cq->cqn, cq->cons_index, prod_index);   in mthca_cq_clean()
    384  cq->cqn, c ...   in handle_error_cqe()
    [all...]

mthca_eq.c
    132  __be32 cqn;   member
    149  __be32 cqn;   member
    219  static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)   in disarm_cq() argument
    222  mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,   in disarm_cq()
    276  disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;   in mthca_eq_int()
    343  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);   in mthca_eq_int()
    344  mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),   in mthca_eq_int()
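
The disarm_cq() hit builds a doorbell from two 32-bit halves: a command code ORed with the EQ number in the high word, and the CQN in the low word, which mthca_write64() then posts to the device. The sketch below only shows that hi/lo packing; the command value is a placeholder, not the driver's real MTHCA_EQ_DB_DISARM_CQ constant, and nothing is written to hardware.

/*
 * Sketch of packing a (command | eqn, cqn) pair into one 64-bit doorbell
 * value, in the spirit of the mthca_write64() call above.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_DB_DISARM_CQ (7u << 24)      /* placeholder command code */

static uint64_t pack_doorbell(uint32_t hi, uint32_t lo)
{
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    uint32_t eqn = 3, cqn = 0x1234;
    uint64_t db = pack_doorbell(DEMO_DB_DISARM_CQ | eqn, cqn);

    printf("doorbell = 0x%016llx\n", (unsigned long long)db);
    return 0;
}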

/kernel/linux/linux-6.6/drivers/infiniband/hw/mthca/

mthca_cq.c
    76   __be32 cqn;   member
    211  mthca_write64(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn, incr - 1,   in update_cons_index()
    217  void mthca_cq_completion(struct mthca_dev *dev, u32 cqn)   in mthca_cq_completion() argument
    221  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_completion()
    224  mthca_warn(dev, "Completion event for bogus CQ %08x\n", cqn);   in mthca_cq_completion()
    233  void mthca_cq_event(struct mthca_dev *dev, u32 cqn,   in mthca_cq_event() argument
    241  cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1));   in mthca_cq_event()
    248  mthca_warn(dev, "Async event for bogus CQ %08x\n", cqn);   in mthca_cq_event()
    297  qpn, cq->cqn, cq->cons_index, prod_index);   in mthca_cq_clean()
    384  cq->cqn, c ...   in handle_error_cqe()
    [all...]

mthca_eq.c
    132  __be32 cqn;   member
    149  __be32 cqn;   member
    219  static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn)   in disarm_cq() argument
    222  mthca_write64(MTHCA_EQ_DB_DISARM_CQ | eqn, cqn,   in disarm_cq()
    276  disarm_cqn = be32_to_cpu(eqe->event.comp.cqn) & 0xffffff;   in mthca_eq_int()
    343  be32_to_cpu(eqe->event.cq_err.cqn) & 0xffffff);   in mthca_eq_int()
    344  mthca_cq_event(dev, be32_to_cpu(eqe->event.cq_err.cqn),   in mthca_eq_int()

/kernel/linux/linux-6.6/drivers/net/ethernet/mellanox/mlx5/core/

cq.c
    108  cq->cqn = MLX5_GET(create_cq_out, out, cqn);   in mlx5_create_cq()
    135  cq->cqn);   in mlx5_create_cq()
    146  MLX5_SET(destroy_cq_in, din, cqn, cq->cqn);   in mlx5_create_cq()
    174  MLX5_SET(destroy_cq_in, in, cqn, cq->cqn);   in mlx5_core_destroy_cq()
    194  MLX5_SET(query_cq_in, in, cqn, cq->cqn);   in mlx5_core_query_cq()
    218  MLX5_SET(modify_cq_in, in, cqn, c ...   in mlx5_core_modify_cq_moderation()
    [all...]

eq.c
    94   static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn)   in mlx5_eq_cq_get() argument
    100  cq = radix_tree_lookup(&table->tree, cqn);   in mlx5_eq_cq_get()
    117  u32 cqn = -1;   in mlx5_eq_comp_int() local
    131  cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;   in mlx5_eq_comp_int()
    133  cq = mlx5_eq_cq_get(eq, cqn);   in mlx5_eq_comp_int()
    140  "Completion event for bogus CQ 0x%x\n", cqn);   in mlx5_eq_comp_int()
    150  if (cqn != -1)   in mlx5_eq_comp_int()
    413  err = radix_tree_insert(&table->tree, cq->cqn, cq);   in mlx5_eq_add_cq()
    425  tmp = radix_tree_delete(&table->tree, cq->cqn);   in mlx5_eq_del_cq()
    508  u32 cqn;   cq_err_event_notifier() local
    [all...]

/kernel/linux/linux-6.6/drivers/infiniband/hw/erdma/

erdma_eq.c
    33   u32 cqn, qpn;   in erdma_aeq_event_handler() local
    54   cqn = le32_to_cpu(aeqe->event_data0);   in erdma_aeq_event_handler()
    55   cq = find_cq_by_cqn(dev, cqn);   in erdma_aeq_event_handler()
    130  int cqn;   in erdma_ceq_completion_handler() local
    143  cqn = FIELD_GET(ERDMA_CEQE_HDR_CQN_MASK, READ_ONCE(*ceqe));   in erdma_ceq_completion_handler()
    145  cq = find_cq_by_cqn(dev, cqn);   in erdma_ceq_completion_handler()

/kernel/linux/linux-5.10/include/linux/mlx5/

cq.h
    40   u32 cqn;   member
    170  doorbell[1] = cpu_to_be32(cq->cqn);   in mlx5_cq_arm()
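
The mlx5_cq_arm() hit shows that the second word of the arm doorbell is simply the big-endian CQN. The sketch below assembles both doorbell words in userspace; the layout assumed for the first word (a 2-bit sequence number, an arm command, and a 24-bit consumer index) is an assumption for illustration only, and htonl() stands in for cpu_to_be32().

/*
 * Sketch of composing the two-word CQ arm doorbell; only doorbell[1] = cqn
 * is taken from the hit above, the rest is an assumed layout.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static void build_arm_doorbell(uint32_t doorbell[2], uint32_t sn, uint32_t cmd,
                               uint32_t cons_index, uint32_t cqn)
{
    doorbell[0] = htonl(((sn & 3) << 28) | cmd | (cons_index & 0xffffff));
    doorbell[1] = htonl(cqn);             /* mirrors cpu_to_be32(cq->cqn) */
}

int main(void)
{
    uint32_t db[2];

    build_arm_doorbell(db, 1, 0, 42, 0x00abcd);
    printf("db[0]=0x%08x db[1]=0x%08x\n", ntohl(db[0]), ntohl(db[1]));
    return 0;
}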

/kernel/linux/linux-6.6/include/linux/mlx5/

cq.h
    40   u32 cqn;   member
    170  doorbell[1] = cpu_to_be32(cq->cqn);   in mlx5_cq_arm()

/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx5/

srq.c
    295  in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;   in mlx5_ib_create_srq()
    297  in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;   in mlx5_ib_create_srq()

/kernel/linux/linux-5.10/drivers/infiniband/hw/mlx4/

srq.c
    82   u32 cqn;   in mlx4_ib_create_srq() local
    176  cqn = ib_srq_has_cq(init_attr->srq_type) ?   in mlx4_ib_create_srq()
    177  to_mcq(init_attr->ext.cq)->mcq.cqn : 0;   in mlx4_ib_create_srq()
    181  err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,   in mlx4_ib_create_srq()

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx4/

srq.c
    82   u32 cqn;   in mlx4_ib_create_srq() local
    180  cqn = ib_srq_has_cq(init_attr->srq_type) ?   in mlx4_ib_create_srq()
    181  to_mcq(init_attr->ext.cq)->mcq.cqn : 0;   in mlx4_ib_create_srq()
    185  err = mlx4_srq_alloc(dev->dev, to_mpd(ib_srq->pd)->pdn, cqn, xrcdn,   in mlx4_ib_create_srq()

/kernel/linux/linux-6.6/drivers/infiniband/hw/mlx5/

srq.c
    274  in.cqn = to_mcq(init_attr->ext.cq)->mcq.cqn;   in mlx5_ib_create_srq()
    276  in.cqn = to_mcq(dev->devr.c0)->mcq.cqn;   in mlx5_ib_create_srq()

/kernel/linux/linux-5.10/include/uapi/rdma/

hns-abi.h
    47   __aligned_u64 cqn; /* Only 32 bits used, 64 for compat */   member
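
The hns uapi hit stores a 32-bit CQN in an __aligned_u64 field. Forcing a 64-bit, 8-byte-aligned slot keeps the structure's size and field offsets identical for 32-bit and 64-bit userspace, which is the usual reason for this uapi idiom. The sketch below demonstrates the same trick with a hypothetical response struct; every field name except cqn is illustrative, not from the header.

/*
 * Sketch of the __aligned_u64-for-compat idiom: the asserts only hold because
 * the 64-bit field is explicitly 8-byte aligned and the tail is padded.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct demo_create_cq_resp {
    uint64_t cqn __attribute__((aligned(8)));  /* only low 32 bits used */
    uint32_t cap_flags;
    uint32_t reserved;                          /* explicit pad, no tail hole */
};

_Static_assert(sizeof(struct demo_create_cq_resp) == 16,
               "layout must not depend on the compiler's native u64 alignment");
_Static_assert(offsetof(struct demo_create_cq_resp, cap_flags) == 8,
               "cqn occupies a full, aligned 8-byte slot");

int main(void)
{
    struct demo_create_cq_resp resp = { .cqn = 0x1234 };

    printf("cqn=%llu size=%zu\n",
           (unsigned long long)resp.cqn, sizeof(resp));
    return 0;
}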

/kernel/linux/patches/linux-5.10/prebuilts/usr/include/rdma/

hns-abi.h
    29   __aligned_u64 cqn;   member