/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/
hns_roce_cq.c

      58  static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  in alloc_cqn() argument
      76  hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid;  in alloc_cqn()
     103  hns_roce_create_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, u64 *mtts, dma_addr_t dma_handle)  hns_roce_create_cqc() argument
     104  struct hns_roce_cq *hr_cq,  in hns_roce_create_cqc()
     117  hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);  in hns_roce_create_cqc()
     120  hr_cq->cqn);  in hns_roce_create_cqc()
     124  hr_cq->cqn, ret);  in hns_roce_create_cqc()
     131  static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  in alloc_cqc() argument
     139  ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),  in alloc_cqc()
     147  ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);  in alloc_cqc()
     150  hr_cq ...  in alloc_cqc()
     174  free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  free_cqc() argument
     199  alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata, unsigned long addr)  alloc_cq_buf() argument
     220  free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  free_cq_buf() argument
     225  alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata, unsigned long addr, struct hns_roce_ib_create_cq_resp *resp)  alloc_cq_db() argument
     260  free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata)  free_cq_db() argument
     299  get_cq_ucmd(struct hns_roce_cq *hr_cq, struct ib_udata *udata, struct hns_roce_ib_create_cq *ucmd)  get_cq_ucmd() argument
     314  set_cq_param(struct hns_roce_cq *hr_cq, u32 cq_entries, int vector, struct hns_roce_ib_create_cq *ucmd)  set_cq_param() argument
     330  set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata, struct hns_roce_ib_create_cq *ucmd)  set_cqe_size() argument
     361  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  hns_roce_create_cq() local
     439  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  hns_roce_destroy_cq() local
     451  struct hns_roce_cq *hr_cq;  hns_roce_cq_completion() local
     471  struct hns_roce_cq *hr_cq;  hns_roce_cq_event() local
     [all...]
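The alloc_cqn() lines above build a CQN out of a bank id in the low bits and a per-bank id in the high bits, which spreads consecutively created CQs across the bank bitmaps. A minimal userspace sketch of the encode/decode, assuming the 4-bank layout (CQ_BANKID_SHIFT = 2 is an assumption here; the authoritative value lives in hns_roce_cq.c):

    #include <stdio.h>

    /* assumed values for illustration; see hns_roce_cq.c for the real ones */
    #define CQ_BANKID_SHIFT 2
    #define CQ_BANKID_MASK  ((1u << CQ_BANKID_SHIFT) - 1)

    /* low bits select the bank, high bits are the id allocated inside it */
    static unsigned int make_cqn(unsigned int id, unsigned int bankid)
    {
        return (id << CQ_BANKID_SHIFT) | bankid;
    }

    int main(void)
    {
        unsigned int cqn = make_cqn(5, 3);

        printf("cqn=%u bank=%u id=%u\n",
               cqn, cqn & CQ_BANKID_MASK, cqn >> CQ_BANKID_SHIFT);
        return 0; /* prints: cqn=23 bank=3 id=5 */
    }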
hns_roce_restrack.c

      14  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry() local
      21  if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))  in hns_roce_fill_res_cq_entry()
      24  if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))  in hns_roce_fill_res_cq_entry()
      27  if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size))  in hns_roce_fill_res_cq_entry()
      30  if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn))  in hns_roce_fill_res_cq_entry()
      46  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry_raw() local
      53  ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);  in hns_roce_fill_res_cq_entry_raw()
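hns_roce_fill_res_cq_entry() feeds per-CQ driver counters to the rdma resource-tracking netlink dump, stopping at the first attribute that fails to fit. A hedged kernel-style sketch of the same pattern (not a standalone program; it assumes the driver-internal hns_roce_device.h types, and the attribute names are taken from the lines above):

    #include <linux/errno.h>
    #include <rdma/restrack.h>
    #include "hns_roce_device.h"

    /* push two of the per-CQ counters shown above; a nonzero return from
     * rdma_nl_put_driver_u32() means the skb ran out of room */
    static int demo_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
    {
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);

        if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth))
            return -EMSGSIZE;
        if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index))
            return -EMSGSIZE;

        return 0;
    }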
hns_roce_hw_v2.c

    2570  struct hns_roce_cq *hr_cq;  in free_mr_init_cq() local
    2575  hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL);  in free_mr_init_cq()
    2576  if (ZERO_OR_NULL_PTR(hr_cq))  in free_mr_init_cq()
    2579  cq = &hr_cq->ib_cq;  in free_mr_init_cq()
    2584  kfree(hr_cq);  in free_mr_init_cq()
    3455  static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)  in get_cqe_v2() argument
    3457  return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);  in get_cqe_v2()
    3460  static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n)  in get_sw_cqe_v2() argument
    3469  update_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  update_cq_db() argument
    3486  __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  __hns_roce_v2_cq_clean() argument
    3529  hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  hns_roce_v2_cq_clean() argument
    3537  hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle)  hns_roce_v2_write_cqc() argument
    3590  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v2_req_notify_cq() local
    3637  hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries, struct ib_wc *wc)  hns_roce_v2_sw_poll_cq() argument
    3717  get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe, struct hns_roce_qp **cur_qp)  get_cur_qp() argument
    3870  hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, struct hns_roce_qp **cur_qp, struct ib_wc *wc)  hns_roce_v2_poll_one() argument
    3939  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v2_poll_cq() local
    5712  struct hns_roce_cq *hr_cq = to_hr_cq(cq);  hns_roce_v2_modify_cq() local
    [all...]
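get_sw_cqe_v2() above is the heart of the poll path: the CQE at index n is software-owned only when its owner bit disagrees with the wrap parity of the free-running consumer index, because hardware flips the bit each time it laps the ring. A self-contained userspace sketch of that test, assuming a power-of-two ring and a made-up single owner bit (the real v2 CQE layout differs):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define DEMO_CQE_OWNER (1u << 7)   /* assumed bit position, for illustration */

    struct demo_cqe {
        uint32_t byte4;                /* stand-in for the real CQE dword */
    };

    struct demo_cq {
        struct demo_cqe *ring;
        uint32_t depth;                /* number of entries, power of two */
        uint32_t cons_index;           /* free-running consumer index */
    };

    /* A CQE is ready when its owner bit disagrees with the consumer
     * index's wrap parity, mirroring the get_sw_cqe_v2() test above. */
    static struct demo_cqe *demo_next_sw_cqe(struct demo_cq *cq)
    {
        uint32_t n = cq->cons_index;
        struct demo_cqe *cqe = &cq->ring[n & (cq->depth - 1)];
        bool owner = cqe->byte4 & DEMO_CQE_OWNER;
        bool parity = !!(n & cq->depth);

        return (owner ^ parity) ? cqe : NULL;
    }

    int main(void)
    {
        struct demo_cq cq = { .ring = calloc(4, sizeof(struct demo_cqe)),
                              .depth = 4, .cons_index = 0 };

        cq.ring[0].byte4 = DEMO_CQE_OWNER;    /* hardware posted CQE 0 */
        return demo_next_sw_cqe(&cq) ? 0 : 1; /* found it: exit 0 */
    }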
hns_roce_main.c

     968  struct hns_roce_cq *hr_cq = to_hr_cq(cq);  in check_and_get_armed_cq() local
     971  spin_lock_irqsave(&hr_cq->lock, flags);  in check_and_get_armed_cq()
     973  if (!hr_cq->is_armed) {  in check_and_get_armed_cq()
     974  hr_cq->is_armed = 1;  in check_and_get_armed_cq()
     975  list_add_tail(&hr_cq->node, cq_list);  in check_and_get_armed_cq()
     978  spin_unlock_irqrestore(&hr_cq->lock, flags);  in check_and_get_armed_cq()
     984  struct hns_roce_cq *hr_cq;  in hns_roce_handle_device_err() local
    1004  list_for_each_entry(hr_cq, &cq_list, node)  in hns_roce_handle_device_err()
    1005  hns_roce_cq_completion(hr_dev, hr_cq->cqn);  in hns_roce_handle_device_err()
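check_and_get_armed_cq() is a test-and-set-under-lock collection pattern: during a device error each armed CQ is put on a local list at most once, and hns_roce_cq_completion() is fired for the whole list only after the scan. A minimal pthread sketch of the idea (types and names here are illustrative, not the driver's):

    #include <pthread.h>
    #include <stdio.h>

    struct demo_cq {
        pthread_mutex_t lock;
        int is_armed;
        struct demo_cq *next;          /* intrusive error-list link */
    };

    /* add cq to *list exactly once, no matter how many times this runs */
    static void check_and_get_armed_cq(struct demo_cq **list, struct demo_cq *cq)
    {
        pthread_mutex_lock(&cq->lock);
        if (!cq->is_armed) {
            cq->is_armed = 1;
            cq->next = *list;
            *list = cq;
        }
        pthread_mutex_unlock(&cq->lock);
    }

    int main(void)
    {
        struct demo_cq cq = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct demo_cq *list = NULL;

        check_and_get_armed_cq(&list, &cq);
        check_and_get_armed_cq(&list, &cq);   /* second call is a no-op */

        /* fire completions only after the scan, outside the per-CQ locks */
        for (struct demo_cq *p = list; p; p = p->next)
            printf("complete one cq\n");      /* printed exactly once */
        return 0;
    }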
hns_roce_qp.c

    1445  struct hns_roce_cq *hr_cq;  in hns_roce_wq_overflow() local
    1452  hr_cq = to_hr_cq(ib_cq);  in hns_roce_wq_overflow()
    1453  spin_lock(&hr_cq->lock);  in hns_roce_wq_overflow()
    1455  spin_unlock(&hr_cq->lock);  in hns_roce_wq_overflow()
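hns_roce_wq_overflow() does a lockless fullness check first and re-reads head/tail under the CQ lock only when the queue looks full, so the common post path never takes the lock. A userspace sketch of that double-check (head/tail/max_post mirror the driver's work queue fields; the lock handling is simplified):

    #include <pthread.h>
    #include <stdint.h>

    struct demo_wq {
        uint32_t head, tail;           /* free-running indices */
        uint32_t max_post;
    };

    static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;

    /* returns nonzero when posting nreq more WRs would overflow the queue */
    static int demo_wq_overflow(struct demo_wq *wq, uint32_t nreq)
    {
        uint32_t cur = wq->head - wq->tail;  /* wraps correctly, unsigned */

        if (cur + nreq < wq->max_post)       /* fast path: clearly room */
            return 0;

        /* slow path: the CQ lock orders us against the poller advancing
         * tail, so this second read is the authoritative one */
        pthread_mutex_lock(&cq_lock);
        cur = wq->head - wq->tail;
        pthread_mutex_unlock(&cq_lock);

        return cur + nreq >= wq->max_post;
    }

    int main(void)
    {
        struct demo_wq wq = { .head = 7, .tail = 0, .max_post = 8 };

        return demo_wq_overflow(&wq, 1);     /* 7 + 1 >= 8: overflow */
    }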
hns_roce_device.h

     893  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
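This header line belongs to the write_cqc hook in the hns_roce_hw ops table: common code such as hns_roce_create_cqc() calls hr_dev->hw->write_cqc(), and each hardware generation (v1, v2) plugs in its own CQC writer. A skeletal sketch of that dispatch, with made-up demo types standing in for the driver's:

    #include <stdint.h>
    #include <stdio.h>

    struct demo_dev;

    /* per-generation hardware ops; only the hook relevant here is shown */
    struct demo_hw_ops {
        void (*write_cqc)(struct demo_dev *dev, void *mb_buf,
                          uint64_t *mtts, uint64_t dma_handle);
    };

    struct demo_dev {
        const struct demo_hw_ops *hw;
    };

    static void v2_write_cqc(struct demo_dev *dev, void *mb_buf,
                             uint64_t *mtts, uint64_t dma_handle)
    {
        (void)dev; (void)mb_buf; (void)mtts;
        printf("v2: fill CQC mailbox, base dma=0x%llx\n",
               (unsigned long long)dma_handle);
    }

    static const struct demo_hw_ops v2_ops = { .write_cqc = v2_write_cqc };

    int main(void)
    {
        struct demo_dev dev = { .hw = &v2_ops };
        uint64_t mtts[2] = { 0 };
        char mailbox[64];

        /* generic code stays generation-agnostic via the ops table */
        dev.hw->write_cqc(&dev, mailbox, mtts, 0x1000);
        return 0;
    }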
/kernel/linux/linux-5.10/drivers/infiniband/hw/hns/
hns_roce_cq.c

      42  static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  in alloc_cqc() argument
      51  ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts),  in alloc_cqc()
      59  ret = hns_roce_bitmap_alloc(&cq_table->bitmap, &hr_cq->cqn);  in alloc_cqc()
      66  ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn);  in alloc_cqc()
      69  hr_cq->cqn, ret);  in alloc_cqc()
      73  ret = xa_err(xa_store(&cq_table->array, hr_cq->cqn, hr_cq, GFP_KERNEL));  in alloc_cqc()
      86  hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle);  in alloc_cqc()
      89  ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_cq->cqn, 0,  in alloc_cqc()
      95  hr_cq ...  in alloc_cqc()
     118  free_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  free_cqc() argument
     145  alloc_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata, unsigned long addr)  alloc_cq_buf() argument
     167  free_cq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq)  free_cq_buf() argument
     172  alloc_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata, unsigned long addr, struct hns_roce_ib_create_cq_resp *resp)  alloc_cq_db() argument
     208  free_cq_db(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, struct ib_udata *udata)  free_cq_db() argument
     227  set_cqe_size(struct hns_roce_cq *hr_cq, struct ib_udata *udata, struct hns_roce_ib_create_cq *ucmd)  set_cqe_size() argument
     247  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  hns_roce_create_cq() local
     337  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  hns_roce_destroy_cq() local
     350  struct hns_roce_cq *hr_cq;  hns_roce_cq_completion() local
     370  struct hns_roce_cq *hr_cq;  hns_roce_cq_event() local
     [all...]
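Unlike the 6.6 banked allocator above, the 5.10 alloc_cqc() draws the CQN from a single bitmap (hns_roce_bitmap_alloc()) and then publishes the CQ in an xarray so the event path can find it by number. A userspace sketch of the first-free-bit allocation, with an assumed fixed table size:

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_MAX_CQ 64                    /* assumed table size */

    static uint8_t cqn_bitmap[DEMO_MAX_CQ / 8];

    /* find-first-zero-bit style allocation, as hns_roce_bitmap_alloc() does */
    static int demo_alloc_cqn(unsigned long *cqn)
    {
        for (unsigned int i = 0; i < DEMO_MAX_CQ; i++) {
            if (!(cqn_bitmap[i / 8] & (1u << (i % 8)))) {
                cqn_bitmap[i / 8] |= 1u << (i % 8);
                *cqn = i;
                return 0;
            }
        }
        return -1;                            /* table exhausted */
    }

    static void demo_free_cqn(unsigned long cqn)
    {
        cqn_bitmap[cqn / 8] &= ~(1u << (cqn % 8));
    }

    int main(void)
    {
        unsigned long a, b;

        demo_alloc_cqn(&a);
        demo_alloc_cqn(&b);
        demo_free_cqn(a);                     /* a is reusable again */
        printf("allocated %lu then %lu, freed %lu\n", a, b, a);
        return 0;
    }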
hns_roce_hw_v1.c

    1973  static void *get_cqe(struct hns_roce_cq *hr_cq, int n)  in get_cqe() argument
    1975  return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE);  in get_cqe()
    1978  static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n)  in get_sw_cqe() argument
    1980  struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe);  in get_sw_cqe()
    1984  !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL;  in get_sw_cqe()
    1987  static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq)  in next_cqe_sw() argument
    1989  return get_sw_cqe(hr_cq, hr_cq->cons_index);  in next_cqe_sw()
    1992  static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)  in hns_roce_v1_cq_set_ci() argument
    2009  __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  __hns_roce_v1_cq_clean() argument
    2057  hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  hns_roce_v1_cq_clean() argument
    2065  hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle)  hns_roce_v1_write_cqc() argument
    2144  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v1_req_notify_cq() local
    2170  hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, struct hns_roce_qp **cur_qp, struct ib_wc *wc)  hns_roce_v1_poll_one() argument
    2375  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v1_poll_cq() local
    3646  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v1_destroy_cq() local
    [all...]
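__hns_roce_v1_cq_clean() (and its v2 twin) removes the CQEs of a QP being destroyed by walking back from the newest software-owned entry, sliding every surviving CQE forward over the freed slots, and finally advancing the consumer index past them. A simplified userspace sketch of that compaction, ignoring owner bits and SRQ handling:

    #include <stdint.h>
    #include <stdio.h>

    #define RING 8                            /* power of two, assumed */

    struct demo_cqe { uint32_t qpn; };

    /* valid CQEs live in [cons, prod); drop the ones matching qpn by
     * copying survivors toward prod and bumping cons by the freed count */
    static uint32_t demo_cq_clean(struct demo_cqe *ring,
                                  uint32_t cons, uint32_t prod, uint32_t qpn)
    {
        uint32_t nfreed = 0;

        while (prod-- != cons) {              /* newest to oldest */
            struct demo_cqe *cqe = &ring[prod & (RING - 1)];

            if (cqe->qpn == qpn)
                nfreed++;
            else if (nfreed)                  /* slide survivor forward */
                ring[(prod + nfreed) & (RING - 1)] = *cqe;
        }
        return cons + nfreed;                 /* new consumer index */
    }

    int main(void)
    {
        struct demo_cqe ring[RING] = { {1}, {2}, {1} };
        uint32_t cons = demo_cq_clean(ring, 0, 3, 1);

        /* prints: new cons=2, surviving qpn=2 */
        printf("new cons=%u, surviving qpn=%u\n",
               cons, ring[cons & (RING - 1)].qpn);
        return 0;
    }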
hns_roce_restrack.c

      83  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);  in hns_roce_fill_res_cq_entry() local
      95  ret = hr_dev->dfx->query_cqc_info(hr_dev, hr_cq->cqn, (int *)context);  in hns_roce_fill_res_cq_entry()
hns_roce_main.c

     806  struct hns_roce_cq *hr_cq = to_hr_cq(cq);  in check_and_get_armed_cq() local
     809  spin_lock_irqsave(&hr_cq->lock, flags);  in check_and_get_armed_cq()
     811  if (!hr_cq->is_armed) {  in check_and_get_armed_cq()
     812  hr_cq->is_armed = 1;  in check_and_get_armed_cq()
     813  list_add_tail(&hr_cq->node, cq_list);  in check_and_get_armed_cq()
     816  spin_unlock_irqrestore(&hr_cq->lock, flags);  in check_and_get_armed_cq()
     822  struct hns_roce_cq *hr_cq;  in hns_roce_handle_device_err() local
     842  list_for_each_entry(hr_cq, &cq_list, node)  in hns_roce_handle_device_err()
     843  hns_roce_cq_completion(hr_dev, hr_cq->cqn);  in hns_roce_handle_device_err()
hns_roce_hw_v2.c

    2968  static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n)  in get_cqe_v2() argument
    2970  return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size);  in get_cqe_v2()
    2973  static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, int n)  in get_sw_cqe_v2() argument
    2975  struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe);  in get_sw_cqe_v2()
    2979  !!(n & hr_cq->cq_depth)) ? cqe : NULL;  in get_sw_cqe_v2()
    2982  static inline void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 ci)  in hns_roce_v2_cq_set_ci() argument
    2984  *hr_cq->set_ci_db = ci & V2_CQ_DB_PARAMETER_CONS_IDX_M;  in hns_roce_v2_cq_set_ci()
    2987  static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  in __hns_roce_v2_cq_clean() argument
    3041  hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, struct hns_roce_srq *srq)  hns_roce_v2_cq_clean() argument
    3049  hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts, dma_addr_t dma_handle)  hns_roce_v2_write_cqc() argument
    3128  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v2_req_notify_cq() local
    3216  hns_roce_v2_sw_poll_cq(struct hns_roce_cq *hr_cq, int num_entries, struct ib_wc *wc)  hns_roce_v2_sw_poll_cq() argument
    3306  hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, struct hns_roce_qp **cur_qp, struct ib_wc *wc)  hns_roce_v2_poll_one() argument
    3518  struct hns_roce_cq *hr_cq = to_hr_cq(ibcq);  hns_roce_v2_poll_cq() local
    5330  struct hns_roce_cq *hr_cq = to_hr_cq(cq);  hns_roce_v2_modify_cq() local
    [all...]
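A detail visible in this listing is hns_roce_v2_cq_set_ci(): instead of an MMIO doorbell it stores the masked consumer index into a doorbell record in plain memory (*hr_cq->set_ci_db) that the device reads. A tiny sketch of such a software doorbell, with an assumed 24-bit index field (the real width comes from V2_CQ_DB_PARAMETER_CONS_IDX_M):

    #include <stdint.h>
    #include <stdio.h>

    #define DEMO_CONS_IDX_MASK 0xffffffu  /* assumed 24-bit index field */

    /* the doorbell record is memory shared with the device; the driver
     * stores the masked consumer index and the device reads it from there */
    static void demo_cq_set_ci(volatile uint32_t *set_ci_db, uint32_t ci)
    {
        *set_ci_db = ci & DEMO_CONS_IDX_MASK;
    }

    int main(void)
    {
        uint32_t db = 0;

        demo_cq_set_ci(&db, 0x1000001);   /* wraps into the 24-bit field */
        printf("db=0x%x\n", db);          /* prints db=0x1 */
        return 0;
    }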
hns_roce_qp.c

    1278  struct hns_roce_cq *hr_cq;  in hns_roce_wq_overflow() local
    1285  hr_cq = to_hr_cq(ib_cq);  in hns_roce_wq_overflow()
    1286  spin_lock(&hr_cq->lock);  in hns_roce_wq_overflow()
    1288  spin_unlock(&hr_cq->lock);  in hns_roce_wq_overflow()
hns_roce_device.h

     916  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,