/kernel/linux/linux-5.10/drivers/atm/

nicstar.c
      27     * 2 - Per SCQ scq spinlock
     115    #define scq_virt_to_bus(scq, p) \
     116        (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
     126    static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
     133    static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
     136    static void drain_scq(ns_dev * card, scq_info * scq, int pos);
     245    free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);   (in nicstar_remove_one())
     861    scq_info *scq;   (in get_scq(), local)
     867    scq   (in get_scq(), hit truncated)
     904    free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)   (free_scq(), argument)
    1275    scq_info *scq;   (in ns_open(), local)
    1469    scq_info *scq;   (in ns_close(), local)
    1530    scq_info *scq = card->scq0;   (in ns_close(), local)
    1629    scq_info *scq;   (in ns_send(), local)
    1721    push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb)   (push_scqe(), argument)
    1836    scq_info *scq;   (in process_tsq(), local)
    1908    drain_scq(ns_dev * card, scq_info * scq, int pos)   (drain_scq(), argument)
    [additional matches elided]
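
Note: the scq_virt_to_bus() hits above show how nicstar turns a pointer into an SCQ entry back into a bus address: the queue lives in one coherent allocation, so the bus address is the DMA base plus the entry's byte offset from the kernel-virtual base. A minimal, self-contained sketch of that arithmetic (the struct fields and addresses below are illustrative stand-ins, not nicstar's own types):

/*
 * Sketch of the scq_virt_to_bus() translation: bus address = DMA base of the
 * coherent buffer + (entry pointer - CPU base of the same buffer).
 * struct scq_sketch only mirrors the two fields the macro uses (dma, org).
 */
#include <stdint.h>
#include <stdio.h>

struct scq_sketch {
	uint64_t dma;   /* bus/DMA address of the queue buffer */
	void    *org;   /* CPU-virtual address of the same buffer */
};

static uint64_t scq_virt_to_bus_sketch(const struct scq_sketch *scq, const void *p)
{
	return scq->dma + ((uintptr_t)p - (uintptr_t)scq->org);
}

int main(void)
{
	unsigned char ring[4096];
	struct scq_sketch scq = { .dma = 0x80000000ull, .org = ring };

	/* an entry at byte offset 64 maps to bus address base + 64 */
	printf("0x%llx\n",
	       (unsigned long long)scq_virt_to_bus_sketch(&scq, ring + 64));
	return 0;
}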
idt77252.c
     639    struct scq_info *scq;   (in alloc_scq(), local)
     641    scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);   (in alloc_scq())
     642    if (!scq)   (in alloc_scq())
     644    scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,   (in alloc_scq())
     645                                   &scq->paddr, GFP_KERNEL);   (in alloc_scq())
     646    if (scq->base == NULL) {   (in alloc_scq())
     647            kfree(scq);   (in alloc_scq())
     651    scq->next = scq->base;   (in alloc_scq())
     652    scq   (in alloc_scq(), hit truncated)
     668    free_scq(struct idt77252_dev *card, struct scq_info *scq)   (free_scq(), argument)
     705    struct scq_info *scq = vc->scq;   (in push_on_scq(), local)
     793    struct scq_info *scq = vc->scq;   (in drain_scq(), local)
     950    fill_scd(struct idt77252_dev *card, struct scq_info *scq, int class)   (fill_scd(), argument)
     959    clear_scd(struct idt77252_dev *card, struct scq_info *scq, int class)   (clear_scd(), argument)
    [additional matches elided]
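
Note: the alloc_scq() hits show the common two-step allocation, a small control structure from kzalloc() plus a coherent DMA ring, with each failure unwinding the previous step. A minimal sketch of that pattern (not the driver's actual code; the struct carries only the fields the excerpt shows, and SCQ_RING_SIZE stands in for the driver's SCQ_SIZE):

/*
 * Kernel-style sketch of the allocate/unwind pattern in alloc_scq()/free_scq().
 */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#define SCQ_RING_SIZE 4096	/* stand-in for the driver's SCQ_SIZE */

struct scq_sketch {
	void		*base;	/* CPU address of the ring */
	dma_addr_t	 paddr;	/* bus address of the same ring */
	void		*next;	/* next free entry, starts at base */
};

static struct scq_sketch *alloc_scq_sketch(struct device *dev)
{
	struct scq_sketch *scq;

	scq = kzalloc(sizeof(*scq), GFP_KERNEL);
	if (!scq)
		return NULL;

	scq->base = dma_alloc_coherent(dev, SCQ_RING_SIZE, &scq->paddr,
				       GFP_KERNEL);
	if (!scq->base) {
		kfree(scq);		/* unwind the first allocation */
		return NULL;
	}

	scq->next = scq->base;		/* ring starts empty */
	return scq;
}

static void free_scq_sketch(struct device *dev, struct scq_sketch *scq)
{
	dma_free_coherent(dev, SCQ_RING_SIZE, scq->base, scq->paddr);
	kfree(scq);
}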
nicstar.h
     702    scq_info *scq;          /* To keep track of the SCQ */   (struct member)

/kernel/linux/linux-6.6/drivers/atm/

nicstar.c
      27     * 2 - Per SCQ scq spinlock
     115    #define scq_virt_to_bus(scq, p) \
     116        (scq->dma + ((unsigned long)(p) - (unsigned long)(scq)->org))
     126    static void free_scq(ns_dev *card, scq_info * scq, struct atm_vcc *vcc);
     134    static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
     137    static void drain_scq(ns_dev * card, scq_info * scq, int pos);
     247    free_scq(card, card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);   (in nicstar_remove_one())
     863    scq_info *scq;   (in get_scq(), local)
     868    scq   (in get_scq(), hit truncated)
     900    free_scq(ns_dev *card, scq_info *scq, struct atm_vcc *vcc)   (free_scq(), argument)
    1271    scq_info *scq;   (in ns_open(), local)
    1465    scq_info *scq;   (in ns_close(), local)
    1526    scq_info *scq = card->scq0;   (in ns_close(), local)
    1625    scq_info *scq;   (in _ns_send(), local)
    1727    push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd, struct sk_buff *skb, bool may_sleep)   (push_scqe(), argument)
    1842    scq_info *scq;   (in process_tsq(), local)
    1914    drain_scq(ns_dev * card, scq_info * scq, int pos)   (drain_scq(), argument)
    [additional matches elided]
idt77252.c
     639    struct scq_info *scq;   (in alloc_scq(), local)
     641    scq = kzalloc(sizeof(struct scq_info), GFP_KERNEL);   (in alloc_scq())
     642    if (!scq)   (in alloc_scq())
     644    scq->base = dma_alloc_coherent(&card->pcidev->dev, SCQ_SIZE,   (in alloc_scq())
     645                                   &scq->paddr, GFP_KERNEL);   (in alloc_scq())
     646    if (scq->base == NULL) {   (in alloc_scq())
     647            kfree(scq);   (in alloc_scq())
     651    scq->next = scq->base;   (in alloc_scq())
     652    scq   (in alloc_scq(), hit truncated)
     668    free_scq(struct idt77252_dev *card, struct scq_info *scq)   (free_scq(), argument)
     705    struct scq_info *scq = vc->scq;   (in push_on_scq(), local)
     793    struct scq_info *scq = vc->scq;   (in drain_scq(), local)
     950    fill_scd(struct idt77252_dev *card, struct scq_info *scq, int class)   (fill_scd(), argument)
     959    clear_scd(struct idt77252_dev *card, struct scq_info *scq, int class)   (clear_scd(), argument)
    [additional matches elided]

/kernel/linux/linux-5.10/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_qp.c
      65    static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
      68            __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
      70            if (scq == rcq) {
      71                    spin_lock_irqsave(&scq->cq_lock, *scq_flags);
      73            } else if (scq->cq_handle < rcq->cq_handle) {
      74                    spin_lock_irqsave(&scq->cq_lock, *scq_flags);
      79                    spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
      84    static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
      87            __releases(scq->cq_lock) __releases(rcq->cq_lock)
      89            if (scq   (hit truncated)
     103    struct pvrdma_cq *scq, *rcq;   (in pvrdma_reset_qp(), local)
     464    struct pvrdma_cq *scq;   (in pvrdma_free_qp(), local)
    [additional matches elided]
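
Note: pvrdma_lock_cqs() above is the usual idiom for taking two CQ locks without deadlocking: if the send and receive CQ are the same object, lock once; otherwise always lock the CQ with the smaller handle first, and use the _nested variant for the second lock so lockdep accepts two locks of the same class. A minimal sketch of that idiom (struct cq_sketch and its fields are stand-ins, not pvrdma's types):

/*
 * Ordered dual-CQ locking: consistent lock order by handle prevents ABBA
 * deadlock between two threads locking the same pair of CQs.
 */
#include <linux/spinlock.h>
#include <linux/types.h>

struct cq_sketch {
	spinlock_t	lock;
	u32		handle;
};

static void lock_cq_pair(struct cq_sketch *scq, struct cq_sketch *rcq,
			 unsigned long *sflags, unsigned long *rflags)
{
	if (scq == rcq) {
		spin_lock_irqsave(&scq->lock, *sflags);
		*rflags = *sflags;	/* one lock covers both roles */
	} else if (scq->handle < rcq->handle) {
		spin_lock_irqsave(&scq->lock, *sflags);
		spin_lock_irqsave_nested(&rcq->lock, *rflags,
					 SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irqsave(&rcq->lock, *rflags);
		spin_lock_irqsave_nested(&scq->lock, *sflags,
					 SINGLE_DEPTH_NESTING);
	}
}

static void unlock_cq_pair(struct cq_sketch *scq, struct cq_sketch *rcq,
			   unsigned long sflags, unsigned long rflags)
{
	/* release in reverse order of acquisition */
	if (scq == rcq) {
		spin_unlock_irqrestore(&scq->lock, sflags);
	} else if (scq->handle < rcq->handle) {
		spin_unlock_irqrestore(&rcq->lock, rflags);
		spin_unlock_irqrestore(&scq->lock, sflags);
	} else {
		spin_unlock_irqrestore(&scq->lock, sflags);
		spin_unlock_irqrestore(&rcq->lock, rflags);
	}
}

The same ordered-locking shape appears again in the bnxt_re ib_verbs.c hits further down; only the tie-break key (handle vs. pointer identity) differs.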

/kernel/linux/linux-6.6/drivers/infiniband/hw/vmw_pvrdma/

pvrdma_qp.c
      65    static void pvrdma_lock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
      68            __acquires(scq->cq_lock) __acquires(rcq->cq_lock)
      70            if (scq == rcq) {
      71                    spin_lock_irqsave(&scq->cq_lock, *scq_flags);
      73            } else if (scq->cq_handle < rcq->cq_handle) {
      74                    spin_lock_irqsave(&scq->cq_lock, *scq_flags);
      79                    spin_lock_irqsave_nested(&scq->cq_lock, *scq_flags,
      84    static void pvrdma_unlock_cqs(struct pvrdma_cq *scq, struct pvrdma_cq *rcq,
      87            __releases(scq->cq_lock) __releases(rcq->cq_lock)
      89            if (scq   (hit truncated)
     103    struct pvrdma_cq *scq, *rcq;   (in pvrdma_reset_qp(), local)
     453    struct pvrdma_cq *scq;   (in pvrdma_free_qp(), local)
    [additional matches elided]

/kernel/linux/linux-6.6/drivers/infiniband/sw/rxe/

rxe_qp.c
     357    struct rxe_cq *scq = to_rcq(init->send_cq);   (in rxe_qp_from_init(), local)
     363    rxe_get(scq);   (in rxe_qp_from_init())
     369    qp->scq = scq;   (in rxe_qp_from_init())
     373    atomic_inc(&scq->num_wq);   (in rxe_qp_from_init())
     397    atomic_dec(&scq->num_wq);   (in rxe_qp_from_init())
     401    qp->scq = NULL;   (in rxe_qp_from_init())
     406    rxe_put(scq);   (in rxe_qp_from_init())
     847    if (qp->scq) {   (in rxe_qp_do_cleanup())
     848            atomic_dec(&qp->scq   (in rxe_qp_do_cleanup(), hit truncated)
    [additional matches elided]
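
Note: the rxe_qp_from_init() hits show the usual acquire/undo pairing: take a reference on the send CQ, publish it in the QP, bump a work-queue counter, and undo each step in reverse on the error path and again at cleanup. A minimal sketch of that pairing, using generic refcount_t/atomic_t fields as stand-ins for rxe's own objects and its rxe_get()/rxe_put() helpers:

/*
 * Sketch: attach a QP to its send CQ with reference and work-queue accounting,
 * and detach with the operations reversed.  All types here are illustrative.
 */
#include <linux/refcount.h>
#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/errno.h>

struct cq_sketch {
	refcount_t	ref;		/* lifetime of the CQ object */
	atomic_t	num_wq;		/* how many work queues feed this CQ */
};

struct qp_sketch {
	struct cq_sketch *scq;
};

static int qp_attach_scq(struct qp_sketch *qp, struct cq_sketch *scq,
			 bool fail_later_step)
{
	refcount_inc(&scq->ref);	/* rxe_get(scq) in the excerpt */
	qp->scq = scq;
	atomic_inc(&scq->num_wq);

	if (fail_later_step) {
		/* error path: undo in reverse order, as lines 397-406 do */
		atomic_dec(&scq->num_wq);
		qp->scq = NULL;
		refcount_dec(&scq->ref);	/* rxe_put(scq) */
		return -EINVAL;
	}
	return 0;
}

static void qp_detach_scq(struct qp_sketch *qp)
{
	/* cleanup path, mirroring rxe_qp_do_cleanup() */
	if (qp->scq) {
		atomic_dec(&qp->scq->num_wq);
		refcount_dec(&qp->scq->ref);
		qp->scq = NULL;
	}
}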
rxe_comp.c
     466    rxe_cq_post(qp->scq, &cqe, 0);   (in do_complete())
     583    err = rxe_cq_post(qp->scq, &cqe, 0);   (in flush_send_wqe())
     585    rxe_dbg_cq(qp->scq, "post cq failed, err = %d", err);   (in flush_send_wqe())

rxe_verbs.h
     212    struct rxe_cq *scq;   (struct member)

/kernel/linux/linux-5.10/drivers/infiniband/sw/rxe/

rxe_qp.c
     316    struct rxe_cq *scq = to_rcq(init->send_cq);   (in rxe_qp_from_init(), local)
     321    rxe_add_ref(scq);   (in rxe_qp_from_init())
     327    qp->scq = scq;   (in rxe_qp_from_init())
     350    qp->scq = NULL;   (in rxe_qp_from_init())
     355    rxe_drop_ref(scq);   (in rxe_qp_from_init())
     799    if (qp->scq)   (in rxe_qp_do_cleanup())
     800            rxe_drop_ref(qp->scq);   (in rxe_qp_do_cleanup())

rxe_verbs.h
     218    struct rxe_cq *scq;   (struct member)

/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/

qplib_fp.c
      69    struct bnxt_qplib_cq *scq, *rcq;   (in __bnxt_qplib_add_flush_qp(), local)
      71    scq = qp->scq;   (in __bnxt_qplib_add_flush_qp())
      75    dev_dbg(&scq->hwq.pdev->dev,   (in __bnxt_qplib_add_flush_qp())
      78    list_add_tail(&qp->sq_flush, &scq->sqf_head);   (in __bnxt_qplib_add_flush_qp())
      93    __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
      95    spin_lock_irqsave(&qp->scq->flush_lock, *flags);
      96    if (qp->scq == qp->rcq)
     104    __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
     106    if (qp->scq   (hit truncated)
    [additional matches elided]
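
Note: the __bnxt_qplib_add_flush_qp() hits suggest per-CQ flush lists: a QP that has gone into error is queued on its send CQ's and receive CQ's flush lists under flush_lock, so later CQ processing can emit flush completions, and only one lock is taken when both work queues share a CQ. A minimal sketch of that bookkeeping (all struct names are stand-ins, not the driver's bnxt_qplib_* types; a real driver would also need a consistent ordering or a _nested annotation for the second lock):

/*
 * Sketch: park a QP on the flush lists of its send and receive CQs.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct cq_sketch {
	spinlock_t	 flush_lock;
	struct list_head sqf_head;	/* QPs whose SQ must be flushed */
	struct list_head rqf_head;	/* QPs whose RQ must be flushed */
};

struct qp_sketch {
	struct cq_sketch *scq, *rcq;
	struct list_head  sq_flush;	/* link into scq->sqf_head */
	struct list_head  rq_flush;	/* link into rcq->rqf_head */
};

static void add_flush_qp_sketch(struct qp_sketch *qp)
{
	unsigned long flags;

	spin_lock_irqsave(&qp->scq->flush_lock, flags);
	if (qp->scq != qp->rcq)
		spin_lock(&qp->rcq->flush_lock);

	list_add_tail(&qp->sq_flush, &qp->scq->sqf_head);
	list_add_tail(&qp->rq_flush, &qp->rcq->rqf_head);

	if (qp->scq != qp->rcq)
		spin_unlock(&qp->rcq->flush_lock);
	spin_unlock_irqrestore(&qp->scq->flush_lock, flags);
}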
ib_verbs.h
      92    struct bnxt_re_cq *scq;   (struct member)

ib_verbs.c
     720    __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
     724    spin_lock_irqsave(&qp->scq->cq_lock, flags);
     725    if (qp->rcq != qp->scq)
     735    __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
     737    if (qp->rcq != qp->scq)
     741    spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
    1050    qp->qplib_qp.scq = qp1_qp->scq;   (in bnxt_re_create_shadow_qp())
    1285    qplqp->scq = &cq->qplib_cq;   (in bnxt_re_init_qp_attr())
    1286    qp->scq   (in bnxt_re_init_qp_attr(), hit truncated)
    [additional matches elided]

qplib_fp.h
     309    struct bnxt_qplib_cq *scq;   (struct member)
     418     * Each flush list is protected by qplib_cq->flush_lock. Both scq and rcq   (comment, hit truncated)

/kernel/linux/linux-6.6/drivers/infiniband/hw/bnxt_re/

qplib_fp.c
      70    struct bnxt_qplib_cq *scq, *rcq;   (in __bnxt_qplib_add_flush_qp(), local)
      72    scq = qp->scq;   (in __bnxt_qplib_add_flush_qp())
      76    dev_dbg(&scq->hwq.pdev->dev,   (in __bnxt_qplib_add_flush_qp())
      79    list_add_tail(&qp->sq_flush, &scq->sqf_head);   (in __bnxt_qplib_add_flush_qp())
      94    __acquires(&qp->scq->flush_lock) __acquires(&qp->rcq->flush_lock)
      96    spin_lock_irqsave(&qp->scq->flush_lock, *flags);
      97    if (qp->scq == qp->rcq)
     105    __releases(&qp->scq->flush_lock) __releases(&qp->rcq->flush_lock)
     107    if (qp->scq   (hit truncated)
    [additional matches elided]

ib_verbs.h
      94    struct bnxt_re_cq *scq;   (struct member)

ib_verbs.c
     808    __acquires(&qp->scq->cq_lock) __acquires(&qp->rcq->cq_lock)
     812    spin_lock_irqsave(&qp->scq->cq_lock, flags);
     813    if (qp->rcq != qp->scq)
     823    __releases(&qp->scq->cq_lock) __releases(&qp->rcq->cq_lock)
     825    if (qp->rcq != qp->scq)
     829    spin_unlock_irqrestore(&qp->scq->cq_lock, flags);
     924    scq_nq = qplib_qp->scq->nq;   (in bnxt_re_destroy_qp())
    1153    qp->qplib_qp.scq = qp1_qp->scq;   (in bnxt_re_create_shadow_qp())
    1380    qplqp->scq   (in bnxt_re_init_qp_attr(), hit truncated)
    [additional matches elided]

/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/

ev.c
     139    cqid = qhp->attr.scq;   (in c4iw_ev_dispatch())

/kernel/linux/linux-6.6/drivers/infiniband/hw/cxgb4/

ev.c
     139    cqid = qhp->attr.scq;   (in c4iw_ev_dispatch())

/kernel/linux/linux-6.6/drivers/infiniband/hw/erdma/

erdma_verbs.h
     233    struct erdma_cq *scq;   (struct member)

/kernel/linux/linux-5.10/drivers/infiniband/core/

uverbs_cmd.c
    1252    struct ib_cq *scq = NULL, *rcq = NULL;   (in create_qp(), local)
    1349    scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,   (in create_qp())
    1352    rcq = rcq ?: scq;   (in create_qp())
    1355    if (!pd || (!scq && has_sq)) {   (in create_qp())
    1364    attr.send_cq = scq;   (in create_qp())
    1448    if (scq)   (in create_qp())
    1449            rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,   (in create_qp())
    1451    if (rcq && rcq != scq)   (in create_qp())
    1479    if (scq)   (in create_qp())
    1480            rdma_lookup_put_uobject(&scq   (in create_qp(), hit truncated)
    [additional matches elided]
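
Note: create_qp() in the uverbs layer defaults the receive CQ to the send CQ when the caller supplied only one ("rcq = rcq ?: scq" is the GNU ?: shorthand), so on teardown the receive CQ is released only when it is a distinct object. A small stand-alone sketch of that pairing, with a hypothetical put_cq() standing in for rdma_lookup_put_uobject():

/*
 * Sketch: share one CQ for both directions and avoid a double release.
 */
#include <stdio.h>
#include <stddef.h>

struct cq { const char *name; };

static void put_cq(struct cq *cq)
{
	printf("put %s\n", cq->name);
}

static void setup_and_teardown(struct cq *scq, struct cq *rcq)
{
	rcq = rcq ? rcq : scq;	/* same effect as rcq = rcq ?: scq */

	/* ... the QP would be created with send_cq = scq, recv_cq = rcq ... */

	if (scq)
		put_cq(scq);
	if (rcq && rcq != scq)	/* only drop the recv CQ if it is distinct */
		put_cq(rcq);
}

int main(void)
{
	struct cq a = { "send-cq" }, b = { "recv-cq" };

	setup_and_teardown(&a, &b);	/* puts both CQs */
	setup_and_teardown(&a, NULL);	/* recv CQ defaults to send CQ; one put */
	return 0;
}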

/kernel/linux/linux-6.6/drivers/infiniband/core/

uverbs_cmd.c
    1286    struct ib_cq *scq = NULL, *rcq = NULL;   (in create_qp(), local)
    1383    scq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ,   (in create_qp())
    1386    rcq = rcq ?: scq;   (in create_qp())
    1389    if (!pd || (!scq && has_sq)) {   (in create_qp())
    1398    attr.send_cq = scq;   (in create_qp())
    1459    if (scq)   (in create_qp())
    1460            rdma_lookup_put_uobject(&scq->uobject->uevent.uobject,   (in create_qp())
    1462    if (rcq && rcq != scq)   (in create_qp())
    1487    if (scq)   (in create_qp())
    1488            rdma_lookup_put_uobject(&scq   (in create_qp(), hit truncated)
    [additional matches elided]

/kernel/linux/linux-6.6/drivers/infiniband/hw/hns/

hns_roce_qp.c
     182    struct ib_cq *scq = init_attr->send_cq;   (in get_least_load_bankid_for_qp(), local)
     189    if (scq)   (in get_least_load_bankid_for_qp())
     190            cqn = to_hr_cq(scq)->cqn;   (in get_least_load_bankid_for_qp())
     193    if (scq && (get_affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK)))   (in get_least_load_bankid_for_qp())
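
Note: the get_least_load_bankid_for_qp() hits hint at how hns_roce spreads QPs across hardware banks: when a send CQ is given, only banks whose affinity matches the CQ's bank (cqn & CQ_BANKID_MASK) are considered, and the least-loaded candidate wins. A small sketch of that selection; QP_BANK_NUM, the affinity mapping and the load array are illustrative, not hns_roce's actual values or layout.

/*
 * Sketch: pick the least-loaded QP bank subject to a CQ-affinity constraint.
 */
#include <stdint.h>
#include <stdbool.h>

#define QP_BANK_NUM	8
#define CQ_BANKID_MASK	(QP_BANK_NUM - 1)

/* illustrative affinity: QP bank i is paired with CQ bank i */
static uint32_t affinity_cq_bank(uint32_t qp_bank)
{
	return qp_bank;
}

static int least_loaded_bank(const uint32_t load[QP_BANK_NUM],
			     bool have_scq, uint32_t cqn)
{
	int best = -1;

	for (uint32_t i = 0; i < QP_BANK_NUM; i++) {
		/* honour CQ affinity when a send CQ was supplied */
		if (have_scq && affinity_cq_bank(i) != (cqn & CQ_BANKID_MASK))
			continue;
		if (best < 0 || load[i] < load[best])
			best = (int)i;
	}
	return best;	/* -1 only if no bank satisfies the constraint */
}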