/kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/

bfa_msgq.c
    311  static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
    322  bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
    323  bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, ...
    325  bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
    326  bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, ...
    330  rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_stopped_entry(), argument]
    332  rspq->producer_index = 0;   [in rspq_sm_stopped_entry()]
    333  rspq->consumer_index = 0;   [in rspq_sm_stopped_entry()]
    334  rspq->flags = 0;   [in rspq_sm_stopped_entry()]
    338  rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enu...   [argument]
    356  rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_init_wait_entry(), argument]
    362  rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_init_wait(), argument]
    380  rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_ready_entry(), argument]
    385  rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_ready(), argument]
    403  rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_dbell_wait_entry(), argument]
    410  rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_dbell_wait(), argument]
    438  struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;   [in bfa_msgq_rspq_dbell_ready(), local]
    443  bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)   [in bfa_msgq_rspq_dbell(), argument]
    460  bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)   [in bfa_msgq_rspq_pi_update(), argument]
    491  bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)   [in bfa_msgq_rspq_attach(), argument]
    639  struct bfa_msgq_rspq *rspq = &msgq->rspq;   [in bfa_msgq_rsp_copy(), local]
    [more matches not shown]

bfa_msgq.h
    105  struct bfa_msgq_rspq rspq;   [member]
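The stopped-state entry handler can be reassembled almost verbatim from the hits above (lines 330-334): entering the stopped state simply clears both ring indices and the flags word. The same reset appears unchanged in the 6.6 listing below (lines 328-332).

    /* Reassembled from the bfa_msgq.c hits at lines 330-334 above; the
     * rest of the state machine is omitted here.
     */
    static void
    rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)
    {
            rspq->producer_index = 0;       /* reset the ring to empty */
            rspq->consumer_index = 0;
            rspq->flags = 0;                /* clear any queued state flags */
    }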
/kernel/linux/linux-6.6/drivers/net/ethernet/brocade/bna/

bfa_msgq.c
    309  static void bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq);
    320  bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
    321  bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, ...
    323  bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
    324  bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, ...
    328  rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_stopped_entry(), argument]
    330  rspq->producer_index = 0;   [in rspq_sm_stopped_entry()]
    331  rspq->consumer_index = 0;   [in rspq_sm_stopped_entry()]
    332  rspq->flags = 0;   [in rspq_sm_stopped_entry()]
    336  rspq_sm_stopped(struct bfa_msgq_rspq *rspq, enu...   [argument]
    354  rspq_sm_init_wait_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_init_wait_entry(), argument]
    360  rspq_sm_init_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_init_wait(), argument]
    378  rspq_sm_ready_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_ready_entry(), argument]
    383  rspq_sm_ready(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_ready(), argument]
    401  rspq_sm_dbell_wait_entry(struct bfa_msgq_rspq *rspq)   [in rspq_sm_dbell_wait_entry(), argument]
    408  rspq_sm_dbell_wait(struct bfa_msgq_rspq *rspq, enum rspq_event event)   [in rspq_sm_dbell_wait(), argument]
    436  struct bfa_msgq_rspq *rspq = (struct bfa_msgq_rspq *)arg;   [in bfa_msgq_rspq_dbell_ready(), local]
    441  bfa_msgq_rspq_dbell(struct bfa_msgq_rspq *rspq)   [in bfa_msgq_rspq_dbell(), argument]
    458  bfa_msgq_rspq_pi_update(struct bfa_msgq_rspq *rspq, struct bfi_mbmsg *mb)   [in bfa_msgq_rspq_pi_update(), argument]
    489  bfa_msgq_rspq_attach(struct bfa_msgq_rspq *rspq, struct bfa_msgq *msgq)   [in bfa_msgq_rspq_attach(), argument]
    637  struct bfa_msgq_rspq *rspq = &msgq->rspq;   [in bfa_msgq_rsp_copy(), local]
    [more matches not shown]

bfa_cs.h
    46  BFA_SM_TABLE(rspq, bfa_msgq_rspq, rspq_event, bfa_fsm_msgq_rspq_t)

bfa_msgq.h
    109  struct bfa_msgq_rspq rspq;   [member]
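Both trees declare the same four response-queue states (stopped, init_wait, ready, dbell_wait) with bfa_fsm_state_decl(); the 6.6 tree additionally registers the machine through BFA_SM_TABLE() in bfa_cs.h. Judging by the handler definitions that appear in the hits, each declaration introduces one event handler taking (queue, event) and one entry handler taking (queue). The block below restates the declarations with that reading spelled out; the macro's actual definition lives in bfa_cs.h and is not part of the listing, and the wrapped arguments of the init_wait and dbell_wait lines are completed by analogy with the other two.

    /* The four rspq states as declared in bfa_msgq.c.  Per the handler
     * signatures visible above, each declaration is read as producing
     * two prototypes, e.g. for "stopped":
     *     static void rspq_sm_stopped(struct bfa_msgq_rspq *rspq,
     *                                 enum rspq_event event);
     *     static void rspq_sm_stopped_entry(struct bfa_msgq_rspq *rspq);
     * (the init_wait/dbell_wait argument lists are completed by analogy;
     * the listing wraps those lines)
     */
    bfa_fsm_state_decl(rspq, stopped, struct bfa_msgq_rspq, enum rspq_event);
    bfa_fsm_state_decl(rspq, init_wait, struct bfa_msgq_rspq, enum rspq_event);
    bfa_fsm_state_decl(rspq, ready, struct bfa_msgq_rspq, enum rspq_event);
    bfa_fsm_state_decl(rspq, dbell_wait, struct bfa_msgq_rspq, enum rspq_event);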
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4vf/

sge.c
    1570  struct adapter *adapter = rxq->rspq.adapter;   [in do_gro()]
    1576  skb = napi_get_frags(&rxq->rspq.napi);   [in do_gro()]
    1588  skb_record_rx_queue(skb, rxq->rspq.idx);   [in do_gro()]
    1596  ret = napi_gro_frags(&rxq->rspq.napi);   [in do_gro()]
    1608  * @rspq: the response queue that received the packet
    1614  int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,   [in t4vf_ethrx_handler(), argument]
    1620  (rspq->netdev->features & NETIF_F_RXCSUM);   [in t4vf_ethrx_handler()]
    1621  struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);   [in t4vf_ethrx_handler()]
    1622  struct adapter *adapter = rspq...   [in t4vf_ethrx_handler()]
    1685  is_new_response(const struct rsp_ctrl *rc, const struct sge_rspq *rspq)   [in is_new_response(), argument]
    1734  rspq_next(struct sge_rspq *rspq)   [in rspq_next(), argument]
    1757  process_responses(struct sge_rspq *rspq, int budget)   [in process_responses(), argument]
    1891  struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);   [in napi_rx_handler(), local]
    1927  struct sge_rspq *rspq = cookie;   [in t4vf_sge_intr_msix(), local]
    1948  struct sge_rspq *rspq;   [in process_intrq(), local]
    2204  t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, bool iqasynch, struct net_device *dev, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd)   [in t4vf_sge_alloc_rxq(), argument]
    2530  free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl)   [in free_rspq_fl(), argument]
    [more matches not shown]

cxgb4vf_main.c
    401   &s->ethrxq[rxq].rspq);   [in request_msix_queue_irqs()]
    410   free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);   [in request_msix_queue_irqs()]
    427   &s->ethrxq[rxq].rspq);   [in free_msix_queue_irqs()]
    433   static void qenable(struct sge_rspq *rspq)   [in qenable(), argument]
    435   napi_enable(&rspq->napi);   [in qenable()]
    441   t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,   [in qenable()]
    443   SEINTARM_V(rspq->intr_params) |   [in qenable()]
    444   INGRESSQID_V(rspq->cntxt_id));   [in qenable()]
    456   qenable(&s->ethrxq[rxq].rspq);   [in enable_rx()]
    480   napi_disable(&s->ethrxq[rxq].rspq...   [in quiesce_rx()]
    487   fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, const struct pkt_gl *gl)   [in fwevtq_handler(), argument]
    1054  qtimer_val(const struct adapter *adapter, const struct sge_rspq *rspq)   [in qtimer_val(), argument]
    1075  set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, unsigned int us, unsigned int cnt)   [in set_rxq_intr_params(), argument]
    1654  const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;   [in cxgb4vf_get_coalesce(), local]
    2640  init_rspq(struct sge_rspq *rspq, u8 timer_idx, u8 pkt_cnt_idx, unsigned int size, unsigned int iqe_size)   [in init_rspq(), argument]
    [more matches not shown]
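The cxgb4vf_main.c hits at lines 433-444 outline qenable(): the queue's NAPI context is enabled first, then a single write to the SGE_VF_GTS doorbell arms the queue's interrupt with its holdoff parameters and ingress queue id. One field of the doorbell value falls outside the matched lines, so the CIDXINC_V(0) below is an assumption; the rest appears verbatim in the hits.

    /* Sketch of qenable() as recoverable from the listing above. */
    static void qenable(struct sge_rspq *rspq)
    {
            napi_enable(&rspq->napi);

            /* Arm the response queue: program the interrupt holdoff
             * parameters and point the doorbell at this ingress queue.
             */
            t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,
                         CIDXINC_V(0) |                 /* assumed; elided in the listing */
                         SEINTARM_V(rspq->intr_params) |
                         INGRESSQID_V(rspq->cntxt_id));
    }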
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4vf/

sge.c
    1567  struct adapter *adapter = rxq->rspq.adapter;   [in do_gro()]
    1573  skb = napi_get_frags(&rxq->rspq.napi);   [in do_gro()]
    1585  skb_record_rx_queue(skb, rxq->rspq.idx);   [in do_gro()]
    1593  ret = napi_gro_frags(&rxq->rspq.napi);   [in do_gro()]
    1605  * @rspq: the response queue that received the packet
    1611  int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,   [in t4vf_ethrx_handler(), argument]
    1617  (rspq->netdev->features & NETIF_F_RXCSUM);   [in t4vf_ethrx_handler()]
    1618  struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);   [in t4vf_ethrx_handler()]
    1619  struct adapter *adapter = rspq...   [in t4vf_ethrx_handler()]
    1682  is_new_response(const struct rsp_ctrl *rc, const struct sge_rspq *rspq)   [in is_new_response(), argument]
    1731  rspq_next(struct sge_rspq *rspq)   [in rspq_next(), argument]
    1754  process_responses(struct sge_rspq *rspq, int budget)   [in process_responses(), argument]
    1888  struct sge_rspq *rspq = container_of(napi, struct sge_rspq, napi);   [in napi_rx_handler(), local]
    1924  struct sge_rspq *rspq = cookie;   [in t4vf_sge_intr_msix(), local]
    1945  struct sge_rspq *rspq;   [in process_intrq(), local]
    2201  t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq, bool iqasynch, struct net_device *dev, int intr_dest, struct sge_fl *fl, rspq_handler_t hnd)   [in t4vf_sge_alloc_rxq(), argument]
    2527  free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, struct sge_fl *fl)   [in free_rspq_fl(), argument]
    [more matches not shown]

cxgb4vf_main.c
    401   &s->ethrxq[rxq].rspq);   [in request_msix_queue_irqs()]
    410   free_irq(adapter->msix_info[--msi].vec, &s->ethrxq[rxq].rspq);   [in request_msix_queue_irqs()]
    427   &s->ethrxq[rxq].rspq);   [in free_msix_queue_irqs()]
    433   static void qenable(struct sge_rspq *rspq)   [in qenable(), argument]
    435   napi_enable(&rspq->napi);   [in qenable()]
    441   t4_write_reg(rspq->adapter, T4VF_SGE_BASE_ADDR + SGE_VF_GTS,   [in qenable()]
    443   SEINTARM_V(rspq->intr_params) |   [in qenable()]
    444   INGRESSQID_V(rspq->cntxt_id));   [in qenable()]
    456   qenable(&s->ethrxq[rxq].rspq);   [in enable_rx()]
    480   napi_disable(&s->ethrxq[rxq].rspq...   [in quiesce_rx()]
    487   fwevtq_handler(struct sge_rspq *rspq, const __be64 *rsp, const struct pkt_gl *gl)   [in fwevtq_handler(), argument]
    1054  qtimer_val(const struct adapter *adapter, const struct sge_rspq *rspq)   [in qtimer_val(), argument]
    1075  set_rxq_intr_params(struct adapter *adapter, struct sge_rspq *rspq, unsigned int us, unsigned int cnt)   [in set_rxq_intr_params(), argument]
    1660  const struct sge_rspq *rspq = &adapter->sge.ethrxq[pi->first_qset].rspq;   [in cxgb4vf_get_coalesce(), local]
    2648  init_rspq(struct sge_rspq *rspq, u8 timer_idx, u8 pkt_cnt_idx, unsigned int size, unsigned int iqe_size)   [in init_rspq(), argument]
    [more matches not shown]
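In both cxgb4vf trees, do_gro() in sge.c follows the standard napi_gro_frags() pattern: borrow the per-NAPI skb with napi_get_frags(), attach the received fragments, tag the skb with the queue index, and hand it back with napi_gro_frags(). The sketch below is a simplified illustration of that flow, not the driver's full function; the fragment handling and the checksum/hash/VLAN steps that the listing does not show are reduced to a hypothetical copy_gl_to_skb() helper, and the driver's struct types are assumed from context.

    /* Minimal GRO receive sketch following the calls visible in do_gro().
     * copy_gl_to_skb() is a hypothetical stand-in for the driver's
     * fragment handling.
     */
    static void do_gro_sketch(struct sge_eth_rxq *rxq, const struct pkt_gl *gl)
    {
            struct sk_buff *skb;

            skb = napi_get_frags(&rxq->rspq.napi);  /* per-NAPI recycled skb */
            if (unlikely(!skb)) {
                    /* the real driver frees the fragments and counts a drop here */
                    return;
            }

            copy_gl_to_skb(skb, gl);                 /* hypothetical helper */
            skb_record_rx_queue(skb, rxq->rspq.idx); /* attribute the skb to this queue */
            napi_gro_frags(&rxq->rspq.napi);         /* hand ownership back to GRO */
    }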
/kernel/linux/linux-5.10/drivers/scsi/bfa/

bfa_hw_cb.c
    46  bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwcb_rspq_ack_msix(), argument]
    48  writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),   [in bfa_hwcb_rspq_ack_msix()]
    51  if (bfa_rspq_ci(bfa, rspq) == ci)   [in bfa_hwcb_rspq_ack_msix()]
    54  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwcb_rspq_ack_msix()]
    55  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwcb_rspq_ack_msix()]
    59  bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwcb_rspq_ack(), argument]
    61  if (bfa_rspq_ci(bfa, rspq) == ci)   [in bfa_hwcb_rspq_ack()]
    64  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwcb_rspq_ack()]
    65  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwcb_rspq_ack()]

bfa_hw_ct.c
    67  bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwct_rspq_ack(), argument]
    71  r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);   [in bfa_hwct_rspq_ack()]
    72  writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);   [in bfa_hwct_rspq_ack()]
    74  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwct_rspq_ack()]
    75  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwct_rspq_ack()]
    84  bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwct2_rspq_ack(), argument]
    86  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwct2_rspq_ack()]
    87  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwct2_rspq_ack()]

bfa.h
    179  void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
    313  void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
    325  void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
    326  void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
    327  void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
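The ack helpers in bfa_hw_cb.c are short enough that the hits above reproduce them essentially in full: acknowledging a response queue means caching the new consumer index and writing it to the queue's RME CI doorbell register, and the non-MSI-X variant skips the work entirely when the index has not moved.

    /* As reconstructed from the bfa_hw_cb.c hits at lines 59-65 above. */
    void
    bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)
    {
            if (bfa_rspq_ci(bfa, rspq) == ci)       /* nothing new was consumed */
                    return;

            bfa_rspq_ci(bfa, rspq) = ci;            /* remember the new consumer index */
            writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]); /* ring the CI doorbell */
    }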
/kernel/linux/linux-6.6/drivers/scsi/bfa/

bfa_hw_cb.c
    46  bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwcb_rspq_ack_msix(), argument]
    48  writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),   [in bfa_hwcb_rspq_ack_msix()]
    51  if (bfa_rspq_ci(bfa, rspq) == ci)   [in bfa_hwcb_rspq_ack_msix()]
    54  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwcb_rspq_ack_msix()]
    55  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwcb_rspq_ack_msix()]
    59  bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwcb_rspq_ack(), argument]
    61  if (bfa_rspq_ci(bfa, rspq) == ci)   [in bfa_hwcb_rspq_ack()]
    64  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwcb_rspq_ack()]
    65  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwcb_rspq_ack()]

bfa_hw_ct.c
    67  bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwct_rspq_ack(), argument]
    71  r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);   [in bfa_hwct_rspq_ack()]
    72  writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);   [in bfa_hwct_rspq_ack()]
    74  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwct_rspq_ack()]
    75  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwct_rspq_ack()]
    84  bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci)   [in bfa_hwct2_rspq_ack(), argument]
    86  bfa_rspq_ci(bfa, rspq) = ci;   [in bfa_hwct2_rspq_ack()]
    87  writel(ci, bfa->iocfc.bfa_regs.rme_q_ci[rspq]);   [in bfa_hwct2_rspq_ack()]

bfa.h
    178  void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
    312  void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
    324  void bfa_hwct_reqq_ack(struct bfa_s *bfa, int rspq);
    325  void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
    326  void bfa_hwct2_rspq_ack(struct bfa_s *bfa, int rspq, u32 ci);
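bfa.h shows why there are three ack implementations: the acknowledge routine is a per-ASIC hook, void (*hw_rspq_ack)(struct bfa_s *, int, u32), so the cb, ct and ct2 variants above can be plugged in at init time. Below is a hedged sketch of how such a hook is typically wired and invoked; only the member's signature comes from the listing, while the structure name and the call site are illustrative assumptions.

    /* Illustrative only: the listing shows the hook's signature (bfa.h
     * line 178/179) and the three candidate implementations, but not the
     * structure holding the hook or the code that fills it in.
     */
    struct bfa_hwif_sketch {                        /* hypothetical name */
            void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq, u32 ci);
    };

    static void ack_rspq(struct bfa_s *bfa, struct bfa_hwif_sketch *hwif,
                         int rspq, u32 ci)
    {
            hwif->hw_rspq_ack(bfa, rspq, ci);       /* dispatch to the ASIC-specific ack */
    }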
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_uld.c
    78   struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);   [in uldrx_handler()]
    146  err = t4_sge_alloc_rxq(adap, &q->rspq, false,   [in alloc_uld_rxqs()]
    158  ids[i] = q->rspq.abs_id;   [in alloc_uld_rxqs()]
    164  if (q->rspq.desc)   [in alloc_uld_rxqs()]
    165  free_rspq_fl(adap, &q->rspq,   [in alloc_uld_rxqs()]
    183  /* Tell uP to route control queue completions to rdma rspq */   [in setup_sge_queues_uld()]
    191  cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;   [in setup_sge_queues_uld()]
    206  if (q->rspq.desc)   [in t4_free_uld_rxqs()]
    207  free_rspq_fl(adap, &q->rspq,   [in t4_free_uld_rxqs()]
    289  init_rspq(adap, &r->rspq,   [in cfg_queues_uld()]
    [more matches not shown]

cxgb4_debugfs.c
    2693  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");   [in sge_qinfo_show()]
    2708  R("RspQ ID:", rspq.abs_id);   [in sge_qinfo_show()]
    2709  R("RspQ size:", rspq.size);   [in sge_qinfo_show()]
    2710  R("RspQE size:", rspq.iqe_len);   [in sge_qinfo_show()]
    2711  R("RspQ CIDX:", rspq.cidx);   [in sge_qinfo_show()]
    2712  R("RspQ Gen:", rspq.gen);   [in sge_qinfo_show()]
    2713  S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));   [in sge_qinfo_show()]
    2714  S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);   [in sge_qinfo_show()]
    2766  rx[i].rspq...   [in sge_qinfo_show()]
    [more matches not shown]

cxgb4_tc_mqprio.c
    200  init_rspq(adap, &eorxq->rspq,   [in cxgb4_mqprio_alloc_hw_resources()]
    208  ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,   [in cxgb4_mqprio_alloc_hw_resources()]
    218  eorxq->rspq.cntxt_id);   [in cxgb4_mqprio_alloc_hw_resources()]
    225  eorxq->msix->desc, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    234  cxgb4_enable_rx(adap, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    244  cxgb4_quiesce_rx(&eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    249  free_irq(eorxq->msix->vec, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    258  if (eorxq->rspq.desc)   [in cxgb4_mqprio_alloc_hw_resources()]
    259  free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);   [in cxgb4_mqprio_alloc_hw_resources()]
    285  if (!adap->sge.eohw_rxq[pi->first_qset].rspq...   [in cxgb4_mqprio_free_hw_resources()]
    [more matches not shown]

sge.c
    2594  flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);   [in cxgb4_ethofld_send_flowc()]
    3440  struct adapter *adapter = rxq->rspq.adap;   [in do_gro()]
    3446  skb = napi_get_frags(&rxq->rspq.napi);   [in do_gro()]
    3460  skb_record_rx_queue(skb, rxq->rspq.idx);   [in do_gro()]
    3465  if (rxq->rspq.netdev->features & NETIF_F_RXHASH)   [in do_gro()]
    3473  ret = napi_gro_frags(&rxq->rspq.napi);   [in do_gro()]
    3571  * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
    3584  static void t4_tx_completion_handler(struct sge_rspq *rspq,   [in t4_tx_completion_handler(), argument]
    3589  struct port_info *pi = netdev_priv(rspq->netdev);   [in t4_tx_completion_handler()]
    3590  struct adapter *adapter = rspq...   [in t4_tx_completion_handler()]
    [more matches not shown]
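The cxgb4_uld.c hits sketch the allocation/unwind discipline for ULD response queues: each queue is brought up with t4_sge_alloc_rxq(), its absolute id recorded, and on failure the code walks back over the queues set up so far, using rspq.desc as the "this one really exists" marker before calling free_rspq_fl(). The sketch below shows that shape only; t4_sge_alloc_rxq()'s full argument list is elided in the listing, so a hypothetical helper stands in for it, and the NULL-free-list handling and the ids element type are assumptions.

    /* Shape of the unwind pattern visible in alloc_uld_rxqs();
     * alloc_one_uld_rxq() is a hypothetical stand-in for the elided
     * t4_sge_alloc_rxq() call.
     */
    static int alloc_uld_rxqs_sketch(struct adapter *adap,
                                     struct sge_ofld_rxq *q, unsigned int nq,
                                     unsigned short *ids)   /* element type assumed */
    {
            unsigned int i;
            int err = 0;

            for (i = 0; i < nq; i++) {
                    err = alloc_one_uld_rxq(adap, &q[i]);   /* hypothetical helper */
                    if (err)
                            goto unwind;
                    ids[i] = q[i].rspq.abs_id;      /* record the absolute queue id */
            }
            return 0;

    unwind:
            while (i--)
                    if (q[i].rspq.desc)             /* only free queues that were set up */
                            free_rspq_fl(adap, &q[i].rspq,
                                         q[i].fl.size ? &q[i].fl : NULL);  /* NULL case assumed */
            return err;
    }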
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb4/

cxgb4_uld.c
    78   struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);   [in uldrx_handler()]
    146  err = t4_sge_alloc_rxq(adap, &q->rspq, false,   [in alloc_uld_rxqs()]
    158  ids[i] = q->rspq.abs_id;   [in alloc_uld_rxqs()]
    164  if (q->rspq.desc)   [in alloc_uld_rxqs()]
    165  free_rspq_fl(adap, &q->rspq,   [in alloc_uld_rxqs()]
    183  /* Tell uP to route control queue completions to rdma rspq */   [in setup_sge_queues_uld()]
    191  cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;   [in setup_sge_queues_uld()]
    206  if (q->rspq.desc)   [in t4_free_uld_rxqs()]
    207  free_rspq_fl(adap, &q->rspq,   [in t4_free_uld_rxqs()]
    289  init_rspq(adap, &r->rspq,   [in cfg_queues_uld()]
    [more matches not shown]

cxgb4_debugfs.c
    2693  rx[i].rspq.netdev ? rx[i].rspq.netdev->name : "N/A");   [in sge_qinfo_show()]
    2708  R("RspQ ID:", rspq.abs_id);   [in sge_qinfo_show()]
    2709  R("RspQ size:", rspq.size);   [in sge_qinfo_show()]
    2710  R("RspQE size:", rspq.iqe_len);   [in sge_qinfo_show()]
    2711  R("RspQ CIDX:", rspq.cidx);   [in sge_qinfo_show()]
    2712  R("RspQ Gen:", rspq.gen);   [in sge_qinfo_show()]
    2713  S3("u", "Intr delay:", qtimer_val(adap, &rx[i].rspq));   [in sge_qinfo_show()]
    2714  S3("u", "Intr pktcnt:", s->counter_val[rx[i].rspq.pktcnt_idx]);   [in sge_qinfo_show()]
    2766  rx[i].rspq...   [in sge_qinfo_show()]
    [more matches not shown]

cxgb4_tc_mqprio.c
    200  init_rspq(adap, &eorxq->rspq,   [in cxgb4_mqprio_alloc_hw_resources()]
    208  ret = t4_sge_alloc_rxq(adap, &eorxq->rspq, false,   [in cxgb4_mqprio_alloc_hw_resources()]
    218  eorxq->rspq.cntxt_id);   [in cxgb4_mqprio_alloc_hw_resources()]
    225  eorxq->msix->desc, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    234  cxgb4_enable_rx(adap, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    244  cxgb4_quiesce_rx(&eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    249  free_irq(eorxq->msix->vec, &eorxq->rspq);   [in cxgb4_mqprio_alloc_hw_resources()]
    258  if (eorxq->rspq.desc)   [in cxgb4_mqprio_alloc_hw_resources()]
    259  free_rspq_fl(adap, &eorxq->rspq, &eorxq->fl);   [in cxgb4_mqprio_alloc_hw_resources()]
    285  if (!adap->sge.eohw_rxq[pi->first_qset].rspq...   [in cxgb4_mqprio_free_hw_resources()]
    [more matches not shown]

sge.c
    2600  flowc->mnemval[3].val = cpu_to_be32(rxq->rspq.abs_id);   [in cxgb4_ethofld_send_flowc()]
    3446  struct adapter *adapter = rxq->rspq.adap;   [in do_gro()]
    3452  skb = napi_get_frags(&rxq->rspq.napi);   [in do_gro()]
    3466  skb_record_rx_queue(skb, rxq->rspq.idx);   [in do_gro()]
    3471  if (rxq->rspq.netdev->features & NETIF_F_RXHASH)   [in do_gro()]
    3479  ret = napi_gro_frags(&rxq->rspq.napi);   [in do_gro()]
    3577  * @rspq: Ethernet RX Response Queue associated with Ethernet TX Queue
    3590  static void t4_tx_completion_handler(struct sge_rspq *rspq,   [in t4_tx_completion_handler(), argument]
    3595  struct port_info *pi = netdev_priv(rspq->netdev);   [in t4_tx_completion_handler()]
    3596  struct adapter *adapter = rspq...   [in t4_tx_completion_handler()]
    [more matches not shown]
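Both cxgb4 trees bind interrupts directly to the response queue: cxgb4_tc_mqprio.c passes &eorxq->rspq as the request_irq()/free_irq() cookie, and the matching handler (visible in the cxgb4vf listing as t4vf_sge_intr_msix()) casts the cookie back. A minimal sketch of that convention follows; the handler name and the napi_schedule() body are illustrative, not the driver's exact code.

    /* dev_id convention suggested by the hits: the sge_rspq itself is the
     * interrupt cookie.  Handler body is illustrative only.
     */
    static irqreturn_t rspq_msix_intr_sketch(int irq, void *cookie)
    {
            struct sge_rspq *rspq = cookie;

            napi_schedule(&rspq->napi);     /* defer the ring walk to NAPI context */
            return IRQ_HANDLED;
    }

    /* Registration and teardown mirror each other with the same cookie:
     *     request_irq(vec, rspq_msix_intr_sketch, 0, name, &rxq->rspq);
     *     free_irq(vec, &rxq->rspq);
     */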
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb3/

sge.c
    174   return container_of(q, struct sge_qset, rspq);   [in rspq_to_qset()]
    656   memset(&q->rspq, 0, sizeof(q->rspq));   [in t3_reset_qset()]
    711   if (q->rspq.desc) {   [in t3_free_qset()]
    713   t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);   [in t3_free_qset()]
    716   q->rspq.size * sizeof(struct rsp_desc),   [in t3_free_qset()]
    717   q->rspq.desc, q->rspq.phys_addr);   [in t3_free_qset()]
    732   qs->rspq.cntxt_id = id;   [in init_qset_cntxt()]
    1901  struct sge_rspq *q = &qs->rspq;   [in ofld_poll()]
    [more matches not shown]
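In cxgb3, the response queue is torn down from t3_free_qset() only if its descriptor ring was ever allocated: the hardware response context is disabled first, then the coherent ring of rspq.size rsp_desc entries is returned. The freeing call itself sits on a line the listing does not show, so dma_free_coherent() below is an assumption (older cxgb3 code went through the pci_* wrappers), as are the lock and the device pointer; the guard, the disable call and the size arithmetic are as shown above.

    /* Sketch of the rspq branch of t3_free_qset(); the lock and the
     * dma_free_coherent() call are assumptions, see above.
     */
    if (q->rspq.desc) {
            spin_lock_irq(&adapter->sge.reg_lock);          /* assumed lock */
            t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);
            spin_unlock_irq(&adapter->sge.reg_lock);
            dma_free_coherent(&adapter->pdev->dev,          /* assumed API and device */
                              q->rspq.size * sizeof(struct rsp_desc),
                              q->rspq.desc, q->rspq.phys_addr);
    }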
/kernel/linux/linux-6.6/drivers/net/ethernet/chelsio/cxgb3/

sge.c
    171   return container_of(q, struct sge_qset, rspq);   [in rspq_to_qset()]
    652   memset(&q->rspq, 0, sizeof(q->rspq));   [in t3_reset_qset()]
    707   if (q->rspq.desc) {   [in t3_free_qset()]
    709   t3_sge_disable_rspcntxt(adapter, q->rspq.cntxt_id);   [in t3_free_qset()]
    712   q->rspq.size * sizeof(struct rsp_desc),   [in t3_free_qset()]
    713   q->rspq.desc, q->rspq.phys_addr);   [in t3_free_qset()]
    728   qs->rspq.cntxt_id = id;   [in init_qset_cntxt()]
    1899  struct sge_rspq *q = &qs->rspq;   [in ofld_poll()]
    [more matches not shown]
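Finally, rspq_to_qset() shows the usual embedded-struct recovery: struct sge_qset embeds its response queue directly, so a pointer to the rspq member can be mapped back to the owning qset with container_of(), with no back-pointer stored in the rspq itself. Only the return statement appears in the hits (line 171 above, 174 in the 5.10 tree); the signature below is inferred from how the helper is used and should be read as a sketch.

    /* Recover the qset that embeds a given response queue; signature inferred. */
    static inline struct sge_qset *rspq_to_qset(struct sge_rspq *q)
    {
            return container_of(q, struct sge_qset, rspq);
    }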