/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_device.c
     54  u32 q_no;  in cn23xx_vf_reset_io_queues() [local]
     57  for (q_no = 0; q_no < num_queues; q_no++) {  in cn23xx_vf_reset_io_queues()
     60  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_vf_reset_io_queues()
     62  octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),  in cn23xx_vf_reset_io_queues()
     67  for (q_no = 0; q_no < num_queues; q_no++) {  in cn23xx_vf_reset_io_queues()
     69  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_vf_reset_io_queues()
    105  u64 q_no, intr_threshold;  in cn23xx_vf_setup_global_input_regs() [local]
    154  u32 q_no;  in cn23xx_vf_setup_global_output_regs() [local]
    320  u32 q_no;  in cn23xx_enable_vf_io_queues() [local]
    409  u32 q_no, count = 0;  in cn23xx_octeon_pfvf_handshake() [local]
    547  u32 q_no, time_threshold;  in cn23xx_enable_vf_interrupt() [local]
    587  u32 q_no;  in cn23xx_disable_vf_interrupt() [local]
    [all...]
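The hits at lines 57-69 show the shape of cn23xx_vf_reset_io_queues(): a first pass that read-modify-writes each queue's SLI_IQ_PKT_CONTROL64 CSR, then a second pass that reads the registers back. Below is a minimal standalone sketch of that two-pass pattern; the CSR accessors, the QUEUE_RST bit, and MAX_QUEUES are invented stand-ins, not the driver's real symbols.

#include <stdint.h>
#include <stdio.h>

#define MAX_QUEUES 8
#define QUEUE_RST  (1ULL << 59)   /* hypothetical reset bit */

static uint64_t csr[MAX_QUEUES];  /* mock per-queue control registers */

static uint64_t read_csr64(uint32_t q_no) { return csr[q_no]; }
static void write_csr64(uint32_t q_no, uint64_t v) { csr[q_no] = v; }

static void reset_io_queues(uint32_t num_queues)
{
	uint32_t q_no;
	uint64_t d64;

	/* Pass 1: set the reset bit in every queue's control CSR. */
	for (q_no = 0; q_no < num_queues; q_no++) {
		d64 = read_csr64(q_no);
		d64 |= QUEUE_RST;
		write_csr64(q_no, d64);
	}

	/* Pass 2: read each CSR back; real hardware would be polled
	 * here until the reset bit self-clears. */
	for (q_no = 0; q_no < num_queues; q_no++) {
		d64 = read_csr64(q_no);
		if (d64 & QUEUE_RST)
			printf("queue %u still in reset\n", q_no);
	}
}

int main(void)
{
	reset_io_queues(4);
	return 0;
}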
cn23xx_pf_device.c
    349  u32 q_no, srn, ern;  in cn23xx_reset_io_queues() [local]
    359  for (q_no = srn; q_no < ern; q_no++) {  in cn23xx_reset_io_queues()
    361  d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_reset_io_queues()
    363  octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);  in cn23xx_reset_io_queues()
    367  for (q_no = srn; q_no < ern; q_no++) {  in cn23xx_reset_io_queues()
    369  CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_reset_io_queues()
    405  u32 q_no, ern, srn;  in cn23xx_pf_setup_global_input_regs() [local]
    478  u32 q_no, ern, srn;  in cn23xx_pf_setup_global_output_regs() [local]
    684  u32 q_no, i;  in cn23xx_pf_mbox_thread() [local]
    714  u32 q_no, i;  in cn23xx_setup_pf_mbox() [local]
    775  u32 q_no, i;  in cn23xx_free_pf_mbox() [local]
    793  u32 srn, ern, q_no;  in cn23xx_enable_io_queues() [local]
    869  int q_no, loop;  in cn23xx_disable_io_queues() [local]
    988  u32 i, q_no;  in cn23xx_handle_pf_mbox_intr() [local]
    [all...]
octeon_droq.c
    198  int octeon_delete_droq(struct octeon_device *oct, u32 q_no)  in octeon_delete_droq() [argument]
    200  struct octeon_droq *droq = oct->droq[q_no];  in octeon_delete_droq()
    202  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);  in octeon_delete_droq()
    212  oct->io_qmask.oq &= ~(1ULL << q_no);  in octeon_delete_droq()
    213  vfree(oct->droq[q_no]);  in octeon_delete_droq()
    214  oct->droq[q_no] = NULL;  in octeon_delete_droq()
    221  u32 q_no,  in octeon_init_droq()
    231  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);  in octeon_init_droq()
    233  droq = oct->droq[q_no];  in octeon_init_droq()
    237  droq->q_no  in octeon_init_droq()
    220  octeon_init_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx)  in octeon_init_droq() [argument]
    825  octeon_enable_irq(struct octeon_device *oct, u32 q_no)  in octeon_enable_irq() [argument]
    865  octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops)  in octeon_register_droq_ops() [argument]
    894  octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)  in octeon_unregister_droq_ops() [argument]
    925  octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx)  in octeon_create_droq() [argument]
    [all...]
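The octeon_delete_droq() hits (lines 212-214) show the teardown bookkeeping: clear the queue's bit in a 64-bit liveness mask, free the ring, and NULL the slot. A self-contained sketch of that pattern follows, with invented types; the real driver uses vfree() and struct octeon_device.

#include <stdint.h>
#include <stdlib.h>

#define MAX_DROQS 64

struct droq {
	uint32_t q_no;          /* queue index; ring state omitted */
};

struct device_state {
	uint64_t oq_mask;       /* bit n set => output queue n exists */
	struct droq *droq[MAX_DROQS];
};

static int delete_droq(struct device_state *dev, uint32_t q_no)
{
	if (q_no >= MAX_DROQS || !dev->droq[q_no])
		return -1;

	dev->oq_mask &= ~(1ULL << q_no); /* mark the queue as gone first */
	free(dev->droq[q_no]);           /* the kernel driver uses vfree() */
	dev->droq[q_no] = NULL;          /* avoid a dangling pointer */
	return 0;
}

int main(void)
{
	struct device_state dev = { 0 };

	dev.droq[3] = calloc(1, sizeof(*dev.droq[3]));
	dev.oq_mask = 1ULL << 3;
	return delete_droq(&dev, 3);
}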
octeon_droq.h
    248  u32 q_no;  [member]
    333  * @param q_no - droq no. ranges from 0 - 3.
    338  u32 q_no,
    347  * @param q_no - droq no. ranges from 0 - 3.
    350  int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
    354  * on output queues given by q_no irrespective of the type of packet.
    359  * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
    365  u32 q_no,
    371  * given by q_no
    [all...]
octeon_mailbox.c
     65  mbox->mbox_req.q_no = mbox->q_no;  in octeon_mbox_read()
     77  mbox->mbox_resp.q_no = mbox->q_no;  in octeon_mbox_read()
    134  struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];  in octeon_mbox_write()
    262  mbox->q_no);  in octeon_mbox_process_cmd()
    263  pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);  in octeon_mbox_process_cmd()
    355  int octeon_mbox_cancel(struct octeon_device *oct, int q_no)  in octeon_mbox_cancel() [argument]
    357  struct octeon_mbox *mbox = oct->mbox[q_no];  in octeon_mbox_cancel()
lio_core.c
    170  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in liquidio_set_feature()
    435  struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];  in octeon_schedule_rxq_oom_work()
    446  int q_no = wk->ctxul;  in octnet_poll_check_rxq_oom_status() [local]
    447  struct octeon_droq *droq = oct->droq[q_no];  in octnet_poll_check_rxq_oom_status()
    461  int q, q_no;  in setup_rx_oom_poll_fn() [local]
    464  q_no = lio->linfo.rxpciq[q].s.q_no;  in setup_rx_oom_poll_fn()
    465  wq = &lio->rxq_status_wq[q_no];  in setup_rx_oom_poll_fn()
    476  wq->wk.ctxul = q_no;  in setup_rx_oom_poll_fn()
    487  int q_no;  in cleanup_rx_oom_poll_fn() [local]
    533  octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, int desc_size, void *app_ctx)  in octeon_setup_droq() [argument]
    819  int q, q_no;  in liquidio_setup_io_queues() [local]
    [all...]
octeon_mailbox.h
     65  u32 q_no;  [member]
     88  u32 q_no;  [member]
    120  int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
octeon_device.c
    894  txpciq.s.q_no = iq_no;  in octeon_setup_instr_queues()
    953  u32 q_no;  in octeon_set_io_queues_off() [local]
    959  for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {  in octeon_set_io_queues_off()
    961  oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
    967  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
    973  q_no);  in octeon_set_io_queues_off()
    979  CN23XX_SLI_IQ_PKT_CONTROL64(q_no),  in octeon_set_io_queues_off()
    983  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
    994  octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable)  in octeon_set_droq_pkt_op() [argument]
   1267  octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)  in octeon_get_tx_qsize() [argument]
   1277  octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)  in octeon_get_rx_qsize() [argument]
    [all...]
lio_ethtool.c
    480  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in lio_send_queue_count_update()
    715  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_gpio_access()
    741  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_id_active()
    784  sc->iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_mdio45_access()
   1069  lio->txq = lio->linfo.txpciq[0].s.q_no;  in lio_23xx_reconfigure_queue_count()
   1070  lio->rxq = lio->linfo.rxpciq[0].s.q_no;  in lio_23xx_reconfigure_queue_count()
   1389  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in lio_set_pauseparam()
   1754  j = lio->linfo.txpciq[vj].s.q_no;  in lio_vf_get_ethtool_stats()
   1796  j = lio->linfo.rxpciq[vj].s.q_no;  in lio_vf_get_ethtool_stats()
   2013  sc->iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_get_intrmod_cfg()
   2242  int q_no;  in oct_cfg_rx_intrcnt() [local]
   2264  int q_no;  in oct_cfg_rx_intrcnt() [local]
   2319  int q_no;  in oct_cfg_rx_intrtime() [local]
   2341  int q_no;  in oct_cfg_rx_intrtime() [local]
   2385  int q_no;  in oct_cfg_tx_intrcnt() [local]
   2420  u32 j, q_no;  in lio_set_intr_coalesce() [local]
    [all...]
octeon_nic.h
     85  u32 q_no;  [member]
    112  static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)  in octnet_iq_is_full() [argument]
    114  return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)  in octnet_iq_is_full()
    115  >= (oct->instr_queue[q_no]->max_count - 2));  in octnet_iq_is_full()
    238  * @param q_no - which queue for back pressure
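octnet_iq_is_full() is visible in full at lines 112-115: the queue reports full once instr_pending reaches max_count - 2, i.e. two descriptors of headroom before the ring is truly exhausted. A standalone sketch of the same test using C11 atomics; the struct layout and main() are illustrative only.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct instr_queue {
	atomic_int instr_pending;  /* descriptors posted but not completed */
	uint32_t max_count;        /* ring size in descriptors */
};

/* Same test as octnet_iq_is_full(): treat the ring as full two
 * descriptors early, leaving headroom before it actually wraps. */
static bool iq_is_full(struct instr_queue *iq)
{
	return (uint32_t)atomic_load(&iq->instr_pending) >=
	       (iq->max_count - 2);
}

int main(void)
{
	struct instr_queue iq = { .max_count = 4 };

	atomic_store(&iq.instr_pending, 2);
	printf("%d\n", iq_is_full(&iq));  /* prints 1: 2 >= 4 - 2 */
	return 0;
}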
cn66xx_regs.h
    473  #define CN6XXX_DPI_DMA_ENG_ENB(q_no) \
    474  (CN6XXX_DPI_DMA_ENG0_ENB + ((q_no) * 8))
    478  #define CN6XXX_DPI_DMA_ENG_BUF(q_no) \
    479  (CN6XXX_DPI_DMA_ENG0_BUF + ((q_no) * 8))
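Both macros compute a per-engine register offset by stepping 8 bytes (one 64-bit register) per q_no from an ENG0 base. A sketch of the same indexing scheme with invented base offsets; the real base values live in cn66xx_regs.h.

#include <stdint.h>
#include <stdio.h>

#define DPI_DMA_ENG0_ENB 0x0080ULL   /* hypothetical base offsets */
#define DPI_DMA_ENG0_BUF 0x0880ULL

/* Engine q_no's 64-bit register sits q_no * 8 bytes past engine 0's. */
#define DPI_DMA_ENG_ENB(q_no) (DPI_DMA_ENG0_ENB + ((uint64_t)(q_no) * 8))
#define DPI_DMA_ENG_BUF(q_no) (DPI_DMA_ENG0_BUF + ((uint64_t)(q_no) * 8))

int main(void)
{
	for (uint32_t q = 0; q < 4; q++)
		printf("engine %u: ENB @ 0x%llx, BUF @ 0x%llx\n", q,
		       (unsigned long long)DPI_DMA_ENG_ENB(q),
		       (unsigned long long)DPI_DMA_ENG_BUF(q));
	return 0;
}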
/kernel/linux/linux-6.6/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_device.c
     54  u32 q_no;  in cn23xx_vf_reset_io_queues() [local]
     57  for (q_no = 0; q_no < num_queues; q_no++) {  in cn23xx_vf_reset_io_queues()
     60  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_vf_reset_io_queues()
     62  octeon_write_csr64(oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no),  in cn23xx_vf_reset_io_queues()
     67  for (q_no = 0; q_no < num_queues; q_no++) {  in cn23xx_vf_reset_io_queues()
     69  CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_vf_reset_io_queues()
    105  u64 q_no, intr_threshold;  in cn23xx_vf_setup_global_input_regs() [local]
    154  u32 q_no;  in cn23xx_vf_setup_global_output_regs() [local]
    318  u32 q_no;  in cn23xx_enable_vf_io_queues() [local]
    408  u32 q_no, count = 0;  in cn23xx_octeon_pfvf_handshake() [local]
    547  u32 q_no, time_threshold;  in cn23xx_enable_vf_interrupt() [local]
    587  u32 q_no;  in cn23xx_disable_vf_interrupt() [local]
    [all...]
cn23xx_pf_device.c
    349  u32 q_no, srn, ern;  in cn23xx_reset_io_queues() [local]
    359  for (q_no = srn; q_no < ern; q_no++) {  in cn23xx_reset_io_queues()
    361  d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_reset_io_queues()
    363  octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);  in cn23xx_reset_io_queues()
    367  for (q_no = srn; q_no < ern; q_no++) {  in cn23xx_reset_io_queues()
    369  CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in cn23xx_reset_io_queues()
    405  u32 q_no, ern, srn;  in cn23xx_pf_setup_global_input_regs() [local]
    478  u32 q_no, ern, srn;  in cn23xx_pf_setup_global_output_regs() [local]
    684  u32 q_no, i;  in cn23xx_pf_mbox_thread() [local]
    714  u32 q_no, i;  in cn23xx_setup_pf_mbox() [local]
    773  u32 q_no, i;  in cn23xx_free_pf_mbox() [local]
    791  u32 srn, ern, q_no;  in cn23xx_enable_io_queues() [local]
    867  int q_no, loop;  in cn23xx_disable_io_queues() [local]
    986  u32 i, q_no;  in cn23xx_handle_pf_mbox_intr() [local]
    [all...]
octeon_droq.c
    199  int octeon_delete_droq(struct octeon_device *oct, u32 q_no)  in octeon_delete_droq() [argument]
    201  struct octeon_droq *droq = oct->droq[q_no];  in octeon_delete_droq()
    203  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);  in octeon_delete_droq()
    213  oct->io_qmask.oq &= ~(1ULL << q_no);  in octeon_delete_droq()
    214  vfree(oct->droq[q_no]);  in octeon_delete_droq()
    215  oct->droq[q_no] = NULL;  in octeon_delete_droq()
    223  u32 q_no,  in octeon_init_droq()
    233  dev_dbg(&oct->pci_dev->dev, "%s[%d]\n", __func__, q_no);  in octeon_init_droq()
    235  droq = oct->droq[q_no];  in octeon_init_droq()
    239  droq->q_no  in octeon_init_droq()
    222  octeon_init_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx)  in octeon_init_droq() [argument]
    828  octeon_enable_irq(struct octeon_device *oct, u32 q_no)  in octeon_enable_irq() [argument]
    868  octeon_register_droq_ops(struct octeon_device *oct, u32 q_no, struct octeon_droq_ops *ops)  in octeon_register_droq_ops() [argument]
    897  octeon_unregister_droq_ops(struct octeon_device *oct, u32 q_no)  in octeon_unregister_droq_ops() [argument]
    929  octeon_create_droq(struct octeon_device *oct, u32 q_no, u32 num_descs, u32 desc_size, void *app_ctx)  in octeon_create_droq() [argument]
    [all...]
octeon_droq.h
    248  u32 q_no;  [member]
    333  * @param q_no - droq no. ranges from 0 - 3.
    338  u32 q_no,
    347  * @param q_no - droq no. ranges from 0 - 3.
    350  int octeon_delete_droq(struct octeon_device *oct_dev, u32 q_no);
    354  * on output queues given by q_no irrespective of the type of packet.
    359  * @param q_no - octeon output queue number (0 <= q_no <= MAX_OCTEON_DROQ-1
    365  u32 q_no,
    371  * given by q_no
    [all...]
octeon_mailbox.c
     65  mbox->mbox_req.q_no = mbox->q_no;  in octeon_mbox_read()
     77  mbox->mbox_resp.q_no = mbox->q_no;  in octeon_mbox_read()
    134  struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];  in octeon_mbox_write()
    262  mbox->q_no);  in octeon_mbox_process_cmd()
    263  pcie_flr(oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no]);  in octeon_mbox_process_cmd()
    355  int octeon_mbox_cancel(struct octeon_device *oct, int q_no)  in octeon_mbox_cancel() [argument]
    357  struct octeon_mbox *mbox = oct->mbox[q_no];  in octeon_mbox_cancel()
lio_core.c
    175  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in liquidio_set_feature()
    442  struct cavium_wq *wq = &lio->rxq_status_wq[droq->q_no];  in octeon_schedule_rxq_oom_work()
    453  int q_no = wk->ctxul;  in octnet_poll_check_rxq_oom_status() [local]
    454  struct octeon_droq *droq = oct->droq[q_no];  in octnet_poll_check_rxq_oom_status()
    468  int q, q_no;  in setup_rx_oom_poll_fn() [local]
    471  q_no = lio->linfo.rxpciq[q].s.q_no;  in setup_rx_oom_poll_fn()
    472  wq = &lio->rxq_status_wq[q_no];  in setup_rx_oom_poll_fn()
    483  wq->wk.ctxul = q_no;  in setup_rx_oom_poll_fn()
    495  int q_no;  in cleanup_rx_oom_poll_fn() [local]
    541  octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs, int desc_size, void *app_ctx)  in octeon_setup_droq() [argument]
    822  int q, q_no;  in liquidio_setup_io_queues() [local]
    [all...]
octeon_mailbox.h
     65  u32 q_no;  [member]
     88  u32 q_no;  [member]
    120  int octeon_mbox_cancel(struct octeon_device *oct, int q_no);
octeon_device.c
    901  txpciq.s.q_no = iq_no;  in octeon_setup_instr_queues()
    962  u32 q_no;  in octeon_set_io_queues_off() [local]
    968  for (q_no = 0; q_no < oct->sriov_info.rings_per_vf; q_no++) {  in octeon_set_io_queues_off()
    970  oct, CN23XX_VF_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
    976  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
    982  q_no);  in octeon_set_io_queues_off()
    988  CN23XX_SLI_IQ_PKT_CONTROL64(q_no),  in octeon_set_io_queues_off()
    992  oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));  in octeon_set_io_queues_off()
   1004  octeon_set_droq_pkt_op(struct octeon_device *oct, u32 q_no, u32 enable)  in octeon_set_droq_pkt_op() [argument]
   1281  octeon_get_tx_qsize(struct octeon_device *oct, u32 q_no)  in octeon_get_tx_qsize() [argument]
   1292  octeon_get_rx_qsize(struct octeon_device *oct, u32 q_no)  in octeon_get_rx_qsize() [argument]
    [all...]
lio_ethtool.c
    481  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in lio_send_queue_count_update()
    716  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_gpio_access()
    742  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_id_active()
    785  sc->iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_mdio45_access()
   1072  lio->txq = lio->linfo.txpciq[0].s.q_no;  in lio_23xx_reconfigure_queue_count()
   1073  lio->rxq = lio->linfo.rxpciq[0].s.q_no;  in lio_23xx_reconfigure_queue_count()
   1395  nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;  in lio_set_pauseparam()
   1760  j = lio->linfo.txpciq[vj].s.q_no;  in lio_vf_get_ethtool_stats()
   1802  j = lio->linfo.rxpciq[vj].s.q_no;  in lio_vf_get_ethtool_stats()
   2019  sc->iq_no = lio->linfo.txpciq[0].s.q_no;  in octnet_get_intrmod_cfg()
   2250  int q_no;  in oct_cfg_rx_intrcnt() [local]
   2272  int q_no;  in oct_cfg_rx_intrcnt() [local]
   2327  int q_no;  in oct_cfg_rx_intrtime() [local]
   2349  int q_no;  in oct_cfg_rx_intrtime() [local]
   2393  int q_no;  in oct_cfg_tx_intrcnt() [local]
   2430  u32 j, q_no;  in lio_set_intr_coalesce() [local]
    [all...]
octeon_nic.h
     85  u32 q_no;  [member]
    112  static inline int octnet_iq_is_full(struct octeon_device *oct, u32 q_no)  in octnet_iq_is_full() [argument]
    114  return ((u32)atomic_read(&oct->instr_queue[q_no]->instr_pending)  in octnet_iq_is_full()
    115  >= (oct->instr_queue[q_no]->max_count - 2));  in octnet_iq_is_full()
    238  * @param q_no - which queue for back pressure
/kernel/linux/linux-6.6/drivers/net/ethernet/marvell/octeon_ep/
octep_tx.c
     93  if (unlikely(__netif_subqueue_stopped(iq->netdev, iq->q_no)) &&  in octep_iq_process_completions()
     96  netif_wake_subqueue(iq->netdev, iq->q_no);  in octep_iq_process_completions()
    149  netdev_tx_reset_queue(netdev_get_tx_queue(iq->netdev, iq->q_no));  in octep_iq_free_pending()
    174  * @q_no: Tx queue number to be setup.
    178  static int octep_setup_iq(struct octep_device *oct, int q_no)  in octep_setup_iq() [argument]
    187  oct->iq[q_no] = iq;  in octep_setup_iq()
    192  iq->q_no = q_no;  in octep_setup_iq()
    196  iq->netdev_q = netdev_get_tx_queue(iq->netdev, q_no);  in octep_setup_iq()
    204  "Failed to allocate DMA memory for IQ-%d\n", q_no);  in octep_setup_iq()
    269  int q_no = iq->q_no;  in octep_free_iq() [local]
    [all...]
octep_cn9k_pf.c
    110  static int cn93_reset_iq(struct octep_device *oct, int q_no)  in cn93_reset_iq() [argument]
    115  dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);  in cn93_reset_iq()
    118  q_no += conf->pf_ring_cfg.srn;  in cn93_reset_iq()
    121  octep_write_csr64(oct, CN93_SDP_R_IN_ENABLE(q_no), val);  in cn93_reset_iq()
    124  octep_write_csr64(oct, CN93_SDP_R_IN_CNTS(q_no), val);  in cn93_reset_iq()
    125  octep_write_csr64(oct, CN93_SDP_R_IN_INT_LEVELS(q_no), val);  in cn93_reset_iq()
    126  octep_write_csr64(oct, CN93_SDP_R_IN_PKT_CNT(q_no), val);  in cn93_reset_iq()
    127  octep_write_csr64(oct, CN93_SDP_R_IN_BYTE_CNT(q_no), val);  in cn93_reset_iq()
    128  octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_BADDR(q_no), val);  in cn93_reset_iq()
    129  octep_write_csr64(oct, CN93_SDP_R_IN_INSTR_RSIZE(q_no), va  in cn93_reset_iq()
    138  cn93_reset_oq(struct octep_device *oct, int q_no)  in cn93_reset_oq() [argument]
    360  octep_setup_mbox_regs_cn93_pf(struct octep_device *oct, int q_no)  in octep_setup_mbox_regs_cn93_pf() [argument]
    [all...]
octep_rx.c
     50  oq->q_no);  in octep_oq_fill_ring_buffers()
     99  oq->q_no);  in octep_oq_refill()
    119  * @q_no: Rx queue number to be setup.
    123  static int octep_setup_oq(struct octep_device *oct, int q_no)  in octep_setup_oq() [argument]
    131  oct->oq[q_no] = oq;  in octep_setup_oq()
    136  oq->q_no = q_no;  in octep_setup_oq()
    157  "Failed to allocate DMA memory for OQ-%d !!\n", q_no);  in octep_setup_oq()
    164  "Failed to allocate buffer info for OQ-%d\n", q_no);  in octep_setup_oq()
    172  oct->hw_ops.setup_oq_regs(oct, q_no);  in octep_setup_oq()
    228  int q_no = oq->q_no;  in octep_free_oq() [local]
    [all...]
octep_main.c
    359  netdev_dbg(iq->netdev, "enabling intr for Q-%u\n", iq->q_no);  in octep_enable_ioq_irq()
    602  netif_stop_subqueue(iq->netdev, iq->q_no);  in octep_iq_full_check()
    609  netif_start_subqueue(iq->netdev, iq->q_no);  in octep_iq_full_check()
    638  u16 q_no, wi;  in octep_start_xmit() [local]
    640  q_no = skb_get_queue_mapping(skb);  in octep_start_xmit()
    641  if (q_no >= oct->num_iqs) {  in octep_start_xmit()
    642  netdev_err(netdev, "Invalid Tx skb->queue_mapping=%d\n", q_no);  in octep_start_xmit()
    643  q_no = q_no % oct->num_iqs;  in octep_start_xmit()
    646  iq = oct->iq[q_no];  in octep_start_xmit()
    [all...]
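The octep_start_xmit() hits (lines 640-646) show defensive queue selection: take the stack's skb queue mapping, and if it somehow exceeds the configured IQ count, log it and clamp with a modulo rather than drop the packet. A sketch of that logic with stand-in types; skb_get_queue_mapping() and the netdev plumbing are omitted.

#include <stdint.h>
#include <stdio.h>

struct net_device_state {
	uint16_t num_iqs;   /* number of configured Tx (instruction) queues */
};

static uint16_t pick_tx_queue(const struct net_device_state *dev,
			      uint16_t mapped_q)
{
	uint16_t q_no = mapped_q;  /* would come from skb_get_queue_mapping() */

	if (q_no >= dev->num_iqs) {
		fprintf(stderr, "Invalid Tx queue_mapping=%u\n", q_no);
		q_no = q_no % dev->num_iqs;  /* fall back to a valid ring */
	}
	return q_no;
}

int main(void)
{
	struct net_device_state dev = { .num_iqs = 4 };

	printf("%u\n", pick_tx_queue(&dev, 9));  /* prints 1: 9 % 4 */
	return 0;
}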