Lines matching refs:bnad. Each entry below gives the source line number followed by the matching line from the bna driver source; three short, hedged sketches of recurring patterns in these entries follow the listing.

23 #include "bnad.h"
73 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
89 bnad_tx_buff_unmap(struct bnad *bnad,
103 dma_unmap_single(&bnad->pcidev->dev,
118 dma_unmap_page(&bnad->pcidev->dev,
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
190 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
206 struct net_device *netdev = bnad->netdev;
212 sent = bnad_txcmpl_process(bnad, tcb);
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
239 struct bnad *bnad = tcb->bnad;
241 bnad_tx_complete(bnad, tcb);
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
264 bnad_rxq_alloc_uninit(bnad, rcb);
292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
297 dma_unmap_page(&bnad->pcidev->dev,
307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
312 dma_unmap_single(&bnad->pcidev->dev,
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
331 bnad_rxq_cleanup_skb(bnad, unmap);
333 bnad_rxq_cleanup_page(bnad, unmap);
335 bnad_rxq_alloc_uninit(bnad, rcb);
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
481 bnad_rxq_refill_page(bnad, rcb, to_alloc);
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
512 bnad_rxq_cleanup_skb(bnad, unmap);
514 bnad_rxq_cleanup_page(bnad, unmap);
522 struct bnad *bnad;
533 bnad = rcb->bnad;
547 dma_unmap_page(&bnad->pcidev->dev,
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
575 dma_unmap_single(&bnad->pcidev->dev,
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
600 prefetch(bnad->netdev);
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
690 bnad_cq_setup_skb(bnad, skb, unmap, len);
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
732 bnad_rxq_post(bnad, ccb->rcb[0]);
734 bnad_rxq_post(bnad, ccb->rcb[1]);
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
773 struct bnad *bnad = (struct bnad *)data;
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
781 bna_intr_status_get(&bnad->bna, intr_status);
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784 bna_mbox_handler(&bnad->bna, intr_status);
786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
797 struct bnad *bnad = (struct bnad *)data;
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
808 bna_intr_status_get(&bnad->bna, intr_status);
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816 bna_mbox_handler(&bnad->bna, intr_status);
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
840 bnad_netif_rx_schedule_poll(bnad,
852 bnad_enable_mbox_irq(struct bnad *bnad)
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
860 * Called with bnad->bna_lock held b'cos of
861 * bnad->cfg_flags access.
864 bnad_disable_mbox_irq(struct bnad *bnad)
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
874 struct net_device *netdev = bnad->netdev;
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
878 eth_hw_addr_set(netdev, bnad->perm_addr);
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
887 bnad_enable_mbox_irq(bnad);
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
893 bnad_disable_mbox_irq(bnad);
897 bnad_cb_ioceth_ready(struct bnad *bnad)
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
904 bnad_cb_ioceth_failed(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
920 struct bnad *bnad = (struct bnad *)arg;
922 netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
927 bnad_cb_ethport_link_status(struct bnad *bnad,
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
945 if (!netif_carrier_ok(bnad->netdev)) {
947 netdev_info(bnad->netdev, "link up\n");
948 netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
954 bnad->tx_info[tx_id].tcb[tcb_id];
967 bnad->netdev,
969 BNAD_UPDATE_CTR(bnad,
973 bnad->netdev,
975 BNAD_UPDATE_CTR(bnad,
982 if (netif_carrier_ok(bnad->netdev)) {
983 netdev_info(bnad->netdev, "link down\n");
984 netif_carrier_off(bnad->netdev);
985 BNAD_UPDATE_CTR(bnad, link_toggle);
993 struct bnad *bnad = (struct bnad *)arg;
995 complete(&bnad->bnad_completions.tx_comp);
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1051 netif_stop_subqueue(bnad->netdev, txq_id);
1056 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1073 if (netif_carrier_ok(bnad->netdev)) {
1074 netif_wake_subqueue(bnad->netdev, txq_id);
1075 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1084 if (is_zero_ether_addr(bnad->perm_addr)) {
1085 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1086 bnad_set_netdev_perm_addr(bnad);
1098 struct bnad *bnad = NULL;
1108 bnad = tcb->bnad;
1115 bnad_txq_cleanup(bnad, tcb);
1122 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1127 spin_lock_irqsave(&bnad->bna_lock, flags);
1129 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1133 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1145 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1149 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1178 struct bnad *bnad = NULL;
1188 bnad = rx_ctrl->ccb->bnad;
1196 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1197 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1199 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1202 spin_lock_irqsave(&bnad->bna_lock, flags);
1204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1208 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1227 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1231 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1252 bnad_rxq_alloc_init(bnad, rcb);
1255 bnad_rxq_post(bnad, rcb);
1263 struct bnad *bnad = (struct bnad *)arg;
1265 complete(&bnad->bnad_completions.rx_comp);
1269 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1271 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1272 complete(&bnad->bnad_completions.mcast_comp);
1276 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1280 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1282 if (!netif_running(bnad->netdev) ||
1283 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1286 mod_timer(&bnad->stats_timer,
1291 bnad_cb_enet_mtu_set(struct bnad *bnad)
1293 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1294 complete(&bnad->bnad_completions.mtu_comp);
1310 bnad_mem_free(struct bnad *bnad,
1324 dma_free_coherent(&bnad->pcidev->dev,
1336 bnad_mem_alloc(struct bnad *bnad,
1356 dma_alloc_coherent(&bnad->pcidev->dev,
1378 bnad_mem_free(bnad, mem_info);
1384 bnad_mbox_irq_free(struct bnad *bnad)
1389 spin_lock_irqsave(&bnad->bna_lock, flags);
1390 bnad_disable_mbox_irq(bnad);
1391 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1393 irq = BNAD_GET_MBOX_IRQ(bnad);
1394 free_irq(irq, bnad);
1403 bnad_mbox_irq_alloc(struct bnad *bnad)
1410 spin_lock_irqsave(&bnad->bna_lock, flags);
1411 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1413 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1417 irq = bnad->pcidev->irq;
1421 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1422 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1428 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1430 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1433 bnad->mbox_irq_name, bnad);
1439 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1447 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1454 spin_lock_irqsave(&bnad->bna_lock, flags);
1455 cfg_flags = bnad->cfg_flags;
1456 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1473 (bnad->num_tx * bnad->num_txq_per_tx) +
1509 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1520 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1528 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1537 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1539 err = request_irq(bnad->msix_table[vector_num].vector,
1551 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1559 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1570 free_irq(bnad->msix_table[vector_num].vector,
1579 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1589 bnad->netdev->name,
1591 err = request_irq(bnad->msix_table[vector_num].vector,
1603 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1609 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1615 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1617 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1623 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1630 err = bnad_mem_alloc(bnad,
1633 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1641 bnad_tx_res_free(bnad, res_info);
1647 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1653 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1655 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1661 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1669 err = bnad_mem_alloc(bnad,
1672 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1680 bnad_rx_res_free(bnad, res_info);
1689 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1692 spin_lock_irqsave(&bnad->bna_lock, flags);
1693 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1694 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1700 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1703 spin_lock_irqsave(&bnad->bna_lock, flags);
1704 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1705 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1711 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1714 spin_lock_irqsave(&bnad->bna_lock, flags);
1715 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1716 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1722 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1725 spin_lock_irqsave(&bnad->bna_lock, flags);
1726 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1727 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1731 * All timer routines use bnad->bna_lock to protect against
1744 struct bnad *bnad = from_timer(bnad, t, dim_timer);
1750 if (!netif_carrier_ok(bnad->netdev))
1753 spin_lock_irqsave(&bnad->bna_lock, flags);
1754 for (i = 0; i < bnad->num_rx; i++) {
1755 rx_info = &bnad->rx_info[i];
1758 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1767 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1768 mod_timer(&bnad->dim_timer,
1770 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1777 struct bnad *bnad = from_timer(bnad, t, stats_timer);
1780 if (!netif_running(bnad->netdev) ||
1781 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1784 spin_lock_irqsave(&bnad->bna_lock, flags);
1785 bna_hw_stats_get(&bnad->bna);
1786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1791 * Called with bnad->bna_lock held
1794 bnad_dim_timer_start(struct bnad *bnad)
1796 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1797 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1798 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1799 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1800 mod_timer(&bnad->dim_timer,
1807 * Called with mutex_lock(&bnad->conf_mutex) held
1810 bnad_stats_timer_start(struct bnad *bnad)
1814 spin_lock_irqsave(&bnad->bna_lock, flags);
1815 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1816 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1817 mod_timer(&bnad->stats_timer,
1820 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1825 * Called with mutex_lock(&bnad->conf_mutex) held
1828 bnad_stats_timer_stop(struct bnad *bnad)
1833 spin_lock_irqsave(&bnad->bna_lock, flags);
1834 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1836 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1838 del_timer_sync(&bnad->stats_timer);
1860 struct bnad *bnad = rx_ctrl->bnad;
1865 if (!netif_carrier_ok(bnad->netdev))
1868 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1884 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1890 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1891 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1892 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1898 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1903 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1904 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1909 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1911 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1912 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1918 init_completion(&bnad->bnad_completions.tx_comp);
1919 spin_lock_irqsave(&bnad->bna_lock, flags);
1921 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1922 wait_for_completion(&bnad->bnad_completions.tx_comp);
1925 bnad_tx_msix_unregister(bnad, tx_info,
1926 bnad->num_txq_per_tx);
1928 spin_lock_irqsave(&bnad->bna_lock, flags);
1930 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1935 bnad_tx_res_free(bnad, res_info);
1940 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1943 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1944 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1947 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1962 tx_config->num_txq = bnad->num_txq_per_tx;
1963 tx_config->txq_depth = bnad->txq_depth;
1965 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1968 spin_lock_irqsave(&bnad->bna_lock, flags);
1969 bna_tx_res_req(bnad->num_txq_per_tx,
1970 bnad->txq_depth, res_info);
1971 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1975 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1976 bnad->txq_depth));
1979 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1984 spin_lock_irqsave(&bnad->bna_lock, flags);
1985 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1987 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1999 err = bnad_tx_msix_register(bnad, tx_info,
2000 tx_id, bnad->num_txq_per_tx);
2005 spin_lock_irqsave(&bnad->bna_lock, flags);
2007 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2012 spin_lock_irqsave(&bnad->bna_lock, flags);
2014 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2018 bnad_tx_res_free(bnad, res_info);
2023 /* bnad decides the configuration */
2025 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2029 rx_config->num_paths = bnad->num_rxp_per_rx;
2030 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2032 if (bnad->num_rxp_per_rx > 1) {
2040 bnad->num_rxp_per_rx - 1;
2049 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2059 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2068 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2073 rx_config->q0_depth = bnad->rxq_depth;
2078 rx_config->q1_depth = bnad->rxq_depth;
2083 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2088 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2090 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2093 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2094 rx_info->rx_ctrl[i].bnad = bnad;
2097 /* Called with mutex_lock(&bnad->conf_mutex) held */
2099 bnad_reinit_rx(struct bnad *bnad)
2101 struct net_device *netdev = bnad->netdev;
2107 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2108 if (!bnad->rx_info[rx_id].rx)
2110 bnad_destroy_rx(bnad, rx_id);
2113 spin_lock_irqsave(&bnad->bna_lock, flags);
2114 bna_enet_mtu_set(&bnad->bna.enet,
2115 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2116 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2118 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2120 current_err = bnad_setup_rx(bnad, rx_id);
2128 if (bnad->rx_info[0].rx && !err) {
2129 bnad_restore_vlans(bnad, 0);
2130 bnad_enable_default_bcast(bnad);
2131 spin_lock_irqsave(&bnad->bna_lock, flags);
2132 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2133 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2142 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2144 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2145 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2146 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2154 spin_lock_irqsave(&bnad->bna_lock, flags);
2155 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2156 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2157 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2160 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2162 del_timer_sync(&bnad->dim_timer);
2165 init_completion(&bnad->bnad_completions.rx_comp);
2166 spin_lock_irqsave(&bnad->bna_lock, flags);
2168 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2169 wait_for_completion(&bnad->bnad_completions.rx_comp);
2172 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2174 bnad_napi_delete(bnad, rx_id);
2176 spin_lock_irqsave(&bnad->bna_lock, flags);
2181 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2183 bnad_rx_res_free(bnad, res_info);
2186 /* Called with mutex_lock(&bnad->conf_mutex) held */
2188 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2191 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2192 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2195 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2211 bnad_init_rx_config(bnad, rx_config);
2214 spin_lock_irqsave(&bnad->bna_lock, flags);
2216 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2233 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2237 bnad_rx_ctrl_init(bnad, rx_id);
2240 spin_lock_irqsave(&bnad->bna_lock, flags);
2241 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2245 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2249 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2258 bnad_napi_add(bnad, rx_id);
2262 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2268 spin_lock_irqsave(&bnad->bna_lock, flags);
2271 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2272 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2278 bnad_dim_timer_start(bnad);
2282 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2287 bnad_destroy_rx(bnad, rx_id);
2291 /* Called with conf_lock & bnad->bna_lock held */
2293 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2297 tx_info = &bnad->tx_info[0];
2301 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2304 /* Called with conf_lock & bnad->bna_lock held */
2306 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2311 for (i = 0; i < bnad->num_rx; i++) {
2312 rx_info = &bnad->rx_info[i];
2316 bnad->rx_coalescing_timeo);
2321 * Called with bnad->bna_lock held
2324 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2332 if (!bnad->rx_info[0].rx)
2335 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2344 bnad_enable_default_bcast(struct bnad *bnad)
2346 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2350 init_completion(&bnad->bnad_completions.mcast_comp);
2352 spin_lock_irqsave(&bnad->bna_lock, flags);
2355 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2358 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2362 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2368 /* Called with mutex_lock(&bnad->conf_mutex) held */
2370 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2375 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2376 spin_lock_irqsave(&bnad->bna_lock, flags);
2377 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2378 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2384 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2388 for (i = 0; i < bnad->num_rx; i++) {
2389 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2390 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2391 stats->rx_packets += bnad->rx_info[i].
2393 stats->rx_bytes += bnad->rx_info[i].
2395 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2396 bnad->rx_info[i].rx_ctrl[j].ccb->
2399 bnad->rx_info[i].rx_ctrl[j].
2402 bnad->rx_info[i].rx_ctrl[j].
2408 for (i = 0; i < bnad->num_tx; i++) {
2409 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2410 if (bnad->tx_info[i].tcb[j]) {
2412 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2414 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2424 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2430 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2449 bmap = bna_rx_rid_mask(&bnad->bna);
2453 bnad->stats.bna_stats->
2462 bnad_mbox_irq_sync(struct bnad *bnad)
2467 spin_lock_irqsave(&bnad->bna_lock, flags);
2468 if (bnad->cfg_flags & BNAD_CF_MSIX)
2469 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2471 irq = bnad->pcidev->irq;
2472 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2479 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2485 BNAD_UPDATE_CTR(bnad, tso_err);
2503 BNAD_UPDATE_CTR(bnad, tso4);
2506 BNAD_UPDATE_CTR(bnad, tso6);
2514 * Called with bnad->bna_lock held, because of cfg_flags
2518 bnad_q_num_init(struct bnad *bnad)
2525 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2528 bnad->num_rx = 1;
2529 bnad->num_tx = 1;
2530 bnad->num_rxp_per_rx = rxps;
2531 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2538 * Called with bnad->bna_lock held b'cos of cfg_flags access
2541 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2543 bnad->num_txq_per_tx = 1;
2544 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2546 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2547 bnad->num_rxp_per_rx = msix_vectors -
2548 (bnad->num_tx * bnad->num_txq_per_tx) -
2551 bnad->num_rxp_per_rx = 1;
2556 bnad_ioceth_disable(struct bnad *bnad)
2561 spin_lock_irqsave(&bnad->bna_lock, flags);
2562 init_completion(&bnad->bnad_completions.ioc_comp);
2563 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2564 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2566 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2569 err = bnad->bnad_completions.ioc_comp_status;
2574 bnad_ioceth_enable(struct bnad *bnad)
2579 spin_lock_irqsave(&bnad->bna_lock, flags);
2580 init_completion(&bnad->bnad_completions.ioc_comp);
2581 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2582 bna_ioceth_enable(&bnad->bna.ioceth);
2583 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2585 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2588 err = bnad->bnad_completions.ioc_comp_status;
2595 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2601 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2606 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2612 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2619 bnad_res_free(bnad, res_info, res_val_max);
2625 bnad_enable_msix(struct bnad *bnad)
2630 spin_lock_irqsave(&bnad->bna_lock, flags);
2631 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2632 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2635 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2637 if (bnad->msix_table)
2640 bnad->msix_table =
2641 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2643 if (!bnad->msix_table)
2646 for (i = 0; i < bnad->msix_num; i++)
2647 bnad->msix_table[i].entry = i;
2649 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2650 1, bnad->msix_num);
2653 } else if (ret < bnad->msix_num) {
2654 dev_warn(&bnad->pcidev->dev,
2656 ret, bnad->msix_num);
2658 spin_lock_irqsave(&bnad->bna_lock, flags);
2660 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2662 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2664 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2667 if (bnad->msix_num > ret) {
2668 pci_disable_msix(bnad->pcidev);
2673 pci_intx(bnad->pcidev, 0);
2678 dev_warn(&bnad->pcidev->dev,
2681 kfree(bnad->msix_table);
2682 bnad->msix_table = NULL;
2683 bnad->msix_num = 0;
2684 spin_lock_irqsave(&bnad->bna_lock, flags);
2685 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2686 bnad_q_num_init(bnad);
2687 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2691 bnad_disable_msix(struct bnad *bnad)
2696 spin_lock_irqsave(&bnad->bna_lock, flags);
2697 cfg_flags = bnad->cfg_flags;
2698 if (bnad->cfg_flags & BNAD_CF_MSIX)
2699 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2700 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2703 pci_disable_msix(bnad->pcidev);
2704 kfree(bnad->msix_table);
2705 bnad->msix_table = NULL;
2714 struct bnad *bnad = netdev_priv(netdev);
2718 mutex_lock(&bnad->conf_mutex);
2721 err = bnad_setup_tx(bnad, 0);
2726 err = bnad_setup_rx(bnad, 0);
2734 spin_lock_irqsave(&bnad->bna_lock, flags);
2735 bna_enet_mtu_set(&bnad->bna.enet,
2736 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2737 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2738 bna_enet_enable(&bnad->bna.enet);
2739 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2742 bnad_enable_default_bcast(bnad);
2745 bnad_restore_vlans(bnad, 0);
2748 spin_lock_irqsave(&bnad->bna_lock, flags);
2749 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2750 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2753 bnad_stats_timer_start(bnad);
2755 mutex_unlock(&bnad->conf_mutex);
2760 bnad_destroy_tx(bnad, 0);
2763 mutex_unlock(&bnad->conf_mutex);
2770 struct bnad *bnad = netdev_priv(netdev);
2773 mutex_lock(&bnad->conf_mutex);
2776 bnad_stats_timer_stop(bnad);
2778 init_completion(&bnad->bnad_completions.enet_comp);
2780 spin_lock_irqsave(&bnad->bna_lock, flags);
2781 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2783 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2785 wait_for_completion(&bnad->bnad_completions.enet_comp);
2787 bnad_destroy_tx(bnad, 0);
2788 bnad_destroy_rx(bnad, 0);
2791 bnad_mbox_irq_sync(bnad);
2793 mutex_unlock(&bnad->conf_mutex);
2801 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2812 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2821 if (unlikely(gso_size > bnad->netdev->mtu)) {
2822 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2828 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2834 if (bnad_tso_prepare(bnad, skb)) {
2835 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2847 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2848 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2870 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2874 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2883 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2887 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2892 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2912 struct bnad *bnad = netdev_priv(netdev);
2928 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2933 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2938 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2942 tcb = bnad->tx_info[0].tcb[txq_id];
2950 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2963 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2972 sent = bnad_txcmpl_process(bnad, tcb);
2979 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2989 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2993 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3001 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3013 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3015 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3017 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3031 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3034 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3049 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3051 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3053 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3056 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3070 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3072 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3098 struct bnad *bnad = netdev_priv(netdev);
3101 spin_lock_irqsave(&bnad->bna_lock, flags);
3103 bnad_netdev_qstats_fill(bnad, stats);
3104 bnad_netdev_hwstats_fill(bnad, stats);
3106 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3110 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3112 struct net_device *netdev = bnad->netdev;
3119 if (netdev_uc_empty(bnad->netdev)) {
3120 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3124 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3137 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3147 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3148 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3152 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3154 struct net_device *netdev = bnad->netdev;
3165 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3177 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3186 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3187 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3193 struct bnad *bnad = netdev_priv(netdev);
3197 spin_lock_irqsave(&bnad->bna_lock, flags);
3199 if (bnad->rx_info[0].rx == NULL) {
3200 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3204 /* clear bnad flags to update it with new settings */
3205 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3211 bnad->cfg_flags |= BNAD_CF_PROMISC;
3213 bnad_set_rx_mcast_fltr(bnad);
3215 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3218 bnad_set_rx_ucast_fltr(bnad);
3220 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3226 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3228 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3240 struct bnad *bnad = netdev_priv(netdev);
3244 spin_lock_irqsave(&bnad->bna_lock, flags);
3246 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3250 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3256 bnad_mtu_set(struct bnad *bnad, int frame_size)
3260 init_completion(&bnad->bnad_completions.mtu_comp);
3262 spin_lock_irqsave(&bnad->bna_lock, flags);
3263 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3264 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3266 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3268 return bnad->bnad_completions.mtu_comp_status;
3275 struct bnad *bnad = netdev_priv(netdev);
3278 mutex_lock(&bnad->conf_mutex);
3287 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3288 netif_running(bnad->netdev)) {
3292 bnad_reinit_rx(bnad);
3295 err = bnad_mtu_set(bnad, new_frame);
3299 mutex_unlock(&bnad->conf_mutex);
3306 struct bnad *bnad = netdev_priv(netdev);
3309 if (!bnad->rx_info[0].rx)
3312 mutex_lock(&bnad->conf_mutex);
3314 spin_lock_irqsave(&bnad->bna_lock, flags);
3315 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3316 set_bit(vid, bnad->active_vlans);
3317 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3319 mutex_unlock(&bnad->conf_mutex);
3327 struct bnad *bnad = netdev_priv(netdev);
3330 if (!bnad->rx_info[0].rx)
3333 mutex_lock(&bnad->conf_mutex);
3335 spin_lock_irqsave(&bnad->bna_lock, flags);
3336 clear_bit(vid, bnad->active_vlans);
3337 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3338 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3340 mutex_unlock(&bnad->conf_mutex);
3347 struct bnad *bnad = netdev_priv(dev);
3353 spin_lock_irqsave(&bnad->bna_lock, flags);
3356 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3358 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3360 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3370 struct bnad *bnad = netdev_priv(netdev);
3376 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3377 bna_intx_disable(&bnad->bna, curr_mask);
3378 bnad_isr(bnad->pcidev->irq, netdev);
3379 bna_intx_enable(&bnad->bna, curr_mask);
3387 for (i = 0; i < bnad->num_rx; i++) {
3388 rx_info = &bnad->rx_info[i];
3391 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3394 bnad_netif_rx_schedule_poll(bnad,
3420 bnad_netdev_init(struct bnad *bnad)
3422 struct net_device *netdev = bnad->netdev;
3436 netdev->mem_start = bnad->mmio_start;
3437 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3448 * 1. Initialize the bnad structure
3454 bnad_init(struct bnad *bnad,
3462 bnad->netdev = netdev;
3463 bnad->pcidev = pdev;
3464 bnad->mmio_start = pci_resource_start(pdev, 0);
3465 bnad->mmio_len = pci_resource_len(pdev, 0);
3466 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3467 if (!bnad->bar0) {
3471 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3472 (unsigned long long) bnad->mmio_len);
3474 spin_lock_irqsave(&bnad->bna_lock, flags);
3476 bnad->cfg_flags = BNAD_CF_MSIX;
3478 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3480 bnad_q_num_init(bnad);
3481 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3483 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3484 (bnad->num_rx * bnad->num_rxp_per_rx) +
3487 bnad->txq_depth = BNAD_TXQ_DEPTH;
3488 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3490 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3491 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3493 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3494 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3495 if (!bnad->work_q) {
3496 iounmap(bnad->bar0);
3509 bnad_uninit(struct bnad *bnad)
3511 if (bnad->work_q) {
3512 destroy_workqueue(bnad->work_q);
3513 bnad->work_q = NULL;
3516 if (bnad->bar0)
3517 iounmap(bnad->bar0);
3527 bnad_lock_init(struct bnad *bnad)
3529 spin_lock_init(&bnad->bna_lock);
3530 mutex_init(&bnad->conf_mutex);
3534 bnad_lock_uninit(struct bnad *bnad)
3536 mutex_destroy(&bnad->conf_mutex);
3541 bnad_pci_init(struct bnad *bnad, struct pci_dev *pdev)
3577 struct bnad *bnad;
3592 * Allocates sizeof(struct net_device + struct bnad)
3593 * bnad = netdev->priv
3595 netdev = alloc_etherdev(sizeof(struct bnad));
3600 bnad = netdev_priv(netdev);
3601 bnad_lock_init(bnad);
3602 bnad->id = atomic_inc_return(&bna_id) - 1;
3604 mutex_lock(&bnad->conf_mutex);
3606 err = bnad_pci_init(bnad, pdev);
3611 * Initialize bnad structure
3614 err = bnad_init(bnad, pdev, netdev);
3619 bnad_netdev_init(bnad);
3626 bnad_debugfs_init(bnad);
3629 spin_lock_irqsave(&bnad->bna_lock, flags);
3630 bna_res_req(&bnad->res_info[0]);
3631 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3634 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3638 bna = &bnad->bna;
3641 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3642 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3643 pcidev_info.device_id = bnad->pcidev->device;
3644 pcidev_info.pci_bar_kva = bnad->bar0;
3646 spin_lock_irqsave(&bnad->bna_lock, flags);
3647 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3650 bnad->stats.bna_stats = &bna->stats;
3652 bnad_enable_msix(bnad);
3653 err = bnad_mbox_irq_alloc(bnad);
3658 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3659 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3660 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3661 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3669 err = bnad_ioceth_enable(bnad);
3675 spin_lock_irqsave(&bnad->bna_lock, flags);
3678 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3684 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3688 spin_lock_irqsave(&bnad->bna_lock, flags);
3689 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3690 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3692 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3698 spin_lock_irqsave(&bnad->bna_lock, flags);
3699 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3700 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3703 spin_lock_irqsave(&bnad->bna_lock, flags);
3704 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3705 bnad_set_netdev_perm_addr(bnad);
3706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3708 mutex_unlock(&bnad->conf_mutex);
3716 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3721 mutex_unlock(&bnad->conf_mutex);
3725 mutex_lock(&bnad->conf_mutex);
3726 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3728 bnad_ioceth_disable(bnad);
3729 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3730 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3731 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3732 spin_lock_irqsave(&bnad->bna_lock, flags);
3734 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3735 bnad_mbox_irq_free(bnad);
3736 bnad_disable_msix(bnad);
3738 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3740 /* Remove the debugfs node for this bnad */
3741 kfree(bnad->regdata);
3742 bnad_debugfs_uninit(bnad);
3743 bnad_uninit(bnad);
3747 mutex_unlock(&bnad->conf_mutex);
3748 bnad_lock_uninit(bnad);
3757 struct bnad *bnad;
3764 bnad = netdev_priv(netdev);
3765 bna = &bnad->bna;
3767 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3770 mutex_lock(&bnad->conf_mutex);
3771 bnad_ioceth_disable(bnad);
3772 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3773 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3774 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3775 spin_lock_irqsave(&bnad->bna_lock, flags);
3777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3779 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3780 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3781 bnad_mbox_irq_free(bnad);
3782 bnad_disable_msix(bnad);
3784 mutex_unlock(&bnad->conf_mutex);
3785 bnad_lock_uninit(bnad);
3786 /* Remove the debugfs node for this bnad */
3787 kfree(bnad->regdata);
3788 bnad_debugfs_uninit(bnad);
3789 bnad_uninit(bnad);
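
Entries 430-442 above show the Rx refill path's allocate-then-map sequence: allocate an skb, DMA-map its data, and bump a failure counter if either step fails. A minimal sketch of that pattern, assuming the standard kernel DMA API; the function name, return codes, and the dev_kfree_skb_any() cleanup are illustrative stand-ins, not the driver's exact code:

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include "bnad.h"	/* struct bnad, BNAD_UPDATE_CTR (per entry 23) */

/* Illustrative refill step mirroring listing entries 430-442. */
static int bnad_rxq_refill_one_sketch(struct bnad *bnad, u32 buff_sz,
				      dma_addr_t *dma_addr_out)
{
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	/* Allocate an skb sized for the Rx buffer (entry 430). */
	skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
	if (!skb) {
		BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);	/* entry 433 */
		return -ENOMEM;
	}

	/* Map the data for device DMA and check the mapping (entries 438-442). */
	dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
				  buff_sz, DMA_FROM_DEVICE);
	if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
		dev_kfree_skb_any(skb);	/* cleanup step assumed, not listed */
		BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);	/* entry 442 */
		return -EIO;
	}

	*dma_addr_out = dma_addr;
	return 0;
}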
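
The timer entries (1689-1727) all follow the same idiom: recover the enclosing bnad with from_timer() and run the handler body under bna_lock. A sketch reassembled from entries 1689-1694; only the function name is a stand-in:

#include <linux/timer.h>
#include "bnad.h"	/* struct bnad, bfa_nw_ioc_timeout() via the bna headers */

/* Timer handler idiom from listing entries 1689-1694. */
static void bnad_ioc_timeout_sketch(struct timer_list *t)
{
	/* from_timer() maps the timer_list back to its containing bnad. */
	struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);
}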
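
Entries 2561-2569 show the control-path handshake used throughout the driver: arm a completion and issue the bna call under bna_lock with interrupts off, then sleep on the completion only after the lock is dropped. A sketch under the same assumptions; timeout_ms is a stand-in for the driver's timeout constant, which the listing elides:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include "bnad.h"	/* struct bnad, bna_ioceth_disable() via the bna headers */

/* Control-path handshake from listing entries 2561-2569. */
static int bnad_ioceth_disable_sketch(struct bnad *bnad, unsigned int timeout_ms)
{
	unsigned long flags;

	spin_lock_irqsave(&bnad->bna_lock, flags);
	init_completion(&bnad->bnad_completions.ioc_comp);
	bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
	spin_unlock_irqrestore(&bnad->bna_lock, flags);

	/* The sleepable wait happens outside the spinlock. */
	wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
				    msecs_to_jiffies(timeout_ms));

	return bnad->bnad_completions.ioc_comp_status;
}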