Lines matching refs:bnad
23 #include "bnad.h"
73 bnad_cq_cleanup(struct bnad *bnad, struct bna_ccb *ccb)
89 bnad_tx_buff_unmap(struct bnad *bnad,
103 dma_unmap_single(&bnad->pcidev->dev,
118 dma_unmap_page(&bnad->pcidev->dev,
137 bnad_txq_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
147 bnad_tx_buff_unmap(bnad, unmap_q, tcb->q_depth, i);
159 bnad_txcmpl_process(struct bnad *bnad, struct bna_tcb *tcb)
190 cons = bnad_tx_buff_unmap(bnad, unmap_q, q_depth, cons);
204 bnad_tx_complete(struct bnad *bnad, struct bna_tcb *tcb)
206 struct net_device *netdev = bnad->netdev;
212 sent = bnad_txcmpl_process(bnad, tcb);
220 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
239 struct bnad *bnad = tcb->bnad;
241 bnad_tx_complete(bnad, tcb);
247 bnad_rxq_alloc_uninit(struct bnad *bnad, struct bna_rcb *rcb)
259 bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
264 bnad_rxq_alloc_uninit(bnad, rcb);
292 bnad_rxq_cleanup_page(struct bnad *bnad, struct bnad_rx_unmap *unmap)
297 dma_unmap_page(&bnad->pcidev->dev,
307 bnad_rxq_cleanup_skb(struct bnad *bnad, struct bnad_rx_unmap *unmap)
312 dma_unmap_single(&bnad->pcidev->dev,
322 bnad_rxq_cleanup(struct bnad *bnad, struct bna_rcb *rcb)
331 bnad_rxq_cleanup_skb(bnad, unmap);
333 bnad_rxq_cleanup_page(bnad, unmap);
335 bnad_rxq_alloc_uninit(bnad, rcb);
339 bnad_rxq_refill_page(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
370 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
375 dma_addr = dma_map_page(&bnad->pcidev->dev, page, page_offset,
377 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
379 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
413 bnad_rxq_refill_skb(struct bnad *bnad, struct bna_rcb *rcb, u32 nalloc)
430 skb = netdev_alloc_skb_ip_align(bnad->netdev, buff_sz);
433 BNAD_UPDATE_CTR(bnad, rxbuf_alloc_failed);
438 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
440 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
442 BNAD_UPDATE_CTR(bnad, rxbuf_map_failed);
469 bnad_rxq_post(struct bnad *bnad, struct bna_rcb *rcb)
479 bnad_rxq_refill_skb(bnad, rcb, to_alloc);
481 bnad_rxq_refill_page(bnad, rcb, to_alloc);
499 bnad_cq_drop_packet(struct bnad *bnad, struct bna_rcb *rcb,
512 bnad_rxq_cleanup_skb(bnad, unmap);
514 bnad_rxq_cleanup_page(bnad, unmap);
522 struct bnad *bnad;
533 bnad = rcb->bnad;
547 dma_unmap_page(&bnad->pcidev->dev,
570 bnad_cq_setup_skb(struct bnad *bnad, struct sk_buff *skb,
575 dma_unmap_single(&bnad->pcidev->dev,
580 skb->protocol = eth_type_trans(skb, bnad->netdev);
587 bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
600 prefetch(bnad->netdev);
683 bnad_cq_drop_packet(bnad, rcb, sop_ci, nvecs);
690 bnad_cq_setup_skb(bnad, skb, unmap, len);
701 ((bnad->netdev->features & NETIF_F_RXCSUM) &&
711 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
732 bnad_rxq_post(bnad, ccb->rcb[0]);
734 bnad_rxq_post(bnad, ccb->rcb[1]);
740 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
759 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
773 struct bnad *bnad = (struct bnad *)data;
775 spin_lock_irqsave(&bnad->bna_lock, flags);
776 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
777 spin_unlock_irqrestore(&bnad->bna_lock, flags);
781 bna_intr_status_get(&bnad->bna, intr_status);
783 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
784 bna_mbox_handler(&bnad->bna, intr_status);
786 spin_unlock_irqrestore(&bnad->bna_lock, flags);
797 struct bnad *bnad = (struct bnad *)data;
802 spin_lock_irqsave(&bnad->bna_lock, flags);
803 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))) {
804 spin_unlock_irqrestore(&bnad->bna_lock, flags);
808 bna_intr_status_get(&bnad->bna, intr_status);
811 spin_unlock_irqrestore(&bnad->bna_lock, flags);
815 if (BNA_IS_MBOX_ERR_INTR(&bnad->bna, intr_status))
816 bna_mbox_handler(&bnad->bna, intr_status);
818 spin_unlock_irqrestore(&bnad->bna_lock, flags);
825 for (i = 0; i < bnad->num_tx; i++) {
826 for (j = 0; j < bnad->num_txq_per_tx; j++) {
827 tcb = bnad->tx_info[i].tcb[j];
829 bnad_tx_complete(bnad, bnad->tx_info[i].tcb[j]);
833 for (i = 0; i < bnad->num_rx; i++) {
834 rx_info = &bnad->rx_info[i];
837 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
840 bnad_netif_rx_schedule_poll(bnad,
852 bnad_enable_mbox_irq(struct bnad *bnad)
854 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
856 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
860 * Called with bnad->bna_lock held b'cos of
861 * bnad->cfg_flags access.
864 bnad_disable_mbox_irq(struct bnad *bnad)
866 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
868 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
872 bnad_set_netdev_perm_addr(struct bnad *bnad)
874 struct net_device *netdev = bnad->netdev;
876 ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
878 ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
885 bnad_cb_mbox_intr_enable(struct bnad *bnad)
887 bnad_enable_mbox_irq(bnad);
891 bnad_cb_mbox_intr_disable(struct bnad *bnad)
893 bnad_disable_mbox_irq(bnad);
897 bnad_cb_ioceth_ready(struct bnad *bnad)
899 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
900 complete(&bnad->bnad_completions.ioc_comp);
904 bnad_cb_ioceth_failed(struct bnad *bnad)
906 bnad->bnad_completions.ioc_comp_status = BNA_CB_FAIL;
907 complete(&bnad->bnad_completions.ioc_comp);
911 bnad_cb_ioceth_disabled(struct bnad *bnad)
913 bnad->bnad_completions.ioc_comp_status = BNA_CB_SUCCESS;
914 complete(&bnad->bnad_completions.ioc_comp);
920 struct bnad *bnad = (struct bnad *)arg;
922 netif_carrier_off(bnad->netdev);
923 complete(&bnad->bnad_completions.enet_comp);
927 bnad_cb_ethport_link_status(struct bnad *bnad,
935 if (!test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
936 BNAD_UPDATE_CTR(bnad, cee_toggle);
937 set_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
939 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags))
940 BNAD_UPDATE_CTR(bnad, cee_toggle);
941 clear_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags);
945 if (!netif_carrier_ok(bnad->netdev)) {
947 netdev_info(bnad->netdev, "link up\n");
948 netif_carrier_on(bnad->netdev);
949 BNAD_UPDATE_CTR(bnad, link_toggle);
950 for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
951 for (tcb_id = 0; tcb_id < bnad->num_txq_per_tx;
954 bnad->tx_info[tx_id].tcb[tcb_id];
967 bnad->netdev,
969 BNAD_UPDATE_CTR(bnad,
973 bnad->netdev,
975 BNAD_UPDATE_CTR(bnad,
982 if (netif_carrier_ok(bnad->netdev)) {
983 netdev_info(bnad->netdev, "link down\n");
984 netif_carrier_off(bnad->netdev);
985 BNAD_UPDATE_CTR(bnad, link_toggle);
993 struct bnad *bnad = (struct bnad *)arg;
995 complete(&bnad->bnad_completions.tx_comp);
999 bnad_cb_tcb_setup(struct bnad *bnad, struct bna_tcb *tcb)
1009 bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
1019 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
1029 bnad_cb_ccb_destroy(struct bnad *bnad, struct bna_ccb *ccb)
1038 bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
1052 netif_stop_subqueue(bnad->netdev, txq_id);
1057 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
1074 if (netif_carrier_ok(bnad->netdev)) {
1075 netif_wake_subqueue(bnad->netdev, txq_id);
1076 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
1085 if (is_zero_ether_addr(bnad->perm_addr)) {
1086 bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
1087 bnad_set_netdev_perm_addr(bnad);
1099 struct bnad *bnad = NULL;
1109 bnad = tcb->bnad;
1116 bnad_txq_cleanup(bnad, tcb);
1123 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work,
1128 spin_lock_irqsave(&bnad->bna_lock, flags);
1130 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1134 bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tx *tx)
1146 queue_delayed_work(bnad->work_q, &tx_info->tx_cleanup_work, 0);
1150 bnad_cb_rx_stall(struct bnad *bnad, struct bna_rx *rx)
1179 struct bnad *bnad = NULL;
1189 bnad = rx_ctrl->ccb->bnad;
1197 bnad_cq_cleanup(bnad, rx_ctrl->ccb);
1198 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[0]);
1200 bnad_rxq_cleanup(bnad, rx_ctrl->ccb->rcb[1]);
1203 spin_lock_irqsave(&bnad->bna_lock, flags);
1205 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1209 bnad_cb_rx_cleanup(struct bnad *bnad, struct bna_rx *rx)
1228 queue_work(bnad->work_q, &rx_info->rx_cleanup_work);
1232 bnad_cb_rx_post(struct bnad *bnad, struct bna_rx *rx)
1253 bnad_rxq_alloc_init(bnad, rcb);
1256 bnad_rxq_post(bnad, rcb);
1264 struct bnad *bnad = (struct bnad *)arg;
1266 complete(&bnad->bnad_completions.rx_comp);
1270 bnad_cb_rx_mcast_add(struct bnad *bnad, struct bna_rx *rx)
1272 bnad->bnad_completions.mcast_comp_status = BNA_CB_SUCCESS;
1273 complete(&bnad->bnad_completions.mcast_comp);
1277 bnad_cb_stats_get(struct bnad *bnad, enum bna_cb_status status,
1281 BNAD_UPDATE_CTR(bnad, hw_stats_updates);
1283 if (!netif_running(bnad->netdev) ||
1284 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1287 mod_timer(&bnad->stats_timer,
1292 bnad_cb_enet_mtu_set(struct bnad *bnad)
1294 bnad->bnad_completions.mtu_comp_status = BNA_CB_SUCCESS;
1295 complete(&bnad->bnad_completions.mtu_comp);
1311 bnad_mem_free(struct bnad *bnad,
1325 dma_free_coherent(&bnad->pcidev->dev,
1337 bnad_mem_alloc(struct bnad *bnad,
1357 dma_alloc_coherent(&bnad->pcidev->dev,
1379 bnad_mem_free(bnad, mem_info);
1385 bnad_mbox_irq_free(struct bnad *bnad)
1390 spin_lock_irqsave(&bnad->bna_lock, flags);
1391 bnad_disable_mbox_irq(bnad);
1392 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1394 irq = BNAD_GET_MBOX_IRQ(bnad);
1395 free_irq(irq, bnad);
1404 bnad_mbox_irq_alloc(struct bnad *bnad)
1411 spin_lock_irqsave(&bnad->bna_lock, flags);
1412 if (bnad->cfg_flags & BNAD_CF_MSIX) {
1414 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
1418 irq = bnad->pcidev->irq;
1422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1423 sprintf(bnad->mbox_irq_name, "%s", BNAD_NAME);
1429 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1431 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1434 bnad->mbox_irq_name, bnad);
1440 bnad_txrx_irq_free(struct bnad *bnad, struct bna_intr_info *intr_info)
1448 bnad_txrx_irq_alloc(struct bnad *bnad, enum bnad_intr_source src,
1455 spin_lock_irqsave(&bnad->bna_lock, flags);
1456 cfg_flags = bnad->cfg_flags;
1457 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1474 (bnad->num_tx * bnad->num_txq_per_tx) +
1510 bnad_tx_msix_unregister(struct bnad *bnad, struct bnad_tx_info *tx_info,
1521 free_irq(bnad->msix_table[vector_num].vector, tx_info->tcb[i]);
1529 bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info,
1538 sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name,
1540 err = request_irq(bnad->msix_table[vector_num].vector,
1552 bnad_tx_msix_unregister(bnad, tx_info, (i - 1));
1560 bnad_rx_msix_unregister(struct bnad *bnad, struct bnad_rx_info *rx_info,
1571 free_irq(bnad->msix_table[vector_num].vector,
1580 bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info,
1590 bnad->netdev->name,
1592 err = request_irq(bnad->msix_table[vector_num].vector,
1604 bnad_rx_msix_unregister(bnad, rx_info, (i - 1));
1610 bnad_tx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1616 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1618 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1624 bnad_tx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1631 err = bnad_mem_alloc(bnad,
1634 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_TX, tx_id,
1642 bnad_tx_res_free(bnad, res_info);
1648 bnad_rx_res_free(struct bnad *bnad, struct bna_res_info *res_info)
1654 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
1656 bnad_txrx_irq_free(bnad, &res_info[i].res_u.intr_info);
1662 bnad_rx_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
1670 err = bnad_mem_alloc(bnad,
1673 err = bnad_txrx_irq_alloc(bnad, BNAD_INTR_RX, rx_id,
1681 bnad_rx_res_free(bnad, res_info);
1690 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.ioc_timer);
1693 spin_lock_irqsave(&bnad->bna_lock, flags);
1694 bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
1695 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1701 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.hb_timer);
1704 spin_lock_irqsave(&bnad->bna_lock, flags);
1705 bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
1706 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1712 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.iocpf_timer);
1715 spin_lock_irqsave(&bnad->bna_lock, flags);
1716 bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
1717 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1723 struct bnad *bnad = from_timer(bnad, t, bna.ioceth.ioc.sem_timer);
1726 spin_lock_irqsave(&bnad->bna_lock, flags);
1727 bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
1728 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1732 * All timer routines use bnad->bna_lock to protect against
1745 struct bnad *bnad = from_timer(bnad, t, dim_timer);
1751 if (!netif_carrier_ok(bnad->netdev))
1754 spin_lock_irqsave(&bnad->bna_lock, flags);
1755 for (i = 0; i < bnad->num_rx; i++) {
1756 rx_info = &bnad->rx_info[i];
1759 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
1768 if (test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags))
1769 mod_timer(&bnad->dim_timer,
1771 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1778 struct bnad *bnad = from_timer(bnad, t, stats_timer);
1781 if (!netif_running(bnad->netdev) ||
1782 !test_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1785 spin_lock_irqsave(&bnad->bna_lock, flags);
1786 bna_hw_stats_get(&bnad->bna);
1787 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1792 * Called with bnad->bna_lock held
1795 bnad_dim_timer_start(struct bnad *bnad)
1797 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
1798 !test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
1799 timer_setup(&bnad->dim_timer, bnad_dim_timeout, 0);
1800 set_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
1801 mod_timer(&bnad->dim_timer,
1808 * Called with mutex_lock(&bnad->conf_mutex) held
1811 bnad_stats_timer_start(struct bnad *bnad)
1815 spin_lock_irqsave(&bnad->bna_lock, flags);
1816 if (!test_and_set_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags)) {
1817 timer_setup(&bnad->stats_timer, bnad_stats_timeout, 0);
1818 mod_timer(&bnad->stats_timer,
1821 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1826 * Called with mutex_lock(&bnad->conf_mutex) held
1829 bnad_stats_timer_stop(struct bnad *bnad)
1834 spin_lock_irqsave(&bnad->bna_lock, flags);
1835 if (test_and_clear_bit(BNAD_RF_STATS_TIMER_RUNNING, &bnad->run_flags))
1837 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1839 del_timer_sync(&bnad->stats_timer);
1861 struct bnad *bnad = rx_ctrl->bnad;
1866 if (!netif_carrier_ok(bnad->netdev))
1869 rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
1886 bnad_napi_add(struct bnad *bnad, u32 rx_id)
1892 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1893 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1894 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1900 bnad_napi_delete(struct bnad *bnad, u32 rx_id)
1905 for (i = 0; i < bnad->num_rxp_per_rx; i++)
1906 netif_napi_del(&bnad->rx_info[rx_id].rx_ctrl[i].napi);
1911 bnad_destroy_tx(struct bnad *bnad, u32 tx_id)
1913 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1914 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1920 init_completion(&bnad->bnad_completions.tx_comp);
1921 spin_lock_irqsave(&bnad->bna_lock, flags);
1923 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1924 wait_for_completion(&bnad->bnad_completions.tx_comp);
1927 bnad_tx_msix_unregister(bnad, tx_info,
1928 bnad->num_txq_per_tx);
1930 spin_lock_irqsave(&bnad->bna_lock, flags);
1932 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1937 bnad_tx_res_free(bnad, res_info);
1942 bnad_setup_tx(struct bnad *bnad, u32 tx_id)
1945 struct bnad_tx_info *tx_info = &bnad->tx_info[tx_id];
1946 struct bna_res_info *res_info = &bnad->tx_res_info[tx_id].res_info[0];
1949 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
1964 tx_config->num_txq = bnad->num_txq_per_tx;
1965 tx_config->txq_depth = bnad->txq_depth;
1967 tx_config->coalescing_timeo = bnad->tx_coalescing_timeo;
1970 spin_lock_irqsave(&bnad->bna_lock, flags);
1971 bna_tx_res_req(bnad->num_txq_per_tx,
1972 bnad->txq_depth, res_info);
1973 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1977 bnad->num_txq_per_tx, (sizeof(struct bnad_tx_unmap) *
1978 bnad->txq_depth));
1981 err = bnad_tx_res_alloc(bnad, res_info, tx_id);
1986 spin_lock_irqsave(&bnad->bna_lock, flags);
1987 tx = bna_tx_create(&bnad->bna, bnad, tx_config, &tx_cbfn, res_info,
1989 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2001 err = bnad_tx_msix_register(bnad, tx_info,
2002 tx_id, bnad->num_txq_per_tx);
2007 spin_lock_irqsave(&bnad->bna_lock, flags);
2009 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2014 spin_lock_irqsave(&bnad->bna_lock, flags);
2016 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2020 bnad_tx_res_free(bnad, res_info);
2025 /* bnad decides the configuration */
2027 bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
2031 rx_config->num_paths = bnad->num_rxp_per_rx;
2032 rx_config->coalescing_timeo = bnad->rx_coalescing_timeo;
2034 if (bnad->num_rxp_per_rx > 1) {
2042 bnad->num_rxp_per_rx - 1;
2051 rx_config->frame_size = BNAD_FRAME_SIZE(bnad->netdev->mtu);
2061 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
2070 rx_config->q0_depth = bnad->rxq_depth * rx_config->q0_num_vecs;
2075 rx_config->q0_depth = bnad->rxq_depth;
2080 rx_config->q1_depth = bnad->rxq_depth;
2085 (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
2090 bnad_rx_ctrl_init(struct bnad *bnad, u32 rx_id)
2092 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2095 for (i = 0; i < bnad->num_rxp_per_rx; i++)
2096 rx_info->rx_ctrl[i].bnad = bnad;
2099 /* Called with mutex_lock(&bnad->conf_mutex) held */
2101 bnad_reinit_rx(struct bnad *bnad)
2103 struct net_device *netdev = bnad->netdev;
2109 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2110 if (!bnad->rx_info[rx_id].rx)
2112 bnad_destroy_rx(bnad, rx_id);
2115 spin_lock_irqsave(&bnad->bna_lock, flags);
2116 bna_enet_mtu_set(&bnad->bna.enet,
2117 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2118 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2120 for (rx_id = 0; rx_id < bnad->num_rx; rx_id++) {
2122 current_err = bnad_setup_rx(bnad, rx_id);
2130 if (bnad->rx_info[0].rx && !err) {
2131 bnad_restore_vlans(bnad, 0);
2132 bnad_enable_default_bcast(bnad);
2133 spin_lock_irqsave(&bnad->bna_lock, flags);
2134 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2135 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2144 bnad_destroy_rx(struct bnad *bnad, u32 rx_id)
2146 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2147 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2148 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2156 spin_lock_irqsave(&bnad->bna_lock, flags);
2157 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED &&
2158 test_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags)) {
2159 clear_bit(BNAD_RF_DIM_TIMER_RUNNING, &bnad->run_flags);
2162 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2164 del_timer_sync(&bnad->dim_timer);
2167 init_completion(&bnad->bnad_completions.rx_comp);
2168 spin_lock_irqsave(&bnad->bna_lock, flags);
2170 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2171 wait_for_completion(&bnad->bnad_completions.rx_comp);
2174 bnad_rx_msix_unregister(bnad, rx_info, rx_config->num_paths);
2176 bnad_napi_delete(bnad, rx_id);
2178 spin_lock_irqsave(&bnad->bna_lock, flags);
2183 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2185 bnad_rx_res_free(bnad, res_info);
2188 /* Called with mutex_lock(&bnad->conf_mutex) held */
2190 bnad_setup_rx(struct bnad *bnad, u32 rx_id)
2193 struct bnad_rx_info *rx_info = &bnad->rx_info[rx_id];
2194 struct bna_res_info *res_info = &bnad->rx_res_info[rx_id].res_info[0];
2197 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
2213 bnad_init_rx_config(bnad, rx_config);
2216 spin_lock_irqsave(&bnad->bna_lock, flags);
2218 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2235 err = bnad_rx_res_alloc(bnad, res_info, rx_id);
2239 bnad_rx_ctrl_init(bnad, rx_id);
2242 spin_lock_irqsave(&bnad->bna_lock, flags);
2243 rx = bna_rx_create(&bnad->bna, bnad, rx_config, &rx_cbfn, res_info,
2247 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2251 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2260 bnad_napi_add(bnad, rx_id);
2264 err = bnad_rx_msix_register(bnad, rx_info, rx_id,
2270 spin_lock_irqsave(&bnad->bna_lock, flags);
2273 if (bnad->cfg_flags & BNAD_CF_DIM_ENABLED)
2274 bna_rx_dim_reconfig(&bnad->bna, bna_napi_dim_vector);
2280 bnad_dim_timer_start(bnad);
2284 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2289 bnad_destroy_rx(bnad, rx_id);
2293 /* Called with conf_lock & bnad->bna_lock held */
2295 bnad_tx_coalescing_timeo_set(struct bnad *bnad)
2299 tx_info = &bnad->tx_info[0];
2303 bna_tx_coalescing_timeo_set(tx_info->tx, bnad->tx_coalescing_timeo);
2306 /* Called with conf_lock & bnad->bna_lock held */
2308 bnad_rx_coalescing_timeo_set(struct bnad *bnad)
2313 for (i = 0; i < bnad->num_rx; i++) {
2314 rx_info = &bnad->rx_info[i];
2318 bnad->rx_coalescing_timeo);
2323 * Called with bnad->bna_lock held
2326 bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
2334 if (!bnad->rx_info[0].rx)
2337 ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
2346 bnad_enable_default_bcast(struct bnad *bnad)
2348 struct bnad_rx_info *rx_info = &bnad->rx_info[0];
2352 init_completion(&bnad->bnad_completions.mcast_comp);
2354 spin_lock_irqsave(&bnad->bna_lock, flags);
2357 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2360 wait_for_completion(&bnad->bnad_completions.mcast_comp);
2364 if (bnad->bnad_completions.mcast_comp_status != BNA_CB_SUCCESS)
2370 /* Called with mutex_lock(&bnad->conf_mutex) held */
2372 bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
2377 for_each_set_bit(vid, bnad->active_vlans, VLAN_N_VID) {
2378 spin_lock_irqsave(&bnad->bna_lock, flags);
2379 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vid);
2380 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2386 bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2390 for (i = 0; i < bnad->num_rx; i++) {
2391 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
2392 if (bnad->rx_info[i].rx_ctrl[j].ccb) {
2393 stats->rx_packets += bnad->rx_info[i].
2395 stats->rx_bytes += bnad->rx_info[i].
2397 if (bnad->rx_info[i].rx_ctrl[j].ccb->rcb[1] &&
2398 bnad->rx_info[i].rx_ctrl[j].ccb->
2401 bnad->rx_info[i].rx_ctrl[j].
2404 bnad->rx_info[i].rx_ctrl[j].
2410 for (i = 0; i < bnad->num_tx; i++) {
2411 for (j = 0; j < bnad->num_txq_per_tx; j++) {
2412 if (bnad->tx_info[i].tcb[j]) {
2414 bnad->tx_info[i].tcb[j]->txq->tx_packets;
2416 bnad->tx_info[i].tcb[j]->txq->tx_bytes;
2426 bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
2432 mac_stats = &bnad->stats.bna_stats->hw_stats.mac_stats;
2451 bmap = bna_rx_rid_mask(&bnad->bna);
2455 bnad->stats.bna_stats->
2464 bnad_mbox_irq_sync(struct bnad *bnad)
2469 spin_lock_irqsave(&bnad->bna_lock, flags);
2470 if (bnad->cfg_flags & BNAD_CF_MSIX)
2471 irq = bnad->msix_table[BNAD_MAILBOX_MSIX_INDEX].vector;
2473 irq = bnad->pcidev->irq;
2474 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2481 bnad_tso_prepare(struct bnad *bnad, struct sk_buff *skb)
2487 BNAD_UPDATE_CTR(bnad, tso_err);
2505 BNAD_UPDATE_CTR(bnad, tso4);
2508 BNAD_UPDATE_CTR(bnad, tso6);
2516 * Called with bnad->bna_lock held, because of cfg_flags
2520 bnad_q_num_init(struct bnad *bnad)
2527 if (!(bnad->cfg_flags & BNAD_CF_MSIX))
2530 bnad->num_rx = 1;
2531 bnad->num_tx = 1;
2532 bnad->num_rxp_per_rx = rxps;
2533 bnad->num_txq_per_tx = BNAD_TXQ_NUM;
2540 * Called with bnad->bna_lock held b'cos of cfg_flags access
2543 bnad_q_num_adjust(struct bnad *bnad, int msix_vectors, int temp)
2545 bnad->num_txq_per_tx = 1;
2546 if ((msix_vectors >= (bnad->num_tx * bnad->num_txq_per_tx) +
2548 (bnad->cfg_flags & BNAD_CF_MSIX)) {
2549 bnad->num_rxp_per_rx = msix_vectors -
2550 (bnad->num_tx * bnad->num_txq_per_tx) -
2553 bnad->num_rxp_per_rx = 1;
2558 bnad_ioceth_disable(struct bnad *bnad)
2563 spin_lock_irqsave(&bnad->bna_lock, flags);
2564 init_completion(&bnad->bnad_completions.ioc_comp);
2565 bna_ioceth_disable(&bnad->bna.ioceth, BNA_HARD_CLEANUP);
2566 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2568 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2571 err = bnad->bnad_completions.ioc_comp_status;
2576 bnad_ioceth_enable(struct bnad *bnad)
2581 spin_lock_irqsave(&bnad->bna_lock, flags);
2582 init_completion(&bnad->bnad_completions.ioc_comp);
2583 bnad->bnad_completions.ioc_comp_status = BNA_CB_WAITING;
2584 bna_ioceth_enable(&bnad->bna.ioceth);
2585 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2587 wait_for_completion_timeout(&bnad->bnad_completions.ioc_comp,
2590 err = bnad->bnad_completions.ioc_comp_status;
2597 bnad_res_free(struct bnad *bnad, struct bna_res_info *res_info,
2603 bnad_mem_free(bnad, &res_info[i].res_u.mem_info);
2608 bnad_res_alloc(struct bnad *bnad, struct bna_res_info *res_info,
2614 err = bnad_mem_alloc(bnad, &res_info[i].res_u.mem_info);
2621 bnad_res_free(bnad, res_info, res_val_max);
2627 bnad_enable_msix(struct bnad *bnad)
2632 spin_lock_irqsave(&bnad->bna_lock, flags);
2633 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
2634 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2637 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2639 if (bnad->msix_table)
2642 bnad->msix_table =
2643 kcalloc(bnad->msix_num, sizeof(struct msix_entry), GFP_KERNEL);
2645 if (!bnad->msix_table)
2648 for (i = 0; i < bnad->msix_num; i++)
2649 bnad->msix_table[i].entry = i;
2651 ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
2652 1, bnad->msix_num);
2655 } else if (ret < bnad->msix_num) {
2656 dev_warn(&bnad->pcidev->dev,
2658 ret, bnad->msix_num);
2660 spin_lock_irqsave(&bnad->bna_lock, flags);
2662 bnad_q_num_adjust(bnad, (ret - BNAD_MAILBOX_MSIX_VECTORS) / 2,
2664 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2666 bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
2669 if (bnad->msix_num > ret) {
2670 pci_disable_msix(bnad->pcidev);
2675 pci_intx(bnad->pcidev, 0);
2680 dev_warn(&bnad->pcidev->dev,
2683 kfree(bnad->msix_table);
2684 bnad->msix_table = NULL;
2685 bnad->msix_num = 0;
2686 spin_lock_irqsave(&bnad->bna_lock, flags);
2687 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2688 bnad_q_num_init(bnad);
2689 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2693 bnad_disable_msix(struct bnad *bnad)
2698 spin_lock_irqsave(&bnad->bna_lock, flags);
2699 cfg_flags = bnad->cfg_flags;
2700 if (bnad->cfg_flags & BNAD_CF_MSIX)
2701 bnad->cfg_flags &= ~BNAD_CF_MSIX;
2702 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2705 pci_disable_msix(bnad->pcidev);
2706 kfree(bnad->msix_table);
2707 bnad->msix_table = NULL;
2716 struct bnad *bnad = netdev_priv(netdev);
2720 mutex_lock(&bnad->conf_mutex);
2723 err = bnad_setup_tx(bnad, 0);
2728 err = bnad_setup_rx(bnad, 0);
2736 spin_lock_irqsave(&bnad->bna_lock, flags);
2737 bna_enet_mtu_set(&bnad->bna.enet,
2738 BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
2739 bna_enet_pause_config(&bnad->bna.enet, &pause_config);
2740 bna_enet_enable(&bnad->bna.enet);
2741 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2744 bnad_enable_default_bcast(bnad);
2747 bnad_restore_vlans(bnad, 0);
2750 spin_lock_irqsave(&bnad->bna_lock, flags);
2751 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
2752 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2755 bnad_stats_timer_start(bnad);
2757 mutex_unlock(&bnad->conf_mutex);
2762 bnad_destroy_tx(bnad, 0);
2765 mutex_unlock(&bnad->conf_mutex);
2772 struct bnad *bnad = netdev_priv(netdev);
2775 mutex_lock(&bnad->conf_mutex);
2778 bnad_stats_timer_stop(bnad);
2780 init_completion(&bnad->bnad_completions.enet_comp);
2782 spin_lock_irqsave(&bnad->bna_lock, flags);
2783 bna_enet_disable(&bnad->bna.enet, BNA_HARD_CLEANUP,
2785 spin_unlock_irqrestore(&bnad->bna_lock, flags);
2787 wait_for_completion(&bnad->bnad_completions.enet_comp);
2789 bnad_destroy_tx(bnad, 0);
2790 bnad_destroy_rx(bnad, 0);
2793 bnad_mbox_irq_sync(bnad);
2795 mutex_unlock(&bnad->conf_mutex);
2803 bnad_txq_wi_prepare(struct bnad *bnad, struct bna_tcb *tcb,
2814 if (test_bit(BNAD_RF_CEE_RUNNING, &bnad->run_flags)) {
2823 if (unlikely(gso_size > bnad->netdev->mtu)) {
2824 BNAD_UPDATE_CTR(bnad, tx_skb_mss_too_long);
2831 BNAD_UPDATE_CTR(bnad, tx_skb_tso_too_short);
2837 if (bnad_tso_prepare(bnad, skb)) {
2838 BNAD_UPDATE_CTR(bnad, tx_skb_tso_prepare);
2850 if (unlikely(skb->len > (bnad->netdev->mtu + VLAN_ETH_HLEN))) {
2851 BNAD_UPDATE_CTR(bnad, tx_skb_non_tso_too_long);
2873 BNAD_UPDATE_CTR(bnad, tcpcsum_offload);
2878 BNAD_UPDATE_CTR(bnad, tx_skb_tcp_hdr);
2887 BNAD_UPDATE_CTR(bnad, udpcsum_offload);
2891 BNAD_UPDATE_CTR(bnad, tx_skb_udp_hdr);
2896 BNAD_UPDATE_CTR(bnad, tx_skb_csum_err);
2916 struct bnad *bnad = netdev_priv(netdev);
2932 BNAD_UPDATE_CTR(bnad, tx_skb_too_short);
2937 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2942 BNAD_UPDATE_CTR(bnad, tx_skb_headlen_zero);
2946 tcb = bnad->tx_info[0].tcb[txq_id];
2954 BNAD_UPDATE_CTR(bnad, tx_skb_stopping);
2967 BNAD_UPDATE_CTR(bnad, tx_skb_max_vectors);
2976 sent = bnad_txcmpl_process(bnad, tcb);
2983 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2993 BNAD_UPDATE_CTR(bnad, netif_queue_stop);
2997 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
3005 if (bnad_txq_wi_prepare(bnad, tcb, skb, txqent)) {
3017 dma_addr = dma_map_single(&bnad->pcidev->dev, skb->data,
3019 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3021 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3035 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3038 BNAD_UPDATE_CTR(bnad, tx_skb_frag_zero);
3053 dma_addr = skb_frag_dma_map(&bnad->pcidev->dev, frag,
3055 if (dma_mapping_error(&bnad->pcidev->dev, dma_addr)) {
3057 bnad_tx_buff_unmap(bnad, unmap_q, q_depth,
3060 BNAD_UPDATE_CTR(bnad, tx_skb_map_failed);
3074 bnad_tx_buff_unmap(bnad, unmap_q, q_depth, tcb->producer_index);
3076 BNAD_UPDATE_CTR(bnad, tx_skb_len_mismatch);
3102 struct bnad *bnad = netdev_priv(netdev);
3105 spin_lock_irqsave(&bnad->bna_lock, flags);
3107 bnad_netdev_qstats_fill(bnad, stats);
3108 bnad_netdev_hwstats_fill(bnad, stats);
3110 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3114 bnad_set_rx_ucast_fltr(struct bnad *bnad)
3116 struct net_device *netdev = bnad->netdev;
3123 if (netdev_uc_empty(bnad->netdev)) {
3124 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3128 if (uc_count > bna_attr(&bnad->bna)->num_ucmac)
3141 ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
3151 bnad->cfg_flags |= BNAD_CF_DEFAULT;
3152 bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
3156 bnad_set_rx_mcast_fltr(struct bnad *bnad)
3158 struct net_device *netdev = bnad->netdev;
3169 if (mc_count > bna_attr(&bnad->bna)->num_mcmac)
3181 ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
3190 bnad->cfg_flags |= BNAD_CF_ALLMULTI;
3191 bna_rx_mcast_delall(bnad->rx_info[0].rx);
3197 struct bnad *bnad = netdev_priv(netdev);
3201 spin_lock_irqsave(&bnad->bna_lock, flags);
3203 if (bnad->rx_info[0].rx == NULL) {
3204 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3208 /* clear bnad flags to update it with new settings */
3209 bnad->cfg_flags &= ~(BNAD_CF_PROMISC | BNAD_CF_DEFAULT |
3215 bnad->cfg_flags |= BNAD_CF_PROMISC;
3217 bnad_set_rx_mcast_fltr(bnad);
3219 if (bnad->cfg_flags & BNAD_CF_ALLMULTI)
3222 bnad_set_rx_ucast_fltr(bnad);
3224 if (bnad->cfg_flags & BNAD_CF_DEFAULT)
3230 bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
3232 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3244 struct bnad *bnad = netdev_priv(netdev);
3248 spin_lock_irqsave(&bnad->bna_lock, flags);
3250 err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
3254 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3260 bnad_mtu_set(struct bnad *bnad, int frame_size)
3264 init_completion(&bnad->bnad_completions.mtu_comp);
3266 spin_lock_irqsave(&bnad->bna_lock, flags);
3267 bna_enet_mtu_set(&bnad->bna.enet, frame_size, bnad_cb_enet_mtu_set);
3268 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3270 wait_for_completion(&bnad->bnad_completions.mtu_comp);
3272 return bnad->bnad_completions.mtu_comp_status;
3279 struct bnad *bnad = netdev_priv(netdev);
3282 mutex_lock(&bnad->conf_mutex);
3291 if (BNAD_PCI_DEV_IS_CAT2(bnad) &&
3292 netif_running(bnad->netdev)) {
3296 bnad_reinit_rx(bnad);
3299 err = bnad_mtu_set(bnad, new_frame);
3303 mutex_unlock(&bnad->conf_mutex);
3310 struct bnad *bnad = netdev_priv(netdev);
3313 if (!bnad->rx_info[0].rx)
3316 mutex_lock(&bnad->conf_mutex);
3318 spin_lock_irqsave(&bnad->bna_lock, flags);
3319 bna_rx_vlan_add(bnad->rx_info[0].rx, vid);
3320 set_bit(vid, bnad->active_vlans);
3321 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3323 mutex_unlock(&bnad->conf_mutex);
3331 struct bnad *bnad = netdev_priv(netdev);
3334 if (!bnad->rx_info[0].rx)
3337 mutex_lock(&bnad->conf_mutex);
3339 spin_lock_irqsave(&bnad->bna_lock, flags);
3340 clear_bit(vid, bnad->active_vlans);
3341 bna_rx_vlan_del(bnad->rx_info[0].rx, vid);
3342 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3344 mutex_unlock(&bnad->conf_mutex);
3351 struct bnad *bnad = netdev_priv(dev);
3357 spin_lock_irqsave(&bnad->bna_lock, flags);
3360 bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
3362 bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
3364 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3374 struct bnad *bnad = netdev_priv(netdev);
3380 if (!(bnad->cfg_flags & BNAD_CF_MSIX)) {
3381 bna_intx_disable(&bnad->bna, curr_mask);
3382 bnad_isr(bnad->pcidev->irq, netdev);
3383 bna_intx_enable(&bnad->bna, curr_mask);
3391 for (i = 0; i < bnad->num_rx; i++) {
3392 rx_info = &bnad->rx_info[i];
3395 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
3398 bnad_netif_rx_schedule_poll(bnad,
3424 bnad_netdev_init(struct bnad *bnad, bool using_dac)
3426 struct net_device *netdev = bnad->netdev;
3442 netdev->mem_start = bnad->mmio_start;
3443 netdev->mem_end = bnad->mmio_start + bnad->mmio_len - 1;
3454 * 1. Initialize the bnad structure
3460 bnad_init(struct bnad *bnad,
3468 bnad->netdev = netdev;
3469 bnad->pcidev = pdev;
3470 bnad->mmio_start = pci_resource_start(pdev, 0);
3471 bnad->mmio_len = pci_resource_len(pdev, 0);
3472 bnad->bar0 = ioremap(bnad->mmio_start, bnad->mmio_len);
3473 if (!bnad->bar0) {
3477 dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
3478 (unsigned long long) bnad->mmio_len);
3480 spin_lock_irqsave(&bnad->bna_lock, flags);
3482 bnad->cfg_flags = BNAD_CF_MSIX;
3484 bnad->cfg_flags |= BNAD_CF_DIM_ENABLED;
3486 bnad_q_num_init(bnad);
3487 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3489 bnad->msix_num = (bnad->num_tx * bnad->num_txq_per_tx) +
3490 (bnad->num_rx * bnad->num_rxp_per_rx) +
3493 bnad->txq_depth = BNAD_TXQ_DEPTH;
3494 bnad->rxq_depth = BNAD_RXQ_DEPTH;
3496 bnad->tx_coalescing_timeo = BFI_TX_COALESCING_TIMEO;
3497 bnad->rx_coalescing_timeo = BFI_RX_COALESCING_TIMEO;
3499 sprintf(bnad->wq_name, "%s_wq_%d", BNAD_NAME, bnad->id);
3500 bnad->work_q = create_singlethread_workqueue(bnad->wq_name);
3501 if (!bnad->work_q) {
3502 iounmap(bnad->bar0);
3515 bnad_uninit(struct bnad *bnad)
3517 if (bnad->work_q) {
3518 flush_workqueue(bnad->work_q);
3519 destroy_workqueue(bnad->work_q);
3520 bnad->work_q = NULL;
3523 if (bnad->bar0)
3524 iounmap(bnad->bar0);
3534 bnad_lock_init(struct bnad *bnad)
3536 spin_lock_init(&bnad->bna_lock);
3537 mutex_init(&bnad->conf_mutex);
3541 bnad_lock_uninit(struct bnad *bnad)
3543 mutex_destroy(&bnad->conf_mutex);
3548 bnad_pci_init(struct bnad *bnad,
3591 struct bnad *bnad;
3606 * Allocates sizeof(struct net_device + struct bnad)
3607 * bnad = netdev->priv
3609 netdev = alloc_etherdev(sizeof(struct bnad));
3614 bnad = netdev_priv(netdev);
3615 bnad_lock_init(bnad);
3616 bnad->id = atomic_inc_return(&bna_id) - 1;
3618 mutex_lock(&bnad->conf_mutex);
3625 err = bnad_pci_init(bnad, pdev, &using_dac);
3630 * Initialize bnad structure
3633 err = bnad_init(bnad, pdev, netdev);
3638 bnad_netdev_init(bnad, using_dac);
3645 bnad_debugfs_init(bnad);
3648 spin_lock_irqsave(&bnad->bna_lock, flags);
3649 bna_res_req(&bnad->res_info[0]);
3650 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3653 err = bnad_res_alloc(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3657 bna = &bnad->bna;
3660 pcidev_info.pci_slot = PCI_SLOT(bnad->pcidev->devfn);
3661 pcidev_info.pci_func = PCI_FUNC(bnad->pcidev->devfn);
3662 pcidev_info.device_id = bnad->pcidev->device;
3663 pcidev_info.pci_bar_kva = bnad->bar0;
3665 spin_lock_irqsave(&bnad->bna_lock, flags);
3666 bna_init(bna, bnad, &pcidev_info, &bnad->res_info[0]);
3667 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3669 bnad->stats.bna_stats = &bna->stats;
3671 bnad_enable_msix(bnad);
3672 err = bnad_mbox_irq_alloc(bnad);
3677 timer_setup(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout, 0);
3678 timer_setup(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check, 0);
3679 timer_setup(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout, 0);
3680 timer_setup(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
3688 err = bnad_ioceth_enable(bnad);
3694 spin_lock_irqsave(&bnad->bna_lock, flags);
3697 bnad_q_num_adjust(bnad, bna_attr(bna)->num_txq - 1,
3703 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3707 spin_lock_irqsave(&bnad->bna_lock, flags);
3708 bna_mod_res_req(&bnad->bna, &bnad->mod_res_info[0]);
3709 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3711 err = bnad_res_alloc(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3717 spin_lock_irqsave(&bnad->bna_lock, flags);
3718 bna_mod_init(&bnad->bna, &bnad->mod_res_info[0]);
3719 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3722 spin_lock_irqsave(&bnad->bna_lock, flags);
3723 bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
3724 bnad_set_netdev_perm_addr(bnad);
3725 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3727 mutex_unlock(&bnad->conf_mutex);
3735 set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
3740 mutex_unlock(&bnad->conf_mutex);
3744 mutex_lock(&bnad->conf_mutex);
3745 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3747 bnad_ioceth_disable(bnad);
3748 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3749 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3750 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3751 spin_lock_irqsave(&bnad->bna_lock, flags);
3753 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3754 bnad_mbox_irq_free(bnad);
3755 bnad_disable_msix(bnad);
3757 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3759 /* Remove the debugfs node for this bnad */
3760 kfree(bnad->regdata);
3761 bnad_debugfs_uninit(bnad);
3762 bnad_uninit(bnad);
3766 mutex_unlock(&bnad->conf_mutex);
3767 bnad_lock_uninit(bnad);
3776 struct bnad *bnad;
3783 bnad = netdev_priv(netdev);
3784 bna = &bnad->bna;
3786 if (test_and_clear_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags))
3789 mutex_lock(&bnad->conf_mutex);
3790 bnad_ioceth_disable(bnad);
3791 del_timer_sync(&bnad->bna.ioceth.ioc.ioc_timer);
3792 del_timer_sync(&bnad->bna.ioceth.ioc.sem_timer);
3793 del_timer_sync(&bnad->bna.ioceth.ioc.hb_timer);
3794 spin_lock_irqsave(&bnad->bna_lock, flags);
3796 spin_unlock_irqrestore(&bnad->bna_lock, flags);
3798 bnad_res_free(bnad, &bnad->mod_res_info[0], BNA_MOD_RES_T_MAX);
3799 bnad_res_free(bnad, &bnad->res_info[0], BNA_RES_T_MAX);
3800 bnad_mbox_irq_free(bnad);
3801 bnad_disable_msix(bnad);
3803 mutex_unlock(&bnad->conf_mutex);
3804 bnad_lock_uninit(bnad);
3805 /* Remove the debugfs node for this bnad */
3806 kfree(bnad->regdata);
3807 bnad_debugfs_uninit(bnad);
3808 bnad_uninit(bnad);