Lines matching refs:bp (identifier cross-reference over the bnx2 driver, drivers/net/ethernet/broadcom/bnx2.c)

243 static void bnx2_init_napi(struct bnx2 *bp);
244 static void bnx2_del_napi(struct bnx2 *bp);
246 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
259 return bp->tx_ring_size - diff;
263 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
268 spin_lock_irqsave(&bp->indirect_lock, flags);
269 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
270 val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
271 spin_unlock_irqrestore(&bp->indirect_lock, flags);
276 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
280 spin_lock_irqsave(&bp->indirect_lock, flags);
281 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
282 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
283 spin_unlock_irqrestore(&bp->indirect_lock, flags);
287 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
289 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
293 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
295 return bnx2_reg_rd_ind(bp, bp->shmem_base + offset);
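
The two functions above are the driver's single mechanism for reaching chip addresses outside the directly mapped BAR: select the target through BNX2_PCICFG_REG_WINDOW_ADDRESS, then access BNX2_PCICFG_REG_WINDOW, with bp->indirect_lock keeping the select/access pair atomic. A minimal sketch of the read side, assuming the BNX2_RD/BNX2_WR accessors and register names from bnx2.h (the _sketch suffix marks it as an illustration, not driver code):

    static u32
    bnx2_reg_rd_ind_sketch(struct bnx2 *bp, u32 offset)
    {
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&bp->indirect_lock, flags);
        /* select the register behind the window ... */
        BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
        /* ... then read it back through the window */
        val = BNX2_RD(bp, BNX2_PCICFG_REG_WINDOW);
        spin_unlock_irqrestore(&bp->indirect_lock, flags);
        return val;
    }

bnx2_shmem_rd()/bnx2_shmem_wr() are thin wrappers that add bp->shmem_base, which is how every firmware mailbox access later in this listing is implemented.
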
299 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304 spin_lock_irqsave(&bp->indirect_lock, flags);
305 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
308 BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
309 BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
312 val = BNX2_RD(bp, BNX2_CTX_CTX_CTRL);
318 BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset);
319 BNX2_WR(bp, BNX2_CTX_DATA, val);
321 spin_unlock_irqrestore(&bp->indirect_lock, flags);
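
bnx2_ctx_wr() (lines 299-321) reuses the same lock for context memory, with two chip-specific paths: the 5709 posts the value through BNX2_CTX_CTX_DATA/BNX2_CTX_CTX_CTRL and polls for the write-request bit to clear, while older chips write BNX2_CTX_DATA_ADR/BNX2_CTX_DATA directly. A hedged sketch; constant names follow bnx2.h and the five-iteration poll bound should be treated as illustrative:

    static void
    bnx2_ctx_wr_sketch(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
    {
        unsigned long flags;

        spin_lock_irqsave(&bp->indirect_lock, flags);
        if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
            int i;

            BNX2_WR(bp, BNX2_CTX_CTX_DATA, val);
            BNX2_WR(bp, BNX2_CTX_CTX_CTRL,
                    (cid_addr + offset) | BNX2_CTX_CTX_CTRL_WRITE_REQ);
            for (i = 0; i < 5; i++) {    /* wait for the write to retire */
                if (!(BNX2_RD(bp, BNX2_CTX_CTX_CTRL) &
                      BNX2_CTX_CTX_CTRL_WRITE_REQ))
                    break;
                udelay(5);
            }
        } else {
            BNX2_WR(bp, BNX2_CTX_DATA_ADR, offset + cid_addr);
            BNX2_WR(bp, BNX2_CTX_DATA, val);
        }
        spin_unlock_irqrestore(&bp->indirect_lock, flags);
    }
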
328 struct bnx2 *bp = netdev_priv(dev);
333 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
347 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
349 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
350 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353 if (bp->flags & BNX2_FLAG_USING_MSIX) {
356 sb_id = bp->irq_nvecs;
366 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
377 struct bnx2 *bp = netdev_priv(dev);
378 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 if (!bnx2_reg_rd_ind(bp, BNX2_FW_MAX_ISCSI_CONN))
389 bp->cnic_data = data;
390 rcu_assign_pointer(bp->cnic_ops, ops);
395 bnx2_setup_cnic_irq_info(bp);
402 struct bnx2 *bp = netdev_priv(dev);
403 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
404 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
406 mutex_lock(&bp->cnic_lock);
409 RCU_INIT_POINTER(bp->cnic_ops, NULL);
410 mutex_unlock(&bp->cnic_lock);
417 struct bnx2 *bp = netdev_priv(dev);
418 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
424 cp->chip_id = bp->chip_id;
425 cp->pdev = bp->pdev;
426 cp->io_base = bp->regview;
435 bnx2_cnic_stop(struct bnx2 *bp)
440 mutex_lock(&bp->cnic_lock);
441 c_ops = rcu_dereference_protected(bp->cnic_ops,
442 lockdep_is_held(&bp->cnic_lock));
445 c_ops->cnic_ctl(bp->cnic_data, &info);
447 mutex_unlock(&bp->cnic_lock);
451 bnx2_cnic_start(struct bnx2 *bp)
456 mutex_lock(&bp->cnic_lock);
457 c_ops = rcu_dereference_protected(bp->cnic_ops,
458 lockdep_is_held(&bp->cnic_lock));
460 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
461 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
466 c_ops->cnic_ctl(bp->cnic_data, &info);
468 mutex_unlock(&bp->cnic_lock);
474 bnx2_cnic_stop(struct bnx2 *bp)
479 bnx2_cnic_start(struct bnx2 *bp)
486 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
491 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
492 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
495 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
496 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
501 val1 = (bp->phy_addr << 21) | (reg << 16) |
504 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
509 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
513 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
529 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
530 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
533 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
534 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
543 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
548 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
549 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
552 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
553 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
558 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
561 BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
566 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
578 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
579 val1 = BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
582 BNX2_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
583 BNX2_RD(bp, BNX2_EMAC_MDIO_MODE);
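
bnx2_read_phy()/bnx2_write_phy() (lines 486-583) drive the MDIO bus through one command register: build a command word from the PHY address and register number, post it to BNX2_EMAC_MDIO_COMM, and poll until the chip clears the busy bit. The surrounding code that pauses EMAC auto-polling first and restores it afterwards is elided here. A sketch of the read handshake, assuming the MDIO_COMM bit names from bnx2.h; the 50 x 10 us poll budget is illustrative:

    static int
    bnx2_mdio_read_sketch(struct bnx2 *bp, u32 reg, u32 *val)
    {
        u32 cmd = (bp->phy_addr << 21) | (reg << 16) |
                  BNX2_EMAC_MDIO_COMM_COMMAND_READ |
                  BNX2_EMAC_MDIO_COMM_DISEXT |
                  BNX2_EMAC_MDIO_COMM_START_BUSY;
        int i;

        BNX2_WR(bp, BNX2_EMAC_MDIO_COMM, cmd);
        for (i = 0; i < 50; i++) {
            udelay(10);
            cmd = BNX2_RD(bp, BNX2_EMAC_MDIO_COMM);
            if (!(cmd & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
                *val = cmd & 0xffff;    /* result rides in the low half */
                return 0;
            }
        }
        return -EBUSY;
    }
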
592 bnx2_disable_int(struct bnx2 *bp)
597 for (i = 0; i < bp->irq_nvecs; i++) {
598 bnapi = &bp->bnx2_napi[i];
599 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
602 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
606 bnx2_enable_int(struct bnx2 *bp)
611 for (i = 0; i < bp->irq_nvecs; i++) {
612 bnapi = &bp->bnx2_napi[i];
614 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
619 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
623 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
627 bnx2_disable_int_sync(struct bnx2 *bp)
631 atomic_inc(&bp->intr_sem);
632 if (!netif_running(bp->dev))
635 bnx2_disable_int(bp);
636 for (i = 0; i < bp->irq_nvecs; i++)
637 synchronize_irq(bp->irq_tbl[i].vector);
641 bnx2_napi_disable(struct bnx2 *bp)
645 for (i = 0; i < bp->irq_nvecs; i++)
646 napi_disable(&bp->bnx2_napi[i].napi);
650 bnx2_napi_enable(struct bnx2 *bp)
654 for (i = 0; i < bp->irq_nvecs; i++)
655 napi_enable(&bp->bnx2_napi[i].napi);
659 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
662 bnx2_cnic_stop(bp);
663 if (netif_running(bp->dev)) {
664 bnx2_napi_disable(bp);
665 netif_tx_disable(bp->dev);
667 bnx2_disable_int_sync(bp);
668 netif_carrier_off(bp->dev); /* prevent tx timeout */
672 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
674 if (atomic_dec_and_test(&bp->intr_sem)) {
675 if (netif_running(bp->dev)) {
676 netif_tx_wake_all_queues(bp->dev);
677 spin_lock_bh(&bp->phy_lock);
678 if (bp->link_up)
679 netif_carrier_on(bp->dev);
680 spin_unlock_bh(&bp->phy_lock);
681 bnx2_napi_enable(bp);
682 bnx2_enable_int(bp);
684 bnx2_cnic_start(bp);
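
bnx2_netif_stop()/bnx2_netif_start() (lines 659-684) fix the quiesce ordering: offload client first, then NAPI and the tx queues, then interrupts, with bp->intr_sem acting as a nesting count. Restated as a sketch built from the helpers above:

    static void
    bnx2_netif_stop_sketch(struct bnx2 *bp, bool stop_cnic)
    {
        if (stop_cnic)
            bnx2_cnic_stop(bp);          /* offload client first */
        if (netif_running(bp->dev)) {
            bnx2_napi_disable(bp);       /* no more poll callbacks */
            netif_tx_disable(bp->dev);   /* freeze the tx queues */
        }
        bnx2_disable_int_sync(bp);       /* mask, then synchronize_irq() */
        netif_carrier_off(bp->dev);      /* prevent tx timeout */
    }

bnx2_netif_start() undoes this in reverse, but only when atomic_dec_and_test(&bp->intr_sem) shows the outermost stop has been released.
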
690 bnx2_free_tx_mem(struct bnx2 *bp)
694 for (i = 0; i < bp->num_tx_rings; i++) {
695 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699 dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
710 bnx2_free_rx_mem(struct bnx2 *bp)
714 for (i = 0; i < bp->num_rx_rings; i++) {
715 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
719 for (j = 0; j < bp->rx_max_ring; j++) {
721 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
729 for (j = 0; j < bp->rx_max_pg_ring; j++) {
731 dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
742 bnx2_alloc_tx_mem(struct bnx2 *bp)
746 for (i = 0; i < bp->num_tx_rings; i++) {
747 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
755 dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
764 bnx2_alloc_rx_mem(struct bnx2 *bp)
768 for (i = 0; i < bp->num_rx_rings; i++) {
769 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
774 vzalloc(array_size(SW_RXBD_RING_SIZE, bp->rx_max_ring));
778 for (j = 0; j < bp->rx_max_ring; j++) {
780 dma_alloc_coherent(&bp->pdev->dev,
789 if (bp->rx_pg_ring_size) {
792 bp->rx_max_pg_ring));
798 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800 dma_alloc_coherent(&bp->pdev->dev,
815 struct bnx2 *bp = netdev_priv(dev);
817 if (bp->status_blk) {
818 dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
819 bp->status_blk,
820 bp->status_blk_mapping);
821 bp->status_blk = NULL;
822 bp->stats_blk = NULL;
831 struct bnx2 *bp = netdev_priv(dev);
835 if (bp->flags & BNX2_FLAG_MSIX_CAP)
838 bp->status_stats_size = status_blk_size +
840 status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
841 &bp->status_blk_mapping, GFP_KERNEL);
845 bp->status_blk = status_blk;
846 bp->stats_blk = status_blk + status_blk_size;
847 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
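
The allocation at lines 815-847 carves a single coherent DMA buffer into the status block(s) followed by the statistics block, so both live in one mapping. A sketch of the split, assuming status_blk_size already accounts for the per-vector MSI-X status blocks (the MSIX_CAP sizing at line 835 is elided):

    static int
    bnx2_alloc_stats_blk_sketch(struct bnx2 *bp, unsigned int status_blk_size)
    {
        void *blk;

        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);
        blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
                                 &bp->status_blk_mapping, GFP_KERNEL);
        if (!blk)
            return -ENOMEM;

        bp->status_blk = blk;                     /* head of the buffer */
        bp->stats_blk = blk + status_blk_size;    /* tail of the buffer */
        bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
        return 0;
    }
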
853 bnx2_free_mem(struct bnx2 *bp)
856 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
858 bnx2_free_tx_mem(bp);
859 bnx2_free_rx_mem(bp);
861 for (i = 0; i < bp->ctx_pages; i++) {
862 if (bp->ctx_blk[i]) {
863 dma_free_coherent(&bp->pdev->dev, BNX2_PAGE_SIZE,
864 bp->ctx_blk[i],
865 bp->ctx_blk_mapping[i]);
866 bp->ctx_blk[i] = NULL;
875 bnx2_alloc_mem(struct bnx2 *bp)
880 bnapi = &bp->bnx2_napi[0];
881 bnapi->status_blk.msi = bp->status_blk;
886 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
887 for (i = 1; i < bp->irq_nvecs; i++) {
890 bnapi = &bp->bnx2_napi[i];
892 sblk = (bp->status_blk + BNX2_SBLK_MSIX_ALIGN_SIZE * i);
902 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
903 bp->ctx_pages = 0x2000 / BNX2_PAGE_SIZE;
904 if (bp->ctx_pages == 0)
905 bp->ctx_pages = 1;
906 for (i = 0; i < bp->ctx_pages; i++) {
907 bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
909 &bp->ctx_blk_mapping[i],
911 if (!bp->ctx_blk[i])
916 err = bnx2_alloc_rx_mem(bp);
920 err = bnx2_alloc_tx_mem(bp);
927 bnx2_free_mem(bp);
932 bnx2_report_fw_link(struct bnx2 *bp)
936 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
939 if (bp->link_up) {
942 switch (bp->line_speed) {
944 if (bp->duplex == DUPLEX_HALF)
950 if (bp->duplex == DUPLEX_HALF)
956 if (bp->duplex == DUPLEX_HALF)
962 if (bp->duplex == DUPLEX_HALF)
971 if (bp->autoneg) {
974 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
975 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
978 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
987 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
991 bnx2_xceiver_str(struct bnx2 *bp)
993 return (bp->phy_port == PORT_FIBRE) ? "SerDes" :
994 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
999 bnx2_report_link(struct bnx2 *bp)
1001 if (bp->link_up) {
1002 netif_carrier_on(bp->dev);
1003 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
1004 bnx2_xceiver_str(bp),
1005 bp->line_speed,
1006 bp->duplex == DUPLEX_FULL ? "full" : "half");
1008 if (bp->flow_ctrl) {
1009 if (bp->flow_ctrl & FLOW_CTRL_RX) {
1011 if (bp->flow_ctrl & FLOW_CTRL_TX)
1021 netif_carrier_off(bp->dev);
1022 netdev_err(bp->dev, "NIC %s Link is Down\n",
1023 bnx2_xceiver_str(bp));
1026 bnx2_report_fw_link(bp);
1030 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1034 bp->flow_ctrl = 0;
1035 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1038 if (bp->duplex == DUPLEX_FULL) {
1039 bp->flow_ctrl = bp->req_flow_ctrl;
1044 if (bp->duplex != DUPLEX_FULL) {
1048 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1049 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
1052 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1054 bp->flow_ctrl |= FLOW_CTRL_TX;
1056 bp->flow_ctrl |= FLOW_CTRL_RX;
1060 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1061 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1063 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1084 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1087 bp->flow_ctrl = FLOW_CTRL_RX;
1092 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1100 bp->flow_ctrl = FLOW_CTRL_TX;
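
bnx2_resolve_flow_ctrl() (lines 1030-1100) turns the negotiated advertisement registers into FLOW_CTRL_TX/FLOW_CTRL_RX bits. On copper this is the standard IEEE 802.3x pause truth table, the same one mii_resolve_flowctrl_fdx() in <linux/mii.h> encodes; the SerDes path at lines 1063-1100 applies the equivalent 1000BASE-X pause bits. A condensed sketch with the generic mii.h names:

    static u8
    bnx2_resolve_pause_sketch(u16 local_adv, u16 remote_adv)
    {
        if (local_adv & remote_adv & ADVERTISE_PAUSE_CAP)
            return FLOW_CTRL_TX | FLOW_CTRL_RX;    /* symmetric pause */

        if (local_adv & remote_adv & ADVERTISE_PAUSE_ASYM) {
            if (local_adv & ADVERTISE_PAUSE_CAP)   /* asymmetric cases */
                return FLOW_CTRL_RX;
            if (remote_adv & ADVERTISE_PAUSE_CAP)
                return FLOW_CTRL_TX;
        }
        return 0;                                  /* no pause negotiated */
    }
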
1106 bnx2_5709s_linkup(struct bnx2 *bp)
1110 bp->link_up = 1;
1112 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1113 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1114 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1116 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1117 bp->line_speed = bp->req_line_speed;
1118 bp->duplex = bp->req_duplex;
1124 bp->line_speed = SPEED_10;
1127 bp->line_speed = SPEED_100;
1131 bp->line_speed = SPEED_1000;
1134 bp->line_speed = SPEED_2500;
1138 bp->duplex = DUPLEX_FULL;
1140 bp->duplex = DUPLEX_HALF;
1145 bnx2_5708s_linkup(struct bnx2 *bp)
1149 bp->link_up = 1;
1150 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1153 bp->line_speed = SPEED_10;
1156 bp->line_speed = SPEED_100;
1159 bp->line_speed = SPEED_1000;
1162 bp->line_speed = SPEED_2500;
1166 bp->duplex = DUPLEX_FULL;
1168 bp->duplex = DUPLEX_HALF;
1174 bnx2_5706s_linkup(struct bnx2 *bp)
1178 bp->link_up = 1;
1179 bp->line_speed = SPEED_1000;
1181 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1183 bp->duplex = DUPLEX_FULL;
1186 bp->duplex = DUPLEX_HALF;
1193 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1194 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1200 bp->duplex = DUPLEX_FULL;
1203 bp->duplex = DUPLEX_HALF;
1211 bnx2_copper_linkup(struct bnx2 *bp)
1215 bp->phy_flags &= ~BNX2_PHY_FLAG_MDIX;
1217 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1221 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1222 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1226 bp->line_speed = SPEED_1000;
1227 bp->duplex = DUPLEX_FULL;
1230 bp->line_speed = SPEED_1000;
1231 bp->duplex = DUPLEX_HALF;
1234 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1235 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1239 bp->line_speed = SPEED_100;
1240 bp->duplex = DUPLEX_FULL;
1243 bp->line_speed = SPEED_100;
1244 bp->duplex = DUPLEX_HALF;
1247 bp->line_speed = SPEED_10;
1248 bp->duplex = DUPLEX_FULL;
1251 bp->line_speed = SPEED_10;
1252 bp->duplex = DUPLEX_HALF;
1255 bp->line_speed = 0;
1256 bp->link_up = 0;
1262 bp->line_speed = SPEED_100;
1265 bp->line_speed = SPEED_10;
1268 bp->duplex = DUPLEX_FULL;
1271 bp->duplex = DUPLEX_HALF;
1275 if (bp->link_up) {
1278 bnx2_read_phy(bp, MII_BNX2_EXT_STATUS, &ext_status);
1280 bp->phy_flags |= BNX2_PHY_FLAG_MDIX;
1287 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1295 if (bp->flow_ctrl & FLOW_CTRL_TX)
1298 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1302 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1307 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1310 bnx2_init_rx_context(bp, cid);
1315 bnx2_set_mac_link(struct bnx2 *bp)
1319 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1320 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1321 (bp->duplex == DUPLEX_HALF)) {
1322 BNX2_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1326 val = BNX2_RD(bp, BNX2_EMAC_MODE);
1332 if (bp->link_up) {
1333 switch (bp->line_speed) {
1335 if (BNX2_CHIP(bp) != BNX2_CHIP_5706) {
1356 if (bp->duplex == DUPLEX_HALF)
1358 BNX2_WR(bp, BNX2_EMAC_MODE, val);
1361 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1363 if (bp->flow_ctrl & FLOW_CTRL_RX)
1364 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1365 BNX2_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1368 val = BNX2_RD(bp, BNX2_EMAC_TX_MODE);
1371 if (bp->flow_ctrl & FLOW_CTRL_TX)
1373 BNX2_WR(bp, BNX2_EMAC_TX_MODE, val);
1376 BNX2_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1378 bnx2_init_all_rx_contexts(bp);
1382 bnx2_enable_bmsr1(struct bnx2 *bp)
1384 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1385 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1386 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1391 bnx2_disable_bmsr1(struct bnx2 *bp)
1393 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1394 (BNX2_CHIP(bp) == BNX2_CHIP_5709))
1395 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1400 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1405 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1408 if (bp->autoneg & AUTONEG_SPEED)
1409 bp->advertising |= ADVERTISED_2500baseX_Full;
1411 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1412 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1414 bnx2_read_phy(bp, bp->mii_up1, &up1);
1417 bnx2_write_phy(bp, bp->mii_up1, up1);
1421 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1422 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1429 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1434 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1437 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1438 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1440 bnx2_read_phy(bp, bp->mii_up1, &up1);
1443 bnx2_write_phy(bp, bp->mii_up1, up1);
1447 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1448 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1455 bnx2_enable_forced_2g5(struct bnx2 *bp)
1460 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1463 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1466 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1475 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1477 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1479 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1480 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1490 if (bp->autoneg & AUTONEG_SPEED) {
1492 if (bp->req_duplex == DUPLEX_FULL)
1495 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1499 bnx2_disable_forced_2g5(struct bnx2 *bp)
1504 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1507 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1510 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1512 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1514 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1517 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1519 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1521 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1522 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1532 if (bp->autoneg & AUTONEG_SPEED)
1534 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1538 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1542 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1543 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1545 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1547 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1551 bnx2_set_link(struct bnx2 *bp)
1556 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1557 bp->link_up = 1;
1561 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1564 link_up = bp->link_up;
1566 bnx2_enable_bmsr1(bp);
1567 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1568 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1569 bnx2_disable_bmsr1(bp);
1571 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1572 (BNX2_CHIP(bp) == BNX2_CHIP_5706)) {
1575 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1576 bnx2_5706s_force_link_dn(bp, 0);
1577 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1579 val = BNX2_RD(bp, BNX2_EMAC_STATUS);
1581 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1582 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1583 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1593 bp->link_up = 1;
1595 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1596 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
1597 bnx2_5706s_linkup(bp);
1598 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
1599 bnx2_5708s_linkup(bp);
1600 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
1601 bnx2_5709s_linkup(bp);
1604 bnx2_copper_linkup(bp);
1606 bnx2_resolve_flow_ctrl(bp);
1609 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1610 (bp->autoneg & AUTONEG_SPEED))
1611 bnx2_disable_forced_2g5(bp);
1613 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1616 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1618 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1620 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1622 bp->link_up = 0;
1625 if (bp->link_up != link_up) {
1626 bnx2_report_link(bp);
1629 bnx2_set_mac_link(bp);
1635 bnx2_reset_phy(struct bnx2 *bp)
1640 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1646 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1659 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1663 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1666 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1673 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1674 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1681 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1682 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1695 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1696 __releases(&bp->phy_lock)
1697 __acquires(&bp->phy_lock)
1701 pause_adv = bnx2_phy_get_pause_adv(bp);
1703 if (bp->autoneg & AUTONEG_SPEED) {
1705 if (bp->advertising & ADVERTISED_10baseT_Half)
1707 if (bp->advertising & ADVERTISED_10baseT_Full)
1709 if (bp->advertising & ADVERTISED_100baseT_Half)
1711 if (bp->advertising & ADVERTISED_100baseT_Full)
1713 if (bp->advertising & ADVERTISED_1000baseT_Full)
1715 if (bp->advertising & ADVERTISED_2500baseX_Full)
1718 if (bp->req_line_speed == SPEED_2500)
1720 else if (bp->req_line_speed == SPEED_1000)
1722 else if (bp->req_line_speed == SPEED_100) {
1723 if (bp->req_duplex == DUPLEX_FULL)
1727 } else if (bp->req_line_speed == SPEED_10) {
1728 if (bp->req_duplex == DUPLEX_FULL)
1744 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1746 spin_unlock_bh(&bp->phy_lock);
1747 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1748 spin_lock_bh(&bp->phy_lock);
1754 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1755 __releases(&bp->phy_lock)
1756 __acquires(&bp->phy_lock)
1761 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1762 return bnx2_setup_remote_phy(bp, port);
1764 if (!(bp->autoneg & AUTONEG_SPEED)) {
1768 if (bp->req_line_speed == SPEED_2500) {
1769 if (!bnx2_test_and_enable_2g5(bp))
1771 } else if (bp->req_line_speed == SPEED_1000) {
1772 if (bnx2_test_and_disable_2g5(bp))
1775 bnx2_read_phy(bp, bp->mii_adv, &adv);
1778 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1782 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
1783 if (bp->req_line_speed == SPEED_2500)
1784 bnx2_enable_forced_2g5(bp);
1785 else if (bp->req_line_speed == SPEED_1000) {
1786 bnx2_disable_forced_2g5(bp);
1790 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5708) {
1791 if (bp->req_line_speed == SPEED_2500)
1797 if (bp->req_duplex == DUPLEX_FULL) {
1807 if (bp->link_up) {
1808 bnx2_write_phy(bp, bp->mii_adv, adv &
1811 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1814 bp->link_up = 0;
1815 netif_carrier_off(bp->dev);
1816 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1817 bnx2_report_link(bp);
1819 bnx2_write_phy(bp, bp->mii_adv, adv);
1820 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1822 bnx2_resolve_flow_ctrl(bp);
1823 bnx2_set_mac_link(bp);
1828 bnx2_test_and_enable_2g5(bp);
1830 if (bp->advertising & ADVERTISED_1000baseT_Full)
1833 new_adv |= bnx2_phy_get_pause_adv(bp);
1835 bnx2_read_phy(bp, bp->mii_adv, &adv);
1836 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1838 bp->serdes_an_pending = 0;
1841 if (bp->link_up) {
1842 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1843 spin_unlock_bh(&bp->phy_lock);
1845 spin_lock_bh(&bp->phy_lock);
1848 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1849 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1859 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1860 bp->serdes_an_pending = 1;
1861 mod_timer(&bp->timer, jiffies + bp->current_interval);
1863 bnx2_resolve_flow_ctrl(bp);
1864 bnx2_set_mac_link(bp);
1871 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1886 bnx2_set_default_remote_link(struct bnx2 *bp)
1890 if (bp->phy_port == PORT_TP)
1891 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1893 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1896 bp->req_line_speed = 0;
1897 bp->autoneg |= AUTONEG_SPEED;
1898 bp->advertising = ADVERTISED_Autoneg;
1900 bp->advertising |= ADVERTISED_10baseT_Half;
1902 bp->advertising |= ADVERTISED_10baseT_Full;
1904 bp->advertising |= ADVERTISED_100baseT_Half;
1906 bp->advertising |= ADVERTISED_100baseT_Full;
1908 bp->advertising |= ADVERTISED_1000baseT_Full;
1910 bp->advertising |= ADVERTISED_2500baseX_Full;
1912 bp->autoneg = 0;
1913 bp->advertising = 0;
1914 bp->req_duplex = DUPLEX_FULL;
1916 bp->req_line_speed = SPEED_10;
1918 bp->req_duplex = DUPLEX_HALF;
1921 bp->req_line_speed = SPEED_100;
1923 bp->req_duplex = DUPLEX_HALF;
1926 bp->req_line_speed = SPEED_1000;
1928 bp->req_line_speed = SPEED_2500;
1933 bnx2_set_default_link(struct bnx2 *bp)
1935 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1936 bnx2_set_default_remote_link(bp);
1940 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1941 bp->req_line_speed = 0;
1942 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1945 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1947 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1950 bp->autoneg = 0;
1951 bp->req_line_speed = bp->line_speed = SPEED_1000;
1952 bp->req_duplex = DUPLEX_FULL;
1955 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1959 bnx2_send_heart_beat(struct bnx2 *bp)
1964 spin_lock(&bp->indirect_lock);
1965 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1966 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1967 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1968 BNX2_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1969 spin_unlock(&bp->indirect_lock);
1973 bnx2_remote_phy_event(struct bnx2 *bp)
1976 u8 link_up = bp->link_up;
1979 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1982 bnx2_send_heart_beat(bp);
1987 bp->link_up = 0;
1991 bp->link_up = 1;
1993 bp->duplex = DUPLEX_FULL;
1996 bp->duplex = DUPLEX_HALF;
1999 bp->line_speed = SPEED_10;
2002 bp->duplex = DUPLEX_HALF;
2006 bp->line_speed = SPEED_100;
2009 bp->duplex = DUPLEX_HALF;
2012 bp->line_speed = SPEED_1000;
2015 bp->duplex = DUPLEX_HALF;
2018 bp->line_speed = SPEED_2500;
2021 bp->line_speed = 0;
2025 bp->flow_ctrl = 0;
2026 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2028 if (bp->duplex == DUPLEX_FULL)
2029 bp->flow_ctrl = bp->req_flow_ctrl;
2032 bp->flow_ctrl |= FLOW_CTRL_TX;
2034 bp->flow_ctrl |= FLOW_CTRL_RX;
2037 old_port = bp->phy_port;
2039 bp->phy_port = PORT_FIBRE;
2041 bp->phy_port = PORT_TP;
2043 if (old_port != bp->phy_port)
2044 bnx2_set_default_link(bp);
2047 if (bp->link_up != link_up)
2048 bnx2_report_link(bp);
2050 bnx2_set_mac_link(bp);
2054 bnx2_set_remote_link(struct bnx2 *bp)
2058 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2061 bnx2_remote_phy_event(bp);
2065 bnx2_send_heart_beat(bp);
2072 bnx2_setup_copper_phy(struct bnx2 *bp)
2073 __releases(&bp->phy_lock)
2074 __acquires(&bp->phy_lock)
2079 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2081 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2085 new_adv = ADVERTISE_CSMA | ethtool_adv_to_mii_adv_t(bp->advertising);
2087 if (bp->autoneg & AUTONEG_SPEED) {
2091 new_adv |= bnx2_phy_get_pause_adv(bp);
2093 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2096 new_adv1000 |= ethtool_adv_to_mii_ctrl1000_t(bp->advertising);
2101 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2102 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000);
2103 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2106 else if (bp->link_up) {
2110 bnx2_resolve_flow_ctrl(bp);
2111 bnx2_set_mac_link(bp);
2118 bnx2_write_phy(bp, bp->mii_adv, new_adv);
2121 if (bp->req_line_speed == SPEED_100) {
2124 if (bp->req_duplex == DUPLEX_FULL) {
2130 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2131 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2135 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2136 spin_unlock_bh(&bp->phy_lock);
2138 spin_lock_bh(&bp->phy_lock);
2140 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2141 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2144 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2151 bp->line_speed = bp->req_line_speed;
2152 bp->duplex = bp->req_duplex;
2153 bnx2_resolve_flow_ctrl(bp);
2154 bnx2_set_mac_link(bp);
2157 bnx2_resolve_flow_ctrl(bp);
2158 bnx2_set_mac_link(bp);
2164 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2165 __releases(&bp->phy_lock)
2166 __acquires(&bp->phy_lock)
2168 if (bp->loopback == MAC_LOOPBACK)
2171 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2172 return bnx2_setup_serdes_phy(bp, port);
2175 return bnx2_setup_copper_phy(bp);
2180 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2184 bp->mii_bmcr = MII_BMCR + 0x10;
2185 bp->mii_bmsr = MII_BMSR + 0x10;
2186 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2187 bp->mii_adv = MII_ADVERTISE + 0x10;
2188 bp->mii_lpa = MII_LPA + 0x10;
2189 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2191 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2192 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2194 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2196 bnx2_reset_phy(bp);
2198 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2200 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2203 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2205 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2206 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2207 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2211 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2213 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2214 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2216 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2218 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2222 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2224 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2230 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2235 bnx2_reset_phy(bp);
2237 bp->mii_up1 = BCM5708S_UP1;
2239 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2240 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2241 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2243 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2245 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2247 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2249 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2251 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2252 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2254 bnx2_write_phy(bp, BCM5708S_UP1, val);
2257 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
2258 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
2259 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1)) {
2261 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2263 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2265 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2266 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2269 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2275 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2277 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2279 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2280 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2288 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2291 bnx2_reset_phy(bp);
2293 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2295 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2296 BNX2_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2298 if (bp->dev->mtu > ETH_DATA_LEN) {
2302 bnx2_write_phy(bp, 0x18, 0x7);
2303 bnx2_read_phy(bp, 0x18, &val);
2304 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2306 bnx2_write_phy(bp, 0x1c, 0x6c00);
2307 bnx2_read_phy(bp, 0x1c, &val);
2308 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2313 bnx2_write_phy(bp, 0x18, 0x7);
2314 bnx2_read_phy(bp, 0x18, &val);
2315 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2317 bnx2_write_phy(bp, 0x1c, 0x6c00);
2318 bnx2_read_phy(bp, 0x1c, &val);
2319 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2326 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2331 bnx2_reset_phy(bp);
2333 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2334 bnx2_write_phy(bp, 0x18, 0x0c00);
2335 bnx2_write_phy(bp, 0x17, 0x000a);
2336 bnx2_write_phy(bp, 0x15, 0x310b);
2337 bnx2_write_phy(bp, 0x17, 0x201f);
2338 bnx2_write_phy(bp, 0x15, 0x9506);
2339 bnx2_write_phy(bp, 0x17, 0x401f);
2340 bnx2_write_phy(bp, 0x15, 0x14e2);
2341 bnx2_write_phy(bp, 0x18, 0x0400);
2344 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2345 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2347 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2352 if (bp->dev->mtu > ETH_DATA_LEN) {
2354 bnx2_write_phy(bp, 0x18, 0x7);
2355 bnx2_read_phy(bp, 0x18, &val);
2356 bnx2_write_phy(bp, 0x18, val | 0x4000);
2358 bnx2_read_phy(bp, 0x10, &val);
2359 bnx2_write_phy(bp, 0x10, val | 0x1);
2362 bnx2_write_phy(bp, 0x18, 0x7);
2363 bnx2_read_phy(bp, 0x18, &val);
2364 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2366 bnx2_read_phy(bp, 0x10, &val);
2367 bnx2_write_phy(bp, 0x10, val & ~0x1);
2371 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, AUX_CTL_MISC_CTL);
2372 bnx2_read_phy(bp, MII_BNX2_AUX_CTL, &val);
2376 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2379 bnx2_write_phy(bp, MII_BNX2_AUX_CTL, val);
2385 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2386 __releases(&bp->phy_lock)
2387 __acquires(&bp->phy_lock)
2392 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2393 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2395 bp->mii_bmcr = MII_BMCR;
2396 bp->mii_bmsr = MII_BMSR;
2397 bp->mii_bmsr1 = MII_BMSR;
2398 bp->mii_adv = MII_ADVERTISE;
2399 bp->mii_lpa = MII_LPA;
2401 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2403 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2406 bnx2_read_phy(bp, MII_PHYSID1, &val);
2407 bp->phy_id = val << 16;
2408 bnx2_read_phy(bp, MII_PHYSID2, &val);
2409 bp->phy_id |= val & 0xffff;
2411 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2412 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
2413 rc = bnx2_init_5706s_phy(bp, reset_phy);
2414 else if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
2415 rc = bnx2_init_5708s_phy(bp, reset_phy);
2416 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
2417 rc = bnx2_init_5709s_phy(bp, reset_phy);
2420 rc = bnx2_init_copper_phy(bp, reset_phy);
2425 rc = bnx2_setup_phy(bp, bp->phy_port);
2431 bnx2_set_mac_loopback(struct bnx2 *bp)
2435 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2438 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439 bp->link_up = 1;
2446 bnx2_set_phy_loopback(struct bnx2 *bp)
2451 spin_lock_bh(&bp->phy_lock);
2452 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2454 spin_unlock_bh(&bp->phy_lock);
2459 if (bnx2_test_link(bp) == 0)
2464 mac_mode = BNX2_RD(bp, BNX2_EMAC_MODE);
2470 BNX2_WR(bp, BNX2_EMAC_MODE, mac_mode);
2471 bp->link_up = 1;
2476 bnx2_dump_mcp_state(struct bnx2 *bp)
2478 struct net_device *dev = bp->dev;
2482 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
2490 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
2492 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_MODE),
2493 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_STATE),
2494 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_EVENT_MASK));
2496 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2497 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_PROGRAM_COUNTER),
2498 bnx2_reg_rd_ind(bp, BNX2_MCP_CPU_INSTRUCTION));
2501 bnx2_shmem_rd(bp, BNX2_DRV_MB),
2502 bnx2_shmem_rd(bp, BNX2_FW_MB),
2503 bnx2_shmem_rd(bp, BNX2_LINK_STATUS));
2504 pr_cont(" drv_pulse_mb[%08x]\n", bnx2_shmem_rd(bp, BNX2_DRV_PULSE_MB));
2506 bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE),
2507 bnx2_shmem_rd(bp, BNX2_BC_STATE_RESET_TYPE));
2509 bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION));
2510 DP_SHMEM_LINE(bp, BNX2_BC_RESET_TYPE);
2511 DP_SHMEM_LINE(bp, 0x3cc);
2512 DP_SHMEM_LINE(bp, 0x3dc);
2513 DP_SHMEM_LINE(bp, 0x3ec);
2514 netdev_err(dev, "DEBUG: 0x3fc[%08x]\n", bnx2_shmem_rd(bp, 0x3fc));
2519 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2524 bp->fw_wr_seq++;
2525 msg_data |= bp->fw_wr_seq;
2526 bp->fw_last_msg = msg_data;
2528 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2537 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2550 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2553 bnx2_dump_mcp_state(bp);
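
bnx2_fw_sync() (lines 2519-2553) is the driver/bootcode mailbox handshake: fold a fresh sequence number into the command, post it to the BNX2_DRV_MB shared-memory mailbox, then poll BNX2_FW_MB until the firmware echoes the sequence back. A condensed sketch, assuming the BNX2_FW_MSG_ACK/BNX2_DRV_MSG_SEQ masks from bnx2.h; the timeout reporting and MCP-state dump visible at lines 2550-2553 are elided:

    static int
    bnx2_fw_sync_sketch(struct bnx2 *bp, u32 msg_data)
    {
        u32 val;
        int i;

        bp->fw_wr_seq++;                 /* new sequence number */
        msg_data |= bp->fw_wr_seq;
        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        /* wait roughly a second for the bootcode to echo the sequence */
        for (i = 0; i < 100; i++) {
            msleep(10);
            val = bnx2_shmem_rd(bp, BNX2_FW_MB);
            if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                return 0;
        }
        return -EBUSY;
    }
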
2566 bnx2_init_5709_context(struct bnx2 *bp)
2573 BNX2_WR(bp, BNX2_CTX_COMMAND, val);
2575 val = BNX2_RD(bp, BNX2_CTX_COMMAND);
2583 for (i = 0; i < bp->ctx_pages; i++) {
2586 if (bp->ctx_blk[i])
2587 memset(bp->ctx_blk[i], 0, BNX2_PAGE_SIZE);
2591 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2592 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2594 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2595 (u64) bp->ctx_blk_mapping[i] >> 32);
2596 BNX2_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2600 val = BNX2_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2614 bnx2_init_context(struct bnx2 *bp)
2625 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
2646 BNX2_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2647 BNX2_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2651 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2657 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2667 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2673 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2675 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2678 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2688 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2699 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2706 bnx2_set_mac_addr(struct bnx2 *bp, const u8 *mac_addr, u32 pos)
2712 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2717 BNX2_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2721 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2731 mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
2733 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2746 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2754 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
2762 bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
2770 data = kmalloc(bp->rx_buf_size, gfp);
2774 mapping = dma_map_single(&bp->pdev->dev,
2776 bp->rx_buf_use_size,
2778 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
2789 rxr->rx_prod_bseq += bp->rx_buf_use_size;
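
bnx2_alloc_rx_data() (lines 2762-2789) shows the rx refill discipline: allocate, DMA-map, and only publish the buffer to the ring once the mapping succeeded. A sketch with the descriptor write elided; struct bnx2_sw_bd and the l2_fhdr mapping offset are taken from the driver headers and should be treated as assumptions:

    static int
    bnx2_alloc_rx_data_sketch(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                              u16 index, gfp_t gfp)
    {
        struct bnx2_sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        u8 *data;

        data = kmalloc(bp->rx_buf_size, gfp);
        if (!data)
            return -ENOMEM;

        /* the driver maps starting at the l2_fhdr offset in the buffer */
        mapping = dma_map_single(&bp->pdev->dev, data,
                                 bp->rx_buf_use_size, DMA_FROM_DEVICE);
        if (dma_mapping_error(&bp->pdev->dev, mapping)) {
            kfree(data);                 /* never post an unmapped buffer */
            return -EIO;
        }

        rx_buf->data = data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);
        /* the rx BD's haddr_hi/haddr_lo are then filled from 'mapping' */
        rxr->rx_prod_bseq += bp->rx_buf_use_size;
        return 0;
    }
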
2795 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2805 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2807 BNX2_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2815 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2817 spin_lock(&bp->phy_lock);
2819 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2820 bnx2_set_link(bp);
2821 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2822 bnx2_set_remote_link(bp);
2824 spin_unlock(&bp->phy_lock);
2841 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2849 index = (bnapi - bp->bnx2_napi);
2850 txq = netdev_get_tx_queue(bp->dev, index);
2882 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
2894 dma_unmap_page(&bp->pdev->dev,
2924 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2927 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2936 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2993 bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
3002 dma_sync_single_for_device(&bp->pdev->dev,
3006 rxr->rx_prod_bseq += bp->rx_buf_use_size;
3023 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u8 *data,
3031 err = bnx2_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
3033 bnx2_reuse_rx_data(bp, rxr, data, (u16) (ring_idx >> 16), prod);
3039 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3044 dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
3074 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3099 err = bnx2_alloc_rx_page(bp, rxr,
3105 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3110 dma_unmap_page(&bp->pdev->dev, mapping_old,
3140 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3179 dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
3194 } else if (len > bp->rx_jumbo_thresh) {
3195 hdr_len = bp->rx_jumbo_thresh;
3205 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3212 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3219 if (len <= bp->rx_copy_thresh) {
3220 skb = netdev_alloc_skb(bp->dev, len + 6);
3222 bnx2_reuse_rx_data(bp, rxr, data, sw_ring_cons,
3234 bnx2_reuse_rx_data(bp, rxr, data,
3238 skb = bnx2_rx_skb(bp, rxr, data, len, hdr_len, dma_addr,
3244 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG))
3247 skb->protocol = eth_type_trans(skb, bp->dev);
3249 if (len > (bp->dev->mtu + ETH_HLEN) &&
3259 if ((bp->dev->features & NETIF_F_RXCSUM) &&
3267 if ((bp->dev->features & NETIF_F_RXHASH) &&
3273 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3294 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3296 BNX2_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3298 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3311 struct bnx2 *bp = bnapi->bp;
3314 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3319 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3331 struct bnx2 *bp = bnapi->bp;
3336 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3348 struct bnx2 *bp = bnapi->bp;
3358 (BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3362 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3369 BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3372 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3419 bnx2_chk_missed_msi(struct bnx2 *bp)
3421 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3425 msi_ctrl = BNX2_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3429 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3430 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3432 BNX2_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3433 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3437 bp->idle_chk_status_idx = bnapi->last_status_idx;
3441 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3449 c_ops = rcu_dereference(bp->cnic_ops);
3451 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3457 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3466 bnx2_phy_int(bp, bnapi);
3471 BNX2_WR(bp, BNX2_HC_COMMAND,
3472 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3473 BNX2_RD(bp, BNX2_HC_COMMAND);
3477 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3484 bnx2_tx_int(bp, bnapi, 0);
3487 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3495 struct bnx2 *bp = bnapi->bp;
3500 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3510 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3522 struct bnx2 *bp = bnapi->bp;
3527 bnx2_poll_link(bp, bnapi);
3529 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3532 bnx2_poll_cnic(bp, bnapi);
3547 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3548 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3553 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3558 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3574 struct bnx2 *bp = netdev_priv(dev);
3582 spin_lock_bh(&bp->phy_lock);
3584 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3588 (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3598 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3621 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3636 bnx2_set_mac_addr(bp, ha->addr,
3645 if (rx_mode != bp->rx_mode) {
3646 bp->rx_mode = rx_mode;
3647 BNX2_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3650 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3651 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3652 BNX2_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3654 spin_unlock_bh(&bp->phy_lock);
3684 static void bnx2_release_firmware(struct bnx2 *bp)
3686 if (bp->rv2p_firmware) {
3687 release_firmware(bp->mips_firmware);
3688 release_firmware(bp->rv2p_firmware);
3689 bp->rv2p_firmware = NULL;
3693 static int bnx2_request_uncached_firmware(struct bnx2 *bp)
3700 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
3702 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A0) ||
3703 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5709_A1))
3712 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3718 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3723 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3724 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3725 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3726 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3727 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3728 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3729 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3730 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3735 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3736 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3737 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3746 release_firmware(bp->rv2p_firmware);
3747 bp->rv2p_firmware = NULL;
3749 release_firmware(bp->mips_firmware);
3753 static int bnx2_request_firmware(struct bnx2 *bp)
3755 return bp->rv2p_firmware ? 0 : bnx2_request_uncached_firmware(bp);
3771 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3782 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3793 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3795 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3799 BNX2_WR(bp, addr, val);
3802 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3809 BNX2_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3812 BNX2_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3815 BNX2_WR(bp, addr, val);
3821 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3824 BNX2_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3831 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3840 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3842 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3843 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3849 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3856 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3863 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3870 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3877 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3884 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3888 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3891 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3894 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3896 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3897 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3901 bnx2_init_cpus(struct bnx2 *bp)
3904 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3906 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3909 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3910 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3913 load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3916 load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3919 load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3922 load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3925 load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
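
load_cpu_fw() (lines 3831-3897) brings up each on-chip MIPS processor in the same order: halt it, copy the firmware sections in through the indirect window, program the start PC, then release the halt. A control-flow sketch with the .text/.data/.rodata copies elided (struct cpu_reg field names as in the driver):

    static void
    load_cpu_fw_sketch(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
                       u32 start_addr)
    {
        u32 val;

        /* 1. halt the processor and clear its state */
        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
        val |= cpu_reg->mode_value_halt;
        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);

        /* 2. firmware sections copied in via bnx2_reg_wr_ind() (elided) */

        /* 3. clear the instruction register and set the start address */
        bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
        bnx2_reg_wr_ind(bp, cpu_reg->pc, start_addr);

        /* 4. release the halt so the processor starts executing */
        val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
        val &= ~cpu_reg->mode_value_halt;
        bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
        bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
    }
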
3929 bnx2_setup_wol(struct bnx2 *bp)
3934 if (bp->wol) {
3938 autoneg = bp->autoneg;
3939 advertising = bp->advertising;
3941 if (bp->phy_port == PORT_TP) {
3942 bp->autoneg = AUTONEG_SPEED;
3943 bp->advertising = ADVERTISED_10baseT_Half |
3950 spin_lock_bh(&bp->phy_lock);
3951 bnx2_setup_phy(bp, bp->phy_port);
3952 spin_unlock_bh(&bp->phy_lock);
3954 bp->autoneg = autoneg;
3955 bp->advertising = advertising;
3957 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3959 val = BNX2_RD(bp, BNX2_EMAC_MODE);
3966 if (bp->phy_port == PORT_TP) {
3970 if (bp->line_speed == SPEED_2500)
3974 BNX2_WR(bp, BNX2_EMAC_MODE, val);
3978 BNX2_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3981 BNX2_WR(bp, BNX2_EMAC_RX_MODE, BNX2_EMAC_RX_MODE_SORT_MODE);
3984 BNX2_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3985 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val);
3986 BNX2_WR(bp, BNX2_RPM_SORT_USER0, val | BNX2_RPM_SORT_USER0_ENA);
3989 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3994 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
3996 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4003 if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
4007 if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
4008 bnx2_fw_sync(bp, wol_msg, 1, 0);
4014 val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
4015 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
4017 bnx2_fw_sync(bp, wol_msg, 1, 0);
4018 bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
4024 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
4030 pci_enable_wake(bp->pdev, PCI_D0, false);
4031 pci_set_power_state(bp->pdev, PCI_D0);
4033 val = BNX2_RD(bp, BNX2_EMAC_MODE);
4036 BNX2_WR(bp, BNX2_EMAC_MODE, val);
4038 val = BNX2_RD(bp, BNX2_RPM_CONFIG);
4040 BNX2_WR(bp, BNX2_RPM_CONFIG, val);
4044 bnx2_setup_wol(bp);
4045 pci_wake_from_d3(bp->pdev, bp->wol);
4046 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4047 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)) {
4049 if (bp->wol)
4050 pci_set_power_state(bp->pdev, PCI_D3hot);
4054 if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4061 val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
4064 bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
4066 pci_set_power_state(bp->pdev, PCI_D3hot);
4080 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4086 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4088 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4102 bnx2_release_nvram_lock(struct bnx2 *bp)
4108 BNX2_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4111 val = BNX2_RD(bp, BNX2_NVM_SW_ARB);
4126 bnx2_enable_nvram_write(struct bnx2 *bp)
4130 val = BNX2_RD(bp, BNX2_MISC_CFG);
4131 BNX2_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4133 if (bp->flash_info->flags & BNX2_NV_WREN) {
4136 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4137 BNX2_WR(bp, BNX2_NVM_COMMAND,
4143 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4155 bnx2_disable_nvram_write(struct bnx2 *bp)
4159 val = BNX2_RD(bp, BNX2_MISC_CFG);
4160 BNX2_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4165 bnx2_enable_nvram_access(struct bnx2 *bp)
4169 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4171 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4176 bnx2_disable_nvram_access(struct bnx2 *bp)
4180 val = BNX2_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4182 BNX2_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4188 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4193 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4202 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4205 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4208 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4216 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4228 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4237 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4238 offset = ((offset / bp->flash_info->page_size) <<
4239 bp->flash_info->page_bits) +
4240 (offset % bp->flash_info->page_size);
4244 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4247 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4250 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4258 val = BNX2_RD(bp, BNX2_NVM_COMMAND);
4260 __be32 v = cpu_to_be32(BNX2_RD(bp, BNX2_NVM_READ));
4273 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4283 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4284 offset = ((offset / bp->flash_info->page_size) <<
4285 bp->flash_info->page_bits) +
4286 (offset % bp->flash_info->page_size);
4290 BNX2_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4295 BNX2_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4298 BNX2_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4301 BNX2_WR(bp, BNX2_NVM_COMMAND, cmd);
4307 if (BNX2_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4317 bnx2_init_nvram(struct bnx2 *bp)
4323 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4324 bp->flash_info = &flash_5709;
4329 val = BNX2_RD(bp, BNX2_NVM_CFG1);
4340 bp->flash_info = flash;
4358 bp->flash_info = flash;
4361 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4365 bnx2_enable_nvram_access(bp);
4368 BNX2_WR(bp, BNX2_NVM_CFG1, flash->config1);
4369 BNX2_WR(bp, BNX2_NVM_CFG2, flash->config2);
4370 BNX2_WR(bp, BNX2_NVM_CFG3, flash->config3);
4371 BNX2_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4374 bnx2_disable_nvram_access(bp);
4375 bnx2_release_nvram_lock(bp);
4383 bp->flash_info = NULL;
4389 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4392 bp->flash_size = val;
4394 bp->flash_size = bp->flash_info->total_size;
4400 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4410 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4414 bnx2_enable_nvram_access(bp);
4438 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4463 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4476 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4484 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4496 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4502 bnx2_disable_nvram_access(bp);
4504 bnx2_release_nvram_lock(bp);
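
Every NVRAM operation at lines 4400-4504 sits inside the same bracket: take the hardware arbitration lock (shared with the bootcode), enable host access, do the dword transfers, then tear both back down, even on error. A sketch for a single aligned dword, assuming the BNX2_NVM_COMMAND_FIRST/LAST flags from bnx2.h:

    static int
    bnx2_nvram_read_one_sketch(struct bnx2 *bp, u32 offset, u8 *ret_buf)
    {
        int rc;

        rc = bnx2_acquire_nvram_lock(bp);  /* arbitrate with the bootcode */
        if (rc)
            return rc;
        bnx2_enable_nvram_access(bp);

        /* one aligned dword; FIRST|LAST marks a self-contained burst */
        rc = bnx2_nvram_read_dword(bp, offset, ret_buf,
                                   BNX2_NVM_COMMAND_FIRST |
                                   BNX2_NVM_COMMAND_LAST);

        bnx2_disable_nvram_access(bp);
        bnx2_release_nvram_lock(bp);
        return rc;
    }
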
4510 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4528 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4535 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4553 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4569 page_start -= (page_start % bp->flash_info->page_size);
4571 page_end = page_start + bp->flash_info->page_size;
4579 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4583 bnx2_enable_nvram_access(bp);
4586 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4591 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4592 if (j == (bp->flash_info->page_size - 4)) {
4595 rc = bnx2_nvram_read_dword(bp,
4608 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4614 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4616 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4620 bnx2_enable_nvram_write(bp);
4625 rc = bnx2_nvram_write_dword(bp, addr,
4638 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4643 rc = bnx2_nvram_write_dword(bp, addr, buf,
4655 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4662 rc = bnx2_nvram_write_dword(bp, addr,
4673 bnx2_disable_nvram_write(bp);
4676 bnx2_disable_nvram_access(bp);
4677 bnx2_release_nvram_lock(bp);
4690 bnx2_init_fw_cap(struct bnx2 *bp)
4694 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4695 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4697 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4698 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4700 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4705 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4709 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4713 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4715 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4717 bp->phy_port = PORT_FIBRE;
4719 bp->phy_port = PORT_TP;
4725 if (netif_running(bp->dev) && sig)
4726 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4730 bnx2_setup_msix_tbl(struct bnx2 *bp)
4732 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4734 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4735 BNX2_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4739 bnx2_wait_dma_complete(struct bnx2 *bp)
4748 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
4749 (BNX2_CHIP(bp) == BNX2_CHIP_5708)) {
4750 BNX2_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4755 val = BNX2_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4758 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4760 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4761 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4765 val = BNX2_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4776 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4784 bnx2_wait_dma_complete(bp);
4787 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4791 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4796 val = BNX2_RD(bp, BNX2_MISC_ID);
4798 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4799 BNX2_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4800 BNX2_RD(bp, BNX2_MISC_COMMAND);
4806 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4814 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4820 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
4821 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1))
4826 val = BNX2_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4841 val = BNX2_RD(bp, BNX2_PCI_SWAP_DIAG0);
4848 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4852 spin_lock_bh(&bp->phy_lock);
4853 old_port = bp->phy_port;
4854 bnx2_init_fw_cap(bp);
4855 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4856 old_port != bp->phy_port)
4857 bnx2_set_default_remote_link(bp);
4858 spin_unlock_bh(&bp->phy_lock);
4860 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4863 BNX2_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4866 rc = bnx2_alloc_bad_rbuf(bp);
4869 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4870 bnx2_setup_msix_tbl(bp);
4872 BNX2_WR(bp, BNX2_MISC_ECO_HW_CTL,
4880 bnx2_init_chip(struct bnx2 *bp)
4886 BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4899 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4902 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) &&
4903 (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0) &&
4904 !(bp->flags & BNX2_FLAG_PCIX))
4907 BNX2_WR(bp, BNX2_DMA_CONFIG, val);
4909 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
4910 val = BNX2_RD(bp, BNX2_TDMA_CONFIG);
4912 BNX2_WR(bp, BNX2_TDMA_CONFIG, val);
4915 if (bp->flags & BNX2_FLAG_PCIX) {
4918 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4920 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4924 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4931 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4932 rc = bnx2_init_5709_context(bp);
4936 bnx2_init_context(bp);
4938 bnx2_init_cpus(bp);
4940 bnx2_init_nvram(bp);
4942 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4944 val = BNX2_RD(bp, BNX2_MQ_CONFIG);
4947 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
4949 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
4953 BNX2_WR(bp, BNX2_MQ_CONFIG, val);
4956 BNX2_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4957 BNX2_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4960 BNX2_WR(bp, BNX2_RV2P_CONFIG, val);
4963 val = BNX2_RD(bp, BNX2_TBDR_CONFIG);
4966 BNX2_WR(bp, BNX2_TBDR_CONFIG, val);
4968 val = bp->mac_addr[0] +
4969 (bp->mac_addr[1] << 8) +
4970 (bp->mac_addr[2] << 16) +
4971 bp->mac_addr[3] +
4972 (bp->mac_addr[4] << 8) +
4973 (bp->mac_addr[5] << 16);
4974 BNX2_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4977 mtu = bp->dev->mtu;
4981 BNX2_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4986 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4987 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4988 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4990 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4992 bp->bnx2_napi[i].last_status_idx = 0;
4994 bp->idle_chk_status_idx = 0xffff;
4997 BNX2_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4999 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_L,
5000 (u64) bp->status_blk_mapping & 0xffffffff);
5001 BNX2_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
5003 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
5004 (u64) bp->stats_blk_mapping & 0xffffffff);
5005 BNX2_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
5006 (u64) bp->stats_blk_mapping >> 32);
5008 BNX2_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
5009 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
5011 BNX2_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
5012 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
5014 BNX2_WR(bp, BNX2_HC_COMP_PROD_TRIP,
5015 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
5017 BNX2_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
5019 BNX2_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
5021 BNX2_WR(bp, BNX2_HC_COM_TICKS,
5022 (bp->com_ticks_int << 16) | bp->com_ticks);
5024 BNX2_WR(bp, BNX2_HC_CMD_TICKS,
5025 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
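Each host-coalescing register written above packs two 16-bit values: the during-interrupt setting in the high half and the default in the low half. A hypothetical helper that makes the packing explicit:

	static inline u32 bnx2_hc_pack(u16 during_int, u16 dflt)
	{
		return ((u32)during_int << 16) | dflt;
	}

	/* e.g.: BNX2_WR(bp, BNX2_HC_TX_TICKS,
	 *		 bnx2_hc_pack(bp->tx_ticks_int, bp->tx_ticks)); */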
5027 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
5028 BNX2_WR(bp, BNX2_HC_STATS_TICKS, 0);
5030 BNX2_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
5031 BNX2_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
5033 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1)
5040 if (bp->flags & BNX2_FLAG_USING_MSIX) {
5041 BNX2_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
5047 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
5050 BNX2_WR(bp, BNX2_HC_CONFIG, val);
5052 if (bp->rx_ticks < 25)
5053 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 1);
5055 bnx2_reg_wr_ind(bp, BNX2_FW_RX_LOW_LATENCY, 0);
5057 for (i = 1; i < bp->irq_nvecs; i++) {
5061 BNX2_WR(bp, base,
5066 BNX2_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
5067 (bp->tx_quick_cons_trip_int << 16) |
5068 bp->tx_quick_cons_trip);
5070 BNX2_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
5071 (bp->tx_ticks_int << 16) | bp->tx_ticks);
5073 BNX2_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
5074 (bp->rx_quick_cons_trip_int << 16) |
5075 bp->rx_quick_cons_trip);
5077 BNX2_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
5078 (bp->rx_ticks_int << 16) | bp->rx_ticks);
5082 BNX2_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
5084 BNX2_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
5087 bnx2_set_rx_mode(bp->dev);
5089 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5090 val = BNX2_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5092 BNX2_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5094 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5097 BNX2_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5098 BNX2_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5102 bp->hc_cmd = BNX2_RD(bp, BNX2_HC_COMMAND);
5108 bnx2_clear_ring_states(struct bnx2 *bp)
5116 bnapi = &bp->bnx2_napi[i];
5131 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5136 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5148 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5151 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5154 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5157 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5161 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5168 bnapi = &bp->bnx2_napi[ring_num];
5176 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5189 bnx2_init_tx_context(bp, cid, txr);
5217 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5222 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5233 bp->rx_buf_use_size, bp->rx_max_ring);
5235 bnx2_init_rx_context(bp, cid);
5237 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
5238 val = BNX2_RD(bp, BNX2_MQ_MAP_L2_5);
5239 BNX2_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5242 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5243 if (bp->rx_pg_ring_size) {
5246 PAGE_SIZE, bp->rx_max_pg_ring);
5247 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5248 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5249 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5253 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5256 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5258 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5259 BNX2_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5263 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5266 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5269 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5270 if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5271 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5272 ring_num, i, bp->rx_pg_ring_size);
5281 for (i = 0; i < bp->rx_ring_size; i++) {
5282 if (bnx2_alloc_rx_data(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
5283 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5284 ring_num, i, bp->rx_ring_size);
5296 BNX2_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5297 BNX2_WR16(bp, rxr->rx_bidx_addr, prod);
5299 BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
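The two writes above are the rx doorbell: the chip starts consuming the refilled buffers only after both the 16-bit producer index and the running byte-sequence count are updated. A sketch of the pair, using the field names visible above:

	static void bnx2_rx_publish(struct bnx2 *bp,
				    struct bnx2_rx_ring_info *rxr, u16 prod)
	{
		BNX2_WR16(bp, rxr->rx_bidx_addr, prod);		   /* producer index */
		BNX2_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq); /* byte count */
	}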
5303 bnx2_init_all_rings(struct bnx2 *bp)
5308 bnx2_clear_ring_states(bp);
5310 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5311 for (i = 0; i < bp->num_tx_rings; i++)
5312 bnx2_init_tx_ring(bp, i);
5314 if (bp->num_tx_rings > 1)
5315 BNX2_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5318 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5319 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5321 for (i = 0; i < bp->num_rx_rings; i++)
5322 bnx2_init_rx_ring(bp, i);
5324 if (bp->num_rx_rings > 1) {
5330 tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
5332 BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
5333 BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
5344 BNX2_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
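The loop above fills the RSS indirection table round-robin over the non-default rx rings and commits it through RLUP_RSS_DATA/RLUP_RSS_COMMAND. A hedged reconstruction (entry width, table size, and the command flag names are assumptions):

	u32 tbl_32 = 0;
	int i;

	for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
		int shift = (i % 8) << 2;	/* eight 4-bit entries per word */

		tbl_32 |= (i % (bp->num_rx_rings - 1)) << shift;
		if ((i % 8) == 7) {		/* word full: commit at index i >> 3 */
			BNX2_WR(bp, BNX2_RLUP_RSS_DATA, tbl_32);
			BNX2_WR(bp, BNX2_RLUP_RSS_COMMAND, (i >> 3) |
				BNX2_RLUP_RSS_COMMAND_RSS_WRITE_MASK |
				BNX2_RLUP_RSS_COMMAND_WRITE_ENABLE |
				BNX2_RLUP_RSS_COMMAND_HASH_MASK);
			tbl_32 = 0;
		}
	}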
5369 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5374 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5379 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5380 bp->rx_pg_ring_size = 0;
5381 bp->rx_max_pg_ring = 0;
5382 bp->rx_max_pg_ring_idx = 0;
5383 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5384 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5390 bp->rx_pg_ring_size = jumbo_size;
5391 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5393 bp->rx_max_pg_ring_idx =
5394 (bp->rx_max_pg_ring * BNX2_RX_DESC_CNT) - 1;
5396 bp->rx_copy_thresh = 0;
5399 bp->rx_buf_use_size = rx_size;
5401 bp->rx_buf_size = kmalloc_size_roundup(
5402 SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
5404 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5405 bp->rx_ring_size = size;
5406 bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);
5407 bp->rx_max_ring_idx = (bp->rx_max_ring * BNX2_RX_DESC_CNT) - 1;
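bnx2_find_max_ring() is only called above, never shown; a plausible sketch of its contract, assuming it returns a power-of-two count of descriptor pages large enough to hold the requested entries, capped at the maximum:

	/* assumed behaviour, for orientation only */
	static u32 find_max_ring_sketch(u32 ring_size, u32 max_rings)
	{
		u32 num = 1;

		while (num < max_rings && ring_size > num * BNX2_RX_DESC_CNT)
			num <<= 1;
		return num;
	}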
5411 bnx2_free_tx_skbs(struct bnx2 *bp)
5415 for (i = 0; i < bp->num_tx_rings; i++) {
5416 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5433 dma_unmap_single(&bp->pdev->dev,
5444 dma_unmap_page(&bp->pdev->dev,
5451 netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
5456 bnx2_free_rx_skbs(struct bnx2 *bp)
5460 for (i = 0; i < bp->num_rx_rings; i++) {
5461 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5468 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5475 dma_unmap_single(&bp->pdev->dev,
5477 bp->rx_buf_use_size,
5484 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5485 bnx2_free_rx_page(bp, rxr, j);
5490 bnx2_free_skbs(struct bnx2 *bp)
5492 bnx2_free_tx_skbs(bp);
5493 bnx2_free_rx_skbs(bp);
5497 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5501 rc = bnx2_reset_chip(bp, reset_code);
5502 bnx2_free_skbs(bp);
5506 if ((rc = bnx2_init_chip(bp)) != 0)
5509 bnx2_init_all_rings(bp);
5514 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5518 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5521 spin_lock_bh(&bp->phy_lock);
5522 bnx2_init_phy(bp, reset_phy);
5523 bnx2_set_link(bp);
5524 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5525 bnx2_remote_phy_event(bp);
5526 spin_unlock_bh(&bp->phy_lock);
5531 bnx2_shutdown_chip(struct bnx2 *bp)
5535 if (bp->flags & BNX2_FLAG_NO_WOL)
5537 else if (bp->wol)
5542 return bnx2_reset_chip(bp, reset_code);
5546 bnx2_test_registers(struct bnx2 *bp)
5667 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5681 save_val = readl(bp->regview + offset);
5683 writel(0, bp->regview + offset);
5685 val = readl(bp->regview + offset);
5694 writel(0xffffffff, bp->regview + offset);
5696 val = readl(bp->regview + offset);
5705 writel(save_val, bp->regview + offset);
5709 writel(save_val, bp->regview + offset);
5717 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5728 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5730 if (bnx2_reg_rd_ind(bp, start + offset) !=
5740 bnx2_test_memory(struct bnx2 *bp)
5766 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
5772 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5785 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5797 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5806 bp->loopback = MAC_LOOPBACK;
5807 bnx2_set_mac_loopback(bp);
5810 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5813 bp->loopback = PHY_LOOPBACK;
5814 bnx2_set_phy_loopback(bp);
5819 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5820 skb = netdev_alloc_skb(bp->dev, pkt_size);
5824 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
5829 map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
5831 if (dma_mapping_error(&bp->pdev->dev, map)) {
5836 BNX2_WR(bp, BNX2_HC_COMMAND,
5837 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5839 BNX2_RD(bp, BNX2_HC_COMMAND);
5857 BNX2_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5858 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5862 BNX2_WR(bp, BNX2_HC_COMMAND,
5863 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5865 BNX2_RD(bp, BNX2_HC_COMMAND);
5869 dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE);
5886 dma_sync_single_for_cpu(&bp->pdev->dev,
5888 bp->rx_buf_use_size, DMA_FROM_DEVICE);
5913 bp->loopback = 0;
5923 bnx2_test_loopback(struct bnx2 *bp)
5927 if (!netif_running(bp->dev))
5930 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5931 spin_lock_bh(&bp->phy_lock);
5932 bnx2_init_phy(bp, 1);
5933 spin_unlock_bh(&bp->phy_lock);
5934 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5936 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5945 bnx2_test_nvram(struct bnx2 *bp)
5952 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5961 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5980 bnx2_test_link(struct bnx2 *bp)
5984 if (!netif_running(bp->dev))
5987 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5988 if (bp->link_up)
5992 spin_lock_bh(&bp->phy_lock);
5993 bnx2_enable_bmsr1(bp);
5994 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5995 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5996 bnx2_disable_bmsr1(bp);
5997 spin_unlock_bh(&bp->phy_lock);
6006 bnx2_test_intr(struct bnx2 *bp)
6011 if (!netif_running(bp->dev))
6014 status_idx = BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
6017 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
6018 BNX2_RD(bp, BNX2_HC_COMMAND);
6021 if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
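The interrupt self-test above latches the current status-block index, forces the host coalescer to fire with COAL_NOW, and then checks that the index advanced. The polling the excerpt elides presumably looks like this (a 10 x 10 ms budget is assumed):

	for (i = 0; i < 10; i++) {
		if ((BNX2_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
		    status_idx)
			return 0;	/* index moved: the IRQ was serviced */
		msleep(10);
	}
	return -ENODEV;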
6037 bnx2_5706_serdes_has_link(struct bnx2 *bp)
6041 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
6044 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
6045 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
6050 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6051 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6052 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
6057 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
6058 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6059 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
6068 bnx2_5706_serdes_timer(struct bnx2 *bp)
6072 spin_lock(&bp->phy_lock);
6073 if (bp->serdes_an_pending) {
6074 bp->serdes_an_pending--;
6076 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6079 bp->current_interval = BNX2_TIMER_INTERVAL;
6081 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6084 if (bnx2_5706_serdes_has_link(bp)) {
6087 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6088 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
6092 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
6093 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6096 bnx2_write_phy(bp, 0x17, 0x0f01);
6097 bnx2_read_phy(bp, 0x15, &phy2);
6101 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6103 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6105 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6108 bp->current_interval = BNX2_TIMER_INTERVAL;
6113 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6114 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6115 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6117 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6118 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6119 bnx2_5706s_force_link_dn(bp, 1);
6120 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6122 bnx2_set_link(bp);
6123 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6124 bnx2_set_link(bp);
6126 spin_unlock(&bp->phy_lock);
6130 bnx2_5708_serdes_timer(struct bnx2 *bp)
6132 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6135 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6136 bp->serdes_an_pending = 0;
6140 spin_lock(&bp->phy_lock);
6141 if (bp->serdes_an_pending)
6142 bp->serdes_an_pending--;
6143 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6146 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6148 bnx2_enable_forced_2g5(bp);
6149 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6151 bnx2_disable_forced_2g5(bp);
6152 bp->serdes_an_pending = 2;
6153 bp->current_interval = BNX2_TIMER_INTERVAL;
6157 bp->current_interval = BNX2_TIMER_INTERVAL;
6159 spin_unlock(&bp->phy_lock);
6165 struct bnx2 *bp = from_timer(bp, t, timer);
6167 if (!netif_running(bp->dev))
6170 if (atomic_read(&bp->intr_sem) != 0)
6173 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6175 bnx2_chk_missed_msi(bp);
6177 bnx2_send_heart_beat(bp);
6179 bp->stats_blk->stat_FwRxDrop =
6180 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6183 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6184 BNX2_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6187 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6188 if (BNX2_CHIP(bp) == BNX2_CHIP_5706)
6189 bnx2_5706_serdes_timer(bp);
6191 bnx2_5708_serdes_timer(bp);
6195 mod_timer(&bp->timer, jiffies + bp->current_interval);
6199 bnx2_request_irq(struct bnx2 *bp)
6205 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6210 for (i = 0; i < bp->irq_nvecs; i++) {
6211 irq = &bp->irq_tbl[i];
6213 &bp->bnx2_napi[i]);
6222 __bnx2_free_irq(struct bnx2 *bp)
6227 for (i = 0; i < bp->irq_nvecs; i++) {
6228 irq = &bp->irq_tbl[i];
6230 free_irq(irq->vector, &bp->bnx2_napi[i]);
6236 bnx2_free_irq(struct bnx2 *bp)
6239 __bnx2_free_irq(bp);
6240 if (bp->flags & BNX2_FLAG_USING_MSI)
6241 pci_disable_msi(bp->pdev);
6242 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6243 pci_disable_msix(bp->pdev);
6245 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6249 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6253 struct net_device *dev = bp->dev;
6254 const int len = sizeof(bp->irq_tbl[0].name);
6256 bnx2_setup_msix_tbl(bp);
6257 BNX2_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6258 BNX2_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6259 BNX2_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6263 BNX2_RD(bp, BNX2_PCI_MSIX_CONTROL);
6274 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
6283 bp->irq_nvecs = msix_vecs;
6284 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6286 bp->irq_tbl[i].vector = msix_ent[i].vector;
6287 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6288 bp->irq_tbl[i].handler = bnx2_msi_1shot;
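pci_enable_msix_range() above takes a minimum and maximum vector count and returns how many vectors were actually granted, so a partial allocation is still usable. A hedged sketch of the fallback around the call (BNX2_MIN_MSIX_VEC is an assumed name):

	total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
					   BNX2_MIN_MSIX_VEC, total_vecs);
	if (total_vecs < 0)
		return;	/* BNX2_FLAG_USING_MSIX stays clear; MSI/INTx follows */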
6293 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6298 if (!bp->num_req_rx_rings)
6299 msix_vecs = max(cpus + 1, bp->num_req_tx_rings);
6300 else if (!bp->num_req_tx_rings)
6301 msix_vecs = max(cpus, bp->num_req_rx_rings);
6303 msix_vecs = max(bp->num_req_rx_rings, bp->num_req_tx_rings);
6307 bp->irq_tbl[0].handler = bnx2_interrupt;
6308 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6309 bp->irq_nvecs = 1;
6310 bp->irq_tbl[0].vector = bp->pdev->irq;
6312 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
6313 bnx2_enable_msix(bp, msix_vecs);
6315 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6316 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6317 if (pci_enable_msi(bp->pdev) == 0) {
6318 bp->flags |= BNX2_FLAG_USING_MSI;
6319 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
6320 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6321 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6323 bp->irq_tbl[0].handler = bnx2_msi;
6325 bp->irq_tbl[0].vector = bp->pdev->irq;
6329 if (!bp->num_req_tx_rings)
6330 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6332 bp->num_tx_rings = min(bp->irq_nvecs, bp->num_req_tx_rings);
6334 if (!bp->num_req_rx_rings)
6335 bp->num_rx_rings = bp->irq_nvecs;
6337 bp->num_rx_rings = min(bp->irq_nvecs, bp->num_req_rx_rings);
6339 netif_set_real_num_tx_queues(bp->dev, bp->num_tx_rings);
6341 return netif_set_real_num_rx_queues(bp->dev, bp->num_rx_rings);
6348 struct bnx2 *bp = netdev_priv(dev);
6351 rc = bnx2_request_firmware(bp);
6357 bnx2_disable_int(bp);
6359 rc = bnx2_setup_int_mode(bp, disable_msi);
6362 bnx2_init_napi(bp);
6363 bnx2_napi_enable(bp);
6364 rc = bnx2_alloc_mem(bp);
6368 rc = bnx2_request_irq(bp);
6372 rc = bnx2_init_nic(bp, 1);
6376 mod_timer(&bp->timer, jiffies + bp->current_interval);
6378 atomic_set(&bp->intr_sem, 0);
6380 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6382 bnx2_enable_int(bp);
6384 if (bp->flags & BNX2_FLAG_USING_MSI) {
6388 if (bnx2_test_intr(bp) != 0) {
6389 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6391 bnx2_disable_int(bp);
6392 bnx2_free_irq(bp);
6394 bnx2_setup_int_mode(bp, 1);
6396 rc = bnx2_init_nic(bp, 0);
6399 rc = bnx2_request_irq(bp);
6402 del_timer_sync(&bp->timer);
6405 bnx2_enable_int(bp);
6408 if (bp->flags & BNX2_FLAG_USING_MSI)
6410 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6418 bnx2_napi_disable(bp);
6419 bnx2_free_skbs(bp);
6420 bnx2_free_irq(bp);
6421 bnx2_free_mem(bp);
6422 bnx2_del_napi(bp);
6423 bnx2_release_firmware(bp);
6430 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6435 if (!netif_running(bp->dev)) {
6440 bnx2_netif_stop(bp, true);
6442 pci_read_config_word(bp->pdev, PCI_COMMAND, &pcicmd);
6445 pci_restore_state(bp->pdev);
6446 pci_save_state(bp->pdev);
6448 rc = bnx2_init_nic(bp, 1);
6450 netdev_err(bp->dev, "failed to reset NIC, closing\n");
6451 bnx2_napi_enable(bp);
6452 dev_close(bp->dev);
6457 atomic_set(&bp->intr_sem, 1);
6458 bnx2_netif_start(bp, true);
6465 bnx2_dump_ftq(struct bnx2 *bp)
6469 struct net_device *dev = bp->dev;
6493 bnx2_reg_rd_ind(bp, ftq_arr[i].off));
6498 reg, bnx2_reg_rd_ind(bp, reg),
6499 bnx2_reg_rd_ind(bp, reg + 4),
6500 bnx2_reg_rd_ind(bp, reg + 8),
6501 bnx2_reg_rd_ind(bp, reg + 0x1c),
6502 bnx2_reg_rd_ind(bp, reg + 0x1c),
6503 bnx2_reg_rd_ind(bp, reg + 0x20));
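Reading `reg + 0x1c` twice above is deliberate: that field is the CPU program counter, and two consecutive samples show whether the processor is actually advancing or wedged.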
6508 BNX2_RD(bp, BNX2_TBDC_STATUS) & BNX2_TBDC_STATUS_FREE_CNT);
6513 BNX2_WR(bp, BNX2_TBDC_BD_ADDR, i);
6514 BNX2_WR(bp, BNX2_TBDC_CAM_OPCODE,
6516 BNX2_WR(bp, BNX2_TBDC_COMMAND, BNX2_TBDC_COMMAND_CMD_REG_ARB);
6517 while ((BNX2_RD(bp, BNX2_TBDC_COMMAND) &
6521 cid = BNX2_RD(bp, BNX2_TBDC_CID);
6522 bdidx = BNX2_RD(bp, BNX2_TBDC_BIDX);
6523 valid = BNX2_RD(bp, BNX2_TBDC_CAM_OPCODE);
6532 bnx2_dump_state(struct bnx2 *bp)
6534 struct net_device *dev = bp->dev;
6537 pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
6539 atomic_read(&bp->intr_sem), val1);
6540 pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
6541 pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
6544 BNX2_RD(bp, BNX2_EMAC_TX_STATUS),
6545 BNX2_RD(bp, BNX2_EMAC_RX_STATUS));
6547 BNX2_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6549 BNX2_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6550 if (bp->flags & BNX2_FLAG_USING_MSIX)
6552 BNX2_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6558 struct bnx2 *bp = netdev_priv(dev);
6560 bnx2_dump_ftq(bp);
6561 bnx2_dump_state(bp);
6562 bnx2_dump_mcp_state(bp);
6565 schedule_work(&bp->reset_task);
6575 struct bnx2 *bp = netdev_priv(dev);
6588 bnapi = &bp->bnx2_napi[i];
6592 if (unlikely(bnx2_tx_avail(bp, txr) <
6647 mapping = dma_map_single(&bp->pdev->dev, skb->data, len,
6649 if (dma_mapping_error(&bp->pdev->dev, mapping)) {
6677 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0, len,
6679 if (dma_mapping_error(&bp->pdev->dev, mapping))
6700 BNX2_WR16(bp, txr->tx_bidx_addr, prod);
6701 BNX2_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6705 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6714 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
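The stop/wake sequence above is the standard lockless tx flow control: stop the queue when a worst-case skb (MAX_SKB_FRAGS descriptors) might not fit, re-check because the completion path may have freed slots in the meantime, and wake only once tx_wake_thresh (half the ring, per bnx2_init_tx_ring) is available. Shape of the pattern, with txq as returned by netdev_get_tx_queue():

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* barrier orders the stop against the avail re-check,
		 * pairing with the consumer update in the completion path */
		smp_mb();
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}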
6728 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6736 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
6749 struct bnx2 *bp = netdev_priv(dev);
6751 bnx2_disable_int_sync(bp);
6752 bnx2_napi_disable(bp);
6754 del_timer_sync(&bp->timer);
6755 bnx2_shutdown_chip(bp);
6756 bnx2_free_irq(bp);
6757 bnx2_free_skbs(bp);
6758 bnx2_free_mem(bp);
6759 bnx2_del_napi(bp);
6760 bp->link_up = 0;
6761 netif_carrier_off(bp->dev);
6766 bnx2_save_stats(struct bnx2 *bp)
6768 u32 *hw_stats = (u32 *) bp->stats_blk;
6769 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6793 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6794 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6797 (unsigned long) (bp->stats_blk->ctr + \
6798 bp->temp_stats_blk->ctr)
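Both macro shapes above add the live hardware counter to temp_stats_blk, the snapshot bnx2_save_stats() takes before a reset, so user-visible totals survive chip reinitialization; the 64-bit variant exists for counters kept as high/low register pairs. Assumed usage, with the field name taken from the hardware statistics block:

	/* expands to  live->ctr + saved->ctr  per the definitions above */
	net_stats->rx_packets = GET_64BIT_NET_STATS(stat_IfHCInUcastPkts);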
6803 struct bnx2 *bp = netdev_priv(dev);
6805 if (!bp->stats_blk)
6852 if ((BNX2_CHIP(bp) == BNX2_CHIP_5706) ||
6853 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
6878 struct bnx2 *bp = netdev_priv(dev);
6883 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6886 } else if (bp->phy_port == PORT_FIBRE)
6894 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6906 spin_lock_bh(&bp->phy_lock);
6907 cmd->base.port = bp->phy_port;
6908 advertising = bp->advertising;
6910 if (bp->autoneg & AUTONEG_SPEED) {
6917 cmd->base.speed = bp->line_speed;
6918 cmd->base.duplex = bp->duplex;
6919 if (!(bp->phy_flags & BNX2_PHY_FLAG_SERDES)) {
6920 if (bp->phy_flags & BNX2_PHY_FLAG_MDIX)
6930 spin_unlock_bh(&bp->phy_lock);
6932 cmd->base.phy_address = bp->phy_addr;
6946 struct bnx2 *bp = netdev_priv(dev);
6947 u8 autoneg = bp->autoneg;
6948 u8 req_duplex = bp->req_duplex;
6949 u16 req_line_speed = bp->req_line_speed;
6950 u32 advertising = bp->advertising;
6953 spin_lock_bh(&bp->phy_lock);
6958 if (cmd->base.port != bp->phy_port &&
6959 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6965 if (!netif_running(dev) && cmd->base.port != bp->phy_port)
6995 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7006 bp->autoneg = autoneg;
7007 bp->advertising = advertising;
7008 bp->req_line_speed = req_line_speed;
7009 bp->req_duplex = req_duplex;
7016 err = bnx2_setup_phy(bp, cmd->base.port);
7019 spin_unlock_bh(&bp->phy_lock);
7027 struct bnx2 *bp = netdev_priv(dev);
7030 strscpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
7031 strscpy(info->fw_version, bp->fw_version, sizeof(info->fw_version));
7047 struct bnx2 *bp = netdev_priv(dev);
7077 if (!netif_running(bp->dev))
7084 *p++ = BNX2_RD(bp, offset);
7097 struct bnx2 *bp = netdev_priv(dev);
7099 if (bp->flags & BNX2_FLAG_NO_WOL) {
7105 if (bp->wol)
7116 struct bnx2 *bp = netdev_priv(dev);
7122 if (bp->flags & BNX2_FLAG_NO_WOL)
7125 bp->wol = 1;
7128 bp->wol = 0;
7131 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
7139 struct bnx2 *bp = netdev_priv(dev);
7145 if (!(bp->autoneg & AUTONEG_SPEED)) {
7149 spin_lock_bh(&bp->phy_lock);
7151 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7154 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
7155 spin_unlock_bh(&bp->phy_lock);
7160 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
7162 spin_unlock_bh(&bp->phy_lock);
7166 spin_lock_bh(&bp->phy_lock);
7168 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
7169 bp->serdes_an_pending = 1;
7170 mod_timer(&bp->timer, jiffies + bp->current_interval);
7173 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
7175 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
7177 spin_unlock_bh(&bp->phy_lock);
7185 struct bnx2 *bp = netdev_priv(dev);
7187 return bp->link_up;
7193 struct bnx2 *bp = netdev_priv(dev);
7195 if (!bp->flash_info)
7198 return (int) bp->flash_size;
7205 struct bnx2 *bp = netdev_priv(dev);
7210 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7219 struct bnx2 *bp = netdev_priv(dev);
7224 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7234 struct bnx2 *bp = netdev_priv(dev);
7238 coal->rx_coalesce_usecs = bp->rx_ticks;
7239 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7240 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7241 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7243 coal->tx_coalesce_usecs = bp->tx_ticks;
7244 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7245 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7246 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7248 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7258 struct bnx2 *bp = netdev_priv(dev);
7260 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7261 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7263 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7264 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7266 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7267 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7269 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7270 if (bp->rx_quick_cons_trip_int > 0xff)
7271 bp->rx_quick_cons_trip_int = 0xff;
7273 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7274 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7276 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7277 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7279 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7280 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7282 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7283 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7286 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7287 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7288 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7289 bp->stats_ticks = USEC_PER_SEC;
7291 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7292 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7293 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
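Every input above is clamped to what the hardware fields can hold: coalescing ticks are 10-bit (0x3ff), frame counts 8-bit (0xff). The same bounds as hypothetical helpers:

	static inline u16 bnx2_clamp_ticks(u32 usecs)	/* 10-bit tick field */
	{
		return min_t(u32, usecs, 0x3ff);
	}

	static inline u16 bnx2_clamp_frames(u32 frames)	/* 8-bit frame field */
	{
		return min_t(u32, frames, 0xff);
	}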
7295 if (netif_running(bp->dev)) {
7296 bnx2_netif_stop(bp, true);
7297 bnx2_init_nic(bp, 0);
7298 bnx2_netif_start(bp, true);
7309 struct bnx2 *bp = netdev_priv(dev);
7314 ering->rx_pending = bp->rx_ring_size;
7315 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7318 ering->tx_pending = bp->tx_ring_size;
7322 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx, bool reset_irq)
7324 if (netif_running(bp->dev)) {
7326 bnx2_save_stats(bp);
7328 bnx2_netif_stop(bp, true);
7329 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7331 bnx2_free_irq(bp);
7332 bnx2_del_napi(bp);
7334 __bnx2_free_irq(bp);
7336 bnx2_free_skbs(bp);
7337 bnx2_free_mem(bp);
7340 bnx2_set_rx_ring_size(bp, rx);
7341 bp->tx_ring_size = tx;
7343 if (netif_running(bp->dev)) {
7347 rc = bnx2_setup_int_mode(bp, disable_msi);
7348 bnx2_init_napi(bp);
7352 rc = bnx2_alloc_mem(bp);
7355 rc = bnx2_request_irq(bp);
7358 rc = bnx2_init_nic(bp, 0);
7361 bnx2_napi_enable(bp);
7362 dev_close(bp->dev);
7366 mutex_lock(&bp->cnic_lock);
7368 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7369 bnx2_setup_cnic_irq_info(bp);
7370 mutex_unlock(&bp->cnic_lock);
7372 bnx2_netif_start(bp, true);
7382 struct bnx2 *bp = netdev_priv(dev);
7391 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending,
7399 struct bnx2 *bp = netdev_priv(dev);
7401 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7402 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7403 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7409 struct bnx2 *bp = netdev_priv(dev);
7411 bp->req_flow_ctrl = 0;
7413 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7415 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7418 bp->autoneg |= AUTONEG_FLOW_CTRL;
7421 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7425 spin_lock_bh(&bp->phy_lock);
7426 bnx2_setup_phy(bp, bp->phy_port);
7427 spin_unlock_bh(&bp->phy_lock);
7587 struct bnx2 *bp = netdev_priv(dev);
7593 bnx2_netif_stop(bp, true);
7594 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7595 bnx2_free_skbs(bp);
7597 if (bnx2_test_registers(bp) != 0) {
7601 if (bnx2_test_memory(bp) != 0) {
7605 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7608 if (!netif_running(bp->dev))
7609 bnx2_shutdown_chip(bp);
7611 bnx2_init_nic(bp, 1);
7612 bnx2_netif_start(bp, true);
7617 if (bp->link_up)
7623 if (bnx2_test_nvram(bp) != 0) {
7627 if (bnx2_test_intr(bp) != 0) {
7632 if (bnx2_test_link(bp) != 0) {
7658 struct bnx2 *bp = netdev_priv(dev);
7660 u32 *hw_stats = (u32 *) bp->stats_blk;
7661 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7669 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) ||
7670 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) ||
7671 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A2) ||
7672 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0))
7704 struct bnx2 *bp = netdev_priv(dev);
7708 bp->leds_save = BNX2_RD(bp, BNX2_MISC_CFG);
7709 BNX2_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7713 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7722 BNX2_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7726 BNX2_WR(bp, BNX2_EMAC_LED, 0);
7727 BNX2_WR(bp, BNX2_MISC_CFG, bp->leds_save);
7737 struct bnx2 *bp = netdev_priv(dev);
7746 !!(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) &&
7748 bnx2_netif_stop(bp, false);
7751 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
7752 bnx2_netif_start(bp, false);
7762 struct bnx2 *bp = netdev_priv(dev);
7766 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7775 channels->rx_count = bp->num_rx_rings;
7776 channels->tx_count = bp->num_tx_rings;
7784 struct bnx2 *bp = netdev_priv(dev);
7789 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !disable_msi) {
7797 bp->num_req_rx_rings = channels->rx_count;
7798 bp->num_req_tx_rings = channels->tx_count;
7801 rc = bnx2_change_ring_size(bp, bp->rx_ring_size,
7802 bp->tx_ring_size, true);
7845 struct bnx2 *bp = netdev_priv(dev);
7850 data->phy_id = bp->phy_addr;
7856 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7862 spin_lock_bh(&bp->phy_lock);
7863 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7864 spin_unlock_bh(&bp->phy_lock);
7872 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7878 spin_lock_bh(&bp->phy_lock);
7879 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7880 spin_unlock_bh(&bp->phy_lock);
7896 struct bnx2 *bp = netdev_priv(dev);
7903 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7912 struct bnx2 *bp = netdev_priv(dev);
7915 return bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size,
7923 struct bnx2 *bp = netdev_priv(dev);
7926 for (i = 0; i < bp->irq_nvecs; i++) {
7927 struct bnx2_irq *irq = &bp->irq_tbl[i];
7930 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7937 bnx2_get_5709_media(struct bnx2 *bp)
7939 u32 val = BNX2_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7946 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7955 if (bp->func == 0) {
7960 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7968 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7975 bnx2_get_pci_speed(struct bnx2 *bp)
7979 reg = BNX2_RD(bp, BNX2_PCICFG_MISC_STATUS);
7983 bp->flags |= BNX2_FLAG_PCIX;
7985 clkreg = BNX2_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7990 bp->bus_speed_mhz = 133;
7994 bp->bus_speed_mhz = 100;
7999 bp->bus_speed_mhz = 66;
8004 bp->bus_speed_mhz = 50;
8010 bp->bus_speed_mhz = 33;
8016 bp->bus_speed_mhz = 66;
8018 bp->bus_speed_mhz = 33;
8022 bp->flags |= BNX2_FLAG_PCI_32BIT;
8027 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
8041 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN);
8065 memcpy(bp->fw_version, &data[j], len);
8066 bp->fw_version[len] = ' ';
8075 struct bnx2 *bp;
8081 bp = netdev_priv(dev);
8083 bp->flags = 0;
8084 bp->phy_flags = 0;
8086 bp->temp_stats_blk =
8089 if (!bp->temp_stats_blk) {
8116 bp->pm_cap = pdev->pm_cap;
8117 if (bp->pm_cap == 0) {
8124 bp->dev = dev;
8125 bp->pdev = pdev;
8127 spin_lock_init(&bp->phy_lock);
8128 spin_lock_init(&bp->indirect_lock);
8130 mutex_init(&bp->cnic_lock);
8132 INIT_WORK(&bp->reset_task, bnx2_reset_task);
8134 bp->regview = pci_iomap(pdev, 0, MB_GET_CID_ADDR(TX_TSS_CID +
8136 if (!bp->regview) {
8146 BNX2_WR(bp, BNX2_PCICFG_MISC_CONFIG,
8150 bp->chip_id = BNX2_RD(bp, BNX2_MISC_ID);
8152 if (BNX2_CHIP(bp) == BNX2_CHIP_5709) {
8158 bp->flags |= BNX2_FLAG_PCIE;
8159 if (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax)
8160 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
8162 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
8163 if (bp->pcix_cap == 0) {
8169 bp->flags |= BNX2_FLAG_BROKEN_STATS;
8172 if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8173 BNX2_CHIP_REV(bp) != BNX2_CHIP_REV_Ax) {
8175 bp->flags |= BNX2_FLAG_MSIX_CAP;
8178 if (BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A0 &&
8179 BNX2_CHIP_ID(bp) != BNX2_CHIP_ID_5706_A1) {
8181 bp->flags |= BNX2_FLAG_MSI_CAP;
8185 if (BNX2_CHIP(bp) == BNX2_CHIP_5708)
8204 if (!(bp->flags & BNX2_FLAG_PCIE))
8205 bnx2_get_pci_speed(bp);
8208 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8209 reg = BNX2_RD(bp, PCI_COMMAND);
8211 BNX2_WR(bp, PCI_COMMAND, reg);
8212 } else if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A1) &&
8213 !(bp->flags & BNX2_FLAG_PCIX)) {
8220 bnx2_init_nvram(bp);
8222 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8224 if (bnx2_reg_rd_ind(bp, BNX2_MCP_TOE_ID) & BNX2_MCP_TOE_ID_FUNCTION_ID)
8225 bp->func = 1;
8229 u32 off = bp->func << 2;
8231 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8233 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8238 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8247 bnx2_read_vpd_fw_ver(bp);
8249 j = strlen(bp->fw_version);
8250 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8255 bp->fw_version[j++] = 'b';
8256 bp->fw_version[j++] = 'c';
8257 bp->fw_version[j++] = ' ';
8262 bp->fw_version[j++] = (num / k) + '0';
8267 bp->fw_version[j++] = '.';
8269 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8271 bp->wol = 1;
8274 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8277 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8283 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8287 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8290 bp->fw_version[j++] = ' ';
8292 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8294 memcpy(&bp->fw_version[j], &reg, 4);
8299 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8300 bp->mac_addr[0] = (u8) (reg >> 8);
8301 bp->mac_addr[1] = (u8) reg;
8303 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8304 bp->mac_addr[2] = (u8) (reg >> 24);
8305 bp->mac_addr[3] = (u8) (reg >> 16);
8306 bp->mac_addr[4] = (u8) (reg >> 8);
8307 bp->mac_addr[5] = (u8) reg;
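The shmem layout implied above: MAC_UPPER carries bytes 0-1 in its low 16 bits and MAC_LOWER carries bytes 2-5, most-significant byte first. The equivalent decode as a sketch:

	static void decode_shmem_mac(u32 upper, u32 lower, u8 *mac)
	{
		mac[0] = (u8) (upper >> 8);
		mac[1] = (u8) upper;
		mac[2] = (u8) (lower >> 24);
		mac[3] = (u8) (lower >> 16);
		mac[4] = (u8) (lower >> 8);
		mac[5] = (u8) lower;
	}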
8309 bp->tx_ring_size = BNX2_MAX_TX_DESC_CNT;
8310 bnx2_set_rx_ring_size(bp, 255);
8312 bp->tx_quick_cons_trip_int = 2;
8313 bp->tx_quick_cons_trip = 20;
8314 bp->tx_ticks_int = 18;
8315 bp->tx_ticks = 80;
8317 bp->rx_quick_cons_trip_int = 2;
8318 bp->rx_quick_cons_trip = 12;
8319 bp->rx_ticks_int = 18;
8320 bp->rx_ticks = 18;
8322 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8324 bp->current_interval = BNX2_TIMER_INTERVAL;
8326 bp->phy_addr = 1;
8334 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8335 bnx2_get_5709_media(bp);
8336 else if (BNX2_CHIP_BOND(bp) & BNX2_CHIP_BOND_SERDES_BIT)
8337 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8339 bp->phy_port = PORT_TP;
8340 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8341 bp->phy_port = PORT_FIBRE;
8342 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8344 bp->flags |= BNX2_FLAG_NO_WOL;
8345 bp->wol = 0;
8347 if (BNX2_CHIP(bp) == BNX2_CHIP_5706) {
8354 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8356 bp->phy_addr = 2;
8358 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8360 } else if (BNX2_CHIP(bp) == BNX2_CHIP_5706 ||
8361 BNX2_CHIP(bp) == BNX2_CHIP_5708)
8362 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8363 else if (BNX2_CHIP(bp) == BNX2_CHIP_5709 &&
8364 (BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Ax ||
8365 BNX2_CHIP_REV(bp) == BNX2_CHIP_REV_Bx))
8366 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8368 bnx2_init_fw_cap(bp);
8370 if ((BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_A0) ||
8371 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B0) ||
8372 (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5708_B1) ||
8373 !(BNX2_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8374 bp->flags |= BNX2_FLAG_NO_WOL;
8375 bp->wol = 0;
8378 if (bp->flags & BNX2_FLAG_NO_WOL)
8379 device_set_wakeup_capable(&bp->pdev->dev, false);
8381 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
8383 if (BNX2_CHIP_ID(bp) == BNX2_CHIP_ID_5706_A0) {
8384 bp->tx_quick_cons_trip_int =
8385 bp->tx_quick_cons_trip;
8386 bp->tx_ticks_int = bp->tx_ticks;
8387 bp->rx_quick_cons_trip_int =
8388 bp->rx_quick_cons_trip;
8389 bp->rx_ticks_int = bp->rx_ticks;
8390 bp->comp_prod_trip_int = bp->comp_prod_trip;
8391 bp->com_ticks_int = bp->com_ticks;
8392 bp->cmd_ticks_int = bp->cmd_ticks;
8405 if (BNX2_CHIP(bp) == BNX2_CHIP_5706 && disable_msi == 0) {
8421 bnx2_set_default_link(bp);
8422 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8424 timer_setup(&bp->timer, bnx2_timer, 0);
8425 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8428 if (bnx2_shmem_rd(bp, BNX2_ISCSI_INITIATOR) & BNX2_ISCSI_INITIATOR_EN)
8429 bp->cnic_eth_dev.max_iscsi_conn =
8430 (bnx2_shmem_rd(bp, BNX2_ISCSI_MAX_CONN) &
8432 bp->cnic_probe = bnx2_cnic_probe;
8439 pci_iounmap(pdev, bp->regview);
8440 bp->regview = NULL;
8449 kfree(bp->temp_stats_blk);
8455 bnx2_bus_string(struct bnx2 *bp, char *str)
8459 if (bp->flags & BNX2_FLAG_PCIE) {
8463 if (bp->flags & BNX2_FLAG_PCIX)
8465 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8469 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8475 bnx2_del_napi(struct bnx2 *bp)
8479 for (i = 0; i < bp->irq_nvecs; i++)
8480 netif_napi_del(&bp->bnx2_napi[i].napi);
8484 bnx2_init_napi(struct bnx2 *bp)
8488 for (i = 0; i < bp->irq_nvecs; i++) {
8489 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8497 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll);
8498 bnapi->bp = bp;
8523 struct bnx2 *bp;
8528 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8540 bp = netdev_priv(dev);
8551 bnx2_wait_dma_complete(bp);
8553 eth_hw_addr_set(dev, bp->mac_addr);
8559 if (BNX2_CHIP(bp) == BNX2_CHIP_5709)
8569 if (!(bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
8579 ((BNX2_CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8580 ((BNX2_CHIP_ID(bp) & 0x0ff0) >> 4),
8581 bnx2_bus_string(bp, str), (long)pci_resource_start(pdev, 0),
8587 pci_iounmap(pdev, bp->regview);
8600 struct bnx2 *bp = netdev_priv(dev);
8604 del_timer_sync(&bp->timer);
8605 cancel_work_sync(&bp->reset_task);
8607 pci_iounmap(bp->pdev, bp->regview);
8610 kfree(bp->temp_stats_blk);
8612 bnx2_release_firmware(bp);
8625 struct bnx2 *bp = netdev_priv(dev);
8628 cancel_work_sync(&bp->reset_task);
8629 bnx2_netif_stop(bp, true);
8631 del_timer_sync(&bp->timer);
8632 bnx2_shutdown_chip(bp);
8633 __bnx2_free_irq(bp);
8634 bnx2_free_skbs(bp);
8636 bnx2_setup_wol(bp);
8644 struct bnx2 *bp = netdev_priv(dev);
8649 bnx2_set_power_state(bp, PCI_D0);
8651 bnx2_request_irq(bp);
8652 bnx2_init_nic(bp, 1);
8653 bnx2_netif_start(bp, true);
8677 struct bnx2 *bp = netdev_priv(dev);
8688 bnx2_netif_stop(bp, true);
8689 del_timer_sync(&bp->timer);
8690 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8709 struct bnx2 *bp = netdev_priv(dev);
8723 err = bnx2_init_nic(bp, 1);
8730 bnx2_napi_enable(bp);
8748 struct bnx2 *bp = netdev_priv(dev);
8752 bnx2_netif_start(bp, true);
8761 struct bnx2 *bp;
8766 bp = netdev_priv(dev);
8767 if (!bp)
8772 dev_close(bp->dev);
8775 bnx2_set_power_state(bp, PCI_D3hot);