Lines Matching refs:mdp

349 struct sh_eth_private *mdp = netdev_priv(ndev);
350 u16 offset = mdp->reg_offset[enum_index];
355 iowrite32(data, mdp->addr + offset);
360 struct sh_eth_private *mdp = netdev_priv(ndev);
361 u16 offset = mdp->reg_offset[enum_index];
366 return ioread32(mdp->addr + offset);
376 static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
378 return mdp->reg_offset[enum_index];
381 static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
384 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
389 iowrite32(data, mdp->tsu_addr + offset);
392 static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
394 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
399 return ioread32(mdp->tsu_addr + offset);
415 struct sh_eth_private *mdp = netdev_priv(ndev);
418 switch (mdp->phy_interface) {
443 struct sh_eth_private *mdp = netdev_priv(ndev);
445 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
450 struct sh_eth_private *mdp = netdev_priv(ndev);
453 sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
482 struct sh_eth_private *mdp = netdev_priv(ndev);
503 if (mdp->cd->csmr)
507 if (mdp->cd->select_mii)
515 struct sh_eth_private *mdp = netdev_priv(ndev);
517 if (WARN_ON(!mdp->cd->gecmr))
520 switch (mdp->speed) {
634 struct sh_eth_private *mdp = netdev_priv(ndev);
636 switch (mdp->speed) {
797 struct sh_eth_private *mdp = netdev_priv(ndev);
799 switch (mdp->speed) {
841 struct sh_eth_private *mdp = netdev_priv(ndev);
843 switch (mdp->speed) {
912 struct sh_eth_private *mdp = netdev_priv(ndev);
914 if (WARN_ON(!mdp->cd->gecmr))
917 switch (mdp->speed) {
1233 struct sh_eth_private *mdp = netdev_priv(ndev);
1239 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1240 entry = mdp->dirty_tx % mdp->num_tx_ring;
1241 txdesc = &mdp->tx_ring[entry];
1247 netif_info(mdp, tx_done, ndev,
1251 if (mdp->tx_skbuff[entry]) {
1252 dma_unmap_single(&mdp->pdev->dev,
1256 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1257 mdp->tx_skbuff[entry] = NULL;
1261 if (entry >= mdp->num_tx_ring - 1)
1275 struct sh_eth_private *mdp = netdev_priv(ndev);
1278 if (mdp->rx_ring) {
1279 for (i = 0; i < mdp->num_rx_ring; i++) {
1280 if (mdp->rx_skbuff[i]) {
1281 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1283 dma_unmap_single(&mdp->pdev->dev,
1285 ALIGN(mdp->rx_buf_sz, 32),
1289 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1290 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1291 mdp->rx_desc_dma);
1292 mdp->rx_ring = NULL;
1296 if (mdp->rx_skbuff) {
1297 for (i = 0; i < mdp->num_rx_ring; i++)
1298 dev_kfree_skb(mdp->rx_skbuff[i]);
1300 kfree(mdp->rx_skbuff);
1301 mdp->rx_skbuff = NULL;
1303 if (mdp->tx_ring) {
1306 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1307 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1308 mdp->tx_desc_dma);
1309 mdp->tx_ring = NULL;
1313 kfree(mdp->tx_skbuff);
1314 mdp->tx_skbuff = NULL;
1320 struct sh_eth_private *mdp = netdev_priv(ndev);
1325 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1326 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1327 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1331 mdp->cur_rx = 0;
1332 mdp->cur_tx = 0;
1333 mdp->dirty_rx = 0;
1334 mdp->dirty_tx = 0;
1336 memset(mdp->rx_ring, 0, rx_ringsize);
1339 for (i = 0; i < mdp->num_rx_ring; i++) {
1341 mdp->rx_skbuff[i] = NULL;
1348 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1349 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1351 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1355 mdp->rx_skbuff[i] = skb;
1358 rxdesc = &mdp->rx_ring[i];
1365 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1366 if (mdp->cd->xdfar_rw)
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1371 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1377 memset(mdp->tx_ring, 0, tx_ringsize);
1380 for (i = 0; i < mdp->num_tx_ring; i++) {
1381 mdp->tx_skbuff[i] = NULL;
1382 txdesc = &mdp->tx_ring[i];
1387 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1388 if (mdp->cd->xdfar_rw)
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
1399 struct sh_eth_private *mdp = netdev_priv(ndev);
1407 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1409 if (mdp->cd->rpadir)
1410 mdp->rx_buf_sz += NET_IP_ALIGN;
1413 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1415 if (!mdp->rx_skbuff)
1418 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1420 if (!mdp->tx_skbuff)
1424 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1425 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1426 &mdp->rx_desc_dma, GFP_KERNEL);
1427 if (!mdp->rx_ring)
1430 mdp->dirty_rx = 0;
1433 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1434 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1435 &mdp->tx_desc_dma, GFP_KERNEL);
1436 if (!mdp->tx_ring)
1449 struct sh_eth_private *mdp = netdev_priv(ndev);
1453 ret = mdp->cd->soft_reset(ndev);
1457 if (mdp->cd->rmiimode)
1462 if (mdp->cd->rpadir)
1469 if (mdp->cd->hw_swap)
1476 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1482 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1485 if (mdp->cd->nbst)
1489 if (mdp->cd->bculr)
1492 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1494 if (!mdp->cd->no_trimd)
1502 mdp->irq_enabled = true;
1503 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1506 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
1510 if (mdp->cd->set_rate)
1511 mdp->cd->set_rate(ndev);
1514 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1517 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1523 if (mdp->cd->apr)
1525 if (mdp->cd->mpr)
1527 if (mdp->cd->tpauser)
1538 struct sh_eth_private *mdp = netdev_priv(ndev);
1544 for (i = 0; i < mdp->num_tx_ring; i++)
1545 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
1560 mdp->cd->soft_reset(ndev);
1563 if (mdp->cd->rmiimode)
1586 struct sh_eth_private *mdp = netdev_priv(ndev);
1589 int entry = mdp->cur_rx % mdp->num_rx_ring;
1590 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1594 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1601 rxdesc = &mdp->rx_ring[entry];
1611 netif_info(mdp, rx_status, ndev,
1624 if (mdp->cd->csmr)
1627 skb = mdp->rx_skbuff[entry];
1645 if (!mdp->cd->hw_swap)
1649 mdp->rx_skbuff[entry] = NULL;
1650 if (mdp->cd->rpadir)
1652 dma_unmap_single(&mdp->pdev->dev, dma_addr,
1653 ALIGN(mdp->rx_buf_sz, 32),
1665 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1666 rxdesc = &mdp->rx_ring[entry];
1670 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1671 entry = mdp->dirty_rx % mdp->num_rx_ring;
1672 rxdesc = &mdp->rx_ring[entry];
1674 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1677 if (mdp->rx_skbuff[entry] == NULL) {
1682 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1684 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1688 mdp->rx_skbuff[entry] = skb;
1694 if (entry >= mdp->num_rx_ring - 1)
1705 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
1709 mdp->cur_rx = count;
1710 mdp->dirty_rx = count;
1735 struct sh_eth_private *mdp = netdev_priv(ndev);
1744 pm_wakeup_event(&mdp->pdev->dev, 0);
1747 if (mdp->cd->no_psr || mdp->no_ether_link)
1750 if (mdp->ether_link_active_low)
1769 struct sh_eth_private *mdp = netdev_priv(ndev);
1776 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1791 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1797 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1810 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1813 netif_err(mdp, tx_err, ndev, "Address Error\n");
1817 if (mdp->cd->no_ade)
1825 intr_status, mdp->cur_tx, mdp->dirty_tx,
1831 if (edtrr ^ mdp->cd->edtrr_trns) {
1833 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
1843 struct sh_eth_private *mdp = netdev_priv(ndev);
1844 struct sh_eth_cpu_data *cd = mdp->cd;
1848 spin_lock(&mdp->lock);
1866 if (unlikely(!mdp->irq_enabled)) {
1872 if (napi_schedule_prep(&mdp->napi)) {
1876 __napi_schedule(&mdp->napi);
1905 spin_unlock(&mdp->lock);
1912 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1932 if (mdp->irq_enabled)
1933 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1941 struct sh_eth_private *mdp = netdev_priv(ndev);
1946 spin_lock_irqsave(&mdp->lock, flags);
1949 if (mdp->cd->no_psr || mdp->no_ether_link)
1953 if (phydev->duplex != mdp->duplex) {
1955 mdp->duplex = phydev->duplex;
1956 if (mdp->cd->set_duplex)
1957 mdp->cd->set_duplex(ndev);
1960 if (phydev->speed != mdp->speed) {
1962 mdp->speed = phydev->speed;
1963 if (mdp->cd->set_rate)
1964 mdp->cd->set_rate(ndev);
1966 if (!mdp->link) {
1969 mdp->link = phydev->link;
1971 } else if (mdp->link) {
1973 mdp->link = 0;
1974 mdp->speed = 0;
1975 mdp->duplex = -1;
1979 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1982 spin_unlock_irqrestore(&mdp->lock, flags);
1984 if (new_state && netif_msg_link(mdp))
1992 struct sh_eth_private *mdp = netdev_priv(ndev);
1995 mdp->link = 0;
1996 mdp->speed = 0;
1997 mdp->duplex = -1;
2006 mdp->phy_interface);
2015 mdp->mii_bus->id, mdp->phy_id);
2018 mdp->phy_interface);
2027 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT)
2059 struct sh_eth_private *mdp = netdev_priv(ndev);
2060 struct sh_eth_cpu_data *cd = mdp->cd;
2084 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \
2093 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
2207 *buf++ = ioread32(mdp->tsu_addr +
2208 mdp->reg_offset[TSU_ADRH0] +
2230 struct sh_eth_private *mdp = netdev_priv(ndev);
2234 pm_runtime_get_sync(&mdp->pdev->dev);
2236 pm_runtime_put_sync(&mdp->pdev->dev);
2241 struct sh_eth_private *mdp = netdev_priv(ndev);
2242 return mdp->msg_enable;
2247 struct sh_eth_private *mdp = netdev_priv(ndev);
2248 mdp->msg_enable = value;
2270 struct sh_eth_private *mdp = netdev_priv(ndev);
2274 data[i++] = mdp->cur_rx;
2275 data[i++] = mdp->cur_tx;
2276 data[i++] = mdp->dirty_rx;
2277 data[i++] = mdp->dirty_tx;
2295 struct sh_eth_private *mdp = netdev_priv(ndev);
2299 ring->rx_pending = mdp->num_rx_ring;
2300 ring->tx_pending = mdp->num_tx_ring;
2308 struct sh_eth_private *mdp = netdev_priv(ndev);
2328 mdp->irq_enabled = false;
2330 napi_synchronize(&mdp->napi);
2340 mdp->num_rx_ring = ring->rx_pending;
2341 mdp->num_tx_ring = ring->tx_pending;
2365 struct sh_eth_private *mdp = netdev_priv(ndev);
2370 if (mdp->cd->magic) {
2372 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2378 struct sh_eth_private *mdp = netdev_priv(ndev);
2380 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2383 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2385 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2411 struct sh_eth_private *mdp = netdev_priv(ndev);
2414 pm_runtime_get_sync(&mdp->pdev->dev);
2416 napi_enable(&mdp->napi);
2419 mdp->cd->irq_flags, ndev->name, ndev);
2442 mdp->is_opened = 1;
2449 napi_disable(&mdp->napi);
2450 pm_runtime_put_sync(&mdp->pdev->dev);
2457 struct sh_eth_private *mdp = netdev_priv(ndev);
2463 netif_err(mdp, timer, ndev,
2471 for (i = 0; i < mdp->num_rx_ring; i++) {
2472 rxdesc = &mdp->rx_ring[i];
2475 dev_kfree_skb(mdp->rx_skbuff[i]);
2476 mdp->rx_skbuff[i] = NULL;
2478 for (i = 0; i < mdp->num_tx_ring; i++) {
2479 dev_kfree_skb(mdp->tx_skbuff[i]);
2480 mdp->tx_skbuff[i] = NULL;
2493 struct sh_eth_private *mdp = netdev_priv(ndev);
2499 spin_lock_irqsave(&mdp->lock, flags);
2500 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2502 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2504 spin_unlock_irqrestore(&mdp->lock, flags);
2508 spin_unlock_irqrestore(&mdp->lock, flags);
2513 entry = mdp->cur_tx % mdp->num_tx_ring;
2514 mdp->tx_skbuff[entry] = skb;
2515 txdesc = &mdp->tx_ring[entry];
2517 if (!mdp->cd->hw_swap)
2519 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2521 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2529 if (entry >= mdp->num_tx_ring - 1)
2535 mdp->cur_tx++;
2537 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2538 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
2561 struct sh_eth_private *mdp = netdev_priv(ndev);
2563 if (mdp->cd->no_tx_cntrs)
2566 if (!mdp->is_opened)
2573 if (mdp->cd->cexcr) {
2589 struct sh_eth_private *mdp = netdev_priv(ndev);
2597 mdp->irq_enabled = false;
2599 napi_disable(&mdp->napi);
2615 mdp->is_opened = 0;
2617 pm_runtime_put(&mdp->pdev->dev);
2639 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2641 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2647 struct sh_eth_private *mdp = netdev_priv(ndev);
2651 tmp = sh_eth_tsu_read(mdp, reg);
2652 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2658 struct sh_eth_private *mdp = netdev_priv(ndev);
2663 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2665 tmp = sh_eth_tsu_read(mdp, reg);
2666 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2675 struct sh_eth_private *mdp = netdev_priv(ndev);
2677 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2692 struct sh_eth_private *mdp = netdev_priv(ndev);
2696 iowrite32(val, mdp->tsu_addr + offset);
2701 iowrite32(val, mdp->tsu_addr + offset + 4);
2710 struct sh_eth_private *mdp = netdev_priv(ndev);
2713 val = ioread32(mdp->tsu_addr + offset);
2718 val = ioread32(mdp->tsu_addr + offset + 4);
2726 struct sh_eth_private *mdp = netdev_priv(ndev);
2727 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2753 struct sh_eth_private *mdp = netdev_priv(ndev);
2754 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2758 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2770 struct sh_eth_private *mdp = netdev_priv(ndev);
2771 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2774 if (!mdp->cd->tsu)
2788 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2800 struct sh_eth_private *mdp = netdev_priv(ndev);
2803 if (!mdp->cd->tsu)
2823 struct sh_eth_private *mdp = netdev_priv(ndev);
2826 if (!mdp->cd->tsu)
2844 struct sh_eth_private *mdp = netdev_priv(ndev);
2845 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2849 if (!mdp->cd->tsu)
2862 struct sh_eth_private *mdp = netdev_priv(ndev);
2867 spin_lock_irqsave(&mdp->lock, flags);
2872 if (mdp->cd->tsu)
2888 } else if (mdp->cd->tsu) {
2907 spin_unlock_irqrestore(&mdp->lock, flags);
2912 struct sh_eth_private *mdp = netdev_priv(ndev);
2915 spin_lock_irqsave(&mdp->lock, flags);
2926 spin_unlock_irqrestore(&mdp->lock, flags);
2933 struct sh_eth_private *mdp = netdev_priv(ndev);
2935 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
2943 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2945 if (!mdp->port)
2954 struct sh_eth_private *mdp = netdev_priv(ndev);
2955 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2957 if (unlikely(!mdp->cd->tsu))
2964 mdp->vlan_num_ids++;
2969 if (mdp->vlan_num_ids > 1) {
2971 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2975 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2984 struct sh_eth_private *mdp = netdev_priv(ndev);
2985 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2987 if (unlikely(!mdp->cd->tsu))
2994 mdp->vlan_num_ids--;
2995 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3001 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
3003 if (!mdp->cd->dual_port) {
3004 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3005 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
3010 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
3011 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
3012 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
3013 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
3014 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
3015 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
3016 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
3017 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
3018 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
3019 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
3020 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
3021 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
3022 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
3023 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
3024 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3025 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
3026 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
3027 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
3028 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
3032 static int sh_mdio_release(struct sh_eth_private *mdp)
3035 mdiobus_unregister(mdp->mii_bus);
3038 free_mdio_bitbang(mdp->mii_bus);
3089 static int sh_mdio_init(struct sh_eth_private *mdp,
3094 struct platform_device *pdev = mdp->pdev;
3095 struct device *dev = &mdp->pdev->dev;
3105 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3110 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3111 if (!mdp->mii_bus)
3115 mdp->mii_bus->read = sh_mdiobb_read_c22;
3116 mdp->mii_bus->write = sh_mdiobb_write_c22;
3117 mdp->mii_bus->read_c45 = sh_mdiobb_read_c45;
3118 mdp->mii_bus->write_c45 = sh_mdiobb_write_c45;
3121 mdp->mii_bus->name = "sh_mii";
3122 mdp->mii_bus->parent = dev;
3123 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3128 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3130 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3145 free_mdio_bitbang(mdp->mii_bus);
3258 struct sh_eth_private *mdp;
3276 mdp = netdev_priv(ndev);
3277 mdp->num_tx_ring = TX_RING_SIZE;
3278 mdp->num_rx_ring = RX_RING_SIZE;
3279 mdp->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
3280 if (IS_ERR(mdp->addr)) {
3281 ret = PTR_ERR(mdp->addr);
3287 spin_lock_init(&mdp->lock);
3288 mdp->pdev = pdev;
3299 mdp->phy_id = pd->phy;
3300 mdp->phy_interface = pd->phy_interface;
3301 mdp->no_ether_link = pd->no_ether_link;
3302 mdp->ether_link_active_low = pd->ether_link_active_low;
3306 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3308 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3310 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3311 if (!mdp->reg_offset) {
3313 mdp->cd->register_type);
3317 sh_eth_set_default_cpu_data(mdp->cd);
3326 if (mdp->cd->rx_csum) {
3332 if (mdp->cd->tsu)
3340 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3350 if (mdp->cd->tsu) {
3372 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3374 if (!mdp->tsu_addr) {
3379 mdp->port = port;
3384 if (mdp->cd->chip_reset)
3385 mdp->cd->chip_reset(ndev);
3388 sh_eth_tsu_init(mdp);
3392 if (mdp->cd->rmiimode)
3396 ret = sh_mdio_init(mdp, pd);
3402 netif_napi_add(ndev, &mdp->napi, sh_eth_poll);
3409 if (mdp->cd->magic)
3422 netif_napi_del(&mdp->napi);
3423 sh_mdio_release(mdp);
3437 struct sh_eth_private *mdp = netdev_priv(ndev);
3440 netif_napi_del(&mdp->napi);
3441 sh_mdio_release(mdp);
3452 struct sh_eth_private *mdp = netdev_priv(ndev);
3456 napi_disable(&mdp->napi);
3467 struct sh_eth_private *mdp = netdev_priv(ndev);
3470 napi_enable(&mdp->napi);
3491 struct sh_eth_private *mdp = netdev_priv(ndev);
3499 if (mdp->wol_enabled)
3510 struct sh_eth_private *mdp = netdev_priv(ndev);
3516 if (mdp->wol_enabled)
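
For orientation, the matches at source lines 349-366 and 376-399 above show the driver's MMIO accessor pattern: the per-chip mdp->reg_offset table translates a register enum into an offset applied to mdp->addr (or mdp->tsu_addr for the TSU block). Below is a minimal sketch of that pattern reconstructed from the matches; the offset-validity guard is an assumption, since the intervening lines do not reference mdp and therefore do not appear in this listing.

static void sh_eth_write(struct net_device *ndev, u32 data, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	/* Assumed guard: skip registers the current chip does not implement. */
	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return;

	iowrite32(data, mdp->addr + offset);
}

static u32 sh_eth_read(struct net_device *ndev, int enum_index)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u16 offset = mdp->reg_offset[enum_index];

	/* Assumed guard, mirroring the write path. */
	if (WARN_ON(offset == SH_ETH_OFFSET_INVALID))
		return ~0;

	return ioread32(mdp->addr + offset);
}

The TSU helpers at lines 381-399 follow the same shape, only indexing into mdp->tsu_addr via sh_eth_tsu_get_offset(), which is why nearly every other match in this listing funnels through sh_eth_read(), sh_eth_write(), sh_eth_tsu_read() or sh_eth_tsu_write().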