Lines Matching refs:mdp

351 struct sh_eth_private *mdp = netdev_priv(ndev);
352 u16 offset = mdp->reg_offset[enum_index];
357 iowrite32(data, mdp->addr + offset);
362 struct sh_eth_private *mdp = netdev_priv(ndev);
363 u16 offset = mdp->reg_offset[enum_index];
368 return ioread32(mdp->addr + offset);
378 static u16 sh_eth_tsu_get_offset(struct sh_eth_private *mdp, int enum_index)
380 return mdp->reg_offset[enum_index];
383 static void sh_eth_tsu_write(struct sh_eth_private *mdp, u32 data,
386 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
391 iowrite32(data, mdp->tsu_addr + offset);
394 static u32 sh_eth_tsu_read(struct sh_eth_private *mdp, int enum_index)
396 u16 offset = sh_eth_tsu_get_offset(mdp, enum_index);
401 return ioread32(mdp->tsu_addr + offset);
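Note: every accessor matched between lines 351 and 401 resolves a per-SoC offset from mdp->reg_offset[enum_index] before the iowrite32()/ioread32() on mdp->addr or mdp->tsu_addr. A minimal userspace sketch of that table-lookup pattern follows; the names (demo_priv, demo_read, demo_write, DEMO_OFFSET_INVALID) are hypothetical, plain array accesses stand in for the MMIO helpers, and the invalid-offset guard mirrors the SH_ETH_OFFSET_INVALID check matched at line 2090.

    /* Hypothetical sketch of the per-SoC register-offset lookup; not the driver's code. */
    #include <stdint.h>
    #include <stdio.h>

    enum { REG_ECMR, REG_EDTRR, REG_MAX };      /* stand-ins for the driver's enum_index values */
    #define DEMO_OFFSET_INVALID 0xffff          /* analogous to an "offset not valid on this SoC" sentinel */

    struct demo_priv {
            uint32_t *addr;                     /* base of the mapped register window */
            const uint16_t *reg_offset;         /* per-SoC offset table, indexed by enum */
    };

    static void demo_write(struct demo_priv *p, uint32_t data, int enum_index)
    {
            uint16_t offset = p->reg_offset[enum_index];

            if (offset == DEMO_OFFSET_INVALID)  /* register absent on this variant */
                    return;
            p->addr[offset / 4] = data;         /* stands in for iowrite32() */
    }

    static uint32_t demo_read(struct demo_priv *p, int enum_index)
    {
            uint16_t offset = p->reg_offset[enum_index];

            if (offset == DEMO_OFFSET_INVALID)
                    return ~0u;
            return p->addr[offset / 4];         /* stands in for ioread32() */
    }

    int main(void)
    {
            static uint32_t regs[16];
            static const uint16_t offsets[REG_MAX] = { [REG_ECMR] = 0x00, [REG_EDTRR] = 0x0c };
            struct demo_priv p = { .addr = regs, .reg_offset = offsets };

            demo_write(&p, 0x3, REG_EDTRR);
            printf("EDTRR = %#lx\n", (unsigned long)demo_read(&p, REG_EDTRR));
            return 0;
    }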
417 struct sh_eth_private *mdp = netdev_priv(ndev);
420 switch (mdp->phy_interface) {
445 struct sh_eth_private *mdp = netdev_priv(ndev);
447 sh_eth_modify(ndev, ECMR, ECMR_DM, mdp->duplex ? ECMR_DM : 0);
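Note: the call at line 447 uses sh_eth_modify(ndev, reg, clear, set), i.e. a read-modify-write of one register field. The (clear, set) semantics are inferred from the call site, so treat them as an assumption; a standalone sketch with an illustrative bit value:

    /* Sketch of the read-modify-write pattern behind the line-447 call; bit value is illustrative. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg_modify(uint32_t old, uint32_t clear, uint32_t set)
    {
            return (old & ~clear) | set;        /* drop the masked bits, then OR in the new ones */
    }

    int main(void)
    {
            uint32_t ecmr = 0x00070000;         /* illustrative register contents */
            uint32_t ecmr_dm = 0x00000002;      /* illustrative duplex-mode bit, per the call site */

            /* full duplex: set the bit; half duplex: pass 0 so the bit is cleared */
            printf("full duplex -> %#010lx\n", (unsigned long)reg_modify(ecmr, ecmr_dm, ecmr_dm));
            printf("half duplex -> %#010lx\n", (unsigned long)reg_modify(ecmr, ecmr_dm, 0));
            return 0;
    }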
452 struct sh_eth_private *mdp = netdev_priv(ndev);
455 sh_eth_tsu_write(mdp, ARSTR_ARST, ARSTR);
484 struct sh_eth_private *mdp = netdev_priv(ndev);
505 if (mdp->cd->csmr)
509 if (mdp->cd->select_mii)
517 struct sh_eth_private *mdp = netdev_priv(ndev);
519 if (WARN_ON(!mdp->cd->gecmr))
522 switch (mdp->speed) {
636 struct sh_eth_private *mdp = netdev_priv(ndev);
638 switch (mdp->speed) {
799 struct sh_eth_private *mdp = netdev_priv(ndev);
801 switch (mdp->speed) {
843 struct sh_eth_private *mdp = netdev_priv(ndev);
845 switch (mdp->speed) {
914 struct sh_eth_private *mdp = netdev_priv(ndev);
916 if (WARN_ON(!mdp->cd->gecmr))
919 switch (mdp->speed) {
1233 struct sh_eth_private *mdp = netdev_priv(ndev);
1239 for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
1240 entry = mdp->dirty_tx % mdp->num_tx_ring;
1241 txdesc = &mdp->tx_ring[entry];
1247 netif_info(mdp, tx_done, ndev,
1251 if (mdp->tx_skbuff[entry]) {
1252 dma_unmap_single(&mdp->pdev->dev,
1256 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1257 mdp->tx_skbuff[entry] = NULL;
1261 if (entry >= mdp->num_tx_ring - 1)
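Note: the TX completion loop at lines 1239-1240 (and the RX refill loop at 1670-1671) keeps two free-running u32 counters: cur_* counts descriptors handed to hardware, dirty_* counts descriptors reclaimed, so cur - dirty is the in-flight count even across counter wraparound and dirty % num_*_ring is the ring slot. A standalone sketch of that arithmetic, assuming a 64-entry ring purely for illustration:

    /* Demonstrates the cur/dirty counter arithmetic across u32 wraparound. */
    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_RING 64u

    int main(void)
    {
            uint32_t cur_tx = UINT32_MAX - 1;   /* producer counter, about to wrap */
            uint32_t dirty_tx = UINT32_MAX - 3; /* consumer counter */

            cur_tx += 4;                        /* queue four more frames; wraps past zero */

            while (cur_tx - dirty_tx > 0) {     /* same loop shape as the line-1239 match */
                    uint32_t entry = dirty_tx % NUM_TX_RING;

                    printf("reclaim slot %lu (in flight %lu)\n",
                           (unsigned long)entry,
                           (unsigned long)(cur_tx - dirty_tx));
                    dirty_tx++;
            }
            return 0;
    }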
1275 struct sh_eth_private *mdp = netdev_priv(ndev);
1278 if (mdp->rx_ring) {
1279 for (i = 0; i < mdp->num_rx_ring; i++) {
1280 if (mdp->rx_skbuff[i]) {
1281 struct sh_eth_rxdesc *rxdesc = &mdp->rx_ring[i];
1283 dma_unmap_single(&mdp->pdev->dev,
1285 ALIGN(mdp->rx_buf_sz, 32),
1289 ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1290 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->rx_ring,
1291 mdp->rx_desc_dma);
1292 mdp->rx_ring = NULL;
1296 if (mdp->rx_skbuff) {
1297 for (i = 0; i < mdp->num_rx_ring; i++)
1298 dev_kfree_skb(mdp->rx_skbuff[i]);
1300 kfree(mdp->rx_skbuff);
1301 mdp->rx_skbuff = NULL;
1303 if (mdp->tx_ring) {
1306 ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1307 dma_free_coherent(&mdp->pdev->dev, ringsize, mdp->tx_ring,
1308 mdp->tx_desc_dma);
1309 mdp->tx_ring = NULL;
1313 kfree(mdp->tx_skbuff);
1314 mdp->tx_skbuff = NULL;
1320 struct sh_eth_private *mdp = netdev_priv(ndev);
1325 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1326 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1327 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1331 mdp->cur_rx = 0;
1332 mdp->cur_tx = 0;
1333 mdp->dirty_rx = 0;
1334 mdp->dirty_tx = 0;
1336 memset(mdp->rx_ring, 0, rx_ringsize);
1339 for (i = 0; i < mdp->num_rx_ring; i++) {
1341 mdp->rx_skbuff[i] = NULL;
1348 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1349 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, buf_len,
1351 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1355 mdp->rx_skbuff[i] = skb;
1358 rxdesc = &mdp->rx_ring[i];
1365 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
1366 if (mdp->cd->xdfar_rw)
1367 sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
1371 mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);
1377 memset(mdp->tx_ring, 0, tx_ringsize);
1380 for (i = 0; i < mdp->num_tx_ring; i++) {
1381 mdp->tx_skbuff[i] = NULL;
1382 txdesc = &mdp->tx_ring[i];
1387 sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
1388 if (mdp->cd->xdfar_rw)
1389 sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
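Note: the RX buffer sizing at lines 1327/1348 (and again at 1594/1674) over-allocates the skb by SH_ETH_RX_ALIGN + 32 - 1 bytes but maps only ALIGN(rx_buf_sz, 32), i.e. rx_buf_sz rounded up to the next 32-byte boundary for DMA. A quick standalone check of that rounding, using a macro equivalent in effect to the kernel's ALIGN():

    /* Shows the 32-byte round-up used for the mapped RX buffer length. */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int rx_buf_sz = 1538;      /* illustrative value only */

            printf("ALIGN(%u, 32) = %u\n", rx_buf_sz, ALIGN(rx_buf_sz, 32u));
            /* prints 1568: the mapped length is the next multiple of 32 */
            return 0;
    }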
1399 struct sh_eth_private *mdp = netdev_priv(ndev);
1407 mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
1409 if (mdp->cd->rpadir)
1410 mdp->rx_buf_sz += NET_IP_ALIGN;
1413 mdp->rx_skbuff = kcalloc(mdp->num_rx_ring, sizeof(*mdp->rx_skbuff),
1415 if (!mdp->rx_skbuff)
1418 mdp->tx_skbuff = kcalloc(mdp->num_tx_ring, sizeof(*mdp->tx_skbuff),
1420 if (!mdp->tx_skbuff)
1424 rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
1425 mdp->rx_ring = dma_alloc_coherent(&mdp->pdev->dev, rx_ringsize,
1426 &mdp->rx_desc_dma, GFP_KERNEL);
1427 if (!mdp->rx_ring)
1430 mdp->dirty_rx = 0;
1433 tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
1434 mdp->tx_ring = dma_alloc_coherent(&mdp->pdev->dev, tx_ringsize,
1435 &mdp->tx_desc_dma, GFP_KERNEL);
1436 if (!mdp->tx_ring)
1449 struct sh_eth_private *mdp = netdev_priv(ndev);
1453 ret = mdp->cd->soft_reset(ndev);
1457 if (mdp->cd->rmiimode)
1462 if (mdp->cd->rpadir)
1469 if (mdp->cd->hw_swap)
1476 sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
1482 sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
1485 if (mdp->cd->nbst)
1489 if (mdp->cd->bculr)
1492 sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);
1494 if (!mdp->cd->no_trimd)
1502 mdp->irq_enabled = true;
1503 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1506 sh_eth_write(ndev, ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) |
1510 if (mdp->cd->set_rate)
1511 mdp->cd->set_rate(ndev);
1514 sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);
1517 sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);
1523 if (mdp->cd->apr)
1525 if (mdp->cd->mpr)
1527 if (mdp->cd->tpauser)
1538 struct sh_eth_private *mdp = netdev_priv(ndev);
1544 for (i = 0; i < mdp->num_tx_ring; i++)
1545 mdp->tx_ring[i].status &= ~cpu_to_le32(TD_TACT);
1560 mdp->cd->soft_reset(ndev);
1563 if (mdp->cd->rmiimode)
1586 struct sh_eth_private *mdp = netdev_priv(ndev);
1589 int entry = mdp->cur_rx % mdp->num_rx_ring;
1590 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
1594 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN + 32 - 1;
1601 rxdesc = &mdp->rx_ring[entry];
1611 netif_info(mdp, rx_status, ndev,
1624 if (mdp->cd->csmr)
1627 skb = mdp->rx_skbuff[entry];
1645 if (!mdp->cd->hw_swap)
1649 mdp->rx_skbuff[entry] = NULL;
1650 if (mdp->cd->rpadir)
1652 dma_unmap_single(&mdp->pdev->dev, dma_addr,
1653 ALIGN(mdp->rx_buf_sz, 32),
1665 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
1666 rxdesc = &mdp->rx_ring[entry];
1670 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
1671 entry = mdp->dirty_rx % mdp->num_rx_ring;
1672 rxdesc = &mdp->rx_ring[entry];
1674 buf_len = ALIGN(mdp->rx_buf_sz, 32);
1677 if (mdp->rx_skbuff[entry] == NULL) {
1682 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data,
1684 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
1688 mdp->rx_skbuff[entry] = skb;
1694 if (entry >= mdp->num_rx_ring - 1)
1705 if (intr_status & EESR_RDE && !mdp->cd->no_xdfar) {
1709 mdp->cur_rx = count;
1710 mdp->dirty_rx = count;
1735 struct sh_eth_private *mdp = netdev_priv(ndev);
1744 pm_wakeup_event(&mdp->pdev->dev, 0);
1747 if (mdp->cd->no_psr || mdp->no_ether_link)
1750 if (mdp->ether_link_active_low)
1769 struct sh_eth_private *mdp = netdev_priv(ndev);
1776 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
1791 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
1797 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
1810 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1813 netif_err(mdp, tx_err, ndev, "Address Error\n");
1817 if (mdp->cd->no_ade)
1825 intr_status, mdp->cur_tx, mdp->dirty_tx,
1831 if (edtrr ^ mdp->cd->edtrr_trns) {
1833 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
1843 struct sh_eth_private *mdp = netdev_priv(ndev);
1844 struct sh_eth_cpu_data *cd = mdp->cd;
1848 spin_lock(&mdp->lock);
1866 if (unlikely(!mdp->irq_enabled)) {
1872 if (napi_schedule_prep(&mdp->napi)) {
1876 __napi_schedule(&mdp->napi);
1905 spin_unlock(&mdp->lock);
1912 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1932 if (mdp->irq_enabled)
1933 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1941 struct sh_eth_private *mdp = netdev_priv(ndev);
1946 spin_lock_irqsave(&mdp->lock, flags);
1949 if (mdp->cd->no_psr || mdp->no_ether_link)
1953 if (phydev->duplex != mdp->duplex) {
1955 mdp->duplex = phydev->duplex;
1956 if (mdp->cd->set_duplex)
1957 mdp->cd->set_duplex(ndev);
1960 if (phydev->speed != mdp->speed) {
1962 mdp->speed = phydev->speed;
1963 if (mdp->cd->set_rate)
1964 mdp->cd->set_rate(ndev);
1966 if (!mdp->link) {
1969 mdp->link = phydev->link;
1971 } else if (mdp->link) {
1973 mdp->link = 0;
1974 mdp->speed = 0;
1975 mdp->duplex = -1;
1979 if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
1982 spin_unlock_irqrestore(&mdp->lock, flags);
1984 if (new_state && netif_msg_link(mdp))
1992 struct sh_eth_private *mdp = netdev_priv(ndev);
1995 mdp->link = 0;
1996 mdp->speed = 0;
1997 mdp->duplex = -1;
2006 mdp->phy_interface);
2015 mdp->mii_bus->id, mdp->phy_id);
2018 mdp->phy_interface);
2027 if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) {
2065 struct sh_eth_private *mdp = netdev_priv(ndev);
2066 struct sh_eth_cpu_data *cd = mdp->cd;
2090 if (mdp->reg_offset[reg] != SH_ETH_OFFSET_INVALID) { \
2099 #define add_tsu_reg(reg) add_reg_from(reg, sh_eth_tsu_read(mdp, reg))
2213 *buf++ = ioread32(mdp->tsu_addr +
2214 mdp->reg_offset[TSU_ADRH0] +
2236 struct sh_eth_private *mdp = netdev_priv(ndev);
2240 pm_runtime_get_sync(&mdp->pdev->dev);
2242 pm_runtime_put_sync(&mdp->pdev->dev);
2247 struct sh_eth_private *mdp = netdev_priv(ndev);
2248 return mdp->msg_enable;
2253 struct sh_eth_private *mdp = netdev_priv(ndev);
2254 mdp->msg_enable = value;
2276 struct sh_eth_private *mdp = netdev_priv(ndev);
2280 data[i++] = mdp->cur_rx;
2281 data[i++] = mdp->cur_tx;
2282 data[i++] = mdp->dirty_rx;
2283 data[i++] = mdp->dirty_tx;
2299 struct sh_eth_private *mdp = netdev_priv(ndev);
2303 ring->rx_pending = mdp->num_rx_ring;
2304 ring->tx_pending = mdp->num_tx_ring;
2310 struct sh_eth_private *mdp = netdev_priv(ndev);
2330 mdp->irq_enabled = false;
2332 napi_synchronize(&mdp->napi);
2342 mdp->num_rx_ring = ring->rx_pending;
2343 mdp->num_tx_ring = ring->tx_pending;
2367 struct sh_eth_private *mdp = netdev_priv(ndev);
2372 if (mdp->cd->magic) {
2374 wol->wolopts = mdp->wol_enabled ? WAKE_MAGIC : 0;
2380 struct sh_eth_private *mdp = netdev_priv(ndev);
2382 if (!mdp->cd->magic || wol->wolopts & ~WAKE_MAGIC)
2385 mdp->wol_enabled = !!(wol->wolopts & WAKE_MAGIC);
2387 device_set_wakeup_enable(&mdp->pdev->dev, mdp->wol_enabled);
2413 struct sh_eth_private *mdp = netdev_priv(ndev);
2416 pm_runtime_get_sync(&mdp->pdev->dev);
2418 napi_enable(&mdp->napi);
2421 mdp->cd->irq_flags, ndev->name, ndev);
2444 mdp->is_opened = 1;
2451 napi_disable(&mdp->napi);
2452 pm_runtime_put_sync(&mdp->pdev->dev);
2459 struct sh_eth_private *mdp = netdev_priv(ndev);
2465 netif_err(mdp, timer, ndev,
2473 for (i = 0; i < mdp->num_rx_ring; i++) {
2474 rxdesc = &mdp->rx_ring[i];
2477 dev_kfree_skb(mdp->rx_skbuff[i]);
2478 mdp->rx_skbuff[i] = NULL;
2480 for (i = 0; i < mdp->num_tx_ring; i++) {
2481 dev_kfree_skb(mdp->tx_skbuff[i]);
2482 mdp->tx_skbuff[i] = NULL;
2495 struct sh_eth_private *mdp = netdev_priv(ndev);
2501 spin_lock_irqsave(&mdp->lock, flags);
2502 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
2504 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
2506 spin_unlock_irqrestore(&mdp->lock, flags);
2510 spin_unlock_irqrestore(&mdp->lock, flags);
2515 entry = mdp->cur_tx % mdp->num_tx_ring;
2516 mdp->tx_skbuff[entry] = skb;
2517 txdesc = &mdp->tx_ring[entry];
2519 if (!mdp->cd->hw_swap)
2521 dma_addr = dma_map_single(&mdp->pdev->dev, skb->data, skb->len,
2523 if (dma_mapping_error(&mdp->pdev->dev, dma_addr)) {
2531 if (entry >= mdp->num_tx_ring - 1)
2537 mdp->cur_tx++;
2539 if (!(sh_eth_read(ndev, EDTRR) & mdp->cd->edtrr_trns))
2540 sh_eth_write(ndev, mdp->cd->edtrr_trns, EDTRR);
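Note: the guard at line 2502 stops the queue while the ring is still four descriptors short of full, leaving slack between the producer-side check and the next completion interrupt. A standalone sketch of that threshold, again assuming a 64-entry TX ring for illustration:

    /* Evaluates the "nearly full" test used before netif_stop_queue(). */
    #include <stdint.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_TX_RING 64u

    static bool tx_ring_nearly_full(uint32_t cur_tx, uint32_t dirty_tx)
    {
            return (cur_tx - dirty_tx) >= (NUM_TX_RING - 4);
    }

    int main(void)
    {
            printf("59 in flight -> stop? %d\n", tx_ring_nearly_full(59, 0)); /* 0: keep queueing */
            printf("60 in flight -> stop? %d\n", tx_ring_nearly_full(60, 0)); /* 1: stop the queue */
            return 0;
    }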
2563 struct sh_eth_private *mdp = netdev_priv(ndev);
2565 if (mdp->cd->no_tx_cntrs)
2568 if (!mdp->is_opened)
2575 if (mdp->cd->cexcr) {
2591 struct sh_eth_private *mdp = netdev_priv(ndev);
2599 mdp->irq_enabled = false;
2601 napi_disable(&mdp->napi);
2617 mdp->is_opened = 0;
2619 pm_runtime_put(&mdp->pdev->dev);
2641 static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2643 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
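Note: the expression at line 2643 packs per-port enable bits into the TSU_POSTn registers: 0x08 >> (port << 1) yields 0x8 for port 0 and 0x2 for port 1, and the shift by 28 - (entry % 8) * 4 drops that mask into one of eight 4-bit fields, with entry 0 landing in bits 31:28. A worked example that simply re-evaluates the matched formula:

    /* Re-evaluates the line-2643 POST-bit formula for two sample inputs. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t tsu_get_post_bit(int port, int entry)
    {
            return (0x08u >> (port << 1)) << (28 - ((entry % 8) * 4));
    }

    int main(void)
    {
            printf("port 0, entry 0 -> %#010lx\n", (unsigned long)tsu_get_post_bit(0, 0)); /* 0x80000000 */
            printf("port 1, entry 3 -> %#010lx\n", (unsigned long)tsu_get_post_bit(1, 3)); /* 0x00020000 */
            return 0;
    }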
2649 struct sh_eth_private *mdp = netdev_priv(ndev);
2653 tmp = sh_eth_tsu_read(mdp, reg);
2654 sh_eth_tsu_write(mdp, tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg);
2660 struct sh_eth_private *mdp = netdev_priv(ndev);
2665 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2667 tmp = sh_eth_tsu_read(mdp, reg);
2668 sh_eth_tsu_write(mdp, tmp & ~post_mask, reg);
2677 struct sh_eth_private *mdp = netdev_priv(ndev);
2679 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2694 struct sh_eth_private *mdp = netdev_priv(ndev);
2698 iowrite32(val, mdp->tsu_addr + offset);
2703 iowrite32(val, mdp->tsu_addr + offset + 4);
2712 struct sh_eth_private *mdp = netdev_priv(ndev);
2715 val = ioread32(mdp->tsu_addr + offset);
2720 val = ioread32(mdp->tsu_addr + offset + 4);
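Note: the paired accesses at lines 2698/2703 and 2715/2720 touch two consecutive TSU CAM words (offset and offset + 4), which suggests the usual split of a 6-byte MAC address into a 32-bit high word and a 16-bit low word. The packing below is an assumption about the byte order, sketched for illustration only:

    /* Hypothetical packing of a MAC address into the two CAM words. */
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            const uint8_t mac[6] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };
            uint32_t hi = ((uint32_t)mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
            uint32_t lo = ((uint32_t)mac[4] << 8) | mac[5];

            printf("high word: %#010lx\n", (unsigned long)hi);  /* assumed to go to offset     */
            printf("low word:  %#06lx\n", (unsigned long)lo);   /* assumed to go to offset + 4 */
            return 0;
    }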
2728 struct sh_eth_private *mdp = netdev_priv(ndev);
2729 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2755 struct sh_eth_private *mdp = netdev_priv(ndev);
2756 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2760 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2772 struct sh_eth_private *mdp = netdev_priv(ndev);
2773 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2776 if (!mdp->cd->tsu)
2790 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2802 struct sh_eth_private *mdp = netdev_priv(ndev);
2805 if (!mdp->cd->tsu)
2825 struct sh_eth_private *mdp = netdev_priv(ndev);
2828 if (!mdp->cd->tsu)
2846 struct sh_eth_private *mdp = netdev_priv(ndev);
2847 u16 reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2851 if (!mdp->cd->tsu)
2864 struct sh_eth_private *mdp = netdev_priv(ndev);
2869 spin_lock_irqsave(&mdp->lock, flags);
2874 if (mdp->cd->tsu)
2890 } else if (mdp->cd->tsu) {
2909 spin_unlock_irqrestore(&mdp->lock, flags);
2914 struct sh_eth_private *mdp = netdev_priv(ndev);
2917 spin_lock_irqsave(&mdp->lock, flags);
2928 spin_unlock_irqrestore(&mdp->lock, flags);
2935 struct sh_eth_private *mdp = netdev_priv(ndev);
2937 if (changed & NETIF_F_RXCSUM && mdp->cd->rx_csum)
2945 static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2947 if (!mdp->port)
2956 struct sh_eth_private *mdp = netdev_priv(ndev);
2957 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2959 if (unlikely(!mdp->cd->tsu))
2966 mdp->vlan_num_ids++;
2971 if (mdp->vlan_num_ids > 1) {
2973 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2977 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2986 struct sh_eth_private *mdp = netdev_priv(ndev);
2987 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2989 if (unlikely(!mdp->cd->tsu))
2996 mdp->vlan_num_ids--;
2997 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
3003 static void sh_eth_tsu_init(struct sh_eth_private *mdp)
3005 if (!mdp->cd->dual_port) {
3006 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3007 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
3012 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
3013 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
3014 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
3015 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
3016 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
3017 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
3018 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
3019 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
3020 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
3021 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
3022 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
3023 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
3024 sh_eth_tsu_write(mdp, 0, TSU_FWSR); /* all interrupt status clear */
3025 sh_eth_tsu_write(mdp, 0, TSU_FWINMK); /* Disable all interrupt */
3026 sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
3027 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
3028 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
3029 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
3030 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
3034 static int sh_mdio_release(struct sh_eth_private *mdp)
3037 mdiobus_unregister(mdp->mii_bus);
3040 free_mdio_bitbang(mdp->mii_bus);
3046 static int sh_mdio_init(struct sh_eth_private *mdp,
3051 struct platform_device *pdev = mdp->pdev;
3052 struct device *dev = &mdp->pdev->dev;
3060 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
3065 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
3066 if (!mdp->mii_bus)
3070 mdp->mii_bus->name = "sh_mii";
3071 mdp->mii_bus->parent = dev;
3072 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
3077 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
3079 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
3086 free_mdio_bitbang(mdp->mii_bus);
3202 struct sh_eth_private *mdp;
3223 mdp = netdev_priv(ndev);
3224 mdp->num_tx_ring = TX_RING_SIZE;
3225 mdp->num_rx_ring = RX_RING_SIZE;
3226 mdp->addr = devm_ioremap_resource(&pdev->dev, res);
3227 if (IS_ERR(mdp->addr)) {
3228 ret = PTR_ERR(mdp->addr);
3234 spin_lock_init(&mdp->lock);
3235 mdp->pdev = pdev;
3246 mdp->phy_id = pd->phy;
3247 mdp->phy_interface = pd->phy_interface;
3248 mdp->no_ether_link = pd->no_ether_link;
3249 mdp->ether_link_active_low = pd->ether_link_active_low;
3253 mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
3255 mdp->cd = (struct sh_eth_cpu_data *)of_device_get_match_data(&pdev->dev);
3257 mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
3258 if (!mdp->reg_offset) {
3260 mdp->cd->register_type);
3264 sh_eth_set_default_cpu_data(mdp->cd);
3273 if (mdp->cd->rx_csum) {
3279 if (mdp->cd->tsu)
3287 mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;
3297 if (mdp->cd->tsu) {
3319 mdp->tsu_addr = devm_ioremap(&pdev->dev, rtsu->start,
3321 if (!mdp->tsu_addr) {
3326 mdp->port = port;
3331 if (mdp->cd->chip_reset)
3332 mdp->cd->chip_reset(ndev);
3335 sh_eth_tsu_init(mdp);
3339 if (mdp->cd->rmiimode)
3343 ret = sh_mdio_init(mdp, pd);
3350 netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);
3357 if (mdp->cd->magic)
3370 netif_napi_del(&mdp->napi);
3371 sh_mdio_release(mdp);
3385 struct sh_eth_private *mdp = netdev_priv(ndev);
3388 netif_napi_del(&mdp->napi);
3389 sh_mdio_release(mdp);
3400 struct sh_eth_private *mdp = netdev_priv(ndev);
3404 napi_disable(&mdp->napi);
3415 struct sh_eth_private *mdp = netdev_priv(ndev);
3418 napi_enable(&mdp->napi);
3441 struct sh_eth_private *mdp = netdev_priv(ndev);
3449 if (mdp->wol_enabled)
3460 struct sh_eth_private *mdp = netdev_priv(ndev);
3466 if (mdp->wol_enabled)