Lines matching refs:pep in drivers/net/ethernet/marvell/pxa168_eth.c (the leading number on each entry is the line number in that file)

266 static int pxa168_init_hw(struct pxa168_eth_private *pep);
273 static inline u32 rdl(struct pxa168_eth_private *pep, int offset)
275 return readl_relaxed(pep->base + offset);
278 static inline void wrl(struct pxa168_eth_private *pep, int offset, u32 data)
280 writel_relaxed(data, pep->base + offset);
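These two inline helpers funnel every register access in the driver through readl_relaxed()/writel_relaxed() on the ioremapped pep->base. Most later fragments compose them into read-modify-write sequences (see e.g. lines 567-572 and 654-656). A minimal sketch of that idiom; PCR_EN is an assumed illustrative bit name, not taken from this listing:

static void port_rmw_enable(struct pxa168_eth_private *pep)
{
	u32 val;

	val = rdl(pep, PORT_CONFIG);	/* read current port config */
	val |= PCR_EN;			/* hypothetical enable bit */
	wrl(pep, PORT_CONFIG, val);	/* write the modified value back */
}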
283 static void abort_dma(struct pxa168_eth_private *pep)
289 wrl(pep, SDMA_CMD, SDMA_CMD_AR | SDMA_CMD_AT);
293 while ((rdl(pep, SDMA_CMD) & (SDMA_CMD_AR | SDMA_CMD_AT))
300 netdev_err(pep->dev, "%s : DMA Stuck\n", __func__);
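abort_dma() writes the abort bits into SDMA_CMD and then spins until the hardware clears them, logging "DMA Stuck" if it never does. A standalone sketch of that bounded busy-wait pattern, with the retry count and per-poll delay assumed for illustration (needs <linux/delay.h> and <linux/errno.h>):

static int wait_bits_clear(struct pxa168_eth_private *pep, int reg, u32 bits)
{
	int tries = 5000;	/* assumed bound, not from the listing */

	while (rdl(pep, reg) & bits) {
		if (--tries == 0)
			return -ETIMEDOUT;	/* the "DMA Stuck" case */
		udelay(10);	/* assumed back-off between polls */
	}
	return 0;
}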
305 struct pxa168_eth_private *pep = netdev_priv(dev);
310 while (pep->rx_desc_count < pep->rx_ring_size) {
313 skb = netdev_alloc_skb(dev, pep->skb_size);
318 pep->rx_desc_count++;
320 used_rx_desc = pep->rx_used_desc_q;
321 p_used_rx_desc = &pep->p_rx_desc_area[used_rx_desc];
323 p_used_rx_desc->buf_ptr = dma_map_single(&pep->pdev->dev,
328 pep->rx_skb[used_rx_desc] = skb;
336 pep->rx_used_desc_q = (used_rx_desc + 1) % pep->rx_ring_size;
339 pep->rx_resource_err = 0;
348 if (pep->rx_desc_count == 0) {
349 pep->timeout.expires = jiffies + (HZ / 10);
350 add_timer(&pep->timeout);
356 struct pxa168_eth_private *pep = from_timer(pep, t, timeout);
357 napi_schedule(&pep->napi);
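rxq_refill() tops the ring up until rx_desc_count reaches rx_ring_size: each pass allocates an skb of pep->skb_size bytes, maps it for the device, parks it in rx_skb[], and advances rx_used_desc_q modulo the ring size. If allocation fails while the ring is completely empty (rx_desc_count == 0), a HZ/10 (100 ms) timer is armed whose handler simply reschedules NAPI so the refill is retried. A condensed sketch of one refill step; the descriptor status/ownership updates elided in the listing are elided here too, and DMA_FROM_DEVICE is assumed for an RX buffer:

static int refill_one(struct pxa168_eth_private *pep)
{
	int used = pep->rx_used_desc_q;
	struct rx_desc *desc = &pep->p_rx_desc_area[used];
	struct sk_buff *skb;

	skb = netdev_alloc_skb(pep->dev, pep->skb_size);
	if (!skb)
		return -ENOMEM;	/* caller may arm the retry timer */

	desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data,
				       pep->skb_size, DMA_FROM_DEVICE);
	pep->rx_skb[used] = skb;

	/* hand the slot to hardware and advance the producer index */
	pep->rx_used_desc_q = (used + 1) % pep->rx_ring_size;
	pep->rx_desc_count++;
	return 0;
}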
423 * pep - ETHERNET port private data (struct pxa168_eth_private).
436 static int add_del_hash_entry(struct pxa168_eth_private *pep,
466 start = pep->htpr;
491 netdev_info(pep->dev,
522 static void update_hash_table_mac_address(struct pxa168_eth_private *pep,
528 add_del_hash_entry(pep, oaddr, 1, 0, HASH_DELETE);
530 add_del_hash_entry(pep, addr, 1, 0, HASH_ADD);
533 static int init_hash_table(struct pxa168_eth_private *pep)
548 if (!pep->htpr) {
549 pep->htpr = dma_alloc_coherent(pep->dev->dev.parent,
551 &pep->htpr_dma, GFP_KERNEL);
552 if (!pep->htpr)
555 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
557 wrl(pep, HTPR, pep->htpr_dma);
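init_hash_table() allocates the DMA-coherent hash table only on first use (note the !pep->htpr guard), so the buffer survives open/stop cycles; it is then zeroed and its bus address programmed into HTPR so the MAC can walk it directly. The allocate-once pattern in isolation, as a sketch (needs <linux/dma-mapping.h>):

static int lazy_coherent_alloc(struct device *dev, void **cpu,
			       dma_addr_t *dma, size_t size)
{
	if (*cpu)
		return 0;	/* already allocated on a prior open */
	*cpu = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	return *cpu ? 0 : -ENOMEM;
}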
563 struct pxa168_eth_private *pep = netdev_priv(dev);
567 val = rdl(pep, PORT_CONFIG);
572 wrl(pep, PORT_CONFIG, val);
578 memset(pep->htpr, 0, HASH_ADDR_TABLE_SIZE);
579 update_hash_table_mac_address(pep, NULL, dev->dev_addr);
582 update_hash_table_mac_address(pep, NULL, ha->addr);
588 struct pxa168_eth_private *pep = netdev_priv(dev);
589 unsigned int mac_h = rdl(pep, MAC_ADDR_HIGH);
590 unsigned int mac_l = rdl(pep, MAC_ADDR_LOW);
603 struct pxa168_eth_private *pep = netdev_priv(dev);
618 wrl(pep, MAC_ADDR_HIGH, mac_h);
619 wrl(pep, MAC_ADDR_LOW, mac_l);
622 update_hash_table_mac_address(pep, oldMac, dev->dev_addr);
630 struct pxa168_eth_private *pep = netdev_priv(dev);
636 tx_curr_desc = pep->tx_curr_desc_q;
637 wrl(pep, ETH_C_TX_DESC_1,
638 (u32) (pep->tx_desc_dma + tx_curr_desc * sizeof(struct tx_desc)));
641 rx_curr_desc = pep->rx_curr_desc_q;
642 wrl(pep, ETH_C_RX_DESC_0,
643 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
645 wrl(pep, ETH_F_RX_DESC_0,
646 (u32) (pep->rx_desc_dma + rx_curr_desc * sizeof(struct rx_desc)));
649 wrl(pep, INT_CAUSE, 0);
652 wrl(pep, INT_MASK, ALL_INTS);
654 val = rdl(pep, PORT_CONFIG);
656 wrl(pep, PORT_CONFIG, val);
659 val = rdl(pep, SDMA_CMD);
661 wrl(pep, SDMA_CMD, val);
666 struct pxa168_eth_private *pep = netdev_priv(dev);
670 wrl(pep, INT_MASK, 0);
673 wrl(pep, INT_CAUSE, 0);
676 val = rdl(pep, SDMA_CMD);
682 abort_dma(pep);
685 val = rdl(pep, PORT_CONFIG);
687 wrl(pep, PORT_CONFIG, val);
698 struct pxa168_eth_private *pep = netdev_priv(dev);
709 pep->work_todo &= ~WORK_TX_DONE;
710 while (pep->tx_desc_count > 0) {
711 tx_index = pep->tx_used_desc_q;
712 desc = &pep->p_tx_desc_area[tx_index];
722 pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
723 pep->tx_desc_count--;
726 skb = pep->tx_skb[tx_index];
728 pep->tx_skb[tx_index] = NULL;
735 dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
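txq_reclaim() drains completed transmissions: starting at tx_used_desc_q, while tx_desc_count is non-zero it advances the consumer index modulo tx_ring_size, detaches the skb, and unmaps the buffer. A skeleton of that loop; the ownership/error checks the listing elides are marked, and byte_cnt is an assumed field name for the mapped length (the listing only shows a local 'count'):

while (pep->tx_desc_count > 0) {
	int tx_index = pep->tx_used_desc_q;
	struct tx_desc *desc = &pep->p_tx_desc_area[tx_index];
	struct sk_buff *skb = pep->tx_skb[tx_index];
	dma_addr_t addr = desc->buf_ptr;
	int count = desc->byte_cnt;	/* assumed field name */

	/* (ownership/error checks elided in the listing go here) */

	pep->tx_used_desc_q = (tx_index + 1) % pep->tx_ring_size;
	pep->tx_desc_count--;
	pep->tx_skb[tx_index] = NULL;

	dma_unmap_single(&pep->pdev->dev, addr, count, DMA_TO_DEVICE);
	if (skb)
		dev_kfree_skb_irq(skb);	/* safe in softirq context */
}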
747 struct pxa168_eth_private *pep = netdev_priv(dev);
749 netdev_info(dev, "TX timeout desc_count %d\n", pep->tx_desc_count);
751 schedule_work(&pep->tx_timeout_task);
756 struct pxa168_eth_private *pep = container_of(work,
759 struct net_device *dev = pep->dev;
766 struct pxa168_eth_private *pep = netdev_priv(dev);
777 if (pep->rx_resource_err)
779 rx_curr_desc = pep->rx_curr_desc_q;
780 rx_used_desc = pep->rx_used_desc_q;
781 rx_desc = &pep->p_rx_desc_area[rx_curr_desc];
786 skb = pep->rx_skb[rx_curr_desc];
787 pep->rx_skb[rx_curr_desc] = NULL;
789 rx_next_curr_desc = (rx_curr_desc + 1) % pep->rx_ring_size;
790 pep->rx_curr_desc_q = rx_next_curr_desc;
795 pep->rx_resource_err = 1;
796 pep->rx_desc_count--;
797 dma_unmap_single(&pep->pdev->dev, rx_desc->buf_ptr,
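rxq_process() is the consumer side of the same ring: it takes the skb parked at rx_curr_desc_q, advances the consumer index modulo rx_ring_size, decrements rx_desc_count, and unmaps the buffer; rx_resource_err is set when the ring runs dry so the refill path knows to recover, and cleared again once refill succeeds (line 339). One iteration, condensed, with buf_size assumed as the unmap length (the listing truncates that call):

int cur = pep->rx_curr_desc_q;
struct rx_desc *desc = &pep->p_rx_desc_area[cur];
struct sk_buff *skb = pep->rx_skb[cur];

pep->rx_skb[cur] = NULL;
pep->rx_curr_desc_q = (cur + 1) % pep->rx_ring_size;
pep->rx_desc_count--;
dma_unmap_single(&pep->pdev->dev, desc->buf_ptr,
		 desc->buf_size, DMA_FROM_DEVICE);	/* buf_size assumed */
/* status checks and the netif_receive_skb() hand-off elided here */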
840 static int pxa168_eth_collect_events(struct pxa168_eth_private *pep,
846 icr = rdl(pep, INT_CAUSE);
850 wrl(pep, INT_CAUSE, ~icr);
852 pep->work_todo |= WORK_TX_DONE;
863 struct pxa168_eth_private *pep = netdev_priv(dev);
865 if (unlikely(!pxa168_eth_collect_events(pep, dev)))
868 wrl(pep, INT_MASK, 0);
869 napi_schedule(&pep->napi);
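Together these fragments implement the standard NAPI handshake: pxa168_eth_collect_events() reads INT_CAUSE and acks by writing back the complement (~icr), the hard IRQ then masks every source (INT_MASK = 0) and schedules NAPI, and the poll routine re-arms the mask with ALL_INTS (line 1245) only once its budget is satisfied. The two halves, condensed; napi_complete_done() is shown as the completion call, which this listing does not itself confirm:

static irqreturn_t eth_int_sketch(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct pxa168_eth_private *pep = netdev_priv(dev);

	if (unlikely(!pxa168_eth_collect_events(pep, dev)))
		return IRQ_NONE;	/* shared line, not our interrupt */
	wrl(pep, INT_MASK, 0);		/* silence the device until poll runs */
	napi_schedule(&pep->napi);
	return IRQ_HANDLED;
}

/* in the poll routine, when fewer than 'budget' packets were processed: */
if (work_done < budget && napi_complete_done(napi, work_done))
	wrl(pep, INT_MASK, ALL_INTS);	/* re-arm interrupts */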
873 static void pxa168_eth_recalc_skb_size(struct pxa168_eth_private *pep)
883 skb_size = pep->dev->mtu + 36;
890 pep->skb_size = (skb_size + 7) & ~7;
898 pep->skb_size += SKB_DMA_REALIGN;
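pxa168_eth_recalc_skb_size() pads the MTU by 36 bytes of hardware header/CRC overhead, then rounds up with (skb_size + 7) & ~7, the add-then-mask idiom for 8-byte alignment (equivalent to the kernel's ALIGN(skb_size, 8)); SKB_DMA_REALIGN headroom is added on top. For example, 1500 + 36 = 1536 is already a multiple of 8, while 1537 through 1543 would all round to 1544:

static inline u32 round_up_to_8(u32 x)
{
	return (x + 7) & ~7;	/* same result as ALIGN(x, 8) */
}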
902 static int set_port_config_ext(struct pxa168_eth_private *pep)
906 pxa168_eth_recalc_skb_size(pep);
907 if (pep->skb_size <= 1518)
909 else if (pep->skb_size <= 1536)
911 else if (pep->skb_size <= 2048)
917 wrl(pep, PORT_CONFIG_EXT,
931 struct pxa168_eth_private *pep = netdev_priv(dev);
933 u32 cfg, cfg_o = rdl(pep, PORT_CONFIG);
934 u32 cfgext, cfgext_o = rdl(pep, PORT_CONFIG_EXT);
952 wrl(pep, PORT_CONFIG, cfg);
953 wrl(pep, PORT_CONFIG_EXT, cfgext);
960 struct pxa168_eth_private *pep = netdev_priv(dev);
968 phy = mdiobus_scan(pep->smi_bus, pep->phy_addr);
973 pep->phy_intf);
977 cmd.base.phy_address = pep->phy_addr;
978 cmd.base.speed = pep->phy_speed;
979 cmd.base.duplex = pep->phy_duplex;
990 static int pxa168_init_hw(struct pxa168_eth_private *pep)
995 wrl(pep, INT_MASK, 0);
996 wrl(pep, INT_CAUSE, 0);
998 wrl(pep, INT_W_CLEAR, 0);
1002 abort_dma(pep);
1004 err = init_hash_table(pep);
1008 wrl(pep, SDMA_CONFIG, SDCR_BSZ8 | /* Burst size = 32 bytes */
1014 wrl(pep, PORT_CONFIG, PCR_HS); /* Hash size is 1/2kb */
1015 set_port_config_ext(pep);
1022 struct pxa168_eth_private *pep = netdev_priv(dev);
1025 int rx_desc_num = pep->rx_ring_size;
1028 pep->rx_skb = kcalloc(rx_desc_num, sizeof(*pep->rx_skb), GFP_KERNEL);
1029 if (!pep->rx_skb)
1033 pep->rx_desc_count = 0;
1034 size = pep->rx_ring_size * sizeof(struct rx_desc);
1035 pep->rx_desc_area_size = size;
1036 pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1037 &pep->rx_desc_dma,
1039 if (!pep->p_rx_desc_area)
1043 p_rx_desc = pep->p_rx_desc_area;
1045 p_rx_desc[i].next_desc_ptr = pep->rx_desc_dma +
1049 pep->rx_curr_desc_q = 0;
1050 pep->rx_used_desc_q = 0;
1051 pep->rx_desc_area_size = rx_desc_num * sizeof(struct rx_desc);
1054 kfree(pep->rx_skb);
1060 struct pxa168_eth_private *pep = netdev_priv(dev);
1064 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) {
1065 if (pep->rx_skb[curr]) {
1066 dev_kfree_skb(pep->rx_skb[curr]);
1067 pep->rx_desc_count--;
1070 if (pep->rx_desc_count)
1072 pep->rx_desc_count);
1074 if (pep->p_rx_desc_area)
1075 dma_free_coherent(pep->dev->dev.parent, pep->rx_desc_area_size,
1076 pep->p_rx_desc_area, pep->rx_desc_dma);
1077 kfree(pep->rx_skb);
1082 struct pxa168_eth_private *pep = netdev_priv(dev);
1085 int tx_desc_num = pep->tx_ring_size;
1087 pep->tx_skb = kcalloc(tx_desc_num, sizeof(*pep->tx_skb), GFP_KERNEL);
1088 if (!pep->tx_skb)
1092 pep->tx_desc_count = 0;
1093 size = pep->tx_ring_size * sizeof(struct tx_desc);
1094 pep->tx_desc_area_size = size;
1095 pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
1096 &pep->tx_desc_dma,
1098 if (!pep->p_tx_desc_area)
1101 p_tx_desc = pep->p_tx_desc_area;
1103 p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
1106 pep->tx_curr_desc_q = 0;
1107 pep->tx_used_desc_q = 0;
1108 pep->tx_desc_area_size = tx_desc_num * sizeof(struct tx_desc);
1111 kfree(pep->tx_skb);
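Both init loops (lines 1045 and 1103, truncated in this listing) chain each descriptor's next_desc_ptr to its successor and wrap the last entry back to the head, turning the flat coherent area into the circular list the SDMA engine follows. A reconstruction of the TX side under that assumption:

for (i = 0; i < tx_desc_num; i++)
	p_tx_desc[i].next_desc_ptr = pep->tx_desc_dma +
		((i + 1) % tx_desc_num) * sizeof(struct tx_desc);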
1117 struct pxa168_eth_private *pep = netdev_priv(dev);
1121 BUG_ON(pep->tx_used_desc_q != pep->tx_curr_desc_q);
1123 if (pep->p_tx_desc_area)
1124 dma_free_coherent(pep->dev->dev.parent, pep->tx_desc_area_size,
1125 pep->p_tx_desc_area, pep->tx_desc_dma);
1126 kfree(pep->tx_skb);
1131 struct pxa168_eth_private *pep = netdev_priv(dev);
1143 pep->rx_resource_err = 0;
1150 pep->rx_used_desc_q = 0;
1151 pep->rx_curr_desc_q = 0;
1155 pep->rx_used_desc_q = 0;
1156 pep->rx_curr_desc_q = 0;
1158 napi_enable(&pep->napi);
1170 struct pxa168_eth_private *pep = netdev_priv(dev);
1174 wrl(pep, INT_MASK, 0);
1175 wrl(pep, INT_CAUSE, 0);
1177 wrl(pep, INT_W_CLEAR, 0);
1178 napi_disable(&pep->napi);
1179 del_timer_sync(&pep->timeout);
1190 struct pxa168_eth_private *pep = netdev_priv(dev);
1193 set_port_config_ext(pep);
1213 static int eth_alloc_tx_desc_index(struct pxa168_eth_private *pep)
1217 tx_desc_curr = pep->tx_curr_desc_q;
1218 pep->tx_curr_desc_q = (tx_desc_curr + 1) % pep->tx_ring_size;
1219 BUG_ON(pep->tx_curr_desc_q == pep->tx_used_desc_q);
1220 pep->tx_desc_count++;
1227 struct pxa168_eth_private *pep =
1229 struct net_device *dev = pep->dev;
1239 && pep->tx_ring_size - pep->tx_desc_count > 1) {
1245 wrl(pep, INT_MASK, ALL_INTS);
1254 struct pxa168_eth_private *pep = netdev_priv(dev);
1260 tx_index = eth_alloc_tx_desc_index(pep);
1261 desc = &pep->p_tx_desc_area[tx_index];
1263 pep->tx_skb[tx_index] = skb;
1265 desc->buf_ptr = dma_map_single(&pep->pdev->dev, skb->data, length,
1274 wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);
1279 if (pep->tx_ring_size - pep->tx_desc_count <= 1) {
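start_xmit and the poll path cooperate on flow control: eth_alloc_tx_desc_index() hands out the next slot (with BUG_ON guarding the ring invariant), the payload is mapped and the engine kicked with SDMA_CMD_TXDH | SDMA_CMD_ERD, and the queue is stopped when at most one free slot remains (line 1279); the poll side wakes it once reclaim leaves more than one slot free (line 1239). Condensed, with the netif_* calls assumed as the elided halves of those conditionals:

/* in start_xmit, after filling the descriptor: */
wrl(pep, SDMA_CMD, SDMA_CMD_TXDH | SDMA_CMD_ERD);	/* kick TX DMA */
if (pep->tx_ring_size - pep->tx_desc_count <= 1)
	netif_stop_queue(dev);	/* ring (almost) full */

/* in the poll routine, after reclaiming completed descriptors: */
if (netif_queue_stopped(dev) &&
    pep->tx_ring_size - pep->tx_desc_count > 1)
	netif_wake_queue(dev);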
1287 static int smi_wait_ready(struct pxa168_eth_private *pep)
1292 for (i = 0; rdl(pep, SMI) & SMI_BUSY; i++) {
1303 struct pxa168_eth_private *pep = bus->priv;
1307 if (smi_wait_ready(pep)) {
1308 netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
1311 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) | SMI_OP_R);
1313 for (i = 0; !((val = rdl(pep, SMI)) & SMI_R_VALID); i++) {
1315 netdev_warn(pep->dev,
1328 struct pxa168_eth_private *pep = bus->priv;
1330 if (smi_wait_ready(pep)) {
1331 netdev_warn(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
1335 wrl(pep, SMI, (phy_addr << 16) | (regnum << 21) |
1338 if (smi_wait_ready(pep)) {
1339 netdev_err(pep->dev, "pxa168_eth: SMI bus busy timeout\n");
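The SMI fragments pack a whole MDIO transaction into one register write: the PHY address lands in bits 20:16, the register number in bits 25:21, plus an opcode flag (SMI_OP_R for reads); reads then poll until SMI_R_VALID appears and take the data from the low 16 bits. A sketch of the command-word packing, with the write-data placement in the low half assumed from the truncated line 1335:

static u32 smi_cmd_word(int phy_addr, int regnum, u32 op, u16 data)
{
	/* op = SMI_OP_R for reads; for writes the 16-bit value is
	 * assumed to occupy the low half of the word (data = 0 on reads) */
	return (phy_addr << 16) | (regnum << 21) | op | data;
}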
1390 struct pxa168_eth_private *pep = NULL;
1414 pep = netdev_priv(dev);
1415 pep->dev = dev;
1416 pep->clk = clk;
1418 pep->base = devm_platform_ioremap_resource(pdev, 0);
1419 if (IS_ERR(pep->base)) {
1420 err = PTR_ERR(pep->base);
1436 INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
1452 pep->rx_ring_size = NUM_RX_DESCS;
1453 pep->tx_ring_size = NUM_TX_DESCS;
1455 pep->pd = dev_get_platdata(&pdev->dev);
1456 if (pep->pd) {
1457 if (pep->pd->rx_queue_size)
1458 pep->rx_ring_size = pep->pd->rx_queue_size;
1460 if (pep->pd->tx_queue_size)
1461 pep->tx_ring_size = pep->pd->tx_queue_size;
1463 pep->port_num = pep->pd->port_number;
1464 pep->phy_addr = pep->pd->phy_addr;
1465 pep->phy_speed = pep->pd->speed;
1466 pep->phy_duplex = pep->pd->duplex;
1467 pep->phy_intf = pep->pd->intf;
1469 if (pep->pd->init)
1470 pep->pd->init();
1473 &pep->port_num);
1481 of_property_read_u32(np, "reg", &pep->phy_addr);
1483 err = of_get_phy_mode(pdev->dev.of_node, &pep->phy_intf);
1489 BUG_ON(pep->port_num > 2);
1490 netif_napi_add(dev, &pep->napi, pxa168_rx_poll, pep->rx_ring_size);
1492 memset(&pep->timeout, 0, sizeof(struct timer_list));
1493 timer_setup(&pep->timeout, rxq_refill_timer_wrapper, 0);
1495 pep->smi_bus = mdiobus_alloc();
1496 if (!pep->smi_bus) {
1500 pep->smi_bus->priv = pep;
1501 pep->smi_bus->name = "pxa168_eth smi";
1502 pep->smi_bus->read = pxa168_smi_read;
1503 pep->smi_bus->write = pxa168_smi_write;
1504 snprintf(pep->smi_bus->id, MII_BUS_ID_SIZE, "%s-%d",
1506 pep->smi_bus->parent = &pdev->dev;
1507 pep->smi_bus->phy_mask = 0xffffffff;
1508 err = mdiobus_register(pep->smi_bus);
1512 pep->pdev = pdev;
1514 pxa168_init_hw(pep);
1521 mdiobus_unregister(pep->smi_bus);
1523 mdiobus_free(pep->smi_bus);
1534 struct pxa168_eth_private *pep = netdev_priv(dev);
1536 if (pep->htpr) {
1537 dma_free_coherent(pep->dev->dev.parent, HASH_ADDR_TABLE_SIZE,
1538 pep->htpr, pep->htpr_dma);
1539 pep->htpr = NULL;
1544 clk_disable_unprepare(pep->clk);
1545 mdiobus_unregister(pep->smi_bus);
1546 mdiobus_free(pep->smi_bus);
1547 cancel_work_sync(&pep->tx_timeout_task);