Lines matching refs:pdata in the AMD XGBE Ethernet driver (drivers/net/ethernet/amd/xgbe/xgbe-drv.c). Each entry is the source line number followed by the matching line of code.
172 static void xgbe_free_channels(struct xgbe_prv_data *pdata)
176 for (i = 0; i < ARRAY_SIZE(pdata->channel); i++) {
177 if (!pdata->channel[i])
180 kfree(pdata->channel[i]->rx_ring);
181 kfree(pdata->channel[i]->tx_ring);
182 kfree(pdata->channel[i]);
184 pdata->channel[i] = NULL;
187 pdata->channel_count = 0;
190 static int xgbe_alloc_channels(struct xgbe_prv_data *pdata)
198 count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
201 cpu = cpumask_local_spread(i, dev_to_node(pdata->dev));
209 pdata->channel[i] = channel;
212 channel->pdata = pdata;
214 channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
219 if (pdata->per_channel_irq)
220 channel->dma_irq = pdata->channel_irq[i];
222 if (i < pdata->tx_ring_count) {
233 if (i < pdata->rx_ring_count) {
244 netif_dbg(pdata, drv, pdata->netdev,
247 netif_dbg(pdata, drv, pdata->netdev,
253 pdata->channel_count = count;
258 xgbe_free_channels(pdata);
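The free/alloc pair above (lines 172-258) is the channel lifecycle. Two details are worth calling out: every freed slot is set to NULL so a repeated or partial teardown is safe (which is why xgbe_alloc_channels() can use xgbe_free_channels() at line 258 as its single error-unwind path), and the allocation loop binds each channel to a NUMA-local CPU via cpumask_local_spread(i, dev_to_node(dev)) before wiring its DMA register window and per-channel IRQ. A minimal sketch of the free side; my_priv and my_channel are hypothetical stand-ins for the driver's types and are reused by the later sketches:

#include <linux/kernel.h>
#include <linux/slab.h>

struct my_channel {
	void *rx_ring;
	void *tx_ring;
};

struct my_priv {
	struct my_channel *channel[8];	/* fixed array of slots */
	unsigned int channel_count;
};

static void my_free_channels(struct my_priv *priv)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(priv->channel); i++) {
		if (!priv->channel[i])
			continue;

		kfree(priv->channel[i]->rx_ring);
		kfree(priv->channel[i]->tx_ring);
		kfree(priv->channel[i]);

		/* NULL the slot so a second free pass is harmless */
		priv->channel[i] = NULL;
	}

	priv->channel_count = 0;
}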
276 struct xgbe_prv_data *pdata = channel->pdata;
279 netif_info(pdata, drv, pdata->netdev,
281 netif_stop_subqueue(pdata->netdev, channel->queue_index);
288 pdata->hw_if.tx_start_xmit(channel, ring);
309 static void xgbe_enable_rx_tx_int(struct xgbe_prv_data *pdata,
312 struct xgbe_hw_if *hw_if = &pdata->hw_if;
327 static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
331 for (i = 0; i < pdata->channel_count; i++)
332 xgbe_enable_rx_tx_int(pdata, pdata->channel[i]);
335 static void xgbe_disable_rx_tx_int(struct xgbe_prv_data *pdata,
338 struct xgbe_hw_if *hw_if = &pdata->hw_if;
353 static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
357 for (i = 0; i < pdata->channel_count; i++)
358 xgbe_disable_rx_tx_int(pdata, pdata->channel[i]);
361 static bool xgbe_ecc_sec(struct xgbe_prv_data *pdata, unsigned long *period,
372 dev_warn_once(pdata->dev,
377 dev_warn_once(pdata->dev,
386 static bool xgbe_ecc_ded(struct xgbe_prv_data *pdata, unsigned long *period,
397 netdev_alert(pdata->netdev,
408 struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_ecc);
413 ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
414 ecc_isr &= XP_IOREAD(pdata, XP_ECC_IER);
415 netif_dbg(pdata, intr, pdata->netdev, "ECC_ISR=%#010x\n", ecc_isr);
418 stop |= xgbe_ecc_ded(pdata, &pdata->tx_ded_period,
419 &pdata->tx_ded_count, "TX fifo");
423 stop |= xgbe_ecc_ded(pdata, &pdata->rx_ded_period,
424 &pdata->rx_ded_count, "RX fifo");
428 stop |= xgbe_ecc_ded(pdata, &pdata->desc_ded_period,
429 &pdata->desc_ded_count,
434 pdata->hw_if.disable_ecc_ded(pdata);
435 schedule_work(&pdata->stopdev_work);
440 if (xgbe_ecc_sec(pdata, &pdata->tx_sec_period,
441 &pdata->tx_sec_count, "TX fifo"))
442 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_TX);
446 if (xgbe_ecc_sec(pdata, &pdata->rx_sec_period,
447 &pdata->rx_sec_count, "RX fifo"))
448 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_RX);
451 if (xgbe_ecc_sec(pdata, &pdata->desc_sec_period,
452 &pdata->desc_sec_count, "descriptor cache"))
453 pdata->hw_if.disable_ecc_sec(pdata, XGBE_ECC_SEC_DESC);
457 XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);
460 if (pdata->vdata->irq_reissue_support)
461 XP_IOWRITE(pdata, XP_INT_REISSUE_EN, 1 << 1);
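Lines 361-461 are windowed ECC accounting: correctable single-bit (SEC) errors past a threshold merely disable further reporting for that source, while uncorrectable double-bit (DED) errors disable DED reporting and schedule a device stop (lines 434-435). Note also lines 413-414, where the raw status is masked with the enable register so only armed sources are serviced. A hedged sketch of the window/threshold check implied by xgbe_ecc_sec(); the window length and threshold here are hypothetical placeholders:

#include <linux/jiffies.h>
#include <linux/device.h>

static bool my_ecc_event(struct device *dev, unsigned long *period,
			 unsigned int *count, const char *area)
{
	if (time_before(jiffies, *period)) {
		(*count)++;			/* still inside the window */
	} else {
		*period = jiffies + 60 * HZ;	/* hypothetical window */
		*count = 1;
	}

	if (*count > 10) {			/* hypothetical threshold */
		dev_warn_once(dev, "%s ECC errors above threshold\n",
			      area);
		return true;	/* caller disables this error source */
	}

	return false;
}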
466 struct xgbe_prv_data *pdata = data;
468 if (pdata->isr_as_tasklet)
469 tasklet_schedule(&pdata->tasklet_ecc);
471 xgbe_ecc_isr_task(&pdata->tasklet_ecc);
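Lines 466-471 (and 605-610 below) are the hard-IRQ hand-off: when isr_as_tasklet is set the handler only schedules a tasklet, otherwise the task function runs synchronously in interrupt context. A minimal sketch of that shape, assuming my_priv carries a bool isr_as_tasklet and a struct tasklet_struct tasklet_ecc; the tasklet itself is registered with tasklet_setup() at IRQ-request time (lines 1010-1011):

#include <linux/interrupt.h>

static void my_ecc_isr_task(struct tasklet_struct *t)
{
	struct my_priv *priv = from_tasklet(priv, t, tasklet_ecc);

	/* read and acknowledge hardware status here */
}

static irqreturn_t my_ecc_isr(int irq, void *data)
{
	struct my_priv *priv = data;

	if (priv->isr_as_tasklet)
		tasklet_schedule(&priv->tasklet_ecc);
	else
		my_ecc_isr_task(&priv->tasklet_ecc);

	return IRQ_HANDLED;
}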
478 struct xgbe_prv_data *pdata = from_tasklet(pdata, t, tasklet_dev);
479 struct xgbe_hw_if *hw_if = &pdata->hw_if;
489 dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
493 netif_dbg(pdata, intr, pdata->netdev, "DMA_ISR=%#010x\n", dma_isr);
495 for (i = 0; i < pdata->channel_count; i++) {
499 channel = pdata->channel[i];
502 netif_dbg(pdata, intr, pdata->netdev, "DMA_CH%u_ISR=%#010x\n",
509 if (!pdata->per_channel_irq &&
512 if (napi_schedule_prep(&pdata->napi)) {
514 xgbe_disable_rx_tx_ints(pdata);
517 __napi_schedule(&pdata->napi);
529 pdata->ext_stats.rx_buffer_unavailable++;
533 schedule_work(&pdata->restart_work);
540 mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
542 netif_dbg(pdata, intr, pdata->netdev, "MAC_ISR=%#010x\n",
546 hw_if->tx_mmc_int(pdata);
549 hw_if->rx_mmc_int(pdata);
552 mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
554 netif_dbg(pdata, intr, pdata->netdev,
559 pdata->tx_tstamp =
560 hw_if->get_tx_tstamp(pdata);
561 queue_work(pdata->dev_workqueue,
562 &pdata->tx_tstamp_work);
567 mac_mdioisr = XGMAC_IOREAD(pdata, MAC_MDIOISR);
569 netif_dbg(pdata, intr, pdata->netdev,
574 complete(&pdata->mdio_complete);
580 if (pdata->dev_irq == pdata->an_irq)
581 pdata->phy_if.an_isr(pdata);
584 if (pdata->vdata->ecc_support && (pdata->dev_irq == pdata->ecc_irq))
585 xgbe_ecc_isr_task(&pdata->tasklet_ecc);
588 if (pdata->vdata->i2c_support && (pdata->dev_irq == pdata->i2c_irq))
589 pdata->i2c_if.i2c_isr(pdata);
592 if (pdata->vdata->irq_reissue_support) {
596 if (!pdata->per_channel_irq)
599 XP_IOWRITE(pdata, XP_INT_REISSUE_EN, reissue_mask);
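Inside xgbe_isr_task() (lines 478-599), the shared-interrupt path at lines 509-517 is the standard NAPI hand-off: claim the poll with napi_schedule_prep(), mask further RX/TX interrupts, then __napi_schedule(). A short sketch, with my_disable_rx_tx_ints() standing in for the helper declared at line 353; the matching re-enable happens in the poll function once it finishes under budget (line 2747):

if (napi_schedule_prep(&priv->napi)) {
	/* silence DMA interrupts until the poll has run */
	my_disable_rx_tx_ints(priv);

	/* turn on polling */
	__napi_schedule(&priv->napi);
}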
605 struct xgbe_prv_data *pdata = data;
607 if (pdata->isr_as_tasklet)
608 tasklet_schedule(&pdata->tasklet_dev);
610 xgbe_isr_task(&pdata->tasklet_dev);
618 struct xgbe_prv_data *pdata = channel->pdata;
626 if (pdata->channel_irq_mode)
627 xgbe_disable_rx_tx_int(pdata, channel);
647 struct xgbe_prv_data *pdata = channel->pdata;
652 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
656 if (pdata->per_channel_irq)
657 if (pdata->channel_irq_mode)
658 xgbe_disable_rx_tx_int(pdata, channel);
662 xgbe_disable_rx_tx_ints(pdata);
675 struct xgbe_prv_data *pdata = container_of(work,
679 pdata->phy_if.phy_status(pdata);
684 struct xgbe_prv_data *pdata = from_timer(pdata, t, service_timer);
688 queue_work(pdata->dev_workqueue, &pdata->service_work);
690 mod_timer(&pdata->service_timer, jiffies + HZ);
692 if (!pdata->tx_usecs)
695 for (i = 0; i < pdata->channel_count; i++) {
696 channel = pdata->channel[i];
701 jiffies + usecs_to_jiffies(pdata->tx_usecs));
705 static void xgbe_init_timers(struct xgbe_prv_data *pdata)
710 timer_setup(&pdata->service_timer, xgbe_service_timer, 0);
712 for (i = 0; i < pdata->channel_count; i++) {
713 channel = pdata->channel[i];
721 static void xgbe_start_timers(struct xgbe_prv_data *pdata)
723 mod_timer(&pdata->service_timer, jiffies + HZ);
726 static void xgbe_stop_timers(struct xgbe_prv_data *pdata)
731 del_timer_sync(&pdata->service_timer);
733 for (i = 0; i < pdata->channel_count; i++) {
734 channel = pdata->channel[i];
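Lines 684-734 exercise the whole timer lifecycle: timer_setup() once at init (line 710), mod_timer() to (re)arm, and del_timer_sync() at stop so a running handler is waited out (line 731). The service timer re-arms itself every second and kicks the device workqueue. A compact sketch:

#include <linux/timer.h>
#include <linux/workqueue.h>

static void my_service_timer(struct timer_list *t)
{
	struct my_priv *priv = from_timer(priv, t, service_timer);

	queue_work(priv->dev_workqueue, &priv->service_work);

	/* self re-arming: fire again one second from now */
	mod_timer(&priv->service_timer, jiffies + HZ);
}

/* init : timer_setup(&priv->service_timer, my_service_timer, 0);
 * start: mod_timer(&priv->service_timer, jiffies + HZ);
 * stop : del_timer_sync(&priv->service_timer);
 */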
744 void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
747 struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
749 mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
750 mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
751 mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
755 hw_feat->version = XGMAC_IOREAD(pdata, MAC_VR);
844 if (netif_msg_probe(pdata)) {
845 dev_dbg(pdata->dev, "Hardware features:\n");
848 dev_dbg(pdata->dev, " 1GbE support : %s\n",
850 dev_dbg(pdata->dev, " VLAN hash filter : %s\n",
852 dev_dbg(pdata->dev, " MDIO interface : %s\n",
854 dev_dbg(pdata->dev, " Wake-up packet support : %s\n",
856 dev_dbg(pdata->dev, " Magic packet support : %s\n",
858 dev_dbg(pdata->dev, " Management counters : %s\n",
860 dev_dbg(pdata->dev, " ARP offload : %s\n",
862 dev_dbg(pdata->dev, " IEEE 1588-2008 Timestamp : %s\n",
864 dev_dbg(pdata->dev, " Energy Efficient Ethernet : %s\n",
866 dev_dbg(pdata->dev, " TX checksum offload : %s\n",
868 dev_dbg(pdata->dev, " RX checksum offload : %s\n",
870 dev_dbg(pdata->dev, " Additional MAC addresses : %u\n",
872 dev_dbg(pdata->dev, " Timestamp source : %s\n",
876 dev_dbg(pdata->dev, " SA/VLAN insertion : %s\n",
878 dev_dbg(pdata->dev, " VXLAN/NVGRE support : %s\n",
882 dev_dbg(pdata->dev, " RX fifo size : %u\n",
884 dev_dbg(pdata->dev, " TX fifo size : %u\n",
886 dev_dbg(pdata->dev, " IEEE 1588 high word : %s\n",
888 dev_dbg(pdata->dev, " DMA width : %u\n",
890 dev_dbg(pdata->dev, " Data Center Bridging : %s\n",
892 dev_dbg(pdata->dev, " Split header : %s\n",
894 dev_dbg(pdata->dev, " TCP Segmentation Offload : %s\n",
896 dev_dbg(pdata->dev, " Debug memory interface : %s\n",
898 dev_dbg(pdata->dev, " Receive Side Scaling : %s\n",
900 dev_dbg(pdata->dev, " Traffic Class count : %u\n",
902 dev_dbg(pdata->dev, " Hash table size : %u\n",
904 dev_dbg(pdata->dev, " L3/L4 Filters : %u\n",
908 dev_dbg(pdata->dev, " RX queue count : %u\n",
910 dev_dbg(pdata->dev, " TX queue count : %u\n",
912 dev_dbg(pdata->dev, " RX DMA channel count : %u\n",
914 dev_dbg(pdata->dev, " TX DMA channel count : %u\n",
916 dev_dbg(pdata->dev, " PPS outputs : %u\n",
918 dev_dbg(pdata->dev, " Auxiliary snapshot inputs : %u\n",
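The feature dump at lines 844-918 is printed only when the "probe" message class is enabled through ethtool's msglvl; netif_msg_probe() simply tests a bit of the cached msg_enable word. A one-line sketch of the gate, assuming my_priv carries msg_enable and a cached hw_feat struct:

if (netif_msg_probe(priv)) {	/* priv->msg_enable & NETIF_MSG_PROBE */
	dev_dbg(priv->dev, "Hardware features:\n");
	dev_dbg(priv->dev, " RX fifo size : %u\n", hw_feat->rx_fifo_size);
	dev_dbg(priv->dev, " TX fifo size : %u\n", hw_feat->tx_fifo_size);
}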
926 struct xgbe_prv_data *pdata = netdev_priv(netdev);
928 pdata->vxlan_port = be16_to_cpu(ti->port);
929 pdata->hw_if.enable_vxlan(pdata);
937 struct xgbe_prv_data *pdata = netdev_priv(netdev);
939 pdata->hw_if.disable_vxlan(pdata);
940 pdata->vxlan_port = 0;
959 static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
964 if (pdata->per_channel_irq) {
965 for (i = 0; i < pdata->channel_count; i++) {
966 channel = pdata->channel[i];
968 netif_napi_add(pdata->netdev, &channel->napi,
975 netif_napi_add(pdata->netdev, &pdata->napi,
978 napi_enable(&pdata->napi);
982 static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
987 if (pdata->per_channel_irq) {
988 for (i = 0; i < pdata->channel_count; i++) {
989 channel = pdata->channel[i];
996 napi_disable(&pdata->napi);
999 netif_napi_del(&pdata->napi);
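Lines 959-999 register either one NAPI context per channel (when per-channel IRQs exist) or a single shared context, with an add/del flag so enable/disable can run without re-registering. A hedged sketch; note that netif_napi_add() dropped its weight argument in recent kernels, so older trees pass a fourth weight parameter:

#include <linux/netdevice.h>

static void my_napi_enable(struct my_priv *priv, bool add)
{
	unsigned int i;

	if (priv->per_channel_irq) {
		for (i = 0; i < priv->channel_count; i++) {
			struct my_channel *ch = priv->channel[i];

			if (add)
				netif_napi_add(priv->netdev, &ch->napi,
					       my_one_poll);
			napi_enable(&ch->napi);
		}
	} else {
		if (add)
			netif_napi_add(priv->netdev, &priv->napi,
				       my_all_poll);
		napi_enable(&priv->napi);
	}
}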
1003 static int xgbe_request_irqs(struct xgbe_prv_data *pdata)
1006 struct net_device *netdev = pdata->netdev;
1010 tasklet_setup(&pdata->tasklet_dev, xgbe_isr_task);
1011 tasklet_setup(&pdata->tasklet_ecc, xgbe_ecc_isr_task);
1013 ret = devm_request_irq(pdata->dev, pdata->dev_irq, xgbe_isr, 0,
1014 netdev_name(netdev), pdata);
1017 pdata->dev_irq);
1021 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq)) {
1022 ret = devm_request_irq(pdata->dev, pdata->ecc_irq, xgbe_ecc_isr,
1023 0, pdata->ecc_name, pdata);
1026 pdata->ecc_irq);
1031 if (!pdata->per_channel_irq)
1034 for (i = 0; i < pdata->channel_count; i++) {
1035 channel = pdata->channel[i];
1041 ret = devm_request_irq(pdata->dev, channel->dma_irq,
1058 for (i--; i < pdata->channel_count; i--) {
1059 channel = pdata->channel[i];
1062 devm_free_irq(pdata->dev, channel->dma_irq, channel);
1065 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
1066 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
1069 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1074 static void xgbe_free_irqs(struct xgbe_prv_data *pdata)
1079 devm_free_irq(pdata->dev, pdata->dev_irq, pdata);
1081 tasklet_kill(&pdata->tasklet_dev);
1082 tasklet_kill(&pdata->tasklet_ecc);
1084 if (pdata->vdata->ecc_support && (pdata->dev_irq != pdata->ecc_irq))
1085 devm_free_irq(pdata->dev, pdata->ecc_irq, pdata);
1087 if (!pdata->per_channel_irq)
1090 for (i = 0; i < pdata->channel_count; i++) {
1091 channel = pdata->channel[i];
1094 devm_free_irq(pdata->dev, channel->dma_irq, channel);
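xgbe_request_irqs()/xgbe_free_irqs() (lines 1003-1094) request the device IRQ, an optional ECC IRQ, then one IRQ per channel, unwinding in reverse on failure. The reverse loop at line 1058 relies on unsigned wraparound: after i-- passes zero, i becomes UINT_MAX and the i < channel_count test ends the loop. A hedged sketch of the same shape ("my-dma" is a placeholder name):

static int my_request_irqs(struct my_priv *priv)
{
	unsigned int i;
	int ret;

	ret = devm_request_irq(priv->dev, priv->dev_irq, my_isr, 0,
			       netdev_name(priv->netdev), priv);
	if (ret)
		return ret;

	for (i = 0; i < priv->channel_count; i++) {
		ret = devm_request_irq(priv->dev,
				       priv->channel[i]->dma_irq,
				       my_dma_isr, 0, "my-dma",
				       priv->channel[i]);
		if (ret)
			goto err_dma_irq;
	}

	return 0;

err_dma_irq:
	/* unsigned i wraps past zero and exits the loop */
	for (i--; i < priv->channel_count; i--)
		devm_free_irq(priv->dev, priv->channel[i]->dma_irq,
			      priv->channel[i]);

	devm_free_irq(priv->dev, priv->dev_irq, priv);

	return ret;
}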
1098 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
1100 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1104 pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
1105 pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
1107 hw_if->config_tx_coalesce(pdata);
1112 void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
1114 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1118 pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
1119 pdata->rx_usecs = XGMAC_INIT_DMA_RX_USECS;
1120 pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
1122 hw_if->config_rx_coalesce(pdata);
1127 static void xgbe_free_tx_data(struct xgbe_prv_data *pdata)
1129 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1136 for (i = 0; i < pdata->channel_count; i++) {
1137 ring = pdata->channel[i]->tx_ring;
1143 desc_if->unmap_rdata(pdata, rdata);
1150 static void xgbe_free_rx_data(struct xgbe_prv_data *pdata)
1152 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1159 for (i = 0; i < pdata->channel_count; i++) {
1160 ring = pdata->channel[i]->rx_ring;
1166 desc_if->unmap_rdata(pdata, rdata);
1173 static int xgbe_phy_reset(struct xgbe_prv_data *pdata)
1175 pdata->phy_link = -1;
1176 pdata->phy_speed = SPEED_UNKNOWN;
1178 return pdata->phy_if.phy_reset(pdata);
1183 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1184 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1190 (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
1196 spin_lock_irqsave(&pdata->lock, flags);
1203 xgbe_stop_timers(pdata);
1204 flush_workqueue(pdata->dev_workqueue);
1206 hw_if->powerdown_tx(pdata);
1207 hw_if->powerdown_rx(pdata);
1209 xgbe_napi_disable(pdata, 0);
1211 pdata->power_down = 1;
1213 spin_unlock_irqrestore(&pdata->lock, flags);
1222 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1223 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1229 (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
1235 spin_lock_irqsave(&pdata->lock, flags);
1237 pdata->power_down = 0;
1239 xgbe_napi_enable(pdata, 0);
1241 hw_if->powerup_tx(pdata);
1242 hw_if->powerup_rx(pdata);
1249 xgbe_start_timers(pdata);
1251 spin_unlock_irqrestore(&pdata->lock, flags);
1258 static void xgbe_free_memory(struct xgbe_prv_data *pdata)
1260 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1263 desc_if->free_ring_resources(pdata);
1266 xgbe_free_channels(pdata);
1269 static int xgbe_alloc_memory(struct xgbe_prv_data *pdata)
1271 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1272 struct net_device *netdev = pdata->netdev;
1275 if (pdata->new_tx_ring_count) {
1276 pdata->tx_ring_count = pdata->new_tx_ring_count;
1277 pdata->tx_q_count = pdata->tx_ring_count;
1278 pdata->new_tx_ring_count = 0;
1281 if (pdata->new_rx_ring_count) {
1282 pdata->rx_ring_count = pdata->new_rx_ring_count;
1283 pdata->new_rx_ring_count = 0;
1287 pdata->rx_buf_size = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
1290 ret = xgbe_alloc_channels(pdata);
1295 ret = desc_if->alloc_ring_resources(pdata);
1300 xgbe_init_timers(pdata);
1305 xgbe_free_memory(pdata);
1310 static int xgbe_start(struct xgbe_prv_data *pdata)
1312 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1313 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1314 struct net_device *netdev = pdata->netdev;
1319 ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
1325 ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
1333 XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
1334 i % pdata->rx_ring_count);
1336 ret = hw_if->init(pdata);
1340 xgbe_napi_enable(pdata, 1);
1342 ret = xgbe_request_irqs(pdata);
1346 ret = phy_if->phy_start(pdata);
1350 hw_if->enable_tx(pdata);
1351 hw_if->enable_rx(pdata);
1357 xgbe_start_timers(pdata);
1358 queue_work(pdata->dev_workqueue, &pdata->service_work);
1360 clear_bit(XGBE_STOPPED, &pdata->dev_state);
1365 xgbe_free_irqs(pdata);
1368 xgbe_napi_disable(pdata, 1);
1370 hw_if->exit(pdata);
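xgbe_start() (lines 1310-1370) fixes the bring-up order: publish real queue counts, initialize hardware, enable NAPI, request IRQs, start the PHY, enable the DMA paths, then start timers, and its goto ladder tears down in exactly the reverse order on failure. A condensed sketch of the ladder, with priv->hw_if mirroring the driver's hw_if ops table:

static int my_start(struct my_priv *priv)
{
	int ret;

	ret = netif_set_real_num_tx_queues(priv->netdev,
					   priv->tx_ring_count);
	if (ret)
		return ret;

	ret = priv->hw_if.init(priv);
	if (ret)
		return ret;

	my_napi_enable(priv, true);

	ret = my_request_irqs(priv);
	if (ret)
		goto err_napi;

	/* ... phy start, enable TX/RX, start timers ... */

	return 0;

err_napi:
	my_napi_disable(priv, true);
	priv->hw_if.exit(priv);

	return ret;
}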
1375 static void xgbe_stop(struct xgbe_prv_data *pdata)
1377 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1378 struct xgbe_phy_if *phy_if = &pdata->phy_if;
1380 struct net_device *netdev = pdata->netdev;
1386 if (test_bit(XGBE_STOPPED, &pdata->dev_state))
1390 netif_carrier_off(pdata->netdev);
1392 xgbe_stop_timers(pdata);
1393 flush_workqueue(pdata->dev_workqueue);
1397 hw_if->disable_tx(pdata);
1398 hw_if->disable_rx(pdata);
1400 phy_if->phy_stop(pdata);
1402 xgbe_free_irqs(pdata);
1404 xgbe_napi_disable(pdata, 1);
1406 hw_if->exit(pdata);
1408 for (i = 0; i < pdata->channel_count; i++) {
1409 channel = pdata->channel[i];
1417 set_bit(XGBE_STOPPED, &pdata->dev_state);
1424 struct xgbe_prv_data *pdata = container_of(work,
1430 xgbe_stop(pdata);
1432 xgbe_free_tx_data(pdata);
1433 xgbe_free_rx_data(pdata);
1437 netdev_alert(pdata->netdev, "device stopped\n");
1440 void xgbe_full_restart_dev(struct xgbe_prv_data *pdata)
1443 if (!netif_running(pdata->netdev))
1446 xgbe_stop(pdata);
1448 xgbe_free_memory(pdata);
1449 xgbe_alloc_memory(pdata);
1451 xgbe_start(pdata);
1454 void xgbe_restart_dev(struct xgbe_prv_data *pdata)
1457 if (!netif_running(pdata->netdev))
1460 xgbe_stop(pdata);
1462 xgbe_free_tx_data(pdata);
1463 xgbe_free_rx_data(pdata);
1465 xgbe_start(pdata);
1470 struct xgbe_prv_data *pdata = container_of(work,
1476 xgbe_restart_dev(pdata);
1483 struct xgbe_prv_data *pdata = container_of(work,
1490 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1491 if (!pdata->tx_tstamp_skb)
1494 if (pdata->tx_tstamp) {
1495 nsec = timecounter_cyc2time(&pdata->tstamp_tc,
1496 pdata->tx_tstamp);
1500 skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
1503 dev_kfree_skb_any(pdata->tx_tstamp_skb);
1505 pdata->tx_tstamp_skb = NULL;
1508 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
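The timestamp worker (lines 1483-1508) holds tstamp_lock while it converts the captured hardware cycle count to nanoseconds with timecounter_cyc2time() and hands the result to the stack via skb_tstamp_tx(); the skb was pinned earlier with skb_get() (line 1665), and only one TX timestamp is ever in flight. A hedged sketch of just the delivery step:

#include <linux/skbuff.h>
#include <linux/timecounter.h>
#include <linux/ktime.h>

static void my_deliver_tx_tstamp(struct my_priv *priv, u64 cycles)
{
	struct skb_shared_hwtstamps hwtstamps;
	u64 nsec;

	nsec = timecounter_cyc2time(&priv->tstamp_tc, cycles);

	memset(&hwtstamps, 0, sizeof(hwtstamps));
	hwtstamps.hwtstamp = ns_to_ktime(nsec);
	skb_tstamp_tx(priv->tx_tstamp_skb, &hwtstamps);

	/* drop the skb_get() reference and clear the slot */
	dev_kfree_skb_any(priv->tx_tstamp_skb);
	priv->tx_tstamp_skb = NULL;
}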
1511 static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
1514 if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
1515 sizeof(pdata->tstamp_config)))
1521 static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
1645 pdata->hw_if.config_tstamp(pdata, mac_tscr);
1647 memcpy(&pdata->tstamp_config, &config, sizeof(config));
1652 static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
1659 spin_lock_irqsave(&pdata->tstamp_lock, flags);
1660 if (pdata->tx_tstamp_skb) {
1665 pdata->tx_tstamp_skb = skb_get(skb);
1668 spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
1761 static void xgbe_packet_info(struct xgbe_prv_data *pdata,
1814 (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
1834 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1838 snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
1841 snprintf(pdata->ecc_name, sizeof(pdata->ecc_name) - 1, "%s-ecc",
1844 snprintf(pdata->i2c_name, sizeof(pdata->i2c_name) - 1, "%s-i2c",
1848 pdata->dev_workqueue =
1850 if (!pdata->dev_workqueue) {
1855 pdata->an_workqueue =
1856 create_singlethread_workqueue(pdata->an_name);
1857 if (!pdata->an_workqueue) {
1864 ret = xgbe_phy_reset(pdata);
1869 ret = clk_prepare_enable(pdata->sysclk);
1875 ret = clk_prepare_enable(pdata->ptpclk);
1881 INIT_WORK(&pdata->service_work, xgbe_service);
1882 INIT_WORK(&pdata->restart_work, xgbe_restart);
1883 INIT_WORK(&pdata->stopdev_work, xgbe_stopdev);
1884 INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
1886 ret = xgbe_alloc_memory(pdata);
1890 ret = xgbe_start(pdata);
1894 clear_bit(XGBE_DOWN, &pdata->dev_state);
1899 xgbe_free_memory(pdata);
1902 clk_disable_unprepare(pdata->ptpclk);
1905 clk_disable_unprepare(pdata->sysclk);
1908 destroy_workqueue(pdata->an_workqueue);
1911 destroy_workqueue(pdata->dev_workqueue);
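xgbe_open() (lines 1834-1911) creates per-device singlethreaded workqueues named after the netdev, initializes each deferred work item, and recovers from any failure through a strict goto ladder. A trimmed sketch of the pattern:

#include <linux/workqueue.h>

static int my_open(struct net_device *netdev)
{
	struct my_priv *priv = netdev_priv(netdev);
	int ret;

	priv->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!priv->dev_workqueue)
		return -ENOMEM;

	INIT_WORK(&priv->restart_work, my_restart);

	ret = my_alloc_memory(priv);
	if (ret)
		goto err_wq;

	ret = my_start(priv);
	if (ret)
		goto err_mem;

	return 0;

err_mem:
	my_free_memory(priv);
err_wq:
	destroy_workqueue(priv->dev_workqueue);

	return ret;
}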
1918 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1921 xgbe_stop(pdata);
1923 xgbe_free_memory(pdata);
1926 clk_disable_unprepare(pdata->ptpclk);
1927 clk_disable_unprepare(pdata->sysclk);
1929 destroy_workqueue(pdata->an_workqueue);
1931 destroy_workqueue(pdata->dev_workqueue);
1933 set_bit(XGBE_DOWN, &pdata->dev_state);
1940 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1941 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1942 struct xgbe_desc_if *desc_if = &pdata->desc_if;
1951 channel = pdata->channel[skb->queue_mapping];
1959 netif_err(pdata, tx_err, netdev,
1967 xgbe_packet_info(pdata, ring, skb, packet);
1976 netif_err(pdata, tx_err, netdev,
1988 xgbe_prep_tx_tstamp(pdata, skb, packet);
1996 if (netif_msg_pktdata(pdata))
2010 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2011 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2015 hw_if->config_rx_mode(pdata);
2022 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2023 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2033 hw_if->set_mac_address(pdata, netdev->dev_addr);
2042 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2047 ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
2051 ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
2063 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2072 pdata->rx_buf_size = ret;
2075 xgbe_restart_dev(pdata);
2084 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2087 schedule_work(&pdata->restart_work);
2093 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2094 struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
2098 pdata->hw_if.read_mmc_stats(pdata);
2122 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2123 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2127 set_bit(vid, pdata->active_vlans);
2128 hw_if->update_vlan_hash_table(pdata);
2138 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2139 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2143 clear_bit(vid, pdata->active_vlans);
2144 hw_if->update_vlan_hash_table(pdata);
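Lines 2122-2144: the active VLAN set is a plain bitmap indexed by VID; add/kill simply set_bit()/clear_bit() the entry and then recompute the hardware hash filter from the whole bitmap. Sketch, assuming the private struct declares DECLARE_BITMAP(active_vlans, VLAN_N_VID):

#include <linux/if_vlan.h>

static int my_vlan_rx_add_vid(struct net_device *netdev,
			      __be16 proto, u16 vid)
{
	struct my_priv *priv = netdev_priv(netdev);

	set_bit(vid, priv->active_vlans);
	priv->hw_if.update_vlan_hash_table(priv);

	return 0;
}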
2154 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2160 if (pdata->per_channel_irq) {
2161 for (i = 0; i < pdata->channel_count; i++) {
2162 channel = pdata->channel[i];
2166 disable_irq(pdata->dev_irq);
2167 xgbe_isr(pdata->dev_irq, pdata);
2168 enable_irq(pdata->dev_irq);
2178 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2188 if (tc > pdata->hw_feat.tc_cnt)
2191 pdata->num_tcs = tc;
2192 pdata->hw_if.config_tc(pdata);
2200 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2205 if (!pdata->hw_feat.vxn)
2243 struct xgbe_prv_data *pdata = netdev_priv(netdev);
2244 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2248 rxhash = pdata->netdev_features & NETIF_F_RXHASH;
2249 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
2250 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
2251 rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
2254 ret = hw_if->enable_rss(pdata);
2256 ret = hw_if->disable_rss(pdata);
2261 hw_if->enable_rx_csum(pdata);
2263 hw_if->disable_rx_csum(pdata);
2266 hw_if->enable_rx_vlan_stripping(pdata);
2268 hw_if->disable_rx_vlan_stripping(pdata);
2271 hw_if->enable_rx_vlan_filtering(pdata);
2273 hw_if->disable_rx_vlan_filtering(pdata);
2275 pdata->netdev_features = features;
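xgbe_set_features() (lines 2243-2275) snapshots the feature bits currently programmed, compares them with the requested set, and only touches hardware for bits that actually flipped, caching the new mask at the end. One feature shown as a sketch:

static int my_set_features(struct net_device *netdev,
			   netdev_features_t features)
{
	struct my_priv *priv = netdev_priv(netdev);
	netdev_features_t rxcsum = priv->netdev_features & NETIF_F_RXCSUM;

	if ((features & NETIF_F_RXCSUM) && !rxcsum)
		priv->hw_if.enable_rx_csum(priv);
	else if (!(features & NETIF_F_RXCSUM) && rxcsum)
		priv->hw_if.disable_rx_csum(priv);

	/* remember what is now programmed into hardware */
	priv->netdev_features = features;

	return 0;
}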
2321 struct xgbe_prv_data *pdata = channel->pdata;
2322 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2323 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2331 desc_if->unmap_rdata(pdata, rdata);
2333 if (desc_if->map_rx_buffer(pdata, ring, rdata))
2336 hw_if->rx_desc_reset(pdata, rdata, ring->dirty);
2351 static struct sk_buff *xgbe_create_skb(struct xgbe_prv_data *pdata,
2366 dma_sync_single_range_for_cpu(pdata->dev, rdata->rx.hdr.dma_base,
2417 struct xgbe_prv_data *pdata = channel->pdata;
2418 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2419 struct xgbe_desc_if *desc_if = &pdata->desc_if;
2423 struct net_device *netdev = pdata->netdev;
2454 if (netif_msg_tx_done(pdata))
2455 xgbe_dump_tx_desc(pdata, ring, ring->dirty, 1, 0);
2463 desc_if->unmap_rdata(pdata, rdata);
2488 struct xgbe_prv_data *pdata = channel->pdata;
2489 struct xgbe_hw_if *hw_if = &pdata->hw_if;
2493 struct net_device *netdev = pdata->netdev;
2511 napi = (pdata->per_channel_irq) ? &channel->napi : &pdata->napi;
2557 netif_err(pdata, rx_err, netdev,
2579 skb = xgbe_create_skb(pdata, napi, rdata,
2588 dma_sync_single_range_for_cpu(pdata->dev,
2619 netif_err(pdata, rx_err, netdev,
2625 if (netif_msg_pktdata(pdata))
2651 nsec = timecounter_cyc2time(&pdata->tstamp_tc,
2690 struct xgbe_prv_data *pdata = channel->pdata;
2704 if (pdata->channel_irq_mode)
2705 xgbe_enable_rx_tx_int(pdata, channel);
2717 struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
2727 ring_budget = budget / pdata->rx_ring_count;
2731 for (i = 0; i < pdata->channel_count; i++) {
2732 channel = pdata->channel[i];
2747 xgbe_enable_rx_tx_ints(pdata);
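The shared poll (lines 2717-2747) divides the NAPI budget evenly across RX rings, loops until the budget is spent or a pass makes no progress, and re-arms interrupts only after completing under budget; this is the other half of the napi_schedule_prep() hand-off sketched earlier. A hedged sketch:

static int my_all_poll(struct napi_struct *napi, int budget)
{
	struct my_priv *priv = container_of(napi, struct my_priv, napi);
	int ring_budget = budget / priv->rx_ring_count;
	int processed = 0, last_processed;
	unsigned int i;

	do {
		last_processed = processed;
		for (i = 0; i < priv->channel_count; i++) {
			my_tx_poll(priv->channel[i]);
			processed += my_rx_poll(priv->channel[i],
						ring_budget);
		}
	} while (processed < budget && processed != last_processed);

	/* under budget and idle: stop polling, re-arm interrupts */
	if (processed < budget) {
		napi_complete_done(napi, processed);
		my_enable_rx_tx_ints(priv);
	}

	return processed;
}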
2755 void xgbe_dump_tx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2764 netdev_dbg(pdata->netdev,
2775 void xgbe_dump_rx_desc(struct xgbe_prv_data *pdata, struct xgbe_ring *ring,
2783 netdev_dbg(pdata->netdev,