Lines Matching defs:ugeth
205 static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
210 skb = netdev_alloc_skb(ugeth->ndev,
211 ugeth->ug_info->uf_info.max_rx_buf_length +
225 dma_map_single(ugeth->dev,
227 ugeth->ug_info->uf_info.max_rx_buf_length +
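The get_new_skb() matches above (source lines 205-227) are the RX refill helper: allocate a receive skb sized to the UCC's maximum RX buffer length and hand its DMA address to the buffer descriptor. A minimal sketch of that pattern, assuming the qe_bd descriptor layout and the UCC_GETH_RX_DATA_BUF_ALIGNMENT constant from ucc_geth.h; a reconstruction, not a verbatim copy:

    static struct sk_buff *get_new_skb(struct ucc_geth_private *ugeth,
                                       u8 __iomem *bd)
    {
            struct sk_buff *skb;

            /* Over-allocate so the data pointer can be aligned for the QE. */
            skb = netdev_alloc_skb(ugeth->ndev,
                                   ugeth->ug_info->uf_info.max_rx_buf_length +
                                   UCC_GETH_RX_DATA_BUF_ALIGNMENT);
            if (!skb)
                    return NULL;

            /* Publish the buffer's bus address through the descriptor. */
            out_be32(&((struct qe_bd __iomem *)bd)->buf,
                     dma_map_single(ugeth->dev, skb->data,
                                    ugeth->ug_info->uf_info.max_rx_buf_length +
                                    UCC_GETH_RX_DATA_BUF_ALIGNMENT,
                                    DMA_FROM_DEVICE));
            return skb;
    }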
237 static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
244 bd = ugeth->p_rx_bd_ring[rxQ];
249 skb = get_new_skb(ugeth, bd);
255 ugeth->rx_skbuff[rxQ][i] = skb;
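rx_bd_buffer_set() (source lines 237-255) walks one RX descriptor ring and fills every slot through get_new_skb(), remembering each skb so the ring can be torn down later. A hedged sketch; the real loop also programs the descriptor status bits (R, and W on the last entry), omitted here:

    static int rx_bd_buffer_set(struct ucc_geth_private *ugeth, u8 rxQ)
    {
            u8 __iomem *bd = ugeth->p_rx_bd_ring[rxQ];
            struct sk_buff *skb;
            int i;

            for (i = 0; i < ugeth->ug_info->bdRingLenRx[rxQ]; i++) {
                    skb = get_new_skb(ugeth, bd);
                    if (!skb)
                            return -ENOMEM;
                    ugeth->rx_skbuff[rxQ][i] = skb;
                    bd += sizeof(struct qe_bd);
            }
            return 0;
    }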
265 static int fill_init_enet_entries(struct ucc_geth_private *ugeth,
279 if (netif_msg_ifup(ugeth))
290 if (netif_msg_ifup(ugeth))
304 static int return_init_enet_entries(struct ucc_geth_private *ugeth,
338 static int dump_init_enet_entries(struct ucc_geth_private *ugeth,
390 static int hw_clear_addr_in_paddr(struct ucc_geth_private *ugeth, u8 paddr_num)
400 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
412 static void hw_add_addr_in_hash(struct ucc_geth_private *ugeth,
419 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->p_rx_glbl_pram->
423 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
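hw_add_addr_in_hash() (source lines 412-423) writes the new address into the 82xx address-filtering PRAM and then tells the QE microcode to pick it up. The subblock lookup at source line 423 feeds a QE host command; a sketch of that hand-off, assuming QE_SET_GROUP_ADDRESS is the command used for this table:

    u32 cecr_subblock;

    /* Resolve which QE command-register subblock this UCC maps to... */
    cecr_subblock =
            ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

    /* ...then ask the microcode to latch the new hash entry. */
    qe_issue_cmd(QE_SET_GROUP_ADDRESS, cecr_subblock,
                 QE_CR_PROTOCOL_ETHERNET, 0);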
436 static void get_statistics(struct ucc_geth_private *ugeth,
448 ug_regs = ugeth->ug_regs;
450 p_tx_fw_statistics_pram = ugeth->p_tx_fw_statistics_pram;
451 p_rx_fw_statistics_pram = ugeth->p_rx_fw_statistics_pram;
556 static void dump_bds(struct ucc_geth_private *ugeth)
561 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
562 if (ugeth->p_tx_bd_ring[i]) {
564 (ugeth->ug_info->bdRingLenTx[i] *
567 mem_disp(ugeth->p_tx_bd_ring[i], length);
570 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
571 if (ugeth->p_rx_bd_ring[i]) {
573 (ugeth->ug_info->bdRingLenRx[i] *
576 mem_disp(ugeth->p_rx_bd_ring[i], length);
581 static void dump_regs(struct ucc_geth_private *ugeth)
585 pr_info("UCC%d Geth registers:\n", ugeth->ug_info->uf_info.ucc_num + 1);
586 pr_info("Base address: 0x%08x\n", (u32)ugeth->ug_regs);
589 (u32)&ugeth->ug_regs->maccfg1,
590 in_be32(&ugeth->ug_regs->maccfg1));
592 (u32)&ugeth->ug_regs->maccfg2,
593 in_be32(&ugeth->ug_regs->maccfg2));
595 (u32)&ugeth->ug_regs->ipgifg,
596 in_be32(&ugeth->ug_regs->ipgifg));
598 (u32)&ugeth->ug_regs->hafdup,
599 in_be32(&ugeth->ug_regs->hafdup));
601 (u32)&ugeth->ug_regs->ifctl,
602 in_be32(&ugeth->ug_regs->ifctl));
604 (u32)&ugeth->ug_regs->ifstat,
605 in_be32(&ugeth->ug_regs->ifstat));
607 (u32)&ugeth->ug_regs->macstnaddr1,
608 in_be32(&ugeth->ug_regs->macstnaddr1));
610 (u32)&ugeth->ug_regs->macstnaddr2,
611 in_be32(&ugeth->ug_regs->macstnaddr2));
613 (u32)&ugeth->ug_regs->uempr,
614 in_be32(&ugeth->ug_regs->uempr));
616 (u32)&ugeth->ug_regs->utbipar,
617 in_be32(&ugeth->ug_regs->utbipar));
619 (u32)&ugeth->ug_regs->uescr,
620 in_be16(&ugeth->ug_regs->uescr));
622 (u32)&ugeth->ug_regs->tx64,
623 in_be32(&ugeth->ug_regs->tx64));
625 (u32)&ugeth->ug_regs->tx127,
626 in_be32(&ugeth->ug_regs->tx127));
628 (u32)&ugeth->ug_regs->tx255,
629 in_be32(&ugeth->ug_regs->tx255));
631 (u32)&ugeth->ug_regs->rx64,
632 in_be32(&ugeth->ug_regs->rx64));
634 (u32)&ugeth->ug_regs->rx127,
635 in_be32(&ugeth->ug_regs->rx127));
637 (u32)&ugeth->ug_regs->rx255,
638 in_be32(&ugeth->ug_regs->rx255));
640 (u32)&ugeth->ug_regs->txok,
641 in_be32(&ugeth->ug_regs->txok));
643 (u32)&ugeth->ug_regs->txcf,
644 in_be16(&ugeth->ug_regs->txcf));
646 (u32)&ugeth->ug_regs->tmca,
647 in_be32(&ugeth->ug_regs->tmca));
649 (u32)&ugeth->ug_regs->tbca,
650 in_be32(&ugeth->ug_regs->tbca));
652 (u32)&ugeth->ug_regs->rxfok,
653 in_be32(&ugeth->ug_regs->rxfok));
655 (u32)&ugeth->ug_regs->rxbok,
656 in_be32(&ugeth->ug_regs->rxbok));
658 (u32)&ugeth->ug_regs->rbyt,
659 in_be32(&ugeth->ug_regs->rbyt));
661 (u32)&ugeth->ug_regs->rmca,
662 in_be32(&ugeth->ug_regs->rmca));
664 (u32)&ugeth->ug_regs->rbca,
665 in_be32(&ugeth->ug_regs->rbca));
667 (u32)&ugeth->ug_regs->scar,
668 in_be32(&ugeth->ug_regs->scar));
670 (u32)&ugeth->ug_regs->scam,
671 in_be32(&ugeth->ug_regs->scam));
673 if (ugeth->p_thread_data_tx) {
675 switch (ugeth->ug_info->numThreadsTx) {
698 (u32)ugeth->p_thread_data_tx);
702 (u32)&ugeth->p_thread_data_tx[i]);
703 mem_disp((u8 *)&ugeth->p_thread_data_tx[i],
707 if (ugeth->p_thread_data_rx) {
709 switch (ugeth->ug_info->numThreadsRx) {
732 (u32)ugeth->p_thread_data_rx);
736 (u32)&ugeth->p_thread_data_rx[i]);
737 mem_disp((u8 *)&ugeth->p_thread_data_rx[i],
741 if (ugeth->p_exf_glbl_param) {
744 (u32)ugeth->p_exf_glbl_param);
745 mem_disp((u8 *) ugeth->p_exf_glbl_param,
746 sizeof(*ugeth->p_exf_glbl_param));
748 if (ugeth->p_tx_glbl_pram) {
750 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_tx_glbl_pram);
752 (u32)&ugeth->p_tx_glbl_pram->temoder,
753 in_be16(&ugeth->p_tx_glbl_pram->temoder));
755 (u32)&ugeth->p_tx_glbl_pram->sqptr,
756 in_be32(&ugeth->p_tx_glbl_pram->sqptr));
758 (u32)&ugeth->p_tx_glbl_pram->schedulerbasepointer,
759 in_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer));
761 (u32)&ugeth->p_tx_glbl_pram->txrmonbaseptr,
762 in_be32(&ugeth->p_tx_glbl_pram->txrmonbaseptr));
764 (u32)&ugeth->p_tx_glbl_pram->tstate,
765 in_be32(&ugeth->p_tx_glbl_pram->tstate));
767 (u32)&ugeth->p_tx_glbl_pram->iphoffset[0],
768 ugeth->p_tx_glbl_pram->iphoffset[0]);
770 (u32)&ugeth->p_tx_glbl_pram->iphoffset[1],
771 ugeth->p_tx_glbl_pram->iphoffset[1]);
773 (u32)&ugeth->p_tx_glbl_pram->iphoffset[2],
774 ugeth->p_tx_glbl_pram->iphoffset[2]);
776 (u32)&ugeth->p_tx_glbl_pram->iphoffset[3],
777 ugeth->p_tx_glbl_pram->iphoffset[3]);
779 (u32)&ugeth->p_tx_glbl_pram->iphoffset[4],
780 ugeth->p_tx_glbl_pram->iphoffset[4]);
782 (u32)&ugeth->p_tx_glbl_pram->iphoffset[5],
783 ugeth->p_tx_glbl_pram->iphoffset[5]);
785 (u32)&ugeth->p_tx_glbl_pram->iphoffset[6],
786 ugeth->p_tx_glbl_pram->iphoffset[6]);
788 (u32)&ugeth->p_tx_glbl_pram->iphoffset[7],
789 ugeth->p_tx_glbl_pram->iphoffset[7]);
791 (u32)&ugeth->p_tx_glbl_pram->vtagtable[0],
792 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[0]));
794 (u32)&ugeth->p_tx_glbl_pram->vtagtable[1],
795 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[1]));
797 (u32)&ugeth->p_tx_glbl_pram->vtagtable[2],
798 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[2]));
800 (u32)&ugeth->p_tx_glbl_pram->vtagtable[3],
801 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[3]));
803 (u32)&ugeth->p_tx_glbl_pram->vtagtable[4],
804 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[4]));
806 (u32)&ugeth->p_tx_glbl_pram->vtagtable[5],
807 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[5]));
809 (u32)&ugeth->p_tx_glbl_pram->vtagtable[6],
810 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[6]));
812 (u32)&ugeth->p_tx_glbl_pram->vtagtable[7],
813 in_be32(&ugeth->p_tx_glbl_pram->vtagtable[7]));
815 (u32)&ugeth->p_tx_glbl_pram->tqptr,
816 in_be32(&ugeth->p_tx_glbl_pram->tqptr));
818 if (ugeth->p_rx_glbl_pram) {
820 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_glbl_pram);
822 (u32)&ugeth->p_rx_glbl_pram->remoder,
823 in_be32(&ugeth->p_rx_glbl_pram->remoder));
825 (u32)&ugeth->p_rx_glbl_pram->rqptr,
826 in_be32(&ugeth->p_rx_glbl_pram->rqptr));
828 (u32)&ugeth->p_rx_glbl_pram->typeorlen,
829 in_be16(&ugeth->p_rx_glbl_pram->typeorlen));
831 (u32)&ugeth->p_rx_glbl_pram->rxgstpack,
832 ugeth->p_rx_glbl_pram->rxgstpack);
834 (u32)&ugeth->p_rx_glbl_pram->rxrmonbaseptr,
835 in_be32(&ugeth->p_rx_glbl_pram->rxrmonbaseptr));
837 (u32)&ugeth->p_rx_glbl_pram->intcoalescingptr,
838 in_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr));
840 (u32)&ugeth->p_rx_glbl_pram->rstate,
841 ugeth->p_rx_glbl_pram->rstate);
843 (u32)&ugeth->p_rx_glbl_pram->mrblr,
844 in_be16(&ugeth->p_rx_glbl_pram->mrblr));
846 (u32)&ugeth->p_rx_glbl_pram->rbdqptr,
847 in_be32(&ugeth->p_rx_glbl_pram->rbdqptr));
849 (u32)&ugeth->p_rx_glbl_pram->mflr,
850 in_be16(&ugeth->p_rx_glbl_pram->mflr));
852 (u32)&ugeth->p_rx_glbl_pram->minflr,
853 in_be16(&ugeth->p_rx_glbl_pram->minflr));
855 (u32)&ugeth->p_rx_glbl_pram->maxd1,
856 in_be16(&ugeth->p_rx_glbl_pram->maxd1));
858 (u32)&ugeth->p_rx_glbl_pram->maxd2,
859 in_be16(&ugeth->p_rx_glbl_pram->maxd2));
861 (u32)&ugeth->p_rx_glbl_pram->ecamptr,
862 in_be32(&ugeth->p_rx_glbl_pram->ecamptr));
864 (u32)&ugeth->p_rx_glbl_pram->l2qt,
865 in_be32(&ugeth->p_rx_glbl_pram->l2qt));
867 (u32)&ugeth->p_rx_glbl_pram->l3qt[0],
868 in_be32(&ugeth->p_rx_glbl_pram->l3qt[0]));
870 (u32)&ugeth->p_rx_glbl_pram->l3qt[1],
871 in_be32(&ugeth->p_rx_glbl_pram->l3qt[1]));
873 (u32)&ugeth->p_rx_glbl_pram->l3qt[2],
874 in_be32(&ugeth->p_rx_glbl_pram->l3qt[2]));
876 (u32)&ugeth->p_rx_glbl_pram->l3qt[3],
877 in_be32(&ugeth->p_rx_glbl_pram->l3qt[3]));
879 (u32)&ugeth->p_rx_glbl_pram->l3qt[4],
880 in_be32(&ugeth->p_rx_glbl_pram->l3qt[4]));
882 (u32)&ugeth->p_rx_glbl_pram->l3qt[5],
883 in_be32(&ugeth->p_rx_glbl_pram->l3qt[5]));
885 (u32)&ugeth->p_rx_glbl_pram->l3qt[6],
886 in_be32(&ugeth->p_rx_glbl_pram->l3qt[6]));
888 (u32)&ugeth->p_rx_glbl_pram->l3qt[7],
889 in_be32(&ugeth->p_rx_glbl_pram->l3qt[7]));
891 (u32)&ugeth->p_rx_glbl_pram->vlantype,
892 in_be16(&ugeth->p_rx_glbl_pram->vlantype));
894 (u32)&ugeth->p_rx_glbl_pram->vlantci,
895 in_be16(&ugeth->p_rx_glbl_pram->vlantci));
899 (u32)&ugeth->p_rx_glbl_pram->addressfiltering[i],
900 ugeth->p_rx_glbl_pram->addressfiltering[i]);
902 (u32)&ugeth->p_rx_glbl_pram->exfGlobalParam,
903 in_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam));
905 if (ugeth->p_send_q_mem_reg) {
907 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_send_q_mem_reg);
908 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
911 (u32)&ugeth->p_send_q_mem_reg->sqqd[i]);
912 mem_disp((u8 *)&ugeth->p_send_q_mem_reg->sqqd[i],
916 if (ugeth->p_scheduler) {
918 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_scheduler);
919 mem_disp((u8 *) ugeth->p_scheduler,
920 sizeof(*ugeth->p_scheduler));
922 if (ugeth->p_tx_fw_statistics_pram) {
925 (u32)ugeth->p_tx_fw_statistics_pram);
926 mem_disp((u8 *) ugeth->p_tx_fw_statistics_pram,
927 sizeof(*ugeth->p_tx_fw_statistics_pram));
929 if (ugeth->p_rx_fw_statistics_pram) {
932 (u32)ugeth->p_rx_fw_statistics_pram);
933 mem_disp((u8 *) ugeth->p_rx_fw_statistics_pram,
934 sizeof(*ugeth->p_rx_fw_statistics_pram));
936 if (ugeth->p_rx_irq_coalescing_tbl) {
939 (u32)ugeth->p_rx_irq_coalescing_tbl);
940 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
943 (u32)&ugeth->p_rx_irq_coalescing_tbl->
946 (u32)&ugeth->p_rx_irq_coalescing_tbl->
948 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
952 (u32)&ugeth->p_rx_irq_coalescing_tbl->
954 in_be32(&ugeth->p_rx_irq_coalescing_tbl->
959 if (ugeth->p_rx_bd_qs_tbl) {
961 pr_info("Base address: 0x%08x\n", (u32)ugeth->p_rx_bd_qs_tbl);
962 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
965 (u32)&ugeth->p_rx_bd_qs_tbl[i]);
967 (u32)&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr,
968 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdbaseptr));
970 (u32)&ugeth->p_rx_bd_qs_tbl[i].bdptr,
971 in_be32(&ugeth->p_rx_bd_qs_tbl[i].bdptr));
973 (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
974 in_be32(&ugeth->p_rx_bd_qs_tbl[i].
977 (u32)&ugeth->p_rx_bd_qs_tbl[i].externalbdptr,
978 in_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdptr));
982 (&ugeth->p_rx_bd_qs_tbl[i].
986 (&ugeth->p_rx_bd_qs_tbl[i].
991 if (ugeth->p_init_enet_param_shadow) {
995 (u32)ugeth->p_init_enet_param_shadow);
996 mem_disp((u8 *) ugeth->p_init_enet_param_shadow,
997 sizeof(*ugeth->p_init_enet_param_shadow));
1000 if (ugeth->ug_info->rxExtendedFiltering) {
1003 if (ugeth->ug_info->largestexternallookupkeysize ==
1007 if (ugeth->ug_info->largestexternallookupkeysize ==
1013 dump_init_enet_entries(ugeth,
1014 &(ugeth->p_init_enet_param_shadow->
1018 ugeth->ug_info->riscTx, 0);
1019 dump_init_enet_entries(ugeth,
1020 &(ugeth->p_init_enet_param_shadow->
1023 ugeth->ug_info->riscRx, 1);
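The long dump_regs()/dump_bds() run above (source lines 556-1023) repeats one idiom: print a register's MMIO address next to its current value, using in_be32() or in_be16() to match the register width. The shape, for a single register (the exact format string is an assumption):

    pr_info("maccfg1    : addr - 0x%08x, val - 0x%08x\n",
            (u32)&ugeth->ug_regs->maccfg1,
            in_be32(&ugeth->ug_regs->maccfg1));

Casting an __iomem pointer to u32 for display only works because this driver targets 32-bit PowerPC; it is not a portable pattern.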
1310 static int adjust_enet_interface(struct ucc_geth_private *ugeth)
1321 ug_info = ugeth->ug_info;
1322 ug_regs = ugeth->ug_regs;
1323 uf_regs = ugeth->uccf->uf_regs;
1328 if ((ugeth->max_speed == SPEED_10) ||
1329 (ugeth->max_speed == SPEED_100))
1331 else if (ugeth->max_speed == SPEED_1000)
1340 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1341 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1342 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1343 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1344 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1345 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1346 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RMII)
1348 switch (ugeth->max_speed) {
1353 if (ugeth->phy_interface != PHY_INTERFACE_MODE_RTBI)
1357 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1358 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1361 if (ugeth->phy_interface == PHY_INTERFACE_MODE_SGMII)
1369 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_TBI) ||
1370 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1371 struct ucc_geth_info *ug_info = ugeth->ug_info;
1392 if (netif_msg_probe(ugeth))
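adjust_enet_interface() (source lines 1310-1392) programs MACCFG2 from the negotiated speed, nibble mode for MII-class speeds and byte mode for gigabit, with additional UPSMR handling for the reduced-pin (RMII/RGMII/RTBI) interfaces. A hedged sketch of the speed branch, assuming the MACCFG2_INTERFACE_MODE_* names from ucc_geth.h:

    u32 maccfg2 = in_be32(&ug_regs->maccfg2) & ~MACCFG2_INTERFACE_MODE_MASK;

    if (ugeth->max_speed == SPEED_10 || ugeth->max_speed == SPEED_100)
            maccfg2 |= MACCFG2_INTERFACE_MODE_NIBBLE;  /* MII/RMII class */
    else if (ugeth->max_speed == SPEED_1000)
            maccfg2 |= MACCFG2_INTERFACE_MODE_BYTE;    /* GMII/RGMII/TBI */

    out_be32(&ug_regs->maccfg2, maccfg2);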
1400 static int ugeth_graceful_stop_tx(struct ucc_geth_private *ugeth)
1407 uccf = ugeth->uccf;
1415 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1430 static int ugeth_graceful_stop_rx(struct ucc_geth_private *ugeth)
1437 uccf = ugeth->uccf;
1440 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
1442 out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);
1449 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.
1454 temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
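ugeth_graceful_stop_rx() (source lines 1430-1454) is a polled handshake with the QE microcode through the rxgstpack byte in RX global PRAM: clear the acknowledge bit, issue the GRACEFUL_STOP_RX host command, and spin until the firmware sets the acknowledge. A sketch, with the bit name taken on the assumption it matches ucc_geth.h:

    u32 cecr_subblock;
    u8 temp;
    int i = 10;

    cecr_subblock =
            ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);

    /* Clear the acknowledge bit before requesting the stop. */
    temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
    temp &= ~GRACEFUL_STOP_ACKNOWLEDGE_RX;
    out_8(&ugeth->p_rx_glbl_pram->rxgstpack, temp);

    /* Issue the host command and poll until the microcode acks. */
    do {
            qe_issue_cmd(QE_GRACEFUL_STOP_RX, cecr_subblock,
                         QE_CR_PROTOCOL_ETHERNET, 0);
            temp = in_8(&ugeth->p_rx_glbl_pram->rxgstpack);
    } while (!(temp & GRACEFUL_STOP_ACKNOWLEDGE_RX) && --i);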
1462 static int ugeth_restart_tx(struct ucc_geth_private *ugeth)
1467 uccf = ugeth->uccf;
1470 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1477 static int ugeth_restart_rx(struct ucc_geth_private *ugeth)
1482 uccf = ugeth->uccf;
1485 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
1493 static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1498 uccf = ugeth->uccf;
1501 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1502 if (netif_msg_probe(ugeth))
1513 ugeth_restart_tx(ugeth);
1515 ugeth_restart_rx(ugeth);
1523 static int ugeth_disable(struct ucc_geth_private *ugeth, enum comm_dir mode)
1527 uccf = ugeth->uccf;
1530 if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM) {
1531 if (netif_msg_probe(ugeth))
1538 ugeth_graceful_stop_tx(ugeth);
1542 ugeth_graceful_stop_rx(ugeth);
1544 ucc_fast_disable(ugeth->uccf, mode); /* OK to do even if not enabled */
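ugeth_enable()/ugeth_disable() (source lines 1493-1544) wrap the ucc_fast primitives: enable turns the fast controller on and then restarts the requested directions, while disable gracefully stops them first and then calls ucc_fast_disable(). A condensed sketch of the enable side, assuming mode is a COMM_DIR_* bitmask:

    static int ugeth_enable(struct ucc_geth_private *ugeth, enum comm_dir mode)
    {
            struct ucc_fast_private *uccf = ugeth->uccf;

            if (ugeth->ug_info->uf_info.ucc_num >= UCC_MAX_NUM)
                    return -EINVAL;

            /* Bring the fast UCC up first, then kick the QE threads. */
            ucc_fast_enable(uccf, mode);
            if (mode & COMM_DIR_TX)
                    ugeth_restart_tx(ugeth);
            if (mode & COMM_DIR_RX)
                    ugeth_restart_rx(ugeth);
            return 0;
    }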
1549 static void ugeth_quiesce(struct ucc_geth_private *ugeth)
1552 netif_tx_stop_all_queues(ugeth->ndev);
1555 disable_irq(ugeth->ug_info->uf_info.irq);
1558 napi_disable(&ugeth->napi);
1561 static void ugeth_activate(struct ucc_geth_private *ugeth)
1563 napi_enable(&ugeth->napi);
1564 enable_irq(ugeth->ug_info->uf_info.irq);
1567 netif_tx_wake_all_queues(ugeth->ndev);
1568 __netdev_watchdog_up(ugeth->ndev);
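ugeth_quiesce()/ugeth_activate() (source lines 1549-1568) bracket any reconfiguration: stop the stack side, then bring it back in the reverse order. As reconstructed from the matches above:

    static void ugeth_quiesce(struct ucc_geth_private *ugeth)
    {
            /* No new transmits, no device interrupts, no NAPI polling. */
            netif_tx_stop_all_queues(ugeth->ndev);
            disable_irq(ugeth->ug_info->uf_info.irq);
            napi_disable(&ugeth->napi);
    }

    static void ugeth_activate(struct ucc_geth_private *ugeth)
    {
            napi_enable(&ugeth->napi);
            enable_irq(ugeth->ug_info->uf_info.irq);
            netif_tx_wake_all_queues(ugeth->ndev);
            __netdev_watchdog_up(ugeth->ndev);
    }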
1573 * information through variables in the ugeth structure, and this
1580 struct ucc_geth_private *ugeth = netdev_priv(dev);
1583 struct phy_device *phydev = ugeth->phydev;
1586 ug_regs = ugeth->ug_regs;
1587 uf_regs = ugeth->uccf->uf_regs;
1594 if (phydev->duplex != ugeth->oldduplex) {
1600 ugeth->oldduplex = phydev->duplex;
1603 if (phydev->speed != ugeth->oldspeed) {
1617 if ((ugeth->phy_interface == PHY_INTERFACE_MODE_RMII) ||
1618 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII) ||
1619 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1620 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1621 (ugeth->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) ||
1622 (ugeth->phy_interface == PHY_INTERFACE_MODE_RTBI)) {
1630 if (netif_msg_link(ugeth))
1636 ugeth->oldspeed = phydev->speed;
1639 if (!ugeth->oldlink) {
1641 ugeth->oldlink = 1;
1648 * ugeth->lock, which is a bad idea since 'graceful
1652 ugeth_quiesce(ugeth);
1653 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
1658 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
1659 ugeth_activate(ugeth);
1661 } else if (ugeth->oldlink) {
1663 ugeth->oldlink = 0;
1664 ugeth->oldspeed = 0;
1665 ugeth->oldduplex = -1;
1668 if (new_state && netif_msg_link(ugeth))
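The adjust_link() body (source lines 1580-1668) is the standard phylib callback pattern: compare phydev->link, speed and duplex against the cached old* fields, reprogram the MAC only on a change, and record the new state. A skeleton with the hardware writes elided:

    struct phy_device *phydev = ugeth->phydev;
    int new_state = 0;

    if (phydev->link) {
            if (phydev->duplex != ugeth->oldduplex) {
                    new_state = 1;
                    /* ...update the duplex bit in MACCFG2... */
                    ugeth->oldduplex = phydev->duplex;
            }
            if (phydev->speed != ugeth->oldspeed) {
                    new_state = 1;
                    /* ...reprogram the interface mode for the new speed... */
                    ugeth->oldspeed = phydev->speed;
            }
            if (!ugeth->oldlink) {
                    new_state = 1;
                    ugeth->oldlink = 1;
            }
    } else if (ugeth->oldlink) {
            new_state = 1;
            ugeth->oldlink = 0;
            ugeth->oldspeed = 0;
            ugeth->oldduplex = -1;
    }

    if (new_state && netif_msg_link(ugeth))
            phy_print_status(phydev);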
1682 struct ucc_geth_private *ugeth = netdev_priv(dev);
1683 struct ucc_geth_info *ug_info = ugeth->ug_info;
1749 static void ugeth_dump_regs(struct ucc_geth_private *ugeth)
1752 ucc_fast_dump_regs(ugeth->uccf);
1753 dump_regs(ugeth);
1754 dump_bds(ugeth);
1759 ugeth,
1772 uccf = ugeth->uccf;
1776 ugeth->p_rx_glbl_pram->addressfiltering;
1781 p_lh = &ugeth->group_hash_q;
1782 p_counter = &(ugeth->numGroupAddrInHash);
1786 p_lh = &ugeth->ind_hash_q;
1787 p_counter = &(ugeth->numIndAddrInHash);
1797 ugeth_disable(ugeth, comm_dir);
1815 ugeth_enable(ugeth, comm_dir);
1820 static int ugeth_82xx_filtering_clear_addr_in_paddr(struct ucc_geth_private *ugeth,
1823 ugeth->indAddrRegUsed[paddr_num] = 0; /* mark this paddr as not used */
1824 return hw_clear_addr_in_paddr(ugeth, paddr_num);/* clear in hardware */
1827 static void ucc_geth_free_rx(struct ucc_geth_private *ugeth)
1835 ug_info = ugeth->ug_info;
1838 for (i = 0; i < ugeth->ug_info->numQueuesRx; i++) {
1839 if (ugeth->p_rx_bd_ring[i]) {
1841 bd = ugeth->p_rx_bd_ring[i];
1842 for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
1843 if (ugeth->rx_skbuff[i][j]) {
1844 dma_unmap_single(ugeth->dev,
1846 ugeth->ug_info->
1851 ugeth->rx_skbuff[i][j]);
1852 ugeth->rx_skbuff[i][j] = NULL;
1857 kfree(ugeth->rx_skbuff[i]);
1859 if (ugeth->ug_info->uf_info.bd_mem_part ==
1861 kfree((void *)ugeth->rx_bd_ring_offset[i]);
1862 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1864 qe_muram_free(ugeth->rx_bd_ring_offset[i]);
1865 ugeth->p_rx_bd_ring[i] = NULL;
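ucc_geth_free_rx() (source lines 1827-1865) undoes rx_bd_buffer_set(): for every populated slot, unmap the DMA buffer whose address was stored in the descriptor and free the skb. A sketch of the inner loop, assuming the qe_bd buf field still holds the dma_map_single() address written by get_new_skb():

    u8 __iomem *bd = ugeth->p_rx_bd_ring[i];
    int j;

    for (j = 0; j < ugeth->ug_info->bdRingLenRx[i]; j++) {
            if (ugeth->rx_skbuff[i][j]) {
                    dma_unmap_single(ugeth->dev,
                                     in_be32(&((struct qe_bd __iomem *)bd)->buf),
                                     ugeth->ug_info->uf_info.max_rx_buf_length +
                                     UCC_GETH_RX_DATA_BUF_ALIGNMENT,
                                     DMA_FROM_DEVICE);
                    dev_kfree_skb_any(ugeth->rx_skbuff[i][j]);
                    ugeth->rx_skbuff[i][j] = NULL;
            }
            bd += sizeof(struct qe_bd);
    }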
1871 static void ucc_geth_free_tx(struct ucc_geth_private *ugeth)
1878 netdev_reset_queue(ugeth->ndev);
1880 ug_info = ugeth->ug_info;
1883 for (i = 0; i < ugeth->ug_info->numQueuesTx; i++) {
1884 bd = ugeth->p_tx_bd_ring[i];
1887 for (j = 0; j < ugeth->ug_info->bdRingLenTx[i]; j++) {
1888 if (ugeth->tx_skbuff[i][j]) {
1889 dma_unmap_single(ugeth->dev,
1894 dev_kfree_skb_any(ugeth->tx_skbuff[i][j]);
1895 ugeth->tx_skbuff[i][j] = NULL;
1899 kfree(ugeth->tx_skbuff[i]);
1901 if (ugeth->p_tx_bd_ring[i]) {
1902 if (ugeth->ug_info->uf_info.bd_mem_part ==
1904 kfree((void *)ugeth->tx_bd_ring_offset[i]);
1905 else if (ugeth->ug_info->uf_info.bd_mem_part ==
1907 qe_muram_free(ugeth->tx_bd_ring_offset[i]);
1908 ugeth->p_tx_bd_ring[i] = NULL;
1914 static void ucc_geth_memclean(struct ucc_geth_private *ugeth)
1916 if (!ugeth)
1919 if (ugeth->uccf) {
1920 ucc_fast_free(ugeth->uccf);
1921 ugeth->uccf = NULL;
1924 if (ugeth->p_thread_data_tx) {
1925 qe_muram_free(ugeth->thread_dat_tx_offset);
1926 ugeth->p_thread_data_tx = NULL;
1928 if (ugeth->p_thread_data_rx) {
1929 qe_muram_free(ugeth->thread_dat_rx_offset);
1930 ugeth->p_thread_data_rx = NULL;
1932 if (ugeth->p_exf_glbl_param) {
1933 qe_muram_free(ugeth->exf_glbl_param_offset);
1934 ugeth->p_exf_glbl_param = NULL;
1936 if (ugeth->p_rx_glbl_pram) {
1937 qe_muram_free(ugeth->rx_glbl_pram_offset);
1938 ugeth->p_rx_glbl_pram = NULL;
1940 if (ugeth->p_tx_glbl_pram) {
1941 qe_muram_free(ugeth->tx_glbl_pram_offset);
1942 ugeth->p_tx_glbl_pram = NULL;
1944 if (ugeth->p_send_q_mem_reg) {
1945 qe_muram_free(ugeth->send_q_mem_reg_offset);
1946 ugeth->p_send_q_mem_reg = NULL;
1948 if (ugeth->p_scheduler) {
1949 qe_muram_free(ugeth->scheduler_offset);
1950 ugeth->p_scheduler = NULL;
1952 if (ugeth->p_tx_fw_statistics_pram) {
1953 qe_muram_free(ugeth->tx_fw_statistics_pram_offset);
1954 ugeth->p_tx_fw_statistics_pram = NULL;
1956 if (ugeth->p_rx_fw_statistics_pram) {
1957 qe_muram_free(ugeth->rx_fw_statistics_pram_offset);
1958 ugeth->p_rx_fw_statistics_pram = NULL;
1960 if (ugeth->p_rx_irq_coalescing_tbl) {
1961 qe_muram_free(ugeth->rx_irq_coalescing_tbl_offset);
1962 ugeth->p_rx_irq_coalescing_tbl = NULL;
1964 if (ugeth->p_rx_bd_qs_tbl) {
1965 qe_muram_free(ugeth->rx_bd_qs_tbl_offset);
1966 ugeth->p_rx_bd_qs_tbl = NULL;
1968 if (ugeth->p_init_enet_param_shadow) {
1969 return_init_enet_entries(ugeth,
1970 &(ugeth->p_init_enet_param_shadow->
1973 ugeth->ug_info->riscRx, 1);
1974 return_init_enet_entries(ugeth,
1975 &(ugeth->p_init_enet_param_shadow->
1978 ugeth->ug_info->riscTx, 0);
1979 kfree(ugeth->p_init_enet_param_shadow);
1980 ugeth->p_init_enet_param_shadow = NULL;
1982 ucc_geth_free_tx(ugeth);
1983 ucc_geth_free_rx(ugeth);
1984 while (!list_empty(&ugeth->group_hash_q))
1986 (dequeue(&ugeth->group_hash_q)));
1987 while (!list_empty(&ugeth->ind_hash_q))
1989 (dequeue(&ugeth->ind_hash_q)));
1990 if (ugeth->ug_regs) {
1991 iounmap(ugeth->ug_regs);
1992 ugeth->ug_regs = NULL;
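ucc_geth_memclean() (source lines 1914-1992) releases every MURAM region with one repeated guard: free only if the CPU-side pointer was ever set, then NULL it so a second call is harmless. The shape, for one of the many regions:

    if (ugeth->p_tx_glbl_pram) {
            qe_muram_free(ugeth->tx_glbl_pram_offset);
            ugeth->p_tx_glbl_pram = NULL;
    }

Keeping the MURAM offset and the mapped pointer as a pair is what makes this teardown idempotent and safe to call from every error path.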
1998 struct ucc_geth_private *ugeth;
2003 ugeth = netdev_priv(dev);
2005 uf_regs = ugeth->uccf->uf_regs;
2013 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2032 hw_add_addr_in_hash(ugeth, ha->addr);
2038 static void ucc_geth_stop(struct ucc_geth_private *ugeth)
2040 struct ucc_geth __iomem *ug_regs = ugeth->ug_regs;
2041 struct phy_device *phydev = ugeth->phydev;
2053 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
2056 out_be32(ugeth->uccf->p_uccm, 0x00000000);
2059 out_be32(ugeth->uccf->p_ucce, 0xffffffff);
2064 ucc_geth_memclean(ugeth);
2067 static int ucc_struct_init(struct ucc_geth_private *ugeth)
2073 ug_info = ugeth->ug_info;
2078 if (netif_msg_probe(ugeth))
2088 if (netif_msg_probe(ugeth))
2097 if (netif_msg_probe(ugeth))
2106 if (netif_msg_probe(ugeth))
2113 if (netif_msg_probe(ugeth))
2120 if (netif_msg_probe(ugeth))
2128 if (netif_msg_probe(ugeth))
2137 if (netif_msg_probe(ugeth))
2144 if (netif_msg_probe(ugeth))
2152 if (netif_msg_probe(ugeth))
2165 if (ucc_fast_init(uf_info, &ugeth->uccf)) {
2166 if (netif_msg_probe(ugeth))
2179 ugeth->ug_regs = ioremap(uf_info->regs, sizeof(*ugeth->ug_regs));
2180 if (!ugeth->ug_regs) {
2181 if (netif_msg_probe(ugeth))
2189 static int ucc_geth_alloc_tx(struct ucc_geth_private *ugeth)
2197 ug_info = ugeth->ug_info;
2215 ugeth->tx_bd_ring_offset[j] =
2218 if (ugeth->tx_bd_ring_offset[j] != 0)
2219 ugeth->p_tx_bd_ring[j] =
2220 (u8 __iomem *)((ugeth->tx_bd_ring_offset[j] +
2223 ugeth->tx_bd_ring_offset[j] =
2226 if (!IS_ERR_VALUE(ugeth->tx_bd_ring_offset[j]))
2227 ugeth->p_tx_bd_ring[j] =
2228 (u8 __iomem *) qe_muram_addr(ugeth->
2231 if (!ugeth->p_tx_bd_ring[j]) {
2232 if (netif_msg_ifup(ugeth))
2237 memset_io((void __iomem *)(ugeth->p_tx_bd_ring[j] +
2245 ugeth->tx_skbuff[j] =
2246 kmalloc_array(ugeth->ug_info->bdRingLenTx[j],
2249 if (ugeth->tx_skbuff[j] == NULL) {
2250 if (netif_msg_ifup(ugeth))
2255 for (i = 0; i < ugeth->ug_info->bdRingLenTx[j]; i++)
2256 ugeth->tx_skbuff[j][i] = NULL;
2258 ugeth->skb_curtx[j] = ugeth->skb_dirtytx[j] = 0;
2259 bd = ugeth->confBd[j] = ugeth->txBd[j] = ugeth->p_tx_bd_ring[j];
2275 static int ucc_geth_alloc_rx(struct ucc_geth_private *ugeth)
2283 ug_info = ugeth->ug_info;
2293 ugeth->rx_bd_ring_offset[j] =
2295 if (ugeth->rx_bd_ring_offset[j] != 0)
2296 ugeth->p_rx_bd_ring[j] =
2297 (u8 __iomem *)((ugeth->rx_bd_ring_offset[j] +
2300 ugeth->rx_bd_ring_offset[j] =
2303 if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
2304 ugeth->p_rx_bd_ring[j] =
2305 (u8 __iomem *) qe_muram_addr(ugeth->
2308 if (!ugeth->p_rx_bd_ring[j]) {
2309 if (netif_msg_ifup(ugeth))
2318 ugeth->rx_skbuff[j] =
2319 kmalloc_array(ugeth->ug_info->bdRingLenRx[j],
2322 if (ugeth->rx_skbuff[j] == NULL) {
2323 if (netif_msg_ifup(ugeth))
2328 for (i = 0; i < ugeth->ug_info->bdRingLenRx[j]; i++)
2329 ugeth->rx_skbuff[j][i] = NULL;
2331 ugeth->skb_currx[j] = 0;
2332 bd = ugeth->rxBd[j] = ugeth->p_rx_bd_ring[j];
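ucc_geth_alloc_tx()/ucc_geth_alloc_rx() (source lines 2189-2332) choose the descriptor ring's backing store from uf_info.bd_mem_part: ordinary kernel memory (MEM_PART_SYSTEM), or QE multi-user RAM (MEM_PART_MURAM), where qe_muram_alloc() returns an offset that qe_muram_addr() converts into a CPU pointer. A hedged sketch of the RX branch; the alignment fixups the real code applies to the kmalloc() case are omitted:

    /* Ring size in bytes: one qe_bd per entry. */
    u32 length = ug_info->bdRingLenRx[j] * sizeof(struct qe_bd);

    if (ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
            ugeth->rx_bd_ring_offset[j] = (u32)kmalloc(length, GFP_KERNEL);
            if (ugeth->rx_bd_ring_offset[j] != 0)
                    ugeth->p_rx_bd_ring[j] =
                            (u8 __iomem *)ugeth->rx_bd_ring_offset[j];
    } else if (ug_info->uf_info.bd_mem_part == MEM_PART_MURAM) {
            ugeth->rx_bd_ring_offset[j] =
                    qe_muram_alloc(length, UCC_GETH_RX_BD_RING_ALIGNMENT);
            if (!IS_ERR_VALUE(ugeth->rx_bd_ring_offset[j]))
                    ugeth->p_rx_bd_ring[j] = (u8 __iomem *)
                            qe_muram_addr(ugeth->rx_bd_ring_offset[j]);
    }
    if (!ugeth->p_rx_bd_ring[j])
            return -ENOMEM;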
2348 static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2368 uccf = ugeth->uccf;
2369 ug_info = ugeth->ug_info;
2372 ug_regs = ugeth->ug_regs;
2391 if (netif_msg_ifup(ugeth))
2413 if (netif_msg_ifup(ugeth))
2419 ugeth->rx_non_dynamic_extended_features = ug_info->ipCheckSumCheck ||
2424 ugeth->rx_extended_features = ugeth->rx_non_dynamic_extended_features ||
2461 if (netif_msg_ifup(ugeth))
2477 if (netif_msg_ifup(ugeth))
2497 ret_val = ucc_geth_alloc_tx(ugeth);
2501 ret_val = ucc_geth_alloc_rx(ugeth);
2510 ugeth->tx_glbl_pram_offset =
2513 if (IS_ERR_VALUE(ugeth->tx_glbl_pram_offset)) {
2514 if (netif_msg_ifup(ugeth))
2518 ugeth->p_tx_glbl_pram =
2519 (struct ucc_geth_tx_global_pram __iomem *) qe_muram_addr(ugeth->
2522 memset_io((void __iomem *)ugeth->p_tx_glbl_pram, 0, sizeof(struct ucc_geth_tx_global_pram));
2528 ugeth->thread_dat_tx_offset =
2533 if (IS_ERR_VALUE(ugeth->thread_dat_tx_offset)) {
2534 if (netif_msg_ifup(ugeth))
2539 ugeth->p_thread_data_tx =
2540 (struct ucc_geth_thread_data_tx __iomem *) qe_muram_addr(ugeth->
2542 out_be32(&ugeth->p_tx_glbl_pram->tqptr, ugeth->thread_dat_tx_offset);
2546 out_be32(&ugeth->p_tx_glbl_pram->vtagtable[i],
2551 out_8(&ugeth->p_tx_glbl_pram->iphoffset[i],
2556 ugeth->send_q_mem_reg_offset =
2560 if (IS_ERR_VALUE(ugeth->send_q_mem_reg_offset)) {
2561 if (netif_msg_ifup(ugeth))
2566 ugeth->p_send_q_mem_reg =
2567 (struct ucc_geth_send_queue_mem_region __iomem *) qe_muram_addr(ugeth->
2569 out_be32(&ugeth->p_tx_glbl_pram->sqptr, ugeth->send_q_mem_reg_offset);
2575 ugeth->p_tx_bd_ring[i] + (ug_info->bdRingLenTx[i] -
2577 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2578 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2579 (u32) virt_to_phys(ugeth->p_tx_bd_ring[i]));
2580 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2583 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2585 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].bd_ring_base,
2586 (u32)qe_muram_dma(ugeth->p_tx_bd_ring[i]));
2587 out_be32(&ugeth->p_send_q_mem_reg->sqqd[i].
2597 ugeth->scheduler_offset =
2600 if (IS_ERR_VALUE(ugeth->scheduler_offset)) {
2601 if (netif_msg_ifup(ugeth))
2606 ugeth->p_scheduler =
2607 (struct ucc_geth_scheduler __iomem *) qe_muram_addr(ugeth->
2609 out_be32(&ugeth->p_tx_glbl_pram->schedulerbasepointer,
2610 ugeth->scheduler_offset);
2612 memset_io((void __iomem *)ugeth->p_scheduler, 0, sizeof(struct ucc_geth_scheduler));
2615 out_be32(&ugeth->p_scheduler->mblinterval,
2617 out_be16(&ugeth->p_scheduler->nortsrbytetime,
2619 out_8(&ugeth->p_scheduler->fracsiz, ug_info->fracsiz);
2620 out_8(&ugeth->p_scheduler->strictpriorityq,
2622 out_8(&ugeth->p_scheduler->txasap, ug_info->txasap);
2623 out_8(&ugeth->p_scheduler->extrabw, ug_info->extrabw);
2625 out_8(&ugeth->p_scheduler->weightfactor[i],
2629 ugeth->p_cpucount[0] = &(ugeth->p_scheduler->cpucount0);
2630 ugeth->p_cpucount[1] = &(ugeth->p_scheduler->cpucount1);
2631 ugeth->p_cpucount[2] = &(ugeth->p_scheduler->cpucount2);
2632 ugeth->p_cpucount[3] = &(ugeth->p_scheduler->cpucount3);
2633 ugeth->p_cpucount[4] = &(ugeth->p_scheduler->cpucount4);
2634 ugeth->p_cpucount[5] = &(ugeth->p_scheduler->cpucount5);
2635 ugeth->p_cpucount[6] = &(ugeth->p_scheduler->cpucount6);
2636 ugeth->p_cpucount[7] = &(ugeth->p_scheduler->cpucount7);
2643 ugeth->tx_fw_statistics_pram_offset =
2647 if (IS_ERR_VALUE(ugeth->tx_fw_statistics_pram_offset)) {
2648 if (netif_msg_ifup(ugeth))
2652 ugeth->p_tx_fw_statistics_pram =
2654 qe_muram_addr(ugeth->tx_fw_statistics_pram_offset);
2656 memset_io((void __iomem *)ugeth->p_tx_fw_statistics_pram,
2668 out_be16(&ugeth->p_tx_glbl_pram->temoder, temoder);
2670 test = in_be16(&ugeth->p_tx_glbl_pram->temoder);
2677 out_be32(&ugeth->p_tx_glbl_pram->tstate, ((u32) function_code) << 24);
2681 ugeth->rx_glbl_pram_offset =
2684 if (IS_ERR_VALUE(ugeth->rx_glbl_pram_offset)) {
2685 if (netif_msg_ifup(ugeth))
2689 ugeth->p_rx_glbl_pram =
2690 (struct ucc_geth_rx_global_pram __iomem *) qe_muram_addr(ugeth->
2693 memset_io((void __iomem *)ugeth->p_rx_glbl_pram, 0, sizeof(struct ucc_geth_rx_global_pram));
2699 ugeth->thread_dat_rx_offset =
2703 if (IS_ERR_VALUE(ugeth->thread_dat_rx_offset)) {
2704 if (netif_msg_ifup(ugeth))
2709 ugeth->p_thread_data_rx =
2710 (struct ucc_geth_thread_data_rx __iomem *) qe_muram_addr(ugeth->
2712 out_be32(&ugeth->p_rx_glbl_pram->rqptr, ugeth->thread_dat_rx_offset);
2715 out_be16(&ugeth->p_rx_glbl_pram->typeorlen, ug_info->typeorlen);
2720 ugeth->rx_fw_statistics_pram_offset =
2724 if (IS_ERR_VALUE(ugeth->rx_fw_statistics_pram_offset)) {
2725 if (netif_msg_ifup(ugeth))
2729 ugeth->p_rx_fw_statistics_pram =
2731 qe_muram_addr(ugeth->rx_fw_statistics_pram_offset);
2733 memset_io((void __iomem *)ugeth->p_rx_fw_statistics_pram, 0,
2740 ugeth->rx_irq_coalescing_tbl_offset =
2744 if (IS_ERR_VALUE(ugeth->rx_irq_coalescing_tbl_offset)) {
2745 if (netif_msg_ifup(ugeth))
2750 ugeth->p_rx_irq_coalescing_tbl =
2752 qe_muram_addr(ugeth->rx_irq_coalescing_tbl_offset);
2753 out_be32(&ugeth->p_rx_glbl_pram->intcoalescingptr,
2754 ugeth->rx_irq_coalescing_tbl_offset);
2758 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2761 out_be32(&ugeth->p_rx_irq_coalescing_tbl->coalescingentry[i].
2768 &ugeth->p_rx_glbl_pram->mrblr);
2770 out_be16(&ugeth->p_rx_glbl_pram->mflr, ug_info->maxFrameLength);
2773 &ugeth->p_rx_glbl_pram->minflr,
2774 &ugeth->p_rx_glbl_pram->mrblr);
2776 out_be16(&ugeth->p_rx_glbl_pram->maxd1, ug_info->maxD1Length);
2778 out_be16(&ugeth->p_rx_glbl_pram->maxd2, ug_info->maxD2Length);
2784 out_be32(&ugeth->p_rx_glbl_pram->l2qt, l2qt);
2791 out_be32(&ugeth->p_rx_glbl_pram->l3qt[j/8], l3qt);
2795 out_be16(&ugeth->p_rx_glbl_pram->vlantype, ug_info->vlantype);
2798 out_be16(&ugeth->p_rx_glbl_pram->vlantci, ug_info->vlantci);
2801 out_be32(&ugeth->p_rx_glbl_pram->ecamptr, ug_info->ecamptr);
2805 ugeth->rx_bd_qs_tbl_offset =
2810 if (IS_ERR_VALUE(ugeth->rx_bd_qs_tbl_offset)) {
2811 if (netif_msg_ifup(ugeth))
2816 ugeth->p_rx_bd_qs_tbl =
2817 (struct ucc_geth_rx_bd_queues_entry __iomem *) qe_muram_addr(ugeth->
2819 out_be32(&ugeth->p_rx_glbl_pram->rbdqptr, ugeth->rx_bd_qs_tbl_offset);
2821 memset_io((void __iomem *)ugeth->p_rx_bd_qs_tbl,
2829 if (ugeth->ug_info->uf_info.bd_mem_part == MEM_PART_SYSTEM) {
2830 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2831 (u32) virt_to_phys(ugeth->p_rx_bd_ring[i]));
2832 } else if (ugeth->ug_info->uf_info.bd_mem_part ==
2834 out_be32(&ugeth->p_rx_bd_qs_tbl[i].externalbdbaseptr,
2835 (u32)qe_muram_dma(ugeth->p_rx_bd_ring[i]));
2843 if (ugeth->rx_extended_features)
2862 out_be32(&ugeth->p_rx_glbl_pram->remoder, remoder);
2872 &ugeth->p_tx_glbl_pram->txrmonbaseptr,
2873 ugeth->tx_fw_statistics_pram_offset,
2874 &ugeth->p_rx_glbl_pram->rxrmonbaseptr,
2875 ugeth->rx_fw_statistics_pram_offset,
2876 &ugeth->p_tx_glbl_pram->temoder,
2877 &ugeth->p_rx_glbl_pram->remoder);
2880 out_8(&ugeth->p_rx_glbl_pram->rstate, function_code);
2885 if (netif_msg_ifup(ugeth))
2892 ugeth->exf_glbl_param_offset =
2895 if (IS_ERR_VALUE(ugeth->exf_glbl_param_offset)) {
2896 if (netif_msg_ifup(ugeth))
2901 ugeth->p_exf_glbl_param =
2902 (struct ucc_geth_exf_global_pram __iomem *) qe_muram_addr(ugeth->
2904 out_be32(&ugeth->p_rx_glbl_pram->exfGlobalParam,
2905 ugeth->exf_glbl_param_offset);
2906 out_be32(&ugeth->p_exf_glbl_param->l2pcdptr,
2914 ugeth_82xx_filtering_clear_addr_in_paddr(ugeth, (u8) j);
2917 (struct ucc_geth_82xx_address_filtering_pram __iomem *) ugeth->
2920 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
2922 ugeth_82xx_filtering_clear_all_addr_in_hash(ugeth,
2939 if (!(ugeth->p_init_enet_param_shadow =
2941 if (netif_msg_ifup(ugeth))
2946 memset((char *)ugeth->p_init_enet_param_shadow,
2951 ugeth->p_init_enet_param_shadow->resinit1 =
2953 ugeth->p_init_enet_param_shadow->resinit2 =
2955 ugeth->p_init_enet_param_shadow->resinit3 =
2957 ugeth->p_init_enet_param_shadow->resinit4 =
2959 ugeth->p_init_enet_param_shadow->resinit5 =
2961 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2963 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2966 ugeth->p_init_enet_param_shadow->rgftgfrxglobal |=
2967 ugeth->rx_glbl_pram_offset | ug_info->riscRx;
2974 if (netif_msg_ifup(ugeth))
2978 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize =
2993 if ((ret_val = fill_init_enet_entries(ugeth, &(ugeth->
2999 if (netif_msg_ifup(ugeth))
3004 ugeth->p_init_enet_param_shadow->txglobal =
3005 ugeth->tx_glbl_pram_offset | ug_info->riscTx;
3007 fill_init_enet_entries(ugeth,
3008 &(ugeth->p_init_enet_param_shadow->
3013 if (netif_msg_ifup(ugeth))
3020 if ((ret_val = rx_bd_buffer_set(ugeth, (u8) i)) != 0) {
3021 if (netif_msg_ifup(ugeth))
3030 if (netif_msg_ifup(ugeth))
3039 ugeth->p_init_enet_param_shadow->resinit1);
3041 ugeth->p_init_enet_param_shadow->resinit2);
3043 ugeth->p_init_enet_param_shadow->resinit3);
3045 ugeth->p_init_enet_param_shadow->resinit4);
3047 ugeth->p_init_enet_param_shadow->resinit5);
3049 ugeth->p_init_enet_param_shadow->largestexternallookupkeysize);
3051 ugeth->p_init_enet_param_shadow->rgftgfrxglobal);
3054 ugeth->p_init_enet_param_shadow->rxthread[i]);
3056 ugeth->p_init_enet_param_shadow->txglobal);
3059 ugeth->p_init_enet_param_shadow->txthread[i]);
3063 ucc_fast_get_qe_cr_subblock(ugeth->ug_info->uf_info.ucc_num);
3078 struct ucc_geth_private *ugeth = netdev_priv(dev);
3090 spin_lock_irqsave(&ugeth->lock, flags);
3095 bd = ugeth->txBd[txQ];
3098 ugeth->tx_skbuff[txQ][ugeth->skb_curtx[txQ]] = skb;
3101 ugeth->skb_curtx[txQ] =
3102 (ugeth->skb_curtx[txQ] +
3103 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3107 dma_map_single(ugeth->dev, skb->data,
3121 bd = ugeth->p_tx_bd_ring[txQ];
3125 if (bd == ugeth->confBd[txQ]) {
3130 ugeth->txBd[txQ] = bd;
3134 if (ugeth->p_scheduler) {
3135 ugeth->cpucount[txQ]++;
3140 out_be16(ugeth->p_cpucount[txQ], ugeth->cpucount[txQ]);
3144 uccf = ugeth->uccf;
3147 spin_unlock_irqrestore(&ugeth->lock, flags);
3152 static int ucc_geth_rx(struct ucc_geth_private *ugeth, u8 rxQ, int rx_work_limit)
3163 dev = ugeth->ndev;
3166 bd = ugeth->rxBd[rxQ];
3174 skb = ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]];
3181 if (netif_msg_rx_err(ugeth))
3186 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = NULL;
3196 skb->protocol = eth_type_trans(skb, ugeth->ndev);
3203 skb = get_new_skb(ugeth, bd);
3205 if (netif_msg_rx_err(ugeth))
3211 ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = skb;
3214 ugeth->skb_currx[rxQ] =
3215 (ugeth->skb_currx[rxQ] +
3216 1) & RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);
3219 bd = ugeth->p_rx_bd_ring[rxQ];
3226 ugeth->rxBd[rxQ] = bd;
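ucc_geth_rx() (source lines 3152-3226) consumes descriptors until it reaches one the hardware still owns: pass the completed skb up the stack, refill the slot, and advance the software index modulo the ring length. The index arithmetic the matches show, in context (error and allocation-failure handling elided):

    /* Hand the received frame to the stack... */
    skb->protocol = eth_type_trans(skb, ugeth->ndev);
    netif_receive_skb(skb);

    /* ...refill the ring slot... */
    ugeth->rx_skbuff[rxQ][ugeth->skb_currx[rxQ]] = get_new_skb(ugeth, bd);

    /* ...and wrap the software index around the ring. */
    ugeth->skb_currx[rxQ] =
            (ugeth->skb_currx[rxQ] + 1) &
            RX_RING_MOD_MASK(ugeth->ug_info->bdRingLenRx[rxQ]);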
3233 struct ucc_geth_private *ugeth = netdev_priv(dev);
3239 bd = ugeth->confBd[txQ];
3250 skb = ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]];
3259 ugeth->tx_skbuff[txQ][ugeth->skb_dirtytx[txQ]] = NULL;
3260 ugeth->skb_dirtytx[txQ] =
3261 (ugeth->skb_dirtytx[txQ] +
3262 1) & TX_RING_MOD_MASK(ugeth->ug_info->bdRingLenTx[txQ]);
3272 bd = ugeth->p_tx_bd_ring[txQ];
3275 ugeth->confBd[txQ] = bd;
3282 struct ucc_geth_private *ugeth = container_of(napi, struct ucc_geth_private, napi);
3286 ug_info = ugeth->ug_info;
3289 spin_lock(&ugeth->lock);
3291 ucc_geth_tx(ugeth->ndev, i);
3292 spin_unlock(&ugeth->lock);
3296 howmany += ucc_geth_rx(ugeth, i, budget - howmany);
3300 setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
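ucc_geth_poll() (source lines 3282-3300) is the NAPI handler: reclaim TX completions under ugeth->lock, receive up to the budget across all RX queues, and re-arm the UCC event mask once polling is done. A sketch, assuming napi_complete_done() semantics for the completion step:

    static int ucc_geth_poll(struct napi_struct *napi, int budget)
    {
            struct ucc_geth_private *ugeth =
                    container_of(napi, struct ucc_geth_private, napi);
            int howmany = 0;
            int i;

            spin_lock(&ugeth->lock);
            for (i = 0; i < ugeth->ug_info->numQueuesTx; i++)
                    ucc_geth_tx(ugeth->ndev, i);
            spin_unlock(&ugeth->lock);

            for (i = 0; i < ugeth->ug_info->numQueuesRx; i++)
                    howmany += ucc_geth_rx(ugeth, i, budget - howmany);

            if (howmany < budget && napi_complete_done(napi, howmany))
                    /* Unmask RX/TX events now that polling has stopped. */
                    setbits32(ugeth->uccf->p_uccm,
                              UCCE_RX_EVENTS | UCCE_TX_EVENTS);

            return howmany;
    }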
3309 struct ucc_geth_private *ugeth = netdev_priv(dev);
3317 uccf = ugeth->uccf;
3318 ug_info = ugeth->ug_info;
3328 if (napi_schedule_prep(&ugeth->napi)) {
3331 __napi_schedule(&ugeth->napi);
3354 struct ucc_geth_private *ugeth = netdev_priv(dev);
3355 int irq = ugeth->ug_info->uf_info.irq;
3365 struct ucc_geth_private *ugeth = netdev_priv(dev);
3380 spin_lock_irq(&ugeth->lock);
3387 &ugeth->ug_regs->macstnaddr1,
3388 &ugeth->ug_regs->macstnaddr2);
3389 spin_unlock_irq(&ugeth->lock);
3394 static int ucc_geth_init_mac(struct ucc_geth_private *ugeth)
3396 struct net_device *dev = ugeth->ndev;
3399 err = ucc_struct_init(ugeth);
3401 netif_err(ugeth, ifup, dev, "Cannot configure internal struct, aborting\n");
3405 err = ucc_geth_startup(ugeth);
3407 netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
3411 err = adjust_enet_interface(ugeth);
3413 netif_err(ugeth, ifup, dev, "Cannot configure net device, aborting\n");
3425 &ugeth->ug_regs->macstnaddr1,
3426 &ugeth->ug_regs->macstnaddr2);
3428 err = ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3430 netif_err(ugeth, ifup, dev, "Cannot enable net device, aborting\n");
3436 ucc_geth_stop(ugeth);
3444 struct ucc_geth_private *ugeth = netdev_priv(dev);
3451 netif_err(ugeth, ifup, dev,
3458 netif_err(ugeth, ifup, dev, "Cannot initialize PHY, aborting\n");
3462 err = ucc_geth_init_mac(ugeth);
3464 netif_err(ugeth, ifup, dev, "Cannot initialize MAC, aborting\n");
3468 err = request_irq(ugeth->ug_info->uf_info.irq, ucc_geth_irq_handler,
3471 netif_err(ugeth, ifup, dev, "Cannot get IRQ for net device, aborting\n");
3475 phy_start(ugeth->phydev);
3476 napi_enable(&ugeth->napi);
3481 qe_alive_during_sleep() || ugeth->phydev->irq);
3482 device_set_wakeup_enable(&dev->dev, ugeth->wol_en);
3487 ucc_geth_stop(ugeth);
3494 struct ucc_geth_private *ugeth = netdev_priv(dev);
3498 napi_disable(&ugeth->napi);
3500 cancel_work_sync(&ugeth->timeout_work);
3501 ucc_geth_stop(ugeth);
3502 phy_disconnect(ugeth->phydev);
3503 ugeth->phydev = NULL;
3505 free_irq(ugeth->ug_info->uf_info.irq, ugeth->ndev);
3516 struct ucc_geth_private *ugeth;
3519 ugeth = container_of(work, struct ucc_geth_private, timeout_work);
3520 dev = ugeth->ndev;
3526 ugeth_dump_regs(ugeth);
3534 ucc_geth_stop(ugeth);
3535 ucc_geth_init_mac(ugeth);
3537 phy_start(ugeth->phydev);
3550 struct ucc_geth_private *ugeth = netdev_priv(dev);
3552 schedule_work(&ugeth->timeout_work);
3561 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3567 napi_disable(&ugeth->napi);
3573 ugeth_disable(ugeth, COMM_DIR_RX_AND_TX);
3575 if (ugeth->wol_en & WAKE_MAGIC) {
3576 setbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
3577 setbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
3578 ucc_fast_enable(ugeth->uccf, COMM_DIR_RX_AND_TX);
3579 } else if (!(ugeth->wol_en & WAKE_PHY)) {
3580 phy_stop(ugeth->phydev);
3589 struct ucc_geth_private *ugeth = netdev_priv(ndev);
3596 if (ugeth->wol_en & WAKE_MAGIC) {
3597 ucc_fast_disable(ugeth->uccf, COMM_DIR_RX_AND_TX);
3598 clrbits32(&ugeth->ug_regs->maccfg2, MACCFG2_MPE);
3599 clrbits32(ugeth->uccf->p_uccm, UCC_GETH_UCCE_MPD);
3601 ugeth_enable(ugeth, COMM_DIR_RX_AND_TX);
3607 ucc_geth_memclean(ugeth);
3609 err = ucc_geth_init_mac(ugeth);
3616 ugeth->oldlink = 0;
3617 ugeth->oldspeed = 0;
3618 ugeth->oldduplex = -1;
3620 phy_stop(ugeth->phydev);
3621 phy_start(ugeth->phydev);
3623 napi_enable(&ugeth->napi);
3662 struct ucc_geth_private *ugeth = netdev_priv(dev);
3667 if (!ugeth->phydev)
3670 return phy_mii_ioctl(ugeth->phydev, rq, cmd);
3693 struct ucc_geth_private *ugeth = NULL;
3864 dev = alloc_etherdev(sizeof(*ugeth));
3871 ugeth = netdev_priv(dev);
3872 spin_lock_init(&ugeth->lock);
3875 INIT_LIST_HEAD(&ugeth->group_hash_q);
3876 INIT_LIST_HEAD(&ugeth->ind_hash_q);
3889 INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
3890 netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
3894 ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
3895 ugeth->phy_interface = phy_interface;
3896 ugeth->max_speed = max_speed;
3903 if (netif_msg_probe(ugeth))
3913 ugeth->ug_info = ug_info;
3914 ugeth->dev = device;
3915 ugeth->ndev = dev;
3916 ugeth->node = np;
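The probe-time matches (source lines 3693-3916) show the usual netdev bring-up order: alloc_etherdev() sized for the private struct, then the lock, hash lists, deferred timeout work and NAPI context are initialized before the info and device pointers are filled in. Condensed from the lines above:

    dev = alloc_etherdev(sizeof(*ugeth));
    if (!dev)
            return -ENOMEM;

    ugeth = netdev_priv(dev);
    spin_lock_init(&ugeth->lock);
    INIT_LIST_HEAD(&ugeth->group_hash_q);
    INIT_LIST_HEAD(&ugeth->ind_hash_q);
    INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
    netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);

    ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
    ugeth->ug_info = ug_info;
    ugeth->dev = device;
    ugeth->ndev = dev;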
3934 struct ucc_geth_private *ugeth = netdev_priv(dev);
3938 ucc_geth_memclean(ugeth);
3941 of_node_put(ugeth->ug_info->tbi_node);
3942 of_node_put(ugeth->ug_info->phy_node);