Lines Matching defs:data (Tsi108 Ethernet driver, tsi108_eth.c)
149 struct tsi108_prv_data *data = netdev_priv(dev);
155 TSI_READ(TSI108_EC_INTMASK), data->phy_ok,
156 data->link_up, data->speed, data->duplex);
159 data->txhead, data->txtail, data->txfree,
166 data->rxhead, data->rxtail, data->rxfree,
169 TSI_READ(TSI108_EC_RXERR), data->rxpending);
180 static int tsi108_read_mii(struct tsi108_prv_data *data, int reg)
185 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
202 static void tsi108_write_mii(struct tsi108_prv_data *data,
207 (data->phy << TSI108_MAC_MII_ADDR_PHY) |
220 struct tsi108_prv_data *data = netdev_priv(dev);
221 return tsi108_read_mii(data, reg);
226 struct tsi108_prv_data *data = netdev_priv(dev);
227 tsi108_write_mii(data, reg, val);
230 static inline void tsi108_write_tbi(struct tsi108_prv_data *data,
273 struct tsi108_prv_data *data = netdev_priv(dev);
281 if (!data->phy_ok)
284 duplex = mii_check_media(&data->mii_if, netif_msg_link(data), data->init_media);
285 data->init_media = 0;
289 speed = mii_speed(&data->mii_if);
291 if ((speed != data->speed) || duplex) {
306 data->speed = speed;
308 if (data->mii_if.full_duplex) {
311 data->duplex = 2;
315 data->duplex = 1;
322 if (data->link_up == 0) {
328 spin_lock(&data->txlock);
329 if (is_valid_ether_addr(dev->dev_addr) && data->txfree)
332 data->link_up = 1;
333 spin_unlock(&data->txlock);
336 if (data->link_up == 1) {
338 data->link_up = 0;
360 struct tsi108_prv_data *data = netdev_priv(dev);
364 spin_lock_irqsave(&data->misclock, flags);
373 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
377 &data->stats.rx_packets);
380 TSI108_STAT_RXFCS_CARRY, &data->rx_fcs);
384 &data->stats.multicast);
388 &data->stats.rx_frame_errors);
392 &data->stats.rx_length_errors);
395 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
398 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
401 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
404 TSI108_STAT_RXJABBER_CARRY, &data->rx_long_fcs);
408 &data->stats.rx_missed_errors);
411 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
415 &data->stats.tx_packets);
419 &data->stats.tx_aborted_errors);
422 TSI108_STAT_TXEXCOL_CARRY, &data->tx_coll_abort);
426 &data->stats.collisions);
430 &data->tx_pause_drop);
432 spin_unlock_irqrestore(&data->misclock, flags);
436 * data->misclock must be held.
439 tsi108_read_stat(struct tsi108_prv_data * data, int reg, int carry_bit,
471 struct tsi108_prv_data *data = netdev_priv(dev);
472 spin_lock_irq(&data->misclock);
474 data->tmpstats.rx_packets =
475 tsi108_read_stat(data, TSI108_STAT_RXPKTS,
477 TSI108_STAT_RXPKTS_CARRY, &data->stats.rx_packets);
479 data->tmpstats.tx_packets =
480 tsi108_read_stat(data, TSI108_STAT_TXPKTS,
482 TSI108_STAT_TXPKTS_CARRY, &data->stats.tx_packets);
484 data->tmpstats.rx_bytes =
485 tsi108_read_stat(data, TSI108_STAT_RXBYTES,
487 TSI108_STAT_RXBYTES_CARRY, &data->stats.rx_bytes);
489 data->tmpstats.tx_bytes =
490 tsi108_read_stat(data, TSI108_STAT_TXBYTES,
492 TSI108_STAT_TXBYTES_CARRY, &data->stats.tx_bytes);
494 data->tmpstats.multicast =
495 tsi108_read_stat(data, TSI108_STAT_RXMCAST,
497 TSI108_STAT_RXMCAST_CARRY, &data->stats.multicast);
499 excol = tsi108_read_stat(data, TSI108_STAT_TXEXCOL,
502 &data->tx_coll_abort);
504 data->tmpstats.collisions =
505 tsi108_read_stat(data, TSI108_STAT_TXTCOL,
507 TSI108_STAT_TXTCOL_CARRY, &data->stats.collisions);
509 data->tmpstats.collisions += excol;
511 data->tmpstats.rx_length_errors =
512 tsi108_read_stat(data, TSI108_STAT_RXLENGTH,
515 &data->stats.rx_length_errors);
517 data->tmpstats.rx_length_errors +=
518 tsi108_read_stat(data, TSI108_STAT_RXRUNT,
520 TSI108_STAT_RXRUNT_CARRY, &data->rx_underruns);
522 data->tmpstats.rx_length_errors +=
523 tsi108_read_stat(data, TSI108_STAT_RXJUMBO,
525 TSI108_STAT_RXJUMBO_CARRY, &data->rx_overruns);
527 data->tmpstats.rx_frame_errors =
528 tsi108_read_stat(data, TSI108_STAT_RXALIGN,
531 &data->stats.rx_frame_errors);
533 data->tmpstats.rx_frame_errors +=
534 tsi108_read_stat(data, TSI108_STAT_RXFCS,
536 &data->rx_fcs);
538 data->tmpstats.rx_frame_errors +=
539 tsi108_read_stat(data, TSI108_STAT_RXFRAG,
541 TSI108_STAT_RXFRAG_CARRY, &data->rx_short_fcs);
543 data->tmpstats.rx_missed_errors =
544 tsi108_read_stat(data, TSI108_STAT_RXDROP,
547 &data->stats.rx_missed_errors);
550 data->tmpstats.rx_fifo_errors = data->stats.rx_fifo_errors;
551 data->tmpstats.rx_crc_errors = data->stats.rx_crc_errors;
553 data->tmpstats.tx_aborted_errors =
554 tsi108_read_stat(data, TSI108_STAT_TXEXDEF,
557 &data->stats.tx_aborted_errors);
559 data->tmpstats.tx_aborted_errors +=
560 tsi108_read_stat(data, TSI108_STAT_TXPAUSEDROP,
563 &data->tx_pause_drop);
565 data->tmpstats.tx_aborted_errors += excol;
567 data->tmpstats.tx_errors = data->tmpstats.tx_aborted_errors;
568 data->tmpstats.rx_errors = data->tmpstats.rx_length_errors +
569 data->tmpstats.rx_crc_errors +
570 data->tmpstats.rx_frame_errors +
571 data->tmpstats.rx_fifo_errors + data->tmpstats.rx_missed_errors;
573 spin_unlock_irq(&data->misclock);
574 return &data->tmpstats;
577 static void tsi108_restart_rx(struct tsi108_prv_data * data, struct net_device *dev)
586 static void tsi108_restart_tx(struct tsi108_prv_data * data)
600 struct tsi108_prv_data *data = netdev_priv(dev);
605 while (!data->txfree || data->txhead != data->txtail) {
606 tx = data->txtail;
608 if (data->txring[tx].misc & TSI108_TX_OWN)
611 skb = data->txskbs[tx];
613 if (!(data->txring[tx].misc & TSI108_TX_OK))
615 dev->name, data->txring[tx].misc);
617 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
618 data->txfree++;
620 if (data->txring[tx].misc & TSI108_TX_EOF) {
627 if (is_valid_ether_addr(dev->dev_addr) && data->link_up)
634 struct tsi108_prv_data *data = netdev_priv(dev);
638 if (!data->phy_ok && net_ratelimit())
641 if (!data->link_up) {
648 if (data->txfree < MAX_SKB_FRAGS + 1) {
657 if (data->txfree - frags < MAX_SKB_FRAGS + 1) {
661 spin_lock_irq(&data->txlock);
665 int tx = data->txhead;
678 ((TSI108_TXRING_LEN - data->txfree) >= TSI108_TX_INT_FREQ))
681 data->txskbs[tx] = skb;
684 data->txring[tx].buf0 = dma_map_single(&data->pdev->dev,
685 skb->data, skb_headlen(skb),
687 data->txring[tx].len = skb_headlen(skb);
692 data->txring[tx].buf0 =
693 skb_frag_dma_map(&data->pdev->dev, frag,
696 data->txring[tx].len = skb_frag_size(frag);
702 if (netif_msg_pktdata(data)) {
707 printk(" %2.2x", skb->data[i]);
710 data->txring[tx].misc = misc | TSI108_TX_OWN;
712 data->txhead = (data->txhead + 1) % TSI108_TXRING_LEN;
713 data->txfree--;
723 tsi108_restart_tx(data);
725 spin_unlock_irq(&data->txlock);
731 struct tsi108_prv_data *data = netdev_priv(dev);
734 while (data->rxfree && done != budget) {
735 int rx = data->rxtail;
738 if (data->rxring[rx].misc & TSI108_RX_OWN)
741 skb = data->rxskbs[rx];
742 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
743 data->rxfree--;
746 if (data->rxring[rx].misc & TSI108_RX_BAD) {
747 spin_lock_irq(&data->misclock);
749 if (data->rxring[rx].misc & TSI108_RX_CRC)
750 data->stats.rx_crc_errors++;
751 if (data->rxring[rx].misc & TSI108_RX_OVER)
752 data->stats.rx_fifo_errors++;
754 spin_unlock_irq(&data->misclock);
759 if (netif_msg_pktdata(data)) {
762 dev->name, data->rxring[rx].len);
763 for (i = 0; i < data->rxring[rx].len; i++)
764 printk(" %2.2x", skb->data[i]);
768 skb_put(skb, data->rxring[rx].len);
778 struct tsi108_prv_data *data = netdev_priv(dev);
781 while (data->rxfree != TSI108_RXRING_LEN && done != budget) {
782 int rx = data->rxhead;
786 data->rxskbs[rx] = skb;
790 data->rxring[rx].buf0 = dma_map_single(&data->pdev->dev,
791 skb->data, TSI108_RX_SKB_SIZE,
799 data->rxring[rx].blen = TSI108_RX_SKB_SIZE;
800 data->rxring[rx].misc = TSI108_RX_OWN | TSI108_RX_INT;
802 data->rxhead = (data->rxhead + 1) % TSI108_RXRING_LEN;
803 data->rxfree++;
809 tsi108_restart_rx(data, dev);
816 struct tsi108_prv_data *data = container_of(napi, struct tsi108_prv_data, napi);
817 struct net_device *dev = data->dev;
828 if (data->rxpending || (estat & TSI108_EC_RXESTAT_Q0_DESCINT))
843 if (data->rxfree < TSI108_RXRING_LEN)
857 tsi108_restart_rx(data, dev);
862 spin_lock_irq(&data->misclock);
863 data->stats.rx_fifo_errors++;
864 spin_unlock_irq(&data->misclock);
868 data->rxpending = 0;
879 data->rxpending = 1;
887 struct tsi108_prv_data *data = netdev_priv(dev);
900 if (napi_schedule_prep(&data->napi)) {
911 __napi_schedule(&data->napi);
951 struct tsi108_prv_data *data = netdev_priv(dev);
958 if (netif_running(dev) && data->rxfree < TSI108_RXRING_LEN / 4)
964 struct tsi108_prv_data *data = netdev_priv(dev);
979 spin_lock(&data->txlock);
981 spin_unlock(&data->txlock);
989 struct tsi108_prv_data *data = netdev_priv(dev);
1021 struct tsi108_prv_data *data = netdev_priv(dev);
1042 static void tsi108_reset_ether(struct tsi108_prv_data * data)
1078 struct tsi108_prv_data *data = netdev_priv(dev);
1092 if (0x8 == data->phy)
1127 struct tsi108_prv_data *data = netdev_priv(dev);
1141 spin_lock_irq(&data->misclock);
1144 spin_lock(&data->txlock);
1146 if (data->txfree && data->link_up)
1149 spin_unlock(&data->txlock);
1150 spin_unlock_irq(&data->misclock);
1157 struct tsi108_prv_data *data = netdev_priv(dev);
1173 memset(data->mc_hash, 0, sizeof(data->mc_hash));
1180 __set_bit(hash, &data->mc_hash[0]);
1189 * back-to-back writes to the data register.
1193 data->mc_hash[i]);
1203 struct tsi108_prv_data *data = netdev_priv(dev);
1210 tsi108_write_mii(data, MII_BMCR, BMCR_RESET);
1212 if(!(tsi108_read_mii(data, MII_BMCR) & BMCR_RESET))
1219 if (data->phy_type == TSI108_PHY_BCM54XX) {
1220 tsi108_write_mii(data, 0x09, 0x0300);
1221 tsi108_write_mii(data, 0x10, 0x1020);
1222 tsi108_write_mii(data, 0x1c, 0x8c00);
1225 tsi108_write_mii(data,
1228 while (tsi108_read_mii(data, MII_BMCR) & BMCR_ANRESTART)
1236 tsi108_write_tbi(data, 0x11, 0x30);
1242 data->link_up = 0;
1244 while (!((phyval = tsi108_read_mii(data, MII_BMSR)) &
1254 data->mii_if.supports_gmii = mii_check_gmii_support(&data->mii_if);
1256 data->phy_ok = 1;
1257 data->init_media = 1;
1263 struct tsi108_prv_data *data = netdev_priv(dev);
1267 tsi108_write_mii(data, MII_BMCR, BMCR_PDOWN);
1268 data->phy_ok = 0;
1275 struct tsi108_prv_data *data = netdev_priv(dev);
1279 i = request_irq(data->irq_num, tsi108_irq, 0, dev->name, dev);
1282 data->id, data->irq_num);
1285 dev->irq = data->irq_num;
1288 data->id, dev->irq, dev->name);
1291 data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size,
1292 &data->rxdma, GFP_KERNEL);
1293 if (!data->rxring) {
1294 free_irq(data->irq_num, dev);
1298 data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size,
1299 &data->txdma, GFP_KERNEL);
1300 if (!data->txring) {
1301 free_irq(data->irq_num, dev);
1302 dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring,
1303 data->rxdma);
1308 data->rxring[i].next0 = data->rxdma + (i + 1) * sizeof(rx_desc);
1309 data->rxring[i].blen = TSI108_RXBUF_SIZE;
1310 data->rxring[i].vlan = 0;
1313 data->rxring[TSI108_RXRING_LEN - 1].next0 = data->rxdma;
1315 data->rxtail = 0;
1316 data->rxhead = 0;
1330 data->rxhead = i;
1334 data->rxskbs[i] = skb;
1335 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1336 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
1339 data->rxfree = i;
1340 TSI_WRITE(TSI108_EC_RXQ_PTRLOW, data->rxdma);
1343 data->txring[i].next0 = data->txdma + (i + 1) * sizeof(tx_desc);
1344 data->txring[i].misc = 0;
1347 data->txring[TSI108_TXRING_LEN - 1].next0 = data->txdma;
1348 data->txtail = 0;
1349 data->txhead = 0;
1350 data->txfree = TSI108_TXRING_LEN;
1351 TSI_WRITE(TSI108_EC_TXQ_PTRLOW, data->txdma);
1354 napi_enable(&data->napi);
1356 timer_setup(&data->timer, tsi108_timed_checker, 0);
1357 mod_timer(&data->timer, jiffies + 1);
1359 tsi108_restart_rx(data, dev);
1377 struct tsi108_prv_data *data = netdev_priv(dev);
1380 napi_disable(&data->napi);
1382 del_timer_sync(&data->timer);
1391 while (!data->txfree || data->txhead != data->txtail) {
1392 int tx = data->txtail;
1394 skb = data->txskbs[tx];
1395 data->txtail = (data->txtail + 1) % TSI108_TXRING_LEN;
1396 data->txfree++;
1400 free_irq(data->irq_num, dev);
1404 while (data->rxfree) {
1405 int rx = data->rxtail;
1408 skb = data->rxskbs[rx];
1409 data->rxtail = (data->rxtail + 1) % TSI108_RXRING_LEN;
1410 data->rxfree--;
1414 dma_free_coherent(&data->pdev->dev,
1416 data->rxring, data->rxdma);
1417 dma_free_coherent(&data->pdev->dev,
1419 data->txring, data->txdma);
1426 struct tsi108_prv_data *data = netdev_priv(dev);
1488 struct tsi108_prv_data *data = netdev_priv(dev);
1491 spin_lock_irqsave(&data->txlock, flags);
1492 mii_ethtool_get_link_ksettings(&data->mii_if, cmd);
1493 spin_unlock_irqrestore(&data->txlock, flags);
1501 struct tsi108_prv_data *data = netdev_priv(dev);
1505 spin_lock_irqsave(&data->txlock, flags);
1506 rc = mii_ethtool_set_link_ksettings(&data->mii_if, cmd);
1507 spin_unlock_irqrestore(&data->txlock, flags);
1514 struct tsi108_prv_data *data = netdev_priv(dev);
1517 return generic_mii_ioctl(&data->mii_if, if_mii(rq), cmd, NULL);
1541 struct tsi108_prv_data *data = NULL;
1548 printk(KERN_ERR "tsi-eth %d: Missing additional data!\n",
1560 data = netdev_priv(dev);
1561 data->dev = dev;
1562 data->pdev = pdev;
1568 data->regs = ioremap(einfo->regs, 0x400);
1569 if (NULL == data->regs) {
1574 data->phyregs = ioremap(einfo->phyregs, 0x400);
1575 if (NULL == data->phyregs) {
1580 data->mii_if.dev = dev;
1581 data->mii_if.mdio_read = tsi108_mdio_read;
1582 data->mii_if.mdio_write = tsi108_mdio_write;
1583 data->mii_if.phy_id = einfo->phy;
1584 data->mii_if.phy_id_mask = 0x1f;
1585 data->mii_if.reg_num_mask = 0x1f;
1587 data->phy = einfo->phy;
1588 data->phy_type = einfo->phy_type;
1589 data->irq_num = einfo->irq_num;
1590 data->id = pdev->id;
1591 netif_napi_add(dev, &data->napi, tsi108_poll);
1605 spin_lock_init(&data->txlock);
1606 spin_lock_init(&data->misclock);
1608 tsi108_reset_ether(data);
1629 data->msg_enable = DEBUG;
1636 iounmap(data->phyregs);
1639 iounmap(data->regs);
1655 struct tsi108_prv_data *data = from_timer(data, t, timer);
1656 struct net_device *dev = data->dev;
1660 mod_timer(&data->timer, jiffies + CHECK_PHY_INTERVAL);
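The repeated txhead/txtail/txfree (and rxhead/rxtail/rxfree) updates in the matches above follow a standard fixed-size descriptor-ring bookkeeping pattern: the producer advances head and decrements free, the completion path advances tail and increments free, and "work still pending" is tested as `!free || head != tail`. The following is a minimal stand-alone sketch of that bookkeeping only, not the driver's code: the names `ring_post`, `ring_complete`, and `ring_busy` are hypothetical, and per-descriptor ownership bits, locking, and DMA mapping are deliberately omitted.

```c
#include <stdbool.h>

#define RING_LEN 256	/* stands in for TSI108_TXRING_LEN / TSI108_RXRING_LEN */

struct ring {
	unsigned int head;	/* next slot the producer will fill  */
	unsigned int tail;	/* oldest slot awaiting completion   */
	unsigned int free;	/* number of unused descriptor slots */
};

/* Producer side: claim one descriptor (head advances, free shrinks),
 * as in the send path lines where txhead is bumped and txfree--. */
static bool ring_post(struct ring *r)
{
	if (!r->free)
		return false;	/* ring full; caller must stop queuing */
	r->head = (r->head + 1) % RING_LEN;
	r->free--;
	return true;
}

/* Completion side: retire one descriptor (tail advances, free grows),
 * as in the completion loop lines where txtail is bumped and txfree++. */
static void ring_complete(struct ring *r)
{
	r->tail = (r->tail + 1) % RING_LEN;
	r->free++;
}

/* Mirrors the loop condition in the listing:
 * "!free" covers the completely-full case where head == tail again. */
static bool ring_busy(const struct ring *r)
{
	return !r->free || r->head != r->tail;
}
```

With free tracked separately, head == tail is ambiguous on its own (empty or completely full); the extra counter is what lets the completion loop in the listing distinguish the two cases.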