Lines Matching defs:dev

115  * 	o several entry points race with dev->close
1064 nic->mii.dev = nic->netdev;
1261 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1603 struct net_device *dev = nic->netdev;
1604 struct net_device_stats *ns = &dev->stats;
1742 dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
1745 if (dma_mapping_error(&nic->pdev->dev, dma_addr))
1808 struct net_device *dev = nic->netdev;
1825 dev->stats.tx_packets++;
1826 dev->stats.tx_bytes += cb->skb->len;
1828 dma_unmap_single(&nic->pdev->dev,
1855 dma_unmap_single(&nic->pdev->dev,
1925 rx->dma_addr = dma_map_single(&nic->pdev->dev, rx->skb->data,
1928 if (dma_mapping_error(&nic->pdev->dev, rx->dma_addr)) {
1941 dma_sync_single_for_device(&nic->pdev->dev,
1953 struct net_device *dev = nic->netdev;
1963 dma_sync_single_for_cpu(&nic->pdev->dev, rx->dma_addr,
1983 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
1990 if (unlikely(dev->features & NETIF_F_RXFCS))
1997 dma_unmap_single(&nic->pdev->dev, rx->dma_addr, RFD_BUF_LEN,
2021 if (unlikely(dev->features & NETIF_F_RXALL)) {
2037 dev->stats.rx_packets++;
2038 dev->stats.rx_bytes += (actual_size - fcs_pad);
2099 dma_sync_single_for_device(&nic->pdev->dev,
2108 dma_sync_single_for_device(&nic->pdev->dev,
2114 dma_sync_single_for_device(&nic->pdev->dev,
2139 dma_unmap_single(&nic->pdev->dev,
2183 dma_sync_single_for_device(&nic->pdev->dev, rx->dma_addr,
2383 dma_sync_single_for_cpu(&nic->pdev->dev, nic->rx_to_clean->dma_addr,
2485 !device_can_wakeup(&nic->pdev->dev))
2493 device_set_wakeup_enable(&nic->pdev->dev, wol->wolopts);
2765 nic->mem = dma_alloc_coherent(&nic->pdev->dev, sizeof(struct mem),
2773 dma_free_coherent(&nic->pdev->dev, sizeof(struct mem),
2867 if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
2872 SET_NETDEV_DEV(netdev, &pdev->dev);
2936 device_set_wakeup_enable(&pdev->dev, true);
2948 &nic->pdev->dev,
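The common thread in these matches is that the driver always hands the DMA and power-management APIs the underlying PCI device's `struct device` (`&nic->pdev->dev` or `&pdev->dev`), never the `net_device`, while local `dev` variables are only shorthand for `nic->netdev` when touching `dev->stats` and `dev->features`. Below is a minimal, hypothetical sketch of the streaming-DMA pairing visible in the matches (`dma_map_single()`/`dma_mapping_error()` on transmit, `dma_unmap_single()` on clean-up). The names `my_nic`, `my_map_tx`, and `my_unmap_tx` are illustrative only and do not appear in e100.c; the real driver keeps this state in its own `struct nic` and command-block paths.

```c
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical driver state; e100 keeps equivalent pointers in struct nic. */
struct my_nic {
	struct net_device *netdev;
	struct pci_dev *pdev;
};

/*
 * Map an skb for device DMA against the PCI device's struct device and
 * check the mapping before giving the address to hardware, mirroring the
 * dma_map_single()/dma_mapping_error() pairing in the matches above.
 */
static int my_map_tx(struct my_nic *nic, struct sk_buff *skb,
		     dma_addr_t *dma_addr)
{
	*dma_addr = dma_map_single(&nic->pdev->dev, skb->data, skb->len,
				   DMA_TO_DEVICE);
	if (dma_mapping_error(&nic->pdev->dev, *dma_addr))
		return -ENOMEM;
	return 0;
}

/*
 * Undo the mapping once the hardware has consumed the buffer, as the
 * tx-clean path's dma_unmap_single() calls do, then release the skb.
 */
static void my_unmap_tx(struct my_nic *nic, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	dma_unmap_single(&nic->pdev->dev, dma_addr, skb->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(skb);
}
```

The same `&pdev->dev` handle is what the probe-time matches feed into `dma_set_mask()`, `dma_alloc_coherent()`, `SET_NETDEV_DEV()`, and `device_set_wakeup_enable()`, since DMA capability and wake-up state belong to the bus device rather than to the network interface.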