Lines Matching defs:hw_priv
3470 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3471 struct pci_dev *pdev = hw_priv->pdev;
3488 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3489 struct pci_dev *pdev = hw_priv->pdev;
3510 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3511 struct pci_dev *pdev = hw_priv->pdev;
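The three sites above (source lines 3470-3511) recover the adapter structure from a bare struct ksz_hw pointer with container_of(). A minimal sketch of that pattern, with dev_info reduced to the two members involved and a helper name that is illustrative rather than the driver's:

#include <linux/kernel.h>        /* container_of() */
#include <linux/pci.h>

struct ksz_hw {
        void __iomem *io;        /* register window; other fields trimmed */
};

struct dev_info {
        struct pci_dev *pdev;    /* owning PCI function */
        struct ksz_hw hw;        /* embedded by value, not a pointer */
};

/* container_of() walks back from the embedded member to the enclosing
 * structure, so these call sites need no back-pointer (the driver also
 * keeps hw->parent, cf. source line 7009, for other paths). */
static inline struct dev_info *ksz_hw_to_dev_info(struct ksz_hw *hw)
{
        return container_of(hw, struct dev_info, hw);
}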
4634 struct dev_info *hw_priv = priv->adapter;
4635 struct ksz_hw *hw = &hw_priv->hw;
4662 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4680 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
4703 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4729 * @hw_priv: Hardware device information.
4734 static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4738 struct ksz_hw *hw = &hw_priv->hw;
4744 spin_lock_irq(&hw_priv->hwlock);
4759 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
4777 spin_unlock_irq(&hw_priv->hwlock);
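Source lines 4662-4703 map each outgoing skb for DMA, and transmit_cleanup() unmaps it at line 4759 once the descriptor is done. A hedged sketch of that streaming-DMA lifecycle; the helper names and the ksz_dma_buf layout are assumptions, and dev_kfree_skb_any() is used as a generically safe stand-in for the driver's actual free call:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct ksz_dma_buf {                     /* assumed minimal bookkeeping */
        struct sk_buff *skb;
        dma_addr_t dma;
        int len;
};

/* Transmit side: map the skb before handing the descriptor to hardware. */
static int ksz_map_tx_skb(struct dev_info *hw_priv,
                          struct ksz_dma_buf *dma_buf, struct sk_buff *skb)
{
        dma_buf->skb = skb;
        dma_buf->len = skb->len;
        dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
                                      dma_buf->len, DMA_TO_DEVICE);
        if (dma_mapping_error(&hw_priv->pdev->dev, dma_buf->dma))
                return -ENOMEM;          /* caller drops or requeues the skb */
        return 0;
}

/* Cleanup side, run under hw_priv->hwlock in transmit_cleanup(). */
static void ksz_unmap_tx_skb(struct dev_info *hw_priv,
                             struct ksz_dma_buf *dma_buf)
{
        dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma, dma_buf->len,
                         DMA_TO_DEVICE);
        dev_kfree_skb_any(dma_buf->skb); /* safe from any context */
        dma_buf->skb = NULL;
}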
4786 * @hw_priv: Hardware device information.
4791 static void tx_done(struct dev_info *hw_priv)
4793 struct ksz_hw *hw = &hw_priv->hw;
4796 transmit_cleanup(hw_priv, 1);
4829 struct dev_info *hw_priv = priv->adapter;
4830 struct ksz_hw *hw = &hw_priv->hw;
4855 spin_lock_irq(&hw_priv->hwlock);
4884 spin_unlock_irq(&hw_priv->hwlock);
4904 struct dev_info *hw_priv = priv->adapter;
4905 struct ksz_hw *hw = &hw_priv->hw;
4914 hw_priv = NULL;
4918 if (hw_priv) {
4922 transmit_cleanup(hw_priv, 0);
4925 ksz_init_rx_buffers(hw_priv);
4985 struct dev_info *hw_priv = priv->adapter;
4993 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
5028 static int dev_rcv_packets(struct dev_info *hw_priv)
5032 struct ksz_hw *hw = &hw_priv->hw;
5064 static int port_rcv_packets(struct dev_info *hw_priv)
5068 struct ksz_hw *hw = &hw_priv->hw;
5109 static int dev_rcv_special(struct dev_info *hw_priv)
5113 struct ksz_hw *hw = &hw_priv->hw;
5170 struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
5171 struct ksz_hw *hw = &hw_priv->hw;
5175 if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
5181 spin_lock_irq(&hw_priv->hwlock);
5183 spin_unlock_irq(&hw_priv->hwlock);
5186 tasklet_schedule(&hw_priv->rx_tasklet);
5192 struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
5193 struct ksz_hw *hw = &hw_priv->hw;
5197 tx_done(hw_priv);
5200 spin_lock_irq(&hw_priv->hwlock);
5202 spin_unlock_irq(&hw_priv->hwlock);
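rx_proc_task() and tx_proc_task() above (source lines 5170-5202) recover hw_priv with from_tasklet(), i.e. container_of() applied to the tasklet_struct embedded in dev_info; tasklet_setup() at lines 5445-5446 registers them. A reduced sketch of the RX side; the driver's actual interrupt re-enable call inside the locked region is not part of this listing and is only described by a comment:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

static void rx_proc_task(struct tasklet_struct *t)
{
        /* Expands to container_of(t, struct dev_info, rx_tasklet). */
        struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);

        if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
                /* Ring drained: re-enable the RX interrupt source here,
                 * under hwlock (real call omitted). */
                spin_lock_irq(&hw_priv->hwlock);
                spin_unlock_irq(&hw_priv->hwlock);
        } else {
                /* Budget used up while frames remain: run again. */
                tasklet_schedule(&hw_priv->rx_tasklet);
        }
}

/* Registered once in netdev_open():
 *        tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
 */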
5236 struct dev_info *hw_priv = priv->adapter;
5237 struct ksz_hw *hw = &hw_priv->hw;
5239 spin_lock(&hw_priv->hwlock);
5245 spin_unlock(&hw_priv->hwlock);
5255 tasklet_schedule(&hw_priv->tx_tasklet);
5260 tasklet_schedule(&hw_priv->rx_tasklet);
5294 spin_unlock(&hw_priv->hwlock);
5309 struct dev_info *hw_priv = priv->adapter;
5311 hw_dis_intr(&hw_priv->hw);
5349 struct dev_info *hw_priv = priv->adapter;
5351 struct ksz_hw *hw = &hw_priv->hw;
5373 if (!hw_priv->wol_enable)
5381 hw_priv->opened--;
5382 if (!(hw_priv->opened)) {
5383 ksz_stop_timer(&hw_priv->mib_timer_info);
5384 flush_work(&hw_priv->mib_read);
5393 tasklet_kill(&hw_priv->rx_tasklet);
5394 tasklet_kill(&hw_priv->tx_tasklet);
5395 free_irq(dev->irq, hw_priv->dev);
5397 transmit_cleanup(hw_priv, 0);
5409 static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
5423 hw_priv->dev_rcv = dev_rcv_special;
5427 hw_priv->dev_rcv = port_rcv_packets;
5429 hw_priv->dev_rcv = dev_rcv_packets;
5436 struct dev_info *hw_priv = priv->adapter;
5437 struct ksz_hw *hw = &hw_priv->hw;
5441 hw_priv->dev = dev;
5445 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
5446 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
5457 hw_cfg_huge_frame(hw_priv, hw);
5458 ksz_init_rx_buffers(hw_priv);
5486 struct dev_info *hw_priv = priv->adapter;
5487 struct ksz_hw *hw = &hw_priv->hw;
5501 if (!(hw_priv->opened)) {
5510 hw_priv->counter[i].time = next_jiffies;
5519 hw_clr_wol_pme_status(&hw_priv->hw);
5546 if (!(hw_priv->opened)) {
5552 ksz_start_timer(&hw_priv->mib_timer_info,
5553 hw_priv->mib_timer_info.period);
5556 hw_priv->opened++;
5647 struct dev_info *hw_priv = priv->adapter;
5648 struct ksz_hw *hw = &hw_priv->hw;
5735 struct dev_info *hw_priv = priv->adapter;
5736 struct ksz_hw *hw = &hw_priv->hw;
5742 if (hw_priv->hw.dev_count > 1)
5747 if (hw_priv->hw.dev_count > 1)
5783 struct dev_info *hw_priv = priv->adapter;
5784 struct ksz_hw *hw = &hw_priv->hw;
5792 if (dev != hw_priv->dev)
5804 hw_priv->mtu = hw_mtu;
5823 struct dev_info *hw_priv = priv->adapter;
5824 struct ksz_hw *hw = &hw_priv->hw;
5942 struct dev_info *hw_priv = priv->adapter;
5944 mutex_lock(&hw_priv->lock);
5947 mutex_unlock(&hw_priv->lock);
5969 struct dev_info *hw_priv = priv->adapter;
6002 mutex_lock(&hw_priv->lock);
6024 mutex_unlock(&hw_priv->lock);
6039 struct dev_info *hw_priv = priv->adapter;
6042 mutex_lock(&hw_priv->lock);
6044 mutex_unlock(&hw_priv->lock);
6076 struct dev_info *hw_priv = priv->adapter;
6080 strlcpy(info->bus_info, pci_name(hw_priv->pdev),
6129 struct dev_info *hw_priv = priv->adapter;
6130 struct ksz_hw *hw = &hw_priv->hw;
6135 mutex_lock(&hw_priv->lock);
6138 pci_read_config_dword(hw_priv->pdev, len, buf);
6148 mutex_unlock(&hw_priv->lock);
6167 struct dev_info *hw_priv = priv->adapter;
6169 wol->supported = hw_priv->wol_support;
6170 wol->wolopts = hw_priv->wol_enable;
6187 struct dev_info *hw_priv = priv->adapter;
6192 if (wol->wolopts & ~hw_priv->wol_support)
6195 hw_priv->wol_enable = wol->wolopts;
6199 hw_priv->wol_enable |= WAKE_PHY;
6200 hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
6262 struct dev_info *hw_priv = priv->adapter;
6269 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6290 struct dev_info *hw_priv = priv->adapter;
6301 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
6307 eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
6324 struct dev_info *hw_priv = priv->adapter;
6325 struct ksz_hw *hw = &hw_priv->hw;
6357 struct dev_info *hw_priv = priv->adapter;
6358 struct ksz_hw *hw = &hw_priv->hw;
6361 mutex_lock(&hw_priv->lock);
6386 mutex_unlock(&hw_priv->lock);
6402 struct dev_info *hw_priv = priv->adapter;
6403 struct ksz_hw *hw = &hw_priv->hw;
6465 struct dev_info *hw_priv = priv->adapter;
6466 struct ksz_hw *hw = &hw_priv->hw;
6485 struct dev_info *hw_priv = priv->adapter;
6486 struct ksz_hw *hw = &hw_priv->hw;
6508 struct dev_info *hw_priv = priv->adapter;
6509 struct ksz_hw *hw = &hw_priv->hw;
6517 mutex_lock(&hw_priv->lock);
6521 hw_priv->counter[p].read = 1;
6528 mutex_unlock(&hw_priv->lock);
6531 schedule_work(&hw_priv->mib_read);
6536 hw_priv->counter[p].counter,
6537 2 == hw_priv->counter[p].read,
6543 hw_priv->counter[p].counter,
6544 2 == hw_priv->counter[p].read,
6548 hw_priv->counter[p].counter,
6549 2 == hw_priv->counter[p].read,
6576 struct dev_info *hw_priv = priv->adapter;
6577 struct ksz_hw *hw = &hw_priv->hw;
6579 mutex_lock(&hw_priv->lock);
6590 mutex_unlock(&hw_priv->lock);
6634 struct dev_info *hw_priv =
6636 struct ksz_hw *hw = &hw_priv->hw;
6645 if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
6650 hw_priv->counter[i].read = 0;
6654 hw_priv->counter[i].read = 2;
6656 &hw_priv->counter[i].counter);
6658 } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
6661 hw_priv->counter[i].read = 1;
6663 hw_priv->counter[i].time = next_jiffies;
6670 hw_priv->counter[i].read = 1;
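The ethtool statistics fragments at source lines 6517-6549 and the mib_read_work fragments at 6645-6670 form a request/complete handshake over the per-port wait queue. A condensed sketch of both sides; the helper names, the wake_up_interruptible() call, and the timeout length are assumptions filled in around the visible lines:

#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

/* Requester side (ethtool .get_ethtool_stats path). */
static void ksz_request_mib(struct dev_info *hw_priv, int p)
{
        mutex_lock(&hw_priv->lock);
        hw_priv->counter[p].read = 1;    /* 1 = refresh requested */
        mutex_unlock(&hw_priv->lock);

        schedule_work(&hw_priv->mib_read);

        /* Sleep until mib_read_work() marks the counters valid. */
        wait_event_interruptible_timeout(hw_priv->counter[p].counter,
                                         2 == hw_priv->counter[p].read,
                                         HZ);
}

/* Completer side (end of the per-port loop in mib_read_work()). */
static void ksz_complete_mib(struct dev_info *hw_priv, int i)
{
        hw_priv->counter[i].read = 2;    /* 2 = counters now valid */
        wake_up_interruptible(&hw_priv->counter[i].counter);
}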
6677 struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
6679 mib_read_work(&hw_priv->mib_read);
6682 if (hw_priv->pme_wait) {
6683 if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
6684 hw_clr_wol_pme_status(&hw_priv->hw);
6685 hw_priv->pme_wait = 0;
6687 } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
6690 hw_priv->pme_wait = jiffies + HZ * 2;
6693 ksz_update_timer(&hw_priv->mib_timer_info);
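The timer callback at source lines 6677-6693 is the timer-side twin of the tasklet pattern: from_timer() recovers dev_info from the timer_list embedded in mib_timer_info. A reassembled sketch of the visible fragments; the callback name and the lines between the visible ones are assumptions:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void mib_monitor(struct timer_list *t)
{
        struct dev_info *hw_priv = from_timer(hw_priv, t,
                                              mib_timer_info.timer);

        mib_read_work(&hw_priv->mib_read);       /* refresh counters inline */

        /* A detected PME (wake) event is held for about two seconds
         * before its status bit is cleared again. */
        if (hw_priv->pme_wait) {
                if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
                        hw_clr_wol_pme_status(&hw_priv->hw);
                        hw_priv->pme_wait = 0;
                }
        } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
                hw_priv->pme_wait = jiffies + HZ * 2;
        }

        ksz_update_timer(&hw_priv->mib_timer_info);      /* re-arm */
}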
6706 struct dev_info *hw_priv = priv->adapter;
6707 struct ksz_hw *hw = &hw_priv->hw;
6838 static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
6864 hw_priv->hw.override_addr[j++] = (u8) num;
6865 hw_priv->hw.override_addr[5] +=
6866 hw_priv->hw.id;
6868 hw_priv->hw.ksz_switch->other_addr[j++] =
6870 hw_priv->hw.ksz_switch->other_addr[5] +=
6871 hw_priv->hw.id;
6879 hw_priv->hw.mac_override = 1;
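get_mac_addr() at source lines 6838-6879 parses a user-supplied MAC string into override_addr (or the switch's other_addr), offsets the last octet by the device id, and sets mac_override. A hypothetical, condensed equivalent; the helper name, the strict "xx:xx:xx:xx:xx:xx" format, and the hex_to_bin() parsing are simplifications rather than the driver's hand-rolled parser:

#include <linux/kernel.h>        /* hex_to_bin() */
#include <linux/string.h>
#include <linux/if_ether.h>

static bool ksz_parse_mac_override(struct dev_info *hw_priv,
                                   const char *macaddr)
{
        u8 addr[ETH_ALEN];
        int i;

        if (strlen(macaddr) < ETH_ALEN * 3 - 1)  /* "xx:xx:xx:xx:xx:xx" */
                return false;

        for (i = 0; i < ETH_ALEN; i++) {
                int hi = hex_to_bin(macaddr[i * 3]);
                int lo = hex_to_bin(macaddr[i * 3 + 1]);

                if (hi < 0 || lo < 0)
                        return false;
                addr[i] = (hi << 4) | lo;
        }

        addr[5] += hw_priv->hw.id;       /* keep the two ports distinct */
        memcpy(hw_priv->hw.override_addr, addr, ETH_ALEN);
        hw_priv->hw.mac_override = 1;
        return true;
}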
6911 struct dev_info *hw_priv;
6951 hw_priv = &info->dev_info;
6952 hw_priv->pdev = pdev;
6954 hw = &hw_priv->hw;
6970 dev_info(&hw_priv->pdev->dev, "%s\n", banner);
6971 dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
7009 hw->parent = hw_priv;
7012 hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
7014 if (ksz_alloc_mem(hw_priv))
7017 hw_priv->hw.id = net_device_present;
7019 spin_lock_init(&hw_priv->hwlock);
7020 mutex_init(&hw_priv->lock);
7023 init_waitqueue_head(&hw_priv->counter[i].counter);
7026 get_mac_addr(hw_priv, macaddr, MAIN_PORT);
7036 get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
7043 hw_priv->wol_support = WOL_SUPPORT;
7044 hw_priv->wol_enable = 0;
7047 INIT_WORK(&hw_priv->mib_read, mib_read_work);
7050 ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
7061 priv->adapter = hw_priv;
7083 memcpy(dev->dev_addr, hw_priv->hw.override_addr,
7104 pci_dev_get(hw_priv->pdev);
7117 ksz_free_mem(hw_priv);
7136 struct dev_info *hw_priv = &info->dev_info;
7140 for (i = 0; i < hw_priv->hw.dev_count; i++) {
7144 if (hw_priv->hw.io)
7145 iounmap(hw_priv->hw.io);
7146 ksz_free_mem(hw_priv);
7147 kfree(hw_priv->hw.ksz_switch);
7148 pci_dev_put(hw_priv->pdev);
7156 struct dev_info *hw_priv = &info->dev_info;
7157 struct ksz_hw *hw = &hw_priv->hw;
7161 if (hw_priv->wol_enable)
7180 struct dev_info *hw_priv = &info->dev_info;
7181 struct ksz_hw *hw = &hw_priv->hw;
7196 if (hw_priv->wol_enable) {
7197 hw_enable_wol(hw, hw_priv->wol_enable, net_addr);