Lines matching defs:hw_priv (local declarations and uses of the struct dev_info *hw_priv adapter pointer)
3097 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3098 struct pci_dev *pdev = hw_priv->pdev;
3115 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3116 struct pci_dev *pdev = hw_priv->pdev;
3137 struct dev_info *hw_priv = container_of(hw, struct dev_info, hw);
3138 struct pci_dev *pdev = hw_priv->pdev;
4261 struct dev_info *hw_priv = priv->adapter;
4262 struct ksz_hw *hw = &hw_priv->hw;
4289 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4307 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev,
4330 dma_buf->dma = dma_map_single(&hw_priv->pdev->dev, skb->data,
4356 * @hw_priv: Network device.
4361 static void transmit_cleanup(struct dev_info *hw_priv, int normal)
4365 struct ksz_hw *hw = &hw_priv->hw;
4371 spin_lock_irq(&hw_priv->hwlock);
4386 dma_unmap_single(&hw_priv->pdev->dev, dma_buf->dma,
4404 spin_unlock_irq(&hw_priv->hwlock);
4413 * @hw_priv: Network device.
4418 static void tx_done(struct dev_info *hw_priv)
4420 struct ksz_hw *hw = &hw_priv->hw;
4423 transmit_cleanup(hw_priv, 1);
4456 struct dev_info *hw_priv = priv->adapter;
4457 struct ksz_hw *hw = &hw_priv->hw;
4482 spin_lock_irq(&hw_priv->hwlock);
4511 spin_unlock_irq(&hw_priv->hwlock);
4531 struct dev_info *hw_priv = priv->adapter;
4532 struct ksz_hw *hw = &hw_priv->hw;
4541 hw_priv = NULL;
4545 if (hw_priv) {
4549 transmit_cleanup(hw_priv, 0);
4552 ksz_init_rx_buffers(hw_priv);
4612 struct dev_info *hw_priv = priv->adapter;
4620 dma_sync_single_for_cpu(&hw_priv->pdev->dev, dma_buf->dma,
4655 static int dev_rcv_packets(struct dev_info *hw_priv)
4659 struct ksz_hw *hw = &hw_priv->hw;
4691 static int port_rcv_packets(struct dev_info *hw_priv)
4695 struct ksz_hw *hw = &hw_priv->hw;
4736 static int dev_rcv_special(struct dev_info *hw_priv)
4740 struct ksz_hw *hw = &hw_priv->hw;
4797 struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);
4798 struct ksz_hw *hw = &hw_priv->hw;
4802 if (unlikely(!hw_priv->dev_rcv(hw_priv))) {
4808 spin_lock_irq(&hw_priv->hwlock);
4810 spin_unlock_irq(&hw_priv->hwlock);
4813 tasklet_schedule(&hw_priv->rx_tasklet);
4819 struct dev_info *hw_priv = from_tasklet(hw_priv, t, tx_tasklet);
4820 struct ksz_hw *hw = &hw_priv->hw;
4824 tx_done(hw_priv);
4827 spin_lock_irq(&hw_priv->hwlock);
4829 spin_unlock_irq(&hw_priv->hwlock);
4863 struct dev_info *hw_priv = priv->adapter;
4864 struct ksz_hw *hw = &hw_priv->hw;
4866 spin_lock(&hw_priv->hwlock);
4872 spin_unlock(&hw_priv->hwlock);
4882 tasklet_schedule(&hw_priv->tx_tasklet);
4887 tasklet_schedule(&hw_priv->rx_tasklet);
4921 spin_unlock(&hw_priv->hwlock);
4935 struct dev_info *hw_priv = priv->adapter;
4937 hw_dis_intr(&hw_priv->hw);
4975 struct dev_info *hw_priv = priv->adapter;
4977 struct ksz_hw *hw = &hw_priv->hw;
4999 if (!hw_priv->wol_enable)
5007 hw_priv->opened--;
5008 if (!(hw_priv->opened)) {
5009 ksz_stop_timer(&hw_priv->mib_timer_info);
5010 flush_work(&hw_priv->mib_read);
5019 tasklet_kill(&hw_priv->rx_tasklet);
5020 tasklet_kill(&hw_priv->tx_tasklet);
5021 free_irq(dev->irq, hw_priv->dev);
5023 transmit_cleanup(hw_priv, 0);
5035 static void hw_cfg_huge_frame(struct dev_info *hw_priv, struct ksz_hw *hw)
5049 hw_priv->dev_rcv = dev_rcv_special;
5053 hw_priv->dev_rcv = port_rcv_packets;
5055 hw_priv->dev_rcv = dev_rcv_packets;
5062 struct dev_info *hw_priv = priv->adapter;
5063 struct ksz_hw *hw = &hw_priv->hw;
5067 hw_priv->dev = dev;
5071 tasklet_setup(&hw_priv->rx_tasklet, rx_proc_task);
5072 tasklet_setup(&hw_priv->tx_tasklet, tx_proc_task);
5083 hw_cfg_huge_frame(hw_priv, hw);
5084 ksz_init_rx_buffers(hw_priv);
5112 struct dev_info *hw_priv = priv->adapter;
5113 struct ksz_hw *hw = &hw_priv->hw;
5129 if (!(hw_priv->opened)) {
5135 hw_priv->counter[i].time = next_jiffies;
5144 hw_clr_wol_pme_status(&hw_priv->hw);
5171 if (!(hw_priv->opened)) {
5177 ksz_start_timer(&hw_priv->mib_timer_info,
5178 hw_priv->mib_timer_info.period);
5181 hw_priv->opened++;
5272 struct dev_info *hw_priv = priv->adapter;
5273 struct ksz_hw *hw = &hw_priv->hw;
5360 struct dev_info *hw_priv = priv->adapter;
5361 struct ksz_hw *hw = &hw_priv->hw;
5367 if (hw_priv->hw.dev_count > 1)
5372 if (hw_priv->hw.dev_count > 1)
5408 struct dev_info *hw_priv = priv->adapter;
5409 struct ksz_hw *hw = &hw_priv->hw;
5417 if (dev != hw_priv->dev)
5429 hw_priv->mtu = hw_mtu;
5448 struct dev_info *hw_priv = priv->adapter;
5449 struct ksz_hw *hw = &hw_priv->hw;
5567 struct dev_info *hw_priv = priv->adapter;
5569 mutex_lock(&hw_priv->lock);
5572 mutex_unlock(&hw_priv->lock);
5594 struct dev_info *hw_priv = priv->adapter;
5627 mutex_lock(&hw_priv->lock);
5649 mutex_unlock(&hw_priv->lock);
5664 struct dev_info *hw_priv = priv->adapter;
5667 mutex_lock(&hw_priv->lock);
5669 mutex_unlock(&hw_priv->lock);
5701 struct dev_info *hw_priv = priv->adapter;
5705 strscpy(info->bus_info, pci_name(hw_priv->pdev),
5754 struct dev_info *hw_priv = priv->adapter;
5755 struct ksz_hw *hw = &hw_priv->hw;
5760 mutex_lock(&hw_priv->lock);
5763 pci_read_config_dword(hw_priv->pdev, len, buf);
5773 mutex_unlock(&hw_priv->lock);
5792 struct dev_info *hw_priv = priv->adapter;
5794 wol->supported = hw_priv->wol_support;
5795 wol->wolopts = hw_priv->wol_enable;
5812 struct dev_info *hw_priv = priv->adapter;
5817 if (wol->wolopts & ~hw_priv->wol_support)
5820 hw_priv->wol_enable = wol->wolopts;
5824 hw_priv->wol_enable |= WAKE_PHY;
5825 hw_enable_wol(&hw_priv->hw, hw_priv->wol_enable, net_addr);
5887 struct dev_info *hw_priv = priv->adapter;
5894 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
5915 struct dev_info *hw_priv = priv->adapter;
5926 eeprom_data[i] = eeprom_read(&hw_priv->hw, i);
5932 eeprom_write(&hw_priv->hw, i, eeprom_data[i]);
5949 struct dev_info *hw_priv = priv->adapter;
5950 struct ksz_hw *hw = &hw_priv->hw;
5982 struct dev_info *hw_priv = priv->adapter;
5983 struct ksz_hw *hw = &hw_priv->hw;
5986 mutex_lock(&hw_priv->lock);
6011 mutex_unlock(&hw_priv->lock);
6031 struct dev_info *hw_priv = priv->adapter;
6032 struct ksz_hw *hw = &hw_priv->hw;
6094 struct dev_info *hw_priv = priv->adapter;
6095 struct ksz_hw *hw = &hw_priv->hw;
6114 struct dev_info *hw_priv = priv->adapter;
6115 struct ksz_hw *hw = &hw_priv->hw;
6137 struct dev_info *hw_priv = priv->adapter;
6138 struct ksz_hw *hw = &hw_priv->hw;
6146 mutex_lock(&hw_priv->lock);
6150 hw_priv->counter[p].read = 1;
6157 mutex_unlock(&hw_priv->lock);
6160 schedule_work(&hw_priv->mib_read);
6165 hw_priv->counter[p].counter,
6166 2 == hw_priv->counter[p].read,
6172 hw_priv->counter[p].counter,
6173 2 == hw_priv->counter[p].read,
6177 hw_priv->counter[p].counter,
6178 2 == hw_priv->counter[p].read,
6205 struct dev_info *hw_priv = priv->adapter;
6206 struct ksz_hw *hw = &hw_priv->hw;
6208 mutex_lock(&hw_priv->lock);
6219 mutex_unlock(&hw_priv->lock);
6263 struct dev_info *hw_priv =
6265 struct ksz_hw *hw = &hw_priv->hw;
6275 if (mib->cnt_ptr || 1 == hw_priv->counter[i].read) {
6280 hw_priv->counter[i].read = 0;
6284 hw_priv->counter[i].read = 2;
6286 &hw_priv->counter[i].counter);
6288 } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
6291 hw_priv->counter[i].read = 1;
6293 hw_priv->counter[i].time = next_jiffies;
6300 hw_priv->counter[i].read = 1;
6307 struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);
6309 mib_read_work(&hw_priv->mib_read);
6312 if (hw_priv->pme_wait) {
6313 if (time_is_before_eq_jiffies(hw_priv->pme_wait)) {
6314 hw_clr_wol_pme_status(&hw_priv->hw);
6315 hw_priv->pme_wait = 0;
6317 } else if (hw_chk_wol_pme_status(&hw_priv->hw)) {
6320 hw_priv->pme_wait = jiffies + HZ * 2;
6323 ksz_update_timer(&hw_priv->mib_timer_info);
6336 struct dev_info *hw_priv = priv->adapter;
6337 struct ksz_hw *hw = &hw_priv->hw;
6468 static void get_mac_addr(struct dev_info *hw_priv, u8 *macaddr, int port)
6494 hw_priv->hw.override_addr[j++] = (u8) num;
6495 hw_priv->hw.override_addr[5] +=
6496 hw_priv->hw.id;
6498 hw_priv->hw.ksz_switch->other_addr[j++] =
6500 hw_priv->hw.ksz_switch->other_addr[5] +=
6501 hw_priv->hw.id;
6509 hw_priv->hw.mac_override = 1;
6541 struct dev_info *hw_priv;
6581 hw_priv = &info->dev_info;
6582 hw_priv->pdev = pdev;
6584 hw = &hw_priv->hw;
6600 dev_info(&hw_priv->pdev->dev, "%s\n", banner);
6601 dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
6639 hw->parent = hw_priv;
6642 hw_priv->mtu = (REGULAR_RX_BUF_SIZE + 3) & ~3;
6644 if (ksz_alloc_mem(hw_priv))
6647 hw_priv->hw.id = net_device_present;
6649 spin_lock_init(&hw_priv->hwlock);
6650 mutex_init(&hw_priv->lock);
6653 init_waitqueue_head(&hw_priv->counter[i].counter);
6656 get_mac_addr(hw_priv, macaddr, MAIN_PORT);
6666 get_mac_addr(hw_priv, mac1addr, OTHER_PORT);
6673 hw_priv->wol_support = WOL_SUPPORT;
6674 hw_priv->wol_enable = 0;
6677 INIT_WORK(&hw_priv->mib_read, mib_read_work);
6680 ksz_init_timer(&hw_priv->mib_timer_info, 500 * HZ / 1000,
6691 priv->adapter = hw_priv;
6713 eth_hw_addr_set(dev, hw_priv->hw.override_addr);
6736 pci_dev_get(hw_priv->pdev);
6749 ksz_free_mem(hw_priv);
6768 struct dev_info *hw_priv = &info->dev_info;
6772 for (i = 0; i < hw_priv->hw.dev_count; i++) {
6776 if (hw_priv->hw.io)
6777 iounmap(hw_priv->hw.io);
6778 ksz_free_mem(hw_priv);
6779 kfree(hw_priv->hw.ksz_switch);
6780 pci_dev_put(hw_priv->pdev);
6788 struct dev_info *hw_priv = &info->dev_info;
6789 struct ksz_hw *hw = &hw_priv->hw;
6793 if (hw_priv->wol_enable)
6812 struct dev_info *hw_priv = &info->dev_info;
6813 struct ksz_hw *hw = &hw_priv->hw;
6828 if (hw_priv->wol_enable) {
6829 hw_enable_wol(hw, hw_priv->wol_enable, net_addr);
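Notes: how hw_priv is obtained and used

The lines above come from a PCI Ethernet driver (the ksz_hw/dev_info names suggest the Micrel KSZ884x driver) in which hw_priv refers to the adapter-wide state, a struct dev_info. The listing shows the recurring ways that pointer is recovered: container_of() on the embedded ksz_hw (lines 3097/3115/3137), from_tasklet()/from_timer() in deferred-work callbacks (lines 4797, 4819, 6307), priv->adapter from the per-net_device private data, and &info->dev_info in the probe/remove/suspend paths. The sketch below is a minimal, self-contained illustration of those recovery patterns, not the driver's actual definitions; the structure layouts and helper names (hw_to_dev_info, rx_task_sketch, mib_monitor_sketch) are simplified assumptions kept only to make the container_of() arithmetic visible.

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/timer.h>

/* Simplified stand-ins for the driver's structures; assumed shapes only. */
struct ksz_hw {
	void *parent;			/* back-pointer to the owning dev_info */
};

struct ksz_timer_info {
	struct timer_list timer;
	int period;
};

struct dev_info {
	struct ksz_hw hw;		/* embedded, so container_of() can recover the owner */
	struct tasklet_struct rx_tasklet;
	struct tasklet_struct tx_tasklet;
	struct ksz_timer_info mib_timer_info;
};

struct dev_priv {
	struct dev_info *adapter;	/* per-net_device private data */
};

/* Pattern 1 (lines 3097/3115/3137): recover dev_info from the embedded ksz_hw. */
static struct dev_info *hw_to_dev_info(struct ksz_hw *hw)
{
	return container_of(hw, struct dev_info, hw);
}

/* Pattern 2 (lines 4797/4819): recover dev_info inside a tasklet callback. */
static void rx_task_sketch(struct tasklet_struct *t)
{
	struct dev_info *hw_priv = from_tasklet(hw_priv, t, rx_tasklet);

	(void)hw_priv;			/* the real handler drains RX descriptors here */
}

/* Pattern 3 (line 6307): recover dev_info inside a timer callback. */
static void mib_monitor_sketch(struct timer_list *t)
{
	struct dev_info *hw_priv = from_timer(hw_priv, t, mib_timer_info.timer);

	(void)hw_priv;			/* the real handler schedules MIB counter reads */
}

The dma_map_single()/dma_unmap_single()/dma_sync_single_for_cpu() lines (4289, 4307, 4330, 4386, 4620) all hang the streaming DMA mappings off &hw_priv->pdev->dev, i.e. the PCI device rather than the net_device owns the mappings. A minimal sketch of that pattern, again with hypothetical helper names and assumed parameters, follows; production code should also check dma_mapping_error(), which the excerpted lines do not show.

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/skbuff.h>

/* Hypothetical helper: map an skb for transmit against the PCI device. */
static int map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, dma_addr_t *dma)
{
	*dma = dma_map_single(&pdev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&pdev->dev, *dma))
		return -ENOMEM;
	return 0;
}

/* Matching teardown, mirroring the dma_unmap_single() call shown at line 4386. */
static void unmap_tx_skb(struct pci_dev *pdev, dma_addr_t dma, size_t len)
{
	dma_unmap_single(&pdev->dev, dma, len, DMA_TO_DEVICE);
}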