Lines Matching defs:adapter (drivers/net/vmxnet3/vmxnet3_drv.c, Linux vmxnet3 Ethernet driver)
50 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac);
56 vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
58 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
63 vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
65 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
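The two helpers above encode the per-vector mask layout: every interrupt vector owns its own IMR register in BAR0, spaced 8 bytes apart, and writing 0 unmasks the vector while 1 masks it. A minimal sketch of that offset arithmetic, assuming a flat array in place of the memory-mapped BAR0 window (the register base value here is illustrative, not the driver's constant):

    #include <stdint.h>
    #include <stdio.h>

    #define REG_IMR 0x0 /* illustrative base of the first mask register */

    static uint32_t bar0[64]; /* stand-in for the BAR0 MMIO window */

    /* mirrors VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, v):
     * 0 unmasks (enables) the vector, 1 masks (disables) it */
    static void imr_write(unsigned intr_idx, uint32_t v)
    {
        bar0[(REG_IMR + intr_idx * 8) / sizeof(uint32_t)] = v;
    }

    int main(void)
    {
        imr_write(0, 0); /* enable vector 0 */
        imr_write(3, 1); /* disable vector 3 */
        printf("IMR0=%u IMR3=%u\n", (unsigned)bar0[0], (unsigned)bar0[6]);
        return 0;
    }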
73 vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
77 for (i = 0; i < adapter->intr.num_intrs; i++)
78 vmxnet3_enable_intr(adapter, i);
79 if (!VMXNET3_VERSION_GE_6(adapter) ||
80 !adapter->queuesExtEnabled) {
81 adapter->shared->devRead.intrConf.intrCtrl &=
84 adapter->shared->devReadExt.intrConfExt.intrCtrl &=
91 vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
95 if (!VMXNET3_VERSION_GE_6(adapter) ||
96 !adapter->queuesExtEnabled) {
97 adapter->shared->devRead.intrConf.intrCtrl |=
100 adapter->shared->devReadExt.intrConfExt.intrCtrl |=
103 for (i = 0; i < adapter->intr.num_intrs; i++)
104 vmxnet3_disable_intr(adapter, i);
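Note the mirrored ordering in the two functions above: enable_all unmasks each vector before clearing the device-level disable bit in the shared intrCtrl word (in devRead or devReadExt, depending on version 6+ with queuesExtEnabled), while disable_all raises that bit first and only then masks the vectors. A hedged model of that ordering, with a plain struct standing in for the shared memory:

    #include <stdbool.h>
    #include <stdint.h>

    #define IC_DISABLE_ALL 0x1 /* stand-in for VMXNET3_IC_DISABLE_ALL */

    struct model {
        uint32_t intrCtrl;  /* stand-in for shared->devRead.intrConf.intrCtrl */
        bool masked[8];     /* stand-in for the per-vector IMR registers */
        unsigned num_intrs;
    };

    static void enable_all(struct model *m)
    {
        /* unmask the vectors first, then let the device raise interrupts */
        for (unsigned i = 0; i < m->num_intrs; i++)
            m->masked[i] = false;
        m->intrCtrl &= ~IC_DISABLE_ALL;
    }

    static void disable_all(struct model *m)
    {
        /* tell the device to stop first, then quiesce each vector */
        m->intrCtrl |= IC_DISABLE_ALL;
        for (unsigned i = 0; i < m->num_intrs; i++)
            m->masked[i] = true;
    }

    int main(void)
    {
        struct model m = { .num_intrs = 4 };
        disable_all(&m);
        enable_all(&m);
        return (int)m.intrCtrl; /* 0 after enable_all */
    }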
109 vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
111 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
116 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
123 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
126 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
131 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
134 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
139 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
143 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
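The tq helpers derive the subqueue number from tq - adapter->tx_queue: element-wise pointer subtraction against the base of the tx_queue array recovers the queue's index, which is exactly the value passed to netif_*_subqueue(). A small standalone illustration of the idiom:

    #include <stddef.h>
    #include <stdio.h>

    struct txq { int unused; };

    int main(void)
    {
        struct txq tx_queue[4];
        struct txq *tq = &tx_queue[2];

        /* pointer difference is in elements, not bytes */
        ptrdiff_t idx = tq - tx_queue;
        printf("subqueue index = %td\n", idx); /* prints 2 */
        return 0;
    }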
165 vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
171 spin_lock_irqsave(&adapter->cmd_lock, flags);
172 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
173 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
174 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
176 adapter->link_speed = ret >> 16;
178 netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
179 adapter->link_speed);
180 netif_carrier_on(adapter->netdev);
183 for (i = 0; i < adapter->num_tx_queues; i++)
184 vmxnet3_tq_start(&adapter->tx_queue[i],
185 adapter);
188 netdev_info(adapter->netdev, "NIC Link is Down\n");
189 netif_carrier_off(adapter->netdev);
192 for (i = 0; i < adapter->num_tx_queues; i++)
193 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
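vmxnet3_check_link issues VMXNET3_CMD_GET_LINK under cmd_lock and decodes the 32-bit reply: the low bit indicates link-up and the upper 16 bits carry the speed in Mbps, hence adapter->link_speed = ret >> 16 above. A sketch of that decoding (the bit layout matches how the driver consumes the reply):

    #include <stdint.h>
    #include <stdio.h>

    /* reply layout: bit 0 = link up, bits 31:16 = speed in Mbps */
    static void decode_link(uint32_t ret)
    {
        if (ret & 1)
            printf("NIC Link is Up %u Mbps\n", (unsigned)(ret >> 16));
        else
            printf("NIC Link is Down\n");
    }

    int main(void)
    {
        decode_link((10000u << 16) | 1); /* 10 Gbps, up */
        decode_link(0);                  /* down */
        return 0;
    }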
199 vmxnet3_process_events(struct vmxnet3_adapter *adapter)
203 u32 events = le32_to_cpu(adapter->shared->ecr);
207 vmxnet3_ack_events(adapter, events);
211 vmxnet3_check_link(adapter, true);
215 spin_lock_irqsave(&adapter->cmd_lock, flags);
216 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
218 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
220 for (i = 0; i < adapter->num_tx_queues; i++)
221 if (adapter->tqd_start[i].status.stopped)
222 dev_err(&adapter->netdev->dev,
224 adapter->netdev->name, i, le32_to_cpu(
225 adapter->tqd_start[i].status.error));
226 for (i = 0; i < adapter->num_rx_queues; i++)
227 if (adapter->rqd_start[i].status.stopped)
228 dev_err(&adapter->netdev->dev,
230 adapter->netdev->name, i,
231 adapter->rqd_start[i].status.error);
233 schedule_work(&adapter->work);
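The event path reads a snapshot of the ECR word from shared memory, acks it by writing the same bits back to VMXNET3_REG_ECR, then dispatches: a link event re-checks link state, and tx/rx queue-error events fetch the per-queue status, log it, and schedule the reset worker. A hedged outline of that dispatch (the bit values below are illustrative stand-ins, not the driver's VMXNET3_ECR_* constants):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ECR_LINK   0x1 /* illustrative */
    #define ECR_TQERR  0x2
    #define ECR_RQERR  0x4

    static void process_events(uint32_t events, bool *schedule_reset)
    {
        if (!events)
            return;
        printf("ack 0x%x by writing it back to the ECR register\n",
               (unsigned)events);

        if (events & ECR_LINK)
            printf("link event: re-read link state, start/stop tx queues\n");

        if (events & (ECR_TQERR | ECR_RQERR)) {
            printf("queue error: read per-queue status, log it\n");
            *schedule_reset = true; /* the driver schedules adapter->work */
        }
    }

    int main(void)
    {
        bool reset = false;
        process_events(ECR_LINK | ECR_TQERR, &reset);
        return reset ? 1 : 0;
    }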
359 struct pci_dev *pdev, struct vmxnet3_adapter *adapter,
402 struct vmxnet3_adapter *adapter)
419 &gdesc->tcd), tq, adapter->pdev,
420 adapter, &bq);
430 if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
433 netif_carrier_ok(adapter->netdev))) {
434 vmxnet3_tq_wake(tq, adapter);
444 struct vmxnet3_adapter *adapter)
459 vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
487 struct vmxnet3_adapter *adapter)
490 dma_free_coherent(&adapter->pdev->dev, tq->tx_ring.size *
496 dma_free_coherent(&adapter->pdev->dev,
502 dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size *
514 vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
518 for (i = 0; i < adapter->num_tx_queues; i++)
519 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
525 struct vmxnet3_adapter *adapter)
555 struct vmxnet3_adapter *adapter)
560 tq->tx_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
564 netdev_err(adapter->netdev, "failed to allocate tx ring\n");
568 tq->data_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
572 netdev_err(adapter->netdev, "failed to allocate tx data ring\n");
576 tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev,
580 netdev_err(adapter->netdev, "failed to allocate tx comp ring\n");
586 dev_to_node(&adapter->pdev->dev));
593 vmxnet3_tq_destroy(tq, adapter);
598 vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
602 for (i = 0; i < adapter->num_tx_queues; i++)
603 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
614 int num_to_alloc, struct vmxnet3_adapter *adapter)
641 rbi->skb = __netdev_alloc_skb_ip_align(adapter->netdev,
650 &adapter->pdev->dev,
653 if (dma_mapping_error(&adapter->pdev->dev,
675 &adapter->pdev->dev,
678 if (dma_mapping_error(&adapter->pdev->dev,
707 netdev_dbg(adapter->netdev,
736 struct vmxnet3_adapter *adapter)
763 netdev_dbg(adapter->netdev,
790 tbi->dma_addr = dma_map_single(&adapter->pdev->dev,
793 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
805 netdev_dbg(adapter->netdev,
832 tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
835 if (dma_mapping_error(&adapter->pdev->dev, tbi->dma_addr))
847 netdev_dbg(adapter->netdev,
871 vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
875 for (i = 0; i < adapter->num_tx_queues; i++)
876 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
900 struct vmxnet3_adapter *adapter)
905 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
922 if (VMXNET3_VERSION_GE_4(adapter) &&
1003 struct vmxnet3_adapter *adapter)
1012 netdev_dbg(adapter->netdev,
1082 struct vmxnet3_adapter *adapter, struct net_device *netdev)
1150 ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
1179 netdev_dbg(adapter->netdev,
1181 " next2fill %u\n", adapter->netdev->name,
1184 vmxnet3_tq_stop(tq, adapter);
1190 vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
1193 if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
1209 if (VMXNET3_VERSION_GE_4(adapter) && skb->encapsulation) {
1211 if (VMXNET3_VERSION_GE_7(adapter)) {
1229 if (VMXNET3_VERSION_GE_4(adapter) &&
1233 if (VMXNET3_VERSION_GE_7(adapter)) {
1278 netdev_dbg(adapter->netdev,
1288 VMXNET3_WRITE_BAR0_REG(adapter,
1289 adapter->tx_prod_offset + tq->qid * 8,
1304 vmxnet3_create_pp(struct vmxnet3_adapter *adapter,
1307 bool xdp_prog = vmxnet3_xdp_enabled(adapter);
1313 .dev = &adapter->pdev->dev,
1325 err = xdp_rxq_info_reg(&rq->xdp_rxq, adapter->netdev, rq->qid,
1364 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1366 BUG_ON(skb->queue_mapping >= adapter->num_tx_queues);
1368 &adapter->tx_queue[skb->queue_mapping],
1369 adapter, netdev);
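The xmit entry point trusts the subqueue the stack already chose: skb->queue_mapping indexes straight into tx_queue[]. A minimal model of the dispatch, with the bound check written out explicitly:

    #include <assert.h>
    #include <stdint.h>

    struct txq { unsigned id; };

    struct adapter_model {
        struct txq tx_queue[8];
        unsigned num_tx_queues;
    };

    /* the stack picks queue_mapping below real_num_tx_queues, so it is
     * already a valid index; the assert mirrors the driver's BUG_ON */
    static struct txq *pick_txq(struct adapter_model *a, uint16_t queue_mapping)
    {
        assert(queue_mapping < a->num_tx_queues);
        return &a->tx_queue[queue_mapping];
    }

    int main(void)
    {
        struct adapter_model a = { .num_tx_queues = 4 };
        return (int)pick_txq(&a, 3)->id; /* queue 3 selected, id is 0 */
    }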
1374 vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
1378 if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
1422 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
1448 vmxnet3_get_hdr_len(struct vmxnet3_adapter *adapter, struct sk_buff *skb,
1504 struct vmxnet3_adapter *adapter, int quota)
1507 adapter->rx_prod_offset, adapter->rx_prod2_offset
1548 ring_idx = VMXNET3_GET_RING_IDX(adapter, rcd->rqID);
1558 vmxnet3_rx_error(rq, rcd, ctx, adapter);
1562 if (rcd->sop && rcd->eop && vmxnet3_xdp_enabled(adapter)) {
1566 if (VMXNET3_RX_DATA_RING(adapter, rcd->rqID)) {
1574 act = vmxnet3_process_xdp(adapter, rq, rcd, rbi, rxd,
1602 netdev_dbg(adapter->netdev,
1612 VMXNET3_RX_DATA_RING(adapter, rcd->rqID);
1615 if (rxDataRingUsed && vmxnet3_xdp_enabled(adapter)) {
1621 act = vmxnet3_process_xdp_small(adapter, rq,
1633 new_skb = netdev_alloc_skb_ip_align(adapter->netdev,
1646 if (rxDataRingUsed && adapter->rxdataring_enabled) {
1659 dma_map_single(&adapter->pdev->dev,
1662 if (dma_mapping_error(&adapter->pdev->dev,
1676 dma_unmap_single(&adapter->pdev->dev,
1691 if (VMXNET3_VERSION_GE_2(adapter) &&
1736 new_dma_addr = dma_map_page(&adapter->pdev->dev,
1740 if (dma_mapping_error(&adapter->pdev->dev,
1750 dma_unmap_page(&adapter->pdev->dev,
1768 u32 mtu = adapter->netdev->mtu;
1773 (adapter->netdev->features & NETIF_F_RXHASH)) {
1796 vmxnet3_rx_csum(adapter, skb,
1798 skb->protocol = eth_type_trans(skb, adapter->netdev);
1800 !(adapter->netdev->features & NETIF_F_LRO))
1811 hlen = vmxnet3_get_hdr_len(adapter, skb,
1832 if ((adapter->netdev->features & NETIF_F_LRO) &&
1862 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_OOORX_COMP)))
1893 VMXNET3_WRITE_BAR0_REG(adapter,
1911 struct vmxnet3_adapter *adapter)
1938 dma_unmap_single(&adapter->pdev->dev, rxd->addr,
1944 dma_unmap_page(&adapter->pdev->dev, rxd->addr,
1962 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1966 for (i = 0; i < adapter->num_rx_queues; i++)
1967 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1968 rcu_assign_pointer(adapter->xdp_bpf_prog, NULL);
1973 struct vmxnet3_adapter *adapter)
1989 dma_free_coherent(&adapter->pdev->dev,
2004 dma_free_coherent(&adapter->pdev->dev,
2011 dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size
2023 vmxnet3_rq_destroy_all_rxdataring(struct vmxnet3_adapter *adapter)
2027 for (i = 0; i < adapter->num_rx_queues; i++) {
2028 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2031 dma_free_coherent(&adapter->pdev->dev,
2044 struct vmxnet3_adapter *adapter)
2052 if (i % adapter->rx_buf_per_pkt == 0) {
2053 rq->buf_info[0][i].buf_type = vmxnet3_xdp_enabled(adapter) ?
2056 rq->buf_info[0][i].len = adapter->skb_buf_size;
2077 err = vmxnet3_create_pp(adapter, rq,
2083 adapter) == 0) {
2091 vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
2108 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
2112 for (i = 0; i < adapter->num_rx_queues; i++) {
2113 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
2115 dev_err(&adapter->netdev->dev, "%s: failed to "
2117 adapter->netdev->name, i);
2127 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
2137 &adapter->pdev->dev, sz,
2141 netdev_err(adapter->netdev,
2147 if ((adapter->rxdataring_enabled) && (rq->data_ring.desc_size != 0)) {
2150 dma_alloc_coherent(&adapter->pdev->dev, sz,
2154 netdev_err(adapter->netdev,
2156 adapter->rxdataring_enabled = false;
2164 rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz,
2168 netdev_err(adapter->netdev, "failed to allocate rx comp ring\n");
2174 dev_to_node(&adapter->pdev->dev));
2184 vmxnet3_rq_destroy(rq, adapter);
2190 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
2194 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
2196 for (i = 0; i < adapter->num_rx_queues; i++) {
2197 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
2199 dev_err(&adapter->netdev->dev,
2201 adapter->netdev->name, i);
2206 if (!adapter->rxdataring_enabled)
2207 vmxnet3_rq_destroy_all_rxdataring(adapter);
2211 vmxnet3_rq_destroy_all(adapter);
2219 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
2222 if (unlikely(adapter->shared->ecr))
2223 vmxnet3_process_events(adapter);
2224 for (i = 0; i < adapter->num_tx_queues; i++)
2225 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
2227 for (i = 0; i < adapter->num_rx_queues; i++)
2228 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
2229 adapter, budget);
2241 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
2245 vmxnet3_enable_all_intrs(rx_queue->adapter);
2260 struct vmxnet3_adapter *adapter = rq->adapter;
2266 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
2268 &adapter->tx_queue[rq - adapter->rx_queue];
2269 vmxnet3_tq_tx_complete(tq, adapter);
2272 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
2276 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
2293 struct vmxnet3_adapter *adapter = tq->adapter;
2295 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2296 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
2299 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2301 for (i = 0; i < adapter->num_tx_queues; i++) {
2302 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
2303 vmxnet3_tq_tx_complete(txq, adapter);
2306 vmxnet3_tq_tx_complete(tq, adapter);
2308 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
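The MSI-X handlers above all follow one discipline: when the device is not auto-masking (mask_mode == VMXNET3_IMM_ACTIVE) the handler masks its own vector on entry, does completion work (its own queue, or every tx queue under VMXNET3_INTR_TXSHARE), and unconditionally re-arms the vector before returning. A compact model, assuming a boolean array in place of the IMR registers:

    #include <stdbool.h>
    #include <stdio.h>

    enum mask_mode { IMM_AUTO, IMM_ACTIVE }; /* names mirror VMXNET3_IMM_* */

    static bool vec_masked[8];

    static void msix_tx_handler(enum mask_mode mode, unsigned vec,
                                bool txshare, unsigned num_tx_queues)
    {
        /* IMM_ACTIVE: hardware does not auto-mask, so do it here */
        if (mode == IMM_ACTIVE)
            vec_masked[vec] = true;

        if (txshare)
            for (unsigned i = 0; i < num_tx_queues; i++)
                printf("reap completions on tx queue %u\n", i);
        else
            printf("reap completions on this vector's tx queue\n");

        vec_masked[vec] = false; /* always re-arm on exit */
    }

    int main(void)
    {
        msix_tx_handler(IMM_ACTIVE, 1, true, 2);
        return 0;
    }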
2323 struct vmxnet3_adapter *adapter = rq->adapter;
2326 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2327 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
2350 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2353 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2354 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
2356 if (adapter->shared->ecr)
2357 vmxnet3_process_events(adapter);
2359 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
2372 struct vmxnet3_adapter *adapter = netdev_priv(dev);
2374 if (adapter->intr.type == VMXNET3_IT_INTX) {
2375 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
2383 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
2384 vmxnet3_disable_all_intrs(adapter);
2386 napi_schedule(&adapter->rx_queue[0].napi);
2397 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2399 switch (adapter->intr.type) {
2403 for (i = 0; i < adapter->num_rx_queues; i++)
2404 vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
2410 vmxnet3_intr(0, adapter->netdev);
2418 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
2420 struct vmxnet3_intr *intr = &adapter->intr;
2425 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2426 for (i = 0; i < adapter->num_tx_queues; i++) {
2427 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2428 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
2429 adapter->netdev->name, vector);
2433 adapter->tx_queue[i].name,
2434 &adapter->tx_queue[i]);
2436 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
2437 adapter->netdev->name, vector);
2440 dev_err(&adapter->netdev->dev,
2443 adapter->tx_queue[i].name, err);
2449 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
2450 for (; i < adapter->num_tx_queues; i++)
2451 adapter->tx_queue[i].comp_ring.intr_idx
2456 adapter->tx_queue[i].comp_ring.intr_idx
2460 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
2463 for (i = 0; i < adapter->num_rx_queues; i++) {
2464 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
2465 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
2466 adapter->netdev->name, vector);
2468 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
2469 adapter->netdev->name, vector);
2472 adapter->rx_queue[i].name,
2473 &(adapter->rx_queue[i]));
2475 netdev_err(adapter->netdev,
2478 adapter->rx_queue[i].name, err);
2482 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
2486 adapter->netdev->name, vector);
2489 intr->event_msi_vector_name, adapter->netdev);
2493 adapter->num_rx_queues = 1;
2494 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
2495 adapter->netdev->name, adapter->netdev);
2498 adapter->num_rx_queues = 1;
2499 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
2500 IRQF_SHARED, adapter->netdev->name,
2501 adapter->netdev);
2507 netdev_err(adapter->netdev,
2512 for (i = 0; i < adapter->num_rx_queues; i++) {
2513 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2515 rq->qid2 = i + adapter->num_rx_queues;
2516 rq->dataRingQid = i + 2 * adapter->num_rx_queues;
2522 if (adapter->intr.type != VMXNET3_IT_MSIX) {
2523 adapter->intr.event_intr_idx = 0;
2524 for (i = 0; i < adapter->num_tx_queues; i++)
2525 adapter->tx_queue[i].comp_ring.intr_idx = 0;
2526 adapter->rx_queue[0].comp_ring.intr_idx = 0;
2529 netdev_info(adapter->netdev,
2539 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
2541 struct vmxnet3_intr *intr = &adapter->intr;
2550 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
2551 for (i = 0; i < adapter->num_tx_queues; i++) {
2553 &(adapter->tx_queue[i]));
2554 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
2559 for (i = 0; i < adapter->num_rx_queues; i++) {
2561 &(adapter->rx_queue[i]));
2565 adapter->netdev);
2571 free_irq(adapter->pdev->irq, adapter->netdev);
2574 free_irq(adapter->pdev->irq, adapter->netdev);
2583 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
2585 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2591 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2599 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2602 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2606 spin_lock_irqsave(&adapter->cmd_lock, flags);
2607 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2609 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2612 set_bit(vid, adapter->active_vlans);
2621 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2624 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2628 spin_lock_irqsave(&adapter->cmd_lock, flags);
2629 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2631 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2634 clear_bit(vid, adapter->active_vlans);
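Both VLAN callbacks update the same vfTable bitmap in rxFilterConf, one bit per VLAN id with 32 ids per u32 word, then push it to the device with the UPDATE_VLAN_FILTERS command under cmd_lock; the active_vlans bitmap mirrors the state so vmxnet3_restore_vlan can replay it after a reset. The word/bit arithmetic in a standalone sketch (the helper names are stand-ins for the driver's macro):

    #include <stdint.h>
    #include <stdio.h>

    #define VLAN_N_VID 4096

    static uint32_t vfTable[VLAN_N_VID / 32]; /* as in devRead.rxFilterConf */

    /* word = vid / 32, bit = vid % 32 */
    static void vftable_set(uint16_t vid)   { vfTable[vid >> 5] |=  1u << (vid & 31); }
    static void vftable_clear(uint16_t vid) { vfTable[vid >> 5] &= ~(1u << (vid & 31)); }

    int main(void)
    {
        vftable_set(100);
        vftable_set(101);
        vftable_clear(100);
        printf("word 3 = 0x%08x\n", (unsigned)vfTable[3]); /* bit 5 set for vid 101 */
        return 0;
    }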
2666 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2669 &adapter->shared->devRead.rxFilterConf;
2676 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2681 vmxnet3_restore_vlan(adapter);
2697 &adapter->pdev->dev,
2701 if (!dma_mapping_error(&adapter->pdev->dev,
2721 spin_lock_irqsave(&adapter->cmd_lock, flags);
2724 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2726 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2730 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2732 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2735 dma_unmap_single(&adapter->pdev->dev, new_table_pa,
2741 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2745 for (i = 0; i < adapter->num_rx_queues; i++)
2746 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2751 * Set up driver_shared based on settings in adapter.
2755 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2757 struct Vmxnet3_DriverShared *shared = adapter->shared;
2778 devRead->misc.ddPA = cpu_to_le64(adapter->adapter_pa);
2782 if (adapter->netdev->features & NETIF_F_RXCSUM)
2785 if (adapter->netdev->features & NETIF_F_LRO) {
2789 if (adapter->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2792 if (adapter->netdev->features & (NETIF_F_GSO_UDP_TUNNEL |
2796 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2797 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2799 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2800 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2803 devRead->misc.numTxQueues = adapter->num_tx_queues;
2804 for (i = 0; i < adapter->num_tx_queues; i++) {
2805 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2806 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2807 tqc = &adapter->tqd_start[i].conf;
2821 devRead->misc.numRxQueues = adapter->num_rx_queues;
2822 for (i = 0; i < adapter->num_rx_queues; i++) {
2823 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2824 rqc = &adapter->rqd_start[i].conf;
2834 if (VMXNET3_VERSION_GE_3(adapter)) {
2843 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2845 if (adapter->rss) {
2846 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2849 devRead->misc.numRxQueues = adapter->num_rx_queues;
2861 i, adapter->num_rx_queues);
2866 cpu_to_le64(adapter->rss_conf_pa);
2872 if (!VMXNET3_VERSION_GE_6(adapter) ||
2873 !adapter->queuesExtEnabled) {
2874 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2876 devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2877 for (i = 0; i < adapter->intr.num_intrs; i++)
2878 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2880 devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2883 devReadExt->intrConfExt.autoMask = adapter->intr.mask_mode ==
2885 devReadExt->intrConfExt.numIntrs = adapter->intr.num_intrs;
2886 for (i = 0; i < adapter->intr.num_intrs; i++)
2887 devReadExt->intrConfExt.modLevels[i] = adapter->intr.mod_levels[i];
2889 devReadExt->intrConfExt.eventIntrIdx = adapter->intr.event_intr_idx;
2895 vmxnet3_restore_vlan(adapter);
2896 vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2902 vmxnet3_init_bufsize(struct vmxnet3_adapter *adapter)
2904 struct Vmxnet3_DriverShared *shared = adapter->shared;
2908 if (!VMXNET3_VERSION_GE_7(adapter))
2911 cmdInfo->ringBufSize = adapter->ringBufSize;
2912 spin_lock_irqsave(&adapter->cmd_lock, flags);
2913 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2915 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2919 vmxnet3_init_coalesce(struct vmxnet3_adapter *adapter)
2921 struct Vmxnet3_DriverShared *shared = adapter->shared;
2925 if (!VMXNET3_VERSION_GE_3(adapter))
2928 spin_lock_irqsave(&adapter->cmd_lock, flags);
2931 cpu_to_le32(sizeof(*adapter->coal_conf));
2932 cmdInfo->varConf.confPA = cpu_to_le64(adapter->coal_conf_pa);
2934 if (adapter->default_coal_mode) {
2935 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2938 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2942 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2946 vmxnet3_init_rssfields(struct vmxnet3_adapter *adapter)
2948 struct Vmxnet3_DriverShared *shared = adapter->shared;
2952 if (!VMXNET3_VERSION_GE_4(adapter))
2955 spin_lock_irqsave(&adapter->cmd_lock, flags);
2957 if (adapter->default_rss_fields) {
2958 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2960 adapter->rss_fields =
2961 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2963 if (VMXNET3_VERSION_GE_7(adapter)) {
2964 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP4 ||
2965 adapter->rss_fields & VMXNET3_RSS_FIELDS_UDPIP6) &&
2966 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2968 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_UDP_RSS;
2970 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_UDP_RSS);
2973 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP4) &&
2974 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2976 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV4;
2978 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV4);
2981 if ((adapter->rss_fields & VMXNET3_RSS_FIELDS_ESPIP6) &&
2982 vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
2984 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_ESP_RSS_IPV6;
2986 adapter->dev_caps[0] &= ~(1UL << VMXNET3_CAP_ESP_RSS_IPV6);
2989 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
2990 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
2991 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2993 cmdInfo->setRssFields = adapter->rss_fields;
2994 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2999 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3001 adapter->rss_fields =
3002 VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3005 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3009 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
3015 netdev_dbg(adapter->netdev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
3016 " ring sizes %u %u %u\n", adapter->netdev->name,
3017 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
3018 adapter->tx_queue[0].tx_ring.size,
3019 adapter->rx_queue[0].rx_ring[0].size,
3020 adapter->rx_queue[0].rx_ring[1].size);
3022 vmxnet3_tq_init_all(adapter);
3023 err = vmxnet3_rq_init_all(adapter);
3025 netdev_err(adapter->netdev,
3030 err = vmxnet3_request_irqs(adapter);
3032 netdev_err(adapter->netdev,
3037 vmxnet3_setup_driver_shared(adapter);
3039 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
3040 adapter->shared_pa));
3041 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
3042 adapter->shared_pa));
3043 spin_lock_irqsave(&adapter->cmd_lock, flags);
3044 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3046 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3047 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3050 netdev_err(adapter->netdev,
3056 vmxnet3_init_bufsize(adapter);
3057 vmxnet3_init_coalesce(adapter);
3058 vmxnet3_init_rssfields(adapter);
3060 for (i = 0; i < adapter->num_rx_queues; i++) {
3061 VMXNET3_WRITE_BAR0_REG(adapter,
3062 adapter->rx_prod_offset + i * VMXNET3_REG_ALIGN,
3063 adapter->rx_queue[i].rx_ring[0].next2fill);
3064 VMXNET3_WRITE_BAR0_REG(adapter, (adapter->rx_prod2_offset +
3066 adapter->rx_queue[i].rx_ring[1].next2fill);
3070 vmxnet3_set_mc(adapter->netdev);
3076 vmxnet3_check_link(adapter, true);
3077 netif_tx_wake_all_queues(adapter->netdev);
3078 for (i = 0; i < adapter->num_rx_queues; i++)
3079 napi_enable(&adapter->rx_queue[i].napi);
3080 vmxnet3_enable_all_intrs(adapter);
3081 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3085 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
3086 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
3087 vmxnet3_free_irqs(adapter);
3091 vmxnet3_rq_cleanup_all(adapter);
3097 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
3100 spin_lock_irqsave(&adapter->cmd_lock, flags);
3101 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
3102 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3107 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
3111 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
3115 spin_lock_irqsave(&adapter->cmd_lock, flags);
3116 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3118 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3119 vmxnet3_disable_all_intrs(adapter);
3121 for (i = 0; i < adapter->num_rx_queues; i++)
3122 napi_disable(&adapter->rx_queue[i].napi);
3123 netif_tx_disable(adapter->netdev);
3124 adapter->link_speed = 0;
3125 netif_carrier_off(adapter->netdev);
3127 vmxnet3_tq_cleanup_all(adapter);
3128 vmxnet3_rq_cleanup_all(adapter);
3129 vmxnet3_free_irqs(adapter);
3135 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, const u8 *mac)
3140 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
3143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
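vmxnet3_write_mac_addr splits the six MAC bytes across two 32-bit registers: the first four bytes go to MACL and the remaining two to the low half of MACH (vmxnet3_read_mac_addr, further down, reverses this). A standalone sketch of the packing, assuming the little-endian layout the driver's register interface uses:

    #include <stdint.h>
    #include <stdio.h>

    static void split_mac(const uint8_t mac[6], uint32_t *macl, uint32_t *mach)
    {
        /* little-endian packing: mac[0] lands in the low byte of MACL */
        *macl = (uint32_t)mac[0] | (uint32_t)mac[1] << 8 |
                (uint32_t)mac[2] << 16 | (uint32_t)mac[3] << 24;
        *mach = (uint32_t)mac[4] | (uint32_t)mac[5] << 8;
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x0c, 0x29, 0xaa, 0xbb, 0xcc };
        uint32_t macl, mach;

        split_mac(mac, &macl, &mach);
        printf("MACL=0x%08x MACH=0x%08x\n", (unsigned)macl, (unsigned)mach);
        /* prints MACL=0xaa290c00 MACH=0x0000ccbb */
        return 0;
    }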
3151 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3154 vmxnet3_write_mac_addr(adapter, addr->sa_data);
3163 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter)
3167 struct pci_dev *pdev = adapter->pdev;
3171 dev_err(&pdev->dev, "Failed to enable adapter: error %d\n", err);
3179 "Failed to request region for adapter: error %d\n", err);
3187 adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
3188 if (!adapter->hw_addr0) {
3196 adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
3197 if (!adapter->hw_addr1) {
3205 iounmap(adapter->hw_addr0);
3215 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
3217 BUG_ON(!adapter->pdev);
3219 iounmap(adapter->hw_addr0);
3220 iounmap(adapter->hw_addr1);
3221 pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
3222 pci_disable_device(adapter->pdev);
3227 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
3231 if (!VMXNET3_VERSION_GE_7(adapter)) {
3232 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
3234 adapter->skb_buf_size = adapter->netdev->mtu +
3236 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
3237 adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
3239 adapter->rx_buf_per_pkt = 1;
3241 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
3242 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
3244 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
3247 adapter->skb_buf_size = min((int)adapter->netdev->mtu + VMXNET3_MAX_ETH_HDR_SIZE,
3249 adapter->rx_buf_per_pkt = 1;
3250 adapter->ringBufSize.ring1BufSizeType0 = cpu_to_le16(adapter->skb_buf_size);
3251 adapter->ringBufSize.ring1BufSizeType1 = 0;
3252 adapter->ringBufSize.ring2BufSizeType1 = cpu_to_le16(PAGE_SIZE);
3259 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
3260 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
3264 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
3269 if (VMXNET3_VERSION_GE_7(adapter)) {
3275 for (i = 0; i < adapter->num_rx_queues; i++) {
3276 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
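Pre-version-7 sizing, as lines 3241-3244 of the file show: a packet larger than the biggest skb buffer gets one skb-sized buffer plus enough page buffers to cover the rest, and ring 0's size is kept a multiple of rx_buf_per_pkt times the ring alignment so one packet's buffer group never wraps. A sketch of the arithmetic, with illustrative constants and omitting the extra header allowance the driver adds to the remainder:

    #include <stdio.h>

    #define PAGE_SZ           4096 /* illustrative */
    #define MAX_SKB_BUF_SIZE  3968 /* illustrative stand-in */
    #define RING_SIZE_ALIGN   32   /* illustrative stand-in */

    static unsigned rx_buf_per_pkt(unsigned mtu)
    {
        if (mtu <= MAX_SKB_BUF_SIZE)
            return 1; /* whole packet fits one skb buffer */
        unsigned rest = mtu - MAX_SKB_BUF_SIZE;
        return 1 + (rest + PAGE_SZ - 1) / PAGE_SZ; /* plus page buffers */
    }

    /* round the requested ring size up to a multiple of bufs * align */
    static unsigned align_ring(unsigned size, unsigned bufs)
    {
        unsigned sz = bufs * RING_SIZE_ALIGN;
        return (size + sz - 1) / sz * sz;
    }

    int main(void)
    {
        unsigned bufs = rx_buf_per_pkt(9000); /* jumbo MTU -> 3 buffers */
        printf("bufs=%u ring0=%u\n", bufs, align_ring(1024, bufs));
        return 0;
    }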
3286 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
3292 for (i = 0; i < adapter->num_tx_queues; i++) {
3293 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
3298 tq->shared = &adapter->tqd_start[i].ctrl;
3300 tq->adapter = adapter;
3302 err = vmxnet3_tq_create(tq, adapter);
3311 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
3312 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
3313 vmxnet3_adjust_rx_ring_size(adapter);
3315 adapter->rxdataring_enabled = VMXNET3_VERSION_GE_3(adapter);
3316 for (i = 0; i < adapter->num_rx_queues; i++) {
3317 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
3320 rq->shared = &adapter->rqd_start[i].ctrl;
3321 rq->adapter = adapter;
3323 err = vmxnet3_rq_create(rq, adapter);
3326 netdev_err(adapter->netdev,
3331 netdev_info(adapter->netdev,
3334 adapter->num_rx_queues = i;
3341 if (!adapter->rxdataring_enabled)
3342 vmxnet3_rq_destroy_all_rxdataring(adapter);
3346 vmxnet3_tq_destroy_all(adapter);
3353 struct vmxnet3_adapter *adapter;
3356 adapter = netdev_priv(netdev);
3358 for (i = 0; i < adapter->num_tx_queues; i++)
3359 spin_lock_init(&adapter->tx_queue[i].tx_lock);
3361 if (VMXNET3_VERSION_GE_3(adapter)) {
3365 spin_lock_irqsave(&adapter->cmd_lock, flags);
3366 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3368 txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter,
3370 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3375 adapter->txdata_desc_size =
3378 adapter->txdata_desc_size = txdata_desc_size;
3381 adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc);
3384 err = vmxnet3_create_queues(adapter,
3385 adapter->tx_ring_size,
3386 adapter->rx_ring_size,
3387 adapter->rx_ring2_size,
3388 adapter->txdata_desc_size,
3389 adapter->rxdata_desc_size);
3393 err = vmxnet3_activate_dev(adapter);
3400 vmxnet3_rq_destroy_all(adapter);
3401 vmxnet3_tq_destroy_all(adapter);
3410 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3416 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3419 vmxnet3_quiesce_dev(adapter);
3421 vmxnet3_rq_destroy_all(adapter);
3422 vmxnet3_tq_destroy_all(adapter);
3424 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3432 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
3440 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
3443 for (i = 0; i < adapter->num_rx_queues; i++)
3444 napi_enable(&adapter->rx_queue[i].napi);
3449 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3450 dev_close(adapter->netdev);
3457 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3466 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3470 vmxnet3_quiesce_dev(adapter);
3471 vmxnet3_reset_dev(adapter);
3474 vmxnet3_rq_destroy_all(adapter);
3475 vmxnet3_adjust_rx_ring_size(adapter);
3476 err = vmxnet3_rq_create_all(adapter);
3484 err = vmxnet3_activate_dev(adapter);
3494 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3496 vmxnet3_force_close(adapter);
3503 vmxnet3_declare_features(struct vmxnet3_adapter *adapter)
3505 struct net_device *netdev = adapter->netdev;
3512 if (VMXNET3_VERSION_GE_4(adapter)) {
3523 if (VMXNET3_VERSION_GE_7(adapter)) {
3526 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3528 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD;
3530 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3532 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD;
3534 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3536 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_TSO;
3538 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3540 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_TSO;
3542 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3544 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD;
3546 if (vmxnet3_check_ptcapability(adapter->ptcap_supported[0],
3548 adapter->dev_caps[0] |= 1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD;
3551 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3552 spin_lock_irqsave(&adapter->cmd_lock, flags);
3553 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3554 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3555 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3557 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_CHECKSUM_OFFLOAD)) &&
3558 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_CHECKSUM_OFFLOAD)) &&
3559 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_TSO)) &&
3560 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_TSO))) {
3564 if (!(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_GENEVE_OUTER_CHECKSUM_OFFLOAD)) &&
3565 !(adapter->dev_caps[0] & (1UL << VMXNET3_CAP_VXLAN_OUTER_CHECKSUM_OFFLOAD))) {
3579 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
3583 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
3586 tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
3603 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
3605 int ret = pci_enable_msix_range(adapter->pdev,
3606 adapter->intr.msix_entries, nvec, nvec);
3609 dev_err(&adapter->netdev->dev,
3613 ret = pci_enable_msix_range(adapter->pdev,
3614 adapter->intr.msix_entries,
3620 dev_err(&adapter->netdev->dev,
3631 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
3637 spin_lock_irqsave(&adapter->cmd_lock, flags);
3638 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3640 cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3641 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3642 adapter->intr.type = cfg & 0x3;
3643 adapter->intr.mask_mode = (cfg >> 2) & 0x3;
3645 if (adapter->intr.type == VMXNET3_IT_AUTO) {
3646 adapter->intr.type = VMXNET3_IT_MSIX;
3650 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3653 nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
3654 1 : adapter->num_tx_queues;
3655 nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
3656 0 : adapter->num_rx_queues;
3662 adapter->intr.msix_entries[i].entry = i;
3664 nvec_allocated = vmxnet3_acquire_msix_vectors(adapter, nvec);
3673 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
3674 || adapter->num_rx_queues != 1) {
3675 adapter->share_intr = VMXNET3_INTR_TXSHARE;
3676 netdev_err(adapter->netdev,
3678 adapter->num_rx_queues = 1;
3682 adapter->intr.num_intrs = nvec_allocated;
3687 dev_info(&adapter->pdev->dev,
3691 adapter->intr.type = VMXNET3_IT_MSI;
3694 if (adapter->intr.type == VMXNET3_IT_MSI) {
3695 if (!pci_enable_msi(adapter->pdev)) {
3696 adapter->num_rx_queues = 1;
3697 adapter->intr.num_intrs = 1;
3703 adapter->num_rx_queues = 1;
3704 dev_info(&adapter->netdev->dev,
3706 adapter->intr.type = VMXNET3_IT_INTX;
3709 adapter->intr.num_intrs = 1;
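The vector budget computed at lines 3653-3656 of the file follows directly from the share mode: one vector per tx queue (collapsed to one under VMXNET3_INTR_TXSHARE), one per rx queue (zero extra under VMXNET3_INTR_BUDDYSHARE, where each tx queue rides its buddy rx vector), plus the event interrupt. A model of the count:

    #include <stdio.h>

    enum share { DONTSHARE, TXSHARE, BUDDYSHARE }; /* mirrors VMXNET3_INTR_* */

    static unsigned nvec(enum share mode, unsigned ntx, unsigned nrx)
    {
        unsigned n = (mode == TXSHARE) ? 1 : ntx; /* tx vectors */
        n += (mode == BUDDYSHARE) ? 0 : nrx;      /* rx vectors */
        return n + 1;                             /* plus the event vector */
    }

    int main(void)
    {
        printf("%u %u %u\n",
               nvec(DONTSHARE, 4, 4),   /* 9 */
               nvec(TXSHARE, 4, 4),     /* 6 */
               nvec(BUDDYSHARE, 4, 4)); /* 5 */
        return 0;
    }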
3714 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
3716 if (adapter->intr.type == VMXNET3_IT_MSIX)
3717 pci_disable_msix(adapter->pdev);
3718 else if (adapter->intr.type == VMXNET3_IT_MSI)
3719 pci_disable_msi(adapter->pdev);
3721 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
3728 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3729 adapter->tx_timeout_count++;
3731 netdev_err(adapter->netdev, "tx hang\n");
3732 schedule_work(&adapter->work);
3739 struct vmxnet3_adapter *adapter;
3741 adapter = container_of(data, struct vmxnet3_adapter, work);
3744 if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
3749 if (netif_running(adapter->netdev)) {
3750 netdev_notice(adapter->netdev, "resetting\n");
3751 vmxnet3_quiesce_dev(adapter);
3752 vmxnet3_reset_dev(adapter);
3753 vmxnet3_activate_dev(adapter);
3755 netdev_info(adapter->netdev, "already closed\n");
3759 netif_wake_queue(adapter->netdev);
3760 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
3791 struct vmxnet3_adapter *adapter;
3822 adapter = netdev_priv(netdev);
3823 adapter->netdev = netdev;
3824 adapter->pdev = pdev;
3826 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
3827 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
3828 adapter->rx_ring2_size = VMXNET3_DEF_RX_RING2_SIZE;
3836 spin_lock_init(&adapter->cmd_lock);
3837 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
3840 if (dma_mapping_error(&adapter->pdev->dev, adapter->adapter_pa)) {
3845 adapter->shared = dma_alloc_coherent(
3846 &adapter->pdev->dev,
3848 &adapter->shared_pa, GFP_KERNEL);
3849 if (!adapter->shared) {
3855 err = vmxnet3_alloc_pci_resources(adapter);
3859 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
3861 VMXNET3_WRITE_BAR1_REG(adapter,
3864 adapter->version = VMXNET3_REV_7 + 1;
3866 VMXNET3_WRITE_BAR1_REG(adapter,
3869 adapter->version = VMXNET3_REV_6 + 1;
3871 VMXNET3_WRITE_BAR1_REG(adapter,
3874 adapter->version = VMXNET3_REV_5 + 1;
3876 VMXNET3_WRITE_BAR1_REG(adapter,
3879 adapter->version = VMXNET3_REV_4 + 1;
3881 VMXNET3_WRITE_BAR1_REG(adapter,
3884 adapter->version = VMXNET3_REV_3 + 1;
3886 VMXNET3_WRITE_BAR1_REG(adapter,
3889 adapter->version = VMXNET3_REV_2 + 1;
3891 VMXNET3_WRITE_BAR1_REG(adapter,
3894 adapter->version = VMXNET3_REV_1 + 1;
3897 "Incompatible h/w version (0x%x) for adapter\n", ver);
3901 dev_dbg(&pdev->dev, "Using device version %d\n", adapter->version);
3903 ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
3905 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
3908 "Incompatible upt version (0x%x) for adapter\n", ver);
3913 if (VMXNET3_VERSION_GE_7(adapter)) {
3914 adapter->devcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_DCR);
3915 adapter->ptcap_supported[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_PTCR);
3916 if (adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3917 adapter->dev_caps[0] = adapter->devcap_supported[0] &
3920 if (!(adapter->ptcap_supported[0] & (1UL << VMXNET3_DCR_ERROR)) &&
3921 adapter->ptcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP) &&
3922 adapter->devcap_supported[0] & (1UL << VMXNET3_CAP_OOORX_COMP)) {
3923 adapter->dev_caps[0] |= adapter->devcap_supported[0] &
3926 if (adapter->dev_caps[0])
3927 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DCR, adapter->dev_caps[0]);
3929 spin_lock_irqsave(&adapter->cmd_lock, flags);
3930 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_DCR0_REG);
3931 adapter->dev_caps[0] = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3932 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3935 if (VMXNET3_VERSION_GE_7(adapter) &&
3936 adapter->dev_caps[0] & (1UL << VMXNET3_CAP_LARGE_BAR)) {
3937 adapter->tx_prod_offset = VMXNET3_REG_LB_TXPROD;
3938 adapter->rx_prod_offset = VMXNET3_REG_LB_RXPROD;
3939 adapter->rx_prod2_offset = VMXNET3_REG_LB_RXPROD2;
3941 adapter->tx_prod_offset = VMXNET3_REG_TXPROD;
3942 adapter->rx_prod_offset = VMXNET3_REG_RXPROD;
3943 adapter->rx_prod2_offset = VMXNET3_REG_RXPROD2;
3946 if (VMXNET3_VERSION_GE_6(adapter)) {
3947 spin_lock_irqsave(&adapter->cmd_lock, flags);
3948 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3950 queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
3951 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3953 adapter->num_rx_queues = min(num_rx_queues, ((queues >> 8) & 0xff));
3954 adapter->num_tx_queues = min(num_tx_queues, (queues & 0xff));
3956 adapter->num_rx_queues = min(num_rx_queues,
3958 adapter->num_tx_queues = min(num_tx_queues,
3961 if (adapter->num_rx_queues > VMXNET3_MAX_RX_QUEUES ||
3962 adapter->num_tx_queues > VMXNET3_MAX_TX_QUEUES) {
3963 adapter->queuesExtEnabled = true;
3965 adapter->queuesExtEnabled = false;
3968 adapter->queuesExtEnabled = false;
3971 adapter->num_rx_queues = min(num_rx_queues,
3973 adapter->num_tx_queues = min(num_tx_queues,
3978 adapter->num_tx_queues, adapter->num_rx_queues);
3980 adapter->rx_buf_per_pkt = 1;
3982 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3983 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
3984 adapter->tqd_start = dma_alloc_coherent(&adapter->pdev->dev, size,
3985 &adapter->queue_desc_pa,
3988 if (!adapter->tqd_start) {
3993 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
3994 adapter->num_tx_queues);
3996 adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev,
3998 &adapter->pm_conf_pa,
4000 if (adapter->pm_conf == NULL) {
4007 adapter->rss_conf = dma_alloc_coherent(&adapter->pdev->dev,
4009 &adapter->rss_conf_pa,
4011 if (adapter->rss_conf == NULL) {
4017 if (VMXNET3_VERSION_GE_3(adapter)) {
4018 adapter->coal_conf =
4019 dma_alloc_coherent(&adapter->pdev->dev,
4022 &adapter->coal_conf_pa,
4024 if (!adapter->coal_conf) {
4028 adapter->coal_conf->coalMode = VMXNET3_COALESCE_DISABLED;
4029 adapter->default_coal_mode = true;
4032 if (VMXNET3_VERSION_GE_4(adapter)) {
4033 adapter->default_rss_fields = true;
4034 adapter->rss_fields = VMXNET3_RSS_FIELDS_DEFAULT;
4038 vmxnet3_declare_features(adapter);
4042 adapter->rxdata_desc_size = VMXNET3_VERSION_GE_3(adapter) ?
4045 if (adapter->num_tx_queues == adapter->num_rx_queues)
4046 adapter->share_intr = VMXNET3_INTR_BUDDYSHARE;
4048 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
4050 vmxnet3_alloc_intr_resources(adapter);
4053 if (adapter->num_rx_queues > 1 &&
4054 adapter->intr.type == VMXNET3_IT_MSIX) {
4055 adapter->rss = true;
4060 adapter->rss = false;
4064 vmxnet3_read_mac_addr(adapter, mac);
4073 if (VMXNET3_VERSION_GE_6(adapter))
4078 INIT_WORK(&adapter->work, vmxnet3_reset_work);
4079 set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
4081 if (adapter->intr.type == VMXNET3_IT_MSIX) {
4083 for (i = 0; i < adapter->num_rx_queues; i++) {
4084 netif_napi_add(adapter->netdev,
4085 &adapter->rx_queue[i].napi,
4089 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
4093 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
4094 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
4100 dev_err(&pdev->dev, "Failed to register adapter\n");
4104 vmxnet3_check_link(adapter, false);
4108 if (VMXNET3_VERSION_GE_3(adapter)) {
4109 dma_free_coherent(&adapter->pdev->dev,
4111 adapter->coal_conf, adapter->coal_conf_pa);
4113 vmxnet3_free_intr_resources(adapter);
4116 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4117 adapter->rss_conf, adapter->rss_conf_pa);
4120 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4121 adapter->pm_conf, adapter->pm_conf_pa);
4123 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4124 adapter->queue_desc_pa);
4126 vmxnet3_free_pci_resources(adapter);
4128 dma_free_coherent(&adapter->pdev->dev,
4130 adapter->shared, adapter->shared_pa);
4132 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4144 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4156 if (!VMXNET3_VERSION_GE_6(adapter)) {
4159 if (VMXNET3_VERSION_GE_6(adapter)) {
4160 spin_lock_irqsave(&adapter->cmd_lock, flags);
4161 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4163 rx_queues = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
4164 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4175 cancel_work_sync(&adapter->work);
4179 vmxnet3_free_intr_resources(adapter);
4180 vmxnet3_free_pci_resources(adapter);
4181 if (VMXNET3_VERSION_GE_3(adapter)) {
4182 dma_free_coherent(&adapter->pdev->dev,
4184 adapter->coal_conf, adapter->coal_conf_pa);
4187 dma_free_coherent(&adapter->pdev->dev, sizeof(struct UPT1_RSSConf),
4188 adapter->rss_conf, adapter->rss_conf_pa);
4190 dma_free_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf),
4191 adapter->pm_conf, adapter->pm_conf_pa);
4193 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
4195 dma_free_coherent(&adapter->pdev->dev, size, adapter->tqd_start,
4196 adapter->queue_desc_pa);
4197 dma_free_coherent(&adapter->pdev->dev,
4199 adapter->shared, adapter->shared_pa);
4200 dma_unmap_single(&adapter->pdev->dev, adapter->adapter_pa,
4208 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4214 while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
4218 &adapter->state)) {
4219 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4222 spin_lock_irqsave(&adapter->cmd_lock, flags);
4223 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4225 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4226 vmxnet3_disable_all_intrs(adapter);
4228 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
4239 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4252 for (i = 0; i < adapter->num_rx_queues; i++)
4253 napi_disable(&adapter->rx_queue[i].napi);
4255 vmxnet3_disable_all_intrs(adapter);
4256 vmxnet3_free_irqs(adapter);
4257 vmxnet3_free_intr_resources(adapter);
4262 pmConf = adapter->pm_conf;
4265 if (adapter->wol & WAKE_UCAST) {
4275 if (adapter->wol & WAKE_ARP) {
4325 if (adapter->wol & WAKE_MAGIC)
4330 adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
4331 adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
4333 adapter->shared->devRead.pmConfDesc.confPA =
4334 cpu_to_le64(adapter->pm_conf_pa);
4336 spin_lock_irqsave(&adapter->cmd_lock, flags);
4337 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4339 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4343 adapter->wol);
4358 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
4371 vmxnet3_alloc_intr_resources(adapter);
4377 /* Need not check adapter state as other reset tasks cannot run during
4380 spin_lock_irqsave(&adapter->cmd_lock, flags);
4381 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
4383 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
4384 vmxnet3_tq_cleanup_all(adapter);
4385 vmxnet3_rq_cleanup_all(adapter);
4387 vmxnet3_reset_dev(adapter);
4388 err = vmxnet3_activate_dev(adapter);
4392 vmxnet3_force_close(adapter);