Lines matching refs: vi
391 /* Find end of list, sew whole thing into vi->rq.pages. */
410 static void virtnet_rq_free_buf(struct virtnet_info *vi,
413 if (vi->mergeable_rx_bufs)
415 else if (vi->big_packets)
421 static void enable_delayed_refill(struct virtnet_info *vi)
423 spin_lock_bh(&vi->refill_lock);
424 vi->refill_enabled = true;
425 spin_unlock_bh(&vi->refill_lock);
428 static void disable_delayed_refill(struct virtnet_info *vi)
430 spin_lock_bh(&vi->refill_lock);
431 vi->refill_enabled = false;
432 spin_unlock_bh(&vi->refill_lock);
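The two helpers above (421-432) gate deferred RX refill behind vi->refill_lock. A minimal userspace analogue of that lock-protected flag pattern, with pthread_mutex_t standing in for spin_lock_bh/spin_unlock_bh; the struct and function names here are illustrative, not the driver's:

#include <pthread.h>
#include <stdbool.h>

struct refill_state {
        pthread_mutex_t lock;
        bool refill_enabled;
};

static void refill_enable(struct refill_state *st)
{
        pthread_mutex_lock(&st->lock);
        st->refill_enabled = true;      /* allow deferred refills again */
        pthread_mutex_unlock(&st->lock);
}

static void refill_disable(struct refill_state *st)
{
        pthread_mutex_lock(&st->lock);
        st->refill_enabled = false;     /* block new deferred refills */
        pthread_mutex_unlock(&st->lock);
}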
460 struct virtnet_info *vi = vq->vdev->priv;
461 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
470 netif_wake_subqueue(vi->dev, vq2txq(vq));
507 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
523 hdr_len = vi->hdr_len;
524 if (vi->mergeable_rx_bufs)
566 if (vi->mergeable_rx_bufs) {
730 static void virtnet_rq_set_premapped(struct virtnet_info *vi)
735 if (!vi->mergeable_rx_bufs && vi->big_packets)
738 for (i = 0; i < vi->max_queue_pairs; i++) {
739 if (virtqueue_set_dma_premapped(vi->rq[i].vq))
742 vi->rq[i].do_dma = true;
748 struct virtnet_info *vi = vq->vdev->priv;
752 rq = &vi->rq[i];
757 virtnet_rq_free_buf(vi, rq, buf);
796 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
798 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
800 else if (q < vi->curr_queue_pairs)
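is_xdp_raw_buffer_queue() (796-800) partitions the active queue range: the last xdp_queue_pairs of the curr_queue_pairs queues carry XDP_TX traffic. A standalone sketch of that index test, with the struct dereferences replaced by plain parameters for illustration:

#include <stdbool.h>

static bool is_xdp_tx_queue(int q, int curr_queue_pairs, int xdp_queue_pairs)
{
        if (q < curr_queue_pairs - xdp_queue_pairs)
                return false;           /* regular TX queue */
        if (q < curr_queue_pairs)
                return true;            /* reserved for XDP transmissions */
        return false;                   /* out of range */
}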
806 static void check_sq_full_and_disable(struct virtnet_info *vi,
813 qnum = sq - vi->sq;
841 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
850 if (unlikely(xdpf->headroom < vi->hdr_len))
865 xdpf->headroom -= vi->hdr_len;
866 xdpf->data -= vi->hdr_len;
869 memset(hdr, 0, vi->hdr_len);
870 xdpf->len += vi->hdr_len;
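__virtnet_xdp_xmit_one() (841-870) carves the virtio-net header out of the frame's existing headroom, as the fragments above show. A self-contained sketch of that prepend step, using a hypothetical frame struct in place of struct xdp_frame:

#include <string.h>

struct fake_xdp_frame {
        unsigned char *data;            /* start of packet payload */
        unsigned int len;               /* payload length */
        unsigned int headroom;          /* bytes available before data */
};

static int prepend_vnet_hdr(struct fake_xdp_frame *f, unsigned int hdr_len)
{
        if (f->headroom < hdr_len)
                return -1;              /* not enough room for the header */

        f->headroom -= hdr_len;
        f->data -= hdr_len;             /* header now sits at the new data start */
        memset(f->data, 0, hdr_len);    /* zeroed virtio-net header */
        f->len += hdr_len;
        return 0;
}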
889 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
897 #define virtnet_xdp_get_sq(vi) ({ \
900 typeof(vi) v = (vi); \
916 #define virtnet_xdp_put_sq(vi, q) { \
918 typeof(vi) v = (vi); \
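virtnet_xdp_get_sq()/virtnet_xdp_put_sq() (897-918) map the running CPU onto an XDP send queue. The macro bodies are not shown in this listing, so the following is only a hedged sketch of the index arithmetic they are built around, assuming the usual dedicated-versus-shared split: when queues outnumber CPUs each CPU gets its own XDP queue past the regular ones, otherwise queues are shared (and must be locked per use).

static int pick_xdp_sq(int cpu, int nr_cpu_ids,
                       int curr_queue_pairs, int xdp_queue_pairs)
{
        if (curr_queue_pairs > nr_cpu_ids)
                /* dedicated XDP queues start after the regular ones */
                return curr_queue_pairs - xdp_queue_pairs + cpu;

        /* otherwise fall back to sharing: hash the CPU onto a queue */
        return cpu % curr_queue_pairs;
}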
930 struct virtnet_info *vi = netdev_priv(dev);
931 struct receive_queue *rq = vi->rq;
950 sq = virtnet_xdp_get_sq(vi);
976 if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
982 if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq))
983 check_sq_full_and_disable(vi, dev, sq);
998 virtnet_xdp_put_sq(vi, sq);
1071 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
1073 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
1144 static struct sk_buff *receive_small_build_skb(struct virtnet_info *vi,
1155 headroom = vi->hdr_len + header_offset;
1164 memcpy(skb_vnet_common_hdr(skb), buf, vi->hdr_len);
1170 struct virtnet_info *vi,
1180 unsigned int headroom = vi->hdr_len + header_offset;
1196 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
1198 unsigned int tlen = len + vi->hdr_len;
1201 xdp_headroom = virtnet_get_headroom(vi);
1203 headroom = vi->hdr_len + header_offset;
1218 xdp_prepare_buff(&xdp, buf + VIRTNET_RX_PAD + vi->hdr_len,
1257 struct virtnet_info *vi,
1268 len -= vi->hdr_len;
1278 if (unlikely(vi->xdp_enabled)) {
1284 skb = receive_small_xdp(dev, vi, rq, xdp_prog, buf,
1293 skb = receive_small_build_skb(vi, xdp_headroom, buf, len);
1304 struct virtnet_info *vi,
1312 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0);
1314 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1356 struct virtnet_info *vi,
1399 struct virtnet_info *vi,
1421 VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true);
1447 virtio16_to_cpu(vi->vdev, hdr->num_buffers));
1487 static void *mergeable_xdp_get_buf(struct virtnet_info *vi,
1517 if (likely(headroom >= virtnet_get_headroom(vi) &&
1562 struct virtnet_info *vi,
1572 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1583 data = mergeable_xdp_get_buf(vi, rq, xdp_prog, ctx, &frame_sz, &num_buf, &page,
1588 err = virtnet_build_xdp_buff_mrg(dev, vi, rq, &xdp, data, len, frame_sz,
1597 head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz);
1622 struct virtnet_info *vi,
1631 int num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
1641 u64_stats_add(&stats->bytes, len - vi->hdr_len);
1650 if (unlikely(vi->xdp_enabled)) {
1656 head_skb = receive_mergeable_xdp(dev, vi, rq, xdp_prog, buf, ctx,
1664 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom);
1676 virtio16_to_cpu(vi->vdev,
1768 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1773 struct net_device *dev = vi->dev;
1777 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1780 virtnet_rq_free_buf(vi, rq, buf);
1784 if (vi->mergeable_rx_bufs)
1785 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1787 else if (vi->big_packets)
1788 skb = receive_big(dev, vi, rq, buf, len, stats);
1790 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1796 if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report)
1803 virtio_is_little_endian(vi->vdev))) {
1828 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1832 unsigned int xdp_headroom = virtnet_get_headroom(vi);
1834 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1845 vi->hdr_len + GOOD_PACKET_LEN);
1857 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1864 sg_init_table(rq->sg, vi->big_packets_num_skbfrags + 2);
1866 /* page in rq->sg[vi->big_packets_num_skbfrags + 1] is list tail */
1867 for (i = vi->big_packets_num_skbfrags + 1; i > 1; --i) {
1890 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1898 err = virtqueue_add_inbuf(rq->vq, rq->sg, vi->big_packets_num_skbfrags + 2,
1910 struct virtnet_info *vi = rq->vq->vdev->priv;
1911 const size_t hdr_len = vi->hdr_len;
1923 static int add_recvbuf_mergeable(struct virtnet_info *vi,
1927 unsigned int headroom = virtnet_get_headroom(vi);
1979 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1986 if (vi->mergeable_rx_bufs)
1987 err = add_recvbuf_mergeable(vi, rq, gfp);
1988 else if (vi->big_packets)
1989 err = add_recvbuf_big(vi, rq, gfp);
1991 err = add_recvbuf_small(vi, rq, gfp);
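try_fill_recv() (1979-1991) picks one of three buffer-posting strategies from the negotiated features. A purely illustrative dispatcher showing the same precedence (mergeable first, then big packets, else small buffers); the enum is made up for the sketch:

enum rx_buf_mode {
        RX_MERGEABLE,   /* mergeable RX buffers: variable-size chunks */
        RX_BIG,         /* big packets: chained pages sized for MTU/GSO */
        RX_SMALL,       /* neither: one small buffer per packet */
};

static enum rx_buf_mode pick_rx_mode(int mergeable_rx_bufs, int big_packets)
{
        if (mergeable_rx_bufs)
                return RX_MERGEABLE;
        if (big_packets)
                return RX_BIG;
        return RX_SMALL;
}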
2010 struct virtnet_info *vi = rvq->vdev->priv;
2011 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
2029 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
2039 if (!vi->affinity_hint_set) {
2055 struct virtnet_info *vi =
2060 for (i = 0; i < vi->curr_queue_pairs; i++) {
2061 struct receive_queue *rq = &vi->rq[i];
2064 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
2071 schedule_delayed_work(&vi->refill, HZ/2);
2078 struct virtnet_info *vi = rq->vq->vdev->priv;
2085 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2090 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
2096 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
2102 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
2103 spin_lock(&vi->refill_lock);
2104 if (vi->refill_enabled)
2105 schedule_delayed_work(&vi->refill, 0);
2106 spin_unlock(&vi->refill_lock);
2127 struct virtnet_info *vi = rq->vq->vdev->priv;
2129 struct send_queue *sq = &vi->sq[index];
2130 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
2132 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
2157 struct virtnet_info *vi = rq->vq->vdev->priv;
2174 sq = virtnet_xdp_get_sq(vi);
2180 virtnet_xdp_put_sq(vi, sq);
2186 static void virtnet_disable_queue_pair(struct virtnet_info *vi, int qp_index)
2188 virtnet_napi_tx_disable(&vi->sq[qp_index].napi);
2189 napi_disable(&vi->rq[qp_index].napi);
2190 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2193 static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index)
2195 struct net_device *dev = vi->dev;
2198 err = xdp_rxq_info_reg(&vi->rq[qp_index].xdp_rxq, dev, qp_index,
2199 vi->rq[qp_index].napi.napi_id);
2203 err = xdp_rxq_info_reg_mem_model(&vi->rq[qp_index].xdp_rxq,
2208 virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi);
2209 virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi);
2214 xdp_rxq_info_unreg(&vi->rq[qp_index].xdp_rxq);
2220 struct virtnet_info *vi = netdev_priv(dev);
2223 enable_delayed_refill(vi);
2225 for (i = 0; i < vi->max_queue_pairs; i++) {
2226 if (i < vi->curr_queue_pairs)
2228 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
2229 schedule_delayed_work(&vi->refill, 0);
2231 err = virtnet_enable_queue_pair(vi, i);
2239 disable_delayed_refill(vi);
2240 cancel_delayed_work_sync(&vi->refill);
2243 virtnet_disable_queue_pair(vi, i);
2250 struct virtnet_info *vi = sq->vq->vdev->priv;
2256 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
2262 txq = netdev_get_tx_queue(vi->dev, index);
2297 struct virtnet_info *vi = sq->vq->vdev->priv;
2299 unsigned hdr_len = vi->hdr_len;
2302 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
2304 can_push = vi->any_header_sg &&
2315 virtio_is_little_endian(vi->vdev), false,
2319 if (vi->mergeable_rx_bufs)
2342 struct virtnet_info *vi = netdev_priv(dev);
2344 struct send_queue *sq = &vi->sq[qnum];
2384 check_sq_full_and_disable(vi, dev, sq);
2397 static int virtnet_rx_resize(struct virtnet_info *vi,
2400 bool running = netif_running(vi->dev);
2403 qindex = rq - vi->rq;
2410 netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err);
2412 if (!try_fill_recv(vi, rq, GFP_KERNEL))
2413 schedule_delayed_work(&vi->refill, 0);
2420 static int virtnet_tx_resize(struct virtnet_info *vi,
2423 bool running = netif_running(vi->dev);
2427 qindex = sq - vi->sq;
2432 txq = netdev_get_tx_queue(vi->dev, qindex);
2443 netif_stop_subqueue(vi->dev, qindex);
2449 netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err);
2457 virtnet_napi_tx_enable(vi, sq->vq, &sq->napi);
2466 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
2474 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
2476 vi->ctrl->status = ~0;
2477 vi->ctrl->hdr.class = class;
2478 vi->ctrl->hdr.cmd = cmd;
2480 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
2487 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
2491 ret = virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
2493 dev_warn(&vi->vdev->dev,
2498 if (unlikely(!virtqueue_kick(vi->cvq)))
2499 return vi->ctrl->status == VIRTIO_NET_OK;
2504 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
2505 !virtqueue_is_broken(vi->cvq))
2508 return vi->ctrl->status == VIRTIO_NET_OK;
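virtnet_send_command() (2466-2508) frames every control request the same way: a class/cmd header going out, optional payload going out, and a one-byte status the device writes back, followed by a kick and a busy-wait on the control queue. A rough userspace model of that descriptor ordering, using struct iovec in place of scatterlists; the struct and function names are illustrative, not the kernel ABI headers:

#include <stddef.h>
#include <stdint.h>
#include <sys/uio.h>

struct ctrl_hdr {
        uint8_t class;          /* command class, e.g. MAC, MQ, VLAN */
        uint8_t cmd;            /* command within that class */
};

/* Build the descriptor chain in the order the driver uses:
 * [0] header (out), [1] optional payload (out), [last] status (in).
 * The sgs array must have room for three entries. */
static int build_ctrl_sgs(struct iovec *sgs, struct ctrl_hdr *hdr,
                          void *data, size_t data_len, uint8_t *status)
{
        int n = 0;

        sgs[n].iov_base = hdr;
        sgs[n++].iov_len = sizeof(*hdr);

        if (data) {
                sgs[n].iov_base = data;
                sgs[n++].iov_len = data_len;
        }

        sgs[n].iov_base = status;       /* device-writable status byte */
        sgs[n++].iov_len = sizeof(*status);

        return n;
}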
2513 struct virtnet_info *vi = netdev_priv(dev);
2514 struct virtio_device *vdev = vi->vdev;
2519 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2532 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2561 struct virtnet_info *vi = netdev_priv(dev);
2565 for (i = 0; i < vi->max_queue_pairs; i++) {
2567 struct receive_queue *rq = &vi->rq[i];
2568 struct send_queue *sq = &vi->sq[i];
2598 static void virtnet_ack_link_announce(struct virtnet_info *vi)
2601 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
2603 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
2607 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2610 struct net_device *dev = vi->dev;
2612 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
2615 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
2616 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
2618 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2624 vi->curr_queue_pairs = queue_pairs;
2627 schedule_delayed_work(&vi->refill, 0);
2633 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
2638 err = _virtnet_set_queues(vi, queue_pairs);
2645 struct virtnet_info *vi = netdev_priv(dev);
2649 disable_delayed_refill(vi);
2651 cancel_delayed_work_sync(&vi->refill);
2653 for (i = 0; i < vi->max_queue_pairs; i++)
2654 virtnet_disable_queue_pair(vi, i);
2661 struct virtnet_info *vi = netdev_priv(dev);
2671 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
2674 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
2675 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
2677 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
2679 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2682 vi->ctrl->promisc ? "en" : "dis");
2684 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
2686 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
2689 vi->ctrl->allmulti ? "en" : "dis");
2703 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
2714 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2722 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2732 struct virtnet_info *vi = netdev_priv(dev);
2735 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2736 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2738 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2747 struct virtnet_info *vi = netdev_priv(dev);
2750 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2751 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2753 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2759 static void virtnet_clean_affinity(struct virtnet_info *vi)
2763 if (vi->affinity_hint_set) {
2764 for (i = 0; i < vi->max_queue_pairs; i++) {
2765 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2766 virtqueue_set_affinity(vi->sq[i].vq, NULL);
2769 vi->affinity_hint_set = false;
2773 static void virtnet_set_affinity(struct virtnet_info *vi)
2783 virtnet_clean_affinity(vi);
2788 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2789 stragglers = num_cpu >= vi->curr_queue_pairs ?
2790 num_cpu % vi->curr_queue_pairs :
2794 for (i = 0; i < vi->curr_queue_pairs; i++) {
2802 virtqueue_set_affinity(vi->rq[i].vq, mask);
2803 virtqueue_set_affinity(vi->sq[i].vq, mask);
2804 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, XPS_CPUS);
2808 vi->affinity_hint_set = true;
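virtnet_set_affinity() (2773-2808) spreads the online CPUs over the queue pairs with a fixed stride, handing one extra "straggler" CPU to each of the first queues when the division is uneven. A standalone example of that arithmetic; the CPU and queue counts are made up for the demo, and wrap-around for the CPU-poor case is omitted:

#include <stdio.h>

int main(void)
{
        int num_cpu = 10, curr_queue_pairs = 4;
        int stride = num_cpu / curr_queue_pairs;
        int stragglers = num_cpu >= curr_queue_pairs ?
                         num_cpu % curr_queue_pairs : 0;
        int cpu = 0;

        if (stride < 1)
                stride = 1;

        for (int i = 0; i < curr_queue_pairs; i++) {
                int group_size = stride + (i < stragglers ? 1 : 0);

                printf("queue %d gets CPUs %d..%d\n",
                       i, cpu, cpu + group_size - 1);
                cpu += group_size;
        }
        return 0;
}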
2814 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2816 virtnet_set_affinity(vi);
2822 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2824 virtnet_set_affinity(vi);
2830 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2833 virtnet_clean_affinity(vi);
2839 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2843 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2847 &vi->node_dead);
2850 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2854 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2856 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2858 &vi->node_dead);
2866 struct virtnet_info *vi = netdev_priv(dev);
2868 ring->rx_max_pending = vi->rq[0].vq->num_max;
2869 ring->tx_max_pending = vi->sq[0].vq->num_max;
2870 ring->rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2871 ring->tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2874 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
2882 struct virtnet_info *vi = netdev_priv(dev);
2891 rx_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2892 tx_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2898 if (ring->rx_pending > vi->rq[0].vq->num_max)
2901 if (ring->tx_pending > vi->sq[0].vq->num_max)
2904 for (i = 0; i < vi->max_queue_pairs; i++) {
2905 rq = vi->rq + i;
2906 sq = vi->sq + i;
2909 err = virtnet_tx_resize(vi, sq, ring->tx_pending);
2918 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(i),
2919 vi->intr_coal_tx.max_usecs,
2920 vi->intr_coal_tx.max_packets);
2924 vi->sq[i].intr_coal.max_usecs = vi->intr_coal_tx.max_usecs;
2925 vi->sq[i].intr_coal.max_packets = vi->intr_coal_tx.max_packets;
2929 err = virtnet_rx_resize(vi, rq, ring->rx_pending);
2934 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(i),
2935 vi->intr_coal_rx.max_usecs,
2936 vi->intr_coal_rx.max_packets);
2940 vi->rq[i].intr_coal.max_usecs = vi->intr_coal_rx.max_usecs;
2941 vi->rq[i].intr_coal.max_packets = vi->intr_coal_rx.max_packets;
2948 static bool virtnet_commit_rss_command(struct virtnet_info *vi)
2950 struct net_device *dev = vi->dev;
2958 sg_set_buf(&sgs[0], &vi->ctrl->rss, sg_buf_size);
2960 sg_buf_size = sizeof(uint16_t) * (vi->ctrl->rss.indirection_table_mask + 1);
2961 sg_set_buf(&sgs[1], vi->ctrl->rss.indirection_table, sg_buf_size);
2965 sg_set_buf(&sgs[2], &vi->ctrl->rss.max_tx_vq, sg_buf_size);
2967 sg_buf_size = vi->rss_key_size;
2968 sg_set_buf(&sgs[3], vi->ctrl->rss.key, sg_buf_size);
2970 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
2971 vi->has_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG
2979 static void virtnet_init_default_rss(struct virtnet_info *vi)
2984 vi->ctrl->rss.hash_types = vi->rss_hash_types_supported;
2985 vi->rss_hash_types_saved = vi->rss_hash_types_supported;
2986 vi->ctrl->rss.indirection_table_mask = vi->rss_indir_table_size
2987 ? vi->rss_indir_table_size - 1 : 0;
2988 vi->ctrl->rss.unclassified_queue = 0;
2990 for (; i < vi->rss_indir_table_size; ++i) {
2991 indir_val = ethtool_rxfh_indir_default(i, vi->curr_queue_pairs);
2992 vi->ctrl->rss.indirection_table[i] = indir_val;
2995 vi->ctrl->rss.max_tx_vq = vi->has_rss ? vi->curr_queue_pairs : 0;
2996 vi->ctrl->rss.hash_key_length = vi->rss_key_size;
2998 netdev_rss_key_fill(vi->ctrl->rss.key, vi->rss_key_size);
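virtnet_init_default_rss() (2979-2998) fills the indirection table with ethtool_rxfh_indir_default(), which spreads entries round-robin over the active queues. A minimal sketch of that fill, assuming the default i-mod-queues mapping:

#include <stdint.h>

static void fill_default_indir(uint16_t *table, int table_size,
                               int curr_queue_pairs)
{
        for (int i = 0; i < table_size; i++)
                table[i] = i % curr_queue_pairs;  /* round-robin over queues */
}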
3001 static void virtnet_get_hashflow(const struct virtnet_info *vi, struct ethtool_rxnfc *info)
3006 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv4) {
3009 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3014 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_TCPv6) {
3017 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3022 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv4) {
3025 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4) {
3030 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_UDPv6) {
3033 } else if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6) {
3038 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv4)
3043 if (vi->rss_hash_types_saved & VIRTIO_NET_RSS_HASH_TYPE_IPv6)
3053 static bool virtnet_set_hashflow(struct virtnet_info *vi, struct ethtool_rxnfc *info)
3055 u32 new_hashtypes = vi->rss_hash_types_saved;
3104 if (new_hashtypes != (new_hashtypes & vi->rss_hash_types_supported))
3107 if (new_hashtypes != vi->rss_hash_types_saved) {
3108 vi->rss_hash_types_saved = new_hashtypes;
3109 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3110 if (vi->dev->features & NETIF_F_RXHASH)
3111 return virtnet_commit_rss_command(vi);
3120 struct virtnet_info *vi = netdev_priv(dev);
3121 struct virtio_device *vdev = vi->vdev;
3133 struct virtnet_info *vi = netdev_priv(dev);
3143 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
3150 if (vi->rq[0].xdp_prog)
3154 err = _virtnet_set_queues(vi, queue_pairs);
3159 virtnet_set_affinity(vi);
3170 struct virtnet_info *vi = netdev_priv(dev);
3176 for (i = 0; i < vi->curr_queue_pairs; i++) {
3182 for (i = 0; i < vi->curr_queue_pairs; i++) {
3193 struct virtnet_info *vi = netdev_priv(dev);
3197 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
3207 struct virtnet_info *vi = netdev_priv(dev);
3213 for (i = 0; i < vi->curr_queue_pairs; i++) {
3214 struct receive_queue *rq = &vi->rq[i];
3228 for (i = 0; i < vi->curr_queue_pairs; i++) {
3229 struct send_queue *sq = &vi->sq[i];
3247 struct virtnet_info *vi = netdev_priv(dev);
3249 channels->combined_count = vi->curr_queue_pairs;
3250 channels->max_combined = vi->max_queue_pairs;
3260 struct virtnet_info *vi = netdev_priv(dev);
3263 &vi->speed, &vi->duplex);
3269 struct virtnet_info *vi = netdev_priv(dev);
3271 cmd->base.speed = vi->speed;
3272 cmd->base.duplex = vi->duplex;
3278 static int virtnet_send_notf_coal_cmds(struct virtnet_info *vi,
3284 vi->ctrl->coal_tx.tx_usecs = cpu_to_le32(ec->tx_coalesce_usecs);
3285 vi->ctrl->coal_tx.tx_max_packets = cpu_to_le32(ec->tx_max_coalesced_frames);
3286 sg_init_one(&sgs_tx, &vi->ctrl->coal_tx, sizeof(vi->ctrl->coal_tx));
3288 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3294 vi->intr_coal_tx.max_usecs = ec->tx_coalesce_usecs;
3295 vi->intr_coal_tx.max_packets = ec->tx_max_coalesced_frames;
3296 for (i = 0; i < vi->max_queue_pairs; i++) {
3297 vi->sq[i].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3298 vi->sq[i].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3301 vi->ctrl->coal_rx.rx_usecs = cpu_to_le32(ec->rx_coalesce_usecs);
3302 vi->ctrl->coal_rx.rx_max_packets = cpu_to_le32(ec->rx_max_coalesced_frames);
3303 sg_init_one(&sgs_rx, &vi->ctrl->coal_rx, sizeof(vi->ctrl->coal_rx));
3305 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3311 vi->intr_coal_rx.max_usecs = ec->rx_coalesce_usecs;
3312 vi->intr_coal_rx.max_packets = ec->rx_max_coalesced_frames;
3313 for (i = 0; i < vi->max_queue_pairs; i++) {
3314 vi->rq[i].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3315 vi->rq[i].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3321 static int virtnet_send_ctrl_coal_vq_cmd(struct virtnet_info *vi,
3326 vi->ctrl->coal_vq.vqn = cpu_to_le16(vqn);
3327 vi->ctrl->coal_vq.coal.max_usecs = cpu_to_le32(max_usecs);
3328 vi->ctrl->coal_vq.coal.max_packets = cpu_to_le32(max_packets);
3329 sg_init_one(&sgs, &vi->ctrl->coal_vq, sizeof(vi->ctrl->coal_vq));
3331 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_NOTF_COAL,
3339 static int virtnet_send_notf_coal_vq_cmds(struct virtnet_info *vi,
3345 err = virtnet_send_ctrl_coal_vq_cmd(vi, rxq2vq(queue),
3351 vi->rq[queue].intr_coal.max_usecs = ec->rx_coalesce_usecs;
3352 vi->rq[queue].intr_coal.max_packets = ec->rx_max_coalesced_frames;
3354 err = virtnet_send_ctrl_coal_vq_cmd(vi, txq2vq(queue),
3360 vi->sq[queue].intr_coal.max_usecs = ec->tx_coalesce_usecs;
3361 vi->sq[queue].intr_coal.max_packets = ec->tx_max_coalesced_frames;
3398 struct virtnet_info *vi = netdev_priv(dev);
3404 for (queue_number = 0; queue_number < vi->max_queue_pairs; queue_number++) {
3406 vi->sq[queue_number].napi.weight,
3412 /* All queues that belong to [queue_number, vi->max_queue_pairs] will be
3419 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL))
3420 ret = virtnet_send_notf_coal_cmds(vi, ec);
3428 for (; queue_number < vi->max_queue_pairs; queue_number++)
3429 vi->sq[queue_number].napi.weight = napi_weight;
3440 struct virtnet_info *vi = netdev_priv(dev);
3442 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
3443 ec->rx_coalesce_usecs = vi->intr_coal_rx.max_usecs;
3444 ec->tx_coalesce_usecs = vi->intr_coal_tx.max_usecs;
3445 ec->tx_max_coalesced_frames = vi->intr_coal_tx.max_packets;
3446 ec->rx_max_coalesced_frames = vi->intr_coal_rx.max_packets;
3450 if (vi->sq[0].napi.weight)
3461 struct virtnet_info *vi = netdev_priv(dev);
3465 if (queue >= vi->max_queue_pairs)
3471 vi->sq[queue].napi.weight,
3476 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL))
3477 ret = virtnet_send_notf_coal_vq_cmds(vi, ec, queue);
3485 vi->sq[queue].napi.weight = napi_weight;
3494 struct virtnet_info *vi = netdev_priv(dev);
3496 if (queue >= vi->max_queue_pairs)
3499 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) {
3500 ec->rx_coalesce_usecs = vi->rq[queue].intr_coal.max_usecs;
3501 ec->tx_coalesce_usecs = vi->sq[queue].intr_coal.max_usecs;
3502 ec->tx_max_coalesced_frames = vi->sq[queue].intr_coal.max_packets;
3503 ec->rx_max_coalesced_frames = vi->rq[queue].intr_coal.max_packets;
3507 if (vi->sq[queue].napi.weight)
3516 struct virtnet_info *vi = netdev_priv(dev);
3518 vi->speed = SPEED_UNKNOWN;
3519 vi->duplex = DUPLEX_UNKNOWN;
3522 static void virtnet_update_settings(struct virtnet_info *vi)
3527 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
3530 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
3533 vi->speed = speed;
3535 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
3538 vi->duplex = duplex;
3553 struct virtnet_info *vi = netdev_priv(dev);
3557 for (i = 0; i < vi->rss_indir_table_size; ++i)
3558 indir[i] = vi->ctrl->rss.indirection_table[i];
3562 memcpy(key, vi->ctrl->rss.key, vi->rss_key_size);
3572 struct virtnet_info *vi = netdev_priv(dev);
3579 for (i = 0; i < vi->rss_indir_table_size; ++i)
3580 vi->ctrl->rss.indirection_table[i] = indir[i];
3583 memcpy(vi->ctrl->rss.key, key, vi->rss_key_size);
3585 virtnet_commit_rss_command(vi);
3592 struct virtnet_info *vi = netdev_priv(dev);
3597 info->data = vi->curr_queue_pairs;
3600 virtnet_get_hashflow(vi, info);
3611 struct virtnet_info *vi = netdev_priv(dev);
3616 if (!virtnet_set_hashflow(vi, info))
3656 struct virtnet_info *vi = vdev->priv;
3659 flush_work(&vi->config_work);
3661 netif_tx_lock_bh(vi->dev);
3662 netif_device_detach(vi->dev);
3663 netif_tx_unlock_bh(vi->dev);
3664 if (netif_running(vi->dev))
3665 virtnet_close(vi->dev);
3668 static int init_vqs(struct virtnet_info *vi);
3672 struct virtnet_info *vi = vdev->priv;
3675 err = init_vqs(vi);
3681 enable_delayed_refill(vi);
3683 if (netif_running(vi->dev)) {
3684 err = virtnet_open(vi->dev);
3689 netif_tx_lock_bh(vi->dev);
3690 netif_device_attach(vi->dev);
3691 netif_tx_unlock_bh(vi->dev);
3695 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
3698 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
3700 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
3702 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
3704 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
3711 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
3715 if (!vi->guest_offloads)
3718 return virtnet_set_guest_offloads(vi, offloads);
3721 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
3723 u64 offloads = vi->guest_offloads;
3725 if (!vi->guest_offloads)
3728 return virtnet_set_guest_offloads(vi, offloads);
3737 struct virtnet_info *vi = netdev_priv(dev);
3742 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
3743 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
3744 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
3745 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
3746 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
3747 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM) ||
3748 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) ||
3749 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6))) {
3754 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
3765 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
3770 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
3772 curr_qp + xdp_qp, vi->max_queue_pairs);
3776 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
3781 bpf_prog_add(prog, vi->max_queue_pairs - 1);
3785 for (i = 0; i < vi->max_queue_pairs; i++) {
3786 napi_disable(&vi->rq[i].napi);
3787 virtnet_napi_tx_disable(&vi->sq[i].napi);
3792 for (i = 0; i < vi->max_queue_pairs; i++) {
3793 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3795 virtnet_restore_guest_offloads(vi);
3800 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
3804 vi->xdp_queue_pairs = xdp_qp;
3807 vi->xdp_enabled = true;
3808 for (i = 0; i < vi->max_queue_pairs; i++) {
3809 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
3811 virtnet_clear_guest_offloads(vi);
3817 vi->xdp_enabled = false;
3820 for (i = 0; i < vi->max_queue_pairs; i++) {
3824 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3825 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3826 &vi->sq[i].napi);
3834 virtnet_clear_guest_offloads(vi);
3835 for (i = 0; i < vi->max_queue_pairs; i++)
3836 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
3840 for (i = 0; i < vi->max_queue_pairs; i++) {
3841 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
3842 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
3843 &vi->sq[i].napi);
3847 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
3864 struct virtnet_info *vi = netdev_priv(dev);
3867 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
3880 struct virtnet_info *vi = netdev_priv(dev);
3885 if (vi->xdp_enabled)
3889 offloads = vi->guest_offloads_capable;
3891 offloads = vi->guest_offloads_capable &
3894 err = virtnet_set_guest_offloads(vi, offloads);
3897 vi->guest_offloads = offloads;
3902 vi->ctrl->rss.hash_types = vi->rss_hash_types_saved;
3904 vi->ctrl->rss.hash_types = VIRTIO_NET_HASH_REPORT_NONE;
3906 if (!virtnet_commit_rss_command(vi))
3948 struct virtnet_info *vi =
3952 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
3957 netdev_notify_peers(vi->dev);
3958 virtnet_ack_link_announce(vi);
3964 if (vi->status == v)
3967 vi->status = v;
3969 if (vi->status & VIRTIO_NET_S_LINK_UP) {
3970 virtnet_update_settings(vi);
3971 netif_carrier_on(vi->dev);
3972 netif_tx_wake_all_queues(vi->dev);
3974 netif_carrier_off(vi->dev);
3975 netif_tx_stop_all_queues(vi->dev);
3981 struct virtnet_info *vi = vdev->priv;
3983 schedule_work(&vi->config_work);
3986 static void virtnet_free_queues(struct virtnet_info *vi)
3990 for (i = 0; i < vi->max_queue_pairs; i++) {
3991 __netif_napi_del(&vi->rq[i].napi);
3992 __netif_napi_del(&vi->sq[i].napi);
3996 * we need to respect an RCU grace period before freeing vi->rq
4000 kfree(vi->rq);
4001 kfree(vi->sq);
4002 kfree(vi->ctrl);
4005 static void _free_receive_bufs(struct virtnet_info *vi)
4010 for (i = 0; i < vi->max_queue_pairs; i++) {
4011 while (vi->rq[i].pages)
4012 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
4014 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
4015 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
4021 static void free_receive_bufs(struct virtnet_info *vi)
4024 _free_receive_bufs(vi);
4028 static void free_receive_page_frags(struct virtnet_info *vi)
4031 for (i = 0; i < vi->max_queue_pairs; i++)
4032 if (vi->rq[i].alloc_frag.page) {
4033 if (vi->rq[i].do_dma && vi->rq[i].last_dma)
4034 virtnet_rq_unmap(&vi->rq[i], vi->rq[i].last_dma, 0);
4035 put_page(vi->rq[i].alloc_frag.page);
4047 static void free_unused_bufs(struct virtnet_info *vi)
4052 for (i = 0; i < vi->max_queue_pairs; i++) {
4053 struct virtqueue *vq = vi->sq[i].vq;
4059 for (i = 0; i < vi->max_queue_pairs; i++) {
4060 struct virtqueue *vq = vi->rq[i].vq;
4068 static void virtnet_del_vqs(struct virtnet_info *vi)
4070 struct virtio_device *vdev = vi->vdev;
4072 virtnet_clean_affinity(vi);
4076 virtnet_free_queues(vi);
4083 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
4085 const unsigned int hdr_len = vi->hdr_len;
4087 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
4095 static int virtnet_find_vqs(struct virtnet_info *vi)
4109 total_vqs = vi->max_queue_pairs * 2 +
4110 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
4122 if (!vi->big_packets || vi->mergeable_rx_bufs) {
4131 if (vi->has_cvq) {
4137 for (i = 0; i < vi->max_queue_pairs; i++) {
4140 sprintf(vi->rq[i].name, "input.%u", i);
4141 sprintf(vi->sq[i].name, "output.%u", i);
4142 names[rxq2vq(i)] = vi->rq[i].name;
4143 names[txq2vq(i)] = vi->sq[i].name;
4148 ret = virtio_find_vqs_ctx(vi->vdev, total_vqs, vqs, callbacks,
4153 if (vi->has_cvq) {
4154 vi->cvq = vqs[total_vqs - 1];
4155 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
4156 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
4159 for (i = 0; i < vi->max_queue_pairs; i++) {
4160 vi->rq[i].vq = vqs[rxq2vq(i)];
4161 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
4162 vi->sq[i].vq = vqs[txq2vq(i)];
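The rxq2vq()/txq2vq() helpers used in the fragments above are not themselves shown in this listing; the following sketch assumes the conventional virtio-net layout in which RX and TX virtqueues alternate (rx0, tx0, rx1, tx1, ...) and the control queue, when present, comes last:

static int rxq2vq_sketch(int rxq) { return rxq * 2; }
static int txq2vq_sketch(int txq) { return txq * 2 + 1; }
static int vq2rxq_sketch(int vq)  { return vq / 2; }
static int vq2txq_sketch(int vq)  { return (vq - 1) / 2; }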
4180 static int virtnet_alloc_queues(struct virtnet_info *vi)
4184 if (vi->has_cvq) {
4185 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
4186 if (!vi->ctrl)
4189 vi->ctrl = NULL;
4191 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
4192 if (!vi->sq)
4194 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
4195 if (!vi->rq)
4198 INIT_DELAYED_WORK(&vi->refill, refill_work);
4199 for (i = 0; i < vi->max_queue_pairs; i++) {
4200 vi->rq[i].pages = NULL;
4201 netif_napi_add_weight(vi->dev, &vi->rq[i].napi, virtnet_poll,
4203 netif_napi_add_tx_weight(vi->dev, &vi->sq[i].napi,
4207 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
4208 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
4209 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
4211 u64_stats_init(&vi->rq[i].stats.syncp);
4212 u64_stats_init(&vi->sq[i].stats.syncp);
4218 kfree(vi->sq);
4220 kfree(vi->ctrl);
4225 static int init_vqs(struct virtnet_info *vi)
4230 ret = virtnet_alloc_queues(vi);
4234 ret = virtnet_find_vqs(vi);
4238 virtnet_rq_set_premapped(vi);
4241 virtnet_set_affinity(vi);
4247 virtnet_free_queues(vi);
4256 struct virtnet_info *vi = netdev_priv(queue->dev);
4258 unsigned int headroom = virtnet_get_headroom(vi);
4262 BUG_ON(queue_index >= vi->max_queue_pairs);
4263 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
4265 get_mergeable_buf_len(&vi->rq[queue_index], avg,
4356 static bool virtnet_check_guest_gso(const struct virtnet_info *vi)
4358 return virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
4359 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
4360 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
4361 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
4362 (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO4) &&
4363 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_USO6));
4366 static void virtnet_set_big_packets(struct virtnet_info *vi, const int mtu)
4368 bool guest_gso = virtnet_check_guest_gso(vi);
4375 vi->big_packets = true;
4376 vi->big_packets_num_skbfrags = guest_gso ? MAX_SKB_FRAGS : DIV_ROUND_UP(mtu, PAGE_SIZE);
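virtnet_set_big_packets() (4366-4376) sizes the big-packet scatterlist: MAX_SKB_FRAGS when the guest can receive GSO frames, otherwise just enough page-sized fragments to cover one MTU. A worked sketch with DIV_ROUND_UP written out; the PAGE_SIZE and MAX_SKB_FRAGS values below are illustrative assumptions, not the build-time constants:

#define SKETCH_PAGE_SIZE        4096
#define SKETCH_MAX_SKB_FRAGS    17

static unsigned int big_packets_skbfrags(int guest_gso, unsigned int mtu)
{
        if (guest_gso)
                return SKETCH_MAX_SKB_FRAGS;    /* GSO: size for a full-length frame */

        /* no GSO: round the MTU up to whole page-sized fragments */
        return (mtu + SKETCH_PAGE_SIZE - 1) / SKETCH_PAGE_SIZE;
}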
4384 struct virtnet_info *vi;
4471 vi = netdev_priv(dev);
4472 vi->dev = dev;
4473 vi->vdev = vdev;
4474 vdev->priv = vi;
4476 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
4477 spin_lock_init(&vi->refill_lock);
4480 vi->mergeable_rx_bufs = true;
4484 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_NOTF_COAL)) {
4485 vi->intr_coal_rx.max_usecs = 0;
4486 vi->intr_coal_tx.max_usecs = 0;
4487 vi->intr_coal_tx.max_packets = 0;
4488 vi->intr_coal_rx.max_packets = 0;
4492 vi->has_rss_hash_report = true;
4495 vi->has_rss = true;
4497 if (vi->has_rss || vi->has_rss_hash_report) {
4498 vi->rss_indir_table_size =
4501 vi->rss_key_size =
4504 vi->rss_hash_types_supported =
4506 vi->rss_hash_types_supported &=
4514 if (vi->has_rss_hash_report)
4515 vi->hdr_len = sizeof(struct virtio_net_hdr_v1_hash);
4518 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
4520 vi->hdr_len = sizeof(struct virtio_net_hdr);
4524 vi->any_header_sg = true;
4527 vi->has_cvq = true;
4548 virtnet_set_big_packets(vi, mtu);
4550 if (vi->any_header_sg)
4551 dev->needed_headroom = vi->hdr_len;
4555 vi->curr_queue_pairs = max_queue_pairs;
4557 vi->curr_queue_pairs = num_online_cpus();
4558 vi->max_queue_pairs = max_queue_pairs;
4561 err = init_vqs(vi);
4566 if (vi->mergeable_rx_bufs)
4569 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
4570 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
4575 vi->failover = net_failover_create(vi->dev);
4576 if (IS_ERR(vi->failover)) {
4577 err = PTR_ERR(vi->failover);
4582 if (vi->has_rss || vi->has_rss_hash_report)
4583 virtnet_init_default_rss(vi);
4597 _virtnet_set_queues(vi, vi->curr_queue_pairs);
4604 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
4608 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
4619 err = virtnet_cpu_notif_add(vi);
4628 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
4629 schedule_work(&vi->config_work);
4631 vi->status = VIRTIO_NET_S_LINK_UP;
4632 virtnet_update_settings(vi);
4637 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
4638 set_bit(guest_offloads[i], &vi->guest_offloads);
4639 vi->guest_offloads_capable = vi->guest_offloads;
4649 net_failover_destroy(vi->failover);
4652 cancel_delayed_work_sync(&vi->refill);
4653 free_receive_page_frags(vi);
4654 virtnet_del_vqs(vi);
4660 static void remove_vq_common(struct virtnet_info *vi)
4662 virtio_reset_device(vi->vdev);
4665 free_unused_bufs(vi);
4667 free_receive_bufs(vi);
4669 free_receive_page_frags(vi);
4671 virtnet_del_vqs(vi);
4676 struct virtnet_info *vi = vdev->priv;
4678 virtnet_cpu_notif_remove(vi);
4681 flush_work(&vi->config_work);
4683 unregister_netdev(vi->dev);
4685 net_failover_destroy(vi->failover);
4687 remove_vq_common(vi);
4689 free_netdev(vi->dev);
4694 struct virtnet_info *vi = vdev->priv;
4696 virtnet_cpu_notif_remove(vi);
4698 remove_vq_common(vi);
4705 struct virtnet_info *vi = vdev->priv;
4711 virtnet_set_queues(vi, vi->curr_queue_pairs);
4713 err = virtnet_cpu_notif_add(vi);
4716 remove_vq_common(vi);