Lines Matching refs:vi
309 /* Find end of list, sew whole thing into vi->rq.pages. */
328 static void enable_delayed_refill(struct virtnet_info *vi)
330 spin_lock_bh(&vi->refill_lock);
331 vi->refill_enabled = true;
332 spin_unlock_bh(&vi->refill_lock);
335 static void disable_delayed_refill(struct virtnet_info *vi)
337 spin_lock_bh(&vi->refill_lock);
338 vi->refill_enabled = false;
339 spin_unlock_bh(&vi->refill_lock);
367 struct virtnet_info *vi = vq->vdev->priv;
368 struct napi_struct *napi = &vi->sq[vq2txq(vq)].napi;
377 netif_wake_subqueue(vi->dev, vq2txq(vq));
398 static struct sk_buff *page_to_skb(struct virtnet_info *vi,
418 hdr_len = vi->hdr_len;
419 if (vi->mergeable_rx_bufs)
449 if (vi->mergeable_rx_bufs) {
484 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
491 if (unlikely(xdpf->headroom < vi->hdr_len))
495 xdpf->data -= vi->hdr_len;
498 memset(hdr, 0, vi->hdr_len);
499 xdpf->len += vi->hdr_len;
511 /* when vi->curr_queue_pairs > nr_cpu_ids, the txq/sq is only used for xdp tx on
519 #define virtnet_xdp_get_sq(vi) ({ \
521 typeof(vi) v = (vi); \
537 #define virtnet_xdp_put_sq(vi, q) { \
539 typeof(vi) v = (vi); \
551 struct virtnet_info *vi = netdev_priv(dev);
552 struct receive_queue *rq = vi->rq;
571 sq = virtnet_xdp_get_sq(vi);
598 err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
619 virtnet_xdp_put_sq(vi, sq);
623 static unsigned int virtnet_get_headroom(struct virtnet_info *vi)
625 return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0;
697 struct virtnet_info *vi,
708 unsigned int headroom = vi->hdr_len + header_offset;
717 len -= vi->hdr_len;
738 if (unlikely(xdp_headroom < virtnet_get_headroom(vi))) {
740 unsigned int tlen = len + vi->hdr_len;
743 xdp_headroom = virtnet_get_headroom(vi);
745 headroom = vi->hdr_len + header_offset;
759 xdp.data_hard_start = buf + VIRTNET_RX_PAD + vi->hdr_len;
783 trace_xdp_exception(vi->dev, xdp_prog, act);
801 trace_xdp_exception(vi->dev, xdp_prog, act);
817 memcpy(skb_vnet_hdr(skb), buf, vi->hdr_len);
837 struct virtnet_info *vi,
845 page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, true, 0);
847 stats->bytes += len - vi->hdr_len;
860 struct virtnet_info *vi,
869 u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
881 stats->bytes += len - vi->hdr_len;
917 headroom < virtnet_get_headroom(vi))) {
936 xdp.data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
937 xdp.data = data + vi->hdr_len;
938 xdp.data_end = xdp.data + (len - vi->hdr_len);
941 xdp.frame_sz = frame_sz - vi->hdr_len;
956 vi->hdr_len - metasize;
961 len = xdp.data_end - xdp.data + vi->hdr_len + metasize;
966 head_skb = page_to_skb(vi, rq, xdp_page, offset,
982 trace_xdp_exception(vi->dev, xdp_prog, act);
1009 trace_xdp_exception(vi->dev, xdp_prog, act);
1019 head_skb = page_to_skb(vi, rq, page, offset, len, truesize, !xdp_prog,
1032 virtio16_to_cpu(vi->vdev,
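The matches at 936-961 show how receive_mergeable() builds the xdp_buff it hands to an attached program: the buffer the device filled begins with the virtio-net header, so every field is shifted by vi->hdr_len, and the headroom reserved at refill time (VIRTIO_XDP_HEADROOM) sits in front of it. A minimal sketch of that arithmetic, reconstructed from those matches; the wrapper function name and the data_meta line are illustrative additions, and the copy-to-a-fresh-page and metasize handling around matches 917 and 956-966 are omitted.

/* Illustrative only: how the mergeable RX path appears to carve an
 * xdp_buff out of a receive buffer. "data" points at the virtio_net
 * header the device wrote, "len" includes that header, and "frame_sz"
 * is the truesize of the buffer.
 */
static void sketch_build_xdp_buff(struct virtnet_info *vi, struct xdp_buff *xdp,
                                  void *data, unsigned int len,
                                  unsigned int frame_sz)
{
        xdp->data_hard_start = data - VIRTIO_XDP_HEADROOM + vi->hdr_len;
        xdp->data = data + vi->hdr_len;                 /* first byte of the packet */
        xdp->data_end = xdp->data + (len - vi->hdr_len);
        xdp->data_meta = xdp->data;                     /* assumed: no metadata yet */
        xdp->frame_sz = frame_sz - vi->hdr_len;         /* excludes the vnet header */
}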
1106 static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
1111 struct net_device *dev = vi->dev;
1115 if (unlikely(len < vi->hdr_len + ETH_HLEN)) {
1118 if (vi->mergeable_rx_bufs) {
1120 } else if (vi->big_packets) {
1128 if (vi->mergeable_rx_bufs)
1129 skb = receive_mergeable(dev, vi, rq, buf, ctx, len, xdp_xmit,
1131 else if (vi->big_packets)
1132 skb = receive_big(dev, vi, rq, buf, len, stats);
1134 skb = receive_small(dev, vi, rq, buf, ctx, len, xdp_xmit, stats);
1145 virtio_is_little_endian(vi->vdev))) {
1170 static int add_recvbuf_small(struct virtnet_info *vi, struct receive_queue *rq,
1175 unsigned int xdp_headroom = virtnet_get_headroom(vi);
1177 int len = vi->hdr_len + VIRTNET_RX_PAD + GOOD_PACKET_LEN + xdp_headroom;
1189 vi->hdr_len + GOOD_PACKET_LEN);
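Matches 1170-1189 outline add_recvbuf_small(): a single page-fragment buffer holds padding, optional XDP headroom, the virtio-net header and a GOOD_PACKET_LEN payload, and only the header-plus-payload portion is exposed to the device. A hedged sketch of that layout; the helper name is made up, and the page-fragment refill, refcounting and virtqueue_add_inbuf_ctx() call of the real function are omitted.

/*
 * Buffer layout implied by the matches above (exact bookkeeping in the
 * driver may differ):
 *
 *   buf
 *   |<- VIRTNET_RX_PAD ->|<- xdp_headroom ->|<- hdr_len ->|<- GOOD_PACKET_LEN ->|
 *                                            ^
 *                                            start of the scatterlist entry
 *                                            handed to the device
 */
static void sketch_post_small_buf(struct virtnet_info *vi,
                                  struct receive_queue *rq, char *buf)
{
        unsigned int xdp_headroom = virtnet_get_headroom(vi);

        sg_init_one(rq->sg, buf + VIRTNET_RX_PAD + xdp_headroom,
                    vi->hdr_len + GOOD_PACKET_LEN);
        /* the real code then queues rq->sg on rq->vq and handles
         * allocation failure; omitted here */
}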
1196 static int add_recvbuf_big(struct virtnet_info *vi, struct receive_queue *rq,
1229 sg_set_buf(&rq->sg[0], p, vi->hdr_len);
1261 static int add_recvbuf_mergeable(struct virtnet_info *vi,
1265 unsigned int headroom = virtnet_get_headroom(vi);
1311 static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq,
1318 if (vi->mergeable_rx_bufs)
1319 err = add_recvbuf_mergeable(vi, rq, gfp);
1320 else if (vi->big_packets)
1321 err = add_recvbuf_big(vi, rq, gfp);
1323 err = add_recvbuf_small(vi, rq, gfp);
1342 struct virtnet_info *vi = rvq->vdev->priv;
1343 struct receive_queue *rq = &vi->rq[vq2rxq(rvq)];
1361 static void virtnet_napi_tx_enable(struct virtnet_info *vi,
1371 if (!vi->affinity_hint_set) {
1387 struct virtnet_info *vi =
1392 for (i = 0; i < vi->curr_queue_pairs; i++) {
1393 struct receive_queue *rq = &vi->rq[i];
1396 still_empty = !try_fill_recv(vi, rq, GFP_KERNEL);
1403 schedule_delayed_work(&vi->refill, HZ/2);
1410 struct virtnet_info *vi = rq->vq->vdev->priv;
1416 if (!vi->big_packets || vi->mergeable_rx_bufs) {
1421 receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats);
1427 receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats);
1433 if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
1434 spin_lock(&vi->refill_lock);
1435 if (vi->refill_enabled)
1436 schedule_delayed_work(&vi->refill, 0);
1437 spin_unlock(&vi->refill_lock);
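Taken together, the enable/disable_delayed_refill() matches (330-339) and the try_fill_recv() failure path in the receive loop (1433-1437) show how deferred refills are gated behind refill_lock so that a refill cannot be queued after virtnet_close() has cancelled the work. A condensed sketch of the consumer side, using the field and function names from the matches (the wrapper name is illustrative):

/* virtnet_open() calls enable_delayed_refill() and virtnet_close() calls
 * disable_delayed_refill() before cancel_delayed_work_sync(&vi->refill);
 * this check is what keeps a late RX completion from re-arming the refill
 * work after close has already cancelled it. Runs in NAPI (softirq)
 * context, hence spin_lock() rather than the _bh variant. */
static void sketch_maybe_schedule_refill(struct virtnet_info *vi,
                                         struct receive_queue *rq)
{
        if (!try_fill_recv(vi, rq, GFP_ATOMIC)) {
                /* out of memory in atomic context: retry later from
                 * process context, but only while the gate is open */
                spin_lock(&vi->refill_lock);
                if (vi->refill_enabled)
                        schedule_delayed_work(&vi->refill, 0);
                spin_unlock(&vi->refill_lock);
        }
}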
1490 static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
1492 if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
1494 else if (q < vi->curr_queue_pairs)
1502 struct virtnet_info *vi = rq->vq->vdev->priv;
1504 struct send_queue *sq = &vi->sq[index];
1505 struct netdev_queue *txq = netdev_get_tx_queue(vi->dev, index);
1507 if (!sq->napi.weight || is_xdp_raw_buffer_queue(vi, index))
1523 struct virtnet_info *vi = rq->vq->vdev->priv;
1540 sq = virtnet_xdp_get_sq(vi);
1546 virtnet_xdp_put_sq(vi, sq);
1554 struct virtnet_info *vi = netdev_priv(dev);
1557 enable_delayed_refill(vi);
1559 for (i = 0; i < vi->max_queue_pairs; i++) {
1560 if (i < vi->curr_queue_pairs)
1562 if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL))
1563 schedule_delayed_work(&vi->refill, 0);
1565 err = xdp_rxq_info_reg(&vi->rq[i].xdp_rxq, dev, i);
1569 err = xdp_rxq_info_reg_mem_model(&vi->rq[i].xdp_rxq,
1572 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1576 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
1577 virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
1586 struct virtnet_info *vi = sq->vq->vdev->priv;
1592 if (unlikely(is_xdp_raw_buffer_queue(vi, index))) {
1598 txq = netdev_get_tx_queue(vi->dev, index);
1633 struct virtnet_info *vi = sq->vq->vdev->priv;
1635 unsigned hdr_len = vi->hdr_len;
1638 pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
1640 can_push = vi->any_header_sg &&
1651 virtio_is_little_endian(vi->vdev), false,
1655 if (vi->mergeable_rx_bufs)
1678 struct virtnet_info *vi = netdev_priv(dev);
1680 struct send_queue *sq = &vi->sq[qnum];
1755 static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
1762 BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ));
1764 vi->ctrl->status = ~0;
1765 vi->ctrl->hdr.class = class;
1766 vi->ctrl->hdr.cmd = cmd;
1768 sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
1775 sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
1779 virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC);
1781 if (unlikely(!virtqueue_kick(vi->cvq)))
1782 return vi->ctrl->status == VIRTIO_NET_OK;
1787 while (!virtqueue_get_buf(vi->cvq, &tmp) &&
1788 !virtqueue_is_broken(vi->cvq))
1791 return vi->ctrl->status == VIRTIO_NET_OK;
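The virtnet_send_command() matches (1755-1791) show the control-virtqueue protocol: a class/command header scatterlist, optional command-specific data, and a writable status byte are chained into one virtqueue_add_sgs() call, followed by a kick and a busy-wait for the device's reply. A hedged reconstruction of that flow; the error check on virtqueue_add_sgs() is an addition here (the match at 1779 suggests the driver does not check it), and the feature BUG_ON is left out.

static bool sketch_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                                struct scatterlist *data_sg)
{
        struct scatterlist hdr, stat, *sgs[4];
        unsigned int out_num = 0, tmp;

        vi->ctrl->status = ~0;                  /* device overwrites on completion */
        vi->ctrl->hdr.class = class;
        vi->ctrl->hdr.cmd = cmd;

        sg_init_one(&hdr, &vi->ctrl->hdr, sizeof(vi->ctrl->hdr));
        sgs[out_num++] = &hdr;                  /* driver -> device */
        if (data_sg)
                sgs[out_num++] = data_sg;       /* optional payload */

        sg_init_one(&stat, &vi->ctrl->status, sizeof(vi->ctrl->status));
        sgs[out_num] = &stat;                   /* device -> driver */

        if (virtqueue_add_sgs(vi->cvq, sgs, out_num, 1, vi, GFP_ATOMIC) < 0)
                return false;                   /* assumed error handling */
        if (unlikely(!virtqueue_kick(vi->cvq)))
                return vi->ctrl->status == VIRTIO_NET_OK;

        /* Spin until the device consumes the buffer; cpu_relax() keeps the
         * busy-wait polite. */
        while (!virtqueue_get_buf(vi->cvq, &tmp) &&
               !virtqueue_is_broken(vi->cvq))
                cpu_relax();

        return vi->ctrl->status == VIRTIO_NET_OK;
}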
1796 struct virtnet_info *vi = netdev_priv(dev);
1797 struct virtio_device *vdev = vi->vdev;
1802 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
1815 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
1844 struct virtnet_info *vi = netdev_priv(dev);
1848 for (i = 0; i < vi->max_queue_pairs; i++) {
1850 struct receive_queue *rq = &vi->rq[i];
1851 struct send_queue *sq = &vi->sq[i];
1879 static void virtnet_ack_link_announce(struct virtnet_info *vi)
1882 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_ANNOUNCE,
1884 dev_warn(&vi->dev->dev, "Failed to ack link announce.\n");
1888 static int _virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1891 struct net_device *dev = vi->dev;
1893 if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
1896 vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
1897 sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));
1899 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
1905 vi->curr_queue_pairs = queue_pairs;
1908 schedule_delayed_work(&vi->refill, 0);
1914 static int virtnet_set_queues(struct virtnet_info *vi, u16 queue_pairs)
1919 err = _virtnet_set_queues(vi, queue_pairs);
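Matches 1888-1908 show _virtnet_set_queues() using that same control-queue helper to change the number of active RX/TX pairs: the count goes into ctrl->mq as a little-endian virtio16 under the VIRTIO_NET_CTRL_MQ class. A short sketch of the call pattern; VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET is the standard command constant for this class, and the interface-up check stands in for the driver's own condition.

static int sketch_set_queue_pairs(struct virtnet_info *vi, u16 queue_pairs)
{
        struct scatterlist sg;

        /* without a control vq or VIRTIO_NET_F_MQ the single default pair
         * stays in place */
        if (!vi->has_cvq || !virtio_has_feature(vi->vdev, VIRTIO_NET_F_MQ))
                return 0;

        vi->ctrl->mq.virtqueue_pairs = cpu_to_virtio16(vi->vdev, queue_pairs);
        sg_init_one(&sg, &vi->ctrl->mq, sizeof(vi->ctrl->mq));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MQ,
                                  VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET, &sg))
                return -EINVAL;

        vi->curr_queue_pairs = queue_pairs;
        /* if more RX queues just became active, top them up asynchronously */
        if (netif_running(vi->dev))
                schedule_delayed_work(&vi->refill, 0);

        return 0;
}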
1926 struct virtnet_info *vi = netdev_priv(dev);
1930 disable_delayed_refill(vi);
1932 cancel_delayed_work_sync(&vi->refill);
1934 for (i = 0; i < vi->max_queue_pairs; i++) {
1935 napi_disable(&vi->rq[i].napi);
1936 xdp_rxq_info_unreg(&vi->rq[i].xdp_rxq);
1937 virtnet_napi_tx_disable(&vi->sq[i].napi);
1945 struct virtnet_info *vi = netdev_priv(dev);
1955 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
1958 vi->ctrl->promisc = ((dev->flags & IFF_PROMISC) != 0);
1959 vi->ctrl->allmulti = ((dev->flags & IFF_ALLMULTI) != 0);
1961 sg_init_one(sg, &vi->ctrl->promisc, sizeof(vi->ctrl->promisc));
1963 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1966 vi->ctrl->promisc ? "en" : "dis");
1968 sg_init_one(sg, &vi->ctrl->allmulti, sizeof(vi->ctrl->allmulti));
1970 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
1973 vi->ctrl->allmulti ? "en" : "dis");
1987 mac_data->entries = cpu_to_virtio32(vi->vdev, uc_count);
1998 mac_data->entries = cpu_to_virtio32(vi->vdev, mc_count);
2006 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
2016 struct virtnet_info *vi = netdev_priv(dev);
2019 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2020 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2022 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2031 struct virtnet_info *vi = netdev_priv(dev);
2034 vi->ctrl->vid = cpu_to_virtio16(vi->vdev, vid);
2035 sg_init_one(&sg, &vi->ctrl->vid, sizeof(vi->ctrl->vid));
2037 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
2043 static void virtnet_clean_affinity(struct virtnet_info *vi)
2047 if (vi->affinity_hint_set) {
2048 for (i = 0; i < vi->max_queue_pairs; i++) {
2049 virtqueue_set_affinity(vi->rq[i].vq, NULL);
2050 virtqueue_set_affinity(vi->sq[i].vq, NULL);
2053 vi->affinity_hint_set = false;
2057 static void virtnet_set_affinity(struct virtnet_info *vi)
2067 virtnet_clean_affinity(vi);
2072 stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
2073 stragglers = num_cpu >= vi->curr_queue_pairs ?
2074 num_cpu % vi->curr_queue_pairs :
2078 for (i = 0; i < vi->curr_queue_pairs; i++) {
2086 virtqueue_set_affinity(vi->rq[i].vq, mask);
2087 virtqueue_set_affinity(vi->sq[i].vq, mask);
2088 __netif_set_xps_queue(vi->dev, cpumask_bits(mask), i, false);
2092 vi->affinity_hint_set = true;
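Matches 2057-2092 show virtnet_set_affinity() spreading the online CPUs over the active queue pairs: every queue gets `stride` CPUs, and the first `stragglers` queues get one extra so the remainder is absorbed evenly. A small worked sketch of that arithmetic; group_size and the function name are illustrative, and the cpumask walking is left as a comment.

/* Example: 8 online CPUs, 3 queue pairs.
 *   stride     = max(8 / 3, 1) = 2
 *   stragglers = 8 % 3         = 2
 * Queues 0 and 1 each get stride + 1 = 3 CPUs, queue 2 gets 2: 3 + 3 + 2 = 8.
 */
static void sketch_affinity_groups(struct virtnet_info *vi)
{
        unsigned int num_cpu = num_online_cpus();
        int stride = max_t(int, num_cpu / vi->curr_queue_pairs, 1);
        int stragglers = num_cpu >= vi->curr_queue_pairs ?
                         num_cpu % vi->curr_queue_pairs : 0;
        int i;

        for (i = 0; i < vi->curr_queue_pairs; i++) {
                int group_size = stride + (i < stragglers ? 1 : 0);

                /* the real code takes the next group_size CPUs from the
                 * online mask and applies them with virtqueue_set_affinity()
                 * and __netif_set_xps_queue() */
                pr_debug("queue %d gets %d CPUs\n", i, group_size);
        }
}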
2098 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2100 virtnet_set_affinity(vi);
2106 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2108 virtnet_set_affinity(vi);
2114 struct virtnet_info *vi = hlist_entry_safe(node, struct virtnet_info,
2117 virtnet_clean_affinity(vi);
2123 static int virtnet_cpu_notif_add(struct virtnet_info *vi)
2127 ret = cpuhp_state_add_instance_nocalls(virtionet_online, &vi->node);
2131 &vi->node_dead);
2134 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2138 static void virtnet_cpu_notif_remove(struct virtnet_info *vi)
2140 cpuhp_state_remove_instance_nocalls(virtionet_online, &vi->node);
2142 &vi->node_dead);
2148 struct virtnet_info *vi = netdev_priv(dev);
2150 ring->rx_max_pending = virtqueue_get_vring_size(vi->rq[0].vq);
2151 ring->tx_max_pending = virtqueue_get_vring_size(vi->sq[0].vq);
2160 struct virtnet_info *vi = netdev_priv(dev);
2161 struct virtio_device *vdev = vi->vdev;
2173 struct virtnet_info *vi = netdev_priv(dev);
2183 if (queue_pairs > vi->max_queue_pairs || queue_pairs == 0)
2190 if (vi->rq[0].xdp_prog)
2194 err = _virtnet_set_queues(vi, queue_pairs);
2199 virtnet_set_affinity(vi);
2210 struct virtnet_info *vi = netdev_priv(dev);
2216 for (i = 0; i < vi->curr_queue_pairs; i++) {
2224 for (i = 0; i < vi->curr_queue_pairs; i++) {
2237 struct virtnet_info *vi = netdev_priv(dev);
2241 return vi->curr_queue_pairs * (VIRTNET_RQ_STATS_LEN +
2251 struct virtnet_info *vi = netdev_priv(dev);
2256 for (i = 0; i < vi->curr_queue_pairs; i++) {
2257 struct receive_queue *rq = &vi->rq[i];
2270 for (i = 0; i < vi->curr_queue_pairs; i++) {
2271 struct send_queue *sq = &vi->sq[i];
2288 struct virtnet_info *vi = netdev_priv(dev);
2290 channels->combined_count = vi->curr_queue_pairs;
2291 channels->max_combined = vi->max_queue_pairs;
2301 struct virtnet_info *vi = netdev_priv(dev);
2304 &vi->speed, &vi->duplex);
2310 struct virtnet_info *vi = netdev_priv(dev);
2312 cmd->base.speed = vi->speed;
2313 cmd->base.duplex = vi->duplex;
2322 struct virtnet_info *vi = netdev_priv(dev);
2330 if (napi_weight ^ vi->sq[0].napi.weight) {
2333 for (i = 0; i < vi->max_queue_pairs; i++)
2334 vi->sq[i].napi.weight = napi_weight;
2347 struct virtnet_info *vi = netdev_priv(dev);
2351 if (vi->sq[0].napi.weight)
2359 struct virtnet_info *vi = netdev_priv(dev);
2361 vi->speed = SPEED_UNKNOWN;
2362 vi->duplex = DUPLEX_UNKNOWN;
2365 static void virtnet_update_settings(struct virtnet_info *vi)
2370 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_SPEED_DUPLEX))
2373 virtio_cread_le(vi->vdev, struct virtio_net_config, speed, &speed);
2376 vi->speed = speed;
2378 virtio_cread_le(vi->vdev, struct virtio_net_config, duplex, &duplex);
2381 vi->duplex = duplex;
2403 struct virtnet_info *vi = vdev->priv;
2406 flush_work(&vi->config_work);
2408 netif_tx_lock_bh(vi->dev);
2409 netif_device_detach(vi->dev);
2410 netif_tx_unlock_bh(vi->dev);
2411 if (netif_running(vi->dev))
2412 virtnet_close(vi->dev);
2415 static int init_vqs(struct virtnet_info *vi);
2419 struct virtnet_info *vi = vdev->priv;
2422 err = init_vqs(vi);
2428 enable_delayed_refill(vi);
2430 if (netif_running(vi->dev)) {
2431 err = virtnet_open(vi->dev);
2436 netif_tx_lock_bh(vi->dev);
2437 netif_device_attach(vi->dev);
2438 netif_tx_unlock_bh(vi->dev);
2442 static int virtnet_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
2445 vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
2447 sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));
2449 if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
2451 dev_warn(&vi->dev->dev, "Fail to set guest offload.\n");
2458 static int virtnet_clear_guest_offloads(struct virtnet_info *vi)
2462 if (!vi->guest_offloads)
2465 return virtnet_set_guest_offloads(vi, offloads);
2468 static int virtnet_restore_guest_offloads(struct virtnet_info *vi)
2470 u64 offloads = vi->guest_offloads;
2472 if (!vi->guest_offloads)
2475 return virtnet_set_guest_offloads(vi, offloads);
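Matches 2442-2475 cover the guest-offload control command that the XDP path below relies on: the desired offload bits are written into ctrl->offloads as a little-endian 64-bit value and sent under the VIRTIO_NET_CTRL_GUEST_OFFLOADS class; clearing sends 0 and restoring re-sends the saved vi->guest_offloads. A brief sketch; VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET is the standard command for this class, and the sketch_* names are illustrative.

static int sketch_set_guest_offloads(struct virtnet_info *vi, u64 offloads)
{
        struct scatterlist sg;

        vi->ctrl->offloads = cpu_to_virtio64(vi->vdev, offloads);
        sg_init_one(&sg, &vi->ctrl->offloads, sizeof(vi->ctrl->offloads));

        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                  VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET, &sg)) {
                dev_warn(&vi->dev->dev, "Failed to set guest offloads\n");
                return -EINVAL;
        }
        return 0;
}

/* Clearing offloads before attaching an XDP program, and restoring them
 * afterwards, are just two calls with different masks: */
static int sketch_clear_guest_offloads(struct virtnet_info *vi)
{
        return vi->guest_offloads ? sketch_set_guest_offloads(vi, 0) : 0;
}

static int sketch_restore_guest_offloads(struct virtnet_info *vi)
{
        return vi->guest_offloads ?
               sketch_set_guest_offloads(vi, vi->guest_offloads) : 0;
}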
2482 struct virtnet_info *vi = netdev_priv(dev);
2487 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)
2488 && (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
2489 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
2490 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
2491 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_UFO) ||
2492 virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_CSUM))) {
2497 if (vi->mergeable_rx_bufs && !vi->any_header_sg) {
2508 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
2513 if (curr_qp + xdp_qp > vi->max_queue_pairs) {
2515 curr_qp + xdp_qp, vi->max_queue_pairs);
2519 old_prog = rtnl_dereference(vi->rq[0].xdp_prog);
2524 bpf_prog_add(prog, vi->max_queue_pairs - 1);
2528 for (i = 0; i < vi->max_queue_pairs; i++) {
2529 napi_disable(&vi->rq[i].napi);
2530 virtnet_napi_tx_disable(&vi->sq[i].napi);
2535 for (i = 0; i < vi->max_queue_pairs; i++) {
2536 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2538 virtnet_restore_guest_offloads(vi);
2543 err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
2547 vi->xdp_queue_pairs = xdp_qp;
2550 vi->xdp_enabled = true;
2551 for (i = 0; i < vi->max_queue_pairs; i++) {
2552 rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
2554 virtnet_clear_guest_offloads(vi);
2557 vi->xdp_enabled = false;
2560 for (i = 0; i < vi->max_queue_pairs; i++) {
2564 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2565 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2566 &vi->sq[i].napi);
2574 virtnet_clear_guest_offloads(vi);
2575 for (i = 0; i < vi->max_queue_pairs; i++)
2576 rcu_assign_pointer(vi->rq[i].xdp_prog, old_prog);
2580 for (i = 0; i < vi->max_queue_pairs; i++) {
2581 virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
2582 virtnet_napi_tx_enable(vi, vi->sq[i].vq,
2583 &vi->sq[i].napi);
2587 bpf_prog_sub(prog, vi->max_queue_pairs - 1);
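Matches 2482-2587 belong to virtnet_xdp_set(), the heart of XDP attach/detach. The sequence they imply is roughly: reject configurations that cannot support XDP (guest TSO/UFO/CSUM offloads without CTRL_GUEST_OFFLOADS, mergeable buffers without any_header_sg), reserve one extra TX queue per CPU for XDP_TX, quiesce NAPI, publish the program on every RX queue with rcu_assign_pointer(), toggle the guest offloads, and unwind in reverse on failure. A condensed sketch of that flow; program reference counting, detaching the old program and the feature checks are omitted, and the function name is illustrative.

static int sketch_xdp_attach(struct net_device *dev, struct bpf_prog *prog)
{
        struct virtnet_info *vi = netdev_priv(dev);
        u16 xdp_qp = prog ? nr_cpu_ids : 0;     /* one XDP_TX queue per CPU */
        u16 curr_qp = vi->curr_queue_pairs - vi->xdp_queue_pairs;
        int i, err;

        if (curr_qp + xdp_qp > vi->max_queue_pairs)
                return -ENOMEM;                 /* not enough queues for XDP_TX */

        /* quiesce the datapath while the program pointer is swapped */
        if (netif_running(dev)) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        napi_disable(&vi->rq[i].napi);
                        virtnet_napi_tx_disable(&vi->sq[i].napi);
                }
        }

        err = _virtnet_set_queues(vi, curr_qp + xdp_qp);
        if (err)
                goto out_enable;                /* real code also re-publishes the old prog */

        vi->xdp_queue_pairs = xdp_qp;
        if (prog) {
                vi->xdp_enabled = true;         /* RX now reserves VIRTIO_XDP_HEADROOM */
                for (i = 0; i < vi->max_queue_pairs; i++)
                        rcu_assign_pointer(vi->rq[i].xdp_prog, prog);
                virtnet_clear_guest_offloads(vi);       /* no receive offloads under XDP */
        } else {
                vi->xdp_enabled = false;
                for (i = 0; i < vi->max_queue_pairs; i++)
                        rcu_assign_pointer(vi->rq[i].xdp_prog, NULL);
                virtnet_restore_guest_offloads(vi);
        }
        err = 0;

out_enable:
        if (netif_running(dev)) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi);
                        virtnet_napi_tx_enable(vi, vi->sq[i].vq, &vi->sq[i].napi);
                }
        }
        return err;
}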
2604 struct virtnet_info *vi = netdev_priv(dev);
2607 if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STANDBY))
2620 struct virtnet_info *vi = netdev_priv(dev);
2625 if (vi->xdp_enabled)
2629 offloads = vi->guest_offloads_capable;
2631 offloads = vi->guest_offloads_capable &
2634 err = virtnet_set_guest_offloads(vi, offloads);
2637 vi->guest_offloads = offloads;
2662 struct virtnet_info *vi =
2666 if (virtio_cread_feature(vi->vdev, VIRTIO_NET_F_STATUS,
2671 netdev_notify_peers(vi->dev);
2672 virtnet_ack_link_announce(vi);
2678 if (vi->status == v)
2681 vi->status = v;
2683 if (vi->status & VIRTIO_NET_S_LINK_UP) {
2684 virtnet_update_settings(vi);
2685 netif_carrier_on(vi->dev);
2686 netif_tx_wake_all_queues(vi->dev);
2688 netif_carrier_off(vi->dev);
2689 netif_tx_stop_all_queues(vi->dev);
2695 struct virtnet_info *vi = vdev->priv;
2697 schedule_work(&vi->config_work);
2700 static void virtnet_free_queues(struct virtnet_info *vi)
2704 for (i = 0; i < vi->max_queue_pairs; i++) {
2705 __netif_napi_del(&vi->rq[i].napi);
2706 __netif_napi_del(&vi->sq[i].napi);
2710 * we need to respect an RCU grace period before freeing vi->rq
2714 kfree(vi->rq);
2715 kfree(vi->sq);
2716 kfree(vi->ctrl);
2719 static void _free_receive_bufs(struct virtnet_info *vi)
2724 for (i = 0; i < vi->max_queue_pairs; i++) {
2725 while (vi->rq[i].pages)
2726 __free_pages(get_a_page(&vi->rq[i], GFP_KERNEL), 0);
2728 old_prog = rtnl_dereference(vi->rq[i].xdp_prog);
2729 RCU_INIT_POINTER(vi->rq[i].xdp_prog, NULL);
2735 static void free_receive_bufs(struct virtnet_info *vi)
2738 _free_receive_bufs(vi);
2742 static void free_receive_page_frags(struct virtnet_info *vi)
2745 for (i = 0; i < vi->max_queue_pairs; i++)
2746 if (vi->rq[i].alloc_frag.page)
2747 put_page(vi->rq[i].alloc_frag.page);
2760 struct virtnet_info *vi = vq->vdev->priv;
2763 if (vi->mergeable_rx_bufs)
2765 else if (vi->big_packets)
2766 give_pages(&vi->rq[i], buf);
2771 static void free_unused_bufs(struct virtnet_info *vi)
2776 for (i = 0; i < vi->max_queue_pairs; i++) {
2777 struct virtqueue *vq = vi->sq[i].vq;
2783 for (i = 0; i < vi->max_queue_pairs; i++) {
2784 struct virtqueue *vq = vi->rq[i].vq;
2791 static void virtnet_del_vqs(struct virtnet_info *vi)
2793 struct virtio_device *vdev = vi->vdev;
2795 virtnet_clean_affinity(vi);
2799 virtnet_free_queues(vi);
2806 static unsigned int mergeable_min_buf_len(struct virtnet_info *vi, struct virtqueue *vq)
2810 unsigned int packet_len = vi->big_packets ? IP_MAX_MTU : vi->dev->max_mtu;
2818 static int virtnet_find_vqs(struct virtnet_info *vi)
2832 total_vqs = vi->max_queue_pairs * 2 +
2833 virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ);
2845 if (!vi->big_packets || vi->mergeable_rx_bufs) {
2854 if (vi->has_cvq) {
2860 for (i = 0; i < vi->max_queue_pairs; i++) {
2863 sprintf(vi->rq[i].name, "input.%u", i);
2864 sprintf(vi->sq[i].name, "output.%u", i);
2865 names[rxq2vq(i)] = vi->rq[i].name;
2866 names[txq2vq(i)] = vi->sq[i].name;
2871 ret = vi->vdev->config->find_vqs(vi->vdev, total_vqs, vqs, callbacks,
2876 if (vi->has_cvq) {
2877 vi->cvq = vqs[total_vqs - 1];
2878 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
2879 vi->dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2882 for (i = 0; i < vi->max_queue_pairs; i++) {
2883 vi->rq[i].vq = vqs[rxq2vq(i)];
2884 vi->rq[i].min_buf_len = mergeable_min_buf_len(vi, vi->rq[i].vq);
2885 vi->sq[i].vq = vqs[txq2vq(i)];
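Matches 2818-2885 show virtnet_find_vqs() allocating two virtqueues per queue pair plus an optional control vq at the end, and wiring them back through rxq2vq()/txq2vq(). The virtio-net layout interleaves the queues (receiveq0, transmitq0, receiveq1, transmitq1, ..., controlq last), so the index helpers reduce to simple arithmetic. A sketch of what those helpers look like, written out here for reference and consistent with the layout above:

/* queue-pair index <-> virtqueue index, for the interleaved layout */
static int rxq2vq(int rxq)
{
        return rxq * 2;                 /* receiveqN sits at even slots */
}

static int txq2vq(int txq)
{
        return txq * 2 + 1;             /* transmitqN follows its receive queue */
}

static int vq2rxq(struct virtqueue *vq)
{
        return vq->index / 2;
}

static int vq2txq(struct virtqueue *vq)
{
        return (vq->index - 1) / 2;
}
/* and, per the matches, the control vq is simply vqs[total_vqs - 1] */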
2903 static int virtnet_alloc_queues(struct virtnet_info *vi)
2907 vi->ctrl = kzalloc(sizeof(*vi->ctrl), GFP_KERNEL);
2908 if (!vi->ctrl)
2910 vi->sq = kcalloc(vi->max_queue_pairs, sizeof(*vi->sq), GFP_KERNEL);
2911 if (!vi->sq)
2913 vi->rq = kcalloc(vi->max_queue_pairs, sizeof(*vi->rq), GFP_KERNEL);
2914 if (!vi->rq)
2917 INIT_DELAYED_WORK(&vi->refill, refill_work);
2918 for (i = 0; i < vi->max_queue_pairs; i++) {
2919 vi->rq[i].pages = NULL;
2920 netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
2922 netif_tx_napi_add(vi->dev, &vi->sq[i].napi, virtnet_poll_tx,
2925 sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
2926 ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
2927 sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg));
2929 u64_stats_init(&vi->rq[i].stats.syncp);
2930 u64_stats_init(&vi->sq[i].stats.syncp);
2936 kfree(vi->sq);
2938 kfree(vi->ctrl);
2943 static int init_vqs(struct virtnet_info *vi)
2948 ret = virtnet_alloc_queues(vi);
2952 ret = virtnet_find_vqs(vi);
2957 virtnet_set_affinity(vi);
2963 virtnet_free_queues(vi);
2972 struct virtnet_info *vi = netdev_priv(queue->dev);
2974 unsigned int headroom = virtnet_get_headroom(vi);
2978 BUG_ON(queue_index >= vi->max_queue_pairs);
2979 avg = &vi->rq[queue_index].mrg_avg_pkt_len;
2981 get_mergeable_buf_len(&vi->rq[queue_index], avg,
3062 struct virtnet_info *vi;
3138 vi = netdev_priv(dev);
3139 vi->dev = dev;
3140 vi->vdev = vdev;
3141 vdev->priv = vi;
3143 INIT_WORK(&vi->config_work, virtnet_config_changed_work);
3144 spin_lock_init(&vi->refill_lock);
3151 vi->big_packets = true;
3154 vi->mergeable_rx_bufs = true;
3158 vi->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
3160 vi->hdr_len = sizeof(struct virtio_net_hdr);
3164 vi->any_header_sg = true;
3167 vi->has_cvq = true;
3189 vi->big_packets = true;
3192 if (vi->any_header_sg)
3193 dev->needed_headroom = vi->hdr_len;
3197 vi->curr_queue_pairs = max_queue_pairs;
3199 vi->curr_queue_pairs = num_online_cpus();
3200 vi->max_queue_pairs = max_queue_pairs;
3203 err = init_vqs(vi);
3208 if (vi->mergeable_rx_bufs)
3211 netif_set_real_num_tx_queues(dev, vi->curr_queue_pairs);
3212 netif_set_real_num_rx_queues(dev, vi->curr_queue_pairs);
3217 vi->failover = net_failover_create(vi->dev);
3218 if (IS_ERR(vi->failover)) {
3219 err = PTR_ERR(vi->failover);
3236 _virtnet_set_queues(vi, vi->curr_queue_pairs);
3240 err = virtnet_cpu_notif_add(vi);
3249 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
3250 schedule_work(&vi->config_work);
3252 vi->status = VIRTIO_NET_S_LINK_UP;
3253 virtnet_update_settings(vi);
3258 if (virtio_has_feature(vi->vdev, guest_offloads[i]))
3259 set_bit(guest_offloads[i], &vi->guest_offloads);
3260 vi->guest_offloads_capable = vi->guest_offloads;
3268 vi->vdev->config->reset(vdev);
3272 net_failover_destroy(vi->failover);
3274 cancel_delayed_work_sync(&vi->refill);
3275 free_receive_page_frags(vi);
3276 virtnet_del_vqs(vi);
3282 static void remove_vq_common(struct virtnet_info *vi)
3284 vi->vdev->config->reset(vi->vdev);
3287 free_unused_bufs(vi);
3289 free_receive_bufs(vi);
3291 free_receive_page_frags(vi);
3293 virtnet_del_vqs(vi);
3298 struct virtnet_info *vi = vdev->priv;
3300 virtnet_cpu_notif_remove(vi);
3303 flush_work(&vi->config_work);
3305 unregister_netdev(vi->dev);
3307 net_failover_destroy(vi->failover);
3309 remove_vq_common(vi);
3311 free_netdev(vi->dev);
3316 struct virtnet_info *vi = vdev->priv;
3318 virtnet_cpu_notif_remove(vi);
3320 remove_vq_common(vi);
3327 struct virtnet_info *vi = vdev->priv;
3333 virtnet_set_queues(vi, vi->curr_queue_pairs);
3335 err = virtnet_cpu_notif_add(vi);
3338 remove_vq_common(vi);
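The freeze/restore matches at the end (3316-3338) complete the power-management picture: freeze removes the CPU-hotplug notifier and tears the virtqueues down with remove_vq_common(), while restore rebuilds them, re-issues the queue-pair count over the control vq, re-adds the notifier, and tears down again if that fails. A compact sketch of the pair; the helper names virtnet_freeze_down()/virtnet_restore_up() are assumed to be the functions the matches at 2403-2438 belong to, and the exact error unwinding may differ.

static int sketch_virtnet_freeze(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;

        virtnet_cpu_notif_remove(vi);           /* stop reacting to CPU hotplug */
        virtnet_freeze_down(vdev);              /* detach netdev, close if running */
        remove_vq_common(vi);                   /* reset device, free buffers and vqs */
        return 0;
}

static int sketch_virtnet_restore(struct virtio_device *vdev)
{
        struct virtnet_info *vi = vdev->priv;
        int err;

        err = virtnet_restore_up(vdev);         /* init_vqs() + reopen + attach */
        if (err)
                return err;

        virtnet_set_queues(vi, vi->curr_queue_pairs);   /* re-tell the device */

        err = virtnet_cpu_notif_add(vi);
        if (err) {
                remove_vq_common(vi);           /* unwind as in the matches */
                return err;
        }
        return 0;
}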