Lines matching refs: nvdev (drivers/net/hyperv/netvsc_drv.c)
87 struct netvsc_device *nvdev;
96 nvdev = rcu_dereference(ndev_ctx->nvdev);
97 if (nvdev)
98 rndis_filter_update(nvdev);
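These first matches show the RCU read-side pattern used whenever nvdev is touched without holding the RTNL lock: the pointer is fetched with rcu_dereference() and NULL-checked before use, since the device may be detached concurrently. A minimal sketch of that pattern, with the surrounding rcu_read_lock()/rcu_read_unlock() pair reconstructed from context (in the driver this is the rx-mode/filter-update path):

	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;

	rcu_read_lock();
	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)		/* NULL while the device is detached */
		rndis_filter_update(nvdev);
	rcu_read_unlock();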
115 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
122 ret = rndis_filter_open(nvdev);
128 rdev = nvdev->extension;
131 netvsc_tx_enable(nvdev, net);
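Lines 115-131 come from the ndo_open path, which already runs under RTNL, so rtnl_dereference() is used instead of the RCU variant. A condensed sketch of the open sequence; the link-state check and error logging are reconstructed from context:

static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret;

	ret = rndis_filter_open(nvdev);	/* ask the host to start the device */
	if (ret != 0)
		return ret;

	rdev = nvdev->extension;
	if (!rdev->link_state) {	/* carrier up only if host reports link */
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}
	return 0;
}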
148 static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
157 for (i = 0; i < nvdev->num_chn; i++) {
159 = nvdev->chan_table[i].channel;
165 napi_synchronize(&nvdev->chan_table[i].napi);
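netvsc_wait_until_empty() (lines 148-165) polls every channel's VMBus ring until both directions have drained, synchronizing NAPI first so in-flight receive completions are flushed. A sketch assuming the retry constants (RETRY_MAX, RETRY_US_LO/RETRY_US_HI) defined elsewhere in the driver:

static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			/* make sure receive completion packet is sent */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}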
202 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
205 netvsc_tx_disable(nvdev, net);
208 if (!nvdev)
211 ret = rndis_filter_close(nvdev);
217 ret = netvsc_wait_until_empty(nvdev);
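Lines 202-217 are the corresponding ndo_stop path: transmit is quiesced first (netvsc_tx_disable() tolerates a NULL nvdev), then the RNDIS filter is closed and the rings drained. A sketch of that ordering, with logging trimmed:

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netif_carrier_off(net);
	netvsc_tx_disable(nvdev, net);

	/* no device to close if the rndis filter was already removed */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0)
		return ret;

	return netvsc_wait_until_empty(nvdev);
}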
965 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
967 if (nvdev) {
968 channel->max_combined = nvdev->max_chn;
969 channel->combined_count = nvdev->num_chn;
977 struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
987 if (nvdev) {
990 dev_info->num_chn = nvdev->num_chn;
991 dev_info->send_sections = nvdev->send_section_cnt;
992 dev_info->send_section_size = nvdev->send_section_size;
993 dev_info->recv_sections = nvdev->recv_section_cnt;
994 dev_info->recv_section_size = nvdev->recv_section_size;
996 memcpy(dev_info->rss_key, nvdev->extension->rss_key,
999 prog = netvsc_xdp_get(nvdev);
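netvsc_devinfo_get() (lines 977-999) snapshots the tunable state (channel count, send/receive buffer sectioning, RSS key, attached XDP program) so it can be replayed after a detach/attach cycle. A trimmed sketch; the allocation flags and the default-values branch taken when nvdev is NULL are condensed:

struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);	/* hold a ref across reattach */
			dev_info->bprog = prog;
		}
	}
	return dev_info;
}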
1027 struct netvsc_device *nvdev)
1034 if (cancel_work_sync(&nvdev->subchan_work))
1035 nvdev->num_chn = 1;
1037 netvsc_xdp_set(ndev, NULL, NULL, nvdev);
1041 netvsc_tx_disable(nvdev, ndev);
1043 ret = rndis_filter_close(nvdev);
1050 ret = netvsc_wait_until_empty(nvdev);
1060 rndis_filter_device_remove(hdev, nvdev);
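netvsc_detach() (lines 1027-1060) tears the device down in a strict order: stop the subchannel worker, drop any XDP program, quiesce transmit, close the RNDIS filter, drain the rings, and only then remove the device. A sketch of that sequence with error logging condensed:

static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* if subchannel setup is still in flight, fall back to one channel */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);	/* detach XDP program */

	/* if the device was up (receiving), shut it down first */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret)
			return ret;

		ret = netvsc_wait_until_empty(nvdev);
		if (ret)
			return ret;
	}

	netif_device_detach(ndev);
	rndis_filter_device_remove(hdev, nvdev);
	return 0;
}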
1070 struct netvsc_device *nvdev;
1075 nvdev = rndis_filter_device_add(hdev, dev_info);
1076 if (IS_ERR(nvdev))
1077 return PTR_ERR(nvdev);
1079 if (nvdev->num_chn > 1) {
1080 ret = rndis_set_subchannel(ndev, nvdev, dev_info);
1084 nvdev->max_chn = 1;
1085 nvdev->num_chn = 1;
1092 ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
1100 nvdev->tx_disable = false;
1107 ret = rndis_filter_open(nvdev);
1111 rdev = nvdev->extension;
1122 rndis_filter_device_remove(hdev, nvdev);
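netvsc_attach() (lines 1070-1122) is the mirror image: re-create the RNDIS device, restore subchannels and the saved XDP program, clear tx_disable, and reopen if the interface was up; the final match (line 1122) is the rollback on failure. A condensed sketch, with the XDP program reference counting trimmed:

static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
		if (ret) {	/* if unavailable, proceed with one queue */
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	if (dev_info->bprog) {
		ret = netvsc_xdp_set(ndev, dev_info->bprog, NULL, nvdev);
		if (ret)
			goto err;
	}

	/* in any case the device is now ready to transmit */
	nvdev->tx_disable = false;
	netif_device_attach(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			goto err;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}
	return 0;

err:
	rndis_filter_device_remove(hdev, nvdev);
	return ret;
}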
1131 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
1141 if (!nvdev || nvdev->destroy)
1144 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1147 if (count > nvdev->max_chn)
1150 orig = nvdev->num_chn;
1152 device_info = netvsc_devinfo_get(nvdev);
1159 ret = netvsc_detach(net, nvdev);
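netvsc_set_channels() (lines 1131-1159) shows the reconfiguration idiom used throughout the driver, including the MTU and ringparam handlers below: validate, snapshot state with netvsc_devinfo_get(), detach, apply the new setting, then attach again, retrying with the old value on failure. A sketch of the core of that idiom; the rx/tx/other count validation is trimmed, and netvsc_devinfo_put() is assumed as the matching release helper:

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;	/* subchannels need NVSP 5+ */

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);
	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {	/* retry with the original channel count */
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	netvsc_devinfo_put(device_info);
	return ret;
}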
1227 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1232 if (!nvdev || nvdev->destroy)
1235 device_info = netvsc_devinfo_get(nvdev);
1247 ret = netvsc_detach(ndev, nvdev);
1305 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1329 for (i = 0; i < nvdev->num_chn; i++) {
1330 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1363 struct netvsc_device *nvdev;
1369 nvdev = rcu_dereference(ndev_ctx->nvdev);
1370 if (!nvdev)
1382 for (i = 0; i < nvdev->num_chn; i++) {
1383 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1418 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1426 if (!nvdev)
1435 err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
1499 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1501 if (!nvdev)
1508 + NETVSC_QUEUE_STATS_LEN(nvdev)
1519 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1529 if (!nvdev)
1539 for (j = 0; j < nvdev->num_chn; j++) {
1540 qstats = &nvdev->chan_table[j].tx_stats;
1550 qstats = &nvdev->chan_table[j].rx_stats;
1582 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1586 if (!nvdev)
1601 for (i = 0; i < nvdev->num_chn; i++) {
1674 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1676 if (!nvdev)
1681 info->data = nvdev->num_chn;
1772 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1798 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1831 static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1836 ring->rx_pending = nvdev->recv_section_cnt;
1837 ring->tx_pending = nvdev->send_section_cnt;
1839 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1844 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1846 / nvdev->send_section_size;
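__netvsc_get_ringparam() (lines 1831-1846) derives the maximum ring sizes from the host buffer size divided by the negotiated section size; old hosts (NVSP 2 and earlier) use the smaller legacy receive buffer. A sketch, assuming the buffer-size constants defined in the driver headers:

static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}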
1853 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1855 if (!nvdev)
1858 __netvsc_get_ringparam(nvdev, ring);
1865 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1871 if (!nvdev || nvdev->destroy)
1875 __netvsc_get_ringparam(nvdev, &orig);
1886 device_info = netvsc_devinfo_get(nvdev);
1894 ret = netvsc_detach(ndev, nvdev);
1916 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1918 if (!nvdev || nvdev->destroy)
1921 if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
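Lines 1916-1921 are the ndo_fix_features hook: LRO and XDP are mutually exclusive, so the LRO flag is masked off while a program is attached. A sketch of that check:

static netdev_features_t netvsc_fix_features(struct net_device *ndev,
					     netdev_features_t features)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev || nvdev->destroy)
		return features;

	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
		features ^= NETIF_F_LRO;	/* LRO is incompatible with XDP */
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
	}

	return features;
}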
1934 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1939 if (!nvdev || nvdev->destroy)
1955 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
2067 net_device = rtnl_dereference(ndev_ctx->nvdev);
2162 if (!rtnl_dereference(net_device_ctx->nvdev))
2365 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2426 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2477 struct netvsc_device *nvdev;
2531 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2546 nvdev = rndis_filter_device_add(dev, device_info);
2547 if (IS_ERR(nvdev)) {
2548 ret = PTR_ERR(nvdev);
2555 if (nvdev->num_chn > 1)
2556 schedule_work(&nvdev->subchan_work);
2568 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2573 nvdev->tx_disable = false;
2588 rndis_filter_device_remove(dev, nvdev);
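In netvsc_probe() (lines 2477-2588) the first channel is set up under RTNL, subchannel creation is deferred to nvdev->subchan_work, and transmit is only enabled once setup succeeds. A condensed sketch of that fragment; the MTU clamping is reconstructed from context:

	rtnl_lock();	/* must hold rtnl before scheduling subchan_work */

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		goto rndis_failed;
	}

	/* remaining channels are brought up asynchronously */
	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	nvdev->tx_disable = false;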
2605 struct netvsc_device *nvdev;
2618 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2619 if (nvdev) {
2620 cancel_work_sync(&nvdev->subchan_work);
2621 netvsc_xdp_set(net, NULL, NULL, nvdev);
2632 if (nvdev)
2633 rndis_filter_device_remove(dev, nvdev);
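netvsc_remove() (lines 2605-2633) undoes probe under the same lock: cancel the subchannel worker and detach XDP before the device itself is removed. A condensed sketch of the fragment these matches come from:

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);
		netvsc_xdp_set(net, NULL, NULL, nvdev);
	}

	/* ... VF and net device unregistration happen here ... */

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);
	rtnl_unlock();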
2650 struct netvsc_device *nvdev;
2661 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2662 if (nvdev == NULL) {
2668 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2673 ret = netvsc_detach(net, nvdev);
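Finally, netvsc_suspend() (lines 2650-2673) reuses the same machinery: it saves the current settings into ndev_ctx->saved_netvsc_dev_info and detaches, so resume can simply attach again with the saved snapshot. A sketch with the VF handling condensed:

static int netvsc_suspend(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct netvsc_device *nvdev;
	struct net_device *net;
	int ret;

	net = hv_get_drvdata(dev);
	ndev_ctx = netdev_priv(net);

	rtnl_lock();

	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev == NULL) {
		ret = -ENODEV;
		goto out;
	}

	/* save the current config info for resume */
	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);

	ret = netvsc_detach(net, nvdev);
out:
	rtnl_unlock();
	return ret;
}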