Lines matching refs: nvdev (cross-reference listing; the number at the start of each line is that reference's line number in the source file)

89 	struct netvsc_device *nvdev;
98 nvdev = rcu_dereference(ndev_ctx->nvdev);
99 if (nvdev)
100 rndis_filter_update(nvdev);
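
Lines 98-100 show the RCU read-side pattern used wherever the caller does not hold the RTNL lock: the ndev_ctx->nvdev pointer is sampled under rcu_read_lock() and NULL-checked before use. A minimal sketch of that pattern (the wrapper name example_update_filter is hypothetical; the calls inside it appear in the listing):

static void example_update_filter(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;

	/* nvdev can be torn down concurrently, so sample the RCU-protected
	 * pointer inside a read-side critical section and NULL-check it
	 */
	rcu_read_lock();
	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}
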
117 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
124 ret = rndis_filter_open(nvdev);
130 rdev = nvdev->extension;
133 netvsc_tx_enable(nvdev, net);
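
Lines 117-133 are the open path. ndo_open runs under the RTNL lock, so rtnl_dereference() suffices and no RCU critical section is needed. A condensed sketch, assuming rdev->link_state reflects the host link state (that field is not in the listing):

static int example_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret;

	if (!nvdev)
		return -ENODEV;

	ret = rndis_filter_open(nvdev);		/* line 124 */
	if (ret)
		return ret;

	rdev = nvdev->extension;		/* line 130: RNDIS state */
	if (!rdev->link_state) {		/* assumed field */
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);	/* line 133: wake queues */
	}

	return 0;
}
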
150 static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
159 for (i = 0; i < nvdev->num_chn; i++) {
161 = nvdev->chan_table[i].channel;
167 napi_synchronize(&nvdev->chan_table[i].napi);
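
netvsc_wait_until_empty() (lines 150-167) quiesces NAPI on every channel and then polls the VMBus ring buffers until both directions drain. A condensed sketch; the retry bound and sleep range are assumptions, and hv_get_bytes_to_read() is the standard hyperv ring-buffer accessor:

static int example_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;	/* line 161 */

			if (!chn)
				continue;

			/* make sure receive processing is not running */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > 100)		/* assumed bound */
			return -ETIMEDOUT;

		usleep_range(1000, 2000);	/* assumed backoff */
	}
}
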
204 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
207 netvsc_tx_disable(nvdev, net);
210 if (!nvdev)
213 ret = rndis_filter_close(nvdev);
219 ret = netvsc_wait_until_empty(nvdev);
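
The close path (lines 204-219) stops transmit before it NULL-checks nvdev, which implies netvsc_tx_disable() tolerates a NULL device (an inference from the ordering, not something the listing states). Sketch:

static int example_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);		/* line 207: stop queues first */

	if (!nvdev)				/* line 210 */
		return 0;

	ret = rndis_filter_close(nvdev);	/* line 213 */
	if (ret)
		return ret;

	return netvsc_wait_until_empty(nvdev);	/* line 219: drain rings */
}
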
950 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
952 if (nvdev) {
953 channel->max_combined = nvdev->max_chn;
954 channel->combined_count = nvdev->num_chn;
962 struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev)
972 if (nvdev) {
975 dev_info->num_chn = nvdev->num_chn;
976 dev_info->send_sections = nvdev->send_section_cnt;
977 dev_info->send_section_size = nvdev->send_section_size;
978 dev_info->recv_sections = nvdev->recv_section_cnt;
979 dev_info->recv_section_size = nvdev->recv_section_size;
981 memcpy(dev_info->rss_key, nvdev->extension->rss_key,
984 prog = netvsc_xdp_get(nvdev);
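
netvsc_devinfo_get() (lines 962-984) snapshots every tunable so a later re-attach can restore it: channel count, send/receive section geometry, the RSS key, and any attached XDP program. A reconstruction; the allocation flags, NETVSC_HASH_KEYLEN, and the bprog field are assumptions:

struct netvsc_device_info *example_devinfo_get(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;
	struct bpf_prog *prog;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);
	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		/* the RSS key lives in the RNDIS extension (line 981) */
		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);

		/* hold a reference on any attached XDP program (line 984) */
		prog = netvsc_xdp_get(nvdev);
		if (prog) {
			bpf_prog_inc(prog);
			dev_info->bprog = prog;
		}
	}

	return dev_info;
}
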
1012 struct netvsc_device *nvdev)
1019 if (cancel_work_sync(&nvdev->subchan_work))
1020 nvdev->num_chn = 1;
1022 netvsc_xdp_set(ndev, NULL, NULL, nvdev);
1026 netvsc_tx_disable(nvdev, ndev);
1028 ret = rndis_filter_close(nvdev);
1035 ret = netvsc_wait_until_empty(nvdev);
1045 rndis_filter_device_remove(hdev, nvdev);
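
netvsc_detach() (lines 1012-1045) is the single teardown sequence that the channel, MTU, ringparam, remove, and suspend paths all funnel through. Ordering matters: kill the async subchannel worker first so the channel count is stable, detach XDP, stop transmit, close the RNDIS filter, drain the rings, and only then remove the device. A sketch; the device_ctx field and error handling are assumptions:

static int example_detach(struct net_device *ndev,
			  struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;	/* assumed field */
	int ret;

	/* if subchannel setup never ran, we are back to one channel */
	if (cancel_work_sync(&nvdev->subchan_work))	/* line 1019 */
		nvdev->num_chn = 1;

	netvsc_xdp_set(ndev, NULL, NULL, nvdev);	/* line 1022 */

	netvsc_tx_disable(nvdev, ndev);			/* line 1026 */

	ret = rndis_filter_close(nvdev);		/* line 1028 */
	if (ret)
		return ret;

	ret = netvsc_wait_until_empty(nvdev);		/* line 1035 */
	if (ret)
		return ret;

	rndis_filter_device_remove(hdev, nvdev);	/* line 1045 */
	return 0;
}
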
1055 struct netvsc_device *nvdev;
1060 nvdev = rndis_filter_device_add(hdev, dev_info);
1061 if (IS_ERR(nvdev))
1062 return PTR_ERR(nvdev);
1064 if (nvdev->num_chn > 1) {
1065 ret = rndis_set_subchannel(ndev, nvdev, dev_info);
1069 nvdev->max_chn = 1;
1070 nvdev->num_chn = 1;
1077 ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
1085 nvdev->tx_disable = false;
1092 ret = rndis_filter_open(nvdev);
1096 rdev = nvdev->extension;
1107 rndis_filter_device_remove(hdev, nvdev);
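
Lines 1055-1107 are the mirror-image attach path. Note the fallbacks: if subchannel setup fails, the driver drops to a single queue rather than failing the attach (lines 1069-1070), and rndis_filter_device_remove() is the common error exit (line 1107). A sketch; example_attach, the carrier handling, and the bprog plumbing are assumptions:

static int example_attach(struct net_device *ndev,
			  struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;	/* assumed field */
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	struct bpf_prog *prog;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);
		if (ret) {
			/* unavailable: proceed with a single queue */
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* re-attach the XDP program saved by the devinfo snapshot */
	prog = dev_info->bprog;
	if (prog) {
		ret = netvsc_xdp_set(ndev, prog, NULL, nvdev);
		if (ret)
			goto err;
	}

	nvdev->tx_disable = false;	/* line 1085: device ready */

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);	/* line 1092 */
		if (ret)
			goto err;

		rdev = nvdev->extension;	/* line 1096 */
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;

err:
	rndis_filter_device_remove(hdev, nvdev);	/* line 1107 */
	return ret;
}
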
1116 struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
1126 if (!nvdev || nvdev->destroy)
1129 if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
1132 if (count > nvdev->max_chn)
1135 orig = nvdev->num_chn;
1137 device_info = netvsc_devinfo_get(nvdev);
1144 ret = netvsc_detach(net, nvdev);
1212 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1217 if (!nvdev || nvdev->destroy)
1220 device_info = netvsc_devinfo_get(nvdev);
1232 ret = netvsc_detach(ndev, nvdev);
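
Both ethtool set_channels (lines 1116-1144) and the MTU change path (lines 1212-1232) reconfigure the device the same way: validate under RTNL, snapshot the settings, detach, tweak the snapshot, re-attach. A condensed sketch of that shared shape, reusing example_devinfo_get/example_detach/example_attach from the sketches above; the rollback details and the release at the end are assumptions:

static int example_set_channels(struct net_device *net, unsigned int count)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	struct netvsc_device_info *device_info;
	unsigned int orig;
	int ret;

	if (!nvdev || nvdev->destroy)		/* device going away */
		return -ENODEV;

	/* subchannels need NVSP protocol 5+ (line 1129) */
	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)		/* line 1132 */
		return -EINVAL;

	orig = nvdev->num_chn;			/* line 1135 */

	device_info = example_devinfo_get(nvdev);
	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = example_detach(net, nvdev);	/* line 1144 */
	if (ret)
		goto out;

	ret = example_attach(net, device_info);
	if (ret) {
		/* best-effort rollback to the original count */
		device_info->num_chn = orig;
		example_attach(net, device_info);
	}

out:
	kfree(device_info);	/* assumed release; real code may differ */
	return ret;
}
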
1290 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1314 for (i = 0; i < nvdev->num_chn; i++) {
1315 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
1349 struct netvsc_device *nvdev;
1355 nvdev = rcu_dereference(ndev_ctx->nvdev);
1356 if (!nvdev)
1368 for (i = 0; i < nvdev->num_chn; i++) {
1369 const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
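
The stats paths at lines 1290-1369 walk nvdev->chan_table under RCU and read each queue's counters with the usual u64_stats seqcount retry loop. A sketch of one tx-queue read; the netvsc_stats type and its packets/bytes/syncp fields are assumptions consistent with the common kernel idiom:

static void example_read_tx_stats(const struct netvsc_channel *nvchan,
				  u64 *packets, u64 *bytes)
{
	const struct netvsc_stats *stats = &nvchan->tx_stats;
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&stats->syncp);
		*packets = stats->packets;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(&stats->syncp, start));
}
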
1405 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1413 if (!nvdev)
1422 err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
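
MAC changes (lines 1405-1422) are pushed to the host through the RNDIS filter, and the new address is only committed locally if the host accepted it. A sketch; the eth_prepare/eth_commit pairing is an assumption about the surrounding code:

static int example_set_mac(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)				/* line 1413 */
		return -ENODEV;

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data); /* 1422 */
	if (!err)
		eth_commit_mac_addr_change(ndev, p);

	return err;
}
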
1486 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1488 if (!nvdev)
1495 + NETVSC_QUEUE_STATS_LEN(nvdev)
1506 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1520 if (!nvdev)
1530 for (j = 0; j < nvdev->num_chn; j++) {
1531 tx_stats = &nvdev->chan_table[j].tx_stats;
1543 rx_stats = &nvdev->chan_table[j].rx_stats;
1579 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1583 if (!nvdev)
1594 for (i = 0; i < nvdev->num_chn; i++) {
1663 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
1665 if (!nvdev)
1670 info->data = nvdev->num_chn;
1763 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1789 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
1822 static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1827 ring->rx_pending = nvdev->recv_section_cnt;
1828 ring->tx_pending = nvdev->send_section_cnt;
1830 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1835 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1837 / nvdev->send_section_size;
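
__netvsc_get_ringparam() (lines 1822-1837) derives the ring sizes instead of storing them: the "pending" values are the live section counts, and the maxima are buffer size divided by section size, with a smaller legacy receive buffer for NVSP <= 2 hosts. A reconstruction; the three buffer-size constants are assumptions:

static void example_get_ringparam(struct netvsc_device *nvdev,
				  struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;	/* line 1827 */
	ring->tx_pending = nvdev->send_section_cnt;	/* line 1828 */

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;		/* line 1837 */
}
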
1846 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1848 if (!nvdev)
1851 __netvsc_get_ringparam(nvdev, ring);
1860 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1866 if (!nvdev || nvdev->destroy)
1870 __netvsc_get_ringparam(nvdev, &orig);
1881 device_info = netvsc_devinfo_get(nvdev);
1889 ret = netvsc_detach(ndev, nvdev);
1911 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1913 if (!nvdev || nvdev->destroy)
1916 if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
1929 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1934 if (!nvdev || nvdev->destroy)
1950 ret = rndis_filter_set_offload_params(ndev, nvdev, &offloads);
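
Line 1916 is a feature-compatibility rule: LRO cannot stay enabled while an XDP program is attached. A sketch of the fix_features hook built around it (the hook name and log message are assumptions; the check itself is from the listing):

static netdev_features_t example_fix_features(struct net_device *ndev,
					      netdev_features_t features)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev || nvdev->destroy)		/* line 1913 */
		return features;

	if ((features & NETIF_F_LRO) && netvsc_xdp_get(nvdev)) {
		features ^= NETIF_F_LRO;	/* drop LRO under XDP */
		netdev_info(ndev, "Skip LRO - unsupported with XDP\n");
	}

	return features;
}

The set_features path (lines 1929-1950) then pushes the resulting offload settings to the host via rndis_filter_set_offload_params().
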
2063 net_device = rtnl_dereference(ndev_ctx->nvdev);
2155 if (!rtnl_dereference(net_device_ctx->nvdev))
2371 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2439 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
2520 struct netvsc_device *nvdev;
2575 /* We must get rtnl lock before scheduling nvdev->subchan_work,
2590 nvdev = rndis_filter_device_add(dev, device_info);
2591 if (IS_ERR(nvdev)) {
2592 ret = PTR_ERR(nvdev);
2599 if (nvdev->num_chn > 1)
2600 schedule_work(&nvdev->subchan_work);
2615 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2620 nvdev->tx_disable = false;
2659 rndis_filter_device_remove(dev, nvdev);
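
The probe tail (lines 2520-2659) creates the device and, per the comment at line 2575, must take the RTNL lock before scheduling nvdev->subchan_work so the worker cannot grab RTNL first and deadlock against the rest of probe. A condensed sketch; the MTU bounds and the helper shape are assumptions:

static int example_probe_tail(struct hv_device *dev, struct net_device *net,
			      struct netvsc_device_info *device_info)
{
	struct netvsc_device *nvdev;
	int ret;

	rtnl_lock();	/* must precede schedule_work (line 2575) */

	nvdev = rndis_filter_device_add(dev, device_info);	/* 2590 */
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		goto out;
	}

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);	/* line 2600 */

	/* newer hosts allow a larger MTU (line 2615); bounds assumed */
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	nvdev->tx_disable = false;	/* line 2620: ready to transmit */

	ret = register_netdevice(net);
	if (ret)
		rndis_filter_device_remove(dev, nvdev);	/* line 2659 */
out:
	rtnl_unlock();
	return ret;
}
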
2676 struct netvsc_device *nvdev;
2689 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2690 if (nvdev) {
2691 cancel_work_sync(&nvdev->subchan_work);
2692 netvsc_xdp_set(net, NULL, NULL, nvdev);
2703 if (nvdev)
2704 rndis_filter_device_remove(dev, nvdev);
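
Removal (lines 2676-2704) takes RTNL, cancels the async subchannel work, detaches XDP, and removes the RNDIS device after unregistering the netdev; both nvdev uses are NULL-checked since the device may already be gone. Sketch, assuming the net device is recovered via hv_get_drvdata():

static void example_remove(struct hv_device *dev)
{
	struct net_device *net = hv_get_drvdata(dev);	/* assumed lookup */
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);	/* line 2689 */
	if (nvdev) {
		cancel_work_sync(&nvdev->subchan_work);	/* line 2691 */
		netvsc_xdp_set(net, NULL, NULL, nvdev);	/* line 2692 */
	}

	unregister_netdevice(net);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);	/* line 2704 */
	rtnl_unlock();

	free_netdev(net);
}
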
2720 struct netvsc_device *nvdev;
2731 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2732 if (nvdev == NULL) {
2738 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2743 ret = netvsc_detach(net, nvdev);
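
Suspend (lines 2720-2743) reuses the snapshot/detach machinery: save the current settings so resume can rebuild the device, then detach. Sketch; the drvdata lookup and locking shape are assumptions, while saved_netvsc_dev_info is from line 2738:

static int example_suspend(struct hv_device *dev)
{
	struct net_device *net = hv_get_drvdata(dev);	/* assumed lookup */
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	int ret;

	rtnl_lock();

	nvdev = rtnl_dereference(ndev_ctx->nvdev);	/* line 2731 */
	if (nvdev == NULL) {
		ret = -ENODEV;
		goto out;
	}

	/* snapshot for resume to restore */
	ndev_ctx->saved_netvsc_dev_info = example_devinfo_get(nvdev);

	ret = example_detach(net, nvdev);		/* line 2743 */
out:
	rtnl_unlock();
	return ret;
}
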