Lines matching references to ndev_ctx (the symbols suggest the Hyper-V netvsc driver, drivers/net/hyperv/netvsc_drv.c); the leading number on each line is its line number in the source file.
67 struct net_device_context *ndev_ctx = netdev_priv(net);
68 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
87 struct net_device_context *ndev_ctx = netdev_priv(net);
92 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
98 nvdev = rcu_dereference(ndev_ctx->nvdev);
115 struct net_device_context *ndev_ctx = netdev_priv(net);
116 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
117 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
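The matches at lines 67-117 show the two access disciplines for the RCU-protected pointers held in the context: rtnl_dereference() on control paths that hold the RTNL lock, rcu_dereference() on data paths inside an RCU read-side critical section. A minimal sketch of the pattern follows; the helper names and the stripped-down struct are illustrative, only the vf_netdev/nvdev fields come from the listing.

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct netvsc_device;                           /* opaque in this sketch */

struct net_device_context {                     /* only the matched fields */
        struct net_device __rcu *vf_netdev;
        struct netvsc_device __rcu *nvdev;
};

/* Control path: the caller holds RTNL, so the checked accessor is
 * rtnl_dereference() and no read-side section is needed.
 */
static struct net_device *get_vf_rtnl(struct net_device *net)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);

        ASSERT_RTNL();
        return rtnl_dereference(ndev_ctx->vf_netdev);
}

/* Data path: too hot for RTNL, so take an RCU read lock instead.
 * The pointer may only be used inside the read-side section.
 */
static bool netvsc_vf_present(struct net_device *net)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);
        bool present;

        rcu_read_lock();
        present = rcu_dereference(ndev_ctx->vf_netdev) != NULL;
        rcu_read_unlock();

        return present;
}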
452 struct net_device_context *ndev_ctx = netdev_priv(net);
462 = this_cpu_ptr(ndev_ctx->vf_stats);
469 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
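Lines 452-469 are the VF transmit path keeping lockless per-CPU counters: this_cpu_ptr() picks the current CPU's slot for the success counters, this_cpu_inc() bumps the drop count directly. A hedged sketch of that pattern; the exact stats layout and the helper name are assumptions, the field names come from the listing.

#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct netvsc_vf_pcpu_stats {                   /* assumed layout */
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync tx_syncp;
        u32 tx_dropped;
};

/* Called from the xmit path, i.e. with BH disabled, which makes the
 * this_cpu_*() accessors safe without extra locking.
 */
static void vf_tx_account(struct netvsc_vf_pcpu_stats __percpu *vf_stats,
                          unsigned int len, bool sent)
{
        if (sent) {
                struct netvsc_vf_pcpu_stats *stats = this_cpu_ptr(vf_stats);

                u64_stats_update_begin(&stats->tx_syncp);
                stats->tx_packets++;
                stats->tx_bytes += len;
                u64_stats_update_end(&stats->tx_syncp);
        } else {
                /* Drops are a plain u32: no seqcount needed. */
                this_cpu_inc(vf_stats->tx_dropped);
        }
}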
701 struct net_device_context *ndev_ctx = netdev_priv(net);
735 ndev_ctx->speed = speed;
753 spin_lock_irqsave(&ndev_ctx->lock, flags);
754 list_add_tail(&event->list, &ndev_ctx->reconfig_events);
755 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
757 schedule_delayed_work(&ndev_ctx->dwork, 0);
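Lines 701-757 are the producer half of the link-change machinery: allocate an event, append it to reconfig_events under ndev_ctx->lock, and kick the delayed work with zero delay; the worker enforces the rate limit (see the consumer sketch further down). A sketch under those assumptions; queue_link_event() and the trimmed context struct are illustrative only.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct netvsc_reconfig {
        struct list_head list;
        u32 event;
};

struct net_device_context {                     /* only the fields used here */
        spinlock_t lock;
        struct list_head reconfig_events;
        struct delayed_work dwork;
};

/* May be called from the VMBus channel callback, hence GFP_ATOMIC
 * and the irqsave spinlock.
 */
static void queue_link_event(struct net_device_context *ndev_ctx, u32 ev)
{
        struct netvsc_reconfig *event;
        unsigned long flags;

        event = kzalloc(sizeof(*event), GFP_ATOMIC);
        if (!event)
                return;
        event->event = ev;

        spin_lock_irqsave(&ndev_ctx->lock, flags);
        list_add_tail(&event->list, &ndev_ctx->reconfig_events);
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        /* Zero delay: the worker itself decides whether to throttle. */
        schedule_delayed_work(&ndev_ctx->dwork, 0);
}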
1014 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1015 struct hv_device *hdev = ndev_ctx->device_ctx;
1053 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1054 struct hv_device *hdev = ndev_ctx->device_ctx;
1259 struct net_device_context *ndev_ctx = netdev_priv(net);
1266 = per_cpu_ptr(ndev_ctx->vf_stats, i);
1289 struct net_device_context *ndev_ctx = netdev_priv(net);
1290 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1296 per_cpu_ptr(ndev_ctx->vf_stats, i);
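Lines 1259-1296 are the reader half of the per-CPU stats: per_cpu_ptr() walks every possible CPU and folds each slot into one total. A sketch of that aggregation; the seqcount retry loop is the usual way such 64-bit counters are read safely on 32-bit hosts, though the exact u64_stats fetch variant the driver uses is not visible in the listing.

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct netvsc_vf_pcpu_stats {                   /* assumed layout, as above */
        u64 tx_packets;
        u64 tx_bytes;
        struct u64_stats_sync tx_syncp;
        u32 tx_dropped;
};

static void vf_stats_sum(struct netvsc_vf_pcpu_stats __percpu *vf_stats,
                         u64 *packets, u64 *bytes, u64 *dropped)
{
        int i;

        *packets = *bytes = *dropped = 0;

        for_each_possible_cpu(i) {
                const struct netvsc_vf_pcpu_stats *stats
                        = per_cpu_ptr(vf_stats, i);
                unsigned int start;
                u64 p, b;

                /* Retry if a writer updated the pair mid-read. */
                do {
                        start = u64_stats_fetch_begin(&stats->tx_syncp);
                        p = stats->tx_packets;
                        b = stats->tx_bytes;
                } while (u64_stats_fetch_retry(&stats->tx_syncp, start));

                *packets += p;
                *bytes += b;
                *dropped += stats->tx_dropped;  /* plain u32, read as-is */
        }
}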
1348 struct net_device_context *ndev_ctx = netdev_priv(net);
1355 nvdev = rcu_dereference(ndev_ctx->nvdev);
1986 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1988 return ndev_ctx->msg_enable;
1993 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1995 ndev_ctx->msg_enable = val;
2047 struct net_device_context *ndev_ctx =
2049 struct hv_device *device_obj = ndev_ctx->device_ctx;
2059 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2063 net_device = rtnl_dereference(ndev_ctx->nvdev);
2069 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
2077 schedule_delayed_work(&ndev_ctx->dwork, delay);
2080 ndev_ctx->last_reconfig = jiffies;
2082 spin_lock_irqsave(&ndev_ctx->lock, flags);
2083 if (!list_empty(&ndev_ctx->reconfig_events)) {
2084 event = list_first_entry(&ndev_ctx->reconfig_events,
2087 reschedule = !list_empty(&ndev_ctx->reconfig_events);
2089 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2123 spin_lock_irqsave(&ndev_ctx->lock, flags);
2124 list_add(&event->list, &ndev_ctx->reconfig_events);
2125 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2137 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
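Lines 2047-2137 are the consumer: a delayed work item that throttles reconfigurations to one per LINKCHANGE_INT, pops a single event per run under the spinlock, and re-arms itself while events remain. The sketch below is reconstructed from the matched lines; the LINKCHANGE_INT value and the event handling body are assumptions.

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#define LINKCHANGE_INT (2 * HZ)                 /* assumed for the sketch */

struct netvsc_reconfig {
        struct list_head list;
        u32 event;
};

struct net_device_context {                     /* only the fields used here */
        spinlock_t lock;
        struct list_head reconfig_events;
        struct delayed_work dwork;
        unsigned long last_reconfig;
};

static void link_change_work(struct work_struct *w)
{
        struct net_device_context *ndev_ctx =
                container_of(w, struct net_device_context, dwork.work);
        struct netvsc_reconfig *event = NULL;
        unsigned long flags, next_reconfig, delay;
        bool reschedule = false;

        /* Throttle: if the last reconfig is too recent, re-arm the
         * work for the rest of the interval and process nothing now.
         */
        next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
        if (time_is_after_jiffies(next_reconfig)) {
                delay = next_reconfig - jiffies;
                schedule_delayed_work(&ndev_ctx->dwork, delay);
                return;
        }
        ndev_ctx->last_reconfig = jiffies;

        /* Pop exactly one event; note whether more are queued. */
        spin_lock_irqsave(&ndev_ctx->lock, flags);
        if (!list_empty(&ndev_ctx->reconfig_events)) {
                event = list_first_entry(&ndev_ctx->reconfig_events,
                                         struct netvsc_reconfig, list);
                list_del(&event->list);
                reschedule = !list_empty(&ndev_ctx->reconfig_events);
        }
        spin_unlock_irqrestore(&ndev_ctx->lock, flags);

        if (!event)
                return;

        /* ... act on event->event (carrier up/down etc.) ... */
        kfree(event);

        if (reschedule)
                schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
}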
2169 struct net_device_context *ndev_ctx = netdev_priv(ndev);
2171 = this_cpu_ptr(ndev_ctx->vf_stats);
2192 struct net_device_context *ndev_ctx = netdev_priv(ndev);
2217 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2263 struct net_device_context *ndev_ctx
2265 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2269 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2273 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2286 struct net_device_context *ndev_ctx;
2306 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2307 if (!ndev_ctx->vf_alloc)
2310 if (ndev_ctx->vf_serial != serial)
2313 ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2330 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2331 ndev = hv_get_drvdata(ndev_ctx->device_ctx);
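Lines 2286-2331 map a VF back to its synthetic device by PCI slot serial: walk the global netvsc_dev_list, skip contexts that never had a VF offered (vf_alloc), and match on vf_serial. Sketch below; the function name and the RTNL protection of the list are my assumptions, the walk itself mirrors the listing.

#include <linux/hyperv.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

struct net_device_context {                     /* only the lookup fields */
        struct list_head list;
        u32 vf_alloc;
        u32 vf_serial;
        struct hv_device *device_ctx;
};

static LIST_HEAD(netvsc_dev_list);              /* assumed global list */

static struct net_device *get_netvsc_by_serial(u32 serial)
{
        struct net_device_context *ndev_ctx;

        ASSERT_RTNL();                  /* assumed: list is RTNL-protected */

        list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
                if (!ndev_ctx->vf_alloc)        /* no VF offered here */
                        continue;
                if (ndev_ctx->vf_serial != serial)
                        continue;
                return hv_get_drvdata(ndev_ctx->device_ctx);
        }

        return NULL;
}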
2674 struct net_device_context *ndev_ctx;
2684 ndev_ctx = netdev_priv(net);
2686 cancel_delayed_work_sync(&ndev_ctx->dwork);
2689 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2699 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2707 list_del(&ndev_ctx->list);
2713 free_percpu(ndev_ctx->vf_stats);
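Lines 2674-2713 encode the teardown order of the remove path: stop the delayed work first so nothing can re-arm it, unregister the VF and the netvsc device and unlink the context's list entry under RTNL, and free the per-CPU stats only once the context is unreachable. A sketch of that ordering; everything between lock and unlock is elided.

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct netvsc_vf_pcpu_stats;                    /* opaque in this sketch */

struct net_device_context {                     /* only the fields used here */
        struct delayed_work dwork;
        struct list_head list;
        struct netvsc_vf_pcpu_stats __percpu *vf_stats;
};

static void remove_sketch(struct net_device *net)
{
        struct net_device_context *ndev_ctx = netdev_priv(net);

        /* 1. Quiesce the worker before anything else goes away. */
        cancel_delayed_work_sync(&ndev_ctx->dwork);

        rtnl_lock();
        /* 2. ... detach the VF, destroy the netvsc device ... */
        list_del(&ndev_ctx->list);              /* 3. drop from global list */
        rtnl_unlock();

        /* 4. Free stats last: no path can reach them anymore. */
        free_percpu(ndev_ctx->vf_stats);
}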
2719 struct net_device_context *ndev_ctx;
2726 ndev_ctx = netdev_priv(net);
2727 cancel_delayed_work_sync(&ndev_ctx->dwork);
2731 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2738 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2739 if (!ndev_ctx->saved_netvsc_dev_info) {
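Lines 2719-2739 open the suspend path: the same dwork cancellation, then a snapshot of the current device settings via netvsc_devinfo_get() so that resume can recreate the channel. Sketch; the prototype, the devinfo type, and the -ENOMEM failure value are assumptions.

#include <linux/errno.h>
#include <linux/workqueue.h>

struct netvsc_device;                           /* opaque in this sketch */
struct netvsc_device_info;                      /* opaque in this sketch */

struct net_device_context {                     /* only the fields used here */
        struct delayed_work dwork;
        struct netvsc_device_info *saved_netvsc_dev_info;
};

/* Assumed prototype, inferred from line 2738. */
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev);

static int suspend_sketch(struct net_device_context *ndev_ctx,
                          struct netvsc_device *nvdev)
{
        cancel_delayed_work_sync(&ndev_ctx->dwork);

        /* Save the settings so resume can rebuild the device. */
        ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
        if (!ndev_ctx->saved_netvsc_dev_info)
                return -ENOMEM;                 /* assumed error path */

        /* ... tear the channel down ... */
        return 0;
}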