Lines matching refs: ndev_ctx

65 struct net_device_context *ndev_ctx = netdev_priv(net);
66 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
85 struct net_device_context *ndev_ctx = netdev_priv(net);
90 vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
96 nvdev = rcu_dereference(ndev_ctx->nvdev);
113 struct net_device_context *ndev_ctx = netdev_priv(net);
114 struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
115 struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
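
Two access disciplines show up in the hits above: rtnl_dereference() at lines 66 and 113-115, which is only valid while the caller holds the RTNL lock, and rcu_dereference() at lines 90-96 (and again at line 1369), which must sit inside an RCU read-side critical section. A minimal sketch of both, assuming a pared-down struct net_device_context that keeps only the two RCU-managed pointers (the real context carries many more fields):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <linux/rtnetlink.h>

struct netvsc_device;			/* opaque here */

/* Pared-down context; the driver's real struct has many more fields. */
struct net_device_context {
	struct net_device __rcu *vf_netdev;
	struct netvsc_device __rcu *nvdev;
};

/* Control path: caller holds RTNL, so rtnl_dereference() is legal. */
static void example_under_rtnl(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);

	if (vf_netdev)
		netdev_info(net, "VF %s attached\n", vf_netdev->name);
}

/* Data path: no RTNL here, so open an RCU read-side critical section. */
static void example_under_rcu(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netdev_info(net, "forwarding via VF\n");
	rcu_read_unlock();
}
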
500 struct net_device_context *ndev_ctx = netdev_priv(net);
510 = this_cpu_ptr(ndev_ctx->vf_stats);
517 this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
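
Lines 500-517 are the VF transmit accounting: this_cpu_ptr() fetches this CPU's slice of the vf_stats per-CPU block, and this_cpu_inc() bumps the drop counter directly. A hedged sketch of that pattern; the field names come from the listing, but the layout of the per-CPU struct, the u64_stats_sync protection, and the helper name are assumptions:

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Assumed layout of the per-CPU block referenced as vf_stats. */
struct netvsc_vf_pcpu_stats {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct net_device_context {
	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
};

static void vf_xmit_account(struct net_device_context *ndev_ctx,
			    unsigned int len, bool sent)
{
	if (sent) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		/* Multi-word update: guard with the u64_stats seqcount. */
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		/* Single word: this_cpu_inc() alone is sufficient. */
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}
}
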
747 struct net_device_context *ndev_ctx = netdev_priv(net);
764 ndev_ctx->speed = speed;
782 spin_lock_irqsave(&ndev_ctx->lock, flags);
783 list_add_tail(&event->list, &ndev_ctx->reconfig_events);
784 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
786 schedule_delayed_work(&ndev_ctx->dwork, 0);
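
Lines 764-786 are the producer side of link-change handling: record the reported speed, append an event to reconfig_events under ndev_ctx->lock, and kick the delayed work with zero delay. A sketch of that enqueue path; the netvsc_reconfig record and the queue_link_event() helper are hypothetical names for illustration:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

/* Hypothetical event record; the driver keeps something similar. */
struct netvsc_reconfig {
	struct list_head list;
	u32 event;
};

struct net_device_context {
	spinlock_t lock;		/* protects reconfig_events */
	struct list_head reconfig_events;
	struct delayed_work dwork;	/* link-change worker */
};

static void queue_link_event(struct net_device_context *ndev_ctx, u32 code)
{
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* May be called from a channel callback, hence GFP_ATOMIC. */
	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = code;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	/* Run the worker as soon as possible; it throttles itself. */
	schedule_delayed_work(&ndev_ctx->dwork, 0);
}
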
1029 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1030 struct hv_device *hdev = ndev_ctx->device_ctx;
1068 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1069 struct hv_device *hdev = ndev_ctx->device_ctx;
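
Lines 1029-1030 and 1068-1069 recover the hv_device from the private context, while lines 2268 and 2314 go the other way via hv_get_drvdata(). A small sketch of the two-way mapping, assuming it is wired up once at probe time:

#include <linux/hyperv.h>
#include <linux/netdevice.h>

struct net_device_context {
	struct hv_device *device_ctx;	/* back-pointer to the VMBus device */
};

/* At probe: record both directions of the association. */
static void wire_up(struct hv_device *dev, struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);

	ndev_ctx->device_ctx = dev;	/* net_device -> hv_device */
	hv_set_drvdata(dev, net);	/* hv_device  -> net_device */
}

/* Afterwards either handle reaches the other in O(1). */
static struct net_device *net_from_hv(struct hv_device *dev)
{
	return hv_get_drvdata(dev);
}
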
1274 struct net_device_context *ndev_ctx = netdev_priv(net);
1281 = per_cpu_ptr(ndev_ctx->vf_stats, i);
1304 struct net_device_context *ndev_ctx = netdev_priv(net);
1305 struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
1311 per_cpu_ptr(ndev_ctx->vf_stats, i);
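
Lines 1274-1311 read the counters back out: per_cpu_ptr() plus a walk over all possible CPUs, folding each CPU's snapshot into device-wide totals. A sketch of that fold, reusing the per-CPU layout assumed in the transmit example above:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct netvsc_vf_pcpu_stats {
	u64 tx_packets;
	u64 tx_bytes;
	struct u64_stats_sync syncp;
	u32 tx_dropped;
};

struct net_device_context {
	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
};

static void vf_stats_sum(struct net_device_context *ndev_ctx,
			 u64 *packets, u64 *bytes, u64 *dropped)
{
	int i;

	*packets = *bytes = *dropped = 0;

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *pcpu_stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		unsigned int start;
		u64 p, b;

		/* Retry if a writer raced with us on this CPU's slice. */
		do {
			start = u64_stats_fetch_begin(&pcpu_stats->syncp);
			p = pcpu_stats->tx_packets;
			b = pcpu_stats->tx_bytes;
		} while (u64_stats_fetch_retry(&pcpu_stats->syncp, start));

		*packets += p;
		*bytes += b;
		*dropped += pcpu_stats->tx_dropped; /* plain counter */
	}
}
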
1362 struct net_device_context *ndev_ctx = netdev_priv(net);
1369 nvdev = rcu_dereference(ndev_ctx->nvdev);
1991 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1993 return ndev_ctx->msg_enable;
1998 struct net_device_context *ndev_ctx = netdev_priv(ndev);
2000 ndev_ctx->msg_enable = val;
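
Lines 1991-2000 are the standard ethtool message-level accessors: msg_enable is a plain bitmask parked in the private context. Reconstructed from the listed lines, the pair looks like this (the surrounding ethtool_ops hookup is assumed):

#include <linux/netdevice.h>
#include <linux/types.h>

struct net_device_context {
	u32 msg_enable;		/* netif_msg_*() bitmask */
};

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}
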
2051 struct net_device_context *ndev_ctx =
2053 struct hv_device *device_obj = ndev_ctx->device_ctx;
2063 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
2067 net_device = rtnl_dereference(ndev_ctx->nvdev);
2073 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
2081 schedule_delayed_work(&ndev_ctx->dwork, delay);
2084 ndev_ctx->last_reconfig = jiffies;
2086 spin_lock_irqsave(&ndev_ctx->lock, flags);
2087 if (!list_empty(&ndev_ctx->reconfig_events)) {
2088 event = list_first_entry(&ndev_ctx->reconfig_events,
2091 reschedule = !list_empty(&ndev_ctx->reconfig_events);
2093 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2127 spin_lock_irqsave(&ndev_ctx->lock, flags);
2128 list_add(&event->list, &ndev_ctx->reconfig_events);
2129 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
2144 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
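
Lines 2051-2144 are the consumer: a delayed work item that throttles itself to one reconfigure per LINKCHANGE_INT (lines 2073-2084), pops the oldest event under the spinlock (lines 2086-2093), pushes an event back when it needs a second pass (lines 2127-2129), and reschedules itself while the queue is non-empty (lines 2091, 2144). A condensed sketch of the throttle-and-pop skeleton; the LINKCHANGE_INT value, the function name, and the per-event actions are assumptions:

#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#define LINKCHANGE_INT (2 * HZ)		/* assumed value for this sketch */

struct netvsc_reconfig {
	struct list_head list;
	u32 event;
};

struct net_device_context {
	spinlock_t lock;
	struct list_head reconfig_events;
	struct delayed_work dwork;
	unsigned long last_reconfig;
};

static void link_change_work(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct netvsc_reconfig *event = NULL;
	unsigned long flags, next_reconfig, delay;
	bool reschedule = false;

	/* Throttle: at most one reconfigure per LINKCHANGE_INT. */
	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		delay = next_reconfig - jiffies;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		return;
	}
	ndev_ctx->last_reconfig = jiffies;

	/* Pop the oldest event; remember whether more are queued. */
	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		return;

	/* ... act on event->event; a two-phase event may be pushed back
	 * onto reconfig_events here instead of being freed ...
	 */
	kfree(event);

	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
}
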
2176 struct net_device_context *ndev_ctx = netdev_priv(ndev);
2178 = this_cpu_ptr(ndev_ctx->vf_stats);
2199 struct net_device_context *ndev_ctx = netdev_priv(ndev);
2220 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
2266 struct net_device_context *ndev_ctx
2268 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
2272 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
2276 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
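
Lines 2199-2220 and 2266-2276 are the two halves of VF handover: when a VF registers, the takeover work is scheduled after a VF_TAKEOVER_INT grace period; when the VF comes up, the same work is kicked with zero delay. A sketch of just the scheduling; the VF_TAKEOVER_INT value and the helper names are assumptions, and note that schedule_delayed_work() is a no-op if the work is already pending:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

#define VF_TAKEOVER_INT (HZ / 10)	/* assumed grace period */

struct net_device_context {
	struct delayed_work vf_takeover;
};

/* VF netdev registered: switch over only after a short grace period. */
static void on_vf_register(struct net_device_context *ndev_ctx)
{
	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
}

/* VF reported up: kick the takeover work immediately
 * (queued at once if it is not already pending).
 */
static void on_vf_up(struct net_device_context *ndev_ctx)
{
	schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
}
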
2289 struct net_device_context *ndev_ctx;
2309 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2310 if (!ndev_ctx->vf_alloc)
2313 if (ndev_ctx->vf_serial == serial)
2314 return hv_get_drvdata(ndev_ctx->device_ctx);
2324 list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
2325 ndev = hv_get_drvdata(ndev_ctx->device_ctx);
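
Lines 2289-2325 pair a VF with its synthetic device: every context sits on a global netvsc_dev_list, and the match walks that list for an entry whose vf_serial matches, skipping contexts with no VF slot allocated (line 2310). A sketch of the lookup, with a hypothetical function name; locking of the global list (the control path suggests RTNL) is left to the caller:

#include <linux/hyperv.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/types.h>

struct net_device_context {
	struct list_head list;		/* link on netvsc_dev_list */
	struct hv_device *device_ctx;
	u32 vf_alloc;			/* non-zero if a VF slot was offered */
	u32 vf_serial;			/* serial identifying that VF */
};

static LIST_HEAD(netvsc_dev_list);

/* Return the synthetic netdev whose VF slot matches @serial, or NULL. */
static struct net_device *netvsc_find_by_serial(u32 serial)
{
	struct net_device_context *ndev_ctx;

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	return NULL;
}
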
2603 struct net_device_context *ndev_ctx;
2613 ndev_ctx = netdev_priv(net);
2615 cancel_delayed_work_sync(&ndev_ctx->dwork);
2618 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2628 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2636 list_del(&ndev_ctx->list);
2642 free_percpu(ndev_ctx->vf_stats);
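
Lines 2603-2642 are the remove path, where ordering is the point: cancel_delayed_work_sync() stops the worker before anything is torn down, the context is unlinked from the global list, and the per-CPU stats are freed last. A condensed sketch of that ordering; VF unregistration and the actual device teardown are elided:

#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct netvsc_vf_pcpu_stats;		/* opaque here */

struct net_device_context {
	struct delayed_work dwork;
	struct list_head list;
	struct netvsc_vf_pcpu_stats __percpu *vf_stats;
};

static void example_remove(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);

	/* 1. Stop the worker first so it cannot run against a
	 *    half-torn-down context.
	 */
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	/* 2. Under RTNL: detach the VF and destroy the netvsc device
	 *    (the rtnl_dereference() calls at lines 2618-2628 live here).
	 */
	list_del(&ndev_ctx->list);	/* 3. off the global lookup list */
	rtnl_unlock();

	/* 4. Nothing can touch the counters now; free them last. */
	free_percpu(ndev_ctx->vf_stats);
}
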
2649 struct net_device_context *ndev_ctx;
2656 ndev_ctx = netdev_priv(net);
2657 cancel_delayed_work_sync(&ndev_ctx->dwork);
2661 nvdev = rtnl_dereference(ndev_ctx->nvdev);
2668 ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
2669 if (!ndev_ctx->saved_netvsc_dev_info) {
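
Lines 2649-2669 are the suspend path: flush the link-change worker, then snapshot the live configuration into saved_netvsc_dev_info so resume can rebuild the channel with identical settings. The shape below follows the listed lines; netvsc_devinfo_get() appears in the listing, but its prototype and the error values here are assumptions:

#include <linux/errno.h>
#include <linux/hyperv.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct netvsc_device;
struct netvsc_device_info;

/* Assumed prototype; the listing only shows the call site. */
struct netvsc_device_info *netvsc_devinfo_get(struct netvsc_device *nvdev);

struct net_device_context {
	struct delayed_work dwork;
	struct netvsc_device __rcu *nvdev;
	struct netvsc_device_info *saved_netvsc_dev_info;
};

static int example_suspend(struct hv_device *dev)
{
	struct net_device *net = hv_get_drvdata(dev);
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev;
	int ret = 0;

	/* Quiesce the link-change worker before saving state. */
	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (!nvdev) {
		ret = -ENODEV;
		goto out;
	}

	/* Snapshot settings so resume can recreate an identical device. */
	ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev);
	if (!ndev_ctx->saved_netvsc_dev_info) {
		ret = -ENOMEM;
		goto out;
	}

	/* ... detach the device here ... */
out:
	rtnl_unlock();
	return ret;
}
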