Lines Matching refs:vif

72 static int xenvif_schedulable(struct xenvif *vif)
74 return netif_running(vif->dev) &&
75 test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
76 !vif->disabled;
111 /* This vif is rogue; we pretend there is nothing to do
112 * for this vif to deschedule it from NAPI. But this interface
115 if (unlikely(queue->vif->disabled)) {
183 struct xenvif *vif = netdev_priv(dev);
184 unsigned int size = vif->hash.size;
189 num_queues = READ_ONCE(vif->num_queues);
193 if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
197 xenvif_set_skb_hash(vif, skb);
202 return vif->hash.mapping[vif->hash.mapping_sel]
209 struct xenvif *vif = netdev_priv(dev);
221 num_queues = READ_ONCE(vif->num_queues);
229 index, vif->dev->name);
232 queue = &vif->queues[index];
237 !xenvif_schedulable(vif))
240 if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
243 if (!xenvif_mcast_match(vif, eth->h_dest))
248 cb->expires = jiffies + vif->drain_timeout;
254 if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
265 vif->dev->stats.tx_dropped++;
272 struct xenvif *vif = netdev_priv(dev);
282 num_queues = READ_ONCE(vif->num_queues);
286 queue = &vif->queues[index];
295 vif->dev->stats.rx_bytes = rx_bytes;
296 vif->dev->stats.rx_packets = rx_packets;
297 vif->dev->stats.tx_bytes = tx_bytes;
298 vif->dev->stats.tx_packets = tx_packets;
300 return &vif->dev->stats;
303 static void xenvif_up(struct xenvif *vif)
306 unsigned int num_queues = vif->num_queues;
310 queue = &vif->queues[queue_index];
319 static void xenvif_down(struct xenvif *vif)
322 unsigned int num_queues = vif->num_queues;
326 queue = &vif->queues[queue_index];
337 struct xenvif *vif = netdev_priv(dev);
338 if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
339 xenvif_up(vif);
346 struct xenvif *vif = netdev_priv(dev);
347 if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
348 xenvif_down(vif);
355 struct xenvif *vif = netdev_priv(dev);
356 int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;
367 struct xenvif *vif = netdev_priv(dev);
369 if (!vif->can_sg)
371 if (~(vif->gso_mask) & GSO_BIT(TCPV4))
373 if (~(vif->gso_mask) & GSO_BIT(TCPV6))
375 if (!vif->ip_csum)
377 if (!vif->ipv6_csum)
428 struct xenvif *vif = netdev_priv(dev);
434 num_queues = READ_ONCE(vif->num_queues);
439 void *vif_stats = &vif->queues[queue_index].stats;
486 struct xenvif *vif;
489 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
503 vif = netdev_priv(dev);
505 vif->domid = domid;
506 vif->handle = handle;
507 vif->can_sg = 1;
508 vif->ip_csum = 1;
509 vif->dev = dev;
510 vif->disabled = false;
511 vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
512 vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);
515 vif->queues = NULL;
516 vif->num_queues = 0;
518 vif->xdp_headroom = 0;
520 spin_lock_init(&vif->lock);
521 INIT_LIST_HEAD(&vif->fe_mcast_addr);
555 return vif;
587 netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
602 void xenvif_carrier_on(struct xenvif *vif)
605 if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
606 dev_set_mtu(vif->dev, ETH_DATA_LEN);
607 netdev_update_features(vif->dev);
608 set_bit(VIF_STATUS_CONNECTED, &vif->status);
609 if (netif_running(vif->dev))
610 xenvif_up(vif);
614 int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
617 struct net_device *dev = vif->dev;
623 err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
632 BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);
635 if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
638 err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
642 vif->ctrl_irq = err;
644 xenvif_init_hash(vif);
646 err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
647 IRQF_ONESHOT, "xen-netback-ctrl", vif);
656 xenvif_deinit_hash(vif);
657 unbind_from_irqhandler(vif->ctrl_irq, vif);
658 vif->ctrl_irq = 0;
661 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
662 vif->ctrl.sring);
663 vif->ctrl.sring = NULL;
724 netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
749 queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
760 queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
770 queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
788 void xenvif_carrier_off(struct xenvif *vif)
790 struct net_device *dev = vif->dev;
793 if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
796 xenvif_down(vif);
801 void xenvif_disconnect_data(struct xenvif *vif)
804 unsigned int num_queues = vif->num_queues;
807 xenvif_carrier_off(vif);
810 queue = &vif->queues[queue_index];
815 xenvif_mcast_addr_list_free(vif);
818 void xenvif_disconnect_ctrl(struct xenvif *vif)
820 if (vif->ctrl_irq) {
821 xenvif_deinit_hash(vif);
822 unbind_from_irqhandler(vif->ctrl_irq, vif);
823 vif->ctrl_irq = 0;
826 if (vif->ctrl.sring) {
827 xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
828 vif->ctrl.sring);
829 vif->ctrl.sring = NULL;
842 void xenvif_free(struct xenvif *vif)
844 struct xenvif_queue *queues = vif->queues;
845 unsigned int num_queues = vif->num_queues;
848 unregister_netdev(vif->dev);
849 free_netdev(vif->dev);