/*
 * Network-device interface management.
 *
 * Copyright (c) 2004-2005, Keir Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common.h"

#include <linux/kthread.h>
#include <linux/sched/task.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/vmalloc.h>

#include <xen/events.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>

#define XENVIF_NAPI_WEIGHT 64

/* Number of bytes allowed on the internal guest Rx queue. */
#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)

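/* Rough worked numbers for the limit above, assuming the typical 4 KiB
 * page size, for which a single-page RX ring holds 256 slots: 256/2 *
 * 4096 = 512 KiB of internal buffering per queue. Once this fills,
 * xenvif_rx_queue_tail() refuses further packets and xenvif_start_xmit()
 * drops them.
 */
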
/* This function is used to set SKBTX_DEV_ZEROCOPY as well as to
 * increase the inflight counter. We need to increase the inflight
 * counter because the core driver calls into xenvif_zerocopy_callback,
 * which calls xenvif_skb_zerocopy_complete.
 */
void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
	atomic_inc(&queue->inflight_packets);
}

void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
{
	atomic_dec(&queue->inflight_packets);

	/* Wake the dealloc thread _after_ decrementing inflight_packets so
	 * that if kthread_stop() has already been called, the dealloc thread
	 * does not wait forever with nothing to wake it.
	 */
	wake_up(&queue->dealloc_wq);
}
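
/* A sketch of how the two helpers above pair up over a packet's
 * lifetime, assuming the usual tx path in netback.c:
 *
 *   xenvif_skb_zerocopy_prepare(queue, skb);  // mark skb, inflight++
 *   ... skb handed to the network stack ...
 *   // later, from xenvif_zerocopy_callback():
 *   xenvif_skb_zerocopy_complete(queue);      // inflight--, wake dealloc
 *
 * The dealloc thread will not exit until inflight_packets drops to zero,
 * so every prepare must eventually be matched by a complete.
 */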

static int xenvif_schedulable(struct xenvif *vif)
{
	return netif_running(vif->dev) &&
		test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
		!vif->disabled;
}

static bool xenvif_handle_tx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = RING_HAS_UNCONSUMED_REQUESTS(&queue->tx);
	if (rc)
		napi_schedule(&queue->napi);
	return rc;
}

static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_TX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_TX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_tx_interrupt(queue)) {
		atomic_andnot(NETBK_TX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

static int xenvif_poll(struct napi_struct *napi, int budget)
{
	struct xenvif_queue *queue =
		container_of(napi, struct xenvif_queue, napi);
	int work_done;

	/* This vif is rogue, so we pretend there is nothing to do
	 * for it in order to deschedule it from NAPI. The interface
	 * will be turned off in thread context later.
	 */
	if (unlikely(queue->vif->disabled)) {
		napi_complete(napi);
		return 0;
	}

	work_done = xenvif_tx_action(queue, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* If the queue is rate-limited, it shall be
		 * rescheduled in the timer callback.
		 */
		if (likely(!queue->rate_limited))
			xenvif_napi_schedule_or_enable_events(queue);
	}

	return work_done;
}
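
/* A note on the NAPI contract followed by xenvif_poll() above: doing
 * less work than the budget and calling napi_complete_done() tells the
 * core this instance is idle, so new ring requests can only wake us via
 * the event channel again. That is why
 * xenvif_napi_schedule_or_enable_events() runs afterwards, unless the
 * queue is rate-limited and the credit timer will reschedule it anyway.
 */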

static bool xenvif_handle_rx_interrupt(struct xenvif_queue *queue)
{
	bool rc;

	rc = xenvif_have_rx_work(queue, false);
	if (rc)
		xenvif_kick_thread(queue);
	return rc;
}

static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;

	old = atomic_fetch_or(NETBK_RX_EOI, &queue->eoi_pending);
	WARN(old & NETBK_RX_EOI, "Interrupt while EOI pending\n");

	if (!xenvif_handle_rx_interrupt(queue)) {
		atomic_andnot(NETBK_RX_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id)
{
	struct xenvif_queue *queue = dev_id;
	int old;
	bool has_rx, has_tx;

	old = atomic_fetch_or(NETBK_COMMON_EOI, &queue->eoi_pending);
	WARN(old, "Interrupt while EOI pending\n");

	has_tx = xenvif_handle_tx_interrupt(queue);
	has_rx = xenvif_handle_rx_interrupt(queue);

	if (!has_rx && !has_tx) {
		atomic_andnot(NETBK_COMMON_EOI, &queue->eoi_pending);
		xen_irq_lateeoi(irq, XEN_EOI_FLAG_SPURIOUS);
	}

	return IRQ_HANDLED;
}
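
/* All three interrupt handlers above follow the same lateeoi pattern:
 * record the pending EOI in queue->eoi_pending, and only if the event
 * turns out to be spurious (no unconsumed work on either ring) EOI
 * immediately with XEN_EOI_FLAG_SPURIOUS, which lets Xen throttle a
 * misbehaving frontend. Otherwise the EOI is deferred until the
 * scheduled NAPI poll or rx thread has processed the pending work.
 */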

static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
			       struct net_device *sb_dev)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int size = vif->hash.size;
	unsigned int num_queues;

	/* If queues are not set up internally, always return 0,
	 * as the packet is going to be dropped anyway.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		return 0;

	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		return netdev_pick_tx(dev, skb, NULL) %
		       dev->real_num_tx_queues;

	xenvif_set_skb_hash(vif, skb);

	if (size == 0)
		return skb_get_hash_raw(skb) % dev->real_num_tx_queues;

	return vif->hash.mapping[vif->hash.mapping_sel]
				[skb_get_hash_raw(skb) % size];
}
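
/* Worked example for the mapping lookup above (a sketch, not an extra
 * code path): with a hash algorithm configured, size == 64 and
 * mapping_sel picking the currently active table, a packet whose raw
 * hash is h lands on queue mapping[sel][h % 64]. With no algorithm
 * configured at all, the core's netdev_pick_tx() choice is used instead,
 * folded onto the real queue count.
 */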

static netdev_tx_t
xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u16 index;
	struct xenvif_rx_cb *cb;

	BUG_ON(skb->dev != dev);

	/* Drop the packet if queues are not set up.
	 * This handler should be called inside an RCU read section
	 * so we don't need to enter it here explicitly.
	 */
	num_queues = READ_ONCE(vif->num_queues);
	if (num_queues < 1)
		goto drop;

	/* Obtain the queue to be used to transmit this packet */
	index = skb_get_queue_mapping(skb);
	if (index >= num_queues) {
		pr_warn_ratelimited("Invalid queue %hu for packet on interface %s\n",
				    index, vif->dev->name);
		index %= num_queues;
	}
	queue = &vif->queues[index];

	/* Drop the packet if queue is not ready */
	if (queue->task == NULL ||
	    queue->dealloc_task == NULL ||
	    !xenvif_schedulable(vif))
		goto drop;

	if (vif->multicast_control && skb->pkt_type == PACKET_MULTICAST) {
		struct ethhdr *eth = (struct ethhdr *)skb->data;

		if (!xenvif_mcast_match(vif, eth->h_dest))
			goto drop;
	}

	cb = XENVIF_RX_CB(skb);
	cb->expires = jiffies + vif->drain_timeout;

	/* If there is no hash algorithm configured then make sure there
	 * is no hash information in the socket buffer otherwise it
	 * would be incorrectly forwarded to the frontend.
	 */
	if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
		skb_clear_hash(skb);

	if (!xenvif_rx_queue_tail(queue, skb))
		goto drop;

	xenvif_kick_thread(queue);

	return NETDEV_TX_OK;

 drop:
	vif->dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}
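
/* The expiry stamped into the control block above is what lets the rx
 * side cope with a stalled frontend: once jiffies passes cb->expires
 * (rx_drain_timeout_msecs after queuing), the packet may be dropped
 * rather than held forever waiting for ring space.
 */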

static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues;
	u64 rx_bytes = 0;
	u64 rx_packets = 0;
	u64 tx_bytes = 0;
	u64 tx_packets = 0;
	unsigned int index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	/* Aggregate tx and rx stats from each queue */
	for (index = 0; index < num_queues; ++index) {
		queue = &vif->queues[index];
		rx_bytes += queue->stats.rx_bytes;
		rx_packets += queue->stats.rx_packets;
		tx_bytes += queue->stats.tx_bytes;
		tx_packets += queue->stats.tx_packets;
	}

	rcu_read_unlock();

	vif->dev->stats.rx_bytes = rx_bytes;
	vif->dev->stats.rx_packets = rx_packets;
	vif->dev->stats.tx_bytes = tx_bytes;
	vif->dev->stats.tx_packets = tx_packets;

	return &vif->dev->stats;
}

static void xenvif_up(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		napi_enable(&queue->napi);
		enable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			enable_irq(queue->rx_irq);
		xenvif_napi_schedule_or_enable_events(queue);
	}
}

static void xenvif_down(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];
		disable_irq(queue->tx_irq);
		if (queue->tx_irq != queue->rx_irq)
			disable_irq(queue->rx_irq);
		napi_disable(&queue->napi);
		del_timer_sync(&queue->credit_timeout);
	}
}
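
/* Note that xenvif_down() undoes xenvif_up() in reverse: the event
 * channel irqs are masked first so nothing new can schedule NAPI, then
 * NAPI itself is disabled, and finally the per-queue credit timer is
 * cancelled synchronously.
 */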

static int xenvif_open(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_up(vif);
	netif_tx_start_all_queues(dev);
	return 0;
}

static int xenvif_close(struct net_device *dev)
{
	struct xenvif *vif = netdev_priv(dev);

	if (test_bit(VIF_STATUS_CONNECTED, &vif->status))
		xenvif_down(vif);
	netif_tx_stop_all_queues(dev);
	return 0;
}

static int xenvif_change_mtu(struct net_device *dev, int mtu)
{
	struct xenvif *vif = netdev_priv(dev);
	int max = vif->can_sg ? ETH_MAX_MTU - VLAN_ETH_HLEN : ETH_DATA_LEN;

	if (mtu > max)
		return -EINVAL;
	dev->mtu = mtu;
	return 0;
}
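
/* Worked numbers for the limit above: with scatter-gather the cap is
 * ETH_MAX_MTU - VLAN_ETH_HLEN = 65535 - 18 = 65517 bytes, leaving room
 * for an Ethernet header plus a VLAN tag; without scatter-gather the
 * interface is limited to the standard ETH_DATA_LEN of 1500 bytes.
 */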

static netdev_features_t xenvif_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct xenvif *vif = netdev_priv(dev);

	if (!vif->can_sg)
		features &= ~NETIF_F_SG;
	if (~(vif->gso_mask) & GSO_BIT(TCPV4))
		features &= ~NETIF_F_TSO;
	if (~(vif->gso_mask) & GSO_BIT(TCPV6))
		features &= ~NETIF_F_TSO6;
	if (!vif->ip_csum)
		features &= ~NETIF_F_IP_CSUM;
	if (!vif->ipv6_csum)
		features &= ~NETIF_F_IPV6_CSUM;

	return features;
}
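
/* The gso_mask tests above read as: a feature stays on only if the
 * frontend advertised the matching bit. ~mask & GSO_BIT(x) is non-zero
 * exactly when bit x is absent from the mask, so for example a guest
 * that never negotiated feature-gso-tcpv6 loses NETIF_F_TSO6 here.
 */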

static const struct xenvif_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xenvif_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct xenvif_stats, rx_gso_checksum_fixup)
	},
	/* If (sent != success + fail), there are probably packets never
	 * freed up properly!
	 */
	{
		"tx_zerocopy_sent",
		offsetof(struct xenvif_stats, tx_zerocopy_sent),
	},
	{
		"tx_zerocopy_success",
		offsetof(struct xenvif_stats, tx_zerocopy_success),
	},
	{
		"tx_zerocopy_fail",
		offsetof(struct xenvif_stats, tx_zerocopy_fail)
	},
	/* Number of packets exceeding MAX_SKB_FRAGS slots. You should use
	 * a guest with the same MAX_SKB_FRAGS.
	 */
	{
		"tx_frag_overflow",
		offsetof(struct xenvif_stats, tx_frag_overflow)
	},
};

static int xenvif_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(xenvif_stats);
	default:
		return -EINVAL;
	}
}

static void xenvif_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct xenvif *vif = netdev_priv(dev);
	unsigned int num_queues;
	int i;
	unsigned int queue_index;

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);

	for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
		unsigned long accum = 0;

		for (queue_index = 0; queue_index < num_queues; ++queue_index) {
			void *vif_stats = &vif->queues[queue_index].stats;

			accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset);
		}
		data[i] = accum;
	}

	rcu_read_unlock();
}

static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       xenvif_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

static const struct ethtool_ops xenvif_ethtool_ops = {
	.get_link = ethtool_op_get_link,

	.get_sset_count = xenvif_get_sset_count,
	.get_ethtool_stats = xenvif_get_ethtool_stats,
	.get_strings = xenvif_get_strings,
};

static const struct net_device_ops xenvif_netdev_ops = {
	.ndo_select_queue = xenvif_select_queue,
	.ndo_start_xmit = xenvif_start_xmit,
	.ndo_get_stats = xenvif_get_stats,
	.ndo_open = xenvif_open,
	.ndo_stop = xenvif_close,
	.ndo_change_mtu = xenvif_change_mtu,
	.ndo_fix_features = xenvif_fix_features,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};

struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
			    unsigned int handle)
{
	int err;
	struct net_device *dev;
	struct xenvif *vif;
	char name[IFNAMSIZ] = {};

	snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
	/* Allocate a netdev with the max. supported number of queues.
	 * When the guest selects the desired number, it will be updated
	 * via netif_set_real_num_*_queues().
	 */
	dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
			      ether_setup, xenvif_max_queues);
	if (dev == NULL) {
		pr_warn("Could not allocate netdev for %s\n", name);
		return ERR_PTR(-ENOMEM);
	}

	SET_NETDEV_DEV(dev, parent);

	vif = netdev_priv(dev);

	vif->domid = domid;
	vif->handle = handle;
	vif->can_sg = 1;
	vif->ip_csum = 1;
	vif->dev = dev;
	vif->disabled = false;
	vif->drain_timeout = msecs_to_jiffies(rx_drain_timeout_msecs);
	vif->stall_timeout = msecs_to_jiffies(rx_stall_timeout_msecs);

	/* Start out with no queues. */
	vif->queues = NULL;
	vif->num_queues = 0;

	vif->xdp_headroom = 0;

	spin_lock_init(&vif->lock);
	INIT_LIST_HEAD(&vif->fe_mcast_addr);

	dev->netdev_ops = &xenvif_netdev_ops;
	dev->hw_features = NETIF_F_SG |
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_FRAGLIST;
	dev->features = dev->hw_features | NETIF_F_RXCSUM;
	dev->ethtool_ops = &xenvif_ethtool_ops;

	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU - VLAN_ETH_HLEN;

	/*
	 * Initialise a dummy MAC address. We choose the numerically
	 * largest non-broadcast address to prevent the address getting
	 * stolen by an Ethernet bridge for STP purposes.
	 * (FE:FF:FF:FF:FF:FF)
	 */
	eth_broadcast_addr(dev->dev_addr);
	dev->dev_addr[0] &= ~0x01;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		netdev_warn(dev, "Could not register device: err=%d\n", err);
		free_netdev(dev);
		return ERR_PTR(err);
	}

	netdev_dbg(dev, "Successfully created xenvif\n");

	__module_get(THIS_MODULE);

	return vif;
}

int xenvif_init_queue(struct xenvif_queue *queue)
{
	int err, i;

	queue->credit_bytes = queue->remaining_credit = ~0UL;
	queue->credit_usec = 0UL;
	timer_setup(&queue->credit_timeout, xenvif_tx_credit_callback, 0);
	queue->credit_window_start = get_jiffies_64();

	queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;

	skb_queue_head_init(&queue->rx_queue);
	skb_queue_head_init(&queue->tx_queue);

	queue->pending_cons = 0;
	queue->pending_prod = MAX_PENDING_REQS;
	for (i = 0; i < MAX_PENDING_REQS; ++i)
		queue->pending_ring[i] = i;

	spin_lock_init(&queue->callback_lock);
	spin_lock_init(&queue->response_lock);

	/* If ballooning is disabled, this will consume real memory, so you
	 * had better enable it. The long-term solution would be to use just
	 * a bunch of valid page descriptors, without depending on ballooning.
	 */
	err = gnttab_alloc_pages(MAX_PENDING_REQS,
				 queue->mmap_pages);
	if (err) {
		netdev_err(queue->vif->dev, "Could not reserve mmap_pages\n");
		return -ENOMEM;
	}

	for (i = 0; i < MAX_PENDING_REQS; i++) {
		queue->pending_tx_info[i].callback_struct = (struct ubuf_info)
			{ .callback = xenvif_zerocopy_callback,
			  { { .ctx = NULL,
			      .desc = i } } };
		queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
	}

	return 0;
}
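
/* A note on the defaults chosen in xenvif_init_queue(): credit_bytes and
 * remaining_credit start at ~0UL, i.e. effectively unlimited bandwidth.
 * They only become a real rate limit if the toolstack writes credit
 * parameters for the vif, which are picked up during connect in
 * xenbus.c.
 */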

void xenvif_carrier_on(struct xenvif *vif)
{
	rtnl_lock();
	if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
		dev_set_mtu(vif->dev, ETH_DATA_LEN);
	netdev_update_features(vif->dev);
	set_bit(VIF_STATUS_CONNECTED, &vif->status);
	if (netif_running(vif->dev))
		xenvif_up(vif);
	rtnl_unlock();
}

int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn)
{
	struct net_device *dev = vif->dev;
	void *addr;
	struct xen_netif_ctrl_sring *shared;
	RING_IDX rsp_prod, req_prod;
	int err;

	err = xenbus_map_ring_valloc(xenvif_to_xenbus_device(vif),
				     &ring_ref, 1, &addr);
	if (err)
		goto err;

	shared = (struct xen_netif_ctrl_sring *)addr;
	rsp_prod = READ_ONCE(shared->rsp_prod);
	req_prod = READ_ONCE(shared->req_prod);

	BACK_RING_ATTACH(&vif->ctrl, shared, rsp_prod, XEN_PAGE_SIZE);

	err = -EIO;
	if (req_prod - rsp_prod > RING_SIZE(&vif->ctrl))
		goto err_unmap;

	err = bind_interdomain_evtchn_to_irq_lateeoi(vif->domid, evtchn);
	if (err < 0)
		goto err_unmap;

	vif->ctrl_irq = err;

	xenvif_init_hash(vif);

	err = request_threaded_irq(vif->ctrl_irq, NULL, xenvif_ctrl_irq_fn,
				   IRQF_ONESHOT, "xen-netback-ctrl", vif);
	if (err) {
		pr_warn("Could not setup irq handler for %s\n", dev->name);
		goto err_deinit;
	}

	return 0;

err_deinit:
	xenvif_deinit_hash(vif);
	unbind_from_irqhandler(vif->ctrl_irq, vif);
	vif->ctrl_irq = 0;

err_unmap:
	xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
				vif->ctrl.sring);
	vif->ctrl.sring = NULL;

err:
	return err;
}
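
/* The req_prod/rsp_prod check above is the usual backend sanity test
 * when attaching to a shared ring: the request producer may legitimately
 * run ahead of the response producer by at most one ring's worth of
 * entries, so a larger gap means the indexes are corrupt (or hostile)
 * and the mapping is torn down with -EIO instead of being trusted.
 */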

static void xenvif_disconnect_queue(struct xenvif_queue *queue)
{
	if (queue->task) {
		kthread_stop(queue->task);
		put_task_struct(queue->task);
		queue->task = NULL;
	}

	if (queue->dealloc_task) {
		kthread_stop(queue->dealloc_task);
		queue->dealloc_task = NULL;
	}

	if (queue->napi.poll) {
		netif_napi_del(&queue->napi);
		queue->napi.poll = NULL;
	}

	if (queue->tx_irq) {
		unbind_from_irqhandler(queue->tx_irq, queue);
		if (queue->tx_irq == queue->rx_irq)
			queue->rx_irq = 0;
		queue->tx_irq = 0;
	}

	if (queue->rx_irq) {
		unbind_from_irqhandler(queue->rx_irq, queue);
		queue->rx_irq = 0;
	}

	xenvif_unmap_frontend_data_rings(queue);
}

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn)
{
	struct task_struct *task;
	int err;

	BUG_ON(queue->tx_irq);
	BUG_ON(queue->task);
	BUG_ON(queue->dealloc_task);

	err = xenvif_map_frontend_data_rings(queue, tx_ring_ref,
					     rx_ring_ref);
	if (err < 0)
		goto err;

	init_waitqueue_head(&queue->wq);
	init_waitqueue_head(&queue->dealloc_wq);
	atomic_set(&queue->inflight_packets, 0);

	netif_napi_add(queue->vif->dev, &queue->napi, xenvif_poll,
		       XENVIF_NAPI_WEIGHT);

	queue->stalled = true;

	task = kthread_run(xenvif_kthread_guest_rx, queue,
			   "%s-guest-rx", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->task = task;
	/*
	 * Take a reference to the task in order to prevent it from being freed
	 * if the thread function returns before kthread_stop is called.
	 */
	get_task_struct(task);

	task = kthread_run(xenvif_dealloc_kthread, queue,
			   "%s-dealloc", queue->name);
	if (IS_ERR(task))
		goto kthread_err;
	queue->dealloc_task = task;

	if (tx_evtchn == rx_evtchn) {
		/* feature-split-event-channels == 0 */
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_interrupt, 0,
			queue->name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = queue->rx_irq = err;
		disable_irq(queue->tx_irq);
	} else {
		/* feature-split-event-channels == 1 */
		snprintf(queue->tx_irq_name, sizeof(queue->tx_irq_name),
			 "%s-tx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, tx_evtchn, xenvif_tx_interrupt, 0,
			queue->tx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->tx_irq = err;
		disable_irq(queue->tx_irq);

		snprintf(queue->rx_irq_name, sizeof(queue->rx_irq_name),
			 "%s-rx", queue->name);
		err = bind_interdomain_evtchn_to_irqhandler_lateeoi(
			queue->vif->domid, rx_evtchn, xenvif_rx_interrupt, 0,
			queue->rx_irq_name, queue);
		if (err < 0)
			goto err;
		queue->rx_irq = err;
		disable_irq(queue->rx_irq);
	}

	return 0;

kthread_err:
	pr_warn("Could not allocate kthread for %s\n", queue->name);
	err = PTR_ERR(task);
err:
	xenvif_disconnect_queue(queue);
	return err;
}
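
/* The two branches above implement feature-split-event-channels: an old
 * frontend hands us a single event channel that signals both directions,
 * so xenvif_interrupt() has to check both the tx and rx rings, while a
 * newer frontend provides separate tx and rx channels so each handler
 * only checks its own ring. Both kinds are bound with the lateeoi
 * variant so spurious-event throttling works either way.
 */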

void xenvif_carrier_off(struct xenvif *vif)
{
	struct net_device *dev = vif->dev;

	rtnl_lock();
	if (test_and_clear_bit(VIF_STATUS_CONNECTED, &vif->status)) {
		netif_carrier_off(dev); /* discard queued packets */
		if (netif_running(dev))
			xenvif_down(vif);
	}
	rtnl_unlock();
}

void xenvif_disconnect_data(struct xenvif *vif)
{
	struct xenvif_queue *queue = NULL;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	xenvif_carrier_off(vif);

	for (queue_index = 0; queue_index < num_queues; ++queue_index) {
		queue = &vif->queues[queue_index];

		xenvif_disconnect_queue(queue);
	}

	xenvif_mcast_addr_list_free(vif);
}

void xenvif_disconnect_ctrl(struct xenvif *vif)
{
	if (vif->ctrl_irq) {
		xenvif_deinit_hash(vif);
		unbind_from_irqhandler(vif->ctrl_irq, vif);
		vif->ctrl_irq = 0;
	}

	if (vif->ctrl.sring) {
		xenbus_unmap_ring_vfree(xenvif_to_xenbus_device(vif),
					vif->ctrl.sring);
		vif->ctrl.sring = NULL;
	}
}

/* Reverse the relevant parts of xenvif_init_queue().
 * Used for queue teardown from xenvif_free(), and on the
 * error handling paths in xenbus.c:connect().
 */
void xenvif_deinit_queue(struct xenvif_queue *queue)
{
	gnttab_free_pages(MAX_PENDING_REQS, queue->mmap_pages);
}

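/* A note on ordering in xenvif_free() below: the queue array pointer and
 * count are copied out first because the vif structure lives in the
 * netdev's private area and must not be touched once free_netdev() has
 * run; the separately vmalloc'ed queue array, however, still needs its
 * grant pages released and is only freed afterwards.
 */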
void xenvif_free(struct xenvif *vif)
{
	struct xenvif_queue *queues = vif->queues;
	unsigned int num_queues = vif->num_queues;
	unsigned int queue_index;

	unregister_netdev(vif->dev);
	free_netdev(vif->dev);

	for (queue_index = 0; queue_index < num_queues; ++queue_index)
		xenvif_deinit_queue(&queues[queue_index]);
	vfree(queues);

	module_put(THIS_MODULE);
}