Lines matching defs:utn (net/ipv4/udp_tunnel_nic.c)
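
The matches below come from the UDP tunnel port offload core and touch the following fields of struct udp_tunnel_nic: work, dev, need_sync, need_replay, work_pending, n_tables, missed and entries. A declaration sketch consistent with those uses; the bitfield widths, ordering and comments are assumptions made for illustration, not taken from the listing:

    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    struct udp_tunnel_nic_table_entry;              /* per-port slot, opaque here */

    struct udp_tunnel_nic {
            struct work_struct work;                /* deferred device sync */
            struct net_device *dev;

            u8 need_sync:1;                         /* queued entries await a sync */
            u8 need_replay:1;                       /* ports must be re-added from scratch */
            u8 work_pending:1;                      /* sync work already queued */

            unsigned int n_tables;
            unsigned long missed;                   /* bitmap of tables that dropped a port */
            struct udp_tunnel_nic_table_entry **entries;    /* [table][entry] */
    };
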
111 udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
116 utn->need_sync = 1;
130 udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
135 for (i = 0; i < utn->n_tables; i++)
137 if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
143 udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
148 if (!utn->missed)
151 for (i = 0; i < utn->n_tables; i++) {
153 if (!test_bit(i, &utn->missed))
157 if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
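
udp_tunnel_nic_is_empty() (lines 130-137) scans every slot of every table for a used entry, and udp_tunnel_nic_should_replay() (lines 143-157) asks whether a previously dropped port (a set bit in utn->missed) could now fit because a slot in that table has freed up. A small self-contained model of the replay check; the two-dimensional layout and the "port == 0 means free" convention are illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stddef.h>

    #define N_TABLES  2
    #define N_ENTRIES 4

    struct entry { unsigned short port; };          /* port == 0: slot is free */

    struct state {
            unsigned long missed;                   /* bit i set: table i dropped a port */
            struct entry entries[N_TABLES][N_ENTRIES];
    };

    /* Replay is only worthwhile if a table that dropped a port now has room. */
    static bool should_replay(const struct state *s)
    {
            if (!s->missed)
                    return false;

            for (size_t i = 0; i < N_TABLES; i++) {
                    if (!(s->missed & (1UL << i)))
                            continue;
                    for (size_t j = 0; j < N_ENTRIES; j++)
                            if (!s->entries[i][j].port)
                                    return true;
            }
            return false;
    }
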
169 struct udp_tunnel_nic *utn;
171 utn = dev->udp_tunnel_nic;
172 entry = &utn->entries[table][idx];
210 struct udp_tunnel_nic *utn,
217 entry = &utn->entries[table][idx];
239 struct udp_tunnel_nic *utn)
244 for (i = 0; i < utn->n_tables; i++)
246 udp_tunnel_nic_device_sync_one(dev, utn, i, j);
251 struct udp_tunnel_nic *utn)
257 for (i = 0; i < utn->n_tables; i++) {
260 if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
273 entry = &utn->entries[i][j];
281 __udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
283 if (!utn->need_sync)
287 udp_tunnel_nic_device_sync_by_table(dev, utn);
289 udp_tunnel_nic_device_sync_by_port(dev, utn);
291 utn->need_sync = 0;
295 utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
299 udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
304 if (!utn->need_sync)
312 __udp_tunnel_nic_device_sync(dev, utn);
313 if (may_sleep || utn->need_replay) {
314 queue_work(udp_tunnel_nic_workqueue, &utn->work);
315 utn->work_pending = 1;
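
udp_tunnel_nic_device_sync() (lines 299-315) either performs the sync inline or defers it to udp_tunnel_nic_workqueue: a driver whose callbacks may sleep cannot be serviced directly from the notification path, and a pending replay always goes through the workqueue. The may_sleep test on the driver's flags is not part of the matched lines, so the model below infers it from the branch at line 313; helper names are stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    struct state { bool need_sync, need_replay, work_pending; };

    /* Stubs standing in for the real helpers; only the dispatch matters here. */
    static void sync_inline(void)     { puts("sync done inline"); }
    static void queue_sync_work(void) { puts("sync deferred to workqueue"); }

    static void device_sync(struct state *s, bool may_sleep)
    {
            if (!s->need_sync)                      /* nothing queued, nothing to do */
                    return;

            if (!may_sleep)                         /* atomic-safe driver: sync right away */
                    sync_inline();
            if (may_sleep || s->need_replay) {      /* otherwise (or for a replay) defer */
                    queue_sync_work();
                    s->work_pending = true;
            }
    }

    int main(void)
    {
            struct state s = { .need_sync = true };

            device_sync(&s, /* may_sleep */ true);
            return 0;
    }
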
327 udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
338 for (i = 0; i < utn->n_tables; i++)
345 udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
352 for (i = 0; i < utn->n_tables; i++)
354 entry = &utn->entries[i][j];
359 __set_bit(i, &utn->missed);
367 udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
370 struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
401 udp_tunnel_nic_entry_queue(utn, entry, to);
405 udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
409 struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
419 udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
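
udp_tunnel_nic_entry_adj() and udp_tunnel_nic_entry_try_adj() (lines 367-419) adjust an entry's use count and queue a device operation only when the entry crosses between used and unused. The sketch below keeps just that transition rule and deliberately ignores the op-cancellation and failed-op retry handling of the full function; the OP_ADD/OP_DEL names are stand-ins:

    enum op { OP_NONE, OP_ADD, OP_DEL };

    struct entry { int use_cnt; enum op queued; };

    /* Only the 0 -> used and used -> 0 transitions need a device operation. */
    static void entry_adj(struct entry *e, int delta)
    {
            int old = e->use_cnt;

            e->use_cnt += delta;

            if (!old && e->use_cnt > 0)
                    e->queued = OP_ADD;             /* newly used: program the port */
            else if (old && !e->use_cnt)
                    e->queued = OP_DEL;             /* last user gone: remove the port */
    }

    int main(void)
    {
            struct entry e = { 0 };

            entry_adj(&e, +1);      /* 0 -> 1: OP_ADD queued */
            entry_adj(&e, +1);      /* 1 -> 2: no device op needed */
            entry_adj(&e, -2);      /* 2 -> 0: OP_DEL queued */
            return e.queued == OP_DEL ? 0 : 1;
    }
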
429 udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
435 for (i = 0; i < utn->n_tables; i++) {
441 if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
450 udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
453 return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
457 udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
460 return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
464 udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
470 for (i = 0; i < utn->n_tables; i++) {
478 entry = &utn->entries[i][j];
485 udp_tunnel_nic_entry_queue(utn, entry,
494 __set_bit(i, &utn->missed);
504 struct udp_tunnel_nic *utn;
506 utn = dev->udp_tunnel_nic;
507 if (!utn)
518 if (!udp_tunnel_nic_is_capable(dev, utn, ti))
523 * Rely on utn->missed to re-add this port later.
525 if (udp_tunnel_nic_has_collision(dev, utn, ti))
528 if (!udp_tunnel_nic_add_existing(dev, utn, ti))
529 udp_tunnel_nic_add_new(dev, utn, ti);
531 udp_tunnel_nic_device_sync(dev, utn);
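
Lines 504-531 are the add-port notification path: bail out if the device has no offload state or cannot offload this tunnel type, record a collision in the missed bitmap instead of programming a conflicting port, bump the use count of an already-offloaded port, otherwise claim a new slot, and finish with a device sync. A self-contained model of that ordering with a single one-entry "table"; all helper names and the struct layout are stand-ins, not the kernel's:

    #include <stdbool.h>

    struct tunnel_info { unsigned short port; int type; };
    struct offload { unsigned long missed; int use_cnt; unsigned short port; int type; };

    /* Trivial stand-ins; only the call ordering in add_port() matters here. */
    static bool is_capable(struct offload *o, const struct tunnel_info *ti)
    {
            (void)o;
            return ti->type != 0;
    }

    static bool has_collision(struct offload *o, const struct tunnel_info *ti)
    {
            if (o->use_cnt && o->port == ti->port && o->type != ti->type) {
                    o->missed |= 1UL;       /* remember it; a replay may retry later */
                    return true;
            }
            return false;
    }

    static bool add_existing(struct offload *o, const struct tunnel_info *ti)
    {
            if (!o->use_cnt || o->port != ti->port || o->type != ti->type)
                    return false;
            o->use_cnt++;
            return true;
    }

    static void add_new(struct offload *o, const struct tunnel_info *ti)
    {
            o->port = ti->port;
            o->type = ti->type;
            o->use_cnt = 1;
    }

    static void device_sync(struct offload *o) { (void)o; }

    static void add_port(struct offload *o, const struct tunnel_info *ti)
    {
            if (!o)                         /* no offload state on this device */
                    return;
            if (!is_capable(o, ti))         /* driver cannot offload this tunnel type */
                    return;
            if (has_collision(o, ti))       /* conflicting port: leave it to a replay */
                    return;
            if (!add_existing(o, ti))       /* already offloaded: use count bumped */
                    add_new(o, ti);         /* otherwise claim a slot */
            device_sync(o);                 /* push queued changes to the hardware */
    }

    int main(void)
    {
            struct offload o = { 0 };
            struct tunnel_info vxlan = { .port = 4789, .type = 1 };

            add_port(&o, &vxlan);           /* new entry */
            add_port(&o, &vxlan);           /* existing entry, use_cnt becomes 2 */
            return o.use_cnt == 2 ? 0 : 1;
    }
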
537 struct udp_tunnel_nic *utn;
539 utn = dev->udp_tunnel_nic;
540 if (!utn)
543 if (!udp_tunnel_nic_is_capable(dev, utn, ti))
546 udp_tunnel_nic_del_existing(dev, utn, ti);
548 udp_tunnel_nic_device_sync(dev, utn);
554 struct udp_tunnel_nic *utn;
559 utn = dev->udp_tunnel_nic;
560 if (!utn)
563 utn->need_sync = false;
564 for (i = 0; i < utn->n_tables; i++)
568 entry = &utn->entries[i][j];
577 udp_tunnel_nic_entry_queue(utn, entry,
581 __udp_tunnel_nic_device_sync(dev, utn);
588 struct udp_tunnel_nic *utn;
592 utn = dev->udp_tunnel_nic;
593 if (!utn)
598 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
614 struct udp_tunnel_nic *utn;
618 utn = dev->udp_tunnel_nic;
619 if (!utn)
623 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
629 utn->entries[table][j].port) ||
631 ilog2(utn->entries[table][j].type)))
655 udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
660 for (i = 0; i < utn->n_tables; i++)
662 int adj_cnt = -utn->entries[i][j].use_cnt;
665 udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
668 __udp_tunnel_nic_device_sync(dev, utn);
670 for (i = 0; i < utn->n_tables; i++)
671 memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
672 sizeof(**utn->entries)));
673 WARN_ON(utn->need_sync);
674 utn->need_replay = 0;
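
udp_tunnel_nic_flush() (lines 655-674) drives every entry's use count to zero first, then issues one device sync so each still-populated slot gets its removal pushed out, and only afterwards wipes the table memory; clearing the entries before the sync would lose the record of which ports the device still has programmed. A compact model of that ordering over a single table; names are stand-ins:

    #include <string.h>

    #define N_ENTRIES 4

    struct entry { int use_cnt; unsigned short port; };

    static void entry_adj(struct entry *e, int delta)       { e->use_cnt += delta; }
    static void device_sync(struct entry *t, int n)         { (void)t; (void)n; }

    static void flush(struct entry *table)
    {
            /* 1: schedule a removal for every in-use entry. */
            for (int i = 0; i < N_ENTRIES; i++)
                    entry_adj(&table[i], -table[i].use_cnt);

            /* 2: one sync pushes all the removals to the device. */
            device_sync(table, N_ENTRIES);

            /* 3: only now is it safe to forget the entries entirely. */
            memset(table, 0, N_ENTRIES * sizeof(*table));
    }

    int main(void)
    {
            struct entry table[N_ENTRIES] = { { .use_cnt = 2, .port = 4789 } };

            flush(table);
            return table[0].use_cnt;        /* 0 after the flush */
    }
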
678 udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
687 for (i = 0; i < utn->n_tables; i++)
689 udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
690 utn->missed = 0;
691 utn->need_replay = 0;
700 for (i = 0; i < utn->n_tables; i++)
702 udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
707 struct udp_tunnel_nic *utn =
711 utn->work_pending = 0;
712 __udp_tunnel_nic_device_sync(utn->dev, utn);
714 if (utn->need_replay)
715 udp_tunnel_nic_replay(utn->dev, utn);
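
The work handler registered by INIT_WORK() at line 730 recovers the owning udp_tunnel_nic from its embedded work_struct, clears work_pending, syncs, and replays if a replay was flagged. A hedged reconstruction of lines 707-715; the container_of() expression and any surrounding locking do not mention utn and so are not in the listing, meaning they are inferred here (locking is elided):

    static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
    {
            /* Standard container_of pattern: the work_struct is embedded in
             * struct udp_tunnel_nic, so the handler can get back to it.
             */
            struct udp_tunnel_nic *utn =
                    container_of(work, struct udp_tunnel_nic, work);

            utn->work_pending = 0;
            __udp_tunnel_nic_device_sync(utn->dev, utn);

            if (utn->need_replay)
                    udp_tunnel_nic_replay(utn->dev, utn);
    }
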
723 struct udp_tunnel_nic *utn;
726 utn = kzalloc(sizeof(*utn), GFP_KERNEL);
727 if (!utn)
729 utn->n_tables = n_tables;
730 INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
732 utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
733 if (!utn->entries)
737 utn->entries[i] = kcalloc(info->tables[i].n_entries,
738 sizeof(*utn->entries[i]), GFP_KERNEL);
739 if (!utn->entries[i])
743 return utn;
747 kfree(utn->entries[i]);
748 kfree(utn->entries);
750 kfree(utn);
754 static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
758 for (i = 0; i < utn->n_tables; i++)
759 kfree(utn->entries[i]);
760 kfree(utn->entries);
761 kfree(utn);
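
udp_tunnel_nic_alloc() and udp_tunnel_nic_free() (lines 723-761) follow the usual two-level allocation pattern: one array of per-table pointers, then one zeroed array of entries per table, with everything allocated so far unwound in reverse on failure. A userspace equivalent of the same pattern, with calloc standing in for kmalloc_array/kcalloc:

    #include <stdlib.h>

    struct entry { unsigned short port; int use_cnt; };

    /* Allocate n_tables tables of n_entries[i] zeroed entries each; on any
     * failure, free what was already allocated and return NULL.
     */
    static struct entry **alloc_tables(size_t n_tables, const size_t *n_entries)
    {
            struct entry **tables;
            size_t i;

            tables = calloc(n_tables, sizeof(*tables));
            if (!tables)
                    return NULL;

            for (i = 0; i < n_tables; i++) {
                    tables[i] = calloc(n_entries[i], sizeof(**tables));
                    if (!tables[i])
                            goto err_unwind;
            }
            return tables;

    err_unwind:
            while (i--)                     /* free tables[i - 1] .. tables[0] */
                    free(tables[i]);
            free(tables);
            return NULL;
    }

    static void free_tables(struct entry **tables, size_t n_tables)
    {
            for (size_t i = 0; i < n_tables; i++)
                    free(tables[i]);
            free(tables);
    }

Keeping the error path symmetric with the free routine is what makes the unwind easy to verify by inspection.
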
768 struct udp_tunnel_nic *utn;
771 BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
807 utn = info->shared->udp_tunnel_nic_info;
809 utn = udp_tunnel_nic_alloc(info, n_tables);
810 if (!utn) {
819 info->shared->udp_tunnel_nic_info = utn;
825 utn->dev = dev;
827 dev->udp_tunnel_nic = utn;
836 udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
859 utn->dev = first->dev;
869 udp_tunnel_nic_flush(dev, utn);
874 if (utn->work_pending)
877 udp_tunnel_nic_free(utn);
889 struct udp_tunnel_nic *utn;
904 utn = dev->udp_tunnel_nic;
905 if (!utn)
909 udp_tunnel_nic_unregister(dev, utn);
918 WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
923 udp_tunnel_nic_flush(dev, utn);
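
All of the above is driven by the driver-supplied struct udp_tunnel_nic_info that udp_tunnel_nic_register() consumes (table sizes, supported tunnel types, and flags such as the may-sleep behaviour behind the branch at line 313). A hedged sketch of what a driver declares; the callback bodies are hypothetical stubs, and the exact structure layout and flag names should be checked against include/net/udp_tunnel.h for the kernel version in use:

    #include <net/udp_tunnel.h>

    /* Hypothetical driver callbacks; a real driver programs its hardware here. */
    static int my_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
                                      unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;
    }

    static int my_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
                                        unsigned int entry, struct udp_tunnel_info *ti)
    {
            return 0;
    }

    static const struct udp_tunnel_nic_info my_udp_tunnels = {
            .set_port       = my_udp_tunnel_set_port,
            .unset_port     = my_udp_tunnel_unset_port,
            .flags          = UDP_TUNNEL_NIC_INFO_MAY_SLEEP,
            .tables         = {
                    {
                            .n_entries      = 4,
                            .tunnel_types   = UDP_TUNNEL_TYPE_VXLAN,
                    },
            },
    };

The driver points dev->udp_tunnel_nic_info at such a structure before registering the netdev; the core then allocates and attaches the struct udp_tunnel_nic instance seen throughout the listing.
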