Lines Matching defs:utn
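The matches below appear to be from the kernel's UDP tunnel port offload core, net/ipv4/udp_tunnel_nic.c; hedged sketches of the state structure and the main code paths these lines touch follow after the listing.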

111 udp_tunnel_nic_entry_queue(struct udp_tunnel_nic *utn,
116 utn->need_sync = 1;
130 udp_tunnel_nic_is_empty(struct net_device *dev, struct udp_tunnel_nic *utn)
135 for (i = 0; i < utn->n_tables; i++)
137 if (!udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
143 udp_tunnel_nic_should_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
148 if (!utn->missed)
151 for (i = 0; i < utn->n_tables; i++) {
153 if (!test_bit(i, &utn->missed))
157 if (udp_tunnel_nic_entry_is_free(&utn->entries[i][j]))
169 struct udp_tunnel_nic *utn;
171 utn = dev->udp_tunnel_nic;
172 entry = &utn->entries[table][idx];
210 struct udp_tunnel_nic *utn,
217 entry = &utn->entries[table][idx];
239 struct udp_tunnel_nic *utn)
244 for (i = 0; i < utn->n_tables; i++)
246 udp_tunnel_nic_device_sync_one(dev, utn, i, j);
251 struct udp_tunnel_nic *utn)
257 for (i = 0; i < utn->n_tables; i++) {
260 if (udp_tunnel_nic_entry_is_queued(&utn->entries[i][j]))
273 entry = &utn->entries[i][j];
281 __udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
283 if (!utn->need_sync)
287 udp_tunnel_nic_device_sync_by_table(dev, utn);
289 udp_tunnel_nic_device_sync_by_port(dev, utn);
291 utn->need_sync = 0;
295 utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
299 udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
304 if (!utn->need_sync)
312 __udp_tunnel_nic_device_sync(dev, utn);
313 if (may_sleep || utn->need_replay) {
314 queue_work(udp_tunnel_nic_workqueue, &utn->work);
315 utn->work_pending = 1;
327 udp_tunnel_nic_is_capable(struct net_device *dev, struct udp_tunnel_nic *utn,
338 for (i = 0; i < utn->n_tables; i++)
345 udp_tunnel_nic_has_collision(struct net_device *dev, struct udp_tunnel_nic *utn,
352 for (i = 0; i < utn->n_tables; i++)
354 entry = &utn->entries[i][j];
359 __set_bit(i, &utn->missed);
367 udp_tunnel_nic_entry_adj(struct udp_tunnel_nic *utn,
370 struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
401 udp_tunnel_nic_entry_queue(utn, entry, to);
405 udp_tunnel_nic_entry_try_adj(struct udp_tunnel_nic *utn,
409 struct udp_tunnel_nic_table_entry *entry = &utn->entries[table][idx];
419 udp_tunnel_nic_entry_adj(utn, table, idx, use_cnt_adj);
429 udp_tunnel_nic_try_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
435 for (i = 0; i < utn->n_tables; i++) {
441 if (udp_tunnel_nic_entry_try_adj(utn, i, j, ti,
450 udp_tunnel_nic_add_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
453 return udp_tunnel_nic_try_existing(dev, utn, ti, +1);
457 udp_tunnel_nic_del_existing(struct net_device *dev, struct udp_tunnel_nic *utn,
460 return udp_tunnel_nic_try_existing(dev, utn, ti, -1);
464 udp_tunnel_nic_add_new(struct net_device *dev, struct udp_tunnel_nic *utn,
470 for (i = 0; i < utn->n_tables; i++) {
478 entry = &utn->entries[i][j];
485 udp_tunnel_nic_entry_queue(utn, entry,
494 __set_bit(i, &utn->missed);
504 struct udp_tunnel_nic *utn;
506 utn = dev->udp_tunnel_nic;
507 if (!utn)
518 if (!udp_tunnel_nic_is_capable(dev, utn, ti))
523 * Rely on utn->missed to re-add this port later.
525 if (udp_tunnel_nic_has_collision(dev, utn, ti))
528 if (!udp_tunnel_nic_add_existing(dev, utn, ti))
529 udp_tunnel_nic_add_new(dev, utn, ti);
531 udp_tunnel_nic_device_sync(dev, utn);
537 struct udp_tunnel_nic *utn;
539 utn = dev->udp_tunnel_nic;
540 if (!utn)
543 if (!udp_tunnel_nic_is_capable(dev, utn, ti))
546 udp_tunnel_nic_del_existing(dev, utn, ti);
548 udp_tunnel_nic_device_sync(dev, utn);
554 struct udp_tunnel_nic *utn;
559 utn = dev->udp_tunnel_nic;
560 if (!utn)
563 utn->need_sync = false;
564 for (i = 0; i < utn->n_tables; i++)
568 entry = &utn->entries[i][j];
577 udp_tunnel_nic_entry_queue(utn, entry,
581 __udp_tunnel_nic_device_sync(dev, utn);
588 struct udp_tunnel_nic *utn;
592 utn = dev->udp_tunnel_nic;
593 if (!utn)
598 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
614 struct udp_tunnel_nic *utn;
618 utn = dev->udp_tunnel_nic;
619 if (!utn)
623 if (!udp_tunnel_nic_entry_is_present(&utn->entries[table][j]))
631 utn->entries[table][j].port) ||
633 ilog2(utn->entries[table][j].type)))
657 udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
662 for (i = 0; i < utn->n_tables; i++)
664 int adj_cnt = -utn->entries[i][j].use_cnt;
667 udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
670 __udp_tunnel_nic_device_sync(dev, utn);
672 for (i = 0; i < utn->n_tables; i++)
673 memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
674 sizeof(**utn->entries)));
675 WARN_ON(utn->need_sync);
676 utn->need_replay = 0;
680 udp_tunnel_nic_replay(struct net_device *dev, struct udp_tunnel_nic *utn)
689 for (i = 0; i < utn->n_tables; i++)
691 udp_tunnel_nic_entry_freeze_used(&utn->entries[i][j]);
692 utn->missed = 0;
693 utn->need_replay = 0;
702 for (i = 0; i < utn->n_tables; i++)
704 udp_tunnel_nic_entry_unfreeze(&utn->entries[i][j]);
709 struct udp_tunnel_nic *utn =
713 utn->work_pending = 0;
714 __udp_tunnel_nic_device_sync(utn->dev, utn);
716 if (utn->need_replay)
717 udp_tunnel_nic_replay(utn->dev, utn);
725 struct udp_tunnel_nic *utn;
728 utn = kzalloc(sizeof(*utn), GFP_KERNEL);
729 if (!utn)
731 utn->n_tables = n_tables;
732 INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work);
734 utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
735 if (!utn->entries)
739 utn->entries[i] = kcalloc(info->tables[i].n_entries,
740 sizeof(*utn->entries[i]), GFP_KERNEL);
741 if (!utn->entries[i])
745 return utn;
749 kfree(utn->entries[i]);
750 kfree(utn->entries);
752 kfree(utn);
756 static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)
760 for (i = 0; i < utn->n_tables; i++)
761 kfree(utn->entries[i]);
762 kfree(utn->entries);
763 kfree(utn);
770 struct udp_tunnel_nic *utn;
773 BUILD_BUG_ON(sizeof(utn->missed) * BITS_PER_BYTE <
809 utn = info->shared->udp_tunnel_nic_info;
811 utn = udp_tunnel_nic_alloc(info, n_tables);
812 if (!utn) {
821 info->shared->udp_tunnel_nic_info = utn;
827 utn->dev = dev;
829 dev->udp_tunnel_nic = utn;
838 udp_tunnel_nic_unregister(struct net_device *dev, struct udp_tunnel_nic *utn)
861 utn->dev = first->dev;
871 udp_tunnel_nic_flush(dev, utn);
876 if (utn->work_pending)
879 udp_tunnel_nic_free(utn);
891 struct udp_tunnel_nic *utn;
906 utn = dev->udp_tunnel_nic;
907 if (!utn)
911 udp_tunnel_nic_unregister(dev, utn);
920 WARN_ON(!udp_tunnel_nic_is_empty(dev, utn));
925 udp_tunnel_nic_flush(dev, utn);
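Taken together, the field accesses in the listing (utn->entries[i][j], utn->n_tables, utn->missed used with test_bit()/__set_bit(), the need_sync/need_replay/work_pending flags, utn->work handed to INIT_WORK()/queue_work(), and utn->dev) imply a per-device state structure roughly like the sketch below. Only the field names come from the matches; the exact types, widths and field order are assumptions.

/* Sketch only: reconstructed from the accesses listed above; exact types,
 * widths and field order are assumptions.
 */
#include <linux/types.h>
#include <linux/workqueue.h>

struct net_device;                      /* pointer member only */
struct udp_tunnel_nic_table_entry;      /* defined in the same source file */

struct udp_tunnel_nic {
        struct work_struct work;        /* INIT_WORK() at 732, queue_work() at 314 */

        struct net_device *dev;         /* set at 827, used by the work handler (714, 717) */

        u8 need_sync:1;                 /* set at 116, cleared at 291 and 563 */
        u8 need_replay:1;               /* set at 295, consumed at 716 */
        u8 work_pending:1;              /* set at 315, cleared at 713 */

        unsigned int n_tables;          /* bounds every per-table loop */
        unsigned long missed;           /* bitmap of tables that dropped a port:
                                         * __set_bit() at 359 and 494, test_bit() at 153,
                                         * width checked by the BUILD_BUG_ON at 773
                                         */

        /* kmalloc_array() of n_tables pointers (line 734), each pointing at
         * a kcalloc()ed array of table entries (lines 739-740).
         */
        struct udp_tunnel_nic_table_entry **entries;
};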
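Lines 725-752 and 756-763 show how that state is built and torn down: the top-level structure is kzalloc()ed, utn->entries is a kmalloc_array() of per-table pointers, each table gets a kcalloc()ed array of entries, and partial allocations are unwound on failure. The sketch below reconstructs that pairing; the allocator function's name and parameters, the error labels and the exact unwind loop are assumptions, while the allocation and kfree() calls are the ones shown in the listing.

/* Sketch of the alloc/free pairing visible at lines 725-763.
 * Function name, error labels and unwind details are assumptions.
 */
static struct udp_tunnel_nic *
udp_tunnel_nic_alloc(const struct udp_tunnel_nic_info *info,
                     unsigned int n_tables)
{
        struct udp_tunnel_nic *utn;
        unsigned int i;

        utn = kzalloc(sizeof(*utn), GFP_KERNEL);                /* line 728 */
        if (!utn)
                return NULL;
        utn->n_tables = n_tables;
        INIT_WORK(&utn->work, udp_tunnel_nic_device_sync_work); /* line 732 */

        utn->entries = kmalloc_array(n_tables, sizeof(void *), GFP_KERNEL);
        if (!utn->entries)
                goto err_free_utn;

        for (i = 0; i < n_tables; i++) {
                utn->entries[i] = kcalloc(info->tables[i].n_entries,
                                          sizeof(*utn->entries[i]), GFP_KERNEL);
                if (!utn->entries[i])
                        goto err_free_prev_entries;
        }

        return utn;                                             /* line 745 */

err_free_prev_entries:
        while (i--)
                kfree(utn->entries[i]);                         /* line 749 */
        kfree(utn->entries);                                    /* line 750 */
err_free_utn:
        kfree(utn);                                             /* line 752 */
        return NULL;
}

static void udp_tunnel_nic_free(struct udp_tunnel_nic *utn)     /* line 756 */
{
        unsigned int i;

        for (i = 0; i < utn->n_tables; i++)
                kfree(utn->entries[i]);
        kfree(utn->entries);
        kfree(utn);
}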
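Lines 281-315 and 709-717 outline the sync state machine: __udp_tunnel_nic_device_sync() is a no-op unless need_sync is set, pushes the queued entries to the driver (whole-table or per-port, lines 287 and 289), clears need_sync and records whether a replay will be needed; udp_tunnel_nic_device_sync() defers the work to udp_tunnel_nic_workqueue when it cannot run it directly and sets work_pending; the work handler clears work_pending, syncs, and runs the replay if one was recorded. In the reconstruction below, the sync_table selector, the MAY_SLEEP flag check and the rtnl locking in the work handler are assumptions drawn from surrounding kernel code, while the flag handling follows the listed lines.

/* Reconstruction of the sync paths around lines 281-315 and 709-717;
 * callback selection, the may_sleep test and the locking are assumptions.
 */
static void
__udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        if (!utn->need_sync)
                return;

        if (dev->udp_tunnel_nic_info->sync_table)       /* assumed selector */
                udp_tunnel_nic_device_sync_by_table(dev, utn);
        else
                udp_tunnel_nic_device_sync_by_port(dev, utn);

        utn->need_sync = 0;
        /* A replay cannot run from every caller's context, so only note
         * that one is needed; the work handler consumes it (line 716).
         */
        utn->need_replay = udp_tunnel_nic_should_replay(dev, utn);
}

static void
udp_tunnel_nic_device_sync(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        bool may_sleep;

        if (!utn->need_sync)
                return;

        /* Assumed: drivers whose callbacks may sleep are flagged and only
         * synced from the workqueue.
         */
        may_sleep = dev->udp_tunnel_nic_info->flags & UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
        if (!may_sleep)
                __udp_tunnel_nic_device_sync(dev, utn);
        if (may_sleep || utn->need_replay) {
                queue_work(udp_tunnel_nic_workqueue, &utn->work);
                utn->work_pending = 1;
        }
}

static void udp_tunnel_nic_device_sync_work(struct work_struct *work)
{
        struct udp_tunnel_nic *utn =
                container_of(work, struct udp_tunnel_nic, work);

        rtnl_lock();                            /* assumed locking */
        utn->work_pending = 0;
        __udp_tunnel_nic_device_sync(utn->dev, utn);
        if (utn->need_replay)
                udp_tunnel_nic_replay(utn->dev, utn);
        rtnl_unlock();
}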
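Lines 504-531 cover the path that admits a new UDP tunnel port: return early if the device has no offload state or cannot offload this tunnel type; on a port collision, give up for now and rely on utn->missed to re-add the port later (the comment at line 523); otherwise bump an existing entry or create a new one, then trigger a device sync. A condensed sketch follows; the enclosing function's name, the struct udp_tunnel_info type of the ti argument, and any checks not visible in the matches are assumptions.

/* Condensed sketch of the add-port path around lines 504-531; the function
 * name is assumed, the helpers are the ones named in the listing.
 */
static void
__udp_tunnel_nic_add_port(struct net_device *dev, struct udp_tunnel_info *ti)
{
        struct udp_tunnel_nic *utn;

        utn = dev->udp_tunnel_nic;
        if (!utn)
                return;

        if (!udp_tunnel_nic_is_capable(dev, utn, ti))
                return;

        /* A colliding port is not added; udp_tunnel_nic_has_collision()
         * marks the table in utn->missed (line 359) so a later replay
         * re-adds the port once the conflict is gone.
         */
        if (udp_tunnel_nic_has_collision(dev, utn, ti))
                return;

        if (!udp_tunnel_nic_add_existing(dev, utn, ti))
                udp_tunnel_nic_add_new(dev, utn, ti);

        udp_tunnel_nic_device_sync(dev, utn);
}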
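Lines 657-676 show the flush path used on unregister and reset: every in-use entry has its use count adjusted down to zero via udp_tunnel_nic_entry_adj(), the result is synced out, the entry arrays are zeroed with memset()/array_size(), and need_replay is cleared (with a WARN_ON if a sync is somehow still pending). Sketched below; the loop plumbing and the guard that skips entries whose use count is already zero are assumptions.

/* Sketch of the flush path around lines 657-676; loop plumbing and the
 * zero-use-count guard are assumptions.
 */
static void
udp_tunnel_nic_flush(struct net_device *dev, struct udp_tunnel_nic *utn)
{
        const struct udp_tunnel_nic_info *info = dev->udp_tunnel_nic_info;
        unsigned int i, j;

        for (i = 0; i < utn->n_tables; i++)
                for (j = 0; j < info->tables[i].n_entries; j++) {
                        int adj_cnt = -utn->entries[i][j].use_cnt;

                        if (adj_cnt)
                                udp_tunnel_nic_entry_adj(utn, i, j, adj_cnt);
                }

        __udp_tunnel_nic_device_sync(dev, utn);

        for (i = 0; i < utn->n_tables; i++)
                memset(utn->entries[i], 0, array_size(info->tables[i].n_entries,
                                                      sizeof(**utn->entries)));
        WARN_ON(utn->need_sync);
        utn->need_replay = 0;
}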