Lines matching references to tbl in net/core/neighbour.c (the leading number on each line is the source line number):
58 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
66 Neighbour hash table buckets are protected with rwlock tbl->lock.
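
The comment at source line 66 states the table-wide locking rule. As a minimal illustrative sketch (assumed example functions, not code from neighbour.c; kernel context with <net/neighbour.h> available), the write_lock_bh()/read_lock_bh() pairs seen throughout the matches below follow this shape:

    /* Writers take tbl->lock exclusively with BHs disabled, since the
     * proxy timer and GC work also touch the table from softirq/work
     * context; pure hash-bucket readers instead go through RCU on
     * tbl->nht, as the rcu_dereference_*() matches show. */
    static void example_mutate_table(struct neigh_table *tbl)
    {
            write_lock_bh(&tbl->lock);      /* exclusive, BHs off */
            /* ... modify buckets, gc_list, parms_list ... */
            write_unlock_bh(&tbl->lock);
    }

    static void example_scan_table(struct neigh_table *tbl)
    {
            read_lock_bh(&tbl->lock);       /* shared, BHs off */
            /* ... walk phash_buckets or parms_list ... */
            read_unlock_bh(&tbl->lock);
    }
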
124 atomic_dec(&n->tbl->gc_entries);
132 write_lock_bh(&n->tbl->lock);
147 atomic_dec(&n->tbl->gc_entries);
150 list_add_tail(&n->gc_list, &n->tbl->gc_list);
151 atomic_inc(&n->tbl->gc_entries);
156 write_unlock_bh(&n->tbl->lock);
182 struct neigh_table *tbl)
191 lockdep_is_held(&tbl->lock));
202 bool neigh_remove_one(struct neighbour *ndel, struct neigh_table *tbl)
210 nht = rcu_dereference_protected(tbl->nht,
211 lockdep_is_held(&tbl->lock));
212 hash_val = tbl->hash(pkey, ndel->dev, nht->hash_rnd);
217 lockdep_is_held(&tbl->lock)))) {
219 return neigh_del(n, np, tbl);
225 static int neigh_forced_gc(struct neigh_table *tbl)
227 int max_clean = atomic_read(&tbl->gc_entries) -
228 READ_ONCE(tbl->gc_thresh2);
235 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
237 write_lock_bh(&tbl->lock);
239 list_for_each_entry_safe(n, tmp, &tbl->gc_list, gc_list) {
246 (tbl->is_multicast &&
247 tbl->is_multicast(n->primary_key)) ||
252 if (remove && neigh_remove_one(n, tbl))
264 WRITE_ONCE(tbl->last_flush, jiffies);
266 write_unlock_bh(&tbl->lock);
326 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev,
332 nht = rcu_dereference_protected(tbl->nht,
333 lockdep_is_held(&tbl->lock));
340 lockdep_is_held(&tbl->lock))) != NULL) {
351 lockdep_is_held(&tbl->lock)));
380 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
382 write_lock_bh(&tbl->lock);
383 neigh_flush_dev(tbl, dev, false);
384 write_unlock_bh(&tbl->lock);
388 static int __neigh_ifdown(struct neigh_table *tbl, struct net_device *dev,
391 write_lock_bh(&tbl->lock);
392 neigh_flush_dev(tbl, dev, skip_perm);
393 pneigh_ifdown_and_unlock(tbl, dev);
394 pneigh_queue_purge(&tbl->proxy_queue, dev ? dev_net(dev) : NULL);
395 if (skb_queue_empty_lockless(&tbl->proxy_queue))
396 del_timer_sync(&tbl->proxy_timer);
400 int neigh_carrier_down(struct neigh_table *tbl, struct net_device *dev)
402 __neigh_ifdown(tbl, dev, true);
407 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
409 __neigh_ifdown(tbl, dev, false);
414 static struct neighbour *neigh_alloc(struct neigh_table *tbl,
425 entries = atomic_inc_return(&tbl->gc_entries) - 1;
426 gc_thresh3 = READ_ONCE(tbl->gc_thresh3);
428 (entries >= READ_ONCE(tbl->gc_thresh2) &&
429 time_after(now, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
430 if (!neigh_forced_gc(tbl) && entries >= gc_thresh3) {
432 tbl->id);
433 NEIGH_CACHE_STAT_INC(tbl, table_fulls);
439 n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
451 n->parms = neigh_parms_clone(&tbl->parms);
454 NEIGH_CACHE_STAT_INC(tbl, allocs);
455 n->tbl = tbl;
460 atomic_inc(&tbl->entries);
466 atomic_dec(&tbl->gc_entries);
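
The matches at 425-433 are the admission check in neigh_alloc(): crossing gc_thresh3, or crossing gc_thresh2 with no flush within the last five seconds, triggers neigh_forced_gc(), and the allocation fails only if even forced GC cannot bring the count back under gc_thresh3. A compressed reconstruction of that decision (not verbatim source):

    entries = atomic_inc_return(&tbl->gc_entries) - 1;
    if (entries >= READ_ONCE(tbl->gc_thresh3) ||
        (entries >= READ_ONCE(tbl->gc_thresh2) &&
         time_after(jiffies, READ_ONCE(tbl->last_flush) + 5 * HZ))) {
            if (!neigh_forced_gc(tbl) &&
                entries >= READ_ONCE(tbl->gc_thresh3))
                    goto out_entries;       /* table full: undo gc_entries */
    }
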
521 static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
527 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
529 old_nht = rcu_dereference_protected(tbl->nht,
530 lockdep_is_held(&tbl->lock));
539 lockdep_is_held(&tbl->lock));
542 hash = tbl->hash(n->primary_key, n->dev,
547 lockdep_is_held(&tbl->lock));
552 lockdep_is_held(&tbl->lock)));
557 rcu_assign_pointer(tbl->nht, new_nht);
562 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
567 NEIGH_CACHE_STAT_INC(tbl, lookups);
570 n = __neigh_lookup_noref(tbl, pkey, dev);
574 NEIGH_CACHE_STAT_INC(tbl, hits);
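
neigh_lookup() (562-574) returns a referenced entry or NULL, so callers must pair it with neigh_release(). A hypothetical caller sketch (the helper name, dev and next_hop are assumed, not from the listing):

    /* Peek at the ARP entry for an IPv4 next hop; arp_tbl is the real
     * IPv4 neighbour table, the rest is illustrative. */
    static bool example_neigh_is_valid(struct net_device *dev, __be32 next_hop)
    {
            struct neighbour *n = neigh_lookup(&arp_tbl, &next_hop, dev);
            bool valid = false;

            if (n) {
                    valid = n->nud_state & NUD_VALID;  /* racy hint */
                    neigh_release(n);  /* drop the ref neigh_lookup() took */
            }
            return valid;
    }
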
583 ___neigh_create(struct neigh_table *tbl, const void *pkey,
587 u32 hash_val, key_len = tbl->key_len;
592 n = neigh_alloc(tbl, dev, flags, exempt_from_gc);
593 trace_neigh_create(tbl, dev, pkey, n, exempt_from_gc);
604 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
626 write_lock_bh(&tbl->lock);
627 nht = rcu_dereference_protected(tbl->nht,
628 lockdep_is_held(&tbl->lock));
630 if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
631 nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
633 hash_val = tbl->hash(n->primary_key, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
641 lockdep_is_held(&tbl->lock));
644 lockdep_is_held(&tbl->lock))) {
655 list_add_tail(&n->gc_list, &n->tbl->gc_list);
661 lockdep_is_held(&tbl->lock)));
663 write_unlock_bh(&tbl->lock);
669 write_unlock_bh(&tbl->lock);
672 atomic_dec(&tbl->gc_entries);
677 struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
680 return ___neigh_create(tbl, pkey, dev, 0, false, want_ref);
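
___neigh_create() reports failure as an ERR_PTR() (the unwind at 669-672 returns the gc_entries charge), so users of the public __neigh_create() wrapper test with IS_ERR(). A hypothetical sketch:

    /* Ensure an entry exists for next_hop, creating one if necessary. */
    static int example_ensure_neigh(struct net_device *dev, __be32 next_hop)
    {
            struct neighbour *n = __neigh_create(&arp_tbl, &next_hop, dev, true);

            if (IS_ERR(n))
                    return PTR_ERR(n);
            neigh_release(n);       /* want_ref was true, drop our reference */
            return 0;
    }
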
710 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
713 unsigned int key_len = tbl->key_len;
716 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
721 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
726 unsigned int key_len = tbl->key_len;
729 read_lock_bh(&tbl->lock);
730 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
732 read_unlock_bh(&tbl->lock);
749 if (tbl->pconstructor && tbl->pconstructor(n)) {
757 write_lock_bh(&tbl->lock);
758 n->next = tbl->phash_buckets[hash_val];
759 tbl->phash_buckets[hash_val] = n;
760 write_unlock_bh(&tbl->lock);
767 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
771 unsigned int key_len = tbl->key_len;
774 write_lock_bh(&tbl->lock);
775 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
780 write_unlock_bh(&tbl->lock);
781 if (tbl->pdestructor)
782 tbl->pdestructor(n);
789 write_unlock_bh(&tbl->lock);
793 static int pneigh_ifdown_and_unlock(struct neigh_table *tbl,
800 np = &tbl->phash_buckets[h];
811 write_unlock_bh(&tbl->lock);
815 if (tbl->pdestructor)
816 tbl->pdestructor(n);
840 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
864 atomic_dec(&neigh->tbl->entries);
895 struct neigh_table *tbl = container_of(work, struct neigh_table, gc_work.work);
901 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
903 write_lock_bh(&tbl->lock);
904 nht = rcu_dereference_protected(tbl->nht,
905 lockdep_is_held(&tbl->lock));
911 if (time_after(jiffies, tbl->last_rand + 300 * HZ)) {
914 WRITE_ONCE(tbl->last_rand, jiffies);
915 list_for_each_entry(p, &tbl->parms_list, list)
920 if (atomic_read(&tbl->entries) < READ_ONCE(tbl->gc_thresh1))
927 lockdep_is_held(&tbl->lock))) != NULL) {
949 lockdep_is_held(&tbl->lock)));
964 write_unlock_bh(&tbl->lock);
966 write_lock_bh(&tbl->lock);
967 nht = rcu_dereference_protected(tbl->nht,
968 lockdep_is_held(&tbl->lock));
975 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
976 NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME) >> 1);
977 write_unlock_bh(&tbl->lock);
994 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1173 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1456 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1460 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1473 __be16 prot = n->tbl->protocol;
1555 struct neigh_table *tbl = from_timer(tbl, t, proxy_timer);
1560 spin_lock(&tbl->proxy_queue.lock);
1562 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1568 __skb_unlink(skb, &tbl->proxy_queue);
1569 if (tbl->proxy_redo && netif_running(dev)) {
1571 tbl->proxy_redo(skb);
1581 del_timer(&tbl->proxy_timer);
1583 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1584 spin_unlock(&tbl->proxy_queue.lock);
1587 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1595 if (tbl->proxy_queue.qlen > NEIGH_VAR(p, PROXY_QLEN)) {
1603 spin_lock(&tbl->proxy_queue.lock);
1604 if (del_timer(&tbl->proxy_timer)) {
1605 if (time_before(tbl->proxy_timer.expires, sched_next))
1606 sched_next = tbl->proxy_timer.expires;
1610 __skb_queue_tail(&tbl->proxy_queue, skb);
1611 mod_timer(&tbl->proxy_timer, sched_next);
1612 spin_unlock(&tbl->proxy_queue.lock);
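
Lines 1603-1612 park a delayed proxy skb and re-arm tbl->proxy_timer without losing an earlier deadline; the handler at 1555-1584 later replays queued skbs through tbl->proxy_redo. An annotated restatement of the enqueue (comments added, code shape as in the matches):

    spin_lock(&tbl->proxy_queue.lock);
    if (del_timer(&tbl->proxy_timer)) {
            /* timer was pending: keep whichever expiry is sooner */
            if (time_before(tbl->proxy_timer.expires, sched_next))
                    sched_next = tbl->proxy_timer.expires;
    }
    __skb_queue_tail(&tbl->proxy_queue, skb);  /* queue lock already held */
    mod_timer(&tbl->proxy_timer, sched_next);
    spin_unlock(&tbl->proxy_queue.lock);
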
1616 static inline struct neigh_parms *lookup_neigh_parms(struct neigh_table *tbl,
1621 list_for_each_entry(p, &tbl->parms_list, list) {
1631 struct neigh_table *tbl)
1637 p = kmemdup(&tbl->parms, sizeof(*p), GFP_KERNEL);
1639 p->tbl = tbl;
1654 write_lock_bh(&tbl->lock);
1655 list_add(&p->list, &tbl->parms.list);
1656 write_unlock_bh(&tbl->lock);
1672 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1674 if (!parms || parms == &tbl->parms)
1676 write_lock_bh(&tbl->lock);
1679 write_unlock_bh(&tbl->lock);
1695 void neigh_table_init(int index, struct neigh_table *tbl)
1700 INIT_LIST_HEAD(&tbl->parms_list);
1701 INIT_LIST_HEAD(&tbl->gc_list);
1702 list_add(&tbl->parms.list, &tbl->parms_list);
1703 write_pnet(&tbl->parms.net, &init_net);
1704 refcount_set(&tbl->parms.refcnt, 1);
1705 tbl->parms.reachable_time =
1706 neigh_rand_reach_time(NEIGH_VAR(&tbl->parms, BASE_REACHABLE_TIME));
1708 tbl->stats = alloc_percpu(struct neigh_statistics);
1709 if (!tbl->stats)
1713 if (!proc_create_seq_data(tbl->id, 0, init_net.proc_net_stat,
1714 &neigh_stat_seq_ops, tbl))
1718 RCU_INIT_POINTER(tbl->nht, neigh_hash_alloc(3));
1721 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1723 if (!tbl->nht || !tbl->phash_buckets)
1726 if (!tbl->entry_size)
1727 tbl->entry_size = ALIGN(offsetof(struct neighbour, primary_key) +
1728 tbl->key_len, NEIGH_PRIV_ALIGN);
1730 WARN_ON(tbl->entry_size % NEIGH_PRIV_ALIGN);
1732 rwlock_init(&tbl->lock);
1733 INIT_DEFERRABLE_WORK(&tbl->gc_work, neigh_periodic_work);
1734 queue_delayed_work(system_power_efficient_wq, &tbl->gc_work,
1735 tbl->parms.reachable_time);
1736 timer_setup(&tbl->proxy_timer, neigh_proxy_process, 0);
1737 skb_queue_head_init_class(&tbl->proxy_queue,
1740 tbl->last_flush = now;
1741 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1743 neigh_tables[index] = tbl;
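
neigh_table_init() is the registration point that fills the neigh_tables[] slots consulted by neigh_find_table() below; for instance, ARP and NDISC register their tables with:

    /* cf. arp_init() in net/ipv4/arp.c and ndisc_init() in net/ipv6/ndisc.c */
    neigh_table_init(NEIGH_ARP_TABLE, &arp_tbl);
    neigh_table_init(NEIGH_ND_TABLE, &nd_tbl);
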
1747 int neigh_table_clear(int index, struct neigh_table *tbl)
1751 cancel_delayed_work_sync(&tbl->gc_work);
1752 del_timer_sync(&tbl->proxy_timer);
1753 pneigh_queue_purge(&tbl->proxy_queue, NULL);
1754 neigh_ifdown(tbl, NULL);
1755 if (atomic_read(&tbl->entries))
1758 call_rcu(&rcu_dereference_protected(tbl->nht, 1)->rcu,
1760 tbl->nht = NULL;
1762 kfree(tbl->phash_buckets);
1763 tbl->phash_buckets = NULL;
1765 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1767 free_percpu(tbl->stats);
1768 tbl->stats = NULL;
1776 struct neigh_table *tbl = NULL;
1780 tbl = neigh_tables[NEIGH_ARP_TABLE];
1783 tbl = neigh_tables[NEIGH_ND_TABLE];
1787 tbl = neigh_tables[NEIGH_DN_TABLE];
1792 return tbl;
1817 struct neigh_table *tbl;
1841 tbl = neigh_find_table(ndm->ndm_family);
1842 if (tbl == NULL)
1845 if (nla_len(dst_attr) < (int)tbl->key_len) {
1851 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1858 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1867 write_lock_bh(&tbl->lock);
1869 neigh_remove_one(neigh, tbl);
1870 write_unlock_bh(&tbl->lock);
1884 struct neigh_table *tbl;
1917 tbl = neigh_find_table(ndm->ndm_family);
1918 if (tbl == NULL)
1921 if (nla_len(tb[NDA_DST]) < (int)tbl->key_len) {
1936 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1951 if (tbl->allow_add && !tbl->allow_add(dev, extack)) {
1956 neigh = neigh_lookup(tbl, dst, dev);
1967 neigh = ___neigh_create(tbl, dst, dev,
2054 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
2066 read_lock_bh(&tbl->lock);
2067 ndtmsg->ndtm_family = tbl->family;
2071 if (nla_put_string(skb, NDTA_NAME, tbl->id) ||
2072 nla_put_msecs(skb, NDTA_GC_INTERVAL, READ_ONCE(tbl->gc_interval),
2074 nla_put_u32(skb, NDTA_THRESH1, READ_ONCE(tbl->gc_thresh1)) ||
2075 nla_put_u32(skb, NDTA_THRESH2, READ_ONCE(tbl->gc_thresh2)) ||
2076 nla_put_u32(skb, NDTA_THRESH3, READ_ONCE(tbl->gc_thresh3)))
2080 long flush_delta = now - READ_ONCE(tbl->last_flush);
2081 long rand_delta = now - READ_ONCE(tbl->last_rand);
2084 .ndtc_key_len = tbl->key_len,
2085 .ndtc_entry_size = tbl->entry_size,
2086 .ndtc_entries = atomic_read(&tbl->entries),
2089 .ndtc_proxy_qlen = READ_ONCE(tbl->proxy_queue.qlen),
2093 nht = rcu_dereference_bh(tbl->nht);
2111 st = per_cpu_ptr(tbl->stats, cpu);
2130 BUG_ON(tbl->parms.dev);
2131 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
2134 read_unlock_bh(&tbl->lock);
2139 read_unlock_bh(&tbl->lock);
2145 struct neigh_table *tbl,
2159 read_lock_bh(&tbl->lock);
2160 ndtmsg->ndtm_family = tbl->family;
2164 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
2168 read_unlock_bh(&tbl->lock);
2172 read_unlock_bh(&tbl->lock);
2207 struct neigh_table *tbl;
2226 tbl = neigh_tables[tidx];
2227 if (!tbl)
2229 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
2231 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0) {
2241 * We acquire tbl->lock to be nice to the periodic timers and
2244 write_lock_bh(&tbl->lock);
2260 p = lookup_neigh_parms(tbl, net, ifindex);
2346 WRITE_ONCE(tbl->gc_thresh1, nla_get_u32(tb[NDTA_THRESH1]));
2349 WRITE_ONCE(tbl->gc_thresh2, nla_get_u32(tb[NDTA_THRESH2]));
2352 WRITE_ONCE(tbl->gc_thresh3, nla_get_u32(tb[NDTA_THRESH3]));
2355 WRITE_ONCE(tbl->gc_interval, nla_get_msecs(tb[NDTA_GC_INTERVAL]));
2360 write_unlock_bh(&tbl->lock);
2396 struct neigh_table *tbl;
2410 tbl = neigh_tables[tidx];
2411 if (!tbl)
2414 if (tidx < tbl_skip || (family && tbl->family != family))
2417 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).portid,
2423 p = list_next_entry(&tbl->parms, list);
2424 list_for_each_entry_from(p, &tbl->parms_list, list) {
2431 if (neightbl_fill_param_info(skb, tbl, p,
2470 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2508 struct neigh_table *tbl)
2518 ndm->ndm_family = tbl->family;
2526 if (nla_put(skb, NDA_DST, tbl->key_len, pn->key))
2573 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2588 nht = rcu_dereference_bh(tbl->nht);
2620 static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2633 read_lock_bh(&tbl->lock);
2638 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
2646 RTM_NEWNEIGH, flags, tbl) < 0) {
2647 read_unlock_bh(&tbl->lock);
2656 read_unlock_bh(&tbl->lock);
2730 struct neigh_table *tbl;
2751 tbl = neigh_tables[t];
2753 if (!tbl)
2755 if (t < s_t || (family && tbl->family != family))
2761 err = pneigh_dump_table(tbl, skb, cb, &filter);
2763 err = neigh_dump_table(tbl, skb, cb, &filter);
2773 struct neigh_table **tbl,
2805 *tbl = neigh_find_table(ndm->ndm_family);
2806 if (*tbl == NULL) {
2817 if (nla_len(tb[i]) != (int)(*tbl)->key_len) {
2871 u32 pid, u32 seq, struct neigh_table *tbl)
2880 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
2896 struct neigh_table *tbl = NULL;
2903 err = neigh_valid_get_req(nlh, &tbl, &dst, &dev_idx, &ndm_flags,
2924 pn = pneigh_lookup(tbl, net, dst, dev, 0);
2930 nlh->nlmsg_seq, tbl);
2938 neigh = neigh_lookup(tbl, dst, dev);
2952 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2958 nht = rcu_dereference_bh(tbl->nht);
2960 read_lock(&tbl->lock); /* avoid resizes */
2969 read_unlock(&tbl->lock);
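
neigh_for_each() (2952-2969) snapshots tbl->nht under RCU and holds the read lock so the walk cannot race a resize. A hypothetical callback that counts entries:

    static void example_count_one(struct neighbour *n, void *cookie)
    {
            (*(unsigned int *)cookie)++;
    }

    static unsigned int example_count_entries(struct neigh_table *tbl)
    {
            unsigned int count = 0;

            neigh_for_each(tbl, example_count_one, &count);
            return count;
    }
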
2974 /* The tbl->lock must be held as a writer and BH disabled. */
2975 void __neigh_for_each_release(struct neigh_table *tbl,
2981 nht = rcu_dereference_protected(tbl->nht,
2982 lockdep_is_held(&tbl->lock));
2989 lockdep_is_held(&tbl->lock))) != NULL) {
2997 lockdep_is_held(&tbl->lock)));
3014 struct neigh_table *tbl;
3017 tbl = neigh_tables[index];
3018 if (!tbl)
3026 neigh = __neigh_lookup_noref(tbl, addr, dev);
3029 neigh = __neigh_create(tbl, addr, dev, false);
3161 struct neigh_table *tbl = state->tbl;
3167 pn = tbl->phash_buckets[bucket];
3184 struct neigh_table *tbl = state->tbl;
3193 pn = tbl->phash_buckets[state->bucket];
3234 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
3235 __acquires(tbl->lock)
3240 state->tbl = tbl;
3245 state->nht = rcu_dereference_bh(tbl->nht);
3246 read_lock(&tbl->lock);
3280 __releases(tbl->lock)
3284 struct neigh_table *tbl = state->tbl;
3286 read_unlock(&tbl->lock);
3295 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3305 return per_cpu_ptr(tbl->stats, cpu);
3312 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3319 return per_cpu_ptr(tbl->stats, cpu);
3332 struct neigh_table *tbl = PDE_DATA(file_inode(seq->file));
3342 atomic_read(&tbl->entries),
3668 struct neigh_table *tbl = p->tbl;
3670 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3671 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3672 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3673 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
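
The final matches (3668-3673) wire the per-table GC knobs into the sysctl template, which is how the thresholds read by neigh_alloc() and neigh_forced_gc() above get tuned at runtime; they surface under the "default" neigh directory, e.g. net.ipv4.neigh.default.gc_thresh1/2/3 and gc_interval (with matching IPv6 paths).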