Lines Matching refs:neigh
42 #include <trace/events/neigh.h>
56 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid);
77 - with rwlock neigh->lock
81 neigh->lock mainly serializes ll address data and its validity state.
86 Again, nothing clever shall be made under neigh->lock,
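The matched comment fragments above state the locking discipline: neigh->lock serializes the link-layer address and its validity state, and nothing that might call back into drivers or transmit packets may run while it is held. Below is a minimal sketch of that pattern using a hypothetical helper (not part of neighbour.c); it mirrors the NUD_STALE transition the file itself performs under the lock.

#include <net/neighbour.h>

/* Hypothetical illustration only: change validity state under neigh->lock,
 * then drop the lock before doing anything that could re-enter the stack.
 */
static void example_mark_stale(struct neighbour *neigh)
{
	write_lock_bh(&neigh->lock);
	if (neigh->nud_state & NUD_VALID) {
		WRITE_ONCE(neigh->nud_state, NUD_STALE);
		neigh->updated = jiffies;
	}
	write_unlock_bh(&neigh->lock);
	/* transmits or driver callbacks, if any, happen here, lock released */
}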
92 static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
98 static void neigh_cleanup_and_release(struct neighbour *neigh)
100 trace_neigh_cleanup_and_release(neigh, 0);
101 __neigh_notify(neigh, RTM_DELNEIGH, 0, 0);
102 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
103 neigh_release(neigh);
179 static void neigh_update_flags(struct neighbour *neigh, u32 flags, int *notify,
182 u32 ndm_flags, old_flags = neigh->flags;
192 neigh->flags |= NTF_EXT_LEARNED;
194 neigh->flags &= ~NTF_EXT_LEARNED;
200 neigh->flags |= NTF_MANAGED;
202 neigh->flags &= ~NTF_MANAGED;
215 struct neighbour *neigh;
217 neigh = rcu_dereference_protected(n->next,
219 rcu_assign_pointer(*np, neigh);
426 neigh_dbg(2, "neigh %p is stray\n", n);
721 neigh_dbg(2, "neigh %p is created\n", n);
889 void neigh_destroy(struct neighbour *neigh)
891 struct net_device *dev = neigh->dev;
893 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
895 if (!neigh->dead) {
896 pr_warn("Destroying alive neighbour %p\n", neigh);
901 if (neigh_del_timer(neigh))
904 write_lock_bh(&neigh->lock);
905 __skb_queue_purge(&neigh->arp_queue);
906 write_unlock_bh(&neigh->lock);
907 neigh->arp_queue_len_bytes = 0;
910 dev->netdev_ops->ndo_neigh_destroy(dev, neigh);
912 netdev_put(dev, &neigh->dev_tracker);
913 neigh_parms_put(neigh->parms);
915 neigh_dbg(2, "neigh %p is destroyed\n", neigh);
917 atomic_dec(&neigh->tbl->entries);
918 kfree_rcu(neigh, rcu);
925 Called with write_locked neigh.
927 static void neigh_suspect(struct neighbour *neigh)
929 neigh_dbg(2, "neigh %p is suspected\n", neigh);
931 WRITE_ONCE(neigh->output, neigh->ops->output);
937 Called with write_locked neigh.
939 static void neigh_connect(struct neighbour *neigh)
941 neigh_dbg(2, "neigh %p is connected\n", neigh);
943 WRITE_ONCE(neigh->output, neigh->ops->connected_output);
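neigh_suspect() and neigh_connect() only swap neigh->output between ops->output and ops->connected_output, which is why both carry the "Called with write_locked neigh" comment: the NUD state change and the pointer swap must be observed together. A hedged sketch of that calling convention, modeled on what neigh_timer_handler does when an entry becomes reachable (both helpers are static, so a real caller would live in neighbour.c itself):

/* Hypothetical caller, for illustration only */
static void example_confirm_reachable(struct neighbour *neigh)
{
	write_lock_bh(&neigh->lock);
	WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
	neigh->updated = jiffies;
	neigh_connect(neigh);	/* output = ops->connected_output */
	write_unlock_bh(&neigh->lock);
}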
1041 static void neigh_invalidate(struct neighbour *neigh)
1042 __releases(neigh->lock)
1043 __acquires(neigh->lock)
1047 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
1048 neigh_dbg(2, "neigh %p is failed\n", neigh);
1049 neigh->updated = jiffies;
1056 while (neigh->nud_state == NUD_FAILED &&
1057 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1058 write_unlock(&neigh->lock);
1059 neigh->ops->error_report(neigh, skb);
1060 write_lock(&neigh->lock);
1062 __skb_queue_purge(&neigh->arp_queue);
1063 neigh->arp_queue_len_bytes = 0;
1066 static void neigh_probe(struct neighbour *neigh)
1067 __releases(neigh->lock)
1069 struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
1073 write_unlock(&neigh->lock);
1074 if (neigh->ops->solicit)
1075 neigh->ops->solicit(neigh, skb);
1076 atomic_inc(&neigh->probes);
1085 struct neighbour *neigh = from_timer(neigh, t, timer);
1089 write_lock(&neigh->lock);
1091 state = neigh->nud_state;
1100 neigh->confirmed + neigh->parms->reachable_time)) {
1101 neigh_dbg(2, "neigh %p is still alive\n", neigh);
1102 next = neigh->confirmed + neigh->parms->reachable_time;
1104 neigh->used +
1105 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1106 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1107 WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1108 neigh->updated = jiffies;
1109 neigh_suspect(neigh);
1110 next = now + NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME);
1112 neigh_dbg(2, "neigh %p is suspected\n", neigh);
1113 WRITE_ONCE(neigh->nud_state, NUD_STALE);
1114 neigh->updated = jiffies;
1115 neigh_suspect(neigh);
1120 neigh->confirmed +
1121 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME))) {
1122 neigh_dbg(2, "neigh %p is now reachable\n", neigh);
1123 WRITE_ONCE(neigh->nud_state, NUD_REACHABLE);
1124 neigh->updated = jiffies;
1125 neigh_connect(neigh);
1127 next = neigh->confirmed + neigh->parms->reachable_time;
1129 neigh_dbg(2, "neigh %p is probed\n", neigh);
1130 WRITE_ONCE(neigh->nud_state, NUD_PROBE);
1131 neigh->updated = jiffies;
1132 atomic_set(&neigh->probes, 0);
1134 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
1139 next = now + max(NEIGH_VAR(neigh->parms, RETRANS_TIME), HZ/100);
1142 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
1143 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
1144 WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1146 neigh_invalidate(neigh);
1150 if (neigh->nud_state & NUD_IN_TIMER) {
1153 if (!mod_timer(&neigh->timer, next))
1154 neigh_hold(neigh);
1156 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
1157 neigh_probe(neigh);
1160 write_unlock(&neigh->lock);
1164 neigh_update_notify(neigh, 0);
1166 trace_neigh_timer_handler(neigh, 0);
1168 neigh_release(neigh);
1171 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb,
1177 write_lock_bh(&neigh->lock);
1180 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
1182 if (neigh->dead)
1185 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
1186 if (NEIGH_VAR(neigh->parms, MCAST_PROBES) +
1187 NEIGH_VAR(neigh->parms, APP_PROBES)) {
1190 atomic_set(&neigh->probes,
1191 NEIGH_VAR(neigh->parms, UCAST_PROBES));
1192 neigh_del_timer(neigh);
1193 WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1194 neigh->updated = now;
1199 next = now + max(NEIGH_VAR(neigh->parms,
1203 neigh_add_timer(neigh, next);
1205 WRITE_ONCE(neigh->nud_state, NUD_FAILED);
1206 neigh->updated = jiffies;
1207 write_unlock_bh(&neigh->lock);
1212 } else if (neigh->nud_state & NUD_STALE) {
1213 neigh_dbg(2, "neigh %p is delayed\n", neigh);
1214 neigh_del_timer(neigh);
1215 WRITE_ONCE(neigh->nud_state, NUD_DELAY);
1216 neigh->updated = jiffies;
1217 neigh_add_timer(neigh, jiffies +
1218 NEIGH_VAR(neigh->parms, DELAY_PROBE_TIME));
1221 if (neigh->nud_state == NUD_INCOMPLETE) {
1223 while (neigh->arp_queue_len_bytes + skb->truesize >
1224 NEIGH_VAR(neigh->parms, QUEUE_LEN_BYTES)) {
1227 buff = __skb_dequeue(&neigh->arp_queue);
1230 neigh->arp_queue_len_bytes -= buff->truesize;
1232 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
1235 __skb_queue_tail(&neigh->arp_queue, skb);
1236 neigh->arp_queue_len_bytes += skb->truesize;
1242 neigh_probe(neigh);
1244 write_unlock(&neigh->lock);
1246 trace_neigh_event_send_done(neigh, rc);
1250 if (neigh->nud_state & NUD_STALE)
1252 write_unlock_bh(&neigh->lock);
1254 trace_neigh_event_send_dead(neigh, 1);
1259 static void neigh_update_hhs(struct neighbour *neigh)
1265 if (neigh->dev->header_ops)
1266 update = neigh->dev->header_ops->cache_update;
1269 hh = &neigh->hh;
1272 update(hh, neigh->dev, neigh->ha);
1297 static int __neigh_update(struct neighbour *neigh, const u8 *lladdr,
1307 trace_neigh_update(neigh, lladdr, new, flags, nlmsg_pid);
1309 write_lock_bh(&neigh->lock);
1311 dev = neigh->dev;
1312 old = neigh->nud_state;
1315 if (neigh->dead) {
1324 neigh_update_flags(neigh, flags, &notify, &gc_update, &managed_update);
1327 WRITE_ONCE(neigh->nud_state, new);
1333 neigh_del_timer(neigh);
1335 neigh_suspect(neigh);
1336 WRITE_ONCE(neigh->nud_state, new);
1341 neigh_invalidate(neigh);
1350 lladdr = neigh->ha;
1358 !memcmp(lladdr, neigh->ha, dev->addr_len))
1359 lladdr = neigh->ha;
1369 lladdr = neigh->ha;
1376 neigh->confirmed = jiffies;
1384 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1388 lladdr = neigh->ha;
1393 if (lladdr == neigh->ha && new == NUD_STALE &&
1403 if (new != old || lladdr != neigh->ha)
1404 neigh->updated = jiffies;
1407 neigh_del_timer(neigh);
1409 atomic_set(&neigh->probes, 0);
1411 neigh_add_timer(neigh, (jiffies +
1413 neigh->parms->reachable_time :
1415 WRITE_ONCE(neigh->nud_state, new);
1419 if (lladdr != neigh->ha) {
1420 write_seqlock(&neigh->ha_lock);
1421 memcpy(&neigh->ha, lladdr, dev->addr_len);
1422 write_sequnlock(&neigh->ha_lock);
1423 neigh_update_hhs(neigh);
1425 neigh->confirmed = jiffies -
1426 (NEIGH_VAR(neigh->parms, BASE_REACHABLE_TIME) << 1);
1432 neigh_connect(neigh);
1434 neigh_suspect(neigh);
1440 while (neigh->nud_state & NUD_VALID &&
1441 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1443 struct neighbour *n2, *n1 = neigh;
1444 write_unlock_bh(&neigh->lock);
1448 /* Why not just use 'neigh' as-is? The problem is that
1450 * using alternative, different, neigh objects to output
1452 * here is re-lookup the top-level neigh in the path so
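A condensed sketch of the re-lookup this comment describes: each packet queued on arp_queue is reinjected through the neighbour currently attached to its dst rather than through 'neigh' directly (the real loop in __neigh_update also drops neigh->lock and runs under rcu_read_lock() around each packet):

	struct dst_entry *dst = skb_dst(skb);
	struct neighbour *n1 = neigh, *n2 = NULL;

	if (dst && dst->obsolete != DST_OBSOLETE_DEAD) {
		n2 = dst_neigh_lookup_skb(dst, skb);	/* top-level neigh in the path */
		if (n2)
			n1 = n2;
	}
	READ_ONCE(n1->output)(n1, skb);			/* reinject via that neigh */
	if (n2)
		neigh_release(n2);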
1466 write_lock_bh(&neigh->lock);
1468 __skb_queue_purge(&neigh->arp_queue);
1469 neigh->arp_queue_len_bytes = 0;
1473 neigh_update_is_router(neigh, flags, &notify);
1474 write_unlock_bh(&neigh->lock);
1476 neigh_update_gc_list(neigh);
1478 neigh_update_managed_list(neigh);
1480 neigh_update_notify(neigh, nlmsg_pid);
1481 trace_neigh_update_done(neigh, err);
1485 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
1488 return __neigh_update(neigh, lladdr, new, flags, nlmsg_pid, NULL);
1492 /* Update the neigh to listen temporarily for probe responses, even if it is
1493 * in a NUD_FAILED state. The caller has to hold neigh->lock for writing.
1495 void __neigh_set_probe_once(struct neighbour *neigh)
1497 if (neigh->dead)
1499 neigh->updated = jiffies;
1500 if (!(neigh->nud_state & NUD_FAILED))
1502 WRITE_ONCE(neigh->nud_state, NUD_INCOMPLETE);
1503 atomic_set(&neigh->probes, neigh_max_probes(neigh));
1504 neigh_add_timer(neigh,
1505 jiffies + max(NEIGH_VAR(neigh->parms, RETRANS_TIME),
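__neigh_set_probe_once() itself takes no lock, so per the comment above a caller has to wrap it; a hypothetical caller sketch (names not from the kernel):

static void example_force_probe(struct neighbour *neigh)
{
	write_lock_bh(&neigh->lock);
	__neigh_set_probe_once(neigh);	/* caller holds neigh->lock for writing */
	write_unlock_bh(&neigh->lock);
}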
1514 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1516 if (neigh)
1517 neigh_update(neigh, lladdr, NUD_STALE,
1519 return neigh;
1543 int neigh_resolve_output(struct neighbour *neigh, struct sk_buff *skb)
1547 if (!neigh_event_send(neigh, skb)) {
1549 struct net_device *dev = neigh->dev;
1552 if (dev->header_ops->cache && !READ_ONCE(neigh->hh.hh_len))
1553 neigh_hh_init(neigh);
1557 seq = read_seqbegin(&neigh->ha_lock);
1559 neigh->ha, NULL, skb->len);
1560 } while (read_seqretry(&neigh->ha_lock, seq));
1578 int neigh_connected_output(struct neighbour *neigh, struct sk_buff *skb)
1580 struct net_device *dev = neigh->dev;
1586 seq = read_seqbegin(&neigh->ha_lock);
1588 neigh->ha, NULL, skb->len);
1589 } while (read_seqretry(&neigh->ha_lock, seq));
1601 int neigh_direct_output(struct neighbour *neigh, struct sk_buff *skb)
1611 struct neighbour *neigh;
1614 list_for_each_entry(neigh, &tbl->managed_list, managed_list)
1615 neigh_event_send_probe(neigh, NULL, false);
1901 struct neighbour *neigh;
1941 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1942 if (neigh == NULL) {
1947 err = __neigh_update(neigh, NULL, NUD_FAILED,
1951 neigh_release(neigh);
1952 neigh_remove_one(neigh, tbl);
1969 struct neighbour *neigh;
1992 BUILD_BUG_ON(sizeof(neigh->flags) * BITS_PER_BYTE <
2053 neigh = neigh_lookup(tbl, dst, dev);
2054 if (neigh == NULL) {
2069 neigh = ___neigh_create(tbl, dst, dev,
2073 if (IS_ERR(neigh)) {
2074 err = PTR_ERR(neigh);
2080 neigh_release(neigh);
2090 neigh->protocol = protocol;
2100 err = __neigh_update(neigh, lladdr, ndm->ndm_state, flags,
2103 neigh_event_send(neigh, NULL);
2106 neigh_release(neigh);
2562 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2575 neigh_flags_ext = neigh->flags >> NTF_EXT_SHIFT;
2576 neigh_flags = neigh->flags & NTF_OLD_MASK;
2579 ndm->ndm_family = neigh->ops->family;
2583 ndm->ndm_type = neigh->type;
2584 ndm->ndm_ifindex = neigh->dev->ifindex;
2586 if (nla_put(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key))
2589 read_lock_bh(&neigh->lock);
2590 ndm->ndm_state = neigh->nud_state;
2591 if (neigh->nud_state & NUD_VALID) {
2594 neigh_ha_snapshot(haddr, neigh, neigh->dev);
2595 if (nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, haddr) < 0) {
2596 read_unlock_bh(&neigh->lock);
2601 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2602 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2603 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2604 ci.ndm_refcnt = refcount_read(&neigh->refcnt) - 1;
2605 read_unlock_bh(&neigh->lock);
2607 if (nla_put_u32(skb, NDA_PROBES, atomic_read(&neigh->probes)) ||
2611 if (neigh->protocol && nla_put_u8(skb, NDA_PROTOCOL, neigh->protocol))
2664 static void neigh_update_notify(struct neighbour *neigh, u32 nlmsg_pid)
2666 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2667 __neigh_notify(neigh, RTM_NEWNEIGH, 0, nlmsg_pid);
2974 static int neigh_get_reply(struct net *net, struct neighbour *neigh,
2984 err = neigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0);
3003 static int pneigh_get_reply(struct net *net, struct pneigh_entry *neigh,
3013 err = pneigh_fill_info(skb, neigh, pid, seq, RTM_NEWNEIGH, 0, tbl);
3030 struct neighbour *neigh;
3071 neigh = neigh_lookup(tbl, dst, dev);
3072 if (!neigh) {
3077 err = neigh_get_reply(net, neigh, NETLINK_CB(in_skb).portid,
3080 neigh_release(neigh);
3148 struct neighbour *neigh;
3157 neigh = __ipv4_neigh_lookup_noref(dev, key);
3159 neigh = __neigh_lookup_noref(tbl, addr, dev);
3161 if (!neigh)
3162 neigh = __neigh_create(tbl, addr, dev, false);
3163 err = PTR_ERR(neigh);
3164 if (IS_ERR(neigh)) {
3168 err = READ_ONCE(neigh->output)(neigh, skb);
3792 char neigh_path[ sizeof("net//neigh/") + IFNAMSIZ + IFNAMSIZ ];
3857 snprintf(neigh_path, sizeof(neigh_path), "net/%s/neigh/%s",