1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Based on net/ipv6/ip6_fib.c
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 *
7 * Changes:
8 * Yuji SEKIYA @USAGI: Support default route on router node;
9 * remove ip6_null_entry from the top of
10 * routing table.
11 * Ville Nuorvala: Fixed routing subtrees.
12 *
13 * Linux NewIP INET implementation
14 * Forwarding Information Database
15 */
16 #define pr_fmt(fmt) KBUILD_MODNAME ": [%s:%d] " fmt, __func__, __LINE__
17
18 #include <linux/errno.h>
19 #include <linux/types.h>
20 #include <linux/net.h>
21 #include <linux/route.h>
22 #include <linux/netdevice.h>
23 #include <linux/init.h>
24 #include <linux/list.h>
25
26 #include <net/nip.h>
27 #include <net/ndisc.h>
28 #include <net/addrconf.h>
29
30 #include <net/nip_fib.h>
31 #include <net/nip_route.h>
32 #include "tcp_nip_parameter.h"
33
34 static struct kmem_cache *nip_fib_node_kmem __read_mostly;
35
/* Look up a NewIP FIB table by id.
 * Returns the per-net main or local table, or NULL for an unknown id.
 */
struct nip_fib_table *nip_fib_get_table(struct net *net, u32 id)
{
	switch (id) {
	case NIP_RT_TABLE_MAIN:
		return net->newip.nip_fib_main_tbl;
	case NIP_RT_TABLE_LOCAL:
		return net->newip.nip_fib_local_tbl;
	default:
		return NULL;
	}
}
45
nip_node_alloc(void)46 static struct nip_fib_node *nip_node_alloc(void)
47 {
48 struct nip_fib_node *fn;
49
50 fn = kmem_cache_zalloc(nip_fib_node_kmem, GFP_ATOMIC);
51
52 return fn;
53 }
54
nip_rt_free_pcpu(struct nip_rt_info *non_pcpu_rt)55 void nip_rt_free_pcpu(struct nip_rt_info *non_pcpu_rt)
56 {
57 int cpu;
58
59 if (!non_pcpu_rt->rt_pcpu)
60 return;
61
62 for_each_possible_cpu(cpu) {
63 struct nip_rt_info **ppcpu_rt;
64 struct nip_rt_info *pcpu_rt;
65
66 ppcpu_rt = per_cpu_ptr(non_pcpu_rt->rt_pcpu, cpu);
67 pcpu_rt = *ppcpu_rt;
68 if (pcpu_rt) {
69 dst_dev_put(&pcpu_rt->dst);
70 dst_release(&pcpu_rt->dst);
71 *ppcpu_rt = NULL;
72 }
73 }
74
75 free_percpu(non_pcpu_rt->rt_pcpu);
76 non_pcpu_rt->rt_pcpu = NULL;
77 }
78
ninet_route_hash(const struct nip_addr *addr)79 static u32 ninet_route_hash(const struct nip_addr *addr)
80 {
81 return hash_32(nip_addr_hash(addr), NIN_ROUTE_HSIZE_SHIFT);
82 }
83
nip_fib_locate(struct hlist_head *nip_tb_head, const struct nip_addr *daddr)84 struct nip_fib_node *nip_fib_locate(struct hlist_head *nip_tb_head,
85 const struct nip_addr *daddr)
86 {
87 struct nip_fib_node *fib_node;
88 struct hlist_head *h;
89 unsigned int hash;
90
91 /* hash calc ensures that the hash index is valid without memory overruns */
92 hash = ninet_route_hash(daddr);
93 h = &nip_tb_head[hash];
94
95 hlist_for_each_entry_rcu(fib_node, h, fib_hlist) {
96 if (nip_addr_eq(&fib_node->nip_route_info->rt_dst, daddr))
97 return fib_node;
98 }
99
100 /* find default route */
101 /* hash calc ensures that the hash index is valid without memory overruns */
102 hash = ninet_route_hash(&nip_any_addr);
103 h = &nip_tb_head[hash];
104
105 hlist_for_each_entry_rcu(fib_node, h, fib_hlist) {
106 if (nip_addr_eq(&fib_node->nip_route_info->rt_dst, &nip_any_addr))
107 return fib_node;
108 }
109
110 return NULL;
111 }
112
is_nip_route_exist(const struct hlist_head *h, const struct nip_rt_info *rt, u8 table_id)113 static bool is_nip_route_exist(const struct hlist_head *h, const struct nip_rt_info *rt,
114 u8 table_id)
115 {
116 struct nip_fib_node *fib_node;
117
118 hlist_for_each_entry(fib_node, h, fib_hlist) {
119 if (table_id == NIP_RT_TABLE_MAIN) {
120 if (nip_addr_eq(&fib_node->nip_route_info->rt_dst,
121 &rt->rt_dst))
122 return true;
123 } else if (table_id == NIP_RT_TABLE_LOCAL) {
124 if (nip_addr_and_ifindex_eq
125 (&fib_node->nip_route_info->rt_dst, &rt->rt_dst,
126 fib_node->nip_route_info->rt_idev->dev->ifindex,
127 rt->rt_idev->dev->ifindex))
128 return true;
129 }
130 }
131 return false;
132 }
133
134 /* nip_tb_lock must be taken to avoid racing */
nip_fib_add(struct nip_fib_table *table, struct nip_rt_info *rt)135 int nip_fib_add(struct nip_fib_table *table, struct nip_rt_info *rt)
136 {
137 struct nip_fib_node *new_node;
138 int err = 0;
139 struct hlist_head *h;
140 unsigned int hash;
141 char dst[NIP_ADDR_BIT_LEN_MAX] = {0};
142 char gateway[NIP_ADDR_BIT_LEN_MAX] = {0};
143
144 /* hash calc ensures that the hash index is valid without memory overruns */
145 hash = ninet_route_hash(&rt->rt_dst);
146 h = &table->nip_tb_head[hash];
147
148 nip_addr_to_str(&rt->rt_dst, dst, NIP_ADDR_BIT_LEN_MAX);
149 nip_addr_to_str(&rt->gateway, gateway, NIP_ADDR_BIT_LEN_MAX);
150 nip_dbg("%s ifindex=%u (addr=%s, gateway=%s, rt_idev->refcnt=%u)",
151 rt->rt_idev->dev->name, rt->rt_idev->dev->ifindex,
152 dst, gateway, refcount_read(&rt->rt_idev->refcnt));
153
154 if (is_nip_route_exist(h, rt, table->nip_tb_id)) {
155 err = -EEXIST;
156 nip_dbg("File exists");
157 goto fail;
158 }
159
160 new_node = nip_node_alloc();
161 if (!new_node) {
162 nip_dbg("fail to alloc mem");
163 err = -ENOMEM;
164 goto fail;
165 }
166 new_node->nip_route_info = rt;
167 rcu_assign_pointer(rt->rt_node, new_node);
168 atomic_inc(&rt->rt_ref);
169 hlist_add_tail_rcu(&new_node->fib_hlist, h);
170
171 out:
172 return err;
173
174 fail:
175 dst_release_immediate(&rt->dst);
176 goto out;
177 }
178
nip_fib_destroy_rcu(struct rcu_head *head)179 static void nip_fib_destroy_rcu(struct rcu_head *head)
180 {
181 struct nip_fib_node *fn = container_of(head, struct nip_fib_node, rcu);
182
183 nip_rt_release(fn->nip_route_info);
184 kfree(fn);
185 }
186
/* nip_tb_lock must be taken to avoid racing */
/* Unlink @rt's fib node from its hash chain and schedule its release.
 * Returns 0 on success, -ENOENT when the route has no node or is the
 * per-net null entry.
 */
int nip_fib_del(struct nip_rt_info *rt, struct nl_info *info)
{
	struct nip_fib_node *fn;
	struct net *net = info->nl_net;

	/* Safe to dereference rt_node outside an RCU read section only
	 * because the table lock is held (checked by lockdep).
	 */
	fn = rcu_dereference_protected(rt->rt_node,
				       lockdep_is_held(&rt->rt_table->nip_tb_lock));
	if (!fn || rt == net->newip.nip_null_entry)
		return -ENOENT;

	/* Remove from the chain first so new readers cannot find the node */
	hlist_del_init_rcu(&fn->fib_hlist);

	/* route_info directed by the fib_node can be released
	 * only after the fib_node is released
	 */
	RCU_INIT_POINTER(rt->rt_node, NULL);
	call_rcu(&fn->rcu, nip_fib_destroy_rcu);

	return 0;
}
208
/* Free a fib table structure allocated by nip_fib_net_init().
 * NOTE(review): frees only the table itself; assumes its hash chains
 * have already been emptied — confirm against the exit path.
 */
static void nip_fib_free_table(struct nip_fib_table *table)
{
	kfree(table);
}
213
214 /* caller must hold nip_tb_lock */
nip_fib_clean_hash(struct net *net, struct hlist_head *nip_tb_head, int (*func)(struct nip_rt_info *, void *arg), void *arg)215 static void nip_fib_clean_hash(struct net *net, struct hlist_head *nip_tb_head,
216 int (*func)(struct nip_rt_info *, void *arg),
217 void *arg)
218 {
219 int i;
220 int err;
221 struct nip_fib_node *fn;
222 struct hlist_node *tmp;
223 struct nl_info info = {
224 .nl_net = net,
225 };
226
227 for (i = 0; i < NIN_ROUTE_HSIZE; i++) {
228 struct hlist_head *h = &nip_tb_head[i];
229
230 hlist_for_each_entry_safe(fn, tmp, h, fib_hlist) {
231 if (func(fn->nip_route_info, arg) < 0) {
232 char dst[NIP_ADDR_BIT_LEN_MAX] = {0};
233 char gateway[NIP_ADDR_BIT_LEN_MAX] = {0};
234
235 nip_addr_to_str(&fn->nip_route_info->rt_dst, dst,
236 NIP_ADDR_BIT_LEN_MAX);
237 nip_addr_to_str(&fn->nip_route_info->gateway, gateway,
238 NIP_ADDR_BIT_LEN_MAX);
239
240 nip_dbg("try to del rt_info, rt_dst=%s, gateway=%s", dst, gateway);
241 err = nip_fib_del(fn->nip_route_info, &info);
242 if (err)
243 nip_dbg("nip_fib_del failed");
244 }
245 }
246 }
247 }
248
nip_fib_clean_all(struct net *net, int (*func)(struct nip_rt_info *, void *arg), void *arg)249 void nip_fib_clean_all(struct net *net,
250 int (*func)(struct nip_rt_info *, void *arg), void *arg)
251 {
252 struct nip_fib_table *main_tbl = net->newip.nip_fib_main_tbl;
253 struct nip_fib_table *local_tbl = net->newip.nip_fib_local_tbl;
254
255 spin_lock_bh(&main_tbl->nip_tb_lock);
256 nip_fib_clean_hash(net, main_tbl->nip_tb_head, func, arg);
257 spin_unlock_bh(&main_tbl->nip_tb_lock);
258
259 spin_lock_bh(&local_tbl->nip_tb_lock);
260 nip_fib_clean_hash(net, local_tbl->nip_tb_head, func, arg);
261 spin_unlock_bh(&local_tbl->nip_tb_lock);
262 }
263
/* Prepare one table for use; currently only its lock needs init. */
static void nip_fib_link_table(struct nip_fib_table *tb)
{
	/* You need to initialize multiple routing tables */
	spin_lock_init(&tb->nip_tb_lock);
}
269
/* Initialize both per-net tables once they have been allocated. */
static void __net_init nip_fib_tables_init(struct net *net)
{
	nip_fib_link_table(net->newip.nip_fib_main_tbl);
	nip_fib_link_table(net->newip.nip_fib_local_tbl);
}
275
nip_fib_net_init(struct net *net)276 static int __net_init nip_fib_net_init(struct net *net)
277 {
278 net->newip.nip_fib_main_tbl =
279 kzalloc(sizeof(*net->newip.nip_fib_main_tbl), GFP_KERNEL);
280 if (!net->newip.nip_fib_main_tbl)
281 goto out_fib_table_hash;
282
283 net->newip.nip_fib_main_tbl->nip_tb_id = NIP_RT_TABLE_MAIN;
284 net->newip.nip_fib_main_tbl->flags = 1;
285
286 net->newip.nip_fib_local_tbl =
287 kzalloc(sizeof(*net->newip.nip_fib_local_tbl), GFP_KERNEL);
288 if (!net->newip.nip_fib_local_tbl)
289 goto out_main_tbl;
290
291 net->newip.nip_fib_local_tbl->nip_tb_id = NIP_RT_TABLE_LOCAL;
292
293 nip_fib_tables_init(net);
294
295 return 0;
296
297 out_main_tbl:
298 kfree(net->newip.nip_fib_main_tbl);
299 out_fib_table_hash:
300 return -ENOMEM;
301 }
302
/* Per-net teardown: free both fib tables allocated in nip_fib_net_init().
 * NOTE(review): assumes routes were already purged by this point —
 * confirm against the callers of nip_fib_clean_all().
 */
static void __net_exit nip_fib_net_exit(struct net *net)
{
	nip_fib_free_table(net->newip.nip_fib_main_tbl);
	nip_fib_free_table(net->newip.nip_fib_local_tbl);
}
308
/* Pernet hooks: run table init/exit for every network namespace. */
static struct pernet_operations nip_fib_net_ops = {
	.init = nip_fib_net_init,
	.exit = nip_fib_net_exit,
};
313
nip_fib_init(void)314 int __init nip_fib_init(void)
315 {
316 int ret = -ENOMEM;
317
318 nip_fib_node_kmem = kmem_cache_create("nip_fib_nodes",
319 sizeof(struct nip_fib_node),
320 0, SLAB_HWCACHE_ALIGN, NULL);
321 if (!nip_fib_node_kmem)
322 goto out;
323
324 nip_dbg("nip_fib_node size is %lu",
325 sizeof(struct nip_fib_node) + sizeof(struct nip_rt_info));
326
327 ret = register_pernet_subsys(&nip_fib_net_ops);
328 if (ret)
329 goto out_kmem_cache_create;
330
331 out:
332 return ret;
333
334 out_kmem_cache_create:
335 kmem_cache_destroy(nip_fib_node_kmem);
336 goto out;
337 }
338
/* When adding the __exit tag to a function, it is important to
 * ensure that the function is only called during the exit phase
 * to avoid unnecessary warnings and errors.
 */
/* Module teardown: unregister pernet ops first so no namespace can
 * allocate from the cache, then destroy the slab cache.
 */
void nip_fib_gc_cleanup(void)
{
	unregister_pernet_subsys(&nip_fib_net_ops);
	kmem_cache_destroy(nip_fib_node_kmem);
}
348
349