// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
 *
 * Copyright (c) 2019 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/skb.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#define CAN_GW_NAME "can-gw"

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS(CAN_GW_NAME);

#define CGW_MIN_HOPS 1
#define CGW_MAX_HOPS 6
#define CGW_DEFAULT_HOPS 1

static unsigned int max_hops __read_mostly = CGW_DEFAULT_HOPS;
module_param(max_hops, uint, 0444);
MODULE_PARM_DESC(max_hops,
		 "maximum " CAN_GW_NAME " routing hops for CAN frames "
		 "(valid values: " __stringify(CGW_MIN_HOPS) "-"
		 __stringify(CGW_MAX_HOPS) " hops, "
		 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
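/* Example (illustrative): max_hops is a read-only (0444) module parameter,
 * so it can only be chosen at load time, e.g.
 *
 *   modprobe can-gw max_hops=2
 *
 * Out-of-range values are clamped to CGW_MIN_HOPS..CGW_MAX_HOPS in
 * cgw_module_init().
 */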

static struct notifier_block notifier;
static struct kmem_cache *cgw_cache __read_mostly;

/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct canfd_frame and;
		struct canfd_frame or;
		struct canfd_frame xor;
		struct canfd_frame set;
	} modframe;
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;
	void (*modfunc[MAX_MODFUNCTIONS])(struct canfd_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		void (*xor)(struct canfd_frame *cf,
			    struct cgw_csum_xor *xor);
		void (*crc8)(struct canfd_frame *cf,
			     struct cgw_csum_crc8 *crc8);
	} csumfunc;
	u32 uid;
};
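
/* The modfunc[] and csumfunc function pointers are filled once at
 * configuration time in cgw_parse_attr() and are then executed in order
 * for every routed frame in the can_can_gw_rcv() hot path, so no
 * modification flags need to be evaluated per frame.
 */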

/* So far we just support CAN -> CAN routing and frame modifications.
 *
 * The internal can_can_gw structure contains data and attributes for
 * a CAN -> CAN gateway job.
 */
struct can_can_gw {
	struct can_filter filter;
	int src_idx;
	int dst_idx;
};

/* list entry for CAN gateway jobs */
struct cgw_job {
	struct hlist_node list;
	struct rcu_head rcu;
	u32 handled_frames;
	u32 dropped_frames;
	u32 deleted_frames;
	struct cf_mod mod;
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
		/* tbc */
	};
	u8 gwtype;
	u8 limit_hops;
	u16 flags;
};

/* modification functions that are invoked in the hot path in can_can_gw_rcv */

#define MODFUNC(func, op) static void func(struct canfd_frame *cf, \
					   struct cf_mod *mod) { op ; }

MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_len, cf->len &= mod->modframe.and.len)
MODFUNC(mod_and_flags, cf->flags &= mod->modframe.and.flags)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_len, cf->len |= mod->modframe.or.len)
MODFUNC(mod_or_flags, cf->flags |= mod->modframe.or.flags)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_len, cf->len ^= mod->modframe.xor.len)
MODFUNC(mod_xor_flags, cf->flags ^= mod->modframe.xor.flags)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_len, cf->len = mod->modframe.set.len)
MODFUNC(mod_set_flags, cf->flags = mod->modframe.set.flags)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
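
/* The Classical CAN helpers above modify all 8 data bytes with a single
 * u64 access. The *_fddata variants below apply the same operation to the
 * full 64 byte CAN FD payload in 8 byte steps.
 */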

static void mod_and_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) &= *(u64 *)(mod->modframe.and.data + i);
}

static void mod_or_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) |= *(u64 *)(mod->modframe.or.data + i);
}

static void mod_xor_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	int i;

	for (i = 0; i < CANFD_MAX_DLEN; i += 8)
		*(u64 *)(cf->data + i) ^= *(u64 *)(mod->modframe.xor.data + i);
}

static void mod_set_fddata(struct canfd_frame *cf, struct cf_mod *mod)
{
	memcpy(cf->data, mod->modframe.set.data, CANFD_MAX_DLEN);
}

static void canframecpy(struct canfd_frame *dst, struct can_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->len = src->can_dlc;
	*(u64 *)dst->data = *(u64 *)src->data;
}

static void canfdframecpy(struct canfd_frame *dst, struct canfd_frame *src)
{
	/* Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 2 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->flags = src->flags;
	dst->len = src->len;
	memcpy(dst->data, src->data, CANFD_MAX_DLEN);
}

static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re, struct rtcanmsg *r)
{
	s8 dlen = CAN_MAX_DLEN;

	if (r->flags & CGW_FLAGS_CAN_FD)
		dlen = CANFD_MAX_DLEN;

	/* absolute dlc values 0 .. 7 => 0 .. 7, e.g. data [0]
	 * relative to received dlc -1 .. -8 :
	 * e.g. for received dlc = 8
	 * -1 => index = 7 (data[7])
	 * -3 => index = 5 (data[5])
	 * -8 => index = 0 (data[0])
	 */

	if (fr >= -dlen && fr < dlen &&
	    to >= -dlen && to < dlen &&
	    re >= -dlen && re < dlen)
		return 0;
	else
		return -EINVAL;
}

static inline int calc_idx(int idx, int rx_len)
{
	if (idx < 0)
		return rx_len + idx;
	else
		return idx;
}
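
/* calc_idx() examples: with a received data length of 8, calc_idx(-1, 8) == 7
 * (last byte) and calc_idx(-8, 8) == 0 (first byte), while non-negative
 * indices like calc_idx(3, 8) == 3 are passed through unchanged.
 */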

static void cgw_csum_xor_rel(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->len);
	int to = calc_idx(xor->to_idx, cf->len);
	int res = calc_idx(xor->result_idx, cf->len);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}

static void cgw_csum_xor_pos(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i <= xor->to_idx; i++)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

static void cgw_csum_xor_neg(struct canfd_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i;

	for (i = xor->from_idx; i >= xor->to_idx; i--)
		val ^= cf->data[i];

	cf->data[xor->result_idx] = val;
}

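/* The CRC8 helpers below feed the configured data range through the
 * user-supplied 256 byte lookup table, optionally mix in one profile
 * specific byte (a constant, a table entry indexed by the low nibble of
 * data[1], or the low CAN ID bytes) and finally XOR the result with
 * final_xor_val before storing it at the result index.
 */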
static void cgw_csum_crc8_rel(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->len);
	int to = calc_idx(crc8->to_idx, cf->len);
	int res = calc_idx(crc8->result_idx, cf->len);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	if (from <= to) {
		for (i = crc8->from_idx; i <= crc8->to_idx; i++)
			crc = crc8->crctab[crc ^ cf->data[i]];
	} else {
		for (i = crc8->from_idx; i >= crc8->to_idx; i--)
			crc = crc8->crctab[crc ^ cf->data[i]];
	}

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_pos(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

static void cgw_csum_crc8_neg(struct canfd_frame *cf,
			      struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc ^ cf->data[i]];

	switch (crc8->profile) {
	case CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc ^ crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc ^ (cf->can_id & 0xFF) ^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc ^ crc8->final_xor_val;
}

/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct canfd_frame *cf;
	struct sk_buff *nskb;
	int modidx = 0;

	/* process strictly Classic CAN or CAN FD frames */
	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		if (skb->len != CANFD_MTU)
			return;
	} else {
		if (skb->len != CAN_MTU)
			return;
	}

	/* Do not handle CAN frames routed more than 'max_hops' times.
	 * In general we should never catch this delimiter which is intended
	 * to cover a misconfiguration protection (e.g. circular CAN routes).
	 *
	 * The Controller Area Network controllers only accept CAN frames with
	 * correct CRCs - which are not visible in the controller registers.
	 * According to skbuff.h documentation the csum_start element for IP
	 * checksums is undefined/unused when ip_summed == CHECKSUM_UNNECESSARY.
	 * Only CAN skbs can be processed here which already have this property.
	 */

#define cgw_hops(skb) ((skb)->csum_start)

	BUG_ON(skb->ip_summed != CHECKSUM_UNNECESSARY);

	if (cgw_hops(skb) >= max_hops) {
		/* indicate deleted frames due to misconfiguration */
		gwj->deleted_frames++;
		return;
	}

	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/* is sending the skb back to the incoming interface not allowed? */
	if (!(gwj->flags & CGW_FLAGS_CAN_IIF_TX_OK) &&
	    can_skb_prv(skb)->ifindex == gwj->dst.dev->ifindex)
		return;

	/* clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	if (gwj->mod.modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/* put the incremented hop counter in the cloned skb */
	cgw_hops(nskb) = cgw_hops(skb) + 1;

	/* first processing of this CAN frame -> adjust to private hop limit */
	if (gwj->limit_hops && cgw_hops(nskb) == 1)
		cgw_hops(nskb) = max_hops - gwj->limit_hops + 1;
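	/* e.g. with max_hops == 6 and limit_hops == 2 the counter starts at
	 * 5 after the first hop, so the frame can be forwarded exactly once
	 * more before the >= max_hops check above deletes it.
	 */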

	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct canfd_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

	/* Has the CAN frame been modified? */
	if (modidx) {
		/* get available space for the processed CAN frame type */
		int max_len = nskb->len - offsetof(struct canfd_frame, data);

		/* dlc may have changed, make sure it fits to the CAN frame */
		if (cf->len > max_len) {
			/* delete frame due to misconfiguration */
			gwj->deleted_frames++;
			kfree_skb(nskb);
			return;
		}

		/* check for checksum updates */
		if (gwj->mod.csumfunc.crc8)
			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

		if (gwj->mod.csumfunc.xor)
			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}

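/* cgw_register_filter() subscribes a job to matching CAN frames on its
 * source device via the PF_CAN receive lists: can_can_gw_rcv() is then
 * called (typically from NET_RX softirq context) for every frame that
 * passes the configured can_id/can_mask filter, with the cgw_job passed
 * back as the 'data' argument. cgw_unregister_filter() removes exactly
 * that subscription again.
 */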
static inline int cgw_register_filter(struct net *net, struct cgw_job *gwj)
{
	return can_rx_register(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw", NULL);
}

static inline void cgw_unregister_filter(struct net *net, struct cgw_job *gwj)
{
	can_rx_unregister(net, gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}

static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {
		struct cgw_job *gwj = NULL;
		struct hlist_node *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(net, gwj);
				synchronize_rcu();
				kmem_cache_free(cgw_cache, gwj);
			}
		}
	}

	return NOTIFY_DONE;
}

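/* Fill one netlink message that describes a single gateway job including
 * its statistics, modification and checksum settings. Used to answer
 * RTM_GETROUTE dump requests in cgw_dump_jobs().
 */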
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj, int type,
		       u32 pid, u32 seq, int flags)
{
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, type, sizeof(*rtcan), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
	}

	if (gwj->deleted_frames) {
		if (nla_put_u32(skb, CGW_DELETED, gwj->deleted_frames) < 0)
			goto cancel;
	}

	/* check non default settings of attributes */

	if (gwj->limit_hops) {
		if (nla_put_u8(skb, CGW_LIM_HOPS, gwj->limit_hops) < 0)
			goto cancel;
	}

	if (gwj->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_FDMOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_FDMOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_FDMOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_FDMOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	} else {
		struct cgw_frame_mod mb;

		if (gwj->mod.modtype.and) {
			memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.and;
			if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.or) {
			memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.or;
			if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.xor) {
			memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.xor;
			if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
				goto cancel;
		}

		if (gwj->mod.modtype.set) {
			memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
			mb.modtype = gwj->mod.modtype.set;
			if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
				goto cancel;
		}
	}

	if (gwj->mod.uid) {
		if (nla_put_u32(skb, CGW_MOD_UID, gwj->mod.uid) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {
		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	int idx = 0;
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, &net->can.cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		if (cgw_put_job(skb, gwj, RTM_NEWROUTE,
				NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}

static const struct nla_policy cgw_policy[CGW_MAX + 1] = {
	[CGW_MOD_AND]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_OR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_XOR]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_MOD_SET]	= { .len = sizeof(struct cgw_frame_mod) },
	[CGW_CS_XOR]	= { .len = sizeof(struct cgw_csum_xor) },
	[CGW_CS_CRC8]	= { .len = sizeof(struct cgw_csum_crc8) },
	[CGW_SRC_IF]	= { .type = NLA_U32 },
	[CGW_DST_IF]	= { .type = NLA_U32 },
	[CGW_FILTER]	= { .len = sizeof(struct can_filter) },
	[CGW_LIM_HOPS]	= { .type = NLA_U8 },
	[CGW_MOD_UID]	= { .type = NLA_U32 },
	[CGW_FDMOD_AND]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_OR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_XOR]	= { .len = sizeof(struct cgw_fdframe_mod) },
	[CGW_FDMOD_SET]	= { .len = sizeof(struct cgw_fdframe_mod) },
};

/* check for common and gwtype specific attributes */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr, u8 *limhops)
{
	struct nlattr *tb[CGW_MAX + 1];
	struct rtcanmsg *r = nlmsg_data(nlh);
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse_deprecated(nlh, sizeof(struct rtcanmsg), tb,
				     CGW_MAX, cgw_policy, NULL);
	if (err < 0)
		return err;

	if (tb[CGW_LIM_HOPS]) {
		*limhops = nla_get_u8(tb[CGW_LIM_HOPS]);

		if (*limhops < 1 || *limhops > max_hops)
			return -EINVAL;
	}

	/* check for AND/OR/XOR/SET modifications */
	if (r->flags & CGW_FLAGS_CAN_FD) {
		struct cgw_fdframe_mod mb;

		if (tb[CGW_FDMOD_AND]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_AND], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_and_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_fddata;
		}

		if (tb[CGW_FDMOD_OR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_OR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_or_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_fddata;
		}

		if (tb[CGW_FDMOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_XOR], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_xor_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_fddata;
		}

		if (tb[CGW_FDMOD_SET]) {
			nla_memcpy(&mb, tb[CGW_FDMOD_SET], CGW_FDMODATTR_LEN);

			canfdframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_FLAGS)
				mod->modfunc[modidx++] = mod_set_flags;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_fddata;
		}
	} else {
		struct cgw_frame_mod mb;

		if (tb[CGW_MOD_AND]) {
			nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.and, &mb.cf);
			mod->modtype.and = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_and_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_and_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_and_data;
		}

		if (tb[CGW_MOD_OR]) {
			nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.or, &mb.cf);
			mod->modtype.or = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_or_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_or_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_or_data;
		}

		if (tb[CGW_MOD_XOR]) {
			nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.xor, &mb.cf);
			mod->modtype.xor = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_xor_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_xor_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_xor_data;
		}

		if (tb[CGW_MOD_SET]) {
			nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

			canframecpy(&mod->modframe.set, &mb.cf);
			mod->modtype.set = mb.modtype;

			if (mb.modtype & CGW_MOD_ID)
				mod->modfunc[modidx++] = mod_set_id;

			if (mb.modtype & CGW_MOD_LEN)
				mod->modfunc[modidx++] = mod_set_len;

			if (mb.modtype & CGW_MOD_DATA)
				mod->modfunc[modidx++] = mod_set_data;
		}
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {
		if (tb[CGW_CS_CRC8]) {
			struct cgw_csum_crc8 *c = nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR]) {
			struct cgw_csum_xor *c = nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx, r);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/* select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}

		if (tb[CGW_MOD_UID])
			nla_memcpy(&mod->uid, tb[CGW_MOD_UID], sizeof(u32));
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {
		/* check CGW_TYPE_CAN_CAN specific attributes */
		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;

		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER])
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		ccgw->src_idx = nla_get_u32(tb[CGW_SRC_IF]);
		ccgw->dst_idx = nla_get_u32(tb[CGW_DST_IF]);

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}

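/* Example (illustrative, using the cangw tool from can-utils): a rule that
 * routes all frames from can0 to can1 can be requested from user space
 * with an RTM_NEWROUTE message, e.g.
 *
 *   cangw -A -s can0 -d can1
 *
 * which is handled by cgw_create_job() below.
 */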
static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	if (mod.uid) {
		ASSERT_RTNL();

		/* check for updating an existing job with identical uid */
		hlist_for_each_entry(gwj, &net->can.cgw_list, list) {
			if (gwj->mod.uid != mod.uid)
				continue;

			/* interfaces & filters must be identical */
			if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
				return -EINVAL;

			/* update modifications with disabled softirq & quit */
			local_bh_disable();
			memcpy(&gwj->mod, &mod, sizeof(mod));
			local_bh_enable();
			return 0;
		}
	}

	/* ifindex == 0 is not allowed for job creation */
	if (!ccgw.src_idx || !ccgw.dst_idx)
		return -ENODEV;

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj)
		return -ENOMEM;

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->deleted_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;
	gwj->limit_hops = limhops;

	/* insert already parsed information */
	memcpy(&gwj->mod, &mod, sizeof(mod));
	memcpy(&gwj->ccgw, &ccgw, sizeof(ccgw));

	err = -ENODEV;

	gwj->src.dev = __dev_get_by_index(net, gwj->ccgw.src_idx);

	if (!gwj->src.dev)
		goto out;

	if (gwj->src.dev->type != ARPHRD_CAN)
		goto out;

	gwj->dst.dev = __dev_get_by_index(net, gwj->ccgw.dst_idx);

	if (!gwj->dst.dev)
		goto out;

	if (gwj->dst.dev->type != ARPHRD_CAN)
		goto out;

	ASSERT_RTNL();

	err = cgw_register_filter(net, gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &net->can.cgw_list);
out:
	if (err)
		kmem_cache_free(cgw_cache, gwj);

	return err;
}

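/* Tear down all jobs of a namespace: each job is unlinked, its receive
 * filter is unregistered and an RCU grace period is awaited before the
 * job memory is freed, so no concurrent can_can_gw_rcv() call can still
 * be using it.
 */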
static void cgw_remove_all_jobs(struct net *net)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		synchronize_rcu();
		kmem_cache_free(cgw_cache, gwj);
	}
}

static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh,
			  struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct cgw_job *gwj = NULL;
	struct hlist_node *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	u8 limhops = 0;
	int err = 0;

	if (!netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw, &limhops);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs(net);
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, nx, &net->can.cgw_list, list) {
		if (gwj->flags != r->flags)
			continue;

		if (gwj->limit_hops != limhops)
			continue;

		/* we have a match when uid is enabled and identical */
		if (gwj->mod.uid || mod.uid) {
			if (gwj->mod.uid != mod.uid)
				continue;
		} else {
			/* no uid => check for identical modifications */
			if (memcmp(&gwj->mod, &mod, sizeof(mod)))
				continue;
		}

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(net, gwj);
		synchronize_rcu();
		kmem_cache_free(cgw_cache, gwj);
		err = 0;
		break;
	}

	return err;
}

static int __net_init cangw_pernet_init(struct net *net)
{
	INIT_HLIST_HEAD(&net->can.cgw_list);
	return 0;
}

static void __net_exit cangw_pernet_exit(struct net *net)
{
	rtnl_lock();
	cgw_remove_all_jobs(net);
	rtnl_unlock();
}

static struct pernet_operations cangw_pernet_ops = {
	.init = cangw_pernet_init,
	.exit = cangw_pernet_exit,
};

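/* Module setup: register the per-namespace job list, the job slab cache,
 * the netdevice notifier and the three PF_CAN rtnetlink handlers
 * (RTM_GETROUTE dump, RTM_NEWROUTE create, RTM_DELROUTE remove).
 * On failure everything registered so far is unwound in reverse order.
 */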
static __init int cgw_module_init(void)
{
	int ret;

	/* sanitize given module parameter */
	max_hops = clamp_t(unsigned int, max_hops, CGW_MIN_HOPS, CGW_MAX_HOPS);

	pr_info("can: netlink gateway - max_hops=%d\n", max_hops);

	ret = register_pernet_subsys(&cangw_pernet_ops);
	if (ret)
		return ret;

	ret = -ENOMEM;
	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);
	if (!cgw_cache)
		goto out_cache_create;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	ret = register_netdevice_notifier(&notifier);
	if (ret)
		goto out_register_notifier;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_GETROUTE,
				   NULL, cgw_dump_jobs, 0);
	if (ret)
		goto out_rtnl_register1;

	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_NEWROUTE,
				   cgw_create_job, NULL, 0);
	if (ret)
		goto out_rtnl_register2;
	ret = rtnl_register_module(THIS_MODULE, PF_CAN, RTM_DELROUTE,
				   cgw_remove_job, NULL, 0);
	if (ret)
		goto out_rtnl_register3;

	return 0;

out_rtnl_register3:
	rtnl_unregister(PF_CAN, RTM_NEWROUTE);
out_rtnl_register2:
	rtnl_unregister(PF_CAN, RTM_GETROUTE);
out_rtnl_register1:
	unregister_netdevice_notifier(&notifier);
out_register_notifier:
	kmem_cache_destroy(cgw_cache);
out_cache_create:
	unregister_pernet_subsys(&cangw_pernet_ops);

	return ret;
}

static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	unregister_pernet_subsys(&cangw_pernet_ops);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}

module_init(cgw_module_init);
module_exit(cgw_module_exit);