xref: /kernel/linux/linux-5.10/net/can/j1939/socket.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0
2// Copyright (c) 2010-2011 EIA Electronics,
3//                         Pieter Beyens <pieter.beyens@eia.be>
4// Copyright (c) 2010-2011 EIA Electronics,
5//                         Kurt Van Dijck <kurt.van.dijck@eia.be>
6// Copyright (c) 2018 Protonic,
7//                         Robin van der Gracht <robin@protonic.nl>
8// Copyright (c) 2017-2019 Pengutronix,
9//                         Marc Kleine-Budde <kernel@pengutronix.de>
10// Copyright (c) 2017-2019 Pengutronix,
11//                         Oleksij Rempel <kernel@pengutronix.de>
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/can/can-ml.h>
16#include <linux/can/core.h>
17#include <linux/can/skb.h>
18#include <linux/errqueue.h>
19#include <linux/if_arp.h>
20
21#include "j1939-priv.h"
22
23#define J1939_MIN_NAMELEN CAN_REQUIRED_SIZE(struct sockaddr_can, can_addr.j1939)
24
25/* conversion function between struct sock::sk_priority from linux and
26 * j1939 priority field
27 */
28static inline priority_t j1939_prio(u32 sk_priority)
29{
30	sk_priority = min(sk_priority, 7U);
31
32	return 7 - sk_priority;
33}
34
35static inline u32 j1939_to_sk_priority(priority_t prio)
36{
37	return 7 - prio;
38}
39
40/* function to see if pgn is to be evaluated */
41static inline bool j1939_pgn_is_valid(pgn_t pgn)
42{
43	return pgn <= J1939_PGN_MAX;
44}
45
46/* test function to avoid non-zero DA placeholder for pdu1 pgn's */
47static inline bool j1939_pgn_is_clean_pdu(pgn_t pgn)
48{
49	if (j1939_pgn_is_pdu1(pgn))
50		return !(pgn & 0xff);
51	else
52		return true;
53}
54
55static inline void j1939_sock_pending_add(struct sock *sk)
56{
57	struct j1939_sock *jsk = j1939_sk(sk);
58
59	atomic_inc(&jsk->skb_pending);
60}
61
62static int j1939_sock_pending_get(struct sock *sk)
63{
64	struct j1939_sock *jsk = j1939_sk(sk);
65
66	return atomic_read(&jsk->skb_pending);
67}
68
69void j1939_sock_pending_del(struct sock *sk)
70{
71	struct j1939_sock *jsk = j1939_sk(sk);
72
73	/* atomic_dec_return returns the new value */
74	if (!atomic_dec_return(&jsk->skb_pending))
75		wake_up(&jsk->waitq);	/* no pending SKB's */
76}
77
/* Publish a socket on the device: mark it bound, take a reference on the
 * device's j1939_priv and link it into the per-device socket list.
 * Counterpart of j1939_jsk_del().
 */
static void j1939_jsk_add(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	/* set BOUND before linking, so receivers walking the list only
	 * ever see fully bound sockets
	 */
	jsk->state |= J1939_SOCK_BOUND;
	j1939_priv_get(priv);

	spin_lock_bh(&priv->j1939_socks_lock);
	list_add_tail(&jsk->list, &priv->j1939_socks);
	spin_unlock_bh(&priv->j1939_socks_lock);
}
87
/* Unpublish a socket from the device: unlink it from the per-device list,
 * drop the j1939_priv reference taken in j1939_jsk_add() and clear the
 * BOUND flag.
 */
static void j1939_jsk_del(struct j1939_priv *priv, struct j1939_sock *jsk)
{
	spin_lock_bh(&priv->j1939_socks_lock);
	list_del_init(&jsk->list);
	spin_unlock_bh(&priv->j1939_socks_lock);

	j1939_priv_put(priv);
	jsk->state &= ~J1939_SOCK_BOUND;
}
97
/* Append a TX session to the socket's session queue, taking a reference
 * on the session.  Returns true when the queue was empty before, i.e. the
 * caller is responsible for activating this session.
 */
static bool j1939_sk_queue_session(struct j1939_session *session)
{
	struct j1939_sock *jsk = j1939_sk(session->sk);
	bool empty;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	empty = list_empty(&jsk->sk_session_queue);
	j1939_session_get(session);
	list_add_tail(&session->sk_session_queue_entry, &jsk->sk_session_queue);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
	/* balanced by j1939_sock_pending_del() when the session completes */
	j1939_sock_pending_add(&jsk->sk);

	return empty;
}
112
/* Return the last session in the socket's queue if it is not yet fully
 * queued (total_queued_size < total_message_size), with a reference taken;
 * NULL otherwise.  Used by sendmsg() to continue a partially queued
 * transfer.
 */
static struct
j1939_session *j1939_sk_get_incomplete_session(struct j1939_sock *jsk)
{
	struct j1939_session *session = NULL;

	spin_lock_bh(&jsk->sk_session_queue_lock);
	if (!list_empty(&jsk->sk_session_queue)) {
		session = list_last_entry(&jsk->sk_session_queue,
					  struct j1939_session,
					  sk_session_queue_entry);
		if (session->total_queued_size == session->total_message_size)
			session = NULL;	/* fully queued, nothing to resume */
		else
			j1939_session_get(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);

	return session;
}
132
/* Drop every queued session of this socket, marking each with @err and
 * releasing the queue's reference on it.
 */
static void j1939_sk_queue_drop_all(struct j1939_priv *priv,
				    struct j1939_sock *jsk, int err)
{
	struct j1939_session *session, *tmp;

	netdev_dbg(priv->ndev, "%s: err: %i\n", __func__, err);
	spin_lock_bh(&jsk->sk_session_queue_lock);
	list_for_each_entry_safe(session, tmp, &jsk->sk_session_queue,
				 sk_session_queue_entry) {
		list_del_init(&session->sk_session_queue_entry);
		session->err = err;
		j1939_session_put(session);
	}
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
148
/* Remove @session from the head of the socket's queue and activate the
 * next queued session, skipping (and failing with -EBUSY) any session
 * that cannot be activated.  Caller holds sk_session_queue_lock.
 */
static void j1939_sk_queue_activate_next_locked(struct j1939_session *session)
{
	struct j1939_sock *jsk;
	struct j1939_session *first;
	int err;

	/* RX-Session don't have a socket (yet) */
	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);
	lockdep_assert_held(&jsk->sk_session_queue_lock);

	err = session->err;

	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);

	/* Someone else has already activated the next session */
	if (first != session)
		return;

activate_next:
	list_del_init(&first->sk_session_queue_entry);
	j1939_session_put(first);
	first = list_first_entry_or_null(&jsk->sk_session_queue,
					 struct j1939_session,
					 sk_session_queue_entry);
	if (!first)
		return;

	if (j1939_session_activate(first)) {
		netdev_warn_once(first->priv->ndev,
				 "%s: 0x%p: Identical session is already activated.\n",
				 __func__, first);
		first->err = -EBUSY;
		goto activate_next;
	} else {
		/* Give receiver some time (arbitrary chosen) to recover */
		int time_ms = 0;

		if (err)
			time_ms = 10 + prandom_u32_max(16);

		j1939_tp_schedule_txtimer(first, time_ms);
	}
}
197
/* Locked wrapper around j1939_sk_queue_activate_next_locked(); no-op for
 * sessions without a socket (RX sessions).
 */
void j1939_sk_queue_activate_next(struct j1939_session *session)
{
	struct j1939_sock *jsk;

	if (!session->sk)
		return;

	jsk = j1939_sk(session->sk);

	spin_lock_bh(&jsk->sk_session_queue_lock);
	j1939_sk_queue_activate_next_locked(session);
	spin_unlock_bh(&jsk->sk_session_queue_lock);
}
211
/* Check whether a received skb matches this socket's bind()/connect()
 * addressing.  Promiscuous sockets match everything.
 */
static bool j1939_sk_match_dst(struct j1939_sock *jsk,
			       const struct j1939_sk_buff_cb *skcb)
{
	if ((jsk->state & J1939_SOCK_PROMISC))
		return true;

	/* Destination address filter */
	if (jsk->addr.src_name && skcb->addr.dst_name) {
		/* both sides use NAME addressing: NAMEs must match */
		if (jsk->addr.src_name != skcb->addr.dst_name)
			return false;
	} else {
		/* receive (all sockets) if
		 * - all packages that match our bind() address
		 * - all broadcast on a socket if SO_BROADCAST
		 *   is set
		 */
		if (j1939_address_is_unicast(skcb->addr.da)) {
			if (jsk->addr.sa != skcb->addr.da)
				return false;
		} else if (!sock_flag(&jsk->sk, SOCK_BROADCAST)) {
			/* receiving broadcast without SO_BROADCAST
			 * flag is not allowed
			 */
			return false;
		}
	}

	/* Source address filter */
	if (jsk->state & J1939_SOCK_CONNECTED) {
		/* receive (all sockets) if
		 * - all packages that match our connect() name or address
		 */
		if (jsk->addr.dst_name && skcb->addr.src_name) {
			if (jsk->addr.dst_name != skcb->addr.src_name)
				return false;
		} else {
			if (jsk->addr.da != skcb->addr.sa)
				return false;
		}
	}

	/* PGN filter: only applied when bind() supplied a valid PGN */
	if (j1939_pgn_is_valid(jsk->pgn_rx_filter) &&
	    jsk->pgn_rx_filter != skcb->addr.pgn)
		return false;

	return true;
}
260
261/* matches skb control buffer (addr) with a j1939 filter */
262static bool j1939_sk_match_filter(struct j1939_sock *jsk,
263				  const struct j1939_sk_buff_cb *skcb)
264{
265	const struct j1939_filter *f;
266	int nfilter;
267
268	spin_lock_bh(&jsk->filters_lock);
269
270	f = jsk->filters;
271	nfilter = jsk->nfilters;
272
273	if (!nfilter)
274		/* receive all when no filters are assigned */
275		goto filter_match_found;
276
277	for (; nfilter; ++f, --nfilter) {
278		if ((skcb->addr.pgn & f->pgn_mask) != f->pgn)
279			continue;
280		if ((skcb->addr.sa & f->addr_mask) != f->addr)
281			continue;
282		if ((skcb->addr.src_name & f->name_mask) != f->name)
283			continue;
284		goto filter_match_found;
285	}
286
287	spin_unlock_bh(&jsk->filters_lock);
288	return false;
289
290filter_match_found:
291	spin_unlock_bh(&jsk->filters_lock);
292	return true;
293}
294
295static bool j1939_sk_recv_match_one(struct j1939_sock *jsk,
296				    const struct j1939_sk_buff_cb *skcb)
297{
298	if (!(jsk->state & J1939_SOCK_BOUND))
299		return false;
300
301	if (!j1939_sk_match_dst(jsk, skcb))
302		return false;
303
304	if (!j1939_sk_match_filter(jsk, skcb))
305		return false;
306
307	return true;
308}
309
/* Deliver a clone of @oskb to one socket, if it matches.  Frames are not
 * looped back to the sending socket itself.
 */
static void j1939_sk_recv_one(struct j1939_sock *jsk, struct sk_buff *oskb)
{
	const struct j1939_sk_buff_cb *oskcb = j1939_skb_to_cb(oskb);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;

	/* don't deliver our own TX frames back to us */
	if (oskb->sk == &jsk->sk)
		return;

	if (!j1939_sk_recv_match_one(jsk, oskcb))
		return;

	skb = skb_clone(oskb, GFP_ATOMIC);
	if (!skb) {
		pr_warn("skb clone failed\n");
		return;
	}
	can_skb_set_owner(skb, oskb->sk);

	skcb = j1939_skb_to_cb(skb);
	skcb->msg_flags &= ~(MSG_DONTROUTE);
	/* locally generated frame: flag it for recvmsg() */
	if (skb->sk)
		skcb->msg_flags |= MSG_DONTROUTE;

	if (sock_queue_rcv_skb(&jsk->sk, skb) < 0)
		kfree_skb(skb);
}
337
/* Return true as soon as any bound socket on @priv would accept an skb
 * with control buffer @skcb.
 */
bool j1939_sk_recv_match(struct j1939_priv *priv, struct j1939_sk_buff_cb *skcb)
{
	struct j1939_sock *jsk;
	bool match = false;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		match = j1939_sk_recv_match_one(jsk, skcb);
		if (match)
			break;
	}
	spin_unlock_bh(&priv->j1939_socks_lock);

	return match;
}
353
/* Fan out a received skb to every matching socket on this device. */
void j1939_sk_recv(struct j1939_priv *priv, struct sk_buff *skb)
{
	struct j1939_sock *jsk;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		j1939_sk_recv_one(jsk, skb);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}
364
static void j1939_sk_sock_destruct(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* This function is called by the generic networking code, when the
	 * socket is ultimately closed (sk->sk_destruct).
	 *
	 * The race between
	 * - processing a received CAN frame
	 *   (can_receive -> j1939_can_recv)
	 *   and accessing j1939_priv
	 * ... and ...
	 * - closing a socket
	 *   (j1939_can_rx_unregister -> can_rx_unregister)
	 *   and calling the final j1939_priv_put()
	 *
	 * is avoided by calling the final j1939_priv_put() from this
	 * RCU deferred cleanup call.
	 */
	if (jsk->priv) {
		j1939_priv_put(jsk->priv);
		jsk->priv = NULL;
	}

	/* call generic CAN sock destruct */
	can_sock_destruct(sk);
}
392
/* Per-socket initialization (struct proto::init); sets addressing
 * defaults (no address, no PGN) and priority 6, the J1939 default.
 */
static int j1939_sk_init(struct sock *sk)
{
	struct j1939_sock *jsk = j1939_sk(sk);

	/* Ensure that "sk" is first member in "struct j1939_sock", so that we
	 * can skip it during memset().
	 */
	BUILD_BUG_ON(offsetof(struct j1939_sock, sk) != 0);
	memset((void *)jsk + sizeof(jsk->sk), 0x0,
	       sizeof(*jsk) - sizeof(jsk->sk));

	INIT_LIST_HEAD(&jsk->list);
	init_waitqueue_head(&jsk->waitq);
	jsk->sk.sk_priority = j1939_to_sk_priority(6);
	jsk->sk.sk_reuse = 1; /* per default */
	jsk->addr.sa = J1939_NO_ADDR;
	jsk->addr.da = J1939_NO_ADDR;
	jsk->addr.pgn = J1939_NO_PGN;
	jsk->pgn_rx_filter = J1939_NO_PGN;
	atomic_set(&jsk->skb_pending, 0);
	spin_lock_init(&jsk->sk_session_queue_lock);
	INIT_LIST_HEAD(&jsk->sk_session_queue);
	spin_lock_init(&jsk->filters_lock);

	/* j1939_sk_sock_destruct() depends on SOCK_RCU_FREE flag */
	sock_set_flag(sk, SOCK_RCU_FREE);
	sk->sk_destruct = j1939_sk_sock_destruct;
	sk->sk_protocol = CAN_J1939;

	return 0;
}
424
/* Validate a user supplied sockaddr_can for bind()/connect().
 * Returns 0 on success or a negative errno.
 */
static int j1939_sk_sanity_check(struct sockaddr_can *addr, int len)
{
	if (!addr)
		return -EDESTADDRREQ;
	if (len < J1939_MIN_NAMELEN)
		return -EINVAL;
	if (addr->can_family != AF_CAN)
		return -EINVAL;
	if (!addr->can_ifindex)
		return -ENODEV;
	/* a valid PDU1 PGN must carry a zero DA placeholder */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
	    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn))
		return -EINVAL;

	return 0;
}
441
/* bind() handler: attach the socket to a CAN interface and claim the
 * requested source NAME/address.  A re-bind on the same interface updates
 * the address; re-binding to a different interface is rejected.
 */
static int j1939_sk_bind(struct socket *sock, struct sockaddr *uaddr, int len)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	struct j1939_priv *priv;
	struct sock *sk;
	struct net *net;
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	priv = jsk->priv;
	sk = sock->sk;
	net = sock_net(sk);

	/* Already bound to an interface? */
	if (jsk->state & J1939_SOCK_BOUND) {
		/* A re-bind() to a different interface is not
		 * supported.
		 */
		if (jsk->ifindex != addr->can_ifindex) {
			ret = -EINVAL;
			goto out_release_sock;
		}

		/* drop old references */
		j1939_jsk_del(priv, jsk);
		j1939_local_ecu_put(priv, jsk->addr.src_name, jsk->addr.sa);
	} else {
		struct can_ml_priv *can_ml;
		struct net_device *ndev;

		ndev = dev_get_by_index(net, addr->can_ifindex);
		if (!ndev) {
			ret = -ENODEV;
			goto out_release_sock;
		}

		/* interface must be a CAN device with mid-layer priv */
		can_ml = can_get_ml_priv(ndev);
		if (!can_ml) {
			dev_put(ndev);
			ret = -ENODEV;
			goto out_release_sock;
		}

		if (!(ndev->flags & IFF_UP)) {
			dev_put(ndev);
			ret = -ENETDOWN;
			goto out_release_sock;
		}

		/* j1939_netdev_start() takes its own reference on the
		 * device, so the dev_get_by_index() one can be dropped
		 */
		priv = j1939_netdev_start(ndev);
		dev_put(ndev);
		if (IS_ERR(priv)) {
			ret = PTR_ERR(priv);
			goto out_release_sock;
		}

		jsk->ifindex = addr->can_ifindex;

		/* the corresponding j1939_priv_put() is called via
		 * sk->sk_destruct, which points to j1939_sk_sock_destruct()
		 */
		j1939_priv_get(priv);
		jsk->priv = priv;
	}

	/* set default transmit pgn */
	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->pgn_rx_filter = addr->can_addr.j1939.pgn;
	jsk->addr.src_name = addr->can_addr.j1939.name;
	jsk->addr.sa = addr->can_addr.j1939.addr;

	/* get new references */
	ret = j1939_local_ecu_get(priv, jsk->addr.src_name, jsk->addr.sa);
	if (ret) {
		j1939_netdev_stop(priv);
		goto out_release_sock;
	}

	j1939_jsk_add(priv, jsk);

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
533
/* connect() handler: set the default destination NAME/address/PGN for
 * sendmsg() without msg_name.  Requires a prior bind() on the same
 * interface.
 */
static int j1939_sk_connect(struct socket *sock, struct sockaddr *uaddr,
			    int len, int flags)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct j1939_sock *jsk = j1939_sk(sock->sk);
	int ret = 0;

	ret = j1939_sk_sanity_check(addr, len);
	if (ret)
		return ret;

	lock_sock(sock->sk);

	/* bind() before connect() is mandatory */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	/* A connect() to a different interface is not supported. */
	if (jsk->ifindex != addr->can_ifindex) {
		ret = -EINVAL;
		goto out_release_sock;
	}

	if (!addr->can_addr.j1939.name &&
	    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
	    !sock_flag(&jsk->sk, SOCK_BROADCAST)) {
		/* broadcast, but SO_BROADCAST not set */
		ret = -EACCES;
		goto out_release_sock;
	}

	jsk->addr.dst_name = addr->can_addr.j1939.name;
	jsk->addr.da = addr->can_addr.j1939.addr;

	if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
		jsk->addr.pgn = addr->can_addr.j1939.pgn;

	jsk->state |= J1939_SOCK_CONNECTED;

 out_release_sock: /* fall through */
	release_sock(sock->sk);

	return ret;
}
580
/* Fill a sockaddr_can from socket state; @peer selects the connect()
 * (peer) side versus the bind() (local) side.
 */
static void j1939_sk_sock2sockaddr_can(struct sockaddr_can *addr,
				       const struct j1939_sock *jsk, int peer)
{
	/* There are two holes (2 bytes and 3 bytes) to clear to avoid
	 * leaking kernel information to user space.
	 */
	memset(addr, 0, J1939_MIN_NAMELEN);

	addr->can_family = AF_CAN;
	addr->can_ifindex = jsk->ifindex;
	addr->can_addr.j1939.pgn = jsk->addr.pgn;
	if (peer) {
		addr->can_addr.j1939.name = jsk->addr.dst_name;
		addr->can_addr.j1939.addr = jsk->addr.da;
	} else {
		addr->can_addr.j1939.name = jsk->addr.src_name;
		addr->can_addr.j1939.addr = jsk->addr.sa;
	}
}
600
/* getsockname()/getpeername() handler; returns the address length on
 * success.  Peer name is only available on connected sockets.
 */
static int j1939_sk_getname(struct socket *sock, struct sockaddr *uaddr,
			    int peer)
{
	struct sockaddr_can *addr = (struct sockaddr_can *)uaddr;
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret = 0;

	lock_sock(sk);

	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
		ret = -EADDRNOTAVAIL;
		goto failure;
	}

	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
	ret = J1939_MIN_NAMELEN;

 failure:
	release_sock(sk);

	return ret;
}
624
/* close() handler: wait for pending TX sessions to drain (aborting them
 * when interrupted by a signal), drop all bind-time references and detach
 * the socket.
 */
static int j1939_sk_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk;

	if (!sk)
		return 0;

	lock_sock(sk);
	jsk = j1939_sk(sk);

	if (jsk->state & J1939_SOCK_BOUND) {
		struct j1939_priv *priv = jsk->priv;

		/* interrupted by a signal: abort outstanding sessions
		 * instead of waiting for them to complete
		 */
		if (wait_event_interruptible(jsk->waitq,
					     !j1939_sock_pending_get(&jsk->sk))) {
			j1939_cancel_active_session(priv, sk);
			j1939_sk_queue_drop_all(priv, jsk, ESHUTDOWN);
		}

		j1939_jsk_del(priv, jsk);

		j1939_local_ecu_put(priv, jsk->addr.src_name,
				    jsk->addr.sa);

		j1939_netdev_stop(priv);
	}

	kfree(jsk->filters);
	sock_orphan(sk);
	sock->sk = NULL;

	release_sock(sk);
	sock_put(sk);

	return 0;
}
662
/* Set or clear a J1939_SOCK_* state flag from a user supplied int.
 * NOTE(review): on success this returns the user's value (possibly
 * positive), not 0 — callers propagate it; looks intentional here,
 * confirm against the setsockopt contract before changing.
 */
static int j1939_sk_setsockopt_flag(struct j1939_sock *jsk, sockptr_t optval,
				    unsigned int optlen, int flag)
{
	int tmp;

	if (optlen != sizeof(tmp))
		return -EINVAL;
	if (copy_from_sockptr(&tmp, optval, optlen))
		return -EFAULT;
	lock_sock(&jsk->sk);
	if (tmp)
		jsk->state |= flag;
	else
		jsk->state &= ~flag;
	release_sock(&jsk->sk);
	return tmp;
}
680
/* setsockopt() handler for SOL_CAN_J1939 options: receive filters,
 * promiscuous mode, error queue delivery and TX priority.
 */
static int j1939_sk_setsockopt(struct socket *sock, int level, int optname,
			       sockptr_t optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int tmp, count = 0, ret = 0;
	struct j1939_filter *filters = NULL, *ofilters;

	if (level != SOL_CAN_J1939)
		return -EINVAL;

	switch (optname) {
	case SO_J1939_FILTER:
		/* NULL/empty optval clears the filter list */
		if (!sockptr_is_null(optval) && optlen != 0) {
			struct j1939_filter *f;
			int c;

			if (optlen % sizeof(*filters) != 0)
				return -EINVAL;

			if (optlen > J1939_FILTER_MAX *
			    sizeof(struct j1939_filter))
				return -EINVAL;

			count = optlen / sizeof(*filters);
			filters = memdup_sockptr(optval, optlen);
			if (IS_ERR(filters))
				return PTR_ERR(filters);

			/* normalize: keep only the masked bits so the
			 * compare in j1939_sk_match_filter() is direct
			 */
			for (f = filters, c = count; c; f++, c--) {
				f->name &= f->name_mask;
				f->pgn &= f->pgn_mask;
				f->addr &= f->addr_mask;
			}
		}

		/* swap under filters_lock to not race the RX path */
		lock_sock(&jsk->sk);
		spin_lock_bh(&jsk->filters_lock);
		ofilters = jsk->filters;
		jsk->filters = filters;
		jsk->nfilters = count;
		spin_unlock_bh(&jsk->filters_lock);
		release_sock(&jsk->sk);
		kfree(ofilters);
		return 0;
	case SO_J1939_PROMISC:
		return j1939_sk_setsockopt_flag(jsk, optval, optlen,
						J1939_SOCK_PROMISC);
	case SO_J1939_ERRQUEUE:
		ret = j1939_sk_setsockopt_flag(jsk, optval, optlen,
					       J1939_SOCK_ERRQUEUE);
		if (ret < 0)
			return ret;

		/* discard stale error messages when disabling */
		if (!(jsk->state & J1939_SOCK_ERRQUEUE))
			skb_queue_purge(&sk->sk_error_queue);
		return ret;
	case SO_J1939_SEND_PRIO:
		if (optlen != sizeof(tmp))
			return -EINVAL;
		if (copy_from_sockptr(&tmp, optval, optlen))
			return -EFAULT;
		if (tmp < 0 || tmp > 7)
			return -EDOM;
		/* priorities 0 and 1 are reserved for privileged users */
		if (tmp < 2 && !capable(CAP_NET_ADMIN))
			return -EPERM;
		lock_sock(&jsk->sk);
		jsk->sk.sk_priority = j1939_to_sk_priority(tmp);
		release_sock(&jsk->sk);
		return 0;
	default:
		return -ENOPROTOOPT;
	}
}
755
/* getsockopt() handler for SOL_CAN_J1939; all currently supported
 * options are plain ints.
 */
static int j1939_sk_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	int ret, ulen;
	/* set defaults for using 'int' properties */
	int tmp = 0;
	int len = sizeof(tmp);
	void *val = &tmp;

	if (level != SOL_CAN_J1939)
		return -EINVAL;
	if (get_user(ulen, optlen))
		return -EFAULT;
	if (ulen < 0)
		return -EINVAL;

	lock_sock(&jsk->sk);
	switch (optname) {
	case SO_J1939_PROMISC:
		tmp = (jsk->state & J1939_SOCK_PROMISC) ? 1 : 0;
		break;
	case SO_J1939_ERRQUEUE:
		tmp = (jsk->state & J1939_SOCK_ERRQUEUE) ? 1 : 0;
		break;
	case SO_J1939_SEND_PRIO:
		tmp = j1939_prio(jsk->sk.sk_priority);
		break;
	default:
		ret = -ENOPROTOOPT;
		goto no_copy;
	}

	/* copy to user, based on 'len' & 'val'
	 * but most sockopt's are 'int' properties, and have 'len' & 'val'
	 * left unchanged, but instead modified 'tmp'
	 */
	if (len > ulen)
		ret = -EFAULT;
	else if (put_user(len, optlen))
		ret = -EFAULT;
	else if (copy_to_user(optval, val, len))
		ret = -EFAULT;
	else
		ret = 0;
 no_copy:
	release_sock(&jsk->sk);
	return ret;
}
806
/* recvmsg() handler: deliver one datagram (or an error-queue message with
 * MSG_ERRQUEUE) plus J1939 ancillary data (DA, dest NAME, priority).
 */
static int j1939_sk_recvmsg(struct socket *sock, struct msghdr *msg,
			    size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	struct j1939_sk_buff_cb *skcb;
	int ret = 0;

	if (flags & ~(MSG_DONTWAIT | MSG_ERRQUEUE | MSG_CMSG_COMPAT))
		return -EINVAL;

	if (flags & MSG_ERRQUEUE)
		return sock_recv_errqueue(sock->sk, msg, size, SOL_CAN_J1939,
					  SCM_J1939_ERRQUEUE);

	skb = skb_recv_datagram(sk, flags, 0, &ret);
	if (!skb)
		return ret;

	/* truncate when the user buffer is smaller than the datagram */
	if (size < skb->len)
		msg->msg_flags |= MSG_TRUNC;
	else
		size = skb->len;

	ret = memcpy_to_msg(msg, skb->data, size);
	if (ret < 0) {
		skb_free_datagram(sk, skb);
		return ret;
	}

	skcb = j1939_skb_to_cb(skb);
	if (j1939_address_is_valid(skcb->addr.da))
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_ADDR,
			 sizeof(skcb->addr.da), &skcb->addr.da);

	if (skcb->addr.dst_name)
		put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_DEST_NAME,
			 sizeof(skcb->addr.dst_name), &skcb->addr.dst_name);

	put_cmsg(msg, SOL_CAN_J1939, SCM_J1939_PRIO,
		 sizeof(skcb->priority), &skcb->priority);

	/* report the sender's address when the caller asked for it */
	if (msg->msg_name) {
		struct sockaddr_can *paddr = msg->msg_name;

		msg->msg_namelen = J1939_MIN_NAMELEN;
		memset(msg->msg_name, 0, msg->msg_namelen);
		paddr->can_family = AF_CAN;
		paddr->can_ifindex = skb->skb_iif;
		paddr->can_addr.j1939.name = skcb->addr.src_name;
		paddr->can_addr.j1939.addr = skcb->addr.sa;
		paddr->can_addr.j1939.pgn = skcb->addr.pgn;
	}

	sock_recv_ts_and_drops(msg, sk, skb);
	msg->msg_flags |= skcb->msg_flags;
	skb_free_datagram(sk, skb);

	return size;
}
867
/* Allocate and fill one TX skb for up to @size payload bytes copied from
 * @msg.  Addressing defaults come from the socket and may be overridden
 * by msg_name.  On error, NULL is returned and *errcode holds a negative
 * errno; on success *errcode is 0.
 */
static struct sk_buff *j1939_sk_alloc_skb(struct net_device *ndev,
					  struct sock *sk,
					  struct msghdr *msg, size_t size,
					  int *errcode)
{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_sk_buff_cb *skcb;
	struct sk_buff *skb;
	int ret;

	/* reserve room for CAN header and can_skb_priv like other
	 * CAN protocols do
	 */
	skb = sock_alloc_send_skb(sk,
				  size +
				  sizeof(struct can_frame) -
				  sizeof(((struct can_frame *)NULL)->data) +
				  sizeof(struct can_skb_priv),
				  msg->msg_flags & MSG_DONTWAIT, &ret);
	if (!skb)
		goto failure;

	can_skb_reserve(skb);
	can_skb_prv(skb)->ifindex = ndev->ifindex;
	can_skb_prv(skb)->skbcnt = 0;
	skb_reserve(skb, offsetof(struct can_frame, data));

	ret = memcpy_from_msg(skb_put(skb, size), msg, size);
	if (ret < 0)
		goto free_skb;

	skb->dev = ndev;

	skcb = j1939_skb_to_cb(skb);
	memset(skcb, 0, sizeof(*skcb));
	skcb->addr = jsk->addr;
	skcb->priority = j1939_prio(sk->sk_priority);

	/* msg_name overrides the connect()/bind() defaults */
	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (addr->can_addr.j1939.name ||
		    addr->can_addr.j1939.addr != J1939_NO_ADDR) {
			skcb->addr.dst_name = addr->can_addr.j1939.name;
			skcb->addr.da = addr->can_addr.j1939.addr;
		}
		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn))
			skcb->addr.pgn = addr->can_addr.j1939.pgn;
	}

	*errcode = ret;
	return skb;

free_skb:
	kfree_skb(skb);
failure:
	*errcode = ret;
	return NULL;
}
924
925static size_t j1939_sk_opt_stats_get_size(void)
926{
927	return
928		nla_total_size(sizeof(u32)) + /* J1939_NLA_BYTES_ACKED */
929		0;
930}
931
/* Build the opt_stats skb reporting how many bytes of the session have
 * been acknowledged so far.  Returns NULL on allocation failure.
 */
static struct sk_buff *
j1939_sk_get_timestamping_opt_stats(struct j1939_session *session)
{
	struct sk_buff *stats;
	u32 size;

	stats = alloc_skb(j1939_sk_opt_stats_get_size(), GFP_ATOMIC);
	if (!stats)
		return NULL;

	if (session->skcb.addr.type == J1939_SIMPLE)
		size = session->total_message_size;
	else
		/* 7 payload bytes per acked TP data packet */
		size = min(session->pkt.tx_acked * 7,
			   session->total_message_size);

	nla_put_u32(stats, J1939_NLA_BYTES_ACKED, size);

	return stats;
}
952
953void j1939_sk_errqueue(struct j1939_session *session,
954		       enum j1939_sk_errqueue_type type)
955{
956	struct j1939_priv *priv = session->priv;
957	struct sock *sk = session->sk;
958	struct j1939_sock *jsk;
959	struct sock_exterr_skb *serr;
960	struct sk_buff *skb;
961	char *state = "UNK";
962	int err;
963
964	/* currently we have no sk for the RX session */
965	if (!sk)
966		return;
967
968	jsk = j1939_sk(sk);
969
970	if (!(jsk->state & J1939_SOCK_ERRQUEUE))
971		return;
972
973	skb = j1939_sk_get_timestamping_opt_stats(session);
974	if (!skb)
975		return;
976
977	skb->tstamp = ktime_get_real();
978
979	BUILD_BUG_ON(sizeof(struct sock_exterr_skb) > sizeof(skb->cb));
980
981	serr = SKB_EXT_ERR(skb);
982	memset(serr, 0, sizeof(*serr));
983	switch (type) {
984	case J1939_ERRQUEUE_ACK:
985		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_ACK)) {
986			kfree_skb(skb);
987			return;
988		}
989
990		serr->ee.ee_errno = ENOMSG;
991		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
992		serr->ee.ee_info = SCM_TSTAMP_ACK;
993		state = "ACK";
994		break;
995	case J1939_ERRQUEUE_SCHED:
996		if (!(sk->sk_tsflags & SOF_TIMESTAMPING_TX_SCHED)) {
997			kfree_skb(skb);
998			return;
999		}
1000
1001		serr->ee.ee_errno = ENOMSG;
1002		serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING;
1003		serr->ee.ee_info = SCM_TSTAMP_SCHED;
1004		state = "SCH";
1005		break;
1006	case J1939_ERRQUEUE_ABORT:
1007		serr->ee.ee_errno = session->err;
1008		serr->ee.ee_origin = SO_EE_ORIGIN_LOCAL;
1009		serr->ee.ee_info = J1939_EE_INFO_TX_ABORT;
1010		state = "ABT";
1011		break;
1012	default:
1013		netdev_err(priv->ndev, "Unknown errqueue type %i\n", type);
1014	}
1015
1016	serr->opt_stats = true;
1017	if (sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
1018		serr->ee.ee_data = session->tskey;
1019
1020	netdev_dbg(session->priv->ndev, "%s: 0x%p tskey: %i, state: %s\n",
1021		   __func__, session, session->tskey, state);
1022	err = sock_queue_err_skb(sk, skb);
1023
1024	if (err)
1025		kfree_skb(skb);
1026};
1027
1028void j1939_sk_send_loop_abort(struct sock *sk, int err)
1029{
1030	struct j1939_sock *jsk = j1939_sk(sk);
1031
1032	if (jsk->state & J1939_SOCK_ERRQUEUE)
1033		return;
1034
1035	sk->sk_err = err;
1036
1037	sk->sk_error_report(sk);
1038}
1039
/* Core of sendmsg(): split @size bytes from @msg into per-segment skbs,
 * create a new TP session or append to the socket's incomplete one, and
 * activate the session when it is first in the queue.  Returns the number
 * of bytes queued, or a negative errno.
 */
static int j1939_sk_send_loop(struct j1939_priv *priv,  struct sock *sk,
			      struct msghdr *msg, size_t size)

{
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_session *session = j1939_sk_get_incomplete_session(jsk);
	struct sk_buff *skb;
	size_t segment_size, todo_size;
	int ret = 0;

	/* continued transfers must complete the announced message size */
	if (session &&
	    session->total_message_size != session->total_queued_size + size) {
		j1939_session_put(session);
		return -EIO;
	}

	todo_size = size;

	while (todo_size) {
		struct j1939_sk_buff_cb *skcb;

		segment_size = min_t(size_t, J1939_MAX_TP_PACKET_SIZE,
				     todo_size);

		/* Allocate skb for one segment */
		skb = j1939_sk_alloc_skb(priv->ndev, sk, msg, segment_size,
					 &ret);
		if (ret)
			break;

		skcb = j1939_skb_to_cb(skb);

		if (!session) {
			/* at this point the size should be full size
			 * of the session
			 */
			skcb->offset = 0;
			session = j1939_tp_send(priv, skb, size);
			if (IS_ERR(session)) {
				ret = PTR_ERR(session);
				goto kfree_skb;
			}
			if (j1939_sk_queue_session(session)) {
				/* try to activate session if we are
				 * first in the queue
				 */
				if (!j1939_session_activate(session)) {
					j1939_tp_schedule_txtimer(session, 0);
				} else {
					ret = -EBUSY;
					session->err = ret;
					j1939_sk_queue_drop_all(priv, jsk,
								EBUSY);
					break;
				}
			}
		} else {
			skcb->offset = session->total_queued_size;
			j1939_session_skb_queue(session, skb);
		}

		todo_size -= segment_size;
		session->total_queued_size += segment_size;
	}

	switch (ret) {
	case 0: /* OK */
		if (todo_size)
			netdev_warn(priv->ndev,
				    "no error found and not completely queued?! %zu\n",
				    todo_size);
		ret = size;
		break;
	case -ERESTARTSYS:
		ret = -EINTR;
		fallthrough;
	case -EAGAIN: /* OK */
		/* partial progress: report what was queued so far */
		if (todo_size != size)
			ret = size - todo_size;
		break;
	default: /* ERROR */
		break;
	}

	if (session)
		j1939_session_put(session);

	return ret;

 kfree_skb:
	kfree_skb(skb);
	return ret;
}
1133
/* sendmsg() handler: validate socket state and optional msg_name
 * addressing, then hand off to j1939_sk_send_loop().
 */
static int j1939_sk_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t size)
{
	struct sock *sk = sock->sk;
	struct j1939_sock *jsk = j1939_sk(sk);
	struct j1939_priv *priv;
	int ifindex;
	int ret;

	lock_sock(sock->sk);
	/* various socket state tests */
	if (!(jsk->state & J1939_SOCK_BOUND)) {
		ret = -EBADFD;
		goto sendmsg_done;
	}

	priv = jsk->priv;
	ifindex = jsk->ifindex;

	if (!jsk->addr.src_name && jsk->addr.sa == J1939_NO_ADDR) {
		/* no source address assigned yet */
		ret = -EBADFD;
		goto sendmsg_done;
	}

	/* deal with provided destination address info */
	if (msg->msg_name) {
		struct sockaddr_can *addr = msg->msg_name;

		if (msg->msg_namelen < J1939_MIN_NAMELEN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (addr->can_family != AF_CAN) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		/* a different interface than the bound one is rejected */
		if (addr->can_ifindex && addr->can_ifindex != ifindex) {
			ret = -EBADFD;
			goto sendmsg_done;
		}

		if (j1939_pgn_is_valid(addr->can_addr.j1939.pgn) &&
		    !j1939_pgn_is_clean_pdu(addr->can_addr.j1939.pgn)) {
			ret = -EINVAL;
			goto sendmsg_done;
		}

		if (!addr->can_addr.j1939.name &&
		    addr->can_addr.j1939.addr == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	} else {
		if (!jsk->addr.dst_name && jsk->addr.da == J1939_NO_ADDR &&
		    !sock_flag(sk, SOCK_BROADCAST)) {
			/* broadcast, but SO_BROADCAST not set */
			ret = -EACCES;
			goto sendmsg_done;
		}
	}

	ret = j1939_sk_send_loop(priv, sk, msg, size);

sendmsg_done:
	release_sock(sock->sk);

	return ret;
}
1207
/* Device went down: report ENETDOWN to every socket bound to it and drop
 * all of their queued sessions.
 */
void j1939_sk_netdev_event_netdown(struct j1939_priv *priv)
{
	struct j1939_sock *jsk;
	int error_code = ENETDOWN;

	spin_lock_bh(&priv->j1939_socks_lock);
	list_for_each_entry(jsk, &priv->j1939_socks, list) {
		jsk->sk.sk_err = error_code;
		if (!sock_flag(&jsk->sk, SOCK_DEAD))
			jsk->sk.sk_error_report(&jsk->sk);

		j1939_sk_queue_drop_all(priv, jsk, error_code);
	}
	spin_unlock_bh(&priv->j1939_socks_lock);
}
1223
/* ioctl() handler stub; J1939 has no socket-level ioctls. */
static int j1939_sk_no_ioctlcmd(struct socket *sock, unsigned int cmd,
				unsigned long arg)
{
	/* no ioctls for socket layer -> hand it down to NIC layer */
	return -ENOIOCTLCMD;
}
1230
/* proto_ops for PF_CAN/CAN_J1939 sockets; unsupported operations map to
 * the generic sock_no_*() stubs.
 */
static const struct proto_ops j1939_ops = {
	.family = PF_CAN,
	.release = j1939_sk_release,
	.bind = j1939_sk_bind,
	.connect = j1939_sk_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = j1939_sk_getname,
	.poll = datagram_poll,
	.ioctl = j1939_sk_no_ioctlcmd,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = j1939_sk_setsockopt,
	.getsockopt = j1939_sk_getsockopt,
	.sendmsg = j1939_sk_sendmsg,
	.recvmsg = j1939_sk_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};
1250
/* struct proto describing per-socket storage and initialization */
static struct proto j1939_proto __read_mostly = {
	.name = "CAN_J1939",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct j1939_sock),
	.init = j1939_sk_init,
};
1257
/* registration record handed to the CAN core (can_proto_register()) */
const struct can_proto j1939_can_proto = {
	.type = SOCK_DGRAM,
	.protocol = CAN_J1939,
	.ops = &j1939_ops,
	.prot = &j1939_proto,
};
1264