// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/macsec.c - MACsec device
 *
 * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/socket.h>
#include <linux/module.h>
#include <crypto/aead.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/refcount.h>
#include <net/genetlink.h>
#include <net/sock.h>
#include <net/gro_cells.h>
#include <net/macsec.h>
#include <linux/phy.h>
#include <linux/byteorder/generic.h>
#include <linux/if_arp.h>

#include <uapi/linux/if_macsec.h>

#define MACSEC_SCI_LEN 8

/* SecTAG length = macsec_eth_header without the optional SCI */
#define MACSEC_TAG_LEN 6

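/* On-the-wire SecTAG layout, following the 12 bytes of MAC addresses:
 *   2 octets  MACsec EtherType (0x88E5, carried in eth.h_proto)
 *   1 octet   TCI/AN (V, ES, SC, SCB, E, C bits + association number)
 *   1 octet   Short Length (SL, in the 6 least significant bits)
 *   4 octets  Packet Number
 *   8 octets  SCI (optional, present iff the TCI SC bit is set)
 */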
struct macsec_eth_header {
	struct ethhdr eth;
	/* SecTAG */
	u8  tci_an;
#if defined(__LITTLE_ENDIAN_BITFIELD)
	u8  short_length:6,
		  unused:2;
#elif defined(__BIG_ENDIAN_BITFIELD)
	u8        unused:2,
	    short_length:6;
#else
#error	"Please fix <asm/byteorder.h>"
#endif
	__be32 packet_number;
	u8 secure_channel_id[8]; /* optional */
} __packed;

#define MACSEC_TCI_VERSION 0x80
#define MACSEC_TCI_ES      0x40 /* end station */
#define MACSEC_TCI_SC      0x20 /* SCI present */
#define MACSEC_TCI_SCB     0x10 /* epon */
#define MACSEC_TCI_E       0x08 /* encryption */
#define MACSEC_TCI_C       0x04 /* changed text */
#define MACSEC_AN_MASK     0x03 /* association number */
#define MACSEC_TCI_CONFID  (MACSEC_TCI_E | MACSEC_TCI_C)

/* minimum secure data length deemed "not short", see IEEE 802.1AE-2006 9.7 */
#define MIN_NON_SHORT_LEN 48

#define GCM_AES_IV_LEN 12
#define DEFAULT_ICV_LEN 16

#define for_each_rxsc(secy, sc)				\
	for (sc = rcu_dereference_bh(secy->rx_sc);	\
	     sc;					\
	     sc = rcu_dereference_bh(sc->next))
#define for_each_rxsc_rtnl(secy, sc)			\
	for (sc = rtnl_dereference(secy->rx_sc);	\
	     sc;					\
	     sc = rtnl_dereference(sc->next))

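/* Two 32-bit PNs fall in the same numbering half iff bit 31 matches; the
 * XPN code uses this to decide whether the upper 32 bits of the full
 * 64-bit PN must be adjusted when recovering or advancing a packet number.
 */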
#define pn_same_half(pn1, pn2) (!(((pn1) >> 31) ^ ((pn2) >> 31)))

struct gcm_iv_xpn {
	union {
		u8 short_secure_channel_id[4];
		ssci_t ssci;
	};
	__be64 pn;
} __packed;

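/* Non-XPN GCM-AES IV (GCM_AES_IV_LEN == 12 octets): the 8-byte SCI
 * followed by the 4-byte big-endian packet number.
 */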
struct gcm_iv {
	union {
		u8 secure_channel_id[8];
		sci_t sci;
	};
	__be32 pn;
};

#define MACSEC_VALIDATE_DEFAULT MACSEC_VALIDATE_STRICT

struct pcpu_secy_stats {
	struct macsec_dev_stats stats;
	struct u64_stats_sync syncp;
};

/**
 * struct macsec_dev - private data
 * @secy: SecY config
 * @real_dev: pointer to underlying netdevice
 * @stats: MACsec device stats
 * @secys: linked list of SecY's on the underlying device
 * @gro_cells: GRO cells private data for the receive path
 * @offload: status of offloading on the MACsec device
 */
struct macsec_dev {
	struct macsec_secy secy;
	struct net_device *real_dev;
	struct pcpu_secy_stats __percpu *stats;
	struct list_head secys;
	struct gro_cells gro_cells;
	enum macsec_offload offload;
};

/**
 * struct macsec_rxh_data - rx_handler private argument
 * @secys: linked list of SecY's on this underlying device
 */
struct macsec_rxh_data {
	struct list_head secys;
};

static struct macsec_dev *macsec_priv(const struct net_device *dev)
{
	return (struct macsec_dev *)netdev_priv(dev);
}

static struct macsec_rxh_data *macsec_data_rcu(const struct net_device *dev)
{
	return rcu_dereference_bh(dev->rx_handler_data);
}

static struct macsec_rxh_data *macsec_data_rtnl(const struct net_device *dev)
{
	return rtnl_dereference(dev->rx_handler_data);
}

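/* Per-skb MACsec state, kept in skb->cb so it survives across the
 * asynchronous AEAD completion; macsec_skb_cb() checks at build time
 * that it fits in the 48-byte control buffer.
 */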
struct macsec_cb {
	struct aead_request *req;
	union {
		struct macsec_tx_sa *tx_sa;
		struct macsec_rx_sa *rx_sa;
	};
	u8 assoc_num;
	bool valid;
	bool has_sci;
};

static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr)
{
	struct macsec_rx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc)
{
	struct macsec_rx_sa *sa = NULL;
	int an;

	for (an = 0; an < MACSEC_NUM_AN; an++)	{
		sa = macsec_rxsa_get(rx_sc->sa[an]);
		if (sa)
			break;
	}
	return sa;
}

static void free_rx_sc_rcu(struct rcu_head *head)
{
	struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head);

	free_percpu(rx_sc->stats);
	kfree(rx_sc);
}

static struct macsec_rx_sc *macsec_rxsc_get(struct macsec_rx_sc *sc)
{
	return refcount_inc_not_zero(&sc->refcnt) ? sc : NULL;
}

static void macsec_rxsc_put(struct macsec_rx_sc *sc)
{
	if (refcount_dec_and_test(&sc->refcnt))
		call_rcu(&sc->rcu_head, free_rx_sc_rcu);
}

static void free_rxsa(struct rcu_head *head)
{
	struct macsec_rx_sa *sa = container_of(head, struct macsec_rx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_rxsa_put(struct macsec_rx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_rxsa);
}

static struct macsec_tx_sa *macsec_txsa_get(struct macsec_tx_sa __rcu *ptr)
{
	struct macsec_tx_sa *sa = rcu_dereference_bh(ptr);

	if (!sa || !sa->active)
		return NULL;

	if (!refcount_inc_not_zero(&sa->refcnt))
		return NULL;

	return sa;
}

static void free_txsa(struct rcu_head *head)
{
	struct macsec_tx_sa *sa = container_of(head, struct macsec_tx_sa, rcu);

	crypto_free_aead(sa->key.tfm);
	free_percpu(sa->stats);
	kfree(sa);
}

static void macsec_txsa_put(struct macsec_tx_sa *sa)
{
	if (refcount_dec_and_test(&sa->refcnt))
		call_rcu(&sa->rcu, free_txsa);
}

static struct macsec_cb *macsec_skb_cb(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct macsec_cb) > sizeof(skb->cb));
	return (struct macsec_cb *)skb->cb;
}

#define MACSEC_PORT_ES (htons(0x0001))
#define MACSEC_PORT_SCB (0x0000)
#define MACSEC_UNDEF_SCI ((__force sci_t)0xffffffffffffffffULL)
#define MACSEC_UNDEF_SSCI ((__force ssci_t)0xffffffff)

#define MACSEC_GCM_AES_128_SAK_LEN 16
#define MACSEC_GCM_AES_256_SAK_LEN 32

#define DEFAULT_SAK_LEN MACSEC_GCM_AES_128_SAK_LEN
#define DEFAULT_XPN false
#define DEFAULT_SEND_SCI true
#define DEFAULT_ENCRYPT false
#define DEFAULT_ENCODING_SA 0
#define MACSEC_XPN_MAX_REPLAY_WINDOW (((1 << 30) - 1))

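/* The SCI is carried in the SecTAG either when explicitly requested or
 * when several receive SCs exist and neither the ES nor the SCB
 * shorthand can identify the transmitter.
 */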
static bool send_sci(const struct macsec_secy *secy)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	return tx_sc->send_sci ||
		(secy->n_rx_sc > 1 && !tx_sc->end_station && !tx_sc->scb);
}

static sci_t make_sci(u8 *addr, __be16 port)
{
	sci_t sci;

	memcpy(&sci, addr, ETH_ALEN);
	memcpy(((char *)&sci) + ETH_ALEN, &port, sizeof(port));

	return sci;
}

static sci_t macsec_frame_sci(struct macsec_eth_header *hdr, bool sci_present)
{
	sci_t sci;

	if (sci_present)
		memcpy(&sci, hdr->secure_channel_id,
		       sizeof(hdr->secure_channel_id));
	else
		sci = make_sci(hdr->eth.h_source, MACSEC_PORT_ES);

	return sci;
}

static unsigned int macsec_sectag_len(bool sci_present)
{
	return MACSEC_TAG_LEN + (sci_present ? MACSEC_SCI_LEN : 0);
}

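/* macsec_hdr_len() counts everything from the destination MAC up to the
 * end of the SecTAG; macsec_extra_len() counts only the octets the tag
 * adds to the original frame (SecTAG plus the 2-byte MACsec EtherType,
 * since the original EtherType stays in front of the secure data).
 */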
static unsigned int macsec_hdr_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + ETH_HLEN;
}

static unsigned int macsec_extra_len(bool sci_present)
{
	return macsec_sectag_len(sci_present) + sizeof(__be16);
}

/* Fill SecTAG according to IEEE 802.1AE-2006 10.5.3 */
static void macsec_fill_sectag(struct macsec_eth_header *h,
			       const struct macsec_secy *secy, u32 pn,
			       bool sci_present)
{
	const struct macsec_tx_sc *tx_sc = &secy->tx_sc;

	memset(&h->tci_an, 0, macsec_sectag_len(sci_present));
	h->eth.h_proto = htons(ETH_P_MACSEC);

	if (sci_present) {
		h->tci_an |= MACSEC_TCI_SC;
		memcpy(&h->secure_channel_id, &secy->sci,
		       sizeof(h->secure_channel_id));
	} else {
		if (tx_sc->end_station)
			h->tci_an |= MACSEC_TCI_ES;
		if (tx_sc->scb)
			h->tci_an |= MACSEC_TCI_SCB;
	}

	h->packet_number = htonl(pn);

	/* with GCM, C/E clear for !encrypt, both set for encrypt */
	if (tx_sc->encrypt)
		h->tci_an |= MACSEC_TCI_CONFID;
	else if (secy->icv_len != DEFAULT_ICV_LEN)
		h->tci_an |= MACSEC_TCI_C;

	h->tci_an |= tx_sc->encoding_sa;
}

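/* SL is only meaningful for frames carrying fewer than 48 octets of
 * secure data; for longer frames it stays zero and the receiver relies
 * on the frame length instead.
 */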
static void macsec_set_shortlen(struct macsec_eth_header *h, size_t data_len)
{
	if (data_len < MIN_NON_SHORT_LEN)
		h->short_length = data_len;
}

/* Checks if a MACsec interface is being offloaded to a hardware engine */
static bool macsec_is_offloaded(struct macsec_dev *macsec)
{
	if (macsec->offload == MACSEC_OFFLOAD_MAC ||
	    macsec->offload == MACSEC_OFFLOAD_PHY)
		return true;

	return false;
}

/* Checks if underlying layers implement MACsec offloading functions. */
static bool macsec_check_offload(enum macsec_offload offload,
				 struct macsec_dev *macsec)
{
	if (!macsec || !macsec->real_dev)
		return false;

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev &&
		       macsec->real_dev->phydev->macsec_ops;
	else if (offload == MACSEC_OFFLOAD_MAC)
		return macsec->real_dev->features & NETIF_F_HW_MACSEC &&
		       macsec->real_dev->macsec_ops;

	return false;
}

static const struct macsec_ops *__macsec_get_ops(enum macsec_offload offload,
						 struct macsec_dev *macsec,
						 struct macsec_context *ctx)
{
	if (ctx) {
		memset(ctx, 0, sizeof(*ctx));
		ctx->offload = offload;

		if (offload == MACSEC_OFFLOAD_PHY)
			ctx->phydev = macsec->real_dev->phydev;
		else if (offload == MACSEC_OFFLOAD_MAC)
			ctx->netdev = macsec->real_dev;
	}

	if (offload == MACSEC_OFFLOAD_PHY)
		return macsec->real_dev->phydev->macsec_ops;
	else
		return macsec->real_dev->macsec_ops;
}

/* Returns a pointer to the MACsec ops struct if any and updates the MACsec
 * context device reference if provided.
 */
static const struct macsec_ops *macsec_get_ops(struct macsec_dev *macsec,
					       struct macsec_context *ctx)
{
	if (!macsec_check_offload(macsec->offload, macsec))
		return NULL;

	return __macsec_get_ops(macsec->offload, macsec, ctx);
}

/* validate MACsec packet according to IEEE 802.1AE-2018 9.12 */
static bool macsec_validate_skb(struct sk_buff *skb, u16 icv_len, bool xpn)
{
	struct macsec_eth_header *h = (struct macsec_eth_header *)skb->data;
	int len = skb->len - 2 * ETH_ALEN;
	int extra_len = macsec_extra_len(!!(h->tci_an & MACSEC_TCI_SC)) + icv_len;

	/* a) It comprises at least 17 octets */
	if (skb->len <= 16)
		return false;

	/* b) MACsec EtherType: already checked */

	/* c) V bit is clear */
	if (h->tci_an & MACSEC_TCI_VERSION)
		return false;

	/* d) ES or SCB => !SC */
	if ((h->tci_an & MACSEC_TCI_ES || h->tci_an & MACSEC_TCI_SCB) &&
	    (h->tci_an & MACSEC_TCI_SC))
		return false;

	/* e) Bits 7 and 8 of octet 4 of the SecTAG are clear */
	if (h->unused)
		return false;

	/* rx.pn != 0 if not XPN (figure 10-5 with 802.1AEbw-2013 amendment) */
	if (!h->packet_number && !xpn)
		return false;

	/* length check, f) g) h) i) */
	if (h->short_length)
		return len == extra_len + h->short_length;
	return len >= extra_len + MIN_NON_SHORT_LEN;
}

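/* Worst-case expansion on transmit: a SecTAG with SCI plus the MACsec
 * EtherType in front (16 octets of headroom) and the ICV behind
 * (16 octets of tailroom).
 */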
#define MACSEC_NEEDED_HEADROOM (macsec_extra_len(true))
#define MACSEC_NEEDED_TAILROOM MACSEC_STD_ICV_LEN

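/* XPN GCM-AES IV (IEEE 802.1AEbw): the 4-byte SSCI and the 8-byte full
 * PN are each XORed with the corresponding bytes of the 12-byte salt.
 */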
static void macsec_fill_iv_xpn(unsigned char *iv, ssci_t ssci, u64 pn,
			       salt_t salt)
{
	struct gcm_iv_xpn *gcm_iv = (struct gcm_iv_xpn *)iv;

	gcm_iv->ssci = ssci ^ salt.ssci;
	gcm_iv->pn = cpu_to_be64(pn) ^ salt.pn;
}

static void macsec_fill_iv(unsigned char *iv, sci_t sci, u32 pn)
{
	struct gcm_iv *gcm_iv = (struct gcm_iv *)iv;

	gcm_iv->sci = sci;
	gcm_iv->pn = htonl(pn);
}

static struct macsec_eth_header *macsec_ethhdr(struct sk_buff *skb)
{
	return (struct macsec_eth_header *)skb_mac_header(skb);
}

static sci_t dev_to_sci(struct net_device *dev, __be16 port)
{
	return make_sci(dev->dev_addr, port);
}

static void __macsec_pn_wrapped(struct macsec_secy *secy,
				struct macsec_tx_sa *tx_sa)
{
	pr_debug("PN wrapped, transitioning to !oper\n");
	tx_sa->active = false;
	if (secy->protect_frames)
		secy->operational = false;
}

void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa)
{
	spin_lock_bh(&tx_sa->lock);
	__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);
}
EXPORT_SYMBOL_GPL(macsec_pn_wrapped);

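/* Returns the PN to use for the current frame and post-increments the
 * SA's next PN; a returned full64 of 0 means the SA was already
 * exhausted and the frame must not be protected with it.
 */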
static pn_t tx_sa_update_pn(struct macsec_tx_sa *tx_sa,
			    struct macsec_secy *secy)
{
	pn_t pn;

	spin_lock_bh(&tx_sa->lock);

	pn = tx_sa->next_pn_halves;
	if (secy->xpn)
		tx_sa->next_pn++;
	else
		tx_sa->next_pn_halves.lower++;

	if (tx_sa->next_pn == 0)
		__macsec_pn_wrapped(secy, tx_sa);
	spin_unlock_bh(&tx_sa->lock);

	return pn;
}

static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev)
{
	struct macsec_dev *macsec = netdev_priv(dev);

	skb->dev = macsec->real_dev;
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;
}

static unsigned int macsec_msdu_len(struct sk_buff *skb)
{
	struct macsec_dev *macsec = macsec_priv(skb->dev);
	struct macsec_secy *secy = &macsec->secy;
	bool sci_present = macsec_skb_cb(skb)->has_sci;

	return skb->len - macsec_hdr_len(sci_present) - secy->icv_len;
}

static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc,
			    struct macsec_tx_sa *tx_sa)
{
	unsigned int msdu_len = macsec_msdu_len(skb);
	struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats);

	u64_stats_update_begin(&txsc_stats->syncp);
	if (tx_sc->encrypt) {
		txsc_stats->stats.OutOctetsEncrypted += msdu_len;
		txsc_stats->stats.OutPktsEncrypted++;
		this_cpu_inc(tx_sa->stats->OutPktsEncrypted);
	} else {
		txsc_stats->stats.OutOctetsProtected += msdu_len;
		txsc_stats->stats.OutPktsProtected++;
		this_cpu_inc(tx_sa->stats->OutPktsProtected);
	}
	u64_stats_update_end(&txsc_stats->syncp);
}

static void count_tx(struct net_device *dev, int ret, int len)
{
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	}
}

static void macsec_encrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_tx_sa *sa = macsec_skb_cb(skb)->tx_sa;
	int len, ret;

	aead_request_free(macsec_skb_cb(skb)->req);

	rcu_read_lock_bh();
	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
	/* packet is encrypted/protected so tx_bytes must be calculated */
	len = macsec_msdu_len(skb) + 2 * ETH_ALEN;
	macsec_encrypt_finish(skb, dev);
	ret = dev_queue_xmit(skb);
	count_tx(dev, ret, len);
	rcu_read_unlock_bh();

	macsec_txsa_put(sa);
	dev_put(dev);
}

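/* Single GFP_ATOMIC allocation holding the aead_request, the IV and the
 * scatterlist array back to back, so the datapath does one allocation
 * per frame instead of three.
 */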
static struct aead_request *macsec_alloc_req(struct crypto_aead *tfm,
					     unsigned char **iv,
					     struct scatterlist **sg,
					     int num_frags)
{
	size_t size, iv_offset, sg_offset;
	struct aead_request *req;
	void *tmp;

	size = sizeof(struct aead_request) + crypto_aead_reqsize(tfm);
	iv_offset = size;
	size += GCM_AES_IV_LEN;

	size = ALIGN(size, __alignof__(struct scatterlist));
	sg_offset = size;
	size += sizeof(struct scatterlist) * num_frags;

	tmp = kmalloc(size, GFP_ATOMIC);
	if (!tmp)
		return NULL;

	*iv = (unsigned char *)(tmp + iv_offset);
	*sg = (struct scatterlist *)(tmp + sg_offset);
	req = tmp;

	aead_request_set_tfm(req, tfm);

	return req;
}

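/* Protects one outgoing frame in place. Returns the skb when the AEAD
 * completed synchronously, ERR_PTR(-EINPROGRESS) when the crypto layer
 * will finish asynchronously via macsec_encrypt_done(), or another
 * ERR_PTR on failure (the skb has then been freed).
 */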
static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
				      struct net_device *dev)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct ethhdr *eth;
	struct macsec_eth_header *hh;
	size_t unprotected_len;
	struct aead_request *req;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;
	struct macsec_dev *macsec = macsec_priv(dev);
	bool sci_present;
	pn_t pn;

	secy = &macsec->secy;
	tx_sc = &secy->tx_sc;

	/* 10.5.1 TX SA assignment */
	tx_sa = macsec_txsa_get(tx_sc->sa[tx_sc->encoding_sa]);
	if (!tx_sa) {
		secy->operational = false;
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
		     skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
		struct sk_buff *nskb = skb_copy_expand(skb,
						       MACSEC_NEEDED_HEADROOM,
						       MACSEC_NEEDED_TAILROOM,
						       GFP_ATOMIC);
		if (likely(nskb)) {
			consume_skb(skb);
			skb = nskb;
		} else {
			macsec_txsa_put(tx_sa);
			kfree_skb(skb);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			macsec_txsa_put(tx_sa);
			return ERR_PTR(-ENOMEM);
		}
	}

	unprotected_len = skb->len;
	eth = eth_hdr(skb);
	sci_present = send_sci(secy);
	hh = skb_push(skb, macsec_extra_len(sci_present));
	memmove(hh, eth, 2 * ETH_ALEN);

	pn = tx_sa_update_pn(tx_sa, secy);
	if (pn.full64 == 0) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOLINK);
	}
	macsec_fill_sectag(hh, secy, pn.lower, sci_present);
	macsec_set_shortlen(hh, unprotected_len - 2 * ETH_ALEN);

	skb_put(skb, secy->icv_len);

	if (skb->len - ETH_HLEN > macsec_priv(dev)->real_dev->mtu) {
		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);

		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.OutPktsTooLong++;
		u64_stats_update_end(&secy_stats->syncp);

		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-EINVAL);
	}

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	req = macsec_alloc_req(tx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	if (secy->xpn)
		macsec_fill_iv_xpn(iv, tx_sa->ssci, pn.full64, tx_sa->key.salt);
	else
		macsec_fill_iv(iv, secy->sci, pn.lower);

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (tx_sc->encrypt) {
		int len = skb->len - macsec_hdr_len(sci_present) -
			  secy->icv_len;
		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(sci_present));
	} else {
		aead_request_set_crypt(req, sg, sg, 0, iv);
		aead_request_set_ad(req, skb->len - secy->icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	macsec_skb_cb(skb)->tx_sa = tx_sa;
	macsec_skb_cb(skb)->has_sci = sci_present;
	aead_request_set_callback(req, 0, macsec_encrypt_done, skb);

	dev_hold(skb->dev);
	ret = crypto_aead_encrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		dev_put(skb->dev);
		kfree_skb(skb);
		aead_request_free(req);
		macsec_txsa_put(tx_sa);
		return ERR_PTR(-EINVAL);
	}

	dev_put(skb->dev);
	aead_request_free(req);
	macsec_txsa_put(tx_sa);

	return skb;
}

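/* Post-AEAD bookkeeping: re-runs the replay check under the SA lock,
 * updates the validation counters and, on acceptance, advances the SA's
 * next expected PN (handling the 32-bit half crossing for XPN).
 */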
static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u32 pn)
{
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct pcpu_rx_sc_stats *rxsc_stats = this_cpu_ptr(rx_sa->sc->stats);
	struct macsec_eth_header *hdr = macsec_ethhdr(skb);
	u32 lowest_pn = 0;

	spin_lock(&rx_sa->lock);
	if (rx_sa->next_pn_halves.lower >= secy->replay_window)
		lowest_pn = rx_sa->next_pn_halves.lower - secy->replay_window;

	/* Now perform replay protection check again
	 * (see IEEE 802.1AE-2006 figure 10-5)
	 */
	if (secy->replay_protect && pn < lowest_pn &&
	    (!secy->xpn || pn_same_half(pn, lowest_pn))) {
		spin_unlock(&rx_sa->lock);
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsLate++;
		u64_stats_update_end(&rxsc_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_dropped);
		return false;
	}

	if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) {
		unsigned int msdu_len = macsec_msdu_len(skb);
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (hdr->tci_an & MACSEC_TCI_E)
			rxsc_stats->stats.InOctetsDecrypted += msdu_len;
		else
			rxsc_stats->stats.InOctetsValidated += msdu_len;
		u64_stats_update_end(&rxsc_stats->syncp);
	}

	if (!macsec_skb_cb(skb)->valid) {
		spin_unlock(&rx_sa->lock);

		/* 10.6.5 */
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotValid++;
			u64_stats_update_end(&rxsc_stats->syncp);
			this_cpu_inc(rx_sa->stats->InPktsNotValid);
			DEV_STATS_INC(secy->netdev, rx_errors);
			return false;
		}

		u64_stats_update_begin(&rxsc_stats->syncp);
		if (secy->validate_frames == MACSEC_VALIDATE_CHECK) {
			rxsc_stats->stats.InPktsInvalid++;
			this_cpu_inc(rx_sa->stats->InPktsInvalid);
		} else if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsUnchecked++;
		}
		u64_stats_update_end(&rxsc_stats->syncp);
	} else {
		u64_stats_update_begin(&rxsc_stats->syncp);
		if (pn < lowest_pn) {
			rxsc_stats->stats.InPktsDelayed++;
		} else {
			rxsc_stats->stats.InPktsOK++;
			this_cpu_inc(rx_sa->stats->InPktsOK);
		}
		u64_stats_update_end(&rxsc_stats->syncp);

		/* "pn + 1 >" instead of "pn >=", so that a pn of 0xffffffff
		 * (whose increment wraps to 0) is handled by the XPN
		 * rollover branch below
		 */
		if (pn + 1 > rx_sa->next_pn_halves.lower) {
			rx_sa->next_pn_halves.lower = pn + 1;
		} else if (secy->xpn &&
			   !pn_same_half(pn, rx_sa->next_pn_halves.lower)) {
			rx_sa->next_pn_halves.upper++;
			rx_sa->next_pn_halves.lower = pn + 1;
		}

		spin_unlock(&rx_sa->lock);
	}

	return true;
}

static void macsec_reset_skb(struct sk_buff *skb, struct net_device *dev)
{
	skb->pkt_type = PACKET_HOST;
	skb->protocol = eth_type_trans(skb, dev);

	skb_reset_network_header(skb);
	if (!skb_transport_header_was_set(skb))
		skb_reset_transport_header(skb);
	skb_reset_mac_len(skb);
}

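/* Strips the SecTAG and ICV, sliding the MAC addresses forward so the
 * frame looks like ordinary Ethernet again before delivery.
 */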
static void macsec_finalize_skb(struct sk_buff *skb, u8 icv_len, u8 hdr_len)
{
	skb->ip_summed = CHECKSUM_NONE;
	memmove(skb->data + hdr_len, skb->data, 2 * ETH_ALEN);
	skb_pull(skb, hdr_len);
	pskb_trim_unique(skb, skb->len - icv_len);
}

static void count_rx(struct net_device *dev, int len)
{
	struct pcpu_sw_netstats *stats = this_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void macsec_decrypt_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct net_device *dev = skb->dev;
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_rx_sa *rx_sa = macsec_skb_cb(skb)->rx_sa;
	struct macsec_rx_sc *rx_sc = rx_sa->sc;
	int len;
	u32 pn;

	aead_request_free(macsec_skb_cb(skb)->req);

	if (!err)
		macsec_skb_cb(skb)->valid = true;

	rcu_read_lock_bh();
	pn = ntohl(macsec_ethhdr(skb)->packet_number);
	if (!macsec_post_decrypt(skb, &macsec->secy, pn)) {
		rcu_read_unlock_bh();
		kfree_skb(skb);
		goto out;
	}

	macsec_finalize_skb(skb, macsec->secy.icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, macsec->secy.netdev);

	if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS)
		count_rx(dev, len);

	rcu_read_unlock_bh();

out:
	macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);
	dev_put(dev);
}

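/* Authenticates (and, if the E bit is set, decrypts) one received frame.
 * With encryption, the Ethernet + MACsec headers are the AAD and the
 * payload is ciphertext; integrity-only frames authenticate everything
 * up to the ICV. Same return convention as macsec_encrypt().
 */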
static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
				      struct net_device *dev,
				      struct macsec_rx_sa *rx_sa,
				      sci_t sci,
				      struct macsec_secy *secy)
{
	int ret;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	unsigned char *iv;
	struct aead_request *req;
	struct macsec_eth_header *hdr;
	u32 hdr_pn;
	u16 icv_len = secy->icv_len;

	macsec_skb_cb(skb)->valid = false;
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	ret = skb_cow_data(skb, 0, &trailer);
	if (unlikely(ret < 0)) {
		kfree_skb(skb);
		return ERR_PTR(ret);
	}
	req = macsec_alloc_req(rx_sa->key.tfm, &iv, &sg, ret);
	if (!req) {
		kfree_skb(skb);
		return ERR_PTR(-ENOMEM);
	}

	hdr = (struct macsec_eth_header *)skb->data;
	hdr_pn = ntohl(hdr->packet_number);

	if (secy->xpn) {
		pn_t recovered_pn = rx_sa->next_pn_halves;

		recovered_pn.lower = hdr_pn;
		if (hdr_pn < rx_sa->next_pn_halves.lower &&
		    !pn_same_half(hdr_pn, rx_sa->next_pn_halves.lower))
			recovered_pn.upper++;

		macsec_fill_iv_xpn(iv, rx_sa->ssci, recovered_pn.full64,
				   rx_sa->key.salt);
	} else {
		macsec_fill_iv(iv, sci, hdr_pn);
	}

	sg_init_table(sg, ret);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		aead_request_free(req);
		kfree_skb(skb);
		return ERR_PTR(ret);
	}

	if (hdr->tci_an & MACSEC_TCI_E) {
		/* confidentiality: ethernet + macsec header
		 * authenticated, encrypted payload
		 */
		int len = skb->len - macsec_hdr_len(macsec_skb_cb(skb)->has_sci);

		aead_request_set_crypt(req, sg, sg, len, iv);
		aead_request_set_ad(req, macsec_hdr_len(macsec_skb_cb(skb)->has_sci));
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (!skb) {
			aead_request_free(req);
			return ERR_PTR(-ENOMEM);
		}
	} else {
		/* integrity only: all headers + data authenticated */
		aead_request_set_crypt(req, sg, sg, icv_len, iv);
		aead_request_set_ad(req, skb->len - icv_len);
	}

	macsec_skb_cb(skb)->req = req;
	skb->dev = dev;
	aead_request_set_callback(req, 0, macsec_decrypt_done, skb);

	dev_hold(dev);
	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS) {
		return ERR_PTR(ret);
	} else if (ret != 0) {
		/* decryption/authentication failed
		 * 10.6 if validateFrames is disabled, deliver anyway
		 */
		if (ret != -EBADMSG) {
			kfree_skb(skb);
			skb = ERR_PTR(ret);
		}
	} else {
		macsec_skb_cb(skb)->valid = true;
	}
	dev_put(dev);

	aead_request_free(req);

	return skb;
}

1001{
1002	struct macsec_rx_sc *rx_sc;
1003
1004	for_each_rxsc(secy, rx_sc) {
1005		if (rx_sc->sci == sci)
1006			return rx_sc;
1007	}
1008
1009	return NULL;
1010}
1011
1012static struct macsec_rx_sc *find_rx_sc_rtnl(struct macsec_secy *secy, sci_t sci)
1013{
1014	struct macsec_rx_sc *rx_sc;
1015
1016	for_each_rxsc_rtnl(secy, rx_sc) {
1017		if (rx_sc->sci == sci)
1018			return rx_sc;
1019	}
1020
1021	return NULL;
1022}
1023
1024static enum rx_handler_result handle_not_macsec(struct sk_buff *skb)
1025{
1026	/* Deliver to the uncontrolled port by default */
1027	enum rx_handler_result ret = RX_HANDLER_PASS;
1028	struct ethhdr *hdr = eth_hdr(skb);
1029	struct macsec_rxh_data *rxd;
1030	struct macsec_dev *macsec;
1031
1032	rcu_read_lock();
1033	rxd = macsec_data_rcu(skb->dev);
1034
1035	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
1036		struct sk_buff *nskb;
1037		struct pcpu_secy_stats *secy_stats = this_cpu_ptr(macsec->stats);
1038		struct net_device *ndev = macsec->secy.netdev;
1039
1040		/* If h/w offloading is enabled, HW decodes frames and strips
1041		 * the SecTAG, so we have to deduce which port to deliver to.
1042		 */
1043		if (macsec_is_offloaded(macsec) && netif_running(ndev)) {
1044			if (ether_addr_equal_64bits(hdr->h_dest,
1045						    ndev->dev_addr)) {
1046				/* exact match, divert skb to this port */
1047				skb->dev = ndev;
1048				skb->pkt_type = PACKET_HOST;
1049				ret = RX_HANDLER_ANOTHER;
1050				goto out;
1051			} else if (is_multicast_ether_addr_64bits(
1052					   hdr->h_dest)) {
1053				/* multicast frame, deliver on this port too */
1054				nskb = skb_clone(skb, GFP_ATOMIC);
1055				if (!nskb)
1056					break;
1057
1058				nskb->dev = ndev;
1059				if (ether_addr_equal_64bits(hdr->h_dest,
1060							    ndev->broadcast))
1061					nskb->pkt_type = PACKET_BROADCAST;
1062				else
1063					nskb->pkt_type = PACKET_MULTICAST;
1064
1065				netif_rx(nskb);
1066			}
1067			continue;
1068		}
1069
1070		/* 10.6 If the management control validateFrames is not
1071		 * Strict, frames without a SecTAG are received, counted, and
1072		 * delivered to the Controlled Port
1073		 */
1074		if (macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
1075			u64_stats_update_begin(&secy_stats->syncp);
1076			secy_stats->stats.InPktsNoTag++;
1077			u64_stats_update_end(&secy_stats->syncp);
1078			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
1079			continue;
1080		}
1081
1082		/* deliver on this port */
1083		nskb = skb_clone(skb, GFP_ATOMIC);
1084		if (!nskb)
1085			break;
1086
1087		nskb->dev = ndev;
1088
1089		if (netif_rx(nskb) == NET_RX_SUCCESS) {
1090			u64_stats_update_begin(&secy_stats->syncp);
1091			secy_stats->stats.InPktsUntagged++;
1092			u64_stats_update_end(&secy_stats->syncp);
1093		}
1094	}
1095
1096out:
1097	rcu_read_unlock();
1098	return ret;
1099}
1100
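/* rx_handler attached to the real device. RX_HANDLER_PASS hands the skb
 * to the regular stack (uncontrolled port), RX_HANDLER_CONSUMED means we
 * took ownership, and RX_HANDLER_ANOTHER re-runs delivery after
 * retargeting skb->dev at a MACsec port.
 */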
static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macsec_eth_header *hdr;
	struct macsec_secy *secy = NULL;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	struct macsec_rxh_data *rxd;
	struct macsec_dev *macsec;
	unsigned int len;
	sci_t sci;
	u32 hdr_pn;
	bool cbit;
	struct pcpu_rx_sc_stats *rxsc_stats;
	struct pcpu_secy_stats *secy_stats;
	bool pulled_sci;
	int ret;

	if (skb_headroom(skb) < ETH_HLEN)
		goto drop_direct;

	hdr = macsec_ethhdr(skb);
	if (hdr->eth.h_proto != htons(ETH_P_MACSEC))
		return handle_not_macsec(skb);

	skb = skb_unshare(skb, GFP_ATOMIC);
	*pskb = skb;
	if (!skb)
		return RX_HANDLER_CONSUMED;

	pulled_sci = pskb_may_pull(skb, macsec_extra_len(true));
	if (!pulled_sci) {
		if (!pskb_may_pull(skb, macsec_extra_len(false)))
			goto drop_direct;
	}

	hdr = macsec_ethhdr(skb);

	/* Frames with a SecTAG that has the TCI E bit set but the C
	 * bit clear are discarded, as this reserved encoding is used
	 * to identify frames with a SecTAG that are not to be
	 * delivered to the Controlled Port.
	 */
	if ((hdr->tci_an & (MACSEC_TCI_C | MACSEC_TCI_E)) == MACSEC_TCI_E)
		return RX_HANDLER_PASS;

	/* now, pull the extra length */
	if (hdr->tci_an & MACSEC_TCI_SC) {
		if (!pulled_sci)
			goto drop_direct;
	}

	/* ethernet header is part of crypto processing */
	skb_push(skb, ETH_HLEN);

	macsec_skb_cb(skb)->has_sci = !!(hdr->tci_an & MACSEC_TCI_SC);
	macsec_skb_cb(skb)->assoc_num = hdr->tci_an & MACSEC_AN_MASK;
	sci = macsec_frame_sci(hdr, macsec_skb_cb(skb)->has_sci);

	rcu_read_lock();
	rxd = macsec_data_rcu(skb->dev);

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct macsec_rx_sc *sc = find_rx_sc(&macsec->secy, sci);

		sc = sc ? macsec_rxsc_get(sc) : NULL;

		if (sc) {
			secy = &macsec->secy;
			rx_sc = sc;
			break;
		}
	}

	if (!secy)
		goto nosci;

	dev = secy->netdev;
	macsec = macsec_priv(dev);
	secy_stats = this_cpu_ptr(macsec->stats);
	rxsc_stats = this_cpu_ptr(rx_sc->stats);

	if (!macsec_validate_skb(skb, secy->icv_len, secy->xpn)) {
		u64_stats_update_begin(&secy_stats->syncp);
		secy_stats->stats.InPktsBadTag++;
		u64_stats_update_end(&secy_stats->syncp);
		DEV_STATS_INC(secy->netdev, rx_errors);
		goto drop_nosa;
	}

	rx_sa = macsec_rxsa_get(rx_sc->sa[macsec_skb_cb(skb)->assoc_num]);
	if (!rx_sa) {
		/* 10.6.1 if the SA is not in use */

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc);
		if (hdr->tci_an & MACSEC_TCI_C ||
		    secy->validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsNotUsingSA++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(secy->netdev, rx_errors);
			if (active_rx_sa)
				this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA);
			goto drop_nosa;
		}

		/* not Strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		u64_stats_update_begin(&rxsc_stats->syncp);
		rxsc_stats->stats.InPktsUnusedSA++;
		u64_stats_update_end(&rxsc_stats->syncp);
		if (active_rx_sa)
			this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA);
		goto deliver;
	}

	/* First, PN check to avoid decrypting obviously wrong packets */
	hdr_pn = ntohl(hdr->packet_number);
	if (secy->replay_protect) {
		bool late;

		spin_lock(&rx_sa->lock);
		late = rx_sa->next_pn_halves.lower >= secy->replay_window &&
		       hdr_pn < (rx_sa->next_pn_halves.lower - secy->replay_window);

		if (secy->xpn)
			late = late && pn_same_half(rx_sa->next_pn_halves.lower, hdr_pn);
		spin_unlock(&rx_sa->lock);

		if (late) {
			u64_stats_update_begin(&rxsc_stats->syncp);
			rxsc_stats->stats.InPktsLate++;
			u64_stats_update_end(&rxsc_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
			goto drop;
		}
	}

	macsec_skb_cb(skb)->rx_sa = rx_sa;

	/* Disabled && !changed text => skip validation */
	if (hdr->tci_an & MACSEC_TCI_C ||
	    secy->validate_frames != MACSEC_VALIDATE_DISABLED)
		skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);

	if (IS_ERR(skb)) {
		/* the decrypt callback needs the reference */
		if (PTR_ERR(skb) != -EINPROGRESS) {
			macsec_rxsa_put(rx_sa);
			macsec_rxsc_put(rx_sc);
		}
		rcu_read_unlock();
		*pskb = NULL;
		return RX_HANDLER_CONSUMED;
	}

	if (!macsec_post_decrypt(skb, secy, hdr_pn))
		goto drop;

deliver:
	macsec_finalize_skb(skb, secy->icv_len,
			    macsec_extra_len(macsec_skb_cb(skb)->has_sci));
	len = skb->len;
	macsec_reset_skb(skb, secy->netdev);

	if (rx_sa)
		macsec_rxsa_put(rx_sa);
	macsec_rxsc_put(rx_sc);

	skb_orphan(skb);
	ret = gro_cells_receive(&macsec->gro_cells, skb);
	if (ret == NET_RX_SUCCESS)
		count_rx(dev, len);
	else
		DEV_STATS_INC(macsec->secy.netdev, rx_dropped);

	rcu_read_unlock();

	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

drop:
	macsec_rxsa_put(rx_sa);
drop_nosa:
	macsec_rxsc_put(rx_sc);
	rcu_read_unlock();
drop_direct:
	kfree_skb(skb);
	*pskb = NULL;
	return RX_HANDLER_CONSUMED;

nosci:
	/* 10.6.1 if the SC is not found */
	cbit = !!(hdr->tci_an & MACSEC_TCI_C);
	if (!cbit)
		macsec_finalize_skb(skb, DEFAULT_ICV_LEN,
				    macsec_extra_len(macsec_skb_cb(skb)->has_sci));

	list_for_each_entry_rcu(macsec, &rxd->secys, secys) {
		struct sk_buff *nskb;

		secy_stats = this_cpu_ptr(macsec->stats);

		/* If validateFrames is Strict or the C bit in the
		 * SecTAG is set, discard
		 */
		if (cbit ||
		    macsec->secy.validate_frames == MACSEC_VALIDATE_STRICT) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsNoSCI++;
			u64_stats_update_end(&secy_stats->syncp);
			DEV_STATS_INC(macsec->secy.netdev, rx_errors);
			continue;
		}

		/* not strict, the frame (with the SecTAG and ICV
		 * removed) is delivered to the Controlled Port.
		 */
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			break;

		macsec_reset_skb(nskb, macsec->secy.netdev);

		ret = netif_rx(nskb);
		if (ret == NET_RX_SUCCESS) {
			u64_stats_update_begin(&secy_stats->syncp);
			secy_stats->stats.InPktsUnknownSCI++;
			u64_stats_update_end(&secy_stats->syncp);
		} else {
			DEV_STATS_INC(macsec->secy.netdev, rx_dropped);
		}
	}

	rcu_read_unlock();
	*pskb = skb;
	return RX_HANDLER_PASS;
}

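/* One gcm(aes) transform per SA: key_len selects AES-128 or AES-256,
 * icv_len sets the GCM authentication tag size.
 */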
static struct crypto_aead *macsec_alloc_tfm(char *key, int key_len, int icv_len)
{
	struct crypto_aead *tfm;
	int ret;

	tfm = crypto_alloc_aead("gcm(aes)", 0, 0);

	if (IS_ERR(tfm))
		return tfm;

	ret = crypto_aead_setkey(tfm, key, key_len);
	if (ret < 0)
		goto fail;

	ret = crypto_aead_setauthsize(tfm, icv_len);
	if (ret < 0)
		goto fail;

	return tfm;
fail:
	crypto_free_aead(tfm);
	return ERR_PTR(ret);
}

static int init_rx_sa(struct macsec_rx_sa *rx_sa, char *sak, int key_len,
		      int icv_len)
{
	rx_sa->stats = alloc_percpu(struct macsec_rx_sa_stats);
	if (!rx_sa->stats)
		return -ENOMEM;

	rx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(rx_sa->key.tfm)) {
		free_percpu(rx_sa->stats);
		return PTR_ERR(rx_sa->key.tfm);
	}

	rx_sa->ssci = MACSEC_UNDEF_SSCI;
	rx_sa->active = false;
	rx_sa->next_pn = 1;
	refcount_set(&rx_sa->refcnt, 1);
	spin_lock_init(&rx_sa->lock);

	return 0;
}

static void clear_rx_sa(struct macsec_rx_sa *rx_sa)
{
	rx_sa->active = false;

	macsec_rxsa_put(rx_sa);
}

static void free_rx_sc(struct macsec_rx_sc *rx_sc)
{
	int i;

	for (i = 0; i < MACSEC_NUM_AN; i++) {
		struct macsec_rx_sa *sa = rtnl_dereference(rx_sc->sa[i]);

		RCU_INIT_POINTER(rx_sc->sa[i], NULL);
		if (sa)
			clear_rx_sa(sa);
	}

	macsec_rxsc_put(rx_sc);
}

static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci)
{
	struct macsec_rx_sc *rx_sc, __rcu **rx_scp;

	for (rx_scp = &secy->rx_sc, rx_sc = rtnl_dereference(*rx_scp);
	     rx_sc;
	     rx_scp = &rx_sc->next, rx_sc = rtnl_dereference(*rx_scp)) {
		if (rx_sc->sci == sci) {
			if (rx_sc->active)
				secy->n_rx_sc--;
			rcu_assign_pointer(*rx_scp, rx_sc->next);
			return rx_sc;
		}
	}

	return NULL;
}

static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci,
					 bool active)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_dev *macsec;
	struct net_device *real_dev = macsec_priv(dev)->real_dev;
	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
	struct macsec_secy *secy;

	list_for_each_entry(macsec, &rxd->secys, secys) {
		if (find_rx_sc_rtnl(&macsec->secy, sci))
			return ERR_PTR(-EEXIST);
	}

	rx_sc = kzalloc(sizeof(*rx_sc), GFP_KERNEL);
	if (!rx_sc)
		return ERR_PTR(-ENOMEM);

	rx_sc->stats = netdev_alloc_pcpu_stats(struct pcpu_rx_sc_stats);
	if (!rx_sc->stats) {
		kfree(rx_sc);
		return ERR_PTR(-ENOMEM);
	}

	rx_sc->sci = sci;
	rx_sc->active = active;
	refcount_set(&rx_sc->refcnt, 1);

	secy = &macsec_priv(dev)->secy;
	rcu_assign_pointer(rx_sc->next, secy->rx_sc);
	rcu_assign_pointer(secy->rx_sc, rx_sc);

	if (rx_sc->active)
		secy->n_rx_sc++;

	return rx_sc;
}

static int init_tx_sa(struct macsec_tx_sa *tx_sa, char *sak, int key_len,
		      int icv_len)
{
	tx_sa->stats = alloc_percpu(struct macsec_tx_sa_stats);
	if (!tx_sa->stats)
		return -ENOMEM;

	tx_sa->key.tfm = macsec_alloc_tfm(sak, key_len, icv_len);
	if (IS_ERR(tx_sa->key.tfm)) {
		free_percpu(tx_sa->stats);
		return PTR_ERR(tx_sa->key.tfm);
	}

	tx_sa->ssci = MACSEC_UNDEF_SSCI;
	tx_sa->active = false;
	refcount_set(&tx_sa->refcnt, 1);
	spin_lock_init(&tx_sa->lock);

	return 0;
}

static void clear_tx_sa(struct macsec_tx_sa *tx_sa)
{
	tx_sa->active = false;

	macsec_txsa_put(tx_sa);
}

static struct genl_family macsec_fam;

static struct net_device *get_dev_from_nl(struct net *net,
					  struct nlattr **attrs)
{
	int ifindex = nla_get_u32(attrs[MACSEC_ATTR_IFINDEX]);
	struct net_device *dev;

	dev = __dev_get_by_index(net, ifindex);
	if (!dev)
		return ERR_PTR(-ENODEV);

	if (!netif_is_macsec(dev))
		return ERR_PTR(-ENODEV);

	return dev;
}

static enum macsec_offload nla_get_offload(const struct nlattr *nla)
{
	return (__force enum macsec_offload)nla_get_u8(nla);
}

static sci_t nla_get_sci(const struct nlattr *nla)
{
	return (__force sci_t)nla_get_u64(nla);
}

static int nla_put_sci(struct sk_buff *skb, int attrtype, sci_t value,
		       int padattr)
{
	return nla_put_u64_64bit(skb, attrtype, (__force u64)value, padattr);
}

static ssci_t nla_get_ssci(const struct nlattr *nla)
{
	return (__force ssci_t)nla_get_u32(nla);
}

static int nla_put_ssci(struct sk_buff *skb, int attrtype, ssci_t value)
{
	return nla_put_u32(skb, attrtype, (__force u64)value);
}

static struct macsec_tx_sa *get_txsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_tx_sc **scp,
					     u8 *assoc_num)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_tx_sc *tx_sc;
	struct macsec_tx_sa *tx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	secy = &macsec_priv(dev)->secy;
	tx_sc = &secy->tx_sc;

	tx_sa = rtnl_dereference(tx_sc->sa[*assoc_num]);
	if (!tx_sa)
		return ERR_PTR(-ENODEV);

	*devp = dev;
	*scp = tx_sc;
	*secyp = secy;
	return tx_sa;
}

static struct macsec_rx_sc *get_rxsc_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct net_device **devp,
					     struct macsec_secy **secyp)
{
	struct net_device *dev;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	sci_t sci;

	dev = get_dev_from_nl(net, attrs);
	if (IS_ERR(dev))
		return ERR_CAST(dev);

	secy = &macsec_priv(dev)->secy;

	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
		return ERR_PTR(-EINVAL);

	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
	rx_sc = find_rx_sc_rtnl(secy, sci);
	if (!rx_sc)
		return ERR_PTR(-ENODEV);

	*secyp = secy;
	*devp = dev;

	return rx_sc;
}

static struct macsec_rx_sa *get_rxsa_from_nl(struct net *net,
					     struct nlattr **attrs,
					     struct nlattr **tb_rxsc,
					     struct nlattr **tb_sa,
					     struct net_device **devp,
					     struct macsec_secy **secyp,
					     struct macsec_rx_sc **scp,
					     u8 *assoc_num)
{
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;

	if (!tb_sa[MACSEC_SA_ATTR_AN])
		return ERR_PTR(-EINVAL);

	*assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
	if (*assoc_num >= MACSEC_NUM_AN)
		return ERR_PTR(-EINVAL);

	rx_sc = get_rxsc_from_nl(net, attrs, tb_rxsc, devp, secyp);
	if (IS_ERR(rx_sc))
		return ERR_CAST(rx_sc);

	rx_sa = rtnl_dereference(rx_sc->sa[*assoc_num]);
	if (!rx_sa)
		return ERR_PTR(-ENODEV);

	*scp = rx_sc;
	return rx_sa;
}

static const struct nla_policy macsec_genl_policy[NUM_MACSEC_ATTR] = {
	[MACSEC_ATTR_IFINDEX] = { .type = NLA_U32 },
	[MACSEC_ATTR_RXSC_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_SA_CONFIG] = { .type = NLA_NESTED },
	[MACSEC_ATTR_OFFLOAD] = { .type = NLA_NESTED },
};

static const struct nla_policy macsec_genl_rxsc_policy[NUM_MACSEC_RXSC_ATTR] = {
	[MACSEC_RXSC_ATTR_SCI] = { .type = NLA_U64 },
	[MACSEC_RXSC_ATTR_ACTIVE] = { .type = NLA_U8 },
};

static const struct nla_policy macsec_genl_sa_policy[NUM_MACSEC_SA_ATTR] = {
	[MACSEC_SA_ATTR_AN] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_ACTIVE] = { .type = NLA_U8 },
	[MACSEC_SA_ATTR_PN] = NLA_POLICY_MIN_LEN(4),
	[MACSEC_SA_ATTR_KEYID] = { .type = NLA_BINARY,
				   .len = MACSEC_KEYID_LEN, },
	[MACSEC_SA_ATTR_KEY] = { .type = NLA_BINARY,
				 .len = MACSEC_MAX_KEY_LEN, },
	[MACSEC_SA_ATTR_SSCI] = { .type = NLA_U32 },
	[MACSEC_SA_ATTR_SALT] = { .type = NLA_BINARY,
				  .len = MACSEC_SALT_LEN, },
};

static const struct nla_policy macsec_genl_offload_policy[NUM_MACSEC_OFFLOAD_ATTR] = {
	[MACSEC_OFFLOAD_ATTR_TYPE] = { .type = NLA_U8 },
};

/* Offloads an operation to a device driver */
static int macsec_offload(int (* const func)(struct macsec_context *),
			  struct macsec_context *ctx)
{
	int ret;

	if (unlikely(!func))
		return 0;

	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_lock(&ctx->phydev->lock);

	/* Phase I: prepare. The driver should fail here if there are going
	 * to be issues in the commit phase.
	 */
	ctx->prepare = true;
	ret = (*func)(ctx);
	if (ret)
		goto phy_unlock;

	/* Phase II: commit. This step cannot fail. */
	ctx->prepare = false;
	ret = (*func)(ctx);
	/* This should never happen: commit is not allowed to fail */
	if (unlikely(ret))
		WARN(1, "MACsec offloading commit failed (%d)\n", ret);

phy_unlock:
	if (ctx->offload == MACSEC_OFFLOAD_PHY)
		mutex_unlock(&ctx->phydev->lock);

	return ret;
}

static int parse_sa_config(struct nlattr **attrs, struct nlattr **tb_sa)
{
	if (!attrs[MACSEC_ATTR_SA_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_sa, MACSEC_SA_ATTR_MAX, attrs[MACSEC_ATTR_SA_CONFIG], macsec_genl_sa_policy, NULL))
		return -EINVAL;

	return 0;
}

static int parse_rxsc_config(struct nlattr **attrs, struct nlattr **tb_rxsc)
{
	if (!attrs[MACSEC_ATTR_RXSC_CONFIG])
		return -EINVAL;

	if (nla_parse_nested_deprecated(tb_rxsc, MACSEC_RXSC_ATTR_MAX, attrs[MACSEC_ATTR_RXSC_CONFIG], macsec_genl_rxsc_policy, NULL))
		return -EINVAL;

	return 0;
}

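/* Unlike the TX side, the PN is optional when adding an RX SA; when it
 * is absent, init_rx_sa() starts the SA at next_pn == 1.
 */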
static bool validate_add_rxsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (attrs[MACSEC_SA_ATTR_PN] &&
	    nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

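/* Creates the SA in software first, then mirrors it to the offloading
 * device if any; the SA is only published with rcu_assign_pointer()
 * once both steps succeeded, so readers never see a half-set-up SA.
 */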
static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	struct nlattr **attrs = info->attrs;
	struct macsec_secy *secy;
	struct macsec_rx_sc *rx_sc;
	struct macsec_rx_sa *rx_sa;
	unsigned char assoc_num;
	int pn_len;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
	int err;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_sa_config(attrs, tb_sa))
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsa(tb_sa))
		return -EINVAL;

	rtnl_lock();
	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);

	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
		pr_notice("macsec: nl: add_rxsa: bad key length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
		rtnl_unlock();
		return -EINVAL;
	}

	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
	if (tb_sa[MACSEC_SA_ATTR_PN] &&
	    nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
		pr_notice("macsec: nl: add_rxsa: bad pn length: %d != %d\n",
			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
		rtnl_unlock();
		return -EINVAL;
	}

	if (secy->xpn) {
		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
			rtnl_unlock();
			return -EINVAL;
		}

		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
			pr_notice("macsec: nl: add_rxsa: bad salt length: %d != %d\n",
				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
				  MACSEC_SALT_LEN);
			rtnl_unlock();
			return -EINVAL;
		}
	}

	rx_sa = rtnl_dereference(rx_sc->sa[assoc_num]);
	if (rx_sa) {
		rtnl_unlock();
		return -EBUSY;
	}

	rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
	if (!rx_sa) {
		rtnl_unlock();
		return -ENOMEM;
	}

	err = init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
			 secy->key_len, secy->icv_len);
	if (err < 0) {
		kfree(rx_sa);
		rtnl_unlock();
		return err;
	}

	if (tb_sa[MACSEC_SA_ATTR_PN]) {
		spin_lock_bh(&rx_sa->lock);
		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
		spin_unlock_bh(&rx_sa->lock);
	}

	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
		rx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);

	rx_sa->sc = rx_sc;

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			err = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.sa.assoc_num = assoc_num;
		ctx.sa.rx_sa = rx_sa;
		ctx.secy = secy;
		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
		       secy->key_len);

		err = macsec_offload(ops->mdo_add_rxsa, &ctx);
		memzero_explicit(ctx.sa.key, secy->key_len);
		if (err)
			goto cleanup;
	}

	if (secy->xpn) {
		rx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
		nla_memcpy(rx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
			   MACSEC_SALT_LEN);
	}

	nla_memcpy(rx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
	rcu_assign_pointer(rx_sc->sa[assoc_num], rx_sa);

	rtnl_unlock();

	return 0;

cleanup:
	macsec_rxsa_put(rx_sa);
	rtnl_unlock();
	return err;
}

static bool validate_add_rxsc(struct nlattr **attrs)
{
	if (!attrs[MACSEC_RXSC_ATTR_SCI])
		return false;

	if (attrs[MACSEC_RXSC_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_RXSC_ATTR_ACTIVE]) > 1)
			return false;
	}

	return true;
}

static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info)
{
	struct net_device *dev;
	sci_t sci = MACSEC_UNDEF_SCI;
	struct nlattr **attrs = info->attrs;
	struct macsec_rx_sc *rx_sc;
	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
	struct macsec_secy *secy;
	bool active = true;
	int ret;

	if (!attrs[MACSEC_ATTR_IFINDEX])
		return -EINVAL;

	if (parse_rxsc_config(attrs, tb_rxsc))
		return -EINVAL;

	if (!validate_add_rxsc(tb_rxsc))
		return -EINVAL;

	rtnl_lock();
	dev = get_dev_from_nl(genl_info_net(info), attrs);
	if (IS_ERR(dev)) {
		rtnl_unlock();
		return PTR_ERR(dev);
	}

	secy = &macsec_priv(dev)->secy;
	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);

	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE])
		active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);

	rx_sc = create_rx_sc(dev, sci, active);
	if (IS_ERR(rx_sc)) {
		rtnl_unlock();
		return PTR_ERR(rx_sc);
	}

	if (macsec_is_offloaded(netdev_priv(dev))) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(netdev_priv(dev), &ctx);
		if (!ops) {
			ret = -EOPNOTSUPP;
			goto cleanup;
		}

		ctx.rx_sc = rx_sc;
		ctx.secy = secy;

		ret = macsec_offload(ops->mdo_add_rxsc, &ctx);
		if (ret)
			goto cleanup;
	}

	rtnl_unlock();

	return 0;

cleanup:
	del_rx_sc(secy, sci);
	free_rx_sc(rx_sc);
	rtnl_unlock();
	return ret;
}

static bool validate_add_txsa(struct nlattr **attrs)
{
	if (!attrs[MACSEC_SA_ATTR_AN] ||
	    !attrs[MACSEC_SA_ATTR_PN] ||
	    !attrs[MACSEC_SA_ATTR_KEY] ||
	    !attrs[MACSEC_SA_ATTR_KEYID])
		return false;

	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
		return false;

	if (nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
		return false;

	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
			return false;
	}

	if (nla_len(attrs[MACSEC_SA_ATTR_KEYID]) != MACSEC_KEYID_LEN)
		return false;

	return true;
}

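/* TX counterpart of macsec_add_rxsa(); here the initial PN is mandatory
 * (see validate_add_txsa) and adding an active SA on the encoding SA
 * slot makes the SecY operational again.
 */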
1995static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
1996{
1997	struct net_device *dev;
1998	struct nlattr **attrs = info->attrs;
1999	struct macsec_secy *secy;
2000	struct macsec_tx_sc *tx_sc;
2001	struct macsec_tx_sa *tx_sa;
2002	unsigned char assoc_num;
2003	int pn_len;
2004	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2005	bool was_operational;
2006	int err;
2007
2008	if (!attrs[MACSEC_ATTR_IFINDEX])
2009		return -EINVAL;
2010
2011	if (parse_sa_config(attrs, tb_sa))
2012		return -EINVAL;
2013
2014	if (!validate_add_txsa(tb_sa))
2015		return -EINVAL;
2016
2017	rtnl_lock();
2018	dev = get_dev_from_nl(genl_info_net(info), attrs);
2019	if (IS_ERR(dev)) {
2020		rtnl_unlock();
2021		return PTR_ERR(dev);
2022	}
2023
2024	secy = &macsec_priv(dev)->secy;
2025	tx_sc = &secy->tx_sc;
2026
2027	assoc_num = nla_get_u8(tb_sa[MACSEC_SA_ATTR_AN]);
2028
2029	if (nla_len(tb_sa[MACSEC_SA_ATTR_KEY]) != secy->key_len) {
2030		pr_notice("macsec: nl: add_txsa: bad key length: %d != %d\n",
2031			  nla_len(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len);
2032		rtnl_unlock();
2033		return -EINVAL;
2034	}
2035
2036	pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2037	if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2038		pr_notice("macsec: nl: add_txsa: bad pn length: %d != %d\n",
2039			  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2040		rtnl_unlock();
2041		return -EINVAL;
2042	}
2043
2044	if (secy->xpn) {
2045		if (!tb_sa[MACSEC_SA_ATTR_SSCI] || !tb_sa[MACSEC_SA_ATTR_SALT]) {
2046			rtnl_unlock();
2047			return -EINVAL;
2048		}
2049
2050		if (nla_len(tb_sa[MACSEC_SA_ATTR_SALT]) != MACSEC_SALT_LEN) {
2051			pr_notice("macsec: nl: add_txsa: bad salt length: %d != %d\n",
2052				  nla_len(tb_sa[MACSEC_SA_ATTR_SALT]),
2053				  MACSEC_SALT_LEN);
2054			rtnl_unlock();
2055			return -EINVAL;
2056		}
2057	}
2058
2059	tx_sa = rtnl_dereference(tx_sc->sa[assoc_num]);
2060	if (tx_sa) {
2061		rtnl_unlock();
2062		return -EBUSY;
2063	}
2064
2065	tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
2066	if (!tx_sa) {
2067		rtnl_unlock();
2068		return -ENOMEM;
2069	}
2070
2071	err = init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2072			 secy->key_len, secy->icv_len);
2073	if (err < 0) {
2074		kfree(tx_sa);
2075		rtnl_unlock();
2076		return err;
2077	}
2078
2079	spin_lock_bh(&tx_sa->lock);
2080	tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2081	spin_unlock_bh(&tx_sa->lock);
2082
2083	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2084		tx_sa->active = !!nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2085
2086	was_operational = secy->operational;
2087	if (assoc_num == tx_sc->encoding_sa && tx_sa->active)
2088		secy->operational = true;
2089
2090	/* If h/w offloading is available, propagate to the device */
2091	if (macsec_is_offloaded(netdev_priv(dev))) {
2092		const struct macsec_ops *ops;
2093		struct macsec_context ctx;
2094
2095		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2096		if (!ops) {
2097			err = -EOPNOTSUPP;
2098			goto cleanup;
2099		}
2100
2101		ctx.sa.assoc_num = assoc_num;
2102		ctx.sa.tx_sa = tx_sa;
2103		ctx.secy = secy;
2104		memcpy(ctx.sa.key, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
2105		       secy->key_len);
2106
2107		err = macsec_offload(ops->mdo_add_txsa, &ctx);
2108		memzero_explicit(ctx.sa.key, secy->key_len);
2109		if (err)
2110			goto cleanup;
2111	}
2112
2113	if (secy->xpn) {
2114		tx_sa->ssci = nla_get_ssci(tb_sa[MACSEC_SA_ATTR_SSCI]);
2115		nla_memcpy(tx_sa->key.salt.bytes, tb_sa[MACSEC_SA_ATTR_SALT],
2116			   MACSEC_SALT_LEN);
2117	}
2118
2119	nla_memcpy(tx_sa->key.id, tb_sa[MACSEC_SA_ATTR_KEYID], MACSEC_KEYID_LEN);
2120	rcu_assign_pointer(tx_sc->sa[assoc_num], tx_sa);
2121
2122	rtnl_unlock();
2123
2124	return 0;
2125
2126cleanup:
2127	secy->operational = was_operational;
2128	macsec_txsa_put(tx_sa);
2129	rtnl_unlock();
2130	return err;
2131}
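/* Typical userspace counterpart (illustrative; the key id and the
 * 16-byte GCM-AES-128 key are placeholders):
 *
 *   ip macsec add macsec0 tx sa 0 pn 1 on \
 *           key 01 81818181818181818181818181818181
 *
 * For the XPN cipher suites the request must also carry
 * MACSEC_SA_ATTR_SSCI and a MACSEC_SALT_LEN-byte salt alongside the
 * 64-bit PN, as checked above.
 */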
2132
2133static int macsec_del_rxsa(struct sk_buff *skb, struct genl_info *info)
2134{
2135	struct nlattr **attrs = info->attrs;
2136	struct net_device *dev;
2137	struct macsec_secy *secy;
2138	struct macsec_rx_sc *rx_sc;
2139	struct macsec_rx_sa *rx_sa;
2140	u8 assoc_num;
2141	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2142	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2143	int ret;
2144
2145	if (!attrs[MACSEC_ATTR_IFINDEX])
2146		return -EINVAL;
2147
2148	if (parse_sa_config(attrs, tb_sa))
2149		return -EINVAL;
2150
2151	if (parse_rxsc_config(attrs, tb_rxsc))
2152		return -EINVAL;
2153
2154	rtnl_lock();
2155	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2156				 &dev, &secy, &rx_sc, &assoc_num);
2157	if (IS_ERR(rx_sa)) {
2158		rtnl_unlock();
2159		return PTR_ERR(rx_sa);
2160	}
2161
2162	if (rx_sa->active) {
2163		rtnl_unlock();
2164		return -EBUSY;
2165	}
2166
2167	/* If h/w offloading is available, propagate to the device */
2168	if (macsec_is_offloaded(netdev_priv(dev))) {
2169		const struct macsec_ops *ops;
2170		struct macsec_context ctx;
2171
2172		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2173		if (!ops) {
2174			ret = -EOPNOTSUPP;
2175			goto cleanup;
2176		}
2177
2178		ctx.sa.assoc_num = assoc_num;
2179		ctx.sa.rx_sa = rx_sa;
2180		ctx.secy = secy;
2181
2182		ret = macsec_offload(ops->mdo_del_rxsa, &ctx);
2183		if (ret)
2184			goto cleanup;
2185	}
2186
2187	RCU_INIT_POINTER(rx_sc->sa[assoc_num], NULL);
2188	clear_rx_sa(rx_sa);
2189
2190	rtnl_unlock();
2191
2192	return 0;
2193
2194cleanup:
2195	rtnl_unlock();
2196	return ret;
2197}
2198
2199static int macsec_del_rxsc(struct sk_buff *skb, struct genl_info *info)
2200{
2201	struct nlattr **attrs = info->attrs;
2202	struct net_device *dev;
2203	struct macsec_secy *secy;
2204	struct macsec_rx_sc *rx_sc;
2205	sci_t sci;
2206	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2207	int ret;
2208
2209	if (!attrs[MACSEC_ATTR_IFINDEX])
2210		return -EINVAL;
2211
2212	if (parse_rxsc_config(attrs, tb_rxsc))
2213		return -EINVAL;
2214
2215	if (!tb_rxsc[MACSEC_RXSC_ATTR_SCI])
2216		return -EINVAL;
2217
2218	rtnl_lock();
2219	dev = get_dev_from_nl(genl_info_net(info), info->attrs);
2220	if (IS_ERR(dev)) {
2221		rtnl_unlock();
2222		return PTR_ERR(dev);
2223	}
2224
2225	secy = &macsec_priv(dev)->secy;
2226	sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]);
2227
2228	rx_sc = del_rx_sc(secy, sci);
2229	if (!rx_sc) {
2230		rtnl_unlock();
2231		return -ENODEV;
2232	}
2233
2234	/* If h/w offloading is available, propagate to the device */
2235	if (macsec_is_offloaded(netdev_priv(dev))) {
2236		const struct macsec_ops *ops;
2237		struct macsec_context ctx;
2238
2239		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2240		if (!ops) {
2241			ret = -EOPNOTSUPP;
2242			goto cleanup;
2243		}
2244
2245		ctx.rx_sc = rx_sc;
2246		ctx.secy = secy;
2247		ret = macsec_offload(ops->mdo_del_rxsc, &ctx);
2248		if (ret)
2249			goto cleanup;
2250	}
2251
2252	free_rx_sc(rx_sc);
2253	rtnl_unlock();
2254
2255	return 0;
2256
2257cleanup:
2258	rtnl_unlock();
2259	return ret;
2260}
2261
2262static int macsec_del_txsa(struct sk_buff *skb, struct genl_info *info)
2263{
2264	struct nlattr **attrs = info->attrs;
2265	struct net_device *dev;
2266	struct macsec_secy *secy;
2267	struct macsec_tx_sc *tx_sc;
2268	struct macsec_tx_sa *tx_sa;
2269	u8 assoc_num;
2270	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2271	int ret;
2272
2273	if (!attrs[MACSEC_ATTR_IFINDEX])
2274		return -EINVAL;
2275
2276	if (parse_sa_config(attrs, tb_sa))
2277		return -EINVAL;
2278
2279	rtnl_lock();
2280	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2281				 &dev, &secy, &tx_sc, &assoc_num);
2282	if (IS_ERR(tx_sa)) {
2283		rtnl_unlock();
2284		return PTR_ERR(tx_sa);
2285	}
2286
2287	if (tx_sa->active) {
2288		rtnl_unlock();
2289		return -EBUSY;
2290	}
2291
2292	/* If h/w offloading is available, propagate to the device */
2293	if (macsec_is_offloaded(netdev_priv(dev))) {
2294		const struct macsec_ops *ops;
2295		struct macsec_context ctx;
2296
2297		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2298		if (!ops) {
2299			ret = -EOPNOTSUPP;
2300			goto cleanup;
2301		}
2302
2303		ctx.sa.assoc_num = assoc_num;
2304		ctx.sa.tx_sa = tx_sa;
2305		ctx.secy = secy;
2306
2307		ret = macsec_offload(ops->mdo_del_txsa, &ctx);
2308		if (ret)
2309			goto cleanup;
2310	}
2311
2312	RCU_INIT_POINTER(tx_sc->sa[assoc_num], NULL);
2313	clear_tx_sa(tx_sa);
2314
2315	rtnl_unlock();
2316
2317	return 0;
2318
2319cleanup:
2320	rtnl_unlock();
2321	return ret;
2322}
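/* Both deletion handlers refuse to remove an SA that is still active
 * (-EBUSY), so userspace has to disable it first, e.g.:
 *
 *   ip macsec set macsec0 tx sa 0 off
 *   ip macsec del macsec0 tx sa 0
 */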
2323
2324static bool validate_upd_sa(struct nlattr **attrs)
2325{
2326	if (!attrs[MACSEC_SA_ATTR_AN] ||
2327	    attrs[MACSEC_SA_ATTR_KEY] ||
2328	    attrs[MACSEC_SA_ATTR_KEYID] ||
2329	    attrs[MACSEC_SA_ATTR_SSCI] ||
2330	    attrs[MACSEC_SA_ATTR_SALT])
2331		return false;
2332
2333	if (nla_get_u8(attrs[MACSEC_SA_ATTR_AN]) >= MACSEC_NUM_AN)
2334		return false;
2335
2336	if (attrs[MACSEC_SA_ATTR_PN] && nla_get_u64(attrs[MACSEC_SA_ATTR_PN]) == 0)
2337		return false;
2338
2339	if (attrs[MACSEC_SA_ATTR_ACTIVE]) {
2340		if (nla_get_u8(attrs[MACSEC_SA_ATTR_ACTIVE]) > 1)
2341			return false;
2342	}
2343
2344	return true;
2345}
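/* The update path deliberately rejects MACSEC_SA_ATTR_KEY/KEYID and the
 * XPN SSCI/SALT: an SA's keying material cannot be changed once it is
 * installed. Rekeying is instead done by installing a fresh SA under
 * another association number and moving the encoding SA over to it.
 */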
2346
2347static int macsec_upd_txsa(struct sk_buff *skb, struct genl_info *info)
2348{
2349	struct nlattr **attrs = info->attrs;
2350	struct net_device *dev;
2351	struct macsec_secy *secy;
2352	struct macsec_tx_sc *tx_sc;
2353	struct macsec_tx_sa *tx_sa;
2354	u8 assoc_num;
2355	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2356	bool was_operational, was_active;
2357	pn_t prev_pn;
2358	int ret = 0;
2359
2360	prev_pn.full64 = 0;
2361
2362	if (!attrs[MACSEC_ATTR_IFINDEX])
2363		return -EINVAL;
2364
2365	if (parse_sa_config(attrs, tb_sa))
2366		return -EINVAL;
2367
2368	if (!validate_upd_sa(tb_sa))
2369		return -EINVAL;
2370
2371	rtnl_lock();
2372	tx_sa = get_txsa_from_nl(genl_info_net(info), attrs, tb_sa,
2373				 &dev, &secy, &tx_sc, &assoc_num);
2374	if (IS_ERR(tx_sa)) {
2375		rtnl_unlock();
2376		return PTR_ERR(tx_sa);
2377	}
2378
2379	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2380		int pn_len;
2381
2382		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2383		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2384			pr_notice("macsec: nl: upd_txsa: bad pn length: %d != %d\n",
2385				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2386			rtnl_unlock();
2387			return -EINVAL;
2388		}
2389
2390		spin_lock_bh(&tx_sa->lock);
2391		prev_pn = tx_sa->next_pn_halves;
2392		tx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2393		spin_unlock_bh(&tx_sa->lock);
2394	}
2395
2396	was_active = tx_sa->active;
2397	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2398		tx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2399
2400	was_operational = secy->operational;
2401	if (assoc_num == tx_sc->encoding_sa)
2402		secy->operational = tx_sa->active;
2403
2404	/* If h/w offloading is available, propagate to the device */
2405	if (macsec_is_offloaded(netdev_priv(dev))) {
2406		const struct macsec_ops *ops;
2407		struct macsec_context ctx;
2408
2409		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2410		if (!ops) {
2411			ret = -EOPNOTSUPP;
2412			goto cleanup;
2413		}
2414
2415		ctx.sa.assoc_num = assoc_num;
2416		ctx.sa.tx_sa = tx_sa;
2417		ctx.sa.update_pn = !!prev_pn.full64;
2418		ctx.secy = secy;
2419
2420		ret = macsec_offload(ops->mdo_upd_txsa, &ctx);
2421		if (ret)
2422			goto cleanup;
2423	}
2424
2425	rtnl_unlock();
2426
2427	return 0;
2428
2429cleanup:
2430	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2431		spin_lock_bh(&tx_sa->lock);
2432		tx_sa->next_pn_halves = prev_pn;
2433		spin_unlock_bh(&tx_sa->lock);
2434	}
2435	tx_sa->active = was_active;
2436	secy->operational = was_operational;
2437	rtnl_unlock();
2438	return ret;
2439}
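/* e.g. "ip macsec set macsec0 tx sa 0 pn 100 on" updates the next PN
 * and the activity flag in one request. Note the cleanup label above:
 * if offload propagation fails, PN, active and operational state are
 * all restored so software and hardware never diverge.
 */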
2440
2441static int macsec_upd_rxsa(struct sk_buff *skb, struct genl_info *info)
2442{
2443	struct nlattr **attrs = info->attrs;
2444	struct net_device *dev;
2445	struct macsec_secy *secy;
2446	struct macsec_rx_sc *rx_sc;
2447	struct macsec_rx_sa *rx_sa;
2448	u8 assoc_num;
2449	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2450	struct nlattr *tb_sa[MACSEC_SA_ATTR_MAX + 1];
2451	bool was_active;
2452	pn_t prev_pn;
2453	int ret = 0;
2454
2455	prev_pn.full64 = 0;
2456
2457	if (!attrs[MACSEC_ATTR_IFINDEX])
2458		return -EINVAL;
2459
2460	if (parse_rxsc_config(attrs, tb_rxsc))
2461		return -EINVAL;
2462
2463	if (parse_sa_config(attrs, tb_sa))
2464		return -EINVAL;
2465
2466	if (!validate_upd_sa(tb_sa))
2467		return -EINVAL;
2468
2469	rtnl_lock();
2470	rx_sa = get_rxsa_from_nl(genl_info_net(info), attrs, tb_rxsc, tb_sa,
2471				 &dev, &secy, &rx_sc, &assoc_num);
2472	if (IS_ERR(rx_sa)) {
2473		rtnl_unlock();
2474		return PTR_ERR(rx_sa);
2475	}
2476
2477	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2478		int pn_len;
2479
2480		pn_len = secy->xpn ? MACSEC_XPN_PN_LEN : MACSEC_DEFAULT_PN_LEN;
2481		if (nla_len(tb_sa[MACSEC_SA_ATTR_PN]) != pn_len) {
2482			pr_notice("macsec: nl: upd_rxsa: bad pn length: %d != %d\n",
2483				  nla_len(tb_sa[MACSEC_SA_ATTR_PN]), pn_len);
2484			rtnl_unlock();
2485			return -EINVAL;
2486		}
2487
2488		spin_lock_bh(&rx_sa->lock);
2489		prev_pn = rx_sa->next_pn_halves;
2490		rx_sa->next_pn = nla_get_u64(tb_sa[MACSEC_SA_ATTR_PN]);
2491		spin_unlock_bh(&rx_sa->lock);
2492	}
2493
2494	was_active = rx_sa->active;
2495	if (tb_sa[MACSEC_SA_ATTR_ACTIVE])
2496		rx_sa->active = nla_get_u8(tb_sa[MACSEC_SA_ATTR_ACTIVE]);
2497
2498	/* If h/w offloading is available, propagate to the device */
2499	if (macsec_is_offloaded(netdev_priv(dev))) {
2500		const struct macsec_ops *ops;
2501		struct macsec_context ctx;
2502
2503		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2504		if (!ops) {
2505			ret = -EOPNOTSUPP;
2506			goto cleanup;
2507		}
2508
2509		ctx.sa.assoc_num = assoc_num;
2510		ctx.sa.rx_sa = rx_sa;
2511		ctx.sa.update_pn = !!prev_pn.full64;
2512		ctx.secy = secy;
2513
2514		ret = macsec_offload(ops->mdo_upd_rxsa, &ctx);
2515		if (ret)
2516			goto cleanup;
2517	}
2518
2519	rtnl_unlock();
2520	return 0;
2521
2522cleanup:
2523	if (tb_sa[MACSEC_SA_ATTR_PN]) {
2524		spin_lock_bh(&rx_sa->lock);
2525		rx_sa->next_pn_halves = prev_pn;
2526		spin_unlock_bh(&rx_sa->lock);
2527	}
2528	rx_sa->active = was_active;
2529	rtnl_unlock();
2530	return ret;
2531}
2532
2533static int macsec_upd_rxsc(struct sk_buff *skb, struct genl_info *info)
2534{
2535	struct nlattr **attrs = info->attrs;
2536	struct net_device *dev;
2537	struct macsec_secy *secy;
2538	struct macsec_rx_sc *rx_sc;
2539	struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1];
2540	unsigned int prev_n_rx_sc;
2541	bool was_active;
2542	int ret;
2543
2544	if (!attrs[MACSEC_ATTR_IFINDEX])
2545		return -EINVAL;
2546
2547	if (parse_rxsc_config(attrs, tb_rxsc))
2548		return -EINVAL;
2549
2550	if (!validate_add_rxsc(tb_rxsc))
2551		return -EINVAL;
2552
2553	rtnl_lock();
2554	rx_sc = get_rxsc_from_nl(genl_info_net(info), attrs, tb_rxsc, &dev, &secy);
2555	if (IS_ERR(rx_sc)) {
2556		rtnl_unlock();
2557		return PTR_ERR(rx_sc);
2558	}
2559
2560	was_active = rx_sc->active;
2561	prev_n_rx_sc = secy->n_rx_sc;
2562	if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) {
2563		bool new = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]);
2564
2565		if (rx_sc->active != new)
2566			secy->n_rx_sc += new ? 1 : -1;
2567
2568		rx_sc->active = new;
2569	}
2570
2571	/* If h/w offloading is available, propagate to the device */
2572	if (macsec_is_offloaded(netdev_priv(dev))) {
2573		const struct macsec_ops *ops;
2574		struct macsec_context ctx;
2575
2576		ops = macsec_get_ops(netdev_priv(dev), &ctx);
2577		if (!ops) {
2578			ret = -EOPNOTSUPP;
2579			goto cleanup;
2580		}
2581
2582		ctx.rx_sc = rx_sc;
2583		ctx.secy = secy;
2584
2585		ret = macsec_offload(ops->mdo_upd_rxsc, &ctx);
2586		if (ret)
2587			goto cleanup;
2588	}
2589
2590	rtnl_unlock();
2591
2592	return 0;
2593
2594cleanup:
2595	secy->n_rx_sc = prev_n_rx_sc;
2596	rx_sc->active = was_active;
2597	rtnl_unlock();
2598	return ret;
2599}
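/* e.g. "ip macsec set macsec0 rx port 1 address 00:11:22:33:44:55 off"
 * (illustrative peer SCI) toggles an RX SC. The n_rx_sc bookkeeping
 * above feeds the send_sci() helper earlier in this file: with more
 * than one active peer, transmitted SecTAGs must carry an explicit SCI.
 */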
2600
2601static bool macsec_is_configured(struct macsec_dev *macsec)
2602{
2603	struct macsec_secy *secy = &macsec->secy;
2604	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
2605	int i;
2606
2607	if (secy->rx_sc)
2608		return true;
2609
2610	for (i = 0; i < MACSEC_NUM_AN; i++)
2611		if (rtnl_dereference(tx_sc->sa[i]))
2612			return true;
2613
2614	return false;
2615}
2616
2617static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info)
2618{
2619	struct nlattr *tb_offload[MACSEC_OFFLOAD_ATTR_MAX + 1];
2620	enum macsec_offload offload, prev_offload;
2621	int (*func)(struct macsec_context *ctx);
2622	struct nlattr **attrs = info->attrs;
2623	struct net_device *dev;
2624	const struct macsec_ops *ops;
2625	struct macsec_context ctx;
2626	struct macsec_dev *macsec;
2627	int ret = 0;
2628
2629	if (!attrs[MACSEC_ATTR_IFINDEX])
2630		return -EINVAL;
2631
2632	if (!attrs[MACSEC_ATTR_OFFLOAD])
2633		return -EINVAL;
2634
2635	if (nla_parse_nested_deprecated(tb_offload, MACSEC_OFFLOAD_ATTR_MAX,
2636					attrs[MACSEC_ATTR_OFFLOAD],
2637					macsec_genl_offload_policy, NULL))
2638		return -EINVAL;
2639
2640	rtnl_lock();
2641
2642	dev = get_dev_from_nl(genl_info_net(info), attrs);
2643	if (IS_ERR(dev)) {
2644		ret = PTR_ERR(dev);
2645		goto out;
2646	}
2647	macsec = macsec_priv(dev);
2648
2649	if (!tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]) {
2650		ret = -EINVAL;
2651		goto out;
2652	}
2653
2654	offload = nla_get_u8(tb_offload[MACSEC_OFFLOAD_ATTR_TYPE]);
2655	if (macsec->offload == offload)
2656		goto out;
2657
2658	/* Check if the offloading mode is supported by the underlying layers */
2659	if (offload != MACSEC_OFFLOAD_OFF &&
2660	    !macsec_check_offload(offload, macsec)) {
2661		ret = -EOPNOTSUPP;
2662		goto out;
2663	}
2664
2665	/* Check if the net device is busy. */
2666	if (netif_running(dev)) {
2667		ret = -EBUSY;
2668		goto out;
2669	}
2670
2671	prev_offload = macsec->offload;
2672	macsec->offload = offload;
2673
2674	/* Check if the device already has rules configured: we do not support
2675	 * migrating existing rules to or from the offloading device.
2676	 */
2677	if (macsec_is_configured(macsec)) {
2678		ret = -EBUSY;
2679		goto rollback;
2680	}
2681
2682	ops = __macsec_get_ops(offload == MACSEC_OFFLOAD_OFF ? prev_offload : offload,
2683			       macsec, &ctx);
2684	if (!ops) {
2685		ret = -EOPNOTSUPP;
2686		goto rollback;
2687	}
2688
2689	if (prev_offload == MACSEC_OFFLOAD_OFF)
2690		func = ops->mdo_add_secy;
2691	else
2692		func = ops->mdo_del_secy;
2693
2694	ctx.secy = &macsec->secy;
2695	ret = macsec_offload(func, &ctx);
2696	if (ret)
2697		goto rollback;
2698
2699	rtnl_unlock();
2700	return 0;
2701
2702rollback:
2703	macsec->offload = prev_offload;
2704out:
2705	rtnl_unlock();
2706	return ret;
2707}
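/* Offload can only be flipped while the interface is down and before
 * any SC/SA has been configured, since rules are not migrated, e.g.:
 *
 *   ip link set macsec0 down
 *   ip macsec offload macsec0 mac     (or "phy" / "off", with an
 *                                      iproute2 recent enough to know
 *                                      the "offload" subcommand)
 */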
2708
2709static void get_tx_sa_stats(struct net_device *dev, int an,
2710			    struct macsec_tx_sa *tx_sa,
2711			    struct macsec_tx_sa_stats *sum)
2712{
2713	struct macsec_dev *macsec = macsec_priv(dev);
2714	int cpu;
2715
2716	/* If h/w offloading is available, propagate to the device */
2717	if (macsec_is_offloaded(macsec)) {
2718		const struct macsec_ops *ops;
2719		struct macsec_context ctx;
2720
2721		ops = macsec_get_ops(macsec, &ctx);
2722		if (ops) {
2723			ctx.sa.assoc_num = an;
2724			ctx.sa.tx_sa = tx_sa;
2725			ctx.stats.tx_sa_stats = sum;
2726			ctx.secy = &macsec_priv(dev)->secy;
2727			macsec_offload(ops->mdo_get_tx_sa_stats, &ctx);
2728		}
2729		return;
2730	}
2731
2732	for_each_possible_cpu(cpu) {
2733		const struct macsec_tx_sa_stats *stats =
2734			per_cpu_ptr(tx_sa->stats, cpu);
2735
2736		sum->OutPktsProtected += stats->OutPktsProtected;
2737		sum->OutPktsEncrypted += stats->OutPktsEncrypted;
2738	}
2739}
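/* The per-SA counters are 32 bits wide and bumped with this_cpu_inc(),
 * so they can simply be summed here; the per-SC and per-SecY counters
 * below are u64 and are therefore snapshotted per cpu under
 * u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() so the sums
 * stay consistent on 32-bit machines.
 */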
2740
2741static int copy_tx_sa_stats(struct sk_buff *skb, struct macsec_tx_sa_stats *sum)
2742{
2743	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_PROTECTED,
2744			sum->OutPktsProtected) ||
2745	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2746			sum->OutPktsEncrypted))
2747		return -EMSGSIZE;
2748
2749	return 0;
2750}
2751
2752static void get_rx_sa_stats(struct net_device *dev,
2753			    struct macsec_rx_sc *rx_sc, int an,
2754			    struct macsec_rx_sa *rx_sa,
2755			    struct macsec_rx_sa_stats *sum)
2756{
2757	struct macsec_dev *macsec = macsec_priv(dev);
2758	int cpu;
2759
2760	/* If h/w offloading is available, propagate to the device */
2761	if (macsec_is_offloaded(macsec)) {
2762		const struct macsec_ops *ops;
2763		struct macsec_context ctx;
2764
2765		ops = macsec_get_ops(macsec, &ctx);
2766		if (ops) {
2767			ctx.sa.assoc_num = an;
2768			ctx.sa.rx_sa = rx_sa;
2769			ctx.stats.rx_sa_stats = sum;
2770			ctx.secy = &macsec_priv(dev)->secy;
2771			ctx.rx_sc = rx_sc;
2772			macsec_offload(ops->mdo_get_rx_sa_stats, &ctx);
2773		}
2774		return;
2775	}
2776
2777	for_each_possible_cpu(cpu) {
2778		const struct macsec_rx_sa_stats *stats =
2779			per_cpu_ptr(rx_sa->stats, cpu);
2780
2781		sum->InPktsOK         += stats->InPktsOK;
2782		sum->InPktsInvalid    += stats->InPktsInvalid;
2783		sum->InPktsNotValid   += stats->InPktsNotValid;
2784		sum->InPktsNotUsingSA += stats->InPktsNotUsingSA;
2785		sum->InPktsUnusedSA   += stats->InPktsUnusedSA;
2786	}
2787}
2788
2789static int copy_rx_sa_stats(struct sk_buff *skb,
2790			    struct macsec_rx_sa_stats *sum)
2791{
2792	if (nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_OK, sum->InPktsOK) ||
2793	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_INVALID,
2794			sum->InPktsInvalid) ||
2795	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_VALID,
2796			sum->InPktsNotValid) ||
2797	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2798			sum->InPktsNotUsingSA) ||
2799	    nla_put_u32(skb, MACSEC_SA_STATS_ATTR_IN_PKTS_UNUSED_SA,
2800			sum->InPktsUnusedSA))
2801		return -EMSGSIZE;
2802
2803	return 0;
2804}
2805
2806static void get_rx_sc_stats(struct net_device *dev,
2807			    struct macsec_rx_sc *rx_sc,
2808			    struct macsec_rx_sc_stats *sum)
2809{
2810	struct macsec_dev *macsec = macsec_priv(dev);
2811	int cpu;
2812
2813	/* If h/w offloading is available, propagate to the device */
2814	if (macsec_is_offloaded(macsec)) {
2815		const struct macsec_ops *ops;
2816		struct macsec_context ctx;
2817
2818		ops = macsec_get_ops(macsec, &ctx);
2819		if (ops) {
2820			ctx.stats.rx_sc_stats = sum;
2821			ctx.secy = &macsec_priv(dev)->secy;
2822			ctx.rx_sc = rx_sc;
2823			macsec_offload(ops->mdo_get_rx_sc_stats, &ctx);
2824		}
2825		return;
2826	}
2827
2828	for_each_possible_cpu(cpu) {
2829		const struct pcpu_rx_sc_stats *stats;
2830		struct macsec_rx_sc_stats tmp;
2831		unsigned int start;
2832
2833		stats = per_cpu_ptr(rx_sc->stats, cpu);
2834		do {
2835			start = u64_stats_fetch_begin_irq(&stats->syncp);
2836			memcpy(&tmp, &stats->stats, sizeof(tmp));
2837		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2838
2839		sum->InOctetsValidated += tmp.InOctetsValidated;
2840		sum->InOctetsDecrypted += tmp.InOctetsDecrypted;
2841		sum->InPktsUnchecked   += tmp.InPktsUnchecked;
2842		sum->InPktsDelayed     += tmp.InPktsDelayed;
2843		sum->InPktsOK          += tmp.InPktsOK;
2844		sum->InPktsInvalid     += tmp.InPktsInvalid;
2845		sum->InPktsLate        += tmp.InPktsLate;
2846		sum->InPktsNotValid    += tmp.InPktsNotValid;
2847		sum->InPktsNotUsingSA  += tmp.InPktsNotUsingSA;
2848		sum->InPktsUnusedSA    += tmp.InPktsUnusedSA;
2849	}
2850}
2851
2852static int copy_rx_sc_stats(struct sk_buff *skb, struct macsec_rx_sc_stats *sum)
2853{
2854	if (nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_VALIDATED,
2855			      sum->InOctetsValidated,
2856			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2857	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_OCTETS_DECRYPTED,
2858			      sum->InOctetsDecrypted,
2859			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2860	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNCHECKED,
2861			      sum->InPktsUnchecked,
2862			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2863	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_DELAYED,
2864			      sum->InPktsDelayed,
2865			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2866	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_OK,
2867			      sum->InPktsOK,
2868			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2869	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_INVALID,
2870			      sum->InPktsInvalid,
2871			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2872	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_LATE,
2873			      sum->InPktsLate,
2874			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2875	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_VALID,
2876			      sum->InPktsNotValid,
2877			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2878	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_NOT_USING_SA,
2879			      sum->InPktsNotUsingSA,
2880			      MACSEC_RXSC_STATS_ATTR_PAD) ||
2881	    nla_put_u64_64bit(skb, MACSEC_RXSC_STATS_ATTR_IN_PKTS_UNUSED_SA,
2882			      sum->InPktsUnusedSA,
2883			      MACSEC_RXSC_STATS_ATTR_PAD))
2884		return -EMSGSIZE;
2885
2886	return 0;
2887}
2888
2889static void get_tx_sc_stats(struct net_device *dev,
2890			    struct macsec_tx_sc_stats *sum)
2891{
2892	struct macsec_dev *macsec = macsec_priv(dev);
2893	int cpu;
2894
2895	/* If h/w offloading is available, propagate to the device */
2896	if (macsec_is_offloaded(macsec)) {
2897		const struct macsec_ops *ops;
2898		struct macsec_context ctx;
2899
2900		ops = macsec_get_ops(macsec, &ctx);
2901		if (ops) {
2902			ctx.stats.tx_sc_stats = sum;
2903			ctx.secy = &macsec_priv(dev)->secy;
2904			macsec_offload(ops->mdo_get_tx_sc_stats, &ctx);
2905		}
2906		return;
2907	}
2908
2909	for_each_possible_cpu(cpu) {
2910		const struct pcpu_tx_sc_stats *stats;
2911		struct macsec_tx_sc_stats tmp;
2912		unsigned int start;
2913
2914		stats = per_cpu_ptr(macsec_priv(dev)->secy.tx_sc.stats, cpu);
2915		do {
2916			start = u64_stats_fetch_begin_irq(&stats->syncp);
2917			memcpy(&tmp, &stats->stats, sizeof(tmp));
2918		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2919
2920		sum->OutPktsProtected   += tmp.OutPktsProtected;
2921		sum->OutPktsEncrypted   += tmp.OutPktsEncrypted;
2922		sum->OutOctetsProtected += tmp.OutOctetsProtected;
2923		sum->OutOctetsEncrypted += tmp.OutOctetsEncrypted;
2924	}
2925}
2926
2927static int copy_tx_sc_stats(struct sk_buff *skb, struct macsec_tx_sc_stats *sum)
2928{
2929	if (nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_PROTECTED,
2930			      sum->OutPktsProtected,
2931			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2932	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_PKTS_ENCRYPTED,
2933			      sum->OutPktsEncrypted,
2934			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2935	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_PROTECTED,
2936			      sum->OutOctetsProtected,
2937			      MACSEC_TXSC_STATS_ATTR_PAD) ||
2938	    nla_put_u64_64bit(skb, MACSEC_TXSC_STATS_ATTR_OUT_OCTETS_ENCRYPTED,
2939			      sum->OutOctetsEncrypted,
2940			      MACSEC_TXSC_STATS_ATTR_PAD))
2941		return -EMSGSIZE;
2942
2943	return 0;
2944}
2945
2946static void get_secy_stats(struct net_device *dev, struct macsec_dev_stats *sum)
2947{
2948	struct macsec_dev *macsec = macsec_priv(dev);
2949	int cpu;
2950
2951	/* If h/w offloading is available, propagate to the device */
2952	if (macsec_is_offloaded(macsec)) {
2953		const struct macsec_ops *ops;
2954		struct macsec_context ctx;
2955
2956		ops = macsec_get_ops(macsec, &ctx);
2957		if (ops) {
2958			ctx.stats.dev_stats = sum;
2959			ctx.secy = &macsec_priv(dev)->secy;
2960			macsec_offload(ops->mdo_get_dev_stats, &ctx);
2961		}
2962		return;
2963	}
2964
2965	for_each_possible_cpu(cpu) {
2966		const struct pcpu_secy_stats *stats;
2967		struct macsec_dev_stats tmp;
2968		unsigned int start;
2969
2970		stats = per_cpu_ptr(macsec_priv(dev)->stats, cpu);
2971		do {
2972			start = u64_stats_fetch_begin_irq(&stats->syncp);
2973			memcpy(&tmp, &stats->stats, sizeof(tmp));
2974		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
2975
2976		sum->OutPktsUntagged  += tmp.OutPktsUntagged;
2977		sum->InPktsUntagged   += tmp.InPktsUntagged;
2978		sum->OutPktsTooLong   += tmp.OutPktsTooLong;
2979		sum->InPktsNoTag      += tmp.InPktsNoTag;
2980		sum->InPktsBadTag     += tmp.InPktsBadTag;
2981		sum->InPktsUnknownSCI += tmp.InPktsUnknownSCI;
2982		sum->InPktsNoSCI      += tmp.InPktsNoSCI;
2983		sum->InPktsOverrun    += tmp.InPktsOverrun;
2984	}
2985}
2986
2987static int copy_secy_stats(struct sk_buff *skb, struct macsec_dev_stats *sum)
2988{
2989	if (nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_UNTAGGED,
2990			      sum->OutPktsUntagged,
2991			      MACSEC_SECY_STATS_ATTR_PAD) ||
2992	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNTAGGED,
2993			      sum->InPktsUntagged,
2994			      MACSEC_SECY_STATS_ATTR_PAD) ||
2995	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_OUT_PKTS_TOO_LONG,
2996			      sum->OutPktsTooLong,
2997			      MACSEC_SECY_STATS_ATTR_PAD) ||
2998	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_TAG,
2999			      sum->InPktsNoTag,
3000			      MACSEC_SECY_STATS_ATTR_PAD) ||
3001	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_BAD_TAG,
3002			      sum->InPktsBadTag,
3003			      MACSEC_SECY_STATS_ATTR_PAD) ||
3004	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_UNKNOWN_SCI,
3005			      sum->InPktsUnknownSCI,
3006			      MACSEC_SECY_STATS_ATTR_PAD) ||
3007	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_NO_SCI,
3008			      sum->InPktsNoSCI,
3009			      MACSEC_SECY_STATS_ATTR_PAD) ||
3010	    nla_put_u64_64bit(skb, MACSEC_SECY_STATS_ATTR_IN_PKTS_OVERRUN,
3011			      sum->InPktsOverrun,
3012			      MACSEC_SECY_STATS_ATTR_PAD))
3013		return -EMSGSIZE;
3014
3015	return 0;
3016}
3017
3018static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
3019{
3020	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3021	struct nlattr *secy_nest = nla_nest_start_noflag(skb,
3022							 MACSEC_ATTR_SECY);
3023	u64 csid;
3024
3025	if (!secy_nest)
3026		return 1;
3027
3028	switch (secy->key_len) {
3029	case MACSEC_GCM_AES_128_SAK_LEN:
3030		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
3031		break;
3032	case MACSEC_GCM_AES_256_SAK_LEN:
3033		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
3034		break;
3035	default:
3036		goto cancel;
3037	}
3038
3039	if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci,
3040			MACSEC_SECY_ATTR_PAD) ||
3041	    nla_put_u64_64bit(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
3042			      csid, MACSEC_SECY_ATTR_PAD) ||
3043	    nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
3044	    nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
3045	    nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
3046	    nla_put_u8(skb, MACSEC_SECY_ATTR_REPLAY, secy->replay_protect) ||
3047	    nla_put_u8(skb, MACSEC_SECY_ATTR_VALIDATE, secy->validate_frames) ||
3048	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCRYPT, tx_sc->encrypt) ||
3049	    nla_put_u8(skb, MACSEC_SECY_ATTR_INC_SCI, tx_sc->send_sci) ||
3050	    nla_put_u8(skb, MACSEC_SECY_ATTR_ES, tx_sc->end_station) ||
3051	    nla_put_u8(skb, MACSEC_SECY_ATTR_SCB, tx_sc->scb) ||
3052	    nla_put_u8(skb, MACSEC_SECY_ATTR_ENCODING_SA, tx_sc->encoding_sa))
3053		goto cancel;
3054
3055	if (secy->replay_protect) {
3056		if (nla_put_u32(skb, MACSEC_SECY_ATTR_WINDOW, secy->replay_window))
3057			goto cancel;
3058	}
3059
3060	nla_nest_end(skb, secy_nest);
3061	return 0;
3062
3063cancel:
3064	nla_nest_cancel(skb, secy_nest);
3065	return 1;
3066}
3067
3068static noinline_for_stack int
3069dump_secy(struct macsec_secy *secy, struct net_device *dev,
3070	  struct sk_buff *skb, struct netlink_callback *cb)
3071{
3072	struct macsec_tx_sc_stats tx_sc_stats = {0, };
3073	struct macsec_tx_sa_stats tx_sa_stats = {0, };
3074	struct macsec_rx_sc_stats rx_sc_stats = {0, };
3075	struct macsec_rx_sa_stats rx_sa_stats = {0, };
3076	struct macsec_dev *macsec = netdev_priv(dev);
3077	struct macsec_dev_stats dev_stats = {0, };
3078	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
3079	struct nlattr *txsa_list, *rxsc_list;
3080	struct macsec_rx_sc *rx_sc;
3081	struct nlattr *attr;
3082	void *hdr;
3083	int i, j;
3084
3085	hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
3086			  &macsec_fam, NLM_F_MULTI, MACSEC_CMD_GET_TXSC);
3087	if (!hdr)
3088		return -EMSGSIZE;
3089
3090	genl_dump_check_consistent(cb, hdr);
3091
3092	if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
3093		goto nla_put_failure;
3094
3095	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_OFFLOAD);
3096	if (!attr)
3097		goto nla_put_failure;
3098	if (nla_put_u8(skb, MACSEC_OFFLOAD_ATTR_TYPE, macsec->offload))
3099		goto nla_put_failure;
3100	nla_nest_end(skb, attr);
3101
3102	if (nla_put_secy(secy, skb))
3103		goto nla_put_failure;
3104
3105	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSC_STATS);
3106	if (!attr)
3107		goto nla_put_failure;
3108
3109	get_tx_sc_stats(dev, &tx_sc_stats);
3110	if (copy_tx_sc_stats(skb, &tx_sc_stats)) {
3111		nla_nest_cancel(skb, attr);
3112		goto nla_put_failure;
3113	}
3114	nla_nest_end(skb, attr);
3115
3116	attr = nla_nest_start_noflag(skb, MACSEC_ATTR_SECY_STATS);
3117	if (!attr)
3118		goto nla_put_failure;
3119	get_secy_stats(dev, &dev_stats);
3120	if (copy_secy_stats(skb, &dev_stats)) {
3121		nla_nest_cancel(skb, attr);
3122		goto nla_put_failure;
3123	}
3124	nla_nest_end(skb, attr);
3125
3126	txsa_list = nla_nest_start_noflag(skb, MACSEC_ATTR_TXSA_LIST);
3127	if (!txsa_list)
3128		goto nla_put_failure;
3129	for (i = 0, j = 1; i < MACSEC_NUM_AN; i++) {
3130		struct macsec_tx_sa *tx_sa = rtnl_dereference(tx_sc->sa[i]);
3131		struct nlattr *txsa_nest;
3132		u64 pn;
3133		int pn_len;
3134
3135		if (!tx_sa)
3136			continue;
3137
3138		txsa_nest = nla_nest_start_noflag(skb, j++);
3139		if (!txsa_nest) {
3140			nla_nest_cancel(skb, txsa_list);
3141			goto nla_put_failure;
3142		}
3143
3144		attr = nla_nest_start_noflag(skb, MACSEC_SA_ATTR_STATS);
3145		if (!attr) {
3146			nla_nest_cancel(skb, txsa_nest);
3147			nla_nest_cancel(skb, txsa_list);
3148			goto nla_put_failure;
3149		}
3150		memset(&tx_sa_stats, 0, sizeof(tx_sa_stats));
3151		get_tx_sa_stats(dev, i, tx_sa, &tx_sa_stats);
3152		if (copy_tx_sa_stats(skb, &tx_sa_stats)) {
3153			nla_nest_cancel(skb, attr);
3154			nla_nest_cancel(skb, txsa_nest);
3155			nla_nest_cancel(skb, txsa_list);
3156			goto nla_put_failure;
3157		}
3158		nla_nest_end(skb, attr);
3159
3160		if (secy->xpn) {
3161			pn = tx_sa->next_pn;
3162			pn_len = MACSEC_XPN_PN_LEN;
3163		} else {
3164			pn = tx_sa->next_pn_halves.lower;
3165			pn_len = MACSEC_DEFAULT_PN_LEN;
3166		}
3167
3168		if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3169		    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3170		    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, tx_sa->key.id) ||
3171		    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, tx_sa->ssci)) ||
3172		    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, tx_sa->active)) {
3173			nla_nest_cancel(skb, txsa_nest);
3174			nla_nest_cancel(skb, txsa_list);
3175			goto nla_put_failure;
3176		}
3177
3178		nla_nest_end(skb, txsa_nest);
3179	}
3180	nla_nest_end(skb, txsa_list);
3181
3182	rxsc_list = nla_nest_start_noflag(skb, MACSEC_ATTR_RXSC_LIST);
3183	if (!rxsc_list)
3184		goto nla_put_failure;
3185
3186	j = 1;
3187	for_each_rxsc_rtnl(secy, rx_sc) {
3188		int k;
3189		struct nlattr *rxsa_list;
3190		struct nlattr *rxsc_nest = nla_nest_start_noflag(skb, j++);
3191
3192		if (!rxsc_nest) {
3193			nla_nest_cancel(skb, rxsc_list);
3194			goto nla_put_failure;
3195		}
3196
3197		if (nla_put_u8(skb, MACSEC_RXSC_ATTR_ACTIVE, rx_sc->active) ||
3198		    nla_put_sci(skb, MACSEC_RXSC_ATTR_SCI, rx_sc->sci,
3199				MACSEC_RXSC_ATTR_PAD)) {
3200			nla_nest_cancel(skb, rxsc_nest);
3201			nla_nest_cancel(skb, rxsc_list);
3202			goto nla_put_failure;
3203		}
3204
3205		attr = nla_nest_start_noflag(skb, MACSEC_RXSC_ATTR_STATS);
3206		if (!attr) {
3207			nla_nest_cancel(skb, rxsc_nest);
3208			nla_nest_cancel(skb, rxsc_list);
3209			goto nla_put_failure;
3210		}
3211		memset(&rx_sc_stats, 0, sizeof(rx_sc_stats));
3212		get_rx_sc_stats(dev, rx_sc, &rx_sc_stats);
3213		if (copy_rx_sc_stats(skb, &rx_sc_stats)) {
3214			nla_nest_cancel(skb, attr);
3215			nla_nest_cancel(skb, rxsc_nest);
3216			nla_nest_cancel(skb, rxsc_list);
3217			goto nla_put_failure;
3218		}
3219		nla_nest_end(skb, attr);
3220
3221		rxsa_list = nla_nest_start_noflag(skb,
3222						  MACSEC_RXSC_ATTR_SA_LIST);
3223		if (!rxsa_list) {
3224			nla_nest_cancel(skb, rxsc_nest);
3225			nla_nest_cancel(skb, rxsc_list);
3226			goto nla_put_failure;
3227		}
3228
3229		for (i = 0, k = 1; i < MACSEC_NUM_AN; i++) {
3230			struct macsec_rx_sa *rx_sa = rtnl_dereference(rx_sc->sa[i]);
3231			struct nlattr *rxsa_nest;
3232			u64 pn;
3233			int pn_len;
3234
3235			if (!rx_sa)
3236				continue;
3237
3238			rxsa_nest = nla_nest_start_noflag(skb, k++);
3239			if (!rxsa_nest) {
3240				nla_nest_cancel(skb, rxsa_list);
3241				nla_nest_cancel(skb, rxsc_nest);
3242				nla_nest_cancel(skb, rxsc_list);
3243				goto nla_put_failure;
3244			}
3245
3246			attr = nla_nest_start_noflag(skb,
3247						     MACSEC_SA_ATTR_STATS);
3248			if (!attr) {
3249				nla_nest_cancel(skb, rxsa_list);
3250				nla_nest_cancel(skb, rxsc_nest);
3251				nla_nest_cancel(skb, rxsc_list);
3252				goto nla_put_failure;
3253			}
3254			memset(&rx_sa_stats, 0, sizeof(rx_sa_stats));
3255			get_rx_sa_stats(dev, rx_sc, i, rx_sa, &rx_sa_stats);
3256			if (copy_rx_sa_stats(skb, &rx_sa_stats)) {
3257				nla_nest_cancel(skb, attr);
3258				nla_nest_cancel(skb, rxsa_list);
3259				nla_nest_cancel(skb, rxsc_nest);
3260				nla_nest_cancel(skb, rxsc_list);
3261				goto nla_put_failure;
3262			}
3263			nla_nest_end(skb, attr);
3264
3265			if (secy->xpn) {
3266				pn = rx_sa->next_pn;
3267				pn_len = MACSEC_XPN_PN_LEN;
3268			} else {
3269				pn = rx_sa->next_pn_halves.lower;
3270				pn_len = MACSEC_DEFAULT_PN_LEN;
3271			}
3272
3273			if (nla_put_u8(skb, MACSEC_SA_ATTR_AN, i) ||
3274			    nla_put(skb, MACSEC_SA_ATTR_PN, pn_len, &pn) ||
3275			    nla_put(skb, MACSEC_SA_ATTR_KEYID, MACSEC_KEYID_LEN, rx_sa->key.id) ||
3276			    (secy->xpn && nla_put_ssci(skb, MACSEC_SA_ATTR_SSCI, rx_sa->ssci)) ||
3277			    nla_put_u8(skb, MACSEC_SA_ATTR_ACTIVE, rx_sa->active)) {
3278				nla_nest_cancel(skb, rxsa_nest);
3279				nla_nest_cancel(skb, rxsc_nest);
3280				nla_nest_cancel(skb, rxsc_list);
3281				goto nla_put_failure;
3282			}
3283			nla_nest_end(skb, rxsa_nest);
3284		}
3285
3286		nla_nest_end(skb, rxsa_list);
3287		nla_nest_end(skb, rxsc_nest);
3288	}
3289
3290	nla_nest_end(skb, rxsc_list);
3291
3292	genlmsg_end(skb, hdr);
3293
3294	return 0;
3295
3296nla_put_failure:
3297	genlmsg_cancel(skb, hdr);
3298	return -EMSGSIZE;
3299}
3300
3301static int macsec_generation = 1; /* protected by RTNL */
3302
3303static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
3304{
3305	struct net *net = sock_net(skb->sk);
3306	struct net_device *dev;
3307	int dev_idx, d;
3308
3309	dev_idx = cb->args[0];
3310
3311	d = 0;
3312	rtnl_lock();
3313
3314	cb->seq = macsec_generation;
3315
3316	for_each_netdev(net, dev) {
3317		struct macsec_secy *secy;
3318
3319		if (d < dev_idx)
3320			goto next;
3321
3322		if (!netif_is_macsec(dev))
3323			goto next;
3324
3325		secy = &macsec_priv(dev)->secy;
3326		if (dump_secy(secy, dev, skb, cb) < 0)
3327			goto done;
3328next:
3329		d++;
3330	}
3331
3332done:
3333	rtnl_unlock();
3334	cb->args[0] = d;
3335	return skb->len;
3336}
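/* "ip macsec show" drives this dump. cb->args[0] records how many
 * devices have been walked so a multi-message dump can resume, and
 * cb->seq is set to macsec_generation so genl_dump_check_consistent()
 * can mark messages with NLM_F_DUMP_INTR if a link was added or
 * removed mid-dump.
 */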
3337
3338static const struct genl_small_ops macsec_genl_ops[] = {
3339	{
3340		.cmd = MACSEC_CMD_GET_TXSC,
3341		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3342		.dumpit = macsec_dump_txsc,
3343	},
3344	{
3345		.cmd = MACSEC_CMD_ADD_RXSC,
3346		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3347		.doit = macsec_add_rxsc,
3348		.flags = GENL_ADMIN_PERM,
3349	},
3350	{
3351		.cmd = MACSEC_CMD_DEL_RXSC,
3352		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3353		.doit = macsec_del_rxsc,
3354		.flags = GENL_ADMIN_PERM,
3355	},
3356	{
3357		.cmd = MACSEC_CMD_UPD_RXSC,
3358		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3359		.doit = macsec_upd_rxsc,
3360		.flags = GENL_ADMIN_PERM,
3361	},
3362	{
3363		.cmd = MACSEC_CMD_ADD_TXSA,
3364		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3365		.doit = macsec_add_txsa,
3366		.flags = GENL_ADMIN_PERM,
3367	},
3368	{
3369		.cmd = MACSEC_CMD_DEL_TXSA,
3370		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3371		.doit = macsec_del_txsa,
3372		.flags = GENL_ADMIN_PERM,
3373	},
3374	{
3375		.cmd = MACSEC_CMD_UPD_TXSA,
3376		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3377		.doit = macsec_upd_txsa,
3378		.flags = GENL_ADMIN_PERM,
3379	},
3380	{
3381		.cmd = MACSEC_CMD_ADD_RXSA,
3382		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3383		.doit = macsec_add_rxsa,
3384		.flags = GENL_ADMIN_PERM,
3385	},
3386	{
3387		.cmd = MACSEC_CMD_DEL_RXSA,
3388		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3389		.doit = macsec_del_rxsa,
3390		.flags = GENL_ADMIN_PERM,
3391	},
3392	{
3393		.cmd = MACSEC_CMD_UPD_RXSA,
3394		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3395		.doit = macsec_upd_rxsa,
3396		.flags = GENL_ADMIN_PERM,
3397	},
3398	{
3399		.cmd = MACSEC_CMD_UPD_OFFLOAD,
3400		.validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP,
3401		.doit = macsec_upd_offload,
3402		.flags = GENL_ADMIN_PERM,
3403	},
3404};
3405
3406static struct genl_family macsec_fam __ro_after_init = {
3407	.name		= MACSEC_GENL_NAME,
3408	.hdrsize	= 0,
3409	.version	= MACSEC_GENL_VERSION,
3410	.maxattr	= MACSEC_ATTR_MAX,
3411	.policy		= macsec_genl_policy,
3412	.netnsok	= true,
3413	.module		= THIS_MODULE,
3414	.small_ops	= macsec_genl_ops,
3415	.n_small_ops	= ARRAY_SIZE(macsec_genl_ops),
3416};
3417
3418static netdev_tx_t macsec_start_xmit(struct sk_buff *skb,
3419				     struct net_device *dev)
3420{
3421	struct macsec_dev *macsec = netdev_priv(dev);
3422	struct macsec_secy *secy = &macsec->secy;
3423	struct pcpu_secy_stats *secy_stats;
3424	int ret, len;
3425
3426	if (macsec_is_offloaded(netdev_priv(dev))) {
3427		skb->dev = macsec->real_dev;
3428		return dev_queue_xmit(skb);
3429	}
3430
3431	/* IEEE 802.1AE-2006 10.5 - protectFrames disabled: transmit in the clear */
3432	if (!secy->protect_frames) {
3433		secy_stats = this_cpu_ptr(macsec->stats);
3434		u64_stats_update_begin(&secy_stats->syncp);
3435		secy_stats->stats.OutPktsUntagged++;
3436		u64_stats_update_end(&secy_stats->syncp);
3437		skb->dev = macsec->real_dev;
3438		len = skb->len;
3439		ret = dev_queue_xmit(skb);
3440		count_tx(dev, ret, len);
3441		return ret;
3442	}
3443
3444	if (!secy->operational) {
3445		kfree_skb(skb);
3446		DEV_STATS_INC(dev, tx_dropped);
3447		return NETDEV_TX_OK;
3448	}
3449
3450	len = skb->len;
3451	skb = macsec_encrypt(skb, dev);
3452	if (IS_ERR(skb)) {
3453		if (PTR_ERR(skb) != -EINPROGRESS)
3454			DEV_STATS_INC(dev, tx_dropped);
3455		return NETDEV_TX_OK;
3456	}
3457
3458	macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa);
3459
3460	macsec_encrypt_finish(skb, dev);
3461	ret = dev_queue_xmit(skb);
3462	count_tx(dev, ret, len);
3463	return ret;
3464}
3465
3466#define MACSEC_FEATURES \
3467	(NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST)
3468
3469static int macsec_dev_init(struct net_device *dev)
3470{
3471	struct macsec_dev *macsec = macsec_priv(dev);
3472	struct net_device *real_dev = macsec->real_dev;
3473	int err;
3474
3475	dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
3476	if (!dev->tstats)
3477		return -ENOMEM;
3478
3479	err = gro_cells_init(&macsec->gro_cells, dev);
3480	if (err) {
3481		free_percpu(dev->tstats);
3482		return err;
3483	}
3484
3485	dev->features = real_dev->features & MACSEC_FEATURES;
3486	dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE;
3487
3488	dev->needed_headroom = real_dev->needed_headroom +
3489			       MACSEC_NEEDED_HEADROOM;
3490	dev->needed_tailroom = real_dev->needed_tailroom +
3491			       MACSEC_NEEDED_TAILROOM;
3492
3493	if (is_zero_ether_addr(dev->dev_addr))
3494		eth_hw_addr_inherit(dev, real_dev);
3495	if (is_zero_ether_addr(dev->broadcast))
3496		memcpy(dev->broadcast, real_dev->broadcast, dev->addr_len);
3497
3498	return 0;
3499}
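/* The headroom/tailroom reserved above lets macsec_encrypt() prepend
 * the SecTAG (and optional SCI) and append the ICV without having to
 * reallocate every outgoing skb.
 */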
3500
3501static void macsec_dev_uninit(struct net_device *dev)
3502{
3503	struct macsec_dev *macsec = macsec_priv(dev);
3504
3505	gro_cells_destroy(&macsec->gro_cells);
3506	free_percpu(dev->tstats);
3507}
3508
3509static netdev_features_t macsec_fix_features(struct net_device *dev,
3510					     netdev_features_t features)
3511{
3512	struct macsec_dev *macsec = macsec_priv(dev);
3513	struct net_device *real_dev = macsec->real_dev;
3514
3515	features &= (real_dev->features & MACSEC_FEATURES) |
3516		    NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES;
3517	features |= NETIF_F_LLTX;
3518
3519	return features;
3520}
3521
3522static int macsec_dev_open(struct net_device *dev)
3523{
3524	struct macsec_dev *macsec = macsec_priv(dev);
3525	struct net_device *real_dev = macsec->real_dev;
3526	int err;
3527
3528	err = dev_uc_add(real_dev, dev->dev_addr);
3529	if (err < 0)
3530		return err;
3531
3532	if (dev->flags & IFF_ALLMULTI) {
3533		err = dev_set_allmulti(real_dev, 1);
3534		if (err < 0)
3535			goto del_unicast;
3536	}
3537
3538	if (dev->flags & IFF_PROMISC) {
3539		err = dev_set_promiscuity(real_dev, 1);
3540		if (err < 0)
3541			goto clear_allmulti;
3542	}
3543
3544	/* If h/w offloading is available, propagate to the device */
3545	if (macsec_is_offloaded(macsec)) {
3546		const struct macsec_ops *ops;
3547		struct macsec_context ctx;
3548
3549		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3550		if (!ops) {
3551			err = -EOPNOTSUPP;
3552			goto clear_allmulti;
3553		}
3554
3555		ctx.secy = &macsec->secy;
3556		err = macsec_offload(ops->mdo_dev_open, &ctx);
3557		if (err)
3558			goto clear_allmulti;
3559	}
3560
3561	if (netif_carrier_ok(real_dev))
3562		netif_carrier_on(dev);
3563
3564	return 0;
3565clear_allmulti:
3566	if (dev->flags & IFF_ALLMULTI)
3567		dev_set_allmulti(real_dev, -1);
3568del_unicast:
3569	dev_uc_del(real_dev, dev->dev_addr);
3570	netif_carrier_off(dev);
3571	return err;
3572}
3573
3574static int macsec_dev_stop(struct net_device *dev)
3575{
3576	struct macsec_dev *macsec = macsec_priv(dev);
3577	struct net_device *real_dev = macsec->real_dev;
3578
3579	netif_carrier_off(dev);
3580
3581	/* If h/w offloading is available, propagate to the device */
3582	if (macsec_is_offloaded(macsec)) {
3583		const struct macsec_ops *ops;
3584		struct macsec_context ctx;
3585
3586		ops = macsec_get_ops(macsec, &ctx);
3587		if (ops) {
3588			ctx.secy = &macsec->secy;
3589			macsec_offload(ops->mdo_dev_stop, &ctx);
3590		}
3591	}
3592
3593	dev_mc_unsync(real_dev, dev);
3594	dev_uc_unsync(real_dev, dev);
3595
3596	if (dev->flags & IFF_ALLMULTI)
3597		dev_set_allmulti(real_dev, -1);
3598
3599	if (dev->flags & IFF_PROMISC)
3600		dev_set_promiscuity(real_dev, -1);
3601
3602	dev_uc_del(real_dev, dev->dev_addr);
3603
3604	return 0;
3605}
3606
3607static void macsec_dev_change_rx_flags(struct net_device *dev, int change)
3608{
3609	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3610
3611	if (!(dev->flags & IFF_UP))
3612		return;
3613
3614	if (change & IFF_ALLMULTI)
3615		dev_set_allmulti(real_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
3616
3617	if (change & IFF_PROMISC)
3618		dev_set_promiscuity(real_dev,
3619				    dev->flags & IFF_PROMISC ? 1 : -1);
3620}
3621
3622static void macsec_dev_set_rx_mode(struct net_device *dev)
3623{
3624	struct net_device *real_dev = macsec_priv(dev)->real_dev;
3625
3626	dev_mc_sync(real_dev, dev);
3627	dev_uc_sync(real_dev, dev);
3628}
3629
3630static int macsec_set_mac_address(struct net_device *dev, void *p)
3631{
3632	struct macsec_dev *macsec = macsec_priv(dev);
3633	struct net_device *real_dev = macsec->real_dev;
3634	struct sockaddr *addr = p;
3635	int err;
3636
3637	if (!is_valid_ether_addr(addr->sa_data))
3638		return -EADDRNOTAVAIL;
3639
3640	if (!(dev->flags & IFF_UP))
3641		goto out;
3642
3643	err = dev_uc_add(real_dev, addr->sa_data);
3644	if (err < 0)
3645		return err;
3646
3647	dev_uc_del(real_dev, dev->dev_addr);
3648
3649out:
3650	ether_addr_copy(dev->dev_addr, addr->sa_data);
3651	macsec->secy.sci = dev_to_sci(dev, MACSEC_PORT_ES);
3652
3653	/* If h/w offloading is available, propagate to the device */
3654	if (macsec_is_offloaded(macsec)) {
3655		const struct macsec_ops *ops;
3656		struct macsec_context ctx;
3657
3658		ops = macsec_get_ops(macsec, &ctx);
3659		if (ops) {
3660			ctx.secy = &macsec->secy;
3661			macsec_offload(ops->mdo_upd_secy, &ctx);
3662		}
3663	}
3664
3665	return 0;
3666}
3667
3668static int macsec_change_mtu(struct net_device *dev, int new_mtu)
3669{
3670	struct macsec_dev *macsec = macsec_priv(dev);
3671	unsigned int extra = macsec->secy.icv_len + macsec_extra_len(true);
3672
3673	if (macsec->real_dev->mtu - extra < new_mtu)
3674		return -ERANGE;
3675
3676	dev->mtu = new_mtu;
3677
3678	return 0;
3679}
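/* Worked example: with a 1500-byte parent MTU and the default 16-byte
 * ICV, extra = 16 + macsec_extra_len(true) = 16 + (6-byte SecTAG +
 * 8-byte SCI + 2-byte EtherType) = 32, so the largest MTU accepted on
 * the macsec device is 1468.
 */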
3680
3681static void macsec_get_stats64(struct net_device *dev,
3682			       struct rtnl_link_stats64 *s)
3683{
3684	if (!dev->tstats)
3685		return;
3686
3687	dev_fetch_sw_netstats(s, dev->tstats);
3688
3689	s->rx_dropped = DEV_STATS_READ(dev, rx_dropped);
3690	s->tx_dropped = DEV_STATS_READ(dev, tx_dropped);
3691	s->rx_errors = DEV_STATS_READ(dev, rx_errors);
3692}
3693
3694static int macsec_get_iflink(const struct net_device *dev)
3695{
3696	return macsec_priv(dev)->real_dev->ifindex;
3697}
3698
3699static const struct net_device_ops macsec_netdev_ops = {
3700	.ndo_init		= macsec_dev_init,
3701	.ndo_uninit		= macsec_dev_uninit,
3702	.ndo_open		= macsec_dev_open,
3703	.ndo_stop		= macsec_dev_stop,
3704	.ndo_fix_features	= macsec_fix_features,
3705	.ndo_change_mtu		= macsec_change_mtu,
3706	.ndo_set_rx_mode	= macsec_dev_set_rx_mode,
3707	.ndo_change_rx_flags	= macsec_dev_change_rx_flags,
3708	.ndo_set_mac_address	= macsec_set_mac_address,
3709	.ndo_start_xmit		= macsec_start_xmit,
3710	.ndo_get_stats64	= macsec_get_stats64,
3711	.ndo_get_iflink		= macsec_get_iflink,
3712};
3713
3714static const struct device_type macsec_type = {
3715	.name = "macsec",
3716};
3717
3718static const struct nla_policy macsec_rtnl_policy[IFLA_MACSEC_MAX + 1] = {
3719	[IFLA_MACSEC_SCI] = { .type = NLA_U64 },
3720	[IFLA_MACSEC_PORT] = { .type = NLA_U16 },
3721	[IFLA_MACSEC_ICV_LEN] = { .type = NLA_U8 },
3722	[IFLA_MACSEC_CIPHER_SUITE] = { .type = NLA_U64 },
3723	[IFLA_MACSEC_WINDOW] = { .type = NLA_U32 },
3724	[IFLA_MACSEC_ENCODING_SA] = { .type = NLA_U8 },
3725	[IFLA_MACSEC_ENCRYPT] = { .type = NLA_U8 },
3726	[IFLA_MACSEC_PROTECT] = { .type = NLA_U8 },
3727	[IFLA_MACSEC_INC_SCI] = { .type = NLA_U8 },
3728	[IFLA_MACSEC_ES] = { .type = NLA_U8 },
3729	[IFLA_MACSEC_SCB] = { .type = NLA_U8 },
3730	[IFLA_MACSEC_REPLAY_PROTECT] = { .type = NLA_U8 },
3731	[IFLA_MACSEC_VALIDATION] = { .type = NLA_U8 },
3732	[IFLA_MACSEC_OFFLOAD] = { .type = NLA_U8 },
3733};
3734
3735static void macsec_free_netdev(struct net_device *dev)
3736{
3737	struct macsec_dev *macsec = macsec_priv(dev);
3738
3739	free_percpu(macsec->stats);
3740	free_percpu(macsec->secy.tx_sc.stats);
3742}
3743
3744static void macsec_setup(struct net_device *dev)
3745{
3746	ether_setup(dev);
3747	dev->min_mtu = 0;
3748	dev->max_mtu = ETH_MAX_MTU;
3749	dev->priv_flags |= IFF_NO_QUEUE;
3750	dev->netdev_ops = &macsec_netdev_ops;
3751	dev->needs_free_netdev = true;
3752	dev->priv_destructor = macsec_free_netdev;
3753	SET_NETDEV_DEVTYPE(dev, &macsec_type);
3754
3755	eth_zero_addr(dev->broadcast);
3756}
3757
3758static int macsec_changelink_common(struct net_device *dev,
3759				    struct nlattr *data[])
3760{
3761	struct macsec_secy *secy;
3762	struct macsec_tx_sc *tx_sc;
3763
3764	secy = &macsec_priv(dev)->secy;
3765	tx_sc = &secy->tx_sc;
3766
3767	if (data[IFLA_MACSEC_ENCODING_SA]) {
3768		struct macsec_tx_sa *tx_sa;
3769
3770		tx_sc->encoding_sa = nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]);
3771		tx_sa = rtnl_dereference(tx_sc->sa[tx_sc->encoding_sa]);
3772
3773		secy->operational = tx_sa && tx_sa->active;
3774	}
3775
3776	if (data[IFLA_MACSEC_ENCRYPT])
3777		tx_sc->encrypt = !!nla_get_u8(data[IFLA_MACSEC_ENCRYPT]);
3778
3779	if (data[IFLA_MACSEC_PROTECT])
3780		secy->protect_frames = !!nla_get_u8(data[IFLA_MACSEC_PROTECT]);
3781
3782	if (data[IFLA_MACSEC_INC_SCI])
3783		tx_sc->send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);
3784
3785	if (data[IFLA_MACSEC_ES])
3786		tx_sc->end_station = !!nla_get_u8(data[IFLA_MACSEC_ES]);
3787
3788	if (data[IFLA_MACSEC_SCB])
3789		tx_sc->scb = !!nla_get_u8(data[IFLA_MACSEC_SCB]);
3790
3791	if (data[IFLA_MACSEC_REPLAY_PROTECT])
3792		secy->replay_protect = !!nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT]);
3793
3794	if (data[IFLA_MACSEC_VALIDATION])
3795		secy->validate_frames = nla_get_u8(data[IFLA_MACSEC_VALIDATION]);
3796
3797	if (data[IFLA_MACSEC_CIPHER_SUITE]) {
3798		switch (nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE])) {
3799		case MACSEC_CIPHER_ID_GCM_AES_128:
3800		case MACSEC_DEFAULT_CIPHER_ID:
3801			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3802			secy->xpn = false;
3803			break;
3804		case MACSEC_CIPHER_ID_GCM_AES_256:
3805			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3806			secy->xpn = false;
3807			break;
3808		case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
3809			secy->key_len = MACSEC_GCM_AES_128_SAK_LEN;
3810			secy->xpn = true;
3811			break;
3812		case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
3813			secy->key_len = MACSEC_GCM_AES_256_SAK_LEN;
3814			secy->xpn = true;
3815			break;
3816		default:
3817			return -EINVAL;
3818		}
3819	}
3820
3821	if (data[IFLA_MACSEC_WINDOW]) {
3822		secy->replay_window = nla_get_u32(data[IFLA_MACSEC_WINDOW]);
3823
3824		/* IEEE 802.1AEbw-2013 10.7.8 - maximum replay window
3825		 * for XPN cipher suites */
3826		if (secy->xpn &&
3827		    secy->replay_window > MACSEC_XPN_MAX_REPLAY_WINDOW)
3828			return -EINVAL;
3829	}
3830
3831	return 0;
3832}
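/* Reached via rtnetlink, e.g. "ip link set macsec0 type macsec
 * encrypt off". The cipher-suite switch above only runs at link
 * creation in practice: macsec_changelink() below rejects
 * IFLA_MACSEC_CIPHER_SUITE (along with ICV_LEN, SCI and PORT) on an
 * existing device.
 */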
3833
3834static int macsec_changelink(struct net_device *dev, struct nlattr *tb[],
3835			     struct nlattr *data[],
3836			     struct netlink_ext_ack *extack)
3837{
3838	struct macsec_dev *macsec = macsec_priv(dev);
3839	struct macsec_tx_sc tx_sc;
3840	struct macsec_secy secy;
3841	int ret;
3842
3843	if (!data)
3844		return 0;
3845
3846	if (data[IFLA_MACSEC_CIPHER_SUITE] ||
3847	    data[IFLA_MACSEC_ICV_LEN] ||
3848	    data[IFLA_MACSEC_SCI] ||
3849	    data[IFLA_MACSEC_PORT])
3850		return -EINVAL;
3851
3852	/* Keep a copy of unmodified secy and tx_sc, in case the offload
3853	 * propagation fails, to revert macsec_changelink_common.
3854	 */
3855	memcpy(&secy, &macsec->secy, sizeof(secy));
3856	memcpy(&tx_sc, &macsec->secy.tx_sc, sizeof(tx_sc));
3857
3858	ret = macsec_changelink_common(dev, data);
3859	if (ret)
3860		goto cleanup;
3861
3862	/* If h/w offloading is available, propagate to the device */
3863	if (macsec_is_offloaded(macsec)) {
3864		const struct macsec_ops *ops;
3865		struct macsec_context ctx;
3866
3867		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3868		if (!ops) {
3869			ret = -EOPNOTSUPP;
3870			goto cleanup;
3871		}
3872
3873		ctx.secy = &macsec->secy;
3874		ret = macsec_offload(ops->mdo_upd_secy, &ctx);
3875		if (ret)
3876			goto cleanup;
3877	}
3878
3879	return 0;
3880
3881cleanup:
3882	memcpy(&macsec->secy.tx_sc, &tx_sc, sizeof(tx_sc));
3883	memcpy(&macsec->secy, &secy, sizeof(secy));
3884
3885	return ret;
3886}
3887
3888static void macsec_del_dev(struct macsec_dev *macsec)
3889{
3890	int i;
3891
3892	while (macsec->secy.rx_sc) {
3893		struct macsec_rx_sc *rx_sc = rtnl_dereference(macsec->secy.rx_sc);
3894
3895		rcu_assign_pointer(macsec->secy.rx_sc, rx_sc->next);
3896		free_rx_sc(rx_sc);
3897	}
3898
3899	for (i = 0; i < MACSEC_NUM_AN; i++) {
3900		struct macsec_tx_sa *sa = rtnl_dereference(macsec->secy.tx_sc.sa[i]);
3901
3902		if (sa) {
3903			RCU_INIT_POINTER(macsec->secy.tx_sc.sa[i], NULL);
3904			clear_tx_sa(sa);
3905		}
3906	}
3907}
3908
3909static void macsec_common_dellink(struct net_device *dev, struct list_head *head)
3910{
3911	struct macsec_dev *macsec = macsec_priv(dev);
3912	struct net_device *real_dev = macsec->real_dev;
3913
3914	/* If h/w offloading is available, propagate to the device */
3915	if (macsec_is_offloaded(macsec)) {
3916		const struct macsec_ops *ops;
3917		struct macsec_context ctx;
3918
3919		ops = macsec_get_ops(netdev_priv(dev), &ctx);
3920		if (ops) {
3921			ctx.secy = &macsec->secy;
3922			macsec_offload(ops->mdo_del_secy, &ctx);
3923		}
3924	}
3925
3926	unregister_netdevice_queue(dev, head);
3927	list_del_rcu(&macsec->secys);
3928	macsec_del_dev(macsec);
3929	netdev_upper_dev_unlink(real_dev, dev);
3930
3931	macsec_generation++;
3932}
3933
3934static void macsec_dellink(struct net_device *dev, struct list_head *head)
3935{
3936	struct macsec_dev *macsec = macsec_priv(dev);
3937	struct net_device *real_dev = macsec->real_dev;
3938	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3939
3940	macsec_common_dellink(dev, head);
3941
3942	if (list_empty(&rxd->secys)) {
3943		netdev_rx_handler_unregister(real_dev);
3944		kfree(rxd);
3945	}
3946}
3947
3948static int register_macsec_dev(struct net_device *real_dev,
3949			       struct net_device *dev)
3950{
3951	struct macsec_dev *macsec = macsec_priv(dev);
3952	struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
3953
3954	if (!rxd) {
3955		int err;
3956
3957		rxd = kmalloc(sizeof(*rxd), GFP_KERNEL);
3958		if (!rxd)
3959			return -ENOMEM;
3960
3961		INIT_LIST_HEAD(&rxd->secys);
3962
3963		err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
3964						 rxd);
3965		if (err < 0) {
3966			kfree(rxd);
3967			return err;
3968		}
3969	}
3970
3971	list_add_tail_rcu(&macsec->secys, &rxd->secys);
3972	return 0;
3973}
3974
3975static bool sci_exists(struct net_device *dev, sci_t sci)
3976{
3977	struct macsec_rxh_data *rxd = macsec_data_rtnl(dev);
3978	struct macsec_dev *macsec;
3979
3980	list_for_each_entry(macsec, &rxd->secys, secys) {
3981		if (macsec->secy.sci == sci)
3982			return true;
3983	}
3984
3985	return false;
3986}
3987
static int macsec_add_dev(struct net_device *dev, sci_t sci, u8 icv_len)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	struct macsec_secy *secy = &macsec->secy;

	macsec->stats = netdev_alloc_pcpu_stats(struct pcpu_secy_stats);
	if (!macsec->stats)
		return -ENOMEM;

	secy->tx_sc.stats = netdev_alloc_pcpu_stats(struct pcpu_tx_sc_stats);
	if (!secy->tx_sc.stats) {
		free_percpu(macsec->stats);
		return -ENOMEM;
	}

	if (sci == MACSEC_UNDEF_SCI)
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	secy->netdev = dev;
	secy->operational = true;
	secy->key_len = DEFAULT_SAK_LEN;
	secy->icv_len = icv_len;
	secy->validate_frames = MACSEC_VALIDATE_DEFAULT;
	secy->protect_frames = true;
	secy->replay_protect = false;
	secy->xpn = DEFAULT_XPN;

	secy->sci = sci;
	secy->tx_sc.active = true;
	secy->tx_sc.encoding_sa = DEFAULT_ENCODING_SA;
	secy->tx_sc.encrypt = DEFAULT_ENCRYPT;
	secy->tx_sc.send_sci = DEFAULT_SEND_SCI;
	secy->tx_sc.end_station = false;
	secy->tx_sc.scb = false;

	return 0;
}

static struct lock_class_key macsec_netdev_addr_lock_key;

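/* rtnl_link_ops->newlink. A minimal configuration sketch from user
 * space with iproute2, assuming eth0 as the underlying device (keys and
 * addresses are placeholders):
 *
 *   ip link add link eth0 macsec0 type macsec port 11 encrypt on
 *   ip macsec add macsec0 tx sa 0 pn 1 on key 01 <16-byte-hex-key>
 *   ip macsec add macsec0 rx port 11 address <peer-mac>
 *   ip macsec add macsec0 rx port 11 address <peer-mac> sa 0 pn 1 on \
 *                                           key 00 <16-byte-hex-key>
 *
 * The "type macsec" options map to the IFLA_MACSEC_* attributes parsed
 * here; the "ip macsec" commands talk to the genetlink family
 * registered at module init.
 */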
static int macsec_newlink(struct net *net, struct net_device *dev,
			  struct nlattr *tb[], struct nlattr *data[],
			  struct netlink_ext_ack *extack)
{
	struct macsec_dev *macsec = macsec_priv(dev);
	rx_handler_func_t *rx_handler;
	u8 icv_len = DEFAULT_ICV_LEN;
	struct net_device *real_dev;
	int err, mtu;
	sci_t sci;

	if (!tb[IFLA_LINK])
		return -EINVAL;
	real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
	if (!real_dev)
		return -ENODEV;
	if (real_dev->type != ARPHRD_ETHER)
		return -EINVAL;

	dev->priv_flags |= IFF_MACSEC;

	macsec->real_dev = real_dev;

	if (data && data[IFLA_MACSEC_OFFLOAD])
		macsec->offload = nla_get_offload(data[IFLA_MACSEC_OFFLOAD]);
	else
		/* MACsec offloading is off by default */
		macsec->offload = MACSEC_OFFLOAD_OFF;

	/* Check if the offloading mode is supported by the underlying layers */
	if (macsec->offload != MACSEC_OFFLOAD_OFF &&
	    !macsec_check_offload(macsec->offload, macsec))
		return -EOPNOTSUPP;

	/* send_sci must be true when the transmit SCI is set explicitly */
	if (data && data[IFLA_MACSEC_SCI] && data[IFLA_MACSEC_INC_SCI]) {
		u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]);

		if (!send_sci)
			return -EINVAL;
	}

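	/* Derive the MTU from the underlying device, reserving room for the
	 * ICV and the SecTAG including the optional SCI (14 bytes); e.g.
	 * 1500 - 16 - 14 = 1470 with the default ICV. Clamp to 0 if the
	 * lower MTU is too small.
	 */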
	if (data && data[IFLA_MACSEC_ICV_LEN])
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
	mtu = real_dev->mtu - icv_len - macsec_extra_len(true);
	if (mtu < 0)
		dev->mtu = 0;
	else
		dev->mtu = mtu;

	rx_handler = rtnl_dereference(real_dev->rx_handler);
	if (rx_handler && rx_handler != macsec_handle_frame)
		return -EBUSY;

	err = register_netdevice(dev);
	if (err < 0)
		return err;

	netdev_lockdep_set_classes(dev);
	lockdep_set_class(&dev->addr_list_lock,
			  &macsec_netdev_addr_lock_key);

	err = netdev_upper_dev_link(real_dev, dev, extack);
	if (err < 0)
		goto unregister;

	/* the device must already be registered so that ->init has run
	 * and the MAC address is set
	 */
	if (data && data[IFLA_MACSEC_SCI])
		sci = nla_get_sci(data[IFLA_MACSEC_SCI]);
	else if (data && data[IFLA_MACSEC_PORT])
		sci = dev_to_sci(dev, nla_get_be16(data[IFLA_MACSEC_PORT]));
	else
		sci = dev_to_sci(dev, MACSEC_PORT_ES);

	if (rx_handler && sci_exists(real_dev, sci)) {
		err = -EBUSY;
		goto unlink;
	}

	err = macsec_add_dev(dev, sci, icv_len);
	if (err)
		goto unlink;

	if (data) {
		err = macsec_changelink_common(dev, data);
		if (err)
			goto del_dev;
	}

	/* If h/w offloading is available, propagate to the device */
	if (macsec_is_offloaded(macsec)) {
		const struct macsec_ops *ops;
		struct macsec_context ctx;

		ops = macsec_get_ops(macsec, &ctx);
		if (ops) {
			ctx.secy = &macsec->secy;
			err = macsec_offload(ops->mdo_add_secy, &ctx);
			if (err)
				goto del_dev;
		}
	}

	err = register_macsec_dev(real_dev, dev);
	if (err < 0)
		goto del_dev;

	netif_stacked_transfer_operstate(real_dev, dev);
	linkwatch_fire_event(dev);

	macsec_generation++;

	return 0;

del_dev:
	macsec_del_dev(macsec);
unlink:
	netdev_upper_dev_unlink(real_dev, dev);
unregister:
	unregister_netdevice(dev);
	return err;
}

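/* rtnl_link_ops->validate: check the attributes before any state is
 * touched. A non-default ICV length is probed by allocating a throwaway
 * AEAD transform; the cipher suite is restricted to the GCM-AES
 * variants; mutually exclusive TCI flags (INC_SCI vs. ES vs. SCB) are
 * rejected, as is replay protection without a replay window.
 */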
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[],
				struct netlink_ext_ack *extack)
{
	u64 csid = MACSEC_DEFAULT_CIPHER_ID;
	u8 icv_len = DEFAULT_ICV_LEN;
	int flag;
	bool es, scb, sci;

	if (!data)
		return 0;

	if (data[IFLA_MACSEC_CIPHER_SUITE])
		csid = nla_get_u64(data[IFLA_MACSEC_CIPHER_SUITE]);

	if (data[IFLA_MACSEC_ICV_LEN]) {
		icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
		if (icv_len != DEFAULT_ICV_LEN) {
			char dummy_key[DEFAULT_SAK_LEN] = { 0 };
			struct crypto_aead *dummy_tfm;

			dummy_tfm = macsec_alloc_tfm(dummy_key,
						     DEFAULT_SAK_LEN,
						     icv_len);
			if (IS_ERR(dummy_tfm))
				return PTR_ERR(dummy_tfm);
			crypto_free_aead(dummy_tfm);
		}
	}

	switch (csid) {
	case MACSEC_CIPHER_ID_GCM_AES_128:
	case MACSEC_CIPHER_ID_GCM_AES_256:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_128:
	case MACSEC_CIPHER_ID_GCM_AES_XPN_256:
	case MACSEC_DEFAULT_CIPHER_ID:
		if (icv_len < MACSEC_MIN_ICV_LEN ||
		    icv_len > MACSEC_STD_ICV_LEN)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (data[IFLA_MACSEC_ENCODING_SA]) {
		if (nla_get_u8(data[IFLA_MACSEC_ENCODING_SA]) >= MACSEC_NUM_AN)
			return -EINVAL;
	}

	for (flag = IFLA_MACSEC_ENCODING_SA + 1;
	     flag < IFLA_MACSEC_VALIDATION;
	     flag++) {
		if (data[flag]) {
			if (nla_get_u8(data[flag]) > 1)
				return -EINVAL;
		}
	}

	es  = data[IFLA_MACSEC_ES] ? nla_get_u8(data[IFLA_MACSEC_ES]) : false;
	sci = data[IFLA_MACSEC_INC_SCI] ? nla_get_u8(data[IFLA_MACSEC_INC_SCI]) : false;
	scb = data[IFLA_MACSEC_SCB] ? nla_get_u8(data[IFLA_MACSEC_SCB]) : false;

	if ((sci && (scb || es)) || (scb && es))
		return -EINVAL;

	if (data[IFLA_MACSEC_VALIDATION] &&
	    nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
		return -EINVAL;

	if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
	     nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
	    !data[IFLA_MACSEC_WINDOW])
		return -EINVAL;

	return 0;
}

static struct net *macsec_get_link_net(const struct net_device *dev)
{
	return dev_net(macsec_priv(dev)->real_dev);
}

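/* Worst-case netlink attribute payload for macsec_fill_info(); must be
 * kept in sync with the attributes emitted there.
 */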
static size_t macsec_get_size(const struct net_device *dev)
{
	return  nla_total_size_64bit(8) + /* IFLA_MACSEC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ICV_LEN */
		nla_total_size_64bit(8) + /* IFLA_MACSEC_CIPHER_SUITE */
		nla_total_size(4) + /* IFLA_MACSEC_WINDOW */
		nla_total_size(1) + /* IFLA_MACSEC_ENCODING_SA */
		nla_total_size(1) + /* IFLA_MACSEC_ENCRYPT */
		nla_total_size(1) + /* IFLA_MACSEC_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_INC_SCI */
		nla_total_size(1) + /* IFLA_MACSEC_ES */
		nla_total_size(1) + /* IFLA_MACSEC_SCB */
		nla_total_size(1) + /* IFLA_MACSEC_REPLAY_PROTECT */
		nla_total_size(1) + /* IFLA_MACSEC_VALIDATION */
		0;
}

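/* Report the configuration back to user space. The cipher suite id is
 * reconstructed from the key length and the XPN flag; note that plain
 * GCM-AES-128 is reported as MACSEC_DEFAULT_CIPHER_ID rather than
 * MACSEC_CIPHER_ID_GCM_AES_128, apparently to match what existing user
 * space expects for the default suite.
 */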
static int macsec_fill_info(struct sk_buff *skb,
			    const struct net_device *dev)
{
	struct macsec_secy *secy = &macsec_priv(dev)->secy;
	struct macsec_tx_sc *tx_sc = &secy->tx_sc;
	u64 csid;

	switch (secy->key_len) {
	case MACSEC_GCM_AES_128_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_128 : MACSEC_DEFAULT_CIPHER_ID;
		break;
	case MACSEC_GCM_AES_256_SAK_LEN:
		csid = secy->xpn ? MACSEC_CIPHER_ID_GCM_AES_XPN_256 : MACSEC_CIPHER_ID_GCM_AES_256;
		break;
	default:
		goto nla_put_failure;
	}

	if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci,
			IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
	    nla_put_u64_64bit(skb, IFLA_MACSEC_CIPHER_SUITE,
			      csid, IFLA_MACSEC_PAD) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
	    nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
	    nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
	    nla_put_u8(skb, IFLA_MACSEC_INC_SCI, tx_sc->send_sci) ||
	    nla_put_u8(skb, IFLA_MACSEC_ES, tx_sc->end_station) ||
	    nla_put_u8(skb, IFLA_MACSEC_SCB, tx_sc->scb) ||
	    nla_put_u8(skb, IFLA_MACSEC_REPLAY_PROTECT, secy->replay_protect) ||
	    nla_put_u8(skb, IFLA_MACSEC_VALIDATION, secy->validate_frames) ||
	    0)
		goto nla_put_failure;

	if (secy->replay_protect) {
		if (nla_put_u32(skb, IFLA_MACSEC_WINDOW, secy->replay_window))
			goto nla_put_failure;
	}

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops macsec_link_ops __read_mostly = {
	.kind		= "macsec",
	.priv_size	= sizeof(struct macsec_dev),
	.maxtype	= IFLA_MACSEC_MAX,
	.policy		= macsec_rtnl_policy,
	.setup		= macsec_setup,
	.validate	= macsec_validate_attr,
	.newlink	= macsec_newlink,
	.changelink	= macsec_changelink,
	.dellink	= macsec_dellink,
	.get_size	= macsec_get_size,
	.fill_info	= macsec_fill_info,
	.get_link_net	= macsec_get_link_net,
};

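/* A device is a "MACsec master" if our rx_handler is attached to it,
 * i.e. at least one MACsec device is stacked on top of it.
 */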
static bool is_macsec_master(struct net_device *dev)
{
	return rcu_access_pointer(dev->rx_handler) == macsec_handle_frame;
}

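/* React to events on the underlying device: mirror its operational
 * state to the stacked MACsec devices, tear them all down when it is
 * unregistered, and shrink their MTU when its own MTU drops below what
 * the MACsec overhead requires.
 */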
static int macsec_notify(struct notifier_block *this, unsigned long event,
			 void *ptr)
{
	struct net_device *real_dev = netdev_notifier_info_to_dev(ptr);
	LIST_HEAD(head);

	if (!is_macsec_master(real_dev))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_DOWN:
	case NETDEV_UP:
	case NETDEV_CHANGE: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;

			netif_stacked_transfer_operstate(real_dev, dev);
		}
		break;
	}
	case NETDEV_UNREGISTER: {
		struct macsec_dev *m, *n;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry_safe(m, n, &rxd->secys, secys) {
			macsec_common_dellink(m->secy.netdev, &head);
		}

		netdev_rx_handler_unregister(real_dev);
		kfree(rxd);

		unregister_netdevice_many(&head);
		break;
	}
	case NETDEV_CHANGEMTU: {
		struct macsec_dev *m;
		struct macsec_rxh_data *rxd;

		rxd = macsec_data_rtnl(real_dev);
		list_for_each_entry(m, &rxd->secys, secys) {
			struct net_device *dev = m->secy.netdev;
			unsigned int mtu = real_dev->mtu - (m->secy.icv_len +
							    macsec_extra_len(true));

			if (dev->mtu > mtu)
				dev_set_mtu(dev, mtu);
		}
	}
	}

	return NOTIFY_OK;
}

static struct notifier_block macsec_notifier = {
	.notifier_call = macsec_notify,
};

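/* Register the lower-device notifier, the rtnl link ops, and the
 * generic netlink family, in that order; unwind in reverse on failure.
 */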
static int __init macsec_init(void)
{
	int err;

	pr_info("MACsec IEEE 802.1AE\n");
	err = register_netdevice_notifier(&macsec_notifier);
	if (err)
		return err;

	err = rtnl_link_register(&macsec_link_ops);
	if (err)
		goto notifier;

	err = genl_register_family(&macsec_fam);
	if (err)
		goto rtnl;

	return 0;

rtnl:
	rtnl_link_unregister(&macsec_link_ops);
notifier:
	unregister_netdevice_notifier(&macsec_notifier);
	return err;
}

static void __exit macsec_exit(void)
{
	genl_unregister_family(&macsec_fam);
	rtnl_link_unregister(&macsec_link_ops);
	unregister_netdevice_notifier(&macsec_notifier);
	rcu_barrier();
}

module_init(macsec_init);
module_exit(macsec_exit);

MODULE_ALIAS_RTNL_LINK("macsec");
MODULE_ALIAS_GENL_FAMILY("macsec");

MODULE_DESCRIPTION("MACsec IEEE 802.1AE");
MODULE_LICENSE("GPL v2");