xref: /kernel/linux/linux-5.10/net/xfrm/xfrm_user.c (revision 8c2ecf20)
1// SPDX-License-Identifier: GPL-2.0-only
2/* xfrm_user.c: User interface to configure xfrm engine.
3 *
4 * Copyright (C) 2002 David S. Miller (davem@redhat.com)
5 *
6 * Changes:
7 *	Mitsuru KANDA @USAGI
8 * 	Kazunori MIYAZAWA @USAGI
9 * 	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
10 * 		IPv6 support
11 *
12 */
13
14#include <linux/crypto.h>
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/types.h>
18#include <linux/slab.h>
19#include <linux/socket.h>
20#include <linux/string.h>
21#include <linux/net.h>
22#include <linux/skbuff.h>
23#include <linux/pfkeyv2.h>
24#include <linux/ipsec.h>
25#include <linux/init.h>
26#include <linux/security.h>
27#include <net/sock.h>
28#include <net/xfrm.h>
29#include <net/netlink.h>
30#include <net/ah.h>
31#include <linux/uaccess.h>
32#if IS_ENABLED(CONFIG_IPV6)
33#include <linux/in6.h>
34#endif
35#include <asm/unaligned.h>
36
37static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
38{
39	struct nlattr *rt = attrs[type];
40	struct xfrm_algo *algp;
41
42	if (!rt)
43		return 0;
44
45	algp = nla_data(rt);
46	if (nla_len(rt) < (int)xfrm_alg_len(algp))
47		return -EINVAL;
48
49	switch (type) {
50	case XFRMA_ALG_AUTH:
51	case XFRMA_ALG_CRYPT:
52	case XFRMA_ALG_COMP:
53		break;
54
55	default:
56		return -EINVAL;
57	}
58
59	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
60	return 0;
61}
62
63static int verify_auth_trunc(struct nlattr **attrs)
64{
65	struct nlattr *rt = attrs[XFRMA_ALG_AUTH_TRUNC];
66	struct xfrm_algo_auth *algp;
67
68	if (!rt)
69		return 0;
70
71	algp = nla_data(rt);
72	if (nla_len(rt) < (int)xfrm_alg_auth_len(algp))
73		return -EINVAL;
74
75	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
76	return 0;
77}
78
79static int verify_aead(struct nlattr **attrs)
80{
81	struct nlattr *rt = attrs[XFRMA_ALG_AEAD];
82	struct xfrm_algo_aead *algp;
83
84	if (!rt)
85		return 0;
86
87	algp = nla_data(rt);
88	if (nla_len(rt) < (int)aead_len(algp))
89		return -EINVAL;
90
91	algp->alg_name[sizeof(algp->alg_name) - 1] = '\0';
92	return 0;
93}
94
95static void verify_one_addr(struct nlattr **attrs, enum xfrm_attr_type_t type,
96			   xfrm_address_t **addrp)
97{
98	struct nlattr *rt = attrs[type];
99
100	if (rt && addrp)
101		*addrp = nla_data(rt);
102}
103
104static inline int verify_sec_ctx_len(struct nlattr **attrs)
105{
106	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
107	struct xfrm_user_sec_ctx *uctx;
108
109	if (!rt)
110		return 0;
111
112	uctx = nla_data(rt);
113	if (uctx->len > nla_len(rt) ||
114	    uctx->len != (sizeof(struct xfrm_user_sec_ctx) + uctx->ctx_len))
115		return -EINVAL;
116
117	return 0;
118}
119
/* Validate the XFRMA_REPLAY_ESN_VAL attribute against the SA being
 * created.  An ESN replay attribute is mandatory when XFRM_STATE_ESN is
 * set, and is only permitted for ESP/AH with the legacy replay_window
 * left at zero.
 */
static inline int verify_replay(struct xfrm_usersa_info *p,
				struct nlattr **attrs)
{
	struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
	struct xfrm_replay_state_esn *rs;

	if (!rt)
		return (p->flags & XFRM_STATE_ESN) ? -EINVAL : 0;

	rs = nla_data(rt);

	/* Bound the bitmap length before it is used in size arithmetic. */
	if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
		return -EINVAL;

	/* The attribute must either carry the full structure including
	 * the bitmap, or exactly the bitmap-less header.
	 */
	if (nla_len(rt) < (int)xfrm_replay_state_esn_len(rs) &&
	    nla_len(rt) != sizeof(*rs))
		return -EINVAL;

	/* As only ESP and AH support ESN feature. */
	if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
		return -EINVAL;

	/* ESN and the legacy replay window are mutually exclusive. */
	if (p->replay_window != 0)
		return -EINVAL;

	return 0;
}
147
/* Validate a new-SA request (struct xfrm_usersa_info plus attributes)
 * before any state is allocated: address family support, selector
 * prefix lengths, the attribute combination allowed for the chosen
 * protocol, the individual algorithm/context attributes, and the mode.
 * Returns 0 if acceptable, otherwise a negative errno.
 */
static int verify_newsa_info(struct xfrm_usersa_info *p,
			     struct nlattr **attrs)
{
	int err;

	err = -EINVAL;
	switch (p->family) {
	case AF_INET:
		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	switch (p->sel.family) {
	case AF_UNSPEC:
		break;

	case AF_INET:
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
			goto out;

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
			goto out;

		break;
#else
		err = -EAFNOSUPPORT;
		goto out;
#endif

	default:
		goto out;
	}

	err = -EINVAL;
	switch (p->id.proto) {
	case IPPROTO_AH:
		/* AH requires an auth algorithm and takes neither
		 * encryption, compression nor TFC padding.
		 */
		if ((!attrs[XFRMA_ALG_AUTH]	&&
		     !attrs[XFRMA_ALG_AUTH_TRUNC]) ||
		    attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_ALG_COMP]	||
		    attrs[XFRMA_TFCPAD])
			goto out;
		break;

	case IPPROTO_ESP:
		/* ESP takes either an AEAD algorithm or an auth/crypt
		 * combination, never both; TFC padding only in tunnel mode.
		 */
		if (attrs[XFRMA_ALG_COMP])
			goto out;
		if (!attrs[XFRMA_ALG_AUTH] &&
		    !attrs[XFRMA_ALG_AUTH_TRUNC] &&
		    !attrs[XFRMA_ALG_CRYPT] &&
		    !attrs[XFRMA_ALG_AEAD])
			goto out;
		if ((attrs[XFRMA_ALG_AUTH] ||
		     attrs[XFRMA_ALG_AUTH_TRUNC] ||
		     attrs[XFRMA_ALG_CRYPT]) &&
		    attrs[XFRMA_ALG_AEAD])
			goto out;
		if (attrs[XFRMA_TFCPAD] &&
		    p->mode != XFRM_MODE_TUNNEL)
			goto out;
		break;

	case IPPROTO_COMP:
		/* IPcomp: only a compression algorithm, and the SPI must
		 * fit in 16 bits (it is used as a CPI).
		 */
		if (!attrs[XFRMA_ALG_COMP]	||
		    attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_AUTH]	||
		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_TFCPAD]		||
		    (ntohl(p->id.spi) >= 0x10000))
			goto out;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case IPPROTO_DSTOPTS:
	case IPPROTO_ROUTING:
		/* Mobile IPv6 route optimization: only a care-of address
		 * attribute is meaningful here.
		 */
		if (attrs[XFRMA_ALG_COMP]	||
		    attrs[XFRMA_ALG_AUTH]	||
		    attrs[XFRMA_ALG_AUTH_TRUNC]	||
		    attrs[XFRMA_ALG_AEAD]	||
		    attrs[XFRMA_ALG_CRYPT]	||
		    attrs[XFRMA_ENCAP]		||
		    attrs[XFRMA_SEC_CTX]	||
		    attrs[XFRMA_TFCPAD]		||
		    !attrs[XFRMA_COADDR])
			goto out;
		break;
#endif

	default:
		goto out;
	}

	if ((err = verify_aead(attrs)))
		goto out;
	if ((err = verify_auth_trunc(attrs)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_AUTH)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_CRYPT)))
		goto out;
	if ((err = verify_one_alg(attrs, XFRMA_ALG_COMP)))
		goto out;
	if ((err = verify_sec_ctx_len(attrs)))
		goto out;
	if ((err = verify_replay(p, attrs)))
		goto out;

	err = -EINVAL;
	switch (p->mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_TUNNEL:
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_BEET:
		break;

	default:
		goto out;
	}

	err = 0;

	/* A mapping-change timer only makes sense for encapsulated SAs. */
	if (attrs[XFRMA_MTIMER_THRESH])
		if (!attrs[XFRMA_ENCAP])
			err = -EINVAL;

out:
	return err;
}
292
293static int attach_one_algo(struct xfrm_algo **algpp, u8 *props,
294			   struct xfrm_algo_desc *(*get_byname)(const char *, int),
295			   struct nlattr *rta)
296{
297	struct xfrm_algo *p, *ualg;
298	struct xfrm_algo_desc *algo;
299
300	if (!rta)
301		return 0;
302
303	ualg = nla_data(rta);
304
305	algo = get_byname(ualg->alg_name, 1);
306	if (!algo)
307		return -ENOSYS;
308	*props = algo->desc.sadb_alg_id;
309
310	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
311	if (!p)
312		return -ENOMEM;
313
314	strcpy(p->alg_name, algo->name);
315	*algpp = p;
316	return 0;
317}
318
319static int attach_crypt(struct xfrm_state *x, struct nlattr *rta)
320{
321	struct xfrm_algo *p, *ualg;
322	struct xfrm_algo_desc *algo;
323
324	if (!rta)
325		return 0;
326
327	ualg = nla_data(rta);
328
329	algo = xfrm_ealg_get_byname(ualg->alg_name, 1);
330	if (!algo)
331		return -ENOSYS;
332	x->props.ealgo = algo->desc.sadb_alg_id;
333
334	p = kmemdup(ualg, xfrm_alg_len(ualg), GFP_KERNEL);
335	if (!p)
336		return -ENOMEM;
337
338	strcpy(p->alg_name, algo->name);
339	x->ealg = p;
340	x->geniv = algo->uinfo.encr.geniv;
341	return 0;
342}
343
/* Attach a legacy XFRMA_ALG_AUTH algorithm to an SA.  Userspace passes
 * a struct xfrm_algo, which carries no truncation length, so a struct
 * xfrm_algo_auth is allocated and the ICV truncation length is taken
 * from the algorithm descriptor's default.  *algpp takes ownership of
 * the allocation.
 */
static int attach_auth(struct xfrm_algo_auth **algpp, u8 *props,
		       struct nlattr *rta)
{
	struct xfrm_algo *ualg;
	struct xfrm_algo_auth *p;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	*props = algo->desc.sadb_alg_id;

	/* alg_key_len is in bits; round up to whole bytes for the copy. */
	p = kmalloc(sizeof(*p) + (ualg->alg_key_len + 7) / 8, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	p->alg_key_len = ualg->alg_key_len;
	p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;
	memcpy(p->alg_key, ualg->alg_key, (ualg->alg_key_len + 7) / 8);

	*algpp = p;
	return 0;
}
373
/* Attach an XFRMA_ALG_AUTH_TRUNC algorithm to an SA.  The caller's
 * requested truncation length may not exceed the algorithm's full ICV
 * width; a zero request falls back to the descriptor's default.
 * *algpp takes ownership of the duplicated algorithm.
 */
static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
			     struct nlattr *rta)
{
	struct xfrm_algo_auth *p, *ualg;
	struct xfrm_algo_desc *algo;

	if (!rta)
		return 0;

	ualg = nla_data(rta);

	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
	if (!algo)
		return -ENOSYS;
	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
		return -EINVAL;
	*props = algo->desc.sadb_alg_id;

	p = kmemdup(ualg, xfrm_alg_auth_len(ualg), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	strcpy(p->alg_name, algo->name);
	/* Zero means "use the algorithm's default truncation". */
	if (!p->alg_trunc_len)
		p->alg_trunc_len = algo->uinfo.auth.icv_truncbits;

	*algpp = p;
	return 0;
}
403
404static int attach_aead(struct xfrm_state *x, struct nlattr *rta)
405{
406	struct xfrm_algo_aead *p, *ualg;
407	struct xfrm_algo_desc *algo;
408
409	if (!rta)
410		return 0;
411
412	ualg = nla_data(rta);
413
414	algo = xfrm_aead_get_byname(ualg->alg_name, ualg->alg_icv_len, 1);
415	if (!algo)
416		return -ENOSYS;
417	x->props.ealgo = algo->desc.sadb_alg_id;
418
419	p = kmemdup(ualg, aead_len(ualg), GFP_KERNEL);
420	if (!p)
421		return -ENOMEM;
422
423	strcpy(p->alg_name, algo->name);
424	x->aead = p;
425	x->geniv = algo->uinfo.aead.geniv;
426	return 0;
427}
428
/* Verify that a user-supplied ESN replay attribute matches the layout
 * of the state's existing replay_esn: same bitmap length, attribute
 * payload covering the whole structure, and a replay window that fits
 * inside the bitmap.
 */
static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_esn,
					 struct nlattr *rp)
{
	struct xfrm_replay_state_esn *up;
	unsigned int ulen;

	if (!replay_esn || !rp)
		return 0;

	up = nla_data(rp);
	ulen = xfrm_replay_state_esn_len(up);

	/* Check the overall length and the internal bitmap length to avoid
	 * potential overflow. */
	if (nla_len(rp) < (int)ulen ||
	    xfrm_replay_state_esn_len(replay_esn) != ulen ||
	    replay_esn->bmp_len != up->bmp_len)
		return -EINVAL;

	/* The window may not extend past the end of the bitmap. */
	if (up->replay_window > up->bmp_len * sizeof(__u32) * 8)
		return -EINVAL;

	return 0;
}
453
/* Allocate the state's ESN replay structures from the user attribute.
 * klen is the full kernel-side size implied by the user's bmp_len;
 * ulen is how much user data is actually copied: the full structure if
 * the attribute carries it, otherwise just the bitmap-less header (the
 * remainder stays zeroed from kzalloc).  Both *replay_esn and
 * *preplay_esn receive independent copies owned by the caller.
 */
static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn,
				       struct xfrm_replay_state_esn **preplay_esn,
				       struct nlattr *rta)
{
	struct xfrm_replay_state_esn *p, *pp, *up;
	unsigned int klen, ulen;

	if (!rta)
		return 0;

	up = nla_data(rta);
	klen = xfrm_replay_state_esn_len(up);
	ulen = nla_len(rta) >= (int)klen ? klen : sizeof(*up);

	p = kzalloc(klen, GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	pp = kzalloc(klen, GFP_KERNEL);
	if (!pp) {
		kfree(p);
		return -ENOMEM;
	}

	memcpy(p, up, ulen);
	memcpy(pp, up, ulen);

	*replay_esn = p;
	*preplay_esn = pp;

	return 0;
}
486
487static inline unsigned int xfrm_user_sec_ctx_size(struct xfrm_sec_ctx *xfrm_ctx)
488{
489	unsigned int len = 0;
490
491	if (xfrm_ctx) {
492		len += sizeof(struct xfrm_user_sec_ctx);
493		len += xfrm_ctx->ctx_len;
494	}
495	return len;
496}
497
/* Seed a freshly allocated state from the userspace SA description.
 * Only the plain fields are copied here; algorithms, replay state etc.
 * are attached separately by the caller.
 */
static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memcpy(&x->id, &p->id, sizeof(x->id));
	memcpy(&x->sel, &p->sel, sizeof(x->sel));
	memcpy(&x->lft, &p->lft, sizeof(x->lft));
	x->props.mode = p->mode;
	/* Clamp the legacy replay window to the size of the bitmap. */
	x->props.replay_window = min_t(unsigned int, p->replay_window,
					sizeof(x->replay.bitmap) * 8);
	x->props.reqid = p->reqid;
	x->props.family = p->family;
	memcpy(&x->props.saddr, &p->saddr, sizeof(x->props.saddr));
	x->props.flags = p->flags;

	/* An unset selector family defaults to the SA family unless the
	 * caller explicitly asked for AF_UNSPEC matching. */
	if (!x->sel.family && !(p->flags & XFRM_STATE_AF_UNSPEC))
		x->sel.family = p->family;
}
514
/*
 * someday when pfkey also has support, we could have the code
 * somehow made shareable and move it to xfrm_state.c - JHS
 *
 */
/* Apply async-event (AE) attributes to an existing state: replay
 * counters (ESN or legacy), current lifetime, event timers and
 * thresholds.  update_esn gates whether XFRMA_REPLAY_ESN_VAL is
 * honoured; callers pass 0 when creating a new state.
 */
static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
				  int update_esn)
{
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
	struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH];

	/* ESN replay state: copy into both the live and the preserved
	 * (pre-notification) copies. */
	if (re && x->replay_esn && x->preplay_esn) {
		struct xfrm_replay_state_esn *replay_esn;
		replay_esn = nla_data(re);
		memcpy(x->replay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
		memcpy(x->preplay_esn, replay_esn,
		       xfrm_replay_state_esn_len(replay_esn));
	}

	/* Legacy (non-ESN) replay state. */
	if (rp) {
		struct xfrm_replay_state *replay;
		replay = nla_data(rp);
		memcpy(&x->replay, replay, sizeof(*replay));
		memcpy(&x->preplay, replay, sizeof(*replay));
	}

	/* Current lifetime counters. */
	if (lt) {
		struct xfrm_lifetime_cur *ltime;
		ltime = nla_data(lt);
		x->curlft.bytes = ltime->bytes;
		x->curlft.packets = ltime->packets;
		x->curlft.add_time = ltime->add_time;
		x->curlft.use_time = ltime->use_time;
	}

	if (et)
		x->replay_maxage = nla_get_u32(et);

	if (rt)
		x->replay_maxdiff = nla_get_u32(rt);

	if (mt)
		x->mapping_maxage = nla_get_u32(mt);
}
564
565static void xfrm_smark_init(struct nlattr **attrs, struct xfrm_mark *m)
566{
567	if (attrs[XFRMA_SET_MARK]) {
568		m->v = nla_get_u32(attrs[XFRMA_SET_MARK]);
569		if (attrs[XFRMA_SET_MARK_MASK])
570			m->m = nla_get_u32(attrs[XFRMA_SET_MARK_MASK]);
571		else
572			m->m = 0xffffffff;
573	} else {
574		m->v = m->m = 0;
575	}
576}
577
/* Build a fully initialized xfrm_state from a validated new-SA request.
 * Returns the new state (caller owns the reference) or NULL with *errp
 * set.  On any failure after allocation the half-built state is marked
 * dead and released through xfrm_state_put().
 */
static struct xfrm_state *xfrm_state_construct(struct net *net,
					       struct xfrm_usersa_info *p,
					       struct nlattr **attrs,
					       int *errp)
{
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto error_no_put;

	copy_from_user_state(x, p);

	if (attrs[XFRMA_ENCAP]) {
		x->encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
				   sizeof(*x->encap), GFP_KERNEL);
		if (x->encap == NULL)
			goto error;
	}

	if (attrs[XFRMA_COADDR]) {
		x->coaddr = kmemdup(nla_data(attrs[XFRMA_COADDR]),
				    sizeof(*x->coaddr), GFP_KERNEL);
		if (x->coaddr == NULL)
			goto error;
	}

	if (attrs[XFRMA_SA_EXTRA_FLAGS])
		x->props.extra_flags = nla_get_u32(attrs[XFRMA_SA_EXTRA_FLAGS]);

	if ((err = attach_aead(x, attrs[XFRMA_ALG_AEAD])))
		goto error;
	if ((err = attach_auth_trunc(&x->aalg, &x->props.aalgo,
				     attrs[XFRMA_ALG_AUTH_TRUNC])))
		goto error;
	/* Fall back to the legacy auth attribute only if AUTH_TRUNC did
	 * not already set an algorithm. */
	if (!x->props.aalgo) {
		if ((err = attach_auth(&x->aalg, &x->props.aalgo,
				       attrs[XFRMA_ALG_AUTH])))
			goto error;
	}
	if ((err = attach_crypt(x, attrs[XFRMA_ALG_CRYPT])))
		goto error;
	if ((err = attach_one_algo(&x->calg, &x->props.calgo,
				   xfrm_calg_get_byname,
				   attrs[XFRMA_ALG_COMP])))
		goto error;

	if (attrs[XFRMA_TFCPAD])
		x->tfcpad = nla_get_u32(attrs[XFRMA_TFCPAD]);

	xfrm_mark_get(attrs, &x->mark);

	xfrm_smark_init(attrs, &x->props.smark);

	if (attrs[XFRMA_IF_ID])
		x->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV]);
	if (err)
		goto error;

	if (attrs[XFRMA_SEC_CTX]) {
		err = security_xfrm_state_alloc(x,
						nla_data(attrs[XFRMA_SEC_CTX]));
		if (err)
			goto error;
	}

	if ((err = xfrm_alloc_replay_state_esn(&x->replay_esn, &x->preplay_esn,
					       attrs[XFRMA_REPLAY_ESN_VAL])))
		goto error;

	x->km.seq = p->seq;
	x->replay_maxdiff = net->xfrm.sysctl_aevent_rseqth;
	/* sysctl_xfrm_aevent_etime is in 100ms units */
	x->replay_maxage = (net->xfrm.sysctl_aevent_etime*HZ)/XFRM_AE_ETH_M;

	if ((err = xfrm_init_replay(x)))
		goto error;

	/* override default values from above */
	xfrm_update_ae_params(x, attrs, 0);

	/* configure the hardware if offload is requested */
	if (attrs[XFRMA_OFFLOAD_DEV]) {
		err = xfrm_dev_state_add(net, x,
					 nla_data(attrs[XFRMA_OFFLOAD_DEV]));
		if (err)
			goto error;
	}

	return x;

error:
	x->km.state = XFRM_STATE_DEAD;
	xfrm_state_put(x);
error_no_put:
	*errp = err;
	return NULL;
}
678
/* XFRM_MSG_NEWSA / XFRM_MSG_UPDSA handler: validate, construct and
 * insert (or update) a state, then notify listeners via km_state_notify.
 */
static int xfrm_add_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_info *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	int err;
	struct km_event c;

	err = verify_newsa_info(p, attrs);
	if (err)
		return err;

	x = xfrm_state_construct(net, p, attrs, &err);
	if (!x)
		return err;

	/* Extra hold so the state survives until the notification below
	 * even if insertion fails and drops the construction ref. */
	xfrm_state_hold(x);
	if (nlh->nlmsg_type == XFRM_MSG_NEWSA)
		err = xfrm_state_add(x);
	else
		err = xfrm_state_update(x);

	xfrm_audit_state_add(x, err ? 0 : 1, true);

	if (err < 0) {
		x->km.state = XFRM_STATE_DEAD;
		xfrm_dev_state_delete(x);
		__xfrm_state_put(x);
		goto out;
	}

	if (x->km.state == XFRM_STATE_VOID)
		x->km.state = XFRM_STATE_VALID;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;

	km_state_notify(x, &c);
out:
	xfrm_state_put(x);
	return err;
}
723
/* Find the state named by a struct xfrm_usersa_id.  Protocols that are
 * matched by xfrm_id_proto_match() are looked up by (daddr, spi, proto);
 * the rest require an XFRMA_SRCADDR attribute and are looked up by
 * address pair.  Returns a held state, or NULL with *errp set.
 */
static struct xfrm_state *xfrm_user_state_lookup(struct net *net,
						 struct xfrm_usersa_id *p,
						 struct nlattr **attrs,
						 int *errp)
{
	struct xfrm_state *x = NULL;
	struct xfrm_mark m;
	int err;
	u32 mark = xfrm_mark_get(attrs, &m);

	if (xfrm_id_proto_match(p->proto, IPSEC_PROTO_ANY)) {
		err = -ESRCH;
		x = xfrm_state_lookup(net, mark, &p->daddr, p->spi, p->proto, p->family);
	} else {
		xfrm_address_t *saddr = NULL;

		verify_one_addr(attrs, XFRMA_SRCADDR, &saddr);
		if (!saddr) {
			err = -EINVAL;
			goto out;
		}

		err = -ESRCH;
		x = xfrm_state_lookup_byaddr(net, mark,
					     &p->daddr, saddr,
					     p->proto, p->family);
	}

 out:
	if (!x && errp)
		*errp = err;
	return x;
}
757
/* XFRM_MSG_DELSA handler: look up the target state, check security
 * policy and that the state is not kernel-internal, delete it, and
 * notify listeners.  The deletion is audited on all paths reaching
 * "out", success or failure.
 */
static int xfrm_del_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err = -ESRCH;
	struct km_event c;
	struct xfrm_usersa_id *p = nlmsg_data(nlh);

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		return err;

	if ((err = security_xfrm_state_delete(x)) != 0)
		goto out;

	/* States created by the kernel itself may not be deleted from
	 * userspace. */
	if (xfrm_state_kern(x)) {
		err = -EPERM;
		goto out;
	}

	err = xfrm_state_delete(x);

	if (err < 0)
		goto out;

	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.event = nlh->nlmsg_type;
	km_state_notify(x, &c);

out:
	xfrm_audit_state_delete(x, err ? 0 : 1, true);
	xfrm_state_put(x);
	return err;
}
794
/* Fill a userspace struct xfrm_usersa_info from a state.  The memset
 * clears padding so no uninitialized kernel bytes leak to userspace;
 * put_unaligned is used for the stats because p may sit unaligned
 * inside a netlink message.
 */
static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
{
	memset(p, 0, sizeof(*p));
	memcpy(&p->id, &x->id, sizeof(p->id));
	memcpy(&p->sel, &x->sel, sizeof(p->sel));
	memcpy(&p->lft, &x->lft, sizeof(p->lft));
	memcpy(&p->curlft, &x->curlft, sizeof(p->curlft));
	put_unaligned(x->stats.replay_window, &p->stats.replay_window);
	put_unaligned(x->stats.replay, &p->stats.replay);
	put_unaligned(x->stats.integrity_failed, &p->stats.integrity_failed);
	memcpy(&p->saddr, &x->props.saddr, sizeof(p->saddr));
	p->mode = x->props.mode;
	p->replay_window = x->props.replay_window;
	p->reqid = x->props.reqid;
	p->family = x->props.family;
	p->flags = x->props.flags;
	p->seq = x->km.seq;
}
813
/* Context passed through the state/policy walkers while dumping. */
struct xfrm_dump_info {
	struct sk_buff *in_skb;		/* request skb (source of portid) */
	struct sk_buff *out_skb;	/* skb being filled with the dump */
	u32 nlmsg_seq;			/* sequence number for emitted messages */
	u16 nlmsg_flags;		/* e.g. NLM_F_MULTI for multipart dumps */
};
820
/* Emit the state's security context as an XFRMA_SEC_CTX attribute:
 * a struct xfrm_user_sec_ctx header followed by the context string.
 */
static int copy_sec_ctx(struct xfrm_sec_ctx *s, struct sk_buff *skb)
{
	struct xfrm_user_sec_ctx *uctx;
	struct nlattr *attr;
	int ctx_size = sizeof(*uctx) + s->ctx_len;

	attr = nla_reserve(skb, XFRMA_SEC_CTX, ctx_size);
	if (attr == NULL)
		return -EMSGSIZE;

	uctx = nla_data(attr);
	uctx->exttype = XFRMA_SEC_CTX;
	uctx->len = ctx_size;
	uctx->ctx_doi = s->ctx_doi;
	uctx->ctx_alg = s->ctx_alg;
	uctx->ctx_len = s->ctx_len;
	/* Context string lives immediately after the fixed header. */
	memcpy(uctx + 1, s->ctx_str, s->ctx_len);

	return 0;
}
841
842static int copy_user_offload(struct xfrm_state_offload *xso, struct sk_buff *skb)
843{
844	struct xfrm_user_offload *xuo;
845	struct nlattr *attr;
846
847	attr = nla_reserve(skb, XFRMA_OFFLOAD_DEV, sizeof(*xuo));
848	if (attr == NULL)
849		return -EMSGSIZE;
850
851	xuo = nla_data(attr);
852	memset(xuo, 0, sizeof(*xuo));
853	xuo->ifindex = xso->dev->ifindex;
854	xuo->flags = xso->flags;
855
856	return 0;
857}
858
/* Emit the auth algorithm in the legacy XFRMA_ALG_AUTH format (struct
 * xfrm_algo, no truncation length).  strncpy is deliberate here: it
 * zero-fills the rest of the fixed-size alg_name buffer, so no stale
 * kernel bytes are exposed to userspace.
 */
static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
{
	struct xfrm_algo *algo;
	struct nlattr *nla;

	nla = nla_reserve(skb, XFRMA_ALG_AUTH,
			  sizeof(*algo) + (auth->alg_key_len + 7) / 8);
	if (!nla)
		return -EMSGSIZE;

	algo = nla_data(nla);
	strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
	/* alg_key_len is in bits; the copy is rounded up to whole bytes. */
	memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
	algo->alg_key_len = auth->alg_key_len;

	return 0;
}
876
877static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
878{
879	int ret = 0;
880
881	if (m->v | m->m) {
882		ret = nla_put_u32(skb, XFRMA_SET_MARK, m->v);
883		if (!ret)
884			ret = nla_put_u32(skb, XFRMA_SET_MARK_MASK, m->m);
885	}
886	return ret;
887}
888
/* Don't change this without updating xfrm_sa_len! */
/* Fill the fixed xfrm_usersa_info plus every optional attribute present
 * on the state into skb.  Returns 0 or a negative errno (typically
 * -EMSGSIZE) with the message left for the caller to cancel.
 */
static int copy_to_user_state_extra(struct xfrm_state *x,
				    struct xfrm_usersa_info *p,
				    struct sk_buff *skb)
{
	int ret = 0;

	copy_to_user_state(x, p);

	if (x->props.extra_flags) {
		ret = nla_put_u32(skb, XFRMA_SA_EXTRA_FLAGS,
				  x->props.extra_flags);
		if (ret)
			goto out;
	}

	if (x->coaddr) {
		ret = nla_put(skb, XFRMA_COADDR, sizeof(*x->coaddr), x->coaddr);
		if (ret)
			goto out;
	}
	if (x->lastused) {
		ret = nla_put_u64_64bit(skb, XFRMA_LASTUSED, x->lastused,
					XFRMA_PAD);
		if (ret)
			goto out;
	}
	if (x->aead) {
		ret = nla_put(skb, XFRMA_ALG_AEAD, aead_len(x->aead), x->aead);
		if (ret)
			goto out;
	}
	/* The auth algorithm is exported twice: legacy format via
	 * copy_to_user_auth() and full format via ALG_AUTH_TRUNC. */
	if (x->aalg) {
		ret = copy_to_user_auth(x->aalg, skb);
		if (!ret)
			ret = nla_put(skb, XFRMA_ALG_AUTH_TRUNC,
				      xfrm_alg_auth_len(x->aalg), x->aalg);
		if (ret)
			goto out;
	}
	if (x->ealg) {
		ret = nla_put(skb, XFRMA_ALG_CRYPT, xfrm_alg_len(x->ealg), x->ealg);
		if (ret)
			goto out;
	}
	if (x->calg) {
		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
		if (ret)
			goto out;
	}
	if (x->encap) {
		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
		if (ret)
			goto out;
	}
	if (x->tfcpad) {
		ret = nla_put_u32(skb, XFRMA_TFCPAD, x->tfcpad);
		if (ret)
			goto out;
	}
	ret = xfrm_mark_put(skb, &x->mark);
	if (ret)
		goto out;

	ret = xfrm_smark_put(skb, &x->props.smark);
	if (ret)
		goto out;

	/* Exactly one of the two replay representations is emitted. */
	if (x->replay_esn)
		ret = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	else
		ret = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	if (ret)
		goto out;
	if(x->xso.dev)
		ret = copy_user_offload(&x->xso, skb);
	if (ret)
		goto out;
	if (x->if_id) {
		ret = nla_put_u32(skb, XFRMA_IF_ID, x->if_id);
		if (ret)
			goto out;
	}
	if (x->security) {
		ret = copy_sec_ctx(x->security, skb);
		if (ret)
			goto out;
	}
	if (x->mapping_maxage)
		ret = nla_put_u32(skb, XFRMA_MTIMER_THRESH, x->mapping_maxage);
out:
	return ret;
}
985
/* xfrm_state_walk() callback: emit one state as an XFRM_MSG_NEWSA
 * message into the dump skb, running the compat translator if one is
 * registered.  The count parameter is unused here.
 */
static int dump_one_state(struct xfrm_state *x, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct xfrm_usersa_info *p;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWSA, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);

	err = copy_to_user_state_extra(x, p, skb);
	if (err) {
		/* Roll back the partially written message. */
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
1023
1024static int xfrm_dump_sa_done(struct netlink_callback *cb)
1025{
1026	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
1027	struct sock *sk = cb->skb->sk;
1028	struct net *net = sock_net(sk);
1029
1030	if (cb->args[0])
1031		xfrm_state_walk_done(walk, net);
1032	return 0;
1033}
1034
/* XFRM_MSG_GETSA dump handler.  The walker state is kept in cb->args
 * (cb->args[0] flags that the walk has been initialized); the first
 * invocation parses the optional address filter and protocol from the
 * request before starting the walk.
 */
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state_walk *walk = (struct xfrm_state_walk *) &cb->args[1];
	struct xfrm_dump_info info;

	/* The walker must fit into the callback scratch space after
	 * args[0]. */
	BUILD_BUG_ON(sizeof(struct xfrm_state_walk) >
		     sizeof(cb->args) - sizeof(cb->args[0]));

	info.in_skb = cb->skb;
	info.out_skb = skb;
	info.nlmsg_seq = cb->nlh->nlmsg_seq;
	info.nlmsg_flags = NLM_F_MULTI;

	if (!cb->args[0]) {
		struct nlattr *attrs[XFRMA_MAX+1];
		struct xfrm_address_filter *filter = NULL;
		u8 proto = 0;
		int err;

		err = nlmsg_parse_deprecated(cb->nlh, 0, attrs, XFRMA_MAX,
					     xfrma_policy, cb->extack);
		if (err < 0)
			return err;

		if (attrs[XFRMA_ADDRESS_FILTER]) {
			filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]),
					 sizeof(*filter), GFP_KERNEL);
			if (filter == NULL)
				return -ENOMEM;

			/* see addr_match(), (prefix length >> 5) << 2
			 * will be used to compare xfrm_address_t
			 */
			if (filter->splen > (sizeof(xfrm_address_t) << 3) ||
			    filter->dplen > (sizeof(xfrm_address_t) << 3)) {
				kfree(filter);
				return -EINVAL;
			}
		}

		if (attrs[XFRMA_PROTO])
			proto = nla_get_u8(attrs[XFRMA_PROTO]);

		/* The walker takes ownership of the filter. */
		xfrm_state_walk_init(walk, proto, filter);
		cb->args[0] = 1;
	}

	(void) xfrm_state_walk(net, walk, dump_one_state, &info);

	return skb->len;
}
1087
/* Build a fresh skb containing a single-state XFRM_MSG_NEWSA reply for
 * unicast back to the requester.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
					  struct xfrm_state *x, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_state(x, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
1112
/* A wrapper for nlmsg_multicast() checking that nlsk is still available.
 * Must be called with RCU read lock.
 * Consumes skb on every path (freed on error).  If a compat translator
 * is registered, it gets a chance to attach a translated copy first.
 */
static inline int xfrm_nlmsg_multicast(struct net *net, struct sk_buff *skb,
				       u32 pid, unsigned int group)
{
	struct sock *nlsk = rcu_dereference(net->xfrm.nlsk);
	struct xfrm_translator *xtr;

	if (!nlsk) {
		kfree_skb(skb);
		return -EPIPE;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		int err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(skb);
			return err;
		}
	}

	return nlmsg_multicast(nlsk, skb, pid, group, GFP_ATOMIC);
}
1140
1141static inline unsigned int xfrm_spdinfo_msgsize(void)
1142{
1143	return NLMSG_ALIGN(4)
1144	       + nla_total_size(sizeof(struct xfrmu_spdinfo))
1145	       + nla_total_size(sizeof(struct xfrmu_spdhinfo))
1146	       + nla_total_size(sizeof(struct xfrmu_spdhthresh))
1147	       + nla_total_size(sizeof(struct xfrmu_spdhthresh));
1148}
1149
/* Build an XFRM_MSG_NEWSPDINFO reply: SPD counters, hash statistics and
 * the per-family hash thresholds, the latter read consistently under
 * the policy_hthresh seqlock.
 */
static int build_spdinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_spdinfo si;
	struct xfrmu_spdinfo spc;
	struct xfrmu_spdhinfo sph;
	struct xfrmu_spdhthresh spt4, spt6;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;
	unsigned lseq;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSPDINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_spd_getinfo(net, &si);
	spc.incnt = si.incnt;
	spc.outcnt = si.outcnt;
	spc.fwdcnt = si.fwdcnt;
	spc.inscnt = si.inscnt;
	spc.outscnt = si.outscnt;
	spc.fwdscnt = si.fwdscnt;
	sph.spdhcnt = si.spdhcnt;
	sph.spdhmcnt = si.spdhmcnt;

	/* Retry until the thresholds are read without a concurrent
	 * writer having changed them. */
	do {
		lseq = read_seqbegin(&net->xfrm.policy_hthresh.lock);

		spt4.lbits = net->xfrm.policy_hthresh.lbits4;
		spt4.rbits = net->xfrm.policy_hthresh.rbits4;
		spt6.lbits = net->xfrm.policy_hthresh.lbits6;
		spt6.rbits = net->xfrm.policy_hthresh.rbits6;
	} while (read_seqretry(&net->xfrm.policy_hthresh.lock, lseq));

	err = nla_put(skb, XFRMA_SPD_INFO, sizeof(spc), &spc);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_HINFO, sizeof(sph), &sph);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV4_HTHRESH, sizeof(spt4), &spt4);
	if (!err)
		err = nla_put(skb, XFRMA_SPD_IPV6_HTHRESH, sizeof(spt6), &spt6);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1202
/* XFRM_MSG_NEWSPDINFO handler: validate and apply new policy-hash
 * prefix-length thresholds for IPv4 and/or IPv6, then trigger a hash
 * table rebuild.  Both attributes are validated before either is
 * applied, under the policy_hthresh write seqlock.
 */
static int xfrm_set_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrmu_spdhthresh *thresh4 = NULL;
	struct xfrmu_spdhthresh *thresh6 = NULL;

	/* selector prefixlen thresholds to hash policies */
	if (attrs[XFRMA_SPD_IPV4_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV4_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh4))
			return -EINVAL;
		thresh4 = nla_data(rta);
		if (thresh4->lbits > 32 || thresh4->rbits > 32)
			return -EINVAL;
	}
	if (attrs[XFRMA_SPD_IPV6_HTHRESH]) {
		struct nlattr *rta = attrs[XFRMA_SPD_IPV6_HTHRESH];

		if (nla_len(rta) < sizeof(*thresh6))
			return -EINVAL;
		thresh6 = nla_data(rta);
		if (thresh6->lbits > 128 || thresh6->rbits > 128)
			return -EINVAL;
	}

	if (thresh4 || thresh6) {
		write_seqlock(&net->xfrm.policy_hthresh.lock);
		if (thresh4) {
			net->xfrm.policy_hthresh.lbits4 = thresh4->lbits;
			net->xfrm.policy_hthresh.rbits4 = thresh4->rbits;
		}
		if (thresh6) {
			net->xfrm.policy_hthresh.lbits6 = thresh6->lbits;
			net->xfrm.policy_hthresh.rbits6 = thresh6->rbits;
		}
		write_sequnlock(&net->xfrm.policy_hthresh.lock);

		xfrm_policy_hash_rebuild(net);
	}

	return 0;
}
1247
/* XFRM_MSG_GETSPDINFO handler: build the SPD info reply and unicast it
 * back to the requester.  The BUG_ON holds because the reply skb is
 * sized via xfrm_spdinfo_msgsize(), so build_spdinfo() cannot overrun.
 */
static int xfrm_get_spdinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_spdinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_spdinfo(r_skb, net, sportid, seq, *flags);
	BUG_ON(err < 0);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
1267
1268static inline unsigned int xfrm_sadinfo_msgsize(void)
1269{
1270	return NLMSG_ALIGN(4)
1271	       + nla_total_size(sizeof(struct xfrmu_sadhinfo))
1272	       + nla_total_size(4); /* XFRMA_SAD_CNT */
1273}
1274
/* Fill @skb with an XFRM_MSG_NEWSADINFO message: the caller's flags word as
 * the fixed payload, plus the SA count and hash-table info attributes.
 * Returns 0 or a negative errno; on error the partial message is cancelled.
 */
static int build_sadinfo(struct sk_buff *skb, struct net *net,
			 u32 portid, u32 seq, u32 flags)
{
	struct xfrmk_sadinfo si;
	struct xfrmu_sadhinfo sh;
	struct nlmsghdr *nlh;
	int err;
	u32 *f;

	nlh = nlmsg_put(skb, portid, seq, XFRM_MSG_NEWSADINFO, sizeof(u32), 0);
	if (nlh == NULL) /* shouldn't really happen ... */
		return -EMSGSIZE;

	f = nlmsg_data(nlh);
	*f = flags;
	xfrm_sad_getinfo(net, &si);

	/* hash-table geometry reported via XFRMA_SAD_HINFO */
	sh.sadhmcnt = si.sadhmcnt;
	sh.sadhcnt = si.sadhcnt;

	err = nla_put_u32(skb, XFRMA_SAD_CNT, si.sadcnt);
	if (!err)
		err = nla_put(skb, XFRMA_SAD_HINFO, sizeof(sh), &sh);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
1306
/* XFRM_MSG_GETSADINFO handler: build an SAD info reply and unicast it back.
 * build_sadinfo() cannot fail since the skb was sized for the full message.
 */
static int xfrm_get_sadinfo(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct sk_buff *r_skb;
	u32 *flags = nlmsg_data(nlh);
	u32 sportid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;
	int err;

	r_skb = nlmsg_new(xfrm_sadinfo_msgsize(), GFP_ATOMIC);
	if (r_skb == NULL)
		return -ENOMEM;

	err = build_sadinfo(r_skb, net, sportid, seq, *flags);
	BUG_ON(err < 0);

	/* nlmsg_unicast() consumes r_skb even on failure */
	return nlmsg_unicast(net->xfrm.nlsk, r_skb, sportid);
}
1326
/* XFRM_MSG_GETSA handler: look up the state identified by the usersa_id in
 * the message, serialize it to a new skb and unicast it to the requester.
 */
static int xfrm_get_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_usersa_id *p = nlmsg_data(nlh);
	struct xfrm_state *x;
	struct sk_buff *resp_skb;
	int err = -ESRCH;

	x = xfrm_user_state_lookup(net, p, attrs, &err);
	if (x == NULL)
		goto out_noput;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
	} else {
		err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);
	}
	/* drop the reference taken by the lookup */
	xfrm_state_put(x);
out_noput:
	return err;
}
1350
/* XFRM_MSG_ALLOCSPI handler: find (or look up by acquire sequence number) the
 * larval state matching the request, allocate an SPI for it within the
 * requested [min, max] range, and unicast the resulting state back to the
 * caller.  A compat translator, when registered, is given a chance to
 * translate the request as well.
 */
static int xfrm_alloc_userspi(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct xfrm_userspi_info *p;
	struct xfrm_translator *xtr;
	struct sk_buff *resp_skb;
	xfrm_address_t *daddr;
	int family;
	int err;
	u32 mark;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	err = verify_spi_info(p->info.id.proto, p->min, p->max);
	if (err)
		goto out_noput;

	family = p->info.family;
	daddr = &p->info.id.daddr;

	x = NULL;

	mark = xfrm_mark_get(attrs, &m);

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	if (p->info.seq) {
		/* prefer the acquire state the kernel created for this seq,
		 * but only if its destination actually matches the request */
		x = xfrm_find_acq_byseq(net, mark, p->info.seq);
		if (x && !xfrm_addr_equal(&x->id.daddr, daddr, family)) {
			xfrm_state_put(x);
			x = NULL;
		}
	}

	if (!x)
		x = xfrm_find_acq(net, &m, p->info.mode, p->info.reqid,
				  if_id, p->info.id.proto, daddr,
				  &p->info.saddr, 1,
				  family);
	err = -ENOENT;
	if (x == NULL)
		goto out_noput;

	err = xfrm_alloc_spi(x, p->min, p->max);
	if (err)
		goto out;

	resp_skb = xfrm_state_netlink(skb, x, nlh->nlmsg_seq);
	if (IS_ERR(resp_skb)) {
		err = PTR_ERR(resp_skb);
		goto out;
	}

	xtr = xfrm_get_translator();
	if (xtr) {
		/* NOTE(review): translates the original request skb, not the
		 * reply — presumably for 32/64-bit compat bookkeeping */
		err = xtr->alloc_compat(skb, nlmsg_hdr(skb));

		xfrm_put_translator(xtr);
		if (err) {
			kfree_skb(resp_skb);
			goto out;
		}
	}

	err = nlmsg_unicast(net->xfrm.nlsk, resp_skb, NETLINK_CB(skb).portid);

out:
	xfrm_state_put(x);
out_noput:
	return err;
}
1426
1427static int verify_policy_dir(u8 dir)
1428{
1429	switch (dir) {
1430	case XFRM_POLICY_IN:
1431	case XFRM_POLICY_OUT:
1432	case XFRM_POLICY_FWD:
1433		break;
1434
1435	default:
1436		return -EINVAL;
1437	}
1438
1439	return 0;
1440}
1441
1442static int verify_policy_type(u8 type)
1443{
1444	switch (type) {
1445	case XFRM_POLICY_TYPE_MAIN:
1446#ifdef CONFIG_XFRM_SUB_POLICY
1447	case XFRM_POLICY_TYPE_SUB:
1448#endif
1449		break;
1450
1451	default:
1452		return -EINVAL;
1453	}
1454
1455	return 0;
1456}
1457
/* Validate a userspace policy description before constructing a kernel
 * policy from it: share mode, action, selector family/prefix lengths,
 * direction, and consistency between an explicit index and the direction.
 * Returns 0 if acceptable, -EINVAL/-EAFNOSUPPORT otherwise.
 */
static int verify_newpolicy_info(struct xfrm_userpolicy_info *p)
{
	int ret;

	switch (p->share) {
	case XFRM_SHARE_ANY:
	case XFRM_SHARE_SESSION:
	case XFRM_SHARE_USER:
	case XFRM_SHARE_UNIQUE:
		break;

	default:
		return -EINVAL;
	}

	switch (p->action) {
	case XFRM_POLICY_ALLOW:
	case XFRM_POLICY_BLOCK:
		break;

	default:
		return -EINVAL;
	}

	switch (p->sel.family) {
	case AF_INET:
		/* prefix lengths are bounded by the address width */
		if (p->sel.prefixlen_d > 32 || p->sel.prefixlen_s > 32)
			return -EINVAL;

		break;

	case AF_INET6:
#if IS_ENABLED(CONFIG_IPV6)
		if (p->sel.prefixlen_d > 128 || p->sel.prefixlen_s > 128)
			return -EINVAL;

		break;
#else
		return  -EAFNOSUPPORT;
#endif

	default:
		return -EINVAL;
	}

	ret = verify_policy_dir(p->dir);
	if (ret)
		return ret;
	/* a non-zero index encodes its direction; it must agree with p->dir */
	if (p->index && (xfrm_policy_id2dir(p->index) != p->dir))
		return -EINVAL;

	return 0;
}
1511
1512static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs)
1513{
1514	struct nlattr *rt = attrs[XFRMA_SEC_CTX];
1515	struct xfrm_user_sec_ctx *uctx;
1516
1517	if (!rt)
1518		return 0;
1519
1520	uctx = nla_data(rt);
1521	return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
1522}
1523
1524static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
1525			   int nr)
1526{
1527	int i;
1528
1529	xp->xfrm_nr = nr;
1530	for (i = 0; i < nr; i++, ut++) {
1531		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
1532
1533		memcpy(&t->id, &ut->id, sizeof(struct xfrm_id));
1534		memcpy(&t->saddr, &ut->saddr,
1535		       sizeof(xfrm_address_t));
1536		t->reqid = ut->reqid;
1537		t->mode = ut->mode;
1538		t->share = ut->share;
1539		t->optional = ut->optional;
1540		t->aalgos = ut->aalgos;
1541		t->ealgos = ut->ealgos;
1542		t->calgos = ut->calgos;
1543		/* If all masks are ~0, then we allow all algorithms. */
1544		t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
1545		t->encap_family = ut->family;
1546	}
1547}
1548
/* Validate a user template array of length @nr against the policy family:
 * bounds the depth, defaults a zero template family to the policy family,
 * requires family continuity for non-tunnel modes, and checks mode, family
 * and protocol values.  May rewrite ut[i].family in place.
 */
static int validate_tmpl(int nr, struct xfrm_user_tmpl *ut, u16 family)
{
	u16 prev_family;
	int i;

	if (nr > XFRM_MAX_DEPTH)
		return -EINVAL;

	prev_family = family;

	for (i = 0; i < nr; i++) {
		/* We never validated the ut->family value, so many
		 * applications simply leave it at zero.  The check was
		 * never made and ut->family was ignored because all
		 * templates could be assumed to have the same family as
		 * the policy itself.  Now that we will have ipv4-in-ipv6
		 * and ipv6-in-ipv4 tunnels, this is no longer true.
		 */
		if (!ut[i].family)
			ut[i].family = family;

		switch (ut[i].mode) {
		case XFRM_MODE_TUNNEL:
		case XFRM_MODE_BEET:
			/* tunnel/BEET may switch address families */
			break;
		default:
			if (ut[i].family != prev_family)
				return -EINVAL;
			break;
		}
		if (ut[i].mode >= XFRM_MODE_MAX)
			return -EINVAL;

		prev_family = ut[i].family;

		switch (ut[i].family) {
		case AF_INET:
			break;
#if IS_ENABLED(CONFIG_IPV6)
		case AF_INET6:
			break;
#endif
		default:
			return -EINVAL;
		}

		if (!xfrm_id_proto_valid(ut[i].id.proto))
			return -EINVAL;
	}

	return 0;
}
1601
1602static int copy_from_user_tmpl(struct xfrm_policy *pol, struct nlattr **attrs)
1603{
1604	struct nlattr *rt = attrs[XFRMA_TMPL];
1605
1606	if (!rt) {
1607		pol->xfrm_nr = 0;
1608	} else {
1609		struct xfrm_user_tmpl *utmpl = nla_data(rt);
1610		int nr = nla_len(rt) / sizeof(*utmpl);
1611		int err;
1612
1613		err = validate_tmpl(nr, utmpl, pol->family);
1614		if (err)
1615			return err;
1616
1617		copy_templates(pol, utmpl, nr);
1618	}
1619	return 0;
1620}
1621
1622static int copy_from_user_policy_type(u8 *tp, struct nlattr **attrs)
1623{
1624	struct nlattr *rt = attrs[XFRMA_POLICY_TYPE];
1625	struct xfrm_userpolicy_type *upt;
1626	u8 type = XFRM_POLICY_TYPE_MAIN;
1627	int err;
1628
1629	if (rt) {
1630		upt = nla_data(rt);
1631		type = upt->type;
1632	}
1633
1634	err = verify_policy_type(type);
1635	if (err)
1636		return err;
1637
1638	*tp = type;
1639	return 0;
1640}
1641
1642static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p)
1643{
1644	xp->priority = p->priority;
1645	xp->index = p->index;
1646	memcpy(&xp->selector, &p->sel, sizeof(xp->selector));
1647	memcpy(&xp->lft, &p->lft, sizeof(xp->lft));
1648	xp->action = p->action;
1649	xp->flags = p->flags;
1650	xp->family = p->sel.family;
1651	/* XXX xp->share = p->share; */
1652}
1653
1654static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
1655{
1656	memset(p, 0, sizeof(*p));
1657	memcpy(&p->sel, &xp->selector, sizeof(p->sel));
1658	memcpy(&p->lft, &xp->lft, sizeof(p->lft));
1659	memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
1660	p->priority = xp->priority;
1661	p->index = xp->index;
1662	p->sel.family = xp->family;
1663	p->dir = dir;
1664	p->action = xp->action;
1665	p->flags = xp->flags;
1666	p->share = XFRM_SHARE_ANY; /* XXX xp->share */
1667}
1668
/* Allocate and fully initialize an xfrm_policy from a userspace policy info
 * structure plus its netlink attributes (type, templates, security context,
 * mark, interface id).  Returns the new policy, or NULL with *errp set.
 */
static struct xfrm_policy *xfrm_policy_construct(struct net *net, struct xfrm_userpolicy_info *p, struct nlattr **attrs, int *errp)
{
	struct xfrm_policy *xp = xfrm_policy_alloc(net, GFP_KERNEL);
	int err;

	if (!xp) {
		*errp = -ENOMEM;
		return NULL;
	}

	copy_from_user_policy(xp, p);

	err = copy_from_user_policy_type(&xp->type, attrs);
	if (err)
		goto error;

	if (!(err = copy_from_user_tmpl(xp, attrs)))
		err = copy_from_user_sec_ctx(xp, attrs);
	if (err)
		goto error;

	xfrm_mark_get(attrs, &xp->mark);

	if (attrs[XFRMA_IF_ID])
		xp->if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	return xp;
 error:
	*errp = err;
	/* mark dead before destroy so the policy is never considered live */
	xp->walk.dead = 1;
	xfrm_policy_destroy(xp);
	return NULL;
}
1702
/* XFRM_MSG_NEWPOLICY / XFRM_MSG_UPDPOLICY handler: validate the request,
 * construct a policy, insert it (exclusively for NEWPOLICY), audit the
 * result and notify policy listeners.
 */
static int xfrm_add_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_info *p = nlmsg_data(nlh);
	struct xfrm_policy *xp;
	struct km_event c;
	int err;
	int excl;

	err = verify_newpolicy_info(p);
	if (err)
		return err;
	err = verify_sec_ctx_len(attrs);
	if (err)
		return err;

	xp = xfrm_policy_construct(net, p, attrs, &err);
	if (!xp)
		return err;

	/* shouldn't excl be based on nlh flags??
	 * Aha! this is anti-netlink really i.e  more pfkey derived
	 * in netlink excl is a flag and you wouldnt need
	 * a type XFRM_MSG_UPDPOLICY - JHS */
	excl = nlh->nlmsg_type == XFRM_MSG_NEWPOLICY;
	err = xfrm_policy_insert(p->dir, xp, excl);
	xfrm_audit_policy_add(xp, err ? 0 : 1, true);

	if (err) {
		/* not yet visible to anyone else: free directly */
		security_xfrm_policy_free(xp->security);
		kfree(xp);
		return err;
	}

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	km_policy_notify(xp, p->dir, &c);

	/* drop the creation reference; the policy lists hold their own */
	xfrm_pol_put(xp);

	return 0;
}
1747
1748static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
1749{
1750	struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
1751	int i;
1752
1753	if (xp->xfrm_nr == 0)
1754		return 0;
1755
1756	for (i = 0; i < xp->xfrm_nr; i++) {
1757		struct xfrm_user_tmpl *up = &vec[i];
1758		struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
1759
1760		memset(up, 0, sizeof(*up));
1761		memcpy(&up->id, &kp->id, sizeof(up->id));
1762		up->family = kp->encap_family;
1763		memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
1764		up->reqid = kp->reqid;
1765		up->mode = kp->mode;
1766		up->share = kp->share;
1767		up->optional = kp->optional;
1768		up->aalgos = kp->aalgos;
1769		up->ealgos = kp->ealgos;
1770		up->calgos = kp->calgos;
1771	}
1772
1773	return nla_put(skb, XFRMA_TMPL,
1774		       sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr, vec);
1775}
1776
1777static inline int copy_to_user_state_sec_ctx(struct xfrm_state *x, struct sk_buff *skb)
1778{
1779	if (x->security) {
1780		return copy_sec_ctx(x->security, skb);
1781	}
1782	return 0;
1783}
1784
1785static inline int copy_to_user_sec_ctx(struct xfrm_policy *xp, struct sk_buff *skb)
1786{
1787	if (xp->security)
1788		return copy_sec_ctx(xp->security, skb);
1789	return 0;
1790}
/* Attribute space needed for XFRMA_POLICY_TYPE (zero without sub-policies). */
static inline unsigned int userpolicy_type_attrsize(void)
{
#ifdef CONFIG_XFRM_SUB_POLICY
	return nla_total_size(sizeof(struct xfrm_userpolicy_type));
#else
	return 0;
#endif
}
1799
#ifdef CONFIG_XFRM_SUB_POLICY
/* Emit the XFRMA_POLICY_TYPE attribute for @type. */
static int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	struct xfrm_userpolicy_type upt;

	/* Sadly there are two holes in struct xfrm_userpolicy_type */
	memset(&upt, 0, sizeof(upt));
	upt.type = type;

	return nla_put(skb, XFRMA_POLICY_TYPE, sizeof(upt), &upt);
}

#else
/* Without sub-policy support the attribute is never emitted. */
static inline int copy_to_user_policy_type(u8 type, struct sk_buff *skb)
{
	return 0;
}
#endif
1818
/* Serialize one policy as an XFRM_MSG_NEWPOLICY message into sp->out_skb.
 * Used both as the xfrm_policy_walk() callback for dumps and directly by
 * xfrm_policy_netlink().  Returns 0 or a negative errno; on error the
 * partially built message is cancelled.
 */
static int dump_one_policy(struct xfrm_policy *xp, int dir, int count, void *ptr)
{
	struct xfrm_dump_info *sp = ptr;
	struct xfrm_userpolicy_info *p;
	struct sk_buff *in_skb = sp->in_skb;
	struct sk_buff *skb = sp->out_skb;
	struct xfrm_translator *xtr;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, NETLINK_CB(in_skb).portid, sp->nlmsg_seq,
			XFRM_MSG_NEWPOLICY, sizeof(*p), sp->nlmsg_flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	p = nlmsg_data(nlh);
	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		nlmsg_cancel(skb, nlh);
		return err;
	}
	nlmsg_end(skb, nlh);

	/* give a registered compat translator a shot at the finished message */
	xtr = xfrm_get_translator();
	if (xtr) {
		err = xtr->alloc_compat(skb, nlh);

		xfrm_put_translator(xtr);
		if (err) {
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}

	return 0;
}
1864
/* Netlink dump teardown: release the policy walker stored in cb->args. */
static int xfrm_dump_policy_done(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
	struct net *net = sock_net(cb->skb->sk);

	xfrm_policy_walk_done(walk, net);
	return 0;
}
1873
/* Netlink dump setup: initialize a policy walker in-place in cb->args.
 * The BUILD_BUG_ON guarantees the walker actually fits there.
 */
static int xfrm_dump_policy_start(struct netlink_callback *cb)
{
	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;

	BUILD_BUG_ON(sizeof(*walk) > sizeof(cb->args));

	xfrm_policy_walk_init(walk, XFRM_POLICY_TYPE_ANY);
	return 0;
}
1883
1884static int xfrm_dump_policy(struct sk_buff *skb, struct netlink_callback *cb)
1885{
1886	struct net *net = sock_net(skb->sk);
1887	struct xfrm_policy_walk *walk = (struct xfrm_policy_walk *)cb->args;
1888	struct xfrm_dump_info info;
1889
1890	info.in_skb = cb->skb;
1891	info.out_skb = skb;
1892	info.nlmsg_seq = cb->nlh->nlmsg_seq;
1893	info.nlmsg_flags = NLM_F_MULTI;
1894
1895	(void) xfrm_policy_walk(net, walk, dump_one_policy, &info);
1896
1897	return skb->len;
1898}
1899
/* Serialize a single policy into a freshly allocated skb, reusing the dump
 * path.  Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
					  struct xfrm_policy *xp,
					  int dir, u32 seq)
{
	struct xfrm_dump_info info;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	info.in_skb = in_skb;
	info.out_skb = skb;
	info.nlmsg_seq = seq;
	info.nlmsg_flags = 0;

	err = dump_one_policy(xp, dir, 0, &info);
	if (err) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
1925
/* Broadcast the current per-netns default policies (in/fwd/out) to the
 * XFRMNLGRP_POLICY multicast group as an XFRM_MSG_GETDEFAULT message.
 */
static int xfrm_notify_userpolicy(struct net *net)
{
	struct xfrm_userpolicy_default *up;
	int len = NLMSG_ALIGN(sizeof(*up));
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_GETDEFAULT, sizeof(*up), 0);
	if (nlh == NULL) {
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	up = nlmsg_data(nlh);
	up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];

	nlmsg_end(skb, nlh);

	rcu_read_lock();
	err = xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
	rcu_read_unlock();

	return err;
}
1957
1958static bool xfrm_userpolicy_is_valid(__u8 policy)
1959{
1960	return policy == XFRM_USERPOLICY_BLOCK ||
1961	       policy == XFRM_USERPOLICY_ACCEPT;
1962}
1963
/* XFRM_MSG_SETDEFAULT handler: update any of the per-direction default
 * policies that carry a valid value, bump the route generation so cached
 * routes are revalidated, and notify listeners.
 */
static int xfrm_set_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *up = nlmsg_data(nlh);

	/* fields with other values are left untouched, not rejected */
	if (xfrm_userpolicy_is_valid(up->in))
		net->xfrm.policy_default[XFRM_POLICY_IN] = up->in;

	if (xfrm_userpolicy_is_valid(up->fwd))
		net->xfrm.policy_default[XFRM_POLICY_FWD] = up->fwd;

	if (xfrm_userpolicy_is_valid(up->out))
		net->xfrm.policy_default[XFRM_POLICY_OUT] = up->out;

	rt_genid_bump_all(net);

	xfrm_notify_userpolicy(net);
	return 0;
}
1984
/* XFRM_MSG_GETDEFAULT handler: report the current per-direction default
 * policies back to the requester via unicast.
 */
static int xfrm_get_default(struct sk_buff *skb, struct nlmsghdr *nlh,
			    struct nlattr **attrs)
{
	struct sk_buff *r_skb;
	struct nlmsghdr *r_nlh;
	struct net *net = sock_net(skb->sk);
	struct xfrm_userpolicy_default *r_up;
	int len = NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_default));
	u32 portid = NETLINK_CB(skb).portid;
	u32 seq = nlh->nlmsg_seq;

	r_skb = nlmsg_new(len, GFP_ATOMIC);
	if (!r_skb)
		return -ENOMEM;

	r_nlh = nlmsg_put(r_skb, portid, seq, XFRM_MSG_GETDEFAULT, sizeof(*r_up), 0);
	if (!r_nlh) {
		kfree_skb(r_skb);
		return -EMSGSIZE;
	}

	r_up = nlmsg_data(r_nlh);
	r_up->in = net->xfrm.policy_default[XFRM_POLICY_IN];
	r_up->fwd = net->xfrm.policy_default[XFRM_POLICY_FWD];
	r_up->out = net->xfrm.policy_default[XFRM_POLICY_OUT];
	nlmsg_end(r_skb, r_nlh);

	return nlmsg_unicast(net->xfrm.nlsk, r_skb, portid);
}
2014
/* XFRM_MSG_GETPOLICY / XFRM_MSG_DELPOLICY handler: look up a policy either
 * by index or by (selector, security context), optionally deleting it.
 * For GET the policy is serialized back to the caller; for DEL the deletion
 * is audited and policy listeners are notified.
 */
static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_userpolicy_id *p;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;
	struct km_event c;
	int delete;
	struct xfrm_mark m;
	u32 if_id = 0;

	p = nlmsg_data(nlh);
	delete = nlh->nlmsg_type == XFRM_MSG_DELPOLICY;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir,
				      p->index, delete, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		/* temporary ctx only used as a lookup key */
		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, delete, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (!delete) {
		struct sk_buff *resp_skb;

		resp_skb = xfrm_policy_netlink(skb, xp, p->dir, nlh->nlmsg_seq);
		if (IS_ERR(resp_skb)) {
			err = PTR_ERR(resp_skb);
		} else {
			err = nlmsg_unicast(net->xfrm.nlsk, resp_skb,
					    NETLINK_CB(skb).portid);
		}
	} else {
		/* err reflects the delete performed inside the lookup above */
		xfrm_audit_policy_delete(xp, err ? 0 : 1, true);

		if (err != 0)
			goto out;

		c.data.byid = p->index;
		c.event = nlh->nlmsg_type;
		c.seq = nlh->nlmsg_seq;
		c.portid = nlh->nlmsg_pid;
		km_policy_notify(xp, p->dir, &c);
	}

out:
	xfrm_pol_put(xp);
	return err;
}
2097
/* XFRM_MSG_FLUSHSA handler: delete all SAs of the given protocol and
 * broadcast the flush event.  An already-empty table is not an error.
 */
static int xfrm_flush_sa(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	struct xfrm_usersa_flush *p = nlmsg_data(nlh);
	int err;

	err = xfrm_state_flush(net, p->proto, true, false);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}
	c.data.proto = p->proto;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_state_notify(NULL, &c);

	return 0;
}
2121
2122static inline unsigned int xfrm_aevent_msgsize(struct xfrm_state *x)
2123{
2124	unsigned int replay_size = x->replay_esn ?
2125			      xfrm_replay_state_esn_len(x->replay_esn) :
2126			      sizeof(struct xfrm_replay_state);
2127
2128	return NLMSG_ALIGN(sizeof(struct xfrm_aevent_id))
2129	       + nla_total_size(replay_size)
2130	       + nla_total_size_64bit(sizeof(struct xfrm_lifetime_cur))
2131	       + nla_total_size(sizeof(struct xfrm_mark))
2132	       + nla_total_size(4) /* XFRM_AE_RTHR */
2133	       + nla_total_size(4); /* XFRM_AE_ETHR */
2134}
2135
/* Build an XFRM_MSG_NEWAE message for state @x: identity, replay state
 * (ESN or legacy), current lifetime, optional replay/expiry thresholds
 * (depending on c->data.aevent flags), mark and interface id.
 */
static int build_aevent(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_aevent_id *id;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_NEWAE, sizeof(*id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	id = nlmsg_data(nlh);
	memset(&id->sa_id, 0, sizeof(id->sa_id));
	memcpy(&id->sa_id.daddr, &x->id.daddr, sizeof(x->id.daddr));
	id->sa_id.spi = x->id.spi;
	id->sa_id.family = x->props.family;
	id->sa_id.proto = x->id.proto;
	memcpy(&id->saddr, &x->props.saddr, sizeof(x->props.saddr));
	id->reqid = x->props.reqid;
	id->flags = c->data.aevent;

	if (x->replay_esn) {
		err = nla_put(skb, XFRMA_REPLAY_ESN_VAL,
			      xfrm_replay_state_esn_len(x->replay_esn),
			      x->replay_esn);
	} else {
		err = nla_put(skb, XFRMA_REPLAY_VAL, sizeof(x->replay),
			      &x->replay);
	}
	if (err)
		goto out_cancel;
	err = nla_put_64bit(skb, XFRMA_LTIME_VAL, sizeof(x->curlft), &x->curlft,
			    XFRMA_PAD);
	if (err)
		goto out_cancel;

	if (id->flags & XFRM_AE_RTHR) {
		err = nla_put_u32(skb, XFRMA_REPLAY_THRESH, x->replay_maxdiff);
		if (err)
			goto out_cancel;
	}
	if (id->flags & XFRM_AE_ETHR) {
		/* replay_maxage is in jiffies; report it in 100ms units */
		err = nla_put_u32(skb, XFRMA_ETIMER_THRESH,
				  x->replay_maxage * 10 / HZ);
		if (err)
			goto out_cancel;
	}
	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		goto out_cancel;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		goto out_cancel;

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	nlmsg_cancel(skb, nlh);
	return err;
}
2197
/* XFRM_MSG_GETAE handler: look up the SA named in the request and unicast
 * its async-event data (replay state, lifetime, thresholds) back.
 */
static int xfrm_get_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct sk_buff *r_skb;
	int err;
	struct km_event c;
	u32 mark;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct xfrm_usersa_id *id = &p->sa_id;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &id->daddr, id->spi, id->proto, id->family);
	if (x == NULL)
		return -ESRCH;

	r_skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (r_skb == NULL) {
		xfrm_state_put(x);
		return -ENOMEM;
	}

	/*
	 * XXX: is this lock really needed - none of the other
	 * gets lock (the concern is things getting updated
	 * while we are still reading) - jhs
	*/
	spin_lock_bh(&x->lock);
	c.data.aevent = p->flags;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;

	/* cannot fail: r_skb was sized by xfrm_aevent_msgsize(x) */
	err = build_aevent(r_skb, x, &c);
	BUG_ON(err < 0);

	err = nlmsg_unicast(net->xfrm.nlsk, r_skb, NETLINK_CB(skb).portid);
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2241
/* XFRM_MSG_NEWAE handler: update async-event parameters (replay state,
 * lifetime, timer/replay thresholds) of an existing valid SA.  Requires
 * at least one updatable attribute and the NLM_F_REPLACE flag.
 */
static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	struct km_event c;
	int err = -EINVAL;
	u32 mark = 0;
	struct xfrm_mark m;
	struct xfrm_aevent_id *p = nlmsg_data(nlh);
	struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
	struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
	struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
	struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
	struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];

	if (!lt && !rp && !re && !et && !rt)
		return err;

	/* pedantic mode - thou shalt sayeth replaceth */
	if (!(nlh->nlmsg_flags&NLM_F_REPLACE))
		return err;

	mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->sa_id.daddr, p->sa_id.spi, p->sa_id.proto, p->sa_id.family);
	if (x == NULL)
		return -ESRCH;

	if (x->km.state != XFRM_STATE_VALID)
		goto out;

	/* ESN replay attribute must match the state's window size */
	err = xfrm_replay_verify_len(x->replay_esn, re);
	if (err)
		goto out;

	spin_lock_bh(&x->lock);
	xfrm_update_ae_params(x, attrs, 1);
	spin_unlock_bh(&x->lock);

	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.data.aevent = XFRM_AE_CU;
	km_state_notify(x, &c);
	err = 0;
out:
	xfrm_state_put(x);
	return err;
}
2292
/* XFRM_MSG_FLUSHPOLICY handler: delete all policies of the given type and
 * broadcast the flush event.  An already-empty table is not an error.
 */
static int xfrm_flush_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct km_event c;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = xfrm_policy_flush(net, type, true);
	if (err) {
		if (err == -ESRCH) /* empty table */
			return 0;
		return err;
	}

	c.data.type = type;
	c.event = nlh->nlmsg_type;
	c.seq = nlh->nlmsg_seq;
	c.portid = nlh->nlmsg_pid;
	c.net = net;
	km_policy_notify(NULL, 0, &c);
	return 0;
}
2320
/* XFRM_MSG_POLEXPIRE handler: look up the policy (by index or by
 * selector/context), deliver an expire notification to key managers, and
 * on a hard expire delete the policy and audit the deletion.
 */
static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_polexpire *up = nlmsg_data(nlh);
	struct xfrm_userpolicy_info *p = &up->pol;
	u8 type = XFRM_POLICY_TYPE_MAIN;
	int err = -ENOENT;
	struct xfrm_mark m;
	u32 if_id = 0;

	err = copy_from_user_policy_type(&type, attrs);
	if (err)
		return err;

	err = verify_policy_dir(p->dir);
	if (err)
		return err;

	if (attrs[XFRMA_IF_ID])
		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);

	xfrm_mark_get(attrs, &m);

	if (p->index)
		xp = xfrm_policy_byid(net, &m, if_id, type, p->dir, p->index,
				      0, &err);
	else {
		struct nlattr *rt = attrs[XFRMA_SEC_CTX];
		struct xfrm_sec_ctx *ctx;

		err = verify_sec_ctx_len(attrs);
		if (err)
			return err;

		/* temporary ctx only used as a lookup key */
		ctx = NULL;
		if (rt) {
			struct xfrm_user_sec_ctx *uctx = nla_data(rt);

			err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
			if (err)
				return err;
		}
		xp = xfrm_policy_bysel_ctx(net, &m, if_id, type, p->dir,
					   &p->sel, ctx, 0, &err);
		security_xfrm_policy_free(ctx);
	}
	if (xp == NULL)
		return -ENOENT;

	if (unlikely(xp->walk.dead))
		goto out;

	err = 0;
	if (up->hard) {
		xfrm_policy_delete(xp, p->dir);
		xfrm_audit_policy_delete(xp, 1, true);
	}
	km_policy_expired(xp, p->dir, up->hard, nlh->nlmsg_pid);

out:
	xfrm_pol_put(xp);
	return err;
}
2386
/* XFRM_MSG_EXPIRE handler: deliver an expire notification for the named SA
 * to key managers; on a hard expire also delete the state (under x->lock)
 * and audit the deletion.  Only VALID states may be expired this way.
 */
static int xfrm_add_sa_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_state *x;
	int err;
	struct xfrm_user_expire *ue = nlmsg_data(nlh);
	struct xfrm_usersa_info *p = &ue->state;
	struct xfrm_mark m;
	u32 mark = xfrm_mark_get(attrs, &m);

	x = xfrm_state_lookup(net, mark, &p->id.daddr, p->id.spi, p->id.proto, p->family);

	err = -ENOENT;
	if (x == NULL)
		return err;

	spin_lock_bh(&x->lock);
	err = -EINVAL;
	if (x->km.state != XFRM_STATE_VALID)
		goto out;
	km_state_expired(x, ue->hard, nlh->nlmsg_pid);

	if (ue->hard) {
		__xfrm_state_delete(x);
		xfrm_audit_state_delete(x, 1, true);
	}
	err = 0;
out:
	spin_unlock_bh(&x->lock);
	xfrm_state_put(x);
	return err;
}
2420
/* XFRM_MSG_ACQUIRE handler: build a throwaway state and policy from the
 * user's acquire message and issue km_query() for every template, so key
 * managers are asked to negotiate SAs.  Both objects are freed before
 * returning; nothing is inserted into the databases.
 */
static int xfrm_add_acquire(struct sk_buff *skb, struct nlmsghdr *nlh,
		struct nlattr **attrs)
{
	struct net *net = sock_net(skb->sk);
	struct xfrm_policy *xp;
	struct xfrm_user_tmpl *ut;
	int i;
	struct nlattr *rt = attrs[XFRMA_TMPL];
	struct xfrm_mark mark;

	struct xfrm_user_acquire *ua = nlmsg_data(nlh);
	struct xfrm_state *x = xfrm_state_alloc(net);
	int err = -ENOMEM;

	if (!x)
		goto nomem;

	xfrm_mark_get(attrs, &mark);

	err = verify_newpolicy_info(&ua->policy);
	if (err)
		goto free_state;
	err = verify_sec_ctx_len(attrs);
	if (err)
		goto free_state;

	/*   build an XP */
	xp = xfrm_policy_construct(net, &ua->policy, attrs, &err);
	if (!xp)
		goto free_state;

	memcpy(&x->id, &ua->id, sizeof(ua->id));
	memcpy(&x->props.saddr, &ua->saddr, sizeof(ua->saddr));
	memcpy(&x->sel, &ua->sel, sizeof(ua->sel));
	xp->mark.m = x->mark.m = mark.m;
	xp->mark.v = x->mark.v = mark.v;
	ut = nla_data(rt);
	/* extract the templates and for each call km_key */
	for (i = 0; i < xp->xfrm_nr; i++, ut++) {
		struct xfrm_tmpl *t = &xp->xfrm_vec[i];
		memcpy(&x->id, &t->id, sizeof(x->id));
		x->props.mode = t->mode;
		x->props.reqid = t->reqid;
		x->props.family = ut->family;
		t->aalgos = ua->aalgos;
		t->ealgos = ua->ealgos;
		t->calgos = ua->calgos;
		/* NOTE(review): only the last template's km_query() result
		 * would be visible in err, and it is discarded below */
		err = km_query(x, t, xp);

	}

	xfrm_state_free(x);
	kfree(xp);

	return 0;

free_state:
	xfrm_state_free(x);
nomem:
	return err;
}
2482
2483#ifdef CONFIG_XFRM_MIGRATE
2484static int copy_from_user_migrate(struct xfrm_migrate *ma,
2485				  struct xfrm_kmaddress *k,
2486				  struct nlattr **attrs, int *num)
2487{
2488	struct nlattr *rt = attrs[XFRMA_MIGRATE];
2489	struct xfrm_user_migrate *um;
2490	int i, num_migrate;
2491
2492	if (k != NULL) {
2493		struct xfrm_user_kmaddress *uk;
2494
2495		uk = nla_data(attrs[XFRMA_KMADDRESS]);
2496		memcpy(&k->local, &uk->local, sizeof(k->local));
2497		memcpy(&k->remote, &uk->remote, sizeof(k->remote));
2498		k->family = uk->family;
2499		k->reserved = uk->reserved;
2500	}
2501
2502	um = nla_data(rt);
2503	num_migrate = nla_len(rt) / sizeof(*um);
2504
2505	if (num_migrate <= 0 || num_migrate > XFRM_MAX_DEPTH)
2506		return -EINVAL;
2507
2508	for (i = 0; i < num_migrate; i++, um++, ma++) {
2509		memcpy(&ma->old_daddr, &um->old_daddr, sizeof(ma->old_daddr));
2510		memcpy(&ma->old_saddr, &um->old_saddr, sizeof(ma->old_saddr));
2511		memcpy(&ma->new_daddr, &um->new_daddr, sizeof(ma->new_daddr));
2512		memcpy(&ma->new_saddr, &um->new_saddr, sizeof(ma->new_saddr));
2513
2514		ma->proto = um->proto;
2515		ma->mode = um->mode;
2516		ma->reqid = um->reqid;
2517
2518		ma->old_family = um->old_family;
2519		ma->new_family = um->new_family;
2520	}
2521
2522	*num = i;
2523	return 0;
2524}
2525
2526static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
2527			   struct nlattr **attrs)
2528{
2529	struct xfrm_userpolicy_id *pi = nlmsg_data(nlh);
2530	struct xfrm_migrate m[XFRM_MAX_DEPTH];
2531	struct xfrm_kmaddress km, *kmp;
2532	u8 type;
2533	int err;
2534	int n = 0;
2535	struct net *net = sock_net(skb->sk);
2536	struct xfrm_encap_tmpl  *encap = NULL;
2537	u32 if_id = 0;
2538
2539	if (attrs[XFRMA_MIGRATE] == NULL)
2540		return -EINVAL;
2541
2542	kmp = attrs[XFRMA_KMADDRESS] ? &km : NULL;
2543
2544	err = copy_from_user_policy_type(&type, attrs);
2545	if (err)
2546		return err;
2547
2548	err = copy_from_user_migrate((struct xfrm_migrate *)m, kmp, attrs, &n);
2549	if (err)
2550		return err;
2551
2552	if (!n)
2553		return 0;
2554
2555	if (attrs[XFRMA_ENCAP]) {
2556		encap = kmemdup(nla_data(attrs[XFRMA_ENCAP]),
2557				sizeof(*encap), GFP_KERNEL);
2558		if (!encap)
2559			return 0;
2560	}
2561
2562	if (attrs[XFRMA_IF_ID])
2563		if_id = nla_get_u32(attrs[XFRMA_IF_ID]);
2564
2565	err = xfrm_migrate(&pi->sel, pi->dir, type, m, n, kmp, net, encap, if_id);
2566
2567	kfree(encap);
2568
2569	return err;
2570}
2571#else
/* CONFIG_XFRM_MIGRATE is disabled: reject the request outright. */
static int xfrm_do_migrate(struct sk_buff *skb, struct nlmsghdr *nlh,
			   struct nlattr **attrs)
{
	return -ENOPROTOOPT;
}
2577#endif
2578
2579#ifdef CONFIG_XFRM_MIGRATE
/* Pack one xfrm_migrate entry into an XFRMA_MIGRATE netlink attribute.
 * The struct is memset() first so reserved fields and padding never leak
 * kernel stack bytes to userspace - keep the memset before the copies.
 */
static int copy_to_user_migrate(const struct xfrm_migrate *m, struct sk_buff *skb)
{
	struct xfrm_user_migrate um;

	memset(&um, 0, sizeof(um));
	um.proto = m->proto;
	um.mode = m->mode;
	um.reqid = m->reqid;
	um.old_family = m->old_family;
	memcpy(&um.old_daddr, &m->old_daddr, sizeof(um.old_daddr));
	memcpy(&um.old_saddr, &m->old_saddr, sizeof(um.old_saddr));
	um.new_family = m->new_family;
	memcpy(&um.new_daddr, &m->new_daddr, sizeof(um.new_daddr));
	memcpy(&um.new_saddr, &m->new_saddr, sizeof(um.new_saddr));

	return nla_put(skb, XFRMA_MIGRATE, sizeof(um), &um);
}
2597
/* Pack a key-manager address into an XFRMA_KMADDRESS attribute.  As with
 * copy_to_user_migrate(), the memset prevents uninitialized padding from
 * reaching userspace.
 */
static int copy_to_user_kmaddress(const struct xfrm_kmaddress *k, struct sk_buff *skb)
{
	struct xfrm_user_kmaddress uk;

	memset(&uk, 0, sizeof(uk));
	uk.family = k->family;
	uk.reserved = k->reserved;
	memcpy(&uk.local, &k->local, sizeof(uk.local));
	memcpy(&uk.remote, &k->remote, sizeof(uk.remote));

	return nla_put(skb, XFRMA_KMADDRESS, sizeof(uk), &uk);
}
2610
2611static inline unsigned int xfrm_migrate_msgsize(int num_migrate, int with_kma,
2612						int with_encp)
2613{
2614	return NLMSG_ALIGN(sizeof(struct xfrm_userpolicy_id))
2615	      + (with_kma ? nla_total_size(sizeof(struct xfrm_kmaddress)) : 0)
2616	      + (with_encp ? nla_total_size(sizeof(struct xfrm_encap_tmpl)) : 0)
2617	      + nla_total_size(sizeof(struct xfrm_user_migrate) * num_migrate)
2618	      + userpolicy_type_attrsize();
2619}
2620
/* Build the XFRM_MSG_MIGRATE notification into @skb: the fixed
 * xfrm_userpolicy_id header followed by optional XFRMA_KMADDRESS and
 * XFRMA_ENCAP attributes, the policy-type attribute, and one
 * XFRMA_MIGRATE attribute per entry in @m.  On any failure the partial
 * message is cancelled.  The skb must have been sized with
 * xfrm_migrate_msgsize().
 */
static int build_migrate(struct sk_buff *skb, const struct xfrm_migrate *m,
			 int num_migrate, const struct xfrm_kmaddress *k,
			 const struct xfrm_selector *sel,
			 const struct xfrm_encap_tmpl *encap, u8 dir, u8 type)
{
	const struct xfrm_migrate *mp;
	struct xfrm_userpolicy_id *pol_id;
	struct nlmsghdr *nlh;
	int i, err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MIGRATE, sizeof(*pol_id), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	pol_id = nlmsg_data(nlh);
	/* copy data from selector, dir, and type to the pol_id */
	memset(pol_id, 0, sizeof(*pol_id));
	memcpy(&pol_id->sel, sel, sizeof(pol_id->sel));
	pol_id->dir = dir;

	if (k != NULL) {
		err = copy_to_user_kmaddress(k, skb);
		if (err)
			goto out_cancel;
	}
	if (encap) {
		err = nla_put(skb, XFRMA_ENCAP, sizeof(*encap), encap);
		if (err)
			goto out_cancel;
	}
	err = copy_to_user_policy_type(type, skb);
	if (err)
		goto out_cancel;
	for (i = 0, mp = m ; i < num_migrate; i++, mp++) {
		err = copy_to_user_migrate(mp, skb);
		if (err)
			goto out_cancel;
	}

	nlmsg_end(skb, nlh);
	return 0;

out_cancel:
	/* Roll the skb back to the state before nlmsg_put(). */
	nlmsg_cancel(skb, nlh);
	return err;
}
2667
/* km_migrate callback: broadcast an XFRM_MSG_MIGRATE event on the
 * XFRMNLGRP_MIGRATE multicast group.
 *
 * NOTE(review): this always uses &init_net rather than a per-namespace
 * socket - presumably because no netns context is available through the
 * km callback; confirm before relying on it in other namespaces.
 */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	struct net *net = &init_net;
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_migrate_msgsize(num_migrate, !!k, !!encap),
			GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	/* build migrate */
	err = build_migrate(skb, m, num_migrate, k, sel, encap, dir, type);
	/* The skb was sized by xfrm_migrate_msgsize(), so building the
	 * message must not fail; a failure here is a sizing bug.
	 */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MIGRATE);
}
2688#else
/* CONFIG_XFRM_MIGRATE is disabled: migrate notifications are unsupported. */
static int xfrm_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type,
			     const struct xfrm_migrate *m, int num_migrate,
			     const struct xfrm_kmaddress *k,
			     const struct xfrm_encap_tmpl *encap)
{
	return -ENOPROTOOPT;
}
2696#endif
2697
#define XMSGSIZE(type) sizeof(struct type)

/* Minimum fixed-header payload length for each XFRM_MSG_* type, indexed by
 * (type - XFRM_MSG_BASE).  Used by xfrm_user_rcv_msg() as the header size
 * passed to nlmsg_parse_deprecated(), and exported for the compat layer.
 */
const int xfrm_msg_min[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_id),
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userspi_info),
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_acquire),
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_expire),
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_info),
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_info),
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_polexpire),
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_usersa_flush),
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = 0,
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = XMSGSIZE(xfrm_aevent_id),
	[XFRM_MSG_REPORT      - XFRM_MSG_BASE] = XMSGSIZE(xfrm_user_report),
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_id),
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = sizeof(u32),
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = XMSGSIZE(xfrm_userpolicy_default),
};
EXPORT_SYMBOL_GPL(xfrm_msg_min);
2726
2727#undef XMSGSIZE
2728
/* Netlink attribute validation policy for the default XFRM message set.
 * ".len" entries specify the minimum attribute payload; typed entries are
 * validated exactly.  Exported for the 32-bit compat translator.
 */
const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
	[XFRMA_SA]		= { .len = sizeof(struct xfrm_usersa_info)},
	[XFRMA_POLICY]		= { .len = sizeof(struct xfrm_userpolicy_info)},
	[XFRMA_LASTUSED]	= { .type = NLA_U64},
	[XFRMA_ALG_AUTH_TRUNC]	= { .len = sizeof(struct xfrm_algo_auth)},
	[XFRMA_ALG_AEAD]	= { .len = sizeof(struct xfrm_algo_aead) },
	[XFRMA_ALG_AUTH]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_CRYPT]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ALG_COMP]	= { .len = sizeof(struct xfrm_algo) },
	[XFRMA_ENCAP]		= { .len = sizeof(struct xfrm_encap_tmpl) },
	[XFRMA_TMPL]		= { .len = sizeof(struct xfrm_user_tmpl) },
	[XFRMA_SEC_CTX]		= { .len = sizeof(struct xfrm_user_sec_ctx) },
	[XFRMA_LTIME_VAL]	= { .len = sizeof(struct xfrm_lifetime_cur) },
	[XFRMA_REPLAY_VAL]	= { .len = sizeof(struct xfrm_replay_state) },
	[XFRMA_REPLAY_THRESH]	= { .type = NLA_U32 },
	[XFRMA_ETIMER_THRESH]	= { .type = NLA_U32 },
	[XFRMA_SRCADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_COADDR]		= { .len = sizeof(xfrm_address_t) },
	[XFRMA_POLICY_TYPE]	= { .len = sizeof(struct xfrm_userpolicy_type)},
	[XFRMA_MIGRATE]		= { .len = sizeof(struct xfrm_user_migrate) },
	[XFRMA_KMADDRESS]	= { .len = sizeof(struct xfrm_user_kmaddress) },
	[XFRMA_MARK]		= { .len = sizeof(struct xfrm_mark) },
	[XFRMA_TFCPAD]		= { .type = NLA_U32 },
	[XFRMA_REPLAY_ESN_VAL]	= { .len = sizeof(struct xfrm_replay_state_esn) },
	[XFRMA_SA_EXTRA_FLAGS]	= { .type = NLA_U32 },
	[XFRMA_PROTO]		= { .type = NLA_U8 },
	[XFRMA_ADDRESS_FILTER]	= { .len = sizeof(struct xfrm_address_filter) },
	[XFRMA_OFFLOAD_DEV]	= { .len = sizeof(struct xfrm_user_offload) },
	[XFRMA_SET_MARK]	= { .type = NLA_U32 },
	[XFRMA_SET_MARK_MASK]	= { .type = NLA_U32 },
	[XFRMA_IF_ID]		= { .type = NLA_U32 },
	[XFRMA_MTIMER_THRESH]   = { .type = NLA_U32 },
};
EXPORT_SYMBOL_GPL(xfrma_policy);
2763
/* Attribute policy used only for XFRM_MSG_NEWSPDINFO (SPD hash thresholds). */
static const struct nla_policy xfrma_spd_policy[XFRMA_SPD_MAX+1] = {
	[XFRMA_SPD_IPV4_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
	[XFRMA_SPD_IPV6_HTHRESH] = { .len = sizeof(struct xfrmu_spdhthresh) },
};
2768
/* Per-message-type dispatch table, indexed by (type - XFRM_MSG_BASE).
 * .doit handles a single request; .start/.dump/.done drive NLM_F_DUMP
 * requests; .nla_pol/.nla_max override the default xfrma_policy/XFRMA_MAX
 * when a message type uses its own attribute namespace.
 */
static const struct xfrm_link {
	int (*doit)(struct sk_buff *, struct nlmsghdr *, struct nlattr **);
	int (*start)(struct netlink_callback *);
	int (*dump)(struct sk_buff *, struct netlink_callback *);
	int (*done)(struct netlink_callback *);
	const struct nla_policy *nla_pol;
	int nla_max;
} xfrm_dispatch[XFRM_NR_MSGTYPES] = {
	[XFRM_MSG_NEWSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_DELSA       - XFRM_MSG_BASE] = { .doit = xfrm_del_sa        },
	[XFRM_MSG_GETSA       - XFRM_MSG_BASE] = { .doit = xfrm_get_sa,
						   .dump = xfrm_dump_sa,
						   .done = xfrm_dump_sa_done  },
	[XFRM_MSG_NEWPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_DELPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy    },
	[XFRM_MSG_GETPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_get_policy,
						   .start = xfrm_dump_policy_start,
						   .dump = xfrm_dump_policy,
						   .done = xfrm_dump_policy_done },
	[XFRM_MSG_ALLOCSPI    - XFRM_MSG_BASE] = { .doit = xfrm_alloc_userspi },
	[XFRM_MSG_ACQUIRE     - XFRM_MSG_BASE] = { .doit = xfrm_add_acquire   },
	[XFRM_MSG_EXPIRE      - XFRM_MSG_BASE] = { .doit = xfrm_add_sa_expire },
	[XFRM_MSG_UPDPOLICY   - XFRM_MSG_BASE] = { .doit = xfrm_add_policy    },
	[XFRM_MSG_UPDSA       - XFRM_MSG_BASE] = { .doit = xfrm_add_sa        },
	[XFRM_MSG_POLEXPIRE   - XFRM_MSG_BASE] = { .doit = xfrm_add_pol_expire},
	[XFRM_MSG_FLUSHSA     - XFRM_MSG_BASE] = { .doit = xfrm_flush_sa      },
	[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy  },
	[XFRM_MSG_NEWAE       - XFRM_MSG_BASE] = { .doit = xfrm_new_ae  },
	[XFRM_MSG_GETAE       - XFRM_MSG_BASE] = { .doit = xfrm_get_ae  },
	[XFRM_MSG_MIGRATE     - XFRM_MSG_BASE] = { .doit = xfrm_do_migrate    },
	[XFRM_MSG_GETSADINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_sadinfo   },
	[XFRM_MSG_NEWSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_set_spdinfo,
						   .nla_pol = xfrma_spd_policy,
						   .nla_max = XFRMA_SPD_MAX },
	[XFRM_MSG_GETSPDINFO  - XFRM_MSG_BASE] = { .doit = xfrm_get_spdinfo   },
	[XFRM_MSG_SETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_set_default   },
	[XFRM_MSG_GETDEFAULT  - XFRM_MSG_BASE] = { .doit = xfrm_get_default   },
};
2807
/* Central receive handler for NETLINK_XFRM messages: validates the type,
 * checks CAP_NET_ADMIN, optionally translates 32-bit compat payloads,
 * starts dumps for GETSA/GETPOLICY with NLM_F_DUMP, and otherwise parses
 * the attributes and dispatches to the per-type .doit handler.
 */
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *attrs[XFRMA_MAX+1];
	const struct xfrm_link *link;
	struct nlmsghdr *nlh64 = NULL;
	int type, err;

	type = nlh->nlmsg_type;
	if (type > XFRM_MSG_MAX)
		return -EINVAL;

	/* Types below XFRM_MSG_BASE are presumably filtered out by the
	 * netlink core before this handler runs - TODO confirm; no lower
	 * bound is checked here.
	 */
	type -= XFRM_MSG_BASE;
	link = &xfrm_dispatch[type];

	/* All operations require privileges, even GET */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	if (in_compat_syscall()) {
		struct xfrm_translator *xtr = xfrm_get_translator();

		if (!xtr)
			return -EOPNOTSUPP;

		/* Translate the 32-bit layout to native; on success nlh64
		 * replaces nlh for the rest of this function and is freed
		 * at "err:".
		 */
		nlh64 = xtr->rcv_msg_compat(nlh, link->nla_max,
					    link->nla_pol, extack);
		xfrm_put_translator(xtr);
		if (IS_ERR(nlh64))
			return PTR_ERR(nlh64);
		if (nlh64)
			nlh = nlh64;
	}

	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
		struct netlink_dump_control c = {
			.start = link->start,
			.dump = link->dump,
			.done = link->done,
		};

		if (link->dump == NULL) {
			err = -EINVAL;
			goto err;
		}

		err = netlink_dump_start(net->xfrm.nlsk, skb, nlh, &c);
		goto err;
	}

	/* Per-type policy/max override the defaults when set. */
	err = nlmsg_parse_deprecated(nlh, xfrm_msg_min[type], attrs,
				     link->nla_max ? : XFRMA_MAX,
				     link->nla_pol ? : xfrma_policy, extack);
	if (err < 0)
		goto err;

	if (link->doit == NULL) {
		err = -EINVAL;
		goto err;
	}

	err = link->doit(skb, nlh, attrs);

	/* We need to free skb allocated in xfrm_alloc_compat() before
	 * returning from this function, because consume_skb() won't take
	 * care of frag_list since netlink destructor sets
	 * sbk->head to NULL. (see netlink_skb_destructor())
	 */
	if (skb_has_frag_list(skb)) {
		kfree_skb(skb_shinfo(skb)->frag_list);
		skb_shinfo(skb)->frag_list = NULL;
	}

err:
	kvfree(nlh64);
	return err;
}
2888
/* Netlink input callback: serialize all xfrm configuration messages for
 * this netns under xfrm_cfg_mutex and feed them to xfrm_user_rcv_msg().
 */
static void xfrm_netlink_rcv(struct sk_buff *skb)
{
	struct net *net = sock_net(skb->sk);

	mutex_lock(&net->xfrm.xfrm_cfg_mutex);
	netlink_rcv_skb(skb, &xfrm_user_rcv_msg);
	mutex_unlock(&net->xfrm.xfrm_cfg_mutex);
}
2897
2898static inline unsigned int xfrm_expire_msgsize(void)
2899{
2900	return NLMSG_ALIGN(sizeof(struct xfrm_user_expire))
2901	       + nla_total_size(sizeof(struct xfrm_mark));
2902}
2903
/* Build an XFRM_MSG_EXPIRE message for state @x into @skb: the
 * xfrm_user_expire header plus optional XFRMA_MARK and XFRMA_IF_ID
 * attributes.  Returns 0 or a negative errno if the skb ran out of room.
 */
static int build_expire(struct sk_buff *skb, struct xfrm_state *x, const struct km_event *c)
{
	struct xfrm_user_expire *ue;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_EXPIRE, sizeof(*ue), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ue = nlmsg_data(nlh);
	copy_to_user_state(x, &ue->state);
	ue->hard = (c->data.hard != 0) ? 1 : 0;
	/* clear the padding bytes */
	/* Zero everything after 'hard' so struct padding never leaks
	 * kernel memory to the multicast listeners.
	 */
	memset(&ue->hard + 1, 0, sizeof(*ue) - offsetofend(typeof(*ue), hard));

	err = xfrm_mark_put(skb, &x->mark);
	if (err)
		return err;

	err = xfrm_if_id_put(skb, x->if_id);
	if (err)
		return err;

	nlmsg_end(skb, nlh);
	return 0;
}
2931
2932static int xfrm_exp_state_notify(struct xfrm_state *x, const struct km_event *c)
2933{
2934	struct net *net = xs_net(x);
2935	struct sk_buff *skb;
2936
2937	skb = nlmsg_new(xfrm_expire_msgsize(), GFP_ATOMIC);
2938	if (skb == NULL)
2939		return -ENOMEM;
2940
2941	if (build_expire(skb, x, c) < 0) {
2942		kfree_skb(skb);
2943		return -EMSGSIZE;
2944	}
2945
2946	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
2947}
2948
/* Broadcast an async replay/lifetime event (XFRM_MSG_NEWAE) for @x on the
 * XFRMNLGRP_AEVENTS multicast group.
 */
static int xfrm_aevent_state_notify(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_aevent_msgsize(x), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_aevent(skb, x, c);
	/* The skb was sized by xfrm_aevent_msgsize(), so building the
	 * message must not fail; a failure here is a sizing bug.
	 */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_AEVENTS);
}
2964
2965static int xfrm_notify_sa_flush(const struct km_event *c)
2966{
2967	struct net *net = c->net;
2968	struct xfrm_usersa_flush *p;
2969	struct nlmsghdr *nlh;
2970	struct sk_buff *skb;
2971	int len = NLMSG_ALIGN(sizeof(struct xfrm_usersa_flush));
2972
2973	skb = nlmsg_new(len, GFP_ATOMIC);
2974	if (skb == NULL)
2975		return -ENOMEM;
2976
2977	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHSA, sizeof(*p), 0);
2978	if (nlh == NULL) {
2979		kfree_skb(skb);
2980		return -EMSGSIZE;
2981	}
2982
2983	p = nlmsg_data(nlh);
2984	p->proto = c->data.proto;
2985
2986	nlmsg_end(skb, nlh);
2987
2988	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);
2989}
2990
/* Upper bound on the attribute payload needed to serialize state @x
 * (everything copy_to_user_state_extra() may emit, excluding the fixed
 * header which the caller accounts for separately).
 */
static inline unsigned int xfrm_sa_len(struct xfrm_state *x)
{
	unsigned int l = 0;
	if (x->aead)
		l += nla_total_size(aead_len(x->aead));
	if (x->aalg) {
		/* Both XFRMA_ALG_AUTH (legacy layout) and
		 * XFRMA_ALG_AUTH_TRUNC are emitted for an auth algorithm,
		 * so space for both attributes is reserved here.
		 */
		l += nla_total_size(sizeof(struct xfrm_algo) +
				    (x->aalg->alg_key_len + 7) / 8);
		l += nla_total_size(xfrm_alg_auth_len(x->aalg));
	}
	if (x->ealg)
		l += nla_total_size(xfrm_alg_len(x->ealg));
	if (x->calg)
		l += nla_total_size(sizeof(*x->calg));
	if (x->encap)
		l += nla_total_size(sizeof(*x->encap));
	if (x->tfcpad)
		l += nla_total_size(sizeof(x->tfcpad));
	if (x->replay_esn)
		l += nla_total_size(xfrm_replay_state_esn_len(x->replay_esn));
	else
		l += nla_total_size(sizeof(struct xfrm_replay_state));
	if (x->security)
		l += nla_total_size(sizeof(struct xfrm_user_sec_ctx) +
				    x->security->ctx_len);
	if (x->coaddr)
		l += nla_total_size(sizeof(*x->coaddr));
	if (x->props.extra_flags)
		l += nla_total_size(sizeof(x->props.extra_flags));
	if (x->xso.dev)
		 l += nla_total_size(sizeof(struct xfrm_user_offload));
	if (x->props.smark.v | x->props.smark.m)
		l += nla_total_size(sizeof(x->props.smark.v));
		l += nla_total_size(sizeof(x->props.smark.m));
	}
	if (x->if_id)
		l += nla_total_size(sizeof(x->if_id));

	/* Must count x->lastused as it may become non-zero behind our back. */
	l += nla_total_size_64bit(sizeof(u64));

	if (x->mapping_maxage)
		l += nla_total_size(sizeof(x->mapping_maxage));

	return l;
}
3037
/* Broadcast a NEWSA/UPDSA/DELSA event for state @x on XFRMNLGRP_SA.
 * For DELSA the fixed header is an xfrm_usersa_id and the full
 * xfrm_usersa_info is nested inside an XFRMA_SA attribute instead.
 */
static int xfrm_notify_sa(struct xfrm_state *x, const struct km_event *c)
{
	struct net *net = xs_net(x);
	struct xfrm_usersa_info *p;
	struct xfrm_usersa_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int len = xfrm_sa_len(x);
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELSA) {
		/* DELSA moves the usersa_info into an attribute and adds
		 * an XFRMA_MARK, so reserve space for both.
		 */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
		len += nla_total_size(sizeof(struct xfrm_mark));
	}
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELSA) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		memcpy(&id->daddr, &x->id.daddr, sizeof(id->daddr));
		id->spi = x->id.spi;
		id->family = x->props.family;
		id->proto = x->id.proto;

		/* Re-point p at the nested XFRMA_SA payload. */
		attr = nla_reserve(skb, XFRMA_SA, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}
	err = copy_to_user_state_extra(x, p, skb);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_SA);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3096
3097static int xfrm_send_state_notify(struct xfrm_state *x, const struct km_event *c)
3098{
3099
3100	switch (c->event) {
3101	case XFRM_MSG_EXPIRE:
3102		return xfrm_exp_state_notify(x, c);
3103	case XFRM_MSG_NEWAE:
3104		return xfrm_aevent_state_notify(x, c);
3105	case XFRM_MSG_DELSA:
3106	case XFRM_MSG_UPDSA:
3107	case XFRM_MSG_NEWSA:
3108		return xfrm_notify_sa(x, c);
3109	case XFRM_MSG_FLUSHSA:
3110		return xfrm_notify_sa_flush(c);
3111	default:
3112		printk(KERN_NOTICE "xfrm_user: Unknown SA event %d\n",
3113		       c->event);
3114		break;
3115	}
3116
3117	return 0;
3118
3119}
3120
3121static inline unsigned int xfrm_acquire_msgsize(struct xfrm_state *x,
3122						struct xfrm_policy *xp)
3123{
3124	return NLMSG_ALIGN(sizeof(struct xfrm_user_acquire))
3125	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3126	       + nla_total_size(sizeof(struct xfrm_mark))
3127	       + nla_total_size(xfrm_user_sec_ctx_size(x->security))
3128	       + userpolicy_type_attrsize();
3129}
3130
/* Build an XFRM_MSG_ACQUIRE message asking a key manager to negotiate keys
 * for state @x / template @xt under policy @xp.  Assigns a fresh acquire
 * sequence number to both the message and x->km.seq so the daemon's reply
 * can be matched back to this state.
 */
static int build_acquire(struct sk_buff *skb, struct xfrm_state *x,
			 struct xfrm_tmpl *xt, struct xfrm_policy *xp)
{
	__u32 seq = xfrm_get_acqseq();
	struct xfrm_user_acquire *ua;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_ACQUIRE, sizeof(*ua), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ua = nlmsg_data(nlh);
	memcpy(&ua->id, &x->id, sizeof(ua->id));
	memcpy(&ua->saddr, &x->props.saddr, sizeof(ua->saddr));
	memcpy(&ua->sel, &x->sel, sizeof(ua->sel));
	copy_to_user_policy(xp, &ua->policy, XFRM_POLICY_OUT);
	ua->aalgos = xt->aalgos;
	ua->ealgos = xt->ealgos;
	ua->calgos = xt->calgos;
	ua->seq = x->km.seq = seq;

	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_state_sec_ctx(x, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		/* Roll back everything added since nlmsg_put(). */
		nlmsg_cancel(skb, nlh);
		return err;
	}

	nlmsg_end(skb, nlh);
	return 0;
}
3170
/* km .acquire callback: broadcast an XFRM_MSG_ACQUIRE event on the
 * XFRMNLGRP_ACQUIRE multicast group so a key-management daemon can react.
 */
static int xfrm_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *xt,
			     struct xfrm_policy *xp)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_acquire_msgsize(x, xp), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_acquire(skb, x, xt, xp);
	/* The skb was sized by xfrm_acquire_msgsize(), so building the
	 * message must not fail; a failure here is a sizing bug.
	 */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_ACQUIRE);
}
3187
3188/* User gives us xfrm_user_policy_info followed by an array of 0
3189 * or more templates.
3190 */
/* km .compile_policy callback, reached via the IP_XFRM_POLICY /
 * IPV6_XFRM_POLICY socket options.  Validates the user buffer and builds a
 * socket policy.  On failure returns NULL with *dir holding a negative
 * errno; on success *dir holds the policy direction.
 */
static struct xfrm_policy *xfrm_compile_policy(struct sock *sk, int opt,
					       u8 *data, int len, int *dir)
{
	struct net *net = sock_net(sk);
	struct xfrm_userpolicy_info *p = (struct xfrm_userpolicy_info *)data;
	struct xfrm_user_tmpl *ut = (struct xfrm_user_tmpl *) (p + 1);
	struct xfrm_policy *xp;
	int nr;

	/* The socket-option number must match the socket's family. */
	switch (sk->sk_family) {
	case AF_INET:
		if (opt != IP_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		if (opt != IPV6_XFRM_POLICY) {
			*dir = -EOPNOTSUPP;
			return NULL;
		}
		break;
#endif
	default:
		*dir = -EINVAL;
		return NULL;
	}

	*dir = -EINVAL;

	if (len < sizeof(*p) ||
	    verify_newpolicy_info(p))
		return NULL;

	/* Templates follow the header back-to-back in the user buffer. */
	nr = ((len - sizeof(*p)) / sizeof(*ut));
	if (validate_tmpl(nr, ut, p->sel.family))
		return NULL;

	/* Socket policies may only be IN/OUT/FWD, not a MAX/after value. */
	if (p->dir > XFRM_POLICY_OUT)
		return NULL;

	xp = xfrm_policy_alloc(net, GFP_ATOMIC);
	if (xp == NULL) {
		*dir = -ENOBUFS;
		return NULL;
	}

	copy_from_user_policy(xp, p);
	xp->type = XFRM_POLICY_TYPE_MAIN;
	copy_templates(xp, ut, nr);

	*dir = p->dir;

	return xp;
}
3247
3248static inline unsigned int xfrm_polexpire_msgsize(struct xfrm_policy *xp)
3249{
3250	return NLMSG_ALIGN(sizeof(struct xfrm_user_polexpire))
3251	       + nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr)
3252	       + nla_total_size(xfrm_user_sec_ctx_size(xp->security))
3253	       + nla_total_size(sizeof(struct xfrm_mark))
3254	       + userpolicy_type_attrsize();
3255}
3256
/* Build an XFRM_MSG_POLEXPIRE message for policy @xp into @skb.
 * c->data.hard distinguishes hard from soft expiry.
 */
static int build_polexpire(struct sk_buff *skb, struct xfrm_policy *xp,
			   int dir, const struct km_event *c)
{
	struct xfrm_user_polexpire *upe;
	int hard = c->data.hard;
	struct nlmsghdr *nlh;
	int err;

	nlh = nlmsg_put(skb, c->portid, 0, XFRM_MSG_POLEXPIRE, sizeof(*upe), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	upe = nlmsg_data(nlh);
	copy_to_user_policy(xp, &upe->pol, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_sec_ctx(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err) {
		/* Roll back everything added since nlmsg_put(). */
		nlmsg_cancel(skb, nlh);
		return err;
	}
	upe->hard = !!hard;

	nlmsg_end(skb, nlh);
	return 0;
}
3289
3290static int xfrm_exp_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3291{
3292	struct net *net = xp_net(xp);
3293	struct sk_buff *skb;
3294	int err;
3295
3296	skb = nlmsg_new(xfrm_polexpire_msgsize(xp), GFP_ATOMIC);
3297	if (skb == NULL)
3298		return -ENOMEM;
3299
3300	err = build_polexpire(skb, xp, dir, c);
3301	BUG_ON(err < 0);
3302
3303	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_EXPIRE);
3304}
3305
/* Broadcast a NEWPOLICY/UPDPOLICY/DELPOLICY event for @xp on
 * XFRMNLGRP_POLICY.  For DELPOLICY the fixed header is an
 * xfrm_userpolicy_id and the full xfrm_userpolicy_info is nested inside an
 * XFRMA_POLICY attribute instead.
 */
static int xfrm_notify_policy(struct xfrm_policy *xp, int dir, const struct km_event *c)
{
	unsigned int len = nla_total_size(sizeof(struct xfrm_user_tmpl) * xp->xfrm_nr);
	struct net *net = xp_net(xp);
	struct xfrm_userpolicy_info *p;
	struct xfrm_userpolicy_id *id;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	unsigned int headlen;
	int err;

	headlen = sizeof(*p);
	if (c->event == XFRM_MSG_DELPOLICY) {
		/* DELPOLICY moves the userpolicy_info into an attribute. */
		len += nla_total_size(headlen);
		headlen = sizeof(*id);
	}
	len += userpolicy_type_attrsize();
	len += nla_total_size(sizeof(struct xfrm_mark));
	len += NLMSG_ALIGN(headlen);

	skb = nlmsg_new(len, GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	nlh = nlmsg_put(skb, c->portid, c->seq, c->event, headlen, 0);
	err = -EMSGSIZE;
	if (nlh == NULL)
		goto out_free_skb;

	p = nlmsg_data(nlh);
	if (c->event == XFRM_MSG_DELPOLICY) {
		struct nlattr *attr;

		id = nlmsg_data(nlh);
		memset(id, 0, sizeof(*id));
		id->dir = dir;
		/* Identify the deleted policy by index or by selector,
		 * mirroring how the deletion request addressed it.
		 */
		if (c->data.byid)
			id->index = xp->index;
		else
			memcpy(&id->sel, &xp->selector, sizeof(id->sel));

		/* Re-point p at the nested XFRMA_POLICY payload. */
		attr = nla_reserve(skb, XFRMA_POLICY, sizeof(*p));
		err = -EMSGSIZE;
		if (attr == NULL)
			goto out_free_skb;

		p = nla_data(attr);
	}

	copy_to_user_policy(xp, p, dir);
	err = copy_to_user_tmpl(xp, skb);
	if (!err)
		err = copy_to_user_policy_type(xp->type, skb);
	if (!err)
		err = xfrm_mark_put(skb, &xp->mark);
	if (!err)
		err = xfrm_if_id_put(skb, xp->if_id);
	if (err)
		goto out_free_skb;

	nlmsg_end(skb, nlh);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);

out_free_skb:
	kfree_skb(skb);
	return err;
}
3374
3375static int xfrm_notify_policy_flush(const struct km_event *c)
3376{
3377	struct net *net = c->net;
3378	struct nlmsghdr *nlh;
3379	struct sk_buff *skb;
3380	int err;
3381
3382	skb = nlmsg_new(userpolicy_type_attrsize(), GFP_ATOMIC);
3383	if (skb == NULL)
3384		return -ENOMEM;
3385
3386	nlh = nlmsg_put(skb, c->portid, c->seq, XFRM_MSG_FLUSHPOLICY, 0, 0);
3387	err = -EMSGSIZE;
3388	if (nlh == NULL)
3389		goto out_free_skb;
3390	err = copy_to_user_policy_type(c->data.type, skb);
3391	if (err)
3392		goto out_free_skb;
3393
3394	nlmsg_end(skb, nlh);
3395
3396	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_POLICY);
3397
3398out_free_skb:
3399	kfree_skb(skb);
3400	return err;
3401}
3402
3403static int xfrm_send_policy_notify(struct xfrm_policy *xp, int dir, const struct km_event *c)
3404{
3405
3406	switch (c->event) {
3407	case XFRM_MSG_NEWPOLICY:
3408	case XFRM_MSG_UPDPOLICY:
3409	case XFRM_MSG_DELPOLICY:
3410		return xfrm_notify_policy(xp, dir, c);
3411	case XFRM_MSG_FLUSHPOLICY:
3412		return xfrm_notify_policy_flush(c);
3413	case XFRM_MSG_POLEXPIRE:
3414		return xfrm_exp_policy_notify(xp, dir, c);
3415	default:
3416		printk(KERN_NOTICE "xfrm_user: Unknown Policy event %d\n",
3417		       c->event);
3418	}
3419
3420	return 0;
3421
3422}
3423
3424static inline unsigned int xfrm_report_msgsize(void)
3425{
3426	return NLMSG_ALIGN(sizeof(struct xfrm_user_report));
3427}
3428
/* Build an XFRM_MSG_REPORT message: protocol and selector in the fixed
 * header, plus an optional XFRMA_COADDR attribute when @addr is non-NULL.
 */
static int build_report(struct sk_buff *skb, u8 proto,
			struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct xfrm_user_report *ur;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_REPORT, sizeof(*ur), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	ur = nlmsg_data(nlh);
	ur->proto = proto;
	memcpy(&ur->sel, sel, sizeof(ur->sel));

	if (addr) {
		int err = nla_put(skb, XFRMA_COADDR, sizeof(*addr), addr);
		if (err) {
			/* Roll back the partially built message. */
			nlmsg_cancel(skb, nlh);
			return err;
		}
	}
	nlmsg_end(skb, nlh);
	return 0;
}
3453
/* km .report callback: broadcast an XFRM_MSG_REPORT event on the
 * XFRMNLGRP_REPORT multicast group.
 */
static int xfrm_send_report(struct net *net, u8 proto,
			    struct xfrm_selector *sel, xfrm_address_t *addr)
{
	struct sk_buff *skb;
	int err;

	skb = nlmsg_new(xfrm_report_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_report(skb, proto, sel, addr);
	/* The skb was sized by xfrm_report_msgsize(); a build failure here
	 * is a sizing bug.
	 */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_REPORT);
}
3469
3470static inline unsigned int xfrm_mapping_msgsize(void)
3471{
3472	return NLMSG_ALIGN(sizeof(struct xfrm_user_mapping));
3473}
3474
/* Build an XFRM_MSG_MAPPING message announcing a NAT-keepalive detected
 * address/port change for state @x: old values come from the state, new
 * ones from @new_saddr/@new_sport.  Caller guarantees x->encap is set.
 */
static int build_mapping(struct sk_buff *skb, struct xfrm_state *x,
			 xfrm_address_t *new_saddr, __be16 new_sport)
{
	struct xfrm_user_mapping *um;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, XFRM_MSG_MAPPING, sizeof(*um), 0);
	if (nlh == NULL)
		return -EMSGSIZE;

	um = nlmsg_data(nlh);

	memcpy(&um->id.daddr, &x->id.daddr, sizeof(um->id.daddr));
	um->id.spi = x->id.spi;
	um->id.family = x->props.family;
	um->id.proto = x->id.proto;
	memcpy(&um->new_saddr, new_saddr, sizeof(um->new_saddr));
	memcpy(&um->old_saddr, &x->props.saddr, sizeof(um->old_saddr));
	um->new_sport = new_sport;
	um->old_sport = x->encap->encap_sport;
	um->reqid = x->props.reqid;

	nlmsg_end(skb, nlh);
	return 0;
}
3500
/* km .new_mapping callback: broadcast an XFRM_MSG_MAPPING event on
 * XFRMNLGRP_MAPPING.  Only meaningful for UDP-encapsulated ESP states.
 */
static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
			     __be16 sport)
{
	struct net *net = xs_net(x);
	struct sk_buff *skb;
	int err;

	if (x->id.proto != IPPROTO_ESP)
		return -EINVAL;

	/* build_mapping() dereferences x->encap, so reject states
	 * without an encapsulation template.
	 */
	if (!x->encap)
		return -EINVAL;

	skb = nlmsg_new(xfrm_mapping_msgsize(), GFP_ATOMIC);
	if (skb == NULL)
		return -ENOMEM;

	err = build_mapping(skb, x, ipaddr, sport);
	/* The skb was sized by xfrm_mapping_msgsize(); a build failure
	 * here is a sizing bug.
	 */
	BUG_ON(err < 0);

	return xfrm_nlmsg_multicast(net, skb, 0, XFRMNLGRP_MAPPING);
}
3523
3524static bool xfrm_is_alive(const struct km_event *c)
3525{
3526	return (bool)xfrm_acquire_is_on(c->net);
3527}
3528
/* Key-manager backend implemented over the NETLINK_XFRM socket.  Each
 * callback translates an in-kernel xfrm event into a netlink multicast
 * (or, for compile_policy, parses userspace policy data).
 */
static struct xfrm_mgr netlink_mgr = {
	.notify		= xfrm_send_state_notify,
	.acquire	= xfrm_send_acquire,
	.compile_policy	= xfrm_compile_policy,
	.notify_policy	= xfrm_send_policy_notify,
	.report		= xfrm_send_report,
	.migrate	= xfrm_send_migrate,
	.new_mapping	= xfrm_send_mapping,
	.is_alive	= xfrm_is_alive,
};
3539
3540static int __net_init xfrm_user_net_init(struct net *net)
3541{
3542	struct sock *nlsk;
3543	struct netlink_kernel_cfg cfg = {
3544		.groups	= XFRMNLGRP_MAX,
3545		.input	= xfrm_netlink_rcv,
3546	};
3547
3548	nlsk = netlink_kernel_create(net, NETLINK_XFRM, &cfg);
3549	if (nlsk == NULL)
3550		return -ENOMEM;
3551	net->xfrm.nlsk_stash = nlsk; /* Don't set to NULL */
3552	rcu_assign_pointer(net->xfrm.nlsk, nlsk);
3553	return 0;
3554}
3555
/* Batched per-netns teardown: hide the netlink socket from new RCU
 * readers first, wait out in-flight readers, then release it.  The
 * two-pass structure lets a single synchronize_net() cover every
 * exiting namespace in the batch.
 */
static void __net_exit xfrm_user_net_exit(struct list_head *net_exit_list)
{
	struct net *net;
	/* Pass 1: make nlsk unreachable for new lookups. */
	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->xfrm.nlsk, NULL);
	/* Wait for readers that may still hold the old pointer. */
	synchronize_net();
	/* Pass 2: now the sockets can safely be freed via the stash. */
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->xfrm.nlsk_stash);
}
3565
/* Pernet hooks; exit_batch (not exit) so one synchronize_net() serves
 * all namespaces torn down together.
 */
static struct pernet_operations xfrm_user_net_ops = {
	.init	    = xfrm_user_net_init,
	.exit_batch = xfrm_user_net_exit,
};
3570
3571static int __init xfrm_user_init(void)
3572{
3573	int rv;
3574
3575	printk(KERN_INFO "Initializing XFRM netlink socket\n");
3576
3577	rv = register_pernet_subsys(&xfrm_user_net_ops);
3578	if (rv < 0)
3579		return rv;
3580	rv = xfrm_register_km(&netlink_mgr);
3581	if (rv < 0)
3582		unregister_pernet_subsys(&xfrm_user_net_ops);
3583	return rv;
3584}
3585
/* Module exit: unregister the key manager before tearing down the
 * pernet state, so no km callback can run against a released socket.
 */
static void __exit xfrm_user_exit(void)
{
	xfrm_unregister_km(&netlink_mgr);
	unregister_pernet_subsys(&xfrm_user_net_ops);
}
3591
3592module_init(xfrm_user_init);
3593module_exit(xfrm_user_exit);
3594MODULE_LICENSE("GPL");
3595MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_XFRM);
3596