xref: /kernel/linux/linux-5.10/net/sched/act_mirred.c (revision 8c2ecf20)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/act_mirred.c	packet mirroring and redirect actions
 *
 * Authors:	Jamal Hadi Salim (2002-4)
 *
 * TODO: Add ingress support (and socket redirect support)
 */
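/*
 * Typical userspace usage (a sketch; the device names eth0/eth1 are
 * placeholders):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress matchall \
 *           action mirred egress redirect dev eth1
 *
 * The same action with "mirror" instead of "redirect" sends a clone to the
 * target and leaves the original packet on its normal path.
 */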

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/if_arp.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/dst.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <linux/tc_act/tc_mirred.h>
#include <net/tc_act/tc_mirred.h>

static LIST_HEAD(mirred_list);
static DEFINE_SPINLOCK(mirred_list_lock);

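/* Bound how deeply mirred may nest on one CPU: without this, two mirred
 * actions redirecting packets at each other could recurse without limit.
 * The counter is per-CPU because actions run in softirq context and do not
 * migrate in the middle of a call.
 */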
#define MIRRED_NEST_LIMIT    4
static DEFINE_PER_CPU(unsigned int, mirred_nest_level);

static bool tcf_mirred_is_act_redirect(int action)
{
	return action == TCA_EGRESS_REDIR || action == TCA_INGRESS_REDIR;
}

static bool tcf_mirred_act_wants_ingress(int action)
{
	switch (action) {
	case TCA_EGRESS_REDIR:
	case TCA_EGRESS_MIRROR:
		return false;
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		return true;
	default:
		BUG();
	}
}

static bool tcf_mirred_can_reinsert(int action)
{
	switch (action) {
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_QUEUED:
	case TC_ACT_TRAP:
		return true;
	}
	return false;
}

static struct net_device *tcf_mirred_dev_dereference(struct tcf_mirred *m)
{
	return rcu_dereference_protected(m->tcfm_dev,
					 lockdep_is_held(&m->tcf_lock));
}

static void tcf_mirred_release(struct tc_action *a)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	spin_lock(&mirred_list_lock);
	list_del(&m->tcfm_list);
	spin_unlock(&mirred_list_lock);

	/* last reference to action, no need to lock */
	dev = rcu_dereference_protected(m->tcfm_dev, 1);
	if (dev)
		dev_put(dev);
}

static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
	[TCA_MIRRED_PARMS]	= { .len = sizeof(struct tc_mirred) },
};

static unsigned int mirred_net_id;
static struct tc_action_ops act_mirred_ops;

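/* Netlink .init handler: validate TCA_MIRRED_PARMS, create the action or
 * look up an existing one by index, resolve the target ifindex, and swap
 * the device pointer in under tcf_lock so the datapath never sees a
 * half-updated action.
 */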
static int tcf_mirred_init(struct net *net, struct nlattr *nla,
			   struct nlattr *est, struct tc_action **a,
			   int ovr, int bind, bool rtnl_held,
			   struct tcf_proto *tp,
			   u32 flags, struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);
	struct nlattr *tb[TCA_MIRRED_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	bool mac_header_xmit = false;
	struct tc_mirred *parm;
	struct tcf_mirred *m;
	struct net_device *dev;
	bool exists = false;
	int ret, err;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Mirred requires attributes to be passed");
		return -EINVAL;
	}
	ret = nla_parse_nested_deprecated(tb, TCA_MIRRED_MAX, nla,
					  mirred_policy, extack);
	if (ret < 0)
		return ret;
	if (!tb[TCA_MIRRED_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required mirred parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_MIRRED_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;
	exists = err;
	if (exists && bind)
		return 0;

	switch (parm->eaction) {
	case TCA_EGRESS_MIRROR:
	case TCA_EGRESS_REDIR:
	case TCA_INGRESS_REDIR:
	case TCA_INGRESS_MIRROR:
		break;
	default:
		if (exists)
			tcf_idr_release(*a, bind);
		else
			tcf_idr_cleanup(tn, index);
		NL_SET_ERR_MSG_MOD(extack, "Unknown mirred option");
		return -EINVAL;
	}

	if (!exists) {
		if (!parm->ifindex) {
			tcf_idr_cleanup(tn, index);
			NL_SET_ERR_MSG_MOD(extack, "Specified device does not exist");
			return -EINVAL;
		}
		ret = tcf_idr_create_from_flags(tn, index, est, a,
						&act_mirred_ops, bind, flags);
		if (ret) {
			tcf_idr_cleanup(tn, index);
			return ret;
		}
		ret = ACT_P_CREATED;
	} else if (!ovr) {
		tcf_idr_release(*a, bind);
		return -EEXIST;
	}

	m = to_mirred(*a);
	if (ret == ACT_P_CREATED)
		INIT_LIST_HEAD(&m->tcfm_list);

	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto release_idr;

	spin_lock_bh(&m->tcf_lock);

	if (parm->ifindex) {
		dev = dev_get_by_index(net, parm->ifindex);
		if (!dev) {
			spin_unlock_bh(&m->tcf_lock);
			err = -ENODEV;
			goto put_chain;
		}
		mac_header_xmit = dev_is_mac_header_xmit(dev);
		dev = rcu_replace_pointer(m->tcfm_dev, dev,
					  lockdep_is_held(&m->tcf_lock));
		if (dev)
			dev_put(dev);
		m->tcfm_mac_header_xmit = mac_header_xmit;
	}
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	m->tcfm_eaction = parm->eaction;
	spin_unlock_bh(&m->tcf_lock);
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);

	if (ret == ACT_P_CREATED) {
		spin_lock(&mirred_list_lock);
		list_add(&m->tcfm_list, &mirred_list);
		spin_unlock(&mirred_list_lock);
	}

	return ret;
put_chain:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
release_idr:
	tcf_idr_release(*a, bind);
	return err;
}

static bool is_mirred_nested(void)
{
	return unlikely(__this_cpu_read(mirred_nest_level) > 1);
}

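/* Hand the packet to the target: egress goes through dev_queue_xmit(),
 * ingress is re-received locally. When mirred is already nested on this
 * CPU, use netif_rx() so the packet is queued to the backlog rather than
 * processed synchronously, which keeps the stack depth bounded.
 */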
static int tcf_mirred_forward(bool want_ingress, struct sk_buff *skb)
{
	int err;

	if (!want_ingress)
		err = dev_queue_xmit(skb);
	else if (is_mirred_nested())
		err = netif_rx(skb);
	else
		err = netif_receive_skb(skb);

	return err;
}

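/* Datapath entry point. Tracks per-CPU nesting depth to catch redirect
 * loops, clones the skb unless the caller can safely give it up
 * ("reinsert"), aligns skb->data with what the target expects (mac vs.
 * network header) and forwards the result to the target device.
 */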
static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
			  struct tcf_result *res)
{
	struct tcf_mirred *m = to_mirred(a);
	struct sk_buff *skb2 = skb;
	bool m_mac_header_xmit;
	struct net_device *dev;
	unsigned int nest_level;
	int retval, err = 0;
	bool use_reinsert;
	bool want_ingress;
	bool is_redirect;
	bool expects_nh;
	bool at_ingress;
	int m_eaction;
	int mac_len;
	bool at_nh;

	nest_level = __this_cpu_inc_return(mirred_nest_level);
	if (unlikely(nest_level > MIRRED_NEST_LIMIT)) {
		net_warn_ratelimited("Packet exceeded mirred recursion limit on dev %s\n",
				     netdev_name(skb->dev));
		__this_cpu_dec(mirred_nest_level);
		return TC_ACT_SHOT;
	}

	tcf_lastuse_update(&m->tcf_tm);
	tcf_action_update_bstats(&m->common, skb);

	m_mac_header_xmit = READ_ONCE(m->tcfm_mac_header_xmit);
	m_eaction = READ_ONCE(m->tcfm_eaction);
	retval = READ_ONCE(m->tcf_action);
	dev = rcu_dereference_bh(m->tcfm_dev);
	if (unlikely(!dev)) {
		pr_notice_once("tc mirred: target device is gone\n");
		goto out;
	}

	if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
		net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
				       dev->name);
		goto out;
	}

	/* we could easily avoid the clone only if called by ingress and clsact;
	 * since we can't easily detect the clsact caller, skip clone only for
	 * ingress - that covers the TC S/W datapath.
	 */
	is_redirect = tcf_mirred_is_act_redirect(m_eaction);
	at_ingress = skb_at_tc_ingress(skb);
	use_reinsert = at_ingress && is_redirect &&
		       tcf_mirred_can_reinsert(retval);
	if (!use_reinsert) {
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2)
			goto out;
	}

	want_ingress = tcf_mirred_act_wants_ingress(m_eaction);

	/* All mirred/redirected skbs should clear previous ct info */
	nf_reset_ct(skb2);
	if (want_ingress && !at_ingress) /* drop dst for egress -> ingress */
		skb_dst_drop(skb2);

	expects_nh = want_ingress || !m_mac_header_xmit;
	at_nh = skb->data == skb_network_header(skb);
	if (at_nh != expects_nh) {
		mac_len = skb_at_tc_ingress(skb) ? skb->mac_len :
			  skb_network_header(skb) - skb_mac_header(skb);
		if (expects_nh) {
			/* target device/action expect data at nh */
			skb_pull_rcsum(skb2, mac_len);
		} else {
			/* target device/action expect data at mac */
			skb_push_rcsum(skb2, mac_len);
		}
	}

	skb2->skb_iif = skb->dev->ifindex;
	skb2->dev = dev;

	/* mirror is always swallowed */
	if (is_redirect) {
		skb_set_redirected(skb2, skb2->tc_at_ingress);

		/* let the caller reinsert the packet, if possible */
		if (use_reinsert) {
			res->ingress = want_ingress;
			err = tcf_mirred_forward(res->ingress, skb);
			if (err)
				tcf_action_inc_overlimit_qstats(&m->common);
			__this_cpu_dec(mirred_nest_level);
			return TC_ACT_CONSUMED;
		}
	}

	err = tcf_mirred_forward(want_ingress, skb2);
	if (err) {
out:
		tcf_action_inc_overlimit_qstats(&m->common);
		if (tcf_mirred_is_act_redirect(m_eaction))
			retval = TC_ACT_SHOT;
	}
	__this_cpu_dec(mirred_nest_level);

	return retval;
}

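/* .stats_update callback: drivers that offload this action report their
 * hardware counters here; fold them in and refresh the last-used stamp.
 */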
static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_mirred *m = to_mirred(a);
	struct tcf_t *tm = &m->tcf_tm;

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	tm->lastuse = max_t(u64, tm->lastuse, lastuse);
}

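/* Dump the action back to userspace; tcf_lock keeps action, eaction and
 * ifindex consistent while they are copied into the netlink message.
 */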
static int tcf_mirred_dump(struct sk_buff *skb, struct tc_action *a, int bind,
			   int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_mirred *m = to_mirred(a);
	struct tc_mirred opt = {
		.index   = m->tcf_index,
		.refcnt  = refcount_read(&m->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&m->tcf_bindcnt) - bind,
	};
	struct net_device *dev;
	struct tcf_t t;

	spin_lock_bh(&m->tcf_lock);
	opt.action = m->tcf_action;
	opt.eaction = m->tcfm_eaction;
	dev = tcf_mirred_dev_dereference(m);
	if (dev)
		opt.ifindex = dev->ifindex;

	if (nla_put(skb, TCA_MIRRED_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &m->tcf_tm);
	if (nla_put_64bit(skb, TCA_MIRRED_TM, sizeof(t), &t, TCA_MIRRED_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&m->tcf_lock);

	return skb->len;

nla_put_failure:
	spin_unlock_bh(&m->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_mirred_walker(struct net *net, struct sk_buff *skb,
			     struct netlink_callback *cb, int type,
			     const struct tc_action_ops *ops,
			     struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_mirred_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tcf_idr_search(tn, a, index);
}

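/* Netdevice notifier: when a target device unregisters, drop our reference
 * and clear tcfm_dev so the datapath sees NULL rather than a stale pointer.
 */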
static int mirred_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct tcf_mirred *m;

	ASSERT_RTNL();
	if (event == NETDEV_UNREGISTER) {
		spin_lock(&mirred_list_lock);
		list_for_each_entry(m, &mirred_list, tcfm_list) {
			spin_lock_bh(&m->tcf_lock);
			if (tcf_mirred_dev_dereference(m) == dev) {
				dev_put(dev);
				/* Note: no RCU grace period is necessary, as
				 * the net_device is already RCU protected.
				 */
				RCU_INIT_POINTER(m->tcfm_dev, NULL);
			}
			spin_unlock_bh(&m->tcf_lock);
		}
		spin_unlock(&mirred_list_lock);
	}

	return NOTIFY_DONE;
}

static struct notifier_block mirred_device_notifier = {
	.notifier_call = mirred_device_event,
};

static void tcf_mirred_dev_put(void *priv)
{
	struct net_device *dev = priv;

	dev_put(dev);
}

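/* .get_dev callback: lets callers such as the hardware offload path take a
 * reference on the current target device; the returned destructor releases
 * that reference.
 */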
static struct net_device *
tcf_mirred_get_dev(const struct tc_action *a,
		   tc_action_priv_destructor *destructor)
{
	struct tcf_mirred *m = to_mirred(a);
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(m->tcfm_dev);
	if (dev) {
		dev_hold(dev);
		*destructor = tcf_mirred_dev_put;
	}
	rcu_read_unlock();

	return dev;
}

static size_t tcf_mirred_get_fill_size(const struct tc_action *act)
{
	return nla_total_size(sizeof(struct tc_mirred));
}

static struct tc_action_ops act_mirred_ops = {
	.kind		=	"mirred",
	.id		=	TCA_ID_MIRRED,
	.owner		=	THIS_MODULE,
	.act		=	tcf_mirred_act,
	.stats_update	=	tcf_stats_update,
	.dump		=	tcf_mirred_dump,
	.cleanup	=	tcf_mirred_release,
	.init		=	tcf_mirred_init,
	.walk		=	tcf_mirred_walker,
	.lookup		=	tcf_mirred_search,
	.get_fill_size	=	tcf_mirred_get_fill_size,
	.size		=	sizeof(struct tcf_mirred),
	.get_dev	=	tcf_mirred_get_dev,
};

static __net_init int mirred_init_net(struct net *net)
{
	struct tc_action_net *tn = net_generic(net, mirred_net_id);

	return tc_action_net_init(net, tn, &act_mirred_ops);
}

static void __net_exit mirred_exit_net(struct list_head *net_list)
{
	tc_action_net_exit(net_list, mirred_net_id);
}

static struct pernet_operations mirred_net_ops = {
	.init = mirred_init_net,
	.exit_batch = mirred_exit_net,
	.id   = &mirred_net_id,
	.size = sizeof(struct tc_action_net),
};

MODULE_AUTHOR("Jamal Hadi Salim(2002)");
MODULE_DESCRIPTION("Device Mirror/redirect actions");
MODULE_LICENSE("GPL");

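/* Register the netdevice notifier before the action so that device
 * unregistration is always handled once the action is visible; unwind the
 * notifier if action registration fails.
 */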
static int __init mirred_init_module(void)
{
	int err = register_netdevice_notifier(&mirred_device_notifier);
	if (err)
		return err;

	pr_info("Mirror/redirect action on\n");
	err = tcf_register_action(&act_mirred_ops, &mirred_net_ops);
	if (err)
		unregister_netdevice_notifier(&mirred_device_notifier);

	return err;
}

static void __exit mirred_cleanup_module(void)
{
	tcf_unregister_action(&act_mirred_ops, &mirred_net_ops);
	unregister_netdevice_notifier(&mirred_device_notifier);
}

module_init(mirred_init_module);
module_exit(mirred_cleanup_module);