// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* -
 * net/sched/act_ct.c  Connection Tracking action
 *
 * Authors:   Paul Blakey <paulb@mellanox.com>
 *            Yossi Kuperman <yossiku@mellanox.com>
 *            Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
 */

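/* Illustrative usage from tc(8) (the exact command shape below is an
 * assumption, following the flower + ct action documentation):
 *
 *   tc filter add dev eth0 ingress prio 1 proto ip flower \
 *       ct_state -trk \
 *       action ct zone 1 pipe action goto chain 1
 *
 * Untracked packets are run through conntrack in zone 1; a later rule
 * in chain 1 can then match on +trk+est and the conntrack mark/labels.
 */
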
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/pkt_cls.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/rhashtable.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/ip.h>
#include <net/ipv6_frag.h>
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>

#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <uapi/linux/netfilter/nf_nat.h>

static struct workqueue_struct *act_ct_wq;
static struct rhashtable zones_ht;
static DEFINE_MUTEX(zones_mutex);

struct tcf_ct_flow_table {
	struct rhash_head node; /* In zones tables */

	struct rcu_work rwork;
	struct nf_flowtable nf_ft;
	refcount_t ref;
	u16 zone;

	bool dying;
};

static const struct rhashtable_params zones_params = {
	.head_offset = offsetof(struct tcf_ct_flow_table, node),
	.key_offset = offsetof(struct tcf_ct_flow_table, zone),
	.key_len = sizeof_field(struct tcf_ct_flow_table, zone),
	.automatic_shrinking = true,
};
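
/* Flow tables are shared per conntrack zone: zones_ht is keyed on the
 * zone id, so every ct action instance in the same zone takes a
 * reference on (and reuses) a single nf_flowtable.
 */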

static struct flow_action_entry *
tcf_ct_flow_table_flow_action_get_next(struct flow_action *flow_action)
{
	int i = flow_action->num_entries++;

	return &flow_action->entries[i];
}

static void tcf_ct_add_mangle_action(struct flow_action *action,
				     enum flow_action_mangle_base htype,
				     u32 offset,
				     u32 mask,
				     u32 val)
{
	struct flow_action_entry *entry;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_MANGLE;
	entry->mangle.htype = htype;
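	/* Callers pass the bits to rewrite; flow_action mangle masks
	 * follow the pedit convention (set bits are preserved), hence
	 * the inversion.
	 */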
	entry->mangle.mask = ~mask;
	entry->mangle.offset = offset;
	entry->mangle.val = val;
}

/* The following NAT helper functions check whether the inverted reverse
 * tuple (target) differs from the current direction's tuple - meaning
 * NAT of the ports and/or IP addresses is needed - and add the relevant
 * mangle actions.
 */
static void
tcf_ct_flow_table_add_action_nat_ipv4(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, saddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.src.u3.ip));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP4,
					 offsetof(struct iphdr, daddr),
					 0xFFFFFFFF,
					 be32_to_cpu(target.dst.u3.ip));
}

static void
tcf_ct_add_ipv6_addr_mangle_action(struct flow_action *action,
				   union nf_inet_addr *addr,
				   u32 offset)
{
	int i;

	for (i = 0; i < sizeof(struct in6_addr) / sizeof(u32); i++)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_IP6,
					 i * sizeof(u32) + offset,
					 0xFFFFFFFF, be32_to_cpu(addr->ip6[i]));
}

static void
tcf_ct_flow_table_add_action_nat_ipv6(const struct nf_conntrack_tuple *tuple,
				      struct nf_conntrack_tuple target,
				      struct flow_action *action)
{
	if (memcmp(&target.src.u3, &tuple->src.u3, sizeof(target.src.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.src.u3,
						   offsetof(struct ipv6hdr,
							    saddr));
	if (memcmp(&target.dst.u3, &tuple->dst.u3, sizeof(target.dst.u3)))
		tcf_ct_add_ipv6_addr_mangle_action(action, &target.dst.u3,
						   offsetof(struct ipv6hdr,
							    daddr));
}

static void
tcf_ct_flow_table_add_action_nat_tcp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.tcp.port;
	__be16 target_dst = target.dst.u.tcp.port;

	if (target_src != tuple->src.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.tcp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_TCP,
					 offsetof(struct tcphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void
tcf_ct_flow_table_add_action_nat_udp(const struct nf_conntrack_tuple *tuple,
				     struct nf_conntrack_tuple target,
				     struct flow_action *action)
{
	__be16 target_src = target.src.u.udp.port;
	__be16 target_dst = target.dst.u.udp.port;

	if (target_src != tuple->src.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, source),
					 0xFFFF, be16_to_cpu(target_src));
	if (target_dst != tuple->dst.u.udp.port)
		tcf_ct_add_mangle_action(action, FLOW_ACT_MANGLE_HDR_TYPE_UDP,
					 offsetof(struct udphdr, dest),
					 0xFFFF, be16_to_cpu(target_dst));
}

static void tcf_ct_flow_table_add_action_meta(struct nf_conn *ct,
					      enum ip_conntrack_dir dir,
					      struct flow_action *action)
{
	struct nf_conn_labels *ct_labels;
	struct flow_action_entry *entry;
	enum ip_conntrack_info ctinfo;
	u32 *act_ct_labels;

	entry = tcf_ct_flow_table_flow_action_get_next(action);
	entry->id = FLOW_ACTION_CT_METADATA;
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	entry->ct_metadata.mark = READ_ONCE(ct->mark);
#endif
	ctinfo = dir == IP_CT_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
					     IP_CT_ESTABLISHED_REPLY;
	/* The cookie mirrors how nf_ct_set() packs the ct pointer and
	 * ctinfo into skb->_nfct.
	 */
	entry->ct_metadata.cookie = (unsigned long)ct | ctinfo;

	act_ct_labels = entry->ct_metadata.labels;
	ct_labels = nf_ct_labels_find(ct);
	if (ct_labels)
		memcpy(act_ct_labels, ct_labels->bits, NF_CT_LABELS_MAX_SIZE);
	else
		memset(act_ct_labels, 0, NF_CT_LABELS_MAX_SIZE);
}

194
195static int tcf_ct_flow_table_add_action_nat(struct net *net,
196					    struct nf_conn *ct,
197					    enum ip_conntrack_dir dir,
198					    struct flow_action *action)
199{
200	const struct nf_conntrack_tuple *tuple = &ct->tuplehash[dir].tuple;
201	struct nf_conntrack_tuple target;
202
203	if (!(ct->status & IPS_NAT_MASK))
204		return 0;
205
206	nf_ct_invert_tuple(&target, &ct->tuplehash[!dir].tuple);
207
208	switch (tuple->src.l3num) {
209	case NFPROTO_IPV4:
210		tcf_ct_flow_table_add_action_nat_ipv4(tuple, target,
211						      action);
212		break;
213	case NFPROTO_IPV6:
214		tcf_ct_flow_table_add_action_nat_ipv6(tuple, target,
215						      action);
216		break;
217	default:
218		return -EOPNOTSUPP;
219	}
220
221	switch (nf_ct_protonum(ct)) {
222	case IPPROTO_TCP:
223		tcf_ct_flow_table_add_action_nat_tcp(tuple, target, action);
224		break;
225	case IPPROTO_UDP:
226		tcf_ct_flow_table_add_action_nat_udp(tuple, target, action);
227		break;
228	default:
229		return -EOPNOTSUPP;
230	}
231
232	return 0;
233}

static int tcf_ct_flow_table_fill_actions(struct net *net,
					  const struct flow_offload *flow,
					  enum flow_offload_tuple_dir tdir,
					  struct nf_flow_rule *flow_rule)
{
	struct flow_action *action = &flow_rule->rule->action;
	int num_entries = action->num_entries;
	struct nf_conn *ct = flow->ct;
	enum ip_conntrack_dir dir;
	int i, err;

	switch (tdir) {
	case FLOW_OFFLOAD_DIR_ORIGINAL:
		dir = IP_CT_DIR_ORIGINAL;
		break;
	case FLOW_OFFLOAD_DIR_REPLY:
		dir = IP_CT_DIR_REPLY;
		break;
	default:
		return -EOPNOTSUPP;
	}

	err = tcf_ct_flow_table_add_action_nat(net, ct, dir, action);
	if (err)
		goto err_nat;

	tcf_ct_flow_table_add_action_meta(ct, dir, action);
	return 0;

err_nat:
	/* Clear filled actions */
	for (i = num_entries; i < action->num_entries; i++)
		memset(&action->entries[i], 0, sizeof(action->entries[i]));
	action->num_entries = num_entries;

	return err;
}

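/* nf_flow_table invokes .action when a flow is being programmed into
 * hardware; it fills the flow_rule with the NAT mangles and conntrack
 * metadata built by the helpers above.
 */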
static struct nf_flowtable_type flowtable_ct = {
	.action		= tcf_ct_flow_table_fill_actions,
	.owner		= THIS_MODULE,
};

static int tcf_ct_flow_table_get(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft;
	int err = -ENOMEM;

	mutex_lock(&zones_mutex);
	ct_ft = rhashtable_lookup_fast(&zones_ht, &params->zone, zones_params);
	if (ct_ft && refcount_inc_not_zero(&ct_ft->ref))
		goto out_unlock;

	ct_ft = kzalloc(sizeof(*ct_ft), GFP_KERNEL);
	if (!ct_ft)
		goto err_alloc;
	refcount_set(&ct_ft->ref, 1);

	ct_ft->zone = params->zone;
	err = rhashtable_insert_fast(&zones_ht, &ct_ft->node, zones_params);
	if (err)
		goto err_insert;

	ct_ft->nf_ft.type = &flowtable_ct;
	ct_ft->nf_ft.flags |= NF_FLOWTABLE_HW_OFFLOAD;
	err = nf_flow_table_init(&ct_ft->nf_ft);
	if (err)
		goto err_init;

	__module_get(THIS_MODULE);
out_unlock:
	params->ct_ft = ct_ft;
	params->nf_ft = &ct_ft->nf_ft;
	mutex_unlock(&zones_mutex);

	return 0;

err_init:
	rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
err_insert:
	kfree(ct_ft);
err_alloc:
	mutex_unlock(&zones_mutex);
	return err;
}

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct flow_block_cb *block_cb, *tmp_cb;
	struct tcf_ct_flow_table *ct_ft;
	struct flow_block *block;

	ct_ft = container_of(to_rcu_work(work), struct tcf_ct_flow_table,
			     rwork);
	nf_flow_table_free(&ct_ft->nf_ft);

	/* Remove any remaining callbacks before cleanup */
	block = &ct_ft->nf_ft.flow_block;
	down_write(&ct_ft->nf_ft.flow_block_lock);
	list_for_each_entry_safe(block_cb, tmp_cb, &block->cb_list, list) {
		list_del(&block_cb->list);
		flow_block_cb_free(block_cb);
	}
	up_write(&ct_ft->nf_ft.flow_block_lock);
	kfree(ct_ft);

	module_put(THIS_MODULE);
}

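/* Dropping the last reference defers teardown through queue_rcu_work():
 * datapath lookups dereference the table under RCU, so freeing must
 * wait for a grace period.
 */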
static void tcf_ct_flow_table_put(struct tcf_ct_params *params)
{
	struct tcf_ct_flow_table *ct_ft = params->ct_ft;

	if (refcount_dec_and_test(&params->ct_ft->ref)) {
		rhashtable_remove_fast(&zones_ht, &ct_ft->node, zones_params);
		INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
		queue_rcu_work(act_ct_wq, &ct_ft->rwork);
	}
}

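/* Offload an established connection into the zone's flow table.
 * IPS_OFFLOAD_BIT doubles as the insertion guard: if it is already
 * set, another path owns the offload.
 */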
static void tcf_ct_flow_table_add(struct tcf_ct_flow_table *ct_ft,
				  struct nf_conn *ct,
				  bool tcp)
{
	struct flow_offload *entry;
	int err;

	if (test_and_set_bit(IPS_OFFLOAD_BIT, &ct->status))
		return;

	entry = flow_offload_alloc(ct);
	if (!entry) {
		WARN_ON_ONCE(1);
		goto err_alloc;
	}

	if (tcp) {
		ct->proto.tcp.seen[0].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
		ct->proto.tcp.seen[1].flags |= IP_CT_TCP_FLAG_BE_LIBERAL;
	}

	err = flow_offload_add(&ct_ft->nf_ft, entry);
	if (err)
		goto err_add;

	return;

err_add:
	flow_offload_free(entry);
err_alloc:
	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
}

static void tcf_ct_flow_table_process_conn(struct tcf_ct_flow_table *ct_ft,
					   struct nf_conn *ct,
					   enum ip_conntrack_info ctinfo)
{
	bool tcp = false;

	if (ctinfo != IP_CT_ESTABLISHED && ctinfo != IP_CT_ESTABLISHED_REPLY)
		return;

	switch (nf_ct_protonum(ct)) {
	case IPPROTO_TCP:
		tcp = true;
		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED)
			return;
		break;
	case IPPROTO_UDP:
		break;
	default:
		return;
	}

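	/* Helpers and TCP sequence adjustment rewrite packet payloads,
	 * which the flowtable fast path cannot do; keep such flows on
	 * the slow path.
	 */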
	if (nf_ct_ext_exist(ct, NF_CT_EXT_HELPER) ||
	    ct->status & IPS_SEQ_ADJUST)
		return;

	tcf_ct_flow_table_add(ct_ft, ct, tcp);
}

static bool
tcf_ct_flow_table_fill_tuple_ipv4(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	unsigned int thoff;
	struct iphdr *iph;

	if (!pskb_network_may_pull(skb, sizeof(*iph)))
		return false;

	iph = ip_hdr(skb);
	thoff = iph->ihl * 4;

	if (ip_is_fragment(iph) ||
	    unlikely(thoff != sizeof(struct iphdr)))
		return false;

	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return false;

	if (iph->ttl <= 1)
		return false;

	if (!pskb_network_may_pull(skb, iph->protocol == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	iph = ip_hdr(skb);
	if (iph->protocol == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v4.s_addr = iph->saddr;
	tuple->dst_v4.s_addr = iph->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET;
	tuple->l4proto = iph->protocol;

	return true;
}

static bool
tcf_ct_flow_table_fill_tuple_ipv6(struct sk_buff *skb,
				  struct flow_offload_tuple *tuple,
				  struct tcphdr **tcph)
{
	struct flow_ports *ports;
	struct ipv6hdr *ip6h;
	unsigned int thoff;

	if (!pskb_network_may_pull(skb, sizeof(*ip6h)))
		return false;

	ip6h = ipv6_hdr(skb);

	if (ip6h->nexthdr != IPPROTO_TCP &&
	    ip6h->nexthdr != IPPROTO_UDP)
		return false;

	if (ip6h->hop_limit <= 1)
		return false;

	thoff = sizeof(*ip6h);
	if (!pskb_network_may_pull(skb, ip6h->nexthdr == IPPROTO_TCP ?
					thoff + sizeof(struct tcphdr) :
					thoff + sizeof(*ports)))
		return false;

	ip6h = ipv6_hdr(skb);
	if (ip6h->nexthdr == IPPROTO_TCP)
		*tcph = (void *)(skb_network_header(skb) + thoff);

	ports = (struct flow_ports *)(skb_network_header(skb) + thoff);
	tuple->src_v6 = ip6h->saddr;
	tuple->dst_v6 = ip6h->daddr;
	tuple->src_port = ports->source;
	tuple->dst_port = ports->dest;
	tuple->l3proto = AF_INET6;
	tuple->l4proto = ip6h->nexthdr;

	return true;
}

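/* Fast path: if the packet matches a flow already offloaded to the
 * zone's flow table, attach the conntrack entry and update counters
 * directly, skipping the nf_conntrack_in() lookup.
 */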
static bool tcf_ct_flow_table_lookup(struct tcf_ct_params *p,
				     struct sk_buff *skb,
				     u8 family)
{
	struct nf_flowtable *nf_ft = &p->ct_ft->nf_ft;
	struct flow_offload_tuple_rhash *tuplehash;
	struct flow_offload_tuple tuple = {};
	enum ip_conntrack_info ctinfo;
	struct tcphdr *tcph = NULL;
	struct flow_offload *flow;
	struct nf_conn *ct;
	u8 dir;

	switch (family) {
	case NFPROTO_IPV4:
		if (!tcf_ct_flow_table_fill_tuple_ipv4(skb, &tuple, &tcph))
			return false;
		break;
	case NFPROTO_IPV6:
		if (!tcf_ct_flow_table_fill_tuple_ipv6(skb, &tuple, &tcph))
			return false;
		break;
	default:
		return false;
	}

	tuplehash = flow_offload_lookup(nf_ft, &tuple);
	if (!tuplehash)
		return false;

	dir = tuplehash->tuple.dir;
	flow = container_of(tuplehash, struct flow_offload, tuplehash[dir]);
	ct = flow->ct;

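	/* FIN/RST means the flow is closing: tear the offload down and
	 * fall back to the slow path so conntrack observes the shutdown.
	 */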
	if (tcph && (unlikely(tcph->fin || tcph->rst))) {
		flow_offload_teardown(flow);
		return false;
	}

	ctinfo = dir == FLOW_OFFLOAD_DIR_ORIGINAL ? IP_CT_ESTABLISHED :
						    IP_CT_ESTABLISHED_REPLY;

	flow_offload_refresh(nf_ft, flow);
	nf_conntrack_get(&ct->ct_general);
	nf_ct_set(skb, ct, ctinfo);
	nf_ct_acct_update(ct, dir, skb->len);

	return true;
}

static int tcf_ct_flow_tables_init(void)
{
	return rhashtable_init(&zones_ht, &zones_params);
}

static void tcf_ct_flow_tables_uninit(void)
{
	rhashtable_destroy(&zones_ht);
}

static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;

struct tc_ct_action_net {
	struct tc_action_net tn; /* Must be first */
	bool labels;
};

/* Determine whether skb->_nfct already corresponds to the result of a
 * conntrack lookup in this zone.
 */
static bool tcf_ct_skb_nfct_cached(struct net *net, struct sk_buff *skb,
				   u16 zone_id, bool force)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		return false;
	if (!net_eq(net, read_pnet(&ct->ct_net)))
		return false;
	if (nf_ct_zone(ct)->id != zone_id)
		return false;

	/* Force conntrack entry direction. */
	if (force && CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL) {
		if (nf_ct_is_confirmed(ct))
			nf_ct_kill(ct);

		nf_conntrack_put(&ct->ct_general);
		nf_ct_set(skb, NULL, IP_CT_UNTRACKED);

		return false;
	}

	return true;
}

/* Trim the skb to the length specified by the IP/IPv6 header,
 * removing any trailing lower-layer padding. This prepares the skb
 * for higher-layer processing that assumes skb->len excludes padding
 * (such as nf_ip_checksum). The caller needs to pull the skb to the
 * network header, and ensure ip_hdr/ipv6_hdr points to valid data.
 */
static int tcf_ct_skb_network_trim(struct sk_buff *skb, int family)
{
	unsigned int len;

	switch (family) {
	case NFPROTO_IPV4:
		len = ntohs(ip_hdr(skb)->tot_len);
		break;
	case NFPROTO_IPV6:
		len = sizeof(struct ipv6hdr)
			+ ntohs(ipv6_hdr(skb)->payload_len);
		break;
	default:
		len = skb->len;
	}

	return pskb_trim_rcsum(skb, len);
}

static u8 tcf_ct_skb_nf_family(struct sk_buff *skb)
{
	u8 family = NFPROTO_UNSPEC;

	switch (skb_protocol(skb, true)) {
	case htons(ETH_P_IP):
		family = NFPROTO_IPV4;
		break;
	case htons(ETH_P_IPV6):
		family = NFPROTO_IPV6;
		break;
	default:
		break;
	}

	return family;
}

static int tcf_ct_ipv4_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int len;

	len = skb_network_offset(skb) + sizeof(struct iphdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	*frag = ip_is_fragment(ip_hdr(skb));
	return 0;
}

static int tcf_ct_ipv6_is_fragment(struct sk_buff *skb, bool *frag)
{
	unsigned int flags = 0, len, payload_ofs = 0;
	unsigned short frag_off;
	int nexthdr;

	len = skb_network_offset(skb) + sizeof(struct ipv6hdr);
	if (unlikely(skb->len < len))
		return -EINVAL;
	if (unlikely(!pskb_may_pull(skb, len)))
		return -ENOMEM;

	nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags);
	if (unlikely(nexthdr < 0))
		return -EPROTO;

	*frag = flags & IP6_FH_F_FRAG;
	return 0;
}

static int tcf_ct_handle_fragments(struct net *net, struct sk_buff *skb,
				   u8 family, u16 zone, bool *defrag)
{
	enum ip_conntrack_info ctinfo;
	struct qdisc_skb_cb cb;
	struct nf_conn *ct;
	int err = 0;
	bool frag;

	/* Previously seen (loopback)? Ignore. */
	ct = nf_ct_get(skb, &ctinfo);
	if ((ct && !nf_ct_is_template(ct)) || ctinfo == IP_CT_UNTRACKED)
		return 0;

	if (family == NFPROTO_IPV4)
		err = tcf_ct_ipv4_is_fragment(skb, &frag);
	else
		err = tcf_ct_ipv6_is_fragment(skb, &frag);
	if (err || !frag)
		return err;

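	/* The defrag engines reuse skb->cb, so stash the qdisc cb here
	 * and restore it below together with the recorded maximum
	 * received fragment size (MRU).
	 */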
	skb_get(skb);
	cb = *qdisc_skb_cb(skb);

	if (family == NFPROTO_IPV4) {
		enum ip_defrag_users user = IP_DEFRAG_CONNTRACK_IN + zone;

		memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
		local_bh_disable();
		err = ip_defrag(net, skb, user);
		local_bh_enable();
		if (err && err != -EINPROGRESS)
			return err;

		if (!err) {
			*defrag = true;
			cb.mru = IPCB(skb)->frag_max_size;
		}
	} else { /* NFPROTO_IPV6 */
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
		enum ip6_defrag_users user = IP6_DEFRAG_CONNTRACK_IN + zone;

		memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
		err = nf_ct_frag6_gather(net, skb, user);
		if (err && err != -EINPROGRESS)
			goto out_free;

		if (!err) {
			*defrag = true;
			cb.mru = IP6CB(skb)->frag_max_size;
		}
#else
		err = -EOPNOTSUPP;
		goto out_free;
#endif
	}

	*qdisc_skb_cb(skb) = cb;
	skb_clear_hash(skb);
	skb->ignore_df = 1;
	return err;

out_free:
	kfree_skb(skb);
	return err;
}

static void tcf_ct_params_free(struct rcu_head *head)
{
	struct tcf_ct_params *params = container_of(head,
						    struct tcf_ct_params, rcu);

	tcf_ct_flow_table_put(params);

	if (params->tmpl)
		nf_conntrack_put(&params->tmpl->ct_general);
	kfree(params);
}

#if IS_ENABLED(CONFIG_NF_NAT)
/* Modelled after nf_nat_ipv[46]_fn().
 * range is only used for new, uninitialized NAT state.
 * Returns either NF_ACCEPT or NF_DROP.
 */
static int ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  const struct nf_nat_range2 *range,
			  enum nf_nat_manip_type maniptype)
{
	__be16 proto = skb_protocol(skb, true);
	int hooknum, err = NF_ACCEPT;

	/* See HOOK2MANIP(). */
	if (maniptype == NF_NAT_MANIP_SRC)
		hooknum = NF_INET_LOCAL_IN; /* Source NAT */
	else
		hooknum = NF_INET_LOCAL_OUT; /* Destination NAT */

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		if (proto == htons(ETH_P_IP) &&
		    ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(skb, ct, ctinfo,
							   hooknum))
				err = NF_DROP;
			goto out;
		} else if (IS_ENABLED(CONFIG_IPV6) && proto == htons(ETH_P_IPV6)) {
			__be16 frag_off;
			u8 nexthdr = ipv6_hdr(skb)->nexthdr;
			int hdrlen = ipv6_skip_exthdr(skb,
						      sizeof(struct ipv6hdr),
						      &nexthdr, &frag_off);

			if (hdrlen >= 0 && nexthdr == IPPROTO_ICMPV6) {
				if (!nf_nat_icmpv6_reply_translation(skb, ct,
								     ctinfo,
								     hooknum,
								     hdrlen))
					err = NF_DROP;
				goto out;
			}
		}
		/* Non-ICMP, fall through to initialize if needed. */
		fallthrough;
	case IP_CT_NEW:
		/* Seen it before?  This can happen for loopback, retrans,
		 * or local packets.
		 */
		if (!nf_nat_initialized(ct, maniptype)) {
			/* Initialize according to the NAT action. */
			err = (range && range->flags & NF_NAT_RANGE_MAP_IPS)
				/* Action is set up to establish a new
				 * mapping.
				 */
				? nf_nat_setup_info(ct, range, maniptype)
				: nf_nat_alloc_null_binding(ct, hooknum);
			if (err != NF_ACCEPT)
				goto out;
		}
		break;

	case IP_CT_ESTABLISHED:
	case IP_CT_ESTABLISHED_REPLY:
		break;

	default:
		err = NF_DROP;
		goto out;
	}

	err = nf_nat_packet(ct, ctinfo, hooknum, skb);
out:
	return err;
}
#endif /* CONFIG_NF_NAT */

static void tcf_ct_act_set_mark(struct nf_conn *ct, u32 mark, u32 mask)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)
	u32 new_mark;

	if (!mask)
		return;

	new_mark = mark | (READ_ONCE(ct->mark) & ~(mask));
	if (READ_ONCE(ct->mark) != new_mark) {
		WRITE_ONCE(ct->mark, new_mark);
		if (nf_ct_is_confirmed(ct))
			nf_conntrack_event_cache(IPCT_MARK, ct);
	}
#endif
}

static void tcf_ct_act_set_labels(struct nf_conn *ct,
				  u32 *labels,
				  u32 *labels_m)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)
	size_t labels_sz = sizeof_field(struct tcf_ct_params, labels);

	if (!memchr_inv(labels_m, 0, labels_sz))
		return;

	nf_connlabels_replace(ct, labels, labels_m, 4);
#endif
}

static int tcf_ct_act_nat(struct sk_buff *skb,
			  struct nf_conn *ct,
			  enum ip_conntrack_info ctinfo,
			  int ct_action,
			  struct nf_nat_range2 *range,
			  bool commit)
{
#if IS_ENABLED(CONFIG_NF_NAT)
	int err;
	enum nf_nat_manip_type maniptype;

	if (!(ct_action & TCA_CT_ACT_NAT))
		return NF_ACCEPT;

	/* Add NAT extension if not confirmed yet. */
	if (!nf_ct_is_confirmed(ct) && !nf_ct_nat_ext_add(ct))
		return NF_DROP;   /* Can't NAT. */

	if (ctinfo != IP_CT_NEW && (ct->status & IPS_NAT_MASK) &&
	    (ctinfo != IP_CT_RELATED || commit)) {
		/* NAT an established or related connection like before. */
		if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY)
			/* This is the REPLY direction for a connection
			 * for which NAT was applied in the forward
			 * direction.  Do the reverse NAT.
			 */
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_DST : NF_NAT_MANIP_SRC;
		else
			maniptype = ct->status & IPS_SRC_NAT
				? NF_NAT_MANIP_SRC : NF_NAT_MANIP_DST;
	} else if (ct_action & TCA_CT_ACT_NAT_SRC) {
		maniptype = NF_NAT_MANIP_SRC;
	} else if (ct_action & TCA_CT_ACT_NAT_DST) {
		maniptype = NF_NAT_MANIP_DST;
	} else {
		return NF_ACCEPT;
	}

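	/* A connection may carry both SNAT and DNAT; after the first
	 * translation succeeds, run the complementary manip type so
	 * both are applied.
	 */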
	err = ct_nat_execute(skb, ct, ctinfo, range, maniptype);
	if (err == NF_ACCEPT && ct->status & IPS_DST_NAT) {
		if (ct->status & IPS_SRC_NAT) {
			if (maniptype == NF_NAT_MANIP_SRC)
				maniptype = NF_NAT_MANIP_DST;
			else
				maniptype = NF_NAT_MANIP_SRC;

			err = ct_nat_execute(skb, ct, ctinfo, range,
					     maniptype);
		} else if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL) {
			err = ct_nat_execute(skb, ct, ctinfo, NULL,
					     NF_NAT_MANIP_SRC);
		}
	}
	return err;
#else
	return NF_ACCEPT;
#endif
}

static int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a,
		      struct tcf_result *res)
{
	struct net *net = dev_net(skb->dev);
	bool cached, commit, clear, force;
	enum ip_conntrack_info ctinfo;
	struct tcf_ct *c = to_ct(a);
	struct nf_conn *tmpl = NULL;
	struct nf_hook_state state;
	int nh_ofs, err, retval;
	struct tcf_ct_params *p;
	bool skip_add = false;
	bool defrag = false;
	struct nf_conn *ct;
	u8 family;

	p = rcu_dereference_bh(c->params);

	retval = READ_ONCE(c->tcf_action);
	commit = p->ct_action & TCA_CT_ACT_COMMIT;
	clear = p->ct_action & TCA_CT_ACT_CLEAR;
	force = p->ct_action & TCA_CT_ACT_FORCE;
	tmpl = p->tmpl;

	tcf_lastuse_update(&c->tcf_tm);

	if (clear) {
		ct = nf_ct_get(skb, &ctinfo);
		if (ct) {
			nf_conntrack_put(&ct->ct_general);
			nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
		}

		goto out;
	}

	family = tcf_ct_skb_nf_family(skb);
	if (family == NFPROTO_UNSPEC)
		goto drop;

	/* The conntrack module expects to be working at L3. Also try to
	 * pull the IPv4/IPv6 header into the linear area.
	 */
	nh_ofs = skb_network_offset(skb);
	skb_pull_rcsum(skb, nh_ofs);
	err = tcf_ct_handle_fragments(net, skb, family, p->zone, &defrag);
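	/* -EINPROGRESS: the fragment was queued for reassembly and the
	 * skb now belongs to the defrag engine.
	 */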
	if (err == -EINPROGRESS) {
		retval = TC_ACT_STOLEN;
		goto out;
	}
	if (err)
		goto drop;

	err = tcf_ct_skb_network_trim(skb, family);
	if (err)
		goto drop;

	/* If we are recirculating packets to match on ct fields and
	 * committing with a separate ct action, then we don't need to
	 * actually run the packet through conntrack twice unless it's for a
	 * different zone.
	 */
	cached = tcf_ct_skb_nfct_cached(net, skb, p->zone, force);
	if (!cached) {
		if (tcf_ct_flow_table_lookup(p, skb, family)) {
			skip_add = true;
			goto do_nat;
		}

		/* Associate skb with specified zone. */
		if (tmpl) {
			ct = nf_ct_get(skb, &ctinfo);
			if (skb_nfct(skb))
				nf_conntrack_put(skb_nfct(skb));
			nf_conntrack_get(&tmpl->ct_general);
			nf_ct_set(skb, tmpl, IP_CT_NEW);
		}

		state.hook = NF_INET_PRE_ROUTING;
		state.net = net;
		state.pf = family;
		err = nf_conntrack_in(skb, &state);
		if (err != NF_ACCEPT)
			goto out_push;
	}

do_nat:
	ct = nf_ct_get(skb, &ctinfo);
	if (!ct)
		goto out_push;
	nf_ct_deliver_cached_events(ct);

	err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit);
	if (err != NF_ACCEPT)
		goto drop;

	if (commit) {
		tcf_ct_act_set_mark(ct, p->mark, p->mark_mask);
		tcf_ct_act_set_labels(ct, p->labels, p->labels_mask);

		/* This will take care of sending queued events
		 * even if the connection is already confirmed.
		 */
		if (nf_conntrack_confirm(skb) != NF_ACCEPT)
			goto drop;
	}

	if (!skip_add)
		tcf_ct_flow_table_process_conn(p->ct_ft, ct, ctinfo);

out_push:
	skb_push_rcsum(skb, nh_ofs);

out:
	tcf_action_update_bstats(&c->common, skb);
	if (defrag)
		qdisc_skb_cb(skb)->pkt_len = skb->len;
	return retval;

drop:
	tcf_action_inc_drop_qstats(&c->common);
	return TC_ACT_SHOT;
}

static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = {
	[TCA_CT_ACTION] = { .type = NLA_U16 },
	[TCA_CT_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct tc_ct)),
	[TCA_CT_ZONE] = { .type = NLA_U16 },
	[TCA_CT_MARK] = { .type = NLA_U32 },
	[TCA_CT_MARK_MASK] = { .type = NLA_U32 },
	[TCA_CT_LABELS] = { .type = NLA_BINARY,
			    .len = 128 / BITS_PER_BYTE },
	[TCA_CT_LABELS_MASK] = { .type = NLA_BINARY,
				 .len = 128 / BITS_PER_BYTE },
	[TCA_CT_NAT_IPV4_MIN] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV4_MAX] = { .type = NLA_U32 },
	[TCA_CT_NAT_IPV6_MIN] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_IPV6_MAX] = NLA_POLICY_EXACT_LEN(sizeof(struct in6_addr)),
	[TCA_CT_NAT_PORT_MIN] = { .type = NLA_U16 },
	[TCA_CT_NAT_PORT_MAX] = { .type = NLA_U16 },
};

static int tcf_ct_fill_params_nat(struct tcf_ct_params *p,
				  struct tc_ct *parm,
				  struct nlattr **tb,
				  struct netlink_ext_ack *extack)
{
	struct nf_nat_range2 *range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!IS_ENABLED(CONFIG_NF_NAT)) {
		NL_SET_ERR_MSG_MOD(extack, "Netfilter nat isn't enabled in kernel");
		return -EOPNOTSUPP;
	}

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if ((p->ct_action & TCA_CT_ACT_NAT_SRC) &&
	    (p->ct_action & TCA_CT_ACT_NAT_DST)) {
		NL_SET_ERR_MSG_MOD(extack, "dnat and snat can't be enabled at the same time");
		return -EOPNOTSUPP;
	}

	range = &p->range;
	if (tb[TCA_CT_NAT_IPV4_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV4_MAX];

		p->ipv4_range = true;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.ip =
			nla_get_in_addr(tb[TCA_CT_NAT_IPV4_MIN]);

		range->max_addr.ip = max_attr ?
				     nla_get_in_addr(max_attr) :
				     range->min_addr.ip;
	} else if (tb[TCA_CT_NAT_IPV6_MIN]) {
		struct nlattr *max_attr = tb[TCA_CT_NAT_IPV6_MAX];

		p->ipv4_range = false;
		range->flags |= NF_NAT_RANGE_MAP_IPS;
		range->min_addr.in6 =
			nla_get_in6_addr(tb[TCA_CT_NAT_IPV6_MIN]);

		range->max_addr.in6 = max_attr ?
				      nla_get_in6_addr(max_attr) :
				      range->min_addr.in6;
	}

	if (tb[TCA_CT_NAT_PORT_MIN]) {
		range->flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
		range->min_proto.all = nla_get_be16(tb[TCA_CT_NAT_PORT_MIN]);

		range->max_proto.all = tb[TCA_CT_NAT_PORT_MAX] ?
				       nla_get_be16(tb[TCA_CT_NAT_PORT_MAX]) :
				       range->min_proto.all;
	}

	return 0;
}

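/* Copy an attribute and its optional mask; a missing mask means all
 * bits are significant, hence the 0xff fill.
 */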
static void tcf_ct_set_key_val(struct nlattr **tb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	if (!tb[val_type])
		return;
	nla_memcpy(val, tb[val_type], len);

	if (!mask)
		return;

	if (mask_type == TCA_CT_UNSPEC || !tb[mask_type])
		memset(mask, 0xff, len);
	else
		nla_memcpy(mask, tb[mask_type], len);
}

static int tcf_ct_fill_params(struct net *net,
			      struct tcf_ct_params *p,
			      struct tc_ct *parm,
			      struct nlattr **tb,
			      struct netlink_ext_ack *extack)
{
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);
	struct nf_conntrack_zone zone;
	struct nf_conn *tmpl;
	int err;

	p->zone = NF_CT_DEFAULT_ZONE_ID;

	tcf_ct_set_key_val(tb,
			   &p->ct_action, TCA_CT_ACTION,
			   NULL, TCA_CT_UNSPEC,
			   sizeof(p->ct_action));

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		return 0;

	err = tcf_ct_fill_params_nat(p, parm, tb, extack);
	if (err)
		return err;

	if (tb[TCA_CT_MARK]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_MARK)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack mark isn't enabled.");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   &p->mark, TCA_CT_MARK,
				   &p->mark_mask, TCA_CT_MARK_MASK,
				   sizeof(p->mark));
	}

	if (tb[TCA_CT_LABELS]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack labels isn't enabled.");
			return -EOPNOTSUPP;
		}

		if (!tn->labels) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to set connlabel length");
			return -EOPNOTSUPP;
		}
		tcf_ct_set_key_val(tb,
				   p->labels, TCA_CT_LABELS,
				   p->labels_mask, TCA_CT_LABELS_MASK,
				   sizeof(p->labels));
	}

	if (tb[TCA_CT_ZONE]) {
		if (!IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES)) {
			NL_SET_ERR_MSG_MOD(extack, "Conntrack zones isn't enabled.");
			return -EOPNOTSUPP;
		}

		tcf_ct_set_key_val(tb,
				   &p->zone, TCA_CT_ZONE,
				   NULL, TCA_CT_UNSPEC,
				   sizeof(p->zone));
	}

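	/* The template entry is attached to packets before the lookup so
	 * that nf_conntrack_in() performs it in the configured zone.
	 */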
	nf_ct_zone_init(&zone, p->zone, NF_CT_DEFAULT_ZONE_DIR, 0);
	tmpl = nf_ct_tmpl_alloc(net, &zone, GFP_KERNEL);
	if (!tmpl) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to allocate conntrack template");
		return -ENOMEM;
	}
	__set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
	nf_conntrack_get(&tmpl->ct_general);
	p->tmpl = tmpl;

	return 0;
}

static int tcf_ct_init(struct net *net, struct nlattr *nla,
		       struct nlattr *est, struct tc_action **a,
		       int replace, int bind, bool rtnl_held,
		       struct tcf_proto *tp, u32 flags,
		       struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);
	struct tcf_ct_params *params = NULL;
	struct nlattr *tb[TCA_CT_MAX + 1];
	struct tcf_chain *goto_ch = NULL;
	struct tc_ct *parm;
	struct tcf_ct *c;
	int err, res = 0;
	u32 index;

	if (!nla) {
		NL_SET_ERR_MSG_MOD(extack, "Ct requires attributes to be passed");
		return -EINVAL;
	}

	err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_CT_PARMS]) {
		NL_SET_ERR_MSG_MOD(extack, "Missing required ct parameters");
		return -EINVAL;
	}
	parm = nla_data(tb[TCA_CT_PARMS]);
	index = parm->index;
	err = tcf_idr_check_alloc(tn, &index, a, bind);
	if (err < 0)
		return err;

	if (!err) {
		err = tcf_idr_create_from_flags(tn, index, est, a,
						&act_ct_ops, bind, flags);
		if (err) {
			tcf_idr_cleanup(tn, index);
			return err;
		}
		res = ACT_P_CREATED;
	} else {
		if (bind)
			return 0;

		if (!replace) {
			tcf_idr_release(*a, bind);
			return -EEXIST;
		}
	}
	err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack);
	if (err < 0)
		goto cleanup;

	c = to_ct(*a);

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (unlikely(!params)) {
		err = -ENOMEM;
		goto cleanup;
	}

	err = tcf_ct_fill_params(net, params, parm, tb, extack);
	if (err)
		goto cleanup;

	err = tcf_ct_flow_table_get(params);
	if (err)
		goto cleanup_params;

	spin_lock_bh(&c->tcf_lock);
	goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch);
	params = rcu_replace_pointer(c->params, params,
				     lockdep_is_held(&c->tcf_lock));
	spin_unlock_bh(&c->tcf_lock);

	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);

	return res;

cleanup_params:
	if (params->tmpl)
		nf_ct_put(params->tmpl);
cleanup:
	if (goto_ch)
		tcf_chain_put_by_act(goto_ch);
	kfree(params);
	tcf_idr_release(*a, bind);
	return err;
}

static void tcf_ct_cleanup(struct tc_action *a)
{
	struct tcf_ct_params *params;
	struct tcf_ct *c = to_ct(a);

	params = rcu_dereference_protected(c->params, 1);
	if (params)
		call_rcu(&params->rcu, tcf_ct_params_free);
}

static int tcf_ct_dump_key_val(struct sk_buff *skb,
			       void *val, int val_type,
			       void *mask, int mask_type,
			       int len)
{
	int err;

	if (mask && !memchr_inv(mask, 0, len))
		return 0;

	err = nla_put(skb, val_type, len, val);
	if (err)
		return err;

	if (mask_type != TCA_CT_UNSPEC) {
		err = nla_put(skb, mask_type, len, mask);
		if (err)
			return err;
	}

	return 0;
}

static int tcf_ct_dump_nat(struct sk_buff *skb, struct tcf_ct_params *p)
{
	struct nf_nat_range2 *range = &p->range;

	if (!(p->ct_action & TCA_CT_ACT_NAT))
		return 0;

	if (!(p->ct_action & (TCA_CT_ACT_NAT_SRC | TCA_CT_ACT_NAT_DST)))
		return 0;

	if (range->flags & NF_NAT_RANGE_MAP_IPS) {
		if (p->ipv4_range) {
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MIN,
					    range->min_addr.ip))
				return -1;
			if (nla_put_in_addr(skb, TCA_CT_NAT_IPV4_MAX,
					    range->max_addr.ip))
				return -1;
		} else {
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MIN,
					     &range->min_addr.in6))
				return -1;
			if (nla_put_in6_addr(skb, TCA_CT_NAT_IPV6_MAX,
					     &range->max_addr.in6))
				return -1;
		}
	}

	if (range->flags & NF_NAT_RANGE_PROTO_SPECIFIED) {
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MIN,
				 range->min_proto.all))
			return -1;
		if (nla_put_be16(skb, TCA_CT_NAT_PORT_MAX,
				 range->max_proto.all))
			return -1;
	}

	return 0;
}

static inline int tcf_ct_dump(struct sk_buff *skb, struct tc_action *a,
			      int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_ct *c = to_ct(a);
	struct tcf_ct_params *p;

	struct tc_ct opt = {
		.index   = c->tcf_index,
		.refcnt  = refcount_read(&c->tcf_refcnt) - ref,
		.bindcnt = atomic_read(&c->tcf_bindcnt) - bind,
	};
	struct tcf_t t;

	spin_lock_bh(&c->tcf_lock);
	p = rcu_dereference_protected(c->params,
				      lockdep_is_held(&c->tcf_lock));
	opt.action = c->tcf_action;

	if (tcf_ct_dump_key_val(skb,
				&p->ct_action, TCA_CT_ACTION,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->ct_action)))
		goto nla_put_failure;

	if (p->ct_action & TCA_CT_ACT_CLEAR)
		goto skip_dump;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_MARK) &&
	    tcf_ct_dump_key_val(skb,
				&p->mark, TCA_CT_MARK,
				&p->mark_mask, TCA_CT_MARK_MASK,
				sizeof(p->mark)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_LABELS) &&
	    tcf_ct_dump_key_val(skb,
				p->labels, TCA_CT_LABELS,
				p->labels_mask, TCA_CT_LABELS_MASK,
				sizeof(p->labels)))
		goto nla_put_failure;

	if (IS_ENABLED(CONFIG_NF_CONNTRACK_ZONES) &&
	    tcf_ct_dump_key_val(skb,
				&p->zone, TCA_CT_ZONE,
				NULL, TCA_CT_UNSPEC,
				sizeof(p->zone)))
		goto nla_put_failure;

	if (tcf_ct_dump_nat(skb, p))
		goto nla_put_failure;

skip_dump:
	if (nla_put(skb, TCA_CT_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;

	tcf_tm_dump(&t, &c->tcf_tm);
	if (nla_put_64bit(skb, TCA_CT_TM, sizeof(t), &t, TCA_CT_PAD))
		goto nla_put_failure;
	spin_unlock_bh(&c->tcf_lock);

	return skb->len;
nla_put_failure:
	spin_unlock_bh(&c->tcf_lock);
	nlmsg_trim(skb, b);
	return -1;
}

static int tcf_ct_walker(struct net *net, struct sk_buff *skb,
			 struct netlink_callback *cb, int type,
			 const struct tc_action_ops *ops,
			 struct netlink_ext_ack *extack)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_generic_walker(tn, skb, cb, type, ops, extack);
}

static int tcf_ct_search(struct net *net, struct tc_action **a, u32 index)
{
	struct tc_action_net *tn = net_generic(net, ct_net_id);

	return tcf_idr_search(tn, a, index);
}

static void tcf_stats_update(struct tc_action *a, u64 bytes, u64 packets,
			     u64 drops, u64 lastuse, bool hw)
{
	struct tcf_ct *c = to_ct(a);

	tcf_action_update_stats(a, bytes, packets, drops, hw);
	c->tcf_tm.lastuse = max_t(u64, c->tcf_tm.lastuse, lastuse);
}

static struct tc_action_ops act_ct_ops = {
	.kind		=	"ct",
	.id		=	TCA_ID_CT,
	.owner		=	THIS_MODULE,
	.act		=	tcf_ct_act,
	.dump		=	tcf_ct_dump,
	.init		=	tcf_ct_init,
	.cleanup	=	tcf_ct_cleanup,
	.walk		=	tcf_ct_walker,
	.lookup		=	tcf_ct_search,
	.stats_update	=	tcf_stats_update,
	.size		=	sizeof(struct tcf_ct),
};

static __net_init int ct_init_net(struct net *net)
{
	unsigned int n_bits = sizeof_field(struct tcf_ct_params, labels) * 8;
	struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

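	/* nf_connlabels_get() takes the highest bit index to be used, so
	 * pass n_bits - 1 to cover the full 128-bit label area.
	 */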
	if (nf_connlabels_get(net, n_bits - 1)) {
		tn->labels = false;
		pr_err("act_ct: Failed to set connlabels length\n");
	} else {
		tn->labels = true;
	}

	return tc_action_net_init(net, &tn->tn, &act_ct_ops);
}

static void __net_exit ct_exit_net(struct list_head *net_list)
{
	struct net *net;

	rtnl_lock();
	list_for_each_entry(net, net_list, exit_list) {
		struct tc_ct_action_net *tn = net_generic(net, ct_net_id);

		if (tn->labels)
			nf_connlabels_put(net);
	}
	rtnl_unlock();

	tc_action_net_exit(net_list, ct_net_id);
}

static struct pernet_operations ct_net_ops = {
	.init = ct_init_net,
	.exit_batch = ct_exit_net,
	.id   = &ct_net_id,
	.size = sizeof(struct tc_ct_action_net),
};

static int __init ct_init_module(void)
{
	int err;

	act_ct_wq = alloc_ordered_workqueue("act_ct_workqueue", 0);
	if (!act_ct_wq)
		return -ENOMEM;

	err = tcf_ct_flow_tables_init();
	if (err)
		goto err_tbl_init;

	err = tcf_register_action(&act_ct_ops, &ct_net_ops);
	if (err)
		goto err_register;

	return 0;

err_register:
	tcf_ct_flow_tables_uninit();
err_tbl_init:
	destroy_workqueue(act_ct_wq);
	return err;
}

static void __exit ct_cleanup_module(void)
{
	tcf_unregister_action(&act_ct_ops, &ct_net_ops);
	tcf_ct_flow_tables_uninit();
	destroy_workqueue(act_ct_wq);
}

module_init(ct_init_module);
module_exit(ct_cleanup_module);
MODULE_AUTHOR("Paul Blakey <paulb@mellanox.com>");
MODULE_AUTHOR("Yossi Kuperman <yossiku@mellanox.com>");
MODULE_AUTHOR("Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>");
MODULE_DESCRIPTION("Connection tracking action");
MODULE_LICENSE("GPL v2");