// SPDX-License-Identifier: GPL-2.0-only
/*
 *  Copyright (C) 2020 Felix Fietkau <nbd@nbd.name>
 */

#include <linux/if_ether.h>
#include <linux/rhashtable.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/flow_offload.h>
#include <net/pkt_cls.h>
#include <net/dsa.h>
#include "mtk_eth_soc.h"
#include "mtk_wed.h"

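/* Flow match data and encapsulation state collected from a flow rule before
 * it is translated into a hardware PPE (foe) entry.
 */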
struct mtk_flow_data {
	struct ethhdr eth;

	union {
		struct {
			__be32 src_addr;
			__be32 dst_addr;
		} v4;

		struct {
			struct in6_addr src_addr;
			struct in6_addr dst_addr;
		} v6;
	};

	__be16 src_port;
	__be16 dst_port;

	u16 vlan_in;

	struct {
		u16 id;
		__be16 proto;
		u8 num;
	} vlan;
	struct {
		u16 sid;
		u8 num;
	} pppoe;
};

static const struct rhashtable_params mtk_flow_ht_params = {
	.head_offset = offsetof(struct mtk_flow_entry, node),
	.key_offset = offsetof(struct mtk_flow_entry, cookie),
	.key_len = sizeof(unsigned long),
	.automatic_shrinking = true,
};

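/* Helpers to program the IPv4/IPv6 L3/L4 tuple of a foe entry from the
 * parsed flow data.
 */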
static int
mtk_flow_set_ipv4_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data, bool egress)
{
	return mtk_foe_entry_set_ipv4_tuple(eth, foe, egress,
					    data->v4.src_addr, data->src_port,
					    data->v4.dst_addr, data->dst_port);
}

static int
mtk_flow_set_ipv6_addr(struct mtk_eth *eth, struct mtk_foe_entry *foe,
		       struct mtk_flow_data *data)
{
	return mtk_foe_entry_set_ipv6_tuple(eth, foe,
					    data->v6.src_addr.s6_addr32, data->src_port,
					    data->v6.dst_addr.s6_addr32, data->dst_port);
}

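/* Apply an Ethernet header mangle action to the cached MAC addresses: write
 * either 16 or 32 bits of the mangle value at the given offset within the
 * destination/source MAC pair, depending on the mask.
 */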
static void
mtk_flow_offload_mangle_eth(const struct flow_action_entry *act, void *eth)
{
	void *dest = eth + act->mangle.offset;
	const void *src = &act->mangle.val;

	if (act->mangle.offset > 8)
		return;

	if (act->mangle.mask == 0xffff) {
		src += 2;
		dest += 2;
	}

	memcpy(dest, src, act->mangle.mask ? 2 : 4);
}

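/* Resolve the transmit path of the destination device and, if the last hop
 * is a MediaTek WDMA device, extract the WDMA index, queue, BSS and WCID
 * needed for Wi-Fi (WED) hardware offload.
 */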
static int
mtk_flow_get_wdma_info(struct net_device *dev, const u8 *addr, struct mtk_wdma_info *info)
{
	struct net_device_path_stack stack;
	struct net_device_path *path;
	int err;

	if (!dev)
		return -ENODEV;

	if (!IS_ENABLED(CONFIG_NET_MEDIATEK_SOC_WED))
		return -1;

	err = dev_fill_forward_path(dev, addr, &stack);
	if (err)
		return err;

	path = &stack.path[stack.num_paths - 1];
	if (path->type != DEV_PATH_MTK_WDMA)
		return -1;

	info->wdma_idx = path->mtk_wdma.wdma_idx;
	info->queue = path->mtk_wdma.queue;
	info->bss = path->mtk_wdma.bss;
	info->wcid = path->mtk_wdma.wcid;

	return 0;
}

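/* Translate a TCP/UDP port mangle action into updated source/destination
 * ports; offset and mask select which 16-bit half of the 32-bit word is
 * rewritten.
 */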
static int
mtk_flow_mangle_ports(const struct flow_action_entry *act,
		      struct mtk_flow_data *data)
{
	u32 val = ntohl(act->mangle.val);

	switch (act->mangle.offset) {
	case 0:
		if (act->mangle.mask == ~htonl(0xffff))
			data->dst_port = cpu_to_be16(val);
		else
			data->src_port = cpu_to_be16(val >> 16);
		break;
	case 2:
		data->dst_port = cpu_to_be16(val);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

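/* Translate an IPv4 header mangle action into an updated source or
 * destination address.
 */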
static int
mtk_flow_mangle_ipv4(const struct flow_action_entry *act,
		     struct mtk_flow_data *data)
{
	__be32 *dest;

	switch (act->mangle.offset) {
	case offsetof(struct iphdr, saddr):
		dest = &data->v4.src_addr;
		break;
	case offsetof(struct iphdr, daddr):
		dest = &data->v4.dst_addr;
		break;
	default:
		return -EINVAL;
	}

	memcpy(dest, &act->mangle.val, sizeof(u32));

	return 0;
}

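/* If the destination device is a DSA user port behind a switch using the
 * MediaTek tag protocol, replace it with its DSA master and return the
 * switch port index; -ENODEV otherwise.
 */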
static int
mtk_flow_get_dsa_port(struct net_device **dev)
{
#if IS_ENABLED(CONFIG_NET_DSA)
	struct dsa_port *dp;

	dp = dsa_port_from_netdev(*dev);
	if (IS_ERR(dp))
		return -ENODEV;

	if (dp->cpu_dp->tag_ops->proto != DSA_TAG_PROTO_MTK)
		return -ENODEV;

	*dev = dsa_port_to_master(dp);

	return dp->index;
#else
	return -ENODEV;
#endif
}

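/* Program the egress side of a foe entry: either a WDMA destination for
 * Wi-Fi offload, or one of the GDM PSE ports, with DSA tagging and hardware
 * queue selection where applicable.
 */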
static int
mtk_flow_set_output_device(struct mtk_eth *eth, struct mtk_foe_entry *foe,
			   struct net_device *dev, const u8 *dest_mac,
			   int *wed_index)
{
	struct mtk_wdma_info info = {};
	int pse_port, dsa_port, queue;

	if (mtk_flow_get_wdma_info(dev, dest_mac, &info) == 0) {
		mtk_foe_entry_set_wdma(eth, foe, info.wdma_idx, info.queue,
				       info.bss, info.wcid);
		if (mtk_is_netsys_v2_or_greater(eth)) {
			switch (info.wdma_idx) {
			case 0:
				pse_port = 8;
				break;
			case 1:
				pse_port = 9;
				break;
			default:
				return -EINVAL;
			}
		} else {
			pse_port = 3;
		}
		*wed_index = info.wdma_idx;
		goto out;
	}

	dsa_port = mtk_flow_get_dsa_port(&dev);

	if (dev == eth->netdev[0])
		pse_port = PSE_GDM1_PORT;
	else if (dev == eth->netdev[1])
		pse_port = PSE_GDM2_PORT;
	else if (dev == eth->netdev[2])
		pse_port = PSE_GDM3_PORT;
	else
		return -EOPNOTSUPP;

	if (dsa_port >= 0) {
		mtk_foe_entry_set_dsa(eth, foe, dsa_port);
		queue = 3 + dsa_port;
	} else {
		queue = pse_port - 1;
	}
	mtk_foe_entry_set_queue(eth, foe, queue);

out:
	mtk_foe_entry_set_pse_port(eth, foe, pse_port);

	return 0;
}

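/* Translate a FLOW_CLS_REPLACE request into a PPE foe entry: parse the match
 * keys and actions, fill in the hardware entry and commit it to the PPE and
 * the driver flow table.
 */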
static int
mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f,
			 int ppe_index)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action_entry *act;
	struct mtk_flow_data data = {};
	struct mtk_foe_entry foe;
	struct net_device *odev = NULL;
	struct mtk_flow_entry *entry;
	int offload_type = 0;
	int wed_index = -1;
	u16 addr_type = 0;
	u8 l4proto = 0;
	int err = 0;
	int i;

	if (rhashtable_lookup(&eth->flow_table, &f->cookie, mtk_flow_ht_params))
		return -EEXIST;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META)) {
		struct flow_match_meta match;

		flow_rule_match_meta(rule, &match);
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;
	} else {
		return -EOPNOTSUPP;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		l4proto = match.key->ip_proto;
	} else {
		return -EOPNOTSUPP;
	}

	switch (addr_type) {
	case 0:
		offload_type = MTK_PPE_PKT_TYPE_BRIDGE;
		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
			struct flow_match_eth_addrs match;

			flow_rule_match_eth_addrs(rule, &match);
			memcpy(data.eth.h_dest, match.key->dst, ETH_ALEN);
			memcpy(data.eth.h_source, match.key->src, ETH_ALEN);
		} else {
			return -EOPNOTSUPP;
		}

		if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
			struct flow_match_vlan match;

			flow_rule_match_vlan(rule, &match);

			if (match.key->vlan_tpid != cpu_to_be16(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan_in = match.key->vlan_id;
		}
		break;
	case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV4_HNAPT;
		break;
	case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
		offload_type = MTK_PPE_PKT_TYPE_IPV6_ROUTE_5T;
		break;
	default:
		return -EOPNOTSUPP;
	}

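	/* First pass over the actions: MAC rewrites, redirect target and
	 * VLAN/PPPoE encapsulation.
	 */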
	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_MANGLE:
			if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
				return -EOPNOTSUPP;
			if (act->mangle.htype == FLOW_ACT_MANGLE_HDR_TYPE_ETH)
				mtk_flow_offload_mangle_eth(act, &data.eth);
			break;
		case FLOW_ACTION_REDIRECT:
			odev = act->dev;
			break;
		case FLOW_ACTION_CSUM:
			break;
		case FLOW_ACTION_VLAN_PUSH:
			if (data.vlan.num == 1 ||
			    act->vlan.proto != htons(ETH_P_8021Q))
				return -EOPNOTSUPP;

			data.vlan.id = act->vlan.vid;
			data.vlan.proto = act->vlan.proto;
			data.vlan.num++;
			break;
		case FLOW_ACTION_VLAN_POP:
			break;
		case FLOW_ACTION_PPPOE_PUSH:
			if (data.pppoe.num == 1)
				return -EOPNOTSUPP;

			data.pppoe.sid = act->pppoe.sid;
			data.pppoe.num++;
			break;
		default:
			return -EOPNOTSUPP;
		}
	}

	if (!is_valid_ether_addr(data.eth.h_source) ||
	    !is_valid_ether_addr(data.eth.h_dest))
		return -EINVAL;

	err = mtk_foe_entry_prepare(eth, &foe, offload_type, l4proto, 0,
				    data.eth.h_source, data.eth.h_dest);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports ports;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		flow_rule_match_ports(rule, &ports);
		data.src_port = ports.key->src;
		data.dst_port = ports.key->dst;
	} else if (offload_type != MTK_PPE_PKT_TYPE_BRIDGE) {
		return -EOPNOTSUPP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs addrs;

		flow_rule_match_ipv4_addrs(rule, &addrs);

		data.v4.src_addr = addrs.key->src;
		data.v4.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv4_addr(eth, &foe, &data, false);
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs addrs;

		flow_rule_match_ipv6_addrs(rule, &addrs);

		data.v6.src_addr = addrs.key->src;
		data.v6.dst_addr = addrs.key->dst;

		mtk_flow_set_ipv6_addr(eth, &foe, &data);
	}

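	/* Second pass: apply L3/L4 mangle actions (address/port rewrites) */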
	flow_action_for_each(i, act, &rule->action) {
		if (act->id != FLOW_ACTION_MANGLE)
			continue;

		if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
			return -EOPNOTSUPP;

		switch (act->mangle.htype) {
		case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
		case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
			err = mtk_flow_mangle_ports(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
			err = mtk_flow_mangle_ipv4(act, &data);
			break;
		case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
			/* handled earlier */
			break;
		default:
			return -EOPNOTSUPP;
		}

		if (err)
			return err;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		err = mtk_flow_set_ipv4_addr(eth, &foe, &data, true);
		if (err)
			return err;
	}

	if (offload_type == MTK_PPE_PKT_TYPE_BRIDGE)
		foe.bridge.vlan = data.vlan_in;

	if (data.vlan.num == 1) {
		if (data.vlan.proto != htons(ETH_P_8021Q))
			return -EOPNOTSUPP;

		mtk_foe_entry_set_vlan(eth, &foe, data.vlan.id);
	}
	if (data.pppoe.num == 1)
		mtk_foe_entry_set_pppoe(eth, &foe, data.pppoe.sid);

	err = mtk_flow_set_output_device(eth, &foe, odev, data.eth.h_dest,
					 &wed_index);
	if (err)
		return err;

	if (wed_index >= 0 && (err = mtk_wed_flow_add(wed_index)) < 0)
		return err;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->cookie = f->cookie;
	memcpy(&entry->data, &foe, sizeof(entry->data));
	entry->wed_index = wed_index;
	entry->ppe_index = ppe_index;

	err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
	if (err < 0)
		goto free;

	err = rhashtable_insert_fast(&eth->flow_table, &entry->node,
				     mtk_flow_ht_params);
	if (err < 0)
		goto clear;

	return 0;

clear:
	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
	kfree(entry);
	if (wed_index >= 0)
		mtk_wed_flow_remove(wed_index);
	return err;
}

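/* Tear down a previously offloaded flow: clear the PPE entry, drop it from
 * the flow table and release any WED resources.
 */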
static int
mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
	rhashtable_remove_fast(&eth->flow_table, &entry->node,
			       mtk_flow_ht_params);
	if (entry->wed_index >= 0)
		mtk_wed_flow_remove(entry->wed_index);
	kfree(entry);

	return 0;
}

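/* Report the last-use time and, when hardware counters are available,
 * packet and byte deltas for an offloaded flow.
 */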
static int
mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
{
	struct mtk_flow_entry *entry;
	struct mtk_foe_accounting diff;
	u32 idle;

	entry = rhashtable_lookup(&eth->flow_table, &f->cookie,
				  mtk_flow_ht_params);
	if (!entry)
		return -ENOENT;

	idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
	f->stats.lastused = jiffies - idle * HZ;

	if (entry->hash != 0xFFFF &&
	    mtk_foe_entry_get_mib(eth->ppe[entry->ppe_index], entry->hash,
				  &diff)) {
		f->stats.pkts += diff.packets;
		f->stats.bytes += diff.bytes;
	}

	return 0;
}

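/* Serializes flow offload commands across all ports and PPE instances */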
static DEFINE_MUTEX(mtk_flow_offload_mutex);

int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls,
			 int ppe_index)
{
	int err;

	mutex_lock(&mtk_flow_offload_mutex);
	switch (cls->command) {
	case FLOW_CLS_REPLACE:
		err = mtk_flow_offload_replace(eth, cls, ppe_index);
		break;
	case FLOW_CLS_DESTROY:
		err = mtk_flow_offload_destroy(eth, cls);
		break;
	case FLOW_CLS_STATS:
		err = mtk_flow_offload_stats(eth, cls);
		break;
	default:
		err = -EOPNOTSUPP;
		break;
	}
	mutex_unlock(&mtk_flow_offload_mutex);

	return err;
}

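/* Flow block callback: forward clsflower commands for this netdev to the
 * flow offload handler on PPE instance 0.
 */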
static int
mtk_eth_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct flow_cls_offload *cls = type_data;
	struct net_device *dev = cb_priv;
	struct mtk_mac *mac;
	struct mtk_eth *eth;

	mac = netdev_priv(dev);
	eth = mac->hw;

	if (!tc_can_offload(dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	return mtk_flow_offload_cmd(eth, cls, 0);
}

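/* Bind/unbind the flow block for clsact ingress offload, with reference
 * counting so that a shared block is only registered once per device.
 */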
static int
mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
{
	struct mtk_mac *mac = netdev_priv(dev);
	struct mtk_eth *eth = mac->hw;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_eth_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			flow_block_cb_incref(block_cb);
			return 0;
		}
		block_cb = flow_block_cb_alloc(cb, dev, dev, NULL);
		if (IS_ERR(block_cb))
			return PTR_ERR(block_cb);

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

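/* ndo_setup_tc entry point: handle TC block and flowtable (FT) offload */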
int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
		     void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_eth_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

int mtk_eth_offload_init(struct mtk_eth *eth)
{
	return rhashtable_init(&eth->flow_table, &mtk_flow_ht_params);
}