// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <net/pkt_cls.h>

#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"
#include "fw.h"
#include "main.h"

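/* Hash table of maps shared between offloaded programs ("neutral" maps),
 * keyed by the kernel-assigned BPF map ID.
 */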
const struct rhashtable_params nfp_bpf_maps_neutral_params = {
	.nelem_hint		= 4,
	.key_len		= sizeof_field(struct bpf_map, id),
	.key_offset		= offsetof(struct nfp_bpf_neutral_map, map_id),
	.head_offset		= offsetof(struct nfp_bpf_neutral_map, l),
	.automatic_shrinking	= true,
};

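/* BPF offload is only available on little-endian hosts, and only when
 * the firmware advertises the BPF capability with an ABI version matching
 * the one selected by the driver.
 */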
static bool nfp_net_ebpf_capable(struct nfp_net *nn)
{
#ifdef __LITTLE_ENDIAN
	struct nfp_app_bpf *bpf = nn->app->priv;

	return nn->cap & NFP_NET_CFG_CTRL_BPF &&
	       bpf->abi_version &&
	       nn_readb(nn, NFP_NET_CFG_BPF_ABI) == bpf->abi_version;
#else
	return false;
#endif
}

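/* Install or remove an offloaded XDP program.  Removing a program which
 * is not loaded is a no-op, while installing one over a program loaded
 * via TC (BPF running, but not as XDP) is refused with -EBUSY.
 */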
static int
nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
		    struct bpf_prog *prog, struct netlink_ext_ack *extack)
{
	bool running, xdp_running;

	if (!nfp_net_ebpf_capable(nn))
		return -EINVAL;

	running = nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF;
	xdp_running = running && nn->xdp_hw.prog;

	if (!prog && !xdp_running)
		return 0;
	if (prog && running && !xdp_running)
		return -EBUSY;

	return nfp_net_bpf_offload(nn, prog, running, extack);
}

static const char *nfp_bpf_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return nfp_net_ebpf_capable(nn) ? "BPF" : "";
}

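/* Allocate per-vNIC BPF state and read the firmware-provided program
 * start offset and "done" target from the vNIC's config BAR.  BPF
 * firmware requires a one-to-one mapping between ETH table entries and
 * data vNICs, hence the sanity checks up front.
 */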
static int
nfp_bpf_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id)
{
	struct nfp_pf *pf = app->pf;
	struct nfp_bpf_vnic *bv;
	int err;

	if (!pf->eth_tbl) {
		nfp_err(pf->cpp, "No ETH table\n");
		return -EINVAL;
	}
	if (pf->max_data_vnics != pf->eth_tbl->count) {
		nfp_err(pf->cpp, "ETH entries don't match vNICs (%d vs %d)\n",
			pf->max_data_vnics, pf->eth_tbl->count);
		return -EINVAL;
	}

	bv = kzalloc(sizeof(*bv), GFP_KERNEL);
	if (!bv)
		return -ENOMEM;
	nn->app_priv = bv;

	err = nfp_app_nic_vnic_alloc(app, nn, id);
	if (err)
		goto err_free_priv;

	bv->start_off = nn_readw(nn, NFP_NET_CFG_BPF_START);
	bv->tgt_done = nn_readw(nn, NFP_NET_CFG_BPF_DONE);

	return 0;
err_free_priv:
	kfree(nn->app_priv);
	return err;
}

static void nfp_bpf_vnic_free(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_bpf_vnic *bv = nn->app_priv;

	WARN_ON(bv->tc_prog);
	kfree(bv);
}

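/* TC block callback.  Only direct-action cls_bpf on chain 0 with the
 * ETH_P_ALL protocol can be offloaded; anything else is left to the
 * software path via -EOPNOTSUPP.
 */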
static int nfp_bpf_setup_tc_block_cb(enum tc_setup_type type,
				     void *type_data, void *cb_priv)
{
	struct tc_cls_bpf_offload *cls_bpf = type_data;
	struct nfp_net *nn = cb_priv;
	struct bpf_prog *oldprog;
	struct nfp_bpf_vnic *bv;
	int err;

	if (type != TC_SETUP_CLSBPF) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only offload of BPF classifiers supported");
		return -EOPNOTSUPP;
	}
	if (!tc_cls_can_offload_and_chain0(nn->dp.netdev, &cls_bpf->common))
		return -EOPNOTSUPP;
	if (!nfp_net_ebpf_capable(nn)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "NFP firmware does not support eBPF offload");
		return -EOPNOTSUPP;
	}
	if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only ETH_P_ALL supported as filter protocol");
		return -EOPNOTSUPP;
	}

	/* Only support TC direct action */
	if (!cls_bpf->exts_integrated ||
	    tcf_exts_has_actions(cls_bpf->exts)) {
		NL_SET_ERR_MSG_MOD(cls_bpf->common.extack,
				   "only direct action with no legacy actions supported");
		return -EOPNOTSUPP;
	}

	if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
		return -EOPNOTSUPP;

	bv = nn->app_priv;
	oldprog = cls_bpf->oldprog;

	/* Don't remove if oldprog doesn't match driver's state */
	if (bv->tc_prog != oldprog) {
		oldprog = NULL;
		if (!cls_bpf->prog)
			return 0;
	}

	err = nfp_net_bpf_offload(nn, cls_bpf->prog, oldprog,
				  cls_bpf->common.extack);
	if (err)
		return err;

	bv->tc_prog = cls_bpf->prog;
	nn->port->tc_offload_cnt = !!bv->tc_prog;
	return 0;
}

static LIST_HEAD(nfp_bpf_block_cb_list);

static int nfp_bpf_setup_tc(struct nfp_app *app, struct net_device *netdev,
			    enum tc_setup_type type, void *type_data)
{
	struct nfp_net *nn = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &nfp_bpf_block_cb_list,
						  nfp_bpf_setup_tc_block_cb,
						  nn, nn, true);
	default:
		return -EOPNOTSUPP;
	}
}

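/* Veto MTU changes which could let the offloaded program access bytes
 * beyond the hardware packet boundary.  Only relevant while a BPF
 * program (XDP or TC) is actually loaded on the vNIC.
 */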
static int
nfp_bpf_check_mtu(struct nfp_app *app, struct net_device *netdev, int new_mtu)
{
	struct nfp_net *nn = netdev_priv(netdev);
	struct nfp_bpf_vnic *bv;
	struct bpf_prog *prog;

	if (~nn->dp.ctrl & NFP_NET_CFG_CTRL_BPF)
		return 0;

	if (nn->xdp_hw.prog) {
		prog = nn->xdp_hw.prog;
	} else {
		bv = nn->app_priv;
		prog = bv->tc_prog;
	}

	if (nfp_bpf_offload_check_mtu(nn, prog, new_mtu)) {
		nn_info(nn, "BPF offload active, potential packet access beyond hardware packet boundary");
		return -EBUSY;
	}
	return 0;
}

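/* The TLV parsers below are handed a pointer into the capability area
 * and the TLV length, and fill in the relevant fields of nfp_app_bpf.
 * adjust_head limits must fit into immediate operands; if they don't,
 * the capability is disabled with a warning rather than failing probe.
 */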
static int
nfp_bpf_parse_cap_adjust_head(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	struct nfp_bpf_cap_tlv_adjust_head __iomem *cap = value;
	struct nfp_cpp *cpp = bpf->app->pf->cpp;

	if (length < sizeof(*cap)) {
		nfp_err(cpp, "truncated adjust_head TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->adjust_head.flags = readl(&cap->flags);
	bpf->adjust_head.off_min = readl(&cap->off_min);
	bpf->adjust_head.off_max = readl(&cap->off_max);
	bpf->adjust_head.guaranteed_sub = readl(&cap->guaranteed_sub);
	bpf->adjust_head.guaranteed_add = readl(&cap->guaranteed_add);

	if (bpf->adjust_head.off_min > bpf->adjust_head.off_max) {
		nfp_err(cpp, "invalid adjust_head TLV: min > max\n");
		return -EINVAL;
	}
	if (!FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_min) ||
	    !FIELD_FIT(UR_REG_IMM_MAX, bpf->adjust_head.off_max)) {
		nfp_warn(cpp, "disabling adjust_head - driver expects min/max to fit in as immediates\n");
		memset(&bpf->adjust_head, 0, sizeof(bpf->adjust_head));
		return 0;
	}

	return 0;
}

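/* Record the firmware addresses of the BPF helper implementations the
 * JIT can call out to.  TLVs for helpers the driver does not know about
 * are silently skipped.
 */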
static int
nfp_bpf_parse_cap_func(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_func __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated function TLV: %d\n", length);
		return -EINVAL;
	}

	switch (readl(&cap->func_id)) {
	case BPF_FUNC_map_lookup_elem:
		bpf->helpers.map_lookup = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_update_elem:
		bpf->helpers.map_update = readl(&cap->func_addr);
		break;
	case BPF_FUNC_map_delete_elem:
		bpf->helpers.map_delete = readl(&cap->func_addr);
		break;
	case BPF_FUNC_perf_event_output:
		bpf->helpers.perf_event_output = readl(&cap->func_addr);
		break;
	}

	return 0;
}

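/* Map limits advertised by the firmware: the supported map types and
 * the maximum number of maps, total elements, and key/value/element
 * sizes.
 */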
static int
nfp_bpf_parse_cap_maps(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	struct nfp_bpf_cap_tlv_maps __iomem *cap = value;

	if (length < sizeof(*cap)) {
		nfp_err(bpf->app->cpp, "truncated maps TLV: %d\n", length);
		return -EINVAL;
	}

	bpf->maps.types = readl(&cap->types);
	bpf->maps.max_maps = readl(&cap->max_maps);
	bpf->maps.max_elems = readl(&cap->max_elems);
	bpf->maps.max_key_sz = readl(&cap->max_key_sz);
	bpf->maps.max_val_sz = readl(&cap->max_val_sz);
	bpf->maps.max_elem_sz = readl(&cap->max_elem_sz);

	return 0;
}

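/* The capabilities below are simple presence flags - a TLV of the given
 * type enables the feature, its value and length are ignored.
 */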
static int
nfp_bpf_parse_cap_random(struct nfp_app_bpf *bpf, void __iomem *value,
			 u32 length)
{
	bpf->pseudo_random = true;
	return 0;
}

static int
nfp_bpf_parse_cap_qsel(struct nfp_app_bpf *bpf, void __iomem *value, u32 length)
{
	bpf->queue_select = true;
	return 0;
}

static int
nfp_bpf_parse_cap_adjust_tail(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	bpf->adjust_tail = true;
	return 0;
}

static int
nfp_bpf_parse_cap_cmsg_multi_ent(struct nfp_app_bpf *bpf, void __iomem *value,
				 u32 length)
{
	bpf->cmsg_multi_ent = true;
	return 0;
}

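/* The ABI version is a single 32-bit word.  Only versions 2 and 3 are
 * understood; anything else clears the version, which in turn makes
 * nfp_net_ebpf_capable() report the vNICs as not BPF capable.
 */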
static int
nfp_bpf_parse_cap_abi_version(struct nfp_app_bpf *bpf, void __iomem *value,
			      u32 length)
{
	if (length < 4) {
		nfp_err(bpf->app->cpp, "truncated ABI version TLV: %d\n",
			length);
		return -EINVAL;
	}

	bpf->abi_version = readl(value);
	if (bpf->abi_version < 2 || bpf->abi_version > 3) {
		nfp_warn(bpf->app->cpp, "unsupported BPF ABI version: %d\n",
			 bpf->abi_version);
		bpf->abi_version = 0;
	}

	return 0;
}

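/* Walk the capability memory as a stream of TLVs - a 32-bit type, a
 * 32-bit length, then length bytes of value.  The area must be consumed
 * exactly; truncated or trailing data fails the probe.  A missing
 * capability symbol is not an error, it simply means older firmware.
 */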
static int nfp_bpf_parse_capabilities(struct nfp_app *app)
{
	struct nfp_cpp *cpp = app->pf->cpp;
	struct nfp_cpp_area *area;
	u8 __iomem *mem, *start;

	mem = nfp_rtsym_map(app->pf->rtbl, "_abi_bpf_capabilities", "bpf.cap",
			    8, &area);
	if (IS_ERR(mem))
		return PTR_ERR(mem) == -ENOENT ? 0 : PTR_ERR(mem);

	start = mem;
	while (mem - start + 8 <= nfp_cpp_area_size(area)) {
		u8 __iomem *value;
		u32 type, length;

		type = readl(mem);
		length = readl(mem + 4);
		value = mem + 8;

		mem += 8 + length;
		if (mem - start > nfp_cpp_area_size(area))
			goto err_release_free;

		switch (type) {
		case NFP_BPF_CAP_TYPE_FUNC:
			if (nfp_bpf_parse_cap_func(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_HEAD:
			if (nfp_bpf_parse_cap_adjust_head(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_MAPS:
			if (nfp_bpf_parse_cap_maps(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_RANDOM:
			if (nfp_bpf_parse_cap_random(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_QUEUE_SELECT:
			if (nfp_bpf_parse_cap_qsel(app->priv, value, length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ADJUST_TAIL:
			if (nfp_bpf_parse_cap_adjust_tail(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_ABI_VERSION:
			if (nfp_bpf_parse_cap_abi_version(app->priv, value,
							  length))
				goto err_release_free;
			break;
		case NFP_BPF_CAP_TYPE_CMSG_MULTI_ENT:
			if (nfp_bpf_parse_cap_cmsg_multi_ent(app->priv, value,
							     length))
				goto err_release_free;
			break;
		default:
			nfp_dbg(cpp, "unknown BPF capability: %d\n", type);
			break;
		}
	}
	if (mem - start != nfp_cpp_area_size(area)) {
		nfp_err(cpp, "BPF capabilities left after parsing, parsed:%zd total length:%zu\n",
			mem - start, nfp_cpp_area_size(area));
		goto err_release_free;
	}

	nfp_cpp_area_release_free(area);

	return 0;

err_release_free:
	nfp_err(cpp, "invalid BPF capabilities at offset:%zd\n", mem - start);
	nfp_cpp_area_release_free(area);
	return -EINVAL;
}

static void nfp_bpf_init_capabilities(struct nfp_app_bpf *bpf)
{
	bpf->abi_version = 2; /* Original BPF ABI version */
}

static int nfp_bpf_ndo_init(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	return bpf_offload_dev_netdev_register(bpf->bpf_dev, netdev);
}

static void nfp_bpf_ndo_uninit(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_netdev_unregister(bpf->bpf_dev, netdev);
}

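/* Refuse to start if the control channel MTU cannot carry the minimum
 * map control message, then size the cmsg cache depending on whether
 * the firmware can batch multiple map entries per message.
 */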
static int nfp_bpf_start(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	if (app->ctrl->dp.mtu < nfp_bpf_ctrl_cmsg_min_mtu(bpf)) {
		nfp_err(bpf->app->cpp,
			"ctrl channel MTU below min required %u < %u\n",
			app->ctrl->dp.mtu, nfp_bpf_ctrl_cmsg_min_mtu(bpf));
		return -EINVAL;
	}

	if (bpf->cmsg_multi_ent)
		bpf->cmsg_cache_cnt = nfp_bpf_ctrl_cmsg_cache_cnt(bpf);
	else
		bpf->cmsg_cache_cnt = 1;

	return 0;
}

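/* App init - set up the CCM, the neutral map hash table, and parse the
 * firmware capabilities before registering the bpf_offload_dev.  On
 * ABI 3+ the control message key/value sizes follow the advertised map
 * limits rather than the fixed ABI 2 layout.
 */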
static int nfp_bpf_init(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf;
	int err;

	bpf = kzalloc(sizeof(*bpf), GFP_KERNEL);
	if (!bpf)
		return -ENOMEM;
	bpf->app = app;
	app->priv = bpf;

	INIT_LIST_HEAD(&bpf->map_list);

	err = nfp_ccm_init(&bpf->ccm, app);
	if (err)
		goto err_free_bpf;

	err = rhashtable_init(&bpf->maps_neutral, &nfp_bpf_maps_neutral_params);
	if (err)
		goto err_clean_ccm;

	nfp_bpf_init_capabilities(bpf);

	err = nfp_bpf_parse_capabilities(app);
	if (err)
		goto err_free_neutral_maps;

	if (bpf->abi_version < 3) {
		bpf->cmsg_key_sz = CMSG_MAP_KEY_LW * 4;
		bpf->cmsg_val_sz = CMSG_MAP_VALUE_LW * 4;
	} else {
		bpf->cmsg_key_sz = bpf->maps.max_key_sz;
		bpf->cmsg_val_sz = bpf->maps.max_val_sz;
		app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf);
	}

	bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops, bpf);
	err = PTR_ERR_OR_ZERO(bpf->bpf_dev);
	if (err)
		goto err_free_neutral_maps;

	return 0;

err_free_neutral_maps:
	rhashtable_destroy(&bpf->maps_neutral);
err_clean_ccm:
	nfp_ccm_clean(&bpf->ccm);
err_free_bpf:
	kfree(bpf);
	return err;
}

static void nfp_bpf_clean(struct nfp_app *app)
{
	struct nfp_app_bpf *bpf = app->priv;

	bpf_offload_dev_destroy(bpf->bpf_dev);
	nfp_ccm_clean(&bpf->ccm);
	WARN_ON(!list_empty(&bpf->map_list));
	WARN_ON(bpf->maps_in_use || bpf->map_elems_in_use);
	rhashtable_free_and_destroy(&bpf->maps_neutral,
				    nfp_check_rhashtable_empty, NULL);
	kfree(bpf);
}

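/* Callbacks of the eBPF offload app, invoked by the core NFP driver. */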
const struct nfp_app_type app_bpf = {
	.id		= NFP_APP_BPF_NIC,
	.name		= "ebpf",

	.ctrl_cap_mask	= 0,

	.init		= nfp_bpf_init,
	.clean		= nfp_bpf_clean,
	.start		= nfp_bpf_start,

	.check_mtu	= nfp_bpf_check_mtu,

	.extra_cap	= nfp_bpf_extra_cap,

	.ndo_init	= nfp_bpf_ndo_init,
	.ndo_uninit	= nfp_bpf_ndo_uninit,

	.vnic_alloc	= nfp_bpf_vnic_alloc,
	.vnic_free	= nfp_bpf_vnic_free,

	.ctrl_msg_rx	= nfp_bpf_ctrl_msg_rx,
	.ctrl_msg_rx_raw	= nfp_bpf_ctrl_msg_rx_raw,

	.setup_tc	= nfp_bpf_setup_tc,
	.bpf		= nfp_ndo_bpf,
	.xdp_offload	= nfp_bpf_xdp_offload,
};