// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <network_helpers.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

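	/* Map each classifier/<i> program into its jump table slot. */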
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

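	/* Each run should return index i of the first still-populated slot;
	 * that slot is then deleted so the next run falls through to slot
	 * i + 1.
	 */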
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

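	/* With the jump table empty, every tail call falls through and the
	 * entry program returns 3.
	 */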
	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

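	/* Re-populate the table in reverse order to exercise jmp->jmp
	 * rewrites, then expect the reversed index back on each run.
	 */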
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != j, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

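	/* The table is empty again: deleting any slot must fail with ENOENT
	 * and every run keeps returning 3 (nop->nop, nothing left to patch).
	 */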
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

out:
	bpf_object__close(obj);
}

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

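	/* Drop the last program of the chain; the run should now stop one
	 * step earlier and return 1 instead of 2.
	 */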
	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations.
 */
static void test_tailcall_3(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

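	/* With the tail call limit enforced, the counter in .bss must
	 * read 33.
	 */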
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the key is not known at JIT time. The key is passed in
 * via global data so that we can select different targets and compare their
 * return values.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

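	/* Select each slot via the key stored in global data and expect the
	 * corresponding program's index as the return value.
	 */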
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

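	/* Once a slot is deleted, selecting it via the key must fall through
	 * and return 3.
	 */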
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel
 * generates an indirect jump when the key is constant but differs between
 * branches.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

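	/* Write each branch's magic key into global data and expect the
	 * matching program's index back.
	 */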
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_1 makes sure that tail calls work correctly in
 * combination with BPF subprograms.
 */
static void test_tailcall_bpf2bpf_1(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf1.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	/* nop -> jmp */
	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 1, "tailcall",
	      "err %d errno %d retval %d\n", err, errno, retval);

	/* jmp -> nop, call subprog that will do tailcall */
	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	/* make sure that the subprog can access ctx and that the entry prog
	 * which called this subprog can properly return
	 */
	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				0, &retval, &duration);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_2 checks that the tail call limit counter matches
 * expectations when the tail call is preceded by a bpf2bpf call.
 */
static void test_tailcall_bpf2bpf_2(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall_bpf2bpf2.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

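	/* As in test_tailcall_3, the counter in .bss must read 33 even
	 * though the tail call is now preceded by a bpf2bpf call.
	 */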
	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_3 checks that a non-trivial amount of stack (up to
 * 256 bytes) can be used within BPF subprograms that contain the tail calls.
 */
static void test_tailcall_bpf2bpf_3(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf3.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

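	/* With both slots populated, the whole chain runs and returns three
	 * times the packet size.
	 */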
	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 1;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4),
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 2,
	      "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}

/* test_tailcall_bpf2bpf_4 checks that the tail call counter is correctly
 * preserved across tail calls combined with bpf2bpf calls. To make sure that
 * the tail call counter behaves correctly, the BPF program goes through the
 * following flow:
 *
 * entry -> entry_subprog -> tailcall0 -> bpf_func0 -> subprog0 ->
 * -> tailcall1 -> bpf_func1 -> subprog1 -> tailcall2 -> bpf_func2 ->
 * subprog2 [here bump global counter] --------^
 *
 * We go through the first two tail calls and start counting from subprog2,
 * where the loop begins. At the end of the test, make sure that the global
 * counter is equal to 31: the tail call counter includes the first two tail
 * calls, whereas the global counter is only incremented in the loop shown
 * in the flow above.
 */
static void test_tailcall_bpf2bpf_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];

	err = bpf_prog_load("tailcall_bpf2bpf4.o", BPF_PROG_TYPE_SCHED_CLS,
			    &obj, &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

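	/* A single run through the flow above must return three times the
	 * packet size and leave the global counter at 31.
	 */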
	err = bpf_prog_test_run(main_fd, 1, &pkt_v4, sizeof(pkt_v4), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != sizeof(pkt_v4) * 3, "tailcall",
	      "err %d errno %d retval %d\n", err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 31, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

out:
	bpf_object__close(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
	if (test__start_subtest("tailcall_bpf2bpf_1"))
		test_tailcall_bpf2bpf_1();
	if (test__start_subtest("tailcall_bpf2bpf_2"))
		test_tailcall_bpf2bpf_2();
	if (test__start_subtest("tailcall_bpf2bpf_3"))
		test_tailcall_bpf2bpf_3();
	if (test__start_subtest("tailcall_bpf2bpf_4"))
		test_tailcall_bpf2bpf_4();
}