xref: /kernel/linux/linux-5.10/tools/lib/bpf/btf_dump.c (revision 8c2ecf20)
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3/*
4 * BTF-to-C type converter.
5 *
6 * Copyright (c) 2019 Facebook
7 */
8
9#include <stdbool.h>
10#include <stddef.h>
11#include <stdlib.h>
12#include <string.h>
13#include <errno.h>
14#include <linux/err.h>
15#include <linux/btf.h>
16#include <linux/kernel.h>
17#include "btf.h"
18#include "hashmap.h"
19#include "libbpf.h"
20#include "libbpf_internal.h"
21
22static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
23static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
24
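/*
 * Return an indentation prefix for the requested nesting level, e.g., pfx(2)
 * is "\t\t"; levels deeper than PREFIX_CNT are capped at PREFIX_CNT tabs.
 */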
25static const char *pfx(int lvl)
26{
27	return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
28}
29
30enum btf_dump_type_order_state {
31	NOT_ORDERED,
32	ORDERING,
33	ORDERED,
34};
35
36enum btf_dump_type_emit_state {
37	NOT_EMITTED,
38	EMITTING,
39	EMITTED,
40};
41
42/* per-type auxiliary state */
43struct btf_dump_type_aux_state {
44	/* topological sorting state */
45	enum btf_dump_type_order_state order_state: 2;
46	/* emitting state used to determine the need for forward declaration */
47	enum btf_dump_type_emit_state emit_state: 2;
48	/* whether forward declaration was already emitted */
49	__u8 fwd_emitted: 1;
50	/* whether unique non-duplicate name was already assigned */
51	__u8 name_resolved: 1;
52	/* whether type is referenced from any other type */
53	__u8 referenced: 1;
54};
55
56struct btf_dump {
57	const struct btf *btf;
58	const struct btf_ext *btf_ext;
59	btf_dump_printf_fn_t printf_fn;
60	struct btf_dump_opts opts;
61	int ptr_sz;
62	bool strip_mods;
63	int last_id;
64
65	/* per-type auxiliary state */
66	struct btf_dump_type_aux_state *type_states;
67	size_t type_states_cap;
68	/* per-type optional cached unique name, must be freed, if present */
69	const char **cached_names;
70	size_t cached_names_cap;
71
72	/* topo-sorted list of dependent type definitions */
73	__u32 *emit_queue;
74	int emit_queue_cap;
75	int emit_queue_cnt;
76
77	/*
78	 * stack of type declarations (e.g., chain of modifiers, arrays,
79	 * funcs, etc)
80	 */
81	__u32 *decl_stack;
82	int decl_stack_cap;
83	int decl_stack_cnt;
84
85	/* maps struct/union/enum name to a number of name occurrences */
86	struct hashmap *type_names;
87	/*
88	 * maps typedef identifiers and enum value names to a number of such
89	 * name occurrences
90	 */
91	struct hashmap *ident_names;
92};
93
94static size_t str_hash_fn(const void *key, void *ctx)
95{
96	return str_hash(key);
97}
98
99static bool str_equal_fn(const void *a, const void *b, void *ctx)
100{
101	return strcmp(a, b) == 0;
102}
103
104static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
105{
106	return btf__name_by_offset(d->btf, name_off);
107}
108
109static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
110{
111	va_list args;
112
113	va_start(args, fmt);
114	d->printf_fn(d->opts.ctx, fmt, args);
115	va_end(args);
116}
117
118static int btf_dump_mark_referenced(struct btf_dump *d);
119static int btf_dump_resize(struct btf_dump *d);
120
121struct btf_dump *btf_dump__new(const struct btf *btf,
122			       const struct btf_ext *btf_ext,
123			       const struct btf_dump_opts *opts,
124			       btf_dump_printf_fn_t printf_fn)
125{
126	struct btf_dump *d;
127	int err;
128
129	d = calloc(1, sizeof(struct btf_dump));
130	if (!d)
131		return ERR_PTR(-ENOMEM);
132
133	d->btf = btf;
134	d->btf_ext = btf_ext;
135	d->printf_fn = printf_fn;
136	d->opts.ctx = opts ? opts->ctx : NULL;
137	d->ptr_sz = btf__pointer_size(btf) ? : sizeof(void *);
138
139	d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
140	if (IS_ERR(d->type_names)) {
141		err = PTR_ERR(d->type_names);
142		d->type_names = NULL;
143		goto err;
144	}
145	d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
146	if (IS_ERR(d->ident_names)) {
147		err = PTR_ERR(d->ident_names);
148		d->ident_names = NULL;
149		goto err;
150	}
151
152	err = btf_dump_resize(d);
153	if (err)
154		goto err;
155
156	return d;
157err:
158	btf_dump__free(d);
159	return ERR_PTR(err);
160}
161
162static int btf_dump_resize(struct btf_dump *d)
163{
164	int err, last_id = btf__get_nr_types(d->btf);
165
166	if (last_id <= d->last_id)
167		return 0;
168
169	if (btf_ensure_mem((void **)&d->type_states, &d->type_states_cap,
170			   sizeof(*d->type_states), last_id + 1))
171		return -ENOMEM;
172	if (btf_ensure_mem((void **)&d->cached_names, &d->cached_names_cap,
173			   sizeof(*d->cached_names), last_id + 1))
174		return -ENOMEM;
175
176	if (d->last_id == 0) {
177		/* VOID is special */
178		d->type_states[0].order_state = ORDERED;
179		d->type_states[0].emit_state = EMITTED;
180	}
181
182	/* eagerly determine referenced types for anon enums */
183	err = btf_dump_mark_referenced(d);
184	if (err)
185		return err;
186
187	d->last_id = last_id;
188	return 0;
189}
190
191static void btf_dump_free_names(struct hashmap *map)
192{
193	size_t bkt;
194	struct hashmap_entry *cur;
195
196	hashmap__for_each_entry(map, cur, bkt)
197		free((void *)cur->key);
198
199	hashmap__free(map);
200}
201
202void btf_dump__free(struct btf_dump *d)
203{
204	int i;
205
206	if (IS_ERR_OR_NULL(d))
207		return;
208
209	free(d->type_states);
210	if (d->cached_names) {
211		/* any set cached name is owned by us and should be freed */
212		for (i = 0; i <= d->last_id; i++) {
213			if (d->cached_names[i])
214				free((void *)d->cached_names[i]);
215		}
216	}
217	free(d->cached_names);
218	free(d->emit_queue);
219	free(d->decl_stack);
220	btf_dump_free_names(d->type_names);
221	btf_dump_free_names(d->ident_names);
222
223	free(d);
224}
225
226static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
227static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
228
229/*
230 * Dump BTF type in compilable C syntax, including all the dependent types
231 * necessary for compilation. If some of the dependent types were already
232 * emitted as part of a previous btf_dump__dump_type() invocation for another
233 * type, they won't be emitted again. This API allows callers to filter out
234 * BTF types according to user-defined criteria and emit only a minimal
235 * subset of types necessary to compile everything. Full struct/union
236 * definitions will still be emitted, even if the only usage is through a
237 * pointer and could be satisfied with just a forward declaration.
238 *
239 * Dumping is done in two high-level passes:
240 *   1. Topologically sort type definitions to satisfy C rules of compilation.
241 *   2. Emit type definitions in C syntax.
242 *
243 * Returns 0 on success; <0, otherwise.
244 */
245int btf_dump__dump_type(struct btf_dump *d, __u32 id)
246{
247	int err, i;
248
249	if (id > btf__get_nr_types(d->btf))
250		return -EINVAL;
251
252	err = btf_dump_resize(d);
253	if (err)
254		return err;
255
256	d->emit_queue_cnt = 0;
257	err = btf_dump_order_type(d, id, false);
258	if (err < 0)
259		return err;
260
261	for (i = 0; i < d->emit_queue_cnt; i++)
262		btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
263
264	return 0;
265}
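
/*
 * Example usage from an application (a sketch, not code from this file;
 * 'btf' is assumed to be an already loaded struct btf, and the ERR_PTR-style
 * result of btf_dump__new() is checked with libbpf_get_error()):
 *
 *	static void my_printf(void *ctx, const char *fmt, va_list args)
 *	{
 *		vfprintf(stdout, fmt, args);
 *	}
 *
 *	struct btf_dump *d = btf_dump__new(btf, NULL, NULL, my_printf);
 *	__u32 id;
 *
 *	if (!libbpf_get_error(d)) {
 *		for (id = 1; id <= btf__get_nr_types(btf); id++)
 *			btf_dump__dump_type(d, id);
 *		btf_dump__free(d);
 *	}
 */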
266
267/*
268 * Mark all types that are referenced from any other type. This is used to
269 * determine top-level anonymous enums that need to be emitted as
270 * independent type declarations.
271 * Anonymous enums come in two flavors: either embedded in a struct's field
272 * definition, in which case they have to be declared inline as part of field
273 * type declaration; or as a top-level anonymous enum, typically used for
274 * declaring global constants. It's impossible to distinguish between the two
275 * without knowing whether a given enum type was referenced from another type:
276 * a top-level anonymous enum won't be referenced by anything, while an
277 * embedded one will.
278 */
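
/*
 * For illustration (a hypothetical input, not code from this file):
 *
 *	enum { FOO = 1, BAR = 2 };	// top-level, referenced by nothing
 *	struct s {
 *		enum { A, B } f;	// embedded, referenced by member f
 *	};
 *
 * Only the first anonymous enum has to be emitted as its own top-level
 * declaration; the second one is emitted inline as part of struct s.
 */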
279static int btf_dump_mark_referenced(struct btf_dump *d)
280{
281	int i, j, n = btf__get_nr_types(d->btf);
282	const struct btf_type *t;
283	__u16 vlen;
284
285	for (i = d->last_id + 1; i <= n; i++) {
286		t = btf__type_by_id(d->btf, i);
287		vlen = btf_vlen(t);
288
289		switch (btf_kind(t)) {
290		case BTF_KIND_INT:
291		case BTF_KIND_ENUM:
292		case BTF_KIND_FWD:
293			break;
294
295		case BTF_KIND_VOLATILE:
296		case BTF_KIND_CONST:
297		case BTF_KIND_RESTRICT:
298		case BTF_KIND_PTR:
299		case BTF_KIND_TYPEDEF:
300		case BTF_KIND_FUNC:
301		case BTF_KIND_VAR:
302			d->type_states[t->type].referenced = 1;
303			break;
304
305		case BTF_KIND_ARRAY: {
306			const struct btf_array *a = btf_array(t);
307
308			d->type_states[a->index_type].referenced = 1;
309			d->type_states[a->type].referenced = 1;
310			break;
311		}
312		case BTF_KIND_STRUCT:
313		case BTF_KIND_UNION: {
314			const struct btf_member *m = btf_members(t);
315
316			for (j = 0; j < vlen; j++, m++)
317				d->type_states[m->type].referenced = 1;
318			break;
319		}
320		case BTF_KIND_FUNC_PROTO: {
321			const struct btf_param *p = btf_params(t);
322
323			for (j = 0; j < vlen; j++, p++)
324				d->type_states[p->type].referenced = 1;
325			break;
326		}
327		case BTF_KIND_DATASEC: {
328			const struct btf_var_secinfo *v = btf_var_secinfos(t);
329
330			for (j = 0; j < vlen; j++, v++)
331				d->type_states[v->type].referenced = 1;
332			break;
333		}
334		default:
335			return -EINVAL;
336		}
337	}
338	return 0;
339}
340
341static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
342{
343	__u32 *new_queue;
344	size_t new_cap;
345
346	if (d->emit_queue_cnt >= d->emit_queue_cap) {
347		new_cap = max(16, d->emit_queue_cap * 3 / 2);
348		new_queue = libbpf_reallocarray(d->emit_queue, new_cap, sizeof(new_queue[0]));
349		if (!new_queue)
350			return -ENOMEM;
351		d->emit_queue = new_queue;
352		d->emit_queue_cap = new_cap;
353	}
354
355	d->emit_queue[d->emit_queue_cnt++] = id;
356	return 0;
357}
358
359/*
360 * Determine order of emitting dependent types and specified type to satisfy
361 * C compilation rules.  This is done through topological sorting with an
362 * additional complication which comes from C rules. The main idea for C is
363 * that if some type is "embedded" into a struct/union, its size needs to be
364 * known at the time of definition of containing type. E.g., for:
365 *
366 *	struct A {};
367 *	struct B { struct A x; }
368 *
369 * struct A *HAS* to be defined before struct B, because it's "embedded",
370 * i.e., it is part of struct B layout. But in the following case:
371 *
372 *	struct A;
373 *	struct B { struct A *x; }
374 *	struct A {};
375 *
376 * it's enough to just have a forward declaration of struct A at the time of
377 * struct B definition, as struct B has a pointer to struct A, so the size of
378 * field x is known without knowing struct A size: it's sizeof(void *).
379 *
380 * Unfortunately, there are some trickier cases we need to handle, e.g.:
381 *
382 *	struct A {}; // if this was forward-declaration: compilation error
383 *	struct B {
384 *		struct { // anonymous struct
385 *			struct A y;
386 *		} *x;
387 *	};
388 *
389 * In this case, struct B's field x is a pointer, so its size is known
390 * regardless of the size of (anonymous) struct it points to. But because this
391 * struct is anonymous and thus defined inline inside struct B, *and* it
392 * embeds struct A, compiler requires full definition of struct A to be known
393 * before struct B can be defined. This creates a transitive dependency
394 * between struct A and struct B. If struct A was forward-declared before
395 * struct B definition and fully defined after struct B definition, that would
396 * trigger compilation error.
397 *
398 * All this means that while we are doing topological sorting on BTF type
399 * graph, we need to determine relationships between different types (graph
400 * nodes):
401 *   - weak link (relationship) between X and Y, if Y *CAN* be
402 *   forward-declared at the point of X definition;
403 *   - strong link, if Y *HAS* to be fully-defined before X can be defined.
404 *
405 * The rule is as follows. Given a chain of BTF types from X to Y, if there is
406 * BTF_KIND_PTR type in the chain and at least one non-anonymous type
407 * Z (excluding X, including Y), then link is weak. Otherwise, it's strong.
408 * Weak/strong relationship is determined recursively during DFS traversal and
409 * is returned as a result from btf_dump_order_type().
410 *
411 * btf_dump_order_type() tries to avoid unnecessary forward declarations, but
412 * it does not guarantee that no extraneous forward declarations will be
413 * emitted.
414 *
415 * To avoid extra work, the algorithm marks some BTF types as ORDERED when
416 * it's done with them, but not all of them (e.g., VOLATILE, CONST, RESTRICT,
417 * ARRAY, FUNC_PROTO), as weak/strong semantics for those depend on the
418 * entire graph path, so depending on where one arrived at that BTF type
419 * from, it might cause weak or strong ordering. For types like STRUCT/UNION/INT/ENUM,
420 * once they are processed, there is no need to do it again, so they are
421 * marked as ORDERED. We can mark PTR as ORDERED as well, as it semi-forces
422 * weak link, unless subsequent referenced STRUCT/UNION/ENUM is anonymous. But
423 * in any case, once those are processed, no need to do it again, as the
424 * result won't change.
425 *
426 * Returns:
427 *   - 1, if type is part of strong link (so there is strong topological
428 *   ordering requirements);
429 *   - 0, if type is part of weak link (so can be satisfied through forward
430 *   declaration);
431 *   - <0, on error (e.g., unsatisfiable type loop detected).
432 */
433static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
434{
435	/*
436	 * Order state is used to detect strong link cycles, but only for BTF
437	 * kinds that are or could be an independent definition (i.e.,
438	 * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
439	 * func_protos, modifiers are just means to get to these definitions.
440	 * Int/void don't need definitions; they are assumed to be always
441	 * properly defined.  We also ignore datasec, var, and funcs for now.
442	 * So for all non-defining kinds, we never even set ordering state, while
443	 * for defining kinds we set ORDERING and subsequently ORDERED if it
444	 * forms a strong link.
445	 */
446	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
447	const struct btf_type *t;
448	__u16 vlen;
449	int err, i;
450
451	/* return true, letting typedefs know that it's ok to be emitted */
452	if (tstate->order_state == ORDERED)
453		return 1;
454
455	t = btf__type_by_id(d->btf, id);
456
457	if (tstate->order_state == ORDERING) {
458		/* type loop, but resolvable through fwd declaration */
459		if (btf_is_composite(t) && through_ptr && t->name_off != 0)
460			return 0;
461		pr_warn("unsatisfiable type cycle, id:[%u]\n", id);
462		return -ELOOP;
463	}
464
465	switch (btf_kind(t)) {
466	case BTF_KIND_INT:
467		tstate->order_state = ORDERED;
468		return 0;
469
470	case BTF_KIND_PTR:
471		err = btf_dump_order_type(d, t->type, true);
472		tstate->order_state = ORDERED;
473		return err;
474
475	case BTF_KIND_ARRAY:
476		return btf_dump_order_type(d, btf_array(t)->type, false);
477
478	case BTF_KIND_STRUCT:
479	case BTF_KIND_UNION: {
480		const struct btf_member *m = btf_members(t);
481		/*
482		 * struct/union is part of a strong link only if it's embedded
483		 * (so no ptr in a path) or it's anonymous (so has to be
484		 * defined inline, even if declared through ptr)
485		 */
486		if (through_ptr && t->name_off != 0)
487			return 0;
488
489		tstate->order_state = ORDERING;
490
491		vlen = btf_vlen(t);
492		for (i = 0; i < vlen; i++, m++) {
493			err = btf_dump_order_type(d, m->type, false);
494			if (err < 0)
495				return err;
496		}
497
498		if (t->name_off != 0) {
499			err = btf_dump_add_emit_queue_id(d, id);
500			if (err < 0)
501				return err;
502		}
503
504		tstate->order_state = ORDERED;
505		return 1;
506	}
507	case BTF_KIND_ENUM:
508	case BTF_KIND_FWD:
509		/*
510		 * non-anonymous or non-referenced enums are top-level
511		 * declarations and should be emitted. Same logic can be
512		 * applied to FWDs; it won't hurt anyway.
513		 */
514		if (t->name_off != 0 || !tstate->referenced) {
515			err = btf_dump_add_emit_queue_id(d, id);
516			if (err)
517				return err;
518		}
519		tstate->order_state = ORDERED;
520		return 1;
521
522	case BTF_KIND_TYPEDEF: {
523		int is_strong;
524
525		is_strong = btf_dump_order_type(d, t->type, through_ptr);
526		if (is_strong < 0)
527			return is_strong;
528
529		/* typedef is similar to struct/union w.r.t. fwd-decls */
530		if (through_ptr && !is_strong)
531			return 0;
532
533		/* typedef is always a named definition */
534		err = btf_dump_add_emit_queue_id(d, id);
535		if (err)
536			return err;
537
538		d->type_states[id].order_state = ORDERED;
539		return 1;
540	}
541	case BTF_KIND_VOLATILE:
542	case BTF_KIND_CONST:
543	case BTF_KIND_RESTRICT:
544		return btf_dump_order_type(d, t->type, through_ptr);
545
546	case BTF_KIND_FUNC_PROTO: {
547		const struct btf_param *p = btf_params(t);
548		bool is_strong;
549
550		err = btf_dump_order_type(d, t->type, through_ptr);
551		if (err < 0)
552			return err;
553		is_strong = err > 0;
554
555		vlen = btf_vlen(t);
556		for (i = 0; i < vlen; i++, p++) {
557			err = btf_dump_order_type(d, p->type, through_ptr);
558			if (err < 0)
559				return err;
560			if (err > 0)
561				is_strong = true;
562		}
563		return is_strong;
564	}
565	case BTF_KIND_FUNC:
566	case BTF_KIND_VAR:
567	case BTF_KIND_DATASEC:
568		d->type_states[id].order_state = ORDERED;
569		return 0;
570
571	default:
572		return -EINVAL;
573	}
574}
575
576static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
577					  const struct btf_type *t);
578
579static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
580				     const struct btf_type *t);
581static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
582				     const struct btf_type *t, int lvl);
583
584static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
585				   const struct btf_type *t);
586static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
587				   const struct btf_type *t, int lvl);
588
589static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
590				  const struct btf_type *t);
591
592static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
593				      const struct btf_type *t, int lvl);
594
595/* a local view into a shared stack */
596struct id_stack {
597	const __u32 *ids;
598	int cnt;
599};
600
601static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
602				    const char *fname, int lvl);
603static void btf_dump_emit_type_chain(struct btf_dump *d,
604				     struct id_stack *decl_stack,
605				     const char *fname, int lvl);
606
607static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
608static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
609static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
610				 const char *orig_name);
611
612static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
613{
614	const struct btf_type *t = btf__type_by_id(d->btf, id);
615
616	/* __builtin_va_list is a compiler built-in, which causes compilation
617	 * errors when compiling with a different compiler than the one used to
618	 * compile the original code (e.g., GCC to compile the kernel, Clang to
619	 * use the generated C header from BTF). As it is a built-in, it should
620	 * already be defined properly internally in the compiler.
621	 */
622	if (t->name_off == 0)
623		return false;
624	return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
625}
626
627/*
628 * Emit C-syntax definitions of types from chains of BTF types.
629 *
630 * High-level handling of determining necessary forward declarations is done
631 * by btf_dump_emit_type() itself, but all the nitty-gritty details of emitting
632 * type declarations/definitions in C syntax are handled by a combo of
633 * btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/ delegation to
634 * corresponding btf_dump_emit_*_{def,fwd}() functions.
635 *
636 * We also keep track of "containing struct/union type ID" to determine when
637 * we reference it from inside and thus can avoid emitting unnecessary forward
638 * declaration.
639 *
640 * This algorithm is designed in such a way that even if some error occurs
641 * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
642 * that doesn't fully comply with C rules), the algorithm will try to proceed
643 * and produce as much meaningful output as possible.
644 */
645static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
646{
647	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
648	bool top_level_def = cont_id == 0;
649	const struct btf_type *t;
650	__u16 kind;
651
652	if (tstate->emit_state == EMITTED)
653		return;
654
655	t = btf__type_by_id(d->btf, id);
656	kind = btf_kind(t);
657
658	if (tstate->emit_state == EMITTING) {
659		if (tstate->fwd_emitted)
660			return;
661
662		switch (kind) {
663		case BTF_KIND_STRUCT:
664		case BTF_KIND_UNION:
665			/*
666			 * if we are referencing a struct/union that we are
667			 * part of - then no need for fwd declaration
668			 */
669			if (id == cont_id)
670				return;
671			if (t->name_off == 0) {
672				pr_warn("anonymous struct/union loop, id:[%u]\n",
673					id);
674				return;
675			}
676			btf_dump_emit_struct_fwd(d, id, t);
677			btf_dump_printf(d, ";\n\n");
678			tstate->fwd_emitted = 1;
679			break;
680		case BTF_KIND_TYPEDEF:
681			/*
682			 * for typedef fwd_emitted means typedef definition
683			 * was emitted, but it can be used only for "weak"
684			 * references through a pointer, not for embedding
685			 */
686			if (!btf_dump_is_blacklisted(d, id)) {
687				btf_dump_emit_typedef_def(d, id, t, 0);
688				btf_dump_printf(d, ";\n\n");
689			}
690			tstate->fwd_emitted = 1;
691			break;
692		default:
693			break;
694		}
695
696		return;
697	}
698
699	switch (kind) {
700	case BTF_KIND_INT:
701		/* Emit type alias definitions if necessary */
702		btf_dump_emit_missing_aliases(d, id, t);
703
704		tstate->emit_state = EMITTED;
705		break;
706	case BTF_KIND_ENUM:
707		if (top_level_def) {
708			btf_dump_emit_enum_def(d, id, t, 0);
709			btf_dump_printf(d, ";\n\n");
710		}
711		tstate->emit_state = EMITTED;
712		break;
713	case BTF_KIND_PTR:
714	case BTF_KIND_VOLATILE:
715	case BTF_KIND_CONST:
716	case BTF_KIND_RESTRICT:
717		btf_dump_emit_type(d, t->type, cont_id);
718		break;
719	case BTF_KIND_ARRAY:
720		btf_dump_emit_type(d, btf_array(t)->type, cont_id);
721		break;
722	case BTF_KIND_FWD:
723		btf_dump_emit_fwd_def(d, id, t);
724		btf_dump_printf(d, ";\n\n");
725		tstate->emit_state = EMITTED;
726		break;
727	case BTF_KIND_TYPEDEF:
728		tstate->emit_state = EMITTING;
729		btf_dump_emit_type(d, t->type, id);
730		/*
731		 * typedef can serve as both a definition and a forward
732		 * declaration; at this stage someone depends on
733		 * typedef as a forward declaration (refers to it
734		 * through pointer), so unless we already did it,
735		 * emit typedef as a forward declaration
736		 */
737		if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
738			btf_dump_emit_typedef_def(d, id, t, 0);
739			btf_dump_printf(d, ";\n\n");
740		}
741		tstate->emit_state = EMITTED;
742		break;
743	case BTF_KIND_STRUCT:
744	case BTF_KIND_UNION:
745		tstate->emit_state = EMITTING;
746		/* if it's a top-level struct/union definition or struct/union
747		 * is anonymous, then in C we'll be emitting all fields and
748		 * their types (as opposed to just `struct X`), so we need to
749		 * make sure that all types referenced from struct/union
750		 * members have the necessary forward declarations, where
751		 * applicable
752		 */
753		if (top_level_def || t->name_off == 0) {
754			const struct btf_member *m = btf_members(t);
755			__u16 vlen = btf_vlen(t);
756			int i, new_cont_id;
757
758			new_cont_id = t->name_off == 0 ? cont_id : id;
759			for (i = 0; i < vlen; i++, m++)
760				btf_dump_emit_type(d, m->type, new_cont_id);
761		} else if (!tstate->fwd_emitted && id != cont_id) {
762			btf_dump_emit_struct_fwd(d, id, t);
763			btf_dump_printf(d, ";\n\n");
764			tstate->fwd_emitted = 1;
765		}
766
767		if (top_level_def) {
768			btf_dump_emit_struct_def(d, id, t, 0);
769			btf_dump_printf(d, ";\n\n");
770			tstate->emit_state = EMITTED;
771		} else {
772			tstate->emit_state = NOT_EMITTED;
773		}
774		break;
775	case BTF_KIND_FUNC_PROTO: {
776		const struct btf_param *p = btf_params(t);
777		__u16 vlen = btf_vlen(t);
778		int i;
779
780		btf_dump_emit_type(d, t->type, cont_id);
781		for (i = 0; i < vlen; i++, p++)
782			btf_dump_emit_type(d, p->type, cont_id);
783
784		break;
785	}
786	default:
787		break;
788	}
789}
790
791static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
792				 const struct btf_type *t)
793{
794	const struct btf_member *m;
795	int max_align = 1, align, i, bit_sz;
796	__u16 vlen;
797
798	m = btf_members(t);
799	vlen = btf_vlen(t);
800	/* all non-bitfield fields have to be naturally aligned */
801	for (i = 0; i < vlen; i++, m++) {
802		align = btf__align_of(btf, m->type);
803		bit_sz = btf_member_bitfield_size(t, i);
804		if (align && bit_sz == 0 && m->offset % (8 * align) != 0)
805			return true;
806		max_align = max(align, max_align);
807	}
808	/* size of a non-packed struct has to be a multiple of its alignment */
809	if (t->size % max_align != 0)
810		return true;
811	/*
812	 * if original struct was marked as packed, but its layout is
813	 * naturally aligned, we'll detect that it's not packed
814	 */
815	return false;
816}
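
/*
 * For example (a hypothetical input, not code from this file):
 *
 *	struct s {
 *		char c;
 *		int x;
 *	} __attribute__((packed));
 *
 * has member x at bit offset 8 while int's natural alignment is 4 bytes, so
 * the check above flags the struct as packed and btf_dump_emit_struct_def()
 * will re-emit the __attribute__((packed)) marker for it.
 */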
817
818static void btf_dump_emit_bit_padding(const struct btf_dump *d,
819				      int cur_off, int next_off, int next_align,
820				      bool in_bitfield, int lvl)
821{
822	const struct {
823		const char *name;
824		int bits;
825	} pads[] = {
826		{"long", d->ptr_sz * 8}, {"int", 32}, {"short", 16}, {"char", 8}
827	};
828	int new_off, pad_bits, bits, i;
829	const char *pad_type;
830
831	if (cur_off >= next_off)
832		return; /* no gap */
833
834	/* For filling out padding we want to take advantage of
835	 * natural alignment rules to minimize unnecessary explicit
836	 * padding. First, we find the largest type (among long, int,
837	 * short, or char) that can be used to force naturally aligned
838	 * boundary. Once determined, we'll use such type to fill in
839	 * the remaining padding gap. In some cases we can rely on
840	 * compiler filling some gaps, but sometimes we need to force
841	 * alignment to close natural alignment with markers like
842	 * `long: 0` (this is always the case for bitfields).  Note
843	 * that even if struct itself has, let's say 4-byte alignment
844	 * (i.e., it only uses up to int-aligned types), using `long:
845	 * X;` explicit padding doesn't actually change struct's
846	 * overall alignment requirements, but compiler does take into
847	 * account that type's (long, in this example) natural
848	 * alignment requirements when adding implicit padding. We use
849	 * this fact heavily and don't worry about ruining correct
850	 * struct alignment requirement.
851	 */
852	for (i = 0; i < ARRAY_SIZE(pads); i++) {
853		pad_bits = pads[i].bits;
854		pad_type = pads[i].name;
855
856		new_off = roundup(cur_off, pad_bits);
857		if (new_off <= next_off)
858			break;
859	}
860
861	if (new_off > cur_off && new_off <= next_off) {
862		/* We need explicit `<type>: 0` aligning mark if next
863		 * field is right on alignment offset and its
864		 * alignment requirement is less strict than <type>'s
865		 * alignment (so compiler won't naturally align to the
866		 * offset we expect), or if a subsequent `<type>: X`
867		 * will actually completely fit in the remaining hole,
868		 * making compiler basically ignore `<type>: X`
869		 * completely.
870		 */
871		if (in_bitfield ||
872		    (new_off == next_off && roundup(cur_off, next_align * 8) != new_off) ||
873		    (new_off != next_off && next_off - new_off <= new_off - cur_off))
874			/* but for bitfields we'll emit explicit bit count */
875			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type,
876					in_bitfield ? new_off - cur_off : 0);
877		cur_off = new_off;
878	}
879
880	/* Now we know we start at naturally aligned offset for a chosen
881	 * padding type (long, int, short, or char), and so the rest is just
882	 * a straightforward filling of remaining padding gap with full
883	 * `<type>: sizeof(<type>);` markers, except for the last one, which
884	 * might need smaller than sizeof(<type>) padding.
885	 */
886	while (cur_off != next_off) {
887		bits = min(next_off - cur_off, pad_bits);
888		if (bits == pad_bits) {
889			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
890			cur_off += bits;
891			continue;
892		}
893		/* For the remainder padding that doesn't cover entire
894		 * pad_type bit length, we pick the smallest necessary type.
895		 * This is pure aesthetics, we could have just used `long`,
896		 * but having smallest necessary one communicates better the
897		 * scale of the padding gap.
898		 */
899		for (i = ARRAY_SIZE(pads) - 1; i >= 0; i--) {
900			pad_type = pads[i].name;
901			pad_bits = pads[i].bits;
902			if (pad_bits < bits)
903				continue;
904
905			btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, bits);
906			cur_off += bits;
907			break;
908		}
909	}
910}
911
912static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
913				     const struct btf_type *t)
914{
915	btf_dump_printf(d, "%s %s",
916			btf_is_struct(t) ? "struct" : "union",
917			btf_dump_type_name(d, id));
918}
919
920static void btf_dump_emit_struct_def(struct btf_dump *d,
921				     __u32 id,
922				     const struct btf_type *t,
923				     int lvl)
924{
925	const struct btf_member *m = btf_members(t);
926	bool is_struct = btf_is_struct(t);
927	bool packed, prev_bitfield = false;
928	int align, i, off = 0;
929	__u16 vlen = btf_vlen(t);
930
931	align = btf__align_of(d->btf, id);
932	packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
933
934	btf_dump_printf(d, "%s%s%s {",
935			is_struct ? "struct" : "union",
936			t->name_off ? " " : "",
937			btf_dump_type_name(d, id));
938
939	for (i = 0; i < vlen; i++, m++) {
940		const char *fname;
941		int m_off, m_sz, m_align;
942		bool in_bitfield;
943
944		fname = btf_name_of(d, m->name_off);
945		m_sz = btf_member_bitfield_size(t, i);
946		m_off = btf_member_bit_offset(t, i);
947		m_align = packed ? 1 : btf__align_of(d->btf, m->type);
948
949		in_bitfield = prev_bitfield && m_sz != 0;
950
951		btf_dump_emit_bit_padding(d, off, m_off, m_align, in_bitfield, lvl + 1);
952		btf_dump_printf(d, "\n%s", pfx(lvl + 1));
953		btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
954
955		if (m_sz) {
956			btf_dump_printf(d, ": %d", m_sz);
957			off = m_off + m_sz;
958			prev_bitfield = true;
959		} else {
960			m_sz = max((__s64)0, btf__resolve_size(d->btf, m->type));
961			off = m_off + m_sz * 8;
962			prev_bitfield = false;
963		}
964
965		btf_dump_printf(d, ";");
966	}
967
968	/* pad at the end, if necessary */
969	if (is_struct)
970		btf_dump_emit_bit_padding(d, off, t->size * 8, align, false, lvl + 1);
971
972	/*
973	 * Keep `struct empty {}` on a single line,
974	 * only print newline when there are regular or padding fields.
975	 */
976	if (vlen || t->size) {
977		btf_dump_printf(d, "\n");
978		btf_dump_printf(d, "%s}", pfx(lvl));
979	} else {
980		btf_dump_printf(d, "}");
981	}
982	if (packed)
983		btf_dump_printf(d, " __attribute__((packed))");
984}
985
986static const char *missing_base_types[][2] = {
987	/*
988	 * GCC emits typedefs to its internal __PolyX_t types when compiling Arm
989	 * SIMD intrinsics. Alias them to standard base types.
990	 */
991	{ "__Poly8_t",		"unsigned char" },
992	{ "__Poly16_t",		"unsigned short" },
993	{ "__Poly64_t",		"unsigned long long" },
994	{ "__Poly128_t",	"unsigned __int128" },
995};
996
997static void btf_dump_emit_missing_aliases(struct btf_dump *d, __u32 id,
998					  const struct btf_type *t)
999{
1000	const char *name = btf_dump_type_name(d, id);
1001	int i;
1002
1003	for (i = 0; i < ARRAY_SIZE(missing_base_types); i++) {
1004		if (strcmp(name, missing_base_types[i][0]) == 0) {
1005			btf_dump_printf(d, "typedef %s %s;\n\n",
1006					missing_base_types[i][1], name);
1007			break;
1008		}
1009	}
1010}
1011
1012static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
1013				   const struct btf_type *t)
1014{
1015	btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
1016}
1017
1018static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
1019				   const struct btf_type *t,
1020				   int lvl)
1021{
1022	const struct btf_enum *v = btf_enum(t);
1023	__u16 vlen = btf_vlen(t);
1024	const char *name;
1025	size_t dup_cnt;
1026	int i;
1027
1028	btf_dump_printf(d, "enum%s%s",
1029			t->name_off ? " " : "",
1030			btf_dump_type_name(d, id));
1031
1032	if (vlen) {
1033		btf_dump_printf(d, " {");
1034		for (i = 0; i < vlen; i++, v++) {
1035			name = btf_name_of(d, v->name_off);
1036			/* enumerators share namespace with typedef idents */
1037			dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
1038			if (dup_cnt > 1) {
1039				btf_dump_printf(d, "\n%s%s___%zu = %u,",
1040						pfx(lvl + 1), name, dup_cnt,
1041						(__u32)v->val);
1042			} else {
1043				btf_dump_printf(d, "\n%s%s = %u,",
1044						pfx(lvl + 1), name,
1045						(__u32)v->val);
1046			}
1047		}
1048		btf_dump_printf(d, "\n%s}", pfx(lvl));
1049	}
1050}
1051
1052static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
1053				  const struct btf_type *t)
1054{
1055	const char *name = btf_dump_type_name(d, id);
1056
1057	if (btf_kflag(t))
1058		btf_dump_printf(d, "union %s", name);
1059	else
1060		btf_dump_printf(d, "struct %s", name);
1061}
1062
1063static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
1064				     const struct btf_type *t, int lvl)
1065{
1066	const char *name = btf_dump_ident_name(d, id);
1067
1068	/*
1069	 * Old GCC versions emit an invalid typedef for __gnuc_va_list
1070	 * pointing to VOID. This generates warnings from btf_dump() and
1071	 * results in an uncompilable header file, so we fix it up here
1072	 * with a valid typedef to __builtin_va_list.
1073	 */
1074	if (t->type == 0 && strcmp(name, "__gnuc_va_list") == 0) {
1075		btf_dump_printf(d, "typedef __builtin_va_list __gnuc_va_list");
1076		return;
1077	}
1078
1079	btf_dump_printf(d, "typedef ");
1080	btf_dump_emit_type_decl(d, t->type, name, lvl);
1081}
1082
1083static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
1084{
1085	__u32 *new_stack;
1086	size_t new_cap;
1087
1088	if (d->decl_stack_cnt >= d->decl_stack_cap) {
1089		new_cap = max(16, d->decl_stack_cap * 3 / 2);
1090		new_stack = libbpf_reallocarray(d->decl_stack, new_cap, sizeof(new_stack[0]));
1091		if (!new_stack)
1092			return -ENOMEM;
1093		d->decl_stack = new_stack;
1094		d->decl_stack_cap = new_cap;
1095	}
1096
1097	d->decl_stack[d->decl_stack_cnt++] = id;
1098
1099	return 0;
1100}
1101
1102/*
1103 * Emit type declaration (e.g., field type declaration in a struct or argument
1104 * declaration in function prototype) in correct C syntax.
1105 *
1106 * For most types it's trivial, but there are a few quirky type declaration
1107 * cases worth mentioning:
1108 *   - function prototypes (especially nesting of function prototypes);
1109 *   - arrays;
1110 *   - const/volatile/restrict for pointers vs other types.
1111 *
1112 * For a good discussion of *PARSING* C syntax (as a human), see
1113 * Peter van der Linden's "Expert C Programming: Deep C Secrets",
1114 * Ch.3 "Unscrambling Declarations in C".
1115 *
1116 * It won't help much with BTF-to-C conversion, though, as that's the opposite
1117 * problem. So we came up with this algorithm, which is essentially van der
1118 * Linden's parsing algorithm in reverse. It goes from the structured BTF
1119 * representation of a type declaration to valid, compilable C syntax.
1120 *
1121 * For instance, consider this C typedef:
1122 *	typedef const int * const * arr_t[10];
1123 * It will be represented in BTF with this chain of BTF types:
1124 *	[typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
1125 *
1126 * Notice how [const] modifier always goes before type it modifies in BTF type
1127 * graph, but in C syntax, const/volatile/restrict modifiers are written to
1128 * the right of pointers, but to the left of other types. There are also other
1129 * quirks, like function pointers, arrays of them, functions returning other
1130 * functions, etc.
1131 *
1132 * We handle that by pushing all the types to a stack, until we hit "terminal"
1133 * type (int/enum/struct/union/fwd). Then depending on the kind of a type on
1134 * top of the stack, modifiers are handled differently. Arrays and function
1135 * pointers also have wildly different syntax, as does the way their nesting
1136 * is expressed. See the code for the authoritative definition.
1137 *
1138 * To avoid allocating new stack for each independent chain of BTF types, we
1139 * share one bigger stack, with each chain working only on its own local view
1140 * of a stack frame. Some care is required to "pop" stack frames after
1141 * processing type declaration chain.
1142 */
1143int btf_dump__emit_type_decl(struct btf_dump *d, __u32 id,
1144			     const struct btf_dump_emit_type_decl_opts *opts)
1145{
1146	const char *fname;
1147	int lvl, err;
1148
1149	if (!OPTS_VALID(opts, btf_dump_emit_type_decl_opts))
1150		return -EINVAL;
1151
1152	err = btf_dump_resize(d);
1153	if (err)
1154		return -EINVAL;
1155
1156	fname = OPTS_GET(opts, field_name, "");
1157	lvl = OPTS_GET(opts, indent_level, 0);
1158	d->strip_mods = OPTS_GET(opts, strip_mods, false);
1159	btf_dump_emit_type_decl(d, id, fname, lvl);
1160	d->strip_mods = false;
1161	return 0;
1162}
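
/*
 * Example usage from an application (a sketch, not code from this file; the
 * option fields are the ones read via OPTS_GET() above, and
 * DECLARE_LIBBPF_OPTS comes from libbpf's public headers):
 *
 *	DECLARE_LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts,
 *		.field_name = "my_field",
 *		.indent_level = 1,
 *	);
 *
 *	btf_dump__emit_type_decl(d, type_id, &opts);
 *
 * For a plain 'int' type this prints "int my_field" through printf_fn;
 * indent_level only matters when anonymous struct/enum definitions are
 * emitted inline.
 */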
1163
1164static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
1165				    const char *fname, int lvl)
1166{
1167	struct id_stack decl_stack;
1168	const struct btf_type *t;
1169	int err, stack_start;
1170
1171	stack_start = d->decl_stack_cnt;
1172	for (;;) {
1173		t = btf__type_by_id(d->btf, id);
1174		if (d->strip_mods && btf_is_mod(t))
1175			goto skip_mod;
1176
1177		err = btf_dump_push_decl_stack_id(d, id);
1178		if (err < 0) {
1179			/*
1180			 * if we don't have enough memory for entire type decl
1181			 * chain, restore stack, emit warning, and try to
1182			 * proceed nevertheless
1183			 */
1184			pr_warn("not enough memory for decl stack:%d", err);
1185			d->decl_stack_cnt = stack_start;
1186			return;
1187		}
1188skip_mod:
1189		/* VOID */
1190		if (id == 0)
1191			break;
1192
1193		switch (btf_kind(t)) {
1194		case BTF_KIND_PTR:
1195		case BTF_KIND_VOLATILE:
1196		case BTF_KIND_CONST:
1197		case BTF_KIND_RESTRICT:
1198		case BTF_KIND_FUNC_PROTO:
1199			id = t->type;
1200			break;
1201		case BTF_KIND_ARRAY:
1202			id = btf_array(t)->type;
1203			break;
1204		case BTF_KIND_INT:
1205		case BTF_KIND_ENUM:
1206		case BTF_KIND_FWD:
1207		case BTF_KIND_STRUCT:
1208		case BTF_KIND_UNION:
1209		case BTF_KIND_TYPEDEF:
1210			goto done;
1211		default:
1212			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
1213				btf_kind(t), id);
1214			goto done;
1215		}
1216	}
1217done:
1218	/*
1219	 * We might be inside a chain of declarations (e.g., array of function
1220	 * pointers returning anonymous (so inlined) structs, having another
1221	 * array field). Each of those needs its own "stack frame" to handle
1222	 * emitting of declarations. Those stack frames are non-overlapping
1223	 * portions of shared btf_dump->decl_stack. To make it a bit nicer to
1224	 * handle this set of nested stacks, we create a view corresponding to
1225	 * our own "stack frame" and work with it as an independent stack.
1226	 * We'll need to clean up after emit_type_chain() returns, though.
1227	 */
1228	decl_stack.ids = d->decl_stack + stack_start;
1229	decl_stack.cnt = d->decl_stack_cnt - stack_start;
1230	btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
1231	/*
1232	 * emit_type_chain() guarantees that it will pop its entire decl_stack
1233	 * frame before returning. But it works with a read-only view into
1234	 * decl_stack, so it doesn't actually pop anything from the
1235	 * perspective of shared btf_dump->decl_stack, per se. We need to
1236	 * reset decl_stack state to how it was before us to avoid it growing
1237	 * all the time.
1238	 */
1239	d->decl_stack_cnt = stack_start;
1240}
1241
1242static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
1243{
1244	const struct btf_type *t;
1245	__u32 id;
1246
1247	while (decl_stack->cnt) {
1248		id = decl_stack->ids[decl_stack->cnt - 1];
1249		t = btf__type_by_id(d->btf, id);
1250
1251		switch (btf_kind(t)) {
1252		case BTF_KIND_VOLATILE:
1253			btf_dump_printf(d, "volatile ");
1254			break;
1255		case BTF_KIND_CONST:
1256			btf_dump_printf(d, "const ");
1257			break;
1258		case BTF_KIND_RESTRICT:
1259			btf_dump_printf(d, "restrict ");
1260			break;
1261		default:
1262			return;
1263		}
1264		decl_stack->cnt--;
1265	}
1266}
1267
1268static void btf_dump_drop_mods(struct btf_dump *d, struct id_stack *decl_stack)
1269{
1270	const struct btf_type *t;
1271	__u32 id;
1272
1273	while (decl_stack->cnt) {
1274		id = decl_stack->ids[decl_stack->cnt - 1];
1275		t = btf__type_by_id(d->btf, id);
1276		if (!btf_is_mod(t))
1277			return;
1278		decl_stack->cnt--;
1279	}
1280}
1281
1282static void btf_dump_emit_name(const struct btf_dump *d,
1283			       const char *name, bool last_was_ptr)
1284{
1285	bool separate = name[0] && !last_was_ptr;
1286
1287	btf_dump_printf(d, "%s%s", separate ? " " : "", name);
1288}
1289
1290static void btf_dump_emit_type_chain(struct btf_dump *d,
1291				     struct id_stack *decls,
1292				     const char *fname, int lvl)
1293{
1294	/*
1295	 * last_was_ptr is used to determine if we need to separate pointer
1296	 * asterisk (*) from previous part of type signature with space, so
1297	 * that we get `int ***`, instead of `int * * *`. We default to true
1298	 * for cases where we have single pointer in a chain. E.g., in ptr ->
1299	 * func_proto case. func_proto will start a new emit_type_chain call
1300	 * with just ptr, which should be emitted as (*) or (*<fname>), so we
1301	 * don't want to prepend space for that last pointer.
1302	 */
1303	bool last_was_ptr = true;
1304	const struct btf_type *t;
1305	const char *name;
1306	__u16 kind;
1307	__u32 id;
1308
1309	while (decls->cnt) {
1310		id = decls->ids[--decls->cnt];
1311		if (id == 0) {
1312			/* VOID is a special snowflake */
1313			btf_dump_emit_mods(d, decls);
1314			btf_dump_printf(d, "void");
1315			last_was_ptr = false;
1316			continue;
1317		}
1318
1319		t = btf__type_by_id(d->btf, id);
1320		kind = btf_kind(t);
1321
1322		switch (kind) {
1323		case BTF_KIND_INT:
1324			btf_dump_emit_mods(d, decls);
1325			name = btf_name_of(d, t->name_off);
1326			btf_dump_printf(d, "%s", name);
1327			break;
1328		case BTF_KIND_STRUCT:
1329		case BTF_KIND_UNION:
1330			btf_dump_emit_mods(d, decls);
1331			/* inline anonymous struct/union */
1332			if (t->name_off == 0)
1333				btf_dump_emit_struct_def(d, id, t, lvl);
1334			else
1335				btf_dump_emit_struct_fwd(d, id, t);
1336			break;
1337		case BTF_KIND_ENUM:
1338			btf_dump_emit_mods(d, decls);
1339			/* inline anonymous enum */
1340			if (t->name_off == 0)
1341				btf_dump_emit_enum_def(d, id, t, lvl);
1342			else
1343				btf_dump_emit_enum_fwd(d, id, t);
1344			break;
1345		case BTF_KIND_FWD:
1346			btf_dump_emit_mods(d, decls);
1347			btf_dump_emit_fwd_def(d, id, t);
1348			break;
1349		case BTF_KIND_TYPEDEF:
1350			btf_dump_emit_mods(d, decls);
1351			btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
1352			break;
1353		case BTF_KIND_PTR:
1354			btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
1355			break;
1356		case BTF_KIND_VOLATILE:
1357			btf_dump_printf(d, " volatile");
1358			break;
1359		case BTF_KIND_CONST:
1360			btf_dump_printf(d, " const");
1361			break;
1362		case BTF_KIND_RESTRICT:
1363			btf_dump_printf(d, " restrict");
1364			break;
1365		case BTF_KIND_ARRAY: {
1366			const struct btf_array *a = btf_array(t);
1367			const struct btf_type *next_t;
1368			__u32 next_id;
1369			bool multidim;
1370			/*
1371			 * GCC has a bug
1372			 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
1373			 * which causes it to emit extra const/volatile
1374			 * modifiers for an array, if array's element type has
1375			 * const/volatile modifiers. Clang doesn't do that.
1376			 * In general, it doesn't seem very meaningful to have
1377			 * a const/volatile modifier for array, so we are
1378			 * going to silently skip them here.
1379			 */
1380			btf_dump_drop_mods(d, decls);
1381
1382			if (decls->cnt == 0) {
1383				btf_dump_emit_name(d, fname, last_was_ptr);
1384				btf_dump_printf(d, "[%u]", a->nelems);
1385				return;
1386			}
1387
1388			next_id = decls->ids[decls->cnt - 1];
1389			next_t = btf__type_by_id(d->btf, next_id);
1390			multidim = btf_is_array(next_t);
1391			/* we need space if we have named non-pointer */
1392			if (fname[0] && !last_was_ptr)
1393				btf_dump_printf(d, " ");
1394			/* no parentheses for multi-dimensional array */
1395			if (!multidim)
1396				btf_dump_printf(d, "(");
1397			btf_dump_emit_type_chain(d, decls, fname, lvl);
1398			if (!multidim)
1399				btf_dump_printf(d, ")");
1400			btf_dump_printf(d, "[%u]", a->nelems);
1401			return;
1402		}
1403		case BTF_KIND_FUNC_PROTO: {
1404			const struct btf_param *p = btf_params(t);
1405			__u16 vlen = btf_vlen(t);
1406			int i;
1407
1408			/*
1409			 * GCC emits extra volatile qualifier for
1410			 * __attribute__((noreturn)) function pointers. Clang
1411			 * doesn't do it. It's a GCC quirk for backwards
1412			 * compatibility with code written for GCC <2.5. So,
1413			 * similarly to extra qualifiers for array, just drop
1414			 * them, instead of handling them.
1415			 */
1416			btf_dump_drop_mods(d, decls);
1417			if (decls->cnt) {
1418				btf_dump_printf(d, " (");
1419				btf_dump_emit_type_chain(d, decls, fname, lvl);
1420				btf_dump_printf(d, ")");
1421			} else {
1422				btf_dump_emit_name(d, fname, last_was_ptr);
1423			}
1424			btf_dump_printf(d, "(");
1425			/*
1426			 * Clang for BPF target generates func_proto with no
1427			 * args as a func_proto with a single void arg (e.g.,
1428			 * `int (*f)(void)` vs just `int (*f)()`). We are
1429			 * going to pretend there are no args in such a case.
1430			 */
1431			if (vlen == 1 && p->type == 0) {
1432				btf_dump_printf(d, ")");
1433				return;
1434			}
1435
1436			for (i = 0; i < vlen; i++, p++) {
1437				if (i > 0)
1438					btf_dump_printf(d, ", ");
1439
1440				/* last arg of type void is vararg */
1441				if (i == vlen - 1 && p->type == 0) {
1442					btf_dump_printf(d, "...");
1443					break;
1444				}
1445
1446				name = btf_name_of(d, p->name_off);
1447				btf_dump_emit_type_decl(d, p->type, name, lvl);
1448			}
1449
1450			btf_dump_printf(d, ")");
1451			return;
1452		}
1453		default:
1454			pr_warn("unexpected type in decl chain, kind:%u, id:[%u]\n",
1455				kind, id);
1456			return;
1457		}
1458
1459		last_was_ptr = kind == BTF_KIND_PTR;
1460	}
1461
1462	btf_dump_emit_name(d, fname, last_was_ptr);
1463}
1464
1465/* return number of duplicates (occurrences) of a given name */
1466static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
1467				 const char *orig_name)
1468{
1469	char *old_name, *new_name;
1470	size_t dup_cnt = 0;
1471	int err;
1472
1473	new_name = strdup(orig_name);
1474	if (!new_name)
1475		return 1;
1476
1477	hashmap__find(name_map, orig_name, (void **)&dup_cnt);
1478	dup_cnt++;
1479
1480	err = hashmap__set(name_map, new_name, (void *)dup_cnt,
1481			   (const void **)&old_name, NULL);
1482	if (err)
1483		free(new_name);
1484
1485	free(old_name);
1486
1487	return dup_cnt;
1488}
1489
1490static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
1491					 struct hashmap *name_map)
1492{
1493	struct btf_dump_type_aux_state *s = &d->type_states[id];
1494	const struct btf_type *t = btf__type_by_id(d->btf, id);
1495	const char *orig_name = btf_name_of(d, t->name_off);
1496	const char **cached_name = &d->cached_names[id];
1497	size_t dup_cnt;
1498
1499	if (t->name_off == 0)
1500		return "";
1501
1502	if (s->name_resolved)
1503		return *cached_name ? *cached_name : orig_name;
1504
1505	if (btf_is_fwd(t) || (btf_is_enum(t) && btf_vlen(t) == 0)) {
1506		s->name_resolved = 1;
1507		return orig_name;
1508	}
1509
1510	dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
1511	if (dup_cnt > 1) {
1512		const size_t max_len = 256;
1513		char new_name[max_len];
1514
1515		snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
1516		*cached_name = strdup(new_name);
1517	}
1518
1519	s->name_resolved = 1;
1520	return *cached_name ? *cached_name : orig_name;
1521}
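
/*
 * For example, if BTF contains two distinct types that are both named
 * 'struct s' (e.g., incompatible definitions coming from different
 * compilation units), the first one keeps the name 's', while the second one
 * is emitted as 's___2', following the "%s___%zu" format above.
 */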
1522
1523static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
1524{
1525	return btf_dump_resolve_name(d, id, d->type_names);
1526}
1527
1528static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
1529{
1530	return btf_dump_resolve_name(d, id, d->ident_names);
1531}
1532