// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2017, Intel Corporation.
 */

/* Manage metrics and groups of metrics from JSON files */

#include "metricgroup.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "strbuf.h"
#include "pmu.h"
#include "expr.h"
#include "rblist.h"
#include <string.h>
#include <errno.h>
#include "strlist.h"
#include <assert.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <subcmd/parse-options.h>
#include <api/fs/fs.h>
#include "util.h"
#include <asm/bug.h>
#include "cgroup.h"

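/*
 * Find the metric_event for @evsel in @metric_events. When @create is true
 * and no node exists yet, a new one is inserted. Returns NULL if the rblist
 * is NULL or if the lookup/insertion fails.
 */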
struct metric_event *metricgroup__lookup(struct rblist *metric_events,
					 struct evsel *evsel,
					 bool create)
{
	struct rb_node *nd;
	struct metric_event me = {
		.evsel = evsel
	};

	if (!metric_events)
		return NULL;

	nd = rblist__find(metric_events, &me);
	if (nd)
		return container_of(nd, struct metric_event, nd);
	if (create) {
		rblist__add_node(metric_events, &me);
		nd = rblist__find(metric_events, &me);
		if (nd)
			return container_of(nd, struct metric_event, nd);
	}
	return NULL;
}

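/*
 * rblist callbacks for the metric_events tree: nodes are keyed and ordered
 * by the evsel pointer of the struct metric_event they embed.
 */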
static int metric_event_cmp(struct rb_node *rb_node, const void *entry)
{
	struct metric_event *a = container_of(rb_node,
					      struct metric_event,
					      nd);
	const struct metric_event *b = entry;

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}

static struct rb_node *metric_event_new(struct rblist *rblist __maybe_unused,
					const void *entry)
{
	struct metric_event *me = malloc(sizeof(struct metric_event));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct metric_event));
	me->evsel = ((struct metric_event *)entry)->evsel;
	INIT_LIST_HEAD(&me->head);
	return &me->nd;
}

static void metric_event_delete(struct rblist *rblist __maybe_unused,
				struct rb_node *rb_node)
{
	struct metric_event *me = container_of(rb_node, struct metric_event, nd);
	struct metric_expr *expr, *tmp;

	list_for_each_entry_safe(expr, tmp, &me->head, nd) {
		free(expr->metric_refs);
		free(expr->metric_events);
		free(expr);
	}

	free(me);
}

static void metricgroup__rblist_init(struct rblist *metric_events)
{
	rblist__init(metric_events);
	metric_events->node_cmp = metric_event_cmp;
	metric_events->node_new = metric_event_new;
	metric_events->node_delete = metric_event_delete;
}

void metricgroup__rblist_exit(struct rblist *metric_events)
{
	rblist__exit(metric_events);
}

/*
 * A node in the list of referenced metrics. metric_expr
 * is held as a convenience to avoid a search through the
 * metric list.
 */
struct metric_ref_node {
	const char *metric_name;
	const char *metric_expr;
	struct list_head list;
};

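/*
 * A metric to be evaluated: the parse context holds the event IDs from the
 * expression, metric_refs lists any referenced metrics and has_constraint
 * records whether the events must stay ungrouped.
 */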
struct metric {
	struct list_head nd;
	struct expr_parse_ctx pctx;
	const char *metric_name;
	const char *metric_expr;
	const char *metric_unit;
	struct list_head metric_refs;
	int metric_refs_cnt;
	int runtime;
	bool has_constraint;
};

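/*
 * Bound on the number of expr_id entries used while resolving referenced
 * metrics; hitting it means the metric nesting is too deep to process.
 */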
#define RECURSION_ID_MAX 1000

struct expr_ids {
	struct expr_id	id[RECURSION_ID_MAX];
	int		cnt;
};

static struct expr_id *expr_ids__alloc(struct expr_ids *ids)
{
	if (ids->cnt >= RECURSION_ID_MAX)
		return NULL;
	return &ids->id[ids->cnt++];
}

static void expr_ids__exit(struct expr_ids *ids)
{
	int i;

	for (i = 0; i < ids->cnt; i++)
		free(ids->id[i].id);
}

static bool contains_event(struct evsel **metric_events, int num_events,
			const char *event_name)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (!strcmp(metric_events[i]->name, event_name))
			return true;
	}
	return false;
}

/**
 * Find a group of events in perf_evlist that correspond to those from a parsed
 * metric expression. Note, as find_evsel_group is called in the same order as
 * perf_evlist was constructed, metric_no_merge doesn't need to test for
 * underfilling a group.
 * @perf_evlist: a list of events something like: {metric1 leader, metric1
 * sibling, metric1 sibling}:W,duration_time,{metric2 leader, metric2 sibling,
 * metric2 sibling}:W,duration_time
 * @pctx: the parse context for the metric expression.
 * @metric_no_merge: don't attempt to share events for the metric with other
 * metrics.
 * @has_constraint: is there a constraint on the group of events? If so, the
 * events won't be grouped.
 * @metric_events: out argument, null terminated array of evsel's associated
 * with the metric.
 * @evlist_used: in/out argument, bitmap tracking which evlist events are used.
 * @return the first metric event or NULL on failure.
 */
static struct evsel *find_evsel_group(struct evlist *perf_evlist,
				      struct expr_parse_ctx *pctx,
				      bool metric_no_merge,
				      bool has_constraint,
				      struct evsel **metric_events,
				      unsigned long *evlist_used)
{
	struct evsel *ev, *current_leader = NULL;
	struct expr_id_data *val_ptr;
	int i = 0, matched_events = 0, events_to_match;
	const int idnum = (int)hashmap__size(&pctx->ids);

	/*
	 * duration_time is always grouped separately. When events are grouped
	 * (i.e. has_constraint is false), ignore it in the matching loop and
	 * add it to metric_events at the end.
	 */
	if (!has_constraint &&
	    hashmap__find(&pctx->ids, "duration_time", (void **)&val_ptr))
		events_to_match = idnum - 1;
	else
		events_to_match = idnum;

	evlist__for_each_entry (perf_evlist, ev) {
		/*
		 * Events with a constraint aren't grouped and match the first
		 * events available.
		 */
		if (has_constraint && ev->weak_group)
			continue;
		/* Ignore event if already used and merging is disabled. */
		if (metric_no_merge && test_bit(ev->idx, evlist_used))
			continue;
		if (!has_constraint && ev->leader != current_leader) {
			/*
			 * Start of a new group, discard the whole match and
			 * start again.
			 */
			matched_events = 0;
			memset(metric_events, 0,
				sizeof(struct evsel *) * idnum);
			current_leader = ev->leader;
		}
		/*
		 * Check for duplicate events with the same name. For example,
		 * uncore_imc/cas_count_read/ will turn into 6 events per socket
		 * on skylakex. Only the first such event is placed in
		 * metric_events. If events aren't grouped then this also
		 * ensures that the same event in different sibling groups
		 * aren't both added to metric_events.
		 */
		if (contains_event(metric_events, matched_events, ev->name))
			continue;
		/* Does this event belong to the parse context? */
		if (hashmap__find(&pctx->ids, ev->name, (void **)&val_ptr))
			metric_events[matched_events++] = ev;

		if (matched_events == events_to_match)
			break;
	}

	if (events_to_match != idnum) {
		/* Add the first duration_time. */
		evlist__for_each_entry(perf_evlist, ev) {
			if (!strcmp(ev->name, "duration_time")) {
				metric_events[matched_events++] = ev;
				break;
			}
		}
	}

	if (matched_events != idnum) {
		/* Not a whole match */
		return NULL;
	}

	metric_events[idnum] = NULL;

	for (i = 0; i < idnum; i++) {
		ev = metric_events[i];
		/* Don't free the used events. */
		set_bit(ev->idx, evlist_used);
		/*
		 * The metric leader points to the identically named event in
		 * metric_events.
		 */
		ev->metric_leader = ev;
		/*
		 * Mark two events with identical names in the same group (or
		 * globally) as being in use as uncore events may be duplicated
		 * for each pmu. Set the metric leader of such events to be the
		 * event that appears in metric_events.
		 */
		evlist__for_each_entry_continue(perf_evlist, ev) {
			/*
			 * If events are grouped then the search can terminate
			 * when the group is left.
			 */
			if (!has_constraint &&
			    ev->leader != metric_events[i]->leader)
				break;
			if (!strcmp(metric_events[i]->name, ev->name)) {
				set_bit(ev->idx, evlist_used);
				ev->metric_leader = metric_events[i];
			}
		}
	}

	return metric_events[0];
}

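/*
 * For each parsed metric, find its events in perf_evlist with
 * find_evsel_group(), attach a struct metric_expr to the per-evsel
 * metric_event in metric_events_list, and finally remove any evlist
 * entries that no metric ended up using.
 */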
static int metricgroup__setup_events(struct list_head *groups,
				     bool metric_no_merge,
				     struct evlist *perf_evlist,
				     struct rblist *metric_events_list)
{
	struct metric_event *me;
	struct metric_expr *expr;
	int i = 0;
	int ret = 0;
	struct metric *m;
	struct evsel *evsel, *tmp;
	unsigned long *evlist_used;

	evlist_used = bitmap_alloc(perf_evlist->core.nr_entries);
	if (!evlist_used)
		return -ENOMEM;

	list_for_each_entry (m, groups, nd) {
		struct evsel **metric_events;
		struct metric_ref *metric_refs = NULL;

		metric_events = calloc(sizeof(void *),
				hashmap__size(&m->pctx.ids) + 1);
		if (!metric_events) {
			ret = -ENOMEM;
			break;
		}
		evsel = find_evsel_group(perf_evlist, &m->pctx,
					 metric_no_merge,
					 m->has_constraint, metric_events,
					 evlist_used);
		if (!evsel) {
			pr_debug("Cannot resolve %s: %s\n",
					m->metric_name, m->metric_expr);
			free(metric_events);
			continue;
		}
		for (i = 0; metric_events[i]; i++)
			metric_events[i]->collect_stat = true;
		me = metricgroup__lookup(metric_events_list, evsel, true);
		if (!me) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}
		expr = malloc(sizeof(struct metric_expr));
		if (!expr) {
			ret = -ENOMEM;
			free(metric_events);
			break;
		}

		/*
		 * Collect and store the referenced nested expressions
		 * for metric processing.
		 */
		if (m->metric_refs_cnt) {
			struct metric_ref_node *ref;

			metric_refs = zalloc(sizeof(struct metric_ref) * (m->metric_refs_cnt + 1));
			if (!metric_refs) {
				ret = -ENOMEM;
				free(metric_events);
				free(expr);
				break;
			}

			i = 0;
			list_for_each_entry(ref, &m->metric_refs, list) {
				/*
				 * Intentionally passing just const char pointers,
				 * originally from 'struct pmu_event' object.
				 * We don't need to change them, so there's no
				 * need to create our own copy.
				 */
				metric_refs[i].metric_name = ref->metric_name;
				metric_refs[i].metric_expr = ref->metric_expr;
				i++;
			}
		}

		expr->metric_refs = metric_refs;
		expr->metric_expr = m->metric_expr;
		expr->metric_name = m->metric_name;
		expr->metric_unit = m->metric_unit;
		expr->metric_events = metric_events;
		expr->runtime = m->runtime;
		list_add(&expr->nd, &me->head);
	}

	evlist__for_each_entry_safe(perf_evlist, tmp, evsel) {
		if (!test_bit(evsel->idx, evlist_used)) {
			evlist__remove(perf_evlist, evsel);
			evsel__delete(evsel);
		}
	}
	bitmap_free(evlist_used);

	return ret;
}

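/*
 * Does @list name one of the ';'-separated entries in @n (case-insensitive)?
 * "all" matches everything and a NULL @n matches only "No_group".
 */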
static bool match_metric(const char *n, const char *list)
{
	int len;
	char *m;

	if (!list)
		return false;
	if (!strcmp(list, "all"))
		return true;
	if (!n)
		return !strcasecmp(list, "No_group");
	len = strlen(list);
	m = strcasestr(n, list);
	if (!m)
		return false;
	if ((m == n || m[-1] == ';' || m[-1] == ' ') &&
	    (m[len] == 0 || m[len] == ';'))
		return true;
	return false;
}

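/* A node in the rblist of metric groups built for 'perf list' output. */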
struct mep {
	struct rb_node nd;
	const char *name;
	struct strlist *metrics;
};

static int mep_cmp(struct rb_node *rb_node, const void *entry)
{
	struct mep *a = container_of(rb_node, struct mep, nd);
	struct mep *b = (struct mep *)entry;

	return strcmp(a->name, b->name);
}

static struct rb_node *mep_new(struct rblist *rl __maybe_unused,
					const void *entry)
{
	struct mep *me = malloc(sizeof(struct mep));

	if (!me)
		return NULL;
	memcpy(me, entry, sizeof(struct mep));
	me->name = strdup(me->name);
	if (!me->name)
		goto out_me;
	me->metrics = strlist__new(NULL, NULL);
	if (!me->metrics)
		goto out_name;
	return &me->nd;
out_name:
	zfree(&me->name);
out_me:
	free(me);
	return NULL;
}

static struct mep *mep_lookup(struct rblist *groups, const char *name)
{
	struct rb_node *nd;
	struct mep me = {
		.name = name
	};
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	rblist__add_node(groups, &me);
	nd = rblist__find(groups, &me);
	if (nd)
		return container_of(nd, struct mep, nd);
	return NULL;
}

static void mep_delete(struct rblist *rl __maybe_unused,
		       struct rb_node *nd)
{
	struct mep *me = container_of(nd, struct mep, nd);

	strlist__delete(me->metrics);
	zfree(&me->name);
	free(me);
}

static void metricgroup__print_strlist(struct strlist *metrics, bool raw)
{
	struct str_node *sn;
	int n = 0;

	strlist__for_each_entry (sn, metrics) {
		if (raw)
			printf("%s%s", n > 0 ? " " : "", sn->s);
		else
			printf("  %s\n", sn->s);
		n++;
	}
	if (raw)
		putchar('\n');
}

void metricgroup__print(bool metrics, bool metricgroups, char *filter,
			bool raw, bool details)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;
	struct rblist groups;
	struct rb_node *node, *next;
	struct strlist *metriclist = NULL;

	if (!map)
		return;

	if (!metricgroups) {
		metriclist = strlist__new(NULL, NULL);
		if (!metriclist)
			return;
	}

	rblist__init(&groups);
	groups.node_new = mep_new;
	groups.node_cmp = mep_cmp;
	groups.node_delete = mep_delete;
	for (i = 0; ; i++) {
		const char *g;
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		g = pe->metric_group;
		if (!g && pe->metric_name) {
			if (pe->name)
				continue;
			g = "No_group";
		}
		if (g) {
			char *omg;
			char *mg = strdup(g);

			if (!mg)
				return;
			omg = mg;
			while ((g = strsep(&mg, ";")) != NULL) {
				struct mep *me;
				char *s;

				g = skip_spaces(g);
				if (*g == 0)
					g = "No_group";
				if (filter && !strstr(g, filter))
					continue;
				if (raw)
					s = (char *)pe->metric_name;
				else {
					if (asprintf(&s, "%s\n%*s%s]",
						     pe->metric_name, 8, "[", pe->desc) < 0)
						return;

					if (details) {
						if (asprintf(&s, "%s\n%*s%s]",
							     s, 8, "[", pe->metric_expr) < 0)
							return;
					}
				}

				if (!s)
					continue;

				if (!metricgroups) {
					strlist__add(metriclist, s);
				} else {
					me = mep_lookup(&groups, g);
					if (!me)
						continue;
					strlist__add(me->metrics, s);
				}

				if (!raw)
					free(s);
			}
			free(omg);
		}
	}

	if (!filter || !rblist__empty(&groups)) {
		if (metricgroups && !raw)
			printf("\nMetric Groups:\n\n");
		else if (metrics && !raw)
			printf("\nMetrics:\n\n");
	}

	for (node = rb_first_cached(&groups.entries); node; node = next) {
		struct mep *me = container_of(node, struct mep, nd);

		if (metricgroups)
			printf("%s%s%s", me->name, metrics && !raw ? ":" : "", raw ? " " : "\n");
		if (metrics)
			metricgroup__print_strlist(me->metrics, raw);
		next = rb_next(node);
		rblist__remove_node(&groups, node);
	}
	if (!metricgroups)
		metricgroup__print_strlist(metriclist, raw);
	strlist__delete(metriclist);
}

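/*
 * Emit the metric's events as a weak group ("{event1,event2,...}:W").
 * duration_time is kept outside the group: as a software event it could
 * otherwise make the hardware group not count.
 */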
static void metricgroup__add_metric_weak_group(struct strbuf *events,
					       struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool no_group = true, has_duration = false;

	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		pr_debug("found event %s\n", (const char *)cur->key);
		/*
		 * Duration time maps to a software event and can make
		 * groups not count. Always use it outside a
		 * group.
		 */
		if (!strcmp(cur->key, "duration_time")) {
			has_duration = true;
			continue;
		}
		strbuf_addf(events, "%s%s",
			no_group ? "{" : ",",
			(const char *)cur->key);
		no_group = false;
	}
	if (!no_group) {
		strbuf_addf(events, "}:W");
		if (has_duration)
			strbuf_addf(events, ",duration_time");
	} else if (has_duration)
		strbuf_addf(events, "duration_time");
}

static void metricgroup__add_metric_non_group(struct strbuf *events,
					      struct expr_parse_ctx *ctx)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool first = true;

	hashmap__for_each_entry((&ctx->ids), cur, bkt) {
		if (!first)
			strbuf_addf(events, ",");
		strbuf_addf(events, "%s", (const char *)cur->key);
		first = false;
	}
}

static void metricgroup___watchdog_constraint_hint(const char *name, bool foot)
{
	static bool violate_nmi_constraint;

	if (!foot) {
		pr_warning("Splitting metric group %s into standalone metrics.\n", name);
		violate_nmi_constraint = true;
		return;
	}

	if (!violate_nmi_constraint)
		return;

	pr_warning("Try disabling the NMI watchdog to comply with the NO_NMI_WATCHDOG metric constraint:\n"
		   "    echo 0 > /proc/sys/kernel/nmi_watchdog\n"
		   "    perf stat ...\n"
		   "    echo 1 > /proc/sys/kernel/nmi_watchdog\n");
}

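/*
 * Does the metric carry a constraint that forbids grouping its events?
 * Currently only NO_NMI_WATCHDOG is honoured, and only while the NMI
 * watchdog is actually enabled.
 */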
static bool metricgroup__has_constraint(struct pmu_event *pe)
{
	if (!pe->metric_constraint)
		return false;

	if (!strcmp(pe->metric_constraint, "NO_NMI_WATCHDOG") &&
	    sysctl__nmi_watchdog_enabled()) {
		metricgroup___watchdog_constraint_hint(pe->metric_name, false);
		return true;
	}

	return false;
}

int __weak arch_get_runtimeparam(struct pmu_event *pe __maybe_unused)
{
	return 1;
}

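/*
 * Add a metric to metric_list. On the first call for a metric (*mp == NULL)
 * a new struct metric is allocated and becomes the parent; recursive calls
 * for referenced metrics only record a metric_ref in the parent. In both
 * cases the expression's IDs are parsed into the parent's context.
 */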
static int __add_metric(struct list_head *metric_list,
			struct pmu_event *pe,
			bool metric_no_group,
			int runtime,
			struct metric **mp,
			struct expr_id *parent,
			struct expr_ids *ids)
{
	struct metric_ref_node *ref;
	struct metric *m;

	if (*mp == NULL) {
		/*
		 * We got here for the parent group,
		 * allocate it and put it on the list.
		 */
		m = zalloc(sizeof(*m));
		if (!m)
			return -ENOMEM;

		expr__ctx_init(&m->pctx);
		m->metric_name = pe->metric_name;
		m->metric_expr = pe->metric_expr;
		m->metric_unit = pe->unit;
		m->runtime = runtime;
		m->has_constraint = metric_no_group || metricgroup__has_constraint(pe);
		INIT_LIST_HEAD(&m->metric_refs);
		m->metric_refs_cnt = 0;

		parent = expr_ids__alloc(ids);
		if (!parent) {
			free(m);
			return -EINVAL;
		}

		parent->id = strdup(pe->metric_name);
		if (!parent->id) {
			free(m);
			return -ENOMEM;
		}
		*mp = m;
	} else {
		/*
		 * We got here for the referenced metric, via the
		 * recursive metricgroup__add_metric call, add
		 * it to the parent group.
		 */
		m = *mp;

		ref = malloc(sizeof(*ref));
		if (!ref)
			return -ENOMEM;

		/*
		 * Intentionally passing just const char pointers,
		 * from 'pe' object, so they never go away. We don't
		 * need to change them, so there's no need to create
		 * our own copy.
		 */
		ref->metric_name = pe->metric_name;
		ref->metric_expr = pe->metric_expr;

		list_add(&ref->list, &m->metric_refs);
		m->metric_refs_cnt++;
	}

	/* Force all IDs found in the metric to have us as the parent ID. */
	WARN_ON_ONCE(!parent);
	m->pctx.parent = parent;

	/*
	 * For both the parent and referenced metrics, we parse
	 * all the metric's IDs and add them to the parent context.
	 */
	if (expr__find_other(pe->metric_expr, NULL, &m->pctx, runtime) < 0) {
		if (m->metric_refs_cnt == 0) {
			expr__ctx_clear(&m->pctx);
			free(m);
			*mp = NULL;
		}
		return -EINVAL;
	}

	/*
	 * We add the new group only in the 'parent' call,
	 * so bail out for the referenced metric case.
	 */
	if (m->metric_refs_cnt)
		return 0;

	if (list_empty(metric_list))
		list_add(&m->nd, metric_list);
	else {
		struct list_head *pos;

		/* Place the largest groups at the front. */
		list_for_each_prev(pos, metric_list) {
			struct metric *old = list_entry(pos, struct metric, nd);

			if (hashmap__size(&m->pctx.ids) <=
			    hashmap__size(&old->pctx.ids))
				break;
		}
		list_add(&m->nd, pos);
	}

	return 0;
}

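/*
 * Walk every entry of a pmu_events_map table; the table ends with an entry
 * whose name, metric_group and metric_name are all NULL. map_for_each_metric()
 * additionally requires a metric expression and a group or name matching
 * __metric.
 */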
#define map_for_each_event(__pe, __idx, __map)				\
	for (__idx = 0, __pe = &__map->table[__idx];			\
	     __pe->name || __pe->metric_group || __pe->metric_name;	\
	     __pe = &__map->table[++__idx])

#define map_for_each_metric(__pe, __idx, __map, __metric)		\
	map_for_each_event(__pe, __idx, __map)				\
		if (__pe->metric_expr &&				\
		    (match_metric(__pe->metric_group, __metric) ||	\
		     match_metric(__pe->metric_name, __metric)))

static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
{
	struct pmu_event *pe;
	int i;

	map_for_each_event(pe, i, map) {
		if (match_metric(pe->metric_name, metric))
			return pe;
	}

	return NULL;
}

static int recursion_check(struct metric *m, const char *id, struct expr_id **parent,
			   struct expr_ids *ids)
{
	struct expr_id_data *data;
	struct expr_id *p;
	int ret;

	/*
	 * We get the parent referenced by the 'id' argument and
	 * traverse through all the parent object IDs to check
	 * whether we already processed 'id'. If we did, it's
	 * recursion and we fail.
	 */
	ret = expr__get_id(&m->pctx, id, &data);
	if (ret)
		return ret;

	p = data->parent;

	while (p->parent) {
		if (!strcmp(p->id, id)) {
			pr_err("failed: recursion detected for %s\n", id);
			return -1;
		}
		p = p->parent;
	}

	/*
	 * If we are over the limit of static entries, the metric
	 * is too difficult/nested to process, fail as well.
	 */
	p = expr_ids__alloc(ids);
	if (!p) {
		pr_err("failed: too many nested metrics\n");
		return -EINVAL;
	}

	p->id     = strdup(id);
	p->parent = data->parent;
	*parent   = p;

	return p->id ? 0 : -ENOMEM;
}

static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **mp,
		      struct expr_id *parent,
		      struct expr_ids *ids);

static int __resolve_metric(struct metric *m,
			    bool metric_no_group,
			    struct list_head *metric_list,
			    struct pmu_events_map *map,
			    struct expr_ids *ids)
{
	struct hashmap_entry *cur;
	size_t bkt;
	bool all;
	int ret;

	/*
	 * Iterate over all the parsed IDs and, if an ID is itself
	 * a metric, add it to the context.
	 */
	do {
		all = true;
		hashmap__for_each_entry((&m->pctx.ids), cur, bkt) {
			struct expr_id *parent;
			struct pmu_event *pe;

			pe = find_metric(cur->key, map);
			if (!pe)
				continue;

			ret = recursion_check(m, cur->key, &parent, ids);
			if (ret)
				return ret;

			all = false;
			/* The metric key itself needs to go out... */
			expr__del_id(&m->pctx, cur->key);

			/* ... and it gets resolved to the parent context. */
			ret = add_metric(metric_list, pe, metric_no_group, &m, parent, ids);
			if (ret)
				return ret;

			/*
			 * We added a new metric to the hashmap, so we need
			 * to break the iteration and start over.
			 */
			break;
		}
	} while (!all);

	return 0;
}

static int resolve_metric(bool metric_no_group,
			  struct list_head *metric_list,
			  struct pmu_events_map *map,
			  struct expr_ids *ids)
{
	struct metric *m;
	int err;

	list_for_each_entry(m, metric_list, nd) {
		err = __resolve_metric(m, metric_no_group, metric_list, map, ids);
		if (err)
			return err;
	}
	return 0;
}

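/*
 * Add @pe to @metric_list. Expressions containing the '?' runtime parameter
 * are instantiated once per value returned by arch_get_runtimeparam().
 */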
static int add_metric(struct list_head *metric_list,
		      struct pmu_event *pe,
		      bool metric_no_group,
		      struct metric **m,
		      struct expr_id *parent,
		      struct expr_ids *ids)
{
	struct metric *orig = *m;
	int ret = 0;

	pr_debug("metric expr %s for %s\n", pe->metric_expr, pe->metric_name);

	if (!strstr(pe->metric_expr, "?")) {
		ret = __add_metric(metric_list, pe, metric_no_group, 1, m, parent, ids);
	} else {
		int j, count;

		count = arch_get_runtimeparam(pe);

		/*
		 * This loop creates one metric per runtime parameter
		 * value and adds the resulting events to metric_list.
		 */
		for (j = 0; j < count && !ret; j++, *m = orig)
			ret = __add_metric(metric_list, pe, metric_no_group, j, m, parent, ids);
	}

	return ret;
}

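/*
 * Add every metric in @map whose name or group matches @metric to
 * @metric_list, resolve any referenced metrics, and append the required
 * events to @events. Returns -EINVAL if nothing matched.
 */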
static int metricgroup__add_metric(const char *metric, bool metric_no_group,
				   struct strbuf *events,
				   struct list_head *metric_list,
				   struct pmu_events_map *map)
{
	struct expr_ids ids = { .cnt = 0, };
	struct pmu_event *pe;
	struct metric *m;
	LIST_HEAD(list);
	int i, ret;
	bool has_match = false;

	map_for_each_metric(pe, i, map, metric) {
		has_match = true;
		m = NULL;

		ret = add_metric(&list, pe, metric_no_group, &m, NULL, &ids);
		if (ret)
			goto out;

		/*
		 * Process any possible referenced metrics
		 * included in the expression.
		 */
		ret = resolve_metric(metric_no_group,
				     &list, map, &ids);
		if (ret)
			goto out;
	}

	/* End of pmu events. */
	if (!has_match) {
		ret = -EINVAL;
		goto out;
	}

	list_for_each_entry(m, &list, nd) {
		if (events->len > 0)
			strbuf_addf(events, ",");

		if (m->has_constraint) {
			metricgroup__add_metric_non_group(events,
							  &m->pctx);
		} else {
			metricgroup__add_metric_weak_group(events,
							   &m->pctx);
		}
	}

out:
	/*
	 * Add to metric_list so the metrics can be released
	 * even on failure.
	 */
	list_splice(&list, metric_list);
	expr_ids__exit(&ids);
	return ret;
}

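/*
 * Parse a comma-separated list of metric or metric group names and add each
 * of them via metricgroup__add_metric(). @events is (re)initialized here.
 */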
static int metricgroup__add_metric_list(const char *list, bool metric_no_group,
					struct strbuf *events,
					struct list_head *metric_list,
					struct pmu_events_map *map)
{
	char *llist, *nlist, *p;
	int ret = -EINVAL;

	nlist = strdup(list);
	if (!nlist)
		return -ENOMEM;
	llist = nlist;

	strbuf_init(events, 100);
	strbuf_addf(events, "%s", "");

	while ((p = strsep(&llist, ",")) != NULL) {
		ret = metricgroup__add_metric(p, metric_no_group, events,
					      metric_list, map);
		if (ret == -EINVAL) {
			fprintf(stderr, "Cannot find metric or group `%s'\n",
					p);
			break;
		}
	}
	free(nlist);

	if (!ret)
		metricgroup___watchdog_constraint_hint(NULL, true);

	return ret;
}

static void metric__free_refs(struct metric *metric)
{
	struct metric_ref_node *ref, *tmp;

	list_for_each_entry_safe(ref, tmp, &metric->metric_refs, list) {
		list_del(&ref->list);
		free(ref);
	}
}

static void metricgroup__free_metrics(struct list_head *metric_list)
{
	struct metric *m, *tmp;

	list_for_each_entry_safe (m, tmp, metric_list, nd) {
		metric__free_refs(m);
		expr__ctx_clear(&m->pctx);
		list_del_init(&m->nd);
		free(m);
	}
}

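/*
 * Core entry point shared by the option parser and the tests: build the
 * metric list and event string for @str, parse the events into @perf_evlist
 * (optionally against @fake_pmu) and set up the metric_events rblist.
 */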
static int parse_groups(struct evlist *perf_evlist, const char *str,
			bool metric_no_group,
			bool metric_no_merge,
			struct perf_pmu *fake_pmu,
			struct rblist *metric_events,
			struct pmu_events_map *map)
{
	struct parse_events_error parse_error;
	struct strbuf extra_events;
	LIST_HEAD(metric_list);
	int ret;

	if (metric_events->nr_entries == 0)
		metricgroup__rblist_init(metric_events);
	ret = metricgroup__add_metric_list(str, metric_no_group,
					   &extra_events, &metric_list, map);
	if (ret)
		goto out;
	pr_debug("adding %s\n", extra_events.buf);
	bzero(&parse_error, sizeof(parse_error));
	ret = __parse_events(perf_evlist, extra_events.buf, &parse_error, fake_pmu);
	if (ret) {
		parse_events_print_error(&parse_error, extra_events.buf);
		goto out;
	}
	ret = metricgroup__setup_events(&metric_list, metric_no_merge,
					perf_evlist, metric_events);
out:
	metricgroup__free_metrics(&metric_list);
	strbuf_release(&extra_events);
	return ret;
}

int metricgroup__parse_groups(const struct option *opt,
			      const char *str,
			      bool metric_no_group,
			      bool metric_no_merge,
			      struct rblist *metric_events)
{
	struct evlist *perf_evlist = *(struct evlist **)opt->value;
	struct pmu_events_map *map = perf_pmu__find_map(NULL);

	if (!map)
		return 0;

	return parse_groups(perf_evlist, str, metric_no_group,
			    metric_no_merge, NULL, metric_events, map);
}

int metricgroup__parse_groups_test(struct evlist *evlist,
				   struct pmu_events_map *map,
				   const char *str,
				   bool metric_no_group,
				   bool metric_no_merge,
				   struct rblist *metric_events)
{
	return parse_groups(evlist, str, metric_no_group,
			    metric_no_merge, &perf_pmu__fake, metric_events, map);
}

bool metricgroup__has_metric(const char *metric)
{
	struct pmu_events_map *map = perf_pmu__find_map(NULL);
	struct pmu_event *pe;
	int i;

	if (!map)
		return false;

	for (i = 0; ; i++) {
		pe = &map->table[i];

		if (!pe->name && !pe->metric_group && !pe->metric_name)
			break;
		if (!pe->metric_expr)
			continue;
		if (match_metric(pe->metric_name, metric))
			return true;
	}
	return false;
}

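/*
 * Duplicate the metric_events rblist for a new cgroup's evlist: each
 * metric_expr is copied with its evsel pointers remapped, by index, to the
 * evsels of @evlist.
 */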
int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
				    struct rblist *new_metric_events,
				    struct rblist *old_metric_events)
{
	unsigned i;

	for (i = 0; i < rblist__nr_entries(old_metric_events); i++) {
		struct rb_node *nd;
		struct metric_event *old_me, *new_me;
		struct metric_expr *old_expr, *new_expr;
		struct evsel *evsel;
		size_t alloc_size;
		int idx, nr;

		nd = rblist__entry(old_metric_events, i);
		old_me = container_of(nd, struct metric_event, nd);

		evsel = evlist__find_evsel(evlist, old_me->evsel->idx);
		if (!evsel)
			return -EINVAL;
		new_me = metricgroup__lookup(new_metric_events, evsel, true);
		if (!new_me)
			return -ENOMEM;

		pr_debug("copying metric event for cgroup '%s': %s (idx=%d)\n",
			 cgrp ? cgrp->name : "root", evsel->name, evsel->idx);

		list_for_each_entry(old_expr, &old_me->head, nd) {
			new_expr = malloc(sizeof(*new_expr));
			if (!new_expr)
				return -ENOMEM;

			new_expr->metric_expr = old_expr->metric_expr;
			new_expr->metric_name = old_expr->metric_name;
			new_expr->metric_unit = old_expr->metric_unit;
			new_expr->runtime = old_expr->runtime;

			if (old_expr->metric_refs) {
				/* calculate number of metric_refs */
				for (nr = 0; old_expr->metric_refs[nr].metric_name; nr++)
					continue;
				alloc_size = sizeof(*new_expr->metric_refs);
				new_expr->metric_refs = calloc(nr + 1, alloc_size);
				if (!new_expr->metric_refs) {
					free(new_expr);
					return -ENOMEM;
				}

				memcpy(new_expr->metric_refs, old_expr->metric_refs,
				       nr * alloc_size);
			} else {
				new_expr->metric_refs = NULL;
			}

			/* calculate number of metric_events */
			for (nr = 0; old_expr->metric_events[nr]; nr++)
				continue;
			alloc_size = sizeof(*new_expr->metric_events);
			new_expr->metric_events = calloc(nr + 1, alloc_size);
			if (!new_expr->metric_events) {
				free(new_expr->metric_refs);
				free(new_expr);
				return -ENOMEM;
			}

			/* copy evsel in the same position */
			for (idx = 0; idx < nr; idx++) {
				evsel = old_expr->metric_events[idx];
				evsel = evlist__find_evsel(evlist, evsel->idx);
				if (evsel == NULL) {
					free(new_expr->metric_events);
					free(new_expr->metric_refs);
					free(new_expr);
					return -EINVAL;
				}
				new_expr->metric_events[idx] = evsel;
			}

			list_add(&new_expr->nd, &new_me->head);
		}
	}
	return 0;
}