Lines matching refs:evsel_list

148 static struct evlist	*evsel_list;
273 perf_evlist__reset_stats(evsel_list);
335 int nthreads = perf_thread_map__nr(evsel_list->core.threads);
391 ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
394 evlist__for_each_cpu(evsel_list, i, cpu) {
399 evlist__for_each_entry(evsel_list, counter) {
419 evlist__for_each_entry(evsel_list, counter) {
517 evlist__enable(evsel_list);
531 evlist__disable(evsel_list);
635 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
641 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
646 process_evlist(evsel_list, interval);
689 evsel_list->core.threads &&
690 evsel_list->core.threads->err_thread != -1) {
695 if (!thread_map__remove(evsel_list->core.threads,
696 evsel_list->core.threads->err_thread)) {
697 evsel_list->core.threads->err_thread = -1;
727 if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
732 child_pid = evsel_list->workload.pid;
736 perf_evlist__set_leader(evsel_list);
741 evlist__for_each_cpu(evsel_list, i, cpu) {
744 evlist__for_each_entry(evsel_list, counter) {
763 perf_evlist__reset_weak_group(evsel_list, counter, false);
791 evlist__for_each_cpu(evsel_list, i, cpu) {
794 evlist__for_each_entry(evsel_list, counter) {
802 evlist__for_each_entry(evsel_list, counter) {
831 evlist__for_each_entry(evsel_list, counter) {
842 evsel__store_ids(counter, evsel_list))
846 if (perf_evlist__apply_filters(evsel_list, &counter)) {
859 err = perf_session__write_header(perf_stat.session, evsel_list,
866 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
879 perf_evlist__start_workload(evsel_list);
882 if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
917 perf_evlist__save_aggr_prev_raw_counts(evsel_list);
919 perf_evlist__copy_prev_raw_counts(evsel_list);
920 perf_evlist__reset_prev_raw_counts(evsel_list);
935 * We need to keep evsel_list alive, because it's processed
936 * later; evsel_list will be closed afterwards.
939 evlist__close(evsel_list);
976 perf_evlist__print_counters(evsel_list, &stat_config, &target,
1077 OPT_CALLBACK('e', "event", &evsel_list, "event",
1080 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1116 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1159 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1175 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1255 evlist__for_each_entry(evsel_list, counter) {
1269 if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1276 if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1283 if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1290 if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
1298 if (cpu_map__build_core_map(evsel_list->core.cpus,
1314 * The evsel_list->cpus is the base we operate on,
1318 nr = perf_cpu_map__max(evsel_list->core.cpus);
1468 if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1475 if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1482 if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1489 if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
1639 struct option opt = { .value = &evsel_list };
1649 err = parse_events(evsel_list, transaction_attrs,
1652 err = parse_events(evsel_list,
1683 err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
1740 err = parse_events(evsel_list, str, &errinfo);
1756 if (!evsel_list->core.nr_entries) {
1760 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
1763 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
1767 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
1770 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
1780 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1787 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1794 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1840 session->evlist = evsel_list;
1855 evlist__for_each_entry(evsel_list, counter)
1905 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
1907 if (perf_evlist__alloc_stats(evsel_list, true))
2010 evsel_list = session->evlist;
2039 evlist__for_each_entry(evsel_list, counter) {
2046 if (evsel_list->core.nr_entries)
2065 evsel_list = evlist__new();
2066 if (evsel_list == NULL)
2079 perf_stat__collect_metric_expr(evsel_list);
2237 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
2247 if (perf_evlist__create_maps(evsel_list, &target) < 0) {
2260 evlist__check_cpu_maps(evsel_list);
2267 thread_map__read_comms(evsel_list->core.threads);
2270 perf_thread_map__nr(evsel_list->core.threads))) {
2306 if (perf_evlist__alloc_stats(evsel_list, interval))
2336 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2346 perf_evlist__reset_prev_raw_counts(evsel_list);
2358 evlist__finalize_ctlfd(evsel_list);
2389 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2392 evlist__close(evsel_list);
2397 perf_evlist__free_stats(evsel_list);
2404 evlist__delete(evsel_list);
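
Taken together, the matches trace the full lifetime of evsel_list through what appears to be tools/perf/builtin-stat.c: allocation (2065), event parsing (1740), map creation (2247), enable/disable around the workload (517/531), and teardown (2392, 2397, 2404). The condensed sketch below strings those steps together using only calls that appear in the matches above; the wrapper function name, its parameters, and the error handling are illustrative, not the actual perf control flow, which is spread across cmd_stat() and the run/stat helpers. It assumes perf's internal headers (util/evlist.h, util/parse-events.h, util/target.h) and the API vintage shown here, where evlist__ and perf_evlist__ prefixes still coexist.

/* Illustrative sketch only; source line numbers in the comments refer
 * to the matches listed above. */
static struct evlist *evsel_list;

static int evsel_list_lifecycle(const char *events, struct target *target)
{
	struct parse_events_error errinfo;
	int err;

	evsel_list = evlist__new();			/* 2065: allocation */
	if (evsel_list == NULL)			/* 2066: NULL check */
		return -ENOMEM;

	err = parse_events(evsel_list, events, &errinfo);	/* 1740: -e parsing */
	if (err)
		goto out_delete;

	err = perf_evlist__create_maps(evsel_list, target);	/* 2247: cpu/thread maps */
	if (err < 0)
		goto out_delete;

	evlist__enable(evsel_list);			/* 517: start counting */
	/* ... workload runs, counters are read ... */
	evlist__disable(evsel_list);			/* 531: stop counting */

	/* 935-939: keep evsel_list alive while the stats are still
	 * processed; only the counter fds are closed here. */
	evlist__close(evsel_list);			/* 2392 */
	perf_evlist__free_stats(evsel_list);		/* 2397 */
out_delete:
	evlist__delete(evsel_list);			/* 2404 */
	return err;
}

Note the ordering the matches at 935-939 and 2389-2404 imply: the counter file descriptors are closed first, but evsel_list itself stays allocated until the counters have been printed (976) and the stats freed (2397), and is only deleted last (2404).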