Lines matching refs: evsel_list (tools/perf/builtin-stat.c)
102 static struct evlist *evsel_list;
104 .evlistp = &evsel_list,
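These first two hits are the wiring: a file-scope evlist pointer that every later line reaches for, and the event-parsing option pointing parse_events at it. In recent perf the option value is a struct parse_events_option_args whose evlistp field names the destination list; a minimal sketch of that hookup (option text abridged; exact fields may differ between versions):

    static struct evlist *evsel_list;

    static struct parse_events_option_args parse_events_option_args = {
            .evlistp = &evsel_list,
    };

    /* in stat_options[]: parse_events_option() follows opt->value to the
     * struct above and appends every parsed event to *evlistp */
    OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
                 "event selector. use 'perf list' to list available events",
                 parse_events_option),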
238 evlist__reset_stats(evsel_list);
318 int nthreads = perf_thread_map__nr(evsel_list->core.threads);
378 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
400 evlist__for_each_entry(evsel_list, counter) {
425 evlist__for_each_entry(evsel_list, counter) {
433 perf_stat_merge_counters(&stat_config, evsel_list);
434 perf_stat_process_percore(&stat_config, evsel_list);
444 evlist__reset_aggr_stats(evsel_list);
474 evlist__for_each_entry(evsel_list, evsel) {
485 evlist__enable(evsel_list);
500 evlist__for_each_entry(evsel_list, counter)
503 evlist__disable(evsel_list);
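Lines 485 and 503 bracket the measurement window: whatever runs between evlist__enable() and evlist__disable() is what the counters see. A condensed sketch of the pattern (the real code reads every CPU and thread of each evsel; the single read below is illustrative only):

    struct evsel *counter;

    evlist__enable(evsel_list);                 /* counters start ticking */
    /* ... workload runs, or the interval timer fires ... */
    evlist__disable(evsel_list);                /* freeze the counts */

    evlist__for_each_entry(evsel_list, counter)
            evsel__read_counter(counter, /*cpu_map_idx=*/0, /*thread=*/0);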
607 child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 1 : 0;
613 if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */
618 process_evlist(evsel_list, interval);
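Lines 607-618 are the heart of the dispatch loop: sleep in evlist__poll() until either the timeout expires (time for an interval snapshot) or the control fd becomes readable (an enable/disable/snapshot command arrived). Roughly, using names from the same file:

    while (!done) {
            if (!is_target_alive(&target, evsel_list->core.threads))
                    break;                          /* target went away */
            if (evlist__poll(evsel_list, time_to_sleep) <= 0) {
                    /* poll timeout or EINTR: print an interval snapshot
                     * (handle_interval() in the same file) */
            } else {
                    /* control fd became readable: enable/disable/snapshot */
                    process_evlist(evsel_list, interval);
            }
    }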
661 evsel_list->core.threads &&
662 evsel_list->core.threads->err_thread != -1) {
667 if (!thread_map__remove(evsel_list->core.threads,
668 evsel_list->core.threads->err_thread)) {
669 evsel_list->core.threads->err_thread = -1;
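Lines 661-669 handle a racy target: a thread can exit between the moment the thread map was built and the perf_event_open() call. When the kernel rejects the open, the offending entry is dropped from the map and the counter is retried. A condensed reconstruction of that error path (the surrounding triage in stat_handle_error() is more involved; COUNTER_RETRY is the retry status used there):

    if (evsel_list->core.threads &&
        evsel_list->core.threads->err_thread != -1) {
            /* drop the thread that died under us, then retry the open */
            if (!thread_map__remove(evsel_list->core.threads,
                                    evsel_list->core.threads->err_thread)) {
                    evsel_list->core.threads->err_thread = -1;
                    return COUNTER_RETRY;
            }
    }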
707 if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) {
711 child_pid = evsel_list->workload.pid;
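Lines 707-711 set up the fork-and-park workload model: evlist__prepare_workload() forks the child and leaves it blocked on a pipe read, so all counters can be opened and enabled before a single workload instruction runs; evlist__start_workload() (line 872) then writes to the pipe and the child exec()s. Condensed:

    if (forks) {
            if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe,
                                         workload_exec_failed_signal) < 0) {
                    perror("failed to prepare workload");
                    return -1;
            }
            child_pid = evsel_list->workload.pid;   /* forked, parked on the pipe */
    }

    /* ... open, configure and enable all counters ... */

    evlist__start_workload(evsel_list);             /* release the child: it exec()s */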
714 if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
720 evlist__for_each_entry(evsel_list, counter) {
728 evlist__reset_aggr_stats(evsel_list);
730 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
758 evlist__reset_weak_group(evsel_list, counter, false);
786 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
795 evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
821 evlist__for_each_entry(evsel_list, counter) {
832 evsel__store_ids(counter, evsel_list))
836 if (evlist__apply_filters(evsel_list, &counter)) {
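Lines 821-836 run after every counter is open: sample ids are stored when recording (they are needed later to decode the stat data stream) and per-event filters are pushed into the kernel. A sketch of the two steps (error text abridged from the source):

    struct evsel *counter;

    evlist__for_each_entry(evsel_list, counter) {
            if (STAT_RECORD && evsel__store_ids(counter, evsel_list))
                    return -1;
    }

    if (evlist__apply_filters(evsel_list, &counter)) {
            pr_err("failed to set filter \"%s\" on event %s\n",
                   counter->filter, evsel__name(counter));
            return -1;
    }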
849 err = perf_session__write_header(perf_stat.session, evsel_list,
856 err = perf_event__synthesize_stat_events(&stat_config, NULL, evsel_list,
872 evlist__start_workload(evsel_list);
887 if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
920 evlist__copy_prev_raw_counts(evsel_list);
921 evlist__reset_prev_raw_counts(evsel_list);
922 evlist__reset_aggr_stats(evsel_list);
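Lines 920-922 reconcile interval mode with the closing summary: interval snapshots are printed as deltas against prev_raw_counts, so before the final summary the saved baselines are folded back and the aggregation state is cleared, letting the summary cover the whole run. Roughly (the guarding condition is reconstructed from memory):

    if (interval && stat_config.summary) {
            /* stop producing deltas; rebuild totals for the summary */
            evlist__copy_prev_raw_counts(evsel_list);
            evlist__reset_prev_raw_counts(evsel_list);
            evlist__reset_aggr_stats(evsel_list);
    }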
938 * We need to keep evsel_list alive, because it's processed
939 * later; the evsel_list will be closed afterwards.
942 evlist__close(evsel_list);
981 evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv);
1169 OPT_CALLBACK(0, "filter", &evsel_list, "filter",
1215 OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
1265 OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
1284 OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
1289 OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
1298 OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default",
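Lines 1169-1298 all share one shape: an OPT_CALLBACK entry whose value is &evsel_list, so each parser can append to or annotate the event list directly. A trimmed sketch of the table (callback names recalled from the source; check the tree at hand):

    static struct option stat_options[] = {
            OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
                         "monitor event in cgroup name only",
                         parse_stat_cgroups),
            OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
                         "monitor specified metrics or metric groups",
                         parse_metric_groups),
            OPT_END()
    };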
1596 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
1606 nr = perf_thread_map__nr(evsel_list->core.threads);
1621 * The evsel_list->cpus is the base we operate on,
1625 if (!perf_cpu_map__empty(evsel_list->core.user_requested_cpus))
1626 nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
1894 int nr = perf_thread_map__nr(evsel_list->core.threads);
1912 stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
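Lines 1596 and 1912 build the aggregation map from the user-requested CPUs; the get_id callback decides the granularity (socket, die, core, ...). A minimal sketch for socket aggregation (argument layout per recent perf; older trees lack the data/needs_sort parameters):

    stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
                                             aggr_cpu_id__socket, /*data=*/NULL,
                                             /*needs_sort=*/true);
    if (!stat_config.aggr_map)
            return -ENOMEM;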
2058 return metricgroup__parse_groups(evsel_list, pmu, "transaction",
2091 return metricgroup__parse_groups(evsel_list, pmu, "smi",
2124 if (metricgroup__parse_groups(evsel_list,
2138 if (!evsel_list->core.nr_entries) {
2143 if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
2146 if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0)
2150 if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0)
2153 if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
2179 evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries);
2184 if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
2194 if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
2201 if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
2208 return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
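Lines 2138-2208 are the no -e fallback: with an empty list, perf stat assembles its classic default event set, layering in stall events and the -d detail tiers only where they apply. The arrays are plain perf_event_attr tables; the first one, abridged from the source:

    struct perf_event_attr default_attrs0[] = {
            { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
            { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
            { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
            { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
    };

    if (!evsel_list->core.nr_entries) {
            if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0)
                    return -1;
            /* hardware events (cycles, instructions, ...) and the optional
             * frontend/backend stall events follow the same pattern */
    }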
2254 session->evlist = evsel_list;
2323 perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads);
2325 if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true))
2433 evlist__delete(evsel_list);
2434 evsel_list = session->evlist;
2463 evlist__for_each_entry(evsel_list, counter) {
2470 if (evsel_list->core.nr_entries)
2490 evsel_list = evlist__new();
2491 if (evsel_list == NULL)
2668 status = iostat_prepare(evsel_list, &stat_config);
2672 iostat_list(evsel_list, &stat_config);
2675 iostat_list(evsel_list, &stat_config);
2698 int ret = metricgroup__parse_groups(evsel_list, pmu, metrics,
2724 if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
2732 evlist__warn_user_requested_cpus(evsel_list, target.cpu_list);
2734 if (evlist__create_maps(evsel_list, &target) < 0) {
2747 evlist__check_cpu_maps(evsel_list);
2754 thread_map__read_comms(evsel_list->core.threads);
2790 if (evlist__alloc_stats(&stat_config, evsel_list, interval))
2817 if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
2821 evlist__first(evsel_list)->ignore_missing_thread = target.pid;
2829 evlist__reset_prev_raw_counts(evsel_list);
2840 evlist__copy_res_stats(&stat_config, evsel_list);
2844 evlist__finalize_ctlfd(evsel_list);
2876 perf_session__write_header(perf_stat.session, evsel_list, fd, true);
2879 evlist__close(evsel_list);
2884 evlist__free_stats(evsel_list);
2887 iostat_release(evsel_list);
2895 evlist__delete(evsel_list);
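Read end to end, the hits trace one object through cmd_stat(): created once, populated by option callbacks, mapped onto the target, measured, drained, destroyed. A skeleton with everything else elided (order as in the listing; a reading aid, not standalone compilable code):

    evsel_list = evlist__new();
    if (evsel_list == NULL)
            return -ENOMEM;

    /* option callbacks (-e, -M, -G, ...) append evsels to evsel_list */

    if (evlist__create_maps(evsel_list, &target) < 0)
            goto out;
    if (evlist__alloc_stats(&stat_config, evsel_list, interval))
            goto out;

    /* run_perf_stat(): open, enable, count, disable, read, print */

    evlist__finalize_ctlfd(evsel_list);
    evlist__free_stats(evsel_list);
    out:
            evlist__delete(evsel_list);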