Lines Matching refs:evsel
14 #include "evsel.h"
78 bool __perf_evsel_stat__is(struct evsel *evsel,
81 struct perf_stat_evsel *ps = evsel->stats;
107 static void perf_stat_evsel_id_init(struct evsel *evsel)
109 struct perf_stat_evsel *ps = evsel->stats;
115 if (!strcmp(evsel__name(evsel), id_str[i])) {
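
perf_stat_evsel_id_init() (lines 107-115) classifies an event by comparing its name against a fixed table of id strings and records the matching index in the per-evsel stats. A minimal stand-alone sketch of that lookup; the my_* names and the table contents are illustrative, not the real perf tables:

#include <string.h>

enum my_stat_id { MY_STAT_NONE = 0, MY_STAT_CYCLES, MY_STAT_INSNS, MY_STAT_MAX };

static const char *my_id_str[MY_STAT_MAX] = {
	[MY_STAT_CYCLES] = "cycles",
	[MY_STAT_INSNS]  = "instructions",
};

/* walk the table; the first name match decides the id, default stays NONE */
static enum my_stat_id my_stat_id_init(const char *evsel_name)
{
	for (int i = 0; i < MY_STAT_MAX; i++) {
		if (my_id_str[i] && !strcmp(evsel_name, my_id_str[i]))
			return (enum my_stat_id)i;
	}
	return MY_STAT_NONE;
}
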
122 static void evsel__reset_stat_priv(struct evsel *evsel)
125 struct perf_stat_evsel *ps = evsel->stats;
130 perf_stat_evsel_id_init(evsel);
133 static int evsel__alloc_stat_priv(struct evsel *evsel)
135 evsel->stats = zalloc(sizeof(struct perf_stat_evsel));
136 if (evsel->stats == NULL)
138 evsel__reset_stat_priv(evsel);
142 static void evsel__free_stat_priv(struct evsel *evsel)
144 struct perf_stat_evsel *ps = evsel->stats;
148 zfree(&evsel->stats);
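
The three helpers above (lines 122-148) give each evsel a private perf_stat_evsel block with a plain alloc/reset/free lifecycle: a zeroing allocation, an explicit reset that also re-derives the id, and zfree() so the stale pointer is cleared on release. A self-contained sketch of the same pattern with simplified stand-in types (my_evsel and my_stat_priv are illustrative, not the perf structures):

#include <stdlib.h>
#include <string.h>

struct my_stat_priv {
	double mean;
	unsigned long long runs;
};

struct my_evsel {
	struct my_stat_priv *stats;
};

/* reset: zero the private block so a new run starts from a clean state */
static void my_evsel_reset_stat_priv(struct my_evsel *e)
{
	memset(e->stats, 0, sizeof(*e->stats));
}

/* alloc: zalloc-style zeroed allocation followed by an explicit reset */
static int my_evsel_alloc_stat_priv(struct my_evsel *e)
{
	e->stats = calloc(1, sizeof(*e->stats));
	if (e->stats == NULL)
		return -1;
	my_evsel_reset_stat_priv(e);
	return 0;
}

/* free: release and clear the pointer (the real code uses zfree()) */
static void my_evsel_free_stat_priv(struct my_evsel *e)
{
	free(e->stats);
	e->stats = NULL;
}
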
151 static int evsel__alloc_prev_raw_counts(struct evsel *evsel, int ncpus, int nthreads)
157 evsel->prev_raw_counts = counts;
162 static void evsel__free_prev_raw_counts(struct evsel *evsel)
164 perf_counts__delete(evsel->prev_raw_counts);
165 evsel->prev_raw_counts = NULL;
168 static void evsel__reset_prev_raw_counts(struct evsel *evsel)
170 if (evsel->prev_raw_counts)
171 perf_counts__reset(evsel->prev_raw_counts);
174 static int evsel__alloc_stats(struct evsel *evsel, bool alloc_raw)
176 int ncpus = evsel__nr_cpus(evsel);
177 int nthreads = perf_thread_map__nr(evsel->core.threads);
179 if (evsel__alloc_stat_priv(evsel) < 0 ||
180 evsel__alloc_counts(evsel, ncpus, nthreads) < 0 ||
181 (alloc_raw && evsel__alloc_prev_raw_counts(evsel, ncpus, nthreads) < 0))
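
evsel__alloc_stats() (lines 174-181) sizes every buffer by the number of CPUs and threads the event covers, and only allocates the prev_raw_counts snapshot when the caller asks for it (alloc_raw). A hedged sketch of that cpu-by-thread sizing with an illustrative flat array; the my_* names stand in for the real perf_counts()/evsel__alloc_counts() machinery:

#include <stdlib.h>

struct my_counts_values { unsigned long long val, ena, run; };

struct my_counts {
	int ncpus, nthreads;
	struct my_counts_values *values;	/* ncpus * nthreads entries */
};

static struct my_counts *my_counts_new(int ncpus, int nthreads)
{
	struct my_counts *c = calloc(1, sizeof(*c));

	if (!c)
		return NULL;
	c->ncpus = ncpus;
	c->nthreads = nthreads;
	c->values = calloc((size_t)ncpus * nthreads, sizeof(*c->values));
	if (!c->values) {
		free(c);
		return NULL;
	}
	return c;
}

/* index one (cpu, thread) slot, mirroring perf_counts(counts, cpu, thread) */
static struct my_counts_values *my_counts_at(struct my_counts *c, int cpu, int thread)
{
	return &c->values[cpu * c->nthreads + thread];
}
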
189 struct evsel *evsel;
191 evlist__for_each_entry(evlist, evsel) {
192 if (evsel__alloc_stats(evsel, alloc_raw))
205 struct evsel *evsel;
207 evlist__for_each_entry(evlist, evsel) {
208 evsel__free_stat_priv(evsel);
209 evsel__free_counts(evsel);
210 evsel__free_prev_raw_counts(evsel);
216 struct evsel *evsel;
218 evlist__for_each_entry(evlist, evsel) {
219 evsel__reset_stat_priv(evsel);
220 evsel__reset_counts(evsel);
226 struct evsel *evsel;
228 evlist__for_each_entry(evlist, evsel)
229 evsel__reset_prev_raw_counts(evsel);
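
The evlist-level wrappers (lines 189-229) just walk every event and apply the per-evsel helper: allocation stops at the first failure, while free and reset unconditionally visit all events. A simplified model of that walk, assuming an illustrative array-backed list rather than the linked list behind evlist__for_each_entry(); the error handling is also an assumption, since the listing only shows the loops:

#include <stdlib.h>

struct my_stat_priv { double mean; };
struct my_evsel { struct my_stat_priv *stats; };

struct my_evlist {
	struct my_evsel *evsels;
	int nr_entries;
};

/* allocate the per-event private stats for every event; the first failure
 * propagates up (error handling simplified: the caller frees everything) */
static int my_evlist_alloc_stats(struct my_evlist *l)
{
	for (int i = 0; i < l->nr_entries; i++) {
		l->evsels[i].stats = calloc(1, sizeof(struct my_stat_priv));
		if (l->evsels[i].stats == NULL)
			return -1;
	}
	return 0;
}

/* release and clear every event's private stats */
static void my_evlist_free_stats(struct my_evlist *l)
{
	for (int i = 0; i < l->nr_entries; i++) {
		free(l->evsels[i].stats);
		l->evsels[i].stats = NULL;
	}
}
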
232 static void perf_evsel__copy_prev_raw_counts(struct evsel *evsel)
234 int ncpus = evsel__nr_cpus(evsel);
235 int nthreads = perf_thread_map__nr(evsel->core.threads);
239 *perf_counts(evsel->counts, cpu, thread) =
240 *perf_counts(evsel->prev_raw_counts, cpu,
245 evsel->counts->aggr = evsel->prev_raw_counts->aggr;
250 struct evsel *evsel;
252 evlist__for_each_entry(evlist, evsel)
253 perf_evsel__copy_prev_raw_counts(evsel);
258 struct evsel *evsel;
262 * we copy the counts from evsel->prev_raw_counts to
263 * evsel->counts. The perf_stat_process_counter creates
270 evlist__for_each_entry(evlist, evsel) {
271 *perf_counts(evsel->prev_raw_counts, 0, 0) =
272 evsel->prev_raw_counts->aggr;
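
For interval runs, perf_evsel__copy_prev_raw_counts() (lines 232-245) replays the previous raw readings, slot by slot plus the aggregate, back into evsel->counts, and the fragment at lines 258-272 parks the aggregated total in slot (0, 0) of prev_raw_counts so a later copy carries it along. A rough self-contained model of that copy-and-save dance; the fixed-size arrays and my_* names are stand-ins, not the perf layout:

#define NCPUS    4
#define NTHREADS 2

struct my_counts_values { unsigned long long val, ena, run; };

struct my_counts {
	struct my_counts_values values[NCPUS][NTHREADS];
	struct my_counts_values aggr;
};

struct my_evsel {
	struct my_counts counts;		/* what the tool prints from */
	struct my_counts prev_raw_counts;	/* snapshot of the previous read */
};

/* replay the previous raw readings (and their aggregate) into the live
 * counts, as perf_evsel__copy_prev_raw_counts() does per (cpu, thread) */
static void my_copy_prev_raw_counts(struct my_evsel *e)
{
	for (int cpu = 0; cpu < NCPUS; cpu++)
		for (int thread = 0; thread < NTHREADS; thread++)
			e->counts.values[cpu][thread] =
				e->prev_raw_counts.values[cpu][thread];

	e->counts.aggr = e->prev_raw_counts.aggr;
}

/* park the aggregate in slot (0, 0) so the next per-slot copy picks it up */
static void my_save_aggr_prev_raw_counts(struct my_evsel *e)
{
	e->prev_raw_counts.values[0][0] = e->prev_raw_counts.aggr;
}
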
276 static void zero_per_pkg(struct evsel *counter)
282 static int check_per_pkg(struct evsel *counter,
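
zero_per_pkg() and check_per_pkg() exist because a per-package (uncore) event reports the same value on every CPU of a package; only the first CPU seen for each package should contribute, and the rest are skipped. A small stand-alone model of that dedup keyed by an assumed package id (the real code derives the key from the CPU topology and keeps the mask in the evsel):

#include <stdbool.h>
#include <string.h>

#define MAX_PKGS 64

struct my_per_pkg_state {
	bool seen[MAX_PKGS];	/* one flag per physical package */
};

/* forget which packages already contributed, done before a new read pass */
static void my_zero_per_pkg(struct my_per_pkg_state *s)
{
	memset(s->seen, 0, sizeof(s->seen));
}

/* return true (skip) if this package already contributed a reading */
static bool my_check_per_pkg(struct my_per_pkg_state *s, int pkg_id)
{
	if (s->seen[pkg_id])
		return true;
	s->seen[pkg_id] = true;
	return false;
}
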
325 process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
329 struct perf_counts_values *aggr = &evsel->counts->aggr;
333 if (check_per_pkg(evsel, count, cpu, &skip)) {
348 if (!evsel->snapshot)
349 evsel__compute_deltas(evsel, cpu, thread, count);
351 if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
352 perf_stat__update_shadow_stats(evsel, count->val,
358 perf_stat__update_shadow_stats(evsel,
361 perf_stat__update_shadow_stats(evsel,
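
process_counter_values() (lines 325-361) first filters duplicate per-package readings, then, unless the event is a snapshot-style counter, converts the cumulative reading into a delta against the previous one before folding it into the aggregate and the shadow stats. A compact sketch of that delta-then-aggregate step; the real work happens in evsel__compute_deltas() and perf_stat__update_shadow_stats(), and everything below is a simplified stand-in:

struct my_counts_values { unsigned long long val, ena, run; };

/* previous reading for each (cpu, thread) slot, dimensions illustrative */
static struct my_counts_values prev[4][2];

/* turn a cumulative reading into the increment since the last read,
 * remembering the new cumulative value for next time */
static void my_compute_deltas(int cpu, int thread, struct my_counts_values *count)
{
	struct my_counts_values tmp = *count;

	count->val -= prev[cpu][thread].val;
	count->ena -= prev[cpu][thread].ena;
	count->run -= prev[cpu][thread].run;

	prev[cpu][thread] = tmp;
}

/* fold a (possibly delta'd) reading into the event-wide aggregate */
static void my_aggregate(struct my_counts_values *aggr,
			 const struct my_counts_values *count, int skip)
{
	if (skip)
		return;		/* e.g. a duplicate per-package reading */
	aggr->val += count->val;
	aggr->ena += count->ena;
	aggr->run += count->run;
}
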
378 struct evsel *counter)
399 struct evsel *counter)
455 struct evsel *counter;
511 int create_perf_stat_counter(struct evsel *evsel,
516 struct perf_event_attr *attr = &evsel->core.attr;
517 struct evsel *leader = evsel->leader;
556 if (evsel__is_group_leader(evsel)) {
568 return evsel__open_per_cpu(evsel, evsel__cpus(evsel), cpu);
570 return evsel__open_per_thread(evsel, evsel->core.threads);
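
create_perf_stat_counter() (lines 511-570) touches the start-disabled state only on the group leader, since enabling the leader enables the whole group, and then opens the event per CPU when the event has CPUs to bind to, otherwise per thread. A hedged sketch of that final decision; the my_* helpers and the has_cpu_map flag are illustrative simplifications of evsel__open_per_cpu()/evsel__open_per_thread() and the real target checks:

#include <stdbool.h>

struct my_attr { bool disabled; };

struct my_evsel {
	struct my_attr attr;
	struct my_evsel *leader;
	bool has_cpu_map;	/* stand-in for "the target names CPUs" */
};

/* an event leads its group when it is its own leader */
static bool my_is_group_leader(const struct my_evsel *e)
{
	return e->leader == e;
}

/* stubs for the two open paths, standing in for the real syscalls */
static int my_open_per_cpu(struct my_evsel *e)    { (void)e; return 0; }
static int my_open_per_thread(struct my_evsel *e) { (void)e; return 0; }

static int my_create_stat_counter(struct my_evsel *e)
{
	/* only the leader starts disabled; enabling it kicks off the group */
	if (my_is_group_leader(e))
		e->attr.disabled = true;

	if (e->has_cpu_map)
		return my_open_per_cpu(e);
	return my_open_per_thread(e);
}
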