Lines Matching refs:ctx
29 int ctx;
51 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
52 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
57 if (a->ctx != b->ctx)
58 return a->ctx - b->ctx;
102 int ctx,
111 .ctx = ctx,
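The entries above belong to the saved_value rblist that caches per-counter statistics; line 29 is the ctx member of the key, and lines 51-58 are the comparator. A minimal sketch of that comparator is below; only the ctx leg (lines 57-58) is confirmed by the listing, and the ordering of the other keys follows the comment at lines 51-52 on assumption:

static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node, struct saved_value, rb_node);
	const struct saved_value *b = entry;

	/* Keys per the comment above: cpu/type/ctx/stat for the hardcoded
	 * metrics (evsel is NULL), evsel/cpu for generic metrics. */
	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;
	if (a->type != b->type)
		return a->type - b->type;
	if (a->ctx != b->ctx)		/* confirmed at lines 57-58 */
		return a->ctx - b->ctx;
	if (a->stat != b->stat)
		return a->stat - b->stat;
	/* Tie-break on the evsel pointer (assumed). */
	if (a->evsel != b->evsel)
		return a->evsel < b->evsel ? -1 : 1;
	return 0;
}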
151 int ctx = 0;
154 ctx |= CTX_BIT_KERNEL;
156 ctx |= CTX_BIT_USER;
158 ctx |= CTX_BIT_HV;
160 ctx |= CTX_BIT_HOST;
162 ctx |= CTX_BIT_IDLE;
164 return ctx;
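Lines 151-164 are the body of evsel_context(), which packs the event's exclusion attributes into the ctx index used in the key above. The |= lines are confirmed by the listing; the guarding conditions and the attribute path (evsel->core.attr here, older trees use evsel->attr) are assumptions:

static int evsel_context(const struct evsel *evsel)
{
	int ctx = 0;

	if (evsel->core.attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->core.attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->core.attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->core.attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->core.attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}

Two events that differ only in their exclude_* bits therefore land in separate saved_value entries and never contaminate each other's ratios.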
196 int ctx, int cpu, u64 count)
199 type, ctx, st);
213 int ctx = evsel_context(counter);
222 update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
224 update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
226 update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
228 update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
231 ctx, cpu, count);
234 ctx, cpu, count);
237 ctx, cpu, count);
240 ctx, cpu, count);
243 ctx, cpu, count);
246 ctx, cpu, count);
249 ctx, cpu, count);
252 ctx, cpu, count);
255 ctx, cpu, count);
258 ctx, cpu, count);
261 ctx, cpu, count);
263 update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
265 update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
267 update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
269 update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
271 update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
273 update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
275 update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
277 update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
279 update_runtime_stat(st, STAT_APERF, ctx, cpu, count);
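Lines 196-199 define update_runtime_stat(), and the long run of calls from line 213 onward is the per-event dispatch that feeds each hardcoded counter into the runtime_stat table under its (type, ctx, cpu) key. A sketch of the helper they all funnel through; the create-on-miss flag passed to saved_value_lookup() and the update_stats() call are assumed from context:

static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int ctx, int cpu, u64 count)
{
	/* true: allocate the saved_value node on first use (assumed). */
	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
						   type, ctx, st);

	if (v)
		update_stats(&v->stats, count);
}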
338 struct expr_parse_ctx ctx;
343 expr__ctx_init(&ctx);
351 expr__ctx_clear(&ctx);
356 &ctx, 1) < 0)
360 hashmap__size(&ctx.ids) + 1);
362 expr__ctx_clear(&ctx);
369 hashmap__for_each_entry((&ctx.ids), cur, bkt) {
421 expr__ctx_clear(&ctx);
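Lines 338-421 are the generic-metric path, which keeps a struct expr_parse_ctx on the stack to collect the event names a metric expression references. A rough sketch of the control flow; only expr__ctx_init(), expr__ctx_clear(), hashmap__size(&ctx.ids) and the hashmap__for_each_entry() walk are confirmed by the listing, while the parse entry point and the allocation target are placeholders:

	struct expr_parse_ctx ctx;
	struct hashmap_entry *cur;
	struct evsel **metric_events;
	size_t bkt;

	expr__ctx_init(&ctx);

	/* Parse the expression and record every identifier it references
	 * into ctx.ids (exact function name assumed; the listing only
	 * shows that the call takes &ctx and a runtime argument of 1). */
	if (expr__find_other(metric_expr, NULL, &ctx, 1) < 0) {
		expr__ctx_clear(&ctx);
		return;
	}

	/* One slot per referenced event plus a terminator (line 360). */
	metric_events = calloc(hashmap__size(&ctx.ids) + 1,
			       sizeof(struct evsel *));
	if (!metric_events) {
		expr__ctx_clear(&ctx);
		return;
	}

	/* Resolve each collected id to an evsel (line 369). */
	hashmap__for_each_entry((&ctx.ids), cur, bkt) {
		/* ... look up cur->key in the evlist ... */
	}

	expr__ctx_clear(&ctx);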
425 enum stat_type type, int ctx, int cpu)
429 v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
437 enum stat_type type, int ctx, int cpu)
441 v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
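Lines 425-441 are the read side: runtime_stat_avg() and runtime_stat_n() look up the same (type, ctx, cpu) key without creating it. The saved_value_lookup() calls are confirmed by the listing; the avg_stats()/stats.n accessors are assumed:

static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}

static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return v->stats.n;
}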
456 int ctx = evsel_context(evsel);
458 total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
466 out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
469 out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
480 int ctx = evsel_context(evsel);
482 total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
489 out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
501 int ctx = evsel_context(evsel);
503 total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
510 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
523 int ctx = evsel_context(evsel);
525 total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);
532 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache accesses", ratio);
545 int ctx = evsel_context(evsel);
547 total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);
553 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache accesses", ratio);
565 int ctx = evsel_context(evsel);
567 total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);
573 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache accesses", ratio);
585 int ctx = evsel_context(evsel);
587 total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);
593 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache accesses", ratio);
605 int ctx = evsel_context(evsel);
607 total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);
613 out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache accesses", ratio);
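Lines 456-613 are the family of hardcoded ratio printers (stalled frontend/backend cycles, branch and cache miss rates). They all share the same shape; a sketch of the branch-miss variant around lines 501-510, with the full parameter list, get_ratio_color() and its GRC_CACHE_MISSES argument taken on assumption:

static void print_branch_misses(struct perf_stat_config *config,
				int cpu, struct evsel *evsel, double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);
	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%",
			  "of all branches", ratio);
}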
665 static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
667 return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
670 static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
676 total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
677 runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
678 runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);
680 total_slots = td_total_slots(ctx, cpu, st);
686 static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
689 double total_slots = td_total_slots(ctx, cpu, st);
691 ctx, cpu);
698 static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
701 double total_slots = td_total_slots(ctx, cpu, st);
703 ctx, cpu);
710 static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
712 double sum = (td_fe_bound(ctx, cpu, st) +
713 td_bad_spec(ctx, cpu, st) +
714 td_retiring(ctx, cpu, st));
725 static double td_metric_ratio(int ctx, int cpu,
729 double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
730 runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
731 runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
732 runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
733 double d = runtime_stat_avg(stat, type, ctx, cpu);
745 static bool full_td(int ctx, int cpu,
750 if (runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) > 0)
752 if (runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) > 0)
754 if (runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) > 0)
756 if (runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu) > 0)
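Lines 665-756 implement the two top-down schemes. The older slots-based helpers reduce to a handful of ratios, while the newer per-metric events are normalized by td_metric_ratio() and gated by full_td(), which (as the > 0 tests above suggest) requires each of the four counters to be populated. A condensed sketch; the STAT_* names feeding retiring/fe_bound and the clamping of be_bound are assumed:

/* Older, slots-based level-1 breakdown (lines 665-714):
 *
 *   total_slots = TOPDOWN_TOTAL_SLOTS
 *   bad_spec    = (SLOTS_ISSUED - SLOTS_RETIRED + RECOVERY_BUBBLES) / total_slots
 *   retiring    = SLOTS_RETIRED / total_slots
 *   fe_bound    = FETCH_BUBBLES / total_slots       (stat name assumed)
 *   be_bound    = 1.0 - (fe_bound + bad_spec + retiring), clamped to [0, 1]
 */

/* Newer per-metric events (lines 725-733): one counter normalized
 * against the sum of all four. */
static double td_metric_ratio(int ctx, int cpu, enum stat_type type,
			      struct runtime_stat *stat)
{
	double sum = runtime_stat_avg(stat, STAT_TOPDOWN_RETIRING, ctx, cpu) +
		     runtime_stat_avg(stat, STAT_TOPDOWN_FE_BOUND, ctx, cpu) +
		     runtime_stat_avg(stat, STAT_TOPDOWN_BE_BOUND, ctx, cpu) +
		     runtime_stat_avg(stat, STAT_TOPDOWN_BAD_SPEC, ctx, cpu);
	double d = runtime_stat_avg(stat, type, ctx, cpu);

	return sum ? d / sum : 0.0;
}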
767 int ctx = evsel_context(evsel);
770 smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
771 aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
772 cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
782 out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
783 out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
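Lines 767-783 print the SMI cost: cycles that APERF counted but the cycles event did not, expressed as a percentage of APERF. A rough reconstruction of that computation; the guards and the red-flag threshold are assumptions, only the three runtime_stat_avg() reads and the two print_metric() calls are confirmed:

	double smi_num, aperf, cycles, cost = 0.0;
	const char *color = NULL;
	int ctx = evsel_context(evsel);

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	/* SMM cycles are those APERF saw but the cycles event missed. */
	if (smi_num && aperf)
		cost = (aperf - cycles) / aperf * 100.0;

	if (cost > 10.0)			/* threshold assumed */
		color = PERF_COLOR_RED;

	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);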
860 void *ctxp = out->ctx;
929 void *ctxp = out->ctx;
933 int ctx = evsel_context(evsel);
938 total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
949 ctx, cpu);
953 ctx, cpu));
963 if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
973 if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
983 if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
993 if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
1003 if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
1013 if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
1018 total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);
1023 if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
1042 total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
1052 total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
1053 total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);
1064 ctx, cpu);
1069 if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
1077 ctx, cpu);
1090 double fe_bound = td_fe_bound(ctx, cpu, st);
1097 double retiring = td_retiring(ctx, cpu, st);
1104 double bad_spec = td_bad_spec(ctx, cpu, st);
1111 double be_bound = td_be_bound(ctx, cpu, st);
1124 if (td_total_slots(ctx, cpu, st) > 0)
1130 full_td(ctx, cpu, st)) {
1131 double retiring = td_metric_ratio(ctx, cpu,
1139 full_td(ctx, cpu, st)) {
1140 double fe_bound = td_metric_ratio(ctx, cpu,
1148 full_td(ctx, cpu, st)) {
1149 double be_bound = td_metric_ratio(ctx, cpu,
1157 full_td(ctx, cpu, st)) {
1158 double bad_spec = td_metric_ratio(ctx, cpu,
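Lines 929 onward are the large per-event dispatch that prints the derived metrics; every branch keys its lookups by the same (ctx, cpu) pair computed once at the top. A sketch of the first branch (instructions per cycle, around line 938); the evsel__match() macro and the surrounding function's name are assumptions, since only the runtime_stat_avg() calls and the printing convention are visible in the listing:

	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	int ctx = evsel_context(evsel);
	double total, ratio = 0.0;

	if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
				     "insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL,
				     "insn per cycle", 0);
		}
		/* ...followed by the branch/cache miss ratios, transaction
		 * statistics (lines 1042-1077) and the top-down branches
		 * (lines 1090-1158), all indexed by the same ctx... */
	}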