Lines matching defs:kwork in builtin-kwork.c

3  * builtin-kwork.c
14 #include "util/kwork.h"
134 static int sort_dimension__add(struct perf_kwork *kwork __maybe_unused,
166 if (kwork->report == KWORK_REPORT_LATENCY)
179 static void setup_sorting(struct perf_kwork *kwork,
183 char *tmp, *tok, *str = strdup(kwork->sort_order);
187 if (sort_dimension__add(kwork, tok, &kwork->sort_list) < 0)
192 pr_debug("Sort order: %s\n", kwork->sort_order);
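The entries around lines 179-192 show setup_sorting() duplicating the --sort string and feeding each comma-separated key to sort_dimension__add(). Below is a minimal standalone sketch of that parsing loop, not the perf code itself; add_key() and the example sort string are stand-ins for sort_dimension__add() and kwork->sort_order.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Illustrative stand-in for sort_dimension__add(). */
static int add_key(const char *tok)
{
    printf("adding sort key: %s\n", tok);
    return 0;
}

int main(void)
{
    const char *sort_order = "runtime, max, count";   /* assumed example value */
    char *tmp, *tok, *str = strdup(sort_order);

    /* Split on commas/spaces and register each key, as lines 183-187 suggest. */
    for (tok = strtok_r(str, ", ", &tmp); tok;
         tok = strtok_r(NULL, ", ", &tmp)) {
        if (add_key(tok) < 0) {
            fprintf(stderr, "Unknown --sort key: `%s'\n", tok);
            break;
        }
    }

    free(str);
    return 0;
}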
196 static struct kwork_atom *atom_new(struct perf_kwork *kwork,
203 list_for_each_entry(page, &kwork->atom_page_list, list) {
217 pr_err("Failed to zalloc kwork atom page\n");
223 list_add_tail(&page->list, &kwork->atom_page_list);
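Lines 196-223 hint at how atoms are allocated: atom_new() walks kwork->atom_page_list looking for a page with a free slot and only zallocs (and appends) a new page when none has room. The standalone model below captures that pool pattern under assumed details; the slot count, bitmap layout, and prepend-vs-append choice here are illustrative, not perf's.

#include <stdio.h>
#include <stdlib.h>

#define ATOMS_PER_PAGE 64

struct atom { unsigned long long time; };

struct atom_page {
    struct atom atoms[ATOMS_PER_PAGE];
    unsigned long long used;    /* one bit per slot */
    struct atom_page *next;
};

static struct atom_page *page_list;

static struct atom *atom_new(unsigned long long time)
{
    struct atom_page *page;
    int i;

    /* Reuse a free slot from an existing page if possible. */
    for (page = page_list; page; page = page->next) {
        for (i = 0; i < ATOMS_PER_PAGE; i++) {
            if (!(page->used & (1ULL << i))) {
                page->used |= 1ULL << i;
                page->atoms[i].time = time;
                return &page->atoms[i];
            }
        }
    }

    /* All pages full: allocate a fresh zeroed page (zalloc equivalent). */
    page = calloc(1, sizeof(*page));
    if (!page) {
        fprintf(stderr, "Failed to zalloc atom page\n");
        return NULL;
    }
    page->next = page_list;    /* perf keeps pages on atom_page_list via list_add_tail() */
    page_list = page;
    page->used = 1;
    page->atoms[0].time = time;
    return &page->atoms[0];
}

int main(void)
{
    struct atom *a = atom_new(42);

    printf("atom time=%llu\n", a ? a->time : 0ULL);
    return 0;
}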
321 pr_err("Failed to zalloc kwork work\n");
351 static void profile_update_timespan(struct perf_kwork *kwork,
354 if (!kwork->summary)
357 if ((kwork->timestart == 0) || (kwork->timestart > sample->time))
358 kwork->timestart = sample->time;
360 if (kwork->timeend < sample->time)
361 kwork->timeend = sample->time;
364 static bool profile_event_match(struct perf_kwork *kwork,
370 struct perf_time_interval *ptime = &kwork->ptime;
372 if ((kwork->cpu_list != NULL) && !test_bit(cpu, kwork->cpu_bitmap))
379 if ((kwork->profile_name != NULL) &&
381 (strcmp(work->name, kwork->profile_name) != 0))
384 profile_update_timespan(kwork, sample);
388 static int work_push_atom(struct perf_kwork *kwork,
403 atom = atom_new(kwork, sample);
407 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
413 if (!profile_event_match(kwork, work, sample)) {
435 static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
450 work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
457 if (!profile_event_match(kwork, work, sample))
465 src_atom = atom_new(kwork, sample);
497 static int report_entry_event(struct perf_kwork *kwork,
503 return work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
508 static int report_exit_event(struct perf_kwork *kwork,
517 atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
552 static int latency_raise_event(struct perf_kwork *kwork,
558 return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
563 static int latency_entry_event(struct perf_kwork *kwork,
572 atom = work_pop_atom(kwork, class, KWORK_TRACE_ENTRY,
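The work_push_atom()/work_pop_atom() entries between lines 388 and 572 suggest the core accounting pattern: an entry (or raise) event pushes a timestamped atom onto the matching work item, and the paired exit (or entry, for latency) event pops it and turns the delta into runtime or delay. The sketch below is a simplified standalone model of that pairing, with illustrative names and a single pending atom per work item rather than perf's per-type atom lists.

#include <stdio.h>

struct work_stats {
    unsigned long long entry_time;      /* pending atom, 0 if none */
    unsigned long long total_runtime;
    unsigned long nr_atoms;
};

/* Roughly what work_push_atom(KWORK_TRACE_ENTRY, ...) records. */
static void push_entry(struct work_stats *w, unsigned long long t)
{
    w->entry_time = t;
}

/* Roughly what work_pop_atom(KWORK_TRACE_EXIT, ...) consumes. */
static void pop_exit(struct work_stats *w, unsigned long long t)
{
    if (!w->entry_time)
        return;    /* exit with no matching entry: counted as a skipped event */
    w->total_runtime += t - w->entry_time;
    w->nr_atoms++;
    w->entry_time = 0;
}

int main(void)
{
    struct work_stats w = { 0 };

    push_entry(&w, 100);
    pop_exit(&w, 160);
    printf("runtime=%llu ns over %lu atoms\n", w.total_runtime, w.nr_atoms);
    return 0;
}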
586 static void timehist_save_callchain(struct perf_kwork *kwork,
596 if (!kwork->show_callchain || sample->callchain == NULL)
609 NULL, NULL, kwork->max_stack + 2) != 0) {
635 static void timehist_print_event(struct perf_kwork *kwork,
664 * kwork name
692 if (kwork->show_callchain) {
711 static int timehist_raise_event(struct perf_kwork *kwork,
717 return work_push_atom(kwork, class, KWORK_TRACE_RAISE,
722 static int timehist_entry_event(struct perf_kwork *kwork,
731 ret = work_push_atom(kwork, class, KWORK_TRACE_ENTRY,
738 timehist_save_callchain(kwork, sample, evsel, machine);
743 static int timehist_exit_event(struct perf_kwork *kwork,
761 atom = work_pop_atom(kwork, class, KWORK_TRACE_EXIT,
771 timehist_print_event(kwork, work, atom, sample, &al);
786 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
788 if (kwork->tp_handler->entry_event)
789 return kwork->tp_handler->entry_event(kwork, &kwork_irq,
799 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
801 if (kwork->tp_handler->exit_event)
802 return kwork->tp_handler->exit_event(kwork, &kwork_irq,
857 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
859 if (kwork->tp_handler->raise_event)
860 return kwork->tp_handler->raise_event(kwork, &kwork_softirq,
871 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
873 if (kwork->tp_handler->entry_event)
874 return kwork->tp_handler->entry_event(kwork, &kwork_softirq,
885 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
887 if (kwork->tp_handler->exit_event)
888 return kwork->tp_handler->exit_event(kwork, &kwork_softirq,
978 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
980 if (kwork->tp_handler->raise_event)
981 return kwork->tp_handler->raise_event(kwork, &kwork_workqueue,
992 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
994 if (kwork->tp_handler->entry_event)
995 return kwork->tp_handler->entry_event(kwork, &kwork_workqueue,
1006 struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);
1008 if (kwork->tp_handler->exit_event)
1009 return kwork->tp_handler->exit_event(kwork, &kwork_workqueue,
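The tracepoint callbacks listed between lines 786 and 1009 all follow the same shape: recover the struct perf_kwork from the struct perf_tool pointer with container_of(), then dispatch through kwork->tp_handler. The standalone program below illustrates only that container_of() recovery; the struct names mirror the listing but their bodies are stand-ins.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct perf_tool { int dummy; };        /* stand-in for perf's struct perf_tool */

struct perf_kwork {
    struct perf_tool tool;              /* embedded member passed to callbacks */
    const char *name;
};

/* Callback receives only the tool pointer, as the perf event handlers do. */
static void handler(struct perf_tool *tool)
{
    struct perf_kwork *kwork = container_of(tool, struct perf_kwork, tool);

    printf("recovered %s\n", kwork->name);
}

int main(void)
{
    struct perf_kwork kwork = { .name = "kwork" };

    handler(&kwork.tool);
    return 0;
}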
1080 static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
1090 * kwork name
1108 if (kwork->report == KWORK_REPORT_RUNTIME) {
1112 } else if (kwork->report == KWORK_REPORT_LATENCY) { // avg delay
1127 if (kwork->report == KWORK_REPORT_RUNTIME) {
1143 else if (kwork->report == KWORK_REPORT_LATENCY) {
1161 static int report_print_header(struct perf_kwork *kwork)
1170 if (kwork->report == KWORK_REPORT_RUNTIME) {
1173 } else if (kwork->report == KWORK_REPORT_LATENCY) {
1180 if (kwork->report == KWORK_REPORT_RUNTIME) {
1185 } else if (kwork->report == KWORK_REPORT_LATENCY) {
1233 static void print_summary(struct perf_kwork *kwork)
1235 u64 time = kwork->timeend - kwork->timestart;
1237 printf(" Total count : %9" PRIu64 "\n", kwork->all_count);
1239 (double)kwork->all_runtime / NSEC_PER_MSEC,
1240 time == 0 ? 0 : (double)kwork->all_runtime / time);
1256 static void print_skipped_events(struct perf_kwork *kwork)
1265 if ((kwork->nr_skipped_events[KWORK_TRACE_MAX] != 0) &&
1266 (kwork->nr_events != 0)) {
1268 (double)kwork->nr_skipped_events[KWORK_TRACE_MAX] /
1269 (double)kwork->nr_events * 100.0,
1270 kwork->nr_skipped_events[KWORK_TRACE_MAX]);
1274 kwork->nr_skipped_events[i],
1282 nr_list_entry(&kwork->atom_page_list));
1285 static void print_bad_events(struct perf_kwork *kwork)
1287 if ((kwork->nr_lost_events != 0) && (kwork->nr_events != 0)) {
1289 (double)kwork->nr_lost_events /
1290 (double)kwork->nr_events * 100.0,
1291 kwork->nr_lost_events, kwork->nr_events,
1292 kwork->nr_lost_chunks);
1296 static void work_sort(struct perf_kwork *kwork, struct kwork_class *class)
1310 work_insert(&kwork->sorted_work_root,
1311 data, &kwork->sort_list);
1315 static void perf_kwork__sort(struct perf_kwork *kwork)
1319 list_for_each_entry(class, &kwork->class_list, list)
1320 work_sort(kwork, class);
1323 static int perf_kwork__check_config(struct perf_kwork *kwork,
1344 switch (kwork->report) {
1346 kwork->tp_handler = &report_ops;
1349 kwork->tp_handler = &latency_ops;
1352 kwork->tp_handler = &timehist_ops;
1355 pr_debug("Invalid report type %d\n", kwork->report);
1359 list_for_each_entry(class, &kwork->class_list, list)
1364 if (kwork->cpu_list != NULL) {
1366 kwork->cpu_list,
1367 kwork->cpu_bitmap);
1374 if (kwork->time_str != NULL) {
1375 ret = perf_time__parse_str(&kwork->ptime, kwork->time_str);
1383 if (kwork->show_callchain && !evsel__has_callchain(evsel)) {
1385 kwork->show_callchain = 0;
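Lines 1344-1356 show perf_kwork__check_config() installing one of report_ops, latency_ops, or timehist_ops as kwork->tp_handler, so later callbacks never branch on the report type again. Below is a standalone model of that function-pointer-table dispatch; the struct name, handler bodies, and enum values are illustrative.

#include <stdio.h>

enum report_type { REPORT_RUNTIME, REPORT_LATENCY, REPORT_TIMEHIST };

struct tp_handler {
    void (*entry_event)(void);
    void (*exit_event)(void);
};

static void report_entry(void)  { printf("runtime: entry\n"); }
static void report_exit(void)   { printf("runtime: exit\n"); }
static void latency_entry(void) { printf("latency: entry\n"); }
static void latency_exit(void)  { printf("latency: exit\n"); }

static const struct tp_handler report_ops  = { report_entry,  report_exit  };
static const struct tp_handler latency_ops = { latency_entry, latency_exit };

int main(void)
{
    enum report_type report = REPORT_LATENCY;
    const struct tp_handler *tp_handler;

    /* Pick the ops table once, as perf_kwork__check_config() does. */
    switch (report) {
    case REPORT_RUNTIME:
        tp_handler = &report_ops;
        break;
    case REPORT_LATENCY:
        tp_handler = &latency_ops;
        break;
    default:
        fprintf(stderr, "Invalid report type %d\n", report);
        return 1;
    }

    /* Every later event just calls through the table, guarding for NULL. */
    if (tp_handler->entry_event)
        tp_handler->entry_event();
    if (tp_handler->exit_event)
        tp_handler->exit_event();
    return 0;
}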
1393 static int perf_kwork__read_events(struct perf_kwork *kwork)
1401 .force = kwork->force,
1404 session = perf_session__new(&data, &kwork->tool);
1412 if (perf_kwork__check_config(kwork, session) != 0)
1423 if (kwork->report == KWORK_REPORT_TIMEHIST)
1432 kwork->nr_events = session->evlist->stats.nr_events[0];
1433 kwork->nr_lost_events = session->evlist->stats.total_lost;
1434 kwork->nr_lost_chunks = session->evlist->stats.nr_events[PERF_RECORD_LOST];
1441 static void process_skipped_events(struct perf_kwork *kwork,
1449 kwork->nr_skipped_events[i] += count;
1450 kwork->nr_skipped_events[KWORK_TRACE_MAX] += count;
1454 struct kwork_work *perf_kwork_add_work(struct perf_kwork *kwork,
1464 work_insert(&class->work_root, work, &kwork->cmp_id);
1477 static int perf_kwork__report_bpf(struct perf_kwork *kwork)
1484 ret = perf_kwork__trace_prepare_bpf(kwork);
1499 perf_kwork__report_read_bpf(kwork);
1506 static int perf_kwork__report(struct perf_kwork *kwork)
1512 if (kwork->use_bpf)
1513 ret = perf_kwork__report_bpf(kwork);
1515 ret = perf_kwork__read_events(kwork);
1520 perf_kwork__sort(kwork);
1524 ret = report_print_header(kwork);
1525 next = rb_first_cached(&kwork->sorted_work_root);
1528 process_skipped_events(kwork, work);
1531 report_print_work(kwork, work);
1532 if (kwork->summary) {
1533 kwork->all_runtime += work->total_runtime;
1534 kwork->all_count += work->nr_atoms;
1541 if (kwork->summary) {
1542 print_summary(kwork);
1546 print_bad_events(kwork);
1547 print_skipped_events(kwork);
1575 static int perf_kwork__timehist(struct perf_kwork *kwork)
1580 kwork->tool.comm = perf_event__process_comm;
1581 kwork->tool.exit = perf_event__process_exit;
1582 kwork->tool.fork = perf_event__process_fork;
1583 kwork->tool.attr = perf_event__process_attr;
1584 kwork->tool.tracing_data = perf_event__process_tracing_data;
1585 kwork->tool.build_id = perf_event__process_build_id;
1586 kwork->tool.ordered_events = true;
1587 kwork->tool.ordering_requires_timestamps = true;
1588 symbol_conf.use_callchain = kwork->show_callchain;
1597 return perf_kwork__read_events(kwork);
1600 static void setup_event_list(struct perf_kwork *kwork,
1608 if (kwork->event_list_str == NULL)
1611 str = strdup(kwork->event_list_str);
1617 list_add_tail(&class->list, &kwork->class_list);
1630 * config all kwork events if not specified
1632 if (list_empty(&kwork->class_list)) {
1635 &kwork->class_list);
1640 list_for_each_entry(class, &kwork->class_list, list)
1645 static int perf_kwork__record(struct perf_kwork *kwork,
1662 list_for_each_entry(class, &kwork->class_list, list)
1672 list_for_each_entry(class, &kwork->class_list, list) {
1694 static struct perf_kwork kwork = {
1695 .class_list = LIST_HEAD_INIT(kwork.class_list),
1702 .atom_page_list = LIST_HEAD_INIT(kwork.atom_page_list),
1703 .sort_list = LIST_HEAD_INIT(kwork.sort_list),
1704 .cmp_id = LIST_HEAD_INIT(kwork.cmp_id),
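The static initializer at lines 1694-1704 uses LIST_HEAD_INIT so the embedded list heads are valid (empty, self-pointing) before any code runs. The small standalone example below shows why that works; the struct and macro here are local stand-ins for the kernel list API, not includes from it.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Self-referential static initialization: next and prev point at the head itself. */
#define LIST_HEAD_INIT(name) { &(name), &(name) }

static struct demo {
    struct list_head class_list;
    struct list_head sort_list;
} demo = {
    .class_list = LIST_HEAD_INIT(demo.class_list),
    .sort_list  = LIST_HEAD_INIT(demo.sort_list),
};

int main(void)
{
    /* An empty list is one whose head points back at itself. */
    printf("class_list empty: %d\n", demo.class_list.next == &demo.class_list);
    printf("sort_list empty: %d\n", demo.sort_list.next == &demo.sort_list);
    return 0;
}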
1732 OPT_STRING('k', "kwork", &kwork.event_list_str, "kwork",
1733 "list of kwork to profile (irq, softirq, workqueue, etc)"),
1734 OPT_BOOLEAN('f', "force", &kwork.force, "don't complain, do it"),
1738 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1740 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1742 OPT_STRING('n', "name", &kwork.profile_name, "name",
1744 OPT_STRING(0, "time", &kwork.time_str, "str",
1748 OPT_BOOLEAN('S', "with-summary", &kwork.summary,
1751 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1752 "Use BPF to measure kwork runtime"),
1757 OPT_STRING('s', "sort", &kwork.sort_order, "key[,key2...]",
1759 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1761 OPT_STRING('n', "name", &kwork.profile_name, "name",
1763 OPT_STRING(0, "time", &kwork.time_str, "str",
1768 OPT_BOOLEAN('b', "use-bpf", &kwork.use_bpf,
1769 "Use BPF to measure kwork latency"),
1778 OPT_BOOLEAN('g', "call-graph", &kwork.show_callchain,
1780 OPT_UINTEGER(0, "max-stack", &kwork.max_stack,
1784 OPT_STRING(0, "time", &kwork.time_str, "str",
1786 OPT_STRING('C', "cpu", &kwork.cpu_list, "cpu",
1788 OPT_STRING('n', "name", &kwork.profile_name, "name",
1799 "perf kwork report [<options>]",
1803 "perf kwork latency [<options>]",
1807 "perf kwork timehist [<options>]",
1820 setup_event_list(&kwork, kwork_options, kwork_usage);
1821 sort_dimension__add(&kwork, "id", &kwork.cmp_id);
1824 return perf_kwork__record(&kwork, argc, argv);
1826 kwork.sort_order = default_report_sort_order;
1832 kwork.report = KWORK_REPORT_RUNTIME;
1833 setup_sorting(&kwork, report_options, report_usage);
1834 return perf_kwork__report(&kwork);
1836 kwork.sort_order = default_latency_sort_order;
1842 kwork.report = KWORK_REPORT_LATENCY;
1843 setup_sorting(&kwork, latency_options, latency_usage);
1844 return perf_kwork__report(&kwork);
1851 kwork.report = KWORK_REPORT_TIMEHIST;
1852 return perf_kwork__timehist(&kwork);