Lines matching refs:rec — every reference to the local `rec` (a `struct record *`) in perf's tools/perf/builtin-record.c
123 static bool switch_output_signal(struct record *rec)
125 return rec->switch_output.signal &&
129 static bool switch_output_size(struct record *rec)
131 return rec->switch_output.size &&
133 (rec->bytes_written >= rec->switch_output.size);
136 static bool switch_output_time(struct record *rec)
138 return rec->switch_output.time &&
142 static bool record__output_max_size_exceeded(struct record *rec)
144 return rec->output_max_size &&
145 (rec->bytes_written >= rec->output_max_size);
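
The three switch_output_* predicates and record__output_max_size_exceeded() all follow one pattern: a feature is armed (non-zero signal flag, size budget, or timer) and its threshold has tripped. A minimal standalone sketch of the size variant, with illustrative types standing in for perf's struct record:

        #include <stdbool.h>
        #include <stdint.h>

        struct switch_output {
                bool     signal;        /* rotate on SIGUSR2 */
                uint64_t size;          /* rotate after this many bytes, 0 = off */
                uint64_t time;          /* rotate every N seconds, 0 = off */
        };

        struct recorder {
                struct switch_output switch_output;
                uint64_t bytes_written;
        };

        /* Size-based rotation: armed, and the byte budget is spent. */
        static bool switch_output_size(struct recorder *rec)
        {
                return rec->switch_output.size &&
                       rec->bytes_written >= rec->switch_output.size;
        }
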
148 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
151 struct perf_data_file *file = &rec->session->data->file;
158 rec->bytes_written += size;
160 if (record__output_max_size_exceeded(rec) && !done) {
163 rec->bytes_written >> 10);
167 if (switch_output_size(rec))
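
record__write() is the single choke point for output: it appends to the data file, accumulates rec->bytes_written, stops the session when the output size cap is exceeded, and requests rotation when the switch-output size threshold trips. A hedged sketch of that accounting, using a plain FILE * in place of perf's perf_data_file:

        #include <stdio.h>
        #include <stdint.h>
        #include <stdbool.h>

        static volatile bool done;      /* set when the size cap is hit */

        /* Illustrative stand-in for perf's session/file plumbing. */
        struct recorder {
                FILE    *out;
                uint64_t bytes_written;
                uint64_t output_max_size;       /* 0 = unlimited */
        };

        static int recorder_write(struct recorder *rec, const void *buf, size_t size)
        {
                if (fwrite(buf, 1, size, rec->out) != size)
                        return -1;

                rec->bytes_written += size;

                if (rec->output_max_size &&
                    rec->bytes_written >= rec->output_max_size && !done) {
                        fprintf(stderr, "[ output reached max size, %lu KB written, stopping ]\n",
                                (unsigned long)(rec->bytes_written >> 10));
                        done = true;
                }
                return 0;
        }
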
173 static int record__aio_enabled(struct record *rec);
174 static int record__comp_enabled(struct record *rec);
286 struct record *rec;
309 if (record__comp_enabled(aio->rec)) {
310 size = zstd_compress(aio->rec->session, aio->data + aio->size,
336 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
339 int trace_fd = rec->session->data->file.fd;
340 struct record_aio aio = { .rec = rec, .size = 0 };
353 rec->samples++;
357 rec->bytes_written += aio.size;
358 if (switch_output_size(rec))
383 static void record__aio_mmap_read_sync(struct record *rec)
386 struct evlist *evlist = rec->evlist;
389 if (!record__aio_enabled(rec))
423 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
438 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
443 static int record__aio_enabled(struct record *rec)
445 return rec->opts.nr_cblocks > 0;
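
record__aio_enabled() reduces to rec->opts.nr_cblocks > 0: with no control blocks configured, the __maybe_unused variants above serve as no-op stubs and record falls back to synchronous writes. For orientation, a generic POSIX aio(7) write, loosely analogous to what record__aio_push() does with perf's own record_aio control blocks (this is plain libc AIO, not perf's implementation):

        #include <aio.h>
        #include <string.h>

        /* Queue one asynchronous write; the caller polls aio_error()
         * later and reclaims the buffer once it stops returning
         * EINPROGRESS. */
        static int queue_aio_write(struct aiocb *cb, int fd, void *buf,
                                   size_t size, off_t off)
        {
                memset(cb, 0, sizeof(*cb));
                cb->aio_fildes = fd;
                cb->aio_buf    = buf;
                cb->aio_nbytes = size;
                cb->aio_offset = off;

                return aio_write(cb);   /* 0 if queued, -1 with errno on error */
        }
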
504 static int record__comp_enabled(struct record *rec)
506 return rec->opts.comp_level > 0;
514 struct record *rec = container_of(tool, struct record, tool);
515 return record__write(rec, NULL, event, event->header.size);
534 struct record *rec = to;
536 if (record__comp_enabled(rec)) {
537 size = zstd_compress(rec->session, map->data, mmap__mmap_len(map), bf, size);
541 rec->samples++;
542 return record__write(rec, map, bf, size);
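
record__pushfn() compresses the mmap'd chunk only when record__comp_enabled() holds, i.e. comp_level > 0; perf routes this through its own zstd_compress() wrapper. A sketch of the same gate using libzstd's one-shot API directly:

        #include <zstd.h>
        #include <stdio.h>

        /* Compress src into dst when level > 0; otherwise report that the
         * data should go out as-is. Returns the number of bytes to write
         * from dst, or 0 meaning "use src unchanged". */
        static size_t maybe_compress(void *dst, size_t dst_cap,
                                     const void *src, size_t src_len, int level)
        {
                size_t n;

                if (level <= 0)
                        return 0;

                n = ZSTD_compress(dst, dst_cap, src, src_len, level);
                if (ZSTD_isError(n)) {
                        fprintf(stderr, "zstd: %s\n", ZSTD_getErrorName(n));
                        return 0;
                }
                return n;
        }
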
598 struct record *rec = container_of(tool, struct record, tool);
599 struct perf_data *data = &rec->data;
611 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
622 record__write(rec, map, event, event->header.size);
623 record__write(rec, map, data1, len1);
625 record__write(rec, map, data2, len2);
626 record__write(rec, map, &pad, padding);
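
The auxtrace path writes the event header, then the AUX data in up to two chunks (data1/data2, because the ring buffer may have wrapped), then zero padding to keep the record aligned. A self-contained sketch of that wrapped write, assuming 8-byte alignment for illustration:

        #include <stdint.h>
        #include <unistd.h>

        /* Write a wrapped ring-buffer range as two chunks plus zero
         * padding up to an 8-byte boundary. */
        static int write_wrapped(int fd, const void *data1, size_t len1,
                                 const void *data2, size_t len2)
        {
                uint64_t pad = 0;
                size_t padding = (len1 + len2) & 7;

                if (padding)
                        padding = 8 - padding;

                if (write(fd, data1, len1) != (ssize_t)len1)
                        return -1;
                if (len2 && write(fd, data2, len2) != (ssize_t)len2)
                        return -1;
                if (padding && write(fd, &pad, padding) != (ssize_t)padding)
                        return -1;
                return 0;
        }
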
631 static int record__auxtrace_mmap_read(struct record *rec,
636 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
642 rec->samples++;
647 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
652 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
654 rec->opts.auxtrace_snapshot_size);
659 rec->samples++;
664 static int record__auxtrace_read_snapshot_all(struct record *rec)
669 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
670 struct mmap *map = &rec->evlist->mmap[i];
675 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
684 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
687 if (record__auxtrace_read_snapshot_all(rec) < 0) {
690 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
697 static int record__auxtrace_snapshot_exit(struct record *rec)
703 auxtrace_record__snapshot_start(rec->itr))
706 record__read_auxtrace_snapshot(rec, true);
713 static int record__auxtrace_init(struct record *rec)
717 if (!rec->itr) {
718 rec->itr = auxtrace_record__init(rec->evlist, &err);
723 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
724 rec->opts.auxtrace_snapshot_opts);
728 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
729 rec->opts.auxtrace_sample_opts);
733 return auxtrace_parse_filters(rec->evlist);
739 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
746 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
758 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
763 static int record__auxtrace_init(struct record *rec __maybe_unused)
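
The __maybe_unused signatures above are the stubs compiled in when auxtrace support is absent: same prototypes, empty bodies, with the annotation suppressing unused-parameter warnings. The general shape of that pattern:

        struct recorder;        /* opaque; a pointer is all the stub needs */

        #ifdef HAVE_FEATURE_SUPPORT

        static int feature_init(struct recorder *rec)
        {
                /* real initialization */
                return 0;
        }

        #else /* !HAVE_FEATURE_SUPPORT */

        #define __maybe_unused __attribute__((unused))

        static int feature_init(struct recorder *rec __maybe_unused)
        {
                return 0;       /* feature disabled at build time */
        }

        #endif
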
838 static int record__mmap_evlist(struct record *rec,
841 struct record_opts *opts = &rec->opts;
874 static int record__mmap(struct record *rec)
876 return record__mmap_evlist(rec, rec->evlist);
879 static int record__open(struct record *rec)
883 struct evlist *evlist = rec->evlist;
884 struct perf_session *session = rec->session;
885 struct record_opts *opts = &rec->opts;
957 rc = record__mmap(rec);
973 struct record *rec = container_of(tool, struct record, tool);
975 if (rec->evlist->first_sample_time == 0)
976 rec->evlist->first_sample_time = sample->time;
978 rec->evlist->last_sample_time = sample->time;
980 if (rec->buildid_all)
983 rec->samples++;
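
process_sample_event() maintains running first/last sample timestamps on the evlist; the zero test seeds the minimum from the first sample seen, while the last timestamp is updated unconditionally. The same bookkeeping in isolation:

        #include <stdint.h>

        struct sample_bounds {
                uint64_t first_sample_time;     /* 0 = no samples seen yet */
                uint64_t last_sample_time;
        };

        static void track_sample_time(struct sample_bounds *b, uint64_t t)
        {
                if (b->first_sample_time == 0)
                        b->first_sample_time = t;
                b->last_sample_time = t;
        }
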
987 static int process_buildids(struct record *rec)
989 struct perf_session *session = rec->session;
991 if (perf_data__size(&rec->data) == 0)
1011 if (rec->buildid_all && !rec->timestamp_boundary)
1012 rec->tool.sample = NULL;
1051 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1053 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1054 !bitmap_equal(rec->affinity_mask.bits, map->affinity_mask.bits,
1055 rec->affinity_mask.nbits)) {
1056 bitmap_zero(rec->affinity_mask.bits, rec->affinity_mask.nbits);
1057 bitmap_or(rec->affinity_mask.bits, rec->affinity_mask.bits,
1058 map->affinity_mask.bits, rec->affinity_mask.nbits);
1059 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&rec->affinity_mask),
1060 (cpu_set_t *)rec->affinity_mask.bits);
1062 mmap_cpu_mask__scnprintf(&rec->affinity_mask, "thread");
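
record__adjust_affinity() migrates the reading thread onto the CPU mask of the mmap it is about to drain, keeping the reads local to that mmap's node. A minimal glibc sketch of the underlying call, pinning the calling thread to a single CPU:

        #define _GNU_SOURCE
        #include <sched.h>
        #include <stdio.h>

        /* Pin the calling thread to one CPU (pid 0 = current thread). */
        static int pin_to_cpu(int cpu)
        {
                cpu_set_t set;

                CPU_ZERO(&set);
                CPU_SET(cpu, &set);

                if (sched_setaffinity(0, sizeof(set), &set)) {
                        perror("sched_setaffinity");
                        return -1;
                }
                return 0;
        }
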
1097 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1100 u64 bytes_written = rec->bytes_written;
1104 int trace_fd = rec->data.file.fd;
1117 if (record__aio_enabled(rec))
1125 record__adjust_affinity(rec, map);
1130 if (!record__aio_enabled(rec)) {
1131 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1138 if (record__aio_push(rec, map, &off) < 0) {
1150 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1151 !rec->opts.auxtrace_sample_mode &&
1152 record__auxtrace_mmap_read(rec, map) != 0) {
1158 if (record__aio_enabled(rec))
1165 if (bytes_written != rec->bytes_written)
1166 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
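
At the end of a pass over the mmaps, the function compares rec->bytes_written against the value saved on entry and appends a finished-round marker only if the pass actually produced data; the marker lets the consumer flush its event-ordering queue. The pattern, with stub helpers standing in for perf's mmap draining and record__write():

        #include <stdint.h>

        struct recorder {
                uint64_t bytes_written;
        };

        /* Illustrative stand-ins for the real drain and marker writes. */
        static int drain_all_mmaps(struct recorder *rec)
        {
                rec->bytes_written += 4096;     /* pretend one page was flushed */
                return 0;
        }

        static int write_round_marker(struct recorder *rec)
        {
                rec->bytes_written += 8;        /* marker is itself written out */
                return 0;
        }

        static int read_all(struct recorder *rec)
        {
                uint64_t before = rec->bytes_written;

                if (drain_all_mmaps(rec) < 0)
                        return -1;

                /* Emit the round marker only if this pass produced data. */
                if (rec->bytes_written != before)
                        return write_round_marker(rec);
                return 0;
        }
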
1174 static int record__mmap_read_all(struct record *rec, bool synch)
1178 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1182 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1185 static void record__init_features(struct record *rec)
1187 struct perf_session *session = rec->session;
1193 if (rec->no_buildid)
1196 if (!have_tracepoints(&rec->evlist->core.entries))
1199 if (!rec->opts.branch_stack)
1202 if (!rec->opts.full_auxtrace)
1205 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1208 if (!rec->opts.use_clockid)
1212 if (!record__comp_enabled(rec))
1219 record__finish_output(struct record *rec)
1221 struct perf_data *data = &rec->data;
1227 rec->session->header.data_size += rec->bytes_written;
1230 if (!rec->no_buildid) {
1231 process_buildids(rec);
1233 if (rec->buildid_all)
1234 dsos__hit_all(rec->session);
1236 perf_session__write_header(rec->session, rec->evlist, fd, true);
1241 static int record__synthesize_workload(struct record *rec, bool tail)
1246 if (rec->opts.tail_synthesize != tail)
1249 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1253 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1255 &rec->session->machines.host,
1256 rec->opts.sample_address);
1261 static int record__synthesize(struct record *rec, bool tail);
1264 record__switch_output(struct record *rec, bool at_exit)
1266 struct perf_data *data = &rec->data;
1273 record__aio_mmap_read_sync(rec);
1275 record__synthesize(rec, true);
1276 if (target__none(&rec->opts.target))
1277 record__synthesize_workload(rec, true);
1279 rec->samples = 0;
1280 record__finish_output(rec);
1288 rec->session->header.data_offset,
1291 rec->bytes_written = 0;
1292 rec->session->header.data_size = 0;
1299 if (rec->switch_output.num_files) {
1300 int n = rec->switch_output.cur_file + 1;
1302 if (n >= rec->switch_output.num_files)
1304 rec->switch_output.cur_file = n;
1305 if (rec->switch_output.filenames[n]) {
1306 remove(rec->switch_output.filenames[n]);
1307 zfree(&rec->switch_output.filenames[n]);
1309 rec->switch_output.filenames[n] = new_filename;
1316 record__synthesize(rec, false);
1327 if (target__none(&rec->opts.target))
1328 record__synthesize_workload(rec, false);
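
record__switch_output() finalizes the current file, starts a new one, and, when rotation is capped to N files, treats the filename array as a ring: the slot about to be reused has its old file deleted first. That ring logic in isolation (names are illustrative):

        #include <stdio.h>
        #include <stdlib.h>

        struct output_ring {
                int    num_files;       /* keep at most this many */
                int    cur_file;
                char **filenames;
        };

        /* Advance the ring to the next slot, dropping whatever file
         * previously occupied it. Returns the slot index. */
        static int ring_advance(struct output_ring *r, char *new_filename)
        {
                int n = r->cur_file + 1;

                if (n >= r->num_files)
                        n = 0;
                r->cur_file = n;

                if (r->filenames[n]) {
                        remove(r->filenames[n]);        /* delete oldest output */
                        free(r->filenames[n]);
                }
                r->filenames[n] = new_filename;         /* takes ownership */
                return n;
        }
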
1364 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1368 pc = perf_evlist__pick_pc(rec->evlist);
1374 static int record__synthesize(struct record *rec, bool tail)
1376 struct perf_session *session = rec->session;
1378 struct perf_data *data = &rec->data;
1379 struct record_opts *opts = &rec->opts;
1380 struct perf_tool *tool = &rec->tool;
1385 if (rec->opts.tail_synthesize != tail)
1393 err = perf_event__synthesize_attrs(tool, rec->evlist,
1400 err = perf_event__synthesize_features(tool, session, rec->evlist,
1407 if (have_tracepoints(&rec->evlist->core.entries)) {
1416 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
1422 rec->bytes_written += err;
1426 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
1432 if (rec->opts.auxtrace_sample_mode) {
1440 if (rec->opts.full_auxtrace) {
1441 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
1447 if (!perf_evlist__exclude_kernel(rec->evlist)) {
1466 err = perf_event__synthesize_extra_attr(&rec->tool,
1467 rec->evlist,
1473 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
1481 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
1498 if (rec->opts.nr_threads_synthesize > 1) {
1503 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->core.threads,
1505 rec->opts.nr_threads_synthesize);
1507 if (rec->opts.nr_threads_synthesize > 1)
1516 struct record *rec = data;
1517 pthread_kill(rec->thread_id, SIGUSR2);
1521 static int record__setup_sb_evlist(struct record *rec)
1523 struct record_opts *opts = &rec->opts;
1525 if (rec->sb_evlist != NULL) {
1531 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
1532 rec->thread_id = pthread_self();
1536 if (rec->sb_evlist == NULL) {
1537 rec->sb_evlist = evlist__new();
1539 if (rec->sb_evlist == NULL) {
1545 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
1551 if (perf_evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
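
record__process_signal_event() forwards side-band activity to the main thread by raising SIGUSR2 via pthread_kill() against the thread id captured during setup. The minimal version of that handshake:

        #include <pthread.h>
        #include <signal.h>

        static pthread_t main_thread;

        /* Called once from the main thread during setup. */
        static void remember_main_thread(void)
        {
                main_thread = pthread_self();
        }

        /* Called from a helper thread: poke the main thread, whose
         * SIGUSR2 handler decides what to do about it. */
        static int poke_main_thread(void)
        {
                return pthread_kill(main_thread, SIGUSR2);
        }
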
1559 static int record__init_clock(struct record *rec)
1561 struct perf_session *session = rec->session;
1566 if (!rec->opts.use_clockid)
1569 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
1570 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
1572 session->header.env.clock.clockid = rec->opts.clockid;
1579 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
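
record__init_clock() stores the user-selected clockid plus a paired reference sample in the header so reports can convert session timestamps to wall time. A sketch of capturing such a reference pair (the two clock_gettime() calls are treated as simultaneous for this purpose):

        #include <time.h>
        #include <stdint.h>
        #include <stdio.h>

        /* Capture a reference sample of a user-chosen clock next to
         * CLOCK_REALTIME; a consumer can later convert that clock's
         * timestamps to wall time by offsetting against this pair. */
        static int capture_clock_ref(clockid_t clockid, uint64_t *ref_ns,
                                     uint64_t *tod_ns)
        {
                struct timespec ref, tod;

                if (clock_gettime(clockid, &ref)) {
                        perror("clock_gettime");
                        return -1;
                }
                if (clock_gettime(CLOCK_REALTIME, &tod))
                        return -1;

                *ref_ns = (uint64_t)ref.tv_sec * 1000000000ULL + ref.tv_nsec;
                *tod_ns = (uint64_t)tod.tv_sec * 1000000000ULL + tod.tv_nsec;
                return 0;
        }
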
1596 static void hit_auxtrace_snapshot_trigger(struct record *rec)
1601 if (auxtrace_record__snapshot_start(rec->itr))
1606 static int __cmd_record(struct record *rec, int argc, const char **argv)
1612 struct perf_tool *tool = &rec->tool;
1613 struct record_opts *opts = &rec->opts;
1614 struct perf_data *data = &rec->data;
1627 if (rec->opts.record_namespaces)
1630 if (rec->opts.record_cgroup) {
1639 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
1641 if (rec->opts.auxtrace_snapshot_mode)
1643 if (rec->switch_output.enabled)
1656 rec->session = session;
1658 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
1669 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
1678 session->header.env.comp_level = rec->opts.comp_level;
1680 if (rec->opts.kcore &&
1686 if (record__init_clock(rec))
1689 record__init_features(rec);
1692 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
1708 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
1709 rec->opts.sample_id = true;
1711 if (record__open(rec) != 0) {
1717 if (rec->opts.kcore) {
1739 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
1741 rec->tool.ordered_events = false;
1744 if (!rec->evlist->nr_groups)
1752 err = perf_session__write_header(session, rec->evlist, fd, false);
1758 if (!rec->no_buildid
1765 err = record__setup_sb_evlist(rec);
1769 err = record__synthesize(rec, false);
1773 if (rec->realtime_prio) {
1776 param.sched_priority = rec->realtime_prio;
1790 evlist__enable(rec->evlist);
1813 rec->evlist->workload.pid,
1833 rec->evlist->workload.pid,
1838 perf_evlist__start_workload(rec->evlist);
1841 if (evlist__initialize_ctlfd(rec->evlist, opts->ctl_fd, opts->ctl_fd_ack))
1848 evlist__enable(rec->evlist);
1857 unsigned long long hits = rec->samples;
1860 * rec->evlist->bkw_mmap_state is possible to be
1862 * hits != rec->samples in previous round.
1868 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1870 if (record__mmap_read_all(rec, false) < 0) {
1880 record__read_auxtrace_snapshot(rec, false);
1898 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1907 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1913 fd = record__switch_output(rec, false);
1922 if (rec->switch_output.time)
1923 alarm(rec->switch_output.time);
1926 if (hits == rec->samples) {
1929 err = evlist__poll(rec->evlist, -1);
1938 if (evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1942 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
1951 hit_auxtrace_snapshot_trigger(rec);
1952 evlist__ctlfd_ack(rec->evlist);
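
The main loop blocks in evlist__poll() only when a full pass found nothing new (hits == rec->samples); otherwise it immediately drains again, and the control fd can wake it to take a snapshot. A generic drain-then-wait skeleton of that shape (the drain callback is an illustrative stand-in for perf's mmap reading):

        #include <poll.h>
        #include <errno.h>
        #include <signal.h>

        static volatile sig_atomic_t done;

        static void record_loop(struct pollfd *fds, int nfds,
                                unsigned long (*drain)(void))
        {
                unsigned long samples = 0;

                while (!done) {
                        unsigned long hits = samples;

                        samples += drain();

                        /* Only sleep when this pass produced nothing. */
                        if (hits == samples) {
                                if (poll(fds, nfds, -1) < 0 && errno != EINTR)
                                        break;
                        }
                }
        }
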
1968 evlist__disable(rec->evlist);
1977 record__auxtrace_snapshot_exit(rec);
1990 if (target__none(&rec->opts.target))
1991 record__synthesize_workload(rec, true);
1994 evlist__finalize_ctlfd(rec->evlist);
1995 record__mmap_read_all(rec, true);
1996 record__aio_mmap_read_sync(rec);
1998 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
1999 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
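
The bytes_transferred/bytes_compressed pair yields the achieved compression ratio; testing both counters first avoids dividing by zero when compression never ran. As a tiny helper:

        #include <stdint.h>

        /* Ratio is only meaningful when compression actually ran. */
        static float comp_ratio(uint64_t transferred, uint64_t compressed)
        {
                if (!transferred || !compressed)
                        return 0.0f;
                return (float)transferred / (float)compressed;
        }
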
2007 kill(rec->evlist->workload.pid, SIGTERM);
2020 record__synthesize(rec, true);
2022 rec->samples = 0;
2025 if (!rec->timestamp_filename) {
2026 record__finish_output(rec);
2028 fd = record__switch_output(rec, true);
2040 const char *postfix = rec->timestamp_filename ?
2043 if (rec->samples && !rec->opts.full_auxtrace)
2045 " (%" PRIu64 " samples)", rec->samples);
2054 rec->session->bytes_transferred / 1024.0 / 1024.0,
2069 perf_evlist__stop_sb_thread(rec->sb_evlist);
2133 struct record *rec = cb;
2137 rec->no_buildid_cache = false;
2139 rec->no_buildid_cache = true;
2141 rec->no_buildid = true;
2152 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2153 if (!rec->opts.nr_cblocks)
2154 rec->opts.nr_cblocks = nr_cblocks_default;
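
The config hook parses nr_cblocks with strtol() and falls back to a built-in default whenever parsing yields zero. Equivalent logic (the default value here is illustrative):

        #include <stdlib.h>

        #define NR_CBLOCKS_DEFAULT 1    /* illustrative default */

        static int parse_nr_cblocks(const char *value)
        {
                /* base 0: accepts decimal, 0x hex, and leading-0 octal */
                int n = (int)strtol(value, NULL, 0);

                return n ? n : NR_CBLOCKS_DEFAULT;
        }
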
2256 static void switch_output_size_warn(struct record *rec)
2258 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
2259 struct switch_output *s = &rec->switch_output;
2273 static int switch_output_setup(struct record *rec)
2275 struct switch_output *s = &rec->switch_output;
2297 if (rec->switch_output_event_set)
2328 rec->timestamp_filename = true;
2331 if (s->size && !rec->opts.no_buffering)
2332 switch_output_size_warn(rec);
2609 struct record *rec = &record;
2635 rec->opts.affinity = PERF_AFFINITY_SYS;
2637 rec->evlist = evlist__new();
2638 if (rec->evlist == NULL)
2641 err = perf_config(perf_record_config, rec);
2651 if (!argc && target__none(&rec->opts.target))
2652 rec->opts.target.system_wide = true;
2654 if (nr_cgroups && !rec->opts.target.system_wide) {
2660 if (rec->opts.kcore)
2661 rec->data.is_dir = true;
2663 if (rec->opts.comp_level != 0) {
2665 rec->no_buildid = true;
2668 if (rec->opts.record_switch_events &&
2676 if (switch_output_setup(rec)) {
2682 if (rec->switch_output.time) {
2684 alarm(rec->switch_output.time);
2687 if (rec->switch_output.num_files) {
2688 rec->switch_output.filenames = calloc(sizeof(char *),
2689 rec->switch_output.num_files);
2690 if (!rec->switch_output.filenames) {
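
One aside on the allocation above: calloc()'s prototype is calloc(size_t nmemb, size_t size), so the element count conventionally comes first. The call as quoted passes the element size first; the resulting allocation is identical, but newer compilers (e.g. GCC's -Wcalloc-transposed-args) can warn about the transposed order. The conventional spelling would be:

        rec->switch_output.filenames = calloc(rec->switch_output.num_files,
                                              sizeof(char *));
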
2704 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
2705 rec->affinity_mask.nbits = cpu__max_cpu();
2706 rec->affinity_mask.bits = bitmap_alloc(rec->affinity_mask.nbits);
2707 if (!rec->affinity_mask.bits) {
2708 pr_err("Failed to allocate thread mask for %zd cpus\n", rec->affinity_mask.nbits);
2712 pr_debug2("thread mask[%zd]: empty\n", rec->affinity_mask.nbits);
2715 err = record__auxtrace_init(rec);
2722 err = bpf__setup_stdout(rec->evlist);
2724 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
2732 if (rec->no_buildid_cache || rec->no_buildid) {
2734 } else if (rec->switch_output.enabled) {
2746 * if ((rec->no_buildid || !rec->no_buildid_set) &&
2747 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
2752 if (rec->no_buildid_set && !rec->no_buildid)
2754 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
2757 rec->no_buildid = true;
2758 rec->no_buildid_cache = true;
2766 if (rec->evlist->core.nr_entries == 0 &&
2767 __evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
2772 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
2773 rec->opts.no_inherit = true;
2775 err = target__validate(&rec->opts.target);
2777 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
2781 err = target__parse_uid(&rec->opts.target);
2785 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
2793 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
2796 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
2799 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
2808 if (rec->opts.full_auxtrace)
2809 rec->buildid_all = true;
2811 if (rec->opts.text_poke) {
2812 err = record__config_text_poke(rec->evlist);
2819 if (record_opts__config(&rec->opts)) {
2824 if (rec->opts.nr_cblocks > nr_cblocks_max)
2825 rec->opts.nr_cblocks = nr_cblocks_max;
2826 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
2828 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
2829 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
2831 if (rec->opts.comp_level > comp_level_max)
2832 rec->opts.comp_level = comp_level_max;
2833 pr_debug("comp level: %d\n", rec->opts.comp_level);
2837 bitmap_free(rec->affinity_mask.bits);
2838 evlist__delete(rec->evlist);
2840 auxtrace_record__free(rec->itr);
2842 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
2848 struct record *rec = &record;
2850 hit_auxtrace_snapshot_trigger(rec);
2852 if (switch_output_signal(rec))
2858 struct record *rec = &record;
2860 if (switch_output_time(rec))
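
The closing fragments are the SIGUSR2 and SIGALRM paths: the handlers merely hit the auxtrace-snapshot and switch-output triggers and leave the actual work to the main loop, which keeps the handlers async-signal-safe. The general pattern:

        #include <signal.h>
        #include <unistd.h>

        static volatile sig_atomic_t snapshot_pending;
        static volatile sig_atomic_t switch_pending;

        /* Handlers just raise flags; the main loop notices them on its
         * next pass. Restricting handlers to sig_atomic_t stores keeps
         * them async-signal-safe. */
        static void sigusr2_handler(int sig __attribute__((unused)))
        {
                snapshot_pending = 1;
                switch_pending = 1;
        }

        static void sigalrm_handler(int sig __attribute__((unused)))
        {
                switch_pending = 1;
        }

        static void install_handlers(unsigned int interval_sec)
        {
                signal(SIGUSR2, sigusr2_handler);
                signal(SIGALRM, sigalrm_handler);
                if (interval_sec)
                        alarm(interval_sec);    /* re-armed after each rotation */
        }
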