Lines matching refs:rec in tools/perf/builtin-record.c (Linux perf tool); each entry below is the source line number followed by the matching source line.

113 	struct record		*rec;
203 static int record__threads_enabled(struct record *rec)
205 return rec->opts.threads_spec;
208 static bool switch_output_signal(struct record *rec)
210 return rec->switch_output.signal &&
214 static bool switch_output_size(struct record *rec)
216 return rec->switch_output.size &&
218 (rec->bytes_written >= rec->switch_output.size);
221 static bool switch_output_time(struct record *rec)
223 return rec->switch_output.time &&
227 static u64 record__bytes_written(struct record *rec)
229 return rec->bytes_written + rec->thread_bytes_written;
232 static bool record__output_max_size_exceeded(struct record *rec)
234 return rec->output_max_size &&
235 (record__bytes_written(rec) >= rec->output_max_size);
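Read together, lines 208-235 above define the two size triggers: switch_output_size() decides when to rotate the output file, and record__output_max_size_exceeded() caps total output across all writer threads. A minimal self-contained sketch, with the structs reduced to only the fields visible in this listing:

	#include <stdbool.h>
	#include <stdint.h>

	typedef uint64_t u64;

	/* Reduced stand-ins for the perf structs: only the fields referenced
	 * by the predicates in this listing are kept. */
	struct switch_output { bool signal; u64 size; u64 time; };

	struct record {
		u64 bytes_written;          /* written by the main thread */
		u64 thread_bytes_written;   /* accumulated from worker threads */
		u64 output_max_size;        /* 0 means no limit */
		struct switch_output switch_output;
	};

	static u64 record__bytes_written(struct record *rec)
	{
		return rec->bytes_written + rec->thread_bytes_written;
	}

	/* Rotate the output file once the configured threshold is reached. */
	static bool switch_output_size(struct record *rec)
	{
		return rec->switch_output.size &&
		       (rec->bytes_written >= rec->switch_output.size);
	}

	/* Stop the session once the hard cap on total output is reached. */
	static bool record__output_max_size_exceeded(struct record *rec)
	{
		return rec->output_max_size &&
		       (record__bytes_written(rec) >= rec->output_max_size);
	}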
238 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
241 struct perf_data_file *file = &rec->session->data->file;
253 rec->thread_bytes_written += size;
255 rec->bytes_written += size;
258 if (record__output_max_size_exceeded(rec) && !done) {
261 record__bytes_written(rec) >> 10);
265 if (switch_output_size(rec))
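The record__write() fragments (lines 238-265) show the accounting path around the actual I/O: each write bumps either the per-thread or the global byte counter, then both size triggers are checked. A hedged sketch of that control flow; the real function writes through perf_data_file, and the per_thread flag below is a stand-in for the map/file test that picks the counter:

	#include <inttypes.h>
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t u64;

	static volatile int done;          /* set when the session should stop */

	static void trigger_hit(void) { }  /* stand-in for the switch-output trigger */

	struct record {
		bool per_thread;               /* stand-in for the map->file test */
		u64 bytes_written, thread_bytes_written;
		u64 output_max_size;           /* 0 means no limit */
		u64 switch_size;               /* stand-in for rec->switch_output.size */
	};

	/* Sketch of record__write()'s accounting; the real function performs
	 * the write through perf_data_file and fails on short writes. */
	static int record__write_sketch(struct record *rec, const void *buf, size_t size)
	{
		(void)buf;                     /* actual I/O elided */

		if (rec->per_thread)
			rec->thread_bytes_written += size;
		else
			rec->bytes_written += size;

		u64 total = rec->bytes_written + rec->thread_bytes_written;

		if (rec->output_max_size && total >= rec->output_max_size && !done) {
			fprintf(stderr, "[ perf record: output size limit reached (%" PRIu64 " KB) ]\n",
				total >> 10);
			done = 1;              /* the real code stops the session the same way */
		}
		if (rec->switch_size && rec->bytes_written >= rec->switch_size)
			trigger_hit();         /* real code: trigger_hit(&switch_output_trigger) */
		return 0;
	}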
271 static int record__aio_enabled(struct record *rec);
272 static int record__comp_enabled(struct record *rec);
384 struct record *rec;
407 if (record__comp_enabled(aio->rec)) {
408 size = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
434 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
437 int trace_fd = rec->session->data->file.fd;
438 struct record_aio aio = { .rec = rec, .size = 0 };
451 rec->samples++;
455 rec->bytes_written += aio.size;
456 if (switch_output_size(rec))
481 static void record__aio_mmap_read_sync(struct record *rec)
484 struct evlist *evlist = rec->evlist;
487 if (!record__aio_enabled(rec))
521 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
536 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
541 static int record__aio_enabled(struct record *rec)
543 return rec->opts.nr_cblocks > 0;
602 static int record__comp_enabled(struct record *rec)
604 return rec->opts.comp_level > 0;
612 struct record *rec = container_of(tool, struct record, tool);
613 return record__write(rec, NULL, event, event->header.size);
633 struct record *rec = to;
635 if (record__comp_enabled(rec)) {
636 size = zstd_compress(rec->session, map, map->data, mmap__mmap_len(map), bf, size);
641 return record__write(rec, map, bf, size);
702 struct record *rec = container_of(tool, struct record, tool);
703 struct perf_data *data = &rec->data;
715 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
726 record__write(rec, map, event, event->header.size);
727 record__write(rec, map, data1, len1);
729 record__write(rec, map, data2, len2);
730 record__write(rec, map, &pad, padding);
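Lines 726-730 emit one AUX trace record as up to four writes: the event header, two data chunks (two because the AUX ring buffer may wrap), and trailing padding. A simplified sketch of the padding math, assuming the 8-byte alignment perf events use:

	#include <stddef.h>
	#include <string.h>

	/* Pad the AUX payload to 8 bytes, mirroring the event/data1/data2/pad
	 * write sequence shown above. Returns how many pad bytes to append. */
	static size_t auxtrace_pad_len(size_t len1, size_t len2, unsigned char pad_out[8])
	{
		size_t padding = (len1 + len2) & 7;   /* bytes beyond the last boundary */

		if (padding)
			padding = 8 - padding;        /* bytes needed to realign */
		memset(pad_out, 0, padding);
		return padding;
	}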
735 static int record__auxtrace_mmap_read(struct record *rec,
740 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
746 rec->samples++;
751 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
756 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
758 rec->opts.auxtrace_snapshot_size);
763 rec->samples++;
768 static int record__auxtrace_read_snapshot_all(struct record *rec)
773 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
774 struct mmap *map = &rec->evlist->mmap[i];
779 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
788 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
791 if (record__auxtrace_read_snapshot_all(rec) < 0) {
794 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
801 static int record__auxtrace_snapshot_exit(struct record *rec)
807 auxtrace_record__snapshot_start(rec->itr))
810 record__read_auxtrace_snapshot(rec, true);
817 static int record__auxtrace_init(struct record *rec)
821 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
822 && record__threads_enabled(rec)) {
827 if (!rec->itr) {
828 rec->itr = auxtrace_record__init(rec->evlist, &err);
833 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
834 rec->opts.auxtrace_snapshot_opts);
838 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
839 rec->opts.auxtrace_sample_opts);
843 auxtrace_regroup_aux_output(rec->evlist);
845 return auxtrace_parse_filters(rec->evlist);
851 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
858 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
870 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
875 static int record__auxtrace_init(struct record *rec __maybe_unused)
904 static int record__config_off_cpu(struct record *rec)
906 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
1070 static void record__free_thread_data(struct record *rec)
1073 struct record_thread *thread_data = rec->thread_data;
1078 for (t = 0; t < rec->nr_threads; t++) {
1085 zfree(&rec->thread_data);
1088 static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
1092 size_t x = rec->index_map_cnt;
1094 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
1096 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
1097 rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
1098 rec->index_map_cnt += 1;
1102 static int record__update_evlist_pollfd_from_thread(struct record *rec,
1111 for (i = 0; i < rec->index_map_cnt; i++) {
1112 int e_pos = rec->index_map[i].evlist_pollfd_index;
1113 int t_pos = rec->index_map[i].thread_pollfd_index;
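Lines 1088-1113 maintain a table pairing each evlist pollfd slot with the matching slot in a worker thread's private pollfd array, so poll results observed by the thread can be handed back to the main loop. A reduced sketch of that copy-back step, using plain struct pollfd in place of perf's wrappers:

	#include <errno.h>
	#include <poll.h>
	#include <stddef.h>

	struct pollfd_index_map {
		int evlist_pollfd_index;   /* slot in the main evlist pollfd array */
		int thread_pollfd_index;   /* matching slot in the thread's array */
	};

	/* Sketch: hand revents observed by a worker thread back to the
	 * corresponding entries in the main evlist pollfd array. */
	static int update_pollfd_from_thread(const struct pollfd_index_map *map,
					     size_t map_cnt,
					     struct pollfd *evlist_fds,
					     const struct pollfd *thread_fds)
	{
		for (size_t i = 0; i < map_cnt; i++) {
			struct pollfd *e = &evlist_fds[map[i].evlist_pollfd_index];
			const struct pollfd *t = &thread_fds[map[i].thread_pollfd_index];

			if (e->fd != t->fd)        /* both slots must describe the same fd */
				return -EINVAL;
			e->revents = t->revents;   /* the main loop consumes these bits */
		}
		return 0;
	}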
1126 static int record__dup_non_perf_events(struct record *rec,
1143 ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
1152 static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
1157 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
1158 if (!rec->thread_data) {
1162 thread_data = rec->thread_data;
1164 for (t = 0; t < rec->nr_threads; t++)
1167 for (t = 0; t < rec->nr_threads; t++) {
1168 thread_data[t].rec = rec;
1169 thread_data[t].mask = &rec->thread_masks[t];
1200 ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
1211 record__free_thread_data(rec);
1216 static int record__mmap_evlist(struct record *rec,
1220 struct record_opts *opts = &rec->opts;
1254 ret = record__alloc_thread_data(rec, evlist);
1258 if (record__threads_enabled(rec)) {
1259 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
1266 evlist->mmap[i].file = &rec->data.dir.files[i];
1268 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
1275 static int record__mmap(struct record *rec)
1277 return record__mmap_evlist(rec, rec->evlist);
1280 static int record__open(struct record *rec)
1284 struct evlist *evlist = rec->evlist;
1285 struct perf_session *session = rec->session;
1286 struct record_opts *opts = &rec->opts;
1360 rc = record__mmap(rec);
1370 static void set_timestamp_boundary(struct record *rec, u64 sample_time)
1372 if (rec->evlist->first_sample_time == 0)
1373 rec->evlist->first_sample_time = sample_time;
1376 rec->evlist->last_sample_time = sample_time;
1385 struct record *rec = container_of(tool, struct record, tool);
1387 set_timestamp_boundary(rec, sample->time);
1389 if (rec->buildid_all)
1392 rec->samples++;
1396 static int process_buildids(struct record *rec)
1398 struct perf_session *session = rec->session;
1400 if (perf_data__size(&rec->data) == 0)
1420 if (rec->buildid_all && !rec->timestamp_boundary)
1421 rec->tool.sample = NULL;
1465 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1467 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1522 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1525 u64 bytes_written = rec->bytes_written;
1530 int trace_fd = rec->data.file.fd;
1545 if (record__aio_enabled(rec))
1553 record__adjust_affinity(rec, map);
1558 if (!record__aio_enabled(rec)) {
1559 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1566 if (record__aio_push(rec, map, &off) < 0) {
1578 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1579 !rec->opts.auxtrace_sample_mode &&
1580 record__auxtrace_mmap_read(rec, map) != 0) {
1586 if (record__aio_enabled(rec))
1597 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
1598 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1606 static int record__mmap_read_all(struct record *rec, bool synch)
1610 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1614 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1649 if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1678 record__mmap_read_all(thread->rec, true);
1688 static void record__init_features(struct record *rec)
1690 struct perf_session *session = rec->session;
1696 if (rec->no_buildid)
1700 if (!have_tracepoints(&rec->evlist->core.entries))
1704 if (!rec->opts.branch_stack)
1707 if (!rec->opts.full_auxtrace)
1710 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1713 if (!rec->opts.use_clockid)
1716 if (!record__threads_enabled(rec))
1719 if (!record__comp_enabled(rec))
1726 record__finish_output(struct record *rec)
1729 struct perf_data *data = &rec->data;
1735 rec->session->header.data_size += rec->bytes_written;
1737 if (record__threads_enabled(rec)) {
1742 if (!rec->no_buildid) {
1743 process_buildids(rec);
1745 if (rec->buildid_all)
1746 dsos__hit_all(rec->session);
1748 perf_session__write_header(rec->session, rec->evlist, fd, true);
1753 static int record__synthesize_workload(struct record *rec, bool tail)
1757 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
1759 if (rec->opts.tail_synthesize != tail)
1762 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1766 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1768 &rec->session->machines.host,
1770 rec->opts.sample_address);
1775 static int write_finished_init(struct record *rec, bool tail)
1777 if (rec->opts.tail_synthesize != tail)
1780 return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
1783 static int record__synthesize(struct record *rec, bool tail);
1786 record__switch_output(struct record *rec, bool at_exit)
1788 struct perf_data *data = &rec->data;
1795 record__aio_mmap_read_sync(rec);
1797 write_finished_init(rec, true);
1799 record__synthesize(rec, true);
1800 if (target__none(&rec->opts.target))
1801 record__synthesize_workload(rec, true);
1803 rec->samples = 0;
1804 record__finish_output(rec);
1812 rec->session->header.data_offset,
1815 rec->bytes_written = 0;
1816 rec->session->header.data_size = 0;
1823 if (rec->switch_output.num_files) {
1824 int n = rec->switch_output.cur_file + 1;
1826 if (n >= rec->switch_output.num_files)
1828 rec->switch_output.cur_file = n;
1829 if (rec->switch_output.filenames[n]) {
1830 remove(rec->switch_output.filenames[n]);
1831 zfree(&rec->switch_output.filenames[n]);
1833 rec->switch_output.filenames[n] = new_filename;
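Lines 1823-1833 implement --switch-output's num_files limit by treating the filenames array as a ring: advance the cursor, delete and free whatever stale file it points to, then store the new name. A minimal sketch, assuming the array was calloc'd as at line 4033:

	#include <stdio.h>   /* remove() */
	#include <stdlib.h>  /* free()   */

	struct switch_output {
		int num_files;     /* ring capacity, from --switch-output */
		int cur_file;      /* cursor into filenames[] */
		char **filenames;  /* calloc'd array of num_files slots */
	};

	/* Keep only the most recent num_files outputs: advance the cursor,
	 * unlink and free the stale entry, and take ownership of the new name. */
	static void switch_output_remember(struct switch_output *s, char *new_filename)
	{
		int n = s->cur_file + 1;

		if (n >= s->num_files)
			n = 0;                    /* wrap around the ring */
		s->cur_file = n;
		if (s->filenames[n]) {
			remove(s->filenames[n]); /* delete the oldest data file */
			free(s->filenames[n]);
		}
		s->filenames[n] = new_filename;
	}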
1840 record__synthesize(rec, false);
1851 if (target__none(&rec->opts.target))
1852 record__synthesize_workload(rec, false);
1853 write_finished_init(rec, false);
1858 static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
1877 record__write(rec, NULL, lost, lost->header.size);
1880 static void record__read_lost_samples(struct record *rec)
1882 struct perf_session *session = rec->session;
1920 __record__save_lost_samples(rec, evsel, lost,
1928 __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
1965 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
1967 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
1973 static int record__synthesize(struct record *rec, bool tail)
1975 struct perf_session *session = rec->session;
1977 struct perf_data *data = &rec->data;
1978 struct record_opts *opts = &rec->opts;
1979 struct perf_tool *tool = &rec->tool;
1983 if (rec->opts.tail_synthesize != tail)
1992 rec->bytes_written += err;
1995 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
2007 if (rec->opts.full_auxtrace) {
2008 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
2014 if (!evlist__exclude_kernel(rec->evlist)) {
2033 err = perf_event__synthesize_extra_attr(&rec->tool,
2034 rec->evlist,
2040 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
2048 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
2062 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
2071 if (rec->opts.nr_threads_synthesize > 1) {
2077 if (rec->opts.synth & PERF_SYNTH_TASK) {
2078 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
2081 rec->evlist->core.threads,
2083 rec->opts.nr_threads_synthesize);
2086 if (rec->opts.nr_threads_synthesize > 1) {
2097 struct record *rec = data;
2098 pthread_kill(rec->thread_id, SIGUSR2);
2102 static int record__setup_sb_evlist(struct record *rec)
2104 struct record_opts *opts = &rec->opts;
2106 if (rec->sb_evlist != NULL) {
2112 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
2113 rec->thread_id = pthread_self();
2117 if (rec->sb_evlist == NULL) {
2118 rec->sb_evlist = evlist__new();
2120 if (rec->sb_evlist == NULL) {
2126 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
2132 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
2140 static int record__init_clock(struct record *rec)
2142 struct perf_session *session = rec->session;
2147 if (!rec->opts.use_clockid)
2150 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
2151 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
2153 session->header.env.clock.clockid = rec->opts.clockid;
2160 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
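record__init_clock() (lines 2140-2160) stores the session's clockid plus a reference reading so timestamps can later be related to wall-clock time. The listing elides the details, so this is only a hedged sketch of capturing such a reference pair; the struct and function names here are hypothetical, not perf's:

	#include <stdint.h>
	#include <time.h>

	struct clock_ref {
		uint64_t tod_ns;      /* CLOCK_REALTIME at the reference point */
		uint64_t clockid_ns;  /* session clock at (nearly) the same instant */
	};

	/* Read the wall clock and the session clock back to back; the pair
	 * lets later tooling convert session timestamps to wall-clock time. */
	static int clock_ref_capture(clockid_t session_clk, struct clock_ref *ref)
	{
		struct timespec tod, ses;

		if (clock_gettime(CLOCK_REALTIME, &tod) || clock_gettime(session_clk, &ses))
			return -1;
		ref->tod_ns     = (uint64_t)tod.tv_sec * 1000000000ULL + tod.tv_nsec;
		ref->clockid_ns = (uint64_t)ses.tv_sec * 1000000000ULL + ses.tv_nsec;
		return 0;
	}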
2177 static void hit_auxtrace_snapshot_trigger(struct record *rec)
2182 if (auxtrace_record__snapshot_start(rec->itr))
2205 static int record__start_threads(struct record *rec)
2207 int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
2208 struct record_thread *thread_data = rec->thread_data;
2215 if (!record__threads_enabled(rec))
2245 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2249 thread->tid, rec->thread_data[t].tid);
2268 static int record__stop_threads(struct record *rec)
2271 struct record_thread *thread_data = rec->thread_data;
2273 for (t = 1; t < rec->nr_threads; t++)
2276 for (t = 0; t < rec->nr_threads; t++) {
2277 rec->samples += thread_data[t].samples;
2278 if (!record__threads_enabled(rec))
2280 rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2281 rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2294 static unsigned long record__waking(struct record *rec)
2298 struct record_thread *thread_data = rec->thread_data;
2300 for (t = 0; t < rec->nr_threads; t++)
2306 static int __cmd_record(struct record *rec, int argc, const char **argv)
2311 struct perf_tool *tool = &rec->tool;
2312 struct record_opts *opts = &rec->opts;
2313 struct perf_data *data = &rec->data;
2326 if (rec->opts.record_namespaces)
2329 if (rec->opts.record_cgroup) {
2338 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2340 if (rec->opts.auxtrace_snapshot_mode)
2342 if (rec->switch_output.enabled)
2354 if (record__threads_enabled(rec)) {
2355 if (perf_data__is_pipe(&rec->data)) {
2359 if (rec->opts.full_auxtrace) {
2366 rec->session = session;
2368 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2379 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
2388 session->header.env.comp_level = rec->opts.comp_level;
2390 if (rec->opts.kcore &&
2396 if (record__init_clock(rec))
2399 record__init_features(rec);
2402 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2417 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
2418 rec->opts.sample_id = true;
2420 if (rec->timestamp_filename && perf_data__is_pipe(data)) {
2421 rec->timestamp_filename = false;
2425 evlist__uniquify_name(rec->evlist);
2429 if (record__open(rec) != 0) {
2437 if (rec->opts.kcore) {
2449 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
2451 rec->tool.ordered_events = false;
2454 if (evlist__nr_groups(rec->evlist) == 0)
2462 err = perf_session__write_header(session, rec->evlist, fd, false);
2468 if (!rec->no_buildid
2475 err = record__setup_sb_evlist(rec);
2479 err = record__synthesize(rec, false);
2483 if (rec->realtime_prio) {
2486 param.sched_priority = rec->realtime_prio;
2494 if (record__start_threads(rec))
2503 evlist__enable(rec->evlist);
2526 rec->evlist->workload.pid,
2546 rec->evlist->workload.pid,
2551 evlist__start_workload(rec->evlist);
2558 evlist__enable(rec->evlist);
2563 err = event_enable_timer__start(rec->evlist->eet);
2579 err = write_finished_init(rec, false);
2587 * rec->evlist->bkw_mmap_state is possible to be
2589 * hits != rec->samples in previous round.
2595 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
2597 if (record__mmap_read_all(rec, false) < 0) {
2607 record__read_auxtrace_snapshot(rec, false);
2625 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2634 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
2638 record__waking(rec));
2640 fd = record__switch_output(rec, false);
2649 if (rec->switch_output.time)
2650 alarm(rec->switch_output.time);
2669 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
2674 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2677 hit_auxtrace_snapshot_trigger(rec);
2678 evlist__ctlfd_ack(rec->evlist);
2694 err = event_enable_timer__process(rec->evlist->eet);
2709 evlist__disable(rec->evlist);
2718 record__auxtrace_snapshot_exit(rec);
2724 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2734 record__waking(rec));
2736 write_finished_init(rec, true);
2738 if (target__none(&rec->opts.target))
2739 record__synthesize_workload(rec, true);
2742 record__stop_threads(rec);
2743 record__mmap_read_all(rec, true);
2745 record__free_thread_data(rec);
2746 evlist__finalize_ctlfd(rec->evlist);
2747 record__aio_mmap_read_sync(rec);
2749 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2750 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2758 kill(rec->evlist->workload.pid, SIGTERM);
2771 if (rec->off_cpu)
2772 rec->bytes_written += off_cpu_write(rec->session);
2774 record__read_lost_samples(rec);
2775 record__synthesize(rec, true);
2777 rec->samples = 0;
2780 if (!rec->timestamp_filename) {
2781 record__finish_output(rec);
2783 fd = record__switch_output(rec, true);
2795 const char *postfix = rec->timestamp_filename ?
2798 if (rec->samples && !rec->opts.full_auxtrace)
2800 " (%" PRIu64 " samples)", rec->samples);
2809 rec->session->bytes_transferred / 1024.0 / 1024.0,
2828 evlist__stop_sb_thread(rec->sb_evlist);
2892 struct record *rec = cb;
2896 rec->no_buildid_cache = false;
2898 rec->no_buildid_cache = true;
2900 rec->no_buildid = true;
2902 rec->buildid_mmap = true;
2913 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2914 if (!rec->opts.nr_cblocks)
2915 rec->opts.nr_cblocks = nr_cblocks_default;
2919 rec->debuginfod.urls = strdup(value);
2920 if (!rec->debuginfod.urls)
2922 rec->debuginfod.set = true;
2930 struct record *rec = (struct record *)opt->value;
2932 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
3105 static void switch_output_size_warn(struct record *rec)
3107 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
3108 struct switch_output *s = &rec->switch_output;
3122 static int switch_output_setup(struct record *rec)
3124 struct switch_output *s = &rec->switch_output;
3146 if (rec->switch_output_event_set) {
3147 if (record__threads_enabled(rec)) {
3157 if (record__threads_enabled(rec)) {
3187 rec->timestamp_filename = true;
3190 if (s->size && !rec->opts.no_buffering)
3191 switch_output_size_warn(rec);
3233 struct record *rec = container_of(tool, struct record, tool);
3235 set_timestamp_boundary(rec, sample->time);
3559 static void record__free_thread_masks(struct record *rec, int nr_threads)
3563 if (rec->thread_masks)
3565 record__thread_mask_free(&rec->thread_masks[t]);
3567 zfree(&rec->thread_masks);
3570 static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3574 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3575 if (!rec->thread_masks) {
3581 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3591 record__free_thread_masks(rec, nr_threads);
3596 static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3600 ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3604 rec->nr_threads = nr_cpus;
3605 pr_debug("nr_threads: %d\n", rec->nr_threads);
3607 for (t = 0; t < rec->nr_threads; t++) {
3608 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3609 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
3612 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3614 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
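record__init_thread_cpu_masks() (lines 3596-3619) starts one worker thread per CPU and sets a single bit in both that thread's maps bitmap (which mmaps it services) and its affinity bitmap. A toy restatement with plain 64-bit masks instead of perf's mmap_cpu_mask, so it only models machines with at most 64 CPUs:

	#include <stdint.h>

	struct thread_mask {
		uint64_t maps;      /* which CPUs' mmaps this thread reads */
		uint64_t affinity;  /* which CPUs this thread may run on */
	};

	/* Toy model of the per-CPU layout: thread t services exactly one CPU
	 * and is affinitized to that same CPU. */
	static void init_thread_cpu_masks(struct thread_mask *masks,
					  const int *cpus, int nr_cpus)
	{
		for (int t = 0; t < nr_cpus; t++) {
			masks[t].maps     = UINT64_C(1) << cpus[t];
			masks[t].affinity = UINT64_C(1) << cpus[t];
		}
	}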
3621 static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3699 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3705 rec->thread_masks = thread_masks;
3706 rec->thread_masks[t] = thread_mask;
3709 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3711 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3720 rec->nr_threads = t;
3721 pr_debug("nr_threads: %d\n", rec->nr_threads);
3722 if (!rec->nr_threads)
3735 static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3746 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3753 static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3764 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3771 static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3793 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3803 static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3810 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3856 ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3873 static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3877 ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3881 if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
3884 rec->nr_threads = 1;
3889 static int record__init_thread_masks(struct record *rec)
3892 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
3894 if (!record__threads_enabled(rec))
3895 return record__init_thread_default_masks(rec, cpus);
3897 if (evlist__per_thread(rec->evlist)) {
3902 switch (rec->opts.threads_spec) {
3904 ret = record__init_thread_cpu_masks(rec, cpus);
3907 ret = record__init_thread_core_masks(rec, cpus);
3910 ret = record__init_thread_package_masks(rec, cpus);
3913 ret = record__init_thread_numa_masks(rec, cpus);
3916 ret = record__init_thread_user_masks(rec, cpus);
3928 struct record *rec = &record;
3939 rec->opts.affinity = PERF_AFFINITY_SYS;
3941 rec->evlist = evlist__new();
3942 if (rec->evlist == NULL)
3945 err = perf_config(perf_record_config, rec);
3961 if (!argc && target__none(&rec->opts.target))
3962 rec->opts.target.system_wide = true;
3964 if (nr_cgroups && !rec->opts.target.system_wide) {
3970 if (rec->buildid_mmap) {
3980 rec->opts.build_id = true;
3982 rec->no_buildid = true;
3985 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
3991 if (rec->opts.kcore)
3992 rec->opts.text_poke = true;
3994 if (rec->opts.kcore || record__threads_enabled(rec))
3995 rec->data.is_dir = true;
3997 if (record__threads_enabled(rec)) {
3998 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
4002 if (record__aio_enabled(rec)) {
4008 if (rec->opts.comp_level != 0) {
4010 rec->no_buildid = true;
4013 if (rec->opts.record_switch_events &&
4021 if (switch_output_setup(rec)) {
4027 if (rec->switch_output.time) {
4029 alarm(rec->switch_output.time);
4032 if (rec->switch_output.num_files) {
4033 rec->switch_output.filenames = calloc(sizeof(char *),
4034 rec->switch_output.num_files);
4035 if (!rec->switch_output.filenames) {
4041 if (rec->timestamp_filename && record__threads_enabled(rec)) {
4042 rec->timestamp_filename = false;
4054 err = record__auxtrace_init(rec);
4063 if (rec->no_buildid_cache || rec->no_buildid) {
4065 } else if (rec->switch_output.enabled) {
4077 * if ((rec->no_buildid || !rec->no_buildid_set) &&
4078 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
4083 if (rec->no_buildid_set && !rec->no_buildid)
4085 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
4088 rec->no_buildid = true;
4089 rec->no_buildid_cache = true;
4097 if (rec->evlist->core.nr_entries == 0) {
4100 err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
4105 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
4106 rec->opts.no_inherit = true;
4108 err = target__validate(&rec->opts.target);
4110 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4114 err = target__parse_uid(&rec->opts.target);
4118 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4126 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
4128 evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
4131 arch__add_leaf_frame_record_opts(&rec->opts);
4134 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
4135 if (rec->opts.target.pid != NULL) {
4144 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
4153 if (rec->opts.full_auxtrace)
4154 rec->buildid_all = true;
4156 if (rec->opts.text_poke) {
4157 err = record__config_text_poke(rec->evlist);
4164 if (rec->off_cpu) {
4165 err = record__config_off_cpu(rec);
4172 if (record_opts__config(&rec->opts)) {
4177 err = record__init_thread_masks(rec);
4183 if (rec->opts.nr_cblocks > nr_cblocks_max)
4184 rec->opts.nr_cblocks = nr_cblocks_max;
4185 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
4187 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
4188 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
4190 if (rec->opts.comp_level > comp_level_max)
4191 rec->opts.comp_level = comp_level_max;
4192 pr_debug("comp level: %d\n", rec->opts.comp_level);
4196 evlist__delete(rec->evlist);
4198 auxtrace_record__free(rec->itr);
4200 record__free_thread_masks(rec, rec->nr_threads);
4201 rec->nr_threads = 0;
4202 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
4208 struct record *rec = &record;
4210 hit_auxtrace_snapshot_trigger(rec);
4212 if (switch_output_signal(rec))
4218 struct record *rec = &record;
4220 if (switch_output_time(rec))