Lines Matching defs:sample

748 				struct perf_sample *sample)
763 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
775 struct perf_sample *sample __maybe_unused)
794 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
806 struct perf_sample *sample __maybe_unused)
821 union perf_event *event, struct perf_sample *sample __maybe_unused)
829 union perf_event *event, struct perf_sample *sample)
832 sample->id, event->lost_samples.lost);
895 struct perf_sample *sample __maybe_unused)
963 struct perf_sample *sample __maybe_unused)
987 struct perf_sample *sample)
994 sample);
995 return machine__process_ksymbol_register(machine, event, sample);
999 struct perf_sample *sample __maybe_unused)
1928 struct perf_sample *sample)
1949 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1950 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1996 struct perf_sample *sample)
2006 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
2007 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
2086 struct perf_sample *sample)
2141 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
2152 struct perf_sample *sample __maybe_unused)
2168 struct perf_sample *sample)
2174 ret = machine__process_comm_event(machine, event, sample); break;
2176 ret = machine__process_mmap_event(machine, event, sample); break;
2178 ret = machine__process_namespaces_event(machine, event, sample); break;
2180 ret = machine__process_cgroup_event(machine, event, sample); break;
2182 ret = machine__process_mmap2_event(machine, event, sample); break;
2184 ret = machine__process_fork_event(machine, event, sample); break;
2186 ret = machine__process_exit_event(machine, event, sample); break;
2188 ret = machine__process_lost_event(machine, event, sample); break;
2194 ret = machine__process_lost_samples_event(machine, event, sample); break;
2199 ret = machine__process_ksymbol(machine, event, sample); break;
2201 ret = machine__process_bpf(machine, event, sample); break;
2203 ret = machine__process_text_poke(machine, event, sample); break;
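
These fragments appear to come from perf's tools/perf/util/machine.c; the block at 2168-2203 looks like the body of machine__process_event(), which fans each incoming PERF_RECORD_* event out to the per-type handlers listed above (comm, mmap, namespaces, fork, exit, lost, ksymbol, bpf, text_poke, ...). Below is a minimal, self-contained sketch of that switch-on-type dispatch pattern; the struct and handler names are simplified stand-ins, not the real perf types.

/* Simplified stand-ins; the real machine/event/sample types live in
 * tools/perf/util/ and carry far more state. */
#include <stdio.h>

enum rec_type { REC_COMM, REC_MMAP, REC_FORK };

struct event   { enum rec_type type; };
struct sample  { unsigned long long time; };
struct machine { const char *name; };

static int process_comm(struct machine *m, struct event *e, struct sample *s)
{
        (void)e;
        printf("%s: comm event at time %llu\n", m->name, s->time);
        return 0;
}

static int process_mmap(struct machine *m, struct event *e, struct sample *s)
{
        (void)e;
        printf("%s: mmap event at time %llu\n", m->name, s->time);
        return 0;
}

/* Same shape as the dispatch at 2168-2203: one switch, one handler per type. */
static int process_event(struct machine *m, struct event *e, struct sample *s)
{
        int ret;

        switch (e->type) {
        case REC_COMM: ret = process_comm(m, e, s); break;
        case REC_MMAP: ret = process_mmap(m, e, s); break;
        default:       ret = -1;                    break;
        }
        return ret;
}

int main(void)
{
        struct machine m = { "host" };
        struct event   e = { REC_COMM };
        struct sample  s = { 12345 };

        return process_event(&m, &e, &s);
}
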
2269 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
2277 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
2279 sample->addr, sample->phys_addr,
2280 sample->data_page_size);
2281 mi->data_src.val = sample->data_src;
2400 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2404 const struct branch_stack *bs = sample->branch_stack;
2405 struct branch_entry *entries = perf_sample__branch_entries(sample);
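
sample__resolve_bstack() (2400-2405) walks the sample's recorded branch stack through perf_sample__branch_entries(), which hands back the entry array regardless of whether the optional hardware index word is present. A toy, self-contained sketch of walking such an array follows, with simplified types in place of perf's struct branch_stack / struct branch_entry (the real entries also carry prediction and other flags, and the most recent branch is typically first).

#include <stdio.h>
#include <stdlib.h>

/* Simplified local copies of the perf structures. */
struct branch_entry { unsigned long long from, to; };
struct branch_stack {
        unsigned long long  nr;
        struct branch_entry entries[];
};

/* Walk the recorded branches in array order, roughly as
 * sample__resolve_bstack() does when it builds its branch_info array. */
static void print_branches(const struct branch_stack *bs)
{
        for (unsigned long long i = 0; i < bs->nr; i++)
                printf("branch %llu: %#llx -> %#llx\n",
                       i, bs->entries[i].from, bs->entries[i].to);
}

int main(void)
{
        struct branch_stack *bs = malloc(sizeof(*bs) + 2 * sizeof(struct branch_entry));

        if (!bs)
                return 1;
        bs->nr = 2;
        bs->entries[0] = (struct branch_entry){ 0x401000, 0x402000 };
        bs->entries[1] = (struct branch_entry){ 0x400800, 0x401000 };
        print_branches(bs);
        free(bs);
        return 0;
}
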
2485 struct perf_sample *sample,
2491 struct ip_callchain *chain = sample->callchain;
2544 struct perf_sample *sample,
2550 struct branch_stack *lbr_stack = sample->branch_stack;
2551 struct branch_entry *entries = perf_sample__branch_entries(sample);
2589 * It's impossible to stitch the whole LBRs of previous sample.
2702 /* Find the physical index of the base-of-stack for current sample. */
2707 /* Previous sample has shorter stack. Nothing can be stitched. */
2717 * Starts from the base-of-stack of current sample.
2731 * Save the LBRs between the base-of-stack of previous sample
2732 * and the base-of-stack of current sample into lbr_stitch->lists.
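
The comments at 2589-2732 describe the LBR stitching heuristic: locate the base-of-stack of the current sample inside the previous sample's deeper stack, give up if the previous stack is shorter, and otherwise save the previous entries that lie beyond that base so they can be appended later. The following is a toy sketch of that idea only, not the real has_stitched_lbr() implementation, assuming entries ordered newest-first with the base at index nr - 1.

#include <stdio.h>

/* Return the number of trailing entries of prev[] that extend beyond the
 * base entry of curr[], or 0 when nothing can be stitched. */
static int stitchable_entries(const unsigned long long *prev, int prev_nr,
                              const unsigned long long *curr, int curr_nr)
{
        unsigned long long curr_base = curr[curr_nr - 1];
        int i;

        /* Locate the current base-of-stack inside the previous stack. */
        for (i = 0; i < prev_nr; i++) {
                if (prev[i] == curr_base)
                        break;
        }

        /* Previous sample has a shorter (or unrelated) stack: nothing to stitch. */
        if (i >= prev_nr - 1)
                return 0;

        return prev_nr - 1 - i;
}

int main(void)
{
        unsigned long long prev[] = { 0x30, 0x20, 0x10, 0x08, 0x04 };
        unsigned long long curr[] = { 0x40, 0x30, 0x20, 0x10 };

        printf("%d entries could be stitched\n",
               stitchable_entries(prev, 5, curr, 4));
        return 0;
}
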
2785 * Resolve LBR callstack chain sample
2793 struct perf_sample *sample,
2800 struct ip_callchain *chain = sample->callchain;
2816 if (thread__lbr_stitch_enable(thread) && !sample->no_hw_idx &&
2820 stitched_lbr = has_stitched_lbr(thread, sample,
2828 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2833 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2839 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2856 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2862 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2895 static u64 get_leaf_frame_caller(struct perf_sample *sample,
2899 return get_leaf_frame_caller_aarch64(sample, thread, usr_idx);
2907 struct perf_sample *sample,
2912 struct branch_stack *branch = sample->branch_stack;
2913 struct branch_entry *entries = perf_sample__branch_entries(sample);
2914 struct ip_callchain *chain = sample->callchain;
2928 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2943 * more context for a sample than just the callers.
3056 leaf_frame_caller = get_leaf_frame_caller(sample, thread, usr_idx);
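
thread__resolve_callchain_sample() (2907-3056) walks the raw ip_callchain attached to the sample; values at or above PERF_CONTEXT_MAX are context markers (PERF_CONTEXT_KERNEL, PERF_CONTEXT_USER, ...) that set the cpumode for the addresses that follow, rather than addresses themselves. A minimal sketch of that walk, using the UAPI marker values from include/uapi/linux/perf_event.h but simplified local types:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Marker values as defined by the perf UAPI (enum perf_callchain_context). */
#define PERF_CONTEXT_KERNEL     ((uint64_t)-128)
#define PERF_CONTEXT_USER       ((uint64_t)-512)
#define PERF_CONTEXT_MAX        ((uint64_t)-4095)

/* Simplified local copy of struct ip_callchain. */
struct ip_callchain {
        uint64_t nr;
        uint64_t ips[];
};

static void walk_chain(const struct ip_callchain *chain)
{
        const char *ctx = "unknown";

        for (uint64_t i = 0; i < chain->nr; i++) {
                uint64_t ip = chain->ips[i];

                if (ip >= PERF_CONTEXT_MAX) {   /* context marker, not an address */
                        ctx = ip == PERF_CONTEXT_KERNEL ? "kernel" :
                              ip == PERF_CONTEXT_USER   ? "user"   : "other";
                        continue;
                }
                printf("%s ip: %#" PRIx64 "\n", ctx, ip);
        }
}

int main(void)
{
        /* One kernel frame followed by two user frames. */
        uint64_t ips[] = { PERF_CONTEXT_KERNEL, 0xffffffff81234567ULL,
                           PERF_CONTEXT_USER, 0x401234, 0x401500 };
        struct ip_callchain *chain = malloc(sizeof(*chain) + sizeof(ips));

        if (!chain)
                return 1;
        chain->nr = sizeof(ips) / sizeof(ips[0]);
        memcpy(chain->ips, ips, sizeof(ips));
        walk_chain(chain);
        free(chain);
        return 0;
}
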
3155 struct perf_sample *sample,
3164 if ((!sample->user_regs.regs) ||
3165 (!sample->user_stack.size))
3169 thread, sample, max_stack, false);
3175 struct perf_sample *sample,
3189 evsel, sample,
3195 evsel, sample,
3199 evsel, sample,
3204 evsel, sample,
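
The final fragments (3155-3204) look like thread__resolve_callchain_unwind() and thread__resolve_callchain(): the former bails out when the sample carries no user registers or user stack, and the latter combines the sample-based resolver with the unwinder in an order that depends on the callchain configuration, stopping as soon as one step fails. A hedged sketch of that combination pattern, with illustrative names rather than the real perf API:

#include <stdio.h>

enum order { ORDER_CALLEE, ORDER_CALLER };

/* Stand-ins for the two resolution steps. */
static int resolve_from_sample(void)
{
        puts("resolve callchain from the sampled data");
        return 0;
}

static int resolve_by_unwind(void)
{
        puts("resolve callchain by unwinding the user stack");
        return 0;
}

/* Run both steps in the configured order, stopping on the first error,
 * mirroring the two pairs of calls seen at 3189-3204. */
static int resolve_callchain(enum order order)
{
        int ret;

        if (order == ORDER_CALLEE) {
                ret = resolve_from_sample();
                if (ret)
                        return ret;
                return resolve_by_unwind();
        }
        ret = resolve_by_unwind();
        if (ret)
                return ret;
        return resolve_from_sample();
}

int main(void)
{
        return resolve_callchain(ORDER_CALLEE);
}
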