Lines matching refs: event

43 						  union perf_event *event, u64 file_offset,
75 src = (void *)event + sizeof(struct perf_record_compressed);
76 src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
104 union perf_event *event,
183 struct ordered_event *event)
188 return perf_session__deliver_event(session, event->event,
189 session->tool, event->file_offset,
190 event->file_path);
257 * kernel MMAP event, in perf_event__process_mmap().
319 union perf_event *event
327 union perf_event *event __maybe_unused,
336 union perf_event *event __maybe_unused,
341 perf_event__fprintf_event_update(event, stdout);
348 union perf_event *event __maybe_unused,
358 union perf_event *event __maybe_unused,
367 union perf_event *event __maybe_unused,
390 union perf_event *event)
394 skipn(perf_data__fd(session->data), event->auxtrace.size);
395 return event->auxtrace.size;
399 union perf_event *event __maybe_unused)
408 union perf_event *event __maybe_unused)
411 perf_event__fprintf_thread_map(event, stdout);
419 union perf_event *event __maybe_unused)
422 perf_event__fprintf_cpu_map(event, stdout);
430 union perf_event *event __maybe_unused)
433 perf_event__fprintf_stat_config(event, stdout);
440 union perf_event *event)
443 perf_event__fprintf_stat(event, stdout);
450 union perf_event *event)
453 perf_event__fprintf_stat_round(event, stdout);
460 union perf_event *event)
463 perf_event__fprintf_time_conv(event, stdout);
470 union perf_event *event __maybe_unused,
562 static void swap_sample_id_all(union perf_event *event, void *data)
564 void *end = (void *) event + event->header.size;
571 static void perf_event__all64_swap(union perf_event *event,
574 struct perf_event_header *hdr = &event->header;
575 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
578 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
580 event->comm.pid = bswap_32(event->comm.pid);
581 event->comm.tid = bswap_32(event->comm.tid);
584 void *data = &event->comm.comm;
587 swap_sample_id_all(event, data);
591 static void perf_event__mmap_swap(union perf_event *event,
594 event->mmap.pid = bswap_32(event->mmap.pid);
595 event->mmap.tid = bswap_32(event->mmap.tid);
596 event->mmap.start = bswap_64(event->mmap.start);
597 event->mmap.len = bswap_64(event->mmap.len);
598 event->mmap.pgoff = bswap_64(event->mmap.pgoff);
601 void *data = &event->mmap.filename;
604 swap_sample_id_all(event, data);
608 static void perf_event__mmap2_swap(union perf_event *event,
611 event->mmap2.pid = bswap_32(event->mmap2.pid);
612 event->mmap2.tid = bswap_32(event->mmap2.tid);
613 event->mmap2.start = bswap_64(event->mmap2.start);
614 event->mmap2.len = bswap_64(event->mmap2.len);
615 event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
617 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
618 event->mmap2.maj = bswap_32(event->mmap2.maj);
619 event->mmap2.min = bswap_32(event->mmap2.min);
620 event->mmap2.ino = bswap_64(event->mmap2.ino);
621 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
625 void *data = &event->mmap2.filename;
628 swap_sample_id_all(event, data);
631 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
633 event->fork.pid = bswap_32(event->fork.pid);
634 event->fork.tid = bswap_32(event->fork.tid);
635 event->fork.ppid = bswap_32(event->fork.ppid);
636 event->fork.ptid = bswap_32(event->fork.ptid);
637 event->fork.time = bswap_64(event->fork.time);
640 swap_sample_id_all(event, &event->fork + 1);
643 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
645 event->read.pid = bswap_32(event->read.pid);
646 event->read.tid = bswap_32(event->read.tid);
647 event->read.value = bswap_64(event->read.value);
648 event->read.time_enabled = bswap_64(event->read.time_enabled);
649 event->read.time_running = bswap_64(event->read.time_running);
650 event->read.id = bswap_64(event->read.id);
653 swap_sample_id_all(event, &event->read + 1);
656 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
658 event->aux.aux_offset = bswap_64(event->aux.aux_offset);
659 event->aux.aux_size = bswap_64(event->aux.aux_size);
660 event->aux.flags = bswap_64(event->aux.flags);
663 swap_sample_id_all(event, &event->aux + 1);
666 static void perf_event__itrace_start_swap(union perf_event *event,
669 event->itrace_start.pid = bswap_32(event->itrace_start.pid);
670 event->itrace_start.tid = bswap_32(event->itrace_start.tid);
673 swap_sample_id_all(event, &event->itrace_start + 1);
676 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
678 if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
679 event->context_switch.next_prev_pid =
680 bswap_32(event->context_switch.next_prev_pid);
681 event->context_switch.next_prev_tid =
682 bswap_32(event->context_switch.next_prev_tid);
686 swap_sample_id_all(event, &event->context_switch + 1);
689 static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
691 event->text_poke.addr = bswap_64(event->text_poke.addr);
692 event->text_poke.old_len = bswap_16(event->text_poke.old_len);
693 event->text_poke.new_len = bswap_16(event->text_poke.new_len);
696 size_t len = sizeof(event->text_poke.old_len) +
697 sizeof(event->text_poke.new_len) +
698 event->text_poke.old_len +
699 event->text_poke.new_len;
700 void *data = &event->text_poke.old_len;
703 swap_sample_id_all(event, data);
707 static void perf_event__throttle_swap(union perf_event *event,
710 event->throttle.time = bswap_64(event->throttle.time);
711 event->throttle.id = bswap_64(event->throttle.id);
712 event->throttle.stream_id = bswap_64(event->throttle.stream_id);
715 swap_sample_id_all(event, &event->throttle + 1);
718 static void perf_event__namespaces_swap(union perf_event *event,
723 event->namespaces.pid = bswap_32(event->namespaces.pid);
724 event->namespaces.tid = bswap_32(event->namespaces.tid);
725 event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
727 for (i = 0; i < event->namespaces.nr_namespaces; i++) {
728 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
735 swap_sample_id_all(event, &event->namespaces.link_info[i]);
738 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
740 event->cgroup.id = bswap_64(event->cgroup.id);
743 void *data = &event->cgroup.path;
746 swap_sample_id_all(event, data);
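
The swap helpers listed above (comm, mmap, mmap2, fork/task, read, aux, itrace_start, switch, text_poke, throttle, namespaces, cgroup) share one shape: byte-swap each fixed-width field of the record, then, when sample_id_all is set, hand the trailing sample-ID block to swap_sample_id_all(). A minimal sketch of that shape follows; the "example" record and its fields are hypothetical, chosen only to illustrate the pattern, not a real perf record type.

/* Sketch only: "example" is a hypothetical record, not a real perf type. */
static void perf_event__example_swap(union perf_event *event, bool sample_id_all)
{
	event->example.pid  = bswap_32(event->example.pid);	/* 32-bit field */
	event->example.time = bswap_64(event->example.time);	/* 64-bit field */

	if (sample_id_all)
		/* the sample-ID block sits right after the fixed fields */
		swap_sample_id_all(event, &event->example + 1);
}
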
828 static void perf_event__hdr_attr_swap(union perf_event *event,
833 perf_event__attr_swap(&event->attr.attr);
835 size = event->header.size;
836 size -= perf_record_header_attr_id(event) - (void *)event;
837 mem_bswap_64(perf_record_header_attr_id(event), size);
840 static void perf_event__event_update_swap(union perf_event *event,
843 event->event_update.type = bswap_64(event->event_update.type);
844 event->event_update.id = bswap_64(event->event_update.id);
847 static void perf_event__event_type_swap(union perf_event *event,
850 event->event_type.event_type.event_id =
851 bswap_64(event->event_type.event_type.event_id);
854 static void perf_event__tracing_data_swap(union perf_event *event,
857 event->tracing_data.size = bswap_32(event->tracing_data.size);
860 static void perf_event__auxtrace_info_swap(union perf_event *event,
865 event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
867 size = event->header.size;
868 size -= (void *)&event->auxtrace_info.priv - (void *)event;
869 mem_bswap_64(event->auxtrace_info.priv, size);
872 static void perf_event__auxtrace_swap(union perf_event *event,
875 event->auxtrace.size = bswap_64(event->auxtrace.size);
876 event->auxtrace.offset = bswap_64(event->auxtrace.offset);
877 event->auxtrace.reference = bswap_64(event->auxtrace.reference);
878 event->auxtrace.idx = bswap_32(event->auxtrace.idx);
879 event->auxtrace.tid = bswap_32(event->auxtrace.tid);
880 event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
883 static void perf_event__auxtrace_error_swap(union perf_event *event,
886 event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
887 event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
888 event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
889 event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
890 event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
891 event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
892 event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
893 if (event->auxtrace_error.fmt)
894 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
895 if (event->auxtrace_error.fmt >= 2) {
896 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
897 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
901 static void perf_event__thread_map_swap(union perf_event *event,
906 event->thread_map.nr = bswap_64(event->thread_map.nr);
908 for (i = 0; i < event->thread_map.nr; i++)
909 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
912 static void perf_event__cpu_map_swap(union perf_event *event,
915 struct perf_record_cpu_map_data *data = &event->cpu_map.data;
953 static void perf_event__stat_config_swap(union perf_event *event,
958 size = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
960 mem_bswap_64(&event->stat_config.nr, size);
963 static void perf_event__stat_swap(union perf_event *event,
966 event->stat.id = bswap_64(event->stat.id);
967 event->stat.thread = bswap_32(event->stat.thread);
968 event->stat.cpu = bswap_32(event->stat.cpu);
969 event->stat.val = bswap_64(event->stat.val);
970 event->stat.ena = bswap_64(event->stat.ena);
971 event->stat.run = bswap_64(event->stat.run);
974 static void perf_event__stat_round_swap(union perf_event *event,
977 event->stat_round.type = bswap_64(event->stat_round.type);
978 event->stat_round.time = bswap_64(event->stat_round.time);
981 static void perf_event__time_conv_swap(union perf_event *event,
984 event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
985 event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
986 event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
988 if (event_contains(event->time_conv, time_cycles)) {
989 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
990 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
994 typedef void (*perf_event__swap_op)(union perf_event *event,
1037 * event.
1075 union perf_event *event __maybe_unused,
1083 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
1086 return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
1259 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
1263 if (event->header.type != PERF_RECORD_SAMPLE &&
1310 static void dump_event(struct evlist *evlist, union perf_event *event,
1317 printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
1318 file_offset, file_path, event->header.size, event->header.type);
1320 trace_event(event);
1321 if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1322 evlist->trace_event_sample_raw(evlist, event, sample);
1325 evlist__print_tstamp(evlist, event, sample);
1328 event->header.size, perf_event__name(event->header.type));
1339 static void dump_sample(struct evsel *evsel, union perf_event *event,
1349 event->header.misc, sample->pid, sample->tid, sample->ip,
1397 static void dump_read(struct evsel *evsel, union perf_event *event)
1399 struct perf_record_read *read_event = &event->read;
1405 printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1406 evsel__name(evsel), event->read.value);
1427 union perf_event *event,
1437 else if (event->header.type == PERF_RECORD_MMAP
1438 || event->header.type == PERF_RECORD_MMAP2)
1439 pid = event->mmap.pid;
1458 union perf_event *event,
1485 return tool->sample(tool, event, sample, evsel, machine);
1490 union perf_event *event,
1499 ret = deliver_sample_value(evlist, tool, event, sample, v,
1509 union perf_event *event, struct perf_sample *sample,
1518 return tool->sample(tool, event, sample, evsel, machine);
1522 return deliver_sample_group(evlist, tool, event, sample,
1525 return deliver_sample_value(evlist, tool, event, sample,
1531 union perf_event *event,
1539 dump_event(evlist, event, file_offset, sample, file_path);
1543 machine = machines__find_for_cpumode(machines, event, sample);
1545 switch (event->header.type) {
1553 dump_sample(evsel, event, sample, perf_env__arch(NULL));
1556 dump_sample(evsel, event, sample, perf_env__arch(machine->env));
1557 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1559 return tool->mmap(tool, event, sample, machine);
1561 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1563 return tool->mmap2(tool, event, sample, machine);
1565 return tool->comm(tool, event, sample, machine);
1567 return tool->namespaces(tool, event, sample, machine);
1569 return tool->cgroup(tool, event, sample, machine);
1571 return tool->fork(tool, event, sample, machine);
1573 return tool->exit(tool, event, sample, machine);
1576 evlist->stats.total_lost += event->lost.lost;
1577 return tool->lost(tool, event, sample, machine);
1580 !(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
1581 evlist->stats.total_lost_samples += event->lost_samples.lost;
1582 return tool->lost_samples(tool, event, sample, machine);
1584 dump_read(evsel, event);
1585 return tool->read(tool, event, sample, evsel, machine);
1587 return tool->throttle(tool, event, sample, machine);
1589 return tool->unthrottle(tool, event, sample, machine);
1592 if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1594 if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1596 if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
1599 return tool->aux(tool, event, sample, machine);
1601 return tool->itrace_start(tool, event, sample, machine);
1604 return tool->context_switch(tool, event, sample, machine);
1606 return tool->ksymbol(tool, event, sample, machine);
1608 return tool->bpf(tool, event, sample, machine);
1610 return tool->text_poke(tool, event, sample, machine);
1612 return tool->aux_output_hw_id(tool, event, sample, machine);
1620 union perf_event *event,
1626 int ret = evlist__parse_sample(session->evlist, event, &sample);
1633 ret = auxtrace__process_event(session, event, &sample, tool);
1640 event, &sample, tool, file_offset, file_path);
1649 union perf_event *event,
1659 if (event->header.type != PERF_RECORD_COMPRESSED ||
1661 dump_event(session->evlist, event, file_offset, &sample, file_path);
1664 switch (event->header.type) {
1666 err = tool->attr(tool, event, &session->evlist);
1673 return tool->event_update(tool, event, &session->evlist);
1688 return tool->tracing_data(session, event);
1690 return tool->build_id(session, event);
1692 return tool->finished_round(tool, event, oe);
1694 return tool->id_index(session, event);
1696 return tool->auxtrace_info(session, event);
1704 lseek(fd, file_offset + event->header.size, SEEK_SET);
1705 return tool->auxtrace(session, event);
1707 perf_session__auxtrace_error_inc(session, event);
1708 return tool->auxtrace_error(session, event);
1710 return tool->thread_map(session, event);
1712 return tool->cpu_map(session, event);
1714 return tool->stat_config(session, event);
1716 return tool->stat(session, event);
1718 return tool->stat_round(session, event);
1720 session->time_conv = event->time_conv;
1721 return tool->time_conv(session, event);
1723 return tool->feature(session, event);
1725 err = tool->compressed(session, event, file_offset, file_path);
1727 dump_event(session->evlist, event, file_offset, &sample, file_path);
1730 return tool->finished_init(session, event);
1737 union perf_event *event,
1743 events_stats__inc(&evlist->stats, event->header.type);
1745 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1746 return perf_session__process_user_event(session, event, 0, NULL);
1748 return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1751 static void event_swap(union perf_event *event, bool sample_id_all)
1755 swap = perf_event__swap_ops[event->header.type];
1757 swap(event, sample_id_all);
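
event_swap() (lines 1751-1757 above) dispatches through a table of per-record-type swap functions indexed by event->header.type, using the perf_event__swap_op typedef from line 994. A condensed sketch of that arrangement, with only two entries shown (the real table covers every PERF_RECORD_* type) and assuming the type has already been range-checked against PERF_RECORD_HEADER_MAX as at line 1855:

typedef void (*perf_event__swap_op)(union perf_event *event, bool sample_id_all);

/* Sketch: two entries shown; unlisted types stay NULL and are simply skipped. */
static perf_event__swap_op perf_event__swap_ops[PERF_RECORD_HEADER_MAX] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
};

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap = perf_event__swap_ops[event->header.type];

	if (swap)
		swap(event, sample_id_all);
}
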
1765 union perf_event *event;
1770 event = file_offset - session->one_mmap_offset +
1788 event = (union perf_event *)buf;
1791 perf_event_header__bswap(&event->header);
1793 if (event->header.size < hdr_sz || event->header.size > buf_sz)
1797 rest = event->header.size - hdr_sz;
1803 event_swap(event, evlist__sample_id_all(session->evlist));
1807 if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1808 evlist__parse_sample(session->evlist, event, sample))
1811 *event_ptr = event;
1821 union perf_event *event;
1826 PERF_SAMPLE_MAX_SIZE, &event,
1831 err = cb(session, event, offset, data);
1835 offset += event->header.size;
1836 if (event->header.type == PERF_RECORD_AUXTRACE)
1837 offset += event->auxtrace.size;
1845 union perf_event *event, u64 file_offset,
1853 event_swap(event, evlist__sample_id_all(evlist));
1855 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1858 events_stats__inc(&evlist->stats, event->header.type);
1860 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1861 return perf_session__process_user_event(session, event, file_offset, file_path);
1866 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1870 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1875 return perf_session__deliver_event(session, event, tool, file_offset, file_path);
2037 union perf_event *event;
2055 event = buf;
2056 err = perf_data__read(session->data, event,
2062 pr_err("failed to read event header\n");
2067 perf_event_header__bswap(&event->header);
2069 size = event->header.size;
2071 pr_err("bad event header size\n");
2078 pr_err("failed to allocate memory to read event\n");
2083 event = buf;
2085 p = event;
2093 pr_err("unexpected end of event stream\n");
2097 pr_err("failed to read event data\n");
2102 if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
2104 head, event->header.size, event->header.type);
2142 union perf_event *event;
2147 * the size of the event in the headers.
2149 if (head + sizeof(event->header) > mmap_size)
2152 event = (union perf_event *)(buf + head);
2154 perf_event_header__bswap(&event->header);
2156 event_size = event->header.size;
2158 return event;
2160 /* We're not fetching the event so swap back again */
2162 perf_event_header__bswap(&event->header);
2164 /* Check if the event fits into the next mmapped buf. */
2171 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
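
The fetch helpers above (lines 2142-2171 and their fetch_mmaped_event()/fetch_decomp_event() callers) apply the same bounds discipline when walking records in a mapped buffer: confirm a full header fits, byte-swap it if the file is cross-endian, then confirm the whole record fits, undoing the swap if it does not so the record can be re-fetched from the next mapping. A simplified sketch of that pattern, with the error paths and pr_debug diagnostics trimmed:

/* Sketch: fetch one record at "head" inside an mmap'd window of mmap_size bytes. */
static union perf_event *fetch_event_sketch(u64 head, size_t mmap_size,
					    char *buf, bool needs_swap)
{
	union perf_event *event;

	/* Not even a complete header left in this mapping. */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size <= mmap_size)
		return event;

	/* We're not fetching the event, so swap back again. */
	if (needs_swap)
		perf_event_header__bswap(&event->header);

	return NULL;	/* caller remaps and retries */
}
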
2199 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2202 if (!event)
2205 size = event->header.size;
2208 (skip = perf_session__process_event(session, event, decomp->file_pos,
2211 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2239 union perf_event *event,
2349 union perf_event *event;
2352 event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
2354 if (IS_ERR(event))
2355 return PTR_ERR(event);
2357 if (!event)
2360 size = event->header.size;
2365 (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
2367 rd->file_offset + rd->head, event->header.size,
2368 event->header.type, strerror(-skip));
2432 union perf_event *event,
2436 return perf_session__process_event(session, event, file_offset, file_path);
2811 union perf_event *event)
2814 struct perf_record_id_index *ie = &event->id_index;