Lines Matching defs:sample

286 	struct cpu_sample *sample;
295 sample = zalloc(sizeof(*sample));
296 assert(sample != NULL);
297 sample->start_time = start;
298 sample->end_time = end;
299 sample->type = type;
300 sample->next = c->samples;
301 sample->cpu = cpu;
302 sample->backtrace = backtrace;
303 c->samples = sample;
305 if (sample->type == TYPE_RUNNING && end > start && start > 0) {
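The cluster at 286-305 is the core of the scheduler view: every run/wait/block interval becomes a cpu_sample prepended to a per-task list. A minimal, self-contained sketch of that pattern follows; the field names mirror lines 297-303, while per_pidcomm, the TYPE_* values, and add_sample itself are illustrative stand-ins (the real file uses perf's zalloc() helper rather than calloc()):

	#include <assert.h>
	#include <stdlib.h>

	typedef unsigned long long u64;

	enum sample_type { TYPE_NONE, TYPE_RUNNING, TYPE_WAITING, TYPE_BLOCKED };

	struct cpu_sample {
		struct cpu_sample *next;	/* newest-first singly linked list */
		u64 start_time;
		u64 end_time;
		enum sample_type type;
		int cpu;
		const char *backtrace;
	};

	struct per_pidcomm {			/* stand-in for the real struct */
		struct cpu_sample *samples;
	};

	static struct cpu_sample *add_sample(struct per_pidcomm *c, int cpu,
					     u64 start, u64 end,
					     enum sample_type type,
					     const char *backtrace)
	{
		struct cpu_sample *sample = calloc(1, sizeof(*sample));

		assert(sample != NULL);		/* as at line 296 */
		sample->start_time = start;
		sample->end_time = end;
		sample->type = type;
		sample->cpu = cpu;
		sample->backtrace = backtrace;
		sample->next = c->samples;	/* O(1) prepend */
		c->samples = sample;
		return sample;
	}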
325 struct perf_sample *sample __maybe_unused,
335 struct perf_sample *sample __maybe_unused,
345 struct perf_sample *sample __maybe_unused,
493 struct perf_sample *sample,
501 struct ip_callchain *chain = sample->callchain;
513 if (machine__resolve(machine, &al, sample) < 0) {
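Lines 493-513 belong to the backtrace formatter: sample->callchain is a raw count plus a flat array of instruction pointers (the kernel's PERF_SAMPLE_CALLCHAIN layout), which the file then symbolizes via machine__resolve(). Symbol resolution needs perf's internal machine state, so this sketch only walks the raw addresses; struct ip_callchain matches the kernel layout, dump_backtrace is illustrative:

	#include <stdio.h>

	typedef unsigned long long u64;

	struct ip_callchain {
		u64 nr;		/* entry count */
		u64 ips[];	/* instruction pointers, interleaved with
				 * PERF_CONTEXT_* marker values */
	};

	static void dump_backtrace(const struct ip_callchain *chain)
	{
		for (u64 i = 0; i < chain->nr; i++)
			printf("..... %016llx\n", chain->ips[i]);
	}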
571 struct perf_sample *sample,
576 struct perf_sample *sample,
583 if (!tchart->first_time || tchart->first_time > sample->time)
584 tchart->first_time = sample->time;
585 if (tchart->last_time < sample->time)
586 tchart->last_time = sample->time;
591 return f(tchart, evsel, sample,
592 cat_backtrace(event, sample, machine));
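Lines 571-592 are process_sample_event(): before handing the sample to its tracepoint-specific handler (together with a pre-rendered backtrace), it widens the chart's global time window. A stand-in showing just that control flow, with simplified types in place of perf's evsel/perf_sample:

	typedef unsigned long long u64;

	struct perf_sample_stub { u64 time; };
	struct tchart_stub { u64 first_time, last_time; };

	typedef int (*tracepoint_handler)(struct tchart_stub *tchart,
					  struct perf_sample_stub *sample,
					  const char *backtrace);

	static int dispatch_sample(struct tchart_stub *tchart,
				   struct perf_sample_stub *sample,
				   tracepoint_handler f, const char *backtrace)
	{
		/* first_time == 0 doubles as "no sample seen yet" (line 583) */
		if (!tchart->first_time || tchart->first_time > sample->time)
			tchart->first_time = sample->time;
		if (tchart->last_time < sample->time)
			tchart->last_time = sample->time;

		return f(tchart, sample, backtrace);
	}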
601 struct perf_sample *sample,
604 u32 state = evsel__intval(evsel, sample, "state");
605 u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
608 c_state_end(tchart, cpu_id, sample->time);
610 c_state_start(cpu_id, sample->time, state);
617 struct perf_sample *sample,
620 u32 state = evsel__intval(evsel, sample, "state");
621 u32 cpu_id = evsel__intval(evsel, sample, "cpu_id");
623 p_state_change(tchart, cpu_id, sample->time, state);
630 struct perf_sample *sample,
633 u8 flags = evsel__intval(evsel, sample, "common_flags");
634 int waker = evsel__intval(evsel, sample, "common_pid");
635 int wakee = evsel__intval(evsel, sample, "pid");
637 sched_wakeup(tchart, sample->cpu, sample->time, waker, wakee, flags, backtrace);
644 struct perf_sample *sample,
647 int prev_pid = evsel__intval(evsel, sample, "prev_pid");
648 int next_pid = evsel__intval(evsel, sample, "next_pid");
649 u64 prev_state = evsel__intval(evsel, sample, "prev_state");
651 sched_switch(tchart, sample->cpu, sample->time, prev_pid, next_pid,
660 struct perf_sample *sample,
663 u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
664 u64 value = evsel__intval(evsel, sample, "value");
666 c_state_start(cpu_id, sample->time, value);
673 struct perf_sample *sample,
676 c_state_end(tchart, sample->cpu, sample->time);
683 struct perf_sample *sample,
686 u64 cpu_id = evsel__intval(evsel, sample, "cpu_id");
687 u64 value = evsel__intval(evsel, sample, "value");
689 p_state_change(tchart, cpu_id, sample->time, value);
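The power and scheduler handlers at 601-689 all share one shape: pull named tracepoint fields out of the sample with evsel__intval(), then feed them, stamped with sample->time, into a per-CPU state machine (c_state_start/c_state_end for idle, p_state_change for frequency). A self-contained stand-in, with a toy field lookup in place of perf's tracepoint-format parsing and stubs for the state machine; the -1 exit sentinel mirrors the PWR_EVENT_EXIT comparison implied at 608-610:

	#include <string.h>

	typedef unsigned long long u64;
	typedef unsigned int u32;

	struct toy_field  { const char *name; u64 val; };
	struct toy_sample { u64 time; int cpu; struct toy_field fields[4]; };

	/* toy replacement for evsel__intval(evsel, sample, name) */
	static u64 toy_intval(const struct toy_sample *s, const char *name)
	{
		for (int i = 0; i < 4; i++)
			if (s->fields[i].name && !strcmp(s->fields[i].name, name))
				return s->fields[i].val;
		return 0;
	}

	static void c_state_start_stub(u32 cpu, u64 t, u32 state)
	{ (void)cpu; (void)t; (void)state; }
	static void c_state_end_stub(u32 cpu, u64 t)
	{ (void)cpu; (void)t; }

	#define PWR_EVENT_EXIT -1	/* exit-idle sentinel, as in the file */

	/* shape of the power:cpu_idle handler at 601-610 */
	static void toy_cpu_idle(struct toy_sample *s)
	{
		u32 state  = (u32)toy_intval(s, "state");
		u32 cpu_id = (u32)toy_intval(s, "cpu_id");

		if (state == (u32)PWR_EVENT_EXIT)
			c_state_end_stub(cpu_id, s->time);
		else
			c_state_start_stub(cpu_id, s->time, state);
	}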
695 * After the last sample we need to wrap up the current C/P state
735 struct io_sample *sample;
757 sample = zalloc(sizeof(*sample));
758 if (!sample)
760 sample->start_time = start;
761 sample->type = type;
762 sample->fd = fd;
763 sample->next = c->io_samples;
764 c->io_samples = sample;
777 struct io_sample *sample, *prev;
784 sample = c->io_samples;
786 if (!sample) /* skip partially captured events */
789 if (sample->end_time) {
795 if (sample->type != type) {
800 sample->end_time = end;
801 prev = sample->next;
805 if (sample->end_time - sample->start_time < tchart->min_time)
806 sample->end_time = sample->start_time + tchart->min_time;
807 if (prev && sample->start_time < prev->end_time) {
809 sample->start_time = prev->end_time;
811 prev->end_time = sample->start_time;
815 sample->err = ret;
824 sample->bytes = ret;
829 prev->type == sample->type &&
830 prev->err == sample->err &&
831 prev->fd == sample->fd &&
832 prev->end_time + tchart->merge_dist >= sample->start_time) {
834 sample->bytes += prev->bytes;
835 sample->merges += prev->merges + 1;
837 sample->start_time = prev->start_time;
838 sample->next = prev->next;
841 if (!sample->err && sample->bytes > c->max_bytes)
842 c->max_bytes = sample->bytes;
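pid_end_io_sample() (lines 777-842) closes the newest io_sample and post-processes it: intervals shorter than min_time are stretched so they stay visible, any overlap that stretch creates with the previous sample is clipped, and a run of equal, nearly adjacent operations on one fd is folded into a single sample so the SVG does not drown in slivers. A self-contained sketch of that tail logic; the struct fields mirror the lines above, but it is simplified (the real code also branches on prev->err when clipping at 807-811, and has extra cases between lines 815 and 824):

	#include <stdlib.h>

	typedef unsigned long long u64;

	struct io_sample {
		struct io_sample *next;		/* newest-first, as at 763-764 */
		u64 start_time, end_time;
		u64 bytes;
		int type, fd, err;
		unsigned int merges;
	};

	static void end_io_sample(struct io_sample *sample, u64 end, long ret,
				  u64 min_time, u64 merge_dist)
	{
		struct io_sample *prev = sample->next;

		sample->end_time = end;

		/* stretch slivers shorter than min_time (lines 805-806) */
		if (sample->end_time - sample->start_time < min_time)
			sample->end_time = sample->start_time + min_time;
		/* clip any overlap the stretch introduced (lines 807-811) */
		if (prev && sample->start_time < prev->end_time)
			prev->end_time = sample->start_time;

		if (ret < 0)
			sample->err = (int)ret;		/* line 815 */
		else
			sample->bytes = (u64)ret;	/* line 824 */

		/* fold an equal, nearly adjacent neighbour in (829-838) */
		if (prev &&
		    prev->type == sample->type &&
		    prev->err == sample->err &&
		    prev->fd == sample->fd &&
		    prev->end_time + merge_dist >= sample->start_time) {
			sample->bytes += prev->bytes;
			sample->merges += prev->merges + 1;
			sample->start_time = prev->start_time;
			sample->next = prev->next;
			free(prev);
		}
	}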
853 struct perf_sample *sample)
855 long fd = evsel__intval(evsel, sample, "fd");
856 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_READ,
857 sample->time, fd);
863 struct perf_sample *sample)
865 long ret = evsel__intval(evsel, sample, "ret");
866 return pid_end_io_sample(tchart, sample->tid, IOTYPE_READ,
867 sample->time, ret);
873 struct perf_sample *sample)
875 long fd = evsel__intval(evsel, sample, "fd");
876 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_WRITE,
877 sample->time, fd);
883 struct perf_sample *sample)
885 long ret = evsel__intval(evsel, sample, "ret");
886 return pid_end_io_sample(tchart, sample->tid, IOTYPE_WRITE,
887 sample->time, ret);
893 struct perf_sample *sample)
895 long fd = evsel__intval(evsel, sample, "fd");
896 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_SYNC,
897 sample->time, fd);
903 struct perf_sample *sample)
905 long ret = evsel__intval(evsel, sample, "ret");
906 return pid_end_io_sample(tchart, sample->tid, IOTYPE_SYNC,
907 sample->time, ret);
913 struct perf_sample *sample)
915 long fd = evsel__intval(evsel, sample, "fd");
916 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_TX,
917 sample->time, fd);
923 struct perf_sample *sample)
925 long ret = evsel__intval(evsel, sample, "ret");
926 return pid_end_io_sample(tchart, sample->tid, IOTYPE_TX,
927 sample->time, ret);
933 struct perf_sample *sample)
935 long fd = evsel__intval(evsel, sample, "fd");
936 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_RX,
937 sample->time, fd);
943 struct perf_sample *sample)
945 long ret = evsel__intval(evsel, sample, "ret");
946 return pid_end_io_sample(tchart, sample->tid, IOTYPE_RX,
947 sample->time, ret);
953 struct perf_sample *sample)
955 long fd = evsel__intval(evsel, sample, "fd");
956 return pid_begin_io_sample(tchart, sample->tid, IOTYPE_POLL,
957 sample->time, fd);
963 struct perf_sample *sample)
965 long ret = evsel__intval(evsel, sample, "ret");
966 return pid_end_io_sample(tchart, sample->tid, IOTYPE_POLL,
967 sample->time, ret);
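The twelve handlers at 853-967 are one pattern instantiated per I/O class: a syscall-enter probe opens a sample keyed by (tid, type) with the "fd" argument, and the matching syscall-exit probe closes it with the "ret" value. A table-driven stand-in for that pairing; the tracepoint names below are the standard syscalls:* events and are purely illustrative, since the listing does not show which events the file actually binds:

	enum io_type { IOTYPE_READ, IOTYPE_WRITE, IOTYPE_SYNC,
		       IOTYPE_TX, IOTYPE_RX, IOTYPE_POLL };

	struct io_probe {
		const char *enter_event;	/* handler reads field "fd" */
		const char *exit_event;		/* handler reads field "ret" */
		enum io_type type;
	};

	static const struct io_probe io_probes[] = {
		{ "syscalls:sys_enter_read",     "syscalls:sys_exit_read",     IOTYPE_READ  },
		{ "syscalls:sys_enter_write",    "syscalls:sys_exit_write",    IOTYPE_WRITE },
		{ "syscalls:sys_enter_fsync",    "syscalls:sys_exit_fsync",    IOTYPE_SYNC  },
		{ "syscalls:sys_enter_sendto",   "syscalls:sys_exit_sendto",   IOTYPE_TX    },
		{ "syscalls:sys_enter_recvfrom", "syscalls:sys_exit_recvfrom", IOTYPE_RX    },
		{ "syscalls:sys_enter_poll",     "syscalls:sys_exit_poll",     IOTYPE_POLL  },
	};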
1116 struct cpu_sample *sample;
1121 sample = c->samples;
1122 while (sample) {
1123 if (sample->type == TYPE_RUNNING) {
1124 svg_process(sample->cpu,
1125 sample->start_time,
1126 sample->end_time,
1129 sample->backtrace);
1132 sample = sample->next;
1147 struct io_sample *sample;
1162 for (sample = c->io_samples; sample; sample = sample->next) {
1163 double h = (double)sample->bytes / c->max_bytes;
1166 sample->err == -EAGAIN)
1169 if (sample->err)
1172 if (sample->type == IOTYPE_SYNC)
1174 sample->start_time,
1175 sample->end_time,
1177 sample->err ? "error" : "sync",
1178 sample->fd,
1179 sample->err,
1180 sample->merges);
1181 else if (sample->type == IOTYPE_POLL)
1183 sample->start_time,
1184 sample->end_time,
1186 sample->err ? "error" : "poll",
1187 sample->fd,
1188 sample->err,
1189 sample->merges);
1190 else if (sample->type == IOTYPE_READ)
1192 sample->start_time,
1193 sample->end_time,
1195 sample->err ? "error" : "disk",
1196 sample->fd,
1197 sample->err,
1198 sample->merges);
1199 else if (sample->type == IOTYPE_WRITE)
1201 sample->start_time,
1202 sample->end_time,
1204 sample->err ? "error" : "disk",
1205 sample->fd,
1206 sample->err,
1207 sample->merges);
1208 else if (sample->type == IOTYPE_RX)
1210 sample->start_time,
1211 sample->end_time,
1213 sample->err ? "error" : "net",
1214 sample->fd,
1215 sample->err,
1216 sample->merges);
1217 else if (sample->type == IOTYPE_TX)
1219 sample->start_time,
1220 sample->end_time,
1222 sample->err ? "error" : "net",
1223 sample->fd,
1224 sample->err,
1225 sample->merges);
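The drawing loop at 1162-1225 also encodes a simple styling rule: an error result overrides the SVG class for any I/O type, EAGAIN results can be skipped outright (line 1166), and otherwise sync, poll, disk (read/write), and net (rx/tx) each get their own class. That mapping, extracted into one illustrative helper:

	enum io_type { IOTYPE_READ, IOTYPE_WRITE, IOTYPE_SYNC,
		       IOTYPE_TX, IOTYPE_RX, IOTYPE_POLL };

	static const char *io_svg_class(enum io_type type, int err)
	{
		if (err)
			return "error";
		switch (type) {
		case IOTYPE_SYNC:		return "sync";
		case IOTYPE_POLL:		return "poll";
		case IOTYPE_READ:
		case IOTYPE_WRITE:		return "disk";
		case IOTYPE_TX:
		case IOTYPE_RX:			return "net";
		}
		return "disk";
	}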
1259 struct cpu_sample *sample;
1275 sample = c->samples;
1276 while (sample) {
1277 if (sample->type == TYPE_RUNNING)
1278 svg_running(Y, sample->cpu,
1279 sample->start_time,
1280 sample->end_time,
1281 sample->backtrace);
1282 if (sample->type == TYPE_BLOCKED)
1283 svg_blocked(Y, sample->cpu,
1284 sample->start_time,
1285 sample->end_time,
1286 sample->backtrace);
1287 if (sample->type == TYPE_WAITING)
1288 svg_waiting(Y, sample->cpu,
1289 sample->start_time,
1290 sample->end_time,
1291 sample->backtrace);
1292 sample = sample->next;
1931 .sample = process_sample_event,
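The final match, at 1931, is where everything above gets wired in: process_sample_event is installed as the tool's .sample callback, so perf's session layer invokes it once per decoded PERF_RECORD_SAMPLE. A stand-in for that designated-initializer registration pattern, with a toy callback table in place of perf's struct perf_tool:

	struct toy_tool {
		/* one callback per record type; only .sample is sketched */
		int (*sample)(void *session_ctx, const void *event);
	};

	static int process_sample_stub(void *session_ctx, const void *event)
	{
		(void)session_ctx; (void)event;
		return 0;	/* non-zero aborts session processing */
	}

	static struct toy_tool timechart_tool = {
		.sample = process_sample_stub,
	};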