Lines matching defs:thread in tools/perf/util/evsel.c (Linux perf tools)
1500 void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
1508 tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
1509 *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
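The two matches at 1508-1509 are the snapshot swap at the heart of delta accounting: remember the previous raw reading, store the new one, and report only the difference. A plausible reconstruction of the whole function, assuming the val/ena/run fields of struct perf_counts_values (exact bodies vary across kernel versions):

void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
                           struct perf_counts_values *count)
{
        struct perf_counts_values tmp;

        /* Deltas are only tracked when prev_raw_counts was allocated. */
        if (!evsel->prev_raw_counts)
                return;

        /* Swap: save the old snapshot, record the new one. */
        tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
        *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;

        /* Convert the absolute reading into a delta since the last read. */
        count->val -= tmp.val;
        count->ena -= tmp.ena;
        count->run -= tmp.run;
}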
1516 static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
1518 struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
1520 return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
1523 static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
1528 count = perf_counts(counter->counts, cpu_map_idx, thread);
1535 perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
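evsel__set_count (1523-1535) is the store side of group processing: it copies one member's reading into the counts matrix and marks the slot loaded so a later read path will not issue another read(2). A sketch consistent with the matches; the val/ena/run/lost parameter names follow the call site at 1566:

static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
                             u64 val, u64 ena, u64 run, u64 lost)
{
        struct perf_counts_values *count;

        count = perf_counts(counter->counts, cpu_map_idx, thread);

        count->val  = val;
        count->ena  = ena;
        count->run  = run;
        count->lost = lost;

        /* Flag the slot so readers know it already holds fresh data. */
        perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}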
1538 static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
1566 evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
1572 static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
1593 if (FD(leader, cpu_map_idx, thread) < 0)
1596 if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
1599 return evsel__process_group_data(leader, cpu_map_idx, thread, data);
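evsel__read_group (1572-1599) issues a single readn() on the group leader's fd and hands the buffer to evsel__process_group_data, which walks one entry per member (the v->value consumed at 1566). The buffer layout is not perf-tool internal but fixed by perf_event_open(2) for PERF_FORMAT_GROUP; roughly:

/* What readn() fills when PERF_FORMAT_GROUP is set; the optional
 * fields are present only when the matching read_format bit is. */
struct read_format {
        u64 nr;                 /* number of events in the group */
        u64 time_enabled;       /* PERF_FORMAT_TOTAL_TIME_ENABLED */
        u64 time_running;       /* PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct {
                u64 value;      /* the counter itself */
                u64 id;         /* PERF_FORMAT_ID */
                u64 lost;       /* PERF_FORMAT_LOST */
        } values[];             /* nr entries, leader first */
};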
1602 int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
1607 return evsel__read_group(evsel, cpu_map_idx, thread);
1609 return evsel__read_one(evsel, cpu_map_idx, thread);
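evsel__read_counter (1602-1609) is a thin dispatcher between the two paths above. The matches imply a body along these lines; the read_format test is an assumption consistent with how the group layout is selected:

int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
{
        /* Group members are read in one go through the leader's fd. */
        if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
                return evsel__read_group(evsel, cpu_map_idx, thread);

        return evsel__read_one(evsel, cpu_map_idx, thread);
}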
1612 int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
1617 if (FD(evsel, cpu_map_idx, thread) < 0)
1623 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
1626 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
1628 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
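In __evsel__read_on_cpu (1612-1628), nv at 1623 is how many u64s a single read(2) returns: just the value, or value plus time_enabled and time_running when scaling is requested. Scaling compensates for multiplexing, where a counter was only scheduled on the PMU for part of the enabled window; a sketch of the usual step (libperf ships the real version as perf_counts_values__scale()):

/* Sketch: extrapolate a multiplexed counter to the full window. */
static void scale_count(struct perf_counts_values *count)
{
        if (count->run == 0)
                count->val = 0; /* never got PMU time, nothing to scale */
        else if (count->run < count->ena)
                count->val = (u64)((double)count->val *
                                   count->ena / count->run);
}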
1653 static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
1671 fd = FD(leader, cpu_map_idx, thread);
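get_group_fd (1653-1671) supplies the group_fd argument for perf_event_open(): a group leader opens with -1, while members pass the leader's already-open fd for the same cpu/thread slot, as the match at 1671 shows. A sketch using perf's evsel__leader()/evsel__is_group_leader() helpers:

static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
{
        struct evsel *leader = evsel__leader(evsel);

        /* Leaders are opened first and take group_fd == -1. */
        if (evsel__is_group_leader(evsel))
                return -1;

        /* Members attach to the leader's fd for the same slot. */
        return FD(leader, cpu_map_idx, thread);
}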
1684 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1685 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
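The loop at 1684-1685 compacts one evsel's fd matrix after a thread vanishes: every slot from the removed column onward shifts left so the xyarray stays in step with the shrunken thread map. In the perf tree this lives in a small helper called from update_fds; reconstructed from the matches:

/* Drop the fd column for thread_idx by shifting later threads left. */
static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads,
                             int thread_idx)
{
        for (int cpu = 0; cpu < nr_cpus; cpu++)
                for (int thread = thread_idx; thread < nr_threads - 1; thread++)
                        FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
}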
1715 int thread, int err)
1717 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
1730 /* If there's only one thread, let it fail. */
1738 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
1741 if (thread_map__remove(threads, thread))
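Lines 1715-1741 belong to ignore_missing_thread: when perf_event_open() fails because the target pid exited between thread-map creation and the open, the thread is dropped and the caller retries instead of failing the whole session. Note the ordering constraint: the fd matrices must be compacted before thread_map__remove(), because removal shrinks threads->nr. A condensed sketch; the -ESRCH test reflects the errno perf_event_open() reports for a vanished pid, and the warning that uses ignore_pid from 1717 is omitted here:

static bool ignore_missing_thread(struct evsel *evsel,
                                  int nr_cpus, int cpu_map_idx,
                                  struct perf_thread_map *threads,
                                  int thread, int err)
{
        if (!evsel->ignore_missing_thread)
                return false;

        /* -ESRCH is what the syscall reports for a pid that is gone. */
        if (err != -ESRCH)
                return false;

        /* If there's only one thread, let it fail. */
        if (threads->nr == 1)
                return false;

        /* Compact fd rows first: thread_map__remove() shrinks threads->nr. */
        if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
                return false;

        if (thread_map__remove(threads, thread))
                return false;

        return true;    /* caller retries the open without this thread */
}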
2023 int idx, thread, nthreads;
2050 for (thread = 0; thread < nthreads; thread++) {
2053 if (thread >= nthreads)
2057 pid = perf_thread_map__pid(threads, thread);
2059 group_fd = get_group_fd(evsel, idx, thread);
2077 FD(evsel, idx, thread) = fd;
2134 idx, threads, thread, err)) {
2135 /* We just removed 1 thread, so lower the upper nthreads limit. */
2149 if (err != -EINVAL || idx > 0 || thread > 0)
2156 threads->err_thread = thread;
2160 while (--thread >= 0) {
2161 if (FD(evsel, idx, thread) >= 0)
2162 close(FD(evsel, idx, thread));
2163 FD(evsel, idx, thread) = -1;
2165 thread = nthreads;
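Lines 2149-2165 are the failure tail of the open loop: record which thread failed (threads->err_thread at 2156), then unwind every fd opened so far, newest first. The matches at 2160-2165 give the inner per-thread sweep; resetting thread to nthreads suggests an enclosing loop that walks earlier cpu indexes back, assumed here:

/* Unwind after a failed open: close all fds opened so far. */
do {
        while (--thread >= 0) {
                if (FD(evsel, idx, thread) >= 0)
                        close(FD(evsel, idx, thread));
                FD(evsel, idx, thread) = -1;
        }
        thread = nthreads;      /* restart from a full row of threads */
} while (--idx >= 0);           /* ...for every cpu index already done */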
3038 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
3068 int cpu_map_idx, thread;
3071 for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
3072 thread++) {
3073 int fd = FD(evsel, cpu_map_idx, thread);
3076 cpu_map_idx, thread, fd) < 0)
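The final matches (3068-3076) are the id-registration walk that follows a successful open: every fd in the evsel's cpu-by-thread matrix is handed to the evlist so samples can later be mapped back to their event. Reconstructed shape, with bounds taken from the fd xyarray as the matches at 3071-3073 show (treat details as version-dependent):

static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
        int cpu_map_idx, thread;

        for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd);
             cpu_map_idx++) {
                for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
                     thread++) {
                        int fd = FD(evsel, cpu_map_idx, thread);

                        /* Register the fd's sample id with the evlist. */
                        if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
                                                   cpu_map_idx, thread, fd) < 0)
                                return -1;
                }
        }
        return 0;
}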