Lines matching refs:env — tools/perf/util/env.c
4 #include "env.h"
23 void perf_env__insert_bpf_prog_info(struct perf_env *env,
26 down_write(&env->bpf_progs.lock);
27 __perf_env__insert_bpf_prog_info(env, info_node);
28 up_write(&env->bpf_progs.lock);
31 void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
38 p = &env->bpf_progs.infos.rb_node;
54 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
55 env->bpf_progs.infos_cnt++;
58 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
64 down_read(&env->bpf_progs.lock);
65 n = env->bpf_progs.infos.rb_node;
79 up_read(&env->bpf_progs.lock);
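
The two function groups above show perf's locked-wrapper convention: the public perf_env__insert_bpf_prog_info() takes env->bpf_progs.lock for writing and delegates to a double-underscore helper that assumes the lock is already held, so callers that already own the lock can use the __ variant directly. A minimal userspace sketch of that shape, assuming a pthread rwlock in place of perf's rwsem wrapper and a hypothetical sorted list in place of the rbtree:

#include <pthread.h>

struct prog_node {
	unsigned int id;
	struct prog_node *next;
};

struct prog_table {
	pthread_rwlock_t lock;	/* init with PTHREAD_RWLOCK_INITIALIZER */
	struct prog_node *head;
};

/* __insert: caller must already hold the write lock */
static void __prog_table_insert(struct prog_table *t, struct prog_node *n)
{
	struct prog_node **p = &t->head;

	while (*p && (*p)->id < n->id)	/* walk to the sorted slot */
		p = &(*p)->next;
	n->next = *p;
	*p = n;
}

/* public entry point: wraps the __ helper in the write lock */
static void prog_table_insert(struct prog_table *t, struct prog_node *n)
{
	pthread_rwlock_wrlock(&t->lock);
	__prog_table_insert(t, n);
	pthread_rwlock_unlock(&t->lock);
}
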
83 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
87 down_write(&env->bpf_progs.lock);
88 ret = __perf_env__insert_btf(env, btf_node);
89 up_write(&env->bpf_progs.lock);
93 bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
100 p = &env->bpf_progs.btfs.rb_node;
116 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
117 env->bpf_progs.btfs_cnt++;
121 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
125 down_read(&env->bpf_progs.lock);
126 res = __perf_env__find_btf(env, btf_id);
127 up_read(&env->bpf_progs.lock);
131 struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
136 n = env->bpf_progs.btfs.rb_node;
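
perf_env__find_bpf_prog_info() and perf_env__find_btf() are the read side of the same pattern: they take the lock for reading only, so concurrent lookups do not serialize against each other, and the real code descends an rbtree keyed by program/BTF id in O(log n). Continuing the prog_table sketch above (a linear walk only to keep the sketch short):

/* read-side lookup: down_read()/up_read() in the real code */
static struct prog_node *prog_table_find(struct prog_table *t, unsigned int id)
{
	struct prog_node *n, *res = NULL;

	pthread_rwlock_rdlock(&t->lock);
	for (n = t->head; n && n->id <= id; n = n->next) {
		if (n->id == id) {
			res = n;	/* found while still under the lock */
			break;
		}
	}
	pthread_rwlock_unlock(&t->lock);
	return res;
}
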
151 static void perf_env__purge_bpf(struct perf_env *env)
156 down_write(&env->bpf_progs.lock);
158 root = &env->bpf_progs.infos;
171 env->bpf_progs.infos_cnt = 0;
173 root = &env->bpf_progs.btfs;
185 env->bpf_progs.btfs_cnt = 0;
187 up_write(&env->bpf_progs.lock);
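
perf_env__purge_bpf() empties both rbtrees under the write lock, freeing every node and zeroing the counters (the second definition at line 190 is the empty stub compiled when libbpf support is disabled, hence the __maybe_unused argument). The equivalent teardown in the sketch:

#include <stdlib.h>

static void prog_table_purge(struct prog_table *t)
{
	struct prog_node *n, *next;

	pthread_rwlock_wrlock(&t->lock);
	for (n = t->head; n; n = next) {
		next = n->next;	/* save the link before freeing the node */
		free(n);
	}
	t->head = NULL;
	pthread_rwlock_unlock(&t->lock);
}
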
190 static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
195 void perf_env__exit(struct perf_env *env)
199 perf_env__purge_bpf(env);
200 perf_env__purge_cgroups(env);
201 zfree(&env->hostname);
202 zfree(&env->os_release);
203 zfree(&env->version);
204 zfree(&env->arch);
205 zfree(&env->cpu_desc);
206 zfree(&env->cpuid);
207 zfree(&env->cmdline);
208 zfree(&env->cmdline_argv);
209 zfree(&env->sibling_dies);
210 zfree(&env->sibling_cores);
211 zfree(&env->sibling_threads);
212 zfree(&env->pmu_mappings);
213 zfree(&env->cpu);
214 for (i = 0; i < env->nr_cpu_pmu_caps; i++)
215 zfree(&env->cpu_pmu_caps[i]);
216 zfree(&env->cpu_pmu_caps);
217 zfree(&env->numa_map);
219 for (i = 0; i < env->nr_numa_nodes; i++)
220 perf_cpu_map__put(env->numa_nodes[i].map);
221 zfree(&env->numa_nodes);
223 for (i = 0; i < env->caches_cnt; i++)
224 cpu_cache_level__free(&env->caches[i]);
225 zfree(&env->caches);
227 for (i = 0; i < env->nr_memory_nodes; i++)
228 zfree(&env->memory_nodes[i].set);
229 zfree(&env->memory_nodes);
231 for (i = 0; i < env->nr_hybrid_nodes; i++) {
232 zfree(&env->hybrid_nodes[i].pmu_name);
233 zfree(&env->hybrid_nodes[i].cpus);
235 zfree(&env->hybrid_nodes);
237 for (i = 0; i < env->nr_pmus_with_caps; i++) {
238 for (j = 0; j < env->pmu_caps[i].nr_caps; j++)
239 zfree(&env->pmu_caps[i].caps[j]);
240 zfree(&env->pmu_caps[i].caps);
241 zfree(&env->pmu_caps[i].pmu_name);
243 zfree(&env->pmu_caps);
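
Everything perf_env__exit() releases goes through zfree(), which frees the pointee and then NULLs the pointer, so a double exit or a stale reader sees NULL rather than freed memory. A minimal libc-only equivalent:

#include <stdlib.h>
#include <string.h>

#define my_zfree(pp) do { free(*(pp)); *(pp) = NULL; } while (0)

int main(void)
{
	char *hostname = strdup("example");

	my_zfree(&hostname);	/* hostname is now NULL */
	my_zfree(&hostname);	/* second call is a harmless free(NULL) */
	return 0;
}
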
246 void perf_env__init(struct perf_env *env)
249 env->bpf_progs.infos = RB_ROOT;
250 env->bpf_progs.btfs = RB_ROOT;
251 init_rwsem(&env->bpf_progs.lock);
253 env->kernel_is_64_bit = -1;
256 static void perf_env__init_kernel_mode(struct perf_env *env)
258 const char *arch = perf_env__raw_arch(env);
264 env->kernel_is_64_bit = 1;
266 env->kernel_is_64_bit = 0;
269 int perf_env__kernel_is_64_bit(struct perf_env *env)
271 if (env->kernel_is_64_bit == -1)
272 perf_env__init_kernel_mode(env);
274 return env->kernel_is_64_bit;
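
perf_env__init() seeds kernel_is_64_bit with the sentinel -1 so that perf_env__kernel_is_64_bit() can compute the value lazily on first use and cache it. The same sentinel pattern, with a deliberately crude arch test standing in for the real per-architecture name matching (x86_64, aarch64, and so on):

#include <string.h>

struct env_sketch {
	int kernel_is_64_bit;	/* -1 = not computed yet */
	const char *arch;	/* assumed non-NULL for the sketch */
};

static int env_kernel_is_64_bit(struct env_sketch *e)
{
	if (e->kernel_is_64_bit == -1)	/* first call: compute and cache */
		e->kernel_is_64_bit = strstr(e->arch, "64") != NULL;
	return e->kernel_is_64_bit;
}
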
277 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
282 env->cmdline_argv = calloc(argc, sizeof(char *));
283 if (env->cmdline_argv == NULL)
291 env->cmdline_argv[i] = argv[i];
292 if (env->cmdline_argv[i] == NULL)
296 env->nr_cmdline = argc;
300 zfree(&env->cmdline_argv);
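
perf_env__set_cmdline() is the usual calloc-then-fill shape with a single error exit that zfrees the partially built array; note that it stores the argv pointers as-is rather than duplicating the strings. Sketch with hypothetical names:

#include <stdlib.h>

struct cmdline {
	int nr;
	const char **argv;
};

static int cmdline_set(struct cmdline *c, int argc, const char *argv[])
{
	int i;

	c->argv = calloc(argc, sizeof(char *));	/* zeroed, no NULL terminator */
	if (!c->argv)
		return -1;

	for (i = 0; i < argc; i++) {
		c->argv[i] = argv[i];	/* borrow the pointer, as above */
		if (!c->argv[i])
			goto out_free;	/* single unwind path */
	}
	c->nr = argc;
	return 0;

out_free:
	free(c->argv);
	c->argv = NULL;
	return -1;
}
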
305 int perf_env__read_cpu_topology_map(struct perf_env *env)
309 if (env->cpu != NULL)
312 if (env->nr_cpus_avail == 0)
313 env->nr_cpus_avail = cpu__max_present_cpu().cpu;
315 nr_cpus = env->nr_cpus_avail;
319 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
320 if (env->cpu == NULL)
326 env->cpu[idx].core_id = cpu__get_core_id(cpu);
327 env->cpu[idx].socket_id = cpu__get_socket_id(cpu);
328 env->cpu[idx].die_id = cpu__get_die_id(cpu);
331 env->nr_cpus_avail = nr_cpus;
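
The per-CPU core/socket/die ids that perf_env__read_cpu_topology_map() stores ultimately come from sysfs. A self-contained sketch of that read, with simplified error handling (the sysfs layout is standard on Linux):

#include <stdio.h>

/* item: "core_id", "physical_package_id" (socket) or "die_id" */
static int read_topology_id(int cpu, const char *item)
{
	char path[128];
	FILE *f;
	int id = -1;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, item);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%d", &id) != 1)
		id = -1;
	fclose(f);
	return id;
}
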
335 int perf_env__read_pmu_mappings(struct perf_env *env)
348 env->nr_pmu_mappings = pmu_num;
361 env->pmu_mappings = strbuf_detach(&sb, NULL);
370 int perf_env__read_cpuid(struct perf_env *env)
378 free(env->cpuid);
379 env->cpuid = strdup(cpuid);
380 if (env->cpuid == NULL)
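
perf_env__read_cpuid() refreshes a cached string: free the previous value first, then strdup the new one, so repeated reads never leak. Sketch, with a hypothetical get_id_string() standing in for the arch-specific cpuid helper:

#include <stdlib.h>
#include <string.h>

static char *cached_id;

/* hypothetical producer; perf fills a stack buffer via an arch helper */
static const char *get_id_string(void)
{
	return "GenuineIntel-6-55-4";	/* example value only */
}

static int refresh_cached_id(void)
{
	free(cached_id);	/* drop any previous value: no leak on re-read */
	cached_id = strdup(get_id_string());
	return cached_id ? 0 : -1;
}
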
385 static int perf_env__read_arch(struct perf_env *env)
389 if (env->arch)
393 env->arch = strdup(uts.machine);
395 return env->arch ? 0 : -ENOMEM;
398 static int perf_env__read_nr_cpus_avail(struct perf_env *env)
400 if (env->nr_cpus_avail == 0)
401 env->nr_cpus_avail = cpu__max_present_cpu().cpu;
403 return env->nr_cpus_avail ? 0 : -ENOENT;
406 const char *perf_env__raw_arch(struct perf_env *env)
408 return env && !perf_env__read_arch(env) ? env->arch : "unknown";
411 int perf_env__nr_cpus_avail(struct perf_env *env)
413 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
455 const char *perf_env__arch(struct perf_env *env)
459 if (!env || !env->arch) { /* Assume local operation */
465 arch_name = env->arch;
470 const char *perf_env__cpuid(struct perf_env *env)
474 if (!env || !env->cpuid) { /* Assume local operation */
475 status = perf_env__read_cpuid(env);
480 return env->cpuid;
483 int perf_env__nr_pmu_mappings(struct perf_env *env)
487 if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
488 status = perf_env__read_pmu_mappings(env);
493 return env->nr_pmu_mappings;
496 const char *perf_env__pmu_mappings(struct perf_env *env)
500 if (!env || !env->pmu_mappings) { /* Assume local operation */
501 status = perf_env__read_pmu_mappings(env);
506 return env->pmu_mappings;
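
perf_env__arch(), perf_env__cpuid(), perf_env__nr_pmu_mappings() and perf_env__pmu_mappings() all share one shape: a missing field means "assume local operation", so the accessor populates it on demand and returns a safe fallback on failure. A self-contained sketch of that shape for the arch string, using uname(2) as perf_env__read_arch() does:

#include <sys/utsname.h>
#include <stdlib.h>
#include <string.h>

struct env_arch {
	char *arch;	/* NULL until read */
};

static int read_arch_into(struct env_arch *e)
{
	struct utsname uts;

	if (uname(&uts) < 0)
		return -1;
	e->arch = strdup(uts.machine);	/* e.g. "x86_64" or "aarch64" */
	return e->arch ? 0 : -1;
}

static const char *env_arch_str(struct env_arch *e)
{
	/* no recorded value: assume local operation and read it now */
	if (!e || (!e->arch && read_arch_into(e) < 0))
		return "unknown";	/* callers never see NULL */
	return e->arch;
}
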
509 int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu)
511 if (!env->nr_numa_map) {
515 for (i = 0; i < env->nr_numa_nodes; i++) {
516 nn = &env->numa_nodes[i];
526 env->numa_map = malloc(nr * sizeof(int));
527 if (!env->numa_map)
531 env->numa_map[i] = -1;
533 env->nr_numa_map = nr;
535 for (i = 0; i < env->nr_numa_nodes; i++) {
539 nn = &env->numa_nodes[i];
541 env->numa_map[tmp.cpu] = i;
545 return cpu.cpu >= 0 && cpu.cpu < env->nr_numa_map ? env->numa_map[cpu.cpu] : -1;
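
perf_env__numa_node() builds a flat cpu-to-node array on the first call, defaulting every slot to -1, so every later lookup is a single bounds-checked index instead of a walk over the node list. Sketch with hypothetical node data:

#include <stdlib.h>

struct numa_sketch {
	int node;		/* node id */
	int ncpus;
	const int *cpus;	/* cpu ids belonging to this node */
};

static int *cpu2node;
static int cpu2node_len;

static int numa_node_of(const struct numa_sketch *nodes, int nr_nodes, int cpu)
{
	if (!cpu2node) {	/* first call: build the reverse map once */
		int i, j, nr = 0;

		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].ncpus; j++)
				if (nodes[i].cpus[j] >= nr)
					nr = nodes[i].cpus[j] + 1;

		cpu2node = malloc(nr * sizeof(int));
		if (!cpu2node)
			return -1;
		for (i = 0; i < nr; i++)
			cpu2node[i] = -1;	/* -1 = cpu not in any node */
		for (i = 0; i < nr_nodes; i++)
			for (j = 0; j < nodes[i].ncpus; j++)
				cpu2node[nodes[i].cpus[j]] = nodes[i].node;
		cpu2node_len = nr;
	}
	return cpu >= 0 && cpu < cpu2node_len ? cpu2node[cpu] : -1;
}
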
548 char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
568 for (i = 0; i < env->nr_cpu_pmu_caps; i++) {
569 if (!strncmp(env->cpu_pmu_caps[i], cap_eq, cap_size + 1)) {
571 return &env->cpu_pmu_caps[i][cap_size + 1];
577 for (i = 0; i < env->nr_pmus_with_caps; i++) {
578 if (strcmp(env->pmu_caps[i].pmu_name, pmu_name))
581 ptr = env->pmu_caps[i].caps;
583 for (j = 0; j < env->pmu_caps[i].nr_caps; j++) {
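
perf_env__find_pmu_cap() stores capabilities as "name=value" strings and compares cap_size + 1 bytes, i.e. the name plus the '=', so a capability name that happens to be a prefix of another cannot match by accident. An equivalent standalone matcher:

#include <string.h>
#include <stddef.h>

static const char *find_cap(char * const *caps, int nr_caps, const char *name)
{
	size_t len = strlen(name);
	int i;

	for (i = 0; i < nr_caps; i++) {
		/* match "name" plus '=' so "branch" can't match "branches=..." */
		if (!strncmp(caps[i], name, len) && caps[i][len] == '=')
			return caps[i] + len + 1;	/* the value part */
	}
	return NULL;
}
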