Lines matching refs: env. Each entry below is prefixed with its line number in the source file.

4 #include "env.h"
18 void perf_env__insert_bpf_prog_info(struct perf_env *env,
21 down_write(&env->bpf_progs.lock);
22 __perf_env__insert_bpf_prog_info(env, info_node);
23 up_write(&env->bpf_progs.lock);
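
perf_env__insert_bpf_prog_info() is a thin locking wrapper: take the writer side of the bpf_progs rw_semaphore, call the lock-free __-prefixed worker, release. A minimal standalone sketch of the same pattern, using a POSIX pthread_rwlock_t in place of the kernel-tools rw_semaphore (all names here are illustrative, not perf's):

    #include <pthread.h>

    struct prog_node {
        int id;
        struct prog_node *next;
    };

    struct prog_store {
        pthread_rwlock_t lock;   /* stands in for env->bpf_progs.lock */
        struct prog_node *head;  /* stands in for the rb-tree root    */
    };

    /* Worker: caller must already hold the write lock (the __-prefixed
     * helpers in the real code carry the same contract). */
    static void store_insert_locked(struct prog_store *s, struct prog_node *n)
    {
        n->next = s->head;
        s->head = n;
    }

    /* Public wrapper: serializes writers against concurrent readers. */
    void store_insert(struct prog_store *s, struct prog_node *n)
    {
        pthread_rwlock_wrlock(&s->lock);   /* down_write() analogue */
        store_insert_locked(s, n);
        pthread_rwlock_unlock(&s->lock);   /* up_write() analogue   */
    }

Splitting the locked wrapper from the lock-free worker lets callers that already hold the lock (readers of a perf.data header, for instance) use the __ variant without self-deadlocking.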
26 void __perf_env__insert_bpf_prog_info(struct perf_env *env, struct bpf_prog_info_node *info_node)
33 p = &env->bpf_progs.infos.rb_node;
49 rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
50 env->bpf_progs.infos_cnt++;
53 struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
59 down_read(&env->bpf_progs.lock);
60 n = env->bpf_progs.infos.rb_node;
74 up_read(&env->bpf_progs.lock);
78 bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
82 down_write(&env->bpf_progs.lock);
83 ret = __perf_env__insert_btf(env, btf_node);
84 up_write(&env->bpf_progs.lock);
88 bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
95 p = &env->bpf_progs.btfs.rb_node;
111 rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
112 env->bpf_progs.btfs_cnt++;
116 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
120 down_read(&env->bpf_progs.lock);
121 res = __perf_env__find_btf(env, btf_id);
122 up_read(&env->bpf_progs.lock);
126 struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id)
131 n = env->bpf_progs.btfs.rb_node;
146 static void perf_env__purge_bpf(struct perf_env *env)
151 down_write(&env->bpf_progs.lock);
153 root = &env->bpf_progs.infos;
166 env->bpf_progs.infos_cnt = 0;
168 root = &env->bpf_progs.btfs;
180 env->bpf_progs.btfs_cnt = 0;
182 up_write(&env->bpf_progs.lock);
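
perf_env__purge_bpf() tears down both rb-trees under the write lock: every node is freed, each root is reset, and both counters go back to zero. The perf code iterates the rb-tree node by node with the rb-tree walker API; a recursive sketch of the same teardown on the toy tree from the sketch above:

    #include <stdlib.h>

    /* Free every node and reset the root, as the purge does for both
     * the infos and btfs trees before dropping the write lock. */
    void bst_purge(struct node **root)
    {
        struct node *n = *root;

        if (!n)
            return;
        bst_purge(&n->left);
        bst_purge(&n->right);
        free(n);
        *root = NULL;
    }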
185 void perf_env__exit(struct perf_env *env)
189 perf_env__purge_bpf(env);
190 perf_env__purge_cgroups(env);
191 zfree(&env->hostname);
192 zfree(&env->os_release);
193 zfree(&env->version);
194 zfree(&env->arch);
195 zfree(&env->cpu_desc);
196 zfree(&env->cpuid);
197 zfree(&env->cmdline);
198 zfree(&env->cmdline_argv);
199 zfree(&env->sibling_dies);
200 zfree(&env->sibling_cores);
201 zfree(&env->sibling_threads);
202 zfree(&env->pmu_mappings);
203 zfree(&env->cpu);
204 zfree(&env->cpu_pmu_caps);
205 zfree(&env->numa_map);
207 for (i = 0; i < env->nr_numa_nodes; i++)
208 perf_cpu_map__put(env->numa_nodes[i].map);
209 zfree(&env->numa_nodes);
211 for (i = 0; i < env->caches_cnt; i++)
212 cpu_cache_level__free(&env->caches[i]);
213 zfree(&env->caches);
215 for (i = 0; i < env->nr_memory_nodes; i++)
216 zfree(&env->memory_nodes[i].set);
217 zfree(&env->memory_nodes);
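
perf_env__exit() releases every dynamically allocated member with zfree(), which frees the pointee and then NULLs the pointer, so a stale pointer can never be freed or dereferenced twice. A minimal stand-in for the idiom (perf's real helper lives in the tools headers; this is just the shape):

    #include <stdlib.h>

    /* Free *pp and clear it, so the slot cannot be reused by mistake. */
    #define zfree(pp)            \
        do {                     \
            free(*(pp));         \
            *(pp) = NULL;        \
        } while (0)

    /* usage, as in perf_env__exit():  zfree(&env->hostname); */

Note that the arrays with per-element owned resources (numa_nodes, caches, memory_nodes) each get an element-wise cleanup loop before the containing array itself is zfree'd.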
220 void perf_env__init(struct perf_env *env)
222 env->bpf_progs.infos = RB_ROOT;
223 env->bpf_progs.btfs = RB_ROOT;
224 init_rwsem(&env->bpf_progs.lock);
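
perf_env__init() only prepares the BPF bookkeeping: both roots become empty (RB_ROOT) and the rw_semaphore is initialized before any insert can race. The matching init for the pthread sketch earlier would be:

    /* Pairing init for the store sketch above (illustrative names). */
    void store_init(struct prog_store *s)
    {
        s->head = NULL;                        /* RB_ROOT analogue   */
        pthread_rwlock_init(&s->lock, NULL);   /* init_rwsem analogue */
    }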
227 int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
232 env->cmdline_argv = calloc(argc, sizeof(char *));
233 if (env->cmdline_argv == NULL)
241 env->cmdline_argv[i] = argv[i];
242 if (env->cmdline_argv[i] == NULL)
246 env->nr_cmdline = argc;
250 zfree(&env->cmdline_argv);
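
perf_env__set_cmdline() allocates a pointer array with calloc(), stores each argv entry (the fragments show a direct pointer store, so the caller's argv must outlive the env), and on failure unwinds through zfree(&env->cmdline_argv). A sketch of the same shape as a deep-copying variant, which is safer when the source argv is not guaranteed to stay alive (function name and deep copy are this sketch's choices, not perf's):

    #include <stdlib.h>
    #include <string.h>

    /* Deep-copy an argv[] vector; returns NULL on allocation failure. */
    char **copy_cmdline(int argc, const char *argv[])
    {
        char **out = calloc(argc, sizeof(char *));
        int i;

        if (!out)
            return NULL;
        for (i = 0; i < argc; i++) {
            out[i] = strdup(argv[i]);
            if (!out[i]) {
                while (i--)          /* unwind already-copied entries */
                    free(out[i]);
                free(out);
                return NULL;
            }
        }
        return out;
    }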
255 int perf_env__read_cpu_topology_map(struct perf_env *env)
259 if (env->cpu != NULL)
262 if (env->nr_cpus_avail == 0)
263 env->nr_cpus_avail = cpu__max_present_cpu();
265 nr_cpus = env->nr_cpus_avail;
269 env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
270 if (env->cpu == NULL)
274 env->cpu[cpu].core_id = cpu_map__get_core_id(cpu);
275 env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
276 env->cpu[cpu].die_id = cpu_map__get_die_id(cpu);
279 env->nr_cpus_avail = nr_cpus;
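
perf_env__read_cpu_topology_map() is lazily idempotent: if env->cpu is already populated it returns immediately, otherwise it sizes the array from the number of present CPUs and fills in core, socket, and die ids per CPU via the cpu_map helpers. Those helpers ultimately read standard sysfs topology attributes; a standalone sketch of that read (sysfs layout is standard Linux, the helper name is this sketch's):

    #include <stdio.h>

    /* Read one integer topology attribute for a CPU from sysfs.
     * Returns -1 if the file is missing or unparsable. */
    static int read_topology_attr(int cpu, const char *attr)
    {
        char path[128];
        FILE *f;
        int val = -1;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%d/topology/%s", cpu, attr);
        f = fopen(path, "r");
        if (!f)
            return -1;
        if (fscanf(f, "%d", &val) != 1)
            val = -1;
        fclose(f);
        return val;
    }

    /* e.g. read_topology_attr(0, "core_id"),
     *      read_topology_attr(0, "physical_package_id") */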
283 int perf_env__read_cpuid(struct perf_env *env)
291 free(env->cpuid);
292 env->cpuid = strdup(cpuid);
293 if (env->cpuid == NULL)
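
perf_env__read_cpuid() replaces an owned string: free the previous value before installing a fresh strdup() copy, and report out-of-memory when the copy fails. The pattern in isolation (helper name hypothetical):

    #include <stdlib.h>
    #include <string.h>

    /* Replace an owned string slot: free the old value so repeated
     * reads do not leak, then keep a private copy of the new one. */
    int set_owned_str(char **slot, const char *val)
    {
        free(*slot);
        *slot = strdup(val);
        return *slot ? 0 : -1;   /* mirrors the ENOMEM check above */
    }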
298 static int perf_env__read_arch(struct perf_env *env)
302 if (env->arch)
306 env->arch = strdup(uts.machine);
308 return env->arch ? 0 : -ENOMEM;
311 static int perf_env__read_nr_cpus_avail(struct perf_env *env)
313 if (env->nr_cpus_avail == 0)
314 env->nr_cpus_avail = cpu__max_present_cpu();
316 return env->nr_cpus_avail ? 0 : -ENOENT;
319 const char *perf_env__raw_arch(struct perf_env *env)
321 return env && !perf_env__read_arch(env) ? env->arch : "unknown";
324 int perf_env__nr_cpus_avail(struct perf_env *env)
326 return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
368 const char *perf_env__arch(struct perf_env *env)
372 if (!env || !env->arch) { /* Assume local operation */
378 arch_name = env->arch;
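
The accessor family here shares one shape: try a lazy read that caches into the env, then return the cached field or a safe fallback ("unknown", 0). perf_env__arch() additionally assumes local operation when no recorded architecture exists and falls back to uname(). A self-contained sketch of both halves (struct and function names are illustrative, not perf's):

    #include <sys/utsname.h>
    #include <stdlib.h>
    #include <string.h>

    struct sys_env { char *arch; };

    /* Fill e->arch from the running system on first use. */
    static int env_read_arch(struct sys_env *e)
    {
        struct utsname uts;

        if (e->arch)
            return 0;               /* already cached */
        if (uname(&uts) < 0)
            return -1;
        e->arch = strdup(uts.machine);
        return e->arch ? 0 : -1;
    }

    /* Accessor never fails: lazy-read, else a well-known fallback,
     * matching the env && !read(env) ? field : fallback shape above. */
    const char *env_raw_arch(struct sys_env *e)
    {
        return e && !env_read_arch(e) ? e->arch : "unknown";
    }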
384 int perf_env__numa_node(struct perf_env *env, int cpu)
386 if (!env->nr_numa_map) {
390 for (i = 0; i < env->nr_numa_nodes; i++) {
391 nn = &env->numa_nodes[i];
401 env->numa_map = malloc(nr * sizeof(int));
402 if (!env->numa_map)
406 env->numa_map[i] = -1;
408 env->nr_numa_map = nr;
410 for (i = 0; i < env->nr_numa_nodes; i++) {
413 nn = &env->numa_nodes[i];
415 env->numa_map[j] = i;
419 return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
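
perf_env__numa_node() builds its reverse map once, on first call: size a flat cpu-to-node array from the highest CPU id in any node's map, prefill it with -1 so out-of-range and unlisted CPUs answer "no node", then stamp each node's CPUs with the node index. A standalone sketch under the assumption that each node is just an array of CPU ids (perf uses perf_cpu_map and its max/iterate helpers):

    #include <stdlib.h>

    struct numa_node_s {            /* illustrative stand-in */
        int nr_cpus;
        int *cpus;
    };

    /* Build cpu -> node index; stores the map size via *nr_out,
     * returns NULL on allocation failure. Assumes at least one CPU. */
    int *build_numa_map(struct numa_node_s *nodes, int nr_nodes, int *nr_out)
    {
        int i, j, nr = 0;
        int *map;

        for (i = 0; i < nr_nodes; i++)
            for (j = 0; j < nodes[i].nr_cpus; j++)
                if (nodes[i].cpus[j] >= nr)
                    nr = nodes[i].cpus[j] + 1;

        map = malloc(nr * sizeof(int));
        if (!map)
            return NULL;
        for (i = 0; i < nr; i++)
            map[i] = -1;            /* holes answer -1, like perf */

        for (i = 0; i < nr_nodes; i++)
            for (j = 0; j < nodes[i].nr_cpus; j++)
                map[nodes[i].cpus[j]] = i;

        *nr_out = nr;
        return map;
    }

After the map exists, every lookup is the O(1) bounds-checked index seen on the final line of the function.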