Lines matching references to "env" in tools/perf/util/env.c (cross-reference listing; source lines that do not mention env are elided below as "...").
void perf_env__insert_bpf_prog_info(struct perf_env *env,
                                    struct bpf_prog_info_node *info_node)
{
        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.infos.rb_node;
        ...
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);
        env->bpf_progs.infos_cnt++;
        ...
        up_write(&env->bpf_progs.lock);
}
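The listing elides the descent between taking the write lock and linking the node. A minimal sketch of what that walk presumably looks like, keyed by BPF program id (the key expression info_linear->info.id and the duplicate-drop path are assumptions based on the usual <linux/rbtree.h> insert idiom, not shown above):

        __u32 prog_id = info_node->info_linear->info.id;  /* assumed key */
        struct rb_node **p = &env->bpf_progs.infos.rb_node;
        struct rb_node *parent = NULL;

        while (*p != NULL) {
                struct bpf_prog_info_node *node;

                parent = *p;
                node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
                if (prog_id < node->info_linear->info.id)
                        p = &(*p)->rb_left;
                else if (prog_id > node->info_linear->info.id)
                        p = &(*p)->rb_right;
                else
                        goto out;       /* duplicate id: keep the existing node */
        }
        rb_link_node(&info_node->rb_node, parent, p);
        rb_insert_color(&info_node->rb_node, &env->bpf_progs.infos);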
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id)
{
        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.infos.rb_node;
        ...
        up_read(&env->bpf_progs.lock);
}
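A hypothetical caller of the pair above; it assumes the find key is the program id, mirroring perf_env__find_btf() further down:

        struct bpf_prog_info_node *node;

        perf_env__insert_bpf_prog_info(env, info_node);    /* takes the write lock */

        node = perf_env__find_bpf_prog_info(env, prog_id); /* takes the read lock */
        if (node == NULL)
                pr_debug("no BPF prog %u recorded in this perf_env\n", prog_id);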
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node)
{
        down_write(&env->bpf_progs.lock);
        p = &env->bpf_progs.btfs.rb_node;
        ...
        rb_insert_color(&btf_node->rb_node, &env->bpf_progs.btfs);
        env->bpf_progs.btfs_cnt++;
        ...
        up_write(&env->bpf_progs.lock);
}
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id)
{
        down_read(&env->bpf_progs.lock);
        n = env->bpf_progs.btfs.rb_node;
        ...
        up_read(&env->bpf_progs.lock);
}
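Usage sketch for the BTF pair. perf_env__insert_btf() returns bool; the natural reading, though the excerpt does not show it, is that false signals a node with the same BTF id already exists, in which case the caller keeps ownership:

        /* hypothetical caller; the "false means duplicate" reading is assumed */
        if (!perf_env__insert_btf(env, btf_node))
                free(btf_node);                 /* not adopted by env */

        btf_node = perf_env__find_btf(env, btf_id);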
static void perf_env__purge_bpf(struct perf_env *env)
{
        down_write(&env->bpf_progs.lock);

        root = &env->bpf_progs.infos;
        ...
        env->bpf_progs.infos_cnt = 0;

        root = &env->bpf_progs.btfs;
        ...
        env->bpf_progs.btfs_cnt = 0;

        up_write(&env->bpf_progs.lock);
}
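The two elided stretches presumably walk each tree and free every node; a sketch of that teardown using the standard rbtree erase idiom (the per-node payload handling is an assumption):

        struct rb_node *next = rb_first(root);

        while (next) {
                struct bpf_prog_info_node *node;

                node = rb_entry(next, struct bpf_prog_info_node, rb_node);
                next = rb_next(&node->rb_node);
                rb_erase(&node->rb_node, root);
                free(node);             /* plus any payload the node owns */
        }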
When perf is built without BPF support, the same entry point is presumably an empty stub:

static void perf_env__purge_bpf(struct perf_env *env __maybe_unused)
{
}
void perf_env__exit(struct perf_env *env)
{
        perf_env__purge_bpf(env);
        perf_env__purge_cgroups(env);
        zfree(&env->hostname);
        zfree(&env->os_release);
        zfree(&env->version);
        zfree(&env->arch);
        zfree(&env->cpu_desc);
        zfree(&env->cpuid);
        zfree(&env->cmdline);
        zfree(&env->cmdline_argv);
        zfree(&env->sibling_dies);
        zfree(&env->sibling_cores);
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
        zfree(&env->cpu_pmu_caps);
        zfree(&env->numa_map);

        for (i = 0; i < env->nr_numa_nodes; i++)
                perf_cpu_map__put(env->numa_nodes[i].map);
        zfree(&env->numa_nodes);

        for (i = 0; i < env->caches_cnt; i++)
                cpu_cache_level__free(&env->caches[i]);
        zfree(&env->caches);

        for (i = 0; i < env->nr_memory_nodes; i++)
                zfree(&env->memory_nodes[i].set);
        zfree(&env->memory_nodes);

        for (i = 0; i < env->nr_hybrid_nodes; i++) {
                zfree(&env->hybrid_nodes[i].pmu_name);
                zfree(&env->hybrid_nodes[i].cpus);
        }
        zfree(&env->hybrid_nodes);

        for (i = 0; i < env->nr_hybrid_cpc_nodes; i++) {
                zfree(&env->hybrid_cpc_nodes[i].cpu_pmu_caps);
                zfree(&env->hybrid_cpc_nodes[i].pmu_name);
        }
        zfree(&env->hybrid_cpc_nodes);
}
void perf_env__init(struct perf_env *env)
{
        env->bpf_progs.infos = RB_ROOT;
        env->bpf_progs.btfs = RB_ROOT;
        init_rwsem(&env->bpf_progs.lock);
        env->kernel_is_64_bit = -1;
}
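A hypothetical lifecycle sketch tying init and exit together (starting from zeroed memory is an assumption; the listing only shows the fields that mention env):

        struct perf_env env;

        memset(&env, 0, sizeof(env));   /* assumed: counts and pointers start at 0 */
        perf_env__init(&env);           /* empty rb-trees, rwsem, kernel_is_64_bit = -1 */

        /* ... populate and query env ... */

        perf_env__exit(&env);           /* releases everything env owns */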
static void perf_env__init_kernel_mode(struct perf_env *env)
{
        const char *arch = perf_env__raw_arch(env);

        ...
        env->kernel_is_64_bit = 1;      /* arch names a 64-bit machine */
        ...
        env->kernel_is_64_bit = 0;      /* otherwise */
}
int perf_env__kernel_is_64_bit(struct perf_env *env)
{
        if (env->kernel_is_64_bit == -1)
                perf_env__init_kernel_mode(env);

        return env->kernel_is_64_bit;
}
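Hypothetical caller; -1 acts as a "not yet computed" sentinel, so the arch string is inspected at most once per env:

        if (perf_env__kernel_is_64_bit(env))
                pr_debug("profiling a 64-bit kernel\n");
        else
                pr_debug("profiling a 32-bit kernel\n");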
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
        env->cmdline_argv = calloc(argc, sizeof(char *));
        if (env->cmdline_argv == NULL)
                ...
        for (i = 0; i < argc; i++) {
                env->cmdline_argv[i] = argv[i];
                if (env->cmdline_argv[i] == NULL)
                        ...
        }

        env->nr_cmdline = argc;
        ...
        zfree(&env->cmdline_argv);      /* error path */
}
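Hypothetical caller recording the command line; the excerpt implies 0 on success and an error (presumably -ENOMEM) when the calloc fails, though the error constants are elided:

        const char *argv[] = { "perf", "record", "-a" };

        if (perf_env__set_cmdline(env, 3, argv))
                pr_err("failed to store the perf command line\n");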
int perf_env__read_cpu_topology_map(struct perf_env *env)
{
        if (env->cpu != NULL)
                return 0;               /* already read */

        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        nr_cpus = env->nr_cpus_avail;
        ...
        env->cpu = calloc(nr_cpus, sizeof(env->cpu[0]));
        if (env->cpu == NULL)
                ...
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                env->cpu[cpu].core_id   = cpu_map__get_core_id(cpu);
                env->cpu[cpu].socket_id = cpu_map__get_socket_id(cpu);
                env->cpu[cpu].die_id    = cpu_map__get_die_id(cpu);
        }

        env->nr_cpus_avail = nr_cpus;
        ...
}
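Hypothetical caller dumping the per-CPU topology once the lazy read has succeeded (field names are as in the excerpt; the int types are assumed):

        if (perf_env__read_cpu_topology_map(env) == 0) {
                for (int cpu = 0; cpu < env->nr_cpus_avail; cpu++)
                        pr_debug("cpu%d: socket %d die %d core %d\n", cpu,
                                 env->cpu[cpu].socket_id,
                                 env->cpu[cpu].die_id,
                                 env->cpu[cpu].core_id);
        }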
int perf_env__read_pmu_mappings(struct perf_env *env)
{
        ...
        env->nr_pmu_mappings = pmu_num;
        ...
        env->pmu_mappings = strbuf_detach(&sb, NULL);
        ...
}
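How the strbuf gets filled is elided. In the perf header format, pmu_mappings is, to the best of my reading, a run of nr_pmu_mappings records of the form "type:name", each terminated by a NUL; a hypothetical walker under that assumption:

        const char *p = env->pmu_mappings;

        for (int i = 0; i < env->nr_pmu_mappings; i++) {
                unsigned int type;
                char name[128];

                if (sscanf(p, "%u:%127s", &type, name) == 2)
                        pr_debug("pmu type %u -> %s\n", type, name);
                p += strlen(p) + 1;     /* step over the NUL separator */
        }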
int perf_env__read_cpuid(struct perf_env *env)
{
        ...
        free(env->cpuid);
        env->cpuid = strdup(cpuid);
        if (env->cpuid == NULL)
                ...
}
static int perf_env__read_arch(struct perf_env *env)
{
        if (env->arch)
                return 0;               /* cached */
        ...
        env->arch = strdup(uts.machine);

        return env->arch ? 0 : -ENOMEM;
}
static int perf_env__read_nr_cpus_avail(struct perf_env *env)
{
        if (env->nr_cpus_avail == 0)
                env->nr_cpus_avail = cpu__max_present_cpu();

        return env->nr_cpus_avail ? 0 : -ENOENT;
}
const char *perf_env__raw_arch(struct perf_env *env)
{
        return env && !perf_env__read_arch(env) ? env->arch : "unknown";
}
int perf_env__nr_cpus_avail(struct perf_env *env)
{
        return env && !perf_env__read_nr_cpus_avail(env) ? env->nr_cpus_avail : 0;
}
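Both accessors are NULL-safe and populate the field on first use, so a hypothetical caller needs no setup:

        /* safe even when env is NULL: falls back to "unknown" / 0 */
        pr_debug("arch %s, %d cpus available\n",
                 perf_env__raw_arch(env), perf_env__nr_cpus_avail(env));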
const char *perf_env__arch(struct perf_env *env)
{
        if (!env || !env->arch) { /* Assume local operation */
                ...
        } else
                arch_name = env->arch;
        ...
}
const char *perf_env__cpuid(struct perf_env *env)
{
        if (!env || !env->cpuid) { /* Assume local operation */
                status = perf_env__read_cpuid(env);
                ...
        }

        return env->cpuid;
}
int perf_env__nr_pmu_mappings(struct perf_env *env)
{
        if (!env || !env->nr_pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                ...
        }

        return env->nr_pmu_mappings;
}
const char *perf_env__pmu_mappings(struct perf_env *env)
{
        if (!env || !env->pmu_mappings) { /* Assume local operation */
                status = perf_env__read_pmu_mappings(env);
                ...
        }

        return env->pmu_mappings;
}
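The four accessors above share one shape: a field cached from a recorded perf.data header is returned as-is, and only a missing value triggers a read from the running machine ("local operation"). Hypothetical caller:

        const char *cpuid = perf_env__cpuid(env);
        int nr = perf_env__nr_pmu_mappings(env);

        pr_debug("cpuid %s, %d PMU mappings\n", cpuid ?: "unknown", nr);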
int perf_env__numa_node(struct perf_env *env, int cpu)
{
        if (!env->nr_numa_map) {
                /* first call: build the cpu -> node lookup table */
                for (i = 0; i < env->nr_numa_nodes; i++) {
                        nn = &env->numa_nodes[i];
                        ...             /* track the highest cpu number, nr */
                }
                ...
                env->numa_map = malloc(nr * sizeof(int));
                if (!env->numa_map)
                        ...
                for (i = 0; i < nr; i++)
                        env->numa_map[i] = -1;  /* missing cpus map to -1 */

                env->nr_numa_map = nr;

                for (i = 0; i < env->nr_numa_nodes; i++) {
                        nn = &env->numa_nodes[i];
                        ...             /* for every cpu j in nn->map */
                        env->numa_map[j] = i;
                }
        }

        return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
}
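Hypothetical caller mapping a sample's cpu to its NUMA node; the sample variable is illustrative only, and -1 means the cpu is unknown to the map:

        int node = perf_env__numa_node(env, sample->cpu);

        if (node >= 0)
                pr_debug("cpu %d sits on NUMA node %d\n", sample->cpu, node);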