Lines Matching defs:map

538 			  threads->map[i].pid);
1182 OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path",
1183 "path to perf_event_attr map"),
1305 * Calculate the cache instance ID from the map in
1309 static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map)
1312 struct perf_cpu_map *cpu_map = perf_cpu_map__new(map);
1315 * If the map contains no CPU, consider the current CPU to
1376 cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map);
1386 cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
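
The cpu__get_cache_id_from_map() hits above (source lines 1305-1386) point at how a cache instance ID is derived: the shared-CPU string of a cache level is parsed into a CPU map and an ID is taken from it, falling back to the current CPU when the map names no CPU (see the comment fragment at source line 1315). Below is a minimal standalone sketch of that idea; parse_first_cpu() and cache_id_from_map() are hypothetical stand-ins written for illustration, not perf's or libperf's API.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical helper: parse a sysfs-style shared_cpu_list string
 * such as "0-3,8" and return the lowest CPU it names, or -1 if the
 * string names no CPU at all.
 */
static int parse_first_cpu(const char *map)
{
	long first = -1;
	const char *p = map;

	while (*p) {
		char *end;
		long cpu = strtol(p, &end, 10);

		if (end == p)
			break;                  /* no digits left: stop parsing */
		if (first == -1 || cpu < first)
			first = cpu;
		p = end;
		if (*p == '-' || *p == ',')     /* skip range/list separators */
			p++;
	}
	return (int)first;
}

/*
 * Sketch of deriving a cache instance ID from a shared-CPU map:
 * use the lowest CPU in the map, or fall back to the current CPU
 * when the map is empty.
 */
static int cache_id_from_map(int cpu, const char *map)
{
	int id = parse_first_cpu(map);

	return id == -1 ? cpu : id;
}

int main(void)
{
	printf("%d\n", cache_id_from_map(5, "4-7"));  /* prints 4 */
	printf("%d\n", cache_id_from_map(5, ""));     /* prints 5 */
	return 0;
}
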
1490 if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
1491 config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
1493 id = config->cpus_aggr_map->map[cpu.cpu];
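
The cpus_aggr_map hits above (source lines 1490-1493) show a memoization pattern: the aggregation ID for a CPU is computed once via get_id() and stored in a per-CPU slot, so later lookups reuse the cached value. A standalone sketch of that pattern follows; get_id(), aggr_id_for_cpu() and the plain int array standing in for struct cpu_aggr_map are all hypothetical.

#include <stdio.h>

#define NR_CPUS 8
#define ID_EMPTY -1

/* Cached aggregation ID per CPU; ID_EMPTY means "not computed yet". */
static int cpus_aggr_map[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = ID_EMPTY,   /* GNU C range initializer */
};

/* Hypothetical (and deliberately noisy) ID computation. */
static int get_id(int cpu)
{
	printf("computing id for cpu %d\n", cpu);
	return cpu / 4;                   /* e.g. one ID per group of 4 CPUs */
}

/* Compute on first use, then serve the cached value. */
static int aggr_id_for_cpu(int cpu)
{
	if (cpus_aggr_map[cpu] == ID_EMPTY)
		cpus_aggr_map[cpu] = get_id(cpu);
	return cpus_aggr_map[cpu];
}

int main(void)
{
	printf("%d\n", aggr_id_for_cpu(5));  /* computes, prints 1 */
	printf("%d\n", aggr_id_for_cpu(5));  /* served from the cache */
	return 0;
}
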
1599 pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
1615 stat_config.aggr_map->map[s] = id;
1633 static void cpu_aggr_map__delete(struct cpu_aggr_map *map)
1635 if (map) {
1636 WARN_ONCE(refcount_read(&map->refcnt) != 0,
1638 free(map);
1642 static void cpu_aggr_map__put(struct cpu_aggr_map *map)
1644 if (map && refcount_dec_and_test(&map->refcnt))
1645 cpu_aggr_map__delete(map);
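
cpu_aggr_map__put() and cpu_aggr_map__delete() above (source lines 1633-1645) follow the usual refcounting convention: put() drops a reference and only the last put frees the object, while delete() warns if it ever runs with outstanding references. Here is a self-contained sketch of that pattern using C11 atomics in place of perf's refcount_t and WARN_ONCE(); the aggr_map type and its functions are illustrative, not perf's.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical refcounted map; the payload is irrelevant to the pattern. */
struct aggr_map {
	atomic_int refcnt;
	int nr;
};

static struct aggr_map *aggr_map__new(int nr)
{
	struct aggr_map *map = calloc(1, sizeof(*map));

	if (map) {
		atomic_init(&map->refcnt, 1);  /* caller owns one reference */
		map->nr = nr;
	}
	return map;
}

static void aggr_map__delete(struct aggr_map *map)
{
	if (map) {
		/* Stand-in for WARN_ONCE(): freeing with live references is a bug. */
		if (atomic_load(&map->refcnt) != 0)
			fprintf(stderr, "warning: deleting map with refcnt != 0\n");
		free(map);
	}
}

static struct aggr_map *aggr_map__get(struct aggr_map *map)
{
	if (map)
		atomic_fetch_add(&map->refcnt, 1);
	return map;
}

static void aggr_map__put(struct aggr_map *map)
{
	/* Free only when the last reference is dropped. */
	if (map && atomic_fetch_sub(&map->refcnt, 1) == 1)
		aggr_map__delete(map);
}

int main(void)
{
	struct aggr_map *map = aggr_map__new(4);
	struct aggr_map *alias = aggr_map__get(map);

	aggr_map__put(alias);  /* refcnt 2 -> 1, not freed yet */
	aggr_map__put(map);    /* refcnt 1 -> 0, freed here */
	return 0;
}
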
1704 * the cpu in the map. Since building the map is expensive, do
1710 cpu_map = perf_cpu_map__new(caches[i].map);
1716 id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map);
1904 stat_config.aggr_map->map[s] = id;
1915 pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]);
2340 pr_warning("Extra thread map event, ignoring.\n");
2360 pr_warning("Extra cpu map event, ignoring.\n");
2740 perror("failed to parse CPUs map");