Diffstat (limited to 'tools/perf/util/symbol.c')
-rw-r--r--	tools/perf/util/symbol.c	121
1 file changed, 116 insertions(+), 5 deletions(-)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 5cbad55cd99d..ae2ce255e848 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -25,7 +25,7 @@
 #include "namespaces.h"
 #include "header.h"
 #include "path.h"
-#include "sane_ctype.h"
+#include <linux/ctype.h>
 
 #include <elf.h>
 #include <limits.h>
@@ -1166,6 +1166,85 @@ static int kcore_mapfn(u64 start, u64 len, u64 pgoff, void *data)
 	return 0;
 }
 
+/*
+ * Merges map into map_groups by splitting the new map
+ * within the existing map regions.
+ */
+int map_groups__merge_in(struct map_groups *kmaps, struct map *new_map)
+{
+	struct map *old_map;
+	LIST_HEAD(merged);
+
+	for (old_map = map_groups__first(kmaps); old_map;
+	     old_map = map_groups__next(old_map)) {
+
+		/* no overlap with this one */
+		if (new_map->end < old_map->start ||
+		    new_map->start >= old_map->end)
+			continue;
+
+		if (new_map->start < old_map->start) {
+			/*
+			 * |new......
+			 *       |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 * |new......|     -> |new..|
+				 *       |old....| ->       |old....|
+				 */
+				new_map->end = old_map->start;
+			} else {
+				/*
+				 * |new.............| -> |new..|       |new..|
+				 *       |old....|    ->       |old....|
+				 */
+				struct map *m = map__clone(new_map);
+
+				if (!m)
+					return -ENOMEM;
+
+				m->end = old_map->start;
+				list_add_tail(&m->node, &merged);
+				new_map->start = old_map->end;
+			}
+		} else {
+			/*
+			 *      |new......
+			 * |old....
+			 */
+			if (new_map->end < old_map->end) {
+				/*
+				 *      |new..|   -> x
+				 * |old.........| -> |old.........|
+				 */
+				map__put(new_map);
+				new_map = NULL;
+				break;
+			} else {
+				/*
+				 *      |new......| ->         |new...|
+				 * |old....|        -> |old....|
+				 */
+				new_map->start = old_map->end;
+			}
+		}
+	}
+
+	while (!list_empty(&merged)) {
+		old_map = list_entry(merged.next, struct map, node);
+		list_del_init(&old_map->node);
+		map_groups__insert(kmaps, old_map);
+		map__put(old_map);
+	}
+
+	if (new_map) {
+		map_groups__insert(kmaps, new_map);
+		map__put(new_map);
+	}
+	return 0;
+}
+
 static int dso__load_kcore(struct dso *dso, struct map *map,
 			   const char *kallsyms_filename)
 {
@@ -1222,7 +1301,12 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 	while (old_map) {
 		struct map *next = map_groups__next(old_map);
 
-		if (old_map != map)
+		/*
+		 * We need to preserve eBPF maps even if they are
+		 * covered by kcore, because we need to access
+		 * eBPF dso for source data.
+		 */
+		if (old_map != map && !__map__is_bpf_prog(old_map))
 			map_groups__remove(kmaps, old_map);
 		old_map = next;
 	}
@@ -1256,11 +1340,16 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 			map_groups__remove(kmaps, map);
 			map_groups__insert(kmaps, map);
 			map__put(map);
+			map__put(new_map);
 		} else {
-			map_groups__insert(kmaps, new_map);
+			/*
+			 * Merge kcore map into existing maps,
+			 * and ensure that current maps (eBPF)
+			 * stay intact.
+			 */
+			if (map_groups__merge_in(kmaps, new_map))
+				goto out_err;
 		}
-
-		map__put(new_map);
 	}
 
 	if (machine__is(machine, "x86_64")) {
@@ -2262,3 +2351,25 @@ struct mem_info *mem_info__new(void)
 		refcount_set(&mi->refcnt, 1);
 	return mi;
 }
+
+struct block_info *block_info__get(struct block_info *bi)
+{
+	if (bi)
+		refcount_inc(&bi->refcnt);
+	return bi;
+}
+
+void block_info__put(struct block_info *bi)
+{
+	if (bi && refcount_dec_and_test(&bi->refcnt))
+		free(bi);
+}
+
+struct block_info *block_info__new(void)
+{
+	struct block_info *bi = zalloc(sizeof(*bi));
+
+	if (bi)
+		refcount_set(&bi->refcnt, 1);
+	return bi;
+}
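
For reference, the interval splitting that map_groups__merge_in() performs can be modeled outside the perf tree. The program below is a minimal sketch, not perf code: it assumes half-open [start, end) ranges, and struct range, split_against() and main() are hypothetical stand-ins for perf's struct map and map_groups machinery. Only the overlap test and the four splitting cases mirror the patch.

#include <stdio.h>

struct range {
	unsigned long long start, end;	/* half-open: [start, end) */
};

/*
 * Split "new" against one existing "old" range, storing the
 * surviving pieces of "new" in out[]. Returns how many pieces
 * survive (0, 1 or 2), mirroring the four cases in the patch.
 */
static int split_against(struct range new, struct range old,
			 struct range out[2])
{
	/* no overlap with this one: new survives untouched */
	if (new.end < old.start || new.start >= old.end) {
		out[0] = new;
		return 1;
	}

	if (new.start < old.start) {
		if (new.end < old.end) {
			/* new overhangs old on the left: trim new's tail */
			out[0] = (struct range){ new.start, old.start };
			return 1;
		}
		/* new covers old entirely: split new into two pieces */
		out[0] = (struct range){ new.start, old.start };
		out[1] = (struct range){ old.end, new.end };
		return 2;
	}

	if (new.end < old.end)
		return 0;	/* new fully inside old: drop it */

	/* new overhangs old on the right: trim new's head */
	out[0] = (struct range){ old.end, new.end };
	return 1;
}

int main(void)
{
	struct range old = { 100, 200 };	/* e.g. an existing eBPF map */
	struct range new = {  50, 250 };	/* e.g. the incoming kcore map */
	struct range out[2];
	int i, n = split_against(new, old, out);

	for (i = 0; i < n; i++)
		printf("kept [%llu, %llu)\n", out[i].start, out[i].end);
	return 0;
}

With old = [100, 200) and new = [50, 250), this prints "kept [50, 100)" and "kept [200, 250)": the "new covers old" case in the diagrams above, and the behavior that keeps an eBPF map's address range out of the newly inserted kcore map.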
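
The block_info helpers at the end of the diff follow the same refcounting pattern as mem_info__new() just above them. The standalone sketch below illustrates only that lifecycle; struct blk, the blk__* names and the plain int counter are stand-ins (the real code uses refcount_t and zalloc()) so the example compiles on its own.

#include <stdio.h>
#include <stdlib.h>

struct blk {
	int refcnt;				/* stands in for refcount_t */
};

static struct blk *blk__new(void)
{
	struct blk *b = calloc(1, sizeof(*b));	/* like zalloc() */

	if (b)
		b->refcnt = 1;			/* creator holds one reference */
	return b;
}

static struct blk *blk__get(struct blk *b)
{
	if (b)
		b->refcnt++;
	return b;
}

static void blk__put(struct blk *b)
{
	if (b && --b->refcnt == 0) {		/* last reference dropped */
		printf("freed\n");
		free(b);
	}
}

int main(void)
{
	struct blk *b = blk__new();		/* refcnt == 1 */
	struct blk *ref = blk__get(b);		/* refcnt == 2, second owner */

	blk__put(ref);				/* refcnt == 1, still alive */
	blk__put(b);				/* refcnt == 0, prints "freed" */
	return 0;
}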