Diffstat (limited to 'tools/perf')
-rw-r--r-- | tools/perf/builtin-buildid-cache.c  | 33
-rw-r--r-- | tools/perf/builtin-record.c         | 10
-rw-r--r-- | tools/perf/builtin-timechart.c      |  3
-rw-r--r-- | tools/perf/config/Makefile          |  2
-rw-r--r-- | tools/perf/design.txt               |  1
-rw-r--r-- | tools/perf/perf.h                   | 11
-rw-r--r-- | tools/perf/tests/vmlinux-kallsyms.c | 10
-rw-r--r-- | tools/perf/util/event.c             | 36
-rw-r--r-- | tools/perf/util/event.h             |  6
-rw-r--r-- | tools/perf/util/evlist.c            |  7
-rw-r--r-- | tools/perf/util/evsel.c             |  1
-rw-r--r-- | tools/perf/util/header.c            |  2
-rw-r--r-- | tools/perf/util/include/asm/hash.h  |  6
-rw-r--r-- | tools/perf/util/machine.c           | 42
-rw-r--r-- | tools/perf/util/machine.h           |  2
-rw-r--r-- | tools/perf/util/map.c               | 12
-rw-r--r-- | tools/perf/util/map.h               |  1
-rw-r--r-- | tools/perf/util/parse-events.c      |  2
-rw-r--r-- | tools/perf/util/pmu.c               | 24
-rw-r--r-- | tools/perf/util/pmu.h               |  2
-rw-r--r-- | tools/perf/util/session.c           | 10
-rw-r--r-- | tools/perf/util/symbol-elf.c        |  4
-rw-r--r-- | tools/perf/util/symbol.c            | 65
23 files changed, 212 insertions, 80 deletions
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86161d8..b22dbb16f877 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
         return 0;
 }
 
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+        char from[PATH_MAX];
+        char to[PATH_MAX];
+        const char *name;
+        u64 addr1 = 0, addr2 = 0;
+        int i;
+
+        scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+        scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+        for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+                addr1 = kallsyms__get_function_start(from, name);
+                if (addr1)
+                        break;
+        }
+
+        if (name)
+                addr2 = kallsyms__get_function_start(to, name);
+
+        return addr1 == addr2;
+}
+
 static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
                                           size_t to_dir_sz)
 {
         char from[PATH_MAX];
         char to[PATH_MAX];
+        char to_subdir[PATH_MAX];
         struct dirent *dent;
         int ret = -1;
         DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
                         continue;
                 scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
                           dent->d_name);
-                if (!compare_proc_modules(from, to)) {
-                        scnprintf(to, sizeof(to), "%s/%s", to_dir,
-                                  dent->d_name);
-                        strlcpy(to_dir, to, to_dir_sz);
+                scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+                          to_dir, dent->d_name);
+                if (!compare_proc_modules(from, to) &&
+                    same_kallsyms_reloc(from_dir, to_subdir)) {
+                        strlcpy(to_dir, to_subdir, to_dir_sz);
                         ret = 0;
                         break;
                 }
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf16fa8..af47531b82ec 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
          * have no _text sometimes.
          */
         err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                 machine, "_text");
-        if (err < 0)
-                err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                         machine, "_stext");
+                                                 machine);
         if (err < 0)
                 pr_err("Couldn't record guest kernel [%d]'s reference"
                        " relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
         }
 
         err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                 machine, "_text");
-        if (err < 0)
-                err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
-                                                         machine, "_stext");
+                                                 machine);
         if (err < 0)
                 pr_err("Couldn't record kernel reference relocation symbol\n"
                        "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
index 652af0b66a62..25526d6eae59 100644
--- a/tools/perf/builtin-timechart.c
+++ b/tools/perf/builtin-timechart.c
@@ -1045,6 +1045,9 @@ static void write_svg_file(struct timechart *tchart, const char *filename)
                 thresh /= 10;
         } while (!process_filter && thresh && count < tchart->proc_num);
 
+        if (!tchart->proc_num)
+                count = 0;
+
         open_svg(filename, tchart->numcpus, count, tchart->first_time,
                  tchart->last_time);
         svg_time_grid();
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index d604e50fc167..c48d44958172 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -600,5 +600,5 @@ perfexec_instdir_SQ = $(subst ','\'',$(perfexec_instdir))
 # Otherwise we install plugins into the global $(libdir).
 ifdef DESTDIR
 plugindir=$(libdir)/traceevent/plugins
-plugindir_SQ= $(subst ','\'',$(prefix)/$(plugindir))
+plugindir_SQ= $(subst ','\'',$(plugindir))
 endif
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0cace85..63a0e6f04a01 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
 will need at least this:
         - asm/perf_event.h - a basic stub will suffice at first
         - support for atomic64 types (and associated helper functions)
-        - set_perf_event_pending() implemented
 
 If your architecture does have hardware capabilities, you can override the
 weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 3c2f213e979d..e84fa26bc1be 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
 
 #ifdef __aarch64__
 #define mb()            asm volatile("dmb ish" ::: "memory")
-#define wmb()           asm volatile("dmb ishld" ::: "memory")
-#define rmb()           asm volatile("dmb ishst" ::: "memory")
+#define wmb()           asm volatile("dmb ishst" ::: "memory")
+#define rmb()           asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()     asm volatile("yield" ::: "memory")
 #endif
 
@@ -132,6 +132,13 @@
 #define CPUINFO_PROC    "CPU"
 #endif
 
+#ifdef __xtensa__
+#define mb()            asm volatile("memw" ::: "memory")
+#define wmb()           asm volatile("memw" ::: "memory")
+#define rmb()           asm volatile("" ::: "memory")
+#define CPUINFO_PROC    "core ID"
+#endif
+
 #define barrier() asm volatile ("" ::: "memory")
 
 #ifndef cpu_relax
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13edcbc17..3d9088003a5b 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
         struct map *kallsyms_map, *vmlinux_map;
         struct machine kallsyms, vmlinux;
         enum map_type type = MAP__FUNCTION;
-        struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
         u64 mem_start, mem_end;
 
         /*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
          */
         kallsyms_map = machine__kernel_map(&kallsyms, type);
 
-        sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
-        if (sym == NULL) {
-                pr_debug("dso__find_symbol_by_name ");
-                goto out;
-        }
-
-        ref_reloc_sym.addr = UM(sym->start);
-
         /*
          * Step 5:
          *
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
         }
 
         vmlinux_map = machine__kernel_map(&vmlinux, type);
-        map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
 
         /*
          * Step 6:
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f04772..b0f3ca850e9e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
         return 1;
 }
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+                                 const char *symbol_name)
+{
+        struct process_symbol_args args = { .name = symbol_name, };
+
+        if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+                return 0;
+
+        return args.start;
+}
+
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                        perf_event__handler_t process,
-                                       struct machine *machine,
-                                       const char *symbol_name)
+                                       struct machine *machine)
 {
         size_t size;
-        const char *filename, *mmap_name;
-        char path[PATH_MAX];
+        const char *mmap_name;
         char name_buff[PATH_MAX];
         struct map *map;
+        struct kmap *kmap;
         int err;
         /*
          * We should get this from /sys/kernel/sections/.text, but till that is
          * available use this, and after it is use this as a fallback for older
          * kernels.
          */
-        struct process_symbol_args args = { .name = symbol_name, };
         union perf_event *event = zalloc((sizeof(event->mmap) +
                                           machine->id_hdr_size));
 
         if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                  * see kernel/perf_event.c __perf_event_mmap
                  */
                 event->header.misc = PERF_RECORD_MISC_KERNEL;
-                filename = "/proc/kallsyms";
         } else {
                 event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
-                if (machine__is_default_guest(machine))
-                        filename = (char *) symbol_conf.default_guest_kallsyms;
-                else {
-                        sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-                        filename = path;
-                }
-        }
-
-        if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
-                free(event);
-                return -ENOENT;
         }
 
         map = machine->vmlinux_maps[MAP__FUNCTION];
+        kmap = map__kmap(map);
         size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
-                        "%s%s", mmap_name, symbol_name) + 1;
+                        "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
         size = PERF_ALIGN(size, sizeof(u64));
         event->mmap.header.type = PERF_RECORD_MMAP;
         event->mmap.header.size = (sizeof(event->mmap) -
                         (sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
-        event->mmap.pgoff = args.start;
+        event->mmap.pgoff = kmap->ref_reloc_sym->addr;
         event->mmap.start = map->start;
         event->mmap.len   = map->end - event->mmap.start;
         event->mmap.pid   = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e219be21..851fa06f4a42 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                                    struct machine *machine, bool mmap_data);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                        perf_event__handler_t process,
-                                       struct machine *machine,
-                                       const char *symbol_name);
+                                       struct machine *machine);
 
 int perf_event__synthesize_modules(struct perf_tool *tool,
                                    perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+                                 const char *symbol_name);
+
 #endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 40bd2c04df8a..59ef2802fcf6 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1003,9 +1003,12 @@ void perf_evlist__close(struct perf_evlist *evlist)
         struct perf_evsel *evsel;
         int ncpus = cpu_map__nr(evlist->cpus);
         int nthreads = thread_map__nr(evlist->threads);
+        int n;
 
-        evlist__for_each_reverse(evlist, evsel)
-                perf_evsel__close(evsel, ncpus, nthreads);
+        evlist__for_each_reverse(evlist, evsel) {
+                n = evsel->cpus ? evsel->cpus->nr : ncpus;
+                perf_evsel__close(evsel, n, nthreads);
+        }
 }
 
 int perf_evlist__open(struct perf_evlist *evlist)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 22e18a26b7e6..55407c594b87 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1081,7 +1081,6 @@ void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
 
         perf_evsel__close_fd(evsel, ncpus, nthreads);
         perf_evsel__free_fd(evsel);
-        evsel->fd = NULL;
 }
 
 static struct {
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index bb3e0ede6183..893f8e2df928 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -930,7 +930,7 @@ static int write_topo_node(int fd, int node)
                 /* skip over invalid lines */
                 if (!strchr(buf, ':'))
                         continue;
-                if (sscanf(buf, "%*s %*d %s %"PRIu64, field, &mem) != 2)
+                if (sscanf(buf, "%*s %*d %31s %"PRIu64, field, &mem) != 2)
                         goto done;
                 if (!strcmp(field, "MemTotal:"))
                         mem_total = mem;
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded74590b92f..c872991e0f65 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
         return 1;
 }
 
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+                                           size_t bufsz)
+{
+        if (machine__is_default_guest(machine))
+                scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+        else
+                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
 /* Figure out the start address of kernel map from /proc/kallsyms */
 static u64 machine__get_kernel_start_addr(struct machine *machine)
 {
-        const char *filename;
-        char path[PATH_MAX];
+        char filename[PATH_MAX];
         struct process_args args;
 
-        if (machine__is_default_guest(machine))
-                filename = (char *)symbol_conf.default_guest_kallsyms;
-        else {
-                sprintf(path, "%s/proc/kallsyms", machine->root_dir);
-                filename = path;
-        }
+        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                 return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
         return 0;
 }
 
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
 int machine__create_kernel_maps(struct machine *machine)
 {
         struct dso *kernel = machine__get_kernel(machine);
+        char filename[PATH_MAX];
+        const char *name;
+        u64 addr = 0;
+        int i;
+
+        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+        for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+                addr = kallsyms__get_function_start(filename, name);
+                if (addr)
+                        break;
+        }
+        if (!addr)
+                return -1;
 
         if (kernel == NULL ||
             __machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
          * Now that we have all the maps created, just set the ->end of them:
          */
         map_groups__fixup_end(&machine->kmaps);
+
+        if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+                                             addr)) {
+                machine__destroy_kernel_maps(machine);
+                return -1;
+        }
+
         return 0;
 }
 
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 477133015440..f77e91e483dc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
 #define HOST_KERNEL_ID                  (-1)
 #define DEFAULT_GUEST_KERNEL_ID         (0)
 
+extern const char *ref_reloc_sym_names[];
+
 struct machine {
         struct rb_node    rb_node;
         pid_t             pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 9b9bd719aa19..39cd2d0faff6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
         map->start    = start;
         map->end      = end;
         map->pgoff    = pgoff;
+        map->reloc    = 0;
         map->dso      = dso;
         map->map_ip   = map__map_ip;
         map->unmap_ip = map__unmap_ip;
@@ -69,7 +70,7 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                 map->ino = ino;
                 map->ino_generation = ino_gen;
 
-                if (anon) {
+                if ((anon || no_dso) && type == MAP__FUNCTION) {
                         snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
                         filename = newfilename;
                 }
@@ -93,7 +94,7 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                          * functions still return NULL, and we avoid the
                          * unnecessary map__load warning.
                          */
-                        if (no_dso)
+                        if (type != MAP__FUNCTION)
                                 dso__set_loaded(dso, map->type);
                 }
         }
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
         if (map->dso->rel)
                 return rip - map->pgoff;
 
-        return map->unmap_ip(map, rip);
+        return map->unmap_ip(map, rip) - map->reloc;
 }
 
 /**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
         if (map->dso->rel)
                 return map->unmap_ip(map, ip + map->pgoff);
 
-        return ip;
+        return ip + map->reloc;
 }
 
 void map_groups__init(struct map_groups *mg)
@@ -386,7 +387,8 @@ struct symbol *map_groups__find_symbol(struct map_groups *mg,
 {
         struct map *map = map_groups__find(mg, type, addr);
 
-        if (map != NULL) {
+        /* Ensure map is loaded before using map->map_ip */
+        if (map != NULL && map__load(map, filter) >= 0) {
                 if (mapp != NULL)
                         *mapp = map;
                 return map__find_symbol(map, map->map_ip(map, addr), filter);
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6b71c1..257e513205ce 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
         bool                    erange_warned;
         u32                     priv;
         u64                     pgoff;
+        u64                     reloc;
         u32                     maj, min; /* only valid for MMAP2 record */
         u64                     ino;      /* only valid for MMAP2 record */
         u64                     ino_generation;/* only valid for MMAP2 record */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a7f1b6a91fdd..d248fca6d7ed 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -635,7 +635,7 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
         struct perf_event_attr attr;
         struct perf_pmu *pmu;
         struct perf_evsel *evsel;
-        char *unit;
+        const char *unit;
         double scale;
 
         pmu = perf_pmu__find(name);
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d9cab4d27192..b752ecb40d86 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -105,7 +105,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
         char scale[128];
         int fd, ret = -1;
         char path[PATH_MAX];
-        char *lc;
+        const char *lc;
 
         snprintf(path, PATH_MAX, "%s/%s.scale", dir, name);
 
@@ -609,7 +609,7 @@ static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
 
 
 static int check_unit_scale(struct perf_pmu_alias *alias,
-                            char **unit, double *scale)
+                            const char **unit, double *scale)
 {
         /*
          * Only one term in event definition can
@@ -634,14 +634,18 @@ static int check_unit_scale(struct perf_pmu_alias *alias,
  * defined for the alias
  */
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
-                          char **unit, double *scale)
+                          const char **unit, double *scale)
 {
         struct parse_events_term *term, *h;
         struct perf_pmu_alias *alias;
         int ret;
 
+        /*
+         * Mark unit and scale as not set
+         * (different from default values, see below)
+         */
         *unit   = NULL;
-        *scale  = 0;
+        *scale  = 0.0;
 
         list_for_each_entry_safe(term, h, head_terms, list) {
                 alias = pmu_find_alias(pmu, term);
@@ -658,6 +662,18 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
                 list_del(&term->list);
                 free(term);
         }
+
+        /*
+         * if no unit or scale foundin aliases, then
+         * set defaults as for evsel
+         * unit cannot left to NULL
+         */
+        if (*unit == NULL)
+                *unit   = "";
+
+        if (*scale == 0.0)
+                *scale  = 1.0;
+
         return 0;
 }
 
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 9183380e2038..8b64125a9281 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -29,7 +29,7 @@ int perf_pmu__config_terms(struct list_head *formats,
                            struct perf_event_attr *attr,
                            struct list_head *head_terms);
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
-                          char **unit, double *scale);
+                          const char **unit, double *scale);
 struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
                                   struct list_head *head_terms);
 int perf_pmu_wrap(void);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 7acc03e8f3b2..0b39a48e5110 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1573,7 +1573,7 @@ next:
 int perf_session__cpu_bitmap(struct perf_session *session,
                              const char *cpu_list, unsigned long *cpu_bitmap)
 {
-        int i;
+        int i, err = -1;
         struct cpu_map *map;
 
         for (i = 0; i < PERF_TYPE_MAX; ++i) {
@@ -1602,13 +1602,17 @@ int perf_session__cpu_bitmap(struct perf_session *session,
                 if (cpu >= MAX_NR_CPUS) {
                         pr_err("Requested CPU %d too large. "
                                "Consider raising MAX_NR_CPUS\n", cpu);
-                        return -1;
+                        goto out_delete_map;
                 }
 
                 set_bit(cpu, cpu_bitmap);
         }
 
-        return 0;
+        err = 0;
+
+out_delete_map:
+        cpu_map__delete(map);
+        return err;
 }
 
 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 759456728703..3e9f336740fa 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
                         if (strcmp(elf_name, kmap->ref_reloc_sym->name))
                                 continue;
                         kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+                        map->reloc = kmap->ref_reloc_sym->addr -
+                                     kmap->ref_reloc_sym->unrelocated_addr;
                         break;
                 }
         }
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                   (u64)shdr.sh_offset);
                         sym.st_value -= shdr.sh_addr - shdr.sh_offset;
                 }
+new_symbol:
                 /*
                  * We need to figure out if the object was created from C++ sources
                  * DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
                         if (demangled != NULL)
                                 elf_name = demangled;
                 }
-new_symbol:
                 f = symbol__new(sym.st_value, sym.st_size,
                                 GELF_ST_BIND(sym.st_info), elf_name);
                 free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9adbaaf0..a9d758a3b371 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
  * kernel range is broken in several maps, named [kernel].N, as we don't have
  * the original ELF section names vmlinux have.
  */
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
                                symbol_filter_t filter)
 {
         struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
                         char dso_name[PATH_MAX];
                         struct dso *ndso;
 
+                        if (delta) {
+                                /* Kernel was relocated at boot time */
+                                pos->start -= delta;
+                                pos->end -= delta;
+                        }
+
                         if (count == 0) {
                                 curr_map = map;
                                 goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
                         curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
                         map_groups__insert(kmaps, curr_map);
                         ++kernel_range;
+                } else if (delta) {
+                        /* Kernel was relocated at boot time */
+                        pos->start -= delta;
+                        pos->end -= delta;
                 }
 filter_symbol:
                 if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
         return 0;
 }
 
+static int validate_kcore_addresses(const char *kallsyms_filename,
+                                    struct map *map)
+{
+        struct kmap *kmap = map__kmap(map);
+
+        if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+                u64 start;
+
+                start = kallsyms__get_function_start(kallsyms_filename,
+                                                     kmap->ref_reloc_sym->name);
+                if (start != kmap->ref_reloc_sym->addr)
+                        return -EINVAL;
+        }
+
+        return validate_kcore_modules(kallsyms_filename, map);
+}
+
 struct kcore_mapfn_data {
         struct dso *dso;
         enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
                                              kallsyms_filename))
                 return -EINVAL;
 
-        /* All modules must be present at their original addresses */
-        if (validate_kcore_modules(kallsyms_filename, map))
+        /* Modules and kernel must be present at their original addresses */
+        if (validate_kcore_addresses(kallsyms_filename, map))
                 return -EINVAL;
 
         md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
         return -EINVAL;
 }
 
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+        struct kmap *kmap = map__kmap(map);
+        u64 addr;
+
+        if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+                return 0;
+
+        addr = kallsyms__get_function_start(filename,
+                                            kmap->ref_reloc_sym->name);
+        if (!addr)
+                return -1;
+
+        *delta = addr - kmap->ref_reloc_sym->addr;
+        return 0;
+}
+
 int dso__load_kallsyms(struct dso *dso, const char *filename,
                        struct map *map, symbol_filter_t filter)
 {
+        u64 delta = 0;
+
         if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                 return -1;
 
         if (dso__load_all_kallsyms(dso, filename, map) < 0)
                 return -1;
 
+        if (kallsyms__delta(map, filename, &delta))
+                return -1;
+
         symbols__fixup_duplicate(&dso->symbols[map->type]);
         symbols__fixup_end(&dso->symbols[map->type]);
 
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
         if (!dso__load_kcore(dso, map, filename))
                 return dso__split_kallsyms_for_kcore(dso, map, filter);
         else
-                return dso__split_kallsyms(dso, map, filter);
+                return dso__split_kallsyms(dso, map, delta, filter);
 }
 
 static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1424,7 +1477,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
                         continue;
                 scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
                           "%s/%s/kallsyms", dir, dent->d_name);
-                if (!validate_kcore_modules(kallsyms_filename, map)) {
+                if (!validate_kcore_addresses(kallsyms_filename, map)) {
                         strlcpy(dir, kallsyms_filename, dir_sz);
                         ret = 0;
                         break;
@@ -1479,7 +1532,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
         if (fd != -1) {
                 close(fd);
                 /* If module maps match go with /proc/kallsyms */
-                if (!validate_kcore_modules("/proc/kallsyms", map))
+                if (!validate_kcore_addresses("/proc/kallsyms", map))
                         goto proc_kallsyms;
         }