path: root/tools/perf/builtin-kmem.c
author:    Arnaldo Carvalho de Melo <acme@redhat.com>  2009-12-14 00:50:29 +0300
committer: Ingo Molnar <mingo@elte.hu>                 2009-12-14 18:57:17 +0300
commit:    4aa65636411ccb12f006a6ad593930655c445ff6 (patch)
tree:      0f494705a2a7631070a5372bb53f873684b001c2 /tools/perf/builtin-kmem.c
parent:    b3165f414416a717f72a376720564012af5a2e01 (diff)
download:  linux-4aa65636411ccb12f006a6ad593930655c445ff6.tar.xz
perf session: Move kmaps to perf_session
There is still some more work to do to disentangle map creation from DSO loading, but this happens only for the kernel, and for the early adopters of perf diff, where this disentanglement matters most, we'll be testing different kernels, so no problem here.

Further clarification: right now we create the kernel maps for the various modules and the discontiguous kernel text maps while loading the DSO. We should instead do it as a two-step process: first create the maps, allowing multiple mappings to share the same DSO backing store, then do the DSO load just once, on the first hit on any of the maps sharing that backing store.

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frédéric Weisbecker <fweisbec@gmail.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <1260741029-4430-6-git-send-email-acme@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
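The two-step scheme described in the message is only proposed here, not implemented by this patch. Below is a minimal, self-contained sketch of the idea; every name in it (struct kmap_range, struct example_dso, create_kernel_maps, load_dso_symbols, find_symbol_in_dso) is invented for illustration and is not part of the perf code.

	/*
	 * Sketch only: the two-step map-creation / lazy-DSO-load scheme.
	 * All types and functions here are hypothetical stand-ins.
	 */
	#include <stdbool.h>
	#include <stddef.h>
	#include <stdint.h>

	struct example_dso {			/* stand-in for the shared DSO backing store */
		const char	*name;
		bool		 loaded;
	};

	struct kmap_range {			/* one module map or discontiguous kernel text map */
		uint64_t		 start, end;
		struct example_dso	*dso;	/* several ranges may share the same DSO */
	};

	/* Step 1: create all the maps up front, pointing them at one shared
	 * DSO backing store, without loading any symbols yet. */
	static void create_kernel_maps(struct kmap_range *ranges, size_t n,
				       struct example_dso *shared)
	{
		for (size_t i = 0; i < n; i++)
			ranges[i].dso = shared;
	}

	/* Placeholder for the expensive symbol-table load. */
	static void load_dso_symbols(struct example_dso *dso)
	{
		dso->loaded = true;
	}

	/* Step 2: load the DSO lazily, only on the first lookup that hits any
	 * of the maps sharing this backing store; later hits reuse it. */
	static const char *find_symbol_in_dso(struct kmap_range *range, uint64_t addr)
	{
		(void)addr;			/* real code would resolve addr to a symbol */
		if (!range->dso->loaded)
			load_dso_symbols(range->dso);
		return range->dso->name;
	}

The point of the split is that the expensive symbol-table load happens at most once per DSO, no matter how many kernel text ranges or modules map onto it.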
Diffstat (limited to 'tools/perf/builtin-kmem.c')
-rw-r--r--	tools/perf/builtin-kmem.c	41
1 file changed, 19 insertions, 22 deletions
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index de194958fe6e..e79ecbc17181 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -364,19 +364,6 @@ static struct perf_event_ops event_ops = {
 	.sample_type_check	= sample_type_check,
 };
 
-static int read_events(void)
-{
-	int err;
-	struct perf_session *session = perf_session__new(input_name, O_RDONLY, 0);
-
-	if (session == NULL)
-		return -ENOMEM;
-
-	err = perf_session__process_events(session, &event_ops);
-	perf_session__delete(session);
-	return err;
-}
-
 static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 {
 	if (n_alloc == 0)
@@ -385,7 +372,8 @@ static double fragmentation(unsigned long n_req, unsigned long n_alloc)
 		return 100.0 - (100.0 * n_req / n_alloc);
 }
 
-static void __print_result(struct rb_root *root, int n_lines, int is_caller)
+static void __print_result(struct rb_root *root, struct perf_session *session,
+			   int n_lines, int is_caller)
 {
 	struct rb_node *next;
@@ -406,7 +394,7 @@ static void __print_result(struct rb_root *root, int n_lines, int is_caller)
 		if (is_caller) {
 			addr = data->call_site;
 			if (!raw_ip)
-				sym = map_groups__find_function(kmaps, addr, NULL);
+				sym = map_groups__find_function(&session->kmaps, session, addr, NULL);
 		} else
 			addr = data->ptr;
@@ -447,12 +435,12 @@ static void print_summary(void)
printf("Cross CPU allocations: %lu/%lu\n", nr_cross_allocs, nr_allocs);
}
-static void print_result(void)
+static void print_result(struct perf_session *session)
{
if (caller_flag)
- __print_result(&root_caller_sorted, caller_lines, 1);
+ __print_result(&root_caller_sorted, session, caller_lines, 1);
if (alloc_flag)
- __print_result(&root_alloc_sorted, alloc_lines, 0);
+ __print_result(&root_alloc_sorted, session, alloc_lines, 0);
print_summary();
}
@@ -520,12 +508,21 @@ static void sort_result(void)
 static int __cmd_kmem(void)
 {
+	int err;
+	struct perf_session *session = perf_session__new(input_name, O_RDONLY,
+							 0, NULL);
+	if (session == NULL)
+		return -ENOMEM;
+
 	setup_pager();
-	read_events();
+	err = perf_session__process_events(session, &event_ops);
+	if (err != 0)
+		goto out_delete;
 	sort_result();
-	print_result();
-
-	return 0;
+	print_result(session);
+out_delete:
+	perf_session__delete(session);
+	return err;
 }
 
 static const char * const kmem_usage[] = {
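Taken together, the hunk above leaves __cmd_kmem() reading roughly like this; the body below is reconstructed from the '+' and context lines for readability, with input_name, event_ops, sort_result() and print_result() declared elsewhere in builtin-kmem.c:

	static int __cmd_kmem(void)
	{
		int err;
		/* the session now owns the kmaps used for symbol resolution */
		struct perf_session *session = perf_session__new(input_name, O_RDONLY,
								 0, NULL);
		if (session == NULL)
			return -ENOMEM;

		setup_pager();
		err = perf_session__process_events(session, &event_ops);
		if (err != 0)
			goto out_delete;
		sort_result();
		print_result(session);
	out_delete:
		perf_session__delete(session);
		return err;
	}

Compared with the old read_events()/print_result() pair, the session is created once here and passed down to the printing code so symbol lookups can use session->kmaps, and perf_session__delete() now runs on both the success and the error path.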