author | Wang Nan <wangnan0@huawei.com> | 2016-07-14 11:34:38 +0300
---|---|---
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2016-07-15 23:27:47 +0300
commit | a1f72618346a4e7ce9cff6aec1a62737d5d08763 |
tree | 62d32ea095693439a56178a80bf8a1b9e7b39a73 /tools/perf/util/evlist.c |
parent | 4876075b3205af992bf1012f6d6fbc03593d55b9 |
perf evlist: Extract common code in mmap failure processing
In perf_evlist__mmap_per_cpu() and perf_evlist__mmap_per_thread(), any
maps that were successfully created must be cleared when mmap fails.
The current code does this with a separate loop in each function,
calling __perf_evlist__munmap() for every map.

This patch extracts the common code into perf_evlist__munmap_nofree()
and uses the previously introduced, decoupled perf_mmap__munmap() API.
__perf_evlist__munmap() can now be removed because it has no remaining
users.
Signed-off-by: Wang Nan <wangnan0@huawei.com>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: He Kuang <hekuang@huawei.com>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Nilay Vaish <nilayvaish@gmail.com>
Cc: Zefan Li <lizefan@huawei.com>
Cc: pi3orama@163.com
Link: http://lkml.kernel.org/r/1468485287-33422-7-git-send-email-wangnan0@huawei.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r-- | tools/perf/util/evlist.c | 20 |
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 1462085a8618..54ae0a0bc22c 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -28,7 +28,6 @@
 #include <linux/err.h>
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
 static void perf_mmap__munmap(struct perf_mmap *map);
 static void perf_mmap__put(struct perf_mmap *map);
 
@@ -970,12 +969,7 @@ static void perf_mmap__munmap(struct perf_mmap *map)
 	auxtrace_mmap__munmap(&map->auxtrace_mmap);
 }
 
-static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
-{
-	perf_mmap__munmap(&evlist->mmap[idx]);
-}
-
-void perf_evlist__munmap(struct perf_evlist *evlist)
+static void perf_evlist__munmap_nofree(struct perf_evlist *evlist)
 {
 	int i;
 
@@ -983,8 +977,12 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
 		return;
 
 	for (i = 0; i < evlist->nr_mmaps; i++)
-		__perf_evlist__munmap(evlist, i);
+		perf_mmap__munmap(&evlist->mmap[i]);
+}
 
+void perf_evlist__munmap(struct perf_evlist *evlist)
+{
+	perf_evlist__munmap_nofree(evlist);
 	zfree(&evlist->mmap);
 }
 
@@ -1142,8 +1140,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
 	return 0;
 
 out_unmap:
-	for (cpu = 0; cpu < nr_cpus; cpu++)
-		__perf_evlist__munmap(evlist, cpu);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
 
@@ -1168,8 +1165,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
 	return 0;
 
 out_unmap:
-	for (thread = 0; thread < nr_threads; thread++)
-		__perf_evlist__munmap(evlist, thread);
+	perf_evlist__munmap_nofree(evlist);
 	return -1;
 }
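For illustration only, the split between a "nofree" teardown helper and the full teardown can be modeled by the following standalone sketch. This is plain user-space C, not kernel code; every identifier here is a stand-in that merely mimics the evlist/perf_mmap structure from the patch above.

/*
 * Minimal sketch (assumed stand-in types, not the kernel API):
 * one helper tears down each element of the mmap array but keeps the
 * array itself, a second helper additionally frees the array.
 */
#include <stdio.h>
#include <stdlib.h>

struct mmap_stub {
	void *base;			/* stand-in for the real mmap'ed area */
};

struct evlist_stub {
	struct mmap_stub *mmap;		/* array of per-cpu/per-thread maps */
	int nr_mmaps;
};

/* Undo each map but keep the array: usable from mmap error paths. */
static void evlist_munmap_nofree(struct evlist_stub *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		free(evlist->mmap[i].base);	/* models perf_mmap__munmap() */
		evlist->mmap[i].base = NULL;
	}
}

/* Full teardown: undo each map and release the array itself. */
static void evlist_munmap(struct evlist_stub *evlist)
{
	evlist_munmap_nofree(evlist);
	free(evlist->mmap);		/* models zfree(&evlist->mmap) */
	evlist->mmap = NULL;
}

int main(void)
{
	struct evlist_stub evlist = { calloc(4, sizeof(struct mmap_stub)), 4 };
	int i;

	if (evlist.mmap == NULL)
		return 1;

	for (i = 0; i < evlist.nr_mmaps; i++)
		evlist.mmap[i].base = malloc(16);

	/* An mmap failure path would call only the nofree variant ... */
	evlist_munmap_nofree(&evlist);
	/* ... while normal teardown uses the full variant. */
	evlist_munmap(&evlist);
	printf("cleanup done\n");
	return 0;
}

The point of the split is that the error paths in the per-cpu and per-thread mmap functions only need to undo the maps that were already created, while perf_evlist__munmap() additionally releases the mmap array itself.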