path: root/include/linux/perf_event.h
author  Peter Zijlstra <peterz@infradead.org>  2020-10-09 12:09:27 +0300
committer  Peter Zijlstra <peterz@infradead.org>  2020-10-29 13:00:39 +0300
commit  51b646b2d9f84d6ff6300e3c1d09f2be4329a424 (patch)
tree  a8c9b6bded7c1358f87cbc0f582eb6c653791818 /include/linux/perf_event.h
parent  995f088efebe1eba0282a6ffa12411b37f8990c2 (diff)
download  linux-51b646b2d9f84d6ff6300e3c1d09f2be4329a424.tar.xz
perf,mm: Handle non-page-table-aligned hugetlbfs
A limited number of architectures support hugetlbfs sizes that do not align with the page tables (ARM64, Power, Sparc64). Add support for this to the generic perf_get_page_size() implementation, and also allow an architecture to override this implementation. The override is only needed when an architecture uses non-page-table-aligned huge pages in its kernel map.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
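For illustration, here is a minimal sketch of how such a __weak hook can compose with the generic walker. It is an assumption about the shape of the code, not the actual kernel/events/core.c implementation; generic_walk_page_size() and perf_get_page_size_sketch() are invented names standing in for the real helpers.

#include <linux/mm.h>
#include <linux/perf_event.h>

/* Invented stand-in for the generic software page-table walk. */
static u64 generic_walk_page_size(struct mm_struct *mm, unsigned long addr)
{
	return PAGE_SIZE;
}

/* Weak default: returning 0 means "not resolved here, do the generic walk". */
__weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
{
	return 0;
}

/* Sketch of the generic caller: ask the architecture first, then fall back. */
static u64 perf_get_page_size_sketch(struct mm_struct *mm, unsigned long addr)
{
	u64 size = arch_perf_get_page_size(mm, addr);

	if (!size)
		size = generic_walk_page_size(mm, addr);

	return size;
}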
Diffstat (limited to 'include/linux/perf_event.h')
-rw-r--r--  include/linux/perf_event.h  4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e533b03af053..0defb526cd0c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1590,4 +1590,8 @@ extern void __weak arch_perf_update_userpage(struct perf_event *event,
 					     struct perf_event_mmap_page *userpg,
 					     u64 now);
 
+#ifdef CONFIG_MMU
+extern __weak u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr);
+#endif
+
 #endif /* _LINUX_PERF_EVENT_H */
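As a usage note, an architecture whose kernel map uses huge pages that are not visible at a page-table level could then supply a strong definition overriding the __weak declaration above. The sketch below is hypothetical: example_kernel_map_page_size() is an invented helper and the PMD_SIZE return value is only a placeholder, not an actual in-tree override.

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/perf_event.h>

/* Invented placeholder for an arch-specific kernel-map lookup. */
static u64 example_kernel_map_page_size(unsigned long addr)
{
	return PMD_SIZE;	/* e.g. a 2MiB block mapping in the linear map */
}

/* Strong definition; overrides the __weak declaration from perf_event.h. */
u64 arch_perf_get_page_size(struct mm_struct *mm, unsigned long addr)
{
	/* Only the kernel map needs special handling in this sketch. */
	if (mm == &init_mm)
		return example_kernel_map_page_size(addr);

	/* User addresses: return 0 to fall back to the generic walk. */
	return 0;
}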