author		Anshuman Khandual <anshuman.khandual@arm.com>	2022-07-11 10:05:41 +0300
committer	akpm <akpm@linux-foundation.org>	2022-07-18 03:14:38 +0300
commit		09095f74130dfb2110ef2bcdd9ad0d42addaa1d5 (patch)
tree		f436e59f784c014300f84f14cf59776b6b46db14
parent		4867fbbdd6b362400d154417e08ce76b14200ba1 (diff)
download	linux-09095f74130dfb2110ef2bcdd9ad0d42addaa1d5.tar.xz
mm/mmap: build protect protection_map[] with ARCH_HAS_VM_GET_PAGE_PROT
Now that protection_map[] has been moved inside those platforms that enable ARCH_HAS_VM_GET_PAGE_PROT, the generic protection_map[] array can be protected with CONFIG_ARCH_HAS_VM_GET_PAGE_PROT instead of __P000.

Link: https://lkml.kernel.org/r/20220711070600.2378316-8-anshuman.khandual@arm.com
Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
Reviewed-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Brian Cain <bcain@quicinc.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Chris Zankel <chris@zankel.net>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Dinh Nguyen <dinguyen@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Guo Ren <guoren@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Jonas Bonn <jonas@southpole.se>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Paul Walmsley <paul.walmsley@sifive.com>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Rich Felker <dalias@libc.org>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Stafford Horne <shorne@gmail.com>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Vineet Gupta <vgupta@kernel.org>
Cc: WANG Xuerui <kernel@xen0n.name>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	include/linux/mm.h	2
-rw-r--r--	mm/mmap.c	5
2 files changed, 2 insertions, 5 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1a435ce146a2..4b4dc93f9bc3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -425,7 +425,7 @@ extern unsigned int kobjsize(const void *objp);
* mapping from the currently active vm_flags protection bits (the
* low four bits) to a page protection mask..
*/
-#ifdef __P000
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
extern pgprot_t protection_map[16];
#endif
diff --git a/mm/mmap.c b/mm/mmap.c
index 3c0d65743bc4..2a58a9cd0752 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -81,7 +81,7 @@ static void unmap_region(struct mm_struct *mm,
struct vm_area_struct *vma, struct vm_area_struct *prev,
unsigned long start, unsigned long end);
-#ifdef __P000
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
pgprot_t protection_map[16] __ro_after_init = {
[VM_NONE] = __P000,
[VM_READ] = __P001,
@@ -100,9 +100,6 @@ pgprot_t protection_map[16] __ro_after_init = {
[VM_SHARED | VM_EXEC | VM_WRITE] = __S110,
[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __S111
};
-#endif
-
-#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
DECLARE_VM_GET_PAGE_PROT
#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
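
For context beyond this diff: with the guard now keyed on CONFIG_ARCH_HAS_VM_GET_PAGE_PROT, platforms that do not select the option keep the generic protection_map[] table and the DECLARE_VM_GET_PAGE_PROT definition above, while platforms that do select it carry their own table and vm_get_page_prot(). The following is only a minimal sketch of that arch-side arrangement, assuming placeholder protection macros and an abbreviated table; it is not part of this patch.

/*
 * Sketch only, not taken from this patch: what an architecture that
 * selects ARCH_HAS_VM_GET_PAGE_PROT is expected to provide on its side
 * (e.g. in a hypothetical arch/<arch>/mm/mmap.c).  PAGE_NONE and
 * PAGE_READONLY stand in for that architecture's protection macros,
 * and the table is abbreviated.
 */
#include <linux/export.h>
#include <linux/mm.h>

static pgprot_t protection_map[16] __ro_after_init = {
	[VM_NONE]	= PAGE_NONE,
	[VM_READ]	= PAGE_READONLY,
	/* ... the remaining VM_WRITE/VM_EXEC/VM_SHARED combinations ... */
};

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
	/* Index with the low four vm_flags bits, as the generic helper does. */
	return protection_map[vm_flags &
			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
EXPORT_SYMBOL(vm_get_page_prot);

Either way, callers keep using vm_get_page_prot(vm_flags) unchanged; only where the table and helper live differs.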