summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--arch/powerpc/include/asm/task_size_32.h6
-rw-r--r--arch/powerpc/include/asm/uaccess.h76
2 files changed, 79 insertions, 3 deletions
diff --git a/arch/powerpc/include/asm/task_size_32.h b/arch/powerpc/include/asm/task_size_32.h
index 42a64bbd1964..725ddbf06217 100644
--- a/arch/powerpc/include/asm/task_size_32.h
+++ b/arch/powerpc/include/asm/task_size_32.h
@@ -13,7 +13,7 @@
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_4M) - 1))
-#define USER_TOP MODULES_BASE
+#define USER_TOP (MODULES_BASE - SZ_4M)
#endif
#ifdef CONFIG_PPC_BOOK3S_32
@@ -21,11 +21,11 @@
#define MODULES_SIZE (CONFIG_MODULES_SIZE * SZ_1M)
#define MODULES_VADDR (MODULES_END - MODULES_SIZE)
#define MODULES_BASE (MODULES_VADDR & ~(UL(SZ_256M) - 1))
-#define USER_TOP MODULES_BASE
+#define USER_TOP (MODULES_BASE - SZ_4M)
#endif
#ifndef USER_TOP
-#define USER_TOP ASM_CONST(CONFIG_PAGE_OFFSET)
+#define USER_TOP ((ASM_CONST(CONFIG_PAGE_OFFSET) - SZ_128K) & ~(UL(SZ_128K) - 1))
#endif
#if CONFIG_TASK_SIZE < USER_TOP
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 721d65dbbb2e..ba1d878c3f40 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -2,6 +2,8 @@
#ifndef _ARCH_POWERPC_UACCESS_H
#define _ARCH_POWERPC_UACCESS_H
+#include <linux/sizes.h>
+
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/extable.h>
@@ -435,6 +437,80 @@ static __must_check __always_inline bool __user_access_begin(const void __user *
#define user_access_save prevent_user_access_return
#define user_access_restore restore_user_access
+/*
+ * Masking the user address is an alternative to a conditional
+ * user_access_begin that can avoid the fencing. This only works
+ * for dense accesses starting at the address.
+ */
+/*
+ * Branchless clamp relying on the user/kernel split at the top address
+ * bit: replicate the pointer's sign bit across the word with an
+ * arithmetic shift, then clear the top bit of the mask (& LONG_MAX).
+ * A pointer with the top bit clear is returned unchanged; one with the
+ * top bit set collapses to the base of the upper half of the address
+ * space, which is never a valid user address.  Only selected by
+ * mask_user_address() when all user addresses are known to sit below
+ * the halfway point (PPC64, or 32-bit with TASK_SIZE <= 2G and the
+ * kernel border at or above 2G).
+ */
+static inline void __user *mask_user_address_simple(const void __user *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+ unsigned long mask = (unsigned long)(((long)addr >> (BITS_PER_LONG - 1)) & LONG_MAX);
+
+ return (void __user *)(addr & ~mask);
+}
+
+/*
+ * e500 variant: cmplw performs an unsigned compare of the pointer
+ * against TASK_SIZE, then iselgt branchlessly selects TASK_SIZE when
+ * the pointer is greater, and the pointer itself otherwise.  cr0 is
+ * clobbered by the compare.
+ */
+static inline void __user *mask_user_address_isel(const void __user *ptr)
+{
+ unsigned long addr;
+
+ asm("cmplw %1, %2; iselgt %0, %2, %1" : "=r"(addr) : "r"(ptr), "r"(TASK_SIZE) : "cr0");
+
+ return (void __user *)addr;
+}
+
+/* TASK_SIZE is a multiple of 128K for shifting by 17 to the right */
+static inline void __user *mask_user_address_32(const void __user *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+ /*
+  * Compare in 128K granules: (TASK_SIZE >> 17) - 1 - (addr >> 17)
+  * goes negative exactly when addr >= TASK_SIZE, and the arithmetic
+  * shift by 31 spreads that sign bit into an all-ones/all-zeroes
+  * mask.
+  */
+ unsigned long mask = (unsigned long)((long)((TASK_SIZE >> 17) - 1 - (addr >> 17)) >> 31);
+
+ /* Branchless select: addr when in range, TASK_SIZE otherwise. */
+ addr = (addr & ~mask) | (TASK_SIZE & mask);
+
+ return (void __user *)addr;
+}
+
+/*
+ * Generic fallback: a plain compare-and-clamp with a branch.
+ * Out-of-range pointers are redirected to TASK_SIZE, the first address
+ * past the user range.
+ */
+static inline void __user *mask_user_address_fallback(const void __user *ptr)
+{
+ unsigned long addr = (unsigned long)ptr;
+
+ return (void __user *)(likely(addr < TASK_SIZE) ? addr : TASK_SIZE);
+}
+
+/*
+ * Clamp a pointer into the user address range without a conditional
+ * speculation barrier, picking the cheapest variant available for the
+ * configuration:
+ * - PPC64: sign-bit masking;
+ * - e500: branchless select via the isel instruction;
+ * - 32-bit with user space entirely below 2G and the kernel border
+ *   (modules area if present, otherwise PAGE_OFFSET) at or above 2G:
+ *   sign-bit masking works there as well;
+ * - CONFIG_PPC_BARRIER_NOSPEC: the 128K-granule shift/mask variant;
+ * - otherwise a simple conditional clamp is sufficient.
+ */
+static inline void __user *mask_user_address(const void __user *ptr)
+{
+#ifdef MODULES_VADDR
+ /* Lowest kernel-owned address: the modules area when it exists. */
+ const unsigned long border = MODULES_VADDR;
+#else
+ const unsigned long border = PAGE_OFFSET;
+#endif
+
+ if (IS_ENABLED(CONFIG_PPC64))
+ return mask_user_address_simple(ptr);
+ if (IS_ENABLED(CONFIG_E500))
+ return mask_user_address_isel(ptr);
+ if (TASK_SIZE <= UL(SZ_2G) && border >= UL(SZ_2G))
+ return mask_user_address_simple(ptr);
+ if (IS_ENABLED(CONFIG_PPC_BARRIER_NOSPEC))
+ return mask_user_address_32(ptr);
+ return mask_user_address_fallback(ptr);
+}
+
+/*
+ * Masking variant of user_access_begin(): instead of a conditional
+ * access_ok() check that needs speculation fencing, clamp @p into the
+ * user range and open a KUAP user-access window for the requested
+ * direction (@dir: KUAP_READ, KUAP_WRITE or KUAP_READ_WRITE).  May
+ * fault, hence might_fault().  Callers must use the returned (masked)
+ * pointer, not @p.
+ * NOTE(review): presumably paired with the matching
+ * user_*_access_end(), as for user_access_begin() — the end paths are
+ * outside this hunk.
+ */
+static __always_inline void __user *__masked_user_access_begin(const void __user *p,
+ unsigned long dir)
+{
+ void __user *ptr = mask_user_address(p);
+
+ might_fault();
+ allow_user_access(ptr, dir);
+
+ return ptr;
+}
+
+/* Direction-specific wrappers: read+write, read-only, write-only. */
+#define masked_user_access_begin(p) __masked_user_access_begin(p, KUAP_READ_WRITE)
+#define masked_user_read_access_begin(p) __masked_user_access_begin(p, KUAP_READ)
+#define masked_user_write_access_begin(p) __masked_user_access_begin(p, KUAP_WRITE)
+
#define arch_unsafe_get_user(x, p, e) do { \
__long_type(*(p)) __gu_val; \
__typeof__(*(p)) __user *__gu_addr = (p); \