author    Heiko Carstens <hca@linux.ibm.com>  2025-02-11 22:19:26 +0300
committer Vasily Gorbik <gor@linux.ibm.com>   2025-03-04 19:18:03 +0300
commit    c488f5187a24969b40fd08c3e9ead377c0cfb9b3 (patch)
tree      f8cde8b32b1cc9267f8bd683e22050e7900ef4fe
parent    fb5bbcdcc3eabd8e9061eac7c3223e3de640adfa (diff)
download  linux-c488f5187a24969b40fd08c3e9ead377c0cfb9b3.tar.xz
s390/uaccess: Shorten raw_copy_from_user() / raw_copy_to_user() inline assemblies
Add a specific exception handler for copy_to_user() / copy_from_user() mvcos fault handling, which allows the inline assemblies to be shortened to three instructions. On fault the exception handler adjusts the length used by the mvcos instruction so that the instruction completes with condition code zero, indicating the number of bytes copied via the input/output operand 'size'. This allows the number of bytes not copied, if any, to be calculated and returned, as required.

Loop and return value handling is moved to C so that the compiler may optimize the code.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
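For illustration, the structure of the reworked copy loop can be sketched in plain C. This is a minimal standalone model, not the kernel code: mvcos_chunk() is a hypothetical stand-in for the three-instruction inline assembly, and the fault path (the exception handler shrinking the length register) is not modelled.

#include <string.h>

/*
 * Standalone sketch of the new copy loop. mvcos_chunk() is a
 * hypothetical stand-in for the inline assembly: it copies at most
 * 4096 bytes and returns condition code 0 when the copy is complete,
 * or 3 when more data remains. On a fault the real code relies on the
 * exception handler to shrink *size so that the retried instruction
 * completes with condition code zero.
 */
static int mvcos_chunk(void *to, const void *from, unsigned long *size)
{
	unsigned long chunk = *size > 4096 ? 4096 : *size;

	memcpy(to, from, chunk);	/* faults are not modelled here */
	return *size > 4096 ? 3 : 0;
}

static unsigned long copy_loop(void *to, const void *from, unsigned long size)
{
	unsigned long osize;
	int cc;

	while (1) {
		osize = size;
		cc = mvcos_chunk(to, from, &size);
		if (cc == 0)
			return osize - size;	/* bytes not copied; 0 on success */
		/* condition code 3: 4096 bytes were copied, more to do */
		size -= 4096;
		to = (char *)to + 4096;
		from = (const char *)from + 4096;
	}
}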
-rw-r--r--  arch/s390/include/asm/asm-extable.h    8
-rw-r--r--  arch/s390/include/asm/uaccess.h      114
-rw-r--r--  arch/s390/mm/extable.c                47
3 files changed, 105 insertions, 64 deletions
diff --git a/arch/s390/include/asm/asm-extable.h b/arch/s390/include/asm/asm-extable.h
index 2e829c16fd8a..d23ea0c94e4e 100644
--- a/arch/s390/include/asm/asm-extable.h
+++ b/arch/s390/include/asm/asm-extable.h
@@ -14,6 +14,8 @@
#define EX_TYPE_UA_LOAD_REGPAIR 6
#define EX_TYPE_ZEROPAD 7
#define EX_TYPE_FPC 8
+#define EX_TYPE_UA_MVCOS_TO 9
+#define EX_TYPE_UA_MVCOS_FROM 10
#define EX_DATA_REG_ERR_SHIFT 0
#define EX_DATA_REG_ERR GENMASK(3, 0)
@@ -84,4 +86,10 @@
#define EX_TABLE_FPC(_fault, _target) \
__EX_TABLE(__ex_table, _fault, _target, EX_TYPE_FPC, __stringify(%%r0), __stringify(%%r0), 0)
+#define EX_TABLE_UA_MVCOS_TO(_fault, _target) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_TO, __stringify(%%r0), __stringify(%%r0), 0)
+
+#define EX_TABLE_UA_MVCOS_FROM(_fault, _target) \
+ __EX_TABLE(__ex_table, _fault, _target, EX_TYPE_UA_MVCOS_FROM, __stringify(%%r0), __stringify(%%r0), 0)
+
#endif /* __ASM_EXTABLE_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index f5920163ee97..f9ace938743c 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -44,43 +44,42 @@ union oac {
};
};
-static __always_inline __must_check unsigned long
+#ifdef CONFIG_KMSAN
+#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
+#else
+#define uaccess_kmsan_or_inline __always_inline
+#endif
+
+static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_from_user_key(void *to, const void __user *from, unsigned long size, unsigned long key)
{
- unsigned long rem;
+ unsigned long osize;
union oac spec = {
.oac2.key = key,
.oac2.as = PSW_BITS_AS_SECONDARY,
.oac2.k = 1,
.oac2.a = 1,
};
-
- asm_inline volatile(
- " lr %%r0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[from],%[val]\n"
- " slgr %[to],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[from])\n" /* rem = from + 4095 */
- " nr %[rem],%[val]\n" /* rem = (from + 4095) & -4096 */
- " slgr %[rem],%[from]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: lghi %[size],0\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [from] "+&a" (from), [to] "+&a" (to), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
+ int cc;
+
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_FROM(0b, 0b)
+ EX_TABLE_UA_MVCOS_FROM(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char *)to)
+ : [spec] "d" (spec.val), [from] "Q" (*(const char __user *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (likely(CC_TRANSFORM(cc) == 0))
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
static __always_inline __must_check unsigned long
@@ -89,43 +88,36 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n)
return raw_copy_from_user_key(to, from, n, 0);
}
-static __always_inline __must_check unsigned long
+static uaccess_kmsan_or_inline __must_check unsigned long
raw_copy_to_user_key(void __user *to, const void *from, unsigned long size, unsigned long key)
{
- unsigned long rem;
+ unsigned long osize;
union oac spec = {
.oac1.key = key,
.oac1.as = PSW_BITS_AS_SECONDARY,
.oac1.k = 1,
.oac1.a = 1,
};
-
- asm_inline volatile(
- " lr %%r0,%[spec]\n"
- "0: mvcos 0(%[to]),0(%[from]),%[size]\n"
- "1: jz 5f\n"
- " algr %[size],%[val]\n"
- " slgr %[to],%[val]\n"
- " slgr %[from],%[val]\n"
- " j 0b\n"
- "2: la %[rem],4095(%[to])\n" /* rem = to + 4095 */
- " nr %[rem],%[val]\n" /* rem = (to + 4095) & -4096 */
- " slgr %[rem],%[to]\n"
- " clgr %[size],%[rem]\n" /* copy crosses next page boundary? */
- " jnh 6f\n"
- "3: mvcos 0(%[to]),0(%[from]),%[rem]\n"
- "4: slgr %[size],%[rem]\n"
- " j 6f\n"
- "5: lghi %[size],0\n"
- "6:\n"
- EX_TABLE(0b, 2b)
- EX_TABLE(1b, 2b)
- EX_TABLE(3b, 6b)
- EX_TABLE(4b, 6b)
- : [size] "+&a" (size), [to] "+&a" (to), [from] "+&a" (from), [rem] "=&a" (rem)
- : [val] "a" (-4096UL), [spec] "d" (spec.val)
- : "cc", "memory", "0");
- return size;
+ int cc;
+
+ while (1) {
+ osize = size;
+ asm_inline volatile(
+ " lr %%r0,%[spec]\n"
+ "0: mvcos %[to],%[from],%[size]\n"
+ "1: nopr %%r7\n"
+ CC_IPM(cc)
+ EX_TABLE_UA_MVCOS_TO(0b, 0b)
+ EX_TABLE_UA_MVCOS_TO(1b, 0b)
+ : CC_OUT(cc, cc), [size] "+d" (size), [to] "=Q" (*(char __user *)to)
+ : [spec] "d" (spec.val), [from] "Q" (*(const char *)from)
+ : CC_CLOBBER_LIST("memory", "0"));
+ if (likely(CC_TRANSFORM(cc) == 0))
+ return osize - size;
+ size -= 4096;
+ to += 4096;
+ from += 4096;
+ }
}
static __always_inline __must_check unsigned long
@@ -158,12 +150,6 @@ copy_to_user_key(void __user *to, const void *from, unsigned long n, unsigned lo
int __noreturn __put_user_bad(void);
-#ifdef CONFIG_KMSAN
-#define uaccess_kmsan_or_inline noinline __maybe_unused __no_sanitize_memory
-#else
-#define uaccess_kmsan_or_inline __always_inline
-#endif
-
#ifdef CONFIG_CC_HAS_ASM_GOTO_OUTPUT
#define DEFINE_PUT_USER_NOINSTR(type) \
diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c
index a046be1715cf..7498e858c401 100644
--- a/arch/s390/mm/extable.c
+++ b/arch/s390/mm/extable.c
@@ -73,6 +73,49 @@ static bool ex_handler_fpc(const struct exception_table_entry *ex, struct pt_reg
return true;
}
+struct insn_ssf {
+ u64 opc1 : 8;
+ u64 r3 : 4;
+ u64 opc2 : 4;
+ u64 b1 : 4;
+ u64 d1 : 12;
+ u64 b2 : 4;
+ u64 d2 : 12;
+} __packed;
+
+static bool ex_handler_ua_mvcos(const struct exception_table_entry *ex,
+ bool from, struct pt_regs *regs)
+{
+ unsigned long uaddr, remainder;
+ struct insn_ssf *insn;
+
+ /*
+ * If the faulting user space access crossed a page boundary retry by
+ * limiting the access to the first page (adjust length accordingly).
+ * Then the mvcos instruction will either complete with condition code
+ * zero, or generate another fault where the user space access did not
+ * cross a page boundary.
+ * If the faulting user space access did not cross a page boundary set
+ * length to zero and retry. In this case no user space access will
+ * happen, and the mvcos instruction will complete with condition code
+ * zero.
+ * In both cases the instruction will complete with condition code
+ * zero (copying finished), and the register which contains the
+ * length, indicates the number of bytes copied.
+ */
+ regs->psw.addr = extable_fixup(ex);
+ insn = (struct insn_ssf *)regs->psw.addr;
+ if (from)
+ uaddr = regs->gprs[insn->b2] + insn->d2;
+ else
+ uaddr = regs->gprs[insn->b1] + insn->d1;
+ remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1));
+ if (regs->gprs[insn->r3] <= remainder)
+ remainder = 0;
+ regs->gprs[insn->r3] = remainder;
+ return true;
+}
+
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -95,6 +138,10 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_zeropad(ex, regs);
case EX_TYPE_FPC:
return ex_handler_fpc(ex, regs);
+ case EX_TYPE_UA_MVCOS_TO:
+ return ex_handler_ua_mvcos(ex, false, regs);
+ case EX_TYPE_UA_MVCOS_FROM:
+ return ex_handler_ua_mvcos(ex, true, regs);
}
panic("invalid exception table entry");
}
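For illustration, the page boundary adjustment performed by ex_handler_ua_mvcos() above can be modelled as a small standalone C program. This is a sketch of the logic only (assuming a PAGE_SIZE of 4096); uaddr and len stand for the faulting user space address and the value of the length register:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Model of the length adjustment done by the exception handler: limit
 * the access to the first page, or to zero bytes if the faulting
 * access did not cross a page boundary.
 */
static unsigned long adjust_len(unsigned long uaddr, unsigned long len)
{
	unsigned long remainder = PAGE_SIZE - (uaddr & (PAGE_SIZE - 1));

	if (len <= remainder)	/* access did not cross a page boundary */
		remainder = 0;
	return remainder;
}

int main(void)
{
	/*
	 * Fault at 0x1000ff0 with 100 bytes requested: the access crosses
	 * into the next page, so the retried mvcos copies the 16 bytes up
	 * to the page boundary and completes with condition code zero.
	 */
	printf("%lu\n", adjust_len(0x1000ff0UL, 100));	/* prints 16 */
	/*
	 * Fault at the same address with 8 bytes requested: no page
	 * crossing, so the length is set to zero and no user space
	 * access happens on the retry.
	 */
	printf("%lu\n", adjust_len(0x1000ff0UL, 8));	/* prints 0 */
	return 0;
}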