author		Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 00:41:04 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-05-02 00:41:04 +0300
commit		5db6db0d400edd8bec274e34960cfa22838e1df5
tree		3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/sh/include/asm/uaccess.h
parent		5fab10041b4389b61de7e7a49893190bae686241
parent		2fefc97b2180518bac923fba3f79fdca1f41dc15
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle.

  This one mostly takes copy_from_user() and friends out of arch/* and
  gets the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be
  a pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
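For context, CONFIG_ARCH_HAS_RAW_COPY_USER reduces what an architecture must provide to raw_copy_{to,from}_user(), each returning the number of bytes it failed to copy; the access_ok() check and the zero-padding of a partially-copied buffer live in generic code. A minimal sketch of the shape of that generic wrapper, simplified from the include/linux/uaccess.h of this cycle (might_fault()/KASAN hooks omitted):

/*
 * Generic wrapper shared by RAW_COPY_USER architectures (sketch).
 * raw_copy_from_user() returns the number of bytes NOT copied; on a
 * partial copy the tail of the kernel buffer is zeroed here, which is
 * the behaviour this series makes uniform across architectures.
 */
static inline unsigned long
_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	unsigned long res = n;

	if (likely(access_ok(VERIFY_READ, from, n)))
		res = raw_copy_from_user(to, from, n);
	if (unlikely(res))
		memset(to + (n - res), 0, res);	/* zero the uncopied tail */
	return res;
}

An architecture that defines INLINE_COPY_FROM_USER (as the sh patch below does) gets this wrapper inlined at each call site; otherwise a single out-of-line copy is built in lib/usercopy.c.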
Diffstat (limited to 'arch/sh/include/asm/uaccess.h')
-rw-r--r--	arch/sh/include/asm/uaccess.h	64
1 file changed, 5 insertions, 59 deletions
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index c4f0fee812c3..2722b61b2283 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -1,12 +1,8 @@
#ifndef __ASM_SH_UACCESS_H
#define __ASM_SH_UACCESS_H
-#include <linux/errno.h>
-#include <linux/sched.h>
#include <asm/segment.h>
-
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
+#include <asm/extable.h>
#define __addr_ok(addr) \
((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)
@@ -112,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n);
__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
static __always_inline unsigned long
-__copy_from_user(void *to, const void __user *from, unsigned long n)
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
return __copy_user(to, (__force void *)from, n);
}
static __always_inline unsigned long __must_check
-__copy_to_user(void __user *to, const void *from, unsigned long n)
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
return __copy_user((__force void *)to, from, n);
}
-
-#define __copy_to_user_inatomic __copy_to_user
-#define __copy_from_user_inatomic __copy_from_user
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Clear the area and return remaining number of bytes
@@ -144,55 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
__cl_size; \
})
-static inline unsigned long
-copy_from_user(void *to, const void __user *from, unsigned long n)
-{
- unsigned long __copy_from = (unsigned long) from;
- __kernel_size_t __copy_size = (__kernel_size_t) n;
-
- if (__copy_size && __access_ok(__copy_from, __copy_size))
- __copy_size = __copy_user(to, from, __copy_size);
-
- if (unlikely(__copy_size))
- memset(to + (n - __copy_size), 0, __copy_size);
-
- return __copy_size;
-}
-
-static inline unsigned long
-copy_to_user(void __user *to, const void *from, unsigned long n)
-{
- unsigned long __copy_to = (unsigned long) to;
- __kernel_size_t __copy_size = (__kernel_size_t) n;
-
- if (__copy_size && __access_ok(__copy_to, __copy_size))
- return __copy_user(to, from, __copy_size);
-
- return __copy_size;
-}
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
-#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU)
-#define ARCH_HAS_SEARCH_EXTABLE
-#endif
-
-int fixup_exception(struct pt_regs *regs);
-
extern void *set_exception_table_vec(unsigned int vec, void *handler);
static inline void *set_exception_table_evt(unsigned int evt, void *handler)
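The exception-table machinery removed in the last hunk is not lost: the new <asm/extable.h> include added at the top of the file pulls in the generic definition instead. A sketch of what that generic header provides, assuming the asm-generic/extable.h of this period:

/*
 * Generic exception table entry (sketch), replacing the sh-specific
 * definition deleted above.  As the removed comment described, each
 * entry pairs the address of an instruction that is allowed to fault
 * (insn) with the address at which execution should resume (fixup).
 */
struct exception_table_entry {
	unsigned long insn, fixup;
};

struct pt_regs;
int fixup_exception(struct pt_regs *regs);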