author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 00:41:04 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-02 00:41:04 +0300
commit     5db6db0d400edd8bec274e34960cfa22838e1df5 (patch)
tree       3d7934f2eb27a2b72b87eae3c2918cf2e635d814 /arch/microblaze/include/asm
parent     5fab10041b4389b61de7e7a49893190bae686241 (diff)
parent     2fefc97b2180518bac923fba3f79fdca1f41dc15 (diff)
download   linux-5db6db0d400edd8bec274e34960cfa22838e1df5.tar.xz
Merge branch 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull uaccess unification updates from Al Viro:
 "This is the uaccess unification pile. It's _not_ the end of uaccess
  work, but the next batch of that will go into the next cycle. This
  one mostly takes copy_from_user() and friends out of arch/* and gets
  the zero-padding behaviour in sync for all architectures.

  Dealing with the nocache/writethrough mess is for the next cycle;
  fortunately, that's x86-only. Same for cleanups in iov_iter.c (I am
  sold on access_ok() in there, BTW; just not in this pile), same for
  reducing __copy_... callsites, strn*... stuff, etc. - there will be
  a pile about as large as this one in the next merge window.

  This one sat in -next for weeks. -3KLoC"

* 'work.uaccess' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs: (96 commits)
  HAVE_ARCH_HARDENED_USERCOPY is unconditional now
  CONFIG_ARCH_HAS_RAW_COPY_USER is unconditional now
  m32r: switch to RAW_COPY_USER
  hexagon: switch to RAW_COPY_USER
  microblaze: switch to RAW_COPY_USER
  get rid of padding, switch to RAW_COPY_USER
  ia64: get rid of copy_in_user()
  ia64: sanitize __access_ok()
  ia64: get rid of 'segment' argument of __do_{get,put}_user()
  ia64: get rid of 'segment' argument of __{get,put}_user_check()
  ia64: add extable.h
  powerpc: get rid of zeroing, switch to RAW_COPY_USER
  esas2r: don't open-code memdup_user()
  alpha: fix stack smashing in old_adjtimex(2)
  don't open-code kernel_setsockopt()
  mips: switch to RAW_COPY_USER
  mips: get rid of tail-zeroing in primitives
  mips: make copy_from_user() zero tail explicitly
  mips: clean and reorder the forest of macros...
  mips: consolidate __invoke_... wrappers
  ...
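For context on the zero-padding unification mentioned above: with CONFIG_ARCH_HAS_RAW_COPY_USER an architecture only supplies raw_copy_{to,from}_user(), while the generic wrapper performs the access_ok() check and zeroes the uncopied tail. A simplified sketch of that generic helper, assuming the 4.12-era include/linux/uaccess.h (details abridged):

    static inline unsigned long
    _copy_from_user(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            might_fault();
            if (likely(access_ok(VERIFY_READ, from, n)))
                    res = raw_copy_from_user(to, from, n);  /* per-arch primitive */
            if (unlikely(res))
                    /* partial copy: zero the uncopied tail, same on every arch now */
                    memset(to + (n - res), 0, res);
            return res;
    }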
Diffstat (limited to 'arch/microblaze/include/asm')
-rw-r--r--  arch/microblaze/include/asm/Kbuild      1
-rw-r--r--  arch/microblaze/include/asm/uaccess.h  64
2 files changed, 10 insertions, 55 deletions
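The Kbuild hunk below pulls in the generic extable.h, which carries the same two-field exception table entry that the uaccess.h hunk deletes; roughly, per asm-generic/extable.h of that era (abridged):

    struct exception_table_entry {
            unsigned long insn;     /* address of the instruction allowed to fault */
            unsigned long fixup;    /* address where execution resumes after the fault */
    };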
diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild
index 1732ec13b211..56830ff65333 100644
--- a/arch/microblaze/include/asm/Kbuild
+++ b/arch/microblaze/include/asm/Kbuild
@@ -3,6 +3,7 @@ generic-y += barrier.h
generic-y += clkdev.h
generic-y += device.h
generic-y += exec.h
+generic-y += extable.h
generic-y += irq_work.h
generic-y += mcs_spinlock.h
generic-y += mm-arch-hooks.h
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 253a67e275ad..38f2c9ccef10 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -11,22 +11,15 @@
#ifndef _ASM_MICROBLAZE_UACCESS_H
#define _ASM_MICROBLAZE_UACCESS_H
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-
#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sched.h> /* RLIMIT_FSIZE */
#include <linux/mm.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
+#include <asm/extable.h>
#include <linux/string.h>
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
/*
* On Microblaze the fs value is actually the top of the corresponding
* address space.
@@ -55,22 +48,6 @@
# define segment_eq(a, b) ((a).seg == (b).seg)
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
-
#ifndef CONFIG_MMU
/* Check against bounds of physical memory */
@@ -359,39 +336,19 @@ extern long __user_bad(void);
__gu_err; \
})
-
-/* copy_to_from_user */
-#define __copy_from_user(to, from, n) \
- __copy_tofrom_user((__force void __user *)(to), \
- (void __user *)(from), (n))
-#define __copy_from_user_inatomic(to, from, n) \
- __copy_from_user((to), (from), (n))
-
-static inline long copy_from_user(void *to,
- const void __user *from, unsigned long n)
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
- unsigned long res = n;
- might_fault();
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
+ return __copy_tofrom_user((__force void __user *)to, from, n);
}
-#define __copy_to_user(to, from, n) \
- __copy_tofrom_user((void __user *)(to), \
- (__force const void __user *)(from), (n))
-#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n))
-
-static inline long copy_to_user(void __user *to,
- const void *from, unsigned long n)
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- might_fault();
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_to_user(to, from, n);
- return n;
+ return __copy_tofrom_user(to, (__force const void __user *)from, n);
}
+#define INLINE_COPY_FROM_USER
+#define INLINE_COPY_TO_USER
/*
* Copy a null terminated string from userspace.
@@ -422,7 +379,4 @@ static inline long strnlen_user(const char __user *src, long n)
return __strnlen_user(src, n);
}
-#endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
#endif /* _ASM_MICROBLAZE_UACCESS_H */
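Callers elsewhere in the kernel are unaffected: they keep using copy_to_user()/copy_from_user(), which now route through the generic wrappers into the raw_copy_*() primitives above (the INLINE_COPY_{FROM,TO}_USER defines ask the generic header to provide the wrappers as inlines). A hypothetical driver read handler, purely for illustration — demo_read() is not part of this patch:

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            static const char msg[] = "hello from microblaze\n";
            size_t len = min(count, sizeof(msg));

            /* copy_to_user() returns the number of bytes it could NOT copy */
            if (copy_to_user(buf, msg, len))
                    return -EFAULT;
            return len;
    }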