From 9641c7cc5a7f6d5c9dc9b43eea4e5f8c3c08c94e Mon Sep 17 00:00:00 2001
From: Russell King
Date: Wed, 21 Jun 2006 20:38:17 +0100
Subject: [ARM] nommu: uaccess tweaks

MMUless systems have only one address space for all threads, so both
the usual access_ok() checks and the exception handling do not make
much sense.

Hence, discard the fixup and exception tables at link time, use
memcpy/memset for the user copy/clearing functions, and define the
permission check macros to be constants.

Some of this patch was derived from the equivalent patch by
Hyok S. Choi.

Signed-off-by: Hyok S. Choi
Signed-off-by: Russell King
---
 arch/arm/kernel/armksyms.c    |  7 +++++--
 arch/arm/kernel/vmlinux.lds.S |  8 ++++++++
 arch/arm/lib/Makefile         | 13 ++++++++-----
 3 files changed, 21 insertions(+), 7 deletions(-)
(limited to 'arch/arm')

diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index f8bb7abd3e9b..da69e660574b 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -109,11 +109,13 @@ EXPORT_SYMBOL(memchr);
 EXPORT_SYMBOL(__memzero);
 
 	/* user mem (segment) */
+EXPORT_SYMBOL(__strnlen_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+
+#ifdef CONFIG_MMU
 EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
-EXPORT_SYMBOL(__strnlen_user);
-EXPORT_SYMBOL(__strncpy_from_user);
 
 EXPORT_SYMBOL(__get_user_1);
 EXPORT_SYMBOL(__get_user_2);
@@ -123,6 +125,7 @@ EXPORT_SYMBOL(__put_user_1);
 EXPORT_SYMBOL(__put_user_2);
 EXPORT_SYMBOL(__put_user_4);
 EXPORT_SYMBOL(__put_user_8);
+#endif
 
 	/* crypto hash */
 EXPORT_SYMBOL(sha_transform);
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 2b254e88595c..2df9688a7028 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -80,6 +80,10 @@ SECTIONS
 		*(.exit.text)
 		*(.exit.data)
 		*(.exitcall.exit)
+#ifndef CONFIG_MMU
+		*(.fixup)
+		*(__ex_table)
+#endif
 	}
 
 	.text : {			/* Real text segment		*/
@@ -87,7 +91,9 @@ SECTIONS
 			*(.text)
 			SCHED_TEXT
 			LOCK_TEXT
+#ifdef CONFIG_MMU
 			*(.fixup)
+#endif
 			*(.gnu.warning)
 			*(.rodata)
 			*(.rodata.*)
@@ -142,7 +148,9 @@ SECTIONS
 	 */
 	. = ALIGN(32);
 	__start___ex_table = .;
+#ifdef CONFIG_MMU
 	*(__ex_table)
+#endif
 	__stop___ex_table = .;
 
 	/*
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 7b726b627ea5..30351cd4560d 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -6,28 +6,31 @@
 
 lib-y		:= backtrace.o changebit.o csumipv6.o csumpartial.o   \
 		   csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
-		   copy_page.o delay.o findbit.o memchr.o memcpy.o    \
+		   delay.o findbit.o memchr.o memcpy.o                \
 		   memmove.o memset.o memzero.o setbit.o              \
 		   strncpy_from_user.o strnlen_user.o                 \
 		   strchr.o strrchr.o                                 \
 		   testchangebit.o testclearbit.o testsetbit.o        \
-		   getuser.o putuser.o clear_user.o                   \
 		   ashldi3.o ashrdi3.o lshrdi3.o muldi3.o             \
 		   ucmpdi2.o lib1funcs.o div64.o sha1.o               \
 		   io-readsb.o io-writesb.o io-readsl.o io-writesl.o
 
+mmu-y	:= clear_user.o copy_page.o getuser.o putuser.o
+
 # the code in uaccess.S is not preemption safe and
 # probably faster on ARMv3 only
 ifeq ($(CONFIG_PREEMPT),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
 ifneq ($(CONFIG_CPU_32v3),y)
-  lib-y	+= copy_from_user.o copy_to_user.o
+  mmu-y	+= copy_from_user.o copy_to_user.o
 else
-  lib-y	+= uaccess.o
+  mmu-y	+= uaccess.o
 endif
 endif
 
+lib-$(CONFIG_MMU) += $(mmu-y)
+
 ifeq ($(CONFIG_CPU_32v3),y)
   lib-y	+= io-readsw-armv3.o io-writesw-armv3.o
 else
--
cgit v1.2.3
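
A note on the header side of this change: the arch/arm diff above only drops the
MMU-specific objects, fixup code and exception tables from the build and link;
the nommu uaccess definitions themselves live in the uaccess header, which is
outside the 'arch/arm' filter of this view. As a rough illustration of what the
commit message describes ("use memcpy/memset for the user copy/clearing
functions, and define the permission check macros to be constants"), the
!CONFIG_MMU side of such a header could look like the sketch below. This is an
illustrative sketch only, not the verbatim kernel header; the exact macro
names, casts and return conventions shown here are assumptions.

#ifndef CONFIG_MMU
/*
 * Sketch of nommu uaccess helpers (illustrative, not verbatim kernel code).
 *
 * With a single flat address space, a "user" pointer is as good as a kernel
 * pointer, so the permission checks can collapse to compile-time constants,
 * and user copies can never fault - plain memcpy/memset is sufficient, which
 * is why the .fixup/__ex_table sections discarded by the vmlinux.lds.S change
 * above are never needed.
 */
#define __addr_ok(addr)			(1)
#define __range_ok(addr, size)		(0)	/* 0 taken to mean "access allowed" */

#define __copy_from_user(to, from, n)	(memcpy((to), (const void *)(from), (n)), 0)
#define __copy_to_user(to, from, n)	(memcpy((void *)(to), (from), (n)), 0)
#define __clear_user(addr, n)		(memset((void *)(addr), 0, (n)), 0)
#endif /* CONFIG_MMU */

The copy/clear macros evaluate to 0 to match the usual uaccess convention that
these primitives return the number of bytes that could not be transferred;
without an MMU nothing can fault, so that count is always zero.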