From 42d4dc3f4e1ec1396371aac89d0dccfdd977191b Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 29 Apr 2005 07:40:12 -0700 Subject: [PATCH] Add suspend method to cpufreq core In order to properly fix some issues with cpufreq vs. sleep on PowerBooks, I had to add a suspend callback to the pmac_cpufreq driver. I must force a switch to full speed before sleep and I switch back to previous speed on resume. I also added a driver flag to disable the warnings in suspend/resume since it is expected in this case to have different speed (and I want it to fixup the jiffies properly). Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cpufreq.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 910eca35583d..f21af067d015 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -103,6 +103,7 @@ struct cpufreq_policy { #define CPUFREQ_PRECHANGE (0) #define CPUFREQ_POSTCHANGE (1) #define CPUFREQ_RESUMECHANGE (8) +#define CPUFREQ_SUSPENDCHANGE (9) struct cpufreq_freqs { unsigned int cpu; /* cpu nr */ @@ -200,6 +201,7 @@ struct cpufreq_driver { /* optional */ int (*exit) (struct cpufreq_policy *policy); + int (*suspend) (struct cpufreq_policy *policy, u32 state); int (*resume) (struct cpufreq_policy *policy); struct freq_attr **attr; }; @@ -211,7 +213,8 @@ struct cpufreq_driver { #define CPUFREQ_CONST_LOOPS 0x02 /* loops_per_jiffy or other kernel * "constants" aren't affected by * frequency transitions */ - +#define CPUFREQ_PM_NO_WARN 0x04 /* don't warn on suspend/resume speed + * mismatches */ int cpufreq_register_driver(struct cpufreq_driver *driver_data); int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); -- cgit v1.2.3 From 8443b165f13d21214e5d5495eee7c3bf7f2456bf Mon Sep 17 00:00:00 2001 From: Lennert Buytenhek Date: Fri, 29 Apr 2005 21:58:15 +0100 Subject: [PATCH] ARM: 2657/1: export ixp2000_pci_config_addr Patch from Lennert Buytenhek Export ixp2000_pci_config_addr, to be used by the IXDP2800 platform setup code to coordinate booting the master and slave NPU. 
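As a purely illustrative aside (not part of either patch), the prototype added to platform.h below suggests how IXDP2800 setup code might call the newly exported helper; the bus, devfn and register values here are made up, and the NULL check is only a defensive assumption about the return value:

#include <linux/pci.h>
#include <asm/arch/platform.h>

/* Hypothetical sketch: peek at a config-space dword of the other NPU. */
static u32 peek_slave_npu_id(void)
{
        u32 *addr = ixp2000_pci_config_addr(0, PCI_DEVFN(1, 0),
                                            PCI_VENDOR_ID);

        return addr ? *addr : 0xffffffff;
}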
Signed-off-by: Lennert Buytenhek Signed-off-by: Deepak Saxena Signed-off-by: Russell King --- arch/arm/mach-ixp2000/pci.c | 2 +- include/asm-arm/arch-ixp2000/platform.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) (limited to 'include') diff --git a/arch/arm/mach-ixp2000/pci.c b/arch/arm/mach-ixp2000/pci.c index 831f8ffb6b61..36c9a94298cc 100644 --- a/arch/arm/mach-ixp2000/pci.c +++ b/arch/arm/mach-ixp2000/pci.c @@ -37,7 +37,7 @@ static int pci_master_aborts = 0; static int clear_master_aborts(void); -static u32 * +u32 * ixp2000_pci_config_addr(unsigned int bus_nr, unsigned int devfn, int where) { u32 *paddress; diff --git a/include/asm-arm/arch-ixp2000/platform.h b/include/asm-arm/arch-ixp2000/platform.h index 509e44d528d8..901bba6d02b4 100644 --- a/include/asm-arm/arch-ixp2000/platform.h +++ b/include/asm-arm/arch-ixp2000/platform.h @@ -121,6 +121,7 @@ unsigned long ixp2000_gettimeoffset(void); struct pci_sys_data; +u32 *ixp2000_pci_config_addr(unsigned int bus, unsigned int devfn, int where); void ixp2000_pci_preinit(void); int ixp2000_pci_setup(int, struct pci_sys_data*); struct pci_bus* ixp2000_pci_scan_bus(int, struct pci_sys_data*); -- cgit v1.2.3 From 2d2669b62984b8d76b05a6a045390a3250317d21 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 29 Apr 2005 22:08:33 +0100 Subject: [PATCH] ARM: 2651/3: kernel helpers for NPTL support Patch from Nicolas Pitre This patch entirely reworks the kernel assistance for NPTL on ARM. In particular this provides an efficient way to retrieve the TLS value and perform atomic operations without any instruction emulation nor special system call. This even allows for pre ARMv6 binaries to be forward compatible with SMP systems without any penalty. The problematic and performance critical operations are performed through segment of kernel provided user code reachable from user space at a fixed address in kernel memory. Those fixed entry points are within the vector page so we basically get it for free as no extra memory page is required and nothing else may be mapped at that location anyway. This is different from (but doesn't preclude) a full blown VDSO implementation, however a VDSO would prevent some assembly tricks with constants that allows for efficient branching to those code segments. And since those code segments only use a few cycles before returning to user code, the overhead of a VDSO far call would add a significant overhead to such minimalistic operations. The ARM_NR_set_tls syscall also changed number. This is done for two reasons: 1) this patch changes the way the TLS value was previously meant to be retrieved, therefore we ensure whatever library using the old way gets fixed (they only exist in private tree at the moment since the NPTL work is still progressing). 2) the previous number was allocated in a range causing an undefined instruction trap on kernels not supporting that syscall and it was determined that allocating it in a range returning -ENOSYS would be much nicer for libraries trying to determine if the feature is present or not. 
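A user-space illustration (not part of the patch): the sketch below probes the helper version word and calls the two entry points through their fixed addresses, using the typedefs and address macros taken from the reference prototypes documented in entry-armv.S; the version threshold is an assumption based on the 32-byte-per-helper layout described there.

/* Illustrative user-space code only; the addresses and prototypes come
 * from the comments added to arch/arm/kernel/entry-armv.S. */
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr);
typedef int (__kernel_get_tls_t)(void);

#define __kernel_helper_version (*(unsigned int *)0xffff0ffc)
#define __kernel_cmpxchg        (*(__kernel_cmpxchg_t *)0xffff0fc0)
#define __kernel_get_tls        (*(__kernel_get_tls_t *)0xffff0fe0)

static int set_if_equal(int *ptr, int old, int new)
{
        if (__kernel_helper_version < 2)
                return -1;      /* cmpxchg helper not provided by this kernel */
        /* returns zero if *ptr was changed from old to new */
        return __kernel_cmpxchg(old, new, ptr);
}

static void *get_thread_pointer(void)
{
        return (void *)__kernel_get_tls();
}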
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/kernel/entry-armv.S | 213 ++++++++++++++++++++++++++++++++++++++++++- arch/arm/kernel/traps.c | 58 ++++++++++-- arch/arm/mm/Kconfig | 14 +++ include/asm-arm/unistd.h | 3 +- 4 files changed, 277 insertions(+), 11 deletions(-) (limited to 'include') diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S index 2a5c3fe09a95..080df907f242 100644 --- a/arch/arm/kernel/entry-armv.S +++ b/arch/arm/kernel/entry-armv.S @@ -269,6 +269,12 @@ __pabt_svc: add r5, sp, #S_PC ldmia r7, {r2 - r4} @ Get USR pc, cpsr +#if __LINUX_ARM_ARCH__ < 6 + @ make sure our user space atomic helper is aborted + cmp r2, #VIRT_OFFSET + bichs r3, r3, #PSR_Z_BIT +#endif + @ @ We are now ready to fill in the remaining blanks on the stack: @ @@ -499,8 +505,12 @@ ENTRY(__switch_to) mra r4, r5, acc0 stmia ip, {r4, r5} #endif +#ifdef CONFIG_HAS_TLS_REG + mcr p15, 0, r3, c13, c0, 3 @ set TLS register +#else mov r4, #0xffff0fff - str r3, [r4, #-3] @ Set TLS ptr + str r3, [r4, #-15] @ TLS val at 0xffff0ff0 +#endif mcr p15, 0, r6, c3, c0, 0 @ Set domain register #ifdef CONFIG_VFP @ Always disable VFP so we can lazily save/restore the old @@ -519,6 +529,207 @@ ENTRY(__switch_to) ldmib r2, {r4 - sl, fp, sp, pc} @ Load all regs saved previously __INIT + +/* + * User helpers. + * + * These are segment of kernel provided user code reachable from user space + * at a fixed address in kernel memory. This is used to provide user space + * with some operations which require kernel help because of unimplemented + * native feature and/or instructions in many ARM CPUs. The idea is for + * this code to be executed directly in user mode for best efficiency but + * which is too intimate with the kernel counter part to be left to user + * libraries. In fact this code might even differ from one CPU to another + * depending on the available instruction set and restrictions like on + * SMP systems. In other words, the kernel reserves the right to change + * this code as needed without warning. Only the entry points and their + * results are guaranteed to be stable. + * + * Each segment is 32-byte aligned and will be moved to the top of the high + * vector page. New segments (if ever needed) must be added in front of + * existing ones. This mechanism should be used only for things that are + * really small and justified, and not be abused freely. + * + * User space is expected to implement those things inline when optimizing + * for a processor that has the necessary native support, but only if such + * resulting binaries are already to be incompatible with earlier ARM + * processors due to the use of unsupported instructions other than what + * is provided here. In other words don't make binaries unable to run on + * earlier processors just for the sake of not using these kernel helpers + * if your compiled code is not going to use the new instructions for other + * purpose. 
+ */ + + .align 5 + .globl __kuser_helper_start +__kuser_helper_start: + +/* + * Reference prototype: + * + * int __kernel_cmpxchg(int oldval, int newval, int *ptr) + * + * Input: + * + * r0 = oldval + * r1 = newval + * r2 = ptr + * lr = return address + * + * Output: + * + * r0 = returned value (zero or non-zero) + * C flag = set if r0 == 0, clear if r0 != 0 + * + * Clobbered: + * + * r3, ip, flags + * + * Definition and user space usage example: + * + * typedef int (__kernel_cmpxchg_t)(int oldval, int newval, int *ptr); + * #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0) + * + * Atomically store newval in *ptr if *ptr is equal to oldval for user space. + * Return zero if *ptr was changed or non-zero if no exchange happened. + * The C flag is also set if *ptr was changed to allow for assembly + * optimization in the calling code. + * + * For example, a user space atomic_add implementation could look like this: + * + * #define atomic_add(ptr, val) \ + * ({ register unsigned int *__ptr asm("r2") = (ptr); \ + * register unsigned int __result asm("r1"); \ + * asm volatile ( \ + * "1: @ atomic_add\n\t" \ + * "ldr r0, [r2]\n\t" \ + * "mov r3, #0xffff0fff\n\t" \ + * "add lr, pc, #4\n\t" \ + * "add r1, r0, %2\n\t" \ + * "add pc, r3, #(0xffff0fc0 - 0xffff0fff)\n\t" \ + * "bcc 1b" \ + * : "=&r" (__result) \ + * : "r" (__ptr), "rIL" (val) \ + * : "r0","r3","ip","lr","cc","memory" ); \ + * __result; }) + */ + +__kuser_cmpxchg: @ 0xffff0fc0 + +#if __LINUX_ARM_ARCH__ < 6 + +#ifdef CONFIG_SMP /* sanity check */ +#error "CONFIG_SMP on a machine supporting pre-ARMv6 processors?" +#endif + + /* + * Theory of operation: + * + * We set the Z flag before loading oldval. If ever an exception + * occurs we can not be sure the loaded value will still be the same + * when the exception returns, therefore the user exception handler + * will clear the Z flag whenever the interrupted user code was + * actually from the kernel address space (see the usr_entry macro). + * + * The post-increment on the str is used to prevent a race with an + * exception happening just after the str instruction which would + * clear the Z flag although the exchange was done. + */ + teq ip, ip @ set Z flag + ldr ip, [r2] @ load current val + add r3, r2, #1 @ prepare store ptr + teqeq ip, r0 @ compare with oldval if still allowed + streq r1, [r3, #-1]! @ store newval if still allowed + subs r0, r2, r3 @ if r2 == r3 the str occured + mov pc, lr + +#else + + ldrex r3, [r2] + subs r3, r3, r0 + strexeq r3, r1, [r2] + rsbs r0, r3, #0 + mov pc, lr + +#endif + + .align 5 + +/* + * Reference prototype: + * + * int __kernel_get_tls(void) + * + * Input: + * + * lr = return address + * + * Output: + * + * r0 = TLS value + * + * Clobbered: + * + * the Z flag might be lost + * + * Definition and user space usage example: + * + * typedef int (__kernel_get_tls_t)(void); + * #define __kernel_get_tls (*(__kernel_get_tls_t *)0xffff0fe0) + * + * Get the TLS value as previously set via the __ARM_NR_set_tls syscall. 
+ * + * This could be used as follows: + * + * #define __kernel_get_tls() \ + * ({ register unsigned int __val asm("r0"); \ + * asm( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #31" \ + * : "=r" (__val) : : "lr","cc" ); \ + * __val; }) + */ + +__kuser_get_tls: @ 0xffff0fe0 + +#ifndef CONFIG_HAS_TLS_REG + +#ifdef CONFIG_SMP /* sanity check */ +#error "CONFIG_SMP without CONFIG_HAS_TLS_REG is wrong" +#endif + + ldr r0, [pc, #(16 - 8)] @ TLS stored at 0xffff0ff0 + mov pc, lr + +#else + + mrc p15, 0, r0, c13, c0, 3 @ read TLS register + mov pc, lr + +#endif + + .rep 5 + .word 0 @ pad up to __kuser_helper_version + .endr + +/* + * Reference declaration: + * + * extern unsigned int __kernel_helper_version; + * + * Definition and user space usage example: + * + * #define __kernel_helper_version (*(unsigned int *)0xffff0ffc) + * + * User space may read this to determine the curent number of helpers + * available. + */ + +__kuser_helper_version: @ 0xffff0ffc + .word ((__kuser_helper_end - __kuser_helper_start) >> 5) + + .globl __kuser_helper_end +__kuser_helper_end: + + /* * Vector stubs. * diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 0078aeb85737..3a001fe5540b 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -450,13 +450,17 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) case NR(set_tls): thread->tp_value = regs->ARM_r0; +#ifdef CONFIG_HAS_TLS_REG + asm ("mcr p15, 0, %0, c13, c0, 3" : : "r" (regs->ARM_r0) ); +#else /* - * Our user accessible TLS ptr is located at 0xffff0ffc. - * On SMP read access to this address must raise a fault - * and be emulated from the data abort handler. - * m + * User space must never try to access this directly. + * Expect your app to break eventually if you do so. + * The user helper at 0xffff0fe0 must be used instead. + * (see entry-armv.S for details) */ - *((unsigned long *)0xffff0ffc) = thread->tp_value; + *((unsigned int *)0xffff0ff0) = regs->ARM_r0; +#endif return 0; default: @@ -493,6 +497,41 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs) return 0; } +#if defined(CONFIG_CPU_32v6) && !defined(CONFIG_HAS_TLS_REG) + +/* + * We might be running on an ARMv6+ processor which should have the TLS + * register, but for some reason we can't use it and have to emulate it. + */ + +static int get_tp_trap(struct pt_regs *regs, unsigned int instr) +{ + int reg = (instr >> 12) & 15; + if (reg == 15) + return 1; + regs->uregs[reg] = current_thread_info()->tp_value; + regs->ARM_pc += 4; + return 0; +} + +static struct undef_hook arm_mrc_hook = { + .instr_mask = 0x0fff0fff, + .instr_val = 0x0e1d0f70, + .cpsr_mask = PSR_T_BIT, + .cpsr_val = 0, + .fn = get_tp_trap, +}; + +static int __init arm_mrc_hook_init(void) +{ + register_undef_hook(&arm_mrc_hook); + return 0; +} + +late_initcall(arm_mrc_hook_init); + +#endif + void __bad_xchg(volatile void *ptr, int size) { printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", @@ -580,14 +619,17 @@ void __init trap_init(void) { extern char __stubs_start[], __stubs_end[]; extern char __vectors_start[], __vectors_end[]; + extern char __kuser_helper_start[], __kuser_helper_end[]; + int kuser_sz = __kuser_helper_end - __kuser_helper_start; /* - * Copy the vectors and stubs (in entry-armv.S) into the - * vector page, mapped at 0xffff0000, and ensure these are - * visible to the instruction stream. + * Copy the vectors, stubs and kuser helpers (in entry-armv.S) + * into the vector page, mapped at 0xffff0000, and ensure these + * are visible to the instruction stream. 
*/ memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start); memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start); + memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz); flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE); modify_domain(DOMAIN_USER, DOMAIN_CLIENT); } diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 5b670c9ac5ef..007766a0644c 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -409,3 +409,17 @@ config CPU_BPREDICT_DISABLE depends on CPU_ARM1020 help Say Y here to disable branch prediction. If unsure, say N. + +config HAS_TLS_REG + bool + depends on CPU_32v6 && !CPU_32v5 && !CPU_32v4 && !CPU_32v3 + help + This selects support for the CP15 thread register. + It is defined to be available on ARMv6 or later. However + if the kernel is configured to support multiple CPUs including + a pre-ARMv6 processors, or if a given ARMv6 processor doesn't + implement the thread register for some reason, then access to + this register from user space must be trapped and emulated. + If user space is relying on the __kuser_get_tls code then + there should not be any impact. + diff --git a/include/asm-arm/unistd.h b/include/asm-arm/unistd.h index a19ec09eaa01..ace27480886e 100644 --- a/include/asm-arm/unistd.h +++ b/include/asm-arm/unistd.h @@ -359,8 +359,7 @@ #define __ARM_NR_cacheflush (__ARM_NR_BASE+2) #define __ARM_NR_usr26 (__ARM_NR_BASE+3) #define __ARM_NR_usr32 (__ARM_NR_BASE+4) - -#define __ARM_NR_set_tls (__ARM_NR_BASE+0x800) +#define __ARM_NR_set_tls (__ARM_NR_BASE+5) #define __sys2(x) #x #define __sys1(x) __sys2(x) -- cgit v1.2.3 From 05f9869bf20e11bcb9b64b9ebd6a9cf89d6b71ba Mon Sep 17 00:00:00 2001 From: Olav Kongas Date: Fri, 29 Apr 2005 22:08:34 +0100 Subject: [PATCH] ARM: 2649/1: Fix 'sparse -Wbitwise' warnings from MMIO macros Patch from Olav Kongas On ARM, the outX() and writeX() families of macros take the result of cpu_to_leYY(), which is of restricted type __leYY, and feed it to __raw_writeX(), which expect an argument of unrestricted type. This results in 'sparse -Wbitwise' warnings about incorrect types in assignments. Analogous type mismatch warnings are issued for inX() and readX() counterparts. The below patch resolves these warnings by adding forced typecasts. 
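To make the warning pattern concrete, here is a minimal sketch (illustrative only, assuming the usual 2.6 endian annotations from linux/types.h and a sparse build via 'make C=1') of the type mismatch the __force casts silence:

#include <linux/types.h>
#include <asm/byteorder.h>

static u16 mmio_shadow;         /* stands in for __raw_writew()'s argument */

static void store_le16(u16 v)
{
        /*
         * cpu_to_le16() returns a restricted __le16, so assigning it to a
         * plain u16 makes 'sparse -Wbitwise' warn about an incorrect type
         * in assignment.  The __force cast documents that the bit-level
         * reinterpretation is intentional and silences the warning:
         */
        mmio_shadow = (__force u16)cpu_to_le16(v);
}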
Signed-off-by: Olav Kongas Signed-off-by: Russell King --- include/asm-arm/io.h | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) (limited to 'include') diff --git a/include/asm-arm/io.h b/include/asm-arm/io.h index 69bc7a3e8160..658ffa384fda 100644 --- a/include/asm-arm/io.h +++ b/include/asm-arm/io.h @@ -99,12 +99,16 @@ extern void __readwrite_bug(const char *fn); */ #ifdef __io #define outb(v,p) __raw_writeb(v,__io(p)) -#define outw(v,p) __raw_writew(cpu_to_le16(v),__io(p)) -#define outl(v,p) __raw_writel(cpu_to_le32(v),__io(p)) +#define outw(v,p) __raw_writew((__force __u16) \ + cpu_to_le16(v),__io(p)) +#define outl(v,p) __raw_writel((__force __u32) \ + cpu_to_le32(v),__io(p)) -#define inb(p) ({ unsigned int __v = __raw_readb(__io(p)); __v; }) -#define inw(p) ({ unsigned int __v = le16_to_cpu(__raw_readw(__io(p))); __v; }) -#define inl(p) ({ unsigned int __v = le32_to_cpu(__raw_readl(__io(p))); __v; }) +#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; }) +#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \ + __raw_readw(__io(p))); __v; }) +#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \ + __raw_readl(__io(p))); __v; }) #define outsb(p,d,l) __raw_writesb(__io(p),d,l) #define outsw(p,d,l) __raw_writesw(__io(p),d,l) @@ -149,9 +153,11 @@ extern void _memset_io(void __iomem *, int, size_t); * IO port primitives for more information. */ #ifdef __mem_pci -#define readb(c) ({ unsigned int __v = __raw_readb(__mem_pci(c)); __v; }) -#define readw(c) ({ unsigned int __v = le16_to_cpu(__raw_readw(__mem_pci(c))); __v; }) -#define readl(c) ({ unsigned int __v = le32_to_cpu(__raw_readl(__mem_pci(c))); __v; }) +#define readb(c) ({ __u8 __v = __raw_readb(__mem_pci(c)); __v; }) +#define readw(c) ({ __u16 __v = le16_to_cpu((__force __le16) \ + __raw_readw(__mem_pci(c))); __v; }) +#define readl(c) ({ __u32 __v = le32_to_cpu((__force __le32) \ + __raw_readl(__mem_pci(c))); __v; }) #define readb_relaxed(addr) readb(addr) #define readw_relaxed(addr) readw(addr) #define readl_relaxed(addr) readl(addr) @@ -161,8 +167,10 @@ extern void _memset_io(void __iomem *, int, size_t); #define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l) #define writeb(v,c) __raw_writeb(v,__mem_pci(c)) -#define writew(v,c) __raw_writew(cpu_to_le16(v),__mem_pci(c)) -#define writel(v,c) __raw_writel(cpu_to_le32(v),__mem_pci(c)) +#define writew(v,c) __raw_writew((__force __u16) \ + cpu_to_le16(v),__mem_pci(c)) +#define writel(v,c) __raw_writel((__force __u32) \ + cpu_to_le32(v),__mem_pci(c)) #define writesb(p,d,l) __raw_writesb(__mem_pci(p),d,l) #define writesw(p,d,l) __raw_writesw(__mem_pci(p),d,l) -- cgit v1.2.3 From d5aa207e46ff7ee838683a7d95ecf46fe42a9a56 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 30 Apr 2005 12:19:28 +0100 Subject: [PATCH] ARM: RTC: allow driver methods to return error Allow RTC drivers to return error codes from their read_time or read_alarm methods. 
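A driver-side sketch of the new contract (hypothetical hardware accessor; only the prototypes from the asm-arm/rtc.h hunk below are real):

#include <linux/errno.h>
#include <asm/rtc.h>

/* foo_hw_read_clock() is an assumed, illustrative hardware helper. */
extern int foo_hw_read_clock(struct rtc_time *tm);

static int foo_rtc_read_time(struct rtc_time *tm)
{
        if (foo_hw_read_clock(tm))
                return -EIO;    /* error now reaches the RTC_RD_TIME caller */
        return 0;
}

static struct rtc_ops foo_rtc_ops = {
        .read_time      = foo_rtc_read_time,    /* previously returned void */
};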
Signed-off-by: Russell King --- arch/arm/common/rtctime.c | 29 +++++++++++++++-------------- arch/arm/mach-integrator/time.c | 17 ++++++++++++----- drivers/char/s3c2410-rtc.c | 8 ++++++-- include/asm-arm/rtc.h | 4 ++-- 4 files changed, 35 insertions(+), 23 deletions(-) (limited to 'include') diff --git a/arch/arm/common/rtctime.c b/arch/arm/common/rtctime.c index c397e71f938d..72b03f201eb9 100644 --- a/arch/arm/common/rtctime.c +++ b/arch/arm/common/rtctime.c @@ -141,10 +141,10 @@ void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, struct rtc next->tm_sec = alrm->tm_sec; } -static inline void rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) +static inline int rtc_read_time(struct rtc_ops *ops, struct rtc_time *tm) { memset(tm, 0, sizeof(struct rtc_time)); - ops->read_time(tm); + return ops->read_time(tm); } static inline int rtc_set_time(struct rtc_ops *ops, struct rtc_time *tm) @@ -163,8 +163,7 @@ static inline int rtc_read_alarm(struct rtc_ops *ops, struct rtc_wkalrm *alrm) int ret = -EINVAL; if (ops->read_alarm) { memset(alrm, 0, sizeof(struct rtc_wkalrm)); - ops->read_alarm(alrm); - ret = 0; + ret = ops->read_alarm(alrm); } return ret; } @@ -283,7 +282,9 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd, break; case RTC_RD_TIME: - rtc_read_time(ops, &tm); + ret = rtc_read_time(ops, &tm); + if (ret) + break; ret = copy_to_user(uarg, &tm, sizeof(tm)); if (ret) ret = -EFAULT; @@ -424,15 +425,15 @@ static int rtc_read_proc(char *page, char **start, off_t off, int count, int *eo struct rtc_time tm; char *p = page; - rtc_read_time(ops, &tm); - - p += sprintf(p, - "rtc_time\t: %02d:%02d:%02d\n" - "rtc_date\t: %04d-%02d-%02d\n" - "rtc_epoch\t: %04lu\n", - tm.tm_hour, tm.tm_min, tm.tm_sec, - tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, - rtc_epoch); + if (rtc_read_time(ops, &tm) == 0) { + p += sprintf(p, + "rtc_time\t: %02d:%02d:%02d\n" + "rtc_date\t: %04d-%02d-%02d\n" + "rtc_epoch\t: %04lu\n", + tm.tm_hour, tm.tm_min, tm.tm_sec, + tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, + rtc_epoch); + } if (rtc_read_alarm(ops, &alrm) == 0) { p += sprintf(p, "alrm_time\t: "); diff --git a/arch/arm/mach-integrator/time.c b/arch/arm/mach-integrator/time.c index 20729de2af28..1a844ca139e0 100644 --- a/arch/arm/mach-integrator/time.c +++ b/arch/arm/mach-integrator/time.c @@ -40,25 +40,32 @@ static int integrator_set_rtc(void) return 1; } -static void rtc_read_alarm(struct rtc_wkalrm *alrm) +static int rtc_read_alarm(struct rtc_wkalrm *alrm) { rtc_time_to_tm(readl(rtc_base + RTC_MR), &alrm->time); + return 0; } -static int rtc_set_alarm(struct rtc_wkalrm *alrm) +static inline int rtc_set_alarm(struct rtc_wkalrm *alrm) { unsigned long time; int ret; - ret = rtc_tm_to_time(&alrm->time, &time); + /* + * At the moment, we can only deal with non-wildcarded alarm times. + */ + ret = rtc_valid_tm(&alrm->time); + if (ret == 0) + ret = rtc_tm_to_time(&alrm->time, &time); if (ret == 0) writel(time, rtc_base + RTC_MR); return ret; } -static void rtc_read_time(struct rtc_time *tm) +static int rtc_read_time(struct rtc_time *tm) { rtc_time_to_tm(readl(rtc_base + RTC_DR), tm); + return 0; } /* @@ -69,7 +76,7 @@ static void rtc_read_time(struct rtc_time *tm) * edge of the 1Hz clock, we must write the time one second * in advance. 
*/ -static int rtc_set_time(struct rtc_time *tm) +static inline int rtc_set_time(struct rtc_time *tm) { unsigned long time; int ret; diff --git a/drivers/char/s3c2410-rtc.c b/drivers/char/s3c2410-rtc.c index 8e61be34a1d3..ed867db550a9 100644 --- a/drivers/char/s3c2410-rtc.c +++ b/drivers/char/s3c2410-rtc.c @@ -116,7 +116,7 @@ static void s3c2410_rtc_setfreq(int freq) /* Time read/write */ -static void s3c2410_rtc_gettime(struct rtc_time *rtc_tm) +static int s3c2410_rtc_gettime(struct rtc_time *rtc_tm) { unsigned int have_retried = 0; @@ -151,6 +151,8 @@ static void s3c2410_rtc_gettime(struct rtc_time *rtc_tm) rtc_tm->tm_year += 100; rtc_tm->tm_mon -= 1; + + return 0; } @@ -171,7 +173,7 @@ static int s3c2410_rtc_settime(struct rtc_time *tm) return 0; } -static void s3c2410_rtc_getalarm(struct rtc_wkalrm *alrm) +static int s3c2410_rtc_getalarm(struct rtc_wkalrm *alrm) { struct rtc_time *alm_tm = &alrm->time; unsigned int alm_en; @@ -231,6 +233,8 @@ static void s3c2410_rtc_getalarm(struct rtc_wkalrm *alrm) } /* todo - set alrm->enabled ? */ + + return 0; } static int s3c2410_rtc_setalarm(struct rtc_wkalrm *alrm) diff --git a/include/asm-arm/rtc.h b/include/asm-arm/rtc.h index aa7e16b2e225..370dfe77589d 100644 --- a/include/asm-arm/rtc.h +++ b/include/asm-arm/rtc.h @@ -18,9 +18,9 @@ struct rtc_ops { void (*release)(void); int (*ioctl)(unsigned int, unsigned long); - void (*read_time)(struct rtc_time *); + int (*read_time)(struct rtc_time *); int (*set_time)(struct rtc_time *); - void (*read_alarm)(struct rtc_wkalrm *); + int (*read_alarm)(struct rtc_wkalrm *); int (*set_alarm)(struct rtc_wkalrm *); int (*proc)(char *buf); }; -- cgit v1.2.3 From 4774e2260cf25c54f2188dd0407676e3af6f1f23 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sat, 30 Apr 2005 23:32:38 +0100 Subject: [PATCH] ARM: IntegratorCP: Fix CLCD MUX selection values The documentation on these values seems to be rather wrong. These values have been determined by mere trial and error. 
Signed-off-by: Russell King --- arch/arm/mach-integrator/integrator_cp.c | 17 ++++++++++++++++- include/asm-arm/arch-integrator/cm.h | 6 +++--- 2 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c index 68e15c36e336..3b948e8c2751 100644 --- a/arch/arm/mach-integrator/integrator_cp.c +++ b/arch/arm/mach-integrator/integrator_cp.c @@ -420,7 +420,22 @@ static struct clcd_panel vga = { */ static void cp_clcd_enable(struct clcd_fb *fb) { - cm_control(CM_CTRL_LCDMUXSEL_MASK, CM_CTRL_LCDMUXSEL_VGA); + u32 val; + + if (fb->fb.var.bits_per_pixel <= 8) + val = CM_CTRL_LCDMUXSEL_VGA_8421BPP; + else if (fb->fb.var.bits_per_pixel <= 16) + val = CM_CTRL_LCDMUXSEL_VGA_16BPP; + else + val = 0; /* no idea for this, don't trust the docs */ + + cm_control(CM_CTRL_LCDMUXSEL_MASK| + CM_CTRL_LCDEN0| + CM_CTRL_LCDEN1| + CM_CTRL_STATIC1| + CM_CTRL_STATIC2| + CM_CTRL_STATIC| + CM_CTRL_n24BITEN, val); } static unsigned long framesize = SZ_1M; diff --git a/include/asm-arm/arch-integrator/cm.h b/include/asm-arm/arch-integrator/cm.h index d31c1a71f781..1ab353e23595 100644 --- a/include/asm-arm/arch-integrator/cm.h +++ b/include/asm-arm/arch-integrator/cm.h @@ -24,9 +24,9 @@ void cm_control(u32, u32); #define CM_CTRL_LCDBIASDN (1 << 10) #define CM_CTRL_LCDMUXSEL_MASK (7 << 11) #define CM_CTRL_LCDMUXSEL_GENLCD (1 << 11) -#define CM_CTRL_LCDMUXSEL_SHARPLCD1 (3 << 11) -#define CM_CTRL_LCDMUXSEL_SHARPLCD2 (4 << 11) -#define CM_CTRL_LCDMUXSEL_VGA (7 << 11) +#define CM_CTRL_LCDMUXSEL_VGA_16BPP (2 << 11) +#define CM_CTRL_LCDMUXSEL_SHARPLCD (3 << 11) +#define CM_CTRL_LCDMUXSEL_VGA_8421BPP (4 << 11) #define CM_CTRL_LCDEN0 (1 << 14) #define CM_CTRL_LCDEN1 (1 << 15) #define CM_CTRL_STATIC1 (1 << 16) -- cgit v1.2.3 From 119f657c72fc07d6fd28c61de59cfba1566970a9 Mon Sep 17 00:00:00 2001 From: "akpm@osdl.org" Date: Sun, 1 May 2005 08:58:35 -0700 Subject: [PATCH] RLIMIT_AS checking fix Address bug #4508: there's potential for wraparound in the various places where we perform RLIMIT_AS checking. (I'm a bit worried about acct_stack_growth(). Are we sure that vma->vm_mm is always equal to current->mm? If not, then we're comparing some other process's total_vm with the calling process's rlimits). Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 1 + mm/mmap.c | 24 +++++++++++++++++++----- mm/mremap.c | 6 +++--- 3 files changed, 23 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/linux/mm.h b/include/linux/mm.h index c74a74ca401d..8b007ad2d450 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -726,6 +726,7 @@ extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *, extern struct vm_area_struct *copy_vma(struct vm_area_struct **, unsigned long addr, unsigned long len, pgoff_t pgoff); extern void exit_mmap(struct mm_struct *); +extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); diff --git a/mm/mmap.c b/mm/mmap.c index 6ea204cc751e..1ec0f6e9c0d8 100644 --- a/mm/mmap.c +++ b/mm/mmap.c @@ -1009,8 +1009,7 @@ munmap_back: } /* Check against address space limit. 
*/ - if ((mm->total_vm << PAGE_SHIFT) + len - > current->signal->rlim[RLIMIT_AS].rlim_cur) + if (!may_expand_vm(mm, len >> PAGE_SHIFT)) return -ENOMEM; if (accountable && (!(flags & MAP_NORESERVE) || @@ -1421,7 +1420,7 @@ static int acct_stack_growth(struct vm_area_struct * vma, unsigned long size, un struct rlimit *rlim = current->signal->rlim; /* address space limit tests */ - if (mm->total_vm + grow > rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT) + if (!may_expand_vm(mm, grow)) return -ENOMEM; /* Stack limit test */ @@ -1848,8 +1847,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len) } /* Check against address space limits *after* clearing old maps... */ - if ((mm->total_vm << PAGE_SHIFT) + len - > current->signal->rlim[RLIMIT_AS].rlim_cur) + if (!may_expand_vm(mm, len >> PAGE_SHIFT)) return -ENOMEM; if (mm->map_count > sysctl_max_map_count) @@ -2019,3 +2017,19 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, } return new_vma; } + +/* + * Return true if the calling process may expand its vm space by the passed + * number of pages + */ +int may_expand_vm(struct mm_struct *mm, unsigned long npages) +{ + unsigned long cur = mm->total_vm; /* pages */ + unsigned long lim; + + lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; + + if (cur + npages > lim) + return 0; + return 1; +} diff --git a/mm/mremap.c b/mm/mremap.c index 0d1c1b9c7a0a..0dd7ace94e51 100644 --- a/mm/mremap.c +++ b/mm/mremap.c @@ -347,10 +347,10 @@ unsigned long do_mremap(unsigned long addr, if (locked > lock_limit && !capable(CAP_IPC_LOCK)) goto out; } - ret = -ENOMEM; - if ((current->mm->total_vm << PAGE_SHIFT) + (new_len - old_len) - > current->signal->rlim[RLIMIT_AS].rlim_cur) + if (!may_expand_vm(current->mm, (new_len - old_len) >> PAGE_SHIFT)) { + ret = -ENOMEM; goto out; + } if (vma->vm_flags & VM_ACCOUNT) { charged = (new_len - old_len) >> PAGE_SHIFT; -- cgit v1.2.3 From b84a35be0285229b0a8a5e2e04d79360c5b75562 Mon Sep 17 00:00:00 2001 From: Nick Piggin Date: Sun, 1 May 2005 08:58:36 -0700 Subject: [PATCH] mempool: NOMEMALLOC and NORETRY Mempools have 2 problems. The first is that mempool_alloc can possibly get stuck in __alloc_pages when they should opt to fail, and take an element from their reserved pool. The second is that it will happily eat emergency PF_MEMALLOC reserves instead of going to their reserved pools. Fix the first by passing __GFP_NORETRY in the allocation calls in mempool_alloc. Fix the second by introducing a __GFP_MEMPOOL flag which directs the page allocator not to allocate from the reserve pool. 
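Callers need no changes, since the flag adjustment happens inside mempool_alloc() itself (the flag is spelled __GFP_NOMEMALLOC in the diff below). For context, a minimal sketch of a typical mempool user, with illustrative names and the 2.6-era create/alloc signatures:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mempool.h>

static kmem_cache_t *foo_cache;
static mempool_t *foo_pool;

static int __init foo_pool_init(void)
{
        foo_cache = kmem_cache_create("foo", 128, 0, 0, NULL, NULL);
        if (!foo_cache)
                return -ENOMEM;
        /* keep four objects in reserve for the writeout path */
        foo_pool = mempool_create(4, mempool_alloc_slab,
                                  mempool_free_slab, foo_cache);
        return foo_pool ? 0 : -ENOMEM;
}

static void foo_do_io(void)
{
        /*
         * mempool_alloc() now ORs in __GFP_NOMEMALLOC and __GFP_NORETRY
         * itself, so under memory pressure this call falls back to the
         * reserved elements instead of eating PF_MEMALLOC emergency pages.
         */
        void *obj = mempool_alloc(foo_pool, GFP_NOIO);

        if (obj)
                mempool_free(obj, foo_pool);
}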
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 6 ++++-- mm/mempool.c | 9 +++++++-- mm/page_alloc.c | 20 ++++++++++++-------- 3 files changed, 23 insertions(+), 12 deletions(-) (limited to 'include') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 848a1baac079..af7407e8cfc5 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -38,14 +38,16 @@ struct vm_area_struct; #define __GFP_NO_GROW 0x2000u /* Slab internal usage */ #define __GFP_COMP 0x4000u /* Add compound page metadata */ #define __GFP_ZERO 0x8000u /* Return zeroed page on success */ +#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */ -#define __GFP_BITS_SHIFT 16 /* Room for 16 __GFP_FOO bits */ +#define __GFP_BITS_SHIFT 20 /* Room for 20 __GFP_FOO bits */ #define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1) /* if you forget to add the bitmask here kernel will crash, period */ #define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \ __GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \ - __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP) + __GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \ + __GFP_NOMEMALLOC) #define GFP_ATOMIC (__GFP_HIGH) #define GFP_NOIO (__GFP_WAIT) diff --git a/mm/mempool.c b/mm/mempool.c index b014ffeaa413..d691b5cb8022 100644 --- a/mm/mempool.c +++ b/mm/mempool.c @@ -198,11 +198,16 @@ void * mempool_alloc(mempool_t *pool, unsigned int __nocast gfp_mask) void *element; unsigned long flags; DEFINE_WAIT(wait); - int gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO); + int gfp_nowait; + + gfp_mask |= __GFP_NOMEMALLOC; /* don't allocate emergency reserves */ + gfp_mask |= __GFP_NORETRY; /* don't loop in __alloc_pages */ + gfp_mask |= __GFP_NOWARN; /* failures are OK */ + gfp_nowait = gfp_mask & ~(__GFP_WAIT | __GFP_IO); might_sleep_if(gfp_mask & __GFP_WAIT); repeat_alloc: - element = pool->alloc(gfp_nowait|__GFP_NOWARN, pool->pool_data); + element = pool->alloc(gfp_nowait, pool->pool_data); if (likely(element != NULL)) return element; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 08e8627361a0..04a35b3d3262 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -799,14 +799,18 @@ __alloc_pages(unsigned int __nocast gfp_mask, unsigned int order, } /* This allocation should allow future memory freeing. */ - if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) && !in_interrupt()) { - /* go through the zonelist yet again, ignoring mins */ - for (i = 0; (z = zones[i]) != NULL; i++) { - if (!cpuset_zone_allowed(z)) - continue; - page = buffered_rmqueue(z, order, gfp_mask); - if (page) - goto got_pg; + + if (((p->flags & PF_MEMALLOC) || unlikely(test_thread_flag(TIF_MEMDIE))) + && !in_interrupt()) { + if (!(gfp_mask & __GFP_NOMEMALLOC)) { + /* go through the zonelist yet again, ignoring mins */ + for (i = 0; (z = zones[i]) != NULL; i++) { + if (!cpuset_zone_allowed(z)) + continue; + page = buffered_rmqueue(z, order, gfp_mask); + if (page) + goto got_pg; + } } goto nopage; } -- cgit v1.2.3 From edfbe2b0038723e5699ab22695ccd62b5542a5c1 Mon Sep 17 00:00:00 2001 From: KAMEZAWA Hiroyuki Date: Sun, 1 May 2005 08:58:37 -0700 Subject: [PATCH] count bounce buffer pages in vmstat This is a patch for counting the number of pages for bounce buffers. It's shown in /proc/vmstat. Currently, the number of bounce pages are not counted anywhere. So, if there are many bounce pages, it seems that there are leaked pages. And it's difficult for a user to imagine the usage of bounce pages. So, it's meaningful to show # of bouce pages. 
Signed-off-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page-flags.h | 1 + mm/highmem.c | 2 ++ mm/page_alloc.c | 1 + 3 files changed, 4 insertions(+) (limited to 'include') diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 6b74fcf5bb63..39ab8c6b5652 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -131,6 +131,7 @@ struct page_state { unsigned long allocstall; /* direct reclaim calls */ unsigned long pgrotated; /* pages rotated to tail of the LRU */ + unsigned long nr_bounce; /* pages for bounce buffers */ }; extern void get_page_state(struct page_state *ret); diff --git a/mm/highmem.c b/mm/highmem.c index d01276506b00..400911599468 100644 --- a/mm/highmem.c +++ b/mm/highmem.c @@ -325,6 +325,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) continue; mempool_free(bvec->bv_page, pool); + dec_page_state(nr_bounce); } bio_endio(bio_orig, bio_orig->bi_size, err); @@ -405,6 +406,7 @@ static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, to->bv_page = mempool_alloc(pool, q->bounce_gfp); to->bv_len = from->bv_len; to->bv_offset = from->bv_offset; + inc_page_state(nr_bounce); if (rw == WRITE) { char *vto, *vfrom; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 04a35b3d3262..80ce7f2104df 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1897,6 +1897,7 @@ static char *vmstat_text[] = { "allocstall", "pgrotated", + "nr_bounce", }; static void *vmstat_start(struct seq_file *m, loff_t *pos) -- cgit v1.2.3 From 97e2bde47f886a317909c8a8f9bd2fcd8ce2f0b0 Mon Sep 17 00:00:00 2001 From: Manfred Spraul Date: Sun, 1 May 2005 08:58:38 -0700 Subject: [PATCH] add kmalloc_node, inline cleanup The patch makes the following function calls available to allocate memory on a specific node without changing the basic operation of the slab allocator: kmem_cache_alloc_node(kmem_cache_t *cachep, unsigned int flags, int node); kmalloc_node(size_t size, unsigned int flags, int node); in a similar way to the existing node-blind functions: kmem_cache_alloc(kmem_cache_t *cachep, unsigned int flags); kmalloc(size, flags); kmem_cache_alloc_node was changed to pass flags and the node information through the existing layers of the slab allocator (which lead to some minor rearrangements). The functions at the lowest layer (kmem_getpages, cache_grow) are already node aware. Also __alloc_percpu can call kmalloc_node now. 
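For illustration, a caller-side sketch of the new interface (structure and function names are made up; on !CONFIG_NUMA builds the new calls simply fall back to the node-blind variants, as the slab.h hunk below shows):

#include <linux/slab.h>
#include <linux/topology.h>

struct foo_percpu_data {
        int counter;
};

/* Allocate a control structure on the node that owns the given CPU. */
static struct foo_percpu_data *foo_alloc_for_cpu(int cpu)
{
        struct foo_percpu_data *d;

        d = kmalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
        if (!d)
                /* fall back to any node rather than failing outright */
                d = kmalloc(sizeof(*d), GFP_KERNEL);
        return d;
}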
Performance measurements (using the pageset localization patch) yields: w/o patches: Tasks jobs/min jti jobs/min/task real cpu 1 484.27 100 484.2736 12.02 1.97 Wed Mar 30 20:50:43 2005 100 25170.83 91 251.7083 23.12 150.10 Wed Mar 30 20:51:06 2005 200 34601.66 84 173.0083 33.64 294.14 Wed Mar 30 20:51:40 2005 300 37154.47 86 123.8482 46.99 436.56 Wed Mar 30 20:52:28 2005 400 39839.82 80 99.5995 58.43 580.46 Wed Mar 30 20:53:27 2005 500 40036.32 79 80.0726 72.68 728.60 Wed Mar 30 20:54:40 2005 600 44074.21 79 73.4570 79.23 872.10 Wed Mar 30 20:55:59 2005 700 44016.60 78 62.8809 92.56 1015.84 Wed Mar 30 20:57:32 2005 800 40411.05 80 50.5138 115.22 1161.13 Wed Mar 30 20:59:28 2005 900 42298.56 79 46.9984 123.83 1303.42 Wed Mar 30 21:01:33 2005 1000 40955.05 80 40.9551 142.11 1441.92 Wed Mar 30 21:03:55 2005 with pageset localization and slab API patches: Tasks jobs/min jti jobs/min/task real cpu 1 484.19 100 484.1930 12.02 1.98 Wed Mar 30 21:10:18 2005 100 27428.25 92 274.2825 21.22 149.79 Wed Mar 30 21:10:40 2005 200 37228.94 86 186.1447 31.27 293.49 Wed Mar 30 21:11:12 2005 300 41725.42 85 139.0847 41.84 434.10 Wed Mar 30 21:11:54 2005 400 43032.22 82 107.5805 54.10 582.06 Wed Mar 30 21:12:48 2005 500 42211.23 83 84.4225 68.94 722.61 Wed Mar 30 21:13:58 2005 600 40084.49 82 66.8075 87.12 873.11 Wed Mar 30 21:15:25 2005 700 44169.30 79 63.0990 92.24 1008.77 Wed Mar 30 21:16:58 2005 800 43097.94 79 53.8724 108.03 1155.88 Wed Mar 30 21:18:47 2005 900 41846.75 79 46.4964 125.17 1303.38 Wed Mar 30 21:20:52 2005 1000 40247.85 79 40.2478 144.60 1442.21 Wed Mar 30 21:23:17 2005 Signed-off-by: Christoph Lameter Signed-off-by: Manfred Spraul Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 23 +++++++++++++++-------- mm/slab.c | 45 +++++++++++++++++++++++++++++++-------------- 2 files changed, 46 insertions(+), 22 deletions(-) (limited to 'include') diff --git a/include/linux/slab.h b/include/linux/slab.h index 3e3c3ab8ff94..7d66385ae750 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -62,16 +62,9 @@ extern kmem_cache_t *kmem_cache_create(const char *, size_t, size_t, unsigned lo extern int kmem_cache_destroy(kmem_cache_t *); extern int kmem_cache_shrink(kmem_cache_t *); extern void *kmem_cache_alloc(kmem_cache_t *, unsigned int __nocast); -#ifdef CONFIG_NUMA -extern void *kmem_cache_alloc_node(kmem_cache_t *, int); -#else -static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int node) -{ - return kmem_cache_alloc(cachep, GFP_KERNEL); -} -#endif extern void kmem_cache_free(kmem_cache_t *, void *); extern unsigned int kmem_cache_size(kmem_cache_t *); +extern kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags); /* Size description struct for general caches. 
*/ struct cache_sizes { @@ -109,6 +102,20 @@ extern void *kcalloc(size_t, size_t, unsigned int __nocast); extern void kfree(const void *); extern unsigned int ksize(const void *); +#ifdef CONFIG_NUMA +extern void *kmem_cache_alloc_node(kmem_cache_t *, int flags, int node); +extern void *kmalloc_node(size_t size, int flags, int node); +#else +static inline void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int node) +{ + return kmem_cache_alloc(cachep, flags); +} +static inline void *kmalloc_node(size_t size, int flags, int node) +{ + return kmalloc(size, flags); +} +#endif + extern int FASTCALL(kmem_cache_reap(int)); extern int FASTCALL(kmem_ptr_validate(kmem_cache_t *cachep, void *ptr)); diff --git a/mm/slab.c b/mm/slab.c index ec660d85ddd7..771cc09f9f1a 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -583,7 +583,7 @@ static inline struct array_cache *ac_data(kmem_cache_t *cachep) return cachep->array[smp_processor_id()]; } -static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags) +static inline kmem_cache_t *__find_general_cachep(size_t size, int gfpflags) { struct cache_sizes *csizep = malloc_sizes; @@ -607,6 +607,12 @@ static inline kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags) return csizep->cs_cachep; } +kmem_cache_t *kmem_find_general_cachep(size_t size, int gfpflags) +{ + return __find_general_cachep(size, gfpflags); +} +EXPORT_SYMBOL(kmem_find_general_cachep); + /* Cal the num objs, wastage, and bytes left over for a given slab size. */ static void cache_estimate(unsigned long gfporder, size_t size, size_t align, int flags, size_t *left_over, unsigned int *num) @@ -672,14 +678,11 @@ static struct array_cache *alloc_arraycache(int cpu, int entries, int memsize = sizeof(void*)*entries+sizeof(struct array_cache); struct array_cache *nc = NULL; - if (cpu != -1) { - kmem_cache_t *cachep; - cachep = kmem_find_general_cachep(memsize, GFP_KERNEL); - if (cachep) - nc = kmem_cache_alloc_node(cachep, cpu_to_node(cpu)); - } - if (!nc) + if (cpu == -1) nc = kmalloc(memsize, GFP_KERNEL); + else + nc = kmalloc_node(memsize, GFP_KERNEL, cpu_to_node(cpu)); + if (nc) { nc->avail = 0; nc->limit = entries; @@ -2361,7 +2364,7 @@ out: * and can sleep. And it will allocate memory on the given node, which * can improve the performance for cpu bound structures. */ -void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid) +void *kmem_cache_alloc_node(kmem_cache_t *cachep, int flags, int nodeid) { int loop; void *objp; @@ -2393,7 +2396,7 @@ void *kmem_cache_alloc_node(kmem_cache_t *cachep, int nodeid) spin_unlock_irq(&cachep->spinlock); local_irq_disable(); - if (!cache_grow(cachep, GFP_KERNEL, nodeid)) { + if (!cache_grow(cachep, flags, nodeid)) { local_irq_enable(); return NULL; } @@ -2435,6 +2438,16 @@ got_slabp: } EXPORT_SYMBOL(kmem_cache_alloc_node); +void *kmalloc_node(size_t size, int flags, int node) +{ + kmem_cache_t *cachep; + + cachep = kmem_find_general_cachep(size, flags); + if (unlikely(cachep == NULL)) + return NULL; + return kmem_cache_alloc_node(cachep, flags, node); +} +EXPORT_SYMBOL(kmalloc_node); #endif /** @@ -2462,7 +2475,12 @@ void *__kmalloc(size_t size, unsigned int __nocast flags) { kmem_cache_t *cachep; - cachep = kmem_find_general_cachep(size, flags); + /* If you want to save a few bytes .text space: replace + * __ with kmem_. + * Then kmalloc uses the uninlined functions instead of the inline + * functions. 
+ */ + cachep = __find_general_cachep(size, flags); if (unlikely(cachep == NULL)) return NULL; return __cache_alloc(cachep, flags); @@ -2489,9 +2507,8 @@ void *__alloc_percpu(size_t size, size_t align) for (i = 0; i < NR_CPUS; i++) { if (!cpu_possible(i)) continue; - pdata->ptrs[i] = kmem_cache_alloc_node( - kmem_find_general_cachep(size, GFP_KERNEL), - cpu_to_node(i)); + pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, + cpu_to_node(i)); if (!pdata->ptrs[i]) goto unwind_oom; -- cgit v1.2.3 From 443a848cd30eb5bb5c1038e6371d83404775dcfc Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Sun, 1 May 2005 08:58:40 -0700 Subject: [PATCH] ppc32: refactor FPU exception handling Moved common FPU exception handling code out of head.S so it can be used by several of the sub-architectures that might of a full PowerPC FPU. Also, uses new CONFIG_PPC_FPU define to fix alignment exception handling for floating point load/store instructions to only occur if we have a hardware FPU. Signed-off-by: Jason McMullan Signed-off-by: Kumar Gala Signed-off-by: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc/Kconfig | 4 + arch/ppc/Makefile | 1 + arch/ppc/kernel/Makefile | 1 + arch/ppc/kernel/align.c | 8 ++ arch/ppc/kernel/entry.S | 59 ++++++++++++++ arch/ppc/kernel/fpu.S | 133 ++++++++++++++++++++++++++++++++ arch/ppc/kernel/head.S | 163 --------------------------------------- arch/ppc/kernel/head_44x.S | 6 ++ arch/ppc/kernel/head_booke.h | 7 ++ arch/ppc/kernel/head_fsl_booke.S | 8 +- arch/ppc/kernel/misc.S | 12 +-- arch/ppc/kernel/traps.c | 2 +- include/asm-ppc/reg_booke.h | 1 + 13 files changed, 229 insertions(+), 176 deletions(-) create mode 100644 arch/ppc/kernel/fpu.S (limited to 'include') diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 74aa1e92a395..c3d941345e3d 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig @@ -53,6 +53,7 @@ choice config 6xx bool "6xx/7xx/74xx/52xx/82xx/83xx" + select PPC_FPU help There are four types of PowerPC chips supported. The more common types (601, 603, 604, 740, 750, 7400), the Motorola embedded @@ -86,6 +87,9 @@ config E500 endchoice +config PPC_FPU + bool + config BOOKE bool depends on E500 diff --git a/arch/ppc/Makefile b/arch/ppc/Makefile index 73cbdda5b597..0432a25b4735 100644 --- a/arch/ppc/Makefile +++ b/arch/ppc/Makefile @@ -53,6 +53,7 @@ head-$(CONFIG_FSL_BOOKE) := arch/ppc/kernel/head_fsl_booke.o head-$(CONFIG_6xx) += arch/ppc/kernel/idle_6xx.o head-$(CONFIG_POWER4) += arch/ppc/kernel/idle_power4.o +head-$(CONFIG_PPC_FPU) += arch/ppc/kernel/fpu.o core-y += arch/ppc/kernel/ arch/ppc/platforms/ \ arch/ppc/mm/ arch/ppc/lib/ arch/ppc/syslib/ diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile index 86bc878cb3ee..b284451802c9 100644 --- a/arch/ppc/kernel/Makefile +++ b/arch/ppc/kernel/Makefile @@ -9,6 +9,7 @@ extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o extra-$(CONFIG_8xx) := head_8xx.o extra-$(CONFIG_6xx) += idle_6xx.o extra-$(CONFIG_POWER4) += idle_power4.o +extra-$(CONFIG_PPC_FPU) += fpu.o extra-y += vmlinux.lds obj-y := entry.o traps.o irq.o idle.o time.o misc.o \ diff --git a/arch/ppc/kernel/align.c b/arch/ppc/kernel/align.c index 79c929475037..40d356c66c41 100644 --- a/arch/ppc/kernel/align.c +++ b/arch/ppc/kernel/align.c @@ -368,16 +368,24 @@ fix_alignment(struct pt_regs *regs) /* Single-precision FP load and store require conversions... 
*/ case LD+F+S: +#ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); cvt_fd(&data.f, &data.d, ¤t->thread.fpscr); preempt_enable(); +#else + return 0; +#endif break; case ST+F+S: +#ifdef CONFIG_PPC_FPU preempt_disable(); enable_kernel_fp(); cvt_df(&data.d, &data.f, ¤t->thread.fpscr); preempt_enable(); +#else + return 0; +#endif break; } diff --git a/arch/ppc/kernel/entry.S b/arch/ppc/kernel/entry.S index 035217d6c0f1..5f075dbc4ee7 100644 --- a/arch/ppc/kernel/entry.S +++ b/arch/ppc/kernel/entry.S @@ -563,6 +563,65 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) addi r1,r1,INT_FRAME_SIZE blr + .globl fast_exception_return +fast_exception_return: +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) + andi. r10,r9,MSR_RI /* check for recoverable interrupt */ + beq 1f /* if not, we've got problems */ +#endif + +2: REST_4GPRS(3, r11) + lwz r10,_CCR(r11) + REST_GPR(1, r11) + mtcr r10 + lwz r10,_LINK(r11) + mtlr r10 + REST_GPR(10, r11) + mtspr SPRN_SRR1,r9 + mtspr SPRN_SRR0,r12 + REST_GPR(9, r11) + REST_GPR(12, r11) + lwz r11,GPR11(r11) + SYNC + RFI + +#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) +/* check if the exception happened in a restartable section */ +1: lis r3,exc_exit_restart_end@ha + addi r3,r3,exc_exit_restart_end@l + cmplw r12,r3 + bge 3f + lis r4,exc_exit_restart@ha + addi r4,r4,exc_exit_restart@l + cmplw r12,r4 + blt 3f + lis r3,fee_restarts@ha + tophys(r3,r3) + lwz r5,fee_restarts@l(r3) + addi r5,r5,1 + stw r5,fee_restarts@l(r3) + mr r12,r4 /* restart at exc_exit_restart */ + b 2b + + .comm fee_restarts,4 + +/* aargh, a nonrecoverable interrupt, panic */ +/* aargh, we don't know which trap this is */ +/* but the 601 doesn't implement the RI bit, so assume it's OK */ +3: +BEGIN_FTR_SECTION + b 2b +END_FTR_SECTION_IFSET(CPU_FTR_601) + li r10,-1 + stw r10,TRAP(r11) + addi r3,r1,STACK_FRAME_OVERHEAD + lis r10,MSR_KERNEL@h + ori r10,r10,MSR_KERNEL@l + bl transfer_to_handler_full + .long nonrecoverable_exception + .long ret_from_except +#endif + .globl sigreturn_exit sigreturn_exit: subi r1,r3,STACK_FRAME_OVERHEAD diff --git a/arch/ppc/kernel/fpu.S b/arch/ppc/kernel/fpu.S new file mode 100644 index 000000000000..6189b26f640f --- /dev/null +++ b/arch/ppc/kernel/fpu.S @@ -0,0 +1,133 @@ +/* + * FPU support code, moved here from head.S so that it can be used + * by chips which use other head-whatever.S files. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * This task wants to use the FPU now. + * On UP, disable FP for the task which had the FPU previously, + * and save its floating-point registers in its thread_struct. + * Load up this task's FP registers from its thread_struct, + * enable the FPU for the current task and return to the task. + */ + .globl load_up_fpu +load_up_fpu: + mfmsr r5 + ori r5,r5,MSR_FP +#ifdef CONFIG_PPC64BRIDGE + clrldi r5,r5,1 /* turn off 64-bit mode */ +#endif /* CONFIG_PPC64BRIDGE */ + SYNC + MTMSRD(r5) /* enable use of fpu now */ + isync +/* + * For SMP, we don't do lazy FPU switching because it just gets too + * horrendously complex, especially when a task switches from one CPU + * to another. Instead we call giveup_fpu in switch_to. 
+ */ +#ifndef CONFIG_SMP + tophys(r6,0) /* get __pa constant */ + addis r3,r6,last_task_used_math@ha + lwz r4,last_task_used_math@l(r3) + cmpwi 0,r4,0 + beq 1f + add r4,r4,r6 + addi r4,r4,THREAD /* want last_task_used_math->thread */ + SAVE_32FPRS(0, r4) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r4) + lwz r5,PT_REGS(r4) + add r5,r5,r6 + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r10,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r10 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#endif /* CONFIG_SMP */ + /* enable use of FP after return */ + mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ + lwz r4,THREAD_FPEXC_MODE(r5) + ori r9,r9,MSR_FP /* enable FP for current */ + or r9,r9,r4 + lfd fr0,THREAD_FPSCR-4(r5) + mtfsf 0xff,fr0 + REST_32FPRS(0, r5) +#ifndef CONFIG_SMP + subi r4,r5,THREAD + sub r4,r4,r6 + stw r4,last_task_used_math@l(r3) +#endif /* CONFIG_SMP */ + /* restore registers and return */ + /* we haven't used ctr or xer or lr */ + b fast_exception_return + +/* + * FP unavailable trap from kernel - print a message, but let + * the task use FP in the kernel until it returns to user mode. + */ + .globl KernelFP +KernelFP: + lwz r3,_MSR(r1) + ori r3,r3,MSR_FP + stw r3,_MSR(r1) /* enable use of FP after return */ + lis r3,86f@h + ori r3,r3,86f@l + mr r4,r2 /* current */ + lwz r5,_NIP(r1) + bl printk + b ret_from_except +86: .string "floating point used in kernel (task=%p, pc=%x)\n" + .align 4,0 + +/* + * giveup_fpu(tsk) + * Disable FP for the task given as the argument, + * and save the floating-point registers in its thread_struct. + * Enables the FPU for use in the kernel on return. + */ + .globl giveup_fpu +giveup_fpu: + mfmsr r5 + ori r5,r5,MSR_FP + SYNC_601 + ISYNC_601 + MTMSRD(r5) /* enable use of fpu now */ + SYNC_601 + isync + cmpwi 0,r3,0 + beqlr- /* if no previous owner, done */ + addi r3,r3,THREAD /* want THREAD of task */ + lwz r5,PT_REGS(r3) + cmpwi 0,r5,0 + SAVE_32FPRS(0, r3) + mffs fr0 + stfd fr0,THREAD_FPSCR-4(r3) + beq 1f + lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) + li r3,MSR_FP|MSR_FE0|MSR_FE1 + andc r4,r4,r3 /* disable FP for previous task */ + stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) +1: +#ifndef CONFIG_SMP + li r5,0 + lis r4,last_task_used_math@ha + stw r5,last_task_used_math@l(r4) +#endif /* CONFIG_SMP */ + blr diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 1a89a71e0acc..a931d773715f 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S @@ -775,133 +775,6 @@ InstructionSegment: EXC_XFER_STD(0x480, UnknownException) #endif /* CONFIG_PPC64BRIDGE */ -/* - * This task wants to use the FPU now. - * On UP, disable FP for the task which had the FPU previously, - * and save its floating-point registers in its thread_struct. - * Load up this task's FP registers from its thread_struct, - * enable the FPU for the current task and return to the task. - */ -load_up_fpu: - mfmsr r5 - ori r5,r5,MSR_FP -#ifdef CONFIG_PPC64BRIDGE - clrldi r5,r5,1 /* turn off 64-bit mode */ -#endif /* CONFIG_PPC64BRIDGE */ - SYNC - MTMSRD(r5) /* enable use of fpu now */ - isync -/* - * For SMP, we don't do lazy FPU switching because it just gets too - * horrendously complex, especially when a task switches from one CPU - * to another. Instead we call giveup_fpu in switch_to. 
- */ -#ifndef CONFIG_SMP - tophys(r6,0) /* get __pa constant */ - addis r3,r6,last_task_used_math@ha - lwz r4,last_task_used_math@l(r3) - cmpwi 0,r4,0 - beq 1f - add r4,r4,r6 - addi r4,r4,THREAD /* want last_task_used_math->thread */ - SAVE_32FPRS(0, r4) - mffs fr0 - stfd fr0,THREAD_FPSCR-4(r4) - lwz r5,PT_REGS(r4) - add r5,r5,r6 - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - li r10,MSR_FP|MSR_FE0|MSR_FE1 - andc r4,r4,r10 /* disable FP for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#endif /* CONFIG_SMP */ - /* enable use of FP after return */ - mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */ - lwz r4,THREAD_FPEXC_MODE(r5) - ori r9,r9,MSR_FP /* enable FP for current */ - or r9,r9,r4 - lfd fr0,THREAD_FPSCR-4(r5) - mtfsf 0xff,fr0 - REST_32FPRS(0, r5) -#ifndef CONFIG_SMP - subi r4,r5,THREAD - sub r4,r4,r6 - stw r4,last_task_used_math@l(r3) -#endif /* CONFIG_SMP */ - /* restore registers and return */ - /* we haven't used ctr or xer or lr */ - /* fall through to fast_exception_return */ - - .globl fast_exception_return -fast_exception_return: - andi. r10,r9,MSR_RI /* check for recoverable interrupt */ - beq 1f /* if not, we've got problems */ -2: REST_4GPRS(3, r11) - lwz r10,_CCR(r11) - REST_GPR(1, r11) - mtcr r10 - lwz r10,_LINK(r11) - mtlr r10 - REST_GPR(10, r11) - mtspr SPRN_SRR1,r9 - mtspr SPRN_SRR0,r12 - REST_GPR(9, r11) - REST_GPR(12, r11) - lwz r11,GPR11(r11) - SYNC - RFI - -/* check if the exception happened in a restartable section */ -1: lis r3,exc_exit_restart_end@ha - addi r3,r3,exc_exit_restart_end@l - cmplw r12,r3 - bge 3f - lis r4,exc_exit_restart@ha - addi r4,r4,exc_exit_restart@l - cmplw r12,r4 - blt 3f - lis r3,fee_restarts@ha - tophys(r3,r3) - lwz r5,fee_restarts@l(r3) - addi r5,r5,1 - stw r5,fee_restarts@l(r3) - mr r12,r4 /* restart at exc_exit_restart */ - b 2b - - .comm fee_restarts,4 - -/* aargh, a nonrecoverable interrupt, panic */ -/* aargh, we don't know which trap this is */ -/* but the 601 doesn't implement the RI bit, so assume it's OK */ -3: -BEGIN_FTR_SECTION - b 2b -END_FTR_SECTION_IFSET(CPU_FTR_601) - li r10,-1 - stw r10,TRAP(r11) - addi r3,r1,STACK_FRAME_OVERHEAD - li r10,MSR_KERNEL - bl transfer_to_handler_full - .long nonrecoverable_exception - .long ret_from_except - -/* - * FP unavailable trap from kernel - print a message, but let - * the task use FP in the kernel until it returns to user mode. - */ -KernelFP: - lwz r3,_MSR(r1) - ori r3,r3,MSR_FP - stw r3,_MSR(r1) /* enable use of FP after return */ - lis r3,86f@h - ori r3,r3,86f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -86: .string "floating point used in kernel (task=%p, pc=%x)\n" - .align 4,0 - #ifdef CONFIG_ALTIVEC /* Note that the AltiVec support is closely modeled after the FP * support. Changes to one are likely to be applicable to the @@ -1015,42 +888,6 @@ giveup_altivec: blr #endif /* CONFIG_ALTIVEC */ -/* - * giveup_fpu(tsk) - * Disable FP for the task given as the argument, - * and save the floating-point registers in its thread_struct. - * Enables the FPU for use in the kernel on return. 
- */ - .globl giveup_fpu -giveup_fpu: - mfmsr r5 - ori r5,r5,MSR_FP - SYNC_601 - ISYNC_601 - MTMSRD(r5) /* enable use of fpu now */ - SYNC_601 - isync - cmpwi 0,r3,0 - beqlr- /* if no previous owner, done */ - addi r3,r3,THREAD /* want THREAD of task */ - lwz r5,PT_REGS(r3) - cmpwi 0,r5,0 - SAVE_32FPRS(0, r3) - mffs fr0 - stfd fr0,THREAD_FPSCR-4(r3) - beq 1f - lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5) - li r3,MSR_FP|MSR_FE0|MSR_FE1 - andc r4,r4,r3 /* disable FP for previous task */ - stw r4,_MSR-STACK_FRAME_OVERHEAD(r5) -1: -#ifndef CONFIG_SMP - li r5,0 - lis r4,last_task_used_math@ha - stw r5,last_task_used_math@l(r4) -#endif /* CONFIG_SMP */ - blr - /* * This code is jumped to from the startup code to copy * the kernel image to physical address 0. diff --git a/arch/ppc/kernel/head_44x.S b/arch/ppc/kernel/head_44x.S index 9ed8165a3d6c..9b6a8e513657 100644 --- a/arch/ppc/kernel/head_44x.S +++ b/arch/ppc/kernel/head_44x.S @@ -426,7 +426,11 @@ interrupt_base: PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else EXCEPTION(0x2010, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) @@ -686,8 +690,10 @@ _GLOBAL(giveup_altivec) * * The 44x core does not have an FPU. */ +#ifndef CONFIG_PPC_FPU _GLOBAL(giveup_fpu) blr +#endif /* * extern void abort(void) diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h index 884dac916bce..f213d12eec08 100644 --- a/arch/ppc/kernel/head_booke.h +++ b/arch/ppc/kernel/head_booke.h @@ -337,4 +337,11 @@ label: addi r3,r1,STACK_FRAME_OVERHEAD; \ EXC_XFER_LITE(0x0900, timer_interrupt) +#define FP_UNAVAILABLE_EXCEPTION \ + START_EXCEPTION(FloatingPointUnavailable) \ + NORMAL_EXCEPTION_PROLOG; \ + bne load_up_fpu; /* if from user, just load it up */ \ + addi r3,r1,STACK_FRAME_OVERHEAD; \ + EXC_XFER_EE_LITE(0x800, KernelFP) + #endif /* __HEAD_BOOKE_H__ */ diff --git a/arch/ppc/kernel/head_fsl_booke.S b/arch/ppc/kernel/head_fsl_booke.S index d64bf61d2b1f..f22ddce36135 100644 --- a/arch/ppc/kernel/head_fsl_booke.S +++ b/arch/ppc/kernel/head_fsl_booke.S @@ -504,7 +504,11 @@ interrupt_base: PROGRAM_EXCEPTION /* Floating Point Unavailable Interrupt */ +#ifdef CONFIG_PPC_FPU + FP_UNAVAILABLE_EXCEPTION +#else EXCEPTION(0x0800, FloatingPointUnavailable, UnknownException, EXC_XFER_EE) +#endif /* System Call Interrupt */ START_EXCEPTION(SystemCall) @@ -916,10 +920,12 @@ _GLOBAL(giveup_spe) /* * extern void giveup_fpu(struct task_struct *prev) * - * The e500 core does not have an FPU. + * Not all FSL Book-E cores have an FPU */ +#ifndef CONFIG_PPC_FPU _GLOBAL(giveup_fpu) blr +#endif /* * extern void abort(void) diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S index 73f7c23b0dd4..e4f1615ec13f 100644 --- a/arch/ppc/kernel/misc.S +++ b/arch/ppc/kernel/misc.S @@ -1096,17 +1096,7 @@ _GLOBAL(_get_SP) * and exceptions as if the cpu had performed the load or store. 
*/ -#if defined(CONFIG_4xx) || defined(CONFIG_E500) -_GLOBAL(cvt_fd) - lfs 0,0(r3) - stfd 0,0(r4) - blr - -_GLOBAL(cvt_df) - lfd 0,0(r3) - stfs 0,0(r4) - blr -#else +#ifdef CONFIG_PPC_FPU _GLOBAL(cvt_fd) lfd 0,-4(r5) /* load up fpscr value */ mtfsf 0xff,0 diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c index 361865c4bc84..f8e7e324a173 100644 --- a/arch/ppc/kernel/traps.c +++ b/arch/ppc/kernel/traps.c @@ -176,7 +176,7 @@ static inline int check_io_access(struct pt_regs *regs) #else #define get_mc_reason(regs) (mfspr(SPRN_MCSR)) #endif -#define REASON_FP 0 +#define REASON_FP ESR_FP #define REASON_ILLEGAL ESR_PIL #define REASON_PRIVILEGED ESR_PPR #define REASON_TRAP ESR_PTR diff --git a/include/asm-ppc/reg_booke.h b/include/asm-ppc/reg_booke.h index e70c25f3c339..45c5e6f2b7ab 100644 --- a/include/asm-ppc/reg_booke.h +++ b/include/asm-ppc/reg_booke.h @@ -305,6 +305,7 @@ do { \ #define ESR_PIL 0x08000000 /* Program Exception - Illegal */ #define ESR_PPR 0x04000000 /* Program Exception - Priveleged */ #define ESR_PTR 0x02000000 /* Program Exception - Trap */ +#define ESR_FP 0x01000000 /* Floating Point Operation */ #define ESR_DST 0x00800000 /* Storage Exception - Data miss */ #define ESR_DIZ 0x00400000 /* Storage Exception - Zone fault */ #define ESR_ST 0x00800000 /* Store Operation */ -- cgit v1.2.3 From 146a4b3bdfb5641bfbf975e29680b482b8b343ba Mon Sep 17 00:00:00 2001 From: Andreas Jaggi Date: Sun, 1 May 2005 08:58:41 -0700 Subject: [PATCH] macintosh/adbhid.c: adb buttons support for aluminium PowerBook G4 This patch adds support for the special adb buttons of the aluminium PowerBook G4. Signed-off-by: Andreas Jaggi Cc: Benjamin Herrenschmidt Cc: Paul Mackerras Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/macintosh/adbhid.c | 40 ++++++++++++++++++++++++++++++++++++++++ include/linux/input.h | 5 +++++ 2 files changed, 45 insertions(+) (limited to 'include') diff --git a/drivers/macintosh/adbhid.c b/drivers/macintosh/adbhid.c index 8f93d01d8928..db654e8bd67e 100644 --- a/drivers/macintosh/adbhid.c +++ b/drivers/macintosh/adbhid.c @@ -555,6 +555,42 @@ adbhid_buttons_input(unsigned char *data, int nb, struct pt_regs *regs, int auto #endif /* CONFIG_PMAC_BACKLIGHT */ input_report_key(&adbhid[id]->input, KEY_BRIGHTNESSUP, down); break; + + case 0xc: /* videomode switch */ + input_report_key(&adbhid[id]->input, KEY_SWITCHVIDEOMODE, down); + break; + + case 0xd: /* keyboard illumination toggle */ + input_report_key(&adbhid[id]->input, KEY_KBDILLUMTOGGLE, down); + break; + + case 0xe: /* keyboard illumination decrease */ + input_report_key(&adbhid[id]->input, KEY_KBDILLUMDOWN, down); + break; + + case 0xf: + switch (data[1]) { + case 0x8f: + case 0x0f: + /* keyboard illumination increase */ + input_report_key(&adbhid[id]->input, KEY_KBDILLUMUP, down); + break; + + case 0x7f: + case 0xff: + /* keypad overlay toogle */ + break; + + default: + printk(KERN_INFO "Unhandled ADB_MISC event %02x, %02x, %02x, %02x\n", + data[0], data[1], data[2], data[3]); + break; + } + break; + default: + printk(KERN_INFO "Unhandled ADB_MISC event %02x, %02x, %02x, %02x\n", + data[0], data[1], data[2], data[3]); + break; } } break; @@ -775,6 +811,10 @@ adbhid_input_register(int id, int default_id, int original_handler_id, set_bit(KEY_BRIGHTNESSUP, adbhid[id]->input.keybit); set_bit(KEY_BRIGHTNESSDOWN, adbhid[id]->input.keybit); set_bit(KEY_EJECTCD, adbhid[id]->input.keybit); + set_bit(KEY_SWITCHVIDEOMODE, adbhid[id]->input.keybit); + set_bit(KEY_KBDILLUMTOGGLE, 
adbhid[id]->input.keybit); + set_bit(KEY_KBDILLUMDOWN, adbhid[id]->input.keybit); + set_bit(KEY_KBDILLUMUP, adbhid[id]->input.keybit); break; } if (adbhid[id]->name[0]) diff --git a/include/linux/input.h b/include/linux/input.h index b70df8fe60e6..72731d7d189e 100644 --- a/include/linux/input.h +++ b/include/linux/input.h @@ -328,6 +328,11 @@ struct input_absinfo { #define KEY_BRIGHTNESSUP 225 #define KEY_MEDIA 226 +#define KEY_SWITCHVIDEOMODE 227 +#define KEY_KBDILLUMTOGGLE 228 +#define KEY_KBDILLUMDOWN 229 +#define KEY_KBDILLUMUP 230 + #define KEY_UNKNOWN 240 #define BTN_MISC 0x100 -- cgit v1.2.3 From 58366af5861eee1479426380e3c91ecb334c301d Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 1 May 2005 08:58:44 -0700 Subject: [PATCH] ppc64: update to use the new 4L headers This patch converts ppc64 to use the generic pgtable-nopud.h instead of the "fixup" header. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/ppc64/mm/hugetlbpage.c | 45 +++++----- arch/ppc64/mm/init.c | 198 ++++++++++++++++++++++---------------------- include/asm-ppc64/pgalloc.h | 2 +- include/asm-ppc64/pgtable.h | 41 +++++---- 4 files changed, 147 insertions(+), 139 deletions(-) (limited to 'include') diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index 390296efe3e0..d3bf86a5c1ad 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c @@ -42,7 +42,7 @@ static inline int hugepgd_index(unsigned long addr) return (addr & ~REGION_MASK) >> HUGEPGDIR_SHIFT; } -static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) +static pud_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) { int index; @@ -52,21 +52,21 @@ static pgd_t *hugepgd_offset(struct mm_struct *mm, unsigned long addr) index = hugepgd_index(addr); BUG_ON(index >= PTRS_PER_HUGEPGD); - return mm->context.huge_pgdir + index; + return (pud_t *)(mm->context.huge_pgdir + index); } -static inline pte_t *hugepte_offset(pgd_t *dir, unsigned long addr) +static inline pte_t *hugepte_offset(pud_t *dir, unsigned long addr) { int index; - if (pgd_none(*dir)) + if (pud_none(*dir)) return NULL; index = (addr >> HPAGE_SHIFT) % PTRS_PER_HUGEPTE; - return (pte_t *)pgd_page(*dir) + index; + return (pte_t *)pud_page(*dir) + index; } -static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) +static pud_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) { BUG_ON(! in_hugepage_area(mm->context, addr)); @@ -90,10 +90,9 @@ static pgd_t *hugepgd_alloc(struct mm_struct *mm, unsigned long addr) return hugepgd_offset(mm, addr); } -static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, - unsigned long addr) +static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr) { - if (! pgd_present(*dir)) { + if (! pud_present(*dir)) { pte_t *new; spin_unlock(&mm->page_table_lock); @@ -104,7 +103,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, * Because we dropped the lock, we should re-check the * entry, as somebody else could have populated it.. 
*/ - if (pgd_present(*dir)) { + if (pud_present(*dir)) { if (new) kmem_cache_free(zero_cache, new); } else { @@ -115,7 +114,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, ptepage = virt_to_page(new); ptepage->mapping = (void *) mm; ptepage->index = addr & HUGEPGDIR_MASK; - pgd_populate(mm, dir, new); + pud_populate(mm, dir, new); } } @@ -124,28 +123,28 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pgd_t *dir, static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) { - pgd_t *pgd; + pud_t *pud; BUG_ON(! in_hugepage_area(mm->context, addr)); - pgd = hugepgd_offset(mm, addr); - if (! pgd) + pud = hugepgd_offset(mm, addr); + if (! pud) return NULL; - return hugepte_offset(pgd, addr); + return hugepte_offset(pud, addr); } static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) { - pgd_t *pgd; + pud_t *pud; BUG_ON(! in_hugepage_area(mm->context, addr)); - pgd = hugepgd_alloc(mm, addr); - if (! pgd) + pud = hugepgd_alloc(mm, addr); + if (! pud) return NULL; - return hugepte_alloc(mm, pgd, addr); + return hugepte_alloc(mm, pud, addr); } static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, @@ -709,10 +708,10 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) /* cleanup any hugepte pages leftover */ for (i = 0; i < PTRS_PER_HUGEPGD; i++) { - pgd_t *pgd = pgdir + i; + pud_t *pud = (pud_t *)(pgdir + i); - if (! pgd_none(*pgd)) { - pte_t *pte = (pte_t *)pgd_page(*pgd); + if (! pud_none(*pud)) { + pte_t *pte = (pte_t *)pud_page(*pud); struct page *ptepage = virt_to_page(pte); ptepage->mapping = NULL; @@ -720,7 +719,7 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm) BUG_ON(memcmp(pte, empty_zero_page, PAGE_SIZE)); kmem_cache_free(zero_cache, pte); } - pgd_clear(pgd); + pud_clear(pud); } BUG_ON(memcmp(pgdir, empty_zero_page, PAGE_SIZE)); diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index a7149b9fc35c..cf33d7ec2e29 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c @@ -136,14 +136,78 @@ void iounmap(volatile void __iomem *addr) #else +static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr, + unsigned long end) +{ + pte_t *pte; + + pte = pte_offset_kernel(pmd, addr); + do { + pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte); + WARN_ON(!pte_none(ptent) && !pte_present(ptent)); + } while (pte++, addr += PAGE_SIZE, addr != end); +} + +static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr, + unsigned long end) +{ + pmd_t *pmd; + unsigned long next; + + pmd = pmd_offset(pud, addr); + do { + next = pmd_addr_end(addr, end); + if (pmd_none_or_clear_bad(pmd)) + continue; + unmap_im_area_pte(pmd, addr, next); + } while (pmd++, addr = next, addr != end); +} + +static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr, + unsigned long end) +{ + pud_t *pud; + unsigned long next; + + pud = pud_offset(pgd, addr); + do { + next = pud_addr_end(addr, end); + if (pud_none_or_clear_bad(pud)) + continue; + unmap_im_area_pmd(pud, addr, next); + } while (pud++, addr = next, addr != end); +} + +static void unmap_im_area(unsigned long addr, unsigned long end) +{ + struct mm_struct *mm = &ioremap_mm; + unsigned long next; + pgd_t *pgd; + + spin_lock(&mm->page_table_lock); + + pgd = pgd_offset_i(addr); + flush_cache_vunmap(addr, end); + do { + next = pgd_addr_end(addr, end); + if (pgd_none_or_clear_bad(pgd)) + continue; + unmap_im_area_pud(pgd, addr, next); + } while (pgd++, addr = next, addr != end); + flush_tlb_kernel_range(start, end); + + spin_unlock(&mm->page_table_lock); +} + /* * 
map_io_page currently only called by __ioremap * map_io_page adds an entry to the ioremap page table * and adds an entry to the HPT, possibly bolting it */ -static void map_io_page(unsigned long ea, unsigned long pa, int flags) +static int map_io_page(unsigned long ea, unsigned long pa, int flags) { pgd_t *pgdp; + pud_t *pudp; pmd_t *pmdp; pte_t *ptep; unsigned long vsid; @@ -151,9 +215,15 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) if (mem_init_done) { spin_lock(&ioremap_mm.page_table_lock); pgdp = pgd_offset_i(ea); - pmdp = pmd_alloc(&ioremap_mm, pgdp, ea); + pudp = pud_alloc(&ioremap_mm, pgdp, ea); + if (!pudp) + return -ENOMEM; + pmdp = pmd_alloc(&ioremap_mm, pudp, ea); + if (!pmdp) + return -ENOMEM; ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); - + if (!ptep) + return -ENOMEM; pa = abs_to_phys(pa); set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, __pgprot(flags))); @@ -181,6 +251,7 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags) panic("map_io_page: could not insert mapping"); } } + return 0; } @@ -194,9 +265,14 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, flags |= pgprot_val(PAGE_KERNEL); for (i = 0; i < size; i += PAGE_SIZE) - map_io_page(ea+i, pa+i, flags); + if (map_io_page(ea+i, pa+i, flags)) + goto failure; return (void __iomem *) (ea + (addr & ~PAGE_MASK)); + failure: + if (mem_init_done) + unmap_im_area(ea, ea + size); + return NULL; } @@ -206,10 +282,11 @@ ioremap(unsigned long addr, unsigned long size) return __ioremap(addr, size, _PAGE_NO_CACHE | _PAGE_GUARDED); } -void __iomem * -__ioremap(unsigned long addr, unsigned long size, unsigned long flags) +void __iomem * __ioremap(unsigned long addr, unsigned long size, + unsigned long flags) { unsigned long pa, ea; + void __iomem *ret; /* * Choose an address to map it to. 
@@ -232,12 +309,16 @@ __ioremap(unsigned long addr, unsigned long size, unsigned long flags) if (area == NULL) return NULL; ea = (unsigned long)(area->addr); + ret = __ioremap_com(addr, pa, ea, size, flags); + if (!ret) + im_free(area->addr); } else { ea = ioremap_bot; - ioremap_bot += size; + ret = __ioremap_com(addr, pa, ea, size, flags); + if (ret) + ioremap_bot += size; } - - return __ioremap_com(addr, pa, ea, size, flags); + return ret; } #define IS_PAGE_ALIGNED(_val) ((_val) == ((_val) & PAGE_MASK)) @@ -246,6 +327,7 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, unsigned long size, unsigned long flags) { struct vm_struct *area; + void __iomem *ret; /* For now, require page-aligned values for pa, ea, and size */ if (!IS_PAGE_ALIGNED(pa) || !IS_PAGE_ALIGNED(ea) || @@ -276,7 +358,12 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, } } - if (__ioremap_com(pa, pa, ea, size, flags) != (void *) ea) { + ret = __ioremap_com(pa, pa, ea, size, flags); + if (ret == NULL) { + printk(KERN_ERR "ioremap_explicit() allocation failure !\n"); + return 1; + } + if (ret != (void *) ea) { printk(KERN_ERR "__ioremap_com() returned unexpected addr\n"); return 1; } @@ -284,69 +371,6 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, return 0; } -static void unmap_im_area_pte(pmd_t *pmd, unsigned long address, - unsigned long size) -{ - unsigned long base, end; - pte_t *pte; - - if (pmd_none(*pmd)) - return; - if (pmd_bad(*pmd)) { - pmd_ERROR(*pmd); - pmd_clear(pmd); - return; - } - - pte = pte_offset_kernel(pmd, address); - base = address & PMD_MASK; - address &= ~PMD_MASK; - end = address + size; - if (end > PMD_SIZE) - end = PMD_SIZE; - - do { - pte_t page; - page = ptep_get_and_clear(&ioremap_mm, base + address, pte); - address += PAGE_SIZE; - pte++; - if (pte_none(page)) - continue; - if (pte_present(page)) - continue; - printk(KERN_CRIT "Whee.. Swapped out page in kernel page" - " table\n"); - } while (address < end); -} - -static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, - unsigned long size) -{ - unsigned long base, end; - pmd_t *pmd; - - if (pgd_none(*dir)) - return; - if (pgd_bad(*dir)) { - pgd_ERROR(*dir); - pgd_clear(dir); - return; - } - - pmd = pmd_offset(dir, address); - base = address & PGDIR_MASK; - address &= ~PGDIR_MASK; - end = address + size; - if (end > PGDIR_SIZE) - end = PGDIR_SIZE; - - do { - unmap_im_area_pte(pmd, base + address, end - address); - address = (address + PMD_SIZE) & PMD_MASK; - pmd++; - } while (address < end); -} - /* * Unmap an IO region and remove it from imalloc'd list. * Access to IO memory should be serialized by driver. 
@@ -356,39 +380,19 @@ static void unmap_im_area_pmd(pgd_t *dir, unsigned long address, */ void iounmap(volatile void __iomem *token) { - unsigned long address, start, end, size; - struct mm_struct *mm; - pgd_t *dir; + unsigned long address, size; void *addr; - if (!mem_init_done) { + if (!mem_init_done) return; - } addr = (void *) ((unsigned long __force) token & PAGE_MASK); - if ((size = im_free(addr)) == 0) { + if ((size = im_free(addr)) == 0) return; - } address = (unsigned long)addr; - start = address; - end = address + size; - - mm = &ioremap_mm; - spin_lock(&mm->page_table_lock); - - dir = pgd_offset_i(address); - flush_cache_vunmap(address, end); - do { - unmap_im_area_pmd(dir, address, end - address); - address = (address + PGDIR_SIZE) & PGDIR_MASK; - dir++; - } while (address && (address < end)); - flush_tlb_kernel_range(start, end); - - spin_unlock(&mm->page_table_lock); - return; + unmap_im_area(address, address + size); } static int iounmap_subset_regions(unsigned long addr, unsigned long size) diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h index 16232d740173..4fc4b739b380 100644 --- a/include/asm-ppc64/pgalloc.h +++ b/include/asm-ppc64/pgalloc.h @@ -27,7 +27,7 @@ pgd_free(pgd_t *pgd) kmem_cache_free(zero_cache, pgd); } -#define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD) +#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD) static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long addr) diff --git a/include/asm-ppc64/pgtable.h b/include/asm-ppc64/pgtable.h index a26120517c54..b984e2747e0c 100644 --- a/include/asm-ppc64/pgtable.h +++ b/include/asm-ppc64/pgtable.h @@ -1,8 +1,6 @@ #ifndef _PPC64_PGTABLE_H #define _PPC64_PGTABLE_H -#include - /* * This file contains the functions and defines necessary to modify and use * the ppc64 hashed page table. @@ -17,6 +15,8 @@ #include #endif /* __ASSEMBLY__ */ +#include + /* PMD_SHIFT determines what a second-level page table entry can map */ #define PMD_SHIFT (PAGE_SHIFT + PAGE_SHIFT - 3) #define PMD_SIZE (1UL << PMD_SHIFT) @@ -228,12 +228,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm); #define pmd_page_kernel(pmd) \ (__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT)) #define pmd_page(pmd) virt_to_page(pmd_page_kernel(pmd)) -#define pgd_set(pgdp, pmdp) (pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp))) -#define pgd_none(pgd) (!pgd_val(pgd)) -#define pgd_bad(pgd) ((pgd_val(pgd)) == 0) -#define pgd_present(pgd) (pgd_val(pgd) != 0UL) -#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0UL) -#define pgd_page(pgd) (__bpn_to_ba(pgd_val(pgd))) + +#define pud_set(pudp, pmdp) (pud_val(*(pudp)) = (__ba_to_bpn(pmdp))) +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) ((pud_val(pud)) == 0UL) +#define pud_present(pud) (pud_val(pud) != 0UL) +#define pud_clear(pudp) (pud_val(*(pudp)) = 0UL) +#define pud_page(pud) (__bpn_to_ba(pud_val(pud))) /* * Find an entry in a page-table-directory. We combine the address region @@ -245,12 +246,13 @@ void hugetlb_mm_free_pgd(struct mm_struct *mm); #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) /* Find an entry in the second-level page table.. */ -#define pmd_offset(dir,addr) \ - ((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) +#define pmd_offset(pudp,addr) \ + ((pmd_t *) pud_page(*(pudp)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))) /* Find an entry in the third-level page table.. 
*/ #define pte_offset_kernel(dir,addr) \ - ((pte_t *) pmd_page_kernel(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) + ((pte_t *) pmd_page_kernel(*(dir)) \ + + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) @@ -582,19 +584,22 @@ extern long native_hpte_insert(unsigned long hpte_group, unsigned long va, static inline pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea) { pgd_t *pg; + pud_t *pu; pmd_t *pm; pte_t *pt = NULL; pte_t pte; pg = pgdir + pgd_index(ea); if (!pgd_none(*pg)) { - - pm = pmd_offset(pg, ea); - if (pmd_present(*pm)) { - pt = pte_offset_kernel(pm, ea); - pte = *pt; - if (!pte_present(pte)) - pt = NULL; + pu = pud_offset(pg, ea); + if (!pud_none(*pu)) { + pm = pmd_offset(pu, ea); + if (pmd_present(*pm)) { + pt = pte_offset_kernel(pm, ea); + pte = *pt; + if (!pte_present(pte)) + pt = NULL; + } } } -- cgit v1.2.3 From a2f95a5ae99eb8209ad8d9faeaada00600bd8027 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 1 May 2005 08:58:45 -0700 Subject: [PATCH] ppc64: noexec fixes There were a few issues with the ppc64 noexec support: The 64bit ABI has a non executable stack by default. At the moment 64bit apps require a PT_GNU_STACK section in order to have a non executable stack. Disable the read implies exec workaround on the 64bit ABI. The 64bit toolchain has never had problems with incorrect mmap permissions (the 32bit has, thats why we need to retain the workaround). With these fixes as well as a gcc fix from Alan Modra (that was recently committed) 64bit apps work as expected. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ppc64/elf.h | 8 ++++++-- include/asm-ppc64/page.h | 15 ++++++++++++--- 2 files changed, 18 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-ppc64/elf.h b/include/asm-ppc64/elf.h index 8457d90244fb..6c42d61bedd1 100644 --- a/include/asm-ppc64/elf.h +++ b/include/asm-ppc64/elf.h @@ -229,9 +229,13 @@ do { \ /* * An executable for which elf_read_implies_exec() returns TRUE will - * have the READ_IMPLIES_EXEC personality flag set automatically. + * have the READ_IMPLIES_EXEC personality flag set automatically. This + * is only required to work around bugs in old 32bit toolchains. Since + * the 64bit ABI has never had these issues dont enable the workaround + * even if we have an executable stack. */ -#define elf_read_implies_exec(ex, exec_stk) (exec_stk != EXSTACK_DISABLE_X) +#define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ + (exec_stk != EXSTACK_DISABLE_X) : 0) #endif diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h index 20e0f19324e8..86219574c1a5 100644 --- a/include/asm-ppc64/page.h +++ b/include/asm-ppc64/page.h @@ -252,10 +252,19 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */ /* * This is the default if a program doesn't have a PT_GNU_STACK - * program header entry. + * program header entry. The PPC64 ELF ABI has a non executable stack + * stack by default, so in the absense of a PT_GNU_STACK program header + * we turn execute permission off. 
*/ -#define VM_STACK_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + +#define VM_STACK_DEFAULT_FLAGS \ + (test_thread_flag(TIF_32BIT) ? \ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) #endif /* __KERNEL__ */ #endif /* _PPC64_PAGE_H */ -- cgit v1.2.3 From 4b88e927e8c38f4053680a3098325142017a37f0 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 1 May 2005 08:58:46 -0700 Subject: [PATCH] ppc64: remove unnecessary include We no longer use any ppcdebug stuff in a.out.h, so remove the define. Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ppc64/a.out.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include') diff --git a/include/asm-ppc64/a.out.h b/include/asm-ppc64/a.out.h index 802338efcb19..3871e252a6f1 100644 --- a/include/asm-ppc64/a.out.h +++ b/include/asm-ppc64/a.out.h @@ -1,8 +1,6 @@ #ifndef __PPC64_A_OUT_H__ #define __PPC64_A_OUT_H__ -#include - /* * c 2001 PPC 64 Team, IBM Corp * -- cgit v1.2.3 From d637413f3f05b41f678f8004225b33b62274183f Mon Sep 17 00:00:00 2001 From: Jake Moilanen Date: Sun, 1 May 2005 08:58:47 -0700 Subject: [PATCH] ppc64: reverse prediction on spinlock busy loop code On our raw spinlocks, we currently have an attempt at the lock, and if we do not get it we enter a spin loop. This spinloop will likely continue for awhile, and we pridict likely. Shouldn't we predict that we will get out of the loop so our next instructions are already prefetched. Even when we miss because the lock is still held, it won't matter since we are waiting anyways. I did a couple quick benchmarks, but the results are inconclusive. 16-way 690 running specjbb with original code # ./specjbb 3000 16 1 1 19 30 120 ... Valid run, Score is 59282 16-way 690 running specjbb with unlikely code # ./specjbb 3000 16 1 1 19 30 120 ... Valid run, Score is 59541 I saw a smaller increase on a JS20 (~1.6%) JS20 specjbb w/ original code # ./specjbb 400 2 1 1 19 30 120 ... Valid run, Score is 20460 JS20 specjbb w/ unlikely code # ./specjbb 400 2 1 1 19 30 120 ... Valid run, Score is 20803 Anton said: Mispredicting the spinlock busy loop also means we slow down the rate at which we do the loads which can be good for heavily contended locks. Note: There are some gcc issues with our default build and branch prediction, but a CONFIG_POWER4_ONLY build should emit them correctly. I'm working with Alan Modra on it now. 
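For readers not familiar with the annotations, likely() and unlikely() are thin wrappers around gcc's __builtin_expect(). The userspace sketch below is illustrative only and is not taken from the kernel: the toy_spin_lock name and the C11 atomics stand in for the real lwarx/stwcx. loop, but the shape of the busy wait and the placement of the unlikely() hint are the point.

#include <stdatomic.h>

#define unlikely(x)	__builtin_expect(!!(x), 0)

/* Toy analogue of the patched busy-wait: try to take the lock, and if
 * it is already held, spin with plain loads until it looks free, then
 * retry.  Marking the inner condition unlikely tells gcc the loop is
 * expected to exit, so the code after the loop sits on the predicted,
 * fall-through path. */
static void toy_spin_lock(atomic_int *lock)
{
	while (atomic_exchange(lock, 1) != 0) {
		do {
			/* relax */
		} while (unlikely(atomic_load(lock) != 0));
	}
}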
Signed-off-by: Jake Moilanen Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-ppc64/spinlock.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'include') diff --git a/include/asm-ppc64/spinlock.h b/include/asm-ppc64/spinlock.h index a9b2a1162cf7..acd11564dd75 100644 --- a/include/asm-ppc64/spinlock.h +++ b/include/asm-ppc64/spinlock.h @@ -110,7 +110,7 @@ static void __inline__ _raw_spin_lock(spinlock_t *lock) HMT_low(); if (SHARED_PROCESSOR) __spin_yield(lock); - } while (likely(lock->lock != 0)); + } while (unlikely(lock->lock != 0)); HMT_medium(); } } @@ -128,7 +128,7 @@ static void __inline__ _raw_spin_lock_flags(spinlock_t *lock, unsigned long flag HMT_low(); if (SHARED_PROCESSOR) __spin_yield(lock); - } while (likely(lock->lock != 0)); + } while (unlikely(lock->lock != 0)); HMT_medium(); local_irq_restore(flags_dis); } @@ -194,7 +194,7 @@ static void __inline__ _raw_read_lock(rwlock_t *rw) HMT_low(); if (SHARED_PROCESSOR) __rw_yield(rw); - } while (likely(rw->lock < 0)); + } while (unlikely(rw->lock < 0)); HMT_medium(); } } @@ -251,7 +251,7 @@ static void __inline__ _raw_write_lock(rwlock_t *rw) HMT_low(); if (SHARED_PROCESSOR) __rw_yield(rw); - } while (likely(rw->lock != 0)); + } while (unlikely(rw->lock != 0)); HMT_medium(); } } -- cgit v1.2.3 From d5b63d78f1e75f6c6f04862dfb2f2a4aeffafd4c Mon Sep 17 00:00:00 2001 From: Denis Vlasenko Date: Sun, 1 May 2005 08:58:48 -0700 Subject: [PATCH] fix i386 memcpy This patch shortens non-constant memcpy() by two bytes and fixes spurious out-of-line constant memcpy(). # size vmlinux.org vmlinux text data bss dec hex filename 3954591 1553426 236544 5744561 57a7b1 vmlinux.org 3952615 1553426 236544 5742585 579ff9 vmlinux Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-i386/string.h | 89 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 61 insertions(+), 28 deletions(-) (limited to 'include') diff --git a/include/asm-i386/string.h b/include/asm-i386/string.h index 1679983d053f..6a78ac58c194 100644 --- a/include/asm-i386/string.h +++ b/include/asm-i386/string.h @@ -198,47 +198,80 @@ static inline void * __memcpy(void * to, const void * from, size_t n) int d0, d1, d2; __asm__ __volatile__( "rep ; movsl\n\t" - "testb $2,%b4\n\t" - "je 1f\n\t" - "movsw\n" - "1:\ttestb $1,%b4\n\t" - "je 2f\n\t" - "movsb\n" - "2:" + "movl %4,%%ecx\n\t" + "andl $3,%%ecx\n\t" +#if 1 /* want to pay 2 byte penalty for a chance to skip microcoded rep? */ + "jz 1f\n\t" +#endif + "rep ; movsb\n\t" + "1:" : "=&c" (d0), "=&D" (d1), "=&S" (d2) - :"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from) + : "0" (n/4), "g" (n), "1" ((long) to), "2" ((long) from) : "memory"); return (to); } /* - * This looks horribly ugly, but the compiler can optimize it totally, + * This looks ugly, but the compiler can optimize it totally, * as the count is constant. */ static inline void * __constant_memcpy(void * to, const void * from, size_t n) { - if (n <= 128) - return __builtin_memcpy(to, from, n); - -#define COMMON(x) \ -__asm__ __volatile__( \ - "rep ; movsl" \ - x \ - : "=&c" (d0), "=&D" (d1), "=&S" (d2) \ - : "0" (n/4),"1" ((long) to),"2" ((long) from) \ - : "memory"); -{ - int d0, d1, d2; + long esi, edi; + if (!n) return to; +#if 1 /* want to do small copies with non-string ops? 
*/ + switch (n) { + case 1: *(char*)to = *(char*)from; return to; + case 2: *(short*)to = *(short*)from; return to; + case 4: *(int*)to = *(int*)from; return to; +#if 1 /* including those doable with two moves? */ + case 3: *(short*)to = *(short*)from; + *((char*)to+2) = *((char*)from+2); return to; + case 5: *(int*)to = *(int*)from; + *((char*)to+4) = *((char*)from+4); return to; + case 6: *(int*)to = *(int*)from; + *((short*)to+2) = *((short*)from+2); return to; + case 8: *(int*)to = *(int*)from; + *((int*)to+1) = *((int*)from+1); return to; +#endif + } +#endif + esi = (long) from; + edi = (long) to; + if (n >= 5*4) { + /* large block: use rep prefix */ + int ecx; + __asm__ __volatile__( + "rep ; movsl" + : "=&c" (ecx), "=&D" (edi), "=&S" (esi) + : "0" (n/4), "1" (edi),"2" (esi) + : "memory" + ); + } else { + /* small block: don't clobber ecx + smaller code */ + if (n >= 4*4) __asm__ __volatile__("movsl" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + if (n >= 3*4) __asm__ __volatile__("movsl" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + if (n >= 2*4) __asm__ __volatile__("movsl" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + if (n >= 1*4) __asm__ __volatile__("movsl" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + } switch (n % 4) { - case 0: COMMON(""); return to; - case 1: COMMON("\n\tmovsb"); return to; - case 2: COMMON("\n\tmovsw"); return to; - default: COMMON("\n\tmovsw\n\tmovsb"); return to; + /* tail */ + case 0: return to; + case 1: __asm__ __volatile__("movsb" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + return to; + case 2: __asm__ __volatile__("movsw" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + return to; + default: __asm__ __volatile__("movsw\n\tmovsb" + :"=&D"(edi),"=&S"(esi):"0"(edi),"1"(esi):"memory"); + return to; } } - -#undef COMMON -} #define __HAVE_ARCH_MEMCPY -- cgit v1.2.3 From fd51f666fa591294bd7462447512666e61c56ea0 Mon Sep 17 00:00:00 2001 From: "H. J. Lu" Date: Sun, 1 May 2005 08:58:48 -0700 Subject: [PATCH] i386/x86_64 segment register access update The new i386/x86_64 assemblers no longer accept instructions for moving between a segment register and a 32bit memory location, i.e., movl (%eax),%ds movl %ds,(%eax) To generate instructions for moving between a segment register and a 16bit memory location without the 16bit operand size prefix, 0x66, mov (%eax),%ds mov %ds,(%eax) should be used. It will work with both new and old assemblers. The assembler starting from 2.16.90.0.1 will also support movw (%eax),%ds movw %ds,(%eax) without the 0x66 prefix. I am enclosing patches for 2.4 and 2.6 kernels here. The resulting kernel binaries should be unchanged as before, with old and new assemblers, if gcc never generates memory access for unsigned gsindex; asm volatile("movl %%gs,%0" : "=g" (gsindex)); If gcc does generate memory access for the code above, the upper bits in gsindex are undefined and the new assembler doesn't allow it. 
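As an aside, a minimal sketch of the pattern this converges on; the read_gs helper is invented purely for illustration and is not part of the patch.

/* Keep the destination 16 bits wide and use "mov" with no size suffix:
 * if gcc picks a register, a 16-bit register move is emitted; if it
 * picks memory, only two bytes are stored, which both old and new
 * assemblers accept. */
static inline unsigned short read_gs(void)
{
	unsigned short gsindex;

	asm volatile("mov %%gs,%0" : "=rm" (gsindex));
	return gsindex;
}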
Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/process.c | 4 ++-- arch/i386/kernel/vm86.c | 4 ++-- arch/x86_64/kernel/process.c | 12 ++++++------ include/asm-i386/system.h | 6 +++--- 4 files changed, 13 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index b2203e21acb3..85bd56d44314 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c @@ -611,8 +611,8 @@ struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct tas * Save away %fs and %gs. No need to save %es and %ds, as * those are always kernel segments while inside the kernel. */ - asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->fs)); - asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->gs)); + asm volatile("mov %%fs,%0":"=m" (prev->fs)); + asm volatile("mov %%gs,%0":"=m" (prev->gs)); /* * Restore %fs and %gs if needed. diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index 2f3d52dacff7..d16cd3738a48 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c @@ -294,8 +294,8 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk */ info->regs32->eax = 0; tsk->thread.saved_esp0 = tsk->thread.esp0; - asm volatile("movl %%fs,%0":"=m" (tsk->thread.saved_fs)); - asm volatile("movl %%gs,%0":"=m" (tsk->thread.saved_gs)); + asm volatile("mov %%fs,%0":"=m" (tsk->thread.saved_fs)); + asm volatile("mov %%gs,%0":"=m" (tsk->thread.saved_gs)); tss = &per_cpu(init_tss, get_cpu()); tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0; diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 9922d2ba24a3..761b6d35e338 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c @@ -402,10 +402,10 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp, p->thread.fs = me->thread.fs; p->thread.gs = me->thread.gs; - asm("movl %%gs,%0" : "=m" (p->thread.gsindex)); - asm("movl %%fs,%0" : "=m" (p->thread.fsindex)); - asm("movl %%es,%0" : "=m" (p->thread.es)); - asm("movl %%ds,%0" : "=m" (p->thread.ds)); + asm("mov %%gs,%0" : "=m" (p->thread.gsindex)); + asm("mov %%fs,%0" : "=m" (p->thread.fsindex)); + asm("mov %%es,%0" : "=m" (p->thread.es)); + asm("mov %%ds,%0" : "=m" (p->thread.ds)); if (unlikely(me->thread.io_bitmap_ptr != NULL)) { p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL); @@ -468,11 +468,11 @@ struct task_struct *__switch_to(struct task_struct *prev_p, struct task_struct * * Switch DS and ES. * This won't pick up thread selector changes, but I guess that is ok. 
*/ - asm volatile("movl %%es,%0" : "=m" (prev->es)); + asm volatile("mov %%es,%0" : "=m" (prev->es)); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); - asm volatile ("movl %%ds,%0" : "=m" (prev->ds)); + asm volatile ("mov %%ds,%0" : "=m" (prev->ds)); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); diff --git a/include/asm-i386/system.h b/include/asm-i386/system.h index 6f74d4c44a0e..3db717a244f0 100644 --- a/include/asm-i386/system.h +++ b/include/asm-i386/system.h @@ -81,7 +81,7 @@ static inline unsigned long _get_base(char * addr) #define loadsegment(seg,value) \ asm volatile("\n" \ "1:\t" \ - "movl %0,%%" #seg "\n" \ + "mov %0,%%" #seg "\n" \ "2:\n" \ ".section .fixup,\"ax\"\n" \ "3:\t" \ @@ -93,13 +93,13 @@ static inline unsigned long _get_base(char * addr) ".align 4\n\t" \ ".long 1b,3b\n" \ ".previous" \ - : :"m" (*(unsigned int *)&(value))) + : :"m" (value)) /* * Save a segment register away */ #define savesegment(seg, value) \ - asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value))) + asm volatile("mov %%" #seg ",%0":"=m" (value)) /* * Clear and set 'TS' bit respectively -- cgit v1.2.3 From 67701ae9767534534d3710664037dfde2cc04935 Mon Sep 17 00:00:00 2001 From: Jack F Vogel Date: Sun, 1 May 2005 08:58:48 -0700 Subject: [PATCH] check nmi watchdog is broken A bug against an xSeries system showed up recently noting that the check_nmi_watchdog() test was failing. I have been investigating it and discovered in both i386 and x86_64 the recent change to the routine to use the cpu_callin_map has uncovered a problem. Prior to that change, on an SMP box, the test was trivally passing because all cpu's were found to not yet be online, but now with the callin_map they are discovered, it goes on to test the counter and they have not yet begun to increment, so it announces a CPU is stuck and bails out. On all the systems I have access to test, the announcement of failure is also bougs... by the time you can login and check /proc/interrupts, the NMI count is happily incrementing on all CPUs. Its just that the test is being done too early. I have tried moving the call to the test around a bit, and it was always too early. I finally hit on this proposed solution, it delays the routine via a late_initcall(), seems like the right solution to me. 
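For illustration of the mechanism (the example_late_check function below is invented, not part of the patch): late_initcall() simply registers the routine at the last initcall level, which the boot sequence runs only after the secondary CPUs have been brought online, so the per-CPU NMI counters have had time to tick before they are inspected.

#include <linux/init.h>
#include <linux/kernel.h>

/* Runs at the late_initcall level, i.e. after the secondary CPUs have
 * called in and their NMI counters have started incrementing. */
static int __init example_late_check(void)
{
	printk(KERN_INFO "late_initcall: secondary CPUs are up by now\n");
	return 0;
}
late_initcall(example_late_check);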
Signed-off-by: Adrian Bunk Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/apic.c | 2 -- arch/i386/kernel/io_apic.c | 2 -- arch/i386/kernel/nmi.c | 11 +++++++---- arch/i386/kernel/smpboot.c | 3 --- arch/x86_64/kernel/io_apic.c | 2 -- arch/x86_64/kernel/nmi.c | 9 +++++++-- include/asm-i386/apic.h | 1 - include/asm-x86_64/apic.h | 1 - 8 files changed, 14 insertions(+), 17 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index e3879f7625c2..d509836b70c3 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c @@ -1265,8 +1265,6 @@ int __init APIC_init_uniprocessor (void) setup_local_APIC(); - if (nmi_watchdog == NMI_LOCAL_APIC) - check_nmi_watchdog(); #ifdef CONFIG_X86_IO_APIC if (smp_found_config) if (!skip_ioapic_setup && nr_ioapics) diff --git a/arch/i386/kernel/io_apic.c b/arch/i386/kernel/io_apic.c index 5e0d55be5435..7a324e8b86f9 100644 --- a/arch/i386/kernel/io_apic.c +++ b/arch/i386/kernel/io_apic.c @@ -2175,7 +2175,6 @@ static inline void check_timer(void) disable_8259A_irq(0); setup_nmi(); enable_8259A_irq(0); - check_nmi_watchdog(); } return; } @@ -2198,7 +2197,6 @@ static inline void check_timer(void) add_pin_to_irq(0, 0, pin2); if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); - check_nmi_watchdog(); } return; } diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c index 2f89d000f954..2c0ee9c2d020 100644 --- a/arch/i386/kernel/nmi.c +++ b/arch/i386/kernel/nmi.c @@ -102,20 +102,21 @@ int nmi_active; (P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \ P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE) -int __init check_nmi_watchdog (void) +static int __init check_nmi_watchdog(void) { unsigned int prev_nmi_count[NR_CPUS]; int cpu; - printk(KERN_INFO "testing NMI watchdog ... "); + if (nmi_watchdog == NMI_NONE) + return 0; + + printk(KERN_INFO "Testing NMI watchdog ... "); for (cpu = 0; cpu < NR_CPUS; cpu++) prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count; local_irq_enable(); mdelay((10*1000)/nmi_hz); // wait 10 ticks - /* FIXME: Only boot CPU is online at this stage. Check CPUs - as they come up. 
*/ for (cpu = 0; cpu < NR_CPUS; cpu++) { #ifdef CONFIG_SMP /* Check cpu_callin_map here because that is set @@ -139,6 +140,8 @@ int __init check_nmi_watchdog (void) return 0; } +/* This needs to happen later in boot so counters are working */ +late_initcall(check_nmi_watchdog); static int __init setup_nmi_watchdog(char *str) { diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c index fd36d2f65f88..cbea7ac582e5 100644 --- a/arch/i386/kernel/smpboot.c +++ b/arch/i386/kernel/smpboot.c @@ -1089,9 +1089,6 @@ static void __init smp_boot_cpus(unsigned int max_cpus) } } - if (nmi_watchdog == NMI_LOCAL_APIC) - check_nmi_watchdog(); - smpboot_setup_io_apic(); setup_boot_APIC_clock(); diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c index 29a257295484..60be58617eb9 100644 --- a/arch/x86_64/kernel/io_apic.c +++ b/arch/x86_64/kernel/io_apic.c @@ -1607,7 +1607,6 @@ static inline void check_timer(void) disable_8259A_irq(0); setup_nmi(); enable_8259A_irq(0); - check_nmi_watchdog(); } return; } @@ -1627,7 +1626,6 @@ static inline void check_timer(void) nmi_watchdog_default(); if (nmi_watchdog == NMI_IO_APIC) { setup_nmi(); - check_nmi_watchdog(); } return; } diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c index e00d4adec36b..61de0b34a01e 100644 --- a/arch/x86_64/kernel/nmi.c +++ b/arch/x86_64/kernel/nmi.c @@ -112,17 +112,20 @@ static __init int cpu_has_lapic(void) } } -int __init check_nmi_watchdog (void) +static int __init check_nmi_watchdog (void) { int counts[NR_CPUS]; int cpu; + if (nmi_watchdog == NMI_NONE) + return 0; + if (nmi_watchdog == NMI_LOCAL_APIC && !cpu_has_lapic()) { nmi_watchdog = NMI_NONE; return -1; } - printk(KERN_INFO "testing NMI watchdog ... "); + printk(KERN_INFO "Testing NMI watchdog ... "); for (cpu = 0; cpu < NR_CPUS; cpu++) counts[cpu] = cpu_pda[cpu].__nmi_count; @@ -148,6 +151,8 @@ int __init check_nmi_watchdog (void) return 0; } +/* Have this called later during boot so counters are updating */ +late_initcall(check_nmi_watchdog); int __init setup_nmi_watchdog(char *str) { diff --git a/include/asm-i386/apic.h b/include/asm-i386/apic.h index e1de67483f38..a5810cf7b578 100644 --- a/include/asm-i386/apic.h +++ b/include/asm-i386/apic.h @@ -109,7 +109,6 @@ extern int APIC_init_uniprocessor (void); extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); -extern int check_nmi_watchdog (void); extern void enable_NMI_through_LVT0 (void * dummy); extern unsigned int nmi_watchdog; diff --git a/include/asm-x86_64/apic.h b/include/asm-x86_64/apic.h index c025cc3ef789..e4b1017b8b2b 100644 --- a/include/asm-x86_64/apic.h +++ b/include/asm-x86_64/apic.h @@ -99,7 +99,6 @@ extern void disable_APIC_timer(void); extern void enable_APIC_timer(void); extern void clustered_apic_check(void); -extern int check_nmi_watchdog(void); extern void nmi_watchdog_default(void); extern int setup_nmi_watchdog(char *); -- cgit v1.2.3 From a2f7c354159b87dfbd9900f597d48d18755a9d16 Mon Sep 17 00:00:00 2001 From: Jaya Kumar Date: Sun, 1 May 2005 08:58:49 -0700 Subject: [PATCH] x86 reboot: Add reboot fixup for gx1/cs5530a This patch by Jaya Kumar introduces a generic infrastructure to deal with x86 chipsets with nonstandard reset sequences, and adds support for the Geode gx1/cs5530a chipset. Signed-off-by: Jaya Kumar Signed-off-by: H. 
Peter Anvin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/Kconfig | 18 +++++++++++++ arch/i386/kernel/Makefile | 1 + arch/i386/kernel/reboot.c | 2 ++ arch/i386/kernel/reboot_fixups.c | 56 ++++++++++++++++++++++++++++++++++++++++ include/linux/reboot_fixups.h | 10 +++++++ 5 files changed, 87 insertions(+) create mode 100644 arch/i386/kernel/reboot_fixups.c create mode 100644 include/linux/reboot_fixups.h (limited to 'include') diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index 17a0cbce6f30..99b4f294a52d 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig @@ -653,6 +653,24 @@ config I8K Say Y if you intend to run this kernel on a Dell Inspiron 8000. Say N otherwise. +config X86_REBOOTFIXUPS + bool "Enable X86 board specific fixups for reboot" + depends on X86 + default n + ---help--- + This enables chipset and/or board specific fixups to be done + in order to get reboot to work correctly. This is only needed on + some combinations of hardware and BIOS. The symptom, for which + this config is intended, is when reboot ends with a stalled/hung + system. + + Currently, the only fixup is for the Geode GX1/CS5530A/TROM2.1. + combination. + + Say Y if you want to enable the fixup. Currently, it's safe to + enable this option even if you don't need it. + Say N otherwise. + config MICROCODE tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support" ---help--- diff --git a/arch/i386/kernel/Makefile b/arch/i386/kernel/Makefile index aacdae6f372d..0fbcfe00dd8d 100644 --- a/arch/i386/kernel/Makefile +++ b/arch/i386/kernel/Makefile @@ -23,6 +23,7 @@ obj-$(CONFIG_X86_TRAMPOLINE) += trampoline.o obj-$(CONFIG_X86_MPPARSE) += mpparse.o obj-$(CONFIG_X86_LOCAL_APIC) += apic.o nmi.o obj-$(CONFIG_X86_IO_APIC) += io_apic.o +obj-$(CONFIG_X86_REBOOTFIXUPS) += reboot_fixups.o obj-$(CONFIG_X86_NUMAQ) += numaq.o obj-$(CONFIG_X86_SUMMIT_NUMA) += summit.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/i386/kernel/reboot.c b/arch/i386/kernel/reboot.c index 3d7e994563df..6dc27eb70ee7 100644 --- a/arch/i386/kernel/reboot.c +++ b/arch/i386/kernel/reboot.c @@ -13,6 +13,7 @@ #include #include #include "mach_reboot.h" +#include /* * Power off function, if any @@ -348,6 +349,7 @@ void machine_restart(char * __unused) /* rebooting needs to touch the page at absolute addr 0 */ *((unsigned short *)__va(0x472)) = reboot_mode; for (;;) { + mach_reboot_fixups(); /* for board specific fixups */ mach_reboot(); /* That didn't work - force a triple fault.. */ __asm__ __volatile__("lidt %0": :"m" (no_idt)); diff --git a/arch/i386/kernel/reboot_fixups.c b/arch/i386/kernel/reboot_fixups.c new file mode 100644 index 000000000000..1b183b378c2c --- /dev/null +++ b/arch/i386/kernel/reboot_fixups.c @@ -0,0 +1,56 @@ +/* + * linux/arch/i386/kernel/reboot_fixups.c + * + * This is a good place to put board specific reboot fixups. 
+ * + * List of supported fixups: + * geode-gx1/cs5530a - Jaya Kumar + * + */ + +#include +#include + +static void cs5530a_warm_reset(struct pci_dev *dev) +{ + /* writing 1 to the reset control register, 0x44 causes the + cs5530a to perform a system warm reset */ + pci_write_config_byte(dev, 0x44, 0x1); + udelay(50); /* shouldn't get here but be safe and spin-a-while */ + return; +} + +struct device_fixup { + unsigned int vendor; + unsigned int device; + void (*reboot_fixup)(struct pci_dev *); +}; + +static struct device_fixup fixups_table[] = { +{ PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, cs5530a_warm_reset }, +}; + +/* + * we see if any fixup is available for our current hardware. if there + * is a fixup, we call it and we expect to never return from it. if we + * do return, we keep looking and then eventually fall back to the + * standard mach_reboot on return. + */ +void mach_reboot_fixups(void) +{ + struct device_fixup *cur; + struct pci_dev *dev; + int i; + + for (i=0; i < (sizeof(fixups_table)/sizeof(fixups_table[0])); i++) { + cur = &(fixups_table[i]); + dev = pci_get_device(cur->vendor, cur->device, 0); + if (!dev) + continue; + + cur->reboot_fixup(dev); + } + + printk(KERN_WARNING "No reboot fixup found for your hardware\n"); +} + diff --git a/include/linux/reboot_fixups.h b/include/linux/reboot_fixups.h new file mode 100644 index 000000000000..480ea2d489d8 --- /dev/null +++ b/include/linux/reboot_fixups.h @@ -0,0 +1,10 @@ +#ifndef _LINUX_REBOOT_FIXUPS_H +#define _LINUX_REBOOT_FIXUPS_H + +#ifdef CONFIG_X86_REBOOTFIXUPS +extern void mach_reboot_fixups(void); +#else +#define mach_reboot_fixups() ((void)(0)) +#endif + +#endif /* _LINUX_REBOOT_FIXUPS_H */ -- cgit v1.2.3 From 5b7abc6fdcaf103f15e06c518ef0aec02a9c00e7 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Sun, 1 May 2005 08:58:49 -0700 Subject: [PATCH] CPUID bug and inconsistency fix The recent support for K8 multicore was misported from x86-64 to i386, due to an unnecessary inconsistency between the CPUID code. Sure, there is are no x86-64 VIA chips yet, but it should happen eventually. This patch fixes the i386 bug as well as makes x86-64 match i386 in the handing of the CPUID array. Signed-off-by: H. 
Peter Anvin Cc: Andi Kleen Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86_64/kernel/setup.c | 10 ++++++++-- include/asm-i386/cpufeature.h | 4 ++-- include/asm-x86_64/cpufeature.h | 14 ++++++++++---- 3 files changed, 20 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c index b18c114c7648..2959ce703df9 100644 --- a/arch/x86_64/kernel/setup.c +++ b/arch/x86_64/kernel/setup.c @@ -977,7 +977,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c) if ((xlvl & 0xffff0000) == 0x80000000) { if (xlvl >= 0x80000001) { c->x86_capability[1] = cpuid_edx(0x80000001); - c->x86_capability[5] = cpuid_ecx(0x80000001); + c->x86_capability[6] = cpuid_ecx(0x80000001); } if (xlvl >= 0x80000004) get_model_name(c); /* Default name */ @@ -1100,11 +1100,17 @@ static int show_cpuinfo(struct seq_file *m, void *v) NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + /* VIA/Cyrix/Centaur-defined */ + NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en", + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, + /* AMD-defined (#2) */ "lahf_lm", "cmp_legacy", NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, - NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL + NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, }; static char *x86_power_flags[] = { "ts", /* temperature sensor */ diff --git a/include/asm-i386/cpufeature.h b/include/asm-i386/cpufeature.h index e147cabd3bfe..ff1187e80c32 100644 --- a/include/asm-i386/cpufeature.h +++ b/include/asm-i386/cpufeature.h @@ -87,8 +87,8 @@ #define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ /* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */ -#define X86_FEATURE_LAHF_LM (5*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY (5*32+ 1) /* If yes HyperThreading not valid */ +#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) diff --git a/include/asm-x86_64/cpufeature.h b/include/asm-x86_64/cpufeature.h index e68ad97a6319..aea308c65709 100644 --- a/include/asm-x86_64/cpufeature.h +++ b/include/asm-x86_64/cpufeature.h @@ -7,7 +7,7 @@ #ifndef __ASM_X8664_CPUFEATURE_H #define __ASM_X8664_CPUFEATURE_H -#define NCAPINTS 6 +#define NCAPINTS 7 /* N 32-bit words worth of info */ /* Intel-defined CPU features, CPUID level 0x00000001, word 0 */ #define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */ @@ -74,9 +74,15 @@ #define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */ #define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */ -/* More extended AMD flags: CPUID level 0x80000001, ecx, word 5 */ -#define X86_FEATURE_LAHF_LM (5*32+ 0) /* LAHF/SAHF in long mode */ -#define X86_FEATURE_CMP_LEGACY (5*32+ 1) /* If yes HyperThreading not valid */ +/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */ +#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */ +#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */ +#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */ +#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */ + +/* More extended 
AMD flags: CPUID level 0x80000001, ecx, word 6 */ +#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */ +#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */ #define cpu_has(c, bit) test_bit(bit, (c)->x86_capability) #define boot_cpu_has(bit) test_bit(bit, boot_cpu_data.x86_capability) -- cgit v1.2.3 From 35492df5ae0f36f717448b2aea908d3a8891d1c4 Mon Sep 17 00:00:00 2001 From: john stultz Date: Sun, 1 May 2005 08:58:50 -0700 Subject: [PATCH] i386: fix hpet for systems that don't support legacy replacement Currently the i386 HPET code assumes the entire HPET implementation from the spec is present. This breaks on boxes that do not implement the optional legacy timer replacement functionality portion of the spec. This patch, which is very similar to my x86-64 patch for the same issue, fixes the problem allowing i386 systems that cannot use the HPET for the timer interrupt and RTC to still use the HPET as a time source. I've tested this patch on a system systems without HPET, with HPET but without legacy timer replacement, as well as HPET with legacy timer replacement. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/time.c | 2 +- arch/i386/kernel/time_hpet.c | 48 ++++++++++++++++++++---------------- arch/i386/kernel/timers/timer_hpet.c | 11 ++++++--- arch/i386/kernel/timers/timer_tsc.c | 2 +- include/asm-i386/hpet.h | 1 + 5 files changed, 37 insertions(+), 27 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/time.c b/arch/i386/kernel/time.c index 4d75b373f90e..a0dcb7c87c30 100644 --- a/arch/i386/kernel/time.c +++ b/arch/i386/kernel/time.c @@ -441,7 +441,7 @@ static void __init hpet_time_init(void) set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); - if (hpet_enable() >= 0) { + if ((hpet_enable() >= 0) && hpet_use_timer) { printk("Using HPET for base-timer\n"); } diff --git a/arch/i386/kernel/time_hpet.c b/arch/i386/kernel/time_hpet.c index 244a31b04be7..10a0cbb88e75 100644 --- a/arch/i386/kernel/time_hpet.c +++ b/arch/i386/kernel/time_hpet.c @@ -26,6 +26,7 @@ static unsigned long hpet_period; /* fsecs / HPET clock */ unsigned long hpet_tick; /* hpet clks count per tick */ unsigned long hpet_address; /* hpet memory map physical address */ +int hpet_use_timer; static int use_hpet; /* can be used for runtime check of hpet */ static int boot_hpet_disable; /* boottime override for HPET timer */ @@ -73,27 +74,30 @@ static int hpet_timer_stop_set_go(unsigned long tick) hpet_writel(0, HPET_COUNTER); hpet_writel(0, HPET_COUNTER + 4); - /* - * Set up timer 0, as periodic with first interrupt to happen at - * hpet_tick, and period also hpet_tick. - */ - cfg = hpet_readl(HPET_T0_CFG); - cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | - HPET_TN_SETVAL | HPET_TN_32BIT; - hpet_writel(cfg, HPET_T0_CFG); - - /* - * The first write after writing TN_SETVAL to the config register sets - * the counter value, the second write sets the threshold. - */ - hpet_writel(tick, HPET_T0_CMP); - hpet_writel(tick, HPET_T0_CMP); + if (hpet_use_timer) { + /* + * Set up timer 0, as periodic with first interrupt to happen at + * hpet_tick, and period also hpet_tick. + */ + cfg = hpet_readl(HPET_T0_CFG); + cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC | + HPET_TN_SETVAL | HPET_TN_32BIT; + hpet_writel(cfg, HPET_T0_CFG); + /* + * The first write after writing TN_SETVAL to the config register sets + * the counter value, the second write sets the threshold. 
+ */ + hpet_writel(tick, HPET_T0_CMP); + hpet_writel(tick, HPET_T0_CMP); + } /* * Go! */ cfg = hpet_readl(HPET_CFG); - cfg |= HPET_CFG_ENABLE | HPET_CFG_LEGACY; + if (hpet_use_timer) + cfg |= HPET_CFG_LEGACY; + cfg |= HPET_CFG_ENABLE; hpet_writel(cfg, HPET_CFG); return 0; @@ -128,12 +132,11 @@ int __init hpet_enable(void) * However, we can do with one timer otherwise using the * the single HPET timer for system time. */ - if ( #ifdef CONFIG_HPET_EMULATE_RTC - !(id & HPET_ID_NUMBER) || -#endif - !(id & HPET_ID_LEGSUP)) + if (!(id & HPET_ID_NUMBER)) return -1; +#endif + hpet_period = hpet_readl(HPET_PERIOD); if ((hpet_period < HPET_MIN_PERIOD) || (hpet_period > HPET_MAX_PERIOD)) @@ -152,6 +155,8 @@ int __init hpet_enable(void) if (hpet_tick_rem > (hpet_period >> 1)) hpet_tick++; /* rounding the result */ + hpet_use_timer = id & HPET_ID_LEGSUP; + if (hpet_timer_stop_set_go(hpet_tick)) return -1; @@ -202,7 +207,8 @@ int __init hpet_enable(void) #endif #ifdef CONFIG_X86_LOCAL_APIC - wait_timer_tick = wait_hpet_tick; + if (hpet_use_timer) + wait_timer_tick = wait_hpet_tick; #endif return 0; } diff --git a/arch/i386/kernel/timers/timer_hpet.c b/arch/i386/kernel/timers/timer_hpet.c index 713134e71844..f778f471a09a 100644 --- a/arch/i386/kernel/timers/timer_hpet.c +++ b/arch/i386/kernel/timers/timer_hpet.c @@ -79,7 +79,7 @@ static unsigned long get_offset_hpet(void) eax = hpet_readl(HPET_COUNTER); eax -= hpet_last; /* hpet delta */ - + eax = min(hpet_tick, eax); /* * Time offset = (hpet delta) * ( usecs per HPET clock ) * = (hpet delta) * ( usecs per tick / HPET clocks per tick) @@ -105,9 +105,12 @@ static void mark_offset_hpet(void) last_offset = ((unsigned long long)last_tsc_high<<32)|last_tsc_low; rdtsc(last_tsc_low, last_tsc_high); - offset = hpet_readl(HPET_T0_CMP) - hpet_tick; - if (unlikely(((offset - hpet_last) > hpet_tick) && (hpet_last != 0))) { - int lost_ticks = (offset - hpet_last) / hpet_tick; + if (hpet_use_timer) + offset = hpet_readl(HPET_T0_CMP) - hpet_tick; + else + offset = hpet_readl(HPET_COUNTER); + if (unlikely(((offset - hpet_last) >= (2*hpet_tick)) && (hpet_last != 0))) { + int lost_ticks = ((offset - hpet_last) / hpet_tick) - 1; jiffies_64 += lost_ticks; } hpet_last = offset; diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c index a685994e5c8e..7926d967be00 100644 --- a/arch/i386/kernel/timers/timer_tsc.c +++ b/arch/i386/kernel/timers/timer_tsc.c @@ -477,7 +477,7 @@ static int __init init_tsc(char* override) if (cpu_has_tsc) { unsigned long tsc_quotient; #ifdef CONFIG_HPET_TIMER - if (is_hpet_enabled()){ + if (is_hpet_enabled() && hpet_use_timer) { unsigned long result, remain; printk("Using TSC for gettimeofday\n"); tsc_quotient = calibrate_tsc_hpet(NULL); diff --git a/include/asm-i386/hpet.h b/include/asm-i386/hpet.h index 6e20b079f1d3..16ef9f996e3f 100644 --- a/include/asm-i386/hpet.h +++ b/include/asm-i386/hpet.h @@ -92,6 +92,7 @@ extern unsigned long hpet_tick; /* hpet clks count per tick */ extern unsigned long hpet_address; /* hpet memory map physical address */ +extern int hpet_use_timer; extern int hpet_rtc_timer_init(void); extern int hpet_enable(void); -- cgit v1.2.3 From 4d24a439a6b2280357d62fb30a73350cf253bdb7 Mon Sep 17 00:00:00 2001 From: Jason Gaston Date: Sun, 1 May 2005 08:58:50 -0700 Subject: [PATCH] irq and pci_ids for Intel ICH7DH & ICH7-M DH MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This patch adds the Intel ICH7DH and ICH7-M DH DID's to the irq.c and pci_ids.h files. 
Signed-off-by:  Jason Gaston Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/pci/irq.c | 2 ++ include/linux/pci_ids.h | 2 ++ 2 files changed, 4 insertions(+) (limited to 'include') diff --git a/arch/i386/pci/irq.c b/arch/i386/pci/irq.c index e07589d04f64..d6598da4b67b 100644 --- a/arch/i386/pci/irq.c +++ b/arch/i386/pci/irq.c @@ -495,6 +495,8 @@ static __init int intel_router_probe(struct irq_router *r, struct pci_dev *route case PCI_DEVICE_ID_INTEL_ICH6_1: case PCI_DEVICE_ID_INTEL_ICH7_0: case PCI_DEVICE_ID_INTEL_ICH7_1: + case PCI_DEVICE_ID_INTEL_ICH7_30: + case PCI_DEVICE_ID_INTEL_ICH7_31: case PCI_DEVICE_ID_INTEL_ESB2_0: r->name = "PIIX/ICH"; r->get = pirq_piix_get; diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 6a1897481942..5d5820a4cf10 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2415,6 +2415,8 @@ #define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9 #define PCI_DEVICE_ID_INTEL_ICH7_2 0x27c0 #define PCI_DEVICE_ID_INTEL_ICH7_3 0x27c1 +#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0 +#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd #define PCI_DEVICE_ID_INTEL_ICH7_5 0x27c4 #define PCI_DEVICE_ID_INTEL_ICH7_6 0x27c5 #define PCI_DEVICE_ID_INTEL_ICH7_7 0x27c8 -- cgit v1.2.3 From f9ba70535dc12d9eb57d466a2ecd749e16eca866 Mon Sep 17 00:00:00 2001 From: Venkatesh Pallipadi Date: Sun, 1 May 2005 08:58:51 -0700 Subject: [PATCH] Increase number of e820 entries hard limit from 32 to 128 The specifications that talk about E820 map doesn't have an upper limit on the number of e820 entries. But, today's kernel has a hard limit of 32. With increase in memory size, we are seeing the number of E820 entries reaching close to 32. Patch below bumps the number upto 128. The patch changes the location of EDDBUF in zero-page (as it comes after E820). As, EDDBUF is not used by boot loaders, this patch should not have any effect on bootloader-setup code interface. Patch covers both i386 and x86-64. Tested on: * grub booting bzImage * lilo booting bzImage with EDID info enabled * pxeboot of bzImage Side-effect: bss increases by ~ 2K and init.data increases by ~7.5K on all systems, due to increase in size of static arrays. 
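To make the quoted numbers concrete, and assuming the usual packed 20-byte e820 entry of this era (the struct below is reproduced for illustration and is not part of the patch):

/* Assumed entry layout from asm-i386/e820.h of this vintage: a packed
 * 20-byte record (8 + 8 + 4 bytes on i386). */
struct e820entry {
	unsigned long long addr;	/* start of memory segment */
	unsigned long long size;	/* size of memory segment */
	unsigned long type;		/* type of memory segment */
} __attribute__((packed));

/* (128 - 32) extra entries * 20 bytes = 1920 bytes per static map,
 * which lines up with the ~2K bss growth quoted above. */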
Signed-off-by: Venkatesh Pallipadi Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/i386/zero-page.txt | 6 +++--- arch/i386/boot/setup.S | 6 +++--- arch/x86_64/boot/setup.S | 6 +++--- arch/x86_64/kernel/head64.c | 4 +--- arch/x86_64/kernel/setup64.c | 3 ++- include/asm-i386/e820.h | 2 +- include/asm-i386/setup.h | 2 +- include/asm-x86_64/bootsetup.h | 3 ++- include/asm-x86_64/e820.h | 2 +- include/linux/edd.h | 2 +- 10 files changed, 18 insertions(+), 18 deletions(-) (limited to 'include') diff --git a/Documentation/i386/zero-page.txt b/Documentation/i386/zero-page.txt index 67c053a099ed..df28c7416781 100644 --- a/Documentation/i386/zero-page.txt +++ b/Documentation/i386/zero-page.txt @@ -79,6 +79,6 @@ Offset Type Description 0x22c unsigned long ramdisk_max 0x230 16 bytes trampoline 0x290 - 0x2cf EDD_MBR_SIG_BUFFER (edd.S) -0x2d0 - 0x600 E820MAP -0x600 - 0x7ff EDDBUF (edd.S) for disk signature read sector -0x600 - 0x7eb EDDBUF (edd.S) for edd data +0x2d0 - 0xd00 E820MAP +0xd00 - 0xeff EDDBUF (edd.S) for disk signature read sector +0xd00 - 0xeeb EDDBUF (edd.S) for edd data diff --git a/arch/i386/boot/setup.S b/arch/i386/boot/setup.S index a934ab32bf8e..caa1fde6904e 100644 --- a/arch/i386/boot/setup.S +++ b/arch/i386/boot/setup.S @@ -164,7 +164,7 @@ ramdisk_max: .long (-__PAGE_OFFSET-(512 << 20)-1) & 0x7fffffff trampoline: call start_of_setup .align 16 # The offset at this point is 0x240 - .space (0x7ff-0x240+1) # E820 & EDD space (ending at 0x7ff) + .space (0xeff-0x240+1) # E820 & EDD space (ending at 0xeff) # End of setup header ##################################################### start_of_setup: @@ -333,9 +333,9 @@ jmpe820: # sizeof(e820rec). # good820: - movb (E820NR), %al # up to 32 entries + movb (E820NR), %al # up to 128 entries cmpb $E820MAX, %al - jnl bail820 + jae bail820 incb (E820NR) movw %di, %ax diff --git a/arch/x86_64/boot/setup.S b/arch/x86_64/boot/setup.S index 3e838be9dbe7..75d4d2ad93b3 100644 --- a/arch/x86_64/boot/setup.S +++ b/arch/x86_64/boot/setup.S @@ -160,7 +160,7 @@ ramdisk_max: .long 0xffffffff trampoline: call start_of_setup .align 16 # The offset at this point is 0x240 - .space (0x7ff-0x240+1) # E820 & EDD space (ending at 0x7ff) + .space (0xeff-0x240+1) # E820 & EDD space (ending at 0xeff) # End of setup header ##################################################### start_of_setup: @@ -412,9 +412,9 @@ jmpe820: # sizeof(e820rec). 
# good820: - movb (E820NR), %al # up to 32 entries + movb (E820NR), %al # up to 128 entries cmpb $E820MAX, %al - jnl bail820 + jae bail820 incb (E820NR) movw %di, %ax diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c index 6cad46c98a23..77df2ccb08f1 100644 --- a/arch/x86_64/kernel/head64.c +++ b/arch/x86_64/kernel/head64.c @@ -29,8 +29,6 @@ static void __init clear_bss(void) (unsigned long) __bss_end - (unsigned long) __bss_start); } -extern char x86_boot_params[2048]; - #define NEW_CL_POINTER 0x228 /* Relative to real mode data */ #define OLD_CL_MAGIC_ADDR 0x90020 #define OLD_CL_MAGIC 0xA33F @@ -44,7 +42,7 @@ static void __init copy_bootdata(char *real_mode_data) int new_data; char * command_line; - memcpy(x86_boot_params, real_mode_data, 2048); + memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE); new_data = *(int *) (x86_boot_params + NEW_CL_POINTER); if (!new_data) { if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) { diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c index b5305b04bc40..678b7ac33b8b 100644 --- a/arch/x86_64/kernel/setup64.c +++ b/arch/x86_64/kernel/setup64.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -26,7 +27,7 @@ #include #include -char x86_boot_params[2048] __initdata = {0,}; +char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,}; cpumask_t cpu_initialized __initdata = CPU_MASK_NONE; diff --git a/include/asm-i386/e820.h b/include/asm-i386/e820.h index 5c285aee7294..edf65be21a92 100644 --- a/include/asm-i386/e820.h +++ b/include/asm-i386/e820.h @@ -13,7 +13,7 @@ #define __E820_HEADER #define E820MAP 0x2d0 /* our map */ -#define E820MAX 32 /* number of entries in E820MAP */ +#define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 diff --git a/include/asm-i386/setup.h b/include/asm-i386/setup.h index 8814b54c75d4..7a32184d54bf 100644 --- a/include/asm-i386/setup.h +++ b/include/asm-i386/setup.h @@ -16,7 +16,7 @@ #define MAXMEM_PFN PFN_DOWN(MAXMEM) #define MAX_NONPAE_PFN (1 << 20) -#define PARAM_SIZE 2048 +#define PARAM_SIZE 4096 #define COMMAND_LINE_SIZE 256 #define OLD_CL_MAGIC_ADDR 0x90020 diff --git a/include/asm-x86_64/bootsetup.h b/include/asm-x86_64/bootsetup.h index b570a484dc50..b829f7b534be 100644 --- a/include/asm-x86_64/bootsetup.h +++ b/include/asm-x86_64/bootsetup.h @@ -2,7 +2,8 @@ #ifndef _X86_64_BOOTSETUP_H #define _X86_64_BOOTSETUP_H 1 -extern char x86_boot_params[2048]; +#define BOOT_PARAM_SIZE 4096 +extern char x86_boot_params[BOOT_PARAM_SIZE]; /* * This is set up by the setup-routine at boot-time diff --git a/include/asm-x86_64/e820.h b/include/asm-x86_64/e820.h index 08f83a4b4f4a..8e94edf0b984 100644 --- a/include/asm-x86_64/e820.h +++ b/include/asm-x86_64/e820.h @@ -14,7 +14,7 @@ #include #define E820MAP 0x2d0 /* our map */ -#define E820MAX 32 /* number of entries in E820MAP */ +#define E820MAX 128 /* number of entries in E820MAP */ #define E820NR 0x1e8 /* # entries in E820MAP */ #define E820_RAM 1 diff --git a/include/linux/edd.h b/include/linux/edd.h index c6e6747a401d..162512b886f7 100644 --- a/include/linux/edd.h +++ b/include/linux/edd.h @@ -32,7 +32,7 @@ #define EDDNR 0x1e9 /* addr of number of edd_info structs at EDDBUF in boot_params - treat this as 1 byte */ -#define EDDBUF 0x600 /* addr of edd_info structs in boot_params */ +#define EDDBUF 0xd00 /* addr of edd_info structs in boot_params */ #define EDDMAXNR 6 /* number of edd_info structs starting at EDDBUF */ #define EDDEXTSIZE 8 
/* change these if you muck with the structures */ #define EDDPARMSIZE 74 -- cgit v1.2.3 From 92eac95287d75f220a8bbef6646f51a6497c4b4c Mon Sep 17 00:00:00 2001 From: Jeff Dike Date: Sun, 1 May 2005 08:58:53 -0700 Subject: [PATCH] uml: fix oops related to exception table Paolo 'Blaisorblade' Giarrusso Prevent the kernel from oopsing during the extable sorting, as it can do now, because the extable is in the readonly section of the binary. Jeff says: The exception table turned RO in 2.6.11-rc3-mm1 for some reason. Moving it causes it to land in the writable data section of the binary. Paolo says: This patch fixes a oops on startup, which can be easily triggered by compiling with CONFIG_MODE_TT disabled, and STATIC_LINK either disabled or enabled. The resulting kernel will always Oops on startup, after printing this simple output: I've verified, by binary search on the BitKeeper repository (synced up as of 2.6.12-rc2), starting from the range 2.6.11-2.6.12-rc1, that this bug shows up on BitKeeper revisions in the range [@1.1994.11.168,+inf), i.e. starting from this: [PATCH] lib/sort: Replace insertion sort in exception tables Since UML does not use the exception table, it's likely that insertion sort didn't happen to write anything on the table. Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-um/common.lds.S | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/asm-um/common.lds.S b/include/asm-um/common.lds.S index a3d6aab0e74d..1010153faaf9 100644 --- a/include/asm-um/common.lds.S +++ b/include/asm-um/common.lds.S @@ -8,11 +8,6 @@ _sdata = .; PROVIDE (sdata = .); - . = ALIGN(16); /* Exception table */ - __start___ex_table = .; - __ex_table : { *(__ex_table) } - __stop___ex_table = .; - RODATA .unprotected : { *(.unprotected) } @@ -20,6 +15,10 @@ PROVIDE (_unprotected_end = .); . = ALIGN(4096); + __start___ex_table = .; + __ex_table : { *(__ex_table) } + __stop___ex_table = .; + __uml_setup_start = .; .uml.setup.init : { *(.uml.setup.init) } __uml_setup_end = .; -- cgit v1.2.3 From c45166be3cc666ce88fe623ad79276c943e74eff Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sun, 1 May 2005 08:58:54 -0700 Subject: [PATCH] uml: support AES i586 crypto driver We want to make possible, for the user, to enable the i586 AES implementation. This requires a restructure. - Add a CONFIG_UML_X86 to notify that we are building a UML for i386. - Rename CONFIG_64_BIT to CONFIG_64BIT as is used for all other archs - Tell crypto/Kconfig that UML_X86 is as good as X86 - Tell it that it must exclude not X86_64 but 64BIT, which will give the same results. - Tell kbuild to descend down into arch/i386/crypto/ to build what's needed. 
Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/Kconfig | 1 + arch/um/Kconfig_i386 | 6 +++++- arch/um/Kconfig_x86_64 | 6 +++++- arch/um/Makefile-i386 | 2 +- arch/um/defconfig | 10 +++++----- crypto/Kconfig | 4 ++-- include/asm-um/elf.h | 2 +- include/asm-um/page.h | 2 +- include/asm-um/pgtable-3level.h | 2 +- 9 files changed, 22 insertions(+), 13 deletions(-) (limited to 'include') diff --git a/arch/um/Kconfig b/arch/um/Kconfig index 9a23df182123..c5292181a664 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -244,6 +244,7 @@ config KERNEL_HALF_GIGS config HIGHMEM bool "Highmem support" + depends on !64BIT config KERNEL_STACK_ORDER int "Kernel stack size order" diff --git a/arch/um/Kconfig_i386 b/arch/um/Kconfig_i386 index 203c242201b6..e41f3748d30f 100644 --- a/arch/um/Kconfig_i386 +++ b/arch/um/Kconfig_i386 @@ -1,4 +1,8 @@ -config 64_BIT +config UML_X86 + bool + default y + +config 64BIT bool default n diff --git a/arch/um/Kconfig_x86_64 b/arch/um/Kconfig_x86_64 index 768dc6626a8d..fd8d7e8982b1 100644 --- a/arch/um/Kconfig_x86_64 +++ b/arch/um/Kconfig_x86_64 @@ -1,4 +1,8 @@ -config 64_BIT +config UML_X86 + bool + default y + +config 64BIT bool default y diff --git a/arch/um/Makefile-i386 b/arch/um/Makefile-i386 index 97b223bfa78e..f9e3c0f06541 100644 --- a/arch/um/Makefile-i386 +++ b/arch/um/Makefile-i386 @@ -1,4 +1,4 @@ -SUBARCH_CORE := arch/um/sys-i386/ +SUBARCH_CORE := arch/um/sys-i386/ arch/i386/crypto/ TOP_ADDR := $(CONFIG_TOP_ADDR) diff --git a/arch/um/defconfig b/arch/um/defconfig index fc3075c589d8..4067c3aa5b60 100644 --- a/arch/um/defconfig +++ b/arch/um/defconfig @@ -1,7 +1,7 @@ # # Automatically generated make config: don't edit -# Linux kernel version: 2.6.12-rc1-bk1 -# Sun Mar 20 16:53:00 2005 +# Linux kernel version: 2.6.12-rc3-skas3-v9-pre2 +# Sun Apr 24 19:46:10 2005 # CONFIG_GENERIC_HARDIRQS=y CONFIG_UML=y @@ -15,7 +15,8 @@ CONFIG_GENERIC_CALIBRATE_DELAY=y # CONFIG_MODE_TT=y CONFIG_MODE_SKAS=y -# CONFIG_64_BIT is not set +CONFIG_UML_X86=y +# CONFIG_64BIT is not set CONFIG_TOP_ADDR=0xc0000000 # CONFIG_3_LEVEL_PGTABLES is not set CONFIG_ARCH_HAS_SC_SIGNALS=y @@ -41,6 +42,7 @@ CONFIG_UML_REAL_TIME_CLOCK=y CONFIG_EXPERIMENTAL=y CONFIG_CLEAN_COMPILE=y CONFIG_BROKEN_ON_SMP=y +CONFIG_INIT_ENV_ARG_LIMIT=32 # # General setup @@ -158,7 +160,6 @@ CONFIG_UML_NET_SLIRP=y # CONFIG_PACKET=y CONFIG_PACKET_MMAP=y -# CONFIG_NETLINK_DEV is not set CONFIG_UNIX=y # CONFIG_NET_KEY is not set CONFIG_INET=y @@ -412,6 +413,5 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_FS is not set CONFIG_FRAME_POINTER=y CONFIG_PT_PROXY=y -# CONFIG_GPROF is not set # CONFIG_GCOV is not set # CONFIG_SYSCALL_DEBUG is not set diff --git a/crypto/Kconfig b/crypto/Kconfig index 536754faf4d2..90d6089d60ed 100644 --- a/crypto/Kconfig +++ b/crypto/Kconfig @@ -146,7 +146,7 @@ config CRYPTO_SERPENT config CRYPTO_AES tristate "AES cipher algorithms" - depends on CRYPTO && !(X86 && !X86_64) + depends on CRYPTO && !((X86 || UML_X86) && !64BIT) help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. @@ -166,7 +166,7 @@ config CRYPTO_AES config CRYPTO_AES_586 tristate "AES cipher algorithms (i586)" - depends on CRYPTO && (X86 && !X86_64) + depends on CRYPTO && ((X86 || UML_X86) && !64BIT) help AES cipher algorithms (FIPS-197). AES uses the Rijndael algorithm. 
diff --git a/include/asm-um/elf.h b/include/asm-um/elf.h index b3a7258f9971..7908f8fe8231 100644 --- a/include/asm-um/elf.h +++ b/include/asm-um/elf.h @@ -13,7 +13,7 @@ extern long elf_aux_hwcap; #define elf_check_arch(x) (1) -#ifdef CONFIG_64_BIT +#ifdef CONFIG_64BIT #define ELF_CLASS ELFCLASS64 #else #define ELF_CLASS ELFCLASS32 diff --git a/include/asm-um/page.h b/include/asm-um/page.h index 3620a08dc9f3..102eb3df1aaf 100644 --- a/include/asm-um/page.h +++ b/include/asm-um/page.h @@ -27,7 +27,7 @@ struct page; #define clear_user_page(page, vaddr, pg) clear_page(page) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from) -#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64_BIT) +#if defined(CONFIG_3_LEVEL_PGTABLES) && !defined(CONFIG_64BIT) typedef struct { unsigned long pte_low, pte_high; } pte_t; typedef struct { unsigned long long pmd; } pmd_t; diff --git a/include/asm-um/pgtable-3level.h b/include/asm-um/pgtable-3level.h index bdbc3f97e20b..d309f3a9e6f6 100644 --- a/include/asm-um/pgtable-3level.h +++ b/include/asm-um/pgtable-3level.h @@ -145,7 +145,7 @@ static inline pmd_t pfn_pmd(pfn_t page_nr, pgprot_t pgprot) */ #define PTE_FILE_MAX_BITS 32 -#ifdef CONFIG_64_BIT +#ifdef CONFIG_64BIT #define pte_to_pgoff(p) ((p).pte >> 32) -- cgit v1.2.3 From c16993d9009b4311f0e6088af38844eabc8b5e5b Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sun, 1 May 2005 08:58:54 -0700 Subject: [PATCH] uml: inline empty proc Cleanup: make an inline of this empty proc. Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/kernel/process_kern.c | 4 ---- include/asm-um/processor-generic.h | 6 +++++- 2 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/arch/um/kernel/process_kern.c b/arch/um/kernel/process_kern.c index 1d719d5b4bb9..7a943696f950 100644 --- a/arch/um/kernel/process_kern.c +++ b/arch/um/kernel/process_kern.c @@ -161,10 +161,6 @@ void *get_current(void) return(current); } -void prepare_to_copy(struct task_struct *tsk) -{ -} - int copy_thread(int nr, unsigned long clone_flags, unsigned long sp, unsigned long stack_top, struct task_struct * p, struct pt_regs *regs) diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h index 038ba6fc88b8..4d9404989b5a 100644 --- a/include/asm-um/processor-generic.h +++ b/include/asm-um/processor-generic.h @@ -89,7 +89,11 @@ extern struct task_struct *alloc_task_struct(void); extern void release_thread(struct task_struct *); extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags); extern void dump_thread(struct pt_regs *regs, struct user *u); -extern void prepare_to_copy(struct task_struct *tsk); + +static inline void prepare_to_copy(struct task_struct *tsk) +{ +} + extern unsigned long thread_saved_pc(struct task_struct *t); -- cgit v1.2.3 From e9c527163d31da9f616e989a90429729525c5233 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sun, 1 May 2005 08:58:54 -0700 Subject: [PATCH] uml: move va_copy conditional def GCC 2.95 uses __va_copy instead of va_copy. Handle it inside compiler.h instead of in a casual file, and avoid the risk that this breaks with a newer compiler (which it could do). 
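For context, a minimal userspace sketch of the construct being kept alive here (not kernel code; the in-kernel user is do_buffer_op() in the hunk below, which forwards a va_list handed to it through a pointer):

#include <stdarg.h>
#include <stdio.h>

/* Sums 'n' int arguments from a va_list, iterating over a private copy. */
static int sum_list(int n, va_list ap)
{
        va_list aq;
        int i, sum = 0;

        va_copy(aq, ap);        /* in the kernel, compiler-gcc2.h now maps this to __va_copy for gcc 2.95 */
        for (i = 0; i < n; i++)
                sum += va_arg(aq, int);
        va_end(aq);
        return sum;
}

static int sum_args(int n, ...)
{
        va_list ap;
        int sum;

        va_start(ap, n);
        sum = sum_list(n, ap);
        va_end(ap);
        return sum;
}

int main(void)
{
        printf("%d\n", sum_args(3, 1, 2, 3));   /* prints 6 */
        return 0;
}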
Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/um/kernel/skas/uaccess.c | 4 ++-- include/linux/compiler-gcc2.h | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 7575ec489b63..f7da9d027672 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c @@ -3,6 +3,7 @@ * Licensed under the GPL */ +#include "linux/compiler.h" #include "linux/stddef.h" #include "linux/kernel.h" #include "linux/string.h" @@ -61,8 +62,7 @@ static void do_buffer_op(void *jmpbuf, void *arg_ptr) void *arg; int *res; - /* Some old gccs recognize __va_copy, but not va_copy */ - __va_copy(args, *(va_list *)arg_ptr); + va_copy(args, *(va_list *)arg_ptr); addr = va_arg(args, unsigned long); len = va_arg(args, int); is_write = va_arg(args, int); diff --git a/include/linux/compiler-gcc2.h b/include/linux/compiler-gcc2.h index 5a359153ffd9..ebed17660c5f 100644 --- a/include/linux/compiler-gcc2.h +++ b/include/linux/compiler-gcc2.h @@ -22,3 +22,8 @@ # define __attribute_pure__ __attribute__((pure)) # define __attribute_const__ __attribute__((__const__)) #endif + +/* GCC 2.95.x/2.96 recognize __va_copy, but not va_copy. Actually later GCC's + * define both va_copy and __va_copy, but the latter may go away, so limit this + * to this header */ +#define va_copy __va_copy -- cgit v1.2.3 From acef2e55d2a1b59bb5610cacc546c1d1b5de2dc9 Mon Sep 17 00:00:00 2001 From: Paolo 'Blaisorblade' Giarrusso Date: Sun, 1 May 2005 08:58:56 -0700 Subject: [PATCH] uml: commentary about forking flag Add some commentary about UML internals, for a strange trick. Signed-off-by: Paolo 'Blaisorblade' Giarrusso Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-um/processor-generic.h | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'include') diff --git a/include/asm-um/processor-generic.h b/include/asm-um/processor-generic.h index 4d9404989b5a..b953b1ad3b02 100644 --- a/include/asm-um/processor-generic.h +++ b/include/asm-um/processor-generic.h @@ -17,6 +17,10 @@ struct task_struct; struct mm_struct; struct thread_struct { + /* This flag is set to 1 before calling do_fork (and analyzed in + * copy_thread) to mark that we are begin called from userspace (fork / + * vfork / clone), and reset to 0 after. It is left to 0 when called + * from kernelspace (i.e. kernel_thread() or fork_idle(), as of 2.6.11). */ int forking; int nsyscalls; struct pt_regs regs; -- cgit v1.2.3 From 0b642ede47969d4180b0922d982777fe64379228 Mon Sep 17 00:00:00 2001 From: Peter Oberparleiter Date: Sun, 1 May 2005 08:58:58 -0700 Subject: [PATCH] s390: default storage key Provide an easy way to define a non-zero storage key at compile time. This is useful for debugging purposes. 
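As a rough illustration of the single compile-time knob this adds (userspace sketch; the shift amounts are taken from the header hunks below, and the _31/_64 names are invented for the example, the real headers call both PSW_DEFAULT_KEY under #ifdef __s390x__):

#include <stdio.h>

#define PAGE_DEFAULT_ACC        0UL     /* bump to e.g. 3UL for storage-key debugging */
#define PAGE_DEFAULT_KEY        (PAGE_DEFAULT_ACC << 4)
#define PSW_DEFAULT_KEY_31      (PAGE_DEFAULT_ACC << 20)
#define PSW_DEFAULT_KEY_64      ((unsigned long long)PAGE_DEFAULT_ACC << 52)

int main(void)
{
        printf("storage key %#lx, PSW key %#lx (31-bit) / %#llx (64-bit)\n",
               PAGE_DEFAULT_KEY, PSW_DEFAULT_KEY_31, PSW_DEFAULT_KEY_64);
        return 0;
}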
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/kernel/s390_ksyms.c | 1 - arch/s390/kernel/setup.c | 20 ++++++++++++++++---- drivers/s390/cio/cio.c | 4 ++-- drivers/s390/cio/device_ops.c | 6 +++--- drivers/s390/cio/qdio.h | 6 ++++-- include/asm-s390/page.h | 2 ++ include/asm-s390/processor.h | 2 +- include/asm-s390/ptrace.h | 16 +++++++++++++--- 8 files changed, 41 insertions(+), 16 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c index 11fd6d556d8f..bee654abb6d3 100644 --- a/arch/s390/kernel/s390_ksyms.c +++ b/arch/s390/kernel/s390_ksyms.c @@ -34,7 +34,6 @@ EXPORT_SYMBOL(__clear_user_asm); EXPORT_SYMBOL(__strncpy_from_user_asm); EXPORT_SYMBOL(__strnlen_user_asm); EXPORT_SYMBOL(diag10); -EXPORT_SYMBOL(default_storage_key); /* * semaphore ops diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index f0679be4f96f..df83215beac3 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -44,6 +44,8 @@ #include #include #include +#include +#include /* * Machine setup.. @@ -53,7 +55,6 @@ unsigned int console_devno = -1; unsigned int console_irq = -1; unsigned long memory_size = 0; unsigned long machine_flags = 0; -unsigned int default_storage_key = 0; struct { unsigned long addr, size, type; } memory_chunk[MEMORY_CHUNKS] = { { 0 } }; @@ -402,7 +403,7 @@ setup_lowcore(void) lc = (struct _lowcore *) __alloc_bootmem(lc_pages * PAGE_SIZE, lc_pages * PAGE_SIZE, 0); memset(lc, 0, lc_pages * PAGE_SIZE); - lc->restart_psw.mask = PSW_BASE_BITS; + lc->restart_psw.mask = PSW_BASE_BITS | PSW_DEFAULT_KEY; lc->restart_psw.addr = PSW_ADDR_AMODE | (unsigned long) restart_int_handler; lc->external_new_psw.mask = PSW_KERNEL_BITS; @@ -470,7 +471,7 @@ static void __init setup_memory(void) { unsigned long bootmap_size; - unsigned long start_pfn, end_pfn; + unsigned long start_pfn, end_pfn, init_pfn; unsigned long last_rw_end; int i; @@ -481,6 +482,10 @@ setup_memory(void) start_pfn = (__pa(&_end) + PAGE_SIZE - 1) >> PAGE_SHIFT; end_pfn = max_pfn = memory_end >> PAGE_SHIFT; + /* Initialize storage key for kernel pages */ + for (init_pfn = 0 ; init_pfn < start_pfn; init_pfn++) + page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY); + /* * Initialize the boot-time allocator (with low memory only): */ @@ -491,7 +496,7 @@ setup_memory(void) */ last_rw_end = start_pfn; - for (i = 0; i < 16 && memory_chunk[i].size > 0; i++) { + for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) { unsigned long start_chunk, end_chunk; if (memory_chunk[i].type != CHUNK_READ_WRITE) @@ -505,6 +510,11 @@ setup_memory(void) if (end_chunk > end_pfn) end_chunk = end_pfn; if (start_chunk < end_chunk) { + /* Initialize storage key for RAM pages */ + for (init_pfn = start_chunk ; init_pfn < end_chunk; + init_pfn++) + page_set_storage_key(init_pfn << PAGE_SHIFT, + PAGE_DEFAULT_KEY); free_bootmem(start_chunk << PAGE_SHIFT, (end_chunk - start_chunk) << PAGE_SHIFT); if (last_rw_end < start_chunk) @@ -513,6 +523,8 @@ setup_memory(void) } } + psw_set_key(PAGE_DEFAULT_KEY); + if (last_rw_end < end_pfn - 1) add_memory_hole(last_rw_end, end_pfn - 1); diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 99ce5a567982..1d9b3f18d8de 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/cio.c * S/390 common I/O routines -- low level i/o calls - * $Revision: 1.131 $ + * $Revision: 1.133 $ * * Copyright (C) 1999-2002 IBM Deutschland 
Entwicklung GmbH, * IBM Corporation @@ -228,7 +228,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */ int cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm) { - return cio_start_key(sch, cpa, lpm, default_storage_key); + return cio_start_key(sch, cpa, lpm, PAGE_DEFAULT_KEY); } /* diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index 11e260e0b9c9..02d01a0de16c 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c @@ -1,7 +1,7 @@ /* * drivers/s390/cio/device_ops.c * - * $Revision: 1.55 $ + * $Revision: 1.56 $ * * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, * IBM Corporation @@ -128,7 +128,7 @@ ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa, unsigned long intparm, __u8 lpm, unsigned long flags) { return ccw_device_start_key(cdev, cpa, intparm, lpm, - default_storage_key, flags); + PAGE_DEFAULT_KEY, flags); } int @@ -137,7 +137,7 @@ ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa, int expires) { return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm, - default_storage_key, flags, + PAGE_DEFAULT_KEY, flags, expires); } diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index 9ad14db24143..b6daadac4e8b 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h @@ -1,7 +1,9 @@ #ifndef _CIO_QDIO_H #define _CIO_QDIO_H -#define VERSION_CIO_QDIO_H "$Revision: 1.26 $" +#include + +#define VERSION_CIO_QDIO_H "$Revision: 1.32 $" #ifdef CONFIG_QDIO_DEBUG #define QDIO_VERBOSE_LEVEL 9 @@ -42,7 +44,7 @@ #define QDIO_Q_LAPS 5 -#define QDIO_STORAGE_KEY 0 +#define QDIO_STORAGE_KEY PAGE_DEFAULT_KEY #define L2_CACHELINE_SIZE 256 #define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h index 614e2a93c703..2be287b9df88 100644 --- a/include/asm-s390/page.h +++ b/include/asm-s390/page.h @@ -16,6 +16,8 @@ #define PAGE_SHIFT 12 #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) +#define PAGE_DEFAULT_ACC 0 +#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4) #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h index 88c272ca48bf..fb46e9090b50 100644 --- a/include/asm-s390/processor.h +++ b/include/asm-s390/processor.h @@ -245,7 +245,7 @@ static inline void enabled_wait(void) psw_t wait_psw; wait_psw.mask = PSW_BASE_BITS | PSW_MASK_IO | PSW_MASK_EXT | - PSW_MASK_MCHECK | PSW_MASK_WAIT; + PSW_MASK_MCHECK | PSW_MASK_WAIT | PSW_DEFAULT_KEY; #ifndef __s390x__ asm volatile ( " basr %0,0\n" diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h index 1dc80666e97e..4eff8f2e3bf1 100644 --- a/include/asm-s390/ptrace.h +++ b/include/asm-s390/ptrace.h @@ -185,6 +185,7 @@ #include #include #include +#include typedef union { @@ -235,6 +236,7 @@ typedef struct #define PSW_ADDR_INSN 0x7FFFFFFFUL #define PSW_BASE_BITS 0x00080000UL +#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 20) #define PSW_ASC_PRIMARY 0x00000000UL #define PSW_ASC_ACCREG 0x00004000UL @@ -260,6 +262,7 @@ typedef struct #define PSW_BASE_BITS 0x0000000180000000UL #define PSW_BASE32_BITS 0x0000000080000000UL +#define PSW_DEFAULT_KEY (((unsigned long) PAGE_DEFAULT_ACC) << 52) #define PSW_ASC_PRIMARY 0x0000000000000000UL #define PSW_ASC_ACCREG 0x0000400000000000UL @@ -268,14 +271,15 @@ typedef struct #define PSW_USER32_BITS (PSW_BASE32_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ - PSW_MASK_PSTATE) + 
PSW_MASK_PSTATE | PSW_DEFAULT_KEY) #endif /* __s390x__ */ -#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY) +#define PSW_KERNEL_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_PRIMARY | \ + PSW_DEFAULT_KEY) #define PSW_USER_BITS (PSW_BASE_BITS | PSW_MASK_DAT | PSW_ASC_HOME | \ PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK | \ - PSW_MASK_PSTATE) + PSW_MASK_PSTATE | PSW_DEFAULT_KEY) /* This macro merges a NEW PSW mask specified by the user into the currently active PSW mask CURRENT, modifying only those @@ -470,6 +474,12 @@ struct user_regs_struct extern void show_regs(struct pt_regs * regs); #endif +static inline void +psw_set_key(unsigned int key) +{ + asm volatile ( "spka 0(%0)" : : "d" (key) ); +} + #endif /* __ASSEMBLY__ */ #endif /* _S390_PTRACE_H */ -- cgit v1.2.3 From e8f0641ef74eaa71ed9aa9d19c4b741c2143d752 Mon Sep 17 00:00:00 2001 From: Michael Holzheu Date: Sun, 1 May 2005 08:58:58 -0700 Subject: [PATCH] s390: allow longer debug feature names The current limitation of 16 characters of the debug feature names turned out to be insufficient. Increase it to 64 characters. Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-s390/debug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/asm-s390/debug.h b/include/asm-s390/debug.h index 28ef2354b1b2..6bbcdea42a86 100644 --- a/include/asm-s390/debug.h +++ b/include/asm-s390/debug.h @@ -43,7 +43,7 @@ struct __debug_entry{ #define DEBUG_OFF_LEVEL -1 /* level where debug is switched off */ #define DEBUG_FLUSH_ALL -1 /* parameter to flush all areas */ #define DEBUG_MAX_VIEWS 10 /* max number of views in proc fs */ -#define DEBUG_MAX_PROCF_LEN 16 /* max length for a proc file name */ +#define DEBUG_MAX_PROCF_LEN 64 /* max length for a proc file name */ #define DEBUG_DEFAULT_LEVEL 3 /* initial debug level */ #define DEBUG_DIR_ROOT "s390dbf" /* name of debug root directory in proc fs */ -- cgit v1.2.3 From 4beb37097b20b61054b15c56848e4ffcef093819 Mon Sep 17 00:00:00 2001 From: Cornelia Huck Date: Sun, 1 May 2005 08:58:59 -0700 Subject: [PATCH] s390: remove ioctl32 from dasdcmb The ioctl32_conversion routines will be deprecated: Remove them from dasd_cmb and handle the three cmb ioctls like all other dasd ioctls. 
Signed-off-by: Martin Schwidefsky Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/s390/kernel/compat_ioctl.c | 7 ++++++- drivers/s390/block/dasd_cmb.c | 19 ++----------------- include/asm-s390/cmb.h | 2 +- 3 files changed, 9 insertions(+), 19 deletions(-) (limited to 'include') diff --git a/arch/s390/kernel/compat_ioctl.c b/arch/s390/kernel/compat_ioctl.c index 96571ff7115d..03d03c6d3cbb 100644 --- a/arch/s390/kernel/compat_ioctl.c +++ b/arch/s390/kernel/compat_ioctl.c @@ -16,6 +16,7 @@ #define CODE #include "../../../fs/compat_ioctl.c" #include +#include #include static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd, @@ -58,7 +59,11 @@ COMPATIBLE_IOCTL(BIODASDPRRD) COMPATIBLE_IOCTL(BIODASDPSRD) COMPATIBLE_IOCTL(BIODASDGATTR) COMPATIBLE_IOCTL(BIODASDSATTR) - +#if defined(CONFIG_DASD_CMB) || defined(CONFIG_DASD_CMB_MODULE) +COMPATIBLE_IOCTL(BIODASDCMFENABLE) +COMPATIBLE_IOCTL(BIODASDCMFDISABLE) +COMPATIBLE_IOCTL(BIODASDREADALLCMB) +#endif #endif #if defined(CONFIG_S390_TAPE) || defined(CONFIG_S390_TAPE_MODULE) diff --git a/drivers/s390/block/dasd_cmb.c b/drivers/s390/block/dasd_cmb.c index ed1ab474c0c6..4f365bff275c 100644 --- a/drivers/s390/block/dasd_cmb.c +++ b/drivers/s390/block/dasd_cmb.c @@ -1,5 +1,5 @@ /* - * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.6 $) + * linux/drivers/s390/block/dasd_cmb.c ($Revision: 1.9 $) * * Linux on zSeries Channel Measurement Facility support * (dasd device driver interface) @@ -23,7 +23,6 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include -#include #include #include #include @@ -84,27 +83,13 @@ dasd_ioctl_readall_cmb(struct block_device *bdev, int no, long args) static inline int ioctl_reg(unsigned int no, dasd_ioctl_fn_t handler) { - int ret; - ret = dasd_ioctl_no_register(THIS_MODULE, no, handler); -#ifdef CONFIG_COMPAT - if (ret) - return ret; - - ret = register_ioctl32_conversion(no, NULL); - if (ret) - dasd_ioctl_no_unregister(THIS_MODULE, no, handler); -#endif - return ret; + return dasd_ioctl_no_register(THIS_MODULE, no, handler); } static inline void ioctl_unreg(unsigned int no, dasd_ioctl_fn_t handler) { dasd_ioctl_no_unregister(THIS_MODULE, no, handler); -#ifdef CONFIG_COMPAT - unregister_ioctl32_conversion(no); -#endif - } static void diff --git a/include/asm-s390/cmb.h b/include/asm-s390/cmb.h index 1bfe2bd630b5..dae1dd4fb937 100644 --- a/include/asm-s390/cmb.h +++ b/include/asm-s390/cmb.h @@ -52,7 +52,7 @@ struct cmbdata { #define BIODASDREADALLCMB _IOWR(DASD_IOCTL_LETTER,33,struct cmbdata) #ifdef __KERNEL__ - +struct ccw_device; /** * enable_cmf() - switch on the channel measurement for a specific device * @cdev: The ccw device to be enabled -- cgit v1.2.3 From e43379f10b42194b8a6e1de342cfb44463c0f6da Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Sun, 1 May 2005 08:59:00 -0700 Subject: [PATCH] nice and rt-prio rlimits Add a pair of rlimits for allowing non-root tasks to raise nice and rt priorities. Defaults to traditional behavior. Originally written by Chris Wright. The patch implements a simple rlimit ceiling for the RT (and nice) priorities a task can set. The rlimit defaults to 0, meaning no change in behavior by default. A value of 50 means RT priority levels 1-50 are allowed. A value of 100 means all 99 privilege levels from 1 to 99 are allowed. CAP_SYS_NICE is blanket permission. (akpm: see http://www.uwsg.iu.edu/hypermail/linux/kernel/0503.1/1921.html for tips on integrating this with PAM). 
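A userspace sketch of the intended use (illustrative only; assumes the resource numbers added below and a libc that passes the raw resource value through to the kernel):

#include <stdio.h>
#include <sched.h>
#include <sys/resource.h>

#ifndef RLIMIT_RTPRIO
#define RLIMIT_RTPRIO 14        /* value added to asm-generic/resource.h below */
#endif

int main(void)
{
        struct rlimit rlim;
        struct sched_param sp = { .sched_priority = 10 };

        if (getrlimit(RLIMIT_RTPRIO, &rlim) == 0)
                printf("RLIMIT_RTPRIO soft limit: %llu\n",
                       (unsigned long long)rlim.rlim_cur);

        /* Succeeds for an unprivileged task only if 10 <= rlim_cur
         * (or the task has CAP_SYS_NICE, as before). */
        if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
                perror("sched_setscheduler");
        else
                puts("running SCHED_FIFO at priority 10");
        return 0;
}

On the nice side, can_nice() below converts with nice_rlim = 19 - nice, so an RLIMIT_NICE of 39 covers the full range down to nice -20, while the default of 0 keeps the traditional behaviour.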
Signed-off-by: Matt Mackall Acked-by: Ingo Molnar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-generic/resource.h | 7 ++++++- include/linux/sched.h | 1 + kernel/sched.c | 25 +++++++++++++++++++------ kernel/sys.c | 2 +- 4 files changed, 27 insertions(+), 8 deletions(-) (limited to 'include') diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h index b1fcda9eac23..cfe3692b23e5 100644 --- a/include/asm-generic/resource.h +++ b/include/asm-generic/resource.h @@ -41,8 +41,11 @@ #define RLIMIT_LOCKS 10 /* maximum file locks held */ #define RLIMIT_SIGPENDING 11 /* max number of pending signals */ #define RLIMIT_MSGQUEUE 12 /* maximum bytes in POSIX mqueues */ +#define RLIMIT_NICE 13 /* max nice prio allowed to raise to + 0-39 for nice level 19 .. -20 */ +#define RLIMIT_RTPRIO 14 /* maximum realtime priority */ -#define RLIM_NLIMITS 13 +#define RLIM_NLIMITS 15 /* * SuS says limits have to be unsigned. @@ -81,6 +84,8 @@ [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ [RLIMIT_SIGPENDING] = { 0, 0 }, \ [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ + [RLIMIT_NICE] = { 0, 0 }, \ + [RLIMIT_RTPRIO] = { 0, 0 }, \ } #endif /* __KERNEL__ */ diff --git a/include/linux/sched.h b/include/linux/sched.h index 1cced971232c..8960f99ea128 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -845,6 +845,7 @@ extern void sched_idle_next(void); extern void set_user_nice(task_t *p, long nice); extern int task_prio(const task_t *p); extern int task_nice(const task_t *p); +extern int can_nice(const task_t *p, const int nice); extern int task_curr(const task_t *p); extern int idle_cpu(int cpu); extern int sched_setscheduler(struct task_struct *, int, struct sched_param *); diff --git a/kernel/sched.c b/kernel/sched.c index 9bb7489ee645..5dadcc6df7dd 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -3223,6 +3223,19 @@ out_unlock: EXPORT_SYMBOL(set_user_nice); +/* + * can_nice - check if a task can reduce its nice value + * @p: task + * @nice: nice value + */ +int can_nice(const task_t *p, const int nice) +{ + /* convert nice value [19,-20] to rlimit style value [0,39] */ + int nice_rlim = 19 - nice; + return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur || + capable(CAP_SYS_NICE)); +} + #ifdef __ARCH_WANT_SYS_NICE /* @@ -3242,12 +3255,8 @@ asmlinkage long sys_nice(int increment) * We don't have to worry. Conceptually one call occurs first * and we have a single winner. 
*/ - if (increment < 0) { - if (!capable(CAP_SYS_NICE)) - return -EPERM; - if (increment < -40) - increment = -40; - } + if (increment < -40) + increment = -40; if (increment > 40) increment = 40; @@ -3257,6 +3266,9 @@ asmlinkage long sys_nice(int increment) if (nice > 19) nice = 19; + if (increment < 0 && !can_nice(current, nice)) + return -EPERM; + retval = security_task_setnice(current, nice); if (retval) return retval; @@ -3372,6 +3384,7 @@ recheck: return -EINVAL; if ((policy == SCHED_FIFO || policy == SCHED_RR) && + param->sched_priority > p->signal->rlim[RLIMIT_RTPRIO].rlim_cur && !capable(CAP_SYS_NICE)) return -EPERM; if ((current->euid != p->euid) && (current->euid != p->uid) && diff --git a/kernel/sys.c b/kernel/sys.c index df2ddcc6863b..7f43d6e62c7a 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -227,7 +227,7 @@ static int set_one_prio(struct task_struct *p, int niceval, int error) error = -EPERM; goto out; } - if (niceval < task_nice(p) && !capable(CAP_SYS_NICE)) { + if (niceval < task_nice(p) && !can_nice(p, niceval)) { error = -EACCES; goto out; } -- cgit v1.2.3 From c8538a7aa5527d02c7191ac5da124efadf6a2827 Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Sun, 1 May 2005 08:59:01 -0700 Subject: [PATCH] remove all kernel BUGs This patch eliminates all kernel BUGs, trims about 35k off the typical kernel, and makes the system slightly faster. Signed-off-by: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/bug.h | 3 +++ include/asm-arm/bug.h | 3 +++ include/asm-arm26/bug.h | 3 +++ include/asm-frv/bug.h | 2 ++ include/asm-generic/bug.h | 19 +++++++++++++++++++ include/asm-i386/bug.h | 5 +++-- include/asm-ia64/bug.h | 5 ++++- include/asm-m68k/bug.h | 3 +++ include/asm-mips/bug.h | 4 +++- include/asm-parisc/bug.h | 2 ++ include/asm-ppc/bug.h | 3 +++ include/asm-ppc64/bug.h | 7 +++++-- include/asm-s390/bug.h | 3 +++ include/asm-sh/bug.h | 3 +++ include/asm-sparc/bug.h | 3 +++ include/asm-sparc64/bug.h | 3 +++ include/asm-v850/bug.h | 3 +++ include/asm-x86_64/bug.h | 4 +++- init/Kconfig | 10 ++++++++++ lib/Kconfig.debug | 1 + 20 files changed, 82 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/asm-alpha/bug.h b/include/asm-alpha/bug.h index ae1e0a5fa492..39a3e2a5017d 100644 --- a/include/asm-alpha/bug.h +++ b/include/asm-alpha/bug.h @@ -1,6 +1,7 @@ #ifndef _ALPHA_BUG_H #define _ALPHA_BUG_H +#ifdef CONFIG_BUG #include /* ??? 
Would be nice to use .gprel32 here, but we can't be sure that the @@ -10,6 +11,8 @@ : : "i" (PAL_bugchk), "i"(__LINE__), "i"(__FILE__)) #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-arm/bug.h b/include/asm-arm/bug.h index 5e91b90a8181..24d11672eb60 100644 --- a/include/asm-arm/bug.h +++ b/include/asm-arm/bug.h @@ -3,6 +3,7 @@ #include +#ifdef CONFIG_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE extern volatile void __bug(const char *file, int line, void *data); @@ -17,6 +18,8 @@ extern volatile void __bug(const char *file, int line, void *data); #endif #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-arm26/bug.h b/include/asm-arm26/bug.h index 920b70533368..7177c7399967 100644 --- a/include/asm-arm26/bug.h +++ b/include/asm-arm26/bug.h @@ -3,6 +3,7 @@ #include +#ifdef CONFIG_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE extern volatile void __bug(const char *file, int line, void *data); /* give file/line information */ @@ -12,6 +13,8 @@ extern volatile void __bug(const char *file, int line, void *data); #endif #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-frv/bug.h b/include/asm-frv/bug.h index 011860b28818..074c0d5770eb 100644 --- a/include/asm-frv/bug.h +++ b/include/asm-frv/bug.h @@ -13,6 +13,7 @@ #include +#ifdef CONFIG_BUG /* * Tell the user there is some problem. */ @@ -45,6 +46,7 @@ do { \ #define HAVE_ARCH_KGDB_BAD_PAGE #define kgdb_bad_page(page) do { kgdb_raise(SIGABRT); } while(0) #endif +#endif #include diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index e5913c3b715a..6e5aaaa9a2fb 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -4,6 +4,7 @@ #include #include +#ifdef CONFIG_BUG #ifndef HAVE_ARCH_BUG #define BUG() do { \ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ @@ -31,4 +32,22 @@ } while (0) #endif +#else /* !CONFIG_BUG */ +#ifndef HAVE_ARCH_BUG +#define BUG() +#endif + +#ifndef HAVE_ARCH_PAGE_BUG +#define PAGE_BUG(page) do { if (page) ; } while (0) +#endif + +#ifndef HAVE_ARCH_BUG_ON +#define BUG_ON(condition) do { if (condition) ; } while(0) +#endif + +#ifndef HAVE_ARCH_WARN_ON +#define WARN_ON(condition) do { if (condition) ; } while(0) +#endif +#endif + #endif diff --git a/include/asm-i386/bug.h b/include/asm-i386/bug.h index 706eb511c330..8f79de19eb94 100644 --- a/include/asm-i386/bug.h +++ b/include/asm-i386/bug.h @@ -9,6 +9,8 @@ * undefined" opcode for parsing in the trap handler. */ +#ifdef CONFIG_BUG +#define HAVE_ARCH_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE #define BUG() \ __asm__ __volatile__( "ud2\n" \ @@ -18,8 +20,7 @@ #else #define BUG() __asm__ __volatile__("ud2\n") #endif +#endif -#define HAVE_ARCH_BUG #include - #endif diff --git a/include/asm-ia64/bug.h b/include/asm-ia64/bug.h index 2c0cd51e8856..3aa0a0a5474b 100644 --- a/include/asm-ia64/bug.h +++ b/include/asm-ia64/bug.h @@ -1,6 +1,7 @@ #ifndef _ASM_IA64_BUG_H #define _ASM_IA64_BUG_H +#ifdef CONFIG_BUG #if (__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1) # define ia64_abort() __builtin_trap() #else @@ -8,8 +9,10 @@ #endif #define BUG() do { printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); ia64_abort(); } while (0) -/* should this BUG should be made generic? */ +/* should this BUG be made generic? 
*/ #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-m68k/bug.h b/include/asm-m68k/bug.h index 3e1d2266fa69..072ce274d537 100644 --- a/include/asm-m68k/bug.h +++ b/include/asm-m68k/bug.h @@ -3,6 +3,7 @@ #include +#ifdef CONFIG_BUG #ifdef CONFIG_DEBUG_BUGVERBOSE #ifndef CONFIG_SUN3 #define BUG() do { \ @@ -22,6 +23,8 @@ #endif #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-mips/bug.h b/include/asm-mips/bug.h index eb94bb96cfbc..3f594b440abc 100644 --- a/include/asm-mips/bug.h +++ b/include/asm-mips/bug.h @@ -3,12 +3,14 @@ #include +#ifdef CONFIG_BUG +#define HAVE_ARCH_BUG #define BUG() \ do { \ __asm__ __volatile__("break %0" : : "i" (BRK_BUG)); \ } while (0) +#endif -#define HAVE_ARCH_BUG #include #endif diff --git a/include/asm-parisc/bug.h b/include/asm-parisc/bug.h index e72f6e2b4b9f..695588da41f8 100644 --- a/include/asm-parisc/bug.h +++ b/include/asm-parisc/bug.h @@ -1,12 +1,14 @@ #ifndef _PARISC_BUG_H #define _PARISC_BUG_H +#ifdef CONFIG_BUG #define HAVE_ARCH_BUG #define BUG() do { \ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ dump_stack(); \ panic("BUG!"); \ } while (0) +#endif #include #endif diff --git a/include/asm-ppc/bug.h b/include/asm-ppc/bug.h index e99c6cb9d618..8b34fd682b0d 100644 --- a/include/asm-ppc/bug.h +++ b/include/asm-ppc/bug.h @@ -14,6 +14,7 @@ struct bug_entry { */ #define BUG_WARNING_TRAP 0x1000000 +#ifdef CONFIG_BUG #define BUG() do { \ __asm__ __volatile__( \ "1: twi 31,0,0\n" \ @@ -50,6 +51,8 @@ struct bug_entry { #define HAVE_ARCH_BUG #define HAVE_ARCH_BUG_ON #define HAVE_ARCH_WARN_ON +#endif + #include #endif diff --git a/include/asm-ppc64/bug.h b/include/asm-ppc64/bug.h index db31dd22233c..169868fa307d 100644 --- a/include/asm-ppc64/bug.h +++ b/include/asm-ppc64/bug.h @@ -26,6 +26,8 @@ struct bug_entry *find_bug(unsigned long bugaddr); */ #define BUG_WARNING_TRAP 0x1000000 +#ifdef CONFIG_BUG + #define BUG() do { \ __asm__ __volatile__( \ "1: twi 31,0,0\n" \ @@ -55,11 +57,12 @@ struct bug_entry *find_bug(unsigned long bugaddr); "i" (__FILE__), "i" (__FUNCTION__)); \ } while (0) -#endif - #define HAVE_ARCH_BUG #define HAVE_ARCH_BUG_ON #define HAVE_ARCH_WARN_ON +#endif +#endif + #include #endif diff --git a/include/asm-s390/bug.h b/include/asm-s390/bug.h index 2b8d6d4dffcf..a2e7430aafa6 100644 --- a/include/asm-s390/bug.h +++ b/include/asm-s390/bug.h @@ -3,12 +3,15 @@ #include +#ifdef CONFIG_BUG #define BUG() do { \ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ __asm__ __volatile__(".long 0"); \ } while (0) #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-sh/bug.h b/include/asm-sh/bug.h index 70172217140f..70508a360cd6 100644 --- a/include/asm-sh/bug.h +++ b/include/asm-sh/bug.h @@ -3,6 +3,7 @@ #include +#ifdef CONFIG_BUG /* * Tell the user there is some problem. 
*/ @@ -12,6 +13,8 @@ } while (0) #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-sparc/bug.h b/include/asm-sparc/bug.h index 0d30a67d87a3..04151208189f 100644 --- a/include/asm-sparc/bug.h +++ b/include/asm-sparc/bug.h @@ -1,6 +1,7 @@ #ifndef _SPARC_BUG_H #define _SPARC_BUG_H +#ifdef CONFIG_BUG /* Only use the inline asm until a gcc release that can handle __builtin_trap * -rob 2003-06-25 * @@ -26,6 +27,8 @@ extern void do_BUG(const char *file, int line); #endif #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-sparc64/bug.h b/include/asm-sparc64/bug.h index 25c5b1dfe378..516bb27f3fc4 100644 --- a/include/asm-sparc64/bug.h +++ b/include/asm-sparc64/bug.h @@ -1,6 +1,7 @@ #ifndef _SPARC64_BUG_H #define _SPARC64_BUG_H +#ifdef CONFIG_BUG #include #ifdef CONFIG_DEBUG_BUGVERBOSE @@ -14,6 +15,8 @@ extern void do_BUG(const char *file, int line); #endif #define HAVE_ARCH_BUG +#endif + #include #endif diff --git a/include/asm-v850/bug.h b/include/asm-v850/bug.h index c778916bf7f2..b0ed2d35f3e8 100644 --- a/include/asm-v850/bug.h +++ b/include/asm-v850/bug.h @@ -14,9 +14,12 @@ #ifndef __V850_BUG_H__ #define __V850_BUG_H__ +#ifdef CONFIG_BUG extern void __bug (void) __attribute__ ((noreturn)); #define BUG() __bug() #define HAVE_ARCH_BUG +#endif + #include #endif /* __V850_BUG_H__ */ diff --git a/include/asm-x86_64/bug.h b/include/asm-x86_64/bug.h index 19aed6e78fec..bdbf66eab6ee 100644 --- a/include/asm-x86_64/bug.h +++ b/include/asm-x86_64/bug.h @@ -15,11 +15,13 @@ struct bug_frame { unsigned short line; } __attribute__((packed)); +#ifdef CONFIG_BUG #define HAVE_ARCH_BUG #define BUG() \ asm volatile("ud2 ; .quad %c1 ; .short %c0" :: \ "i"(__LINE__), "i" (__stringify(__FILE__))) void out_of_line_bug(void); -#include +#endif +#include #endif diff --git a/init/Kconfig b/init/Kconfig index abe2682a6ca6..42dca393b94e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -275,6 +275,16 @@ config KALLSYMS_EXTRA_PASS reported. KALLSYMS_EXTRA_PASS is only a temporary workaround while you wait for kallsyms to be fixed. +config BUG + bool "BUG() support" if EMBEDDED + default y + help + Disabling this option eliminates support for BUG and WARN, reducing + the size of your kernel image and potentially quietly ignoring + numerous fatal conditions. You should only consider disabling this + option for embedded systems with no facilities for reporting errors. + Just say Y. + config BASE_FULL default y bool "Enable full-sized data structures for core" if EMBEDDED diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 426a0cf7b11c..ac23847ce0e3 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -108,6 +108,7 @@ config DEBUG_HIGHMEM config DEBUG_BUGVERBOSE bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EMBEDDED + depends on BUG depends on ARM || ARM26 || M32R || M68K || SPARC32 || SPARC64 || (X86 && !X86_64) || FRV default !EMBEDDED help -- cgit v1.2.3 From cd7619d6bf36564cf54ff7218ef54e558a741913 Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Sun, 1 May 2005 08:59:01 -0700 Subject: [PATCH] Exterminate PAGE_BUG Remove PAGE_BUG - repalce it with BUG and BUG_ON. 
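The conversion is mechanical; the pattern, lifted from the hunks that follow (kernel fragment, not a standalone example):

/* before */
        if (!PageLocked(page))
                PAGE_BUG(page);

/* after: the check folds into BUG_ON(); the only thing lost is the
 * "page BUG for page at %p" printk that PAGE_BUG() used to emit */
        BUG_ON(!PageLocked(page));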
Signed-off-by: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/arm26/mm/small_page.c | 6 ++---- fs/afs/file.c | 3 +-- fs/buffer.c | 3 +-- fs/jffs2/file.c | 3 +-- fs/udf/file.c | 6 ++---- fs/udf/inode.c | 4 ++-- include/asm-cris/page.h | 4 ---- include/asm-generic/bug.h | 11 ----------- include/asm-sh64/bug.h | 4 ---- mm/filemap.c | 3 +-- 10 files changed, 10 insertions(+), 37 deletions(-) (limited to 'include') diff --git a/arch/arm26/mm/small_page.c b/arch/arm26/mm/small_page.c index 77be86cca789..30447106c25f 100644 --- a/arch/arm26/mm/small_page.c +++ b/arch/arm26/mm/small_page.c @@ -92,8 +92,7 @@ static unsigned long __get_small_page(int priority, struct order *order) page = list_entry(order->queue.next, struct page, lru); again: #ifdef PEDANTIC - if (USED_MAP(page) & ~order->all_used) - PAGE_BUG(page); + BUG_ON(USED_MAP(page) & ~order->all_used); #endif offset = ffz(USED_MAP(page)); SET_USED(page, offset); @@ -141,8 +140,7 @@ static void __free_small_page(unsigned long spage, struct order *order) goto non_small; #ifdef PEDANTIC - if (USED_MAP(page) & ~order->all_used) - PAGE_BUG(page); + BUG_ON(USED_MAP(page) & ~order->all_used); #endif spage = spage >> order->shift; diff --git a/fs/afs/file.c b/fs/afs/file.c index 6b6bb7c8abf6..23c125128024 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c @@ -131,8 +131,7 @@ static int afs_file_readpage(struct file *file, struct page *page) vnode = AFS_FS_I(inode); - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); ret = -ESTALE; if (vnode->flags & AFS_VNODE_DELETED) diff --git a/fs/buffer.c b/fs/buffer.c index 188365c79204..792cbacbbf41 100644 --- a/fs/buffer.c +++ b/fs/buffer.c @@ -2078,8 +2078,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) int nr, i; int fully_mapped = 1; - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); blocksize = 1 << inode->i_blkbits; if (!page_has_buffers(page)) create_empty_buffers(page, blocksize, 0); diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c index 0c607c1388f4..771a554701d6 100644 --- a/fs/jffs2/file.c +++ b/fs/jffs2/file.c @@ -79,8 +79,7 @@ static int jffs2_do_readpage_nolock (struct inode *inode, struct page *pg) D2(printk(KERN_DEBUG "jffs2_do_readpage_nolock(): ino #%lu, page at offset 0x%lx\n", inode->i_ino, pg->index << PAGE_CACHE_SHIFT)); - if (!PageLocked(pg)) - PAGE_BUG(pg); + BUG_ON(!PageLocked(pg)); pg_buf = kmap(pg); /* FIXME: Can kmap fail? 
*/ diff --git a/fs/udf/file.c b/fs/udf/file.c index 2faa4172b9f7..bb40d63f328f 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c @@ -49,8 +49,7 @@ static int udf_adinicb_readpage(struct file *file, struct page * page) struct inode *inode = page->mapping->host; char *kaddr; - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); kaddr = kmap(page); memset(kaddr, 0, PAGE_CACHE_SIZE); @@ -67,8 +66,7 @@ static int udf_adinicb_writepage(struct page *page, struct writeback_control *wb struct inode *inode = page->mapping->host; char *kaddr; - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); kaddr = kmap(page); memcpy(UDF_I_DATA(inode) + UDF_I_LENEATTR(inode), kaddr, inode->i_size); diff --git a/fs/udf/inode.c b/fs/udf/inode.c index 0506e1173784..3d68de39fad6 100644 --- a/fs/udf/inode.c +++ b/fs/udf/inode.c @@ -167,8 +167,8 @@ void udf_expand_file_adinicb(struct inode * inode, int newsize, int * err) } page = grab_cache_page(inode->i_mapping, 0); - if (!PageLocked(page)) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); + if (!PageUptodate(page)) { kaddr = kmap(page); diff --git a/include/asm-cris/page.h b/include/asm-cris/page.h index ddd8915e41e6..c767da1ef8f5 100644 --- a/include/asm-cris/page.h +++ b/include/asm-cris/page.h @@ -77,10 +77,6 @@ typedef struct { unsigned long pgprot; } pgprot_t; printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ } while (0) -#define PAGE_BUG(page) do { \ - BUG(); \ -} while (0) - /* Pure 2^n version of get_order */ static inline int get_order(unsigned long size) { diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 6e5aaaa9a2fb..400c2b41896e 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h @@ -12,13 +12,6 @@ } while (0) #endif -#ifndef HAVE_ARCH_PAGE_BUG -#define PAGE_BUG(page) do { \ - printk("page BUG for page at %p\n", page); \ - BUG(); \ -} while (0) -#endif - #ifndef HAVE_ARCH_BUG_ON #define BUG_ON(condition) do { if (unlikely((condition)!=0)) BUG(); } while(0) #endif @@ -37,10 +30,6 @@ #define BUG() #endif -#ifndef HAVE_ARCH_PAGE_BUG -#define PAGE_BUG(page) do { if (page) ; } while (0) -#endif - #ifndef HAVE_ARCH_BUG_ON #define BUG_ON(condition) do { if (condition) ; } while(0) #endif diff --git a/include/asm-sh64/bug.h b/include/asm-sh64/bug.h index 3acd54d59566..5d659ec28e10 100644 --- a/include/asm-sh64/bug.h +++ b/include/asm-sh64/bug.h @@ -17,10 +17,6 @@ BUG(); \ } while(0) -#define PAGE_BUG(page) do { \ - BUG(); \ -} while (0) - #define WARN_ON(condition) do { \ if (unlikely((condition)!=0)) { \ printk("Badness in %s at %s:%d\n", __FUNCTION__, __FILE__, __LINE__); \ diff --git a/mm/filemap.c b/mm/filemap.c index ee79b5d3439f..c085af2332d8 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -123,8 +123,7 @@ void remove_from_page_cache(struct page *page) { struct address_space *mapping = page->mapping; - if (unlikely(!PageLocked(page))) - PAGE_BUG(page); + BUG_ON(!PageLocked(page)); write_lock_irq(&mapping->tree_lock); __remove_from_page_cache(page); -- cgit v1.2.3 From d59745ce3e7aa13856bca16d3bcbb95041775ff6 Mon Sep 17 00:00:00 2001 From: Matt Mackall Date: Sun, 1 May 2005 08:59:02 -0700 Subject: [PATCH] clean up kernel messages Arrange for all kernel printks to be no-ops. Only available if CONFIG_EMBEDDED. This patch saves about 375k on my laptop config and nearly 100k on minimal configs. 
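The mechanism is the usual inline-stub trick; a userspace miniature of the pattern the patch adds to linux/kernel.h (CONFIG_MY_LOG and my_log are made-up names for the illustration):

#include <stdio.h>

#define CONFIG_MY_LOG 0         /* 0 plays the role of !CONFIG_PRINTK */

#if CONFIG_MY_LOG
#define my_log printf
#else
static inline int my_log(const char *fmt, ...)
        __attribute__ ((format (printf, 1, 2)));
static inline int my_log(const char *fmt, ...) { return 0; }
#endif

int main(void)
{
        my_log("value = %d\n", 42);     /* still format-checked, prints nothing */
        return 0;
}

Because the stub is a real (inline) function rather than an empty macro, argument expressions are still evaluated and printf-style format checking is preserved; the compiler can then drop the unused format strings, which is where most of the size saving comes from.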
Signed-off-by: Matt Mackall Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/head.S | 2 ++ include/linux/kernel.h | 9 +++++++++ init/Kconfig | 11 +++++++++++ kernel/printk.c | 27 +++++++++++++++++++++------ 4 files changed, 43 insertions(+), 6 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/head.S b/arch/i386/kernel/head.S index d273fd746192..e966fc8c44c4 100644 --- a/arch/i386/kernel/head.S +++ b/arch/i386/kernel/head.S @@ -380,6 +380,7 @@ rp_sidt: ALIGN ignore_int: cld +#ifdef CONFIG_PRINTK pushl %eax pushl %ecx pushl %edx @@ -400,6 +401,7 @@ ignore_int: popl %edx popl %ecx popl %eax +#endif iret /* diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 7c1cba4a5278..e25b97062ce1 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -115,10 +115,19 @@ extern int __kernel_text_address(unsigned long addr); extern int kernel_text_address(unsigned long addr); extern int session_of_pgrp(int pgrp); +#ifdef CONFIG_PRINTK asmlinkage int vprintk(const char *fmt, va_list args) __attribute__ ((format (printf, 1, 0))); asmlinkage int printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))); +#else +static inline int vprintk(const char *s, va_list args) + __attribute__ ((format (printf, 1, 0))); +static inline int vprintk(const char *s, va_list args) { return 0; } +static inline int printk(const char *s, ...) + __attribute__ ((format (printf, 1, 2))); +static inline int printk(const char *s, ...) { return 0; } +#endif unsigned long int_sqrt(unsigned long); diff --git a/init/Kconfig b/init/Kconfig index 42dca393b94e..40d286d1d118 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -275,6 +275,17 @@ config KALLSYMS_EXTRA_PASS reported. KALLSYMS_EXTRA_PASS is only a temporary workaround while you wait for kallsyms to be fixed. + +config PRINTK + default y + bool "Enable support for printk" if EMBEDDED + help + This option enables normal printk support. Removing it + eliminates most of the message strings from the kernel image + and makes the kernel more or less silent. As this makes it + very difficult to diagnose system problems, saying N here is + strongly discouraged. + config BUG bool "BUG() support" if EMBEDDED default y diff --git a/kernel/printk.c b/kernel/printk.c index 1498689548d1..290a07ce2c8a 100644 --- a/kernel/printk.c +++ b/kernel/printk.c @@ -85,10 +85,6 @@ static int console_locked; */ static DEFINE_SPINLOCK(logbuf_lock); -static char __log_buf[__LOG_BUF_LEN]; -static char *log_buf = __log_buf; -static int log_buf_len = __LOG_BUF_LEN; - #define LOG_BUF_MASK (log_buf_len-1) #define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK]) @@ -99,7 +95,6 @@ static int log_buf_len = __LOG_BUF_LEN; static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */ static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */ static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */ -static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */ /* * Array of consoles built from command line options (console=) @@ -120,6 +115,13 @@ static int preferred_console = -1; /* Flag: console code may call schedule() */ static int console_may_schedule; +#ifdef CONFIG_PRINTK + +static char __log_buf[__LOG_BUF_LEN]; +static char *log_buf = __log_buf; +static int log_buf_len = __LOG_BUF_LEN; +static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */ + /* * Setup a list of consoles. 
Called from init/main.c */ @@ -535,6 +537,7 @@ __setup("time", printk_time_setup); * then changes console_loglevel may break. This is because console_loglevel * is inspected when the actual printing occurs. */ + asmlinkage int printk(const char *fmt, ...) { va_list args; @@ -655,6 +658,18 @@ out: EXPORT_SYMBOL(printk); EXPORT_SYMBOL(vprintk); +#else + +asmlinkage long sys_syslog(int type, char __user * buf, int len) +{ + return 0; +} + +int do_syslog(int type, char __user * buf, int len) { return 0; } +static void call_console_drivers(unsigned long start, unsigned long end) {} + +#endif + /** * acquire_console_sem - lock the console system for exclusive use. * @@ -931,7 +946,7 @@ int unregister_console(struct console * console) return res; } EXPORT_SYMBOL(unregister_console); - + /** * tty_write_message - write a message to a certain tty, not just the console. * -- cgit v1.2.3 From 7f261b5f0dccd53ed3a9a95b55c36e24a698a92a Mon Sep 17 00:00:00 2001 From: Stas Sergeev Date: Sun, 1 May 2005 08:59:02 -0700 Subject: [PATCH] move SA_xxx defines to linux/signal.h The attached patch moves the IRQ-related SA_xxx flags (namely, SA_PROBE, SA_SAMPLE_RANDOM and SA_SHIRQ) from all the arch-specific headers to linux/signal.h. This looks like a left-over after the irq-handling code was consolidated. The code was moved to kernel/irq/*, but the flags are still left per-arch. Right now, adding a new IRQ flag to the arch-specific header, like this patch does: http://cvs.sourceforge.net/viewcvs.py/*checkout*/alsa/alsa-driver/utils/patches/pcsp-kernel-2.6.10-03.diff?rev=1.1 no longer works, it breaks the compilation for all other arches, unless you add that flag to all the other arch-specific headers too. So I think such a clean-up makes sense. Signed-off-by: Stas Sergeev Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/signal.h | 14 -------------- include/asm-arm/signal.h | 11 ----------- include/asm-arm26/signal.h | 11 ----------- include/asm-cris/signal.h | 14 -------------- include/asm-frv/signal.h | 14 -------------- include/asm-h8300/signal.h | 13 ------------- include/asm-i386/signal.h | 14 -------------- include/asm-ia64/signal.h | 10 ---------- include/asm-m32r/signal.h | 14 -------------- include/asm-m68k/signal.h | 13 ------------- include/asm-m68knommu/signal.h | 13 ------------- include/asm-mips/signal.h | 15 --------------- include/asm-parisc/signal.h | 11 ----------- include/asm-ppc/signal.h | 13 ------------- include/asm-ppc64/signal.h | 13 ------------- include/asm-s390/signal.h | 14 -------------- include/asm-sh/signal.h | 14 -------------- include/asm-sh64/signal.h | 14 -------------- include/asm-sparc/signal.h | 8 -------- include/asm-sparc64/signal.h | 8 -------- include/asm-v850/signal.h | 15 --------------- include/asm-x86_64/signal.h | 14 -------------- include/linux/signal.h | 11 +++++++++++ 23 files changed, 11 insertions(+), 280 deletions(-) (limited to 'include') diff --git a/include/asm-alpha/signal.h b/include/asm-alpha/signal.h index 25f98bc5576f..4e0842b415aa 100644 --- a/include/asm-alpha/signal.h +++ b/include/asm-alpha/signal.h @@ -109,20 +109,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 4096 #define SIGSTKSZ 16384 - -#ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x40000000 -#endif - #define SIG_BLOCK 1 /* for blocking signals */ #define SIG_UNBLOCK 2 /* for unblocking signals */ #define SIG_SETMASK 3 /* for setting the signal mask */ diff --git a/include/asm-arm/signal.h b/include/asm-arm/signal.h index b033e5fd60fa..b860dc3c5dc7 100644 --- a/include/asm-arm/signal.h +++ b/include/asm-arm/signal.h @@ -114,18 +114,7 @@ typedef unsigned long sigset_t; #define SIGSTKSZ 8192 #ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE 0x80000000 -#define SA_SAMPLE_RANDOM 0x10000000 #define SA_IRQNOMASK 0x08000000 -#define SA_SHIRQ 0x04000000 #endif #define SIG_BLOCK 0 /* for blocking signals */ diff --git a/include/asm-arm26/signal.h b/include/asm-arm26/signal.h index 6f62e51a2e5a..a1aacefa6562 100644 --- a/include/asm-arm26/signal.h +++ b/include/asm-arm26/signal.h @@ -114,18 +114,7 @@ typedef unsigned long sigset_t; #define SIGSTKSZ 8192 #ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE 0x80000000 -#define SA_SAMPLE_RANDOM 0x10000000 #define SA_IRQNOMASK 0x08000000 -#define SA_SHIRQ 0x04000000 #endif #define SIG_BLOCK 0 /* for blocking signals */ diff --git a/include/asm-cris/signal.h b/include/asm-cris/signal.h index 3f187ec4800a..2330769ba55d 100644 --- a/include/asm-cris/signal.h +++ b/include/asm-cris/signal.h @@ -108,20 +108,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-frv/signal.h b/include/asm-frv/signal.h index f18952f86a80..c930bb176875 100644 --- a/include/asm-frv/signal.h +++ b/include/asm-frv/signal.h @@ -107,20 +107,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-h8300/signal.h b/include/asm-h8300/signal.h index 3a08544a473e..ac3e01bd6396 100644 --- a/include/asm-h8300/signal.h +++ b/include/asm-h8300/signal.h @@ -107,19 +107,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-i386/signal.h b/include/asm-i386/signal.h index 7ef343b6812d..0f082bd1c455 100644 --- a/include/asm-i386/signal.h +++ b/include/asm-i386/signal.h @@ -110,20 +110,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-ia64/signal.h b/include/asm-ia64/signal.h index 660a759744dd..85a577ae9146 100644 --- a/include/asm-ia64/signal.h +++ b/include/asm-ia64/signal.h @@ -114,16 +114,6 @@ #define _NSIG_BPW 64 #define _NSIG_WORDS (_NSIG / _NSIG_BPW) -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 #define SA_PERCPU_IRQ 0x02000000 #endif /* __KERNEL__ */ diff --git a/include/asm-m32r/signal.h b/include/asm-m32r/signal.h index ce46eaea4494..6e55fd421883 100644 --- a/include/asm-m32r/signal.h +++ b/include/asm-m32r/signal.h @@ -114,20 +114,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-m68k/signal.h b/include/asm-m68k/signal.h index 6681bb6a5523..1d016e9f19bf 100644 --- a/include/asm-m68k/signal.h +++ b/include/asm-m68k/signal.h @@ -105,19 +105,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-m68knommu/signal.h b/include/asm-m68knommu/signal.h index 486cbb0dc088..37c9c8a024ba 100644 --- a/include/asm-m68knommu/signal.h +++ b/include/asm-m68knommu/signal.h @@ -105,19 +105,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-mips/signal.h b/include/asm-mips/signal.h index 994987db61be..d81356731eb6 100644 --- a/include/asm-mips/signal.h +++ b/include/asm-mips/signal.h @@ -98,21 +98,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */ #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ flag is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x02000000 - -#endif /* __KERNEL__ */ - #define SIG_BLOCK 1 /* for blocking signals */ #define SIG_UNBLOCK 2 /* for unblocking signals */ #define SIG_SETMASK 3 /* for setting the signal mask */ diff --git a/include/asm-parisc/signal.h b/include/asm-parisc/signal.h index 358f577c8eb8..25cb23ef7dd1 100644 --- a/include/asm-parisc/signal.h +++ b/include/asm-parisc/signal.h @@ -89,17 +89,6 @@ #define _NSIG_BPW BITS_PER_LONG #define _NSIG_WORDS (_NSIG / _NSIG_BPW) -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 - #endif /* __KERNEL__ */ #define SIG_BLOCK 0 /* for blocking signals */ diff --git a/include/asm-ppc/signal.h b/include/asm-ppc/signal.h index 8cc8b88d4edd..d890dabd5a69 100644 --- a/include/asm-ppc/signal.h +++ b/include/asm-ppc/signal.h @@ -99,19 +99,6 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif /* __KERNEL__ */ #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h index fe5401adb41b..a2d7bbb4befd 100644 --- a/include/asm-ppc64/signal.h +++ b/include/asm-ppc64/signal.h @@ -96,19 +96,6 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ diff --git a/include/asm-s390/signal.h b/include/asm-s390/signal.h index f273cdcd1cf6..bfed83a818cc 100644 --- a/include/asm-s390/signal.h +++ b/include/asm-s390/signal.h @@ -117,20 +117,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-sh/signal.h b/include/asm-sh/signal.h index 0a7ff717c245..29f1ac1bf4df 100644 --- a/include/asm-sh/signal.h +++ b/include/asm-sh/signal.h @@ -108,20 +108,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-sh64/signal.h b/include/asm-sh64/signal.h index 77957e9b92d9..864c94ecc98c 100644 --- a/include/asm-sh64/signal.h +++ b/include/asm-sh64/signal.h @@ -107,20 +107,6 @@ typedef struct { #define MINSIGSTKSZ 2048 #define SIGSTKSZ THREAD_SIZE -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-sparc/signal.h b/include/asm-sparc/signal.h index d8211cb6e6b4..f792e10e704f 100644 --- a/include/asm-sparc/signal.h +++ b/include/asm-sparc/signal.h @@ -143,7 +143,6 @@ struct sigstack { #define SA_ONESHOT _SV_RESET #define SA_INTERRUPT 0x10u #define SA_NOMASK 0x20u -#define SA_SHIRQ 0x40u #define SA_NOCLDWAIT 0x100u #define SA_SIGINFO 0x200u @@ -162,11 +161,6 @@ struct sigstack { #ifdef __KERNEL__ /* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * * DJHR * SA_STATIC_ALLOC is used for the SPARC system to indicate that this * interrupt handler's irq structure should be statically allocated @@ -177,8 +171,6 @@ struct sigstack { * statically allocated data.. which is NOT GOOD. * */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART #define SA_STATIC_ALLOC 0x80 #endif diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h index 6428e366c38c..466d021d7038 100644 --- a/include/asm-sparc64/signal.h +++ b/include/asm-sparc64/signal.h @@ -145,7 +145,6 @@ struct sigstack { #define SA_ONESHOT _SV_RESET #define SA_INTERRUPT 0x10u #define SA_NOMASK 0x20u -#define SA_SHIRQ 0x40u #define SA_NOCLDWAIT 0x100u #define SA_SIGINFO 0x200u @@ -165,11 +164,6 @@ struct sigstack { #ifdef __KERNEL__ /* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * * DJHR * SA_STATIC_ALLOC is used for the SPARC system to indicate that this * interrupt handler's irq structure should be statically allocated @@ -180,8 +174,6 @@ struct sigstack { * statically allocated data.. which is NOT GOOD. * */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART #define SA_STATIC_ALLOC 0x80 #endif diff --git a/include/asm-v850/signal.h b/include/asm-v850/signal.h index 407db875899c..ec3566c875d9 100644 --- a/include/asm-v850/signal.h +++ b/include/asm-v850/signal.h @@ -110,21 +110,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 - -#ifdef __KERNEL__ -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. 
- */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif /* __KERNEL__ */ - - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h index 643a20d73765..4987ad8082ba 100644 --- a/include/asm-x86_64/signal.h +++ b/include/asm-x86_64/signal.h @@ -116,20 +116,6 @@ typedef unsigned long sigset_t; #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 -#ifdef __KERNEL__ - -/* - * These values of sa_flags are used only by the kernel as part of the - * irq handling routines. - * - * SA_INTERRUPT is also used by the irq handling routines. - * SA_SHIRQ is for shared interrupt support on PCI and EISA. - */ -#define SA_PROBE SA_ONESHOT -#define SA_SAMPLE_RANDOM SA_RESTART -#define SA_SHIRQ 0x04000000 -#endif - #define SIG_BLOCK 0 /* for blocking signals */ #define SIG_UNBLOCK 1 /* for unblocking signals */ #define SIG_SETMASK 2 /* for setting the signal mask */ diff --git a/include/linux/signal.h b/include/linux/signal.h index 99c97ad026c8..78bfb266e4f7 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -8,6 +8,17 @@ #ifdef __KERNEL__ +/* + * These values of sa_flags are used only by the kernel as part of the + * irq handling routines. + * + * SA_INTERRUPT is also used by the irq handling routines. + * SA_SHIRQ is for shared interrupt support on PCI and EISA. + */ +#define SA_PROBE SA_ONESHOT +#define SA_SAMPLE_RANDOM SA_RESTART +#define SA_SHIRQ 0x04000000 + /* * Real Time signals may be queued. */ -- cgit v1.2.3 From 512345be2549308b8ae8e85a3ff7f6d56a38e5f6 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 1 May 2005 08:59:03 -0700 Subject: [PATCH] Add deprecated_for_modules Add a deprecated_for_modules macro that allows symbols to be deprecated only when used by modules, as suggested by Andrew Morton some months back. Signed-off-by: Paul E. McKenney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 487725cf0d0d..d7378215b851 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -90,6 +90,12 @@ extern void __chk_io_ptr(void __iomem *); # define __deprecated /* unimplemented */ #endif +#ifdef MODULE +#define __deprecated_for_modules __deprecated +#else +#define __deprecated_for_modules +#endif + #ifndef __must_check #define __must_check #endif -- cgit v1.2.3 From 9b06e818985d139fd9e82c28297f7744e1b484e1 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Sun, 1 May 2005 08:59:04 -0700 Subject: [PATCH] Deprecate synchronize_kernel, GPL replacement The synchronize_kernel() primitive is used for quite a few different purposes: waiting for RCU readers, waiting for NMIs, waiting for interrupts, and so on. This makes RCU code harder to read, since synchronize_kernel() might or might not have matching rcu_read_lock()s. This patch creates a new synchronize_rcu() that is to be used for RCU readers and a new synchronize_sched() that is used for the rest. These two new primitives currently have the same implementation, but this might well change with additional real-time support. Both new primitives are GPL-only, the old primitive is deprecated. Signed-off-by: Paul E.
McKenney Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/rcupdate.h | 23 ++++++++++++++++++++--- kernel/rcupdate.c | 16 ++++++++++++++-- 2 files changed, 34 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4d747433916b..fd276adf0fd5 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -157,9 +157,9 @@ static inline int rcu_pending(int cpu) /** * rcu_read_lock - mark the beginning of an RCU read-side critical section. * - * When synchronize_kernel() is invoked on one CPU while other CPUs + * When synchronize_rcu() is invoked on one CPU while other CPUs * are within RCU read-side critical sections, then the - * synchronize_kernel() is guaranteed to block until after all the other + * synchronize_rcu() is guaranteed to block until after all the other * CPUs exit their critical sections. Similarly, if call_rcu() is invoked * on one CPU while other CPUs are within RCU read-side critical * sections, invocation of the corresponding RCU callback is deferred @@ -256,6 +256,21 @@ static inline int rcu_pending(int cpu) (p) = (v); \ }) +/** + * synchronize_sched - block until all CPUs have exited any non-preemptive + * kernel code sequences. + * + * This means that all preempt_disable code sequences, including NMI and + * hardware-interrupt handlers, in progress on entry will have completed + * before this primitive returns. However, this does not guarantee that + * softirq handlers will have completed, since in some kernels + * + * This primitive provides the guarantees made by the (deprecated) + * synchronize_kernel() API. In contrast, synchronize_rcu() only + * guarantees that rcu_read_lock() sections will have completed. + */ +#define synchronize_sched() synchronize_rcu() + extern void rcu_init(void); extern void rcu_check_callbacks(int cpu, int user); extern void rcu_restart_cpu(int cpu); @@ -265,7 +280,9 @@ extern void FASTCALL(call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *head))); extern void FASTCALL(call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *head))); -extern void synchronize_kernel(void); +extern __deprecated_for_modules void synchronize_kernel(void); +extern void synchronize_rcu(void); +void synchronize_idle(void); #endif /* __KERNEL__ */ #endif /* __LINUX_RCUPDATE_H */ diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index ad497722f04f..f436993bd590 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c @@ -444,15 +444,18 @@ static void wakeme_after_rcu(struct rcu_head *head) } /** - * synchronize_kernel - wait until a grace period has elapsed. + * synchronize_rcu - wait until a grace period has elapsed. * * Control will return to the caller some time after a full grace * period has elapsed, in other words after all currently executing RCU * read-side critical sections have completed. RCU read-side critical * sections are delimited by rcu_read_lock() and rcu_read_unlock(), * and may be nested. + * + * If your read-side code is not protected by rcu_read_lock(), do -not- + * use synchronize_rcu(). */ -void synchronize_kernel(void) +void synchronize_rcu(void) { struct rcu_synchronize rcu; @@ -464,7 +467,16 @@ void synchronize_kernel(void) wait_for_completion(&rcu.completion); } +/* + * Deprecated, use synchronize_rcu() or synchronize_sched() instead. + */ +void synchronize_kernel(void) +{ + synchronize_rcu(); +} + module_param(maxbatch, int, 0); EXPORT_SYMBOL(call_rcu); /* WARNING: GPL-only in April 2006. 
*/ EXPORT_SYMBOL(call_rcu_bh); /* WARNING: GPL-only in April 2006. */ +EXPORT_SYMBOL_GPL(synchronize_rcu); EXPORT_SYMBOL(synchronize_kernel); /* WARNING: GPL-only in April 2006. */ -- cgit v1.2.3 From 4750e2c0c59e0c84c6c036b3d96ebd88365ae7ee Mon Sep 17 00:00:00 2001 From: Joe Korty Date: Sun, 1 May 2005 08:59:06 -0700 Subject: [PATCH] add EOWNERDEAD and ENOTRECOVERABLE version 2 Add EOWNERDEAD and ENOTRECOVERABLE to all architectures. This is to support the upcoming patches for robust mutexes. We normally don't reserve parts of the name/number space for external patches, but robust mutexes are sufficiently popular and important to justify it in this case. Signed-off-by: Joe Korty Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/errno.h | 4 ++++ include/asm-generic/errno.h | 4 ++++ include/asm-mips/errno.h | 4 ++++ include/asm-parisc/errno.h | 4 ++++ include/asm-sparc/errno.h | 4 ++++ include/asm-sparc64/errno.h | 4 ++++ 6 files changed, 24 insertions(+) (limited to 'include') diff --git a/include/asm-alpha/errno.h b/include/asm-alpha/errno.h index c85ab6b9d6c6..69e2655249d2 100644 --- a/include/asm-alpha/errno.h +++ b/include/asm-alpha/errno.h @@ -116,4 +116,8 @@ #define EKEYREVOKED 134 /* Key has been revoked */ #define EKEYREJECTED 135 /* Key was rejected by service */ +/* for robust mutexes */ +#define EOWNERDEAD 136 /* Owner died */ +#define ENOTRECOVERABLE 137 /* State not recoverable */ + #endif diff --git a/include/asm-generic/errno.h b/include/asm-generic/errno.h index 4dd2384bc38d..e8852c092fea 100644 --- a/include/asm-generic/errno.h +++ b/include/asm-generic/errno.h @@ -102,4 +102,8 @@ #define EKEYREVOKED 128 /* Key has been revoked */ #define EKEYREJECTED 129 /* Key was rejected by service */ +/* for robust mutexes */ +#define EOWNERDEAD 130 /* Owner died */ +#define ENOTRECOVERABLE 131 /* State not recoverable */ + #endif diff --git a/include/asm-mips/errno.h b/include/asm-mips/errno.h index 2b458f9538cd..3c0d840e4577 100644 --- a/include/asm-mips/errno.h +++ b/include/asm-mips/errno.h @@ -115,6 +115,10 @@ #define EKEYREVOKED 163 /* Key has been revoked */ #define EKEYREJECTED 164 /* Key was rejected by service */ +/* for robust mutexes */ +#define EOWNERDEAD 165 /* Owner died */ +#define ENOTRECOVERABLE 166 /* State not recoverable */ + #define EDQUOT 1133 /* Quota exceeded */ #ifdef __KERNEL__ diff --git a/include/asm-parisc/errno.h b/include/asm-parisc/errno.h index a10f109770f1..08464c405471 100644 --- a/include/asm-parisc/errno.h +++ b/include/asm-parisc/errno.h @@ -115,5 +115,9 @@ #define ENOTSUP 252 /* Function not implemented (POSIX.4 / HPUX) */ #define ECANCELLED 253 /* aio request was canceled before complete (POSIX.4 / HPUX) */ +/* for robust mutexes */ +#define EOWNERDEAD 254 /* Owner died */ +#define ENOTRECOVERABLE 255 /* State not recoverable */ + #endif diff --git a/include/asm-sparc/errno.h b/include/asm-sparc/errno.h index 8c01c5f3b06d..ed41c8bac1fa 100644 --- a/include/asm-sparc/errno.h +++ b/include/asm-sparc/errno.h @@ -107,4 +107,8 @@ #define EKEYREVOKED 130 /* Key has been revoked */ #define EKEYREJECTED 131 /* Key was rejected by service */ +/* for robust mutexes */ +#define EOWNERDEAD 132 /* Owner died */ +#define ENOTRECOVERABLE 133 /* State not recoverable */ + #endif diff --git a/include/asm-sparc64/errno.h b/include/asm-sparc64/errno.h index cc98a73b55a7..ea3509ee1b0b 100644 --- a/include/asm-sparc64/errno.h +++ b/include/asm-sparc64/errno.h @@ -107,4 +107,8 @@ #define EKEYREVOKED 130 /* Key has been revoked */ 
#define EKEYREJECTED 131 /* Key was rejected by service */ +/* for robust mutexes */ +#define EOWNERDEAD 132 /* Owner died */ +#define ENOTRECOVERABLE 133 /* State not recoverable */ + #endif /* !(_SPARC64_ERRNO_H) */ -- cgit v1.2.3 From a71c1ab50a2a0f4dd9834bf5a917a2f064535c6b Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 1 May 2005 08:59:08 -0700 Subject: [PATCH] consolidate SIGEV_PAD_SIZE Discussing with Matthew Wilcox some of his outstanding patches lead me to this patch (among others). The preamble in struct sigevent can be expressed independently of the architecture. Also use __ARCH_SI_PREAMBLE_SIZE on ia64. Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/asm-alpha/siginfo.h | 2 -- include/asm-generic/siginfo.h | 13 ++++++++++--- include/asm-ia64/siginfo.h | 4 +--- include/asm-mips/siginfo.h | 2 -- include/asm-s390/siginfo.h | 6 ------ include/asm-sparc64/siginfo.h | 2 -- include/asm-x86_64/siginfo.h | 2 -- 7 files changed, 11 insertions(+), 20 deletions(-) (limited to 'include') diff --git a/include/asm-alpha/siginfo.h b/include/asm-alpha/siginfo.h index 86bcab59c52b..9822362a8424 100644 --- a/include/asm-alpha/siginfo.h +++ b/include/asm-alpha/siginfo.h @@ -4,8 +4,6 @@ #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_TRAPNO -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) - #include #endif diff --git a/include/asm-generic/siginfo.h b/include/asm-generic/siginfo.h index 9cac8e8dde51..8786e01e0db8 100644 --- a/include/asm-generic/siginfo.h +++ b/include/asm-generic/siginfo.h @@ -236,11 +236,18 @@ typedef struct siginfo { #define SIGEV_THREAD 2 /* deliver via thread creation */ #define SIGEV_THREAD_ID 4 /* deliver to thread */ -#define SIGEV_MAX_SIZE 64 -#ifndef SIGEV_PAD_SIZE -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) +/* + * This works because the alignment is ok on all current architectures + * but we leave open this being overridden in the future + */ +#ifndef __ARCH_SIGEV_PREAMBLE_SIZE +#define __ARCH_SIGEV_PREAMBLE_SIZE (sizeof(int) * 2 + sizeof(sigval_t)) #endif +#define SIGEV_MAX_SIZE 64 +#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE - __ARCH_SIGEV_PREAMBLE_SIZE) \ + / sizeof(int)) + typedef struct sigevent { sigval_t sigev_value; int sigev_signo; diff --git a/include/asm-ia64/siginfo.h b/include/asm-ia64/siginfo.h index d55f139cbcdc..9294e4b0c8bc 100644 --- a/include/asm-ia64/siginfo.h +++ b/include/asm-ia64/siginfo.h @@ -8,9 +8,7 @@ * David Mosberger-Tang , Hewlett-Packard Co */ -#define SI_PAD_SIZE ((SI_MAX_SIZE/sizeof(int)) - 4) - -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) +#define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define HAVE_ARCH_SIGINFO_T #define HAVE_ARCH_COPY_SIGINFO diff --git a/include/asm-mips/siginfo.h b/include/asm-mips/siginfo.h index 8ddd3c99bcf7..a0e26e6c994d 100644 --- a/include/asm-mips/siginfo.h +++ b/include/asm-mips/siginfo.h @@ -11,8 +11,6 @@ #include -#define SIGEV_HEAD_SIZE (sizeof(long) + 2*sizeof(int)) -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE-SIGEV_HEAD_SIZE) / sizeof(int)) #undef __ARCH_SI_TRAPNO /* exception code needs to fill this ... 
*/ #define HAVE_ARCH_SIGINFO_T diff --git a/include/asm-s390/siginfo.h b/include/asm-s390/siginfo.h index 72303537b732..e0ff1ab054be 100644 --- a/include/asm-s390/siginfo.h +++ b/include/asm-s390/siginfo.h @@ -13,12 +13,6 @@ #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #endif -#ifdef CONFIG_ARCH_S390X -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) -#else -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 3) -#endif - #include #endif diff --git a/include/asm-sparc64/siginfo.h b/include/asm-sparc64/siginfo.h index 7160449e7cab..df17e47abc1c 100644 --- a/include/asm-sparc64/siginfo.h +++ b/include/asm-sparc64/siginfo.h @@ -3,8 +3,6 @@ #define SI_PAD_SIZE32 ((SI_MAX_SIZE/sizeof(int)) - 3) -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) - #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) #define __ARCH_SI_TRAPNO #define __ARCH_SI_BAND_T int diff --git a/include/asm-x86_64/siginfo.h b/include/asm-x86_64/siginfo.h index 7bc15985f124..d09a1e6e7246 100644 --- a/include/asm-x86_64/siginfo.h +++ b/include/asm-x86_64/siginfo.h @@ -3,8 +3,6 @@ #define __ARCH_SI_PREAMBLE_SIZE (4 * sizeof(int)) -#define SIGEV_PAD_SIZE ((SIGEV_MAX_SIZE/sizeof(int)) - 4) - #include #endif -- cgit v1.2.3 From e49332bd12e92da2df6d002f857ec62675ba2648 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 1 May 2005 08:59:08 -0700 Subject: [PATCH] misc verify_area cleanups There were still a few comments left referring to verify_area, and two functions, verify_area_skas & verify_area_tt, that just wrap corresponding access_ok_skas & access_ok_tt functions, just like verify_area does for access_ok - deprecate those. There were also a few places that still used verify_area in commented-out code; fix those up to use access_ok. After applying this one there should not be anything left but finally removing verify_area completely, which will happen after a kernel release or two. Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/kernel/vm86.c | 2 +- arch/um/kernel/skas/include/uaccess-skas.h | 2 +- arch/um/kernel/tt/include/uaccess-tt.h | 2 +- drivers/char/dtlk.c | 2 +- drivers/char/specialix.c | 11 ++++------- include/asm-frv/pgtable.h | 4 ++-- include/asm-i386/checksum.h | 2 +- include/asm-i386/pgtable.h | 4 ++-- include/asm-parisc/uaccess.h | 2 +- include/asm-sh/checksum.h | 2 +- include/asm-sh64/checksum.h | 2 +- include/asm-sparc/uaccess.h | 2 +- net/8021q/vlanproc.c | 2 +- net/atm/common.c | 4 ++-- net/core/iovec.c | 2 +- net/wanrouter/wanmain.c | 4 ++-- 16 files changed, 23 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/arch/i386/kernel/vm86.c b/arch/i386/kernel/vm86.c index d3b4c540eb64..ec0f68ce6886 100644 --- a/arch/i386/kernel/vm86.c +++ b/arch/i386/kernel/vm86.c @@ -222,7 +222,7 @@ asmlinkage int sys_vm86(struct pt_regs regs) goto out; case VM86_PLUS_INSTALL_CHECK: /* NOTE: on old vm86 stuff this will return the error - from verify_area(), because the subfunction is + from access_ok(), because the subfunction is interpreted as (invalid) address to vm86_struct. So the installation check works.
*/ diff --git a/arch/um/kernel/skas/include/uaccess-skas.h b/arch/um/kernel/skas/include/uaccess-skas.h index 11986c9b9ddf..c35620385da0 100644 --- a/arch/um/kernel/skas/include/uaccess-skas.h +++ b/arch/um/kernel/skas/include/uaccess-skas.h @@ -18,7 +18,7 @@ ((unsigned long) (addr) + (size) <= FIXADDR_USER_END) && \ ((unsigned long) (addr) + (size) >= (unsigned long)(addr)))) -static inline int verify_area_skas(int type, const void * addr, +static inline int __deprecated verify_area_skas(int type, const void * addr, unsigned long size) { return(access_ok_skas(type, addr, size) ? 0 : -EFAULT); diff --git a/arch/um/kernel/tt/include/uaccess-tt.h b/arch/um/kernel/tt/include/uaccess-tt.h index f0bad010cebd..bb69d6b7d022 100644 --- a/arch/um/kernel/tt/include/uaccess-tt.h +++ b/arch/um/kernel/tt/include/uaccess-tt.h @@ -33,7 +33,7 @@ extern unsigned long uml_physmem; (((unsigned long) (addr) <= ((unsigned long) (addr) + (size))) && \ (under_task_size(addr, size) || is_stack(addr, size)))) -static inline int verify_area_tt(int type, const void * addr, +static inline int __deprecated verify_area_tt(int type, const void * addr, unsigned long size) { return(access_ok_tt(type, addr, size) ? 0 : -EFAULT); diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 903e4c3cc209..a229915ce1b2 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -52,7 +52,7 @@ #define KERNEL #include #include -#include /* for verify_area */ +#include #include /* for -EBUSY */ #include /* for request_region */ #include /* for loops_per_jiffy */ diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index c789d5ceac76..50e0b612a8a2 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c @@ -1987,10 +1987,9 @@ static inline int sx_set_serial_info(struct specialix_port * port, func_enter(); /* - error = verify_area(VERIFY_READ, (void *) newinfo, sizeof(tmp)); - if (error) { + if (!access_ok(VERIFY_READ, (void *) newinfo, sizeof(tmp))) { func_exit(); - return error; + return -EFAULT; } */ if (copy_from_user(&tmp, newinfo, sizeof(tmp))) { @@ -2046,14 +2045,12 @@ static inline int sx_get_serial_info(struct specialix_port * port, { struct serial_struct tmp; struct specialix_board *bp = port_Board(port); - // int error; func_enter(); /* - error = verify_area(VERIFY_WRITE, (void *) retinfo, sizeof(tmp)); - if (error) - return error; + if (!access_ok(VERIFY_WRITE, (void *) retinfo, sizeof(tmp))) + return -EFAULT; */ memset(&tmp, 0, sizeof(tmp)); diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h index 3c6d42a22dfe..d0a9c2f9c13e 100644 --- a/include/asm-frv/pgtable.h +++ b/include/asm-frv/pgtable.h @@ -349,9 +349,9 @@ static inline pmd_t *pmd_offset(pud_t *dir, unsigned long address) /* * Define this to warn about kernel memory accesses that are - * done without a 'verify_area(VERIFY_WRITE,..)' + * done without a 'access_ok(VERIFY_WRITE,..)' */ -#undef TEST_VERIFY_AREA +#undef TEST_ACCESS_OK #define pte_present(x) (pte_val(x) & _PAGE_PRESENT) #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0) diff --git a/include/asm-i386/checksum.h b/include/asm-i386/checksum.h index d76a5f081c91..641342002bcd 100644 --- a/include/asm-i386/checksum.h +++ b/include/asm-i386/checksum.h @@ -33,7 +33,7 @@ asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsi * passed in an incorrect kernel address to one of these functions. * * If you use these functions directly please don't forget the - * verify_area(). + * access_ok(). 
*/ static __inline__ unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, diff --git a/include/asm-i386/pgtable.h b/include/asm-i386/pgtable.h index 5c725425d863..8d60c2b4b003 100644 --- a/include/asm-i386/pgtable.h +++ b/include/asm-i386/pgtable.h @@ -193,9 +193,9 @@ extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC; /* * Define this if things work differently on an i386 and an i486: * it will (on an i486) warn about kernel memory accesses that are - * done without a 'verify_area(VERIFY_WRITE,..)' + * done without a 'access_ok(VERIFY_WRITE,..)' */ -#undef TEST_VERIFY_AREA +#undef TEST_ACCESS_OK /* The boot page tables (all created as a single array) */ extern unsigned long pg0[]; diff --git a/include/asm-parisc/uaccess.h b/include/asm-parisc/uaccess.h index 8a08423b7570..c1b5bdea53ee 100644 --- a/include/asm-parisc/uaccess.h +++ b/include/asm-parisc/uaccess.h @@ -24,7 +24,7 @@ /* * Note that since kernel addresses are in a separate address space on - * parisc, we don't need to do anything for access_ok() or verify_area(). + * parisc, we don't need to do anything for access_ok(). * We just let the page fault handler do the right thing. This also means * that put_user is the same as __put_user, etc. */ diff --git a/include/asm-sh/checksum.h b/include/asm-sh/checksum.h index 5113c7f8a739..5ebd0f24299e 100644 --- a/include/asm-sh/checksum.h +++ b/include/asm-sh/checksum.h @@ -42,7 +42,7 @@ asmlinkage unsigned int csum_partial_copy_generic(const unsigned char *src, unsi * passed in an incorrect kernel address to one of these functions. * * If you use these functions directly please don't forget the - * verify_area(). + * access_ok(). */ static __inline__ unsigned int csum_partial_copy_nocheck (const unsigned char *src, unsigned char *dst, diff --git a/include/asm-sh64/checksum.h b/include/asm-sh64/checksum.h index aa3911a99490..fd034e9ae6e3 100644 --- a/include/asm-sh64/checksum.h +++ b/include/asm-sh64/checksum.h @@ -34,7 +34,7 @@ asmlinkage unsigned int csum_partial(const unsigned char *buff, int len, * passed in an incorrect kernel address to one of these functions. * * If you use these functions directly please don't forget the - * verify_area(). + * access_ok(). */ diff --git a/include/asm-sparc/uaccess.h b/include/asm-sparc/uaccess.h index 3f47889883b7..f461144067ee 100644 --- a/include/asm-sparc/uaccess.h +++ b/include/asm-sparc/uaccess.h @@ -18,7 +18,7 @@ #ifndef __ASSEMBLY__ -/* Sparc is not segmented, however we need to be able to fool verify_area() +/* Sparc is not segmented, however we need to be able to fool access_ok() * when doing system calls from kernel mode legitimately. * * "For historical reasons, these macros are grossly misnamed." -Linus diff --git a/net/8021q/vlanproc.c b/net/8021q/vlanproc.c index c32d27af0a3f..7b214cffc956 100644 --- a/net/8021q/vlanproc.c +++ b/net/8021q/vlanproc.c @@ -23,7 +23,7 @@ #include /* return codes */ #include #include /* kmalloc(), kfree() */ -#include /* verify_area(), etc. */ +#include #include /* inline mem*, str* functions */ #include /* __initfunc et al. */ #include /* htons(), etc. 
*/ diff --git a/net/atm/common.c b/net/atm/common.c index 6d16be334ea0..e93e838069e8 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -12,7 +12,7 @@ #include /* SOL_SOCKET */ #include /* error codes */ #include -#include /* verify_area */ +#include #include #include /* struct timeval */ #include @@ -540,7 +540,7 @@ int vcc_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *m, error = -EMSGSIZE; goto out; } - /* verify_area is done by net/socket.c */ + eff = (size+3) & ~3; /* align to word boundary */ prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); error = 0; diff --git a/net/core/iovec.c b/net/core/iovec.c index d57ace949ab8..65e4b56fbc77 100644 --- a/net/core/iovec.c +++ b/net/core/iovec.c @@ -33,7 +33,7 @@ * Verify iovec. The caller must ensure that the iovec is big enough * to hold the message iovec. * - * Save time not doing verify_area. copy_*_user will make this work + * Save time not doing access_ok. copy_*_user will make this work * in any case. */ diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c index 956c17f6c548..d6844ac226f5 100644 --- a/net/wanrouter/wanmain.c +++ b/net/wanrouter/wanmain.c @@ -48,8 +48,8 @@ #include #include #include /* support for loadable modules */ -#include /* kmalloc(), kfree() */ -#include /* verify_area(), etc. */ +#include /* kmalloc(), kfree() */ +#include #include /* inline mem*, str* functions */ #include /* htons(), etc. */ -- cgit v1.2.3 From f78fc874f42f63a460bcebc2aeb98db526280d1c Mon Sep 17 00:00:00 2001 From: Vinay K Nallamothu Date: Sun, 1 May 2005 08:59:09 -0700 Subject: [PATCH] __attribute__ placement fixes The variable attributes "packed" and "align", when used with a struct, should have the following order: struct ... {...} __attribute__((packed)) var; This patch fixes a few instances where the variable and attributes are placed the other way around and had no effect. Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/net/gt96100eth.h | 4 ++-- include/asm-m68knommu/MC68328.h | 2 +- include/asm-m68knommu/MC68EZ328.h | 2 +- include/asm-m68knommu/MC68VZ328.h | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) (limited to 'include') diff --git a/drivers/net/gt96100eth.h b/drivers/net/gt96100eth.h index 2f4bfd4dacbe..395869c5ed3e 100644 --- a/drivers/net/gt96100eth.h +++ b/drivers/net/gt96100eth.h @@ -214,7 +214,7 @@ typedef struct { u32 cmdstat; u32 next; u32 buff_ptr; -} gt96100_td_t __attribute__ ((packed)); +} __attribute__ ((packed)) gt96100_td_t; typedef struct { #ifdef DESC_BE @@ -227,7 +227,7 @@ typedef struct { u32 cmdstat; u32 next; u32 buff_ptr; -} gt96100_rd_t __attribute__ ((packed)); +} __attribute__ ((packed)) gt96100_rd_t; /* Values for the Tx command-status descriptor entry.
*/ diff --git a/include/asm-m68knommu/MC68328.h b/include/asm-m68knommu/MC68328.h index 4f5a9845f5be..a337e56d09bf 100644 --- a/include/asm-m68knommu/MC68328.h +++ b/include/asm-m68knommu/MC68328.h @@ -993,7 +993,7 @@ typedef volatile struct { volatile unsigned short int pad1; volatile unsigned short int pad2; volatile unsigned short int pad3; -} m68328_uart __attribute__((packed)); +} __attribute__((packed)) m68328_uart; /********** diff --git a/include/asm-m68knommu/MC68EZ328.h b/include/asm-m68knommu/MC68EZ328.h index 801933da4c70..69b7f9139e5e 100644 --- a/include/asm-m68knommu/MC68EZ328.h +++ b/include/asm-m68knommu/MC68EZ328.h @@ -815,7 +815,7 @@ typedef volatile struct { volatile unsigned short int nipr; volatile unsigned short int pad1; volatile unsigned short int pad2; -} m68328_uart __attribute__((packed)); +} __attribute__((packed)) m68328_uart; /********** diff --git a/include/asm-m68knommu/MC68VZ328.h b/include/asm-m68knommu/MC68VZ328.h index df74322f37ed..2b9bf626a0a5 100644 --- a/include/asm-m68knommu/MC68VZ328.h +++ b/include/asm-m68knommu/MC68VZ328.h @@ -909,7 +909,7 @@ typedef struct { volatile unsigned short int nipr; volatile unsigned short int hmark; volatile unsigned short int unused; -} m68328_uart __attribute__((packed)); +} __attribute__((packed)) m68328_uart; -- cgit v1.2.3 From 7d87e14c236d6c4cab66d87cf0bc1e0f0375d308 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Sun, 1 May 2005 08:59:12 -0700 Subject: [PATCH] consolidate sys_shmat Signed-off-by: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/alpha/kernel/osf_sys.c | 16 ---------------- arch/alpha/kernel/systbls.S | 2 +- arch/arm/kernel/sys_arm.c | 12 ------------ arch/ia64/kernel/entry.S | 2 +- arch/ia64/kernel/sys_ia64.c | 14 -------------- arch/mips/kernel/syscall.c | 16 ---------------- arch/parisc/kernel/sys_parisc.c | 11 ----------- arch/parisc/kernel/syscall_table.S | 2 +- arch/sh64/kernel/sys_sh64.c | 15 --------------- arch/sh64/kernel/syscalls.S | 2 +- arch/um/include/sysdep-x86_64/syscalls.h | 1 - arch/um/sys-x86_64/syscalls.c | 8 -------- arch/x86_64/kernel/sys_x86_64.c | 6 ------ include/asm-x86_64/unistd.h | 2 +- include/linux/syscalls.h | 3 +-- ipc/shm.c | 14 ++++++++++++++ kernel/sys_ni.c | 1 + 17 files changed, 21 insertions(+), 106 deletions(-) (limited to 'include') diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c index b5d0fd2bb10a..64e450dddb49 100644 --- a/arch/alpha/kernel/osf_sys.c +++ b/arch/alpha/kernel/osf_sys.c @@ -457,22 +457,6 @@ osf_getdomainname(char __user *name, int namelen) return 0; } -asmlinkage long -osf_shmat(int shmid, void __user *shmaddr, int shmflg) -{ - unsigned long raddr; - long err; - - err = do_shmat(shmid, shmaddr, shmflg, &raddr); - - /* - * This works because all user-level addresses are - * non-negative longs! - */ - return err ? err : (long)raddr; -} - - /* * The following stuff should move into a header file should it ever * be labeled "officially supported." 
Right now, there is just enough diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index 3864b33562ee..052120882876 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -227,7 +227,7 @@ sys_call_table: .quad sys_semop .quad osf_utsname .quad sys_lchown - .quad osf_shmat + .quad sys_shmat .quad sys_shmctl /* 210 */ .quad sys_shmdt .quad sys_shmget diff --git a/arch/arm/kernel/sys_arm.c b/arch/arm/kernel/sys_arm.c index 7ba6342cf93d..ef32577da304 100644 --- a/arch/arm/kernel/sys_arm.c +++ b/arch/arm/kernel/sys_arm.c @@ -227,18 +227,6 @@ asmlinkage int sys_ipc(uint call, int first, int second, int third, } } -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg, - unsigned long __user *addr) -{ - unsigned long ret; - long err; - - err = do_shmat(shmid, shmaddr, shmflg, &ret); - if (err == 0) - err = put_user(ret, addr); - return err; -} - /* Fork a new task - this creates a new program thread. * This is called indirectly via a small wrapper */ diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index bd86fea49a0c..d3f093820bc7 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -1417,7 +1417,7 @@ sys_call_table: data8 sys_msgrcv data8 sys_msgctl data8 sys_shmget - data8 ia64_shmat + data8 sys_shmat data8 sys_shmdt // 1115 data8 sys_shmctl data8 sys_syslog diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index 3ac216e1c8bb..a8cf6d8a509c 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -92,20 +92,6 @@ sys_getpagesize (void) return PAGE_SIZE; } -asmlinkage unsigned long -ia64_shmat (int shmid, void __user *shmaddr, int shmflg) -{ - unsigned long raddr; - int retval; - - retval = do_shmat(shmid, shmaddr, shmflg, &raddr); - if (retval < 0) - return retval; - - force_successful_syscall_return(); - return raddr; -} - asmlinkage unsigned long ia64_brk (unsigned long brk) { diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 598bfe7426a2..ae2a1312d4ef 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -373,22 +373,6 @@ asmlinkage int sys_ipc (uint call, int first, int second, } } -/* - * Native ABI that is O32 or N64 version - */ -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, - int shmflg, unsigned long *addr) -{ - unsigned long raddr; - int err; - - err = do_shmat(shmid, shmaddr, shmflg, &raddr); - if (err) - return err; - - return put_user(raddr, addr); -} - /* * No implemented yet ... 
*/ diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c index 7958cd8c8bf8..d15a1d53e101 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -161,17 +161,6 @@ asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, } } -long sys_shmat_wrapper(int shmid, char __user *shmaddr, int shmflag) -{ - unsigned long raddr; - int r; - - r = do_shmat(shmid, shmaddr, shmflag, &raddr); - if (r < 0) - return r; - return raddr; -} - /* Fucking broken ABI */ #ifdef CONFIG_64BIT diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 779b537100ec..dcfa4d3d0e7d 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -297,7 +297,7 @@ ENTRY_DIFF(msgrcv) ENTRY_SAME(msgget) /* 190 */ ENTRY_SAME(msgctl) - ENTRY_SAME(shmat_wrapper) + ENTRY_SAME(shmat) ENTRY_SAME(shmdt) ENTRY_SAME(shmget) ENTRY_SAME(shmctl) /* 195 */ diff --git a/arch/sh64/kernel/sys_sh64.c b/arch/sh64/kernel/sys_sh64.c index 4546845b9caf..58ff7d522d81 100644 --- a/arch/sh64/kernel/sys_sh64.c +++ b/arch/sh64/kernel/sys_sh64.c @@ -283,18 +283,3 @@ asmlinkage int sys_uname(struct old_utsname * name) up_read(&uts_sem); return err?-EFAULT:0; } - -/* Copy from mips version */ -asmlinkage long sys_shmatcall(int shmid, char __user *shmaddr, - int shmflg) -{ - unsigned long raddr; - int err; - - err = do_shmat(shmid, shmaddr, shmflg, &raddr); - if (err) - return err; - - err = raddr; - return err; -} diff --git a/arch/sh64/kernel/syscalls.S b/arch/sh64/kernel/syscalls.S index 8ed417df3dc6..6aabc63e4518 100644 --- a/arch/sh64/kernel/syscalls.S +++ b/arch/sh64/kernel/syscalls.S @@ -268,7 +268,7 @@ sys_call_table: .long sys_msgrcv .long sys_msgget .long sys_msgctl - .long sys_shmatcall + .long sys_shmat .long sys_shmdt /* 245 */ .long sys_shmget .long sys_shmctl diff --git a/arch/um/include/sysdep-x86_64/syscalls.h b/arch/um/include/sysdep-x86_64/syscalls.h index b56b335c3514..67923cca5691 100644 --- a/arch/um/include/sysdep-x86_64/syscalls.h +++ b/arch/um/include/sysdep-x86_64/syscalls.h @@ -26,7 +26,6 @@ extern syscall_handler_t *ia32_sys_call_table[]; extern long old_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff); -extern syscall_handler_t wrap_sys_shmat; extern syscall_handler_t sys_modify_ldt; extern syscall_handler_t sys_arch_prctl; diff --git a/arch/um/sys-x86_64/syscalls.c b/arch/um/sys-x86_64/syscalls.c index 15768c96ceb4..ab4b0abf8af3 100644 --- a/arch/um/sys-x86_64/syscalls.c +++ b/arch/um/sys-x86_64/syscalls.c @@ -14,14 +14,6 @@ #include "asm/prctl.h" /* XXX This should get the constants from libc */ #include "choose-mode.h" -/* XXX: copied from x86-64: arch/x86_64/kernel/sys_x86_64.c */ -asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg) -{ - unsigned long raddr; - - return do_shmat(shmid, shmaddr, shmflg, &raddr) ?: (long) raddr; -} - asmlinkage long sys_uname64(struct new_utsname __user * name) { int err; diff --git a/arch/x86_64/kernel/sys_x86_64.c b/arch/x86_64/kernel/sys_x86_64.c index 477d8be57d64..dbebd5ccba6b 100644 --- a/arch/x86_64/kernel/sys_x86_64.c +++ b/arch/x86_64/kernel/sys_x86_64.c @@ -152,12 +152,6 @@ asmlinkage long sys_uname(struct new_utsname __user * name) return err ? 
-EFAULT : 0; } -asmlinkage long wrap_sys_shmat(int shmid, char __user *shmaddr, int shmflg) -{ - unsigned long raddr; - return do_shmat(shmid,shmaddr,shmflg,&raddr) ?: (long)raddr; -} - asmlinkage long sys_time64(long __user * tloc) { struct timeval now; diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h index 3d65d240dc95..3c9af6fd4332 100644 --- a/include/asm-x86_64/unistd.h +++ b/include/asm-x86_64/unistd.h @@ -76,7 +76,7 @@ __SYSCALL(__NR_madvise, sys_madvise) #define __NR_shmget 29 __SYSCALL(__NR_shmget, sys_shmget) #define __NR_shmat 30 -__SYSCALL(__NR_shmat, wrap_sys_shmat) +__SYSCALL(__NR_shmat, sys_shmat) #define __NR_shmctl 31 __SYSCALL(__NR_shmctl, sys_shmctl) diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 757cd9be7743..c39f6f72cbbc 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -456,8 +456,7 @@ asmlinkage long sys_semctl(int semid, int semnum, int cmd, union semun arg); asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, unsigned nsops, const struct timespec __user *timeout); -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, - int shmflg, unsigned long __user *addr); +asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); asmlinkage long sys_shmget(key_t key, size_t size, int flag); asmlinkage long sys_shmdt(char __user *shmaddr); asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); diff --git a/ipc/shm.c b/ipc/shm.c index 06cd5c91056f..cce022435dbc 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -28,6 +28,8 @@ #include #include #include +#include + #include #include "util.h" @@ -771,6 +773,18 @@ out: return err; } +asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg) +{ + unsigned long ret; + long err; + + err = do_shmat(shmid, shmaddr, shmflg, &ret); + if (err) + return err; + force_successful_syscall_return(); + return (long)ret; +} + /* * detach and kill segment if marked destroyed. * The work is done in shm_close. diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c index 1802a311dd3f..0dda70ed1f98 100644 --- a/kernel/sys_ni.c +++ b/kernel/sys_ni.c @@ -52,6 +52,7 @@ cond_syscall(sys_msgsnd); cond_syscall(sys_msgrcv); cond_syscall(sys_msgctl); cond_syscall(sys_shmget); +cond_syscall(sys_shmat); cond_syscall(sys_shmdt); cond_syscall(sys_shmctl); cond_syscall(sys_mq_open); -- cgit v1.2.3 From e5bdd883a189243541e7a132385580703b049102 Mon Sep 17 00:00:00 2001 From: Jesper Juhl Date: Sun, 1 May 2005 08:59:13 -0700 Subject: [PATCH] new valid_signal() function This patch adds a new function valid_signal() that tests if its argument is a valid signal number. The reasons for adding this new function are: - some code currently testing _NSIG directly has off-by-one errors. Using this function instead avoids such errors. - some code currently tests unsigned signal numbers for <0 which is pointless and generates warnings when building with gcc -W. Using this function instead avoids such warnings. I considered various places to add this function but eventually settled on include/linux/signal.h as the most logical place for it. If there's some reason this is a bad choice then please let me know (hints as to a better location are then welcome of course). 
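
[Editorial note: the short sketch below is mine, not part of the patch. It illustrates the kind of open-coded range check the changelog describes and how the new helper replaces it; old_check()/new_check() are hypothetical callers, and the code assumes kernel context where _NSIG and the valid_signal() added below are in scope.]

/* Before: both problems named in the changelog. With an unsigned argument
 * the "< 0" test is always false (and warns under gcc -W), and ">= _NSIG"
 * wrongly rejects _NSIG itself, which is a valid signal number. */
static int old_check(unsigned long sig)
{
	if (sig < 0 || sig >= _NSIG)
		return -EINVAL;
	return 0;
}

/* After: let the helper do the range test. */
static int new_check(unsigned long sig)
{
	if (!valid_signal(sig))
		return -EINVAL;
	return 0;
}

Note that valid_signal() deliberately accepts 0 as well, since signal number 0 is a legal "probe" argument to kill().
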
Signed-off-by: Jesper Juhl Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/signal.h | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'include') diff --git a/include/linux/signal.h b/include/linux/signal.h index 78bfb266e4f7..0a98f5ec5cae 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h @@ -220,6 +220,12 @@ static inline void init_sigpending(struct sigpending *sig) INIT_LIST_HEAD(&sig->list); } +/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */ +static inline int valid_signal(unsigned long sig) +{ + return sig <= _NSIG ? 1 : 0; +} + extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p); extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *); extern long do_sigpending(void __user *, unsigned long); -- cgit v1.2.3 From d46aa455dd5457fdbebad17db4ff4df655cbfbae Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Sun, 1 May 2005 08:59:17 -0700 Subject: [PATCH] autofs4: bump version number Bump autofs4 version so we know what's going on. Signed-off-by: Ian Kent Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/auto_fs4.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'include') diff --git a/include/linux/auto_fs4.h b/include/linux/auto_fs4.h index d1c7b0ec7c22..a1657fb99516 100644 --- a/include/linux/auto_fs4.h +++ b/include/linux/auto_fs4.h @@ -23,7 +23,7 @@ #define AUTOFS_MIN_PROTO_VERSION 3 #define AUTOFS_MAX_PROTO_VERSION 4 -#define AUTOFS_PROTO_SUBVERSION 5 +#define AUTOFS_PROTO_SUBVERSION 6 /* Mask for expire behaviour */ #define AUTOFS_EXP_IMMEDIATE 1 -- cgit v1.2.3 From 6a3a16f2ef6f335286e2b2bf8284b0ab4ff38ec0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 1 May 2005 08:59:17 -0700 Subject: [PATCH] reiserfs endianness: clone struct reiserfs_key struct reiserfs_key cloned; (currently) identical struct in_core_key added. Places that expect host-endian data in reiserfs_key switched to in_core_key. Basically, we get annotation of reiserfs_key users and keep the resulting tree obviously equivalent to original. 
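
[Editorial note: a minimal sketch of the idea behind the clone, not code from the patch. The on-disk reiserfs_key stays in little-endian disk format while in_core_key is meant to hold CPU-endian values, so conversion happens at one explicit boundary; disk_key_to_cpu() below is a hypothetical helper, and the __le32/endianness annotations only arrive in the follow-up patch further down.]

/* Hypothetical boundary conversion, using the key field names from this patch. */
static void disk_key_to_cpu(struct in_core_key *to, const struct reiserfs_key *from)
{
	to->k_dir_id   = le32_to_cpu(from->k_dir_id);	/* packing locality */
	to->k_objectid = le32_to_cpu(from->k_objectid);	/* object id */
	/* the offset/type union is left out here; its layout differs for v1 and v2 keys */
}

Keeping two distinct struct types, rather than reusing reiserfs_key for both jobs, is what later allows the endianness annotations to flag any place that mixes disk-format and CPU-format keys.
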
Signed-off-by: Al Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/bitmap.c | 4 ++-- fs/reiserfs/stree.c | 1 + fs/reiserfs/super.c | 4 ++-- include/linux/reiserfs_fs.h | 32 +++++++++++++++++++++++++++++--- 4 files changed, 34 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index a4e2ed544bbe..f4f16fada14c 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -736,7 +736,7 @@ static inline int this_blocknr_allocation_would_make_it_a_large_file(reiserfs_bl #ifdef DISPLACE_NEW_PACKING_LOCALITIES static inline void displace_new_packing_locality (reiserfs_blocknr_hint_t *hint) { - struct reiserfs_key * key = &hint->key; + struct in_core_key * key = &hint->key; hint->th->displace_new_blocks = 0; hint->search_start = hint->beg + keyed_hash((char*)(&key->k_objectid),4) % (hint->end - hint->beg); @@ -777,7 +777,7 @@ static inline int old_way (reiserfs_blocknr_hint_t * hint) static inline void hundredth_slices (reiserfs_blocknr_hint_t * hint) { - struct reiserfs_key * key = &hint->key; + struct in_core_key * key = &hint->key; b_blocknr_t slice_start; slice_start = (keyed_hash((char*)(&key->k_dir_id),4) % 100) * (hint->end / 100); diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 73ec5212178b..1d380a5da39c 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -229,6 +229,7 @@ const struct reiserfs_key MIN_KEY = {0, 0, {{0, 0},}}; /* Maximal possible key. It is never in the tree. */ const struct reiserfs_key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}}; +const struct in_core_key MAX_IN_CORE_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}}; /* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index bc5e8893b5d5..d6d1d7e2801f 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -110,7 +110,7 @@ static void reiserfs_unlockfs(struct super_block *s) { reiserfs_allow_writes(s) ; } -extern const struct reiserfs_key MAX_KEY; +extern const struct in_core_key MAX_IN_CORE_KEY; /* this is used to delete "save link" when there are no items of a @@ -164,7 +164,7 @@ static int finish_unfinished (struct super_block * s) /* compose key to look for "save" links */ max_cpu_key.version = KEY_FORMAT_3_5; - max_cpu_key.on_disk_key = MAX_KEY; + max_cpu_key.on_disk_key = MAX_IN_CORE_KEY; max_cpu_key.key_length = 3; #ifdef CONFIG_QUOTA diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index bccff8b17dc4..d0867873a1b5 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -433,6 +433,23 @@ static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset ) # define set_offset_v2_k_offset(v2,val) (offset_v2_k_offset(v2) = (val)) #endif +struct in_core_offset_v1 { + __u32 k_offset; + __u32 k_uniqueness; +} __attribute__ ((__packed__)); + +struct in_core_offset_v2 { +#ifdef __LITTLE_ENDIAN + /* little endian version */ + __u64 k_offset:60; + __u64 k_type: 4; +#else + /* big endian version */ + __u64 k_type: 4; + __u64 k_offset:60; +#endif +} __attribute__ ((__packed__)); + /* Key of an item determines its location in the S+tree, and is composed of 4 components */ struct reiserfs_key { @@ -445,9 +462,18 @@ struct reiserfs_key { } __attribute__ ((__packed__)) u; } __attribute__ ((__packed__)); +struct in_core_key { + __u32 k_dir_id; /* packing locality: by default parent + directory object id */ + __u32 
k_objectid; /* object identifier */ + union { + struct in_core_offset_v1 k_offset_v1; + struct in_core_offset_v2 k_offset_v2; + } __attribute__ ((__packed__)) u; +} __attribute__ ((__packed__)); struct cpu_key { - struct reiserfs_key on_disk_key; + struct in_core_key on_disk_key; int version; int key_length; /* 3 in all cases but direct2indirect and indirect2direct conversion */ @@ -1476,7 +1502,7 @@ struct tree_balance int fs_gen; /* saved value of `reiserfs_generation' counter see FILESYSTEM_CHANGED() macro in reiserfs_fs.h */ #ifdef DISPLACE_NEW_PACKING_LOCALITIES - struct reiserfs_key key; /* key pointer, to pass to block allocator or + struct in_core_key key; /* key pointer, to pass to block allocator or another low-level subsystem */ #endif } ; @@ -2117,7 +2143,7 @@ struct buffer_head * get_FEB (struct tree_balance *); struct __reiserfs_blocknr_hint { struct inode * inode; /* inode passed to allocator, if we allocate unf. nodes */ long block; /* file offset, in blocks */ - struct reiserfs_key key; + struct in_core_key key; struct path * path; /* search path, used by allocator to deternine search_start by * various ways */ struct reiserfs_transaction_handle * th; /* transaction handle is needed to log super blocks and -- cgit v1.2.3 From 3e8962be915bacc1d70e4849a075041838d60a3f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 1 May 2005 08:59:18 -0700 Subject: [PATCH] reiserfs endianness: annotate little-endian objects little-endian objects annotated as such; again, obviously no changes of resulting code, we only replace __u16 with __le16, etc. in relevant places. Signed-off-by: Al Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/bitmap.c | 7 +- fs/reiserfs/dir.c | 8 +- fs/reiserfs/file.c | 4 +- fs/reiserfs/inode.c | 10 +-- fs/reiserfs/item_ops.c | 5 +- fs/reiserfs/objectid.c | 18 ++--- fs/reiserfs/procfs.c | 4 +- fs/reiserfs/stree.c | 22 ++++-- fs/reiserfs/super.c | 6 +- include/linux/reiserfs_acl.h | 12 +-- include/linux/reiserfs_fs.h | 168 ++++++++++++++++++++--------------------- include/linux/reiserfs_xattr.h | 4 +- 12 files changed, 138 insertions(+), 130 deletions(-) (limited to 'include') diff --git a/fs/reiserfs/bitmap.c b/fs/reiserfs/bitmap.c index f4f16fada14c..49c479c9454a 100644 --- a/fs/reiserfs/bitmap.c +++ b/fs/reiserfs/bitmap.c @@ -260,8 +260,9 @@ static inline int block_group_used(struct super_block *s, u32 id) { /* * the packing is returned in disk byte order */ -u32 reiserfs_choose_packing(struct inode *dir) { - u32 packing; +__le32 reiserfs_choose_packing(struct inode *dir) +{ + __le32 packing; if (TEST_OPTION(packing_groups, dir->i_sb)) { u32 parent_dir = le32_to_cpu(INODE_PKEY(dir)->k_dir_id); /* @@ -655,7 +656,7 @@ static int get_left_neighbor(reiserfs_blocknr_hint_t *hint) struct buffer_head * bh; struct item_head * ih; int pos_in_item; - __u32 * item; + __le32 * item; int ret = 0; if (!hint->path) /* reiserfs code can call this function w/o pointer to path diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c index d1514a9b0514..fbde4b01a325 100644 --- a/fs/reiserfs/dir.c +++ b/fs/reiserfs/dir.c @@ -209,8 +209,8 @@ static int reiserfs_readdir (struct file * filp, void * dirent, filldir_t filldi /* compose directory item containing "." and ".." 
entries (entries are not aligned to 4 byte boundary) */ /* the last four params are LE */ -void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid, - __u32 par_dirid, __u32 par_objid) +void make_empty_dir_item_v1 (char * body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid) { struct reiserfs_de_head * deh; @@ -242,8 +242,8 @@ void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid, } /* compose directory item containing "." and ".." entries */ -void make_empty_dir_item (char * body, __u32 dirid, __u32 objid, - __u32 par_dirid, __u32 par_objid) +void make_empty_dir_item (char * body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid) { struct reiserfs_de_head * deh; diff --git a/fs/reiserfs/file.c b/fs/reiserfs/file.c index f6860e83521d..2230afff1870 100644 --- a/fs/reiserfs/file.c +++ b/fs/reiserfs/file.c @@ -166,7 +166,7 @@ static int reiserfs_allocate_blocks_for_region( struct cpu_key key; // cpu key of item that we are going to deal with struct item_head *ih; // pointer to item head that we are going to deal with struct buffer_head *bh; // Buffer head that contains items that we are going to deal with - __u32 * item; // pointer to item we are going to deal with + __le32 * item; // pointer to item we are going to deal with INITIALIZE_PATH(path); // path to item, that we are going to deal with. b_blocknr_t *allocated_blocks; // Pointer to a place where allocated blocknumbers would be stored. reiserfs_blocknr_hint_t hint; // hint structure for block allocator. @@ -891,7 +891,7 @@ static int reiserfs_prepare_file_region_for_write( struct item_head *ih = NULL; // pointer to item head that we are going to deal with struct buffer_head *itembuf=NULL; // Buffer head that contains items that we are going to deal with INITIALIZE_PATH(path); // path to item, that we are going to deal with. 
- __u32 * item=NULL; // pointer to item we are going to deal with + __le32 * item=NULL; // pointer to item we are going to deal with int item_pos=-1; /* Position in indirect item */ diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 7543031396f4..5fdb9f97b99e 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -173,7 +173,7 @@ static inline void fix_tail_page_for_writing(struct page *page) { done already or non-hole position has been found in the indirect item */ static inline int allocation_needed (int retval, b_blocknr_t allocated, struct item_head * ih, - __u32 * item, int pos_in_item) + __le32 * item, int pos_in_item) { if (allocated) return 0; @@ -278,7 +278,7 @@ research: bh = get_last_bh (&path); ih = get_ih (&path); if (is_indirect_le_ih (ih)) { - __u32 * ind_item = (__u32 *)B_I_PITEM (bh, ih); + __le32 * ind_item = (__le32 *)B_I_PITEM (bh, ih); /* FIXME: here we could cache indirect item or part of it in the inode to avoid search_by_key in case of subsequent @@ -581,7 +581,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block, struct cpu_key key; struct buffer_head * bh, * unbh = NULL; struct item_head * ih, tmp_ih; - __u32 * item; + __le32 * item; int done; int fs_gen; struct reiserfs_transaction_handle *th = NULL; @@ -746,7 +746,7 @@ start_trans: done = 0; do { if (is_statdata_le_ih (ih)) { - __u32 unp = 0; + __le32 unp = 0; struct cpu_key tmp_key; /* indirect item has to be inserted */ @@ -2067,7 +2067,7 @@ static int map_block_for_writepage(struct inode *inode, struct item_head tmp_ih ; struct item_head *ih ; struct buffer_head *bh ; - __u32 *item ; + __le32 *item ; struct cpu_key key ; INITIALIZE_PATH(path) ; int pos_in_item ; diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c index 9cf7c13b120d..0ce33db1acdf 100644 --- a/fs/reiserfs/item_ops.c +++ b/fs/reiserfs/item_ops.c @@ -296,10 +296,11 @@ static void print_sequence (__u32 start, int len) static void indirect_print_item (struct item_head * ih, char * item) { int j; - __u32 * unp, prev = INT_MAX; + __le32 * unp; + __u32 prev = INT_MAX; int num; - unp = (__u32 *)item; + unp = (__le32 *)item; if (ih_item_len(ih) % UNFM_P_SIZE) reiserfs_warning (NULL, "indirect_print_item: invalid item len"); diff --git a/fs/reiserfs/objectid.c b/fs/reiserfs/objectid.c index 0785c43a7486..bfe8e25ef293 100644 --- a/fs/reiserfs/objectid.c +++ b/fs/reiserfs/objectid.c @@ -11,13 +11,13 @@ // find where objectid map starts #define objectid_map(s,rs) (old_format_only (s) ? 
\ - (__u32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\ - (__u32 *)((rs) + 1)) + (__le32 *)((struct reiserfs_super_block_v1 *)(rs) + 1) :\ + (__le32 *)((rs) + 1)) #ifdef CONFIG_REISERFS_CHECK -static void check_objectid_map (struct super_block * s, __u32 * map) +static void check_objectid_map (struct super_block * s, __le32 * map) { if (le32_to_cpu (map[0]) != 1) reiserfs_panic (s, "vs-15010: check_objectid_map: map corrupted: %lx", @@ -27,7 +27,7 @@ static void check_objectid_map (struct super_block * s, __u32 * map) } #else -static void check_objectid_map (struct super_block * s, __u32 * map) +static void check_objectid_map (struct super_block * s, __le32 * map) {;} #endif @@ -52,7 +52,7 @@ __u32 reiserfs_get_unused_objectid (struct reiserfs_transaction_handle *th) { struct super_block * s = th->t_super; struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s); - __u32 * map = objectid_map (s, rs); + __le32 * map = objectid_map (s, rs); __u32 unused_objectid; BUG_ON (!th->t_trans_id); @@ -97,7 +97,7 @@ void reiserfs_release_objectid (struct reiserfs_transaction_handle *th, { struct super_block * s = th->t_super; struct reiserfs_super_block * rs = SB_DISK_SUPER_BLOCK (s); - __u32 * map = objectid_map (s, rs); + __le32 * map = objectid_map (s, rs); int i = 0; BUG_ON (!th->t_trans_id); @@ -172,12 +172,12 @@ int reiserfs_convert_objectid_map_v1(struct super_block *s) { int new_size = (s->s_blocksize - SB_SIZE) / sizeof(__u32) / 2 * 2 ; int old_max = sb_oid_maxsize(disk_sb); struct reiserfs_super_block_v1 *disk_sb_v1 ; - __u32 *objectid_map, *new_objectid_map ; + __le32 *objectid_map, *new_objectid_map ; int i ; disk_sb_v1=(struct reiserfs_super_block_v1 *)(SB_BUFFER_WITH_SB(s)->b_data); - objectid_map = (__u32 *)(disk_sb_v1 + 1) ; - new_objectid_map = (__u32 *)(disk_sb + 1) ; + objectid_map = (__le32 *)(disk_sb_v1 + 1) ; + new_objectid_map = (__le32 *)(disk_sb + 1) ; if (cur_size > new_size) { /* mark everyone used that was listed as free at the end of the objectid diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index f4ea81ae0e0f..e242ebc7f6f6 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c @@ -73,8 +73,8 @@ int reiserfs_global_version_in_proc( char *buffer, char **start, off_t offset, #define DFL( x ) D4C( rs -> s_v1.x ) #define objectid_map( s, rs ) (old_format_only (s) ? \ - (__u32 *)((struct reiserfs_super_block_v1 *)rs + 1) : \ - (__u32 *)(rs + 1)) + (__le32 *)((struct reiserfs_super_block_v1 *)rs + 1) : \ + (__le32 *)(rs + 1)) #define MAP( i ) D4C( objectid_map( sb, rs )[ i ] ) #define DJF( x ) le32_to_cpu( rs -> x ) diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 1d380a5da39c..15fa4cbdce3e 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -87,10 +87,11 @@ inline void copy_item_head(struct item_head * p_v_to, inline int comp_short_keys (const struct reiserfs_key * le_key, const struct cpu_key * cpu_key) { - __u32 * p_s_le_u32, * p_s_cpu_u32; + __le32 * p_s_le_u32; + __u32 * p_s_cpu_u32; int n_key_length = REISERFS_SHORT_KEY_LEN; - p_s_le_u32 = (__u32 *)le_key; + p_s_le_u32 = (__le32 *)le_key; p_s_cpu_u32 = (__u32 *)&cpu_key->on_disk_key; for( ; n_key_length--; ++p_s_le_u32, ++p_s_cpu_u32 ) { if ( le32_to_cpu (*p_s_le_u32) < *p_s_cpu_u32 ) @@ -228,7 +229,12 @@ extern struct tree_balance * cur_tb; const struct reiserfs_key MIN_KEY = {0, 0, {{0, 0},}}; /* Maximal possible key. It is never in the tree. 
*/ -const struct reiserfs_key MAX_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}}; +const struct reiserfs_key MAX_KEY = { + __constant_cpu_to_le32(0xffffffff), + __constant_cpu_to_le32(0xffffffff), + {{__constant_cpu_to_le32(0xffffffff), + __constant_cpu_to_le32(0xffffffff)},} +}; const struct in_core_key MAX_IN_CORE_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}}; @@ -998,7 +1004,7 @@ static char prepare_for_delete_or_cut( int n_unfm_number, /* Number of the item unformatted nodes. */ n_counter, n_blk_size; - __u32 * p_n_unfm_pointer; /* Pointer to the unformatted node number. */ + __le32 * p_n_unfm_pointer; /* Pointer to the unformatted node number. */ __u32 tmp; struct item_head s_ih; /* Item header. */ char c_mode; /* Returned mode of the balance. */ @@ -1060,7 +1066,7 @@ static char prepare_for_delete_or_cut( /* pointers to be cut */ n_unfm_number -= pos_in_item (p_s_path); /* Set pointer to the last unformatted node pointer that is to be cut. */ - p_n_unfm_pointer = (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1 - *p_n_removed; + p_n_unfm_pointer = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1 - *p_n_removed; /* We go through the unformatted nodes pointers of the indirect @@ -1082,8 +1088,8 @@ static char prepare_for_delete_or_cut( need_research = 1 ; break; } - RFALSE( p_n_unfm_pointer < (__u32 *)B_I_PITEM(p_s_bh, &s_ih) || - p_n_unfm_pointer > (__u32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1, + RFALSE( p_n_unfm_pointer < (__le32 *)B_I_PITEM(p_s_bh, &s_ih) || + p_n_unfm_pointer > (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + I_UNFM_NUM(&s_ih) - 1, "vs-5265: pointer out of range"); /* Hole, nothing to remove. */ @@ -1432,7 +1438,7 @@ int reiserfs_delete_object (struct reiserfs_transaction_handle *th, struct inode #if defined( USE_INODE_GENERATION_COUNTER ) if( !old_format_only ( th -> t_super ) ) { - __u32 *inode_generation; + __le32 *inode_generation; inode_generation = &REISERFS_SB(th -> t_super) -> s_rs -> s_inode_generation; diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index d6d1d7e2801f..2283f18aa1dc 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -216,7 +216,7 @@ static int finish_unfinished (struct super_block * s) /* reiserfs_iget needs k_dirid and k_objectid only */ item = B_I_PITEM (bh, ih); - obj_key.on_disk_key.k_dir_id = le32_to_cpu (*(__u32 *)item); + obj_key.on_disk_key.k_dir_id = le32_to_cpu (*(__le32 *)item); obj_key.on_disk_key.k_objectid = le32_to_cpu (ih->ih_key.k_objectid); obj_key.on_disk_key.u.k_offset_v1.k_offset = 0; obj_key.on_disk_key.u.k_offset_v1.k_uniqueness = 0; @@ -304,7 +304,7 @@ void add_save_link (struct reiserfs_transaction_handle * th, int retval; struct cpu_key key; struct item_head ih; - __u32 link; + __le32 link; BUG_ON (!th->t_trans_id); @@ -1336,7 +1336,7 @@ static int read_super_block (struct super_block * s, int offset) return 1; } - if ( rs->s_v1.s_root_block == -1 ) { + if ( rs->s_v1.s_root_block == cpu_to_le32(-1) ) { brelse(bh) ; reiserfs_warning (s, "Unfinished reiserfsck --rebuild-tree run detected. Please run\n" "reiserfsck --rebuild-tree and wait for a completion. 
If that fails\n" diff --git a/include/linux/reiserfs_acl.h b/include/linux/reiserfs_acl.h index a57e973af0bd..2aef9c3f5ce8 100644 --- a/include/linux/reiserfs_acl.h +++ b/include/linux/reiserfs_acl.h @@ -5,18 +5,18 @@ #define REISERFS_ACL_VERSION 0x0001 typedef struct { - __u16 e_tag; - __u16 e_perm; - __u32 e_id; + __le16 e_tag; + __le16 e_perm; + __le32 e_id; } reiserfs_acl_entry; typedef struct { - __u16 e_tag; - __u16 e_perm; + __le16 e_tag; + __le16 e_perm; } reiserfs_acl_entry_short; typedef struct { - __u32 a_version; + __le32 a_version; } reiserfs_acl_header; static inline size_t reiserfs_acl_size(int count) diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index d0867873a1b5..cc39c5305b80 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -114,47 +114,47 @@ if( !( cond ) ) \ struct journal_params { - __u32 jp_journal_1st_block; /* where does journal start from on its + __le32 jp_journal_1st_block; /* where does journal start from on its * device */ - __u32 jp_journal_dev; /* journal device st_rdev */ - __u32 jp_journal_size; /* size of the journal */ - __u32 jp_journal_trans_max; /* max number of blocks in a transaction. */ - __u32 jp_journal_magic; /* random value made on fs creation (this + __le32 jp_journal_dev; /* journal device st_rdev */ + __le32 jp_journal_size; /* size of the journal */ + __le32 jp_journal_trans_max; /* max number of blocks in a transaction. */ + __le32 jp_journal_magic; /* random value made on fs creation (this * was sb_journal_block_count) */ - __u32 jp_journal_max_batch; /* max number of blocks to batch into a + __le32 jp_journal_max_batch; /* max number of blocks to batch into a * trans */ - __u32 jp_journal_max_commit_age; /* in seconds, how old can an async + __le32 jp_journal_max_commit_age; /* in seconds, how old can an async * commit be */ - __u32 jp_journal_max_trans_age; /* in seconds, how old can a transaction + __le32 jp_journal_max_trans_age; /* in seconds, how old can a transaction * be */ }; /* this is the super from 3.5.X, where X >= 10 */ struct reiserfs_super_block_v1 { - __u32 s_block_count; /* blocks count */ - __u32 s_free_blocks; /* free blocks count */ - __u32 s_root_block; /* root block number */ + __le32 s_block_count; /* blocks count */ + __le32 s_free_blocks; /* free blocks count */ + __le32 s_root_block; /* root block number */ struct journal_params s_journal; - __u16 s_blocksize; /* block size */ - __u16 s_oid_maxsize; /* max size of object id array, see + __le16 s_blocksize; /* block size */ + __le16 s_oid_maxsize; /* max size of object id array, see * get_objectid() commentary */ - __u16 s_oid_cursize; /* current size of object id array */ - __u16 s_umount_state; /* this is set to 1 when filesystem was + __le16 s_oid_cursize; /* current size of object id array */ + __le16 s_umount_state; /* this is set to 1 when filesystem was * umounted, to 2 - when not */ char s_magic[10]; /* reiserfs magic string indicates that * file system is reiserfs: * "ReIsErFs" or "ReIsEr2Fs" or "ReIsEr3Fs" */ - __u16 s_fs_state; /* it is set to used by fsck to mark which + __le16 s_fs_state; /* it is set to used by fsck to mark which * phase of rebuilding is done */ - __u32 s_hash_function_code; /* indicate, what hash function is being use + __le32 s_hash_function_code; /* indicate, what hash function is being use * to sort names in a directory*/ - __u16 s_tree_height; /* height of disk tree */ - __u16 s_bmap_nr; /* amount of bitmap blocks needed to address + __le16 s_tree_height; /* height of disk 
tree */ + __le16 s_bmap_nr; /* amount of bitmap blocks needed to address * each block of file system */ - __u16 s_version; /* this field is only reliable on filesystem + __le16 s_version; /* this field is only reliable on filesystem * with non-standard journal */ - __u16 s_reserved_for_journal; /* size in blocks of journal area on main + __le16 s_reserved_for_journal; /* size in blocks of journal area on main * device, we need to keep after * making fs with non-standard journal */ } __attribute__ ((__packed__)); @@ -165,8 +165,8 @@ struct reiserfs_super_block_v1 struct reiserfs_super_block { struct reiserfs_super_block_v1 s_v1; - __u32 s_inode_generation; - __u32 s_flags; /* Right now used only by inode-attributes, if enabled */ + __le32 s_inode_generation; + __le32 s_flags; /* Right now used only by inode-attributes, if enabled */ unsigned char s_uuid[16]; /* filesystem unique identifier */ unsigned char s_label[16]; /* filesystem volume label */ char s_unused[88] ; /* zero filled by mkreiserfs and @@ -269,7 +269,7 @@ int is_reiserfs_jr (struct reiserfs_super_block * rs); #define QUOTA_EXCEEDED -6 typedef __u32 b_blocknr_t; -typedef __u32 unp_t; +typedef __le32 unp_t; struct unfm_nodeinfo { unp_t unfm_nodenum; @@ -376,8 +376,8 @@ static inline struct reiserfs_sb_info *REISERFS_SB(const struct super_block *sb) // directories use this key as well as old files // struct offset_v1 { - __u32 k_offset; - __u32 k_uniqueness; + __le32 k_offset; + __le32 k_uniqueness; } __attribute__ ((__packed__)); struct offset_v2 { @@ -453,9 +453,9 @@ struct in_core_offset_v2 { /* Key of an item determines its location in the S+tree, and is composed of 4 components */ struct reiserfs_key { - __u32 k_dir_id; /* packing locality: by default parent + __le32 k_dir_id; /* packing locality: by default parent directory object id */ - __u32 k_objectid; /* object identifier */ + __le32 k_objectid; /* object identifier */ union { struct offset_v1 k_offset_v1; struct offset_v2 k_offset_v2; @@ -534,15 +534,15 @@ struct item_head item. Note that the key, not this field, is used to determine the item type, and thus which field this union contains. */ - __u16 ih_free_space_reserved; + __le16 ih_free_space_reserved; /* Iff this is a directory item, this field equals the number of directory entries in the directory item. */ - __u16 ih_entry_count; + __le16 ih_entry_count; } __attribute__ ((__packed__)) u; - __u16 ih_item_len; /* total size of the item body */ - __u16 ih_item_location; /* an offset to the item body + __le16 ih_item_len; /* total size of the item body */ + __le16 ih_item_location; /* an offset to the item body * within the block */ - __u16 ih_version; /* 0 for all old items, 2 for new + __le16 ih_version; /* 0 for all old items, 2 for new ones. Highest bit is set by fsck temporary, cleaned after all done */ @@ -778,10 +778,10 @@ extern struct reiserfs_key root_key; /* Header of a disk block. More precisely, header of a formatted leaf or internal node, and not the header of an unformatted node. */ struct block_head { - __u16 blk_level; /* Level of a block in the tree. */ - __u16 blk_nr_item; /* Number of keys/items in a block. */ - __u16 blk_free_space; /* Block free space in bytes. */ - __u16 blk_reserved; + __le16 blk_level; /* Level of a block in the tree. */ + __le16 blk_nr_item; /* Number of keys/items in a block. */ + __le16 blk_free_space; /* Block free space in bytes. 
*/ + __le16 blk_reserved; /* dump this in v4/planA */ struct reiserfs_key blk_right_delim_key; /* kept only for compatibility */ }; @@ -845,19 +845,19 @@ struct block_head { // struct stat_data_v1 { - __u16 sd_mode; /* file type, permissions */ - __u16 sd_nlink; /* number of hard links */ - __u16 sd_uid; /* owner */ - __u16 sd_gid; /* group */ - __u32 sd_size; /* file size */ - __u32 sd_atime; /* time of last access */ - __u32 sd_mtime; /* time file was last modified */ - __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ + __le16 sd_mode; /* file type, permissions */ + __le16 sd_nlink; /* number of hard links */ + __le16 sd_uid; /* owner */ + __le16 sd_gid; /* group */ + __le32 sd_size; /* file size */ + __le32 sd_atime; /* time of last access */ + __le32 sd_mtime; /* time file was last modified */ + __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ union { - __u32 sd_rdev; - __u32 sd_blocks; /* number of blocks file uses */ + __le32 sd_rdev; + __le32 sd_blocks; /* number of blocks file uses */ } __attribute__ ((__packed__)) u; - __u32 sd_first_direct_byte; /* first byte of file which is stored + __le32 sd_first_direct_byte; /* first byte of file which is stored in a direct item: except that if it equals 1 it is a symlink and if it equals ~(__u32)0 there is no @@ -923,20 +923,20 @@ struct stat_data_v1 /* Stat Data on disk (reiserfs version of UFS disk inode minus the address blocks) */ struct stat_data { - __u16 sd_mode; /* file type, permissions */ - __u16 sd_attrs; /* persistent inode flags */ - __u32 sd_nlink; /* number of hard links */ - __u64 sd_size; /* file size */ - __u32 sd_uid; /* owner */ - __u32 sd_gid; /* group */ - __u32 sd_atime; /* time of last access */ - __u32 sd_mtime; /* time file was last modified */ - __u32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ - __u32 sd_blocks; + __le16 sd_mode; /* file type, permissions */ + __le16 sd_attrs; /* persistent inode flags */ + __le32 sd_nlink; /* number of hard links */ + __le64 sd_size; /* file size */ + __le32 sd_uid; /* owner */ + __le32 sd_gid; /* group */ + __le32 sd_atime; /* time of last access */ + __le32 sd_mtime; /* time file was last modified */ + __le32 sd_ctime; /* time inode (stat data) was last changed (except changes to sd_atime and sd_mtime) */ + __le32 sd_blocks; union { - __u32 sd_rdev; - __u32 sd_generation; - //__u32 sd_first_direct_byte; + __le32 sd_rdev; + __le32 sd_generation; + //__le32 sd_first_direct_byte; /* first byte of file which is stored in a direct item: except that if it equals 1 it is a symlink and if it equals @@ -1019,12 +1019,12 @@ struct stat_data { struct reiserfs_de_head { - __u32 deh_offset; /* third component of the directory entry key */ - __u32 deh_dir_id; /* objectid of the parent directory of the object, that is referenced + __le32 deh_offset; /* third component of the directory entry key */ + __le32 deh_dir_id; /* objectid of the parent directory of the object, that is referenced by directory entry */ - __u32 deh_objectid; /* objectid of the object, that is referenced by directory entry */ - __u16 deh_location; /* offset of name in the whole item */ - __u16 deh_state; /* whether 1) entry contains stat data (for future), and 2) whether + __le32 deh_objectid; /* objectid of the object, that is referenced by directory entry */ + __le16 deh_location; /* offset of name in the whole item */ + __le16 deh_state; /* whether 1) 
entry contains stat data (for future), and 2) whether entry is hidden (unlinked) */ } __attribute__ ((__packed__)); #define DEH_SIZE sizeof(struct reiserfs_de_head) @@ -1084,10 +1084,10 @@ struct reiserfs_de_head #define de_visible(deh) test_bit_unaligned (DEH_Visible, &((deh)->deh_state)) #define de_hidden(deh) !test_bit_unaligned (DEH_Visible, &((deh)->deh_state)) -extern void make_empty_dir_item_v1 (char * body, __u32 dirid, __u32 objid, - __u32 par_dirid, __u32 par_objid); -extern void make_empty_dir_item (char * body, __u32 dirid, __u32 objid, - __u32 par_dirid, __u32 par_objid); +extern void make_empty_dir_item_v1 (char * body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid); +extern void make_empty_dir_item (char * body, __le32 dirid, __le32 objid, + __le32 par_dirid, __le32 par_objid); /* array of the entry headers */ /* get item body */ @@ -1186,9 +1186,9 @@ struct reiserfs_dir_entry /* Disk child pointer: The pointer from an internal node of the tree to a node that is on disk. */ struct disk_child { - __u32 dc_block_number; /* Disk child's block number. */ - __u16 dc_size; /* Disk child's used space. */ - __u16 dc_reserved; + __le32 dc_block_number; /* Disk child's block number. */ + __le16 dc_size; /* Disk child's used space. */ + __le16 dc_reserved; }; #define DC_SIZE (sizeof(struct disk_child)) @@ -1656,10 +1656,10 @@ struct reiserfs_iget_args { /* first block written in a commit. */ struct reiserfs_journal_desc { - __u32 j_trans_id ; /* id of commit */ - __u32 j_len ; /* length of commit. len +1 is the commit block */ - __u32 j_mount_id ; /* mount id of this trans*/ - __u32 j_realblock[1] ; /* real locations for each block */ + __le32 j_trans_id ; /* id of commit */ + __le32 j_len ; /* length of commit. len +1 is the commit block */ + __le32 j_mount_id ; /* mount id of this trans*/ + __le32 j_realblock[1] ; /* real locations for each block */ } ; #define get_desc_trans_id(d) le32_to_cpu((d)->j_trans_id) @@ -1672,9 +1672,9 @@ struct reiserfs_journal_desc { /* last block written in a commit */ struct reiserfs_journal_commit { - __u32 j_trans_id ; /* must match j_trans_id from the desc block */ - __u32 j_len ; /* ditto */ - __u32 j_realblock[1] ; /* real locations for each block */ + __le32 j_trans_id ; /* must match j_trans_id from the desc block */ + __le32 j_len ; /* ditto */ + __le32 j_realblock[1] ; /* real locations for each block */ } ; #define get_commit_trans_id(c) le32_to_cpu((c)->j_trans_id) @@ -1689,9 +1689,9 @@ struct reiserfs_journal_commit { ** and this transaction does not need to be replayed. */ struct reiserfs_journal_header { - __u32 j_last_flush_trans_id ; /* id of last fully flushed transaction */ - __u32 j_first_unflushed_offset ; /* offset in the log of where to start replay after a crash */ - __u32 j_mount_id ; + __le32 j_last_flush_trans_id ; /* id of last fully flushed transaction */ + __le32 j_first_unflushed_offset ; /* offset in the log of where to start replay after a crash */ + __le32 j_mount_id ; /* 12 */ struct journal_params jh_journal; } ; @@ -2170,7 +2170,7 @@ void reiserfs_init_alloc_options (struct super_block *s); * to use for a new object underneat it. The locality is returned * in disk byte order (le). 
*/ -u32 reiserfs_choose_packing(struct inode *dir); +__le32 reiserfs_choose_packing(struct inode *dir); int is_reusable (struct super_block * s, b_blocknr_t block, int bit_value); void reiserfs_free_block (struct reiserfs_transaction_handle *th, struct inode *, b_blocknr_t, int for_unformatted); diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h index 1eaa48eca811..9244c5748820 100644 --- a/include/linux/reiserfs_xattr.h +++ b/include/linux/reiserfs_xattr.h @@ -10,8 +10,8 @@ #define REISERFS_XATTR_MAGIC 0x52465841 /* "RFXA" */ struct reiserfs_xattr_header { - __u32 h_magic; /* magic number for identification */ - __u32 h_hash; /* hash of the value */ + __le32 h_magic; /* magic number for identification */ + __le32 h_hash; /* hash of the value */ }; #ifdef __KERNEL__ -- cgit v1.2.3 From b8cc936f6295bba23513a49d858ea82f64982faf Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 1 May 2005 08:59:18 -0700 Subject: [PATCH] reiserfs endianness: fix endianness bugs fixes for a couple of bugs exposed by the above: le32_to_cpu() used on 16bit value and missing conversion in comparison of host- and little-endian values. Signed-off-by: Al Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/journal.c | 2 +- include/linux/reiserfs_fs.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index b16d65acb550..3072cfdee959 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c @@ -2391,7 +2391,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo jh = (struct reiserfs_journal_header *)(bhjh->b_data); /* make sure that journal matches to the super block */ - if (is_reiserfs_jr(rs) && (jh->jh_journal.jp_journal_magic != sb_jp_journal_magic(rs))) { + if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { reiserfs_warning (p_s_sb, "sh-460: journal header magic %x " "(device %s) does not match to magic found in super " "block %x", diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index cc39c5305b80..2f7a34d636dd 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -225,7 +225,7 @@ struct reiserfs_super_block #define SB_ONDISK_JOURNAL_DEVICE(s) \ le32_to_cpu ((SB_ONDISK_JP(s)->jp_journal_dev)) #define SB_ONDISK_RESERVED_FOR_JOURNAL(s) \ - le32_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal)) + le16_to_cpu ((SB_V1_DISK_SUPER_BLOCK(s)->s_reserved_for_journal)) #define is_block_in_log_or_reserved_area(s, block) \ block >= SB_JOURNAL_1st_RESERVED_BLOCK(s) \ -- cgit v1.2.3 From 6b9f5829e6e3af44f20c681e26524c637d4f82ff Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 1 May 2005 08:59:19 -0700 Subject: [PATCH] reiserfs endianness: comp_short_keys() cleanup comp_short_keys() massaged into sane form, which kills the last place where pointer to in_core_key (or any object containing such) would be cast to or from something else. At that point we are free to change layout of in_core_key - nothing depends on it anymore. So we drop the mess with union in there and simply use (unconditional) __u64 k_offset and __u8 k_type instead; places using in_core_key switched to those. That gives _far_ better code than current mess - on all platforms. 
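As a rough illustration of the comparator shape this cleanup converges on, the following self-contained user-space sketch compares only the two short-key fields; le_key, host_key, le32_to_host() and the _sketch suffix are stand-ins for the kernel's reiserfs_key, cpu_key and le32_to_cpu(). The pattern is: decode each little-endian field once, compare it against the host-endian field, return -1/0/1, with no pointer casts between the two layouts.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct le_key   { uint32_t k_dir_id, k_objectid; };  /* little-endian, as on disk */
struct host_key { uint32_t k_dir_id, k_objectid; };  /* host-endian, in core */

/* stand-in for le32_to_cpu(): decode 4 little-endian bytes on any host */
static uint32_t le32_to_host(uint32_t v)
{
	const uint8_t *p = (const uint8_t *)&v;
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* three-way compare of the two "short key" fields, no pointer casting */
static int comp_short_keys_sketch(const struct le_key *le,
				  const struct host_key *cpu)
{
	uint32_t n;

	n = le32_to_host(le->k_dir_id);
	if (n < cpu->k_dir_id)
		return -1;
	if (n > cpu->k_dir_id)
		return 1;

	n = le32_to_host(le->k_objectid);
	if (n < cpu->k_objectid)
		return -1;
	if (n > cpu->k_objectid)
		return 1;

	return 0;
}

int main(void)
{
	/* on-disk bytes for dir_id = 1, objectid = 2 (little-endian) */
	uint8_t raw[8] = { 1, 0, 0, 0, 2, 0, 0, 0 };
	struct le_key disk;
	struct host_key core = { .k_dir_id = 1, .k_objectid = 2 };

	memcpy(&disk, raw, sizeof(disk));
	printf("compare -> %d\n", comp_short_keys_sketch(&disk, &core)); /* prints 0 */
	return 0;
}

Comparing field by field like this is what removes the last dependency on the in_core_key memory layout, so the union can then be replaced with plain k_offset/k_type members.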
Signed-off-by: Al Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/reiserfs/inode.c | 4 ++-- fs/reiserfs/stree.c | 36 ++++++++++++++------------------- fs/reiserfs/super.c | 4 ++-- include/linux/reiserfs_fs.h | 49 ++++++++------------------------------------- 4 files changed, 27 insertions(+), 66 deletions(-) (limited to 'include') diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 5fdb9f97b99e..2711dff1b7b4 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1341,8 +1341,8 @@ void reiserfs_read_locked_inode (struct inode * inode, struct reiserfs_iget_args key.version = KEY_FORMAT_3_5; key.on_disk_key.k_dir_id = dirino; key.on_disk_key.k_objectid = inode->i_ino; - key.on_disk_key.u.k_offset_v1.k_offset = SD_OFFSET; - key.on_disk_key.u.k_offset_v1.k_uniqueness = SD_UNIQUENESS; + key.on_disk_key.k_offset = 0; + key.on_disk_key.k_type = 0; /* look for the object's stat data */ retval = search_item (inode->i_sb, &key, &path_to_sd); diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c index 15fa4cbdce3e..da23ba75f3d5 100644 --- a/fs/reiserfs/stree.c +++ b/fs/reiserfs/stree.c @@ -87,23 +87,20 @@ inline void copy_item_head(struct item_head * p_v_to, inline int comp_short_keys (const struct reiserfs_key * le_key, const struct cpu_key * cpu_key) { - __le32 * p_s_le_u32; - __u32 * p_s_cpu_u32; - int n_key_length = REISERFS_SHORT_KEY_LEN; - - p_s_le_u32 = (__le32 *)le_key; - p_s_cpu_u32 = (__u32 *)&cpu_key->on_disk_key; - for( ; n_key_length--; ++p_s_le_u32, ++p_s_cpu_u32 ) { - if ( le32_to_cpu (*p_s_le_u32) < *p_s_cpu_u32 ) + __u32 n; + n = le32_to_cpu(le_key->k_dir_id); + if (n < cpu_key->on_disk_key.k_dir_id) return -1; - if ( le32_to_cpu (*p_s_le_u32) > *p_s_cpu_u32 ) + if (n > cpu_key->on_disk_key.k_dir_id) + return 1; + n = le32_to_cpu(le_key->k_objectid); + if (n < cpu_key->on_disk_key.k_objectid) + return -1; + if (n > cpu_key->on_disk_key.k_objectid) return 1; - } - return 0; } - /* k1 is pointer to on-disk structure which is stored in little-endian form. k2 is pointer to cpu variable. Compare keys using all 4 key fields. @@ -153,18 +150,15 @@ inline int comp_short_le_keys (const struct reiserfs_key * key1, const struct re inline void le_key2cpu_key (struct cpu_key * to, const struct reiserfs_key * from) { + int version; to->on_disk_key.k_dir_id = le32_to_cpu (from->k_dir_id); to->on_disk_key.k_objectid = le32_to_cpu (from->k_objectid); // find out version of the key - to->version = le_key_version (from); - if (to->version == KEY_FORMAT_3_5) { - to->on_disk_key.u.k_offset_v1.k_offset = le32_to_cpu (from->u.k_offset_v1.k_offset); - to->on_disk_key.u.k_offset_v1.k_uniqueness = le32_to_cpu (from->u.k_offset_v1.k_uniqueness); - } else { - to->on_disk_key.u.k_offset_v2.k_offset = offset_v2_k_offset(&from->u.k_offset_v2); - to->on_disk_key.u.k_offset_v2.k_type = offset_v2_k_type(&from->u.k_offset_v2); - } + version = le_key_version (from); + to->version = version; + to->on_disk_key.k_offset = le_key_k_offset(version, from); + to->on_disk_key.k_type = le_key_k_type(version, from); } @@ -235,8 +229,8 @@ const struct reiserfs_key MAX_KEY = { {{__constant_cpu_to_le32(0xffffffff), __constant_cpu_to_le32(0xffffffff)},} }; -const struct in_core_key MAX_IN_CORE_KEY = {0xffffffff, 0xffffffff, {{0xffffffff, 0xffffffff},}}; +const struct in_core_key MAX_IN_CORE_KEY = {~0U, ~0U, ~0ULL>>4, 15}; /* Get delimiting key of the buffer by looking for it in the buffers in the path, starting from the bottom of the path, and going upwards. 
We must check the path's validity at each step. If the key is not in diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c index 2283f18aa1dc..31e75125f48b 100644 --- a/fs/reiserfs/super.c +++ b/fs/reiserfs/super.c @@ -218,8 +218,8 @@ static int finish_unfinished (struct super_block * s) item = B_I_PITEM (bh, ih); obj_key.on_disk_key.k_dir_id = le32_to_cpu (*(__le32 *)item); obj_key.on_disk_key.k_objectid = le32_to_cpu (ih->ih_key.k_objectid); - obj_key.on_disk_key.u.k_offset_v1.k_offset = 0; - obj_key.on_disk_key.u.k_offset_v1.k_uniqueness = 0; + obj_key.on_disk_key.k_offset = 0; + obj_key.on_disk_key.k_type = 0; pathrelse (&path); diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index 2f7a34d636dd..d445b682ce00 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -433,23 +433,6 @@ static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset ) # define set_offset_v2_k_offset(v2,val) (offset_v2_k_offset(v2) = (val)) #endif -struct in_core_offset_v1 { - __u32 k_offset; - __u32 k_uniqueness; -} __attribute__ ((__packed__)); - -struct in_core_offset_v2 { -#ifdef __LITTLE_ENDIAN - /* little endian version */ - __u64 k_offset:60; - __u64 k_type: 4; -#else - /* big endian version */ - __u64 k_type: 4; - __u64 k_offset:60; -#endif -} __attribute__ ((__packed__)); - /* Key of an item determines its location in the S+tree, and is composed of 4 components */ struct reiserfs_key { @@ -466,11 +449,9 @@ struct in_core_key { __u32 k_dir_id; /* packing locality: by default parent directory object id */ __u32 k_objectid; /* object identifier */ - union { - struct in_core_offset_v1 k_offset_v1; - struct in_core_offset_v2 k_offset_v2; - } __attribute__ ((__packed__)) u; -} __attribute__ ((__packed__)); + __u64 k_offset; + __u8 k_type; +}; struct cpu_key { struct in_core_key on_disk_key; @@ -696,43 +677,29 @@ static inline void set_le_ih_k_type (struct item_head * ih, int type) // static inline loff_t cpu_key_k_offset (const struct cpu_key * key) { - return (key->version == KEY_FORMAT_3_5) ? - key->on_disk_key.u.k_offset_v1.k_offset : - key->on_disk_key.u.k_offset_v2.k_offset; + return key->on_disk_key.k_offset; } static inline loff_t cpu_key_k_type (const struct cpu_key * key) { - return (key->version == KEY_FORMAT_3_5) ? - uniqueness2type (key->on_disk_key.u.k_offset_v1.k_uniqueness) : - key->on_disk_key.u.k_offset_v2.k_type; + return key->on_disk_key.k_type; } static inline void set_cpu_key_k_offset (struct cpu_key * key, loff_t offset) { - (key->version == KEY_FORMAT_3_5) ? - (key->on_disk_key.u.k_offset_v1.k_offset = offset) : - (key->on_disk_key.u.k_offset_v2.k_offset = offset); + key->on_disk_key.k_offset = offset; } - static inline void set_cpu_key_k_type (struct cpu_key * key, int type) { - (key->version == KEY_FORMAT_3_5) ? 
- (key->on_disk_key.u.k_offset_v1.k_uniqueness = type2uniqueness (type)): - (key->on_disk_key.u.k_offset_v2.k_type = type); + key->on_disk_key.k_type = type; } - static inline void cpu_key_k_offset_dec (struct cpu_key * key) { - if (key->version == KEY_FORMAT_3_5) - key->on_disk_key.u.k_offset_v1.k_offset --; - else - key->on_disk_key.u.k_offset_v2.k_offset --; + key->on_disk_key.k_offset --; } - #define is_direntry_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRENTRY) #define is_direct_cpu_key(key) (cpu_key_k_type (key) == TYPE_DIRECT) #define is_indirect_cpu_key(key) (cpu_key_k_type (key) == TYPE_INDIRECT) -- cgit v1.2.3 From f8e08a8466c4ac5f61b4bdb6338fd97eedb9c9e8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 1 May 2005 08:59:19 -0700 Subject: [PATCH] reiserfs endianness: sanitize reiserfs_key union Since we only access reiserfs_key ->u.k_offset_v2 guts in four helper functions, we are free to sanitize those, as long as - layout of the structure is unchanged (it's on-disk object) - behaviour of these helpers is same as before. Patch kills the mess with endianness-dependent bitfields and replaces them with a single __le64. Helpers are switched to straightforward shift/and/or. Benefits: - exact same definitions for little- and big-endian architectures; no ifdefs in sight. - generate the same code on little-endian and improved on big-endian. - doesn't rely on lousy bitfields handling in gcc codegenerator. - happens to be standard C (unsigned long long is not a valid type for a bitfield; it's a gccism and not well-implemented one, at that). Signed-off-by: Al Viro Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/reiserfs_fs.h | 42 +++++++----------------------------------- 1 file changed, 7 insertions(+), 35 deletions(-) (limited to 'include') diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index d445b682ce00..32148625fc2f 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h @@ -381,57 +381,29 @@ struct offset_v1 { } __attribute__ ((__packed__)); struct offset_v2 { -#ifdef __LITTLE_ENDIAN - /* little endian version */ - __u64 k_offset:60; - __u64 k_type: 4; -#else - /* big endian version */ - __u64 k_type: 4; - __u64 k_offset:60; -#endif + __le64 v; } __attribute__ ((__packed__)); -#ifndef __LITTLE_ENDIAN -typedef union { - struct offset_v2 offset_v2; - __u64 linear; -} __attribute__ ((__packed__)) offset_v2_esafe_overlay; - static inline __u16 offset_v2_k_type( const struct offset_v2 *v2 ) { - offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2; - tmp.linear = le64_to_cpu( tmp.linear ); - return (tmp.offset_v2.k_type <= TYPE_MAXTYPE)?tmp.offset_v2.k_type:TYPE_ANY; + __u8 type = le64_to_cpu(v2->v) >> 60; + return (type <= TYPE_MAXTYPE)?type:TYPE_ANY; } static inline void set_offset_v2_k_type( struct offset_v2 *v2, int type ) { - offset_v2_esafe_overlay *tmp = (offset_v2_esafe_overlay *)v2; - tmp->linear = le64_to_cpu(tmp->linear); - tmp->offset_v2.k_type = type; - tmp->linear = cpu_to_le64(tmp->linear); + v2->v = (v2->v & cpu_to_le64(~0ULL>>4)) | cpu_to_le64((__u64)type<<60); } static inline loff_t offset_v2_k_offset( const struct offset_v2 *v2 ) { - offset_v2_esafe_overlay tmp = *(const offset_v2_esafe_overlay *)v2; - tmp.linear = le64_to_cpu( tmp.linear ); - return tmp.offset_v2.k_offset; + return le64_to_cpu(v2->v) & (~0ULL>>4); } static inline void set_offset_v2_k_offset( struct offset_v2 *v2, loff_t offset ){ - offset_v2_esafe_overlay *tmp = (offset_v2_esafe_overlay *)v2; - tmp->linear = 
le64_to_cpu(tmp->linear); - tmp->offset_v2.k_offset = offset; - tmp->linear = cpu_to_le64(tmp->linear); + offset &= (~0ULL>>4); + v2->v = (v2->v & cpu_to_le64(15ULL<<60)) | cpu_to_le64(offset); } -#else -# define offset_v2_k_type(v2) ((v2)->k_type) -# define set_offset_v2_k_type(v2,val) (offset_v2_k_type(v2) = (val)) -# define offset_v2_k_offset(v2) ((v2)->k_offset) -# define set_offset_v2_k_offset(v2,val) (offset_v2_k_offset(v2) = (val)) -#endif /* Key of an item determines its location in the S+tree, and is composed of 4 components */ -- cgit v1.2.3 From 7149437669f79b497830e643a2b13d26a017b038 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Sun, 1 May 2005 08:59:22 -0700 Subject: [PATCH] fbdev: Batch cmap changes at driver level This patch adds to the fbdev interface a set_cmap callback that allow the driver to "batch" palette changes. This is useful for drivers like radeonfb which might require lenghtly workarounds on palette accesses, thus allowing to factor out those workarounds efficiently. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/video/aty/radeon_base.c | 118 ++++++++++++++++++++++++++++++++-------- drivers/video/fbcmap.c | 32 ++++++++++- include/linux/fb.h | 3 + 3 files changed, 127 insertions(+), 26 deletions(-) (limited to 'include') diff --git a/drivers/video/aty/radeon_base.c b/drivers/video/aty/radeon_base.c index e8eb124754b1..ee25b9e8db60 100644 --- a/drivers/video/aty/radeon_base.c +++ b/drivers/video/aty/radeon_base.c @@ -1057,13 +1057,14 @@ static int radeonfb_blank (int blank, struct fb_info *info) return radeon_screen_blank(rinfo, blank, 0); } -static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, - unsigned blue, unsigned transp, struct fb_info *info) +static int radeon_setcolreg (unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct radeonfb_info *rinfo) { - struct radeonfb_info *rinfo = info->par; u32 pindex; unsigned int i; - + + if (regno > 255) return 1; @@ -1078,20 +1079,7 @@ static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, pindex = regno; if (!rinfo->asleep) { - u32 dac_cntl2, vclk_cntl = 0; - radeon_fifo_wait(9); - if (rinfo->is_mobility) { - vclk_cntl = INPLL(VCLK_ECP_CNTL); - OUTPLL(VCLK_ECP_CNTL, vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb); - } - - /* Make sure we are on first palette */ - if (rinfo->has_CRTC2) { - dac_cntl2 = INREG(DAC_CNTL2); - dac_cntl2 &= ~DAC2_PALETTE_ACCESS_CNTL; - OUTREG(DAC_CNTL2, dac_cntl2); - } if (rinfo->bpp == 16) { pindex = regno * 8; @@ -1101,24 +1089,27 @@ static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, if (rinfo->depth == 15 && regno > 31) return 1; - /* For 565, the green component is mixed one order below */ + /* For 565, the green component is mixed one order + * below + */ if (rinfo->depth == 16) { OUTREG(PALETTE_INDEX, pindex>>1); - OUTREG(PALETTE_DATA, (rinfo->palette[regno>>1].red << 16) | - (green << 8) | (rinfo->palette[regno>>1].blue)); + OUTREG(PALETTE_DATA, + (rinfo->palette[regno>>1].red << 16) | + (green << 8) | + (rinfo->palette[regno>>1].blue)); green = rinfo->palette[regno<<1].green; } } if (rinfo->depth != 16 || regno < 32) { OUTREG(PALETTE_INDEX, pindex); - OUTREG(PALETTE_DATA, (red << 16) | (green << 8) | blue); + OUTREG(PALETTE_DATA, (red << 16) | + (green << 8) | blue); } - if (rinfo->is_mobility) - OUTPLL(VCLK_ECP_CNTL, vclk_cntl); } if (regno < 16) { - u32 *pal = info->pseudo_palette; + u32 *pal = 
rinfo->info->pseudo_palette; switch (rinfo->depth) { case 15: pal[regno] = (regno << 10) | (regno << 5) | regno; @@ -1138,6 +1129,84 @@ static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, return 0; } +static int radeonfb_setcolreg (unsigned regno, unsigned red, unsigned green, + unsigned blue, unsigned transp, + struct fb_info *info) +{ + struct radeonfb_info *rinfo = info->par; + u32 dac_cntl2, vclk_cntl = 0; + int rc; + + if (!rinfo->asleep) { + if (rinfo->is_mobility) { + vclk_cntl = INPLL(VCLK_ECP_CNTL); + OUTPLL(VCLK_ECP_CNTL, + vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb); + } + + /* Make sure we are on first palette */ + if (rinfo->has_CRTC2) { + dac_cntl2 = INREG(DAC_CNTL2); + dac_cntl2 &= ~DAC2_PALETTE_ACCESS_CNTL; + OUTREG(DAC_CNTL2, dac_cntl2); + } + } + + rc = radeon_setcolreg (regno, red, green, blue, transp, rinfo); + + if (!rinfo->asleep && rinfo->is_mobility) + OUTPLL(VCLK_ECP_CNTL, vclk_cntl); + + return rc; +} + +static int radeonfb_setcmap(struct fb_cmap *cmap, struct fb_info *info) +{ + struct radeonfb_info *rinfo = info->par; + u16 *red, *green, *blue, *transp; + u32 dac_cntl2, vclk_cntl = 0; + int i, start, rc = 0; + + if (!rinfo->asleep) { + if (rinfo->is_mobility) { + vclk_cntl = INPLL(VCLK_ECP_CNTL); + OUTPLL(VCLK_ECP_CNTL, + vclk_cntl & ~PIXCLK_DAC_ALWAYS_ONb); + } + + /* Make sure we are on first palette */ + if (rinfo->has_CRTC2) { + dac_cntl2 = INREG(DAC_CNTL2); + dac_cntl2 &= ~DAC2_PALETTE_ACCESS_CNTL; + OUTREG(DAC_CNTL2, dac_cntl2); + } + } + + red = cmap->red; + green = cmap->green; + blue = cmap->blue; + transp = cmap->transp; + start = cmap->start; + + for (i = 0; i < cmap->len; i++) { + u_int hred, hgreen, hblue, htransp = 0xffff; + + hred = *red++; + hgreen = *green++; + hblue = *blue++; + if (transp) + htransp = *transp++; + rc = radeon_setcolreg (start++, hred, hgreen, hblue, htransp, + rinfo); + if (rc) + break; + } + + if (!rinfo->asleep && rinfo->is_mobility) + OUTPLL(VCLK_ECP_CNTL, vclk_cntl); + + return rc; +} static void radeon_save_state (struct radeonfb_info *rinfo, struct radeon_regs *save) @@ -1796,6 +1865,7 @@ static struct fb_ops radeonfb_ops = { .fb_check_var = radeonfb_check_var, .fb_set_par = radeonfb_set_par, .fb_setcolreg = radeonfb_setcolreg, + .fb_setcmap = radeonfb_setcmap, .fb_pan_display = radeonfb_pan_display, .fb_blank = radeonfb_blank, .fb_ioctl = radeonfb_ioctl, diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c index c51f8fb5c1de..4e5ce8f7d65e 100644 --- a/drivers/video/fbcmap.c +++ b/drivers/video/fbcmap.c @@ -222,8 +222,11 @@ int fb_set_cmap(struct fb_cmap *cmap, struct fb_info *info) transp = cmap->transp; start = cmap->start; - if (start < 0 || !info->fbops->fb_setcolreg) + if (start < 0 || (!info->fbops->fb_setcolreg && + !info->fbops->fb_setcmap)) return -EINVAL; + if (info->fbops->fb_setcmap) + return info->fbops->fb_setcmap(cmap, info); for (i = 0; i < cmap->len; i++) { hred = *red++; hgreen = *green++; @@ -250,8 +253,33 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info) transp = cmap->transp; start = cmap->start; - if (start < 0 || !info->fbops->fb_setcolreg) + if (start < 0 || (!info->fbops->fb_setcolreg && + !info->fbops->fb_setcmap)) return -EINVAL; + + /* If we can batch, do it */ + if (info->fbops->fb_setcmap && cmap->len > 1) { + struct fb_cmap umap; + int size = cmap->len * sizeof(u16); + int rc; + + memset(&umap, 0, sizeof(struct fb_cmap)); + rc = fb_alloc_cmap(&umap, cmap->len, transp != NULL); + if (rc) + return rc; + if (copy_from_user(umap.red, red, size) || 
+ copy_from_user(umap.green, green, size) || + copy_from_user(umap.blue, blue, size) || + (transp && copy_from_user(umap.transp, transp, size))) { + rc = -EFAULT; + } + umap.start = start; + if (rc == 0) + rc = info->fbops->fb_setcmap(&umap, info); + fb_dealloc_cmap(&umap); + return rc; + } + for (i = 0; i < cmap->len; i++, red++, blue++, green++) { if (get_user(hred, red) || get_user(hgreen, green) || diff --git a/include/linux/fb.h b/include/linux/fb.h index b45d3e2d711a..b468bf496547 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -563,6 +563,9 @@ struct fb_ops { int (*fb_setcolreg)(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info); + /* set color registers in batch */ + int (*fb_setcmap)(struct fb_cmap *cmap, struct fb_info *info); + /* blank display */ int (*fb_blank)(int blank, struct fb_info *info); -- cgit v1.2.3 From 5f76be80d96f60adfc91f2acf22b146ce0e3072f Mon Sep 17 00:00:00 2001 From: Adrian Bunk Date: Sun, 1 May 2005 08:59:23 -0700 Subject: [PATCH] fbdev: edid.h cleanups This patch removes some completely unused code. Signed-off-by: Adrian Bunk Signed-off-by: Antonino Daplas Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/i386/boot/compressed/misc.c | 1 - drivers/video/fbmon.c | 1 - drivers/video/vesafb.c | 3 --- include/video/edid.h | 9 --------- 4 files changed, 14 deletions(-) (limited to 'include') diff --git a/arch/i386/boot/compressed/misc.c b/arch/i386/boot/compressed/misc.c index fa67045234a3..cedc55cc47de 100644 --- a/arch/i386/boot/compressed/misc.c +++ b/arch/i386/boot/compressed/misc.c @@ -12,7 +12,6 @@ #include #include #include -#include