From d73e60b7144a86baf0fdfcc9537a70bb4f72e11c Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 31 Oct 2008 13:08:02 +0000
Subject: [ARM] copypage: convert assembly files to C

Signed-off-by: Russell King
---
 arch/arm/mm/copypage-feroceon.S |  95 -------------------------------------
 arch/arm/mm/copypage-feroceon.c | 100 +++++++++++++++++++++++++++++++++++++++
 arch/arm/mm/copypage-v3.S       |  67 --------------------------
 arch/arm/mm/copypage-v3.c       |  69 +++++++++++++++++++++++++++
 arch/arm/mm/copypage-v4wb.S     |  79 -------------------------------
 arch/arm/mm/copypage-v4wb.c     |  83 ++++++++++++++++++++++++++++++++
 arch/arm/mm/copypage-v4wt.S     |  73 ----------------------------
 arch/arm/mm/copypage-v4wt.c     |  77 ++++++++++++++++++++++++++++++
 arch/arm/mm/copypage-xsc3.S     |  97 --------------------------------------
 arch/arm/mm/copypage-xsc3.c     | 102 ++++++++++++++++++++++++++++++++++++++++
 10 files changed, 431 insertions(+), 411 deletions(-)
 delete mode 100644 arch/arm/mm/copypage-feroceon.S
 create mode 100644 arch/arm/mm/copypage-feroceon.c
 delete mode 100644 arch/arm/mm/copypage-v3.S
 create mode 100644 arch/arm/mm/copypage-v3.c
 delete mode 100644 arch/arm/mm/copypage-v4wb.S
 create mode 100644 arch/arm/mm/copypage-v4wb.c
 delete mode 100644 arch/arm/mm/copypage-v4wt.S
 create mode 100644 arch/arm/mm/copypage-v4wt.c
 delete mode 100644 arch/arm/mm/copypage-xsc3.S
 create mode 100644 arch/arm/mm/copypage-xsc3.c

diff --git a/arch/arm/mm/copypage-feroceon.S b/arch/arm/mm/copypage-feroceon.S
deleted file mode 100644
index 7eb0d320d240..000000000000
--- a/arch/arm/mm/copypage-feroceon.S
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * linux/arch/arm/lib/copypage-feroceon.S
- *
- * Copyright (C) 2008 Marvell Semiconductors
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This handles copy_user_page and clear_user_page on Feroceon
- * more optimally than the generic implementations.
- */ -#include -#include -#include - - .text - .align 5 - -ENTRY(feroceon_copy_user_page) - stmfd sp!, {r4-r9, lr} - mov ip, #PAGE_SZ -1: mov lr, r1 - ldmia r1!, {r2 - r9} - pld [lr, #32] - pld [lr, #64] - pld [lr, #96] - pld [lr, #128] - pld [lr, #160] - pld [lr, #192] - pld [lr, #224] - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - ldmia r1!, {r2 - r9} - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - stmia r0, {r2 - r9} - subs ip, ip, #(32 * 8) - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - bne 1b - mcr p15, 0, ip, c7, c10, 4 @ drain WB - ldmfd sp!, {r4-r9, pc} - - .align 5 - -ENTRY(feroceon_clear_user_page) - stmfd sp!, {r4-r7, lr} - mov r1, #PAGE_SZ/32 - mov r2, #0 - mov r3, #0 - mov r4, #0 - mov r5, #0 - mov r6, #0 - mov r7, #0 - mov ip, #0 - mov lr, #0 -1: stmia r0, {r2-r7, ip, lr} - subs r1, r1, #1 - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line - add r0, r0, #32 - bne 1b - mcr p15, 0, r1, c7, c10, 4 @ drain WB - ldmfd sp!, {r4-r7, pc} - - __INITDATA - - .type feroceon_user_fns, #object -ENTRY(feroceon_user_fns) - .long feroceon_clear_user_page - .long feroceon_copy_user_page - .size feroceon_user_fns, . - feroceon_user_fns diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c new file mode 100644 index 000000000000..c8347670ab00 --- /dev/null +++ b/arch/arm/mm/copypage-feroceon.c @@ -0,0 +1,100 @@ +/* + * linux/arch/arm/mm/copypage-feroceon.S + * + * Copyright (C) 2008 Marvell Semiconductors + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This handles copy_user_page and clear_user_page on Feroceon + * more optimally than the generic implementations. 
+ */ +#include + +#include + +void __attribute__((naked)) +feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4-r9, lr} \n\ + mov ip, %0 \n\ +1: mov lr, r1 \n\ + ldmia r1!, {r2 - r9} \n\ + pld [lr, #32] \n\ + pld [lr, #64] \n\ + pld [lr, #96] \n\ + pld [lr, #128] \n\ + pld [lr, #160] \n\ + pld [lr, #192] \n\ + pld [lr, #224] \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + ldmia r1!, {r2 - r9} \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + stmia r0, {r2 - r9} \n\ + subs ip, ip, #(32 * 8) \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + bne 1b \n\ + mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ + ldmfd sp!, {r4-r9, pc}" + : + : "I" (PAGE_SIZE)); +} + +void __attribute__((naked)) +feroceon_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4-r7, lr} \n\ + mov r1, %0 \n\ + mov r2, #0 \n\ + mov r3, #0 \n\ + mov r4, #0 \n\ + mov r5, #0 \n\ + mov r6, #0 \n\ + mov r7, #0 \n\ + mov ip, #0 \n\ + mov lr, #0 \n\ +1: stmia r0, {r2-r7, ip, lr} \n\ + subs r1, r1, #1 \n\ + mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + add r0, r0, #32 \n\ + bne 1b \n\ + mcr p15, 0, r1, c7, c10, 4 @ drain WB\n\ + ldmfd sp!, {r4-r7, pc}" + : + : "I" (PAGE_SIZE / 32)); +} + +struct cpu_user_fns feroceon_user_fns __initdata = { + .cpu_clear_user_page = feroceon_clear_user_page, + .cpu_copy_user_page = feroceon_copy_user_page, +}; + diff --git a/arch/arm/mm/copypage-v3.S b/arch/arm/mm/copypage-v3.S deleted file mode 100644 index 2ee394b11bcb..000000000000 --- a/arch/arm/mm/copypage-v3.S +++ /dev/null @@ -1,67 +0,0 @@ -/* - * linux/arch/arm/lib/copypage.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - */ -#include -#include -#include -#include - - .text - .align 5 -/* - * ARMv3 optimised copy_user_page - * - * FIXME: do we need to handle cache stuff... - */ -ENTRY(v3_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 -1: stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv3 optimised clear_user_page - * - * FIXME: do we need to handle cache stuff... 
- */ -ENTRY(v3_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - ldr pc, [sp], #4 - - __INITDATA - - .type v3_user_fns, #object -ENTRY(v3_user_fns) - .long v3_clear_user_page - .long v3_copy_user_page - .size v3_user_fns, . - v3_user_fns diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c new file mode 100644 index 000000000000..184911089e6c --- /dev/null +++ b/arch/arm/mm/copypage-v3.c @@ -0,0 +1,69 @@ +/* + * linux/arch/arm/mm/copypage-v3.c + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +#include + +/* + * ARMv3 optimised copy_user_page + * + * FIXME: do we need to handle cache stuff... + */ +void __attribute__((naked)) +v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\n\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %2 @ 1\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ +1: stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmia %0!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia %1!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %0!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv3 optimised clear_user_page + * + * FIXME: do we need to handle cache stuff... + */ +void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\n\ + str lr, [sp, #-4]!\n\ + mov r1, %1 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + ldr pc, [sp], #4" + : + : "r" (kaddr), "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v3_user_fns __initdata = { + .cpu_clear_user_page = v3_clear_user_page, + .cpu_copy_user_page = v3_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-v4wb.S b/arch/arm/mm/copypage-v4wb.S deleted file mode 100644 index 83117354b1cd..000000000000 --- a/arch/arm/mm/copypage-v4wb.S +++ /dev/null @@ -1,79 +0,0 @@ -/* - * linux/arch/arm/lib/copypage.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - */ -#include -#include -#include - - .text - .align 5 -/* - * ARMv4 optimised copy_user_page - * - * We flush the destination cache lines just before we write the data into the - * corresponding address. Since the Dcache is read-allocate, this removes the - * Dcache aliasing issue. The writes will be forwarded to the write buffer, - * and merged as appropriate. - * - * Note: We rely on all ARMv4 processors implementing the "invalidate D line" - * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. 
- */ -ENTRY(v4wb_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4 -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv4 optimised clear_user_page - * - * Same story as above. - */ -ENTRY(v4wb_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB - ldr pc, [sp], #4 - - __INITDATA - - .type v4wb_user_fns, #object -ENTRY(v4wb_user_fns) - .long v4wb_clear_user_page - .long v4wb_copy_user_page - .size v4wb_user_fns, . - v4wb_user_fns diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c new file mode 100644 index 000000000000..230210822961 --- /dev/null +++ b/arch/arm/mm/copypage-v4wb.c @@ -0,0 +1,83 @@ +/* + * linux/arch/arm/mm/copypage-v4wb.c + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include + +#include + +/* + * ARMv4 optimised copy_user_page + * + * We flush the destination cache lines just before we write the data into the + * corresponding address. Since the Dcache is read-allocate, this removes the + * Dcache aliasing issue. The writes will be forwarded to the write buffer, + * and merged as appropriate. + * + * Note: We rely on all ARMv4 processors implementing the "invalidate D line" + * instruction. If your processor does not supply this, you have to write your + * own copy_user_page that does the right thing. + */ +void __attribute__((naked)) +v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %0 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv4 optimised clear_user_page + * + * Same story as above. 
+ */ +void __attribute__((naked)) +v4wb_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + str lr, [sp, #-4]!\n\ + mov r1, %0 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ + ldr pc, [sp], #4" + : + : "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v4wb_user_fns __initdata = { + .cpu_clear_user_page = v4wb_clear_user_page, + .cpu_copy_user_page = v4wb_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-v4wt.S b/arch/arm/mm/copypage-v4wt.S deleted file mode 100644 index e1f2af28d549..000000000000 --- a/arch/arm/mm/copypage-v4wt.S +++ /dev/null @@ -1,73 +0,0 @@ -/* - * linux/arch/arm/lib/copypage-v4.S - * - * Copyright (C) 1995-1999 Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * ASM optimised string functions - * - * This is for CPUs with a writethrough cache and 'flush ID cache' is - * the only supported cache operation. - */ -#include -#include -#include - - .text - .align 5 -/* - * ARMv4 optimised copy_user_page - * - * Since we have writethrough caches, we don't have to worry about - * dirty data in the cache. However, we do have to ensure that - * subsequent reads are up to date. - */ -ENTRY(v4wt_copy_user_page) - stmfd sp!, {r4, lr} @ 2 - mov r2, #PAGE_SZ/64 @ 1 - ldmia r1!, {r3, r4, ip, lr} @ 4 -1: stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4+1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmia r1!, {r3, r4, ip, lr} @ 4 - subs r2, r2, #1 @ 1 - stmia r0!, {r3, r4, ip, lr} @ 4 - ldmneia r1!, {r3, r4, ip, lr} @ 4 - bne 1b @ 1 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache - ldmfd sp!, {r4, pc} @ 3 - - .align 5 -/* - * ARMv4 optimised clear_user_page - * - * Same story as above. - */ -ENTRY(v4wt_clear_user_page) - str lr, [sp, #-4]! - mov r1, #PAGE_SZ/64 @ 1 - mov r2, #0 @ 1 - mov r3, #0 @ 1 - mov ip, #0 @ 1 - mov lr, #0 @ 1 -1: stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - stmia r0!, {r2, r3, ip, lr} @ 4 - subs r1, r1, #1 @ 1 - bne 1b @ 1 - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache - ldr pc, [sp], #4 - - __INITDATA - - .type v4wt_user_fns, #object -ENTRY(v4wt_user_fns) - .long v4wt_clear_user_page - .long v4wt_copy_user_page - .size v4wt_user_fns, . - v4wt_user_fns diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c new file mode 100644 index 000000000000..d8ef39503ff0 --- /dev/null +++ b/arch/arm/mm/copypage-v4wt.c @@ -0,0 +1,77 @@ +/* + * linux/arch/arm/mm/copypage-v4wt.S + * + * Copyright (C) 1995-1999 Russell King + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This is for CPUs with a writethrough cache and 'flush ID cache' is + * the only supported cache operation. 
+ */ +#include + +#include + +/* + * ARMv4 optimised copy_user_page + * + * Since we have writethrough caches, we don't have to worry about + * dirty data in the cache. However, we do have to ensure that + * subsequent reads are up to date. + */ +void __attribute__((naked)) +v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +{ + asm("\ + stmfd sp!, {r4, lr} @ 2\n\ + mov r2, %0 @ 1\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ +1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmia r1!, {r3, r4, ip, lr} @ 4\n\ + subs r2, r2, #1 @ 1\n\ + stmia r0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + bne 1b @ 1\n\ + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ + ldmfd sp!, {r4, pc} @ 3" + : + : "I" (PAGE_SIZE / 64)); +} + +/* + * ARMv4 optimised clear_user_page + * + * Same story as above. + */ +void __attribute__((naked)) +v4wt_clear_user_page(void *kaddr, unsigned long vaddr) +{ + asm("\ + str lr, [sp, #-4]!\n\ + mov r1, %0 @ 1\n\ + mov r2, #0 @ 1\n\ + mov r3, #0 @ 1\n\ + mov ip, #0 @ 1\n\ + mov lr, #0 @ 1\n\ +1: stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + stmia r0!, {r2, r3, ip, lr} @ 4\n\ + subs r1, r1, #1 @ 1\n\ + bne 1b @ 1\n\ + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ + ldr pc, [sp], #4" + : + : "I" (PAGE_SIZE / 64)); +} + +struct cpu_user_fns v4wt_user_fns __initdata = { + .cpu_clear_user_page = v4wt_clear_user_page, + .cpu_copy_user_page = v4wt_copy_user_page, +}; diff --git a/arch/arm/mm/copypage-xsc3.S b/arch/arm/mm/copypage-xsc3.S deleted file mode 100644 index 9a2cb4332b4c..000000000000 --- a/arch/arm/mm/copypage-xsc3.S +++ /dev/null @@ -1,97 +0,0 @@ -/* - * linux/arch/arm/lib/copypage-xsc3.S - * - * Copyright (C) 2004 Intel Corp. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * Adapted for 3rd gen XScale core, no more mini-dcache - * Author: Matt Gilbert (matthew.m.gilbert@intel.com) - */ - -#include -#include -#include - -/* - * General note: - * We don't really want write-allocate cache behaviour for these functions - * since that will just eat through 8K of the cache. - */ - - .text - .align 5 -/* - * XSC3 optimised copy_user_page - * r0 = destination - * r1 = source - * r2 = virtual user address of ultimate destination page - * - * The source page may have some clean entries in the cache already, but we - * can safely ignore them - break_cow() will flush them out of the cache - * if we eventually end up using our copied page. 
- * - */ -ENTRY(xsc3_mc_copy_user_page) - stmfd sp!, {r4, r5, lr} - mov lr, #PAGE_SZ/64-1 - - pld [r1, #0] - pld [r1, #32] -1: pld [r1, #64] - pld [r1, #96] - -2: ldrd r2, [r1], #8 - mov ip, r0 - ldrd r4, [r1], #8 - mcr p15, 0, ip, c7, c6, 1 @ invalidate - strd r2, [r0], #8 - ldrd r2, [r1], #8 - strd r4, [r0], #8 - ldrd r4, [r1], #8 - strd r2, [r0], #8 - strd r4, [r0], #8 - ldrd r2, [r1], #8 - mov ip, r0 - ldrd r4, [r1], #8 - mcr p15, 0, ip, c7, c6, 1 @ invalidate - strd r2, [r0], #8 - ldrd r2, [r1], #8 - subs lr, lr, #1 - strd r4, [r0], #8 - ldrd r4, [r1], #8 - strd r2, [r0], #8 - strd r4, [r0], #8 - bgt 1b - beq 2b - - ldmfd sp!, {r4, r5, pc} - - .align 5 -/* - * XScale optimised clear_user_page - * r0 = destination - * r1 = virtual user address of ultimate destination page - */ -ENTRY(xsc3_mc_clear_user_page) - mov r1, #PAGE_SZ/32 - mov r2, #0 - mov r3, #0 -1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line - strd r2, [r0], #8 - strd r2, [r0], #8 - strd r2, [r0], #8 - strd r2, [r0], #8 - subs r1, r1, #1 - bne 1b - mov pc, lr - - __INITDATA - - .type xsc3_mc_user_fns, #object -ENTRY(xsc3_mc_user_fns) - .long xsc3_mc_clear_user_page - .long xsc3_mc_copy_user_page - .size xsc3_mc_user_fns, . - xsc3_mc_user_fns diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c new file mode 100644 index 000000000000..51ed502e5777 --- /dev/null +++ b/arch/arm/mm/copypage-xsc3.c @@ -0,0 +1,102 @@ +/* + * linux/arch/arm/mm/copypage-xsc3.S + * + * Copyright (C) 2004 Intel Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Adapted for 3rd gen XScale core, no more mini-dcache + * Author: Matt Gilbert (matthew.m.gilbert@intel.com) + */ +#include + +#include + +/* + * General note: + * We don't really want write-allocate cache behaviour for these functions + * since that will just eat through 8K of the cache. + */ + +/* + * XSC3 optimised copy_user_page + * r0 = destination + * r1 = source + * r2 = virtual user address of ultimate destination page + * + * The source page may have some clean entries in the cache already, but we + * can safely ignore them - break_cow() will flush them out of the cache + * if we eventually end up using our copied page. 
+ *
+ */
+void __attribute__((naked))
+xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
+{
+	asm("\
+	stmfd	sp!, {r4, r5, lr}		\n\
+	mov	lr, %0				\n\
+						\n\
+	pld	[r1, #0]			\n\
+	pld	[r1, #32]			\n\
+1:	pld	[r1, #64]			\n\
+	pld	[r1, #96]			\n\
+						\n\
+2:	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	mov	ip, r0				\n\
+	ldrd	r4, [r1], #8			\n\
+	mcr	p15, 0, ip, c7, c6, 1		@ invalidate\n\
+	strd	r2, [r0], #8			\n\
+	ldrd	r2, [r1], #8			\n\
+	subs	lr, lr, #1			\n\
+	strd	r4, [r0], #8			\n\
+	ldrd	r4, [r1], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r4, [r0], #8			\n\
+	bgt	1b				\n\
+	beq	2b				\n\
+						\n\
+	ldmfd	sp!, {r4, r5, pc}"
+	:
+	: "I" (PAGE_SIZE / 64 - 1));
+}
+
+/*
+ * XScale optimised clear_user_page
+ * r0 = destination
+ * r1 = virtual user address of ultimate destination page
+ */
+void __attribute__((naked))
+xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr)
+{
+	asm("\
+	mov	r1, %0				\n\
+	mov	r2, #0				\n\
+	mov	r3, #0				\n\
+1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	strd	r2, [r0], #8			\n\
+	subs	r1, r1, #1			\n\
+	bne	1b				\n\
+	mov	pc, lr"
+	:
+	: "I" (PAGE_SIZE / 32));
+}
+
+struct cpu_user_fns xsc3_mc_user_fns __initdata = {
+	.cpu_clear_user_page	= xsc3_mc_clear_user_page,
+	.cpu_copy_user_page	= xsc3_mc_copy_user_page,
+};

From 063b0a4207e43acbeff3d4b09f43e750e0212b48 Mon Sep 17 00:00:00 2001
From: Russell King
Date: Fri, 31 Oct 2008 15:08:35 +0000
Subject: [ARM] copypage: provide our own copy_user_highpage()

We used to override the copy_user_page() function. However, this is not
only inefficient, it also causes additional complexity for highmem
support, since we convert from a struct page to a kernel direct mapped
address and back to a struct page again.

Moreover, with highmem support, we end up pointlessly setting up kmap
entries for pages which we're going to remap. So, push the kmapping
down into the copypage implementation files where it's required.
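
For context, the generic fallback that an architecture-private
copy_user_highpage() replaces looks roughly like the sketch below. This
is an approximation of the 2.6.x linux/highmem.h version, using the
kmap_atomic()/KM_USER* API of that era; it is not part of this patch:

	#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
	static inline void copy_user_highpage(struct page *to, struct page *from,
		unsigned long vaddr, struct vm_area_struct *vma)
	{
		char *vfrom, *vto;

		/* map both pages, whether or not they live in highmem */
		vfrom = kmap_atomic(from, KM_USER0);
		vto = kmap_atomic(to, KM_USER1);
		copy_user_page(vto, vfrom, vaddr, to);
		kunmap_atomic(vfrom, KM_USER0);
		kunmap_atomic(vto, KM_USER1);
	}
	#endif

Defining __HAVE_ARCH_COPY_USER_HIGHPAGE suppresses this fallback, which
is what lets each copypage implementation decide when (and whether) a
kmap is actually needed.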
Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 23 ++++++++++------ arch/arm/mm/copypage-feroceon.c | 23 ++++++++++++---- arch/arm/mm/copypage-v3.c | 23 ++++++++++++---- arch/arm/mm/copypage-v4mc.c | 21 ++++++++------ arch/arm/mm/copypage-v4wb.c | 25 ++++++++++++----- arch/arm/mm/copypage-v4wt.c | 23 ++++++++++++---- arch/arm/mm/copypage-v6.c | 61 +++++++++++++++++++++++++---------------- arch/arm/mm/copypage-xsc3.c | 24 +++++++++++----- arch/arm/mm/copypage-xscale.c | 19 +++++++------ arch/arm/mm/proc-syms.c | 2 +- 10 files changed, 161 insertions(+), 83 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index bed1c0a00368..1581b8cf8f33 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -108,30 +108,35 @@ #error Unknown user operations model #endif +struct page; + struct cpu_user_fns { void (*cpu_clear_user_page)(void *p, unsigned long user); - void (*cpu_copy_user_page)(void *to, const void *from, - unsigned long user); + void (*cpu_copy_user_highpage)(struct page *to, struct page *from, + unsigned long vaddr); }; #ifdef MULTI_USER extern struct cpu_user_fns cpu_user; -#define __cpu_clear_user_page cpu_user.cpu_clear_user_page -#define __cpu_copy_user_page cpu_user.cpu_copy_user_page +#define __cpu_clear_user_page cpu_user.cpu_clear_user_page +#define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage #else -#define __cpu_clear_user_page __glue(_USER,_clear_user_page) -#define __cpu_copy_user_page __glue(_USER,_copy_user_page) +#define __cpu_clear_user_page __glue(_USER,_clear_user_page) +#define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) extern void __cpu_clear_user_page(void *p, unsigned long user); -extern void __cpu_copy_user_page(void *to, const void *from, - unsigned long user); +extern void __cpu_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr); #endif #define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) -#define copy_user_page(to,from,vaddr,pg) __cpu_copy_user_page(to, from, vaddr) + +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +#define copy_user_highpage(to,from,vaddr,vma) \ + __cpu_copy_user_highpage(to, from, vaddr) #define clear_page(page) memzero((void *)(page), PAGE_SIZE) extern void copy_page(void *to, const void *from); diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index c8347670ab00..edd71686b8df 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -7,15 +7,14 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * - * This handles copy_user_page and clear_user_page on Feroceon + * This handles copy_user_highpage and clear_user_page on Feroceon * more optimally than the generic implementations. 
*/ #include +#include -#include - -void __attribute__((naked)) -feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +feroceon_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4-r9, lr} \n\ @@ -68,6 +67,18 @@ feroceon_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE)); } +void feroceon_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + feroceon_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + void __attribute__((naked)) feroceon_clear_user_page(void *kaddr, unsigned long vaddr) { @@ -95,6 +106,6 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns feroceon_user_fns __initdata = { .cpu_clear_user_page = feroceon_clear_user_page, - .cpu_copy_user_page = feroceon_copy_user_page, + .cpu_copy_user_highpage = feroceon_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 184911089e6c..52df8f04d3f7 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -8,16 +8,15 @@ * published by the Free Software Foundation. */ #include - -#include +#include /* - * ARMv3 optimised copy_user_page + * ARMv3 optimised copy_user_highpage * * FIXME: do we need to handle cache stuff... */ -void __attribute__((naked)) -v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v3_copy_user_page(void *kto, const void *kfrom) { asm("\n\ stmfd sp!, {r4, lr} @ 2\n\ @@ -38,6 +37,18 @@ v3_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "r" (kfrom), "r" (kto), "I" (PAGE_SIZE / 64)); } +void v3_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v3_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv3 optimised clear_user_page * @@ -65,5 +76,5 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v3_user_fns __initdata = { .cpu_clear_user_page = v3_clear_user_page, - .cpu_copy_user_page = v3_copy_user_page, + .cpu_copy_user_highpage = v3_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 8d33e2549344..a7dc838fee76 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -15,8 +15,8 @@ */ #include #include +#include -#include #include #include #include @@ -33,7 +33,7 @@ static DEFINE_SPINLOCK(minicache_lock); /* - * ARMv4 mini-dcache optimised copy_user_page + * ARMv4 mini-dcache optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -42,7 +42,7 @@ static DEFINE_SPINLOCK(minicache_lock); * * Note: We rely on all ARMv4 processors implementing the "invalidate D line" * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. + * own copy_user_highpage that does the right thing. 
*/ static void __attribute__((naked)) mc_copy_user_page(void *from, void *to) @@ -68,21 +68,24 @@ mc_copy_user_page(void *from, void *to) : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); } -void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void v4_mc_copy_user_highpage(struct page *from, struct page *to, + unsigned long vaddr) { - struct page *page = virt_to_page(kfrom); + void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); - set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); + set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(0xffff8000); mc_copy_user_page((void *)0xffff8000, kto); spin_unlock(&minicache_lock); + + kunmap_atomic(kto, KM_USER1); } /* @@ -113,5 +116,5 @@ v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4_mc_user_fns __initdata = { .cpu_clear_user_page = v4_mc_clear_user_page, - .cpu_copy_user_page = v4_mc_copy_user_page, + .cpu_copy_user_highpage = v4_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 230210822961..186a68a794a9 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -8,11 +8,10 @@ * published by the Free Software Foundation. */ #include - -#include +#include /* - * ARMv4 optimised copy_user_page + * ARMv4 optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -21,10 +20,10 @@ * * Note: We rely on all ARMv4 processors implementing the "invalidate D line" * instruction. If your processor does not supply this, you have to write your - * own copy_user_page that does the right thing. + * own copy_user_highpage that does the right thing. */ -void __attribute__((naked)) -v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v4wb_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ @@ -48,6 +47,18 @@ v4wb_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64)); } +void v4wb_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v4wb_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv4 optimised clear_user_page * @@ -79,5 +90,5 @@ v4wb_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4wb_user_fns __initdata = { .cpu_clear_user_page = v4wb_clear_user_page, - .cpu_copy_user_page = v4wb_copy_user_page, + .cpu_copy_user_highpage = v4wb_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index d8ef39503ff0..86c2cfdbde03 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -11,18 +11,17 @@ * the only supported cache operation. */ #include - -#include +#include /* - * ARMv4 optimised copy_user_page + * ARMv4 optimised copy_user_highpage * * Since we have writethrough caches, we don't have to worry about * dirty data in the cache. However, we do have to ensure that * subsequent reads are up to date. 
*/ -void __attribute__((naked)) -v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +v4wt_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, lr} @ 2\n\ @@ -44,6 +43,18 @@ v4wt_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64)); } +void v4wt_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + v4wt_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * ARMv4 optimised clear_user_page * @@ -73,5 +84,5 @@ v4wt_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns v4wt_user_fns __initdata = { .cpu_clear_user_page = v4wt_clear_user_page, - .cpu_copy_user_page = v4wt_copy_user_page, + .cpu_copy_user_highpage = v4wt_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 0e21c0767580..2ea75d0f5048 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -10,8 +10,8 @@ #include #include #include +#include -#include #include #include #include @@ -33,9 +33,16 @@ static DEFINE_SPINLOCK(v6_lock); * Copy the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of these pages. */ -static void v6_copy_user_page_nonaliasing(void *kto, const void *kfrom, unsigned long vaddr) +static void v6_copy_user_highpage_nonaliasing(struct page *to, + struct page *from, unsigned long vaddr) { + void *kto, *kfrom; + + kfrom = kmap_atomic(from, KM_USER0); + kto = kmap_atomic(to, KM_USER1); copy_page(kto, kfrom); + kunmap_atomic(kto, KM_USER1); + kunmap_atomic(kfrom, KM_USER0); } /* @@ -48,26 +55,32 @@ static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) } /* - * Copy the page, taking account of the cache colour. + * Discard data in the kernel mapping for the new page. + * FIXME: needs this MCRR to be supported. */ -static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned long vaddr) +static void discard_old_kernel_data(void *kto) { - unsigned int offset = CACHE_COLOUR(vaddr); - unsigned long from, to; - struct page *page = virt_to_page(kfrom); - - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); - - /* - * Discard data in the kernel mapping for the new page. - * FIXME: needs this MCRR to be supported. - */ __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" : : "r" (kto), "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) : "cc"); +} + +/* + * Copy the page, taking account of the cache colour. 
+ */ +static void v6_copy_user_highpage_aliasing(struct page *to, + struct page *from, unsigned long vaddr) +{ + unsigned int offset = CACHE_COLOUR(vaddr); + unsigned long kfrom, kto; + + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); + + /* FIXME: not highmem safe */ + discard_old_kernel_data(page_address(to)); /* * Now copy the page using the same cache colour as the @@ -75,16 +88,16 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo */ spin_lock(&v6_lock); - set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(__pa(kfrom) >> PAGE_SHIFT, PAGE_KERNEL), 0); - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kto) >> PAGE_SHIFT, PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(from_address) + offset, pfn_pte(page_to_pfn(from), PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(to), PAGE_KERNEL), 0); - from = from_address + (offset << PAGE_SHIFT); - to = to_address + (offset << PAGE_SHIFT); + kfrom = from_address + (offset << PAGE_SHIFT); + kto = to_address + (offset << PAGE_SHIFT); - flush_tlb_kernel_page(from); - flush_tlb_kernel_page(to); + flush_tlb_kernel_page(kfrom); + flush_tlb_kernel_page(kto); - copy_page((void *)to, (void *)from); + copy_page((void *)kto, (void *)kfrom); spin_unlock(&v6_lock); } @@ -124,14 +137,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) struct cpu_user_fns v6_user_fns __initdata = { .cpu_clear_user_page = v6_clear_user_page_nonaliasing, - .cpu_copy_user_page = v6_copy_user_page_nonaliasing, + .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, }; static int __init v6_userpage_init(void) { if (cache_is_vipt_aliasing()) { cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; - cpu_user.cpu_copy_user_page = v6_copy_user_page_aliasing; + cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; } return 0; diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 51ed502e5777..caa697ccd8db 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -11,8 +11,7 @@ * Author: Matt Gilbert (matthew.m.gilbert@intel.com) */ #include - -#include +#include /* * General note: @@ -21,18 +20,17 @@ */ /* - * XSC3 optimised copy_user_page + * XSC3 optimised copy_user_highpage * r0 = destination * r1 = source - * r2 = virtual user address of ultimate destination page * * The source page may have some clean entries in the cache already, but we * can safely ignore them - break_cow() will flush them out of the cache * if we eventually end up using our copied page. 
* */ -void __attribute__((naked)) -xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +static void __attribute__((naked)) +xsc3_mc_copy_user_page(void *kto, const void *kfrom) { asm("\ stmfd sp!, {r4, r5, lr} \n\ @@ -72,6 +70,18 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) : "I" (PAGE_SIZE / 64 - 1)); } +void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) +{ + void *kto, *kfrom; + + kto = kmap_atomic(to, KM_USER0); + kfrom = kmap_atomic(from, KM_USER1); + xsc3_mc_copy_user_page(kto, kfrom); + kunmap_atomic(kfrom, KM_USER1); + kunmap_atomic(kto, KM_USER0); +} + /* * XScale optimised clear_user_page * r0 = destination @@ -98,5 +108,5 @@ xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns xsc3_mc_user_fns __initdata = { .cpu_clear_user_page = xsc3_mc_clear_user_page, - .cpu_copy_user_page = xsc3_mc_copy_user_page, + .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index bad49331bbf9..01bafafce181 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -15,8 +15,8 @@ */ #include #include +#include -#include #include #include #include @@ -35,7 +35,7 @@ static DEFINE_SPINLOCK(minicache_lock); /* - * XScale mini-dcache optimised copy_user_page + * XScale mini-dcache optimised copy_user_highpage * * We flush the destination cache lines just before we write the data into the * corresponding address. Since the Dcache is read-allocate, this removes the @@ -90,21 +90,24 @@ mc_copy_user_page(void *from, void *to) : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); } -void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr) +void xscale_mc_copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr) { - struct page *page = virt_to_page(kfrom); + void *kto = kmap_atomic(to, KM_USER1); - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - __flush_dcache_page(page_mapping(page), page); + if (test_and_clear_bit(PG_dcache_dirty, &from->flags)) + __flush_dcache_page(page_mapping(from), from); spin_lock(&minicache_lock); - set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0); + set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(page_to_pfn(from), minicache_pgprot), 0); flush_tlb_kernel_page(COPYPAGE_MINICACHE); mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto); spin_unlock(&minicache_lock); + + kunmap_atomic(kto, KM_USER1); } /* @@ -133,5 +136,5 @@ xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) struct cpu_user_fns xscale_mc_user_fns __initdata = { .cpu_clear_user_page = xscale_mc_clear_user_page, - .cpu_copy_user_page = xscale_mc_copy_user_page, + .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index 2b5ba396e3a6..b9743e6416c4 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -34,7 +34,7 @@ EXPORT_SYMBOL(cpu_cache); #ifdef CONFIG_MMU #ifndef MULTI_USER EXPORT_SYMBOL(__cpu_clear_user_page); -EXPORT_SYMBOL(__cpu_copy_user_page); +EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); #endif -- cgit v1.2.3 From 303c6443659bc1dc911356f5de149f48ff1d97b8 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 31 Oct 2008 16:32:19 +0000 Subject: [ARM] clearpage: provide our own clear_user_highpage() For similar reasons as copy_user_page(), we want to avoid the additional kmap_atomic 
if it's unnecessary. Signed-off-by: Russell King --- arch/arm/include/asm/page.h | 11 ++++++----- arch/arm/mm/copypage-feroceon.c | 20 ++++++++++---------- arch/arm/mm/copypage-v3.c | 13 +++++++------ arch/arm/mm/copypage-v4mc.c | 28 ++++++++++++++-------------- arch/arm/mm/copypage-v4wb.c | 28 ++++++++++++++-------------- arch/arm/mm/copypage-v4wt.c | 24 ++++++++++++------------ arch/arm/mm/copypage-v6.c | 23 +++++++++-------------- arch/arm/mm/copypage-xsc3.c | 25 +++++++++++++------------ arch/arm/mm/copypage-xscale.c | 26 ++++++++++++++------------ arch/arm/mm/proc-syms.c | 2 +- 10 files changed, 100 insertions(+), 100 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h index 1581b8cf8f33..77747df713b4 100644 --- a/arch/arm/include/asm/page.h +++ b/arch/arm/include/asm/page.h @@ -111,7 +111,7 @@ struct page; struct cpu_user_fns { - void (*cpu_clear_user_page)(void *p, unsigned long user); + void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr); void (*cpu_copy_user_highpage)(struct page *to, struct page *from, unsigned long vaddr); }; @@ -119,20 +119,21 @@ struct cpu_user_fns { #ifdef MULTI_USER extern struct cpu_user_fns cpu_user; -#define __cpu_clear_user_page cpu_user.cpu_clear_user_page +#define __cpu_clear_user_highpage cpu_user.cpu_clear_user_highpage #define __cpu_copy_user_highpage cpu_user.cpu_copy_user_highpage #else -#define __cpu_clear_user_page __glue(_USER,_clear_user_page) +#define __cpu_clear_user_highpage __glue(_USER,_clear_user_highpage) #define __cpu_copy_user_highpage __glue(_USER,_copy_user_highpage) -extern void __cpu_clear_user_page(void *p, unsigned long user); +extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr); extern void __cpu_copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr); #endif -#define clear_user_page(addr,vaddr,pg) __cpu_clear_user_page(addr, vaddr) +#define clear_user_highpage(page,vaddr) \ + __cpu_clear_user_highpage(page, vaddr) #define __HAVE_ARCH_COPY_USER_HIGHPAGE #define copy_user_highpage(to,from,vaddr,vma) \ diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index edd71686b8df..c3651b2939c7 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -79,12 +79,11 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, kunmap_atomic(kto, KM_USER0); } -void __attribute__((naked)) -feroceon_clear_user_page(void *kaddr, unsigned long vaddr) +void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - stmfd sp!, {r4-r7, lr} \n\ - mov r1, %0 \n\ + mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ mov r4, #0 \n\ @@ -93,19 +92,20 @@ feroceon_clear_user_page(void *kaddr, unsigned long vaddr) mov r7, #0 \n\ mov ip, #0 \n\ mov lr, #0 \n\ -1: stmia r0, {r2-r7, ip, lr} \n\ +1: stmia %0, {r2-r7, ip, lr} \n\ subs r1, r1, #1 \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ add r0, r0, #32 \n\ bne 1b \n\ - mcr p15, 0, r1, c7, c10, 4 @ drain WB\n\ - ldmfd sp!, {r4-r7, pc}" + mcr p15, 0, r1, c7, c10, 4 @ drain WB" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns feroceon_user_fns __initdata = { - .cpu_clear_user_page = feroceon_clear_user_page, + .cpu_clear_user_highpage = 
feroceon_clear_user_highpage, .cpu_copy_user_highpage = feroceon_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 52df8f04d3f7..13ce0baa6ba5 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -54,10 +54,10 @@ void v3_copy_user_highpage(struct page *to, struct page *from, * * FIXME: do we need to handle cache stuff... */ -void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) +void v3_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\n\ - str lr, [sp, #-4]!\n\ mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ @@ -68,13 +68,14 @@ void __attribute__((naked)) v3_clear_user_page(void *kaddr, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ - bne 1b @ 1\n\ - ldr pc, [sp], #4" + bne 1b @ 1" : - : "r" (kaddr), "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v3_user_fns __initdata = { - .cpu_clear_user_page = v3_clear_user_page, + .cpu_clear_user_highpage = v3_clear_user_highpage, .cpu_copy_user_highpage = v3_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index a7dc838fee76..a5eae503a34f 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -91,30 +91,30 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to, /* * ARMv4 optimised clear_user_page */ -void __attribute__((naked)) -v4_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - asm volatile( - "str lr, [sp, #-4]!\n\ + void *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ mov r1, %0 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ - bne 1b @ 1\n\ - ldr pc, [sp], #4" + bne 1b @ 1" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4_mc_user_fns __initdata = { - .cpu_clear_user_page = v4_mc_clear_user_page, + .cpu_clear_user_highpage = v4_mc_clear_user_highpage, .cpu_copy_user_highpage = v4_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 186a68a794a9..9144a96037bf 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -64,31 +64,31 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, * * Same story as above. 
*/ -void __attribute__((naked)) -v4wb_clear_user_page(void *kaddr, unsigned long vaddr) +void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - str lr, [sp, #-4]!\n\ - mov r1, %0 @ 1\n\ + mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ - ldr pc, [sp], #4" + mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4wb_user_fns __initdata = { - .cpu_clear_user_page = v4wb_clear_user_page, + .cpu_clear_user_highpage = v4wb_clear_user_highpage, .cpu_copy_user_highpage = v4wb_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 86c2cfdbde03..b8a345d6e77e 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -60,29 +60,29 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, * * Same story as above. */ -void __attribute__((naked)) -v4wt_clear_user_page(void *kaddr, unsigned long vaddr) +void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - str lr, [sp, #-4]!\n\ - mov r1, %0 @ 1\n\ + mov r1, %1 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ mov lr, #0 @ 1\n\ -1: stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ - stmia r0!, {r2, r3, ip, lr} @ 4\n\ +1: stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ + stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ - ldr pc, [sp], #4" + mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" : - : "I" (PAGE_SIZE / 64)); + : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "r1", "r2", "r3", "ip", "lr"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns v4wt_user_fns __initdata = { - .cpu_clear_user_page = v4wt_clear_user_page, + .cpu_clear_user_highpage = v4wt_clear_user_highpage, .cpu_copy_user_highpage = v4wt_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 2ea75d0f5048..4127a7bddfe5 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c @@ -49,9 +49,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, * Clear the user page. No aliasing to deal with so we can just * attack the kernel's existing mapping of this page. 
*/ -static void v6_clear_user_page_nonaliasing(void *kaddr, unsigned long vaddr) +static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); clear_page(kaddr); + kunmap_atomic(kaddr, KM_USER0); } /* @@ -107,20 +109,13 @@ static void v6_copy_user_highpage_aliasing(struct page *to, * so remap the kernel page into the same cache colour as the user * page. */ -static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) +static void v6_clear_user_highpage_aliasing(struct page *page, unsigned long vaddr) { unsigned int offset = CACHE_COLOUR(vaddr); unsigned long to = to_address + (offset << PAGE_SHIFT); - /* - * Discard data in the kernel mapping for the new page - * FIXME: needs this MCRR to be supported. - */ - __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" - : - : "r" (kaddr), - "r" ((unsigned long)kaddr + PAGE_SIZE - L1_CACHE_BYTES) - : "cc"); + /* FIXME: not highmem safe */ + discard_old_kernel_data(page_address(page)); /* * Now clear the page using the same cache colour as @@ -128,7 +123,7 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) */ spin_lock(&v6_lock); - set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(__pa(kaddr) >> PAGE_SHIFT, PAGE_KERNEL), 0); + set_pte_ext(TOP_PTE(to_address) + offset, pfn_pte(page_to_pfn(page), PAGE_KERNEL), 0); flush_tlb_kernel_page(to); clear_page((void *)to); @@ -136,14 +131,14 @@ static void v6_clear_user_page_aliasing(void *kaddr, unsigned long vaddr) } struct cpu_user_fns v6_user_fns __initdata = { - .cpu_clear_user_page = v6_clear_user_page_nonaliasing, + .cpu_clear_user_highpage = v6_clear_user_highpage_nonaliasing, .cpu_copy_user_highpage = v6_copy_user_highpage_nonaliasing, }; static int __init v6_userpage_init(void) { if (cache_is_vipt_aliasing()) { - cpu_user.cpu_clear_user_page = v6_clear_user_page_aliasing; + cpu_user.cpu_clear_user_highpage = v6_clear_user_highpage_aliasing; cpu_user.cpu_copy_user_highpage = v6_copy_user_highpage_aliasing; } diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index caa697ccd8db..0e7cb325ca4c 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -87,26 +87,27 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, * r0 = destination * r1 = virtual user address of ultimate destination page */ -void __attribute__((naked)) -xsc3_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm("\ - mov r1, %0 \n\ + mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ -1: mcr p15, 0, r0, c7, c6, 1 @ invalidate line\n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ +1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ subs r1, r1, #1 \n\ - bne 1b \n\ - mov pc, lr" + bne 1b" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns xsc3_mc_user_fns __initdata = { - .cpu_clear_user_page = xsc3_mc_clear_user_page, + .cpu_clear_user_highpage = xsc3_mc_clear_user_highpage, .cpu_copy_user_highpage = xsc3_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 01bafafce181..aa9f2ff9dce0 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ 
-113,28 +113,30 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, /* * XScale optimised clear_user_page */ -void __attribute__((naked)) -xscale_mc_clear_user_page(void *kaddr, unsigned long vaddr) +void +xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { + void *kaddr = kmap_atomic(page, KM_USER0); asm volatile( - "mov r1, %0 \n\ + "mov r1, %1 \n\ mov r2, #0 \n\ mov r3, #0 \n\ -1: mov ip, r0 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ - strd r2, [r0], #8 \n\ +1: mov ip, %0 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ + strd r2, [%0], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ - bne 1b \n\ - mov pc, lr" + bne 1b" : - : "I" (PAGE_SIZE / 32)); + : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "r1", "r2", "r3", "ip"); + kunmap_atomic(kaddr, KM_USER0); } struct cpu_user_fns xscale_mc_user_fns __initdata = { - .cpu_clear_user_page = xscale_mc_clear_user_page, + .cpu_clear_user_highpage = xscale_mc_clear_user_highpage, .cpu_copy_user_highpage = xscale_mc_copy_user_highpage, }; diff --git a/arch/arm/mm/proc-syms.c b/arch/arm/mm/proc-syms.c index b9743e6416c4..4ad3bf291ad3 100644 --- a/arch/arm/mm/proc-syms.c +++ b/arch/arm/mm/proc-syms.c @@ -33,7 +33,7 @@ EXPORT_SYMBOL(cpu_cache); #ifdef CONFIG_MMU #ifndef MULTI_USER -EXPORT_SYMBOL(__cpu_clear_user_page); +EXPORT_SYMBOL(__cpu_clear_user_highpage); EXPORT_SYMBOL(__cpu_copy_user_highpage); #else EXPORT_SYMBOL(cpu_user); -- cgit v1.2.3 From 43ae286b7d4d8c4983bc263ef2e3cccc10dedb2b Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 4 Nov 2008 02:42:27 -0500 Subject: [ARM] fix a couple clear_user_highpage assembly constraints In all cases, kaddr is assigned to an input register even though it is modified by the assembly code. Let's assign a new variable to the modified value and mark those inline asm statements volatile; otherwise they get optimized away, because the output variable is not otherwise used. Also fix a few conversion errors in copypage-feroceon.c and copypage-v4mc.c.
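[ Illustration, not part of the patch: the corrected constraint pattern is easiest to see in a minimal, self-contained sketch. The helper below is hypothetical; only the operand handling matters. Because the stmia instructions advance %0, the pointer must be an output operand ("=r" on a scratch variable), tied back to the kaddr input via the "0" matching constraint, and the asm must be volatile so it cannot be optimized away when that output goes unused:

static void clear_page_sketch(void *kaddr)
{
	void *ptr;	/* receives the advanced %0; never read afterwards */
	asm volatile("\
	mov	r1, %2			@ loop count = PAGE_SIZE / 32\n\
	mov	r2, #0\n\
	mov	r3, #0\n\
1:	stmia	%0!, {r2, r3}		@ store 8 zero bytes, advance %0\n\
	stmia	%0!, {r2, r3}\n\
	stmia	%0!, {r2, r3}\n\
	stmia	%0!, {r2, r3}\n\
	subs	r1, r1, #1\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3");
} ]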
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/copypage-feroceon.c | 12 ++++++------ arch/arm/mm/copypage-v3.c | 10 +++++----- arch/arm/mm/copypage-v4mc.c | 8 ++++---- arch/arm/mm/copypage-v4wb.c | 10 +++++----- arch/arm/mm/copypage-v4wt.c | 10 +++++----- arch/arm/mm/copypage-xsc3.c | 10 +++++----- arch/arm/mm/copypage-xscale.c | 8 ++++---- 7 files changed, 34 insertions(+), 34 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index c3651b2939c7..c3ba6a94da0c 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -81,9 +81,9 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 \n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile ("\ + mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ mov r4, #0 \n\ @@ -95,11 +95,11 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) 1: stmia %0, {r2-r7, ip, lr} \n\ subs r1, r1, #1 \n\ mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ + add %0, %0, #32 \n\ bne 1b \n\ mcr p15, 0, r1, c7, c10, 4 @ drain WB" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index 13ce0baa6ba5..70ed96c8af8e 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c @@ -56,9 +56,9 @@ void v3_copy_user_highpage(struct page *to, struct page *from, */ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\n\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\n\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -69,8 +69,8 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index a5eae503a34f..bdb5fd983b15 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -93,9 +93,9 @@ void v4_mc_copy_user_highpage(struct page *from, struct page *to, */ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); asm volatile("\ - mov r1, %0 @ 1\n\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -108,8 +108,8 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) stmia %0!, {r2, r3, ip, lr} @ 4\n\ subs r1, r1, #1 @ 1\n\ bne 1b @ 1" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 9144a96037bf..3ec93dab7656 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -66,9 +66,9 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, */ void v4wb_clear_user_highpage(struct page *page, unsigned long 
vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -82,8 +82,8 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index b8a345d6e77e..0f1188efae45 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -62,9 +62,9 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, */ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 @ 1\n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile("\ + mov r1, %2 @ 1\n\ mov r2, #0 @ 1\n\ mov r3, #0 @ 1\n\ mov ip, #0 @ 1\n\ @@ -76,8 +76,8 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 @ 1\n\ bne 1b @ 1\n\ mcr p15, 0, r2, c7, c7, 0 @ flush ID cache" - : - : "r" (kaddr), "I" (PAGE_SIZE / 64) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 64) : "r1", "r2", "r3", "ip", "lr"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 0e7cb325ca4c..39a994542cad 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -89,9 +89,9 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, */ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); - asm("\ - mov r1, %1 \n\ + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); + asm volatile ("\ + mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ @@ -101,8 +101,8 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) strd r2, [%0], #8 \n\ subs r1, r1, #1 \n\ bne 1b" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3"); kunmap_atomic(kaddr, KM_USER0); } diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index aa9f2ff9dce0..d18f2397ee2d 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -116,9 +116,9 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, void xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { - void *kaddr = kmap_atomic(page, KM_USER0); + void *ptr, *kaddr = kmap_atomic(page, KM_USER0); asm volatile( - "mov r1, %1 \n\ + "mov r1, %2 \n\ mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ @@ -130,8 +130,8 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bne 1b" - : - : "r" (kaddr), "I" (PAGE_SIZE / 32) + : "=r" (ptr) + : "0" (kaddr), "I" (PAGE_SIZE / 32) : "r1", "r2", "r3", "ip"); kunmap_atomic(kaddr, KM_USER0); } -- cgit v1.2.3 From 4b5f32cee0cce7b9783ced5cbeabd17aa53c51fb Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 6 Oct 2008 13:24:40 -0400 Subject: [ARM] rationalize memory configuration code some more Currently there are two instances of struct meminfo: one in kernel/setup.c marked __initdata, and another in mm/init.c with permanent storage. 
Let's keep only the latter to directly populate the permanent version from arm_add_memory(). Also move common validation tests between the MMU and non-MMU cases into arm_add_memory() to remove some duplication. Protection against overflowing the membank array is also moved in there in order to cover the kernel cmdline parsing path as well. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 6 ++++-- arch/arm/kernel/setup.c | 37 +++++++++++++++++++++---------------- arch/arm/mm/init.c | 12 ++++++------ arch/arm/mm/mm.h | 2 +- arch/arm/mm/mmu.c | 29 +++++++++++------------------ arch/arm/mm/nommu.c | 18 ++---------------- 6 files changed, 45 insertions(+), 59 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index a65413ba121d..f2cd18a0932b 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -209,9 +209,11 @@ struct meminfo { struct membank bank[NR_BANKS]; }; +extern struct meminfo meminfo; + #define for_each_nodebank(iter,mi,no) \ - for (iter = 0; iter < mi->nr_banks; iter++) \ - if (mi->bank[iter].node == no) + for (iter = 0; iter < (mi)->nr_banks; iter++) \ + if ((mi)->bank[iter].node == no) #define bank_pfn_start(bank) __phys_to_pfn((bank)->start) #define bank_pfn_end(bank) __phys_to_pfn((bank)->start + (bank)->size) diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 1f1eecca7f55..d21786712c88 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -59,7 +59,7 @@ static int __init fpe_setup(char *line) __setup("fpe=", fpe_setup); #endif -extern void paging_init(struct meminfo *, struct machine_desc *desc); +extern void paging_init(struct machine_desc *desc); extern void reboot_setup(char *str); extern void _text, _etext, __data_start, _edata, _end; @@ -112,7 +112,6 @@ static struct stack stacks[NR_CPUS]; char elf_platform[ELF_PLATFORM_SIZE]; EXPORT_SYMBOL(elf_platform); -static struct meminfo meminfo __initdata = { 0, }; static const char *cpu_name; static const char *machine_name; static char __initdata command_line[COMMAND_LINE_SIZE]; @@ -367,21 +366,34 @@ static struct machine_desc * __init setup_machine(unsigned int nr) return list; } -static void __init arm_add_memory(unsigned long start, unsigned long size) +static int __init arm_add_memory(unsigned long start, unsigned long size) { - struct membank *bank; + struct membank *bank = &meminfo.bank[meminfo.nr_banks]; + + if (meminfo.nr_banks >= NR_BANKS) { + printk(KERN_CRIT "NR_BANKS too low, " + "ignoring memory at %#lx\n", start); + return -EINVAL; + } /* * Ensure that start/size are aligned to a page boundary. * Size is appropriately rounded down, start is rounded up. */ size -= start & ~PAGE_MASK; - - bank = &meminfo.bank[meminfo.nr_banks++]; - bank->start = PAGE_ALIGN(start); bank->size = size & PAGE_MASK; bank->node = PHYS_TO_NID(start); + + /* + * Check whether this memory region has non-zero size or + * invalid node number.
+ */ + if (bank->size == 0 || bank->node >= MAX_NUMNODES) + return -EINVAL; + + meminfo.nr_banks++; + return 0; } /* @@ -539,14 +551,7 @@ __tagtable(ATAG_CORE, parse_tag_core); static int __init parse_tag_mem32(const struct tag *tag) { - if (meminfo.nr_banks >= NR_BANKS) { - printk(KERN_WARNING - "Ignoring memory bank 0x%08x size %dKB\n", - tag->u.mem.start, tag->u.mem.size / 1024); - return -EINVAL; - } - arm_add_memory(tag->u.mem.start, tag->u.mem.size); - return 0; + return arm_add_memory(tag->u.mem.start, tag->u.mem.size); } __tagtable(ATAG_MEM, parse_tag_mem32); @@ -718,7 +723,7 @@ void __init setup_arch(char **cmdline_p) memcpy(boot_command_line, from, COMMAND_LINE_SIZE); boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; parse_cmdline(cmdline_p, from); - paging_init(&meminfo, mdesc); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); #ifdef CONFIG_SMP diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 82c4b4217989..b43da2479fa0 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -64,10 +64,11 @@ static int __init parse_tag_initrd2(const struct tag *tag) __tagtable(ATAG_INITRD2, parse_tag_initrd2); /* - * This is used to pass memory configuration data from paging_init - * to mem_init, and by show_mem() to skip holes in the memory map. + * This keeps memory configuration data used by a couple memory + * initialization functions, as well as show_mem() for the skipping + * of holes in the memory map. It is populated by arm_add_memory(). */ -static struct meminfo meminfo = { 0, }; +struct meminfo meminfo; void show_mem(void) { @@ -331,13 +332,12 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) free_area_init_node(node, zone_size, start_pfn, zhole_size); } -void __init bootmem_init(struct meminfo *mi) +void __init bootmem_init(void) { + struct meminfo *mi = &meminfo; unsigned long memend_pfn = 0; int node, initrd_node; - memcpy(&meminfo, mi, sizeof(meminfo)); - /* * Locate which node contains the ramdisk image, if any. */ diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 5d9f53907b4e..94367bdbb5a8 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -32,7 +32,7 @@ struct meminfo; struct pglist_data; void __init create_mapping(struct map_desc *md); -void __init bootmem_init(struct meminfo *mi); +void __init bootmem_init(void); void reserve_node_zero(struct pglist_data *pgdat); extern void _text, _stext, _etext, __data_start, _end, __init_begin, __init_end; diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 7f36c825718d..6870805c31dd 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -653,13 +653,6 @@ __early_param("vmalloc=", early_vmalloc); static int __init check_membank_valid(struct membank *mb) { - /* - * Check whether this memory region has non-zero size or - * invalid node number. - */ - if (mb->size == 0 || mb->node >= MAX_NUMNODES) - return 0; - /* * Check whether this memory region would entirely overlap * the vmalloc area. 
@@ -689,18 +682,18 @@ static int __init check_membank_valid(struct membank *mb) return 1; } -static void __init sanity_check_meminfo(struct meminfo *mi) +static void __init sanity_check_meminfo(void) { int i, j; - for (i = 0, j = 0; i < mi->nr_banks; i++) { - if (check_membank_valid(&mi->bank[i])) - mi->bank[j++] = mi->bank[i]; + for (i = 0, j = 0; i < meminfo.nr_banks; i++) { + if (check_membank_valid(&meminfo.bank[i])) + meminfo.bank[j++] = meminfo.bank[i]; } - mi->nr_banks = j; + meminfo.nr_banks = j; } -static inline void prepare_page_table(struct meminfo *mi) +static inline void prepare_page_table(void) { unsigned long addr; @@ -721,7 +714,7 @@ static inline void prepare_page_table(struct meminfo *mi) * Clear out all the kernel space mappings, except for the first * memory bank, up to the end of the vmalloc region. */ - for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size); + for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0])); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } @@ -880,14 +873,14 @@ static void __init devicemaps_init(struct machine_desc *mdesc) * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ -void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) +void __init paging_init(struct machine_desc *mdesc) { void *zero_page; build_mem_type_table(); - sanity_check_meminfo(mi); - prepare_page_table(mi); - bootmem_init(mi); + sanity_check_meminfo(); + prepare_page_table(); + bootmem_init(); devicemaps_init(mdesc); top_pmd = pmd_off_k(0xffff0000); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 07b62b238979..c085f4e8248b 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -41,27 +41,13 @@ void __init reserve_node_zero(pg_data_t *pgdat) BOOTMEM_DEFAULT); } -static void __init sanity_check_meminfo(struct meminfo *mi) -{ - int i, j; - - for (i = 0, j = 0; i < mi->nr_banks; i++) { - struct membank *mb = &mi->bank[i]; - - if (mb->size != 0 && mb->node < MAX_NUMNODES) - mi->bank[j++] = mi->bank[i]; - } - mi->nr_banks = j; -} - /* * paging_init() sets up the page tables, initialises the zone memory * maps, and sets up the zero page, bad page and bad page tables. */ -void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc) +void __init paging_init(struct machine_desc *mdesc) { - sanity_check_meminfo(mi); - bootmem_init(mi); + bootmem_init(); } /* -- cgit v1.2.3 From a1bbaec0cd2a59d4bb09b72e4541a8a12e480d5d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 2 Sep 2008 11:44:21 -0400 Subject: [ARM] split highmem into its own memory bank Doing so will greatly simplify the bootmem initialization code as each bank is therefore entirely lowmem or highmem with no crossing between those zones. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 84 +++++++++++++++++++++++++++++++++---------------------- 1 file changed, 51 insertions(+), 33 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 6870805c31dd..ab511d94d917 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -651,44 +651,62 @@ __early_param("vmalloc=", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) -static int __init check_membank_valid(struct membank *mb) -{ - /* - * Check whether this memory region would entirely overlap - * the vmalloc area. 
- */ - if (phys_to_virt(mb->start) >= VMALLOC_MIN) { - printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " - "(vmalloc region overlap).\n", - mb->start, mb->start + mb->size - 1); - return 0; - } - - /* - * Check whether this memory region would partially overlap - * the vmalloc area. - */ - if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) || - phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) { - unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start); - - printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " - "to -%.8lx (vmalloc region overlap).\n", - mb->start, mb->start + mb->size - 1, - mb->start + newsize - 1); - mb->size = newsize; - } - - return 1; -} - static void __init sanity_check_meminfo(void) { int i, j; for (i = 0, j = 0; i < meminfo.nr_banks; i++) { - if (check_membank_valid(&meminfo.bank[i])) - meminfo.bank[j++] = meminfo.bank[i]; + struct membank *bank = &meminfo.bank[j]; + *bank = meminfo.bank[i]; + +#ifdef CONFIG_HIGHMEM + /* + * Split those memory banks which are partially overlapping + * the vmalloc area greatly simplifying things later. + */ + if (__va(bank->start) < VMALLOC_MIN && + bank->size > VMALLOC_MIN - __va(bank->start)) { + if (meminfo.nr_banks >= NR_BANKS) { + printk(KERN_CRIT "NR_BANKS too low, " + "ignoring high memory\n"); + } else { + memmove(bank + 1, bank, + (meminfo.nr_banks - i) * sizeof(*bank)); + meminfo.nr_banks++; + i++; + bank[1].size -= VMALLOC_MIN - __va(bank->start); + bank[1].start = __pa(VMALLOC_MIN - 1) + 1; + j++; + } + bank->size = VMALLOC_MIN - __va(bank->start); + } +#else + /* + * Check whether this memory bank would entirely overlap + * the vmalloc area. + */ + if (__va(bank->start) >= VMALLOC_MIN) { + printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " + "(vmalloc region overlap).\n", + bank->start, bank->start + bank->size - 1); + continue; + } + + /* + * Check whether this memory bank would partially overlap + * the vmalloc area. + */ + if (__va(bank->start + bank->size) > VMALLOC_MIN || + __va(bank->start + bank->size) < __va(bank->start)) { + unsigned long newsize = VMALLOC_MIN - __va(bank->start); + printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " + "to -%.8lx (vmalloc region overlap).\n", + bank->start, bank->start + bank->size - 1, + bank->start + newsize - 1); + bank->size = newsize; + } +#endif + j++; } meminfo.nr_banks = j; } -- cgit v1.2.3 From 6db015e49c03d42247d2a985475b833635406a4f Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Wed, 17 Sep 2008 14:50:42 -0400 Subject: [ARM] mem_init() cleanups Make free_area()'s arguments pfn-based, and return the number of freed pages. This will simplify highmem initialization later. Also, codepages, datapages and initpages actually hold byte sizes, so rename them to codesize, datasize and initsize.
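[ Reconstruction, for orientation only: pieced together from the hunks below, the reworked helper ends up reading roughly as follows.

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	/* size is in KB for the printk; pages counts what is actually freed */
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);

	return pages;	/* callers add this to totalram_pages */
} ]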
Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/init.c | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index b43da2479fa0..ab5c9abd5c34 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -394,20 +394,22 @@ void __init bootmem_init(void) max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; } -static inline void free_area(unsigned long addr, unsigned long end, char *s) +static inline int free_area(unsigned long pfn, unsigned long end, char *s) { - unsigned int size = (end - addr) >> 10; + unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10); - for (; addr < end; addr += PAGE_SIZE) { - struct page *page = virt_to_page(addr); + for (; pfn < end; pfn++) { + struct page *page = pfn_to_page(pfn); ClearPageReserved(page); init_page_count(page); - free_page(addr); - totalram_pages++; + __free_page(page); + pages++; } if (size && s) printk(KERN_INFO "Freeing %s memory: %dK\n", s, size); + + return pages; } static inline void @@ -478,13 +480,9 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) */ void __init mem_init(void) { - unsigned int codepages, datapages, initpages; + unsigned int codesize, datasize, initsize; int i, node; - codepages = &_etext - &_text; - datapages = &_end - &__data_start; - initpages = &__init_end - &__init_begin; - #ifndef CONFIG_DISCONTIGMEM max_mapnr = virt_to_page(high_memory) - mem_map; #endif @@ -501,7 +499,8 @@ void __init mem_init(void) #ifdef CONFIG_SA1111 /* now that our DMA memory is actually so designated, we can free it */ - free_area(PAGE_OFFSET, (unsigned long)swapper_pg_dir, NULL); + totalram_pages += free_area(PHYS_PFN_OFFSET, + __phys_to_pfn(__pa(swapper_pg_dir)), NULL); #endif /* @@ -509,18 +508,21 @@ void __init mem_init(void) * real number of pages we have in this system */ printk(KERN_INFO "Memory:"); - num_physpages = 0; for (i = 0; i < meminfo.nr_banks; i++) { num_physpages += bank_pfn_size(&meminfo.bank[i]); printk(" %ldMB", bank_phys_size(&meminfo.bank[i]) >> 20); } - printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); + + codesize = &_etext - &_text; + datasize = &_end - &__data_start; + initsize = &__init_end - &__init_begin; + printk(KERN_NOTICE "Memory: %luKB available (%dK code, " "%dK data, %dK init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), - codepages >> 10, datapages >> 10, initpages >> 10); + codesize >> 10, datasize >> 10, initsize >> 10); if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; @@ -535,11 +537,10 @@ void __init mem_init(void) void free_initmem(void) { - if (!machine_is_integrator() && !machine_is_cintegrator()) { - free_area((unsigned long)(&__init_begin), - (unsigned long)(&__init_end), - "init"); - } + if (!machine_is_integrator() && !machine_is_cintegrator()) + totalram_pages += free_area(__phys_to_pfn(__pa(&__init_begin)), + __phys_to_pfn(__pa(&__init_end)), + "init"); } #ifdef CONFIG_BLK_DEV_INITRD @@ -549,7 +550,9 @@ static int keep_initrd; void free_initrd_mem(unsigned long start, unsigned long end) { if (!keep_initrd) - free_area(start, end, "initrd"); + totalram_pages += free_area(__phys_to_pfn(__pa(start)), + __phys_to_pfn(__pa(end)), + "initrd"); } static int __init keepinitrd_setup(char *__unused) -- cgit v1.2.3 From 9210807cb5a3f19a0e954dd401e3a2c3626d1b48 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 19 Sep 2008 10:43:06 -0400 Subject: [ARM] 
prevent the vmalloc cmdline argument from eating all memory Commit 8d5796d2ec6b5a4e7a52861144e63af438d6f8f7 allows the vmalloc area to be resized from the kernel cmdline. Make sure it cannot grow so large that it overlaps RAM entirely. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/mmu.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ab511d94d917..636cf8fc70ef 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -646,6 +646,13 @@ static void __init early_vmalloc(char **arg) "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } + + if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { + vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); + printk(KERN_WARNING + "vmalloc area is too big, limiting to %luMB\n", + vmalloc_reserve >> 20); + } } __early_param("vmalloc=", early_vmalloc); -- cgit v1.2.3 From 252d4c276dc0895834af48743579cf19d1fa150b Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 11 Sep 2008 11:52:02 -0400 Subject: [ARM] remove bogus #ifdef CONFIG_HIGHMEM in show_pte() The restriction to !CONFIG_HIGHMEM is unneeded, since page tables are currently never allocated from highmem pages; worse, it disables the PTE dump entirely whenever highmem is configured. Let's use a dynamic test that better describes the actual limitation instead. Signed-off-by: Nicolas Pitre Signed-off-by: Russell King --- arch/arm/mm/fault.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 2df8d9facf57..ffd8b228a139 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -83,13 +84,14 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; } -#ifndef CONFIG_HIGHMEM /* We must not map this if we have highmem enabled */ + if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT))) + break; + pte = pte_offset_map(pmd, addr); printk(", *pte=%08lx", pte_val(*pte)); printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE])); pte_unmap(pte); -#endif } while(0); printk("\n"); -- cgit v1.2.3
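[ Reconstruction, for orientation only: with the final patch applied, the tail of show_pte() reads roughly as follows. The compile-time #ifndef CONFIG_HIGHMEM guard is gone; the runtime PageHighMem() test skips the PTE dump only when the page table page actually lives in highmem.

	/* We must not map this if we have highmem enabled */
	if (PageHighMem(pfn_to_page(pmd_val(*pmd) >> PAGE_SHIFT)))
		break;

	pte = pte_offset_map(pmd, addr);
	printk(", *pte=%08lx", pte_val(*pte));
	printk(", *ppte=%08lx", pte_val(pte[-PTRS_PER_PTE]));
	pte_unmap(pte); ]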