Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/cache-v7.S | 8
-rw-r--r-- | arch/arm/mm/cache-v7m.S | 14
-rw-r--r-- | arch/arm/mm/copypage-fa.c | 35
-rw-r--r-- | arch/arm/mm/copypage-feroceon.c | 98
-rw-r--r-- | arch/arm/mm/copypage-v4mc.c | 19
-rw-r--r-- | arch/arm/mm/copypage-v4wb.c | 41
-rw-r--r-- | arch/arm/mm/copypage-v4wt.c | 37
-rw-r--r-- | arch/arm/mm/copypage-xsc3.c | 79
-rw-r--r-- | arch/arm/mm/copypage-xscale.c | 79
-rw-r--r-- | arch/arm/mm/dma-mapping-nommu.c | 14
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 41
-rw-r--r-- | arch/arm/mm/fault.c | 6
-rw-r--r-- | arch/arm/mm/init.c | 28
-rw-r--r-- | arch/arm/mm/proc-macros.S | 10
-rw-r--r-- | arch/arm/mm/proc-v7-bugs.c | 17
-rw-r--r-- | arch/arm/mm/proc-v7.S | 2
-rw-r--r-- | arch/arm/mm/pv-fixup-asm.S | 16
17 files changed, 248 insertions, 296 deletions
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 215df435bfb9..2149b47a0c5a 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -360,14 +360,16 @@ v7_dma_inv_range: ALT_UP(W(nop)) #endif mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line + addne r0, r0, r2 tst r1, r3 bic r1, r1, r3 mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D / U line -1: - mcr p15, 0, r0, c7, c6, 1 @ invalidate D / U line - add r0, r0, r2 cmp r0, r1 +1: + mcrlo p15, 0, r0, c7, c6, 1 @ invalidate D / U line + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/cache-v7m.S b/arch/arm/mm/cache-v7m.S index 788486e830d3..32aa2a2aa260 100644 --- a/arch/arm/mm/cache-v7m.S +++ b/arch/arm/mm/cache-v7m.S @@ -73,9 +73,11 @@ /* * dcimvac: Invalidate data cache line by MVA to PoC */ -.macro dcimvac, rt, tmp - v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC +.irp c,,eq,ne,cs,cc,mi,pl,vs,vc,hi,ls,ge,lt,gt,le,hs,lo +.macro dcimvac\c, rt, tmp + v7m_cacheop \rt, \tmp, V7M_SCB_DCIMVAC, \c .endm +.endr /* * dccmvau: Clean data cache line by MVA to PoU @@ -369,14 +371,16 @@ v7m_dma_inv_range: tst r0, r3 bic r0, r0, r3 dccimvacne r0, r3 + addne r0, r0, r2 subne r3, r2, #1 @ restore r3, corrupted by v7m's dccimvac tst r1, r3 bic r1, r1, r3 dccimvacne r1, r3 -1: - dcimvac r0, r3 - add r0, r0, r2 cmp r0, r1 +1: + dcimvaclo r0, r3 + addlo r0, r0, r2 + cmplo r0, r1 blo 1b dsb st ret lr diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index d130a5ece5d5..bf24690ec83a 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c @@ -17,26 +17,25 @@ /* * Faraday optimised copy_user_page */ -static void __naked -fa_copy_user_page(void *kto, const void *kfrom) +static void fa_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %0 @ 1\n\ -1: ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add r0, r0, #16 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c14, 1 @ 1 clean and invalidate D line\n\ - add r0, r0, #16 @ 1\n\ - subs r2, r2, #1 @ 1\n\ + int tmp; + + asm volatile ("\ +1: ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add %0, %0, #16 @ 1\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c14, 1 @ 1 clean and invalidate D line\n\ + add %0, %0, #16 @ 1\n\ + subs %2, %2, #1 @ 1\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c10, 4 @ 1 drain WB\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "I" (PAGE_SIZE / 32)); + mcr p15, 0, %2, c7, c10, 4 @ 1 drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 32) + : "r3", "r4", "ip", "lr"); } void fa_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index 49ee0c1a7209..cc819732d9b8 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c @@ -13,58 +13,56 @@ #include <linux/init.h> #include <linux/highmem.h> -static void __naked -feroceon_copy_user_page(void *kto, const void *kfrom) +static void feroceon_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4-r9, lr} \n\ - mov ip, %2 \n\ -1: mov lr, r1 \n\ - ldmia r1!, {r2 - r9} \n\ - pld [lr, #32] \n\ - pld [lr, #64] \n\ - pld [lr, #96] \n\ - pld [lr, #128] \n\ - pld [lr, #160] \n\ - pld [lr, #192] \n\ - pld [lr, #224] \n\ - 
stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - ldmia r1!, {r2 - r9} \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ - stmia r0, {r2 - r9} \n\ - subs ip, ip, #(32 * 8) \n\ - mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D line\n\ - add r0, r0, #32 \n\ + int tmp; + + asm volatile ("\ +1: ldmia %1!, {r2 - r7, ip, lr} \n\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ + pld [%1, #64] \n\ + pld [%1, #96] \n\ + pld [%1, #128] \n\ + pld [%1, #160] \n\ + pld [%1, #192] \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + ldmia %1!, {r2 - r7, ip, lr} \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ + stmia %0, {r2 - r7, ip, lr} \n\ + subs %2, %2, #(32 * 8) \n\ + mcr p15, 0, %0, c7, c14, 1 @ clean and invalidate D line\n\ + add %0, %0, #32 \n\ bne 1b \n\ - mcr p15, 0, ip, c7, c10, 4 @ drain WB\n\ - ldmfd sp!, {r4-r9, pc}" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE)); + mcr p15, 0, %2, c7, c10, 4 @ drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE) + : "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); } void feroceon_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 0224416cba3c..b03202cddddb 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c @@ -40,12 +40,11 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. 
*/ -static void __naked -mc_copy_user_page(void *from, void *to) +static void mc_copy_user_page(void *from, void *to) { - asm volatile( - "stmfd sp!, {r4, lr} @ 2\n\ - mov r4, %2 @ 1\n\ + int tmp; + + asm volatile ("\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ 1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ @@ -55,13 +54,13 @@ mc_copy_user_page(void *from, void *to) mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmia %0!, {r2, r3, ip, lr} @ 4\n\ - subs r4, r4, #1 @ 1\n\ + subs %2, %2, #1 @ 1\n\ stmia %1!, {r2, r3, ip, lr} @ 4\n\ ldmneia %0!, {r2, r3, ip, lr} @ 4\n\ - bne 1b @ 1\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64)); + bne 1b @ " + : "+&r" (from), "+&r" (to), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r2", "r3", "ip", "lr"); } void v4_mc_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index 067d0fdd630c..cd3e165afeed 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c @@ -22,29 +22,28 @@ * instruction. If your processor does not supply this, you have to write your * own copy_user_highpage that does the right thing. */ -static void __naked -v4wb_copy_user_page(void *kto, const void *kfrom) +static void v4wb_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %2 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ -1: mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - mcr p15, 0, r0, c7, c6, 1 @ 1 invalidate D line\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - subs r2, r2, #1 @ 1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + int tmp; + + asm volatile ("\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ +1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + subs %2, %2, #1 @ 1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, r1, c7, c10, 4 @ 1 drain WB\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); + mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r3", "r4", "ip", "lr"); } void v4wb_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index b85c5da2e510..8614572e1296 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c @@ -20,27 +20,26 @@ * dirty data in the cache. However, we do have to ensure that * subsequent reads are up to date. 
*/ -static void __naked -v4wt_copy_user_page(void *kto, const void *kfrom) +static void v4wt_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, lr} @ 2\n\ - mov r2, %2 @ 1\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ -1: stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4+1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmia r1!, {r3, r4, ip, lr} @ 4\n\ - subs r2, r2, #1 @ 1\n\ - stmia r0!, {r3, r4, ip, lr} @ 4\n\ - ldmneia r1!, {r3, r4, ip, lr} @ 4\n\ + int tmp; + + asm volatile ("\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ +1: stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmia %1!, {r3, r4, ip, lr} @ 4\n\ + subs %2, %2, #1 @ 1\n\ + stmia %0!, {r3, r4, ip, lr} @ 4\n\ + ldmneia %1!, {r3, r4, ip, lr} @ 4\n\ bne 1b @ 1\n\ - mcr p15, 0, r2, c7, c7, 0 @ flush ID cache\n\ - ldmfd sp!, {r4, pc} @ 3" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64)); + mcr p15, 0, %2, c7, c7, 0 @ flush ID cache" + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64) + : "r3", "r4", "ip", "lr"); } void v4wt_copy_user_highpage(struct page *to, struct page *from, diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index 03a2042aced5..a08158241ad1 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c @@ -21,53 +21,46 @@ /* * XSC3 optimised copy_user_highpage - * r0 = destination - * r1 = source * * The source page may have some clean entries in the cache already, but we * can safely ignore them - break_cow() will flush them out of the cache * if we eventually end up using our copied page. 
* */ -static void __naked -xsc3_mc_copy_user_page(void *kto, const void *kfrom) +static void xsc3_mc_copy_user_page(void *kto, const void *kfrom) { - asm("\ - stmfd sp!, {r4, r5, lr} \n\ - mov lr, %2 \n\ - \n\ - pld [r1, #0] \n\ - pld [r1, #32] \n\ -1: pld [r1, #64] \n\ - pld [r1, #96] \n\ + int tmp; + + asm volatile ("\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ +1: pld [%1, #64] \n\ + pld [%1, #96] \n\ \n\ -2: ldrd r2, [r1], #8 \n\ - mov ip, r0 \n\ - ldrd r4, [r1], #8 \n\ - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ - strd r2, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - strd r4, [r0], #8 \n\ - ldrd r4, [r1], #8 \n\ - strd r2, [r0], #8 \n\ - strd r4, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - mov ip, r0 \n\ - ldrd r4, [r1], #8 \n\ - mcr p15, 0, ip, c7, c6, 1 @ invalidate\n\ - strd r2, [r0], #8 \n\ - ldrd r2, [r1], #8 \n\ - subs lr, lr, #1 \n\ - strd r4, [r0], #8 \n\ - ldrd r4, [r1], #8 \n\ - strd r2, [r0], #8 \n\ - strd r4, [r0], #8 \n\ +2: ldrd r2, r3, [%1], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ + strd r2, r3, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + mcr p15, 0, %0, c7, c6, 1 @ invalidate\n\ + strd r2, r3, [%0], #8 \n\ + ldrd r2, r3, [%1], #8 \n\ + subs %2, %2, #1 \n\ + strd r4, r5, [%0], #8 \n\ + ldrd r4, r5, [%1], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r4, r5, [%0], #8 \n\ bgt 1b \n\ - beq 2b \n\ - \n\ - ldmfd sp!, {r4, r5, pc}" - : - : "r" (kto), "r" (kfrom), "I" (PAGE_SIZE / 64 - 1)); + beq 2b " + : "+&r" (kto), "+&r" (kfrom), "=&r" (tmp) + : "2" (PAGE_SIZE / 64 - 1) + : "r2", "r3", "r4", "r5"); } void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, @@ -85,8 +78,6 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, /* * XScale optimised clear_user_page - * r0 = destination - * r1 = virtual user address of ultimate destination page */ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) { @@ -96,10 +87,10 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) mov r2, #0 \n\ mov r3, #0 \n\ 1: mcr p15, 0, %0, c7, c6, 1 @ invalidate line\n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ subs r1, r1, #1 \n\ bne 1b" : "=r" (ptr) diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 97972379f4d6..63b921936754 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c @@ -36,52 +36,51 @@ static DEFINE_RAW_SPINLOCK(minicache_lock); * Dcache aliasing issue. The writes will be forwarded to the write buffer, * and merged as appropriate. */ -static void __naked -mc_copy_user_page(void *from, void *to) +static void mc_copy_user_page(void *from, void *to) { + int tmp; + /* * Strangely enough, best performance is achieved * when prefetching destination as well. 
(NP) */ - asm volatile( - "stmfd sp!, {r4, r5, lr} \n\ - mov lr, %2 \n\ - pld [r0, #0] \n\ - pld [r0, #32] \n\ - pld [r1, #0] \n\ - pld [r1, #32] \n\ -1: pld [r0, #64] \n\ - pld [r0, #96] \n\ - pld [r1, #64] \n\ - pld [r1, #96] \n\ -2: ldrd r2, [r0], #8 \n\ - ldrd r4, [r0], #8 \n\ - mov ip, r1 \n\ - strd r2, [r1], #8 \n\ - ldrd r2, [r0], #8 \n\ - strd r4, [r1], #8 \n\ - ldrd r4, [r0], #8 \n\ - strd r2, [r1], #8 \n\ - strd r4, [r1], #8 \n\ + asm volatile ("\ + pld [%0, #0] \n\ + pld [%0, #32] \n\ + pld [%1, #0] \n\ + pld [%1, #32] \n\ +1: pld [%0, #64] \n\ + pld [%0, #96] \n\ + pld [%1, #64] \n\ + pld [%1, #96] \n\ +2: ldrd r2, r3, [%0], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + mov ip, %1 \n\ + strd r2, r3, [%1], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ + strd r4, r5, [%1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + strd r2, r3, [%1], #8 \n\ + strd r4, r5, [%1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - ldrd r2, [r0], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ - ldrd r4, [r0], #8 \n\ - mov ip, r1 \n\ - strd r2, [r1], #8 \n\ - ldrd r2, [r0], #8 \n\ - strd r4, [r1], #8 \n\ - ldrd r4, [r0], #8 \n\ - strd r2, [r1], #8 \n\ - strd r4, [r1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + mov ip, %1 \n\ + strd r2, r3, [%1], #8 \n\ + ldrd r2, r3, [%0], #8 \n\ + strd r4, r5, [%1], #8 \n\ + ldrd r4, r5, [%0], #8 \n\ + strd r2, r3, [%1], #8 \n\ + strd r4, r5, [%1], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ - subs lr, lr, #1 \n\ + subs %2, %2, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ bgt 1b \n\ - beq 2b \n\ - ldmfd sp!, {r4, r5, pc} " - : - : "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1)); + beq 2b " + : "+&r" (from), "+&r" (to), "=&r" (tmp) + : "2" (PAGE_SIZE / 64 - 1) + : "r2", "r3", "r4", "r5", "ip"); } void xscale_mc_copy_user_highpage(struct page *to, struct page *from, @@ -115,10 +114,10 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) mov r2, #0 \n\ mov r3, #0 \n\ 1: mov ip, %0 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ - strd r2, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ + strd r2, r3, [%0], #8 \n\ mcr p15, 0, ip, c7, c10, 1 @ clean D line\n\ subs r1, r1, #1 \n\ mcr p15, 0, ip, c7, c6, 1 @ invalidate D line\n\ diff --git a/arch/arm/mm/dma-mapping-nommu.c b/arch/arm/mm/dma-mapping-nommu.c index 712416ecd8e6..f304b10e23a4 100644 --- a/arch/arm/mm/dma-mapping-nommu.c +++ b/arch/arm/mm/dma-mapping-nommu.c @@ -22,7 +22,7 @@ #include "dma.h" /* - * dma_direct_ops is used if + * The generic direct mapping code is used if * - MMU/MPU is off * - cpu is v7m w/o cache support * - device is coherent @@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = { }; EXPORT_SYMBOL(arm_nommu_dma_ops); -static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent) -{ - return coherent ? &dma_direct_ops : &arm_nommu_dma_ops; -} - void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, const struct iommu_ops *iommu, bool coherent) { - const struct dma_map_ops *dma_ops; - if (IS_ENABLED(CONFIG_CPU_V7M)) { /* * Cache support for v7m is optional, so can be treated as @@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, dev->archdata.dma_coherent = (get_cr() & CR_M) ? 
coherent : true; } - dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent); - - set_dma_ops(dev, dma_ops); + if (!dev->archdata.dma_coherent) + set_dma_ops(dev, &arm_nommu_dma_ops); } diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 661fe48ab78d..f1e2922e447c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -179,11 +179,6 @@ static void arm_dma_sync_single_for_device(struct device *dev, __dma_page_cpu_to_dev(page, offset, size, dir); } -static int arm_dma_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == ARM_MAPPING_ERROR; -} - const struct dma_map_ops arm_dma_ops = { .alloc = arm_dma_alloc, .free = arm_dma_free, @@ -197,7 +192,6 @@ const struct dma_map_ops arm_dma_ops = { .sync_single_for_device = arm_dma_sync_single_for_device, .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu, .sync_sg_for_device = arm_dma_sync_sg_for_device, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_dma_ops); @@ -217,7 +211,6 @@ const struct dma_map_ops arm_coherent_dma_ops = { .get_sgtable = arm_dma_get_sgtable, .map_page = arm_coherent_dma_map_page, .map_sg = arm_dma_map_sg, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; EXPORT_SYMBOL(arm_coherent_dma_ops); @@ -774,7 +767,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp &= ~(__GFP_COMP); args.gfp = gfp; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; allowblock = gfpflags_allow_blocking(gfp); cma = allowblock ? dev_get_cma_area(dev) : false; @@ -829,7 +822,7 @@ static int __arm_dma_mmap(struct device *dev, struct vm_area_struct *vma, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs) { - int ret; + int ret = -ENXIO; unsigned long nr_vma_pages = vma_pages(vma); unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT; unsigned long pfn = dma_to_pfn(dev, dma_addr); @@ -1217,7 +1210,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (i == mapping->nr_bitmaps) { if (extend_iommu_mapping(mapping)) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } start = bitmap_find_next_zero_area(mapping->bitmaps[i], @@ -1225,7 +1218,7 @@ static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping, if (start > mapping->bits) { spin_unlock_irqrestore(&mapping->lock, flags); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } bitmap_set(mapping->bitmaps[i], start, count); @@ -1409,7 +1402,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, int i; dma_addr = __alloc_iova(mapping, size); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; iova = dma_addr; @@ -1436,7 +1429,7 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size, fail: iommu_unmap(mapping->domain, dma_addr, iova-dma_addr); __free_iova(mapping, dma_addr, size); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size) @@ -1497,7 +1490,7 @@ static void *__iommu_alloc_simple(struct device *dev, size_t size, gfp_t gfp, return NULL; *handle = __iommu_create_mapping(dev, &page, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_mapping; return addr; @@ -1525,7 +1518,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, struct page **pages; void *addr = 
NULL; - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; size = PAGE_ALIGN(size); if (coherent_flag == COHERENT || !gfpflags_allow_blocking(gfp)) @@ -1546,7 +1539,7 @@ static void *__arm_iommu_alloc_attrs(struct device *dev, size_t size, return NULL; *handle = __iommu_create_mapping(dev, pages, size, attrs); - if (*handle == ARM_MAPPING_ERROR) + if (*handle == DMA_MAPPING_ERROR) goto err_buffer; if (attrs & DMA_ATTR_NO_KERNEL_MAPPING) @@ -1696,10 +1689,10 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg, int prot; size = PAGE_ALIGN(size); - *handle = ARM_MAPPING_ERROR; + *handle = DMA_MAPPING_ERROR; iova_base = iova = __alloc_iova(mapping, size); - if (iova == ARM_MAPPING_ERROR) + if (iova == DMA_MAPPING_ERROR) return -ENOMEM; for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) { @@ -1739,7 +1732,7 @@ static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents, for (i = 1; i < nents; i++) { s = sg_next(s); - s->dma_address = ARM_MAPPING_ERROR; + s->dma_address = DMA_MAPPING_ERROR; s->dma_length = 0; if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) { @@ -1914,7 +1907,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p int ret, prot, len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs); @@ -1926,7 +1919,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /** @@ -2020,7 +2013,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, size_t len = PAGE_ALIGN(size + offset); dma_addr = __alloc_iova(mapping, len); - if (dma_addr == ARM_MAPPING_ERROR) + if (dma_addr == DMA_MAPPING_ERROR) return dma_addr; prot = __dma_info_to_prot(dir, attrs) | IOMMU_MMIO; @@ -2032,7 +2025,7 @@ static dma_addr_t arm_iommu_map_resource(struct device *dev, return dma_addr + offset; fail: __free_iova(mapping, dma_addr, len); - return ARM_MAPPING_ERROR; + return DMA_MAPPING_ERROR; } /** @@ -2105,7 +2098,6 @@ const struct dma_map_ops iommu_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; @@ -2124,7 +2116,6 @@ const struct dma_map_ops iommu_coherent_ops = { .map_resource = arm_iommu_map_resource, .unmap_resource = arm_iommu_unmap_resource, - .mapping_error = arm_dma_mapping_error, .dma_supported = arm_dma_supported, }; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f4ea4c62c613..58f69fa07df9 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -173,6 +173,12 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr, show_regs(regs); } #endif +#ifndef CONFIG_KUSER_HELPERS + if ((sig == SIGSEGV) && ((addr & PAGE_MASK) == 0xffff0000)) + printk_ratelimited(KERN_DEBUG + "%s: CONFIG_KUSER_HELPERS disabled at 0x%08lx\n", + tsk->comm, addr); +#endif tsk->thread.address = addr; tsk->thread.error_code = fsr; diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 32e4845af2b6..478ea8b7db87 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -50,26 +50,7 @@ unsigned long __init __clear_cr(unsigned long mask) } #endif -static phys_addr_t phys_initrd_start __initdata = 0; -static unsigned long phys_initrd_size __initdata = 0; - -static int 
__init early_initrd(char *p) -{ - phys_addr_t start; - unsigned long size; - char *endp; - - start = memparse(p, &endp); - if (*endp == ',') { - size = memparse(endp + 1, NULL); - - phys_initrd_start = start; - phys_initrd_size = size; - } - return 0; -} -early_param("initrd", early_initrd); - +#ifdef CONFIG_BLK_DEV_INITRD static int __init parse_tag_initrd(const struct tag *tag) { pr_warn("ATAG_INITRD is deprecated; " @@ -89,6 +70,7 @@ static int __init parse_tag_initrd2(const struct tag *tag) } __tagtable(ATAG_INITRD2, parse_tag_initrd2); +#endif static void __init find_limits(unsigned long *min, unsigned long *max_low, unsigned long *max_high) @@ -236,12 +218,6 @@ static void __init arm_initrd_init(void) phys_addr_t start; unsigned long size; - /* FDT scan will populate initrd_start */ - if (initrd_start && !phys_initrd_size) { - phys_initrd_start = __virt_to_phys(initrd_start); - phys_initrd_size = initrd_end - initrd_start; - } - initrd_start = initrd_end = 0; if (!phys_initrd_size) diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 81d0efb055c6..5461d589a1e2 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -274,6 +274,13 @@ .endm .macro define_processor_functions name:req, dabort:req, pabort:req, nommu=0, suspend=0, bugs=0 +/* + * If we are building for big.Little with branch predictor hardening, + * we need the processor function tables to remain available after boot. + */ +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .section ".rodata" +#endif .type \name\()_processor_functions, #object .align 2 ENTRY(\name\()_processor_functions) @@ -309,6 +316,9 @@ ENTRY(\name\()_processor_functions) .endif .size \name\()_processor_functions, . - \name\()_processor_functions +#if defined(CONFIG_BIG_LITTLE) && defined(CONFIG_HARDEN_BRANCH_PREDICTOR) + .previous +#endif .endm .macro define_cache_functions name:req diff --git a/arch/arm/mm/proc-v7-bugs.c b/arch/arm/mm/proc-v7-bugs.c index 5544b82a2e7a..9a07916af8dd 100644 --- a/arch/arm/mm/proc-v7-bugs.c +++ b/arch/arm/mm/proc-v7-bugs.c @@ -52,8 +52,6 @@ static void cpu_v7_spectre_init(void) case ARM_CPU_PART_CORTEX_A17: case ARM_CPU_PART_CORTEX_A73: case ARM_CPU_PART_CORTEX_A75: - if (processor.switch_mm != cpu_v7_bpiall_switch_mm) - goto bl_error; per_cpu(harden_branch_predictor_fn, cpu) = harden_branch_predictor_bpiall; spectre_v2_method = "BPIALL"; @@ -61,8 +59,6 @@ static void cpu_v7_spectre_init(void) case ARM_CPU_PART_CORTEX_A15: case ARM_CPU_PART_BRAHMA_B15: - if (processor.switch_mm != cpu_v7_iciallu_switch_mm) - goto bl_error; per_cpu(harden_branch_predictor_fn, cpu) = harden_branch_predictor_iciallu; spectre_v2_method = "ICIALLU"; @@ -88,11 +84,9 @@ static void cpu_v7_spectre_init(void) ARM_SMCCC_ARCH_WORKAROUND_1, &res); if ((int)res.a0 != 0) break; - if (processor.switch_mm != cpu_v7_hvc_switch_mm && cpu) - goto bl_error; per_cpu(harden_branch_predictor_fn, cpu) = call_hvc_arch_workaround_1; - processor.switch_mm = cpu_v7_hvc_switch_mm; + cpu_do_switch_mm = cpu_v7_hvc_switch_mm; spectre_v2_method = "hypervisor"; break; @@ -101,11 +95,9 @@ static void cpu_v7_spectre_init(void) ARM_SMCCC_ARCH_WORKAROUND_1, &res); if ((int)res.a0 != 0) break; - if (processor.switch_mm != cpu_v7_smc_switch_mm && cpu) - goto bl_error; per_cpu(harden_branch_predictor_fn, cpu) = call_smc_arch_workaround_1; - processor.switch_mm = cpu_v7_smc_switch_mm; + cpu_do_switch_mm = cpu_v7_smc_switch_mm; spectre_v2_method = "firmware"; break; @@ -119,11 +111,6 @@ static void 
cpu_v7_spectre_init(void) if (spectre_v2_method) pr_info("CPU%u: Spectre v2: using %s workaround\n", smp_processor_id(), spectre_v2_method); - return; - -bl_error: - pr_err("CPU%u: Spectre v2: incorrect context switching function, system vulnerable\n", - cpu); } #else static void cpu_v7_spectre_init(void) diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 6fe52819e014..339eb17c9808 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -112,7 +112,7 @@ ENTRY(cpu_v7_hvc_switch_mm) hvc #0 ldmfd sp!, {r0 - r3} b cpu_v7_switch_mm -ENDPROC(cpu_v7_smc_switch_mm) +ENDPROC(cpu_v7_hvc_switch_mm) #endif ENTRY(cpu_v7_iciallu_switch_mm) mov r3, #0 diff --git a/arch/arm/mm/pv-fixup-asm.S b/arch/arm/mm/pv-fixup-asm.S index 1867f3e43016..fd2ff9034d17 100644 --- a/arch/arm/mm/pv-fixup-asm.S +++ b/arch/arm/mm/pv-fixup-asm.S @@ -33,10 +33,10 @@ ENTRY(lpae_pgtables_remap_asm) add r7, r2, #0x1000 add r6, r7, r6, lsr #SECTION_SHIFT - L2_ORDER add r7, r7, #PAGE_OFFSET >> (SECTION_SHIFT - L2_ORDER) -1: ldrd r4, [r7] +1: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L2_ORDER + strd r4, r5, [r7], #1 << L2_ORDER cmp r7, r6 bls 1b @@ -44,22 +44,22 @@ ENTRY(lpae_pgtables_remap_asm) add r7, r2, #0x1000 add r7, r7, r3, lsr #SECTION_SHIFT - L2_ORDER bic r7, r7, #(1 << L2_ORDER) - 1 - ldrd r4, [r7] + ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L2_ORDER - ldrd r4, [r7] + strd r4, r5, [r7], #1 << L2_ORDER + ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7] + strd r4, r5, [r7] /* Update level 1 entries */ mov r6, #4 mov r7, r2 -2: ldrd r4, [r7] +2: ldrd r4, r5, [r7] adds r4, r4, r0 adc r5, r5, r1 - strd r4, [r7], #1 << L1_ORDER + strd r4, r5, [r7], #1 << L1_ORDER subs r6, r6, #1 bne 2b |