Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/cache-v6.S         8
-rw-r--r--  arch/arm/mm/copypage-v4mc.c    3
-rw-r--r--  arch/arm/mm/copypage-v4wb.c    3
-rw-r--r--  arch/arm/mm/copypage-v4wt.c    3
-rw-r--r--  arch/arm/mm/dma-mapping.c      2
-rw-r--r--  arch/arm/mm/idmap.c            4
-rw-r--r--  arch/arm/mm/init.c            69
-rw-r--r--  arch/arm/mm/pmsa-v8.c          4
-rw-r--r--  arch/arm/mm/proc-v7m.S         7
9 files changed, 25 insertions, 78 deletions
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 24659952c278..be68d62566c7 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -215,8 +215,8 @@ v6_dma_inv_range:
#endif
tst r1, #D_CACHE_LINE_SIZE - 1
#ifdef CONFIG_DMA_CACHE_RWFO
- ldrneb r2, [r1, #-1] @ read for ownership
- strneb r2, [r1, #-1] @ write for ownership
+ ldrbne r2, [r1, #-1] @ read for ownership
+ strbne r2, [r1, #-1] @ write for ownership
#endif
bic r1, r1, #D_CACHE_LINE_SIZE - 1
#ifdef HARVARD_CACHE
@@ -284,8 +284,8 @@ ENTRY(v6_dma_flush_range)
add r0, r0, #D_CACHE_LINE_SIZE
cmp r0, r1
#ifdef CONFIG_DMA_CACHE_RWFO
- ldrlob r2, [r0] @ read for ownership
- strlob r2, [r0] @ write for ownership
+ ldrblo r2, [r0] @ read for ownership
+ strblo r2, [r0] @ write for ownership
#endif
blo 1b
mov r0, #0
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index b03202cddddb..f74cdce6d4da 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -45,6 +45,7 @@ static void mc_copy_user_page(void *from, void *to)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
1: mcr p15, 0, %1, c7, c6, 1 @ 1 invalidate D line\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
@@ -56,7 +57,7 @@ static void mc_copy_user_page(void *from, void *to)
ldmia %0!, {r2, r3, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %1!, {r2, r3, ip, lr} @ 4\n\
- ldmneia %0!, {r2, r3, ip, lr} @ 4\n\
+ ldmiane %0!, {r2, r3, ip, lr} @ 4\n\
bne 1b @ "
: "+&r" (from), "+&r" (to), "=&r" (tmp)
: "2" (PAGE_SIZE / 64)
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cd3e165afeed..6d336740aae4 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -27,6 +27,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: mcr p15, 0, %0, c7, c6, 1 @ 1 invalidate D line\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
@@ -38,7 +39,7 @@ static void v4wb_copy_user_page(void *kto, const void *kfrom)
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
+ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %1, c7, c10, 4 @ 1 drain WB"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 8614572e1296..3851bb396442 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -25,6 +25,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
int tmp;
asm volatile ("\
+ .syntax unified\n\
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
1: stmia %0!, {r3, r4, ip, lr} @ 4\n\
ldmia %1!, {r3, r4, ip, lr} @ 4+1\n\
@@ -34,7 +35,7 @@ static void v4wt_copy_user_page(void *kto, const void *kfrom)
ldmia %1!, {r3, r4, ip, lr} @ 4\n\
subs %2, %2, #1 @ 1\n\
stmia %0!, {r3, r4, ip, lr} @ 4\n\
- ldmneia %1!, {r3, r4, ip, lr} @ 4\n\
+ ldmiane %1!, {r3, r4, ip, lr} @ 4\n\
bne 1b @ 1\n\
mcr p15, 0, %2, c7, c7, 0 @ flush ID cache"
: "+&r" (kto), "+&r" (kfrom), "=&r" (tmp)
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c6aab9c36ff1..43f46aa7ef33 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2279,7 +2279,7 @@ EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
* @dev: valid struct device pointer
*
* Detaches the provided device from a previously attached map.
- * This voids the dma operations (dma_map_ops pointer)
+ * This overwrites the dma_ops pointer with appropriate non-IOMMU ops.
*/
void arm_iommu_detach_device(struct device *dev)
{
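
Not part of the patch itself, but a minimal sketch of how the kernel-doc rewritten above is typically exercised by a driver: the device is attached to an IOMMU mapping for DMA, and on teardown arm_iommu_detach_device() restores the regular non-IOMMU dma_ops, which is what the updated comment describes. The IOVA base, the 128 MiB window size, the use of platform_bus_type and the error handling are illustrative assumptions, not values taken from this commit.

/*
 * Hedged example only; not from this patch. Shows the attach/detach
 * pairing around arm_iommu_detach_device() as documented above.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <asm/dma-iommu.h>

static int example_enable_iommu_dma(struct device *dev,
				    struct dma_iommu_mapping **out)
{
	struct dma_iommu_mapping *mapping;
	int ret;

	/* IOVA window base/size are arbitrary example values */
	mapping = arm_iommu_create_mapping(&platform_bus_type,
					   0x80000000, SZ_128M);
	if (IS_ERR(mapping))
		return PTR_ERR(mapping);

	ret = arm_iommu_attach_device(dev, mapping);
	if (ret) {
		arm_iommu_release_mapping(mapping);
		return ret;
	}

	*out = mapping;
	return 0;
}

static void example_disable_iommu_dma(struct device *dev,
				      struct dma_iommu_mapping *mapping)
{
	/* dev falls back to the appropriate non-IOMMU dma_ops */
	arm_iommu_detach_device(dev);
	arm_iommu_release_mapping(mapping);
}
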
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 1d1edd064199..a033f6134a64 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -6,6 +6,7 @@
#include <asm/cputype.h>
#include <asm/idmap.h>
+#include <asm/hwcap.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
@@ -110,7 +111,8 @@ static int __init init_static_idmap(void)
__idmap_text_end, 0);
/* Flush L1 for the hardware to see this page table content */
- flush_cache_louis();
+ if (!(elf_hwcap & HWCAP_LPAE))
+ flush_cache_louis();
return 0;
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 15dddfe43319..c2daabbe0af0 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -282,15 +282,12 @@ void __init arm_memblock_init(const struct machine_desc *mdesc)
void __init bootmem_init(void)
{
- unsigned long min, max_low, max_high;
-
memblock_allow_resize();
- max_low = max_high = 0;
- find_limits(&min, &max_low, &max_high);
+ find_limits(&min_low_pfn, &max_low_pfn, &max_pfn);
- early_memtest((phys_addr_t)min << PAGE_SHIFT,
- (phys_addr_t)max_low << PAGE_SHIFT);
+ early_memtest((phys_addr_t)min_low_pfn << PAGE_SHIFT,
+ (phys_addr_t)max_low_pfn << PAGE_SHIFT);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
@@ -308,16 +305,7 @@ void __init bootmem_init(void)
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
- zone_sizes_init(min, max_low, max_high);
-
- /*
- * This doesn't seem to be used by the Linux memory manager any
- * more, but is used by ll_rw_block. If we can get rid of it, we
- * also get rid of some of the stuff above as well.
- */
- min_low_pfn = min;
- max_low_pfn = max_low;
- max_pfn = max_high;
+ zone_sizes_init(min_low_pfn, max_low_pfn, max_pfn);
}
/*
@@ -498,55 +486,6 @@ void __init mem_init(void)
mem_init_print_info(NULL);
-#define MLK(b, t) b, t, ((t) - (b)) >> 10
-#define MLM(b, t) b, t, ((t) - (b)) >> 20
-#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
-
- pr_notice("Virtual kernel memory layout:\n"
- " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#ifdef CONFIG_HAVE_TCM
- " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
-#endif
- " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
- " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
- " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#ifdef CONFIG_HIGHMEM
- " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
-#ifdef CONFIG_MODULES
- " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
-#endif
- " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
- " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
-
- MLK(VECTORS_BASE, VECTORS_BASE + PAGE_SIZE),
-#ifdef CONFIG_HAVE_TCM
- MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
- MLK(ITCM_OFFSET, (unsigned long) itcm_end),
-#endif
- MLK(FIXADDR_START, FIXADDR_END),
- MLM(VMALLOC_START, VMALLOC_END),
- MLM(PAGE_OFFSET, (unsigned long)high_memory),
-#ifdef CONFIG_HIGHMEM
- MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
- (PAGE_SIZE)),
-#endif
-#ifdef CONFIG_MODULES
- MLM(MODULES_VADDR, MODULES_END),
-#endif
-
- MLK_ROUNDUP(_text, _etext),
- MLK_ROUNDUP(__init_begin, __init_end),
- MLK_ROUNDUP(_sdata, _edata),
- MLK_ROUNDUP(__bss_start, __bss_stop));
-
-#undef MLK
-#undef MLM
-#undef MLK_ROUNDUP
-
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
diff --git a/arch/arm/mm/pmsa-v8.c b/arch/arm/mm/pmsa-v8.c
index 617a83def88a..0d7d5fb59247 100644
--- a/arch/arm/mm/pmsa-v8.c
+++ b/arch/arm/mm/pmsa-v8.c
@@ -165,7 +165,7 @@ static int __init pmsav8_setup_ram(unsigned int number, phys_addr_t start,phys_a
return -EINVAL;
bar = start;
- lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_NORMAL) | PMSAv8_LAR_EN;
@@ -181,7 +181,7 @@ static int __init pmsav8_setup_io(unsigned int number, phys_addr_t start,phys_ad
return -EINVAL;
bar = start;
- lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);;
+ lar = (end - 1) & ~(PMSAv8_MINALIGN - 1);
bar |= PMSAv8_AP_PL1RW_PL0RW | PMSAv8_RGN_SHARED | PMSAv8_BAR_XN;
lar |= PMSAv8_LAR_IDX(PMSAv8_RGN_DEVICE_nGnRnE) | PMSAv8_LAR_EN;
diff --git a/arch/arm/mm/proc-v7m.S b/arch/arm/mm/proc-v7m.S
index 47a5acc64433..acd5a66dfc23 100644
--- a/arch/arm/mm/proc-v7m.S
+++ b/arch/arm/mm/proc-v7m.S
@@ -139,6 +139,9 @@ __v7m_setup_cont:
cpsie i
svc #0
1: cpsid i
+ ldr r0, =exc_ret
+ orr lr, lr, #EXC_RET_THREADMODE_PROCESSSTACK
+ str lr, [r0]
ldmia sp, {r0-r3, r12}
str r5, [r12, #11 * 4] @ restore the original SVC vector entry
mov lr, r6 @ restore LR
@@ -149,10 +152,10 @@ __v7m_setup_cont:
@ Configure caches (if implemented)
teq r8, #0
- stmneia sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
+ stmiane sp, {r0-r6, lr} @ v7m_invalidate_l1 touches r0-r6
blne v7m_invalidate_l1
teq r8, #0 @ re-evalutae condition
- ldmneia sp, {r0-r6, lr}
+ ldmiane sp, {r0-r6, lr}
@ Configure the System Control Register to ensure 8-byte stack alignment
@ Note the STKALIGN bit is either RW or RAO.