Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/alignment.c     20
-rw-r--r--  arch/arm/mm/cache-l2x0.c   216
-rw-r--r--  arch/arm/mm/cache-v7.S      20
-rw-r--r--  arch/arm/mm/dma-mapping.c    8
-rw-r--r--  arch/arm/mm/fault.c          1
-rw-r--r--  arch/arm/mm/init.c           7
-rw-r--r--  arch/arm/mm/ioremap.c       21
-rw-r--r--  arch/arm/mm/mm.h             4
-rw-r--r--  arch/arm/mm/mmu.c           18
-rw-r--r--  arch/arm/mm/proc-v7.S        2
10 files changed, 291 insertions, 26 deletions
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index cfbcf8b95599..c335c76e0d88 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -86,16 +86,6 @@ core_param(alignment, ai_usermode, int, 0600);
#define UM_FIXUP (1 << 1)
#define UM_SIGNAL (1 << 2)
-#ifdef CONFIG_PROC_FS
-static const char *usermode_action[] = {
- "ignored",
- "warn",
- "fixup",
- "fixup+warn",
- "signal",
- "signal+warn"
-};
-
/* Return true if and only if the ARMv6 unaligned access model is in use. */
static bool cpu_is_v6_unaligned(void)
{
@@ -123,6 +113,16 @@ static int safe_usermode(int new_usermode, bool warn)
return new_usermode;
}
+#ifdef CONFIG_PROC_FS
+static const char *usermode_action[] = {
+ "ignored",
+ "warn",
+ "fixup",
+ "fixup+warn",
+ "signal",
+ "signal+warn"
+};
+
static int alignment_proc_show(struct seq_file *m, void *v)
{
seq_printf(m, "User:\t\t%lu\n", ai_user);
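The alignment.c hunks are pure code motion: cpu_is_v6_unaligned() and safe_usermode() are called even when /proc support is not built in, so the #ifdef CONFIG_PROC_FS guard and the usermode_action[] strings it protects apparently move below them to keep !CONFIG_PROC_FS builds working. A minimal layout sketch of the result, with function bodies elided:

    /* post-patch layout of alignment.c (sketch, bodies elided) */
    static bool cpu_is_v6_unaligned(void) { /* ... */ }        /* always built */
    static int safe_usermode(int new_usermode, bool warn) { /* ... */ }

    #ifdef CONFIG_PROC_FS        /* only the /proc plumbing stays guarded */
    static const char *usermode_action[] = { "ignored", /* ... */ };
    static int alignment_proc_show(struct seq_file *m, void *v) { /* ... */ }
    #endif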
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9ecfdb511951..3f9b9980478e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -16,9 +16,12 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
@@ -30,11 +33,19 @@ static DEFINE_SPINLOCK(l2x0_lock);
static uint32_t l2x0_way_mask; /* Bitmask of active ways */
static uint32_t l2x0_size;
+struct l2x0_regs l2x0_saved_regs;
+
+struct l2x0_of_data {
+ void (*setup)(const struct device_node *, __u32 *, __u32 *);
+ void (*save)(void);
+ void (*resume)(void);
+};
+
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
- ;
+ cpu_relax();
}
#ifdef CONFIG_CACHE_PL310
@@ -277,7 +288,7 @@ static void l2x0_disable(void)
spin_unlock_irqrestore(&l2x0_lock, flags);
}
-static void __init l2x0_unlock(__u32 cache_id)
+static void l2x0_unlock(__u32 cache_id)
{
int lockregs;
int i;
@@ -353,6 +364,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
/* l2x0 controller is disabled */
writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
+ l2x0_saved_regs.aux_ctrl = aux;
+
l2x0_inv_all();
/* enable L2X0 */
@@ -372,3 +385,202 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
ways, cache_id, aux, l2x0_size);
}
+
+#ifdef CONFIG_OF
+static void __init l2x0_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[2] = { 0, 0 };
+ u32 tag = 0;
+ u32 dirty = 0;
+ u32 val = 0, mask = 0;
+
+ of_property_read_u32(np, "arm,tag-latency", &tag);
+ if (tag) {
+ mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
+ val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
+ }
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1]) {
+ mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
+ L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
+ val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
+ ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
+ }
+
+ of_property_read_u32(np, "arm,dirty-latency", &dirty);
+ if (dirty) {
+ mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
+ val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
+ }
+
+ *aux_val &= ~mask;
+ *aux_val |= val;
+ *aux_mask &= ~mask;
+}
+
+static void __init pl310_of_setup(const struct device_node *np,
+ __u32 *aux_val, __u32 *aux_mask)
+{
+ u32 data[3] = { 0, 0, 0 };
+ u32 tag[3] = { 0, 0, 0 };
+ u32 filter[2] = { 0, 0 };
+
+ of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
+ if (tag[0] && tag[1] && tag[2])
+ writel_relaxed(
+ ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,data-latency",
+ data, ARRAY_SIZE(data));
+ if (data[0] && data[1] && data[2])
+ writel_relaxed(
+ ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
+ ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
+ ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+
+ of_property_read_u32_array(np, "arm,filter-ranges",
+ filter, ARRAY_SIZE(filter));
+ if (filter[1]) {
+ writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+ }
+}
+
+static void __init pl310_save(void)
+{
+ u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
+ L2X0_TAG_LATENCY_CTRL);
+ l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
+ L2X0_DATA_LATENCY_CTRL);
+ l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_END);
+ l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
+ L2X0_ADDR_FILTER_START);
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ /*
+ * From r2p0, there is Prefetch offset/control register
+ */
+ l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
+ L2X0_PREFETCH_CTRL);
+ /*
+ * From r3p0, there is Power control register
+ */
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
+ L2X0_POWER_CTRL);
+ }
+}
+
+static void l2x0_resume(void)
+{
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore aux ctrl and enable l2 */
+ l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));
+
+ writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
+ L2X0_AUX_CTRL);
+
+ l2x0_inv_all();
+
+ writel_relaxed(1, l2x0_base + L2X0_CTRL);
+ }
+}
+
+static void pl310_resume(void)
+{
+ u32 l2x0_revision;
+
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ /* restore pl310 setup */
+ writel_relaxed(l2x0_saved_regs.tag_latency,
+ l2x0_base + L2X0_TAG_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.data_latency,
+ l2x0_base + L2X0_DATA_LATENCY_CTRL);
+ writel_relaxed(l2x0_saved_regs.filter_end,
+ l2x0_base + L2X0_ADDR_FILTER_END);
+ writel_relaxed(l2x0_saved_regs.filter_start,
+ l2x0_base + L2X0_ADDR_FILTER_START);
+
+ l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
+ L2X0_CACHE_ID_RTL_MASK;
+
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
+ writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
+ l2x0_base + L2X0_PREFETCH_CTRL);
+ if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
+ writel_relaxed(l2x0_saved_regs.pwr_ctrl,
+ l2x0_base + L2X0_POWER_CTRL);
+ }
+ }
+
+ l2x0_resume();
+}
+
+static const struct l2x0_of_data pl310_data = {
+ pl310_of_setup,
+ pl310_save,
+ pl310_resume,
+};
+
+static const struct l2x0_of_data l2x0_data = {
+ l2x0_of_setup,
+ NULL,
+ l2x0_resume,
+};
+
+static const struct of_device_id l2x0_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
+ { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
+ { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
+ {}
+};
+
+int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
+{
+ struct device_node *np;
+ struct l2x0_of_data *data;
+ struct resource res;
+
+ np = of_find_matching_node(NULL, l2x0_ids);
+ if (!np)
+ return -ENODEV;
+
+ if (of_address_to_resource(np, 0, &res))
+ return -ENODEV;
+
+ l2x0_base = ioremap(res.start, resource_size(&res));
+ if (!l2x0_base)
+ return -ENOMEM;
+
+ l2x0_saved_regs.phy_base = res.start;
+
+ data = of_match_node(l2x0_ids, np)->data;
+
+ /* L2 configuration can only be changed if the cache is disabled */
+ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
+ if (data->setup)
+ data->setup(np, &aux_val, &aux_mask);
+ }
+
+ if (data->save)
+ data->save();
+
+ l2x0_init(l2x0_base, aux_val, aux_mask);
+
+ outer_cache.resume = data->resume;
+ return 0;
+}
+#endif
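The bulk of the cache-l2x0.c additions wire the controller up to the device tree: l2x0_of_init() locates the first node matching arm,pl310-cache, arm,l220-cache, or arm,l210-cache, ioremaps its registers, lets the per-variant setup hook translate the arm,tag-latency / arm,data-latency / arm,dirty-latency / arm,filter-ranges properties into register writes (only while the cache is still disabled), and then falls through to the existing l2x0_init(). The save/resume hooks preserve the PL310 latency, filter, prefetch, and power registers across a power-down, checking the RTL revision before touching registers that only exist from r2p0/r3p0 onward. A hedged caller sketch — the machine-init name is made up, and passing (0, ~0) simply leaves the aux control register to the DT properties and reset defaults:

    #include <linux/printk.h>
    #include <asm/hardware/cache-l2x0.h>

    static void __init imaginary_dt_board_init(void)
    {
            /* aux_val = 0, aux_mask = ~0: keep every aux bit as
             * configured by the DT / boot-time defaults
             */
            if (l2x0_of_init(0, ~0UL))
                    pr_warn("L2 cache: no DT node found or ioremap failed\n");
    }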
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 3b24bfa3b828..07c4bc8ea0a4 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r12, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification
add r12, r12, r2
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area)
add r1, r0, r1
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line
add r0, r0, r2
@@ -247,6 +255,10 @@ v7_dma_inv_range:
sub r3, r2, #1
tst r0, r3
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
tst r1, r3
@@ -270,6 +282,10 @@ v7_dma_clean_range:
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c10, 1 @ clean D / U line
add r0, r0, r2
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range)
dcache_line_size r2, r3
sub r3, r2, #1
bic r0, r0, r3
+#ifdef CONFIG_ARM_ERRATA_764369
+ ALT_SMP(W(dsb))
+ ALT_UP(W(nop))
+#endif
1:
mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line
add r0, r0, r2
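Every cache-v7.S hunk inserts the same guarded sequence ahead of a maintenance-by-MVA loop: with CONFIG_ARM_ERRATA_764369 enabled, SMP kernels execute a DSB, patched down to a NOP on UP by the ALT_SMP/ALT_UP alternatives. Erratum 764369 is a Cortex-A9 MPCore issue where a data/unified cache maintenance operation by MVA on shared memory may not take effect as expected; draining outstanding writes with a barrier before the first operation is the software workaround. A conceptual C rendering of the pattern — the real code patches the instruction at boot rather than branching, and the helper name here is invented:

    #include <asm/system.h>         /* dsb() barrier macro in kernels of this vintage */
    #include <asm/smp_plat.h>       /* is_smp() */

    static inline void erratum_764369_barrier(void)
    {
    #ifdef CONFIG_ARM_ERRATA_764369
            if (is_smp())           /* ALT_SMP path: barrier on SMP... */
                    dsb();          /* ...drains writes before the maintenance op */
                                    /* ALT_UP path: plain NOP on UP */
    #endif
    }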
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 0a0a1e7c20d2..235eb775fc78 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -123,8 +123,8 @@ static void __dma_free_buffer(struct page *page, size_t size)
#endif
#define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
-#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
-#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
+#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PMD_SHIFT)
+#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PMD_SHIFT)
/*
* These are the page tables (2MB each) covering uncached, DMA consistent allocations
@@ -183,7 +183,7 @@ static int __init consistent_init(void)
}
consistent_pte[i++] = pte;
- base += (1 << PGDIR_SHIFT);
+ base += PMD_SIZE;
} while (base < CONSISTENT_END);
return ret;
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
if (addr)
*handle = pfn_to_dma(dev, page_to_pfn(page));
+ else
+ __dma_free_buffer(page, size);
return addr;
}
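dma-mapping.c carries two independent fixes. First, the consistent-mapping macros and the loop stride switch from PGDIR_SHIFT/PGDIR_SIZE to PMD_SHIFT/PMD_SIZE: each consistent PTE table covers one 2MB pmd entry, and while both shifts are 21 on the classic two-level tables, they diverge once three-level (LPAE) tables exist. Second, __dma_alloc() stops leaking memory: if remapping the freshly allocated buffer fails, the buffer is now handed back instead of being silently dropped. A sketch of that allocate/map/unwind shape, with invented helper names standing in for the file's internals:

    /* alloc_dma_pages()/remap_dma_pages() are hypothetical stand-ins */
    static void *dma_buf_get(struct device *dev, size_t size, gfp_t gfp)
    {
            struct page *page = alloc_dma_pages(dev, size, gfp);
            void *addr;

            if (!page)
                    return NULL;

            addr = remap_dma_pages(page, size);
            if (!addr)
                    __free_pages(page, get_order(size));    /* unwind, don't leak */

            return addr;
    }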
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 3b5ea68acbb8..aa33949fef60 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -20,6 +20,7 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
+#include <asm/exception.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cc7e2d8be9aa..f8037ba338ac 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+ /*
+ * Align down here since the VM subsystem insists that the
+ * memmap entries are valid from the bank start aligned to
+ * MAX_ORDER_NR_PAGES.
+ */
+ bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space
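The init.c addition covers the !SPARSEMEM case: the buddy allocator assumes the memmap is valid for the whole MAX_ORDER_NR_PAGES-aligned block containing any managed page, so free_unused_memmap() must not free the entries between that alignment boundary and an unaligned bank start. Rounding the bank start down preserves them. A worked example, assuming 4K pages and the default MAX_ORDER of 11, so MAX_ORDER_NR_PAGES == 1024:

    unsigned long bank_start = 0x60321;                  /* first pfn, unaligned */
    unsigned long keep = bank_start & ~(1024UL - 1);     /* round_down -> 0x60000 */
    /* memmap entries for pfns 0x60000..0x60320 now stay allocated */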
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index ab506272b2d3..bdb248c4f55c 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -289,6 +289,27 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
}
EXPORT_SYMBOL(__arm_ioremap);
+/*
+ * Remap an arbitrary physical address space into the kernel virtual
+ * address space as memory. Needed when the kernel wants to execute
+ * code in external memory. This is needed for reprogramming source
+ * clocks that would affect normal memory for example. Please see
+ * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
+ */
+void __iomem *
+__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
+{
+ unsigned int mtype;
+
+ if (cached)
+ mtype = MT_MEMORY;
+ else
+ mtype = MT_MEMORY_NONCACHED;
+
+ return __arm_ioremap_caller(phys_addr, size, mtype,
+ __builtin_return_address(0));
+}
+
void __iounmap(volatile void __iomem *io_addr)
{
void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
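__arm_ioremap_exec() is new API rather than a fix: it maps the range with the MT_MEMORY or MT_MEMORY_NONCACHED memory type, which, unlike device mappings, is not marked execute-never, so the kernel can jump into the mapping. The comment names the intended use: running code from external memory (typically SRAM) while reprogramming clocks that normal memory depends on. A hedged usage sketch; the physical base is a made-up board constant, and fncpy() is mentioned only as the usual helper for copying a function into such a mapping:

    #include <linux/sizes.h>
    #include <asm/io.h>

    #define BOARD_SRAM_PHYS 0x40000000UL    /* hypothetical on-chip SRAM */

    static void __iomem *sram_base;

    static int __init board_sram_map(void)
    {
            sram_base = __arm_ioremap_exec(BOARD_SRAM_PHYS, SZ_4K, true);
            if (!sram_base)
                    return -ENOMEM;
            /* e.g. fncpy(sram_base, &board_sram_suspend, SZ_4K); then call
             * the returned pointer while SDRAM is unavailable
             */
            return 0;
    }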
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 010566799c80..ad7cce3bc431 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -12,8 +12,8 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
- unsigned int prot_l1;
- unsigned int prot_sect;
+ pmdval_t prot_l1;
+ pmdval_t prot_sect;
unsigned int domain;
};
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 594d677b92c8..226f1804be12 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -60,7 +60,7 @@ EXPORT_SYMBOL(pgprot_kernel);
struct cachepolicy {
const char policy[16];
unsigned int cr_mask;
- unsigned int pmd;
+ pmdval_t pmd;
pteval_t pte;
};
@@ -288,7 +288,7 @@ static void __init build_mem_type_table(void)
{
struct cachepolicy *cp;
unsigned int cr = get_cr();
- unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
+ pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
int cpu_arch = cpu_architecture();
int i;
@@ -863,14 +863,14 @@ static inline void prepare_page_table(void)
/*
* Clear out all the mappings below the kernel image.
*/
- for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
+ for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
#ifdef CONFIG_XIP_KERNEL
/* The XIP kernel is mapped in the module area -- skip over it */
- addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
+ addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
- for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
+ for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
@@ -885,10 +885,12 @@ static inline void prepare_page_table(void)
* memory bank, up to the end of the vmalloc region.
*/
for (addr = __phys_to_virt(end);
- addr < VMALLOC_END; addr += PGDIR_SIZE)
+ addr < VMALLOC_END; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
}
+#define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
+
/*
* Reserve the special regions of memory
*/
@@ -898,7 +900,7 @@ void __init arm_mm_memblock_reserve(void)
* Reserve the page tables. These are already in use,
* and can only be in node 0.
*/
- memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
+ memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);
#ifdef CONFIG_SA1111
/*
@@ -926,7 +928,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
*/
vectors_page = early_alloc(PAGE_SIZE);
- for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
+ for (addr = VMALLOC_END; addr; addr += PMD_SIZE)
pmd_clear(pmd_off_k(addr));
/*
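The mm.h and mmu.c changes read as three-level page-table (LPAE) groundwork: prot_l1/prot_sect and the cached cachepolicy pmd value widen from unsigned int to pmdval_t, the pgprot scratch variables become pteval_t, and every loop that clears first-level entries steps by PMD_SIZE instead of PGDIR_SIZE. With today's two-level tables both constants describe the same 2MB granularity, so nothing changes at run time; under LPAE, PGDIR_SHIFT grows to 30 while pmd_clear() still operates on 2MB entries, making PMD_SIZE the correct stride. The swapper_pg_dir reservation also gains a named SWAPPER_PG_DIR_SIZE, presumably so a later patch can resize it in one place. A numeric sketch of why the stride matters, with header values as I understand them:

    /* classic 2-level: PMD_SHIFT == PGDIR_SHIFT == 21 -> 2MB stride either way
     * LPAE 3-level:    PMD_SHIFT == 21, PGDIR_SHIFT == 30 -> 2MB vs. 1GB
     * pmd_clear() always clears one PMD_SIZE-sized first-level entry, so
     * stepping by PGDIR_SIZE under LPAE would skip 511 of every 512 entries.
     */
    for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
            pmd_clear(pmd_off_k(addr));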
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 9049c0764db2..9591c8e9fb8c 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -218,7 +218,7 @@ ENDPROC(cpu_v7_set_pte_ext)
/* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */
.globl cpu_v7_suspend_size
.equ cpu_v7_suspend_size, 4 * 9
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_v7_do_suspend)
stmfd sp!, {r4 - r11, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID