author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-03-17 02:35:25 +0300
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-03-17 02:35:25 +0300
commit     1f0090a1eaa1b750a2fc5c99c91b790d5322a1fd (patch)
tree       c685060f260410e6704c9dfd457ed8c347141f1d /arch/arm/mm
parent     2472f3c8d8fc18b25b2cf1574c036e238187c0ff (diff)
parent     10a8c3839810ac9af1aec836d61b92e7a879f5fa (diff)
Merge branch 'misc' into devel
Conflicts:
	arch/arm/Kconfig
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		 5
-rw-r--r--	arch/arm/mm/cache-l2x0.c	32
-rw-r--r--	arch/arm/mm/mmu.c		10
-rw-r--r--	arch/arm/mm/vmregion.c		17
4 files changed, 31 insertions(+), 33 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e4509bae8fc4..05b26a03c209 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -845,6 +845,11 @@ config CACHE_XSC3L2
 	help
 	  This option enables the L2 cache on XScale3.
 
+config ARM_L1_CACHE_SHIFT_6
+	bool
+	help
+	  Setting ARM L1 cache line size to 64 Bytes.
+
 config ARM_L1_CACHE_SHIFT
 	int
 	default 6 if ARM_L1_CACHE_SHIFT_6
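
Note: ARM_L1_CACHE_SHIFT_6 is a hidden (non-prompted) bool; a CPU or platform entry selects it when the core uses 64-byte L1 cache lines, and ARM_L1_CACHE_SHIFT then defaults to 6, making L1_CACHE_BYTES 64. A minimal sketch of how a board entry might use it (MACH_FOO is a hypothetical example, not part of this patch):

config MACH_FOO
	bool "Foo development board"
	select ARM_L1_CACHE_SHIFT_6	# this SoC's L1 cache lines are 64 bytes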
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index f2ce38e085d2..ef59099a5463 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -73,18 +73,24 @@ static inline void l2x0_inv_line(unsigned long addr)
 	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
 }
 
-#ifdef CONFIG_PL310_ERRATA_588369
-static void debug_writel(unsigned long val)
-{
-	extern void omap_smc1(u32 fn, u32 arg);
+#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
 
-	/*
-	 * Texas Instrument secure monitor api to modify the
-	 * PL310 Debug Control Register.
-	 */
-	omap_smc1(0x100, val);
+#define debug_writel(val)	outer_cache.set_debug(val)
+
+static void l2x0_set_debug(unsigned long val)
+{
+	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
 }
+#else
+/* Optimised out for non-errata case */
+static inline void debug_writel(unsigned long val)
+{
+}
+
+#define l2x0_set_debug	NULL
+#endif
 
+#ifdef CONFIG_PL310_ERRATA_588369
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -97,11 +103,6 @@ static inline void l2x0_flush_line(unsigned long addr)
 }
 #else
 
-/* Optimised out for non-errata case */
-static inline void debug_writel(unsigned long val)
-{
-}
-
 static inline void l2x0_flush_line(unsigned long addr)
 {
 	void __iomem *base = l2x0_base;
@@ -125,9 +126,11 @@ static void l2x0_flush_all(void)
 
 	/* clean all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	debug_writel(0x03);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
 	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
 	cache_sync();
+	debug_writel(0x00);
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
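
Note: the debug_writel(0x03)/debug_writel(0x00) pair added above brackets the clean+invalidate by way operation with writes to the PL310 Debug Control Register, the conventional workaround for the 727915 erratum handled by this series: bit 0 disables cache linefills and bit 1 disables write-back (forcing write-through) while the by-way operation runs. An annotated restatement of the two magic values (the macro names are illustrative, not from this patch):

/* PL310 Debug Control Register bits; names here are illustrative only,
 * the patch writes the raw values 0x03 and 0x00. */
#define L2X0_DBG_DCL	(1 << 0)	/* disable cache linefill */
#define L2X0_DBG_DWB	(1 << 1)	/* disable write-back, force write-through */

debug_writel(L2X0_DBG_DCL | L2X0_DBG_DWB);	/* 0x03: enter errata-safe mode */
/* ... clean+invalidate by way, cache_sync() ... */
debug_writel(0x00);				/* restore normal behaviour */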
@@ -335,6 +338,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.flush_all = l2x0_flush_all;
 	outer_cache.inv_all = l2x0_inv_all;
 	outer_cache.disable = l2x0_disable;
+	outer_cache.set_debug = l2x0_set_debug;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
 	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 3c67e92f7d59..ff7b43b5885a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -827,16 +827,6 @@ static void __init sanity_check_meminfo(void)
 			 * rather difficult.
 			 */
 			reason = "with VIPT aliasing cache";
-		} else if (is_smp() && tlb_ops_need_broadcast()) {
-			/*
-			 * kmap_high needs to occasionally flush TLB entries,
-			 * however, if the TLB entries need to be broadcast
-			 * we may deadlock:
-			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
-			 *  flush_tlb_kernel_range->smp_call_function_many
-			 *  (must not be called with irqs off)
-			 */
-			reason = "without hardware TLB ops broadcasting";
 		}
 		if (reason) {
 			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
diff --git a/arch/arm/mm/vmregion.c b/arch/arm/mm/vmregion.c
index 935993e1b1ef..036fdbfdd62f 100644
--- a/arch/arm/mm/vmregion.c
+++ b/arch/arm/mm/vmregion.c
@@ -38,7 +38,7 @@ struct arm_vmregion *
 arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 		   size_t size, gfp_t gfp)
 {
-	unsigned long addr = head->vm_start, end = head->vm_end - size;
+	unsigned long start = head->vm_start, addr = head->vm_end;
 	unsigned long flags;
 	struct arm_vmregion *c, *new;
 
@@ -54,21 +54,20 @@ arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
 
 	spin_lock_irqsave(&head->vm_lock, flags);
 
-	list_for_each_entry(c, &head->vm_list, vm_list) {
-		if ((addr + size) < addr)
-			goto nospc;
-		if ((addr + size) <= c->vm_start)
+	addr = rounddown(addr - size, align);
+	list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
+		if (addr >= c->vm_end)
 			goto found;
-		addr = ALIGN(c->vm_end, align);
-		if (addr > end)
+		addr = rounddown(c->vm_start - size, align);
+		if (addr < start)
 			goto nospc;
 	}
 
  found:
 	/*
-	 * Insert this entry _before_ the one we found.
+	 * Insert this entry after the one we found.
 	 */
-	list_add_tail(&new->vm_list, &c->vm_list);
+	list_add(&new->vm_list, &c->vm_list);
 	new->vm_start = addr;
 	new->vm_end = addr + size;
 	new->vm_active = 1;
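
Note: this change flips arm_vmregion_alloc() from bottom-up to top-down allocation. The candidate address starts at vm_end - size rounded down to the requested alignment, the existing regions (kept sorted by address) are walked in reverse, and each occupied region pushes the candidate down below its vm_start until the candidate either clears a region's vm_end or falls below the start of the area. A standalone sketch of the same scan, with made-up types and a power-of-two rounddown (an illustration, not the kernel code):

#include <stdio.h>

#define ROUNDDOWN(x, a)	((x) & ~((unsigned long)(a) - 1))	/* a: power of two */

struct region { unsigned long start, end; };	/* existing regions, sorted by address */

/* Return the start of a free gap of 'size' bytes, searching top-down, or 0. */
static unsigned long alloc_top_down(unsigned long vm_start, unsigned long vm_end,
				    const struct region *r, int n,
				    unsigned long size, unsigned long align)
{
	unsigned long addr = ROUNDDOWN(vm_end - size, align);
	int i;

	for (i = n - 1; i >= 0; i--) {		/* highest existing region first */
		if (addr >= r[i].end)		/* candidate sits above it: found */
			return addr;
		addr = ROUNDDOWN(r[i].start - size, align);
		if (addr < vm_start)		/* fell below the area: no space */
			return 0;
	}
	return addr;				/* fits below the lowest region */
}

int main(void)
{
	struct region used[] = { { 0x1000, 0x3000 }, { 0x8000, 0x9000 } };

	/* area 0x1000-0x10000: a 0x2000-byte request lands at 0xe000, the top of the area */
	printf("%#lx\n", alloc_top_down(0x1000, 0x10000, used, 2, 0x2000, 0x1000));
	return 0;
}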