author     Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-13 19:00:51 +0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2012-01-13 19:02:35 +0400
commit     716a3dc20084da9b3ab17bd125005a5345e23e3b (patch)
tree       f7ba487050d33fc2913fdee81b384f5578ccb105 /arch/arm/mm/init.c
parent     4de3a8e101150feaefa1139611a50ff37467f33e (diff)
download   linux-716a3dc20084da9b3ab17bd125005a5345e23e3b.tar.xz
ARM: Add arm_memblock_steal() to allocate memory away from the kernel
Several platforms are now using the memblock_alloc+memblock_free+memblock_remove trick to obtain memory which won't be mapped in the kernel's page tables. Most platforms do this (correctly) in the ->reserve callback. However, OMAP has started to call these functions outside of this callback, and this is extremely unsafe - memory will not be unmapped, and could well be given out after memblock is no longer responsible for its management.

So, provide arm_memblock_steal() to perform this function, and ensure that it panic()s if it is used inappropriately. Convert everyone over, including OMAP.

As a result, OMAP with OMAP4_ERRATA_I688 enabled will panic on boot with this change. Mark this option as BROKEN and make it depend on BROKEN. OMAP needs to be fixed, or 137d105d50 (ARM: OMAP4: Fix errata i688 with MPU interconnect barriers.) reverted until such time as it can be fixed correctly.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
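As a rough illustration of the intended usage, a platform would call the new helper from its machine descriptor's ->reserve callback, which arm_memblock_init() invokes while stealing is still permitted. This is only a sketch; the identifiers example_reserve and example_fb_base are hypothetical and not part of this patch.

/*
 * Hypothetical platform code. All "example_" names are made up for
 * illustration; only arm_memblock_steal() comes from this patch.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/memblock.h>

static phys_addr_t example_fb_base;

static void __init example_reserve(void)
{
	/*
	 * Steal 4 MiB, 1 MiB aligned. The region is removed from memblock
	 * and therefore never mapped in the kernel's page tables.
	 */
	example_fb_base = arm_memblock_steal(4 << 20, 1 << 20);
}

/* Wired up through the machine descriptor, e.g. .reserve = example_reserve */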
Diffstat (limited to 'arch/arm/mm/init.c')
-rw-r--r--  arch/arm/mm/init.c | 17
1 file changed, 17 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index e34ea8adc1f9..6ec1226fc62d 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -22,6 +22,7 @@
#include <linux/memblock.h>
#include <asm/mach-types.h>
+#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
@@ -307,6 +308,21 @@ static void arm_memory_present(void)
}
#endif
+static bool arm_memblock_steal_permitted = true;
+
+phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+{
+ phys_addr_t phys;
+
+ BUG_ON(!arm_memblock_steal_permitted);
+
+ phys = memblock_alloc(size, align);
+ memblock_free(phys, size);
+ memblock_remove(phys, size);
+
+ return phys;
+}
+
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
@@ -349,6 +365,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
if (mdesc->reserve)
mdesc->reserve();
+ arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
}
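For contrast, a sketch of the ordering this change now traps: once arm_memblock_init() has cleared arm_memblock_steal_permitted, any later call - such as OMAP's calls outside the ->reserve callback mentioned in the commit message - hits the BUG_ON(). The function name example_late_steal below is hypothetical.

/*
 * Hypothetical late-boot code: this is the unsafe ordering the patch
 * rejects, not something to copy.
 */
#include <linux/init.h>
#include <linux/types.h>
#include <asm/memblock.h>

static int __init example_late_steal(void)
{
	/*
	 * arm_memblock_init() has already run and cleared
	 * arm_memblock_steal_permitted, so this triggers the BUG_ON()
	 * instead of silently handing out memory that is still mapped.
	 */
	phys_addr_t phys = arm_memblock_steal(1 << 20, 1 << 20);

	return phys ? 0 : -1;
}
early_initcall(example_late_steal);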