author     David Gibson <david@gibson.dropbear.id.au>    2005-11-24 00:37:45 +0300
committer  Linus Torvalds <torvalds@g5.osdl.org>         2005-11-24 03:08:39 +0300
commit     5e391dc9e3fec68922137ae317bf680a74656c1b (patch)
tree       9b262b96fff2f28e85a7e58c9cd69631655b781b
parent     e9b15b54d3646108bbd3e054158b402025d3e704 (diff)
download   linux-5e391dc9e3fec68922137ae317bf680a74656c1b.tar.xz
[PATCH] powerpc: fix for hugepage areas straddling 4GB boundary
Commit 7d24f0b8a53261709938ffabe3e00f88f6498df9 fixed bugs in the ppc64 SLB
miss handler with respect to hugepage handling, and in the process tweaked
the semantics of the hugepage address masks in mm_context_t.  Unfortunately,
it left out a couple of changes needed to go with that tweak: first, the
in_hugepage_area() macro was not updated to match; second,
prepare_hugepage_range() was not updated to correctly handle hugepage
regions which straddle the 4GB point.  The latter appears only to cause
process hangs when attempting to map such a region, but the former can
cause oopses if a get_user_pages() is triggered at the wrong point.  This
patch addresses both bugs.

Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
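To make the straddling case concrete, here is a minimal userspace C sketch of
the new logic.  It is not kernel code: prepare_range(), open_low() and
open_high() are hypothetical stand-ins for prepare_hugepage_range(),
open_low_hpage_areas() and open_high_hpage_areas(), and a 64-bit unsigned
long is assumed, as on ppc64.  The point it illustrates is that a range which
starts below the 4GB boundary and ends at or above it must open both the low
and the high hugepage area sets, which the old if/else structure could never
do.

#include <errno.h>
#include <stdio.h>

#define FOUR_GB	0x100000000UL	/* assumes 64-bit unsigned long, as on ppc64 */

/* Hypothetical stand-ins for open_low_hpage_areas()/open_high_hpage_areas();
 * they only report which area set would be opened. */
static int open_low(unsigned long addr, unsigned long len)
{
	printf("open low  areas for [%#lx, %#lx)\n", addr, addr + len);
	return 0;
}

static int open_high(unsigned long addr, unsigned long len)
{
	printf("open high areas for [%#lx, %#lx)\n", addr, addr + len);
	return 0;
}

/* Loosely mirrors the shape of the patched prepare_hugepage_range(): two
 * independent ifs, so a range that starts below 4GB and ends at or above it
 * opens *both* area sets -- something the old if/else never did. */
static int prepare_range(unsigned long addr, unsigned long len)
{
	int err = 0;

	if (addr + len < addr)		/* wrap-around check, as in the patch */
		return -EINVAL;

	if (addr < FOUR_GB)
		err = open_low(addr, len);
	if (addr + len >= FOUR_GB)
		err = open_high(addr, len);	/* may overwrite err, as in the patch */
	return err;
}

int main(void)
{
	/* A 512MB request straddling the 4GB point: both branches fire. */
	return prepare_range(FOUR_GB - 0x10000000UL, 0x20000000UL);
}

Run as-is, the straddling request triggers both branches; with the pre-patch
if/else, only the high-area branch would have run for such a range, leaving
the low areas unopened and the mapping attempt hanging, as described above.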
 arch/powerpc/mm/hugetlbpage.c | 6 +++---
 include/asm-powerpc/page_64.h | 6 +++---
 2 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 9250f14be8ef..f867bba893ca 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -287,15 +287,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
 
 int prepare_hugepage_range(unsigned long addr, unsigned long len)
 {
-	int err;
+	int err = 0;
 
 	if ( (addr+len) < addr )
 		return -EINVAL;
 
-	if ((addr + len) < 0x100000000UL)
+	if (addr < 0x100000000UL)
 		err = open_low_hpage_areas(current->mm,
 					   LOW_ESID_MASK(addr, len));
-	else
+	if ((addr + len) >= 0x100000000UL)
 		err = open_high_hpage_areas(current->mm,
 					   HTLB_AREA_MASK(addr, len));
 	if (err) {
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index 1e6e7846824f..58a3dd9a79ec 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -135,9 +135,9 @@ extern unsigned int HPAGE_SHIFT;
 
 #define in_hugepage_area(context, addr)	\
 	(cpu_has_feature(CPU_FTR_16M_PAGE) && \
-	 ( ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) || \
-	   ( ((addr) < 0x100000000L) && \
-	     ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) ) )
+	 ( ( (addr) >= 0x100000000UL) \
+	   ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
+	   : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )
 
 #else /* !CONFIG_HUGETLB_PAGE */