author		Yinghai Lu <yinghai@kernel.org>		2012-11-17 07:39:14 +0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2012-11-17 23:59:41 +0400
commit		1829ae9ad7380bf17333ab9ad1610631d9cb8664 (patch)
tree		ae54f697df1ac557090f626af4349613b619f462 /arch/x86/mm/init.c
parent		84d770019bb990dcd8013d9d08174d0e1516b517 (diff)
download	linux-1829ae9ad7380bf17333ab9ad1610631d9cb8664.tar.xz
x86, mm: use pfn instead of pos in split_mem_range
Carry the running position through split_mem_range() as a page frame
number (pfn) instead of a physical address (pos); this saves some
bit-shifting operations converting between the two representations.
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1353123563-3103-38-git-send-email-yinghai@kernel.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
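The rewrite leans on a small identity: when pos is page aligned (pos ==
PFN_PHYS(pfn)), rounding the address up to a PMD boundary and then
shifting down to a pfn gives the same result as rounding the pfn up by a
PMD's worth of pages. A minimal userspace sketch checking this, where
PAGE_SHIFT, PFN_DOWN, PFN_PHYS and round_up are simplified local
stand-ins for the kernel macros (assumptions here, not part of the
patch):

/*
 * Standalone check (not kernel code): the macros below are simplified
 * stand-ins for the kernel's, valid for power-of-two alignments.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)	/* 2M large page */
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((unsigned long)(x) << PAGE_SHIFT)
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))

int main(void)
{
	for (unsigned long pfn = 0; pfn < (1UL << 20); pfn++) {
		unsigned long pos = PFN_PHYS(pfn);

		/* old: round the physical address, then shift to a pfn */
		unsigned long old_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
		/* new: round the pfn by the page count of a PMD */
		unsigned long new_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));

		assert(old_pfn == new_pfn);
	}
	printf("pfn-based rounding matches address-based rounding\n");
	return 0;
}

The saved shifts come from dropping the PFN_PHYS()/PFN_DOWN() round
trips when the position is carried from one stage of the split to the
next.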
Diffstat (limited to 'arch/x86/mm/init.c')

-rw-r--r--	arch/x86/mm/init.c	29

1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 1cca052b2cbd..4bf1c5374928 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -204,12 +204,11 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 			     unsigned long end)
 {
 	unsigned long start_pfn, end_pfn;
-	unsigned long pos;
+	unsigned long pfn;
 	int i;
 
 	/* head if not big page alignment ? */
-	start_pfn = PFN_DOWN(start);
-	pos = PFN_PHYS(start_pfn);
+	pfn = start_pfn = PFN_DOWN(start);
 #ifdef CONFIG_X86_32
 	/*
 	 * Don't use a large page for the first 2/4MB of memory
@@ -217,26 +216,26 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	 * and overlapping MTRRs into large pages can cause
 	 * slowdowns.
 	 */
-	if (pos == 0)
+	if (pfn == 0)
 		end_pfn = PFN_DOWN(PMD_SIZE);
 	else
-		end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+		end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #else /* CONFIG_X86_64 */
-	end_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #endif
 	if (end_pfn > PFN_DOWN(end))
 		end_pfn = PFN_DOWN(end);
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
-		pos = PFN_PHYS(end_pfn);
+		pfn = end_pfn;
 	}
 
 	/* big page (2M) range */
-	start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 #ifdef CONFIG_X86_32
 	end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #else /* CONFIG_X86_64 */
-	end_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
 	if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
 		end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 #endif
@@ -244,32 +243,32 @@ static int __meminit split_mem_range(struct map_range *mr, int nr_range,
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = PFN_PHYS(end_pfn);
+		pfn = end_pfn;
 	}
 
 #ifdef CONFIG_X86_64
 	/* big page (1G) range */
-	start_pfn = PFN_DOWN(round_up(pos, PUD_SIZE));
+	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
 	end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask &
				 ((1<<PG_LEVEL_2M)|(1<<PG_LEVEL_1G)));
-		pos = PFN_PHYS(end_pfn);
+		pfn = end_pfn;
 	}
 
 	/* tail is not big page (1G) alignment */
-	start_pfn = PFN_DOWN(round_up(pos, PMD_SIZE));
+	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
 	end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
 	if (start_pfn < end_pfn) {
 		nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,
 				page_size_mask & (1<<PG_LEVEL_2M));
-		pos = PFN_PHYS(end_pfn);
+		pfn = end_pfn;
	}
 #endif
 
 	/* tail is not big page (2M) alignment */
-	start_pfn = PFN_DOWN(pos);
+	start_pfn = pfn;
 	end_pfn = PFN_DOWN(end);
 	nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
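To see what the rewritten body produces, here is a standalone sketch
(not the kernel function: PAGE_SHIFT, PMD_SIZE, PUD_SIZE, the rounding
helpers and the example range are all simplified local assumptions)
that replays the patched 64-bit logic purely in pfn units and prints
the sub-ranges split_mem_range() would hand to save_mr():

/* Illustrative replay of the patched 64-bit split, in pfn units. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SIZE	(1UL << 21)
#define PUD_SIZE	(1UL << 30)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define round_up(x, y)	(((x) + (y) - 1) & ~((y) - 1))
#define round_down(x, y) ((x) & ~((y) - 1))

static void show(const char *what, unsigned long s, unsigned long e)
{
	if (s < e)
		printf("%-8s pfn [%#lx, %#lx)\n", what, s, e);
}

int main(void)
{
	unsigned long start = 0x123000, end = 0x8765f000;	/* made up */
	unsigned long pfn, start_pfn, end_pfn;

	pfn = start_pfn = PFN_DOWN(start);

	/* head: 4K pages up to the first 2M boundary */
	end_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	if (end_pfn > PFN_DOWN(end))
		end_pfn = PFN_DOWN(end);
	show("head 4K", start_pfn, end_pfn);
	if (start_pfn < end_pfn)
		pfn = end_pfn;

	/* 2M pages up to the first 1G boundary */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	if (end_pfn > PFN_DOWN(round_down(end, PMD_SIZE)))
		end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
	show("2M", start_pfn, end_pfn);
	if (start_pfn < end_pfn)
		pfn = end_pfn;

	/* 1G pages */
	start_pfn = round_up(pfn, PFN_DOWN(PUD_SIZE));
	end_pfn = PFN_DOWN(round_down(end, PUD_SIZE));
	show("1G", start_pfn, end_pfn);
	if (start_pfn < end_pfn)
		pfn = end_pfn;

	/* trailing 2M pages after the last whole 1G page */
	start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));
	end_pfn = PFN_DOWN(round_down(end, PMD_SIZE));
	show("tail 2M", start_pfn, end_pfn);
	if (start_pfn < end_pfn)
		pfn = end_pfn;

	/* trailing 4K pages */
	show("tail 4K", pfn, PFN_DOWN(end));
	return 0;
}

For this example range it prints a 4K head up to the first 2M boundary
(pfn 0x123 to 0x200), 2M pages up to the first 1G boundary, one run of
1G pages, then 2M and 4K tails; note that pfn, not a physical address,
is what carries the position from stage to stage.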