author     David S. Miller <davem@davemloft.net>  2013-09-21 08:50:41 +0400
committer  David S. Miller <davem@davemloft.net>  2013-11-13 03:22:34 +0400
commit     b2d438348024b75a1ee8b66b85d77f569a5dfed8 (patch)
tree       057c725d9d058d326533d0947aedd226adb57540 /arch/sparc/kernel
parent     f998c9c0d663b013e3aa3ba78908396c8c497218 (diff)
sparc64: Make PAGE_OFFSET variable.
Choose PAGE_OFFSET dynamically based upon cpu type.

Original UltraSPARC-I (spitfire) chips only supported a 44-bit virtual address space.

Newer chips (T4 and later) support 52-bit virtual addresses and up to 47 bits of physical memory space.

Therefore we have to adjust PAGE_OFFSET dynamically based upon the capabilities of the chip.

Note that this change alone does not allow us to support > 43-bit physical memory; to do that we need to re-arrange our page table support. The current encodings of the pmd_t and pgd_t pointers restrict us to "32 + 11" == 43 bits.

This change can waste quite a bit of memory for the various tables. In particular, a future change should work to size and allocate kern_linear_bitmap[] and sparc64_valid_addr_bitmap[] dynamically. This isn't easy, as we really cannot take a TLB miss when accessing kern_linear_bitmap[]. We'd have to lock it into the TLB or similar.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Bob Picco <bob.picco@oracle.com>
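For context, here is a minimal sketch of what "choosing PAGE_OFFSET dynamically" amounts to, assuming PAGE_OFFSET remains the highest 2^N bytes of the 64-bit virtual address space (i.e. -(1UL << N) for N supported physical address bits, matching the existing fixed layout). The macro name and the sample bit counts are illustrative; the actual selection logic lives in the mm side of this series, outside arch/sparc/kernel:

    /* Illustrative only -- not the commit's code.  Shows the arithmetic of
     * deriving a per-cpu PAGE_OFFSET from the number of supported physical
     * address bits.
     */
    #include <stdio.h>

    /* PAGE_OFFSET as the highest 2^bits bytes of the 64-bit VA space. */
    #define PAGE_OFFSET_BY_BITS(bits)   (-(1UL << (bits)))

    int main(void)
    {
        /* 43 bits: what the current pmd_t/pgd_t encoding ("32 + 11") allows.
         * 47 bits: what T4-and-later chips could support physically.
         */
        printf("PAGE_OFFSET with 43 phys bits: %#lx\n", PAGE_OFFSET_BY_BITS(43));
        printf("PAGE_OFFSET with 47 phys bits: %#lx\n", PAGE_OFFSET_BY_BITS(47));
        return 0;
    }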
Diffstat (limited to 'arch/sparc/kernel')
-rw-r--r--  arch/sparc/kernel/ktlb.S         | 30
-rw-r--r--  arch/sparc/kernel/vmlinux.lds.S  |  5
2 files changed, 30 insertions, 5 deletions
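In the ktlb.S hunks below, the shifts by the compile-time constant PAGE_OFFSET_VA_BITS are replaced by instructions that encode only the cpu-independent part of the shift (0, MAX_PHYS_ADDRESS_BITS, ILOG2_4MB, ILOG2_256MB), and each such instruction's address is recorded as a .word in the new .page_offset_shift_patch section, which the vmlinux.lds.S hunk collects between __page_offset_shift_patch and __page_offset_shift_patch_end. Boot code elsewhere in this series (not part of this diff) can then walk that table and add the cpu-specific number of PAGE_OFFSET virtual-address bits to each recorded sllx/srlx. The following is only a hypothetical userspace sketch of such an addend-style fixup; the entry layout mirrors the .word 661b records, but the function names, the sample encoding, and the absence of an I-cache flush are simplifications, not the actual kernel code:

    /* Illustrative only -- simulates patching the 6-bit shift-count field of
     * a recorded sllx/srlx, treating the value already encoded there as an
     * addend (so "sllx %g4, 0, %g5" becomes "sllx %g4, va_shift, %g5").
     */
    #include <stdint.h>
    #include <stdio.h>

    /* One table entry per ".word 661b": the address of an insn to fix up. */
    struct page_offset_shift_patch_entry {
        uint32_t *insn;
    };

    static void patch_shift_insn(uint32_t *insn, unsigned int va_shift)
    {
        uint32_t val = *insn;
        unsigned int shcnt = (val & 0x3f) + va_shift;   /* bits 5:0 hold shcnt */

        *insn = (val & ~0x3fU) | (shcnt & 0x3f);
        /* A real implementation would flush this insn from the I-cache here. */
    }

    int main(void)
    {
        /* Hypothetical encoding of "sllx %g4, 0, %g5" standing in for the
         * kernel's .text; the table would normally span the linker symbols
         * __page_offset_shift_patch .. __page_offset_shift_patch_end.
         */
        uint32_t fake_text[] = { 0x8b293000 };
        struct page_offset_shift_patch_entry table[] = { { &fake_text[0] } };
        unsigned int va_shift = 64 - 43;   /* e.g. a 43-bit linear map */

        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            patch_shift_insn(table[i].insn, va_shift);

        printf("patched insn: %#x (shift count now %u)\n",
               (unsigned int)fake_text[0], (unsigned int)(fake_text[0] & 0x3f));
        return 0;
    }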
diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S
index 7ad46bc0c698..542e96ac4d39 100644
--- a/arch/sparc/kernel/ktlb.S
+++ b/arch/sparc/kernel/ktlb.S
@@ -153,12 +153,19 @@ kvmap_dtlb_tsb4m_miss:
/* Clear the PAGE_OFFSET top virtual bits, shift
* down to get PFN, and make sure PFN is in range.
*/
- sllx %g4, PAGE_OFFSET_VA_BITS, %g5
+661: sllx %g4, 0, %g5
+ .section .page_offset_shift_patch, "ax"
+ .word 661b
+ .previous
/* Check to see if we know about valid memory at the 4MB
* chunk this physical address will reside within.
*/
- srlx %g5, PAGE_OFFSET_VA_BITS + MAX_PHYS_ADDRESS_BITS, %g2
+661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2
+ .section .page_offset_shift_patch, "ax"
+ .word 661b
+ .previous
+
brnz,pn %g2, kvmap_dtlb_longpath
nop
@@ -176,7 +183,11 @@ valid_addr_bitmap_patch:
or %g7, %lo(sparc64_valid_addr_bitmap), %g7
.previous
- srlx %g5, PAGE_OFFSET_VA_BITS + ILOG2_4MB, %g2
+661: srlx %g5, ILOG2_4MB, %g2
+ .section .page_offset_shift_patch, "ax"
+ .word 661b
+ .previous
+
srlx %g2, 6, %g5
and %g2, 63, %g2
sllx %g5, 3, %g5
@@ -189,9 +200,18 @@ valid_addr_bitmap_patch:
2: sethi %hi(kpte_linear_bitmap), %g2
/* Get the 256MB physical address index. */
- sllx %g4, PAGE_OFFSET_VA_BITS, %g5
+661: sllx %g4, 0, %g5
+ .section .page_offset_shift_patch, "ax"
+ .word 661b
+ .previous
+
or %g2, %lo(kpte_linear_bitmap), %g2
- srlx %g5, PAGE_OFFSET_VA_BITS + ILOG2_256MB, %g5
+
+661: srlx %g5, ILOG2_256MB, %g5
+ .section .page_offset_shift_patch, "ax"
+ .word 661b
+ .previous
+
and %g5, (32 - 1), %g7
/* Divide by 32 to get the offset into the bitmask. */
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0bacceb19150..932ff90fd760 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -122,6 +122,11 @@ SECTIONS
*(.swapper_4m_tsb_phys_patch)
__swapper_4m_tsb_phys_patch_end = .;
}
+ .page_offset_shift_patch : {
+ __page_offset_shift_patch = .;
+ *(.page_offset_shift_patch)
+ __page_offset_shift_patch_end = .;
+ }
.popc_3insn_patch : {
__popc_3insn_patch = .;
*(.popc_3insn_patch)