author		Kevin Hao <haokexin@gmail.com>		2013-12-24 11:12:11 +0400
committer	Scott Wood <scottwood@freescale.com>	2014-01-10 03:52:18 +0400
commit		0be7d969b0efef085ed6497d462ba16a875ca737 (patch)
tree		c2cf4e6760102757785a0d91fcb052372153e9c7 /arch/powerpc
parent		7d2471f9fa85089beb1cb9436ffc28f9e11e518d (diff)
powerpc/fsl_booke: smp support for booting a relocatable kernel above 64M
When booting a secondary cpu above 64M, we face the same issue as on the boot cpu: PAGE_OFFSET is mapped to two different physical addresses by the init TLB entry and by the final map. So we have to go through the same switch_to_as1/restore_to_as0 sequence to move between these two maps. When restoring to AS0 on a secondary cpu we only need to return to the caller, so add a new parameter to restore_to_as0 for this purpose.

Use LOAD_REG_ADDR_PIC to get the addresses of variables which may be accessed before the final map has been set up in the CAMs on the secondary cpu, and move the CAM setup a bit earlier to avoid unnecessary uses of LOAD_REG_ADDR_PIC.

Signed-off-by: Kevin Hao <haokexin@gmail.com>
Signed-off-by: Scott Wood <scottwood@freescale.com>
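For reference, LOAD_REG_ADDR_PIC (introduced earlier in this series in arch/powerpc/include/asm/ppc_asm.h) loads an address PC-relatively instead of via absolute relocations, so it yields the correct address even while the relocatable kernel runs away from its link address. A sketch of the 32-bit definition (whitespace and labels approximate):

	bl	0f			/* branch-and-link: LR = address of label 0 */
0:	mflr	reg			/* reg = runtime address of label 0 */
	addis	reg,reg,(name - 0b)@ha	/* add the link-time distance ... */
	addi	reg,reg,(name - 0b)@l	/* ... from label 0 to 'name' */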
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/head_fsl_booke.S	| 41
-rw-r--r--	arch/powerpc/mm/fsl_booke_mmu.c		|  4
-rw-r--r--	arch/powerpc/mm/mmu_decl.h		|  2
-rw-r--r--	arch/powerpc/mm/tlb_nohash_low.S	|  4
4 files changed, 34 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index b1f7edc3c360..b497188a94a1 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -216,8 +216,7 @@ set_ivor:
/* Check to see if we're the second processor, and jump
* to the secondary_start code if so
*/
- lis r24, boot_cpuid@h
- ori r24, r24, boot_cpuid@l
+ LOAD_REG_ADDR_PIC(r24, boot_cpuid)
lwz r24, 0(r24)
cmpwi r24, -1
mfspr r24,SPRN_PIR
@@ -1146,24 +1145,36 @@ _GLOBAL(__flush_disable_L1)
/* When we get here, r24 needs to hold the CPU # */
.globl __secondary_start
__secondary_start:
- lis r3,__secondary_hold_acknowledge@h
- ori r3,r3,__secondary_hold_acknowledge@l
- stw r24,0(r3)
-
- li r3,0
- mr r4,r24 /* Why? */
- bl call_setup_cpu
-
- lis r3,tlbcam_index@ha
- lwz r3,tlbcam_index@l(r3)
+ LOAD_REG_ADDR_PIC(r3, tlbcam_index)
+ lwz r3,0(r3)
mtctr r3
li r26,0 /* r26 safe? */
+ bl switch_to_as1
+ mr r27,r3 /* tlb entry */
/* Load each CAM entry */
1: mr r3,r26
bl loadcam_entry
addi r26,r26,1
bdnz 1b
+ mr r3,r27 /* tlb entry */
+ LOAD_REG_ADDR_PIC(r4, memstart_addr)
+ lwz r4,0(r4)
+ mr r5,r25 /* phys kernel start */
+ rlwinm r5,r5,0,~0x3ffffff /* aligned 64M */
+ subf r4,r5,r4 /* memstart_addr - phys kernel start */
+ li r5,0 /* no device tree */
+ li r6,0 /* not boot cpu */
+ bl restore_to_as0
+
+
+ lis r3,__secondary_hold_acknowledge@h
+ ori r3,r3,__secondary_hold_acknowledge@l
+ stw r24,0(r3)
+
+ li r3,0
+ mr r4,r24 /* Why? */
+ bl call_setup_cpu
/* get current_thread_info and current */
lis r1,secondary_ti@ha
@@ -1253,6 +1264,7 @@ _GLOBAL(switch_to_as1)
* r3 - the tlb entry which should be invalidated
* r4 - __pa(PAGE_OFFSET in AS1) - __pa(PAGE_OFFSET in AS0)
* r5 - device tree virtual address. If r4 is 0, r5 is ignored.
+ * r6 - boot cpu
*/
_GLOBAL(restore_to_as0)
mflr r0
@@ -1268,6 +1280,7 @@ _GLOBAL(restore_to_as0)
*/
add r9,r9,r4
add r5,r5,r4
+ add r0,r0,r4
2: mfmsr r7
li r8,(MSR_IS | MSR_DS)
@@ -1290,7 +1303,9 @@ _GLOBAL(restore_to_as0)
isync
cmpwi r4,0
- bne 3f
+ cmpwi cr1,r6,0
+ cror eq,4*cr1+eq,eq
+ bne 3f /* offset != 0 && is_boot_cpu */
mtlr r0
blr
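The new cror folds two tests into a single branch: cr0.eq holds "offset == 0", cr1.eq holds "bootcpu == 0", and cror eq,4*cr1+eq,eq ORs them into cr0.eq, so bne is taken only when both tests fail. In C terms the tail of restore_to_as0 now behaves roughly like this sketch (continue_boot stands in for the 3f label):

	if (offset != 0 && bootcpu)
		goto continue_boot;	/* 3f: relocated boot cpu carries on booting at the new mapping */
	return;				/* secondary cpu, or no relocation: back to the caller */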
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 95deb9fdf92f..a68671c18ad4 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -231,7 +231,7 @@ void __init adjust_total_lowmem(void)
i = switch_to_as1();
__max_low_memory = map_mem_in_cams(ram, CONFIG_LOWMEM_CAM_NUM);
- restore_to_as0(i, 0, 0);
+ restore_to_as0(i, 0, 0, 1);
pr_info("Memory CAM mapping: ");
for (i = 0; i < tlbcam_index - 1; i++)
@@ -302,7 +302,7 @@ notrace void __init relocate_init(u64 dt_ptr, phys_addr_t start)
else
map_mem_in_cams_addr(start, PAGE_OFFSET + offset,
0x4000000, CONFIG_LOWMEM_CAM_NUM);
- restore_to_as0(n, offset, __va(dt_ptr));
+ restore_to_as0(n, offset, __va(dt_ptr), 1);
/* We should never reach here */
panic("Relocation error");
}
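Both C call sites here run on the boot cpu, so they pass 1 for the new bootcpu argument; the only caller passing 0 is the __secondary_start assembly above, which loads r6 with 0 before the call.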
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 91da910210cb..9615d82919b8 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -149,7 +149,7 @@ extern void MMU_init_hw(void);
extern unsigned long mmu_mapin_ram(unsigned long top);
extern void adjust_total_lowmem(void);
extern int switch_to_as1(void);
-extern void restore_to_as0(int esel, int offset, void *dt_ptr);
+extern void restore_to_as0(int esel, int offset, void *dt_ptr, int bootcpu);
#endif
extern void loadcam_entry(unsigned int index);
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S
index 626ad081639f..43ff3c797fbf 100644
--- a/arch/powerpc/mm/tlb_nohash_low.S
+++ b/arch/powerpc/mm/tlb_nohash_low.S
@@ -402,7 +402,9 @@ _GLOBAL(set_context)
* Load TLBCAM[index] entry in to the L2 CAM MMU
*/
_GLOBAL(loadcam_entry)
- LOAD_REG_ADDR(r4, TLBCAM)
+ mflr r5
+ LOAD_REG_ADDR_PIC(r4, TLBCAM)
+ mtlr r5
mulli r5,r3,TLBCAM_SIZE
add r3,r5,r4
lwz r4,TLBCAM_MAS0(r3)
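The mflr/mtlr pair added around LOAD_REG_ADDR_PIC is what makes this hunk grow by two instructions: the macro's PC-relative sequence begins with a bl, which clobbers the link register, so loadcam_entry must save and restore LR in order to blr back to its caller. The LOAD_REG_ADDR it replaces uses absolute relocations and leaves LR untouched, but would produce the wrong address before the final map is in place.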