From f34b3e19fd060b87e97c89050f2b40b8ada468a9 Mon Sep 17 00:00:00 2001
From: Scott Wood
Date: Tue, 6 Oct 2015 22:48:12 -0500
Subject: powerpc/e6500: kexec: Handle hardware threads

The new kernel will be expecting secondary threads to be disabled,
not spinning.

Signed-off-by: Scott Wood
---
 arch/powerpc/kernel/head_64.S | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
(limited to 'arch/powerpc/kernel/head_64.S')

diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d0c048..8b2bf0d3b7d1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -182,6 +182,8 @@ exception_marker:
 
 #ifdef CONFIG_PPC_BOOK3E
 _GLOBAL(fsl_secondary_thread_init)
+	mfspr	r4,SPRN_BUCSR
+
 	/* Enable branch prediction */
 	lis	r3,BUCSR_INIT@h
 	ori	r3,r3,BUCSR_INIT@l
@@ -196,10 +198,24 @@ _GLOBAL(fsl_secondary_thread_init)
 	 * number. There are two threads per core, so shift everything
 	 * but the low bit right by two bits so that the cpu numbering is
 	 * continuous.
+	 *
+	 * If the old value of BUCSR is non-zero, this thread has run
+	 * before. Thus, we assume we are coming from kexec or a similar
+	 * scenario, and PIR is already set to the correct value. This
+	 * is a bit of a hack, but there are limited opportunities for
+	 * getting information into the thread and the alternatives
+	 * seemed like they'd be overkill. We can't tell just by looking
+	 * at the old PIR value which state it's in, since the same value
+	 * could be valid for one thread out of reset and for a different
+	 * thread in Linux.
 	 */
 	mfspr	r3, SPRN_PIR
+	cmpwi	r4,0
+	bne	1f
 	rlwimi	r3, r3, 30, 2, 30
 	mtspr	SPRN_PIR, r3
+1:
 #endif
 
 _GLOBAL(generic_secondary_thread_init)
--
cgit v1.2.3
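
The sketch below is not part of the patch; it is a plain-C model of what the
"rlwimi r3, r3, 30, 2, 30" instruction does to PIR: rotate the value right by
two bits and insert bits 2..30 (IBM numbering) into the destination, leaving
the low thread bit untouched, so that two-thread cores get continuous CPU
numbers. The helper names (renumber_pir, rotr2) and the sample reset-time PIR
values are illustrative assumptions, not taken from the kernel; the samples
assume cores are spaced eight PIR values apart, as implied by the comment in
the patch.

#include <stdint.h>
#include <stdio.h>

/* Rotate a 32-bit value right by two bits (rlwimi shift of 30 left). */
static uint32_t rotr2(uint32_t x)
{
	return (x >> 2) | (x << 30);
}

/*
 * C model of "rlwimi r3, r3, 30, 2, 30": insert bits 2..30 (IBM
 * numbering, mask 0x3ffffffe) of the rotated value into the old PIR,
 * keeping the remaining bits -- including the low thread bit.
 */
static uint32_t renumber_pir(uint32_t pir)
{
	const uint32_t mask = 0x3ffffffe;	/* IBM bits 2..30 */

	return (rotr2(pir) & mask) | (pir & ~mask);
}

int main(void)
{
	/*
	 * Hypothetical reset-time PIR values for three two-threaded
	 * cores: core number in the upper bits, thread in the low bit.
	 */
	uint32_t reset_pir[] = { 0x00, 0x01, 0x08, 0x09, 0x10, 0x11 };

	for (unsigned i = 0; i < sizeof(reset_pir) / sizeof(reset_pir[0]); i++)
		printf("reset PIR 0x%02x -> cpu %u\n",
		       (unsigned)reset_pir[i],
		       (unsigned)renumber_pir(reset_pir[i]));

	/* Prints cpus 0..5: continuous numbering, two threads per core. */
	return 0;
}

Running the model on the sample values yields CPU numbers 0 through 5, which
is the continuous numbering the comment describes; on a kexec path the branch
added by the patch skips this renumbering because PIR already holds the Linux
value.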