author	Scott Wood <scottwood@freescale.com>	2015-10-07 06:48:12 +0300
committer	Scott Wood <scottwood@freescale.com>	2015-10-28 02:13:25 +0300
commit	f34b3e19fd060b87e97c89050f2b40b8ada468a9 (patch)
tree	a4aad95d90cda53bd75398feef7e863284e14091 /arch/powerpc
parent	939fbf00805b395743e2406755ca20f5f959a598 (diff)
download	linux-f34b3e19fd060b87e97c89050f2b40b8ada468a9.tar.xz
powerpc/e6500: kexec: Handle hardware threads

The new kernel will be expecting secondary threads to be disabled,
not spinning.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/head_64.S	16
-rw-r--r--	arch/powerpc/platforms/85xx/smp.c	46
2 files changed, 62 insertions, 0 deletions
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index d48125d0c048..8b2bf0d3b7d1 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -182,6 +182,8 @@ exception_marker:
 
 #ifdef CONFIG_PPC_BOOK3E
 _GLOBAL(fsl_secondary_thread_init)
+	mfspr	r4,SPRN_BUCSR
+
 	/* Enable branch prediction */
 	lis	r3,BUCSR_INIT@h
 	ori	r3,r3,BUCSR_INIT@l
@@ -196,10 +198,24 @@ _GLOBAL(fsl_secondary_thread_init)
 	 * number. There are two threads per core, so shift everything
 	 * but the low bit right by two bits so that the cpu numbering is
 	 * continuous.
+	 *
+	 * If the old value of BUCSR is non-zero, this thread has run
+	 * before. Thus, we assume we are coming from kexec or a similar
+	 * scenario, and PIR is already set to the correct value. This
+	 * is a bit of a hack, but there are limited opportunities for
+	 * getting information into the thread and the alternatives
+	 * seemed like they'd be overkill. We can't tell just by looking
+	 * at the old PIR value which state it's in, since the same value
+	 * could be valid for one thread out of reset and for a different
+	 * thread in Linux.
 	 */
+
 	mfspr	r3, SPRN_PIR
+	cmpwi	r4,0
+	bne	1f
 	rlwimi	r3, r3, 30, 2, 30
 	mtspr	SPRN_PIR, r3
+1:
 #endif
 
 _GLOBAL(generic_secondary_thread_init)
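
[Note] The PIR fixup above ("rlwimi r3, r3, 30, 2, 30") can be read as the C sketch below. This is illustrative only, not part of the patch: it keeps the low (thread) bit and shifts the remaining bits right by two, i.e. the "shift everything but the low bit right by two bits" described in the comment. The helper name and the example reset value are hypothetical.

/*
 * Illustrative sketch of the PIR renumbering done by
 * "rlwimi r3, r3, 30, 2, 30": keep the low thread bit and move the
 * core number down by two bits, so that with two threads per core
 * the resulting numbering is continuous.
 */
static inline unsigned int fsl_linear_pir(unsigned int reset_pir)
{
	return ((reset_pir >> 2) & ~1u) | (reset_pir & 1u);
}

For example, if a core-3/thread-1 CPU comes out of reset with PIR 0x19, this yields 7 (2 * 3 + 1), so thread numbers stay consecutive.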
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index 2e4668435307..712764f3dcbc 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -374,9 +374,55 @@ static void mpc85xx_smp_kexec_down(void *arg)
 #else
 void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary)
 {
+	int cpu = smp_processor_id();
+	int sibling = cpu_last_thread_sibling(cpu);
+	bool notified = false;
+	int disable_cpu;
+	int disable_threadbit = 0;
+	long start = mftb();
+	long now;
+
 	local_irq_disable();
 	hard_irq_disable();
 	mpic_teardown_this_cpu(secondary);
+
+	if (cpu == crashing_cpu && cpu_thread_in_core(cpu) != 0) {
+		/*
+		 * We enter the crash kernel on whatever cpu crashed,
+		 * even if it's a secondary thread. If that's the case,
+		 * disable the corresponding primary thread.
+		 */
+		disable_threadbit = 1;
+		disable_cpu = cpu_first_thread_sibling(cpu);
+	} else if (sibling != crashing_cpu &&
+			cpu_thread_in_core(cpu) == 0 &&
+			cpu_thread_in_core(sibling) != 0) {
+		disable_threadbit = 2;
+		disable_cpu = sibling;
+	}
+
+	if (disable_threadbit) {
+		while (paca[disable_cpu].kexec_state < KEXEC_STATE_REAL_MODE) {
+			barrier();
+			now = mftb();
+			if (!notified && now - start > 1000000) {
+				pr_info("%s/%d: waiting for cpu %d to enter KEXEC_STATE_REAL_MODE (%d)\n",
+					__func__, smp_processor_id(),
+					disable_cpu,
+					paca[disable_cpu].kexec_state);
+				notified = true;
+			}
+		}
+
+		if (notified) {
+			pr_info("%s: cpu %d done waiting\n",
+				__func__, disable_cpu);
+		}
+
+		mtspr(SPRN_TENC, disable_threadbit);
+		while (mfspr(SPRN_TENSR) & disable_threadbit)
+			cpu_relax();
+	}
 }
 #endif
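
[Note] On the smp.c hunk: the constants 1 and 2 assigned to disable_threadbit are the thread-enable bits for hardware threads 0 and 1 of the core, so the final step of mpc85xx_smp_kexec_cpu_down() can be read as the sketch below. This is illustrative only and not part of the patch; the helper name is hypothetical, and it assumes (as the patch does) that bit n of TENC/TENSR corresponds to hardware thread n of the current core.

/*
 * Hypothetical helper, for illustration only: stop one hardware
 * thread of the current core and wait until the hardware reports
 * it disabled.
 */
static void e6500_disable_hw_thread(int cpu)
{
	/* thread 0 -> 0x1, thread 1 -> 0x2, matching disable_threadbit */
	unsigned long threadbit = 1UL << cpu_thread_in_core(cpu);

	mtspr(SPRN_TENC, threadbit);		/* clear the thread's enable bit */
	while (mfspr(SPRN_TENSR) & threadbit)	/* wait for it to actually stop */
		cpu_relax();
}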