Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/ftrace.h            | 29
 arch/powerpc/include/asm/paca.h              |  1
 arch/powerpc/include/asm/powernv.h           |  2
 arch/powerpc/include/asm/topology.h          | 13
 arch/powerpc/kernel/eeh_pe.c                 |  3
 arch/powerpc/kernel/idle_book3s.S            |  4
 arch/powerpc/kernel/mce_power.c              |  7
 arch/powerpc/kernel/setup_64.c               | 11
 arch/powerpc/kernel/smp.c                    | 49
 arch/powerpc/kvm/booke.c                     |  7
 arch/powerpc/lib/feature-fixups.c            |  2
 arch/powerpc/mm/mem.c                        |  2
 arch/powerpc/platforms/cell/spufs/sched.c    |  2
 arch/powerpc/platforms/powernv/memtrace.c    | 17
 arch/powerpc/platforms/powernv/npu-dma.c     | 88
 arch/powerpc/platforms/powernv/opal-nvram.c  | 14
 arch/powerpc/platforms/powernv/opal-rtc.c    |  8
 arch/powerpc/sysdev/xive/native.c            |  4
 18 files changed, 191 insertions(+), 72 deletions(-)
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h
index 9abddde372ab..b2dabd06659d 100644
--- a/arch/powerpc/include/asm/ftrace.h
+++ b/arch/powerpc/include/asm/ftrace.h
@@ -69,17 +69,30 @@ struct dyn_arch_ftrace {
#endif
#if defined(CONFIG_FTRACE_SYSCALLS) && !defined(__ASSEMBLY__)
-#ifdef PPC64_ELF_ABI_v1
+/*
+ * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
+ * for instance) or ppc32_/ppc64_. We should also match the sys_ variant with
+ * those.
+ */
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+#ifdef PPC64_ELF_ABI_v1
+static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+{
+ /* We need to skip past the initial dot, and the __se_sys alias */
+ return !strcmp(sym + 1, name) ||
+ (!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
+ (!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
+ (!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
+ (!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
+}
+#else
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
- /*
- * Compare the symbol name with the system call name. Skip the .sys or .SyS
- * prefix from the symbol name and the sys prefix from the system call name and
- * just match the rest. This is only needed on ppc64 since symbol names on
- * 32bit do not start with a period so the generic function will work.
- */
- return !strcmp(sym + 4, name + 3);
+ return !strcmp(sym, name) ||
+ (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
+ (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
+ (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
+ (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
#endif
#endif /* CONFIG_FTRACE_SYSCALLS && !__ASSEMBLY__ */
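[Editor's note: the matching rules above are easy to sanity-check in userspace. A minimal sketch, lifting the ABI-v2 variant verbatim; the symbol names below are just illustrative examples, not part of the patch.]

/* Userspace sketch of the ABI-v2 matching rules above; not kernel code. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool match(const char *sym, const char *name)
{
        return !strcmp(sym, name) ||
                (!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
                (!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
                (!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
                (!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}

int main(void)
{
        /* "ppc_fork" matches "sys_fork": skip "ppc_"/"sys_", compare "fork" */
        printf("%d\n", match("ppc_fork", "sys_fork"));                 /* 1 */
        /* "__se_sys_clone": sym + 5 skips "__se_", leaving "sys_clone" */
        printf("%d\n", match("__se_sys_clone", "sys_clone"));          /* 1 */
        printf("%d\n", match("ppc64_swapcontext", "sys_swapcontext")); /* 1 */
        printf("%d\n", match("sys_read", "sys_write"));                /* 0 */
        return 0;
}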
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 4185f1c96125..3f109a3e3edb 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -165,7 +165,6 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
- u8 soft_enabled; /* irq soft-enable flag */
u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
diff --git a/arch/powerpc/include/asm/powernv.h b/arch/powerpc/include/asm/powernv.h
index d1c2d2e658cf..2f3ff7a27881 100644
--- a/arch/powerpc/include/asm/powernv.h
+++ b/arch/powerpc/include/asm/powernv.h
@@ -15,7 +15,7 @@
extern void powernv_set_nmmu_ptcr(unsigned long ptcr);
extern struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
unsigned long flags,
- struct npu_context *(*cb)(struct npu_context *, void *),
+ void (*cb)(struct npu_context *, void *),
void *priv);
extern void pnv_npu2_destroy_context(struct npu_context *context,
struct pci_dev *gpdev);
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h
index 9f421641a35c..16b077801a5f 100644
--- a/arch/powerpc/include/asm/topology.h
+++ b/arch/powerpc/include/asm/topology.h
@@ -91,6 +91,7 @@ extern int start_topology_update(void);
extern int stop_topology_update(void);
extern int prrn_is_enabled(void);
extern int find_and_online_cpu_nid(int cpu);
+extern int timed_topology_update(int nsecs);
#else
static inline int start_topology_update(void)
{
@@ -108,16 +109,12 @@ static inline int find_and_online_cpu_nid(int cpu)
{
return 0;
}
+static inline int timed_topology_update(int nsecs)
+{
+ return 0;
+}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
-#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_NEED_MULTIPLE_NODES)
-#if defined(CONFIG_PPC_SPLPAR)
-extern int timed_topology_update(int nsecs);
-#else
-#define timed_topology_update(nsecs)
-#endif /* CONFIG_PPC_SPLPAR */
-#endif /* CONFIG_HOTPLUG_CPU || CONFIG_NEED_MULTIPLE_NODES */
-
#include <asm-generic/topology.h>
#ifdef CONFIG_SMP
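[Editor's note: the #else stub above follows the usual kernel config-stub idiom: a typed static inline no-op rather than an empty macro, so call sites type-check identically in both configurations. A minimal userspace sketch of the idiom, with FEATURE_ON standing in for CONFIG_PPC_SPLPAR.]

#include <stdio.h>

#ifdef FEATURE_ON
extern int timed_topology_update(int nsecs);
#else
static inline int timed_topology_update(int nsecs)
{
        (void)nsecs;
        return 0;       /* no-op, but type-checked, unlike an empty macro */
}
#endif

int main(void)
{
        /* Compiles and runs whether or not the feature is configured in. */
        printf("%d\n", timed_topology_update(5));
        return 0;
}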
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 2d4956e97aa9..ee5a67d57aab 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -807,7 +807,8 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev)
eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1] |
+ PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
/* Check the PCIe link is ready */
eeh_bridge_check_link(edev);
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index 79d005445c6c..e734f6e45abc 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -553,12 +553,12 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
lbz r0,HSTATE_HWTHREAD_STATE(r13)
cmpwi r0,KVM_HWTHREAD_IN_KERNEL
- beq 1f
+ beq 0f
li r0,KVM_HWTHREAD_IN_KERNEL
stb r0,HSTATE_HWTHREAD_STATE(r13)
/* Order setting hwthread_state vs. testing hwthread_req */
sync
- lbz r0,HSTATE_HWTHREAD_REQ(r13)
+0: lbz r0,HSTATE_HWTHREAD_REQ(r13)
cmpwi r0,0
beq 1f
b kvm_start_guest
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index fe6fc63251fe..38c5b4764bfe 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -441,7 +441,6 @@ static int mce_handle_ierror(struct pt_regs *regs,
if (pfn != ULONG_MAX) {
*phys_addr =
(pfn << PAGE_SHIFT);
- handled = 1;
}
}
}
@@ -532,9 +531,7 @@ static int mce_handle_derror(struct pt_regs *regs,
* kernel/exception-64s.h
*/
if (get_paca()->in_mce < MAX_MCE_DEPTH)
- if (!mce_find_instr_ea_and_pfn(regs, addr,
- phys_addr))
- handled = 1;
+ mce_find_instr_ea_and_pfn(regs, addr, phys_addr);
}
found = 1;
}
@@ -572,7 +569,7 @@ static long mce_handle_error(struct pt_regs *regs,
const struct mce_ierror_table itable[])
{
struct mce_error_info mce_err = { 0 };
- uint64_t addr, phys_addr;
+ uint64_t addr, phys_addr = ULONG_MAX;
uint64_t srr1 = regs->msr;
long handled;
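[Editor's note: a hedged sketch of why the sentinel initialisation above matters. phys_addr is only written on some paths, and callers treat ULONG_MAX as "no usable address"; without the initialiser an untaken path would hand back stack garbage. The helper below is hypothetical, standing in for mce_find_instr_ea_and_pfn().]

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static void maybe_find_addr(uint64_t *phys_addr, int found)
{
        if (found)
                *phys_addr = 0x2000;    /* pfn << PAGE_SHIFT in the real code */
        /* else: *phys_addr is deliberately left untouched */
}

int main(void)
{
        uint64_t phys_addr = ULONG_MAX; /* sentinel, as in the patch */

        maybe_find_addr(&phys_addr, 0);
        if (phys_addr == ULONG_MAX)
                printf("no usable address: skip memory poisoning\n");
        return 0;
}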
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 44c30dd38067..b78f142a4148 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -890,6 +890,17 @@ static void __ref init_fallback_flush(void)
return;
l1d_size = ppc64_caches.l1d.size;
+
+ /*
+ * If there is no d-cache-size property in the device tree, l1d_size
+ * could be zero. That leads to the loop in the asm wrapping around to
+ * 2^64-1, and then walking off the end of the fallback area and
+ * eventually causing a page fault which is fatal. Just default to
+ * something vaguely sane.
+ */
+ if (!l1d_size)
+ l1d_size = (64 * 1024);
+
limit = min(ppc64_bolted_size(), ppc64_rma_size);
/*
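[Editor's note: a hedged illustration of the wraparound the new comment describes. The real fallback-flush loop is in assembly; this is only an LP64 C analogue of sizing an unsigned count from a zero cache size.]

#include <stdio.h>

int main(void)
{
        unsigned long l1d_size = 0;     /* missing d-cache-size property */
        unsigned long line_size = 128;
        unsigned long iters = l1d_size / line_size;     /* 0 */

        /* A loop that decrements before testing underflows to 2^64 - 1
         * iterations and walks far past the end of the fallback area: */
        printf("%lu\n", iters - 1);     /* 18446744073709551615 on LP64 */
        return 0;
}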
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e16ec7b3b427..9ca7148b5881 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -566,10 +566,35 @@ void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *))
#endif
#ifdef CONFIG_NMI_IPI
-static void stop_this_cpu(struct pt_regs *regs)
-#else
+static void nmi_stop_this_cpu(struct pt_regs *regs)
+{
+ /*
+ * This is a special case because it never returns, so the NMI IPI
+ * handling would never mark it as done, which makes any later
+ * smp_send_nmi_ipi() call spin forever. Mark it done now.
+ *
+ * IRQs are already hard disabled by the smp_handle_nmi_ipi.
+ */
+ nmi_ipi_lock();
+ nmi_ipi_busy_count--;
+ nmi_ipi_unlock();
+
+ /* Remove this CPU */
+ set_cpu_online(smp_processor_id(), false);
+
+ spin_begin();
+ while (1)
+ spin_cpu_relax();
+}
+
+void smp_send_stop(void)
+{
+ smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000);
+}
+
+#else /* CONFIG_NMI_IPI */
+
static void stop_this_cpu(void *dummy)
-#endif
{
/* Remove this CPU */
set_cpu_online(smp_processor_id(), false);
@@ -582,12 +607,22 @@ static void stop_this_cpu(void *dummy)
void smp_send_stop(void)
{
-#ifdef CONFIG_NMI_IPI
- smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, stop_this_cpu, 1000000);
-#else
+ static bool stopped = false;
+
+ /*
+ * Prevent waiting on csd lock from a previous smp_send_stop.
+ * This is racy, but in general callers try to do the right
+ * thing and only fire off one smp_send_stop (e.g., see
+ * kernel/panic.c)
+ */
+ if (stopped)
+ return;
+
+ stopped = true;
+
smp_call_function(stop_this_cpu, NULL, 0);
-#endif
}
+#endif /* CONFIG_NMI_IPI */
struct thread_info *current_set[NR_CPUS];
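[Editor's note: a minimal analogue of the once-only guard introduced above. As the patch comment says, the plain bool is deliberately racy; panic paths are expected to call this once, and the guard only protects against waiting on a csd lock still held by a previous stop.]

#include <stdbool.h>
#include <stdio.h>

static void send_stop(void)
{
        static bool stopped = false;

        if (stopped)
                return;         /* later callers return early */
        stopped = true;
        printf("stopping other CPUs (smp_call_function in the real code)\n");
}

int main(void)
{
        send_stop();    /* does the work */
        send_stop();    /* no-op: avoids re-waiting on a busy csd lock */
        return 0;
}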
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 6038e2e7aee0..876d4f294fdd 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -305,6 +305,13 @@ void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}
+#ifdef CONFIG_ALTIVEC
+void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+}
+#endif
+
void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
diff --git a/arch/powerpc/lib/feature-fixups.c b/arch/powerpc/lib/feature-fixups.c
index 35f80ab7cbd8..288fe4f0db4e 100644
--- a/arch/powerpc/lib/feature-fixups.c
+++ b/arch/powerpc/lib/feature-fixups.c
@@ -55,7 +55,7 @@ static int patch_alt_instruction(unsigned int *src, unsigned int *dest,
unsigned int *target = (unsigned int *)branch_target(src);
/* Branch within the section doesn't need translating */
- if (target < alt_start || target >= alt_end) {
+ if (target < alt_start || target > alt_end) {
instr = translate_branch(dest, src);
if (!instr)
return 1;
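[Editor's note: a sketch of the boundary change above. A branch target equal to alt_end, i.e. a branch to the label just past the last alternative instruction, is still inside the patched region and must not be translated; the old >= test mis-handled exactly that case. Addresses below are made up.]

#include <stdbool.h>
#include <stdio.h>

static bool needs_translation(unsigned long target,
                              unsigned long alt_start, unsigned long alt_end)
{
        return target < alt_start || target > alt_end;  /* was: >= alt_end */
}

int main(void)
{
        unsigned long alt_start = 0x1000, alt_end = 0x1010;

        printf("%d\n", needs_translation(0x1008, alt_start, alt_end)); /* 0 */
        printf("%d\n", needs_translation(0x1010, alt_start, alt_end)); /* 0: end label */
        printf("%d\n", needs_translation(0x2000, alt_start, alt_end)); /* 1 */
        return 0;
}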
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 737f8a4632cc..c3c39b02b2ba 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -133,6 +133,7 @@ int __meminit arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *
start, start + size, rc);
return -EFAULT;
}
+ flush_inval_dcache_range(start, start + size);
return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
}
@@ -159,6 +160,7 @@ int __meminit arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap
/* Remove htab bolted mappings for this section of memory */
start = (unsigned long)__va(start);
+ flush_inval_dcache_range(start, start + size);
ret = remove_section_mapping(start, start + size);
/* Ensure all vmalloc mappings are flushed in case they also
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 9033c8194eda..ccc421503363 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private)
LOAD_INT(c), LOAD_FRAC(c),
count_active_contexts(),
atomic_read(&nr_spu_contexts),
- idr_get_cursor(&task_active_pid_ns(current)->idr));
+ idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index de470caf0784..fc222a0c2ac4 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -82,19 +82,6 @@ static const struct file_operations memtrace_fops = {
.open = simple_open,
};
-static void flush_memory_region(u64 base, u64 size)
-{
- unsigned long line_size = ppc64_caches.l1d.size;
- u64 end = base + size;
- u64 addr;
-
- base = round_down(base, line_size);
- end = round_up(end, line_size);
-
- for (addr = base; addr < end; addr += line_size)
- asm volatile("dcbf 0,%0" : "=r" (addr) :: "memory");
-}
-
static int check_memblock_online(struct memory_block *mem, void *arg)
{
if (mem->state != MEM_ONLINE)
@@ -132,10 +119,6 @@ static bool memtrace_offline_pages(u32 nid, u64 start_pfn, u64 nr_pages)
walk_memory_range(start_pfn, end_pfn, (void *)MEM_OFFLINE,
change_memblock_state);
- /* RCU grace period? */
- flush_memory_region((u64)__va(start_pfn << PAGE_SHIFT),
- nr_pages << PAGE_SHIFT);
-
lock_device_hotplug();
remove_memory(nid, start_pfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT);
unlock_device_hotplug();
diff --git a/arch/powerpc/platforms/powernv/npu-dma.c b/arch/powerpc/platforms/powernv/npu-dma.c
index 69a4f9e8bd55..525e966dce34 100644
--- a/arch/powerpc/platforms/powernv/npu-dma.c
+++ b/arch/powerpc/platforms/powernv/npu-dma.c
@@ -34,6 +34,19 @@
#define npu_to_phb(x) container_of(x, struct pnv_phb, npu)
/*
+ * spinlock to protect initialisation of an npu_context for a particular
+ * mm_struct.
+ */
+static DEFINE_SPINLOCK(npu_context_lock);
+
+/*
+ * When an address shootdown range exceeds this threshold we invalidate the
+ * entire TLB on the GPU for the given PID rather than each specific address in
+ * the range.
+ */
+#define ATSD_THRESHOLD (2*1024*1024)
+
+/*
* Other types of TCE cache invalidation are not functional in the
* hardware.
*/
@@ -401,7 +414,7 @@ struct npu_context {
bool nmmu_flush;
/* Callback to stop translation requests on a given GPU */
- struct npu_context *(*release_cb)(struct npu_context *, void *);
+ void (*release_cb)(struct npu_context *context, void *priv);
/*
* Private pointer passed to the above callback for usage by
@@ -671,11 +684,19 @@ static void pnv_npu2_mn_invalidate_range(struct mmu_notifier *mn,
struct npu_context *npu_context = mn_to_npu_context(mn);
unsigned long address;
- for (address = start; address < end; address += PAGE_SIZE)
- mmio_invalidate(npu_context, 1, address, false);
+ if (end - start > ATSD_THRESHOLD) {
+ /*
+ * Just invalidate the entire PID if the address range is too
+ * large.
+ */
+ mmio_invalidate(npu_context, 0, 0, true);
+ } else {
+ for (address = start; address < end; address += PAGE_SIZE)
+ mmio_invalidate(npu_context, 1, address, false);
- /* Do the flush only on the final addess == end */
- mmio_invalidate(npu_context, 1, address, true);
+ /* Do the flush only on the final address == end */
+ mmio_invalidate(npu_context, 1, address, true);
+ }
}
static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
@@ -696,11 +717,12 @@ static const struct mmu_notifier_ops nv_nmmu_notifier_ops = {
* Returns an error if no contexts are currently available, or an
* npu_context which should be passed to pnv_npu2_handle_fault().
*
- * mmap_sem must be held in write mode.
+ * mmap_sem must be held in write mode and must not be called from interrupt
+ * context.
*/
struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
unsigned long flags,
- struct npu_context *(*cb)(struct npu_context *, void *),
+ void (*cb)(struct npu_context *, void *),
void *priv)
{
int rc;
@@ -743,7 +765,9 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
/*
* Setup the NPU context table for a particular GPU. These need to be
* per-GPU as we need the tables to filter ATSDs when there are no
- * active contexts on a particular GPU.
+ * active contexts on a particular GPU. It is safe for these to be
+ * called concurrently with destroy as the OPAL call takes appropriate
+ * locks and refcounts on init/destroy.
*/
rc = opal_npu_init_context(nphb->opal_id, mm->context.id, flags,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
@@ -754,8 +778,29 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
* We store the npu pci device so we can more easily get at the
* associated npus.
*/
+ spin_lock(&npu_context_lock);
npu_context = mm->context.npu_context;
+ if (npu_context) {
+ if (npu_context->release_cb != cb ||
+ npu_context->priv != priv) {
+ spin_unlock(&npu_context_lock);
+ opal_npu_destroy_context(nphb->opal_id, mm->context.id,
+ PCI_DEVID(gpdev->bus->number,
+ gpdev->devfn));
+ return ERR_PTR(-EINVAL);
+ }
+
+ WARN_ON(!kref_get_unless_zero(&npu_context->kref));
+ }
+ spin_unlock(&npu_context_lock);
+
if (!npu_context) {
+ /*
+ * We can set up these fields without holding the
+ * npu_context_lock as the npu_context hasn't been returned to
+ * the caller meaning it can't be destroyed. Parallel allocation
+ * is protected against by mmap_sem.
+ */
rc = -ENOMEM;
npu_context = kzalloc(sizeof(struct npu_context), GFP_KERNEL);
if (npu_context) {
@@ -774,8 +819,6 @@ struct npu_context *pnv_npu2_init_context(struct pci_dev *gpdev,
}
mm->context.npu_context = npu_context;
- } else {
- WARN_ON(!kref_get_unless_zero(&npu_context->kref));
}
npu_context->release_cb = cb;
@@ -814,15 +857,16 @@ static void pnv_npu2_release_context(struct kref *kref)
mm_context_remove_copro(npu_context->mm);
npu_context->mm->context.npu_context = NULL;
- mmu_notifier_unregister(&npu_context->mn,
- npu_context->mm);
-
- kfree(npu_context);
}
+/*
+ * Destroy a context on the given GPU. May free the npu_context if it is no
+ * longer active on any GPUs. Must not be called from interrupt context.
+ */
void pnv_npu2_destroy_context(struct npu_context *npu_context,
struct pci_dev *gpdev)
{
+ int removed;
struct pnv_phb *nphb;
struct npu *npu;
struct pci_dev *npdev = pnv_pci_get_npu_dev(gpdev, 0);
@@ -844,7 +888,21 @@ void pnv_npu2_destroy_context(struct npu_context *npu_context,
WRITE_ONCE(npu_context->npdev[npu->index][nvlink_index], NULL);
opal_npu_destroy_context(nphb->opal_id, npu_context->mm->context.id,
PCI_DEVID(gpdev->bus->number, gpdev->devfn));
- kref_put(&npu_context->kref, pnv_npu2_release_context);
+ spin_lock(&npu_context_lock);
+ removed = kref_put(&npu_context->kref, pnv_npu2_release_context);
+ spin_unlock(&npu_context_lock);
+
+ /*
+ * We need to do this outside of pnv_npu2_release_context so that it is
+ * outside the spinlock as mmu_notifier_destroy uses SRCU.
+ */
+ if (removed) {
+ mmu_notifier_unregister(&npu_context->mn,
+ npu_context->mm);
+
+ kfree(npu_context);
+ }
+
}
EXPORT_SYMBOL(pnv_npu2_destroy_context);
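[Editor's note: a userspace analogue of the drop-under-lock, teardown-outside-lock pattern above. The reference drop happens under the lock so a racing init can't take a reference to a dying context, but the teardown that may sleep (mmu_notifier_unregister uses SRCU in the real code) runs only after the lock is released. A pthread mutex stands in for the spinlock, a plain counter for the kref.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;

struct ctx {
        int refcount;   /* protected by ctx_lock in this sketch */
};

static void destroy_ctx(struct ctx *c)
{
        int removed;

        pthread_mutex_lock(&ctx_lock);
        removed = (--c->refcount == 0);         /* kref_put analogue */
        pthread_mutex_unlock(&ctx_lock);

        /* Sleeping teardown must happen outside the lock. */
        if (removed) {
                printf("unregister notifier + free context\n");
                free(c);
        }
}

int main(void)
{
        struct ctx *c = malloc(sizeof(*c));

        c->refcount = 2;
        destroy_ctx(c); /* ref drops to 1, nothing freed */
        destroy_ctx(c); /* last ref: teardown runs outside the lock */
        return 0;
}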
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index 1bceb95f422d..5584247f5029 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -44,6 +44,10 @@ static ssize_t opal_nvram_read(char *buf, size_t count, loff_t *index)
return count;
}
+/*
+ * This can be called in the panic path with interrupts off, so use
+ * mdelay in that case.
+ */
static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
{
s64 rc = OPAL_BUSY;
@@ -58,10 +62,16 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_write_nvram(__pa(buf), count, off);
if (rc == OPAL_BUSY_EVENT) {
- msleep(OPAL_BUSY_DELAY_MS);
+ if (in_interrupt() || irqs_disabled())
+ mdelay(OPAL_BUSY_DELAY_MS);
+ else
+ msleep(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
} else if (rc == OPAL_BUSY) {
- msleep(OPAL_BUSY_DELAY_MS);
+ if (in_interrupt() || irqs_disabled())
+ mdelay(OPAL_BUSY_DELAY_MS);
+ else
+ msleep(OPAL_BUSY_DELAY_MS);
}
}
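[Editor's note: a userspace analogue of the delay choice above: sleep when blocking is allowed, busy-wait otherwise. in_interrupt()/irqs_disabled() have no userspace equivalent, so "atomic_ctx" is a stand-in flag; the duplicated if/else in the hunk could arguably be factored into a helper of this shape. opal-rtc.c below uses mdelay unconditionally for the same reason: it runs before the scheduler is up.]

#include <stdbool.h>
#include <time.h>

static void busy_delay_ms(long ms)     /* mdelay() analogue: never sleeps */
{
        struct timespec now, end;

        clock_gettime(CLOCK_MONOTONIC, &end);
        end.tv_sec += ms / 1000;
        end.tv_nsec += (ms % 1000) * 1000000L;
        if (end.tv_nsec >= 1000000000L) {
                end.tv_sec++;
                end.tv_nsec -= 1000000000L;
        }
        do
                clock_gettime(CLOCK_MONOTONIC, &now);
        while (now.tv_sec < end.tv_sec ||
               (now.tv_sec == end.tv_sec && now.tv_nsec < end.tv_nsec));
}

static void opal_delay_ms(long ms, bool atomic_ctx)
{
        if (atomic_ctx) {
                busy_delay_ms(ms);      /* can't sleep in this context */
        } else {
                struct timespec ts = { ms / 1000, (ms % 1000) * 1000000L };
                nanosleep(&ts, NULL);   /* msleep() analogue: may block */
        }
}

int main(void)
{
        opal_delay_ms(10, true);        /* panic path: spin */
        opal_delay_ms(10, false);       /* normal path: sleep */
        return 0;
}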
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index f8868864f373..aa2a5139462e 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -48,10 +48,12 @@ unsigned long __init opal_get_boot_time(void)
while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
- if (rc == OPAL_BUSY_EVENT)
+ if (rc == OPAL_BUSY_EVENT) {
+ mdelay(OPAL_BUSY_DELAY_MS);
opal_poll_events(NULL);
- else if (rc == OPAL_BUSY)
- mdelay(10);
+ } else if (rc == OPAL_BUSY) {
+ mdelay(OPAL_BUSY_DELAY_MS);
+ }
}
if (rc != OPAL_SUCCESS)
return 0;
diff --git a/arch/powerpc/sysdev/xive/native.c b/arch/powerpc/sysdev/xive/native.c
index d22aeb0b69e1..b48454be5b98 100644
--- a/arch/powerpc/sysdev/xive/native.c
+++ b/arch/powerpc/sysdev/xive/native.c
@@ -389,6 +389,10 @@ static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
if (xive_pool_vps == XIVE_INVALID_VP)
return;
+ /* Check if pool VP already active, if it is, pull it */
+ if (in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2) & TM_QW2W2_VP)
+ in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);
+
/* Enable the pool VP */
vp = xive_pool_vps + cpu;
pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);