Diffstat (limited to 'arch/powerpc/platforms/pseries/lpar.c')
-rw-r--r--  arch/powerpc/platforms/pseries/lpar.c | 50 +++++++++++++++++++++++++++++---------------------
1 file changed, 29 insertions(+), 21 deletions(-)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index b02af9ef3ff6..34e64237fff9 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -26,6 +26,7 @@
#include <linux/dma-mapping.h>
#include <linux/console.h>
#include <linux/export.h>
+#include <linux/static_key.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/page.h>
@@ -430,16 +431,17 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags);
}
-static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
- unsigned char *hpte_slot_array,
- unsigned long addr, int psize)
+static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
+ unsigned long addr,
+ unsigned char *hpte_slot_array,
+ int psize, int ssize)
{
- int ssize = 0, i, index = 0;
+ int i, index = 0;
unsigned long s_addr = addr;
unsigned int max_hpte_count, valid;
unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH];
unsigned long slot_array[PPC64_HUGE_HPTE_BATCH];
- unsigned long shift, hidx, vpn = 0, vsid, hash, slot;
+ unsigned long shift, hidx, vpn = 0, hash, slot;
shift = mmu_psize_defs[psize].shift;
max_hpte_count = 1U << (PMD_SHIFT - shift);
@@ -452,15 +454,6 @@ static void pSeries_lpar_hugepage_invalidate(struct mm_struct *mm,
/* get the vpn */
addr = s_addr + (i * (1ul << shift));
- if (!is_kernel_addr(addr)) {
- ssize = user_segment_size(addr);
- vsid = get_vsid(mm->context.id, addr, ssize);
- WARN_ON(vsid == 0);
- } else {
- vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
- ssize = mmu_kernel_ssize;
- }
-
vpn = hpt_vpn(addr, vsid, ssize);
hash = hpt_hash(vpn, shift, ssize);
if (hidx & _PTEIDX_SECONDARY)
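
The vsid/ssize derivation removed above does not disappear; it moves out of
the per-HPTE loop into the callers, which now resolve the segment once and
pass the results in through the new parameters. A minimal caller-side
sketch, assuming the same helpers the removed lines used (is_kernel_addr,
user_segment_size, get_vsid, get_kernel_vsid); the wrapper name is
hypothetical, not the actual upstream call site:

static void example_hugepage_invalidate(struct mm_struct *mm,
					unsigned char *hpte_slot_array,
					unsigned long addr, int psize)
{
	unsigned long vsid;
	int ssize;

	/* Resolve the segment once, outside the per-HPTE loop. */
	if (!is_kernel_addr(addr)) {
		ssize = user_segment_size(addr);
		vsid = get_vsid(mm->context.id, addr, ssize);
		WARN_ON(vsid == 0);
	} else {
		vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
		ssize = mmu_kernel_ssize;
	}

	pSeries_lpar_hugepage_invalidate(vsid, addr, hpte_slot_array,
					 psize, ssize);
}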
@@ -649,6 +642,19 @@ EXPORT_SYMBOL(arch_free_page);
#endif
#ifdef CONFIG_TRACEPOINTS
+#ifdef CONFIG_JUMP_LABEL
+struct static_key hcall_tracepoint_key = STATIC_KEY_INIT;
+
+void hcall_tracepoint_regfunc(void)
+{
+ static_key_slow_inc(&hcall_tracepoint_key);
+}
+
+void hcall_tracepoint_unregfunc(void)
+{
+ static_key_slow_dec(&hcall_tracepoint_key);
+}
+#else
/*
* We optimise our hcall path by placing hcall_tracepoint_refcount
* directly in the TOC so we can check if the hcall tracepoints are
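
With the jump-label variant above, the enabled/disabled check becomes a
runtime-patched branch rather than a load of the refcount. The real pseries
hcall fast path performs this check in assembly, but as a C illustration of
the pattern (the wrapper name is hypothetical): static_key_false() compiles
to a fall-through nop while the key is disabled, and registering a
tracepoint patches it into a taken branch via static_key_slow_inc() above.

static inline void maybe_trace_hcall(unsigned long opcode,
				     unsigned long *args)
{
	/* Patched nop/branch: no load or compare on the fast path. */
	if (static_key_false(&hcall_tracepoint_key))
		__trace_hcall_entry(opcode, args);
}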
@@ -658,13 +664,6 @@ EXPORT_SYMBOL(arch_free_page);
/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long hcall_tracepoint_refcount;
-/*
- * Since the tracing code might execute hcalls we need to guard against
- * recursion. One example of this are spinlocks calling H_YIELD on
- * shared processor partitions.
- */
-static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
-
void hcall_tracepoint_regfunc(void)
{
hcall_tracepoint_refcount++;
@@ -674,6 +673,15 @@ void hcall_tracepoint_unregfunc(void)
{
hcall_tracepoint_refcount--;
}
+#endif
+
+/*
+ * Since the tracing code might execute hcalls we need to guard against
+ * recursion. One example of this are spinlocks calling H_YIELD on
+ * shared processor partitions.
+ */
+static DEFINE_PER_CPU(unsigned int, hcall_trace_depth);
+
void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
{
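	/*
	 * The diff is cut off here. What follows is a hedged sketch of
	 * the body, following the recursion-guard comment above; an
	 * illustration, not the verbatim upstream function. The per-CPU
	 * depth counter makes any hcall issued by the tracing code
	 * itself (e.g. a spinlock calling H_YIELD) skip the tracepoint
	 * instead of recursing into the tracer.
	 */
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&hcall_trace_depth);
	if (*depth)		/* nested hcall from tracing code: bail */
		goto out;

	(*depth)++;
	preempt_disable();	/* matching enable is in the exit hook */
	trace_hcall_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}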