author    Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 21:30:38 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-02-22 21:30:38 +0300
commit    38705613b74ab090eee55c327cd0cb77fb10eb26
tree      b219755a7eaaab097fbda4041cf2ba21df44fed5  /arch/powerpc/lib/code-patching.c
parent    ff47d8c05019d6e7753cef270d6399cb5a33be57
parent    438e69b52be776c035aa2a851ccc1709033d729b
download  linux-38705613b74ab090eee55c327cd0cb77fb10eb26.tar.xz
Merge tag 'powerpc-4.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Highlights include:

   - Support for direct mapped LPC on POWER9, giving Linux direct access
     to devices that may be on there such as a UART.

   - Memory hotplug support for the Power9 Radix MMU.

   - Add new AUX vectors describing the processor's cache geometry, to be
     used by glibc.

   - The ability for a guest to ask the hypervisor to resize the guest's
     hash table, and in addition support for doing so automatically when
     memory is hotplugged into/out-of the guest. This allows the hash
     table to be sized based on the current memory usage of the guest,
     rather than the maximum possible memory usage.

   - Implementation of optprobes (kprobe optimisation) for powerpc.

  In addition there's the topic branch shared with the KVM tree, which
  includes support for guests to use the Radix MMU on Power9.

  Thanks to: Alistair Popple, Andrew Donnellan, Aneesh Kumar K.V, Anju T,
  Anton Blanchard, Benjamin Herrenschmidt, Chris Packham, Daniel Axtens,
  Daniel Borkmann, David Gibson, Finn Thain, Gautham R. Shenoy, Gavin Shan,
  Greg Kurz, Joel Stanley, John Allen, Madhavan Srinivasan, Mahesh
  Salgaonkar, Markus Elfring, Michael Neuling, Nathan Fontenot, Naveen N.
  Rao, Nicholas Piggin, Paul Mackerras, Ravi Bangoria, Reza Arbab,
  Shailendra Singh, Vaibhav Jain, Wei Yongjun"

* tag 'powerpc-4.11-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (129 commits)
  powerpc/mm/radix: Skip ptesync in pte update helpers
  powerpc/mm/radix: Use ptep_get_and_clear_full when clearing pte for full mm
  powerpc/mm/radix: Update pte update sequence for pte clear case
  powerpc/mm: Update PROTFAULT handling in the page fault path
  powerpc/xmon: Fix data-breakpoint
  powerpc/mm: Fix build break with BOOK3S_64=n and MEMORY_HOTPLUG=y
  powerpc/mm: Fix build break when CMA=n && SPAPR_TCE_IOMMU=y
  powerpc/mm: Fix build break with RADIX=y & HUGETLBFS=n
  powerpc/pseries: Fix typo in parameter description
  powerpc/kprobes: Remove kprobe_exceptions_notify()
  kprobes: Introduce weak variant of kprobe_exceptions_notify()
  powerpc/ftrace: Fix confusing help text for DISABLE_MPROFILE_KERNEL
  powerpc/powernv: Fix opal_exit tracepoint opcode
  powerpc: Add a prototype for mcount() so it can be versioned
  powerpc: Drop GPL from of_node_to_nid() export to match other arches
  powerpc/kprobes: Optimize kprobe in kretprobe_trampoline()
  powerpc/kprobes: Implement Optprobes
  powerpc/kprobes: Fixes for kprobe_lookup_name() on BE
  powerpc: Add helper to check if offset is within relative branch range
  powerpc/bpf: Introduce __PPC_SH64()
  ...
Diffstat (limited to 'arch/powerpc/lib/code-patching.c')
-rw-r--r--  arch/powerpc/lib/code-patching.c | 45
1 file changed, 44 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c
index c1746df0f88e..0899315e1434 100644
--- a/arch/powerpc/lib/code-patching.c
+++ b/arch/powerpc/lib/code-patching.c
@@ -32,6 +32,49 @@ int patch_branch(unsigned int *addr, unsigned long target, int flags)
return patch_instruction(addr, create_branch(addr, target, flags));
}
+bool is_offset_in_branch_range(long offset)
+{
+ /*
+ * The powerpc branch instruction format is:
+ *
+ * 0 6 30 31
+ * +---------+----------------+---+---+
+ * | opcode | LI |AA |LK |
+ * +---------+----------------+---+---+
+ * Where AA = 0 and LK = 0
+ *
+ * LI is a signed 24-bit integer. The real branch offset is computed
+ * by: imm32 = SignExtend(LI:'0b00', 32);
+ *
+ * So the maximum forward branch should be:
+ * (0x007fffff << 2) = 0x01fffffc = 0x1fffffc
+ * The maximum backward branch should be:
+ * (0xff800000 << 2) = 0xfe000000 = -0x2000000
+ */
+ return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
+}
+
+/*
+ * Helper to check if a given instruction is a conditional branch
+ * Derived from the conditional checks in analyse_instr()
+ */
+bool __kprobes is_conditional_branch(unsigned int instr)
+{
+ unsigned int opcode = instr >> 26;
+
+ if (opcode == 16) /* bc, bca, bcl, bcla */
+ return true;
+ if (opcode == 19) {
+ switch ((instr >> 1) & 0x3ff) {
+ case 16: /* bclr, bclrl */
+ case 528: /* bcctr, bcctrl */
+ case 560: /* bctar, bctarl */
+ return true;
+ }
+ }
+ return false;
+}
+
unsigned int create_branch(const unsigned int *addr,
unsigned long target, int flags)
{
@@ -43,7 +86,7 @@ unsigned int create_branch(const unsigned int *addr,
offset = offset - (unsigned long)addr;
/* Check we can represent the target in the instruction format */
- if (offset < -0x2000000 || offset > 0x1fffffc || offset & 0x3)
+ if (!is_offset_in_branch_range(offset))
return 0;
/* Mask out the flags and target, so they don't step on each other. */
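
For reference, the two helpers added above can be exercised outside the kernel. The following is a minimal user-space sketch and not part of the patch: it copies the bodies of is_offset_in_branch_range() and is_conditional_branch() (dropping the __kprobes annotation), and the main() driver with its sample offsets and instruction encodings is illustrative only.

/*
 * Standalone sketch (assumption: plain user-space C, no kernel headers).
 * The helper bodies mirror the patch above; main() and its sample
 * values are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_offset_in_branch_range(long offset)
{
	/* LI is 24 bits, shifted left by 2, so the reachable range is
	 * -0x2000000 .. 0x1fffffc and the offset must be word aligned. */
	return (offset >= -0x2000000 && offset <= 0x1fffffc && !(offset & 0x3));
}

static bool is_conditional_branch(unsigned int instr)
{
	unsigned int opcode = instr >> 26;

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((instr >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}

int main(void)
{
	/* The documented limits pass; one step beyond them, or a
	 * misaligned offset, fails. */
	printf("%d\n", is_offset_in_branch_range(0x1fffffc));	/* 1: max forward */
	printf("%d\n", is_offset_in_branch_range(-0x2000000));	/* 1: max backward */
	printf("%d\n", is_offset_in_branch_range(0x2000000));	/* 0: out of range */
	printf("%d\n", is_offset_in_branch_range(6));		/* 0: not word aligned */

	/* 0x48000000 encodes an unconditional "b" (opcode 18). */
	printf("%d\n", is_conditional_branch(0x48000000u));	/* 0 */
	/* 0x40820010 encodes "bne +0x10" (opcode 16). */
	printf("%d\n", is_conditional_branch(0x40820010u));	/* 1 */
	return 0;
}

Built with a plain cc, this prints 1/1/0/0 for the range checks and 0/1 for the two instruction words, matching the forward and backward limits worked out in the comment of is_offset_in_branch_range().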