author    Oleg Nesterov <oleg@redhat.com>    2014-04-05 22:05:02 +0400
committer Oleg Nesterov <oleg@redhat.com>    2014-04-17 23:58:22 +0400
commit   7ba6db2d688bdf83049a18c8e55b2d1e58e8b0bc (patch)
tree     c2be845e50bb6057b8ea0716a18bca4120964060 /arch/x86
parent   8faaed1b9f500d6cf32702716733a645c9b0727a (diff)
download linux-7ba6db2d688bdf83049a18c8e55b2d1e58e8b0bc.tar.xz
uprobes/x86: Emulate unconditional relative jmp's
Currently we always execute all insns out-of-line, including relative jmp's and call's. This assumes that even if regs->ip points to nowhere after the single-step, default_post_xol_op(UPROBE_FIX_IP) logic will update it correctly.

However, this doesn't work if this regs->ip == xol_vaddr + insn_offset is not canonical. In this case the CPU generates #GP and general_protection() kills the task which tries to execute this insn out-of-line.

Now that we have uprobe_xol_ops we can teach uprobes to emulate these insns and solve the problem. This patch adds branch_xol_ops which has a single branch_emulate_op() hook; so far it can only handle rel8/32 relative jmp's.

TODO: move ->fixup into the union along with rip_rela_target_address.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reported-by: Jonathan Lebon <jlebon@redhat.com>
Reviewed-by: Jim Keniston <jkenisto@us.ibm.com>
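For readers outside the uprobes code, a minimal stand-alone sketch may make the two facts above concrete: the emulated IP update is just regs->ip += ilen + offs (a relative jmp's immediate is taken from the end of the instruction), and a non-canonical rip is what triggers the #GP. Everything below is illustrative user-space C, not kernel code; the addresses, the 48-bit canonical rule (4-level paging), and the is_canonical() helper are assumptions made for the example:

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Illustrative only: with 48-bit virtual addresses (4-level paging),
     * an address is canonical iff bits 63..47 are all equal. Pointing
     * rip at a non-canonical address raises #GP, which is the failure
     * mode the commit message describes for out-of-line execution.
     */
    static int is_canonical(uint64_t addr)
    {
            uint64_t top = addr >> 47;              /* bits 63..47 */
            return top == 0 || top == 0x1ffff;      /* all zero or all one */
    }

    int main(void)
    {
            /* hypothetical "jmp rel8" (eb 05) probed at ip: length 2, offset +5 */
            uint64_t ip = 0x400000;
            uint8_t ilen = 2;
            int32_t offs = 5;

            /* the emulation: regs->ip += ilen + offs */
            printf("emulated target: %#llx\n",
                   (unsigned long long)(ip + ilen + offs));

            /* 1ULL << 47 has bits 63..47 == 0...01 -> non-canonical -> #GP */
            printf("canonical? %d\n", is_canonical(1ULL << 47));
            return 0;
    }

Negative offsets behave identically, since the decoder sign-extends the rel8/rel32 immediate into the s32 branch.offs field before the addition.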
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/uprobes.h   8
-rw-r--r--  arch/x86/kernel/uprobes.c       38
2 files changed, 45 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h
index 9f8210bcbb49..e9fd4d5537ed 100644
--- a/arch/x86/include/asm/uprobes.h
+++ b/arch/x86/include/asm/uprobes.h
@@ -44,9 +44,15 @@ struct arch_uprobe {
 	u16				fixups;
 	const struct uprobe_xol_ops	*ops;
 
+	union {
 #ifdef CONFIG_X86_64
-	unsigned long rip_rela_target_address;
+		unsigned long			rip_rela_target_address;
 #endif
+		struct {
+			s32	offs;
+			u8	ilen;
+		} branch;
+	};
 };
 
 struct arch_uprobe_task {
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index aecc22054384..c3baeaacf1b6 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -461,6 +461,40 @@ static struct uprobe_xol_ops default_xol_ops = {
 	.post_xol = default_post_xol_op,
 };
 
+static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+	regs->ip += auprobe->branch.ilen + auprobe->branch.offs;
+	return true;
+}
+
+static struct uprobe_xol_ops branch_xol_ops = {
+	.emulate = branch_emulate_op,
+};
+
+/* Returns -ENOSYS if branch_xol_ops doesn't handle this insn */
+static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn)
+{
+
+	switch (OPCODE1(insn)) {
+	case 0xeb:	/* jmp 8 */
+	case 0xe9:	/* jmp 32 */
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	/* has the side-effect of processing the entire instruction */
+	insn_get_length(insn);
+	if (WARN_ON_ONCE(!insn_complete(insn)))
+		return -ENOEXEC;
+
+	auprobe->branch.ilen = insn->length;
+	auprobe->branch.offs = insn->immediate.value;
+
+	auprobe->ops = &branch_xol_ops;
+	return 0;
+}
+
 /**
  * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
  * @mm: the probed address space.
@@ -478,6 +512,10 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
 	if (ret)
 		return ret;
 
+	ret = branch_setup_xol_ops(auprobe, &insn);
+	if (ret != -ENOSYS)
+		return ret;
+
 	/*
 	 * Figure out which fixups arch_uprobe_post_xol() will need to perform,
 	 * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups
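As the last hunk shows, arch_uprobe_analyze_insn() now tries the branch handler first and falls back to the default single-step-out-of-line path only when branch_setup_xol_ops() returns -ENOSYS. A compressed user-space sketch of that dispatch pattern; all names here are hypothetical stand-ins, not the kernel API:

    #include <errno.h>
    #include <stdio.h>

    struct ops { const char *name; };

    static const struct ops default_ops = { "default (execute out-of-line)" };
    static const struct ops branch_ops  = { "branch (emulate)" };

    /* Stand-in for branch_setup_xol_ops(): claim only rel8/rel32 jmp,
     * report -ENOSYS for everything else so the caller can fall back. */
    static int branch_setup(unsigned char opcode, const struct ops **ops)
    {
            if (opcode != 0xeb && opcode != 0xe9)
                    return -ENOSYS;
            *ops = &branch_ops;
            return 0;
    }

    /* Stand-in for arch_uprobe_analyze_insn(): -ENOSYS means "not mine",
     * any other value (success or hard error) is final. */
    static int analyze(unsigned char opcode, const struct ops **ops)
    {
            int ret = branch_setup(opcode, ops);
            if (ret != -ENOSYS)
                    return ret;
            *ops = &default_ops;
            return 0;
    }

    int main(void)
    {
            const struct ops *ops;

            analyze(0xeb, &ops);    /* jmp rel8 -> emulated */
            printf("0xeb -> %s\n", ops->name);
            analyze(0x90, &ops);    /* nop -> out-of-line */
            printf("0x90 -> %s\n", ops->name);
            return 0;
    }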