author    Prasanna S Panchamukhi <prasanna@in.ibm.com>  2006-04-19 09:22:00 +0400
committer Linus Torvalds <torvalds@g5.osdl.org>         2006-04-19 20:13:53 +0400
commit    3b60211c1618063cb296439ebaef2041a725ba20 (patch)
tree      5de7fd33aeac8397df4e28aea50eb2bb34829de9 /arch/x86_64
parent    34c37e18696ff6a773f0403348342a9fe49df4af (diff)
download  linux-3b60211c1618063cb296439ebaef2041a725ba20.tar.xz
[PATCH] Switch Kprobes inline functions to __kprobes for x86_64
Andrew Morton pointed out that the compiler might not actually inline functions that are merely marked inline in kprobes, thereby allowing probes to be inserted on these kprobes routines, which could cause recursion. This patch removes such inline markers and adds the routines to the kprobes section, thereby disallowing probes on all of them. Some of the routines can even still be inlined, since they are executed only after kprobes has done the necessary setup for reentrancy.

Signed-off-by: Prasanna S Panchamukhi <prasanna@in.ibm.com>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
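For context on why moving these routines into the kprobes section prevents recursion: __kprobes expands to a section attribute that places the function's code in .kprobes.text, and the kprobes core rejects probe registration at any address inside that section, so code that runs while a probe is being handled can never itself be probed. The stand-alone C sketch below imitates that mechanism in user space; it is not kernel code, and the section and symbol names (__my_kprobes, my_probes_blacklist, helper_on_probe_path, probe_allowed) are illustrative assumptions, not the kernel's API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's __kprobes annotation: place the function's
 * code into a dedicated section (the section name here is made up). */
#define __my_kprobes __attribute__((__section__("my_probes_blacklist")))

/* GNU ld emits __start_<section>/__stop_<section> symbols for sections
 * whose names are valid C identifiers; the kernel instead defines
 * __kprobes_text_start/__kprobes_text_end in its linker script. */
extern const char __start_my_probes_blacklist[], __stop_my_probes_blacklist[];

/* A routine that would run while a probe is being handled, so it must
 * never be probed itself. */
static int __my_kprobes helper_on_probe_path(int x)
{
	return x + 1;
}

static int ordinary_function(int x)
{
	return x * 2;
}

/* Reject "probe" registration on any address inside the protected
 * section, roughly mirroring the range check the kprobes core performs
 * when a probe is registered. */
static bool probe_allowed(uintptr_t addr)
{
	return addr < (uintptr_t)__start_my_probes_blacklist ||
	       addr >= (uintptr_t)__stop_my_probes_blacklist;
}

int main(void)
{
	printf("helper_on_probe_path: probe allowed? %d\n",
	       probe_allowed((uintptr_t)helper_on_probe_path));
	printf("ordinary_function:    probe allowed? %d\n",
	       probe_allowed((uintptr_t)ordinary_function));
	return 0;
}

Built with gcc, the sketch should report 0 (probe rejected) for helper_on_probe_path and 1 for ordinary_function, which is the same effect this patch achieves by tagging the x86_64 kprobes helpers with __kprobes rather than relying on inline.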
Diffstat (limited to 'arch/x86_64')
-rw-r--r--  arch/x86_64/kernel/kprobes.c  10
1 file changed, 5 insertions, 5 deletions
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c
index accbff3fec49..1eaa5dae6174 100644
--- a/arch/x86_64/kernel/kprobes.c
+++ b/arch/x86_64/kernel/kprobes.c
@@ -53,7 +53,7 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 /*
  * returns non-zero if opcode modifies the interrupt flag.
  */
-static inline int is_IF_modifier(kprobe_opcode_t *insn)
+static __always_inline int is_IF_modifier(kprobe_opcode_t *insn)
 {
 	switch (*insn) {
 	case 0xfa:		/* cli */
@@ -84,7 +84,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
  * If it does, return the address of the 32-bit displacement word.
  * If not, return null.
  */
-static inline s32 *is_riprel(u8 *insn)
+static s32 __kprobes *is_riprel(u8 *insn)
 {
 #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf)		      \
 	(((b0##UL << 0x0)|(b1##UL << 0x1)|(b2##UL << 0x2)|(b3##UL << 0x3) |   \
@@ -229,7 +229,7 @@ void __kprobes arch_remove_kprobe(struct kprobe *p)
 	mutex_unlock(&kprobe_mutex);
 }
 
-static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	kcb->prev_kprobe.kp = kprobe_running();
 	kcb->prev_kprobe.status = kcb->kprobe_status;
@@ -237,7 +237,7 @@ static inline void save_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->prev_kprobe.saved_rflags = kcb->kprobe_saved_rflags;
 }
 
-static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp;
 	kcb->kprobe_status = kcb->prev_kprobe.status;
@@ -245,7 +245,7 @@ static inline void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
 	kcb->kprobe_saved_rflags = kcb->prev_kprobe.saved_rflags;
 }
 
-static inline void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
 				struct kprobe_ctlblk *kcb)
 {
 	__get_cpu_var(current_kprobe) = p;