/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_FTRACE
#define _ASM_POWERPC_FTRACE
#include <asm/types.h>
#ifdef CONFIG_FUNCTION_TRACER
#define MCOUNT_ADDR ((unsigned long)(_mcount))
#define MCOUNT_INSN_SIZE 4 /* sizeof mcount call */
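/*
 * The function graph tracer is handed a pointer to the return-address slot
 * rather than just its value, so ftrace_graph_ret_addr() can reliably map a
 * hijacked return address back to the original one when unwinding.
 */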
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
#ifdef __ASSEMBLY__
/* Based off of objdump output from glibc */
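/*
 * MCOUNT_SAVE_FRAME opens a 48-byte stack frame and saves the volatile
 * argument registers r3-r10 at offsets 12-40, CR at offset 8 and the
 * current LR (the address inside the traced function) at offset 44.
 * r4 is loaded with the traced function's own return address, taken from
 * the LR save word of the previous frame (offset 52 after the stwu).
 */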
#define MCOUNT_SAVE_FRAME \
	stwu r1,-48(r1); \
	stw r3, 12(r1); \
	stw r4, 16(r1); \
	stw r5, 20(r1); \
	stw r6, 24(r1); \
	mflr r3; \
	lwz r4, 52(r1); \
	mfcr r5; \
	stw r7, 28(r1); \
	stw r8, 32(r1); \
	stw r9, 36(r1); \
	stw r10,40(r1); \
	stw r3, 44(r1); \
	stw r5, 8(r1)
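/*
 * MCOUNT_RESTORE_FRAME reloads r3-r10 and CR, moves the LR value saved at
 * offset 44 into CTR so a following bctr returns into the traced function,
 * restores the original LR from offset 52 and pops the 48-byte frame.
 */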
#define MCOUNT_RESTORE_FRAME \
	lwz r6, 8(r1); \
	lwz r0, 44(r1); \
	lwz r3, 12(r1); \
	mtctr r0; \
	lwz r4, 16(r1); \
	mtcr r6; \
	lwz r5, 20(r1); \
	lwz r6, 24(r1); \
	lwz r0, 52(r1); \
	lwz r7, 28(r1); \
	lwz r8, 32(r1); \
	mtlr r0; \
	lwz r9, 36(r1); \
	lwz r10,40(r1); \
	addi r1, r1, 48
#else /* !__ASSEMBLY__ */
extern void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
	/* relocation of the mcount call site is the same as the address */
	return addr;
}
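/*
 * Each patched call site records the module it belongs to, so the patching
 * code can redirect calls in modules through that module's ftrace trampoline
 * when the tracer is out of direct-branch range.
 */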
struct dyn_arch_ftrace {
	struct module *mod;
};
#endif /* __ASSEMBLY__ */
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
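/*
 * With DYNAMIC_FTRACE_WITH_REGS the ftrace_caller trampoline saves pt_regs
 * and can pass the active ftrace_ops to the callback, which is what
 * ARCH_SUPPORTS_FTRACE_OPS advertises.
 */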
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
#endif /* CONFIG_FUNCTION_TRACER */
#ifndef __ASSEMBLY__
#ifdef CONFIG_FTRACE_SYSCALLS
/*
 * Some syscall entry functions on powerpc start with "ppc_" (fork and clone,
 * for instance) or with "ppc32_"/"ppc64_". Those symbols still need to match
 * the corresponding "sys_" name that the tracer looks up.
 */
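/*
 * For example, the symbol "ppc_fork" matches the name "sys_fork"
 * (".ppc_fork" under the ELFv1 ABI, where text symbols carry a leading dot).
 */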
#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
#ifdef PPC64_ELF_ABI_v1
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	/* We need to skip past the initial dot, and the __se_sys alias */
	return !strcmp(sym + 1, name) ||
		(!strncmp(sym, ".__se_sys", 9) && !strcmp(sym + 6, name)) ||
		(!strncmp(sym, ".ppc_", 5) && !strcmp(sym + 5, name + 4)) ||
		(!strncmp(sym, ".ppc32_", 7) && !strcmp(sym + 7, name + 4)) ||
		(!strncmp(sym, ".ppc64_", 7) && !strcmp(sym + 7, name + 4));
}
#else
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
	return !strcmp(sym, name) ||
		(!strncmp(sym, "__se_sys", 8) && !strcmp(sym + 5, name)) ||
		(!strncmp(sym, "ppc_", 4) && !strcmp(sym + 4, name + 4)) ||
		(!strncmp(sym, "ppc32_", 6) && !strcmp(sym + 6, name + 4)) ||
		(!strncmp(sym, "ppc64_", 6) && !strcmp(sym + 6, name + 4));
}
#endif /* PPC64_ELF_ABI_v1 */
#endif /* CONFIG_FTRACE_SYSCALLS */
#ifdef CONFIG_PPC64
#include <asm/paca.h>
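/*
 * paca->ftrace_enabled is a per-CPU gate checked by the ftrace entry code:
 * clearing it keeps this CPU out of the tracing callbacks, so tracing can
 * be suppressed on a CPU that is in a state where tracing would be unsafe.
 */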
static inline void this_cpu_disable_ftrace(void)
{
	get_paca()->ftrace_enabled = 0;
}
static inline void this_cpu_enable_ftrace(void)
{
	get_paca()->ftrace_enabled = 1;
}
/* Set the ftrace state on this CPU if possible (may not be implemented) */
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled)
{
	get_paca()->ftrace_enabled = ftrace_enabled;
}
static inline u8 this_cpu_get_ftrace_enabled(void)
{
	return get_paca()->ftrace_enabled;
}
#else /* CONFIG_PPC64 */
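/* No paca on 32-bit: per-CPU control is not implemented, ftrace is always reported as enabled. */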
static inline void this_cpu_disable_ftrace(void) { }
static inline void this_cpu_enable_ftrace(void) { }
static inline void this_cpu_set_ftrace_enabled(u8 ftrace_enabled) { }
static inline u8 this_cpu_get_ftrace_enabled(void) { return 1; }
#endif /* CONFIG_PPC64 */
#endif /* !__ASSEMBLY__ */
#endif /* _ASM_POWERPC_FTRACE */