/*
* Copyright (C) 2017 Steven Rostedt, VMware Inc.
*/

#include <linux/linkage.h>
#include <asm/page_types.h>
#include <asm/segment.h>
#include <asm/export.h>
#include <asm/ftrace.h>

#ifdef CC_USING_FENTRY
# define function_hook __fentry__
EXPORT_SYMBOL(__fentry__)
#else
# define function_hook mcount
EXPORT_SYMBOL(mcount)
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
/* mcount uses a frame pointer even if CONFIG_FRAME_POINTER is not set */
#if !defined(CC_USING_FENTRY) || defined(CONFIG_FRAME_POINTER)
# define USING_FRAME_POINTER
#endif
#ifdef USING_FRAME_POINTER
# define MCOUNT_FRAME 1 /* using frame = true */
#else
# define MCOUNT_FRAME 0 /* using frame = false */
#endif
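
/*
* With CONFIG_DYNAMIC_FTRACE the compiler-inserted mcount/fentry calls
* are converted to NOPs at boot, so the default hook only returns.
* When tracing is enabled, the call sites are patched to call
* ftrace_caller (or ftrace_regs_caller) below instead.
*/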
ENTRY(function_hook)
ret
END(function_hook)

ENTRY(ftrace_caller)
#ifdef USING_FRAME_POINTER
# ifdef CC_USING_FENTRY
/*
* Frame pointers are of ip followed by bp.
* Since fentry is an immediate jump, we are left with
* parent-ip, function-ip. We need to add a frame with
* parent-ip followed by ebp.
*/
pushl 4(%esp) /* parent ip */
pushl %ebp
movl %esp, %ebp
pushl 2*4(%esp) /* function ip */
# endif
/* For mcount, the function ip is directly above */
pushl %ebp
movl %esp, %ebp
#endif
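/*
* Save the call-clobbered registers: under the kernel's regparm
* convention they may hold the traced function's arguments, and the
* tracer call below would otherwise destroy them.
*/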
pushl %eax
pushl %ecx
pushl %edx
pushl $0 /* Pass NULL as regs pointer */
#ifdef USING_FRAME_POINTER
/* Load parent ebp into edx */
movl 4*4(%esp), %edx
#else
/* There's no frame pointer, load the appropriate stack addr instead */
lea 4*4(%esp), %edx
#endif
movl (MCOUNT_FRAME+4)*4(%esp), %eax /* load the ip */
/* Get the parent ip */
movl 4(%edx), %edx /* edx has ebp */
movl function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax
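/*
* ftrace_call is a patch site: the "call ftrace_stub" below is
* rewritten at runtime to call the registered tracer.
*/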
.globl ftrace_call
ftrace_call:
call ftrace_stub
addl $4, %esp /* skip NULL pointer */
popl %edx
popl %ecx
popl %eax
#ifdef USING_FRAME_POINTER
popl %ebp
# ifdef CC_USING_FENTRY
addl $4, %esp /* skip function ip */
popl %ebp /* this is the orig bp */
addl $4, %esp /* skip parent ip */
# endif
#endif
.Lftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
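/*
* Another patch site: this jmp is redirected to ftrace_graph_caller
* when the function graph tracer is enabled.
*/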
.globl ftrace_graph_call
ftrace_graph_call:
jmp ftrace_stub
#endif
/* This is weak to keep gas from relaxing the jumps */
WEAK(ftrace_stub)
ret
END(ftrace_caller)
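
/*
* ftrace_regs_caller is used for tracers registered with
* FTRACE_OPS_FL_SAVE_REGS (e.g. kprobes on ftrace); it hands the
* tracer a full struct pt_regs for the traced function's entry.
*/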
ENTRY(ftrace_regs_caller)
/*
* i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must be at the same location
* as the current return ip is. We move the return ip into the
* regs->ip location, and move flags into the return ip location.
*/
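/* The pushes below lay out a struct pt_regs; sp and ss are not saved */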
pushl $__KERNEL_CS
pushl 4(%esp) /* Save the return ip */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
pushl %fs
pushl %es
pushl %ds
pushl %eax
/* Get flags and place them into the return ip slot */
pushf
popl %eax
movl %eax, 8*4(%esp)
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
subl $MCOUNT_INSN_SIZE, %eax /* Adjust ip */
#ifdef CC_USING_FENTRY
movl 15*4(%esp), %edx /* Load parent ip (2nd parameter) */
#else
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
#endif
movl function_trace_op, %ecx /* Load ftrace_ops into 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
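/* ftrace_regs_call is a patch site, updated like ftrace_call above */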
GLOBAL(ftrace_regs_call)
call ftrace_stub
addl $4, %esp /* Skip pt_regs */
/* restore flags */
push 14*4(%esp)
popf
/* Move return ip back to its original location */
movl 12*4(%esp), %eax
movl %eax, 14*4(%esp)
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
popl %ds
popl %es
popl %fs
popl %gs
/* use lea to not affect flags */
lea 3*4(%esp), %esp /* Skip orig_ax, ip and cs */
jmp .Lftrace_ret
#else /* ! CONFIG_DYNAMIC_FTRACE */
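
/*
* Without dynamic ftrace every mcount/fentry call lands here, so the
* hook itself checks whether any tracer is registered before tracing.
*/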
ENTRY(function_hook)
cmpl $__PAGE_OFFSET, %esp
jb ftrace_stub /* Paging not enabled yet? */
cmpl $ftrace_stub, ftrace_trace_function
jnz .Ltrace
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpl $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller
cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
jnz ftrace_graph_caller
#endif
.globl ftrace_stub
ftrace_stub:
ret
/* taken from glibc */
.Ltrace:
pushl %eax
pushl %ecx
pushl %edx
movl 0xc(%esp), %eax
movl 0x4(%ebp), %edx
subl $MCOUNT_INSN_SIZE, %eax
call *ftrace_trace_function
popl %edx
popl %ecx
popl %eax
jmp ftrace_stub
END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
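/*
* Collect the traced function's ip, the location of its return address
* and the frame pointer for prepare_ftrace_return(), which redirects
* the function's return to return_to_handler below.
*/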
ENTRY(ftrace_graph_caller)
pushl %eax
pushl %ecx
pushl %edx
movl 3*4(%esp), %eax
/* Even with frame pointers, fentry doesn't have one here */
#ifdef CC_USING_FENTRY
lea 4*4(%esp), %edx
movl $0, %ecx
#else
lea 0x4(%ebp), %edx
movl (%ebp), %ecx
#endif
subl $MCOUNT_INSN_SIZE, %eax
call prepare_ftrace_return
popl %edx
popl %ecx
popl %eax
ret
END(ftrace_graph_caller)
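
/*
* Traced functions return here instead of to their real caller;
* ftrace_return_to_handler() records the exit and returns the original
* return address, which we then jump to.
*/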
.globl return_to_handler
return_to_handler:
pushl %eax
pushl %edx
#ifdef CC_USING_FENTRY
movl $0, %eax
#else
movl %ebp, %eax
#endif
call ftrace_return_to_handler
movl %eax, %ecx
popl %edx
popl %eax
jmp *%ecx
#endif