//===-- xray_trampoline_x86_64.S ---------------------------------*- ASM -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of XRay, a dynamic runtime instrumentation system.
//
// This implements the X86-specific assembler for the trampolines.
//
//===----------------------------------------------------------------------===//
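//
// How these trampolines are reached, as a sketch (the sled layout itself is
// defined by the compiler and by __xray_patch(), not in this file): at each
// instrumentation point the compiler emits a sled that patching rewrites
// into roughly
//
//	movl	$<function id>, %r10d
//	callq	__xray_FunctionEntry	// the exit sled uses a jmp instead
//
// The trampolines forward that id to the installed handler; the assumed C++
// signature, matching __xray_set_handler() in xray_interface.h, is
//
//	void handler(int32_t FuncId, XRayEntryType Type);
//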

.macro SAVE_REGISTERS
	// 200 is deliberately not a multiple of 16: the trampolines using this
	// macro are reached via a call, so after the preceding pushq %rbp,
	// subtracting 200 leaves %rsp 16-byte aligned for the handler call.
	subq $200, %rsp
	movupd	%xmm0, 184(%rsp)
	movupd	%xmm1, 168(%rsp)
	movupd	%xmm2, 152(%rsp)
	movupd	%xmm3, 136(%rsp)
	movupd	%xmm4, 120(%rsp)
	movupd	%xmm5, 104(%rsp)
	movupd	%xmm6, 88(%rsp)
	movupd	%xmm7, 72(%rsp)
	movq	%rdi, 64(%rsp)
	movq	%rax, 56(%rsp)
	movq	%rdx, 48(%rsp)
	movq	%rsi, 40(%rsp)
	movq	%rcx, 32(%rsp)
	movq	%r8, 24(%rsp)
	movq	%r9, 16(%rsp)
.endm
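
// Note that the set saved above is exactly the SysV x86-64 argument
// registers (%rdi, %rsi, %rdx, %rcx, %r8, %r9 and %xmm0-%xmm7) plus %rax,
// so the handler invoked by a trampoline cannot clobber the instrumented
// function's incoming arguments or a pending return value.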

.macro RESTORE_REGISTERS
	movupd	184(%rsp), %xmm0
	movupd	168(%rsp), %xmm1
	movupd	152(%rsp), %xmm2
	movupd	136(%rsp), %xmm3
	movupd	120(%rsp), %xmm4
	movupd	104(%rsp), %xmm5
	movupd	88(%rsp), %xmm6
	movupd	72(%rsp), %xmm7
	movq	64(%rsp), %rdi
	movq	56(%rsp), %rax
	movq	48(%rsp), %rdx
	movq	40(%rsp), %rsi
	movq	32(%rsp), %rcx
	movq	24(%rsp), %r8
	movq	16(%rsp), %r9
	addq	$200, %rsp
.endm

	.text
	.file "xray_trampoline_x86_64.S"

//===----------------------------------------------------------------------===//

	.globl __xray_FunctionEntry
	.align 16, 0x90
	.type __xray_FunctionEntry,@function

__xray_FunctionEntry:
	.cfi_startproc
	pushq %rbp
	.cfi_def_cfa_offset 16
	SAVE_REGISTERS

	// This load has to be atomic; it is concurrent with __xray_patch().
	// On x86/amd64, a simple (type-aligned) MOV instruction is enough.
	movq	_ZN6__xray19XRayPatchedFunctionE(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp0

	// The patched function prologue puts its xray_instr_map index into %r10d.
	movl	%r10d, %edi
	xorl	%esi, %esi		// XRayEntryType::ENTRY is 0.
	callq	*%rax
.Ltmp0:
	RESTORE_REGISTERS
	popq	%rbp
	retq
.Ltmp1:
	.size __xray_FunctionEntry, .Ltmp1-__xray_FunctionEntry
	.cfi_endproc
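
// A sketch of the entry sled this trampoline pairs with (assumed from the
// XRay design; the sled is emitted by the compiler, not defined here).
// Unpatched, the sled is a short jump over a pad of nops:
//
//	.Lxray_sled_N:
//		jmp	. + 11			// 2-byte short jmp over the pad
//		<9 bytes of nops>
//
// __xray_patch() overwrites those 11 bytes in place with:
//
//		movl	$<function id>, %r10d	// 6 bytes
//		callq	__xray_FunctionEntry	// 5 bytes (rel32 call)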

//===----------------------------------------------------------------------===//

	.globl __xray_FunctionExit
	.align 16, 0x90
	.type __xray_FunctionExit,@function
__xray_FunctionExit:
	.cfi_startproc
	// Save the important registers first. Since this function is only ever
	// jumped into, never called, we only need to preserve the registers
	// used to return values: %rax, %rdx, %xmm0 and %xmm1.
	pushq	%rbp
	.cfi_def_cfa_offset 16
	// Use a 64-byte frame: we enter via a jump, with %rsp eight bytes past
	// a 16-byte boundary, so after the push above only a multiple of 16
	// keeps the stack 16-byte aligned for the handler call below, as the
	// SysV x86-64 ABI requires.
	subq	$64, %rsp
	.cfi_def_cfa_offset 80
	movupd	%xmm0, 40(%rsp)
	movupd	%xmm1, 24(%rsp)
	movq	%rax, 16(%rsp)
	movq	%rdx, 8(%rsp)
	movq	_ZN6__xray19XRayPatchedFunctionE(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp2

	movl	%r10d, %edi
	movl	$1, %esi		// XRayEntryType::EXIT is 1.
	callq	*%rax
.Ltmp2:
	// Restore the important registers.
	movupd	40(%rsp), %xmm0
	movupd	24(%rsp), %xmm1
	movq	16(%rsp), %rax
	movq	8(%rsp), %rdx
	addq	$64, %rsp
	popq	%rbp
	retq
.Ltmp3:
	.size __xray_FunctionExit, .Ltmp3-__xray_FunctionExit
	.cfi_endproc
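
// A sketch of the exit sled this pairs with (assumed from the XRay design,
// not defined in this file): the compiler pads each return with enough nops
// that __xray_patch() can overwrite the `retq` in place with:
//
//	movl	$<function id>, %r10d	// 6 bytes
//	jmp	__xray_FunctionExit	// 5 bytes (rel32 jmp)
//
// which is why this trampoline ends in `retq`: it returns to the caller of
// the instrumented function on that function's behalf.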

//===----------------------------------------------------------------------===//

	.globl __xray_FunctionTailExit
	.align 16, 0x90
	.type __xray_FunctionTailExit,@function
__xray_FunctionTailExit:
	.cfi_startproc
	// Save the important registers as in the entry trampoline, but report
	// this as an exit. In the future we may introduce an entry type that
	// distinguishes a normal exit from a tail exit, but doing so means
	// extending the record format and bumping the version number in the
	// log header.
	pushq %rbp
	.cfi_def_cfa_offset 16
	SAVE_REGISTERS

	movq	_ZN6__xray19XRayPatchedFunctionE(%rip), %rax
	testq	%rax, %rax
	je	.Ltmp4

	movl	%r10d, %edi
	movl	$1, %esi		// Report XRayEntryType::EXIT (1) for now.
	callq	*%rax

.Ltmp4:
	RESTORE_REGISTERS
	popq	%rbp
	retq
.Ltmp5:
	.size __xray_FunctionTailExit, .Ltmp5-__xray_FunctionTailExit
	.cfi_endproc

//===----------------------------------------------------------------------===//

	.globl __xray_ArgLoggerEntry
	.align 16, 0x90
	.type __xray_ArgLoggerEntry,@function
__xray_ArgLoggerEntry:
	.cfi_startproc
	pushq	%rbp
	.cfi_def_cfa_offset 16
	SAVE_REGISTERS

	// Again, these function pointer loads must be atomic; MOV is fine.
	movq	_ZN6__xray13XRayArgLoggerE(%rip), %rax
	testq	%rax, %rax
	jne	.Larg1entryLog

	// If the arg1 logging handler is not set, fall back to the handler
	// that logs calls without their arguments.
	movq	_ZN6__xray19XRayPatchedFunctionE(%rip), %rax
	testq	%rax, %rax
	je	.Larg1entryFail

.Larg1entryLog:
	movq	%rdi, %rdx	// first argument will become the third
	xorq	%rsi, %rsi	// XRayEntryType::ENTRY into the second
	movl	%r10d, %edi	// 32-bit function ID becomes the first
	callq	*%rax

.Larg1entryFail:
	RESTORE_REGISTERS
	popq	%rbp
	retq

.Larg1entryEnd:
	.size __xray_ArgLoggerEntry, .Larg1entryEnd-__xray_ArgLoggerEntry
	.cfi_endproc
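
// The argument shuffle above targets a handler with one extra parameter;
// the assumed C++ signature, matching __xray_set_handler_arg1() in
// xray_interface.h, is
//
//	void handler(int32_t FuncId, XRayEntryType Type, uint64_t Arg1);
//
// When only the regular two-argument handler is installed, it is invoked
// through the same three-argument shuffle; per the calling convention it
// simply never reads the extra register.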