diff options
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Target/X86/X86FrameLowering.cpp | 10 | ||||
| -rw-r--r-- | llvm/lib/Target/X86/X86ISelLowering.cpp | 10 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/x86-64-intrcc-nosse.ll | 3 | ||||
| -rw-r--r-- | llvm/test/CodeGen/X86/x86-64-intrcc.ll | 17 |
4 files changed, 30 insertions, 10 deletions
diff --git a/llvm/lib/Target/X86/X86FrameLowering.cpp b/llvm/lib/Target/X86/X86FrameLowering.cpp index a80be2047d2..fc5c9ac2e25 100644 --- a/llvm/lib/Target/X86/X86FrameLowering.cpp +++ b/llvm/lib/Target/X86/X86FrameLowering.cpp @@ -988,6 +988,16 @@ void X86FrameLowering::emitPrologue(MachineFunction &MF, .getValueAsString() .getAsInteger(0, StackProbeSize); + // Re-align the stack on 64-bit if the x86-interrupt calling convention is + // used and an error code was pushed, since the x86-64 ABI requires a 16-byte + // stack alignment. + if (Fn->getCallingConv() == CallingConv::X86_INTR && Is64Bit && + Fn->arg_size() == 2) { + StackSize += 8; + MFI.setStackSize(StackSize); + emitSPUpdate(MBB, MBBI, -8, /*InEpilogue=*/false); + } + // If this is x86-64 and the Red Zone is not disabled, if we are a leaf // function, and use up to 128 bytes of stack space, don't have a frame // pointer, calls, or dynamic alloca then we do not need to adjust the diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp index b01fcc90823..fc39d9bff71 100644 --- a/llvm/lib/Target/X86/X86ISelLowering.cpp +++ b/llvm/lib/Target/X86/X86ISelLowering.cpp @@ -2791,6 +2791,11 @@ X86TargetLowering::LowerMemArgument(SDValue Chain, CallingConv::ID CallConv, // Offset of last argument need to be set to -4/-8 bytes. // Where offset of the first argument out of two, should be set to 0 bytes. Offset = (Subtarget.is64Bit() ? 8 : 4) * ((i + 1) % Ins.size() - 1); + if (Subtarget.is64Bit() && Ins.size() == 2) { + // The stack pointer needs to be realigned for 64 bit handlers with error + // code, so the argument offset changes by 8 bytes. + Offset += 8; + } } // FIXME: For now, all byval parameter objects are marked mutable. This can be @@ -3248,8 +3253,9 @@ SDValue X86TargetLowering::LowerFormalArguments( MF.getTarget().Options.GuaranteedTailCallOpt)) { FuncInfo->setBytesToPopOnReturn(StackSize); // Callee pops everything. 
} else if (CallConv == CallingConv::X86_INTR && Ins.size() == 2) { - // X86 interrupts must pop the error code if present - FuncInfo->setBytesToPopOnReturn(Is64Bit ? 8 : 4); + // X86 interrupts must pop the error code (and the alignment padding) if + // present. + FuncInfo->setBytesToPopOnReturn(Is64Bit ? 16 : 4); } else { FuncInfo->setBytesToPopOnReturn(0); // Callee pops nothing. // If this is an sret function, the return should pop the hidden pointer. diff --git a/llvm/test/CodeGen/X86/x86-64-intrcc-nosse.ll b/llvm/test/CodeGen/X86/x86-64-intrcc-nosse.ll index 0bb4e47adf0..ab84088c344 100644 --- a/llvm/test/CodeGen/X86/x86-64-intrcc-nosse.ll +++ b/llvm/test/CodeGen/X86/x86-64-intrcc-nosse.ll @@ -9,10 +9,11 @@ define x86_intrcc void @test_isr_sse_clobbers(%struct.interrupt_frame* %frame, i64 %ecode) { ; CHECK-LABEL: test_isr_sse_clobbers: ; CHECK: # BB#0: + ; CHECK-NEXT: pushq %rax ; CHECK-NEXT: cld ; CHECK-NEXT: #APP ; CHECK-NEXT: #NO_APP - ; CHECK-NEXT: addq $8, %rsp + ; CHECK-NEXT: addq $16, %rsp ; CHECK-NEXT: iretq call void asm sideeffect "", "~{xmm0},~{xmm6}"() ret void diff --git a/llvm/test/CodeGen/X86/x86-64-intrcc.ll b/llvm/test/CodeGen/X86/x86-64-intrcc.ll index 2bcf3cde478..c8bc9e716ce 100644 --- a/llvm/test/CodeGen/X86/x86-64-intrcc.ll +++ b/llvm/test/CodeGen/X86/x86-64-intrcc.ll @@ -30,22 +30,24 @@ define x86_intrcc void @test_isr_no_ecode(%struct.interrupt_frame* %frame) { define x86_intrcc void @test_isr_ecode(%struct.interrupt_frame* %frame, i64 %ecode) { ; CHECK-LABEL: test_isr_ecode ; CHECK: pushq %rax + ; CHECK: pushq %rax ; CHECK: pushq %rcx - ; CHECK: movq 16(%rsp), %rax - ; CHECK: movq 40(%rsp), %rcx + ; CHECK: movq 24(%rsp), %rax + ; CHECK: movq 48(%rsp), %rcx ; CHECK: popq %rcx ; CHECK: popq %rax - ; CHECK: addq $8, %rsp + ; CHECK: addq $16, %rsp ; CHECK: iretq ; CHECK0-LABEL: test_isr_ecode ; CHECK0: pushq %rax + ; CHECK0: pushq %rax ; CHECK0: pushq %rcx - ; CHECK0: movq 16(%rsp), %rax - ; CHECK0: leaq 24(%rsp), %rcx + ; CHECK0: movq 
24(%rsp), %rax + ; CHECK0: leaq 32(%rsp), %rcx ; CHECK0: movq 16(%rcx), %rcx ; CHECK0: popq %rcx ; CHECK0: popq %rax - ; CHECK0: addq $8, %rsp + ; CHECK0: addq $16, %rsp ; CHECK0: iretq %pflags = getelementptr inbounds %struct.interrupt_frame, %struct.interrupt_frame* %frame, i32 0, i32 2 %flags = load i64, i64* %pflags, align 4 @@ -58,6 +60,7 @@ define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 % call void asm sideeffect "", "~{rax},~{rbx},~{rbp},~{r11},~{xmm0}"() ; CHECK-LABEL: test_isr_clobbers ; CHECK-SSE-NEXT: pushq %rax + ; CHECK-SSE-NEXT: pushq %rax ; CHECK-SSE-NEXT: pushq %r11 ; CHECK-SSE-NEXT: pushq %rbp ; CHECK-SSE-NEXT: pushq %rbx @@ -80,7 +83,7 @@ define x86_intrcc void @test_isr_clobbers(%struct.interrupt_frame* %frame, i64 % ; CHECK0-SSE-NEXT: popq %rbp ; CHECK0-SSE-NEXT: popq %r11 ; CHECK0-SSE-NEXT: popq %rax - ; CHECK0-SSE-NEXT: addq $8, %rsp + ; CHECK0-SSE-NEXT: addq $16, %rsp ; CHECK0-SSE-NEXT: iretq ret void } |

