-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp    5
-rw-r--r--  llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp      64
-rw-r--r--  llvm/test/CodeGen/AArch64/swifterror.ll                  2
-rw-r--r--  llvm/test/CodeGen/AArch64/swiftself.ll                   5
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll      8
-rw-r--r--  llvm/test/CodeGen/ARM/cmpxchg-O0.ll                     10
-rw-r--r--  llvm/test/CodeGen/ARM/swifterror.ll                      1
-rw-r--r--  llvm/test/CodeGen/Mips/atomic.ll                       278
-rw-r--r--  llvm/test/CodeGen/Mips/atomic64.ll                     117
-rw-r--r--  llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll               16
-rw-r--r--  llvm/test/CodeGen/Mips/dsp-spill-reload.ll               1
-rw-r--r--  llvm/test/CodeGen/SystemZ/swift-return.ll                6
-rw-r--r--  llvm/test/CodeGen/X86/atomic-unordered.ll                2
-rw-r--r--  llvm/test/CodeGen/X86/swifterror.ll                      8
-rw-r--r--  llvm/test/CodeGen/X86/swiftself.ll                       5
-rw-r--r--  llvm/test/DebugInfo/X86/dbg-declare-arg.ll               2
16 files changed, 230 insertions, 300 deletions
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index cbd43cb6cab..95b429b915c 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -9750,11 +9750,10 @@ void SelectionDAGISel::LowerArguments(const Function &F) {
// If this argument is live outside of the entry block, insert a copy from
// wherever we got it to the vreg that other BB's will reference it as.
- if (!TM.Options.EnableFastISel && Res.getOpcode() == ISD::CopyFromReg) {
+ if (Res.getOpcode() == ISD::CopyFromReg) {
// If we can, though, try to skip creating an unnecessary vreg.
// FIXME: This isn't very clean... it would be nice to make this more
- // general. It's also subtly incompatible with the hacks FastISel
- // uses with vregs.
+ // general.
unsigned Reg = cast<RegisterSDNode>(Res.getOperand(1))->getReg();
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
FuncInfo->ValueMap[&Arg] = Reg;
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index 77a8037623a..c7a199bf2e0 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -503,6 +503,40 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
Fn.getContext().diagnose(DiagFallback);
}
+ // Replace forward-declared registers with the registers containing
+ // the desired value.
+ // Note: it is important that this happens **before** the call to
+ // EmitLiveInCopies, since implementations can skip copies of unused
+ // registers. If we don't apply the reg fixups before, some registers may
+ // appear as unused and will be skipped, resulting in bad MI.
+ MachineRegisterInfo &MRI = MF->getRegInfo();
+ for (DenseMap<unsigned, unsigned>::iterator I = FuncInfo->RegFixups.begin(),
+ E = FuncInfo->RegFixups.end();
+ I != E; ++I) {
+ unsigned From = I->first;
+ unsigned To = I->second;
+ // If To is also scheduled to be replaced, find what its ultimate
+ // replacement is.
+ while (true) {
+ DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
+ if (J == E)
+ break;
+ To = J->second;
+ }
+ // Make sure the new register has a sufficiently constrained register class.
+ if (TargetRegisterInfo::isVirtualRegister(From) &&
+ TargetRegisterInfo::isVirtualRegister(To))
+ MRI.constrainRegClass(To, MRI.getRegClass(From));
+ // Replace it.
+
+ // Replacing one register with another won't touch the kill flags.
+ // We need to conservatively clear the kill flags as a kill on the old
+ // register might dominate existing uses of the new register.
+ if (!MRI.use_empty(To))
+ MRI.clearKillFlags(From);
+ MRI.replaceRegWith(From, To);
+ }
+
// If the first basic block in the function has live ins that need to be
// copied into vregs, emit the copies into the top of the block before
// emitting the code for the block.
@@ -622,36 +656,6 @@ bool SelectionDAGISel::runOnMachineFunction(MachineFunction &mf) {
// Determine if floating point is used for msvc
computeUsesMSVCFloatingPoint(TM.getTargetTriple(), Fn, MF->getMMI());
- // Replace forward-declared registers with the registers containing
- // the desired value.
- MachineRegisterInfo &MRI = MF->getRegInfo();
- for (DenseMap<unsigned, unsigned>::iterator
- I = FuncInfo->RegFixups.begin(), E = FuncInfo->RegFixups.end();
- I != E; ++I) {
- unsigned From = I->first;
- unsigned To = I->second;
- // If To is also scheduled to be replaced, find what its ultimate
- // replacement is.
- while (true) {
- DenseMap<unsigned, unsigned>::iterator J = FuncInfo->RegFixups.find(To);
- if (J == E) break;
- To = J->second;
- }
- // Make sure the new register has a sufficiently constrained register class.
- if (TargetRegisterInfo::isVirtualRegister(From) &&
- TargetRegisterInfo::isVirtualRegister(To))
- MRI.constrainRegClass(To, MRI.getRegClass(From));
- // Replace it.
-
-
- // Replacing one register with another won't touch the kill flags.
- // We need to conservatively clear the kill flags as a kill on the old
- // register might dominate existing uses of the new register.
- if (!MRI.use_empty(To))
- MRI.clearKillFlags(From);
- MRI.replaceRegWith(From, To);
- }
-
TLI->finalizeLowering(*MF);
// Release function-specific state. SDB and CurDAG are already cleared
diff --git a/llvm/test/CodeGen/AArch64/swifterror.ll b/llvm/test/CodeGen/AArch64/swifterror.ll
index d83385c5f91..823599d5de6 100644
--- a/llvm/test/CodeGen/AArch64/swifterror.ll
+++ b/llvm/test/CodeGen/AArch64/swifterror.ll
@@ -235,8 +235,6 @@ define void @foo_sret(%struct.S* sret %agg.result, i32 %val1, %swift_error** swi
; CHECK-O0: mov w{{.*}}, #16
; spill x8
; CHECK-O0-DAG: str x8
-; spill x21
-; CHECK-O0-DAG: str x21
; CHECK-O0: malloc
; CHECK-O0: mov [[ID:w[0-9]+]], #1
; CHECK-O0: strb [[ID]], [x0, #8]
diff --git a/llvm/test/CodeGen/AArch64/swiftself.ll b/llvm/test/CodeGen/AArch64/swiftself.ll
index f19c852cb9b..063085636b3 100644
--- a/llvm/test/CodeGen/AArch64/swiftself.ll
+++ b/llvm/test/CodeGen/AArch64/swiftself.ll
@@ -46,9 +46,10 @@ define void @swiftself_passthrough(i8* swiftself %addr0) {
}
; We can use a tail call if the callee swiftself is the same as the caller one.
+; This should also work with fast-isel.
; CHECK-LABEL: swiftself_tail:
-; OPT: b {{_?}}swiftself_param
-; OPT-NOT: ret
+; CHECK: b {{_?}}swiftself_param
+; CHECK-NOT: ret
define i8* @swiftself_tail(i8* swiftself %addr0) {
call void asm sideeffect "", "~{x20}"()
%res = tail call i8* @swiftself_param(i8* swiftself %addr0)
diff --git a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
index 7b77b4430d4..04ac6d8bf45 100644
--- a/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
+++ b/llvm/test/CodeGen/AMDGPU/mubuf-legalize-operands.ll
@@ -125,8 +125,10 @@ entry:
; CHECK-O0-DAG: s_mov_b32 [[IDX_S:s[0-9]+]], s4
; CHECK-O0-DAG: v_mov_b32_e32 [[IDX_V:v[0-9]+]], [[IDX_S]]
-; CHECK-O0-DAG: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
+; CHECK-O0-DAG: s_mov_b64 s{{\[}}[[SAVEEXEC0:[0-9]+]]:[[SAVEEXEC1:[0-9]+]]{{\]}}, exec
; CHECK-O0-DAG: buffer_store_dword [[IDX_V]], off, s[0:3], s32 offset:[[IDX_OFF:[0-9]+]] ; 4-byte Folded Spill
+; CHECK-O0-DAG: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC0]], [[SAVEEXEC_IDX0:[0-9]+]]
+; CHECK-O0-DAG: v_writelane_b32 [[VSAVEEXEC:v[0-9]+]], s[[SAVEEXEC1]], [[SAVEEXEC_IDX1:[0-9]+]]
; CHECK-O0: [[LOOPBB0:BB[0-9]+_[0-9]+]]:
; CHECK-O0: buffer_load_dword v[[VRSRC0:[0-9]+]], {{.*}} ; 4-byte Folded Reload
@@ -156,7 +158,9 @@ entry:
; CHECK-O0: s_xor_b64 exec, exec, [[CMP]]
; CHECK-O0-NEXT: s_cbranch_execnz [[LOOPBB0]]
-; CHECK-O0: s_mov_b64 exec, [[SAVEEXEC]]
+; CHECK-O0: v_readlane_b32 s[[SAVEEXEC0:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX0]]
+; CHECK-O0: v_readlane_b32 s[[SAVEEXEC1:[0-9]+]], [[VSAVEEXEC]], [[SAVEEXEC_IDX1]]
+; CHECK-O0: s_mov_b64 exec, s{{\[}}[[SAVEEXEC0]]:[[SAVEEXEC1]]{{\]}}
; CHECK-O0: buffer_load_dword [[RES:v[0-9]+]], off, s[0:3], s32 offset:[[RES_OFF_TMP]] ; 4-byte Folded Reload
; CHECK-O0: buffer_store_dword [[RES]], off, s[0:3], s32 offset:[[RES_OFF:[0-9]+]] ; 4-byte Folded Spill
; CHECK-O0: s_cbranch_execz [[TERMBB:BB[0-9]+_[0-9]+]]
diff --git a/llvm/test/CodeGen/ARM/cmpxchg-O0.ll b/llvm/test/CodeGen/ARM/cmpxchg-O0.ll
index 90c1d591036..d3696cfe39a 100644
--- a/llvm/test/CodeGen/ARM/cmpxchg-O0.ll
+++ b/llvm/test/CodeGen/ARM/cmpxchg-O0.ll
@@ -10,7 +10,7 @@ define { i8, i1 } @test_cmpxchg_8(i8* %addr, i8 %desired, i8 %new) nounwind {
; CHECK: dmb ish
; CHECK: uxtb [[DESIRED:r[0-9]+]], [[DESIRED]]
; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
-; CHECK: ldrexb [[OLD:r[0-9]+]], [r0]
+; CHECK: ldrexb [[OLD:[lr0-9]+]], [r0]
; CHECK: cmp [[OLD]], [[DESIRED]]
; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
; CHECK: strexb [[STATUS:r[0-9]+]], r2, [r0]
@@ -19,7 +19,7 @@ define { i8, i1 } @test_cmpxchg_8(i8* %addr, i8 %desired, i8 %new) nounwind {
; CHECK: [[DONE]]:
; Materialisation of a boolean is done with sub/clz/lsr
; CHECK: uxtb [[CMP1:r[0-9]+]], [[DESIRED]]
-; CHECK: sub{{(s)?}} [[CMP1]], [[OLD]], [[CMP1]]
+; CHECK: sub{{(\.w)?}} [[CMP1]], [[OLD]], [[CMP1]]
; CHECK: clz [[CMP2:r[0-9]+]], [[CMP1]]
; CHECK: lsr{{(s)?}} {{r[0-9]+}}, [[CMP2]], #5
; CHECK: dmb ish
@@ -32,7 +32,7 @@ define { i16, i1 } @test_cmpxchg_16(i16* %addr, i16 %desired, i16 %new) nounwind
; CHECK: dmb ish
; CHECK: uxth [[DESIRED:r[0-9]+]], [[DESIRED]]
; CHECK: [[RETRY:.LBB[0-9]+_[0-9]+]]:
-; CHECK: ldrexh [[OLD:r[0-9]+]], [r0]
+; CHECK: ldrexh [[OLD:[lr0-9]+]], [r0]
; CHECK: cmp [[OLD]], [[DESIRED]]
; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
; CHECK: strexh [[STATUS:r[0-9]+]], r2, [r0]
@@ -41,7 +41,7 @@ define { i16, i1 } @test_cmpxchg_16(i16* %addr, i16 %desired, i16 %new) nounwind
; CHECK: [[DONE]]:
; Materialisation of a boolean is done with sub/clz/lsr
; CHECK: uxth [[CMP1:r[0-9]+]], [[DESIRED]]
-; CHECK: sub{{(s)?}} [[CMP1]], [[OLD]], [[CMP1]]
+; CHECK: sub{{(\.w)?}} [[CMP1]], [[OLD]], [[CMP1]]
; CHECK: clz [[CMP2:r[0-9]+]], [[CMP1]]
; CHECK: lsr{{(s)?}} {{r[0-9]+}}, [[CMP2]], #5
; CHECK: dmb ish
@@ -79,7 +79,7 @@ define { i64, i1 } @test_cmpxchg_64(i64* %addr, i64 %desired, i64 %new) nounwind
; CHECK: cmp [[OLDLO]], r6
; CHECK: cmpeq [[OLDHI]], r7
; CHECK: bne [[DONE:.LBB[0-9]+_[0-9]+]]
-; CHECK: strexd [[STATUS:r[0-9]+]], r4, r5, [r0]
+; CHECK: strexd [[STATUS:[lr0-9]+]], r4, r5, [r0]
; CHECK: cmp{{(\.w)?}} [[STATUS]], #0
; CHECK: bne [[RETRY]]
; CHECK: [[DONE]]:
diff --git a/llvm/test/CodeGen/ARM/swifterror.ll b/llvm/test/CodeGen/ARM/swifterror.ll
index fdbbe773574..f05a7872449 100644
--- a/llvm/test/CodeGen/ARM/swifterror.ll
+++ b/llvm/test/CodeGen/ARM/swifterror.ll
@@ -185,7 +185,6 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float
; CHECK-APPLE: ble
; CHECK-O0-LABEL: foo_loop:
-; CHECK-O0: mov r{{.*}}, r8
; CHECK-O0: cmp r{{.*}}, #0
; CHECK-O0: beq
; CHECK-O0: mov r0, #16
diff --git a/llvm/test/CodeGen/Mips/atomic.ll b/llvm/test/CodeGen/Mips/atomic.ll
index d110e003bf5..85254c46c2a 100644
--- a/llvm/test/CodeGen/Mips/atomic.ll
+++ b/llvm/test/CodeGen/Mips/atomic.ll
@@ -106,20 +106,16 @@ define i32 @AtomicLoadAdd32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB0_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: addu $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB0_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: addu $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB0_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadAdd32:
@@ -366,20 +362,16 @@ define i32 @AtomicLoadSub32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB1_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: subu $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB1_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: subu $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB1_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadSub32:
@@ -626,20 +618,16 @@ define i32 @AtomicLoadXor32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB2_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: xor $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB2_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: xor $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB2_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadXor32:
@@ -885,20 +873,16 @@ define i32 @AtomicLoadOr32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB3_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: or $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB3_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: or $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB3_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadOr32:
@@ -1144,20 +1128,16 @@ define i32 @AtomicLoadAnd32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB4_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: and $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB4_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: and $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB4_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadAnd32:
@@ -1407,21 +1387,17 @@ define i32 @AtomicLoadNand32(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB5_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: and $5, $3, $4
-; MIPS32R6O0-NEXT: nor $5, $zero, $5
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB5_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: and $3, $2, $4
+; MIPS32R6O0-NEXT: nor $3, $zero, $3
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB5_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadNand32:
@@ -1692,19 +1668,17 @@ define i32 @AtomicSwap32(i32 signext %newval) nounwind {
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: sw $4, 4($sp)
-; MIPS32R6O0-NEXT: lw $3, 4($sp)
+; MIPS32R6O0-NEXT: lw $2, 4($sp)
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB6_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $4, 0($1)
-; MIPS32R6O0-NEXT: move $5, $3
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB6_1
+; MIPS32R6O0-NEXT: ll $3, 0($1)
+; MIPS32R6O0-NEXT: move $4, $2
+; MIPS32R6O0-NEXT: sc $4, 0($1)
+; MIPS32R6O0-NEXT: beqzc $4, $BB6_1
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 0($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $4
+; MIPS32R6O0-NEXT: move $2, $3
; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
@@ -2011,23 +1985,20 @@ define i32 @AtomicCmpSwap32(i32 signext %oldval, i32 signext %newval) nounwind {
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $5
-; MIPS32R6O0-NEXT: move $3, $4
; MIPS32R6O0-NEXT: sw $5, 4($sp)
-; MIPS32R6O0-NEXT: lw $5, 4($sp)
+; MIPS32R6O0-NEXT: lw $2, 4($sp)
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: $BB7_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $6, 0($1)
-; MIPS32R6O0-NEXT: bnec $6, $4, $BB7_3
+; MIPS32R6O0-NEXT: ll $3, 0($1)
+; MIPS32R6O0-NEXT: bnec $3, $4, $BB7_3
; MIPS32R6O0-NEXT: # %bb.2: # %entry
; MIPS32R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1
-; MIPS32R6O0-NEXT: move $7, $5
-; MIPS32R6O0-NEXT: sc $7, 0($1)
-; MIPS32R6O0-NEXT: beqzc $7, $BB7_1
+; MIPS32R6O0-NEXT: move $5, $2
+; MIPS32R6O0-NEXT: sc $5, 0($1)
+; MIPS32R6O0-NEXT: beqzc $5, $BB7_1
; MIPS32R6O0-NEXT: $BB7_3: # %entry
-; MIPS32R6O0-NEXT: sw $2, 0($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $6
+; MIPS32R6O0-NEXT: move $2, $3
; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
@@ -5077,35 +5048,34 @@ define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 signext %oldval, i8 signext %newval) n
; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: move $1, $6
; MIPS32R6O0-NEXT: move $2, $5
-; MIPS32R6O0-NEXT: move $3, $4
-; MIPS32R6O0-NEXT: addiu $7, $zero, -4
-; MIPS32R6O0-NEXT: and $7, $4, $7
+; MIPS32R6O0-NEXT: addiu $3, $zero, -4
+; MIPS32R6O0-NEXT: and $3, $4, $3
; MIPS32R6O0-NEXT: andi $4, $4, 3
; MIPS32R6O0-NEXT: sll $4, $4, 3
-; MIPS32R6O0-NEXT: ori $8, $zero, 255
-; MIPS32R6O0-NEXT: sllv $8, $8, $4
-; MIPS32R6O0-NEXT: nor $9, $zero, $8
-; MIPS32R6O0-NEXT: andi $10, $5, 255
-; MIPS32R6O0-NEXT: sllv $10, $10, $4
+; MIPS32R6O0-NEXT: ori $7, $zero, 255
+; MIPS32R6O0-NEXT: sllv $7, $7, $4
+; MIPS32R6O0-NEXT: nor $8, $zero, $7
+; MIPS32R6O0-NEXT: andi $9, $5, 255
+; MIPS32R6O0-NEXT: sllv $9, $9, $4
; MIPS32R6O0-NEXT: andi $6, $6, 255
; MIPS32R6O0-NEXT: sllv $6, $6, $4
; MIPS32R6O0-NEXT: $BB13_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $12, 0($7)
-; MIPS32R6O0-NEXT: and $13, $12, $8
-; MIPS32R6O0-NEXT: bnec $13, $10, $BB13_3
+; MIPS32R6O0-NEXT: ll $11, 0($3)
+; MIPS32R6O0-NEXT: and $12, $11, $7
+; MIPS32R6O0-NEXT: bnec $12, $9, $BB13_3
; MIPS32R6O0-NEXT: # %bb.2: # %entry
; MIPS32R6O0-NEXT: # in Loop: Header=BB13_1 Depth=1
-; MIPS32R6O0-NEXT: and $12, $12, $9
-; MIPS32R6O0-NEXT: or $12, $12, $6
-; MIPS32R6O0-NEXT: sc $12, 0($7)
-; MIPS32R6O0-NEXT: beqzc $12, $BB13_1
+; MIPS32R6O0-NEXT: and $11, $11, $8
+; MIPS32R6O0-NEXT: or $11, $11, $6
+; MIPS32R6O0-NEXT: sc $11, 0($3)
+; MIPS32R6O0-NEXT: beqzc $11, $BB13_1
; MIPS32R6O0-NEXT: $BB13_3: # %entry
-; MIPS32R6O0-NEXT: srlv $11, $13, $4
-; MIPS32R6O0-NEXT: seb $11, $11
+; MIPS32R6O0-NEXT: srlv $10, $12, $4
+; MIPS32R6O0-NEXT: seb $10, $10
; MIPS32R6O0-NEXT: # %bb.4: # %entry
; MIPS32R6O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: sw $11, 0($sp) # 4-byte Folded Spill
+; MIPS32R6O0-NEXT: sw $10, 0($sp) # 4-byte Folded Spill
; MIPS32R6O0-NEXT: # %bb.5: # %entry
; MIPS32R6O0-NEXT: lw $1, 0($sp) # 4-byte Folded Reload
; MIPS32R6O0-NEXT: lw $2, 4($sp) # 4-byte Folded Reload
@@ -5259,36 +5229,35 @@ define i1 @AtomicCmpSwapRes8(i8* %ptr, i8 signext %oldval, i8 signext %newval) n
; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: # kill: def $a2 killed $a2 killed $a2_64
; MIPS64R6O0-NEXT: # kill: def $a1 killed $a1 killed $a1_64
-; MIPS64R6O0-NEXT: move $1, $4
-; MIPS64R6O0-NEXT: daddiu $2, $zero, -4
-; MIPS64R6O0-NEXT: and $2, $4, $2
-; MIPS64R6O0-NEXT: andi $3, $4, 3
-; MIPS64R6O0-NEXT: xori $3, $3, 3
-; MIPS64R6O0-NEXT: sll $3, $3, 3
-; MIPS64R6O0-NEXT: ori $7, $zero, 255
-; MIPS64R6O0-NEXT: sllv $7, $7, $3
-; MIPS64R6O0-NEXT: nor $8, $zero, $7
-; MIPS64R6O0-NEXT: andi $9, $5, 255
-; MIPS64R6O0-NEXT: sllv $9, $9, $3
+; MIPS64R6O0-NEXT: daddiu $1, $zero, -4
+; MIPS64R6O0-NEXT: and $1, $4, $1
+; MIPS64R6O0-NEXT: andi $2, $4, 3
+; MIPS64R6O0-NEXT: xori $2, $2, 3
+; MIPS64R6O0-NEXT: sll $2, $2, 3
+; MIPS64R6O0-NEXT: ori $3, $zero, 255
+; MIPS64R6O0-NEXT: sllv $3, $3, $2
+; MIPS64R6O0-NEXT: nor $7, $zero, $3
+; MIPS64R6O0-NEXT: andi $8, $5, 255
+; MIPS64R6O0-NEXT: sllv $8, $8, $2
; MIPS64R6O0-NEXT: andi $6, $6, 255
-; MIPS64R6O0-NEXT: sllv $6, $6, $3
+; MIPS64R6O0-NEXT: sllv $6, $6, $2
; MIPS64R6O0-NEXT: .LBB13_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: ll $11, 0($2)
-; MIPS64R6O0-NEXT: and $12, $11, $7
-; MIPS64R6O0-NEXT: bnec $12, $9, .LBB13_3
+; MIPS64R6O0-NEXT: ll $10, 0($1)
+; MIPS64R6O0-NEXT: and $11, $10, $3
+; MIPS64R6O0-NEXT: bnec $11, $8, .LBB13_3
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: # in Loop: Header=BB13_1 Depth=1
-; MIPS64R6O0-NEXT: and $11, $11, $8
-; MIPS64R6O0-NEXT: or $11, $11, $6
-; MIPS64R6O0-NEXT: sc $11, 0($2)
-; MIPS64R6O0-NEXT: beqzc $11, .LBB13_1
+; MIPS64R6O0-NEXT: and $10, $10, $7
+; MIPS64R6O0-NEXT: or $10, $10, $6
+; MIPS64R6O0-NEXT: sc $10, 0($1)
+; MIPS64R6O0-NEXT: beqzc $10, .LBB13_1
; MIPS64R6O0-NEXT: .LBB13_3: # %entry
-; MIPS64R6O0-NEXT: srlv $10, $12, $3
-; MIPS64R6O0-NEXT: seb $10, $10
+; MIPS64R6O0-NEXT: srlv $9, $11, $2
+; MIPS64R6O0-NEXT: seb $9, $9
; MIPS64R6O0-NEXT: # %bb.4: # %entry
; MIPS64R6O0-NEXT: sw $5, 12($sp) # 4-byte Folded Spill
-; MIPS64R6O0-NEXT: sw $10, 8($sp) # 4-byte Folded Spill
+; MIPS64R6O0-NEXT: sw $9, 8($sp) # 4-byte Folded Spill
; MIPS64R6O0-NEXT: # %bb.5: # %entry
; MIPS64R6O0-NEXT: lw $1, 8($sp) # 4-byte Folded Reload
; MIPS64R6O0-NEXT: lw $2, 12($sp) # 4-byte Folded Reload
@@ -6178,35 +6147,34 @@ define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
; MIPS32R6O0-NEXT: move $1, $7
; MIPS32R6O0-NEXT: move $2, $6
; MIPS32R6O0-NEXT: move $3, $5
-; MIPS32R6O0-NEXT: move $8, $4
; MIPS32R6O0-NEXT: addu $5, $5, $6
; MIPS32R6O0-NEXT: sync
; MIPS32R6O0-NEXT: addiu $6, $zero, -4
; MIPS32R6O0-NEXT: and $6, $4, $6
; MIPS32R6O0-NEXT: andi $4, $4, 3
; MIPS32R6O0-NEXT: sll $4, $4, 3
-; MIPS32R6O0-NEXT: ori $9, $zero, 65535
-; MIPS32R6O0-NEXT: sllv $9, $9, $4
-; MIPS32R6O0-NEXT: nor $10, $zero, $9
-; MIPS32R6O0-NEXT: andi $11, $5, 65535
-; MIPS32R6O0-NEXT: sllv $11, $11, $4
+; MIPS32R6O0-NEXT: ori $8, $zero, 65535
+; MIPS32R6O0-NEXT: sllv $8, $8, $4
+; MIPS32R6O0-NEXT: nor $9, $zero, $8
+; MIPS32R6O0-NEXT: andi $10, $5, 65535
+; MIPS32R6O0-NEXT: sllv $10, $10, $4
; MIPS32R6O0-NEXT: andi $7, $7, 65535
; MIPS32R6O0-NEXT: sllv $7, $7, $4
; MIPS32R6O0-NEXT: $BB15_1: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $13, 0($6)
-; MIPS32R6O0-NEXT: and $14, $13, $9
-; MIPS32R6O0-NEXT: bnec $14, $11, $BB15_3
+; MIPS32R6O0-NEXT: ll $12, 0($6)
+; MIPS32R6O0-NEXT: and $13, $12, $8
+; MIPS32R6O0-NEXT: bnec $13, $10, $BB15_3
; MIPS32R6O0-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
-; MIPS32R6O0-NEXT: and $13, $13, $10
-; MIPS32R6O0-NEXT: or $13, $13, $7
-; MIPS32R6O0-NEXT: sc $13, 0($6)
-; MIPS32R6O0-NEXT: beqzc $13, $BB15_1
+; MIPS32R6O0-NEXT: and $12, $12, $9
+; MIPS32R6O0-NEXT: or $12, $12, $7
+; MIPS32R6O0-NEXT: sc $12, 0($6)
+; MIPS32R6O0-NEXT: beqzc $12, $BB15_1
; MIPS32R6O0-NEXT: $BB15_3:
-; MIPS32R6O0-NEXT: srlv $12, $14, $4
-; MIPS32R6O0-NEXT: seh $12, $12
+; MIPS32R6O0-NEXT: srlv $11, $13, $4
+; MIPS32R6O0-NEXT: seh $11, $11
; MIPS32R6O0-NEXT: # %bb.4:
; MIPS32R6O0-NEXT: sw $5, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: sw $12, 0($sp) # 4-byte Folded Spill
+; MIPS32R6O0-NEXT: sw $11, 0($sp) # 4-byte Folded Spill
; MIPS32R6O0-NEXT: # %bb.5:
; MIPS32R6O0-NEXT: lw $1, 4($sp) # 4-byte Folded Reload
; MIPS32R6O0-NEXT: seh $2, $1
@@ -6389,11 +6357,10 @@ define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
; MIPS64R6O0-NEXT: sll $2, $6, 0
; MIPS64R6O0-NEXT: # kill: def $a1 killed $a1 killed $a1_64
; MIPS64R6O0-NEXT: sll $3, $5, 0
-; MIPS64R6O0-NEXT: move $8, $4
; MIPS64R6O0-NEXT: addu $2, $3, $2
; MIPS64R6O0-NEXT: sync
-; MIPS64R6O0-NEXT: daddiu $9, $zero, -4
-; MIPS64R6O0-NEXT: and $9, $4, $9
+; MIPS64R6O0-NEXT: daddiu $8, $zero, -4
+; MIPS64R6O0-NEXT: and $8, $4, $8
; MIPS64R6O0-NEXT: andi $3, $4, 3
; MIPS64R6O0-NEXT: xori $3, $3, 2
; MIPS64R6O0-NEXT: sll $3, $3, 3
@@ -6405,20 +6372,20 @@ define {i16, i1} @foo(i16* %addr, i16 %l, i16 %r, i16 %new) {
; MIPS64R6O0-NEXT: andi $1, $1, 65535
; MIPS64R6O0-NEXT: sllv $1, $1, $3
; MIPS64R6O0-NEXT: .LBB15_1: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: ll $11, 0($9)
-; MIPS64R6O0-NEXT: and $12, $11, $5
-; MIPS64R6O0-NEXT: bnec $12, $7, .LBB15_3
+; MIPS64R6O0-NEXT: ll $10, 0($8)
+; MIPS64R6O0-NEXT: and $11, $10, $5
+; MIPS64R6O0-NEXT: bnec $11, $7, .LBB15_3
; MIPS64R6O0-NEXT: # %bb.2: # in Loop: Header=BB15_1 Depth=1
-; MIPS64R6O0-NEXT: and $11, $11, $6
-; MIPS64R6O0-NEXT: or $11, $11, $1
-; MIPS64R6O0-NEXT: sc $11, 0($9)
-; MIPS64R6O0-NEXT: beqzc $11, .LBB15_1
+; MIPS64R6O0-NEXT: and $10, $10, $6
+; MIPS64R6O0-NEXT: or $10, $10, $1
+; MIPS64R6O0-NEXT: sc $10, 0($8)
+; MIPS64R6O0-NEXT: beqzc $10, .LBB15_1
; MIPS64R6O0-NEXT: .LBB15_3:
-; MIPS64R6O0-NEXT: srlv $10, $12, $3
-; MIPS64R6O0-NEXT: seh $10, $10
+; MIPS64R6O0-NEXT: srlv $9, $11, $3
+; MIPS64R6O0-NEXT: seh $9, $9
; MIPS64R6O0-NEXT: # %bb.4:
; MIPS64R6O0-NEXT: sw $2, 12($sp) # 4-byte Folded Spill
-; MIPS64R6O0-NEXT: sw $10, 8($sp) # 4-byte Folded Spill
+; MIPS64R6O0-NEXT: sw $9, 8($sp) # 4-byte Folded Spill
; MIPS64R6O0-NEXT: # %bb.5:
; MIPS64R6O0-NEXT: lw $1, 12($sp) # 4-byte Folded Reload
; MIPS64R6O0-NEXT: seh $2, $1
@@ -6706,22 +6673,17 @@ define i32 @CheckSync(i32 signext %v) nounwind noinline {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: sync
; MIPS32R6O0-NEXT: lw $1, %got(countsint)($1)
; MIPS32R6O0-NEXT: $BB16_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: addu $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB16_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: addu $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB16_1
; MIPS32R6O0-NEXT: # %bb.2: # %entry
; MIPS32R6O0-NEXT: sync
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: CheckSync:
@@ -7405,21 +7367,17 @@ define i32 @AtomicLoadAdd32_OffGt9Bit(i32 signext %incr) nounwind {
; MIPS32R6O0: # %bb.0: # %entry
; MIPS32R6O0-NEXT: lui $2, %hi(_gp_disp)
; MIPS32R6O0-NEXT: addiu $2, $2, %lo(_gp_disp)
-; MIPS32R6O0-NEXT: addiu $sp, $sp, -8
; MIPS32R6O0-NEXT: addu $1, $2, $25
-; MIPS32R6O0-NEXT: move $2, $4
; MIPS32R6O0-NEXT: lw $1, %got(x)($1)
; MIPS32R6O0-NEXT: addiu $1, $1, 1024
; MIPS32R6O0-NEXT: $BB18_1: # %entry
; MIPS32R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS32R6O0-NEXT: ll $3, 0($1)
-; MIPS32R6O0-NEXT: addu $5, $3, $4
-; MIPS32R6O0-NEXT: sc $5, 0($1)
-; MIPS32R6O0-NEXT: beqzc $5, $BB18_1
+; MIPS32R6O0-NEXT: ll $2, 0($1)
+; MIPS32R6O0-NEXT: addu $3, $2, $4
+; MIPS32R6O0-NEXT: sc $3, 0($1)
+; MIPS32R6O0-NEXT: beqzc $3, $BB18_1
+; MIPS32R6O0-NEXT: nop
; MIPS32R6O0-NEXT: # %bb.2: # %entry
-; MIPS32R6O0-NEXT: sw $2, 4($sp) # 4-byte Folded Spill
-; MIPS32R6O0-NEXT: move $2, $3
-; MIPS32R6O0-NEXT: addiu $sp, $sp, 8
; MIPS32R6O0-NEXT: jrc $ra
;
; MIPS4-LABEL: AtomicLoadAdd32_OffGt9Bit:
diff --git a/llvm/test/CodeGen/Mips/atomic64.ll b/llvm/test/CodeGen/Mips/atomic64.ll
index 8330b1ead7c..5e59246eff5 100644
--- a/llvm/test/CodeGen/Mips/atomic64.ll
+++ b/llvm/test/CodeGen/Mips/atomic64.ll
@@ -92,22 +92,18 @@ define i64 @AtomicLoadAdd(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadAdd:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAdd)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAdd)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB0_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: daddu $5, $3, $4
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB0_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: daddu $3, $2, $4
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB0_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadAdd:
@@ -253,22 +249,18 @@ define i64 @AtomicLoadSub(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadSub:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadSub)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadSub)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB1_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: dsubu $5, $3, $4
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB1_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: dsubu $3, $2, $4
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB1_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadSub:
@@ -414,22 +406,18 @@ define i64 @AtomicLoadAnd(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadAnd:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadAnd)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadAnd)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB2_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: and $5, $3, $4
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB2_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: and $3, $2, $4
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB2_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadAnd:
@@ -575,22 +563,18 @@ define i64 @AtomicLoadOr(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadOr:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadOr)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadOr)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB3_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: or $5, $3, $4
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB3_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: or $3, $2, $4
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB3_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadOr:
@@ -736,22 +720,18 @@ define i64 @AtomicLoadXor(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadXor:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadXor)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadXor)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB4_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: xor $5, $3, $4
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB4_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: xor $3, $2, $4
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB4_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadXor:
@@ -901,23 +881,19 @@ define i64 @AtomicLoadNand(i64 signext %incr) nounwind {
;
; MIPS64R6O0-LABEL: AtomicLoadNand:
; MIPS64R6O0: # %bb.0: # %entry
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, -16
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicLoadNand)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicLoadNand)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB5_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $3, 0($1)
-; MIPS64R6O0-NEXT: and $5, $3, $4
-; MIPS64R6O0-NEXT: nor $5, $zero, $5
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB5_1
+; MIPS64R6O0-NEXT: lld $2, 0($1)
+; MIPS64R6O0-NEXT: and $3, $2, $4
+; MIPS64R6O0-NEXT: nor $3, $zero, $3
+; MIPS64R6O0-NEXT: scd $3, 0($1)
+; MIPS64R6O0-NEXT: beqzc $3, .LBB5_1
+; MIPS64R6O0-NEXT: nop
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 8($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $3
-; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
; O1-LABEL: AtomicLoadNand:
@@ -1080,19 +1056,17 @@ define i64 @AtomicSwap64(i64 signext %newval) nounwind {
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicSwap64)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicSwap64)))
-; MIPS64R6O0-NEXT: move $2, $4
; MIPS64R6O0-NEXT: sd $4, 8($sp)
-; MIPS64R6O0-NEXT: ld $3, 8($sp)
+; MIPS64R6O0-NEXT: ld $2, 8($sp)
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB6_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $4, 0($1)
-; MIPS64R6O0-NEXT: move $5, $3
-; MIPS64R6O0-NEXT: scd $5, 0($1)
-; MIPS64R6O0-NEXT: beqzc $5, .LBB6_1
+; MIPS64R6O0-NEXT: lld $3, 0($1)
+; MIPS64R6O0-NEXT: move $4, $2
+; MIPS64R6O0-NEXT: scd $4, 0($1)
+; MIPS64R6O0-NEXT: beqzc $4, .LBB6_1
; MIPS64R6O0-NEXT: # %bb.2: # %entry
-; MIPS64R6O0-NEXT: sd $2, 0($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $4
+; MIPS64R6O0-NEXT: move $2, $3
; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
@@ -1278,23 +1252,20 @@ define i64 @AtomicCmpSwap64(i64 signext %oldval, i64 signext %newval) nounwind {
; MIPS64R6O0-NEXT: lui $1, %hi(%neg(%gp_rel(AtomicCmpSwap64)))
; MIPS64R6O0-NEXT: daddu $1, $1, $25
; MIPS64R6O0-NEXT: daddiu $1, $1, %lo(%neg(%gp_rel(AtomicCmpSwap64)))
-; MIPS64R6O0-NEXT: move $2, $5
-; MIPS64R6O0-NEXT: move $3, $4
; MIPS64R6O0-NEXT: sd $5, 8($sp)
-; MIPS64R6O0-NEXT: ld $5, 8($sp)
+; MIPS64R6O0-NEXT: ld $2, 8($sp)
; MIPS64R6O0-NEXT: ld $1, %got_disp(x)($1)
; MIPS64R6O0-NEXT: .LBB7_1: # %entry
; MIPS64R6O0-NEXT: # =>This Inner Loop Header: Depth=1
-; MIPS64R6O0-NEXT: lld $6, 0($1)
-; MIPS64R6O0-NEXT: bnec $6, $4, .LBB7_3
+; MIPS64R6O0-NEXT: lld $3, 0($1)
+; MIPS64R6O0-NEXT: bnec $3, $4, .LBB7_3
; MIPS64R6O0-NEXT: # %bb.2: # %entry
; MIPS64R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1
-; MIPS64R6O0-NEXT: move $7, $5
-; MIPS64R6O0-NEXT: scd $7, 0($1)
-; MIPS64R6O0-NEXT: beqzc $7, .LBB7_1
+; MIPS64R6O0-NEXT: move $5, $2
+; MIPS64R6O0-NEXT: scd $5, 0($1)
+; MIPS64R6O0-NEXT: beqzc $5, .LBB7_1
; MIPS64R6O0-NEXT: .LBB7_3: # %entry
-; MIPS64R6O0-NEXT: sd $2, 0($sp) # 8-byte Folded Spill
-; MIPS64R6O0-NEXT: move $2, $6
+; MIPS64R6O0-NEXT: move $2, $3
; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
; MIPS64R6O0-NEXT: jrc $ra
;
diff --git a/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll b/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
index d8a259aa1c8..64a62c17028 100644
--- a/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
+++ b/llvm/test/CodeGen/Mips/atomicCmpSwapPW.ll
@@ -11,21 +11,19 @@
define void @foo(i32 %new, i32 %old) {
; O32-LABEL: foo:
; O32: # %bb.0: # %entry
-; O32-NEXT: move $1, $5
-; O32-NEXT: move $2, $4
-; O32-NEXT: lui $3, %hi(sym)
-; O32-NEXT: lw $3, %lo(sym)($3)
+; O32-NEXT: lui $1, %hi(sym)
+; O32-NEXT: lw $1, %lo(sym)($1)
; O32-NEXT: sync
; O32-NEXT: $BB0_1: # %entry
; O32-NEXT: # =>This Inner Loop Header: Depth=1
-; O32-NEXT: ll $6, 0($3)
-; O32-NEXT: bne $6, $4, $BB0_3
+; O32-NEXT: ll $2, 0($1)
+; O32-NEXT: bne $2, $4, $BB0_3
; O32-NEXT: nop
; O32-NEXT: # %bb.2: # %entry
; O32-NEXT: # in Loop: Header=BB0_1 Depth=1
-; O32-NEXT: move $7, $5
-; O32-NEXT: sc $7, 0($3)
-; O32-NEXT: beqz $7, $BB0_1
+; O32-NEXT: move $3, $5
+; O32-NEXT: sc $3, 0($1)
+; O32-NEXT: beqz $3, $BB0_1
; O32-NEXT: nop
; O32-NEXT: $BB0_3: # %entry
; O32-NEXT: sync
diff --git a/llvm/test/CodeGen/Mips/dsp-spill-reload.ll b/llvm/test/CodeGen/Mips/dsp-spill-reload.ll
index f879853daa6..a1663863011 100644
--- a/llvm/test/CodeGen/Mips/dsp-spill-reload.ll
+++ b/llvm/test/CodeGen/Mips/dsp-spill-reload.ll
@@ -26,7 +26,6 @@ entry:
; MM-OBJ: sw ${{[0-9]+}}, {{[0-9]+}}($sp)
; MM-OBJ: sw ${{[0-9]+}}, {{[0-9]+}}($sp)
-; MM-OBJ: sw ${{[0-9]+}}, {{[0-9]+}}($sp)
true:
ret <4 x i8> %c
diff --git a/llvm/test/CodeGen/SystemZ/swift-return.ll b/llvm/test/CodeGen/SystemZ/swift-return.ll
index af80a508ae8..84e257f9321 100644
--- a/llvm/test/CodeGen/SystemZ/swift-return.ll
+++ b/llvm/test/CodeGen/SystemZ/swift-return.ll
@@ -50,13 +50,13 @@ declare swiftcc { i16, i8 } @gen(i32)
; CHECK-O0-LABEL: test2:
; CHECK-O0: st %r2, [[SPILL1:[0-9]+]](%r15)
; CHECK-O0: l %r3, [[SPILL1]](%r15)
-; CHECK-O0: la %r2, 168(%r15)
+; CHECK-O0: la %r2, 160(%r15)
; CHECK-O0: brasl %r14, gen2
-; CHECK-O0-DAG: l %r{{.*}}, 184(%r15)
-; CHECK-O0-DAG: l %r{{.*}}, 180(%r15)
; CHECK-O0-DAG: l %r{{.*}}, 176(%r15)
; CHECK-O0-DAG: l %r{{.*}}, 172(%r15)
; CHECK-O0-DAG: l %r{{.*}}, 168(%r15)
+; CHECK-O0-DAG: l %r{{.*}}, 164(%r15)
+; CHECK-O0-DAG: l %r{{.*}}, 160(%r15)
; CHECK-O0: ar
; CHECK-O0: ar
; CHECK-O0: ar
diff --git a/llvm/test/CodeGen/X86/atomic-unordered.ll b/llvm/test/CodeGen/X86/atomic-unordered.ll
index 567e8b47c4b..a9df57bba0c 100644
--- a/llvm/test/CodeGen/X86/atomic-unordered.ll
+++ b/llvm/test/CodeGen/X86/atomic-unordered.ll
@@ -314,9 +314,9 @@ define void @store_i128(i128* %ptr, i128 %v) {
; CHECK-O0-NEXT: .cfi_offset %rbx, -16
; CHECK-O0-NEXT: movq (%rdi), %rax
; CHECK-O0-NEXT: movq 8(%rdi), %rcx
+; CHECK-O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-O0-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-O0-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
-; CHECK-O0-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-O0-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-O0-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK-O0-NEXT: jmp .LBB16_1
diff --git a/llvm/test/CodeGen/X86/swifterror.ll b/llvm/test/CodeGen/X86/swifterror.ll
index 6d7c13836a5..4fea5dc5857 100644
--- a/llvm/test/CodeGen/X86/swifterror.ll
+++ b/llvm/test/CodeGen/X86/swifterror.ll
@@ -435,7 +435,6 @@ define swiftcc float @conditionally_forward_swifterror(%swift_error** swifterror
; CHECK-O0-LABEL: conditionally_forward_swifterror:
; CHECK-O0: pushq [[REG1:%[a-z0-9]+]]
-; CHECK-O0: movq %r12, [[REG1]]
; CHECK-O0: cmpl $0, %edi
; CHECK-O0-DAG: movq %r12, (%rsp)
; CHECK-O0: je
@@ -744,7 +743,6 @@ a:
}
; CHECK-O0-LABEL: testAssign2
-; CHECK-O0: movq %r12, {{.*}}
; CHECK-O0: movq %r12, [[SLOT:[-a-z0-9\(\)\%]*]]
; CHECK-O0: jmp
; CHECK-O0: movq [[SLOT]], %rax
@@ -791,9 +789,9 @@ a:
; CHECK-O0-LABEL: testAssign4
; CHECK-O0: callq _foo2
-; CHECK-O0: xorl %ecx, %ecx
-; CHECK-O0: movl %ecx, %eax
-; CHECK-O0: movq %rax, [[SLOT:[-a-z0-9\(\)\%]*]]
+; CHECK-O0: xorl %eax, %eax
+; CHECK-O0: movl %eax, %ecx
+; CHECK-O0: movq %rcx, [[SLOT:[-a-z0-9\(\)\%]*]]
; CHECK-O0: movq [[SLOT]], %rax
; CHECK-O0: movq %rax, [[SLOT2:[-a-z0-9\(\)\%]*]]
; CHECK-O0: movq [[SLOT2]], %r12
diff --git a/llvm/test/CodeGen/X86/swiftself.ll b/llvm/test/CodeGen/X86/swiftself.ll
index c5e90594560..6a6c56ef9f0 100644
--- a/llvm/test/CodeGen/X86/swiftself.ll
+++ b/llvm/test/CodeGen/X86/swiftself.ll
@@ -41,9 +41,10 @@ define void @swiftself_passthrough(i8* swiftself %addr0) {
}
; We can use a tail call if the callee swiftself is the same as the caller one.
+; This should also work with fast-isel.
; CHECK-LABEL: swiftself_tail:
-; OPT: jmp {{_?}}swiftself_param
-; OPT-NOT: ret
+; CHECK: jmp {{_?}}swiftself_param
+; CHECK-NOT: ret
define i8* @swiftself_tail(i8* swiftself %addr0) {
call void asm sideeffect "", "~{r13}"()
%res = tail call i8* @swiftself_param(i8* swiftself %addr0)
diff --git a/llvm/test/DebugInfo/X86/dbg-declare-arg.ll b/llvm/test/DebugInfo/X86/dbg-declare-arg.ll
index b2b88cb8b1b..1fa53462b84 100644
--- a/llvm/test/DebugInfo/X86/dbg-declare-arg.ll
+++ b/llvm/test/DebugInfo/X86/dbg-declare-arg.ll
@@ -20,7 +20,7 @@ target triple = "x86_64-apple-macosx10.6.7"
; CHECK: DW_AT_name {{.*}}"j"
; CHECK: DW_TAG_variable
; CHECK-NEXT: DW_AT_location [DW_FORM_sec_offset] (
-; CHECK-NEXT: [0x{{.*}}, 0x{{.*}}): DW_OP_breg7 RSP+8, DW_OP_deref)
+; CHECK-NEXT: [0x{{.*}}, 0x{{.*}}): DW_OP_breg7 RSP+16, DW_OP_deref)
; CHECK-NEXT: DW_AT_name {{.*}}"my_a"
%class.A = type { i32, i32, i32, i32 }