author    Sam Parker <sam.parker@arm.com>  2019-12-20 09:32:36 +0000
committer Sam Parker <sam.parker@arm.com>  2019-12-20 09:34:18 +0000
commit    acbc9aed726d4b7428691e026a214cb26ee2cf94 (patch)
tree      93e611317cbbc73893c2dad4d901d55b864d79f9 /llvm/test/CodeGen/Thumb2
parent    0ca9d2fd39264054501927ba6d3c5330159458d7 (diff)
[ARM][MVE] Fixes for tail predication.
1) Fix the incorrect value being used for the number of elements passed to [d|w]lstp. We were checking that the value was available at LoopStart, but this did not account for the last instruction in the block also being able to define the register. Two helpers have been added to RDA for this.
2) Insert code that tries to move the element count def, or the insertion point, so that tail predication can be performed in more cases.
3) Related to (1), the same off-by-one could prevent us from generating a low-overhead loop when a mov into lr could have been the last instruction in the block.
4) Fix up some instruction attributes so that not all of the low-overhead loop instructions are labelled as branches and terminators, as this is not true for dls/dlstp.

Differential Revision: https://reviews.llvm.org/D71609
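To illustrate point (1): the element count that feeds the vctp is the block's live-out value of its register, so it can only be handed to a [d|w]lstp inserted earlier in the block if no later instruction, including the very last one, redefines that register. Below is a minimal sketch of that check written against the generic MachineBasicBlock/MachineInstr API; the helper name and structure are illustrative assumptions, not the actual RDA helpers added by this patch.

// Illustrative sketch only -- a hypothetical helper, not the RDA code this
// patch adds. It models the question the pass must answer before inserting
// a DLSTP/WLSTP: does the element-count register still hold its intended
// value at the insertion point?
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Register.h"

using namespace llvm;

// Returns true if no instruction after InsertPt in Preheader redefines
// ElemCount. The redefinition may be the very last instruction of the
// block, which a naive "is the value available at LoopStart?" query misses.
static bool elemCountValidAt(const MachineBasicBlock &Preheader,
                             const MachineInstr &InsertPt,
                             Register ElemCount) {
  bool AfterInsertPt = false;
  for (const MachineInstr &MI : Preheader) {
    if (&MI == &InsertPt)
      AfterInsertPt = true;
    else if (AfterInsertPt && MI.definesRegister(ElemCount, /*TRI=*/nullptr))
      return false; // A stale value would be handed to the DLSTP/WLSTP.
  }
  return true;
}

When such a check fails, point (2) applies: rather than bailing out, the pass can attempt to hoist the element-count def above the insertion point or sink the insertion point below it, which is what the move-def-before-start.mir and move-start-after-def.mir tests below exercise.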
Diffstat (limited to 'llvm/test/CodeGen/Thumb2')
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir      175
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir  183
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir   181
-rw-r--r--  llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir     1
4 files changed, 540 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir
new file mode 100644
index 00000000000..57fe0492f1e
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/mov-lr-terminator.mir
@@ -0,0 +1,175 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops %s -o - | FileCheck %s
+--- |
+ define dso_local arm_aapcs_vfpcc void @start_before_elems(i32* noalias nocapture %a, i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) local_unnamed_addr #0 {
+ entry:
+ %div = lshr i32 %N, 1
+ %cmp9 = icmp eq i32 %div, 0
+ %0 = add nuw i32 %div, 3
+ %1 = lshr i32 %0, 2
+ %2 = shl nuw i32 %1, 2
+ %3 = add i32 %2, -4
+ %4 = lshr i32 %3, 2
+ %5 = add nuw nsw i32 %4, 1
+ br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+ vector.ph: ; preds = %entry
+ call void @llvm.set.loop.iterations.i32(i32 %5)
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %vector.ph
+ %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %6 = phi i32 [ %5, %vector.ph ], [ %13, %vector.body ]
+ %7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ]
+ %lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
+ %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
+ %9 = sub i32 %7, 4
+ %scevgep4 = getelementptr i8, i8* %b, i32 %index
+ %scevgep45 = bitcast i8* %scevgep4 to <4 x i8>*
+ %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep45, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %10 = zext <4 x i8> %wide.masked.load to <4 x i32>
+ %scevgep2 = getelementptr i8, i8* %c, i32 %index
+ %scevgep23 = bitcast i8* %scevgep2 to <4 x i8>*
+ %wide.masked.load13 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep23, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %11 = zext <4 x i8> %wide.masked.load13 to <4 x i32>
+ %12 = mul nuw nsw <4 x i32> %11, %10
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %12, <4 x i32>* %lsr.iv1, i32 4, <4 x i1> %8)
+ %index.next = add i32 %index, 4
+ %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
+ %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
+ %14 = icmp ne i32 %13, 0
+ br i1 %14, label %vector.body, label %for.cond.cleanup
+
+ for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+ }
+ declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>)
+ declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>)
+ declare void @llvm.set.loop.iterations.i32(i32)
+ declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32)
+ declare <4 x i1> @llvm.arm.mve.vctp32(i32)
+...
+---
+name: start_before_elems
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+registers: []
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
+ - { reg: '$r2', virtual-reg: '' }
+ - { reg: '$r3', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 8
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 0
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: start_before_elems
+ ; CHECK: bb.0.entry:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $lr
+ ; CHECK: frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK: renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ ; CHECK: t2IT 0, 8, implicit-def $itstate
+ ; CHECK: tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK: renamable $r12 = t2LSRri killed renamable $r3, 1, 14, $noreg, $noreg
+ ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ ; CHECK: $lr = MVE_DLSTP_32 renamable $r12
+ ; CHECK: bb.1.vector.body:
+ ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep45, align 1)
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep23, align 1)
+ ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv1, align 4)
+ ; CHECK: $lr = MVE_LETP renamable $lr, %bb.1
+ ; CHECK: bb.2.for.cond.cleanup:
+ ; CHECK: tPOP_RET 14, $noreg, def $r4, def $pc
+ bb.0.entry:
+ successors: %bb.1(0x80000000)
+ liveins: $r0, $r1, $r2, $r3, $r4, $lr
+
+ frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r4, -8
+ renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ t2IT 0, 8, implicit-def $itstate
+ tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ renamable $r12 = t2MOVi 3, 14, $noreg, $noreg
+ renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+ renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14, $noreg, $noreg
+ renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ renamable $r5 = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
+ renamable $r12 = t2LSRri killed renamable $r3, 1, 14, $noreg, $noreg
+ renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ t2DoLoopStart renamable $r5
+ $lr = tMOVr killed $r5, 14, $noreg
+
+ bb.1.vector.body:
+ successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ liveins: $lr, $r0, $r1, $r2, $r3, $r12
+
+ renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep45, align 1)
+ renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep23, align 1)
+ renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ MVE_VPST 8, implicit $vpr
+ renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv1, align 4)
+ renamable $lr = t2LoopDec killed renamable $lr, 1
+ t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+ tB %bb.2, 14, $noreg
+
+ bb.2.for.cond.cleanup:
+ tPOP_RET 14, $noreg, def $r4, def $pc
+
+...
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir
new file mode 100644
index 00000000000..ff49bb0770e
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-def-before-start.mir
@@ -0,0 +1,183 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
+
+# Test that, although the vctp operand is defined at the end of the block,
+# the correct value is used for the dlstp.
+# TODO: The pass currently just bails instead of finding the correct
+# value.
+
+--- |
+ define dso_local arm_aapcs_vfpcc void @start_before_elems(i32* noalias nocapture %a, i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) local_unnamed_addr #0 {
+ entry:
+ %div = lshr i32 %N, 1
+ %cmp9 = icmp eq i32 %div, 0
+ %0 = add nuw i32 %div, 3
+ %1 = lshr i32 %0, 2
+ %2 = shl nuw i32 %1, 2
+ %3 = add i32 %2, -4
+ %4 = lshr i32 %3, 2
+ %5 = add nuw nsw i32 %4, 1
+ br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+ vector.ph: ; preds = %entry
+ call void @llvm.set.loop.iterations.i32(i32 %5)
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %vector.ph
+ %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %6 = phi i32 [ %5, %vector.ph ], [ %13, %vector.body ]
+ %7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ]
+ %lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
+ %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
+ %9 = sub i32 %7, 4
+ %scevgep4 = getelementptr i8, i8* %b, i32 %index
+ %scevgep45 = bitcast i8* %scevgep4 to <4 x i8>*
+ %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep45, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %10 = zext <4 x i8> %wide.masked.load to <4 x i32>
+ %scevgep2 = getelementptr i8, i8* %c, i32 %index
+ %scevgep23 = bitcast i8* %scevgep2 to <4 x i8>*
+ %wide.masked.load13 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep23, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %11 = zext <4 x i8> %wide.masked.load13 to <4 x i32>
+ %12 = mul nuw nsw <4 x i32> %11, %10
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %12, <4 x i32>* %lsr.iv1, i32 4, <4 x i1> %8)
+ %index.next = add i32 %index, 4
+ %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
+ %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
+ %14 = icmp ne i32 %13, 0
+ br i1 %14, label %vector.body, label %for.cond.cleanup
+
+ for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+ }
+ declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>) #1
+ declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
+ declare void @llvm.set.loop.iterations.i32(i32) #3
+ declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+ declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
+
+...
+---
+name: start_before_elems
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+registers: []
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
+ - { reg: '$r2', virtual-reg: '' }
+ - { reg: '$r3', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 8
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 0
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: start_before_elems
+ ; CHECK: bb.0.entry:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $lr
+ ; CHECK: frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK: renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ ; CHECK: t2IT 0, 8, implicit-def $itstate
+ ; CHECK: tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK: $r12 = t2MOVr killed $r3, 14, $noreg, $noreg
+ ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 1, 14, $noreg, $noreg
+ ; CHECK: $lr = MVE_DLSTP_32 renamable $r12
+ ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ ; CHECK: bb.1.vector.body:
+ ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep45, align 1)
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep23, align 1)
+ ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv1, align 4)
+ ; CHECK: $lr = MVE_LETP renamable $lr, %bb.1
+ ; CHECK: bb.2.for.cond.cleanup:
+ ; CHECK: tPOP_RET 14, $noreg, def $r4, def $pc
+ bb.0.entry:
+ successors: %bb.1(0x80000000)
+ liveins: $r0, $r1, $r2, $r3, $r4, $lr
+
+ frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r4, -8
+ renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ t2IT 0, 8, implicit-def $itstate
+ tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ renamable $r12 = t2MOVi 3, 14, $noreg, $noreg
+ renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+ renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14, $noreg, $noreg
+ renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
+ $r12 = t2MOVr killed $r3, 14, $noreg, $noreg
+ t2DoLoopStart renamable $lr
+ renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ renamable $r12 = t2LSRri killed renamable $r12, 1, 14, $noreg, $noreg
+
+ bb.1.vector.body:
+ successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ liveins: $lr, $r0, $r1, $r2, $r3, $r12
+
+ renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep45, align 1)
+ renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep23, align 1)
+ renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ MVE_VPST 8, implicit $vpr
+ renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv1, align 4)
+ renamable $lr = t2LoopDec killed renamable $lr, 1
+ t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+ tB %bb.2, 14, $noreg
+
+ bb.2.for.cond.cleanup:
+ tPOP_RET 14, $noreg, def $r4, def $pc
+
+...
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir
new file mode 100644
index 00000000000..3fb203ee193
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/move-start-after-def.mir
@@ -0,0 +1,181 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve -run-pass=arm-low-overhead-loops --verify-machineinstrs %s -o - | FileCheck %s
+
+# Test that, although the vctp operand is defined at the end of the block,
+# the correct value is used for the dlstp.
+
+--- |
+ define dso_local arm_aapcs_vfpcc void @start_before_elems(i32* noalias nocapture %a, i8* nocapture readonly %b, i8* nocapture readonly %c, i32 %N) local_unnamed_addr #0 {
+ entry:
+ %div = lshr i32 %N, 1
+ %cmp9 = icmp eq i32 %div, 0
+ %0 = add nuw i32 %div, 3
+ %1 = lshr i32 %0, 2
+ %2 = shl nuw i32 %1, 2
+ %3 = add i32 %2, -4
+ %4 = lshr i32 %3, 2
+ %5 = add nuw nsw i32 %4, 1
+ br i1 %cmp9, label %for.cond.cleanup, label %vector.ph
+
+ vector.ph: ; preds = %entry
+ call void @llvm.set.loop.iterations.i32(i32 %5)
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %vector.ph
+ %lsr.iv = phi i32* [ %scevgep, %vector.body ], [ %a, %vector.ph ]
+ %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %6 = phi i32 [ %5, %vector.ph ], [ %13, %vector.body ]
+ %7 = phi i32 [ %div, %vector.ph ], [ %9, %vector.body ]
+ %lsr.iv1 = bitcast i32* %lsr.iv to <4 x i32>*
+ %8 = call <4 x i1> @llvm.arm.mve.vctp32(i32 %7)
+ %9 = sub i32 %7, 4
+ %scevgep4 = getelementptr i8, i8* %b, i32 %index
+ %scevgep45 = bitcast i8* %scevgep4 to <4 x i8>*
+ %wide.masked.load = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep45, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %10 = zext <4 x i8> %wide.masked.load to <4 x i32>
+ %scevgep2 = getelementptr i8, i8* %c, i32 %index
+ %scevgep23 = bitcast i8* %scevgep2 to <4 x i8>*
+ %wide.masked.load13 = call <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>* %scevgep23, i32 1, <4 x i1> %8, <4 x i8> undef)
+ %11 = zext <4 x i8> %wide.masked.load13 to <4 x i32>
+ %12 = mul nuw nsw <4 x i32> %11, %10
+ call void @llvm.masked.store.v4i32.p0v4i32(<4 x i32> %12, <4 x i32>* %lsr.iv1, i32 4, <4 x i1> %8)
+ %index.next = add i32 %index, 4
+ %scevgep = getelementptr i32, i32* %lsr.iv, i32 4
+ %13 = call i32 @llvm.loop.decrement.reg.i32.i32.i32(i32 %6, i32 1)
+ %14 = icmp ne i32 %13, 0
+ br i1 %14, label %vector.body, label %for.cond.cleanup
+
+ for.cond.cleanup: ; preds = %vector.body, %entry
+ ret void
+ }
+ declare <4 x i8> @llvm.masked.load.v4i8.p0v4i8(<4 x i8>*, i32 immarg, <4 x i1>, <4 x i8>) #1
+ declare void @llvm.masked.store.v4i32.p0v4i32(<4 x i32>, <4 x i32>*, i32 immarg, <4 x i1>) #2
+ declare void @llvm.set.loop.iterations.i32(i32) #3
+ declare i32 @llvm.loop.decrement.reg.i32.i32.i32(i32, i32) #3
+ declare <4 x i1> @llvm.arm.mve.vctp32(i32) #4
+
+...
+---
+name: start_before_elems
+alignment: 2
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+failedISel: false
+tracksRegLiveness: true
+hasWinCFI: false
+registers: []
+liveins:
+ - { reg: '$r0', virtual-reg: '' }
+ - { reg: '$r1', virtual-reg: '' }
+ - { reg: '$r2', virtual-reg: '' }
+ - { reg: '$r3', virtual-reg: '' }
+frameInfo:
+ isFrameAddressTaken: false
+ isReturnAddressTaken: false
+ hasStackMap: false
+ hasPatchPoint: false
+ stackSize: 8
+ offsetAdjustment: 0
+ maxAlignment: 4
+ adjustsStack: false
+ hasCalls: false
+ stackProtector: ''
+ maxCallFrameSize: 0
+ cvBytesOfCalleeSavedRegisters: 0
+ hasOpaqueSPAdjustment: false
+ hasVAStart: false
+ hasMustTailInVarArgFunc: false
+ localFrameSize: 0
+ savePoint: ''
+ restorePoint: ''
+fixedStack: []
+stack:
+ - { id: 0, name: '', type: spill-slot, offset: -4, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$lr', callee-saved-restored: false,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+ - { id: 1, name: '', type: spill-slot, offset: -8, size: 4, alignment: 4,
+ stack-id: default, callee-saved-register: '$r4', callee-saved-restored: true,
+ debug-info-variable: '', debug-info-expression: '', debug-info-location: '' }
+callSites: []
+constants: []
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: start_before_elems
+ ; CHECK: bb.0.entry:
+ ; CHECK: successors: %bb.1(0x80000000)
+ ; CHECK: liveins: $r0, $r1, $r2, $r3, $r4, $lr
+ ; CHECK: frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ ; CHECK: frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $lr, -4
+ ; CHECK: frame-setup CFI_INSTRUCTION offset $r4, -8
+ ; CHECK: renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ ; CHECK: t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ ; CHECK: t2IT 0, 8, implicit-def $itstate
+ ; CHECK: tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ ; CHECK: $r12 = t2MOVr killed $r3, 14, $noreg, $noreg
+ ; CHECK: renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ ; CHECK: renamable $r12 = t2LSRri killed renamable $r12, 1, 14, $noreg, $noreg
+ ; CHECK: $lr = MVE_DLSTP_32 renamable $r12
+ ; CHECK: bb.1.vector.body:
+ ; CHECK: successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ ; CHECK: liveins: $lr, $r0, $r1, $r2, $r3, $r12
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ ; CHECK: renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep45, align 1)
+ ; CHECK: renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ ; CHECK: renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ ; CHECK: renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ ; CHECK: renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 0, $noreg :: (load 4 from %ir.scevgep23, align 1)
+ ; CHECK: renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ ; CHECK: renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 0, killed $noreg :: (store 16 into %ir.lsr.iv1, align 4)
+ ; CHECK: $lr = MVE_LETP renamable $lr, %bb.1
+ ; CHECK: bb.2.for.cond.cleanup:
+ ; CHECK: tPOP_RET 14, $noreg, def $r4, def $pc
+ bb.0.entry:
+ successors: %bb.1(0x80000000)
+ liveins: $r0, $r1, $r2, $r3, $r4, $lr
+
+ frame-setup tPUSH 14, $noreg, killed $r4, killed $lr, implicit-def $sp, implicit $sp
+ frame-setup CFI_INSTRUCTION def_cfa_offset 8
+ frame-setup CFI_INSTRUCTION offset $lr, -4
+ frame-setup CFI_INSTRUCTION offset $r4, -8
+ renamable $r12 = t2MOVi 0, 14, $noreg, $noreg
+ t2CMPrs killed renamable $r12, renamable $r3, 11, 14, $noreg, implicit-def $cpsr
+ t2IT 0, 8, implicit-def $itstate
+ tPOP_RET 0, killed $cpsr, def $r4, def $pc, implicit killed $itstate
+ renamable $r12 = t2MOVi 3, 14, $noreg, $noreg
+ renamable $lr = t2MOVi 1, 14, $noreg, $noreg
+ renamable $r12 = nuw t2ADDrs killed renamable $r12, renamable $r3, 11, 14, $noreg, $noreg
+ renamable $r12 = t2BICri killed renamable $r12, 3, 14, $noreg, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ renamable $lr = nuw nsw t2ADDrs killed renamable $lr, killed renamable $r12, 19, 14, $noreg, $noreg
+ t2DoLoopStart renamable $lr
+ $r12 = t2MOVr killed $r3, 14, $noreg, $noreg
+ renamable $r3, dead $cpsr = tMOVi8 0, 14, $noreg
+ renamable $r12 = t2LSRri killed renamable $r12, 1, 14, $noreg, $noreg
+
+ bb.1.vector.body:
+ successors: %bb.1(0x7c000000), %bb.2(0x04000000)
+ liveins: $lr, $r0, $r1, $r2, $r3, $r12
+
+ renamable $r4, dead $cpsr = tADDrr renamable $r1, renamable $r3, 14, $noreg
+ renamable $vpr = MVE_VCTP32 renamable $r12, 0, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q0 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep45, align 1)
+ renamable $r4, dead $cpsr = tADDrr renamable $r2, renamable $r3, 14, $noreg
+ renamable $r3, dead $cpsr = tADDi8 killed renamable $r3, 4, 14, $noreg
+ renamable $r12 = t2SUBri killed renamable $r12, 4, 14, $noreg, $noreg
+ MVE_VPST 8, implicit $vpr
+ renamable $q1 = MVE_VLDRBU32 killed renamable $r4, 0, 1, renamable $vpr :: (load 4 from %ir.scevgep23, align 1)
+ renamable $q0 = nuw nsw MVE_VMULi32 killed renamable $q1, killed renamable $q0, 0, $noreg, undef renamable $q0
+ MVE_VPST 8, implicit $vpr
+ renamable $r0 = MVE_VSTRWU32_post killed renamable $q0, killed renamable $r0, 16, 1, killed renamable $vpr :: (store 16 into %ir.lsr.iv1, align 4)
+ renamable $lr = t2LoopDec killed renamable $lr, 1
+ t2LoopEnd renamable $lr, %bb.1, implicit-def dead $cpsr
+ tB %bb.2, 14, $noreg
+
+ bb.2.for.cond.cleanup:
+ tPOP_RET 14, $noreg, def $r4, def $pc
+
+...
diff --git a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
index c052e22d217..5f4a1024968 100644
--- a/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
+++ b/llvm/test/CodeGen/Thumb2/LowOverheadLoops/unsafe-cpsr-loop-use.mir
@@ -142,6 +142,7 @@ body: |
t2IT 2, 8, implicit-def $itstate
renamable $r3 = tLSRri $noreg, killed renamable $r3, 1, 2, killed $cpsr, implicit renamable $r3, implicit killed $itstate
early-clobber renamable $r0 = t2STR_PRE killed renamable $r3, killed renamable $r0, 4, 14, $noreg :: (store 4 into %ir.scevgep4)
+ renamable $lr = tMOVr $lr, 14, $noreg
t2LoopEnd killed renamable $lr, %bb.1, implicit-def dead $cpsr
tB %bb.2, 14, $noreg