author     Alexander Ivchenko <alexander.ivchenko@intel.com>  2018-02-14 15:55:24 +0000
committer  Alexander Ivchenko <alexander.ivchenko@intel.com>  2018-02-14 15:55:24 +0000
commit     7e5d525bd58dd6ec65f08ef6e2782fe1269db57f (patch)
tree       ed0947f120cfd95e6aa2ee2d0225c08e37c2ce34 /llvm/test/CodeGen/X86/vmaskmov-offset.ll
parent     7f246e003a9a9916b9af00ddf6d12a5288a7803f (diff)
[SelectionDAG][X86] Fix incorrect offset generated for VMASKMOV
When creating the high MachineMemOperand for MSTORE/MLOAD, we supplied it with the original PointerInfo even though the pointer itself had been incremented. The patch adds the proper offset to the PointerInfo.

llvm-svn: 325135
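The fix itself lives in the SelectionDAG masked load/store splitting code rather than in this test. As a rough, hedged sketch of the idea (names such as MLD, LoMemVT, HiMemVT, dl and DAG are illustrative assumptions, not lifted from the patch, and the surrounding type-legalizer plumbing is omitted): once the base pointer is advanced past the low half, the MachinePointerInfo used for the high half's MachineMemOperand has to be advanced by the same byte offset via MachinePointerInfo::getWithOffset.

    // Illustrative fragment only (2018-era API); not the verbatim patch.
    SDValue Ptr = MLD->getBasePtr();
    unsigned IncrementSize = LoMemVT.getStoreSize();   // bytes covered by the low half

    // The pointer for the high half is advanced past the low half...
    Ptr = DAG.getNode(ISD::ADD, dl, Ptr.getValueType(), Ptr,
                      DAG.getConstant(IncrementSize, dl, Ptr.getValueType()));

    // ...so the PointerInfo recorded in the high MachineMemOperand must be
    // advanced as well. Reusing MLD->getPointerInfo() unmodified (the old
    // behaviour) claimed that both halves start at offset 0.
    MachineMemOperand *HiMMO = DAG.getMachineFunction().getMachineMemOperand(
        MLD->getPointerInfo().getWithOffset(IncrementSize),
        MachineMemOperand::MOLoad, HiMemVT.getStoreSize(),
        MLD->getMemOperand()->getAlignment());

With the offset recorded, the MIR printer emits "load 32 from %ir.stack_input_vec + 32" and "store 32 into %ir.stack_output_vec + 32" for the high halves, which is exactly what the updated CHECK lines below expect.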
Diffstat (limited to 'llvm/test/CodeGen/X86/vmaskmov-offset.ll')
-rw-r--r--  llvm/test/CodeGen/X86/vmaskmov-offset.ll | 8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vmaskmov-offset.ll b/llvm/test/CodeGen/X86/vmaskmov-offset.ll
index 074506f10eb..a8dbbd0c165 100644
--- a/llvm/test/CodeGen/X86/vmaskmov-offset.ll
+++ b/llvm/test/CodeGen/X86/vmaskmov-offset.ll
@@ -9,8 +9,8 @@ define void @test_v16f() local_unnamed_addr {
; CHECK: bb.0.bb:
; CHECK: [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
; CHECK: [[VMASKMOVPSYrm:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
- ; CHECK: [[VMASKMOVPSYrm1:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
- ; CHECK: VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPSYrm1]] :: (store 32 into %ir.stack_output_vec, align 4)
+ ; CHECK: [[VMASKMOVPSYrm1:%[0-9]+]]:vr256 = VMASKMOVPSYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec + 32, align 4)
+ ; CHECK: VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPSYrm1]] :: (store 32 into %ir.stack_output_vec + 32, align 4)
; CHECK: VMASKMOVPSYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPSYrm]] :: (store 32 into %ir.stack_output_vec, align 4)
; CHECK: RET 0
bb:
@@ -29,8 +29,8 @@ define void @test_v8d() local_unnamed_addr {
; CHECK: bb.0.bb:
; CHECK: [[AVX_SET0_:%[0-9]+]]:vr256 = AVX_SET0
; CHECK: [[VMASKMOVPDYrm:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 0, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
- ; CHECK: [[VMASKMOVPDYrm1:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec, align 4)
- ; CHECK: VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPDYrm1]] :: (store 32 into %ir.stack_output_vec, align 4)
+ ; CHECK: [[VMASKMOVPDYrm1:%[0-9]+]]:vr256 = VMASKMOVPDYrm [[AVX_SET0_]], %stack.0.stack_input_vec, 1, $noreg, 32, $noreg :: (load 32 from %ir.stack_input_vec + 32, align 4)
+ ; CHECK: VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 32, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPDYrm1]] :: (store 32 into %ir.stack_output_vec + 32, align 4)
; CHECK: VMASKMOVPDYmr %stack.1.stack_output_vec, 1, $noreg, 0, $noreg, [[AVX_SET0_]], killed [[VMASKMOVPDYrm]] :: (store 32 into %ir.stack_output_vec, align 4)
; CHECK: RET 0
bb: