diff options
author | Craig Topper <craig.topper@intel.com> | 2019-03-09 02:08:41 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@intel.com> | 2019-03-09 02:08:41 +0000 |
commit | 69f8c1653d2fc4e65c156d1938c992af3dfb8ce1 (patch) | |
tree | d9a5dd1f6e8b56e20d68b227c10c2745420bf6f5 /llvm/test/CodeGen/X86 | |
parent | 08ae3e0f0b5daf98c0dbb25d92691dbc37d25b96 (diff) | |
download | bcm5719-llvm-69f8c1653d2fc4e65c156d1938c992af3dfb8ce1.tar.gz bcm5719-llvm-69f8c1653d2fc4e65c156d1938c992af3dfb8ce1.zip |
[ScalarizeMaskedMemIntrin] Use IRBuilder functions that take uint32_t/uint64_t for getelementptr, extractelement, and insertelement.
This saves needing to call getInt32 ourselves. Making the code a little shorter.
The test changes are because insert/extract use getInt64 internally. Shouldn't be a functional issue.
This is cleanup because I plan to write similar code for expandload/compressstore.
llvm-svn: 355767
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r-- | llvm/test/CodeGen/X86/masked_gather_scatter.ll | 18 |
1 file changed, 9 insertions, 9 deletions
diff --git a/llvm/test/CodeGen/X86/masked_gather_scatter.ll b/llvm/test/CodeGen/X86/masked_gather_scatter.ll index 5e6d88ad6f5..9406125d70d 100644 --- a/llvm/test/CodeGen/X86/masked_gather_scatter.ll +++ b/llvm/test/CodeGen/X86/masked_gather_scatter.ll @@ -69,7 +69,7 @@ declare <8 x i32> @llvm.masked.gather.v8i32.v8p0i32(<8 x i32*> , i32, <8 x i1> , ; SCALAR-NEXT: br label %else ; SCALAR: else: ; SCALAR-NEXT: %res.phi.else = phi -; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1 +; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i64 1 ; SCALAR-NEXT: br i1 %Mask1, label %cond.load1, label %else2 define <16 x float> @test2(float* %base, <16 x i32> %ind, i16 %mask) { @@ -211,15 +211,15 @@ define <16 x i32> @test4(i32* %base, <16 x i32> %ind, i16 %mask) { ; SCALAR-LABEL: test5 -; SCALAR: %Mask0 = extractelement <16 x i1> %imask, i32 0 +; SCALAR: %Mask0 = extractelement <16 x i1> %imask, i64 0 ; SCALAR-NEXT: br i1 %Mask0, label %cond.store, label %else ; SCALAR: cond.store: -; SCALAR-NEXT: %Elt0 = extractelement <16 x i32> %val, i32 0 -; SCALAR-NEXT: %Ptr0 = extractelement <16 x i32*> %gep.random, i32 0 +; SCALAR-NEXT: %Elt0 = extractelement <16 x i32> %val, i64 0 +; SCALAR-NEXT: %Ptr0 = extractelement <16 x i32*> %gep.random, i64 0 ; SCALAR-NEXT: store i32 %Elt0, i32* %Ptr0, align 4 ; SCALAR-NEXT: br label %else ; SCALAR: else: -; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i32 1 +; SCALAR-NEXT: %Mask1 = extractelement <16 x i1> %imask, i64 1 ; SCALAR-NEXT: br i1 %Mask1, label %cond.store1, label %else2 define void @test5(i32* %base, <16 x i32> %ind, i16 %mask, <16 x i32>%val) { @@ -277,11 +277,11 @@ declare void @llvm.masked.scatter.v16i32.v16p0i32(<16 x i32> , <16 x i32*> , i32 ; SCALAR-LABEL: test6 ; SCALAR: store i32 %Elt0, i32* %Ptr01, align 4 -; SCALAR-NEXT: %Elt1 = extractelement <8 x i32> %a1, i32 1 -; SCALAR-NEXT: %Ptr12 = extractelement <8 x i32*> %ptr, i32 1 +; SCALAR-NEXT: %Elt1 = extractelement <8 x i32> %a1, i64 1 +; 
SCALAR-NEXT: %Ptr12 = extractelement <8 x i32*> %ptr, i64 1 ; SCALAR-NEXT: store i32 %Elt1, i32* %Ptr12, align 4 -; SCALAR-NEXT: %Elt2 = extractelement <8 x i32> %a1, i32 2 -; SCALAR-NEXT: %Ptr23 = extractelement <8 x i32*> %ptr, i32 2 +; SCALAR-NEXT: %Elt2 = extractelement <8 x i32> %a1, i64 2 +; SCALAR-NEXT: %Ptr23 = extractelement <8 x i32*> %ptr, i64 2 ; SCALAR-NEXT: store i32 %Elt2, i32* %Ptr23, align 4 define <8 x i32> @test6(<8 x i32>%a1, <8 x i32*> %ptr) { |