author     Craig Topper <craig.topper@intel.com>  2018-01-18 07:44:09 +0000
committer  Craig Topper <craig.topper@intel.com>  2018-01-18 07:44:09 +0000
commit     83b0a98902077b2bc95bde4162e0f210d4b50f61 (patch)
tree       eb9c0decf356cb0b007a2cdff44d6d5bd462cf2c /llvm/test/CodeGen/X86/avg.ll
parent     21c8a8fa499c2e5dcafeb82ab2f438e41abe7cb7 (diff)
[X86] Use vmovdqu64/vmovdqa64 for unmasked integer vector stores for consistency with loads.
Previously we used 64 for vXi64 stores and 32 for everything else. This change uses 64 for everything, just like we already do for loads.

llvm-svn: 322820
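For illustration only (a sketch, not part of this commit or of avg.ll): the IR below shows the kind of unmasked 512-bit integer store the change affects. The function name is hypothetical, and the stated instruction selection assumes compiling with something like llc -mtriple=x86_64-- -mattr=+avx512f.

; Hypothetical example: an unmasked 512-bit integer store whose element
; type is not i64. With no mask register involved, vmovdqu32 and
; vmovdqu64 store the same 512 bits, so the mnemonic is purely a
; consistency choice with the corresponding load.
define void @store_v16i32(<16 x i32> %v, <16 x i32>* %p) nounwind {
  ; Under-aligned for a zmm store, so an unaligned move is selected:
  ; vmovdqu32 before this patch, vmovdqu64 after it.
  store <16 x i32> %v, <16 x i32>* %p, align 4
  ret void
}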
Diffstat (limited to 'llvm/test/CodeGen/X86/avg.ll')
-rw-r--r--  llvm/test/CodeGen/X86/avg.ll | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index dd11f6ca293..8e1e5f3b5ca 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -606,7 +606,7 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
@@ -790,7 +790,7 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a
@@ -998,7 +998,7 @@ define void @avg_v64i8_2(<64 x i8>* %a, <64 x i8>* %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
; AVX512BW-NEXT: vpavgb %zmm0, %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
@@ -1183,7 +1183,7 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a
@@ -1373,7 +1373,7 @@ define void @avg_v64i8_const(<64 x i8>* %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgb {{.*}}(%rip), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <64 x i8>, <64 x i8>* %a
@@ -1539,7 +1539,7 @@ define void @avg_v32i16_const(<32 x i16>* %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
; AVX512BW-NEXT: vpavgw {{.*}}(%rip), %zmm0, %zmm0
-; AVX512BW-NEXT: vmovdqu32 %zmm0, (%rax)
+; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
%1 = load <32 x i16>, <32 x i16>* %a