| author | Nirav Dave <niravd@google.com> | 2018-03-17 19:24:54 +0000 |
|---|---|---|
| committer | Nirav Dave <niravd@google.com> | 2018-03-17 19:24:54 +0000 |
| commit | 5f0ab71b623fc5ff5734b7e95ae1d432acddba05 | |
| tree | 7c9b77508035c2e04d8d1642cfe23243cba01d5b /llvm/test/CodeGen | |
| parent | 982d3a56ea055d6f2446209e1d8e0bfcee88aed8 | |
Revert "[DAG, X86] Revert r327197 "Revert r327170, r327171, r327172""
as it times out building the test-suite on PPC.
llvm-svn: 327778
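Judging from the tests it touches, the reverted work concerned how the X86 backend fuses a load, an ALU operation, and a store into a single memory-operand instruction; the deleted pr36274.ll test further down describes the failure mode being guarded against (a missed dependence through the carry input of an adcl, which creates a cycle). As a rough illustration only, with a hypothetical function name that is not part of this commit, this is the basic read-modify-write shape that load-op-store fusion targets:

```llvm
; Hypothetical example (not from this commit): a plain read-modify-write.
; On x86-64, load-op-store fusion can lower the load/add/store sequence
; into a single memory-operand instruction such as `addq %rsi, (%rdi)`.
define void @rmw_add_i64(i64* %p, i64 %v) nounwind {
  %old = load i64, i64* %p, align 8
  %sum = add i64 %old, %v
  store i64 %sum, i64* %p, align 8
  ret void
}
```

The i256-add.ll and avg.ll hunks in the diff below show the practical effect of the revert on such patterns: loads, arithmetic, and stores end up scheduled differently, while the computed results are unchanged.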
Diffstat (limited to 'llvm/test/CodeGen')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/X86/avg.ll | 114 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx-vbroadcastf128.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx2-vbroadcast.ll | 191 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll | 6 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll | 9 |
| -rw-r--r-- | llvm/test/CodeGen/X86/i256-add.ll | 145 |
| -rw-r--r-- | llvm/test/CodeGen/X86/masked_memop.ll | 3 |
| -rw-r--r-- | llvm/test/CodeGen/X86/merge-consecutive-stores.ll | 9 |
| -rw-r--r-- | llvm/test/CodeGen/X86/nontemporal.ll | 82 |
| -rw-r--r-- | llvm/test/CodeGen/X86/pr36274.ll | 33 |
| -rw-r--r-- | llvm/test/CodeGen/X86/pr36312.ll | 35 |
| -rw-r--r-- | llvm/test/CodeGen/X86/required-vector-width.ll | 10 |
| -rw-r--r-- | llvm/test/CodeGen/X86/store_op_load_fold2.ll | 12 |
| -rw-r--r-- | llvm/test/CodeGen/X86/subvector-broadcast.ll | 48 |
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll | 3 |
15 files changed, 383 insertions, 323 deletions
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll index 3c69728a11b..0439c87ef4d 100644 --- a/llvm/test/CodeGen/X86/avg.ll +++ b/llvm/test/CodeGen/X86/avg.ll @@ -90,12 +90,12 @@ define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind { define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v32i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa (%rsi), %xmm0 -; SSE2-NEXT: movdqa 16(%rsi), %xmm1 -; SSE2-NEXT: pavgb (%rdi), %xmm0 -; SSE2-NEXT: pavgb 16(%rdi), %xmm1 -; SSE2-NEXT: movdqu %xmm1, (%rax) +; SSE2-NEXT: movdqa 16(%rdi), %xmm0 +; SSE2-NEXT: movdqa (%rsi), %xmm1 +; SSE2-NEXT: pavgb (%rdi), %xmm1 +; SSE2-NEXT: pavgb 16(%rsi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) +; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i8: @@ -528,18 +528,18 @@ define void @avg_v48i8(<48 x i8>* %a, <48 x i8>* %b) nounwind { define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v64i8: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa (%rsi), %xmm0 -; SSE2-NEXT: movdqa 16(%rsi), %xmm1 -; SSE2-NEXT: movdqa 32(%rsi), %xmm2 +; SSE2-NEXT: movdqa 32(%rdi), %xmm0 +; SSE2-NEXT: movdqa (%rsi), %xmm1 +; SSE2-NEXT: movdqa 16(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 -; SSE2-NEXT: pavgb (%rdi), %xmm0 -; SSE2-NEXT: pavgb 16(%rdi), %xmm1 -; SSE2-NEXT: pavgb 32(%rdi), %xmm2 +; SSE2-NEXT: pavgb (%rdi), %xmm1 +; SSE2-NEXT: pavgb 16(%rdi), %xmm2 +; SSE2-NEXT: pavgb 32(%rsi), %xmm0 ; SSE2-NEXT: pavgb 48(%rdi), %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) +; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) -; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v64i8: @@ -565,23 +565,23 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind { ; ; AVX2-LABEL: avg_v64i8: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa (%rsi), %ymm0 -; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 -; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0 -; AVX2-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1 -; AVX2-NEXT: vmovdqu %ymm1, (%rax) +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX2-NEXT: vmovdqa (%rsi), %ymm1 +; AVX2-NEXT: vpavgb (%rdi), %ymm1, %ymm1 +; AVX2-NEXT: vpavgb 32(%rsi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) +; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v64i8: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 -; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0 -; AVX512F-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1 -; AVX512F-NEXT: vmovdqu %ymm1, (%rax) +; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX512F-NEXT: vmovdqa (%rsi), %ymm1 +; AVX512F-NEXT: vpavgb (%rdi), %ymm1, %ymm1 +; AVX512F-NEXT: vpavgb 32(%rsi), %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) +; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -661,12 +661,12 @@ define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind { define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v16i16: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa (%rsi), %xmm0 -; SSE2-NEXT: movdqa 16(%rsi), %xmm1 -; SSE2-NEXT: pavgw (%rdi), %xmm0 -; SSE2-NEXT: pavgw 16(%rdi), %xmm1 -; SSE2-NEXT: movdqu %xmm1, (%rax) +; SSE2-NEXT: movdqa 16(%rdi), %xmm0 +; SSE2-NEXT: movdqa (%rsi), %xmm1 +; SSE2-NEXT: pavgw (%rdi), %xmm1 +; SSE2-NEXT: pavgw 16(%rsi), %xmm0 ; SSE2-NEXT: movdqu %xmm0, (%rax) +; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v16i16: @@ -712,18 +712,18 @@ define void @avg_v16i16(<16 x 
i16>* %a, <16 x i16>* %b) nounwind { define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v32i16: ; SSE2: # %bb.0: -; SSE2-NEXT: movdqa (%rsi), %xmm0 -; SSE2-NEXT: movdqa 16(%rsi), %xmm1 -; SSE2-NEXT: movdqa 32(%rsi), %xmm2 +; SSE2-NEXT: movdqa 32(%rdi), %xmm0 +; SSE2-NEXT: movdqa (%rsi), %xmm1 +; SSE2-NEXT: movdqa 16(%rsi), %xmm2 ; SSE2-NEXT: movdqa 48(%rsi), %xmm3 -; SSE2-NEXT: pavgw (%rdi), %xmm0 -; SSE2-NEXT: pavgw 16(%rdi), %xmm1 -; SSE2-NEXT: pavgw 32(%rdi), %xmm2 +; SSE2-NEXT: pavgw (%rdi), %xmm1 +; SSE2-NEXT: pavgw 16(%rdi), %xmm2 +; SSE2-NEXT: pavgw 32(%rsi), %xmm0 ; SSE2-NEXT: pavgw 48(%rdi), %xmm3 ; SSE2-NEXT: movdqu %xmm3, (%rax) +; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: movdqu %xmm2, (%rax) ; SSE2-NEXT: movdqu %xmm1, (%rax) -; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq ; ; AVX1-LABEL: avg_v32i16: @@ -749,23 +749,23 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; ; AVX2-LABEL: avg_v32i16: ; AVX2: # %bb.0: -; AVX2-NEXT: vmovdqa (%rsi), %ymm0 -; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 -; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0 -; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 -; AVX2-NEXT: vmovdqu %ymm1, (%rax) +; AVX2-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX2-NEXT: vmovdqa (%rsi), %ymm1 +; AVX2-NEXT: vpavgw (%rdi), %ymm1, %ymm1 +; AVX2-NEXT: vpavgw 32(%rsi), %ymm0, %ymm0 ; AVX2-NEXT: vmovdqu %ymm0, (%rax) +; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vzeroupper ; AVX2-NEXT: retq ; ; AVX512F-LABEL: avg_v32i16: ; AVX512F: # %bb.0: -; AVX512F-NEXT: vmovdqa (%rsi), %ymm0 -; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 -; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0 -; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 -; AVX512F-NEXT: vmovdqu %ymm1, (%rax) +; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm0 +; AVX512F-NEXT: vmovdqa (%rsi), %ymm1 +; AVX512F-NEXT: vpavgw (%rdi), %ymm1, %ymm1 +; AVX512F-NEXT: vpavgw 32(%rsi), %ymm0, %ymm0 ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) +; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; @@ -874,9 +874,9 @@ define void @avg_v32i8_2(<32 x i8>* %a, <32 x i8>* %b) nounwind { ; SSE2-LABEL: avg_v32i8_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 -; SSE2-NEXT: movdqa 16(%rdi), %xmm1 +; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pavgb (%rsi), %xmm0 -; SSE2-NEXT: pavgb 16(%rsi), %xmm1 +; SSE2-NEXT: pavgb 16(%rdi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq @@ -1055,9 +1055,9 @@ define void @avg_v16i16_2(<16 x i16>* %a, <16 x i16>* %b) nounwind { ; SSE2-LABEL: avg_v16i16_2: ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 -; SSE2-NEXT: movdqa 16(%rdi), %xmm1 +; SSE2-NEXT: movdqa 16(%rsi), %xmm1 ; SSE2-NEXT: pavgw (%rsi), %xmm0 -; SSE2-NEXT: pavgw 16(%rsi), %xmm1 +; SSE2-NEXT: pavgw 16(%rdi), %xmm1 ; SSE2-NEXT: movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq @@ -1107,14 +1107,14 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; SSE2: # %bb.0: ; SSE2-NEXT: movdqa (%rdi), %xmm0 ; SSE2-NEXT: movdqa 16(%rdi), %xmm1 -; SSE2-NEXT: movdqa 32(%rdi), %xmm2 -; SSE2-NEXT: movdqa 48(%rdi), %xmm3 +; SSE2-NEXT: movdqa 48(%rdi), %xmm2 +; SSE2-NEXT: movdqa 32(%rsi), %xmm3 ; SSE2-NEXT: pavgw (%rsi), %xmm0 ; SSE2-NEXT: pavgw 16(%rsi), %xmm1 -; SSE2-NEXT: pavgw 32(%rsi), %xmm2 -; SSE2-NEXT: pavgw 48(%rsi), %xmm3 -; SSE2-NEXT: movdqu %xmm3, (%rax) +; SSE2-NEXT: pavgw 32(%rdi), %xmm3 +; SSE2-NEXT: pavgw 48(%rsi), %xmm2 ; SSE2-NEXT: movdqu %xmm2, (%rax) +; SSE2-NEXT: movdqu %xmm3, (%rax) ; SSE2-NEXT: 
movdqu %xmm1, (%rax) ; SSE2-NEXT: movdqu %xmm0, (%rax) ; SSE2-NEXT: retq @@ -1143,9 +1143,9 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; AVX2-LABEL: avg_v32i16_2: ; AVX2: # %bb.0: ; AVX2-NEXT: vmovdqa (%rdi), %ymm0 -; AVX2-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX2-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0 -; AVX2-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 +; AVX2-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 ; AVX2-NEXT: vmovdqu %ymm1, (%rax) ; AVX2-NEXT: vmovdqu %ymm0, (%rax) ; AVX2-NEXT: vzeroupper @@ -1154,9 +1154,9 @@ define void @avg_v32i16_2(<32 x i16>* %a, <32 x i16>* %b) nounwind { ; AVX512F-LABEL: avg_v32i16_2: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rdi), %ymm0 -; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1 +; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1 ; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0 -; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1 +; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1 ; AVX512F-NEXT: vmovdqu %ymm1, (%rax) ; AVX512F-NEXT: vmovdqu %ymm0, (%rax) ; AVX512F-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll index b5026437153..7fdbf31a993 100644 --- a/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll +++ b/llvm/test/CodeGen/X86/avx-vbroadcastf128.ll @@ -235,16 +235,18 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) { ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: vmovaps (%ecx), %xmm0 ; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vmovaps %ymm1, (%eax) +; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: PR29088: ; X64: # %bb.0: +; X64-NEXT: vmovaps (%rdi), %xmm0 ; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: vmovaps %ymm1, (%rsi) +; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-NEXT: retq %ld = load <4 x i32>, <4 x i32>* %p0 store <8 x float> zeroinitializer, <8 x float>* %p1 diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll index 3ae6c0b9d81..528dfcd6f8d 100644 --- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll @@ -1065,7 +1065,9 @@ define void @isel_crash_16b(i8* %cV_R.addr) { ; X64: ## %bb.0: ## %eintry ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vpbroadcastb (%rdi), %xmm1 +; X64-NEXT: movb (%rdi), %al +; X64-NEXT: vmovd %eax, %xmm1 +; X64-NEXT: vpbroadcastb %xmm1, %xmm1 ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) ; X64-NEXT: retq @@ -1116,7 +1118,9 @@ define void @isel_crash_32b(i8* %cV_R.addr) { ; X64-NEXT: subq $128, %rsp ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: vmovaps %ymm0, (%rsp) -; X64-NEXT: vpbroadcastb (%rdi), %ymm1 +; X64-NEXT: movb (%rdi), %al +; X64-NEXT: vmovd %eax, %xmm1 +; X64-NEXT: vpbroadcastb %xmm1, %ymm1 ; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) ; X64-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) ; X64-NEXT: movq %rbp, %rsp @@ -1156,7 +1160,9 @@ define void @isel_crash_8w(i16* %cV_R.addr) { ; X64: ## %bb.0: ## %entry ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vpbroadcastw (%rdi), %xmm1 +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: vmovd %eax, %xmm1 +; X64-NEXT: vpbroadcastw %xmm1, %xmm1 ; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) ; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) ; 
X64-NEXT: retq @@ -1207,7 +1213,9 @@ define void @isel_crash_16w(i16* %cV_R.addr) { ; X64-NEXT: subq $128, %rsp ; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X64-NEXT: vmovaps %ymm0, (%rsp) -; X64-NEXT: vpbroadcastw (%rdi), %ymm1 +; X64-NEXT: movzwl (%rdi), %eax +; X64-NEXT: vmovd %eax, %xmm1 +; X64-NEXT: vpbroadcastw %xmm1, %ymm1 ; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) ; X64-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) ; X64-NEXT: movq %rbp, %rsp @@ -1243,14 +1251,26 @@ define void @isel_crash_4d(i32* %cV_R.addr) { ; X32-NEXT: addl $60, %esp ; X32-NEXT: retl ; -; X64-LABEL: isel_crash_4d: -; X64: ## %bb.0: ## %entry -; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vbroadcastss (%rdi), %xmm1 -; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vmovaps %xmm1, -{{[0-9]+}}(%rsp) -; X64-NEXT: retq +; X64-AVX2-LABEL: isel_crash_4d: +; X64-AVX2: ## %bb.0: ## %entry +; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: movl (%rdi), %eax +; X64-AVX2-NEXT: vmovd %eax, %xmm1 +; X64-AVX2-NEXT: vpbroadcastd %xmm1, %xmm1 +; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: retq +; +; X64-AVX512VL-LABEL: isel_crash_4d: +; X64-AVX512VL: ## %bb.0: ## %entry +; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: movl (%rdi), %eax +; X64-AVX512VL-NEXT: vpbroadcastd %eax, %xmm1 +; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: retq entry: %__a.addr.i = alloca <2 x i64>, align 16 %__b.addr.i = alloca <2 x i64>, align 16 @@ -1287,24 +1307,46 @@ define void @isel_crash_8d(i32* %cV_R.addr) { ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; -; X64-LABEL: isel_crash_8d: -; X64: ## %bb.0: ## %eintry -; X64-NEXT: pushq %rbp -; X64-NEXT: .cfi_def_cfa_offset 16 -; X64-NEXT: .cfi_offset %rbp, -16 -; X64-NEXT: movq %rsp, %rbp -; X64-NEXT: .cfi_def_cfa_register %rbp -; X64-NEXT: andq $-32, %rsp -; X64-NEXT: subq $128, %rsp -; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; X64-NEXT: vmovaps %ymm0, (%rsp) -; X64-NEXT: vbroadcastss (%rdi), %ymm1 -; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) -; X64-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) -; X64-NEXT: movq %rbp, %rsp -; X64-NEXT: popq %rbp -; X64-NEXT: vzeroupper -; X64-NEXT: retq +; X64-AVX2-LABEL: isel_crash_8d: +; X64-AVX2: ## %bb.0: ## %eintry +; X64-AVX2-NEXT: pushq %rbp +; X64-AVX2-NEXT: .cfi_def_cfa_offset 16 +; X64-AVX2-NEXT: .cfi_offset %rbp, -16 +; X64-AVX2-NEXT: movq %rsp, %rbp +; X64-AVX2-NEXT: .cfi_def_cfa_register %rbp +; X64-AVX2-NEXT: andq $-32, %rsp +; X64-AVX2-NEXT: subq $128, %rsp +; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vmovaps %ymm0, (%rsp) +; X64-AVX2-NEXT: movl (%rdi), %eax +; X64-AVX2-NEXT: vmovd %eax, %xmm1 +; X64-AVX2-NEXT: vpbroadcastd %xmm1, %ymm1 +; X64-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: movq %rbp, %rsp +; X64-AVX2-NEXT: popq %rbp +; X64-AVX2-NEXT: vzeroupper +; X64-AVX2-NEXT: retq +; +; X64-AVX512VL-LABEL: isel_crash_8d: +; X64-AVX512VL: ## %bb.0: ## %eintry +; X64-AVX512VL-NEXT: pushq %rbp +; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16 +; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16 +; X64-AVX512VL-NEXT: movq %rsp, %rbp +; X64-AVX512VL-NEXT: .cfi_def_cfa_register %rbp +; X64-AVX512VL-NEXT: andq $-32, %rsp +; X64-AVX512VL-NEXT: subq $128, %rsp +; 
X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX512VL-NEXT: vmovaps %ymm0, (%rsp) +; X64-AVX512VL-NEXT: movl (%rdi), %eax +; X64-AVX512VL-NEXT: vpbroadcastd %eax, %ymm1 +; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: movq %rbp, %rsp +; X64-AVX512VL-NEXT: popq %rbp +; X64-AVX512VL-NEXT: vzeroupper +; X64-AVX512VL-NEXT: retq eintry: %__a.addr.i = alloca <4 x i64>, align 16 %__b.addr.i = alloca <4 x i64>, align 16 @@ -1328,20 +1370,33 @@ define void @isel_crash_2q(i64* %cV_R.addr) { ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0 ; X32-NEXT: vmovaps %xmm0, (%esp) -; X32-NEXT: vpbroadcastq (%eax), %xmm1 +; X32-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero +; X32-NEXT: vpbroadcastq %xmm1, %xmm1 ; X32-NEXT: vmovaps %xmm0, {{[0-9]+}}(%esp) ; X32-NEXT: vmovdqa %xmm1, {{[0-9]+}}(%esp) ; X32-NEXT: addl $60, %esp ; X32-NEXT: retl ; -; X64-LABEL: isel_crash_2q: -; X64: ## %bb.0: ## %entry -; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vpbroadcastq (%rdi), %xmm1 -; X64-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) -; X64-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) -; X64-NEXT: retq +; X64-AVX2-LABEL: isel_crash_2q: +; X64-AVX2: ## %bb.0: ## %entry +; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: movq (%rdi), %rax +; X64-AVX2-NEXT: vmovq %rax, %xmm1 +; X64-AVX2-NEXT: vpbroadcastq %xmm1, %xmm1 +; X64-AVX2-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: retq +; +; X64-AVX512VL-LABEL: isel_crash_2q: +; X64-AVX512VL: ## %bb.0: ## %entry +; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: movq (%rdi), %rax +; X64-AVX512VL-NEXT: vpbroadcastq %rax, %xmm1 +; X64-AVX512VL-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: vmovdqa %xmm1, -{{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: retq entry: %__a.addr.i = alloca <2 x i64>, align 16 %__b.addr.i = alloca <2 x i64>, align 16 @@ -1378,24 +1433,46 @@ define void @isel_crash_4q(i64* %cV_R.addr) { ; X32-NEXT: vzeroupper ; X32-NEXT: retl ; -; X64-LABEL: isel_crash_4q: -; X64: ## %bb.0: ## %eintry -; X64-NEXT: pushq %rbp -; X64-NEXT: .cfi_def_cfa_offset 16 -; X64-NEXT: .cfi_offset %rbp, -16 -; X64-NEXT: movq %rsp, %rbp -; X64-NEXT: .cfi_def_cfa_register %rbp -; X64-NEXT: andq $-32, %rsp -; X64-NEXT: subq $128, %rsp -; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0 -; X64-NEXT: vmovaps %ymm0, (%rsp) -; X64-NEXT: vbroadcastsd (%rdi), %ymm1 -; X64-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) -; X64-NEXT: vmovaps %ymm1, {{[0-9]+}}(%rsp) -; X64-NEXT: movq %rbp, %rsp -; X64-NEXT: popq %rbp -; X64-NEXT: vzeroupper -; X64-NEXT: retq +; X64-AVX2-LABEL: isel_crash_4q: +; X64-AVX2: ## %bb.0: ## %eintry +; X64-AVX2-NEXT: pushq %rbp +; X64-AVX2-NEXT: .cfi_def_cfa_offset 16 +; X64-AVX2-NEXT: .cfi_offset %rbp, -16 +; X64-AVX2-NEXT: movq %rsp, %rbp +; X64-AVX2-NEXT: .cfi_def_cfa_register %rbp +; X64-AVX2-NEXT: andq $-32, %rsp +; X64-AVX2-NEXT: subq $128, %rsp +; X64-AVX2-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX2-NEXT: vmovaps %ymm0, (%rsp) +; X64-AVX2-NEXT: movq (%rdi), %rax +; X64-AVX2-NEXT: vmovq %rax, %xmm1 +; X64-AVX2-NEXT: vpbroadcastq %xmm1, %ymm1 +; X64-AVX2-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) +; X64-AVX2-NEXT: movq %rbp, %rsp +; X64-AVX2-NEXT: popq %rbp +; X64-AVX2-NEXT: vzeroupper +; 
X64-AVX2-NEXT: retq +; +; X64-AVX512VL-LABEL: isel_crash_4q: +; X64-AVX512VL: ## %bb.0: ## %eintry +; X64-AVX512VL-NEXT: pushq %rbp +; X64-AVX512VL-NEXT: .cfi_def_cfa_offset 16 +; X64-AVX512VL-NEXT: .cfi_offset %rbp, -16 +; X64-AVX512VL-NEXT: movq %rsp, %rbp +; X64-AVX512VL-NEXT: .cfi_def_cfa_register %rbp +; X64-AVX512VL-NEXT: andq $-32, %rsp +; X64-AVX512VL-NEXT: subq $128, %rsp +; X64-AVX512VL-NEXT: vxorps %xmm0, %xmm0, %xmm0 +; X64-AVX512VL-NEXT: vmovaps %ymm0, (%rsp) +; X64-AVX512VL-NEXT: movq (%rdi), %rax +; X64-AVX512VL-NEXT: vpbroadcastq %rax, %ymm1 +; X64-AVX512VL-NEXT: vmovaps %ymm0, {{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: vmovdqa %ymm1, {{[0-9]+}}(%rsp) +; X64-AVX512VL-NEXT: movq %rbp, %rsp +; X64-AVX512VL-NEXT: popq %rbp +; X64-AVX512VL-NEXT: vzeroupper +; X64-AVX512VL-NEXT: retq eintry: %__a.addr.i = alloca <4 x i64>, align 16 %__b.addr.i = alloca <4 x i64>, align 16 diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll index 996e6796616..254cdfdd8cb 100644 --- a/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll +++ b/llvm/test/CodeGen/X86/avx2-vbroadcasti128.ll @@ -271,16 +271,18 @@ define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) { ; X32: # %bb.0: ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: vmovaps (%ecx), %xmm0 ; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-NEXT: vmovaps %ymm1, (%eax) +; X32-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-NEXT: retl ; ; X64-LABEL: PR29088: ; X64: # %bb.0: +; X64-NEXT: vmovaps (%rdi), %xmm0 ; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-NEXT: vmovaps %ymm1, (%rsi) +; X64-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-NEXT: retq %ld = load <4 x i32>, <4 x i32>* %p0 store <8 x float> zeroinitializer, <8 x float>* %p1 diff --git a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll index 2bf69cfadcf..c5ecb1559b4 100644 --- a/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll +++ b/llvm/test/CodeGen/X86/avx512-vbroadcasti128.ll @@ -186,23 +186,26 @@ define <64 x i8> @test_broadcast_16i8_64i8(<16 x i8> *%p) nounwind { define <8 x i32> @PR29088(<4 x i32>* %p0, <8 x float>* %p1) { ; X64-AVX512VL-LABEL: PR29088: ; X64-AVX512VL: ## %bb.0: +; X64-AVX512VL-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512VL-NEXT: vmovdqa %ymm1, (%rsi) +; X64-AVX512VL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512VL-NEXT: retq ; ; X64-AVX512BWVL-LABEL: PR29088: ; X64-AVX512BWVL: ## %bb.0: +; X64-AVX512BWVL-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX512BWVL-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512BWVL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512BWVL-NEXT: vmovdqa %ymm1, (%rsi) +; X64-AVX512BWVL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512BWVL-NEXT: retq ; ; X64-AVX512DQVL-LABEL: PR29088: ; X64-AVX512DQVL: ## %bb.0: +; X64-AVX512DQVL-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX512DQVL-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-AVX512DQVL-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512DQVL-NEXT: vmovaps %ymm1, (%rsi) +; X64-AVX512DQVL-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512DQVL-NEXT: retq %ld = load <4 x i32>, <4 x i32>* %p0 store <8 x float> zeroinitializer, <8 x float>* %p1 diff --git a/llvm/test/CodeGen/X86/i256-add.ll b/llvm/test/CodeGen/X86/i256-add.ll index 
85a885a4315..36d838a68cb 100644 --- a/llvm/test/CodeGen/X86/i256-add.ll +++ b/llvm/test/CodeGen/X86/i256-add.ll @@ -9,30 +9,40 @@ define void @add(i256* %p, i256* %q) nounwind { ; X32-NEXT: pushl %ebx ; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi -; X32-NEXT: subl $8, %esp -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl 28(%eax), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl 24(%eax), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: movl 20(%eax), %esi -; X32-NEXT: movl 16(%eax), %edi -; X32-NEXT: movl 12(%eax), %ebx -; X32-NEXT: movl 8(%eax), %ebp -; X32-NEXT: movl (%eax), %ecx -; X32-NEXT: movl 4(%eax), %edx +; X32-NEXT: subl $12, %esp ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: addl %ecx, (%eax) -; X32-NEXT: adcl %edx, 4(%eax) -; X32-NEXT: adcl %ebp, 8(%eax) -; X32-NEXT: adcl %ebx, 12(%eax) -; X32-NEXT: adcl %edi, 16(%eax) -; X32-NEXT: adcl %esi, 20(%eax) -; X32-NEXT: movl (%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %ecx, 24(%eax) -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: adcl %ecx, 28(%eax) -; X32-NEXT: addl $8, %esp +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl 8(%ecx), %edi +; X32-NEXT: movl (%ecx), %edx +; X32-NEXT: movl 4(%ecx), %ebx +; X32-NEXT: movl 28(%eax), %esi +; X32-NEXT: movl %esi, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: movl 24(%eax), %ebp +; X32-NEXT: addl (%eax), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: adcl 4(%eax), %ebx +; X32-NEXT: adcl 8(%eax), %edi +; X32-NEXT: movl %edi, (%esp) # 4-byte Spill +; X32-NEXT: movl 20(%eax), %edi +; X32-NEXT: movl 12(%eax), %edx +; X32-NEXT: movl 16(%eax), %esi +; X32-NEXT: adcl 12(%ecx), %edx +; X32-NEXT: adcl 16(%ecx), %esi +; X32-NEXT: adcl 20(%ecx), %edi +; X32-NEXT: movl %ebp, %eax +; X32-NEXT: adcl 24(%ecx), %eax +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebp # 4-byte Reload +; X32-NEXT: adcl %ebp, 28(%ecx) +; X32-NEXT: movl (%esp), %ebp # 4-byte Reload +; X32-NEXT: movl %ebp, 8(%ecx) +; X32-NEXT: movl %ebx, 4(%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %ebx # 4-byte Reload +; X32-NEXT: movl %ebx, (%ecx) +; X32-NEXT: movl %edx, 12(%ecx) +; X32-NEXT: movl %esi, 16(%ecx) +; X32-NEXT: movl %edi, 20(%ecx) +; X32-NEXT: movl %eax, 24(%ecx) +; X32-NEXT: addl $12, %esp ; X32-NEXT: popl %esi ; X32-NEXT: popl %edi ; X32-NEXT: popl %ebx @@ -41,14 +51,17 @@ define void @add(i256* %p, i256* %q) nounwind { ; ; X64-LABEL: add: ; X64: # %bb.0: -; X64-NEXT: movq 24(%rsi), %rax -; X64-NEXT: movq 16(%rsi), %rcx -; X64-NEXT: movq (%rsi), %rdx -; X64-NEXT: movq 8(%rsi), %rsi -; X64-NEXT: addq %rdx, (%rdi) -; X64-NEXT: adcq %rsi, 8(%rdi) -; X64-NEXT: adcq %rcx, 16(%rdi) -; X64-NEXT: adcq %rax, 24(%rdi) +; X64-NEXT: movq 16(%rdi), %rax +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq 8(%rdi), %rdx +; X64-NEXT: movq 24(%rsi), %r8 +; X64-NEXT: addq (%rsi), %rcx +; X64-NEXT: adcq 8(%rsi), %rdx +; X64-NEXT: adcq 16(%rsi), %rax +; X64-NEXT: adcq %r8, 24(%rdi) +; X64-NEXT: movq %rax, 16(%rdi) +; X64-NEXT: movq %rdx, 8(%rdi) +; X64-NEXT: movq %rcx, (%rdi) ; X64-NEXT: retq %a = load i256, i256* %p %b = load i256, i256* %q @@ -64,28 +77,35 @@ define void @sub(i256* %p, i256* %q) nounwind { ; X32-NEXT: pushl %edi ; X32-NEXT: pushl %esi ; X32-NEXT: subl $8, %esp -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: movl 28(%eax), %ecx -; X32-NEXT: movl %ecx, {{[0-9]+}}(%esp) # 4-byte Spill -; X32-NEXT: movl 24(%eax), %ecx -; X32-NEXT: movl %ecx, (%esp) # 4-byte Spill -; X32-NEXT: movl 20(%eax), %esi -; X32-NEXT: movl 
16(%eax), %edi -; X32-NEXT: movl 12(%eax), %ebx -; X32-NEXT: movl 8(%eax), %ebp -; X32-NEXT: movl (%eax), %ecx -; X32-NEXT: movl 4(%eax), %edx -; X32-NEXT: movl {{[0-9]+}}(%esp), %eax -; X32-NEXT: subl %ecx, (%eax) -; X32-NEXT: sbbl %edx, 4(%eax) -; X32-NEXT: sbbl %ebp, 8(%eax) -; X32-NEXT: sbbl %ebx, 12(%eax) -; X32-NEXT: sbbl %edi, 16(%eax) -; X32-NEXT: sbbl %esi, 20(%eax) -; X32-NEXT: movl (%esp), %ecx # 4-byte Reload -; X32-NEXT: sbbl %ecx, 24(%eax) -; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx # 4-byte Reload -; X32-NEXT: sbbl %ecx, 28(%eax) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi +; X32-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-NEXT: movl 16(%ecx), %eax +; X32-NEXT: movl 12(%ecx), %edx +; X32-NEXT: movl 8(%ecx), %edi +; X32-NEXT: movl (%ecx), %ebx +; X32-NEXT: movl 4(%ecx), %ebp +; X32-NEXT: subl (%esi), %ebx +; X32-NEXT: sbbl 4(%esi), %ebp +; X32-NEXT: sbbl 8(%esi), %edi +; X32-NEXT: sbbl 12(%esi), %edx +; X32-NEXT: movl %edx, {{[0-9]+}}(%esp) # 4-byte Spill +; X32-NEXT: sbbl 16(%esi), %eax +; X32-NEXT: movl %eax, (%esp) # 4-byte Spill +; X32-NEXT: movl 20(%ecx), %edx +; X32-NEXT: sbbl 20(%esi), %edx +; X32-NEXT: movl 24(%ecx), %eax +; X32-NEXT: sbbl 24(%esi), %eax +; X32-NEXT: movl 28(%esi), %esi +; X32-NEXT: sbbl %esi, 28(%ecx) +; X32-NEXT: movl %edi, 8(%ecx) +; X32-NEXT: movl %ebp, 4(%ecx) +; X32-NEXT: movl %ebx, (%ecx) +; X32-NEXT: movl {{[0-9]+}}(%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 12(%ecx) +; X32-NEXT: movl (%esp), %esi # 4-byte Reload +; X32-NEXT: movl %esi, 16(%ecx) +; X32-NEXT: movl %edx, 20(%ecx) +; X32-NEXT: movl %eax, 24(%ecx) ; X32-NEXT: addl $8, %esp ; X32-NEXT: popl %esi ; X32-NEXT: popl %edi @@ -95,14 +115,17 @@ define void @sub(i256* %p, i256* %q) nounwind { ; ; X64-LABEL: sub: ; X64: # %bb.0: -; X64-NEXT: movq 24(%rsi), %rax -; X64-NEXT: movq 16(%rsi), %rcx -; X64-NEXT: movq (%rsi), %rdx -; X64-NEXT: movq 8(%rsi), %rsi -; X64-NEXT: subq %rdx, (%rdi) -; X64-NEXT: sbbq %rsi, 8(%rdi) -; X64-NEXT: sbbq %rcx, 16(%rdi) -; X64-NEXT: sbbq %rax, 24(%rdi) +; X64-NEXT: movq 16(%rdi), %rax +; X64-NEXT: movq (%rdi), %rcx +; X64-NEXT: movq 8(%rdi), %rdx +; X64-NEXT: movq 24(%rsi), %r8 +; X64-NEXT: subq (%rsi), %rcx +; X64-NEXT: sbbq 8(%rsi), %rdx +; X64-NEXT: sbbq 16(%rsi), %rax +; X64-NEXT: sbbq %r8, 24(%rdi) +; X64-NEXT: movq %rax, 16(%rdi) +; X64-NEXT: movq %rdx, 8(%rdi) +; X64-NEXT: movq %rcx, (%rdi) ; X64-NEXT: retq %a = load i256, i256* %p %b = load i256, i256* %q diff --git a/llvm/test/CodeGen/X86/masked_memop.ll b/llvm/test/CodeGen/X86/masked_memop.ll index aa6ae096445..4a250205051 100644 --- a/llvm/test/CodeGen/X86/masked_memop.ll +++ b/llvm/test/CodeGen/X86/masked_memop.ll @@ -1264,7 +1264,8 @@ define <8 x double> @load_one_mask_bit_set5(<8 x double>* %addr, <8 x double> %v ; AVX-LABEL: load_one_mask_bit_set5: ; AVX: ## %bb.0: ; AVX-NEXT: vextractf128 $1, %ymm1, %xmm2 -; AVX-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0] +; AVX-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero +; AVX-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm3[0] ; AVX-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX-NEXT: retq ; diff --git a/llvm/test/CodeGen/X86/merge-consecutive-stores.ll b/llvm/test/CodeGen/X86/merge-consecutive-stores.ll index 4f511ef99e5..af5fb478e52 100644 --- a/llvm/test/CodeGen/X86/merge-consecutive-stores.ll +++ b/llvm/test/CodeGen/X86/merge-consecutive-stores.ll @@ -10,11 +10,12 @@ define i32 @foo (i64* %so) nounwind uwtable ssp { ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax ; CHECK-NEXT: movl $0, 28(%eax) ; CHECK-NEXT: movl $0, 24(%eax) -; CHECK-NEXT: xorl %ecx, %ecx -; 
CHECK-NEXT: cmpl 16(%eax), %ecx -; CHECK-NEXT: movl $0, 16(%eax) -; CHECK-NEXT: sbbl 20(%eax), %ecx +; CHECK-NEXT: movl 20(%eax), %ecx ; CHECK-NEXT: movl $0, 20(%eax) +; CHECK-NEXT: xorl %edx, %edx +; CHECK-NEXT: cmpl 16(%eax), %edx +; CHECK-NEXT: movl $0, 16(%eax) +; CHECK-NEXT: sbbl %ecx, %edx ; CHECK-NEXT: setl %al ; CHECK-NEXT: movzbl %al, %eax ; CHECK-NEXT: negl %eax diff --git a/llvm/test/CodeGen/X86/nontemporal.ll b/llvm/test/CodeGen/X86/nontemporal.ll index 472c3e4774c..f53982a8542 100644 --- a/llvm/test/CodeGen/X86/nontemporal.ll +++ b/llvm/test/CodeGen/X86/nontemporal.ll @@ -13,35 +13,36 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 ; X32-SSE-NEXT: andl $-16, %esp ; X32-SSE-NEXT: subl $16, %esp ; X32-SSE-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero -; X32-SSE-NEXT: movl 12(%ebp), %ecx +; X32-SSE-NEXT: movl 12(%ebp), %eax ; X32-SSE-NEXT: movdqa 56(%ebp), %xmm4 ; X32-SSE-NEXT: movdqa 40(%ebp), %xmm5 ; X32-SSE-NEXT: movdqa 24(%ebp), %xmm6 -; X32-SSE-NEXT: movl 8(%ebp), %esi -; X32-SSE-NEXT: movl 80(%ebp), %edx -; X32-SSE-NEXT: movl (%edx), %eax +; X32-SSE-NEXT: movl 8(%ebp), %edx +; X32-SSE-NEXT: movl 80(%ebp), %ecx +; X32-SSE-NEXT: movl (%ecx), %esi ; X32-SSE-NEXT: addps {{\.LCPI.*}}, %xmm0 -; X32-SSE-NEXT: movntps %xmm0, (%esi) +; X32-SSE-NEXT: movntps %xmm0, (%edx) ; X32-SSE-NEXT: paddq {{\.LCPI.*}}, %xmm2 -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntdq %xmm2, (%esi) +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntdq %xmm2, (%edx) ; X32-SSE-NEXT: addpd {{\.LCPI.*}}, %xmm1 -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntpd %xmm1, (%esi) +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntpd %xmm1, (%edx) ; X32-SSE-NEXT: paddd {{\.LCPI.*}}, %xmm6 -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntdq %xmm6, (%esi) +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntdq %xmm6, (%edx) ; X32-SSE-NEXT: paddw {{\.LCPI.*}}, %xmm5 -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntdq %xmm5, (%esi) +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntdq %xmm5, (%edx) ; X32-SSE-NEXT: paddb {{\.LCPI.*}}, %xmm4 -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntdq %xmm4, (%esi) -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movntil %ecx, (%esi) -; X32-SSE-NEXT: addl (%edx), %eax -; X32-SSE-NEXT: movsd %xmm3, (%esi) -; X32-SSE-NEXT: addl (%edx), %eax +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntdq %xmm4, (%edx) +; X32-SSE-NEXT: addl (%ecx), %esi +; X32-SSE-NEXT: movntil %eax, (%edx) +; X32-SSE-NEXT: movl (%ecx), %eax +; X32-SSE-NEXT: addl %esi, %eax +; X32-SSE-NEXT: movsd %xmm3, (%edx) +; X32-SSE-NEXT: addl (%ecx), %eax ; X32-SSE-NEXT: leal -4(%ebp), %esp ; X32-SSE-NEXT: popl %esi ; X32-SSE-NEXT: popl %ebp @@ -55,35 +56,36 @@ define i32 @f(<4 x float> %A, i8* %B, <2 x double> %C, i32 %D, <2 x i64> %E, <4 ; X32-AVX-NEXT: andl $-16, %esp ; X32-AVX-NEXT: subl $16, %esp ; X32-AVX-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero -; X32-AVX-NEXT: movl 12(%ebp), %ecx +; X32-AVX-NEXT: movl 12(%ebp), %eax ; X32-AVX-NEXT: vmovdqa 56(%ebp), %xmm4 ; X32-AVX-NEXT: vmovdqa 40(%ebp), %xmm5 ; X32-AVX-NEXT: vmovdqa 24(%ebp), %xmm6 -; X32-AVX-NEXT: movl 8(%ebp), %edx -; X32-AVX-NEXT: movl 80(%ebp), %esi -; X32-AVX-NEXT: movl (%esi), %eax +; X32-AVX-NEXT: movl 8(%ebp), %ecx +; X32-AVX-NEXT: movl 80(%ebp), %edx +; X32-AVX-NEXT: movl (%edx), %esi ; X32-AVX-NEXT: vaddps {{\.LCPI.*}}, %xmm0, %xmm0 -; X32-AVX-NEXT: vmovntps %xmm0, (%edx) +; X32-AVX-NEXT: vmovntps %xmm0, (%ecx) ; X32-AVX-NEXT: vpaddq {{\.LCPI.*}}, 
%xmm2, %xmm0 -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovntdq %xmm0, (%edx) +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: vmovntdq %xmm0, (%ecx) ; X32-AVX-NEXT: vaddpd {{\.LCPI.*}}, %xmm1, %xmm0 -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovntpd %xmm0, (%edx) +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: vmovntpd %xmm0, (%ecx) ; X32-AVX-NEXT: vpaddd {{\.LCPI.*}}, %xmm6, %xmm0 -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovntdq %xmm0, (%edx) +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: vmovntdq %xmm0, (%ecx) ; X32-AVX-NEXT: vpaddw {{\.LCPI.*}}, %xmm5, %xmm0 -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovntdq %xmm0, (%edx) +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: vmovntdq %xmm0, (%ecx) ; X32-AVX-NEXT: vpaddb {{\.LCPI.*}}, %xmm4, %xmm0 -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovntdq %xmm0, (%edx) -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: movntil %ecx, (%edx) -; X32-AVX-NEXT: addl (%esi), %eax -; X32-AVX-NEXT: vmovsd %xmm3, (%edx) -; X32-AVX-NEXT: addl (%esi), %eax +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: vmovntdq %xmm0, (%ecx) +; X32-AVX-NEXT: addl (%edx), %esi +; X32-AVX-NEXT: movntil %eax, (%ecx) +; X32-AVX-NEXT: movl (%edx), %eax +; X32-AVX-NEXT: addl %esi, %eax +; X32-AVX-NEXT: vmovsd %xmm3, (%ecx) +; X32-AVX-NEXT: addl (%edx), %eax ; X32-AVX-NEXT: leal -4(%ebp), %esp ; X32-AVX-NEXT: popl %esi ; X32-AVX-NEXT: popl %ebp diff --git a/llvm/test/CodeGen/X86/pr36274.ll b/llvm/test/CodeGen/X86/pr36274.ll deleted file mode 100644 index 97b958c6b68..00000000000 --- a/llvm/test/CodeGen/X86/pr36274.ll +++ /dev/null @@ -1,33 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=i386-unknown-linux-gnu | FileCheck %s - -; This tests is checking for a case where the x86 load-op-store fusion -; misses a dependence between the fused load and a non-fused operand -; to the load causing a cycle. Here the dependence in question comes -; from the carry in input of the adcl. 
- -@vx = external local_unnamed_addr global <2 x i32>, align 8 - -define void @pr36274(i32* %somewhere) { -; CHECK-LABEL: pr36274: -; CHECK: # %bb.0: -; CHECK-NEXT: movl vx+4, %eax -; CHECK-NEXT: addl $1, vx -; CHECK-NEXT: adcl $0, %eax -; CHECK-NEXT: movl %eax, vx+4 -; CHECK-NEXT: retl - %a0 = getelementptr <2 x i32>, <2 x i32>* @vx, i32 0, i32 0 - %a1 = getelementptr <2 x i32>, <2 x i32>* @vx, i32 0, i32 1 - %x1 = load volatile i32, i32* %a1, align 4 - %x0 = load volatile i32, i32* %a0, align 8 - %vx0 = insertelement <2 x i32> undef, i32 %x0, i32 0 - %vx1 = insertelement <2 x i32> %vx0, i32 %x1, i32 1 - %x = bitcast <2 x i32> %vx1 to i64 - %add = add i64 %x, 1 - %vadd = bitcast i64 %add to <2 x i32> - %vx1_0 = extractelement <2 x i32> %vadd, i32 0 - %vx1_1 = extractelement <2 x i32> %vadd, i32 1 - store i32 %vx1_0, i32* %a0, align 8 - store i32 %vx1_1, i32* %a1, align 4 - ret void -} diff --git a/llvm/test/CodeGen/X86/pr36312.ll b/llvm/test/CodeGen/X86/pr36312.ll deleted file mode 100644 index 64048511ac7..00000000000 --- a/llvm/test/CodeGen/X86/pr36312.ll +++ /dev/null @@ -1,35 +0,0 @@ -; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py -; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu | FileCheck %s - -%struct.anon = type { i32, i32 } - -@c = common global %struct.anon zeroinitializer, align 4 -@d = local_unnamed_addr global %struct.anon* @c, align 8 -@a = common local_unnamed_addr global i32 0, align 4 -@b = common local_unnamed_addr global i32 0, align 4 - -; Function Attrs: norecurse nounwind uwtable -define void @g() local_unnamed_addr #0 { -; CHECK-LABEL: g: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: movq {{.*}}(%rip), %rax -; CHECK-NEXT: movl 4(%rax), %eax -; CHECK-NEXT: xorl %ecx, %ecx -; CHECK-NEXT: incl {{.*}}(%rip) -; CHECK-NEXT: setne %cl -; CHECK-NEXT: addl %eax, %ecx -; CHECK-NEXT: movl %ecx, {{.*}}(%rip) -; CHECK-NEXT: retq -entry: - %0 = load %struct.anon*, %struct.anon** @d, align 8 - %y = getelementptr inbounds %struct.anon, %struct.anon* %0, i64 0, i32 1 - %1 = load i32, i32* %y, align 4 - %2 = load i32, i32* @b, align 4 - %inc = add nsw i32 %2, 1 - store i32 %inc, i32* @b, align 4 - %tobool = icmp ne i32 %inc, 0 - %land.ext = zext i1 %tobool to i32 - %add = add nsw i32 %1, %land.ext - store i32 %add, i32* @a, align 4 - ret void -} diff --git a/llvm/test/CodeGen/X86/required-vector-width.ll b/llvm/test/CodeGen/X86/required-vector-width.ll index dcca540b31d..257d3f0d079 100644 --- a/llvm/test/CodeGen/X86/required-vector-width.ll +++ b/llvm/test/CodeGen/X86/required-vector-width.ll @@ -39,12 +39,12 @@ define void @add512(<16 x i32>* %a, <16 x i32>* %b, <16 x i32>* %c) "required-ve define void @avg_v64i8_256(<64 x i8>* %a, <64 x i8>* %b) "required-vector-width"="256" { ; CHECK-LABEL: avg_v64i8_256: ; CHECK: # %bb.0: -; CHECK-NEXT: vmovdqa (%rsi), %ymm0 -; CHECK-NEXT: vmovdqa 32(%rsi), %ymm1 -; CHECK-NEXT: vpavgb (%rdi), %ymm0, %ymm0 -; CHECK-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1 -; CHECK-NEXT: vmovdqu %ymm1, (%rax) +; CHECK-NEXT: vmovdqa 32(%rdi), %ymm0 +; CHECK-NEXT: vmovdqa (%rsi), %ymm1 +; CHECK-NEXT: vpavgb (%rdi), %ymm1, %ymm1 +; CHECK-NEXT: vpavgb 32(%rsi), %ymm0, %ymm0 ; CHECK-NEXT: vmovdqu %ymm0, (%rax) +; CHECK-NEXT: vmovdqu %ymm1, (%rax) ; CHECK-NEXT: vzeroupper ; CHECK-NEXT: retq %1 = load <64 x i8>, <64 x i8>* %a diff --git a/llvm/test/CodeGen/X86/store_op_load_fold2.ll b/llvm/test/CodeGen/X86/store_op_load_fold2.ll index 674b8d8f938..f47d87f4bb8 100644 --- a/llvm/test/CodeGen/X86/store_op_load_fold2.ll +++ 
b/llvm/test/CodeGen/X86/store_op_load_fold2.ll @@ -17,14 +17,14 @@ cond_true2732.preheader: ; preds = %entry store i64 %tmp2676.us.us, i64* %tmp2666 ret i32 0 -; INTEL: and {{e..}}, dword ptr [356] -; INTEL: and dword ptr [360], {{e..}} -; FIXME: mov dword ptr [356], {{e..}} +; INTEL: and {{e..}}, dword ptr [360] +; INTEL: and dword ptr [356], {{e..}} +; FIXME: mov dword ptr [360], {{e..}} ; The above line comes out as 'mov 360, eax', but when the register is ecx it works? -; ATT: andl 356, %{{e..}} -; ATT: andl %{{e..}}, 360 -; ATT: movl %{{e..}}, 356 +; ATT: andl 360, %{{e..}} +; ATT: andl %{{e..}}, 356 +; ATT: movl %{{e..}}, 360 } diff --git a/llvm/test/CodeGen/X86/subvector-broadcast.ll b/llvm/test/CodeGen/X86/subvector-broadcast.ll index 218ce26cadd..00cd4df0938 100644 --- a/llvm/test/CodeGen/X86/subvector-broadcast.ll +++ b/llvm/test/CodeGen/X86/subvector-broadcast.ll @@ -751,64 +751,72 @@ define <8 x i32> @test_broadcast_4i32_8i32_chain(<4 x i32>* %p0, <4 x float>* %p ; X32-AVX: # %bb.0: ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX-NEXT: vmovaps (%ecx), %xmm0 ; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-AVX-NEXT: vmovaps %xmm1, (%eax) +; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-AVX-NEXT: retl ; ; X32-AVX512F-LABEL: test_broadcast_4i32_8i32_chain: ; X32-AVX512F: # %bb.0: ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512F-NEXT: vmovaps (%ecx), %xmm0 ; X32-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X32-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-AVX512F-NEXT: vmovdqa %xmm1, (%eax) +; X32-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-AVX512F-NEXT: retl ; ; X32-AVX512BW-LABEL: test_broadcast_4i32_8i32_chain: ; X32-AVX512BW: # %bb.0: ; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512BW-NEXT: vmovaps (%ecx), %xmm0 ; X32-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X32-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-AVX512BW-NEXT: vmovdqa %xmm1, (%eax) +; X32-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-AVX512BW-NEXT: retl ; ; X32-AVX512DQ-LABEL: test_broadcast_4i32_8i32_chain: ; X32-AVX512DQ: # %bb.0: ; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512DQ-NEXT: vmovaps (%ecx), %xmm0 ; X32-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-AVX512DQ-NEXT: vmovaps %xmm1, (%eax) +; X32-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-AVX512DQ-NEXT: retl ; ; X64-AVX-LABEL: test_broadcast_4i32_8i32_chain: ; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX-NEXT: vmovaps %xmm1, (%rsi) +; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX-NEXT: retq ; ; X64-AVX512F-LABEL: test_broadcast_4i32_8i32_chain: ; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512F-NEXT: vmovdqa %xmm1, (%rsi) +; X64-AVX512F-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512F-NEXT: retq ; ; X64-AVX512BW-LABEL: test_broadcast_4i32_8i32_chain: ; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovaps (%rdi), %xmm0 ; 
X64-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512BW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512BW-NEXT: vmovdqa %xmm1, (%rsi) +; X64-AVX512BW-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512BW-NEXT: retq ; ; X64-AVX512DQ-LABEL: test_broadcast_4i32_8i32_chain: ; X64-AVX512DQ: # %bb.0: +; X64-AVX512DQ-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-AVX512DQ-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi) +; X64-AVX512DQ-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX512DQ-NEXT: retq %1 = load <4 x i32>, <4 x i32>* %p0 store <4 x float> zeroinitializer, <4 x float>* %p1 @@ -821,9 +829,10 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>* ; X32-AVX: # %bb.0: ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX-NEXT: vmovaps (%ecx), %xmm0 ; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X32-AVX-NEXT: vmovaps %xmm1, (%eax) +; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X32-AVX-NEXT: vmovaps %ymm0, %ymm1 ; X32-AVX-NEXT: retl ; @@ -831,56 +840,63 @@ define <16 x i32> @test_broadcast_4i32_16i32_chain(<4 x i32>* %p0, <4 x float>* ; X32-AVX512F: # %bb.0: ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512F-NEXT: vmovdqa (%ecx), %xmm0 ; X32-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X32-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512F-NEXT: vmovdqa %xmm1, (%eax) +; X32-AVX512F-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512F-NEXT: retl ; ; X32-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain: ; X32-AVX512BW: # %bb.0: ; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512BW-NEXT: vmovdqa (%ecx), %xmm0 ; X32-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X32-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512BW-NEXT: vmovdqa %xmm1, (%eax) +; X32-AVX512BW-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512BW-NEXT: retl ; ; X32-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain: ; X32-AVX512DQ: # %bb.0: ; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %eax ; X32-AVX512DQ-NEXT: movl {{[0-9]+}}(%esp), %ecx +; X32-AVX512DQ-NEXT: vmovdqa (%ecx), %xmm0 ; X32-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X32-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512DQ-NEXT: vmovaps %xmm1, (%eax) +; X32-AVX512DQ-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X32-AVX512DQ-NEXT: retl ; ; X64-AVX-LABEL: test_broadcast_4i32_16i32_chain: ; X64-AVX: # %bb.0: +; X64-AVX-NEXT: vmovaps (%rdi), %xmm0 ; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-AVX-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] ; X64-AVX-NEXT: vmovaps %xmm1, (%rsi) +; X64-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0 ; X64-AVX-NEXT: vmovaps %ymm0, %ymm1 ; X64-AVX-NEXT: retq ; ; X64-AVX512F-LABEL: test_broadcast_4i32_16i32_chain: ; X64-AVX512F: # %bb.0: +; X64-AVX512F-NEXT: vmovdqa (%rdi), %xmm0 ; X64-AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512F-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X64-AVX512F-NEXT: vmovdqa %xmm1, (%rsi) +; X64-AVX512F-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; 
X64-AVX512F-NEXT: retq ; ; X64-AVX512BW-LABEL: test_broadcast_4i32_16i32_chain: ; X64-AVX512BW: # %bb.0: +; X64-AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 ; X64-AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1 -; X64-AVX512BW-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X64-AVX512BW-NEXT: vmovdqa %xmm1, (%rsi) +; X64-AVX512BW-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X64-AVX512BW-NEXT: retq ; ; X64-AVX512DQ-LABEL: test_broadcast_4i32_16i32_chain: ; X64-AVX512DQ: # %bb.0: +; X64-AVX512DQ-NEXT: vmovdqa (%rdi), %xmm0 ; X64-AVX512DQ-NEXT: vxorps %xmm1, %xmm1, %xmm1 -; X64-AVX512DQ-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X64-AVX512DQ-NEXT: vmovaps %xmm1, (%rsi) +; X64-AVX512DQ-NEXT: vshufi32x4 {{.*#+}} zmm0 = zmm0[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] ; X64-AVX512DQ-NEXT: retq %1 = load <4 x i32>, <4 x i32>* %p0 store <4 x float> zeroinitializer, <4 x float>* %p1 diff --git a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll index 19fd7d3f5ac..38c4a1170ec 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-variable-256.ll @@ -47,7 +47,8 @@ define <4 x double> @var_shuffle_v4f64_v4f64_uxx0_i64(<4 x double> %x, i64 %i0, ; ALL-NEXT: andl $3, %edx ; ALL-NEXT: andl $3, %esi ; ALL-NEXT: vmovaps %ymm0, (%rsp) -; ALL-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; ALL-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] ; ALL-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero ; ALL-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; ALL-NEXT: movq %rbp, %rsp |

