diff options
Diffstat (limited to 'llvm/test/CodeGen/X86/pmul.ll')
-rw-r--r-- | llvm/test/CodeGen/X86/pmul.ll | 61 |
1 file changed, 20 insertions, 41 deletions
diff --git a/llvm/test/CodeGen/X86/pmul.ll b/llvm/test/CodeGen/X86/pmul.ll index 199d6a63ff9..7c416873897 100644 --- a/llvm/test/CodeGen/X86/pmul.ll +++ b/llvm/test/CodeGen/X86/pmul.ll @@ -360,47 +360,26 @@ define <2 x i64> @mul_v2i64spill(<2 x i64> %i, <2 x i64> %j) nounwind { ; SSE-NEXT: addq $40, %rsp ; SSE-NEXT: retq ; -; AVX2-LABEL: mul_v2i64spill: -; AVX2: # BB#0: # %entry -; AVX2-NEXT: subq $40, %rsp -; AVX2-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill -; AVX2-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill -; AVX2-NEXT: callq foo -; AVX2-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; AVX2-NEXT: vmovdqa (%rsp), %xmm4 # 16-byte Reload -; AVX2-NEXT: vpmuludq %xmm2, %xmm4, %xmm0 -; AVX2-NEXT: vpsrlq $32, %xmm2, %xmm1 -; AVX2-NEXT: vmovdqa %xmm2, %xmm3 -; AVX2-NEXT: vpmuludq %xmm1, %xmm4, %xmm1 -; AVX2-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX2-NEXT: vpsrlq $32, %xmm4, %xmm2 -; AVX2-NEXT: vpmuludq %xmm3, %xmm2, %xmm2 -; AVX2-NEXT: vpsllq $32, %xmm2, %xmm2 -; AVX2-NEXT: vpaddq %xmm2, %xmm1, %xmm1 -; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0 -; AVX2-NEXT: addq $40, %rsp -; AVX2-NEXT: retq -; -; AVX512-LABEL: mul_v2i64spill: -; AVX512: # BB#0: # %entry -; AVX512-NEXT: subq $40, %rsp -; AVX512-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill -; AVX512-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill -; AVX512-NEXT: callq foo -; AVX512-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload -; AVX512-NEXT: vmovdqa (%rsp), %xmm4 # 16-byte Reload -; AVX512-NEXT: vpmuludq %xmm2, %xmm4, %xmm0 -; AVX512-NEXT: vpsrlq $32, %xmm2, %xmm1 -; AVX512-NEXT: vmovdqa64 %zmm2, %zmm3 -; AVX512-NEXT: vpmuludq %xmm1, %xmm4, %xmm1 -; AVX512-NEXT: vpsllq $32, %xmm1, %xmm1 -; AVX512-NEXT: vpsrlq $32, %xmm4, %xmm2 -; AVX512-NEXT: vpmuludq %xmm3, %xmm2, %xmm2 -; AVX512-NEXT: vpsllq $32, %xmm2, %xmm2 -; AVX512-NEXT: vpaddq %xmm2, %xmm1, %xmm1 -; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0 -; AVX512-NEXT: addq $40, %rsp -; AVX512-NEXT: retq +; AVX-LABEL: mul_v2i64spill: +; AVX: # BB#0: # %entry +; AVX-NEXT: subq $40, %rsp +; AVX-NEXT: vmovaps %xmm1, {{[0-9]+}}(%rsp) # 16-byte Spill +; AVX-NEXT: vmovaps %xmm0, (%rsp) # 16-byte Spill +; AVX-NEXT: callq foo +; AVX-NEXT: vmovdqa {{[0-9]+}}(%rsp), %xmm2 # 16-byte Reload +; AVX-NEXT: vmovdqa (%rsp), %xmm4 # 16-byte Reload +; AVX-NEXT: vpmuludq %xmm2, %xmm4, %xmm0 +; AVX-NEXT: vpsrlq $32, %xmm2, %xmm1 +; AVX-NEXT: vmovdqa %xmm2, %xmm3 +; AVX-NEXT: vpmuludq %xmm1, %xmm4, %xmm1 +; AVX-NEXT: vpsllq $32, %xmm1, %xmm1 +; AVX-NEXT: vpsrlq $32, %xmm4, %xmm2 +; AVX-NEXT: vpmuludq %xmm3, %xmm2, %xmm2 +; AVX-NEXT: vpsllq $32, %xmm2, %xmm2 +; AVX-NEXT: vpaddq %xmm2, %xmm1, %xmm1 +; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0 +; AVX-NEXT: addq $40, %rsp +; AVX-NEXT: retq entry: ; Use a call to force spills. call void @foo() |