diff options
| author | Craig Topper <craig.topper@gmail.com> | 2016-07-22 05:00:52 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@gmail.com> | 2016-07-22 05:00:52 +0000 |
| commit | f4151bea72ad4ad81b062f628a332634d09f0d40 (patch) | |
| tree | 8a75f73f584d64d082f4a8baed838470d08e772b /llvm/test/CodeGen/X86/vector-half-conversions.ll | |
| parent | 5ec33a94110b9cb54ab261d4e1de8b19df7d3f61 (diff) | |
| download | bcm5719-llvm-f4151bea72ad4ad81b062f628a332634d09f0d40.tar.gz bcm5719-llvm-f4151bea72ad4ad81b062f628a332634d09f0d40.zip | |
[AVX512] Add initial support for the Execution Domain fixing pass to change some EVEX instructions.
llvm-svn: 276393
Diffstat (limited to 'llvm/test/CodeGen/X86/vector-half-conversions.ll')
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-half-conversions.ll | 16 |
1 files changed, 8 insertions, 8 deletions
diff --git a/llvm/test/CodeGen/X86/vector-half-conversions.ll b/llvm/test/CodeGen/X86/vector-half-conversions.ll index b091d1bca2e..3261e988ffb 100644 --- a/llvm/test/CodeGen/X86/vector-half-conversions.ll +++ b/llvm/test/CodeGen/X86/vector-half-conversions.ll @@ -3001,7 +3001,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) { ; AVX512-NEXT: .cfi_offset %r14, -24 ; AVX512-NEXT: .Ltmp24: ; AVX512-NEXT: .cfi_offset %r15, -16 -; AVX512-NEXT: vmovups %zmm0, (%rsp) # 64-byte Spill +; AVX512-NEXT: vmovupd %zmm0, (%rsp) # 64-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX512-NEXT: callq __truncdfhf2 ; AVX512-NEXT: movw %ax, %bx @@ -3011,9 +3011,9 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) { ; AVX512-NEXT: callq __truncdfhf2 ; AVX512-NEXT: movzwl %ax, %r15d ; AVX512-NEXT: orl %ebx, %r15d -; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload +; AVX512-NEXT: vmovupd (%rsp), %zmm0 # 64-byte Reload ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill +; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX512-NEXT: callq __truncdfhf2 ; AVX512-NEXT: movw %ax, %bx @@ -3024,7 +3024,7 @@ define <8 x i16> @cvt_8f64_to_8i16(<8 x double> %a0) { ; AVX512-NEXT: orl %ebx, %r14d ; AVX512-NEXT: shlq $32, %r14 ; AVX512-NEXT: orq %r15, %r14 -; AVX512-NEXT: vmovups (%rsp), %zmm0 # 64-byte Reload +; AVX512-NEXT: vmovupd (%rsp), %zmm0 # 64-byte Reload ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0 ; AVX512-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] @@ -3862,17 +3862,17 @@ define void @store_cvt_8f64_to_8i16(<8 x double> %a0, <8 x i16>* %a1) { ; AVX512-NEXT: .Ltmp67: ; AVX512-NEXT: .cfi_offset %rbp, -16 ; AVX512-NEXT: movq %rdi, %rbx -; AVX512-NEXT: vmovups %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill +; AVX512-NEXT: vmovupd %zmm0, {{[0-9]+}}(%rsp) # 64-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX512-NEXT: callq __truncdfhf2 ; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill -; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload +; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload ; AVX512-NEXT: vextractf128 $1, %ymm0, %xmm0 -; AVX512-NEXT: vmovaps %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill +; AVX512-NEXT: vmovapd %xmm0, {{[0-9]+}}(%rsp) # 16-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] ; AVX512-NEXT: callq __truncdfhf2 ; AVX512-NEXT: movw %ax, {{[0-9]+}}(%rsp) # 2-byte Spill -; AVX512-NEXT: vmovups {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload +; AVX512-NEXT: vmovupd {{[0-9]+}}(%rsp), %zmm0 # 64-byte Reload ; AVX512-NEXT: vextractf64x4 $1, %zmm0, %ymm0 ; AVX512-NEXT: vmovupd %ymm0, {{[0-9]+}}(%rsp) # 32-byte Spill ; AVX512-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] |

