diff options
author | Craig Topper <craig.topper@gmail.com> | 2016-04-17 07:25:39 +0000 |
---|---|---|
committer | Craig Topper <craig.topper@gmail.com> | 2016-04-17 07:25:39 +0000 |
commit | 75869d57019ddbf93510027c655d46a97eb526f4 (patch) | |
tree | 6c4aa530a2910a9ba7b7f6351e49951d9b69fc9b /llvm/test | |
parent | 2c7cd4afaba59a2a80330df7adc02c8a4db7c5b9 (diff) | |
download | bcm5719-llvm-75869d57019ddbf93510027c655d46a97eb526f4.tar.gz bcm5719-llvm-75869d57019ddbf93510027c655d46a97eb526f4.zip |
[AVX512] ISD::MUL v2i64/v4i64 should only be legal if DQI and VLX features are enabled.
llvm-svn: 266554
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/CodeGen/X86/avx512-arith.ll | 122 |
1 file changed, 122 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/X86/avx512-arith.ll b/llvm/test/CodeGen/X86/avx512-arith.ll
index 9220e4f269c..acf9caa2342 100644
--- a/llvm/test/CodeGen/X86/avx512-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512-arith.ll
@@ -140,6 +140,128 @@ define <8 x i64> @imulq512(<8 x i64> %y, <8 x i64> %x) {
   ret <8 x i64>%z
 }
 
+define <4 x i64> @imulq256(<4 x i64> %y, <4 x i64> %x) {
+; AVX512F-LABEL: imulq256:
+; AVX512F: ## BB#0:
+; AVX512F-NEXT: vpmuludq %ymm0, %ymm1, %ymm2
+; AVX512F-NEXT: vpsrlq $32, %ymm0, %ymm3
+; AVX512F-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; AVX512F-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX512F-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX512F-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512F-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; AVX512F-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX512F-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: imulq256:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vpmuludq %ymm0, %ymm1, %ymm2
+; AVX512VL-NEXT: vpsrlq $32, %ymm0, %ymm3
+; AVX512VL-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; AVX512VL-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX512VL-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX512VL-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512VL-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; AVX512VL-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX512VL-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: imulq256:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpmuludq %ymm0, %ymm1, %ymm2
+; AVX512BW-NEXT: vpsrlq $32, %ymm0, %ymm3
+; AVX512BW-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; AVX512BW-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX512BW-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX512BW-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512BW-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; AVX512BW-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX512BW-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: imulq256:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmuludq %ymm0, %ymm1, %ymm2
+; AVX512DQ-NEXT: vpsrlq $32, %ymm0, %ymm3
+; AVX512DQ-NEXT: vpmuludq %ymm3, %ymm1, %ymm3
+; AVX512DQ-NEXT: vpsllq $32, %ymm3, %ymm3
+; AVX512DQ-NEXT: vpaddq %ymm3, %ymm2, %ymm2
+; AVX512DQ-NEXT: vpsrlq $32, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpmuludq %ymm0, %ymm1, %ymm0
+; AVX512DQ-NEXT: vpsllq $32, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpaddq %ymm0, %ymm2, %ymm0
+; AVX512DQ-NEXT: retq
+;
+; SKX-LABEL: imulq256:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmullq %ymm0, %ymm1, %ymm0
+; SKX-NEXT: retq
+  %z = mul <4 x i64>%x, %y
+  ret <4 x i64>%z
+}
+
+define <2 x i64> @imulq128(<2 x i64> %y, <2 x i64> %x) {
+; AVX512F-LABEL: imulq128:
+; AVX512F: ## BB#0:
+; AVX512F-NEXT: vpmuludq %xmm0, %xmm1, %xmm2
+; AVX512F-NEXT: vpsrlq $32, %xmm0, %xmm3
+; AVX512F-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
+; AVX512F-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX512F-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX512F-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512F-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX512F-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512F-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: imulq128:
+; AVX512VL: ## BB#0:
+; AVX512VL-NEXT: vpmuludq %xmm0, %xmm1, %xmm2
+; AVX512VL-NEXT: vpsrlq $32, %xmm0, %xmm3
+; AVX512VL-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
+; AVX512VL-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX512VL-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX512VL-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512VL-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX512VL-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512VL-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512VL-NEXT: retq
+;
+; AVX512BW-LABEL: imulq128:
+; AVX512BW: ## BB#0:
+; AVX512BW-NEXT: vpmuludq %xmm0, %xmm1, %xmm2
+; AVX512BW-NEXT: vpsrlq $32, %xmm0, %xmm3
+; AVX512BW-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
+; AVX512BW-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX512BW-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX512BW-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512BW-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX512BW-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512BW-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512DQ-LABEL: imulq128:
+; AVX512DQ: ## BB#0:
+; AVX512DQ-NEXT: vpmuludq %xmm0, %xmm1, %xmm2
+; AVX512DQ-NEXT: vpsrlq $32, %xmm0, %xmm3
+; AVX512DQ-NEXT: vpmuludq %xmm3, %xmm1, %xmm3
+; AVX512DQ-NEXT: vpsllq $32, %xmm3, %xmm3
+; AVX512DQ-NEXT: vpaddq %xmm3, %xmm2, %xmm2
+; AVX512DQ-NEXT: vpsrlq $32, %xmm1, %xmm1
+; AVX512DQ-NEXT: vpmuludq %xmm0, %xmm1, %xmm0
+; AVX512DQ-NEXT: vpsllq $32, %xmm0, %xmm0
+; AVX512DQ-NEXT: vpaddq %xmm0, %xmm2, %xmm0
+; AVX512DQ-NEXT: retq
+;
+; SKX-LABEL: imulq128:
+; SKX: ## BB#0:
+; SKX-NEXT: vpmullq %xmm0, %xmm1, %xmm0
+; SKX-NEXT: retq
+  %z = mul <2 x i64>%x, %y
+  ret <2 x i64>%z
+}
+
 define <8 x double> @mulpd512(<8 x double> %y, <8 x double> %x) {
 ; CHECK-LABEL: mulpd512:
 ; CHECK: ## BB#0: ## %entry