author    Michael Zuckerman <Michael.zuckerman@intel.com>    2017-12-14 11:55:50 +0000
committer Michael Zuckerman <Michael.zuckerman@intel.com>    2017-12-14 11:55:50 +0000
commit    19fd217eaa74eefe319cee99d7b2ff71e14a8215 (patch)
tree      78fc5994f43577f7a60060ceef49a11cf28f8216
parent    9f19fe51d2f25bd671a2d43bac23cc74a54636df (diff)
[AVX512] Add support for load-truncate-store of i1

A store of a truncated load of vXi1 is poorly supported by LLVM and most of the time ends in an assertion failure. This patch fixes this issue.

Differential Revision: https://reviews.llvm.org/D39547

Change-Id: Ida5523dd09c1ad384acc0a27e9e59273d28cbdc9
llvm-svn: 320691
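For reference, the shape being fixed is a load of a wider one-element vector, a trunc to <1 x i1>, and a store of the truncated value. A minimal IR sketch of that pattern (the function name is illustrative; it mirrors the new tests added below):

define void @store_trunc_v1i1(<1 x i8>* %src, <1 x i1>* %dst) {
  %v = load <1 x i8>, <1 x i8>* %src      ; load the wider one-element vector
  %t = trunc <1 x i8> %v to <1 x i1>      ; truncate down to a single i1 lane
  store <1 x i1> %t, <1 x i1>* %dst       ; store the v1i1 result
  ret void
}

Before this change, lowering such a store on AVX-512 targets could hit an assertion. With it, as the autogenerated checks below show, the i1 value is computed with test/setne and then stored either through a mask register (kmovd/kmovb when AVX512DQ/VL are available) or as a plain byte store (movb with plain AVX512F).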
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp              3
-rw-r--r--  llvm/test/CodeGen/X86/avx512-load-trunc-store-i1.ll   151
2 files changed, 154 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 648e0d9deee..58bcc031ce3 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -1222,6 +1222,9 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
}
}
+ if (Subtarget.hasAVX512())
+ setOperationAction(ISD::SELECT, MVT::v1i1, Custom);
+
if (Subtarget.hasDQI()) {
for (auto VT : { MVT::v2i64, MVT::v4i64, MVT::v8i64 }) {
setOperationAction(ISD::SINT_TO_FP, VT, Legal);
diff --git a/llvm/test/CodeGen/X86/avx512-load-trunc-store-i1.ll b/llvm/test/CodeGen/X86/avx512-load-trunc-store-i1.ll
new file mode 100644
index 00000000000..bfcac893512
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx512-load-trunc-store-i1.ll
@@ -0,0 +1,151 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl,+avx512dq -O2 | FileCheck %s --check-prefix=AVX512-ALL
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O2 | FileCheck %s --check-prefix=AVX512-ONLY
+
+define void @load_v1i2_trunc_v1i1_store(<1 x i2>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i2_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: movb (%rdi), %al
+; AVX512-ALL-NEXT: testb %al, %al
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i2_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: movb (%rdi), %al
+; AVX512-ONLY-NEXT: testb %al, %al
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i2>, <1 x i2>* %a0
+ %d1 = trunc <1 x i2> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i3_trunc_v1i1_store(<1 x i3>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i3_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: movb (%rdi), %al
+; AVX512-ALL-NEXT: testb %al, %al
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i3_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: movb (%rdi), %al
+; AVX512-ONLY-NEXT: testb %al, %al
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i3>, <1 x i3>* %a0
+ %d1 = trunc <1 x i3> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i4_trunc_v1i1_store(<1 x i4>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i4_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: movb (%rdi), %al
+; AVX512-ALL-NEXT: testb %al, %al
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i4_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: movb (%rdi), %al
+; AVX512-ONLY-NEXT: testb %al, %al
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i4>, <1 x i4>* %a0
+ %d1 = trunc <1 x i4> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i8_trunc_v1i1_store(<1 x i8>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i8_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: cmpb $0, (%rdi)
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i8_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: cmpb $0, (%rdi)
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i8>, <1 x i8>* %a0
+ %d1 = trunc <1 x i8> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i16_trunc_v1i1_store(<1 x i16>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i16_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: cmpb $0, (%rdi)
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i16_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: cmpb $0, (%rdi)
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i16>, <1 x i16>* %a0
+ %d1 = trunc <1 x i16> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i32_trunc_v1i1_store(<1 x i32>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i32_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: cmpb $0, (%rdi)
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i32_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: cmpb $0, (%rdi)
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i32>, <1 x i32>* %a0
+ %d1 = trunc <1 x i32> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+define void @load_v1i64_trunc_v1i1_store(<1 x i64>* %a0,<1 x i1>* %a1) {
+; AVX512-ALL-LABEL: load_v1i64_trunc_v1i1_store:
+; AVX512-ALL: # %bb.0:
+; AVX512-ALL-NEXT: cmpb $0, (%rdi)
+; AVX512-ALL-NEXT: setne %al
+; AVX512-ALL-NEXT: kmovd %eax, %k0
+; AVX512-ALL-NEXT: kmovb %k0, (%rsi)
+; AVX512-ALL-NEXT: retq
+;
+; AVX512-ONLY-LABEL: load_v1i64_trunc_v1i1_store:
+; AVX512-ONLY: # %bb.0:
+; AVX512-ONLY-NEXT: cmpb $0, (%rdi)
+; AVX512-ONLY-NEXT: setne %al
+; AVX512-ONLY-NEXT: movb %al, (%rsi)
+; AVX512-ONLY-NEXT: retq
+ %d0 = load <1 x i64>, <1 x i64>* %a0
+ %d1 = trunc <1 x i64> %d0 to <1 x i1>
+ store <1 x i1> %d1, <1 x i1>* %a1
+ ret void
+}
+