author    Lei Huang <lei@ca.ibm.com>    2019-09-16 20:04:15 +0000
committer Lei Huang <lei@ca.ibm.com>    2019-09-16 20:04:15 +0000
commit    bfb197d7a3b322d55a11cbc076cd9f787934d34f (patch)
tree      c3b3aff38259eff26e7a13764c926b9c0d4d15ae
parent    4e053ff1d188bae61f6f7d20c591a462b32a9992 (diff)
[PowerPC] Cust lower fpext v2f32 to v2f64 from extract_subvector v4f32
This is a follow-up patch to https://reviews.llvm.org/D57857 to handle extract_subvector v4f32. For an fpext from v2f32 to v2f64 fed by an extract_subvector, we currently generate the following on P9:

lxv 0, 0(3)
xxsldwi 1, 0, 0, 1
xscvspdpn 2, 0
xxsldwi 3, 0, 0, 3
xxswapd 0, 0
xscvspdpn 1, 1
xscvspdpn 3, 3
xscvspdpn 0, 0
xxmrghd 0, 0, 3
xxmrghd 1, 2, 1
stxv 0, 0(4)
stxv 1, 0(5)

This patch custom lowers it to the following sequence:

lxv 0, 0(3)        # load the v4f32 <w0, w1, w2, w3>
xxmrghw 2, 0, 0    # produce the vector <w0, w0, w1, w1>
xxmrglw 3, 0, 0    # produce the vector <w2, w2, w3, w3>
xvcvspdp 2, 2      # FP-extend to <d0, d1>
xvcvspdp 3, 3      # FP-extend to <d2, d3>
stxv 2, 0(5)       # store <d0, d1> (%vecinit11)
stxv 3, 0(4)       # store <d2, d3> (%vecinit4)

Differential Revision: https://reviews.llvm.org/D61961

llvm-svn: 372029
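For illustration, here is a minimal C sketch of the kind of source that can produce this pattern once the SLP vectorizer forms a single <4 x float> load, two subvector extracts, and two fpexts to <2 x double>. The function name is hypothetical and the exact IR depends on the optimization pipeline; the IR in the test file below is the authoritative pattern this patch targets.

/* Widen four adjacent floats to doubles, two at a time.
   With SLP vectorization this typically becomes one <4 x float> load,
   two shufflevector extracts of elements 0-1 and 2-3, and two fpexts
   to <2 x double>, matching the IR in the test below. */
void widen_pairs(const float *restrict a, double *restrict b, double *restrict c) {
  b[0] = (double)a[0];
  b[1] = (double)a[1];
  c[0] = (double)a[2];
  c[1] = (double)a[3];
}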
-rw-r--r--  llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll  87
1 file changed, 87 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll b/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
new file mode 100644
index 00000000000..f7727d6f4ea
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/reduce_scalarization02.ll
@@ -0,0 +1,87 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-unknown \
+; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names \
+; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s
+; RUN: llc -verify-machineinstrs -mtriple=powerpc64-unknown-unknown \
+; RUN: -mcpu=pwr9 -ppc-asm-full-reg-names \
+; RUN: -ppc-vsr-nums-as-vr < %s | FileCheck %s --check-prefix=CHECK-BE
+
+; Test that we reduce scalarization of an fpext from v2f32 to v2f64 fed by an extract_subvector of v4f32.
+
+define dso_local void @test(<4 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+; CHECK-LABEL: test:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs0, 0(r3)
+; CHECK-NEXT: xxmrglw vs1, vs0, vs0
+; CHECK-NEXT: xxmrghw vs0, vs0, vs0
+; CHECK-NEXT: xvcvspdp vs1, vs1
+; CHECK-NEXT: xvcvspdp vs0, vs0
+; CHECK-NEXT: stxv vs1, 0(r4)
+; CHECK-NEXT: stxv vs0, 0(r5)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: xxmrghw vs1, vs0, vs0
+; CHECK-BE-NEXT: xxmrglw vs0, vs0, vs0
+; CHECK-BE-NEXT: xvcvspdp vs1, vs1
+; CHECK-BE-NEXT: xvcvspdp vs0, vs0
+; CHECK-BE-NEXT: stxv vs1, 0(r4)
+; CHECK-BE-NEXT: stxv vs0, 0(r5)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = load <4 x float>, <4 x float>* %a, align 16
+ %shuffle = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle1 = shufflevector <4 x float> %0, <4 x float> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit4 = fpext <2 x float> %shuffle to <2 x double>
+ %vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
+ store <2 x double> %vecinit4, <2 x double>* %b, align 16
+ store <2 x double> %vecinit11, <2 x double>* %c, align 16
+ ret void
+}
+
+; Ensure we don't crash for wider types
+
+define dso_local void @test2(<16 x float>* nocapture readonly %a, <2 x double>* nocapture %b, <2 x double>* nocapture %c) {
+; CHECK-LABEL: test2:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lxv vs0, 0(r3)
+; CHECK-NEXT: xxsldwi vs1, vs0, vs0, 1
+; CHECK-NEXT: xscvspdpn f2, vs0
+; CHECK-NEXT: xxsldwi vs3, vs0, vs0, 3
+; CHECK-NEXT: xxswapd vs0, vs0
+; CHECK-NEXT: xscvspdpn f1, vs1
+; CHECK-NEXT: xscvspdpn f3, vs3
+; CHECK-NEXT: xscvspdpn f0, vs0
+; CHECK-NEXT: xxmrghd vs0, vs0, vs3
+; CHECK-NEXT: xxmrghd vs1, vs2, vs1
+; CHECK-NEXT: stxv vs0, 0(r4)
+; CHECK-NEXT: stxv vs1, 0(r5)
+; CHECK-NEXT: blr
+;
+; CHECK-BE-LABEL: test2:
+; CHECK-BE: # %bb.0: # %entry
+; CHECK-BE-NEXT: lxv vs0, 0(r3)
+; CHECK-BE-NEXT: xxswapd vs1, vs0
+; CHECK-BE-NEXT: xxsldwi vs2, vs0, vs0, 3
+; CHECK-BE-NEXT: xscvspdpn f3, vs0
+; CHECK-BE-NEXT: xxsldwi vs0, vs0, vs0, 1
+; CHECK-BE-NEXT: xscvspdpn f1, vs1
+; CHECK-BE-NEXT: xscvspdpn f2, vs2
+; CHECK-BE-NEXT: xscvspdpn f0, vs0
+; CHECK-BE-NEXT: xxmrghd vs0, vs3, vs0
+; CHECK-BE-NEXT: xxmrghd vs1, vs1, vs2
+; CHECK-BE-NEXT: stxv vs0, 0(r4)
+; CHECK-BE-NEXT: stxv vs1, 0(r5)
+; CHECK-BE-NEXT: blr
+entry:
+ %0 = load <16 x float>, <16 x float>* %a, align 16
+ %shuffle = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 0, i32 1>
+ %shuffle1 = shufflevector <16 x float> %0, <16 x float> undef, <2 x i32> <i32 2, i32 3>
+ %vecinit4 = fpext <2 x float> %shuffle to <2 x double>
+ %vecinit11 = fpext <2 x float> %shuffle1 to <2 x double>
+ store <2 x double> %vecinit4, <2 x double>* %b, align 16
+ store <2 x double> %vecinit11, <2 x double>* %c, align 16
+ ret void
+}