author     Ulrich Weigand <ulrich.weigand@de.ibm.com>    2015-05-05 19:27:45 +0000
committer  Ulrich Weigand <ulrich.weigand@de.ibm.com>    2015-05-05 19:27:45 +0000
commit     80b3af7ab3f8e76507cc4491be1460f1b1d8adb2 (patch)
tree       b9f2252bf5fb13308c25e7f05f918dfdae76aa77 /llvm/test/CodeGen/SystemZ/vec-perm-03.ll
parent     cd808237b24c7d6d0bb7ddf577dba37c31a06a50 (diff)
[SystemZ] Add CodeGen support for v4f32
The architecture doesn't really have any native v4f32 operations except v4f32->v2f64 and v2f64->v4f32 conversions, with only half of the v4f32 elements being used. Even so, using vector registers for <4 x float> and scalarising individual operations is much better than generating completely scalar code, since there's much less register pressure. It's also more efficient to do v4f32 comparisons by extending to 2 v2f64s, comparing those, then packing the result.

This particularly helps with llvmpipe.

Based on a patch by Richard Sandiford.

llvm-svn: 236523
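To make the comparison strategy concrete, here is a minimal IR-level sketch; the function name and predicate are illustrative and not taken from this commit. A <4 x float> compare like the one below is not scalarised element by element: each half of the vector is widened to v2f64, the two halves are compared, and the two results are packed back into a single v4i32 mask (on z13 roughly vldeb widenings, vfchdb compares and a vpkg pack, though that exact instruction sequence is an assumption here, not a CHECK pattern from this test file).

; Illustrative only -- not part of vec-perm-03.ll.
define <4 x i32> @cmp_v4f32(<4 x float> %a, <4 x float> %b) {
  ; The <4 x float> compare is legalized by extending each half to
  ; <2 x double>, comparing the halves, and packing the two results.
  %cmp = fcmp ogt <4 x float> %a, %b
  %mask = sext <4 x i1> %cmp to <4 x i32>
  ret <4 x i32> %mask
}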
Diffstat (limited to 'llvm/test/CodeGen/SystemZ/vec-perm-03.ll')
-rw-r--r--  llvm/test/CodeGen/SystemZ/vec-perm-03.ll  38
1 file changed, 38 insertions(+), 0 deletions(-)
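The three new functions in the hunk below (f13-f15) check that a splatted <4 x float> load selects vlrepf (vector load and replicate, fullword). The instruction takes a 12-bit unsigned displacement, so byte offsets up to 4095 are encodable: element 1023 (offset 4092) still fits, while element 1024 (offset 4096) is out of range and forces the base register to be adjusted first, which is why f15 expects an aghi before the vlrepf. A minimal sketch of how such a test is typically driven; the RUN line is not part of this excerpt, so the exact flags are an assumption based on the usual SystemZ z13 vector test pattern:

; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s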
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-03.ll b/llvm/test/CodeGen/SystemZ/vec-perm-03.ll
index c30a87601a4..663815549c3 100644
--- a/llvm/test/CodeGen/SystemZ/vec-perm-03.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-03.ll
@@ -158,6 +158,44 @@ define <2 x i64> @f12(i64 *%base) {
ret <2 x i64> %ret
}
+; Test a v4f32 replicating load with no offset.
+define <4 x float> @f13(float *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK: br %r14
+ %scalar = load float, float *%ptr
+ %val = insertelement <4 x float> undef, float %scalar, i32 0
+ %ret = shufflevector <4 x float> %val, <4 x float> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x float> %ret
+}
+
+; Test a v4f32 replicating load with the maximum in-range offset.
+define <4 x float> @f14(float *%base) {
+; CHECK-LABEL: f14:
+; CHECK: vlrepf %v24, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float, float *%base, i64 1023
+ %scalar = load float, float *%ptr
+ %val = insertelement <4 x float> undef, float %scalar, i32 0
+ %ret = shufflevector <4 x float> %val, <4 x float> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x float> %ret
+}
+
+; Test a v4f32 replicating load with the first out-of-range offset.
+define <4 x float> @f15(float *%base) {
+; CHECK-LABEL: f15:
+; CHECK: aghi %r2, 4096
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr float, float *%base, i64 1024
+ %scalar = load float, float *%ptr
+ %val = insertelement <4 x float> undef, float %scalar, i32 0
+ %ret = shufflevector <4 x float> %val, <4 x float> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x float> %ret
+}
; Test a v2f64 replicating load with no offset.
define <2 x double> @f16(double *%ptr) {