Diffstat (limited to 'llvm/test/CodeGen/SystemZ/vec-sub-01.ll'):
 llvm/test/CodeGen/SystemZ/vec-sub-01.ll | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/SystemZ/vec-sub-01.ll b/llvm/test/CodeGen/SystemZ/vec-sub-01.ll
index 24d4ba5a2bd..aabf1c9be4a 100644
--- a/llvm/test/CodeGen/SystemZ/vec-sub-01.ll
+++ b/llvm/test/CodeGen/SystemZ/vec-sub-01.ll
@@ -38,6 +38,33 @@ define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
ret <2 x i64> %ret
}
+; Test a v4f32 subtraction, as an example of an operation that needs to be
+; scalarized and reassembled. At present there's an unnecessary move that
+; could be avoided with smarter ordering. It also isn't important whether
+; the VREPFs use the result of the VLRs or use %v24 and %v26 directly.
+define <4 x float> @f5(<4 x float> %val1, <4 x float> %val2) {
+; CHECK-LABEL: f5:
+; CHECK-DAG: vlr %v[[A1:[0-5]]], %v24
+; CHECK-DAG: vlr %v[[A2:[0-5]]], %v26
+; CHECK-DAG: vrepf %v[[B1:[0-5]]], %v[[A1]], 1
+; CHECK-DAG: vrepf %v[[B2:[0-5]]], %v[[A2]], 1
+; CHECK-DAG: vrepf %v[[C1:[0-5]]], %v[[A1]], 2
+; CHECK-DAG: vrepf %v[[C2:[0-5]]], %v[[A2]], 2
+; CHECK-DAG: vrepf %v[[D1:[0-5]]], %v[[A1]], 3
+; CHECK-DAG: vrepf %v[[D2:[0-5]]], %v[[A2]], 3
+; CHECK-DAG: ler %f[[A1copy:[0-5]]], %f[[A1]]
+; CHECK-DAG: sebr %f[[A1copy]], %f[[A2]]
+; CHECK-DAG: sebr %f[[B1]], %f[[B2]]
+; CHECK-DAG: sebr %f[[C1]], %f[[C2]]
+; CHECK-DAG: sebr %f[[D1]], %f[[D2]]
+; CHECK-DAG: vmrhf [[HIGH:%v[0-9]+]], %v[[A1copy]], %v[[B1]]
+; CHECK-DAG: vmrhf [[LOW:%v[0-9]+]], %v[[C1]], %v[[D1]]
+; CHECK: vmrhg %v24, [[HIGH]], [[LOW]]
+; CHECK: br %r14
+ %ret = fsub <4 x float> %val1, %val2
+ ret <4 x float> %ret
+}
+
; Test a v2f64 subtraction.
define <2 x double> @f6(<2 x double> %dummy, <2 x double> %val1,
<2 x double> %val2) {