summaryrefslogtreecommitdiffstats
path: root/llvm/test
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test')
-rw-r--r--llvm/test/CodeGen/SystemZ/frame-19.ll314
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-abi-align.ll49
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-abs-01.ll146
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-abs-02.ll142
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-abs-03.ll138
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-abs-04.ll138
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-add-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-and-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-and-02.ll91
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-and-03.ll113
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-args-01.ll48
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-args-02.ll31
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-args-03.ll16
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-cmp-01.ll228
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-cmp-02.ll228
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-cmp-03.ll228
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-cmp-04.ll228
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-combine-01.ll107
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-01.ll55
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-02.ll47
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-03.ll43
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-04.ll43
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-07.ll229
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-08.ll189
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-09.ll169
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-10.ll169
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-13.ll193
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-14.ll113
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-15.ll85
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-const-16.ll85
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-ctlz-01.ll81
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-ctpop-01.ll53
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-cttz-01.ll81
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-div-01.ll62
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-max-01.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-max-02.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-max-03.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-max-04.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-min-01.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-min-02.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-min-03.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-min-04.ll83
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-01.ll35
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-02.ll93
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-03.ll93
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-04.ll121
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-05.ll161
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-06.ll13
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-07.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-08.ll284
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-09.ll237
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-10.ll328
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-11.ll93
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-12.ll103
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-13.ll47
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-move-14.ll76
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-mul-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-mul-02.ll36
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-neg-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-or-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-or-02.ll107
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-01.ll124
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-02.ll144
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-03.ll173
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-04.ll160
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-05.ll160
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-06.ll140
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-07.ll125
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-08.ll130
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-09.ll38
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-10.ll36
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-perm-11.ll35
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-02.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-03.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-04.ll134
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-05.ll134
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-06.ll134
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-shift-07.ll182
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-sub-01.ll39
-rw-r--r--llvm/test/CodeGen/SystemZ/vec-xor-01.ll39
81 files changed, 8711 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/SystemZ/frame-19.ll b/llvm/test/CodeGen/SystemZ/frame-19.ll
new file mode 100644
index 00000000000..f6e327c3ae3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/frame-19.ll
@@ -0,0 +1,314 @@
+; Test spilling of vector registers.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; We need to allocate a 16-byte spill slot and save the 8 call-saved FPRs.
+; The frame size should be exactly 160 + 16 + 8 * 8 = 240.
+define void @f1(<16 x i8> *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: aghi %r15, -240
+; CHECK-DAG: std %f8,
+; CHECK-DAG: std %f9,
+; CHECK-DAG: std %f10,
+; CHECK-DAG: std %f11,
+; CHECK-DAG: std %f12,
+; CHECK-DAG: std %f13,
+; CHECK-DAG: std %f14,
+; CHECK-DAG: std %f15,
+; CHECK: vst {{%v[0-9]+}}, 160(%r15)
+; CHECK: vl {{%v[0-9]+}}, 160(%r15)
+; CHECK-DAG: ld %f8,
+; CHECK-DAG: ld %f9,
+; CHECK-DAG: ld %f10,
+; CHECK-DAG: ld %f11,
+; CHECK-DAG: ld %f12,
+; CHECK-DAG: ld %f13,
+; CHECK-DAG: ld %f14,
+; CHECK-DAG: ld %f15,
+; CHECK: aghi %r15, 240
+; CHECK: br %r14
+ %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v8 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v9 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v10 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v11 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v12 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v13 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v14 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v15 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v16 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v17 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v18 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v19 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v20 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v21 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v22 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v23 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v24 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v25 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v26 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v27 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v28 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v29 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v30 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v31 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %vx = load volatile <16 x i8>, <16 x i8> *%ptr
+ store volatile <16 x i8> %vx, <16 x i8> *%ptr
+ store volatile <16 x i8> %v31, <16 x i8> *%ptr
+ store volatile <16 x i8> %v30, <16 x i8> *%ptr
+ store volatile <16 x i8> %v29, <16 x i8> *%ptr
+ store volatile <16 x i8> %v28, <16 x i8> *%ptr
+ store volatile <16 x i8> %v27, <16 x i8> *%ptr
+ store volatile <16 x i8> %v26, <16 x i8> *%ptr
+ store volatile <16 x i8> %v25, <16 x i8> *%ptr
+ store volatile <16 x i8> %v24, <16 x i8> *%ptr
+ store volatile <16 x i8> %v23, <16 x i8> *%ptr
+ store volatile <16 x i8> %v22, <16 x i8> *%ptr
+ store volatile <16 x i8> %v21, <16 x i8> *%ptr
+ store volatile <16 x i8> %v20, <16 x i8> *%ptr
+ store volatile <16 x i8> %v19, <16 x i8> *%ptr
+ store volatile <16 x i8> %v18, <16 x i8> *%ptr
+ store volatile <16 x i8> %v17, <16 x i8> *%ptr
+ store volatile <16 x i8> %v16, <16 x i8> *%ptr
+ store volatile <16 x i8> %v15, <16 x i8> *%ptr
+ store volatile <16 x i8> %v14, <16 x i8> *%ptr
+ store volatile <16 x i8> %v13, <16 x i8> *%ptr
+ store volatile <16 x i8> %v12, <16 x i8> *%ptr
+ store volatile <16 x i8> %v11, <16 x i8> *%ptr
+ store volatile <16 x i8> %v10, <16 x i8> *%ptr
+ store volatile <16 x i8> %v9, <16 x i8> *%ptr
+ store volatile <16 x i8> %v8, <16 x i8> *%ptr
+ store volatile <16 x i8> %v7, <16 x i8> *%ptr
+ store volatile <16 x i8> %v6, <16 x i8> *%ptr
+ store volatile <16 x i8> %v5, <16 x i8> *%ptr
+ store volatile <16 x i8> %v4, <16 x i8> *%ptr
+ store volatile <16 x i8> %v3, <16 x i8> *%ptr
+ store volatile <16 x i8> %v2, <16 x i8> *%ptr
+ store volatile <16 x i8> %v1, <16 x i8> *%ptr
+ store volatile <16 x i8> %v0, <16 x i8> *%ptr
+ ret void
+}
+
+; Like f1, but no 16-byte slot should be needed.
+define void @f2(<16 x i8> *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: aghi %r15, -224
+; CHECK-DAG: std %f8,
+; CHECK-DAG: std %f9,
+; CHECK-DAG: std %f10,
+; CHECK-DAG: std %f11,
+; CHECK-DAG: std %f12,
+; CHECK-DAG: std %f13,
+; CHECK-DAG: std %f14,
+; CHECK-DAG: std %f15,
+; CHECK-NOT: vst {{.*}}(%r15)
+; CHECK-NOT: vl {{.*}}(%r15)
+; CHECK-DAG: ld %f8,
+; CHECK-DAG: ld %f9,
+; CHECK-DAG: ld %f10,
+; CHECK-DAG: ld %f11,
+; CHECK-DAG: ld %f12,
+; CHECK-DAG: ld %f13,
+; CHECK-DAG: ld %f14,
+; CHECK-DAG: ld %f15,
+; CHECK: aghi %r15, 224
+; CHECK: br %r14
+ %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v8 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v9 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v10 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v11 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v12 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v13 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v14 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v15 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v16 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v17 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v18 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v19 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v20 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v21 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v22 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v23 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v24 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v25 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v26 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v27 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v28 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v29 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v30 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v31 = load volatile <16 x i8>, <16 x i8> *%ptr
+ store volatile <16 x i8> %v31, <16 x i8> *%ptr
+ store volatile <16 x i8> %v30, <16 x i8> *%ptr
+ store volatile <16 x i8> %v29, <16 x i8> *%ptr
+ store volatile <16 x i8> %v28, <16 x i8> *%ptr
+ store volatile <16 x i8> %v27, <16 x i8> *%ptr
+ store volatile <16 x i8> %v26, <16 x i8> *%ptr
+ store volatile <16 x i8> %v25, <16 x i8> *%ptr
+ store volatile <16 x i8> %v24, <16 x i8> *%ptr
+ store volatile <16 x i8> %v23, <16 x i8> *%ptr
+ store volatile <16 x i8> %v22, <16 x i8> *%ptr
+ store volatile <16 x i8> %v21, <16 x i8> *%ptr
+ store volatile <16 x i8> %v20, <16 x i8> *%ptr
+ store volatile <16 x i8> %v19, <16 x i8> *%ptr
+ store volatile <16 x i8> %v18, <16 x i8> *%ptr
+ store volatile <16 x i8> %v17, <16 x i8> *%ptr
+ store volatile <16 x i8> %v16, <16 x i8> *%ptr
+ store volatile <16 x i8> %v15, <16 x i8> *%ptr
+ store volatile <16 x i8> %v14, <16 x i8> *%ptr
+ store volatile <16 x i8> %v13, <16 x i8> *%ptr
+ store volatile <16 x i8> %v12, <16 x i8> *%ptr
+ store volatile <16 x i8> %v11, <16 x i8> *%ptr
+ store volatile <16 x i8> %v10, <16 x i8> *%ptr
+ store volatile <16 x i8> %v9, <16 x i8> *%ptr
+ store volatile <16 x i8> %v8, <16 x i8> *%ptr
+ store volatile <16 x i8> %v7, <16 x i8> *%ptr
+ store volatile <16 x i8> %v6, <16 x i8> *%ptr
+ store volatile <16 x i8> %v5, <16 x i8> *%ptr
+ store volatile <16 x i8> %v4, <16 x i8> *%ptr
+ store volatile <16 x i8> %v3, <16 x i8> *%ptr
+ store volatile <16 x i8> %v2, <16 x i8> *%ptr
+ store volatile <16 x i8> %v1, <16 x i8> *%ptr
+ store volatile <16 x i8> %v0, <16 x i8> *%ptr
+ ret void
+}
+
+; Like f2, but only %f8 should be saved.
+define void @f3(<16 x i8> *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: aghi %r15, -168
+; CHECK-DAG: std %f8,
+; CHECK-NOT: vst {{.*}}(%r15)
+; CHECK-NOT: vl {{.*}}(%r15)
+; CHECK-NOT: %v9
+; CHECK-NOT: %v10
+; CHECK-NOT: %v11
+; CHECK-NOT: %v12
+; CHECK-NOT: %v13
+; CHECK-NOT: %v14
+; CHECK-NOT: %v15
+; CHECK-DAG: ld %f8,
+; CHECK: aghi %r15, 168
+; CHECK: br %r14
+ %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v8 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v16 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v17 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v18 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v19 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v20 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v21 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v22 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v23 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v24 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v25 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v26 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v27 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v28 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v29 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v30 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v31 = load volatile <16 x i8>, <16 x i8> *%ptr
+ store volatile <16 x i8> %v31, <16 x i8> *%ptr
+ store volatile <16 x i8> %v30, <16 x i8> *%ptr
+ store volatile <16 x i8> %v29, <16 x i8> *%ptr
+ store volatile <16 x i8> %v28, <16 x i8> *%ptr
+ store volatile <16 x i8> %v27, <16 x i8> *%ptr
+ store volatile <16 x i8> %v26, <16 x i8> *%ptr
+ store volatile <16 x i8> %v25, <16 x i8> *%ptr
+ store volatile <16 x i8> %v24, <16 x i8> *%ptr
+ store volatile <16 x i8> %v23, <16 x i8> *%ptr
+ store volatile <16 x i8> %v22, <16 x i8> *%ptr
+ store volatile <16 x i8> %v21, <16 x i8> *%ptr
+ store volatile <16 x i8> %v20, <16 x i8> *%ptr
+ store volatile <16 x i8> %v19, <16 x i8> *%ptr
+ store volatile <16 x i8> %v18, <16 x i8> *%ptr
+ store volatile <16 x i8> %v17, <16 x i8> *%ptr
+ store volatile <16 x i8> %v16, <16 x i8> *%ptr
+ store volatile <16 x i8> %v8, <16 x i8> *%ptr
+ store volatile <16 x i8> %v7, <16 x i8> *%ptr
+ store volatile <16 x i8> %v6, <16 x i8> *%ptr
+ store volatile <16 x i8> %v5, <16 x i8> *%ptr
+ store volatile <16 x i8> %v4, <16 x i8> *%ptr
+ store volatile <16 x i8> %v3, <16 x i8> *%ptr
+ store volatile <16 x i8> %v2, <16 x i8> *%ptr
+ store volatile <16 x i8> %v1, <16 x i8> *%ptr
+ store volatile <16 x i8> %v0, <16 x i8> *%ptr
+ ret void
+}
+
+; Like f2, but no registers should be saved.
+define void @f4(<16 x i8> *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK-NOT: %r15
+; CHECK: br %r14
+ %v0 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v1 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v2 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v3 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v4 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v5 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v6 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v7 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v16 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v17 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v18 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v19 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v20 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v21 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v22 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v23 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v24 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v25 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v26 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v27 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v28 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v29 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v30 = load volatile <16 x i8>, <16 x i8> *%ptr
+ %v31 = load volatile <16 x i8>, <16 x i8> *%ptr
+ store volatile <16 x i8> %v31, <16 x i8> *%ptr
+ store volatile <16 x i8> %v30, <16 x i8> *%ptr
+ store volatile <16 x i8> %v29, <16 x i8> *%ptr
+ store volatile <16 x i8> %v28, <16 x i8> *%ptr
+ store volatile <16 x i8> %v27, <16 x i8> *%ptr
+ store volatile <16 x i8> %v26, <16 x i8> *%ptr
+ store volatile <16 x i8> %v25, <16 x i8> *%ptr
+ store volatile <16 x i8> %v24, <16 x i8> *%ptr
+ store volatile <16 x i8> %v23, <16 x i8> *%ptr
+ store volatile <16 x i8> %v22, <16 x i8> *%ptr
+ store volatile <16 x i8> %v21, <16 x i8> *%ptr
+ store volatile <16 x i8> %v20, <16 x i8> *%ptr
+ store volatile <16 x i8> %v19, <16 x i8> *%ptr
+ store volatile <16 x i8> %v18, <16 x i8> *%ptr
+ store volatile <16 x i8> %v17, <16 x i8> *%ptr
+ store volatile <16 x i8> %v16, <16 x i8> *%ptr
+ store volatile <16 x i8> %v7, <16 x i8> *%ptr
+ store volatile <16 x i8> %v6, <16 x i8> *%ptr
+ store volatile <16 x i8> %v5, <16 x i8> *%ptr
+ store volatile <16 x i8> %v4, <16 x i8> *%ptr
+ store volatile <16 x i8> %v3, <16 x i8> *%ptr
+ store volatile <16 x i8> %v2, <16 x i8> *%ptr
+ store volatile <16 x i8> %v1, <16 x i8> *%ptr
+ store volatile <16 x i8> %v0, <16 x i8> *%ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-abi-align.ll b/llvm/test/CodeGen/SystemZ/vec-abi-align.ll
new file mode 100644
index 00000000000..01b97a8583e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-abi-align.ll
@@ -0,0 +1,49 @@
+; Verify that we use the vector ABI datalayout if and only if
+; the vector facility is present.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=generic | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z196 | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=zEC12 | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=vector | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=+vector | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=-vector,vector | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=-vector,+vector | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=-vector | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=vector,-vector | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mattr=+vector,-vector | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -mattr=-vector | \
+; RUN: FileCheck -check-prefix=CHECK-NOVECTOR %s
+
+%struct.S = type { i8, <2 x i64> }
+
+define void @test(%struct.S* %s) nounwind {
+; CHECK-VECTOR-LABEL: @test
+; CHECK-VECTOR: vl %v0, 8(%r2)
+; CHECK-NOVECTOR-LABEL: @test
+; CHECK-NOVECTOR-DAG: agsi 16(%r2), 1
+; CHECK-NOVECTOR-DAG: agsi 24(%r2), 1
+ %ptr = getelementptr %struct.S, %struct.S* %s, i64 0, i32 1
+ %vec = load <2 x i64>, <2 x i64>* %ptr
+ %add = add <2 x i64> %vec, <i64 1, i64 1>
+ store <2 x i64> %add, <2 x i64>* %ptr
+ ret void
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/vec-abs-01.ll b/llvm/test/CodeGen/SystemZ/vec-abs-01.ll
new file mode 100644
index 00000000000..aec3b9314f1
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-abs-01.ll
@@ -0,0 +1,146 @@
+; Test v16i8 absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <16 x i8> @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlpb %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp slt <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %val
+ ret <16 x i8> %ret
+}
+
+; Test with sle.
+define <16 x i8> @f2(<16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlpb %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sle <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %val
+ ret <16 x i8> %ret
+}
+
+; Test with sgt.
+define <16 x i8> @f3(<16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlpb %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sgt <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %val, <16 x i8> %neg
+ ret <16 x i8> %ret
+}
+
+; Test with sge.
+define <16 x i8> @f4(<16 x i8> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlpb %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sge <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %val, <16 x i8> %neg
+ ret <16 x i8> %ret
+}
+
+; Test that negative absolute uses VLPB too. There is no vector equivalent
+; of LOAD NEGATIVE.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %abs = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %val
+ %ret = sub <16 x i8> zeroinitializer, %abs
+ ret <16 x i8> %ret
+}
+
+; Try another form of negative absolute (slt version).
+define <16 x i8> @f6(<16 x i8> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %val, <16 x i8> %neg
+ ret <16 x i8> %ret
+}
+
+; Test with sle.
+define <16 x i8> @f7(<16 x i8> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sle <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %val, <16 x i8> %neg
+ ret <16 x i8> %ret
+}
+
+; Test with sgt.
+define <16 x i8> @f8(<16 x i8> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sgt <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %val
+ ret <16 x i8> %ret
+}
+
+; Test with sge.
+define <16 x i8> @f9(<16 x i8> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sge <16 x i8> %val, zeroinitializer
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %ret = select <16 x i1> %cmp, <16 x i8> %neg, <16 x i8> %val
+ ret <16 x i8> %ret
+}
+
+; Test with an SRA-based boolean vector.
+define <16 x i8> @f10(<16 x i8> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlpb %v24, %v24
+; CHECK: br %r14
+ %shr = ashr <16 x i8> %val,
+ <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %and1 = and <16 x i8> %shr, %neg
+ %not = xor <16 x i8> %shr,
+ <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %and2 = and <16 x i8> %not, %val
+ %ret = or <16 x i8> %and1, %and2
+ ret <16 x i8> %ret
+}
+
+; ...and again in reverse
+define <16 x i8> @f11(<16 x i8> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vlpb [[REG:%v[0-9]+]], %v24
+; CHECK: vlcb %v24, [[REG]]
+; CHECK: br %r14
+ %shr = ashr <16 x i8> %val,
+ <i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7>
+ %and1 = and <16 x i8> %shr, %val
+ %not = xor <16 x i8> %shr,
+ <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %neg = sub <16 x i8> zeroinitializer, %val
+ %and2 = and <16 x i8> %not, %neg
+ %ret = or <16 x i8> %and1, %and2
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-abs-02.ll b/llvm/test/CodeGen/SystemZ/vec-abs-02.ll
new file mode 100644
index 00000000000..c5af619f0ba
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-abs-02.ll
@@ -0,0 +1,142 @@
+; Test v8i16 absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <8 x i16> @f1(<8 x i16> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlph %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp slt <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
+ ret <8 x i16> %ret
+}
+
+; Test with sle.
+define <8 x i16> @f2(<8 x i16> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlph %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sle <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
+ ret <8 x i16> %ret
+}
+
+; Test with sgt.
+define <8 x i16> @f3(<8 x i16> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlph %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sgt <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %val, <8 x i16> %neg
+ ret <8 x i16> %ret
+}
+
+; Test with sge.
+define <8 x i16> @f4(<8 x i16> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlph %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sge <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %val, <8 x i16> %neg
+ ret <8 x i16> %ret
+}
+
+; Test that negative absolute uses VLPH too. There is no vector equivalent
+; of LOAD NEGATIVE.
+define <8 x i16> @f5(<8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %abs = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
+ %ret = sub <8 x i16> zeroinitializer, %abs
+ ret <8 x i16> %ret
+}
+
+; Try another form of negative absolute (slt version).
+define <8 x i16> @f6(<8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %val, <8 x i16> %neg
+ ret <8 x i16> %ret
+}
+
+; Test with sle.
+define <8 x i16> @f7(<8 x i16> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sle <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %val, <8 x i16> %neg
+ ret <8 x i16> %ret
+}
+
+; Test with sgt.
+define <8 x i16> @f8(<8 x i16> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sgt <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
+ ret <8 x i16> %ret
+}
+
+; Test with sge.
+define <8 x i16> @f9(<8 x i16> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sge <8 x i16> %val, zeroinitializer
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %ret = select <8 x i1> %cmp, <8 x i16> %neg, <8 x i16> %val
+ ret <8 x i16> %ret
+}
+
+; Test with an SRA-based boolean vector.
+define <8 x i16> @f10(<8 x i16> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlph %v24, %v24
+; CHECK: br %r14
+ %shr = ashr <8 x i16> %val,
+ <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %and1 = and <8 x i16> %shr, %neg
+ %not = xor <8 x i16> %shr,
+ <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %and2 = and <8 x i16> %not, %val
+ %ret = or <8 x i16> %and1, %and2
+ ret <8 x i16> %ret
+}
+
+; ...and again in reverse
+define <8 x i16> @f11(<8 x i16> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vlph [[REG:%v[0-9]+]], %v24
+; CHECK: vlch %v24, [[REG]]
+; CHECK: br %r14
+ %shr = ashr <8 x i16> %val,
+ <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
+ %and1 = and <8 x i16> %shr, %val
+ %not = xor <8 x i16> %shr,
+ <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
+ %neg = sub <8 x i16> zeroinitializer, %val
+ %and2 = and <8 x i16> %not, %neg
+ %ret = or <8 x i16> %and1, %and2
+ ret <8 x i16> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-abs-03.ll b/llvm/test/CodeGen/SystemZ/vec-abs-03.ll
new file mode 100644
index 00000000000..cb17a8895e1
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-abs-03.ll
@@ -0,0 +1,138 @@
+; Test v4i32 absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <4 x i32> @f1(<4 x i32> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlpf %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp slt <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %neg, <4 x i32> %val
+ ret <4 x i32> %ret
+}
+
+; Test with sle.
+define <4 x i32> @f2(<4 x i32> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlpf %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sle <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %neg, <4 x i32> %val
+ ret <4 x i32> %ret
+}
+
+; Test with sgt.
+define <4 x i32> @f3(<4 x i32> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlpf %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sgt <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %val, <4 x i32> %neg
+ ret <4 x i32> %ret
+}
+
+; Test with sge.
+define <4 x i32> @f4(<4 x i32> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlpf %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sge <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %val, <4 x i32> %neg
+ ret <4 x i32> %ret
+}
+
+; Test that negative absolute uses VLPF too. There is no vector equivalent
+; of LOAD NEGATIVE.
+define <4 x i32> @f5(<4 x i32> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %abs = select <4 x i1> %cmp, <4 x i32> %neg, <4 x i32> %val
+ %ret = sub <4 x i32> zeroinitializer, %abs
+ ret <4 x i32> %ret
+}
+
+; Try another form of negative absolute (slt version).
+define <4 x i32> @f6(<4 x i32> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %val, <4 x i32> %neg
+ ret <4 x i32> %ret
+}
+
+; Test with sle.
+define <4 x i32> @f7(<4 x i32> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sle <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %val, <4 x i32> %neg
+ ret <4 x i32> %ret
+}
+
+; Test with sgt.
+define <4 x i32> @f8(<4 x i32> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sgt <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %neg, <4 x i32> %val
+ ret <4 x i32> %ret
+}
+
+; Test with sge.
+define <4 x i32> @f9(<4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sge <4 x i32> %val, zeroinitializer
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %ret = select <4 x i1> %cmp, <4 x i32> %neg, <4 x i32> %val
+ ret <4 x i32> %ret
+}
+
+; Test with an SRA-based boolean vector.
+define <4 x i32> @f10(<4 x i32> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlpf %v24, %v24
+; CHECK: br %r14
+ %shr = ashr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %and1 = and <4 x i32> %shr, %neg
+ %not = xor <4 x i32> %shr, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and2 = and <4 x i32> %not, %val
+ %ret = or <4 x i32> %and1, %and2
+ ret <4 x i32> %ret
+}
+
+; ...and again in reverse
+define <4 x i32> @f11(<4 x i32> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vlpf [[REG:%v[0-9]+]], %v24
+; CHECK: vlcf %v24, [[REG]]
+; CHECK: br %r14
+ %shr = ashr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
+ %and1 = and <4 x i32> %shr, %val
+ %not = xor <4 x i32> %shr, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %neg = sub <4 x i32> zeroinitializer, %val
+ %and2 = and <4 x i32> %not, %neg
+ %ret = or <4 x i32> %and1, %and2
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-abs-04.ll b/llvm/test/CodeGen/SystemZ/vec-abs-04.ll
new file mode 100644
index 00000000000..31c489b00b3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-abs-04.ll
@@ -0,0 +1,138 @@
+; Test v2i64 absolute.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <2 x i64> @f1(<2 x i64> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlpg %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp slt <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %neg, <2 x i64> %val
+ ret <2 x i64> %ret
+}
+
+; Test with sle.
+define <2 x i64> @f2(<2 x i64> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlpg %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sle <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %neg, <2 x i64> %val
+ ret <2 x i64> %ret
+}
+
+; Test with sgt.
+define <2 x i64> @f3(<2 x i64> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlpg %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sgt <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %val, <2 x i64> %neg
+ ret <2 x i64> %ret
+}
+
+; Test with sge.
+define <2 x i64> @f4(<2 x i64> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlpg %v24, %v24
+; CHECK: br %r14
+ %cmp = icmp sge <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %val, <2 x i64> %neg
+ ret <2 x i64> %ret
+}
+
+; Test that negative absolute uses VLPG too. There is no vector equivalent
+; of LOAD NEGATIVE.
+define <2 x i64> @f5(<2 x i64> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %abs = select <2 x i1> %cmp, <2 x i64> %neg, <2 x i64> %val
+ %ret = sub <2 x i64> zeroinitializer, %abs
+ ret <2 x i64> %ret
+}
+
+; Try another form of negative absolute (slt version).
+define <2 x i64> @f6(<2 x i64> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp slt <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %val, <2 x i64> %neg
+ ret <2 x i64> %ret
+}
+
+; Test with sle.
+define <2 x i64> @f7(<2 x i64> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sle <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %val, <2 x i64> %neg
+ ret <2 x i64> %ret
+}
+
+; Test with sgt.
+define <2 x i64> @f8(<2 x i64> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sgt <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %neg, <2 x i64> %val
+ ret <2 x i64> %ret
+}
+
+; Test with sge.
+define <2 x i64> @f9(<2 x i64> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %cmp = icmp sge <2 x i64> %val, zeroinitializer
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %ret = select <2 x i1> %cmp, <2 x i64> %neg, <2 x i64> %val
+ ret <2 x i64> %ret
+}
+
+; Test with an SRA-based boolean vector.
+define <2 x i64> @f10(<2 x i64> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlpg %v24, %v24
+; CHECK: br %r14
+ %shr = ashr <2 x i64> %val, <i64 63, i64 63>
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %and1 = and <2 x i64> %shr, %neg
+ %not = xor <2 x i64> %shr, <i64 -1, i64 -1>
+ %and2 = and <2 x i64> %not, %val
+ %ret = or <2 x i64> %and1, %and2
+ ret <2 x i64> %ret
+}
+
+; ...and again in reverse
+define <2 x i64> @f11(<2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vlpg [[REG:%v[0-9]+]], %v24
+; CHECK: vlcg %v24, [[REG]]
+; CHECK: br %r14
+ %shr = ashr <2 x i64> %val, <i64 63, i64 63>
+ %and1 = and <2 x i64> %shr, %val
+ %not = xor <2 x i64> %shr, <i64 -1, i64 -1>
+ %neg = sub <2 x i64> zeroinitializer, %val
+ %and2 = and <2 x i64> %not, %neg
+ %ret = or <2 x i64> %and1, %and2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-add-01.ll b/llvm/test/CodeGen/SystemZ/vec-add-01.ll
new file mode 100644
index 00000000000..a59a8da1cf8
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-add-01.ll
@@ -0,0 +1,39 @@
+; Test vector addition.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 addition.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vab %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = add <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 addition.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vah %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = add <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 addition.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vaf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = add <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 addition.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vag %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = add <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-and-01.ll b/llvm/test/CodeGen/SystemZ/vec-and-01.ll
new file mode 100644
index 00000000000..d467de69cea
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-and-01.ll
@@ -0,0 +1,39 @@
+; Test vector AND.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 AND.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 AND.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 AND.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 AND.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vn %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = and <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-and-02.ll b/llvm/test/CodeGen/SystemZ/vec-and-02.ll
new file mode 100644
index 00000000000..30bc9241689
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-and-02.ll
@@ -0,0 +1,91 @@
+; Test vector AND-NOT.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 AND-NOT.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vnc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <16 x i8> %val2, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = and <16 x i8> %val1, %not
+ ret <16 x i8> %ret
+}
+
+; ...and again with the reverse.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vnc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <16 x i8> %val1, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %ret = and <16 x i8> %not, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 AND-NOT.
+define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vnc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <8 x i16> %val2, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = and <8 x i16> %val1, %not
+ ret <8 x i16> %ret
+}
+
+; ...and again with the reverse.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vnc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <8 x i16> %val1, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %ret = and <8 x i16> %not, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 AND-NOT.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vnc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <4 x i32> %val2, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = and <4 x i32> %val1, %not
+ ret <4 x i32> %ret
+}
+
+; ...and again with the reverse.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vnc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <4 x i32> %val1, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %ret = and <4 x i32> %not, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 AND-NOT.
+define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vnc %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <2 x i64> %val2, <i64 -1, i64 -1>
+ %ret = and <2 x i64> %val1, %not
+ ret <2 x i64> %ret
+}
+
+; ...and again with the reverse.
+define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vnc %v24, %v28, %v26
+; CHECK: br %r14
+ %not = xor <2 x i64> %val1, <i64 -1, i64 -1>
+ %ret = and <2 x i64> %not, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-and-03.ll b/llvm/test/CodeGen/SystemZ/vec-and-03.ll
new file mode 100644
index 00000000000..c73d570fb7b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-and-03.ll
@@ -0,0 +1,113 @@
+; Test vector zero extensions, which need to be implemented as ANDs.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i1->v16i8 extension.
+define <16 x i8> @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vrepib [[REG:%v[0-9]+]], 1
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <16 x i8> %val to <16 x i1>
+ %ret = zext <16 x i1> %trunc to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i1->v8i16 extension.
+define <8 x i16> @f2(<8 x i16> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vrepih [[REG:%v[0-9]+]], 1
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <8 x i16> %val to <8 x i1>
+ %ret = zext <8 x i1> %trunc to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i8->v8i16 extension.
+define <8 x i16> @f3(<8 x i16> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vgbm [[REG:%v[0-9]+]], 21845
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <8 x i16> %val to <8 x i8>
+ %ret = zext <8 x i8> %trunc to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i1->v4i32 extension.
+define <4 x i32> @f4(<4 x i32> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vrepif [[REG:%v[0-9]+]], 1
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i1>
+ %ret = zext <4 x i1> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i8->v4i32 extension.
+define <4 x i32> @f5(<4 x i32> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vgbm [[REG:%v[0-9]+]], 4369
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i8>
+ %ret = zext <4 x i8> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i16->v4i32 extension.
+define <4 x i32> @f6(<4 x i32> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vgbm [[REG:%v[0-9]+]], 13107
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i16>
+ %ret = zext <4 x i16> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v2i1->v2i64 extension.
+define <2 x i64> @f7(<2 x i64> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vrepig [[REG:%v[0-9]+]], 1
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i1>
+ %ret = zext <2 x i1> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i8->v2i64 extension.
+define <2 x i64> @f8(<2 x i64> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vgbm [[REG:%v[0-9]+]], 257
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i8>
+ %ret = zext <2 x i8> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i16->v2i64 extension.
+define <2 x i64> @f9(<2 x i64> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vgbm [[REG:%v[0-9]+]], 771
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i16>
+ %ret = zext <2 x i16> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i32->v2i64 extension.
+define <2 x i64> @f10(<2 x i64> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vgbm [[REG:%v[0-9]+]], 3855
+; CHECK: vn %v24, %v24, [[REG]]
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i32>
+ %ret = zext <2 x i32> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-args-01.ll b/llvm/test/CodeGen/SystemZ/vec-args-01.ll
new file mode 100644
index 00000000000..e07ab7447b2
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-args-01.ll
@@ -0,0 +1,48 @@
+; Test the handling of named vector arguments.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s -check-prefix=CHECK-VEC
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s -check-prefix=CHECK-STACK
+
+; This routine has 6 integer arguments, which fill up r2-r6 and
+; the stack slot at offset 160, and 10 vector arguments, which
+; fill up v24-v31 and the two double-wide stack slots at 168
+; and 184.
+declare void @bar(i64, i64, i64, i64, i64, i64,
+ <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>,
+ <4 x i32>, <4 x i32>, <4 x i32>, <4 x i32>,
+ <4 x i32>, <4 x i32>)
+
+define void @foo() {
+; CHECK-VEC-LABEL: foo:
+; CHECK-VEC-DAG: vrepif %v24, 1
+; CHECK-VEC-DAG: vrepif %v26, 2
+; CHECK-VEC-DAG: vrepif %v28, 3
+; CHECK-VEC-DAG: vrepif %v30, 4
+; CHECK-VEC-DAG: vrepif %v25, 5
+; CHECK-VEC-DAG: vrepif %v27, 6
+; CHECK-VEC-DAG: vrepif %v29, 7
+; CHECK-VEC-DAG: vrepif %v31, 8
+; CHECK-VEC: brasl %r14, bar@PLT
+;
+; CHECK-STACK-LABEL: foo:
+; CHECK-STACK: aghi %r15, -200
+; CHECK-STACK-DAG: mvghi 160(%r15), 6
+; CHECK-STACK-DAG: vrepif [[REG1:%v[0-9]+]], 9
+; CHECK-STACK-DAG: vst [[REG1]], 168(%r15)
+; CHECK-STACK-DAG: vrepif [[REG2:%v[0-9]+]], 10
+; CHECK-STACK-DAG: vst [[REG2]], 184(%r15)
+; CHECK-STACK: brasl %r14, bar@PLT
+
+ call void @bar (i64 1, i64 2, i64 3, i64 4, i64 5, i64 6,
+ <4 x i32> <i32 1, i32 1, i32 1, i32 1>,
+ <4 x i32> <i32 2, i32 2, i32 2, i32 2>,
+ <4 x i32> <i32 3, i32 3, i32 3, i32 3>,
+ <4 x i32> <i32 4, i32 4, i32 4, i32 4>,
+ <4 x i32> <i32 5, i32 5, i32 5, i32 5>,
+ <4 x i32> <i32 6, i32 6, i32 6, i32 6>,
+ <4 x i32> <i32 7, i32 7, i32 7, i32 7>,
+ <4 x i32> <i32 8, i32 8, i32 8, i32 8>,
+ <4 x i32> <i32 9, i32 9, i32 9, i32 9>,
+ <4 x i32> <i32 10, i32 10, i32 10, i32 10>)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-args-02.ll b/llvm/test/CodeGen/SystemZ/vec-args-02.ll
new file mode 100644
index 00000000000..b6081598326
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-args-02.ll
@@ -0,0 +1,31 @@
+; Test the handling of unnamed vector arguments.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s -check-prefix=CHECK-VEC
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s -check-prefix=CHECK-STACK
+
+; This routine is called with two named vector arguments (passed
+; in %v24 and %v26) and two unnamed vector arguments (passed
+; in the double-wide stack slots at 160 and 176).
+declare void @bar(<4 x i32>, <4 x i32>, ...)
+
+define void @foo() {
+; CHECK-VEC-LABEL: foo:
+; CHECK-VEC-DAG: vrepif %v24, 1
+; CHECK-VEC-DAG: vrepif %v26, 2
+; CHECK-VEC: brasl %r14, bar@PLT
+;
+; CHECK-STACK-LABEL: foo:
+; CHECK-STACK: aghi %r15, -192
+; CHECK-STACK-DAG: vrepif [[REG1:%v[0-9]+]], 3
+; CHECK-STACK-DAG: vst [[REG1]], 160(%r15)
+; CHECK-STACK-DAG: vrepif [[REG2:%v[0-9]+]], 4
+; CHECK-STACK-DAG: vst [[REG2]], 176(%r15)
+; CHECK-STACK: brasl %r14, bar@PLT
+
+ call void (<4 x i32>, <4 x i32>, ...) @bar
+ (<4 x i32> <i32 1, i32 1, i32 1, i32 1>,
+ <4 x i32> <i32 2, i32 2, i32 2, i32 2>,
+ <4 x i32> <i32 3, i32 3, i32 3, i32 3>,
+ <4 x i32> <i32 4, i32 4, i32 4, i32 4>)
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-args-03.ll b/llvm/test/CodeGen/SystemZ/vec-args-03.ll
new file mode 100644
index 00000000000..e9f51c5e9ee
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-args-03.ll
@@ -0,0 +1,16 @@
+; Test the handling of incoming vector arguments.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; This routine has 10 vector arguments, which fill up %v24-%v31 and
+; the two double-wide stack slots at 160 and 176.
+define <4 x i32> @foo(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3, <4 x i32> %v4,
+ <4 x i32> %v5, <4 x i32> %v6, <4 x i32> %v7, <4 x i32> %v8,
+ <4 x i32> %v9, <4 x i32> %v10) {
+; CHECK-LABEL: foo:
+; CHECK: vl [[REG1:%v[0-9]+]], 176(%r15)
+; CHECK: vsf %v24, %v26, [[REG1]]
+; CHECK: br %r14
+ %y = sub <4 x i32> %v2, %v10
+ ret <4 x i32> %y
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-cmp-01.ll b/llvm/test/CodeGen/SystemZ/vec-cmp-01.ll
new file mode 100644
index 00000000000..a7546db8d7f
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cmp-01.ll
@@ -0,0 +1,228 @@
+; Test v16i8 comparisons.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test eq.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vceqb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test ne.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vceqb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test sgt.
+define <16 x i8> @f3(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vchb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test sge.
+define <16 x i8> @f4(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vchb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test sle.
+define <16 x i8> @f5(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vchb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test slt.
+define <16 x i8> @f6(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vchb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test ugt.
+define <16 x i8> @f7(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vchlb %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test uge.
+define <16 x i8> @f8(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test ule.
+define <16 x i8> @f9(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test ult.
+define <16 x i8> @f10(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vchlb %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <16 x i8> %val1, %val2
+ %ret = sext <16 x i1> %cmp to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test eq selects.
+define <16 x i8> @f11(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f11:
+; CHECK: vceqb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test ne selects.
+define <16 x i8> @f12(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f12:
+; CHECK: vceqb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test sgt selects.
+define <16 x i8> @f13(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f13:
+; CHECK: vchb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test sge selects.
+define <16 x i8> @f14(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f14:
+; CHECK: vchb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test sle selects.
+define <16 x i8> @f15(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vchb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test slt selects.
+define <16 x i8> @f16(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f16:
+; CHECK: vchb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test ugt selects.
+define <16 x i8> @f17(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test uge selects.
+define <16 x i8> @f18(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test ule selects.
+define <16 x i8> @f19(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
+
+; Test ult selects.
+define <16 x i8> @f20(<16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3, <16 x i8> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vchlb [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val3, <16 x i8> %val4
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-cmp-02.ll b/llvm/test/CodeGen/SystemZ/vec-cmp-02.ll
new file mode 100644
index 00000000000..78fb46c01c0
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cmp-02.ll
@@ -0,0 +1,228 @@
+; Test v8i16 comparisons.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test eq.
+define <8 x i16> @f1(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vceqh %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test ne.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vceqh [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test sgt.
+define <8 x i16> @f3(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vchh %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test sge.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vchh [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test sle.
+define <8 x i16> @f5(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vchh [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test slt.
+define <8 x i16> @f6(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vchh %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test ugt.
+define <8 x i16> @f7(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vchlh %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test uge.
+define <8 x i16> @f8(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test ule.
+define <8 x i16> @f9(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test ult.
+define <8 x i16> @f10(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vchlh %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <8 x i16> %val1, %val2
+ %ret = sext <8 x i1> %cmp to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test eq selects.
+define <8 x i16> @f11(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f11:
+; CHECK: vceqh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test ne selects.
+define <8 x i16> @f12(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f12:
+; CHECK: vceqh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test sgt selects.
+define <8 x i16> @f13(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f13:
+; CHECK: vchh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test sge selects.
+define <8 x i16> @f14(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f14:
+; CHECK: vchh [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test sle selects.
+define <8 x i16> @f15(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vchh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test slt selects.
+define <8 x i16> @f16(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f16:
+; CHECK: vchh [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test ugt selects.
+define <8 x i16> @f17(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test uge selects.
+define <8 x i16> @f18(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test ule selects.
+define <8 x i16> @f19(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
+
+; Test ult selects.
+define <8 x i16> @f20(<8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3, <8 x i16> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vchlh [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val3, <8 x i16> %val4
+ ret <8 x i16> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-cmp-03.ll b/llvm/test/CodeGen/SystemZ/vec-cmp-03.ll
new file mode 100644
index 00000000000..4b070acc935
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cmp-03.ll
@@ -0,0 +1,228 @@
+; Test v4i32 comparisons.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test eq.
+define <4 x i32> @f1(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vceqf %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ne.
+define <4 x i32> @f2(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vceqf [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test sgt.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vchf %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test sge.
+define <4 x i32> @f4(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vchf [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test sle.
+define <4 x i32> @f5(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vchf [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test slt.
+define <4 x i32> @f6(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vchf %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ugt.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vchlf %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test uge.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ule.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test ult.
+define <4 x i32> @f10(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vchlf %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <4 x i32> %val1, %val2
+ %ret = sext <4 x i1> %cmp to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test eq selects.
+define <4 x i32> @f11(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f11:
+; CHECK: vceqf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test ne selects.
+define <4 x i32> @f12(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f12:
+; CHECK: vceqf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test sgt selects.
+define <4 x i32> @f13(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f13:
+; CHECK: vchf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test sge selects.
+define <4 x i32> @f14(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f14:
+; CHECK: vchf [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test sle selects.
+define <4 x i32> @f15(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vchf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test slt selects.
+define <4 x i32> @f16(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f16:
+; CHECK: vchf [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test ugt selects.
+define <4 x i32> @f17(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test uge selects.
+define <4 x i32> @f18(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test ule selects.
+define <4 x i32> @f19(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
+
+; Test ult selects.
+define <4 x i32> @f20(<4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3, <4 x i32> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vchlf [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val3, <4 x i32> %val4
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-cmp-04.ll b/llvm/test/CodeGen/SystemZ/vec-cmp-04.ll
new file mode 100644
index 00000000000..5cecaa7251b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cmp-04.ll
@@ -0,0 +1,228 @@
+; Test v2i64 comparisons.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test eq.
+define <2 x i64> @f1(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vceqg %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test ne.
+define <2 x i64> @f2(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vceqg [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test sgt.
+define <2 x i64> @f3(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vchg %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test sge.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vchg [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test sle.
+define <2 x i64> @f5(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vchg [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test slt.
+define <2 x i64> @f6(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vchg %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test ugt.
+define <2 x i64> @f7(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vchlg %v24, %v26, %v28
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test uge.
+define <2 x i64> @f8(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v28, %v26
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test ule.
+define <2 x i64> @f9(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v26, %v28
+; CHECK-NEXT: vno %v24, [[REG]], [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test ult.
+define <2 x i64> @f10(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vchlg %v24, %v28, %v26
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <2 x i64> %val1, %val2
+ %ret = sext <2 x i1> %cmp to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test eq selects.
+define <2 x i64> @f11(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f11:
+; CHECK: vceqg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp eq <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test ne selects.
+define <2 x i64> @f12(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f12:
+; CHECK: vceqg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ne <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test sgt selects.
+define <2 x i64> @f13(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f13:
+; CHECK: vchg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sgt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test sge selects.
+define <2 x i64> @f14(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f14:
+; CHECK: vchg [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sge <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test sle selects.
+define <2 x i64> @f15(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f15:
+; CHECK: vchg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp sle <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test slt selects.
+define <2 x i64> @f16(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f16:
+; CHECK: vchg [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp slt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test ugt selects.
+define <2 x i64> @f17(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f17:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ugt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test uge selects.
+define <2 x i64> @f18(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f18:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp uge <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test ule selects.
+define <2 x i64> @f19(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f19:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-NEXT: vsel %v24, %v30, %v28, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ule <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
+
+; Test ult selects.
+define <2 x i64> @f20(<2 x i64> %val1, <2 x i64> %val2,
+ <2 x i64> %val3, <2 x i64> %val4) {
+; CHECK-LABEL: f20:
+; CHECK: vchlg [[REG:%v[0-9]+]], %v26, %v24
+; CHECK-NEXT: vsel %v24, %v28, %v30, [[REG]]
+; CHECK-NEXT: br %r14
+ %cmp = icmp ult <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val3, <2 x i64> %val4
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-combine-01.ll b/llvm/test/CodeGen/SystemZ/vec-combine-01.ll
new file mode 100644
index 00000000000..f9da34b6475
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-combine-01.ll
@@ -0,0 +1,107 @@
+; Test various target-specific DAG combiner patterns.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Check that an extraction followed by a truncation is effectively treated
+; as a bitcast.
+define void @f1(<4 x i32> %v1, <4 x i32> %v2, i8 *%ptr1, i8 *%ptr2) {
+; CHECK-LABEL: f1:
+; CHECK: vaf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-DAG: vsteb [[REG]], 0(%r2), 3
+; CHECK-DAG: vsteb [[REG]], 0(%r3), 15
+; CHECK: br %r14
+ %add = add <4 x i32> %v1, %v2
+ %elem1 = extractelement <4 x i32> %add, i32 0
+ %elem2 = extractelement <4 x i32> %add, i32 3
+ %trunc1 = trunc i32 %elem1 to i8
+ %trunc2 = trunc i32 %elem2 to i8
+ store i8 %trunc1, i8 *%ptr1
+ store i8 %trunc2, i8 *%ptr2
+ ret void
+}
+
+; Test a case where a pack-type shuffle can be eliminated.
+define i16 @f2(<4 x i32> %v1, <4 x i32> %v2, <4 x i32> %v3) {
+; CHECK-LABEL: f2:
+; CHECK-NOT: vpk
+; CHECK-DAG: vaf [[REG1:%v[0-9]+]], %v24, %v26
+; CHECK-DAG: vaf [[REG2:%v[0-9]+]], %v26, %v28
+; CHECK-DAG: vlgvh {{%r[0-5]}}, [[REG1]], 3
+; CHECK-DAG: vlgvh {{%r[0-5]}}, [[REG2]], 7
+; CHECK: br %r14
+ %add1 = add <4 x i32> %v1, %v2
+ %add2 = add <4 x i32> %v2, %v3
+ %shuffle = shufflevector <4 x i32> %add1, <4 x i32> %add2,
+ <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %bitcast = bitcast <4 x i32> %shuffle to <8 x i16>
+ %elem1 = extractelement <8 x i16> %bitcast, i32 1
+ %elem2 = extractelement <8 x i16> %bitcast, i32 7
+ %res = add i16 %elem1, %elem2
+ ret i16 %res
+}
+
+; ...and again in a case where there's also a splat and a bitcast.
+define i16 @f3(<4 x i32> %v1, <4 x i32> %v2, <2 x i64> %v3) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: vrepg
+; CHECK-NOT: vpk
+; CHECK-DAG: vaf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-DAG: vlgvh {{%r[0-5]}}, [[REG]], 6
+; CHECK-DAG: vlgvh {{%r[0-5]}}, %v28, 3
+; CHECK: br %r14
+ %add = add <4 x i32> %v1, %v2
+ %splat = shufflevector <2 x i64> %v3, <2 x i64> undef,
+ <2 x i32> <i32 0, i32 0>
+ %splatcast = bitcast <2 x i64> %splat to <4 x i32>
+ %shuffle = shufflevector <4 x i32> %add, <4 x i32> %splatcast,
+ <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %bitcast = bitcast <4 x i32> %shuffle to <8 x i16>
+ %elem1 = extractelement <8 x i16> %bitcast, i32 2
+ %elem2 = extractelement <8 x i16> %bitcast, i32 7
+ %res = add i16 %elem1, %elem2
+ ret i16 %res
+}
+
+; ...and again with a merge low instead of a pack.
+define i16 @f4(<4 x i32> %v1, <4 x i32> %v2, <2 x i64> %v3) {
+; CHECK-LABEL: f4:
+; CHECK-NOT: vrepg
+; CHECK-NOT: vmr
+; CHECK-DAG: vaf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-DAG: vlgvh {{%r[0-5]}}, [[REG]], 6
+; CHECK-DAG: vlgvh {{%r[0-5]}}, %v28, 3
+; CHECK: br %r14
+ %add = add <4 x i32> %v1, %v2
+ %splat = shufflevector <2 x i64> %v3, <2 x i64> undef,
+ <2 x i32> <i32 0, i32 0>
+ %splatcast = bitcast <2 x i64> %splat to <4 x i32>
+ %shuffle = shufflevector <4 x i32> %add, <4 x i32> %splatcast,
+ <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %bitcast = bitcast <4 x i32> %shuffle to <8 x i16>
+ %elem1 = extractelement <8 x i16> %bitcast, i32 4
+ %elem2 = extractelement <8 x i16> %bitcast, i32 7
+ %res = add i16 %elem1, %elem2
+ ret i16 %res
+}
+
+; ...and again with a merge high.
+define i16 @f5(<4 x i32> %v1, <4 x i32> %v2, <2 x i64> %v3) {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vrepg
+; CHECK-NOT: vmr
+; CHECK-DAG: vaf [[REG:%v[0-9]+]], %v24, %v26
+; CHECK-DAG: vlgvh {{%r[0-5]}}, [[REG]], 2
+; CHECK-DAG: vlgvh {{%r[0-5]}}, %v28, 3
+; CHECK: br %r14
+ %add = add <4 x i32> %v1, %v2
+ %splat = shufflevector <2 x i64> %v3, <2 x i64> undef,
+ <2 x i32> <i32 0, i32 0>
+ %splatcast = bitcast <2 x i64> %splat to <4 x i32>
+ %shuffle = shufflevector <4 x i32> %add, <4 x i32> %splatcast,
+ <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %bitcast = bitcast <4 x i32> %shuffle to <8 x i16>
+ %elem1 = extractelement <8 x i16> %bitcast, i32 4
+ %elem2 = extractelement <8 x i16> %bitcast, i32 7
+ %res = add i16 %elem1, %elem2
+ ret i16 %res
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-01.ll b/llvm/test/CodeGen/SystemZ/vec-const-01.ll
new file mode 100644
index 00000000000..f173b92b015
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-01.ll
@@ -0,0 +1,55 @@
+; Test vector byte masks, v16i8 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test an all-zeros vector.
+define <16 x i8> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgbm %v24, 0
+; CHECK: br %r14
+ ret <16 x i8> zeroinitializer
+}
+
+; Test an all-ones vector.
+define <16 x i8> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgbm %v24, 65535
+; CHECK: br %r14
+ ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+}
+
+; Test a mixed vector (mask 0x8c75).
+define <16 x i8> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgbm %v24, 35957
+; CHECK: br %r14
+ ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0,
+ i8 -1, i8 -1, i8 0, i8 0,
+ i8 0, i8 -1, i8 -1, i8 -1,
+ i8 0, i8 -1, i8 0, i8 -1>
+}
+
+; Test that undefs are treated as zero.
+define <16 x i8> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgbm %v24, 35957
+; CHECK: br %r14
+ ret <16 x i8> <i8 -1, i8 undef, i8 undef, i8 undef,
+ i8 -1, i8 -1, i8 undef, i8 undef,
+ i8 undef, i8 -1, i8 -1, i8 -1,
+ i8 undef, i8 -1, i8 undef, i8 -1>
+}
+
+; Test that we don't use VGBM if one of the bytes is not 0 or 0xff.
+define <16 x i8> @f5() {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vgbm
+; CHECK: br %r14
+ ret <16 x i8> <i8 -1, i8 0, i8 0, i8 0,
+ i8 -1, i8 -1, i8 0, i8 1,
+ i8 0, i8 -1, i8 -1, i8 -1,
+ i8 0, i8 -1, i8 0, i8 -1>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-02.ll b/llvm/test/CodeGen/SystemZ/vec-const-02.ll
new file mode 100644
index 00000000000..541cbb9faca
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-02.ll
@@ -0,0 +1,47 @@
+; Test vector byte masks, v8i16 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test an all-zeros vector.
+define <8 x i16> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgbm %v24, 0
+; CHECK: br %r14
+ ret <8 x i16> zeroinitializer
+}
+
+; Test an all-ones vector.
+define <8 x i16> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgbm %v24, 65535
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+}
+
+; Test a mixed vector (mask 0x8c76).
+define <8 x i16> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgbm %v24, 35958
+; CHECK: br %r14
+ ret <8 x i16> <i16 65280, i16 0, i16 65535, i16 0,
+ i16 255, i16 65535, i16 255, i16 65280>
+}
+
+; Test that undefs are treated as zero.
+define <8 x i16> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgbm %v24, 35958
+; CHECK: br %r14
+ ret <8 x i16> <i16 65280, i16 undef, i16 65535, i16 undef,
+ i16 255, i16 65535, i16 255, i16 65280>
+}
+
+; Test that we don't use VGBM if one of the bytes is not 0 or 0xff.
+define <8 x i16> @f5() {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vgbm
+; CHECK: br %r14
+ ret <8 x i16> <i16 65280, i16 0, i16 65535, i16 0,
+ i16 255, i16 65535, i16 256, i16 65280>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-03.ll b/llvm/test/CodeGen/SystemZ/vec-const-03.ll
new file mode 100644
index 00000000000..45ed83866d5
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-03.ll
@@ -0,0 +1,43 @@
+; Test vector byte masks, v4i32 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test an all-zeros vector.
+define <4 x i32> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgbm %v24, 0
+; CHECK: br %r14
+ ret <4 x i32> zeroinitializer
+}
+
+; Test an all-ones vector.
+define <4 x i32> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgbm %v24, 65535
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>
+}
+
+; Test a mixed vector (mask 0x8c76).
+define <4 x i32> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgbm %v24, 35958
+; CHECK: br %r14
+ ret <4 x i32> <i32 4278190080, i32 4294901760, i32 16777215, i32 16776960>
+}
+
+; Test that undefs are treated as zero (mask 0x8076).
+define <4 x i32> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgbm %v24, 32886
+; CHECK: br %r14
+ ret <4 x i32> <i32 4278190080, i32 undef, i32 16777215, i32 16776960>
+}
+
+; Test that we don't use VGBM if one of the bytes is not 0 or 0xff.
+define <4 x i32> @f5() {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vgbm
+; CHECK: br %r14
+ ret <4 x i32> <i32 4278190080, i32 1, i32 16777215, i32 16776960>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-04.ll b/llvm/test/CodeGen/SystemZ/vec-const-04.ll
new file mode 100644
index 00000000000..1c2fb414d25
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-04.ll
@@ -0,0 +1,43 @@
+; Test vector byte masks, v2i64 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test an all-zeros vector.
+define <2 x i64> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgbm %v24, 0
+; CHECK: br %r14
+ ret <2 x i64> zeroinitializer
+}
+
+; Test an all-ones vector.
+define <2 x i64> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgbm %v24, 65535
+; CHECK: br %r14
+ ret <2 x i64> <i64 -1, i64 -1>
+}
+
+; Test a mixed vector (mask 0x8c76).
+define <2 x i64> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgbm %v24, 35958
+; CHECK: br %r14
+ ret <2 x i64> <i64 18374686483966525440, i64 72057589759737600>
+}
+
+; Test that undefs are treated as zero (mask 0x8c00).
+define <2 x i64> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgbm %v24, 35840
+; CHECK: br %r14
+ ret <2 x i64> <i64 18374686483966525440, i64 undef>
+}
+
+; Test that we don't use VGBM if one of the bytes is not 0 or 0xff.
+define <2 x i64> @f5() {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vgbm
+; CHECK: br %r14
+ ret <2 x i64> <i64 18374686483966525441, i64 72057589759737600>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-07.ll b/llvm/test/CodeGen/SystemZ/vec-const-07.ll
new file mode 100644
index 00000000000..6fcf95b6921
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-07.ll
@@ -0,0 +1,229 @@
+; Test vector replicates, v16i8 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a byte-granularity replicate with the lowest useful value.
+define <16 x i8> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vrepib %v24, 1
+; CHECK: br %r14
+ ret <16 x i8> <i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1>
+}
+
+; Test a byte-granularity replicate with an arbitrary value.
+define <16 x i8> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vrepib %v24, -55
+; CHECK: br %r14
+ ret <16 x i8> <i8 201, i8 201, i8 201, i8 201,
+ i8 201, i8 201, i8 201, i8 201,
+ i8 201, i8 201, i8 201, i8 201,
+ i8 201, i8 201, i8 201, i8 201>
+}
+
+; Test a byte-granularity replicate with the highest useful value.
+define <16 x i8> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vrepib %v24, -2
+; CHECK: br %r14
+ ret <16 x i8> <i8 254, i8 254, i8 254, i8 254,
+ i8 254, i8 254, i8 254, i8 254,
+ i8 254, i8 254, i8 254, i8 254,
+ i8 254, i8 254, i8 254, i8 254>
+}
+
+; Test a halfword-granularity replicate with the lowest useful value.
+define <16 x i8> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vrepih %v24, 1
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 1, i8 0, i8 1,
+ i8 0, i8 1, i8 0, i8 1,
+ i8 0, i8 1, i8 0, i8 1,
+ i8 0, i8 1, i8 0, i8 1>
+}
+
+; Test a halfword-granularity replicate with an arbitrary value.
+define <16 x i8> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vrepih %v24, 25650
+; CHECK: br %r14
+ ret <16 x i8> <i8 100, i8 50, i8 100, i8 50,
+ i8 100, i8 50, i8 100, i8 50,
+ i8 100, i8 50, i8 100, i8 50,
+ i8 100, i8 50, i8 100, i8 50>
+}
+
+; Test a halfword-granularity replicate with the highest useful value.
+define <16 x i8> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vrepih %v24, -2
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 254, i8 255, i8 254,
+ i8 255, i8 254, i8 255, i8 254,
+ i8 255, i8 254, i8 255, i8 254,
+ i8 255, i8 254, i8 255, i8 254>
+}
+
+; Test a word-granularity replicate with the lowest useful positive value.
+define <16 x i8> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vrepif %v24, 1
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 1,
+ i8 0, i8 0, i8 0, i8 1,
+ i8 0, i8 0, i8 0, i8 1,
+ i8 0, i8 0, i8 0, i8 1>
+}
+
+; Test a word-granularity replicate with the highest in-range value.
+define <16 x i8> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vrepif %v24, 32767
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 127, i8 255,
+ i8 0, i8 0, i8 127, i8 255,
+ i8 0, i8 0, i8 127, i8 255,
+ i8 0, i8 0, i8 127, i8 255>
+}
+
+; Test a word-granularity replicate with the next highest value.
+; This cannot use VREPIF.
+define <16 x i8> @f9() {
+; CHECK-LABEL: f9:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0>
+}
+
+; Test a word-granularity replicate with the lowest in-range value.
+define <16 x i8> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vrepif %v24, -32768
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 128, i8 0,
+ i8 255, i8 255, i8 128, i8 0,
+ i8 255, i8 255, i8 128, i8 0,
+ i8 255, i8 255, i8 128, i8 0>
+}
+
+; Test a word-granularity replicate with the next lowest value.
+; This cannot use VREPIF.
+define <16 x i8> @f11() {
+; CHECK-LABEL: f11:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255>
+}
+
+; Test a word-granularity replicate with the highest useful negative value.
+define <16 x i8> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vrepif %v24, -2
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 255, i8 254,
+ i8 255, i8 255, i8 255, i8 254,
+ i8 255, i8 255, i8 255, i8 254,
+ i8 255, i8 255, i8 255, i8 254>
+}
+
+; Test a doubleword-granularity replicate with the lowest useful positive
+; value.
+define <16 x i8> @f13() {
+; CHECK-LABEL: f13:
+; CHECK: vrepig %v24, 1
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 0, i8 1,
+ i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 0, i8 1>
+}
+
+; Test a doubleword-granularity replicate with the highest in-range value.
+define <16 x i8> @f14() {
+; CHECK-LABEL: f14:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 127, i8 255,
+ i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 127, i8 255>
+}
+
+; Test a doubleword-granularity replicate with the next highest value.
+; This cannot use VREPIG.
+define <16 x i8> @f15() {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 128, i8 0>
+}
+
+; Test a doubleword-granularity replicate with the lowest in-range value.
+define <16 x i8> @f16() {
+; CHECK-LABEL: f16:
+; CHECK: vrepig %v24, -32768
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 128, i8 0,
+ i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 128, i8 0>
+}
+
+; Test a doubleword-granularity replicate with the next lowest value.
+; This cannot use VREPIG.
+define <16 x i8> @f17() {
+; CHECK-LABEL: f17:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 127, i8 255>
+}
+
+; Test a doubleword-granularity replicate with the highest useful negative
+; value.
+define <16 x i8> @f18() {
+; CHECK-LABEL: f18:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 255, i8 254,
+ i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 255, i8 254>
+}
+
+; Repeat f14 with undefs optimistically treated as 0.
+define <16 x i8> @f19() {
+; CHECK-LABEL: f19:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 undef, i8 0, i8 0,
+ i8 0, i8 0, i8 127, i8 255,
+ i8 undef, i8 0, i8 undef, i8 0,
+ i8 0, i8 0, i8 127, i8 255>
+}
+
+; Repeat f18 with undefs optimistically treated as -1.
+define <16 x i8> @f20() {
+; CHECK-LABEL: f20:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <16 x i8> <i8 undef, i8 255, i8 255, i8 255,
+ i8 255, i8 255, i8 undef, i8 254,
+ i8 255, i8 255, i8 255, i8 undef,
+ i8 255, i8 undef, i8 255, i8 254>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-08.ll b/llvm/test/CodeGen/SystemZ/vec-const-08.ll
new file mode 100644
index 00000000000..5ab6947e548
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-08.ll
@@ -0,0 +1,189 @@
+; Test vector replicates, v8i16 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a byte-granularity replicate with the lowest useful value.
+define <8 x i16> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vrepib %v24, 1
+; CHECK: br %r14
+ ret <8 x i16> <i16 257, i16 257, i16 257, i16 257,
+ i16 257, i16 257, i16 257, i16 257>
+}
+
+; Test a byte-granularity replicate with an arbitrary value.
+define <8 x i16> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vrepib %v24, -55
+; CHECK: br %r14
+ ret <8 x i16> <i16 51657, i16 51657, i16 51657, i16 51657,
+ i16 51657, i16 51657, i16 51657, i16 51657>
+}
+
+; Test a byte-granularity replicate with the highest useful value.
+define <8 x i16> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vrepib %v24, -2
+; CHECK: br %r14
+ ret <8 x i16> <i16 -258, i16 -258, i16 -258, i16 -258,
+ i16 -258, i16 -258, i16 -258, i16 -258>
+}
+
+; Test a halfword-granularity replicate with the lowest useful value.
+define <8 x i16> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vrepih %v24, 1
+; CHECK: br %r14
+ ret <8 x i16> <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+}
+
+; Test a halfword-granularity replicate with an arbitrary value.
+define <8 x i16> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vrepih %v24, 25650
+; CHECK: br %r14
+ ret <8 x i16> <i16 25650, i16 25650, i16 25650, i16 25650,
+ i16 25650, i16 25650, i16 25650, i16 25650>
+}
+
+; Test a halfword-granularity replicate with the highest useful value.
+define <8 x i16> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vrepih %v24, -2
+; CHECK: br %r14
+ ret <8 x i16> <i16 65534, i16 65534, i16 65534, i16 65534,
+ i16 65534, i16 65534, i16 65534, i16 65534>
+}
+
+; Test a word-granularity replicate with the lowest useful positive value.
+define <8 x i16> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vrepif %v24, 1
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 1, i16 0, i16 1,
+ i16 0, i16 1, i16 0, i16 1>
+}
+
+; Test a word-granularity replicate with the highest in-range value.
+define <8 x i16> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vrepif %v24, 32767
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 32767, i16 0, i16 32767,
+ i16 0, i16 32767, i16 0, i16 32767>
+}
+
+; Test a word-granularity replicate with the next highest value.
+; This cannot use VREPIF.
+define <8 x i16> @f9() {
+; CHECK-LABEL: f9:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 32768, i16 0, i16 32768,
+ i16 0, i16 32768, i16 0, i16 32768>
+}
+
+; Test a word-granularity replicate with the lowest in-range value.
+define <8 x i16> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vrepif %v24, -32768
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -32768, i16 -1, i16 -32768,
+ i16 -1, i16 -32768, i16 -1, i16 -32768>
+}
+
+; Test a word-granularity replicate with the next lowest value.
+; This cannot use VREPIF.
+define <8 x i16> @f11() {
+; CHECK-LABEL: f11:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -32769, i16 -1, i16 -32769,
+ i16 -1, i16 -32769, i16 -1, i16 -32769>
+}
+
+; Test a word-granularity replicate with the highest useful negative value.
+define <8 x i16> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vrepif %v24, -2
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -2, i16 -1, i16 -2,
+ i16 -1, i16 -2, i16 -1, i16 -2>
+}
+
+; Test a doubleword-granularity replicate with the lowest useful positive
+; value.
+define <8 x i16> @f13() {
+; CHECK-LABEL: f13:
+; CHECK: vrepig %v24, 1
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 1,
+ i16 0, i16 0, i16 0, i16 1>
+}
+
+; Test a doubleword-granularity replicate with the highest in-range value.
+define <8 x i16> @f14() {
+; CHECK-LABEL: f14:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 32767,
+ i16 0, i16 0, i16 0, i16 32767>
+}
+
+; Test a doubleword-granularity replicate with the next highest value.
+; This cannot use VREPIG.
+define <8 x i16> @f15() {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 32768,
+ i16 0, i16 0, i16 0, i16 32768>
+}
+
+; Test a doubleword-granularity replicate with the lowest in-range value.
+define <8 x i16> @f16() {
+; CHECK-LABEL: f16:
+; CHECK: vrepig %v24, -32768
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -32768,
+ i16 -1, i16 -1, i16 -1, i16 -32768>
+}
+
+; Test a doubleword-granularity replicate with the next lowest value.
+; This cannot use VREPIG.
+define <8 x i16> @f17() {
+; CHECK-LABEL: f17:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -32769,
+ i16 -1, i16 -1, i16 -1, i16 -32769>
+}
+
+; Test a doubleword-granularity replicate with the highest useful negative
+; value.
+define <8 x i16> @f18() {
+; CHECK-LABEL: f18:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 -1, i16 -2,
+ i16 -1, i16 -1, i16 -1, i16 -2>
+}
+
+; Repeat f14 with undefs optimistically treated as 0.
+define <8 x i16> @f19() {
+; CHECK-LABEL: f19:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 undef, i16 0, i16 32767,
+ i16 undef, i16 0, i16 undef, i16 32767>
+}
+
+; Repeat f18 with undefs optimistically treated as -1.
+define <8 x i16> @f20() {
+; CHECK-LABEL: f20:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 undef, i16 -2,
+ i16 undef, i16 undef, i16 -1, i16 -2>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-09.ll b/llvm/test/CodeGen/SystemZ/vec-const-09.ll
new file mode 100644
index 00000000000..2cbe9259452
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-09.ll
@@ -0,0 +1,169 @@
+; Test vector replicates, v4i32 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a byte-granularity replicate with the lowest useful value.
+define <4 x i32> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vrepib %v24, 1
+; CHECK: br %r14
+ ret <4 x i32> <i32 16843009, i32 16843009, i32 16843009, i32 16843009>
+}
+
+; Test a byte-granularity replicate with an arbitrary value.
+define <4 x i32> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vrepib %v24, -55
+; CHECK: br %r14
+ ret <4 x i32> <i32 3385444809, i32 3385444809, i32 3385444809, i32 3385444809>
+}
+
+; Test a byte-granularity replicate with the highest useful value.
+define <4 x i32> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vrepib %v24, -2
+; CHECK: br %r14
+ ret <4 x i32> <i32 4278124286, i32 4278124286, i32 4278124286, i32 4278124286>
+}
+
+; Test a halfword-granularity replicate with the lowest useful value.
+define <4 x i32> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vrepih %v24, 1
+; CHECK: br %r14
+ ret <4 x i32> <i32 65537, i32 65537, i32 65537, i32 65537>
+}
+
+; Test a halfword-granularity replicate with an arbitrary value.
+define <4 x i32> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vrepih %v24, 25650
+; CHECK: br %r14
+ ret <4 x i32> <i32 1681024050, i32 1681024050, i32 1681024050, i32 1681024050>
+}
+
+; Test a halfword-granularity replicate with the highest useful value.
+define <4 x i32> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vrepih %v24, -2
+; CHECK: br %r14
+ ret <4 x i32> <i32 -65538, i32 -65538, i32 -65538, i32 -65538>
+}
+
+; Test a word-granularity replicate with the lowest useful positive value.
+define <4 x i32> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vrepif %v24, 1
+; CHECK: br %r14
+ ret <4 x i32> <i32 1, i32 1, i32 1, i32 1>
+}
+
+; Test a word-granularity replicate with the highest in-range value.
+define <4 x i32> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vrepif %v24, 32767
+; CHECK: br %r14
+ ret <4 x i32> <i32 32767, i32 32767, i32 32767, i32 32767>
+}
+
+; Test a word-granularity replicate with the next highest value.
+; This cannot use VREPIF.
+define <4 x i32> @f9() {
+; CHECK-LABEL: f9:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <4 x i32> <i32 32768, i32 32768, i32 32768, i32 32768>
+}
+
+; Test a word-granularity replicate with the lowest in-range value.
+define <4 x i32> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vrepif %v24, -32768
+; CHECK: br %r14
+ ret <4 x i32> <i32 -32768, i32 -32768, i32 -32768, i32 -32768>
+}
+
+; Test a word-granularity replicate with the next lowest value.
+; This cannot use VREPIF.
+define <4 x i32> @f11() {
+; CHECK-LABEL: f11:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <4 x i32> <i32 -32769, i32 -32769, i32 -32769, i32 -32769>
+}
+
+; Test a word-granularity replicate with the highest useful negative value.
+define <4 x i32> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vrepif %v24, -2
+; CHECK: br %r14
+ ret <4 x i32> <i32 -2, i32 -2, i32 -2, i32 -2>
+}
+
+; Test a doubleword-granularity replicate with the lowest useful positive
+; value.
+define <4 x i32> @f13() {
+; CHECK-LABEL: f13:
+; CHECK: vrepig %v24, 1
+; CHECK: br %r14
+ ret <4 x i32> <i32 0, i32 1, i32 0, i32 1>
+}
+
+; Test a doubleword-granularity replicate with the highest in-range value.
+define <4 x i32> @f14() {
+; CHECK-LABEL: f14:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <4 x i32> <i32 0, i32 32767, i32 0, i32 32767>
+}
+
+; Test a doubleword-granularity replicate with the next highest value.
+; This cannot use VREPIG.
+define <4 x i32> @f15() {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <4 x i32> <i32 0, i32 32768, i32 0, i32 32768>
+}
+
+; Test a doubleword-granularity replicate with the lowest in-range value.
+define <4 x i32> @f16() {
+; CHECK-LABEL: f16:
+; CHECK: vrepig %v24, -32768
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 -32768, i32 -1, i32 -32768>
+}
+
+; Test a doubleword-granularity replicate with the next lowest value.
+; This cannot use VREPIG.
+define <4 x i32> @f17() {
+; CHECK-LABEL: f17:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 -32769, i32 -1, i32 -32769>
+}
+
+; Test a doubleword-granularity replicate with the highest useful negative
+; value.
+define <4 x i32> @f18() {
+; CHECK-LABEL: f18:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 -2, i32 -1, i32 -2>
+}
+
+; Repeat f14 with undefs optimistically treated as 0, 32767.
+define <4 x i32> @f19() {
+; CHECK-LABEL: f19:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <4 x i32> <i32 undef, i32 undef, i32 0, i32 32767>
+}
+
+; Repeat f18 with undefs optimistically treated as -2, -1.
+define <4 x i32> @f20() {
+; CHECK-LABEL: f20:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 undef, i32 undef, i32 -2>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-10.ll b/llvm/test/CodeGen/SystemZ/vec-const-10.ll
new file mode 100644
index 00000000000..0613b69a277
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-10.ll
@@ -0,0 +1,169 @@
+; Test vector replicates, v2i64 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a byte-granularity replicate with the lowest useful value.
+define <2 x i64> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vrepib %v24, 1
+; CHECK: br %r14
+ ret <2 x i64> <i64 72340172838076673, i64 72340172838076673>
+}
+
+; Test a byte-granularity replicate with an arbitrary value.
+define <2 x i64> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vrepib %v24, -55
+; CHECK: br %r14
+ ret <2 x i64> <i64 -3906369333256140343, i64 -3906369333256140343>
+}
+
+; Test a byte-granularity replicate with the highest useful value.
+define <2 x i64> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vrepib %v24, -2
+; CHECK: br %r14
+ ret <2 x i64> <i64 -72340172838076674, i64 -72340172838076674>
+}
+
+; Test a halfword-granularity replicate with the lowest useful value.
+define <2 x i64> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vrepih %v24, 1
+; CHECK: br %r14
+ ret <2 x i64> <i64 281479271743489, i64 281479271743489>
+}
+
+; Test a halfword-granularity replicate with an arbitrary value.
+define <2 x i64> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vrepih %v24, 25650
+; CHECK: br %r14
+ ret <2 x i64> <i64 7219943320220492850, i64 7219943320220492850>
+}
+
+; Test a halfword-granularity replicate with the highest useful value.
+define <2 x i64> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vrepih %v24, -2
+; CHECK: br %r14
+ ret <2 x i64> <i64 -281479271743490, i64 -281479271743490>
+}
+
+; Test a word-granularity replicate with the lowest useful positive value.
+define <2 x i64> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vrepif %v24, 1
+; CHECK: br %r14
+ ret <2 x i64> <i64 4294967297, i64 4294967297>
+}
+
+; Test a word-granularity replicate with the highest in-range value.
+define <2 x i64> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vrepif %v24, 32767
+; CHECK: br %r14
+ ret <2 x i64> <i64 140733193420799, i64 140733193420799>
+}
+
+; Test a word-granularity replicate with the next highest value.
+; This cannot use VREPIF.
+define <2 x i64> @f9() {
+; CHECK-LABEL: f9:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <2 x i64> <i64 140737488388096, i64 140737488388096>
+}
+
+; Test a word-granularity replicate with the lowest in-range value.
+define <2 x i64> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vrepif %v24, -32768
+; CHECK: br %r14
+ ret <2 x i64> <i64 -140733193420800, i64 -140733193420800>
+}
+
+; Test a word-granularity replicate with the next lowest value.
+; This cannot use VREPIF.
+define <2 x i64> @f11() {
+; CHECK-LABEL: f11:
+; CHECK-NOT: vrepif
+; CHECK: br %r14
+ ret <2 x i64> <i64 -140737488388097, i64 -140737488388097>
+}
+
+; Test a word-granularity replicate with the highest useful negative value.
+define <2 x i64> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vrepif %v24, -2
+; CHECK: br %r14
+ ret <2 x i64> <i64 -4294967298, i64 -4294967298>
+}
+
+; Test a doubleword-granularity replicate with the lowest useful positive
+; value.
+define <2 x i64> @f13() {
+; CHECK-LABEL: f13:
+; CHECK: vrepig %v24, 1
+; CHECK: br %r14
+ ret <2 x i64> <i64 1, i64 1>
+}
+
+; Test a doubleword-granularity replicate with the highest in-range value.
+define <2 x i64> @f14() {
+; CHECK-LABEL: f14:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <2 x i64> <i64 32767, i64 32767>
+}
+
+; Test a doubleword-granularity replicate with the next highest value.
+; This cannot use VREPIG.
+define <2 x i64> @f15() {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <2 x i64> <i64 32768, i64 32768>
+}
+
+; Test a doubleword-granularity replicate with the lowest in-range value.
+define <2 x i64> @f16() {
+; CHECK-LABEL: f16:
+; CHECK: vrepig %v24, -32768
+; CHECK: br %r14
+ ret <2 x i64> <i64 -32768, i64 -32768>
+}
+
+; Test a doubleword-granularity replicate with the next lowest value.
+; This cannot use VREPIG.
+define <2 x i64> @f17() {
+; CHECK-LABEL: f17:
+; CHECK-NOT: vrepig
+; CHECK: br %r14
+ ret <2 x i64> <i64 -32769, i64 -32769>
+}
+
+; Test a doubleword-granularity replicate with the highest useful negative
+; value.
+define <2 x i64> @f18() {
+; CHECK-LABEL: f18:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <2 x i64> <i64 -2, i64 -2>
+}
+
+; Repeat f14 with undefs optimistically treated as 32767.
+define <2 x i64> @f19() {
+; CHECK-LABEL: f19:
+; CHECK: vrepig %v24, 32767
+; CHECK: br %r14
+ ret <2 x i64> <i64 undef, i64 32767>
+}
+
+; Repeat f18 with undefs optimistically treated as -2.
+define <2 x i64> @f20() {
+; CHECK-LABEL: f20:
+; CHECK: vrepig %v24, -2
+; CHECK: br %r14
+ ret <2 x i64> <i64 undef, i64 -2>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-13.ll b/llvm/test/CodeGen/SystemZ/vec-const-13.ll
new file mode 100644
index 00000000000..2cc425252c2
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-13.ll
@@ -0,0 +1,193 @@
+; Test vector replicates that use VECTOR GENERATE MASK, v16i8 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a word-granularity replicate with the lowest value that cannot use
+; VREPIF.
+define <16 x i8> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 0>
+}
+
+; Test a word-granularity replicate that has the lower 17 bits set.
+define <16 x i8> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgmf %v24, 15, 31
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 1, i8 255, i8 255,
+ i8 0, i8 1, i8 255, i8 255,
+ i8 0, i8 1, i8 255, i8 255,
+ i8 0, i8 1, i8 255, i8 255>
+}
+
+; Test a word-granularity replicate that has the upper 15 bits set.
+define <16 x i8> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgmf %v24, 0, 14
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 254, i8 0, i8 0,
+ i8 255, i8 254, i8 0, i8 0,
+ i8 255, i8 254, i8 0, i8 0,
+ i8 255, i8 254, i8 0, i8 0>
+}
+
+; Test a word-granularity replicate that has middle bits set.
+define <16 x i8> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgmf %v24, 12, 17
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 15, i8 192, i8 0,
+ i8 0, i8 15, i8 192, i8 0,
+ i8 0, i8 15, i8 192, i8 0,
+ i8 0, i8 15, i8 192, i8 0>
+}
+
+; Test a word-granularity replicate with a wrap-around mask.
+define <16 x i8> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vgmf %v24, 17, 15
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255,
+ i8 255, i8 255, i8 127, i8 255>
+}
+
+; Test a doubleword-granularity replicate with the lowest value that cannot
+; use VREPIG.
+define <16 x i8> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vgmg %v24, 48, 48
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 128, i8 0,
+ i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 0, i8 128, i8 0>
+}
+
+; Test a doubleword-granularity replicate that has the lower 22 bits set.
+define <16 x i8> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vgmg %v24, 42, 63
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 63, i8 255, i8 255,
+ i8 0, i8 0, i8 0, i8 0,
+ i8 0, i8 63, i8 255, i8 255>
+}
+
+; Test a doubleword-granularity replicate that has the upper 45 bits set.
+define <16 x i8> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vgmg %v24, 0, 44
+; CHECK: br %r14
+ ret <16 x i8> <i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 248, i8 0, i8 0,
+ i8 255, i8 255, i8 255, i8 255,
+ i8 255, i8 248, i8 0, i8 0>
+}
+
+; Test a doubleword-granularity replicate that has middle bits set.
+define <16 x i8> @f9() {
+; CHECK-LABEL: f9:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 0, i8 0, i8 1,
+ i8 255, i8 224, i8 0, i8 0,
+ i8 0, i8 0, i8 0, i8 1,
+ i8 255, i8 224, i8 0, i8 0>
+}
+
+; Test a doubleword-granularity replicate with a wrap-around mask.
+define <16 x i8> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vgmg %v24, 18, 0
+; CHECK: br %r14
+ ret <16 x i8> <i8 128, i8 0, i8 63, i8 255,
+ i8 255, i8 255, i8 255, i8 255,
+ i8 128, i8 0, i8 63, i8 255,
+ i8 255, i8 255, i8 255, i8 255>
+}
+
+; Retest f1 with arbitrary undefs instead of 0s.
+define <16 x i8> @f11() {
+; CHECK-LABEL: f11:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 undef, i8 128, i8 0,
+ i8 0, i8 0, i8 128, i8 undef,
+ i8 undef, i8 0, i8 128, i8 0,
+ i8 undef, i8 undef, i8 128, i8 0>
+}
+
+; Try a case where we want consistent undefs to be treated as 0.
+define <16 x i8> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vgmf %v24, 15, 23
+; CHECK: br %r14
+ ret <16 x i8> <i8 undef, i8 1, i8 255, i8 0,
+ i8 undef, i8 1, i8 255, i8 0,
+ i8 undef, i8 1, i8 255, i8 0,
+ i8 undef, i8 1, i8 255, i8 0>
+}
+
+; ...and again with the lower bits of the replicated constant.
+define <16 x i8> @f13() {
+; CHECK-LABEL: f13:
+; CHECK: vgmf %v24, 15, 22
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 1, i8 254, i8 undef,
+ i8 0, i8 1, i8 254, i8 undef,
+ i8 0, i8 1, i8 254, i8 undef,
+ i8 0, i8 1, i8 254, i8 undef>
+}
+
+; Try a case where we want consistent undefs to be treated as -1.
+define <16 x i8> @f14() {
+; CHECK-LABEL: f14:
+; CHECK: vgmf %v24, 28, 8
+; CHECK: br %r14
+ ret <16 x i8> <i8 undef, i8 128, i8 0, i8 15,
+ i8 undef, i8 128, i8 0, i8 15,
+ i8 undef, i8 128, i8 0, i8 15,
+ i8 undef, i8 128, i8 0, i8 15>
+}
+
+; ...and again with the lower bits of the replicated constant.
+define <16 x i8> @f15() {
+; CHECK-LABEL: f15:
+; CHECK: vgmf %v24, 18, 3
+; CHECK: br %r14
+ ret <16 x i8> <i8 240, i8 0, i8 63, i8 undef,
+ i8 240, i8 0, i8 63, i8 undef,
+ i8 240, i8 0, i8 63, i8 undef,
+ i8 240, i8 0, i8 63, i8 undef>
+}
+
+; Repeat f9 with arbitrary undefs.
+define <16 x i8> @f16() {
+; CHECK-LABEL: f16:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <16 x i8> <i8 undef, i8 0, i8 undef, i8 1,
+ i8 255, i8 undef, i8 0, i8 0,
+ i8 0, i8 0, i8 0, i8 1,
+ i8 undef, i8 224, i8 undef, i8 undef>
+}
+
+; Try a case where we want some consistent undefs to be treated as 0
+; and some to be treated as 255.
+define <16 x i8> @f17() {
+; CHECK-LABEL: f17:
+; CHECK: vgmg %v24, 23, 35
+; CHECK: br %r14
+ ret <16 x i8> <i8 0, i8 undef, i8 1, i8 undef,
+ i8 240, i8 undef, i8 0, i8 0,
+ i8 0, i8 undef, i8 1, i8 undef,
+ i8 240, i8 undef, i8 0, i8 0>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-14.ll b/llvm/test/CodeGen/SystemZ/vec-const-14.ll
new file mode 100644
index 00000000000..0e3f124dbf6
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-14.ll
@@ -0,0 +1,113 @@
+; Test vector replicates that use VECTOR GENERATE MASK, v8i16 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a word-granularity replicate with the lowest value that cannot use
+; VREPIF.
+define <8 x i16> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 32768, i16 0, i16 32768,
+ i16 0, i16 32768, i16 0, i16 32768>
+}
+
+; Test a word-granularity replicate that has the lower 17 bits set.
+define <8 x i16> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgmf %v24, 15, 31
+; CHECK: br %r14
+ ret <8 x i16> <i16 1, i16 -1, i16 1, i16 -1,
+ i16 1, i16 -1, i16 1, i16 -1>
+}
+
+; Test a word-granularity replicate that has the upper 15 bits set.
+define <8 x i16> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgmf %v24, 0, 14
+; CHECK: br %r14
+ ret <8 x i16> <i16 -2, i16 0, i16 -2, i16 0,
+ i16 -2, i16 0, i16 -2, i16 0>
+}
+
+; Test a word-granularity replicate that has middle bits set.
+define <8 x i16> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgmf %v24, 12, 17
+; CHECK: br %r14
+ ret <8 x i16> <i16 15, i16 49152, i16 15, i16 49152,
+ i16 15, i16 49152, i16 15, i16 49152>
+}
+
+; Test a word-granularity replicate with a wrap-around mask.
+define <8 x i16> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vgmf %v24, 17, 15
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 32767, i16 -1, i16 32767,
+ i16 -1, i16 32767, i16 -1, i16 32767>
+}
+
+; Test a doubleword-granularity replicate with the lowest value that cannot
+; use VREPIG.
+define <8 x i16> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vgmg %v24, 48, 48
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 0, i16 0, i16 32768,
+ i16 0, i16 0, i16 0, i16 32768>
+}
+
+; Test a doubleword-granularity replicate that has the lower 22 bits set.
+define <8 x i16> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vgmg %v24, 42, 63
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 0, i16 63, i16 -1,
+ i16 0, i16 0, i16 63, i16 -1>
+}
+
+; Test a doubleword-granularity replicate that has the upper 45 bits set.
+define <8 x i16> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vgmg %v24, 0, 44
+; CHECK: br %r14
+ ret <8 x i16> <i16 -1, i16 -1, i16 -8, i16 0,
+ i16 -1, i16 -1, i16 -8, i16 0>
+}
+
+; Test a doubleword-granularity replicate that has middle bits set.
+define <8 x i16> @f9() {
+; CHECK-LABEL: f9:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <8 x i16> <i16 0, i16 1, i16 -32, i16 0,
+ i16 0, i16 1, i16 -32, i16 0>
+}
+
+; Test a doubleword-granularity replicate with a wrap-around mask.
+define <8 x i16> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vgmg %v24, 18, 0
+; CHECK: br %r14
+ ret <8 x i16> <i16 32768, i16 16383, i16 -1, i16 -1,
+ i16 32768, i16 16383, i16 -1, i16 -1>
+}
+
+; Retest f1 with arbitrary undefs instead of 0s.
+define <8 x i16> @f11() {
+; CHECK-LABEL: f11:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <8 x i16> <i16 undef, i16 32768, i16 0, i16 32768,
+ i16 0, i16 32768, i16 undef, i16 32768>
+}
+
+; ...likewise f9.
+define <8 x i16> @f12() {
+; CHECK-LABEL: f12:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <8 x i16> <i16 undef, i16 1, i16 -32, i16 0,
+ i16 0, i16 1, i16 -32, i16 undef>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-15.ll b/llvm/test/CodeGen/SystemZ/vec-const-15.ll
new file mode 100644
index 00000000000..cec445efe89
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-15.ll
@@ -0,0 +1,85 @@
+; Test vector replicates that use VECTOR GENERATE MASK, v4i32 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a word-granularity replicate with the lowest value that cannot use
+; VREPIF.
+define <4 x i32> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <4 x i32> <i32 32768, i32 32768, i32 32768, i32 32768>
+}
+
+; Test a word-granularity replicate that has the lower 17 bits set.
+define <4 x i32> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgmf %v24, 15, 31
+; CHECK: br %r14
+ ret <4 x i32> <i32 131071, i32 131071, i32 131071, i32 131071>
+}
+
+; Test a word-granularity replicate that has the upper 15 bits set.
+define <4 x i32> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgmf %v24, 0, 14
+; CHECK: br %r14
+ ret <4 x i32> <i32 -131072, i32 -131072, i32 -131072, i32 -131072>
+}
+
+; Test a word-granularity replicate that has middle bits set.
+define <4 x i32> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgmf %v24, 12, 17
+; CHECK: br %r14
+ ret <4 x i32> <i32 1032192, i32 1032192, i32 1032192, i32 1032192>
+}
+
+; Test a word-granularity replicate with a wrap-around mask.
+define <4 x i32> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vgmf %v24, 17, 15
+; CHECK: br %r14
+ ret <4 x i32> <i32 -32769, i32 -32769, i32 -32769, i32 -32769>
+}
+
+; Test a doubleword-granularity replicate with the lowest value that cannot
+; use VREPIG.
+define <4 x i32> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vgmg %v24, 48, 48
+; CHECK: br %r14
+ ret <4 x i32> <i32 0, i32 32768, i32 0, i32 32768>
+}
+
+; Test a doubleword-granularity replicate that has the lower 22 bits set.
+define <4 x i32> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vgmg %v24, 42, 63
+; CHECK: br %r14
+ ret <4 x i32> <i32 0, i32 4194303, i32 0, i32 4194303>
+}
+
+; Test a doubleword-granularity replicate that has the upper 45 bits set.
+define <4 x i32> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vgmg %v24, 0, 44
+; CHECK: br %r14
+ ret <4 x i32> <i32 -1, i32 -524288, i32 -1, i32 -524288>
+}
+
+; Test a doubleword-granularity replicate that has middle bits set.
+define <4 x i32> @f9() {
+; CHECK-LABEL: f9:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <4 x i32> <i32 1, i32 -2097152, i32 1, i32 -2097152>
+}
+
+; Test a doubleword-granularity replicate with a wrap-around mask.
+define <4 x i32> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vgmg %v24, 18, 0
+; CHECK: br %r14
+ ret <4 x i32> <i32 -2147467265, i32 -1, i32 -2147467265, i32 -1>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-const-16.ll b/llvm/test/CodeGen/SystemZ/vec-const-16.ll
new file mode 100644
index 00000000000..1ab7de2761c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-const-16.ll
@@ -0,0 +1,85 @@
+; Test vector replicates that use VECTOR GENERATE MASK, v2i64 version.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a word-granularity replicate with the lowest value that cannot use
+; VREPIF.
+define <2 x i64> @f1() {
+; CHECK-LABEL: f1:
+; CHECK: vgmf %v24, 16, 16
+; CHECK: br %r14
+ ret <2 x i64> <i64 140737488388096, i64 140737488388096>
+}
+
+; Test a word-granularity replicate that has the lower 17 bits set.
+define <2 x i64> @f2() {
+; CHECK-LABEL: f2:
+; CHECK: vgmf %v24, 15, 31
+; CHECK: br %r14
+ ret <2 x i64> <i64 562945658585087, i64 562945658585087>
+}
+
+; Test a word-granularity replicate that has the upper 15 bits set.
+define <2 x i64> @f3() {
+; CHECK-LABEL: f3:
+; CHECK: vgmf %v24, 0, 14
+; CHECK: br %r14
+ ret <2 x i64> <i64 -562945658585088, i64 -562945658585088>
+}
+
+; Test a word-granularity replicate that has middle bits set.
+define <2 x i64> @f4() {
+; CHECK-LABEL: f4:
+; CHECK: vgmf %v24, 12, 17
+; CHECK: br %r14
+ ret <2 x i64> <i64 4433230884225024, i64 4433230884225024>
+}
+
+; Test a word-granularity replicate with a wrap-around mask.
+define <2 x i64> @f5() {
+; CHECK-LABEL: f5:
+; CHECK: vgmf %v24, 17, 15
+; CHECK: br %r14
+ ret <2 x i64> <i64 -140737488388097, i64 -140737488388097>
+}
+
+; Test a doubleword-granularity replicate with the lowest value that cannot
+; use VREPIG.
+define <2 x i64> @f6() {
+; CHECK-LABEL: f6:
+; CHECK: vgmg %v24, 48, 48
+; CHECK: br %r14
+ ret <2 x i64> <i64 32768, i64 32768>
+}
+
+; Test a doubleword-granularity replicate that has the lower 22 bits set.
+define <2 x i64> @f7() {
+; CHECK-LABEL: f7:
+; CHECK: vgmg %v24, 42, 63
+; CHECK: br %r14
+ ret <2 x i64> <i64 4194303, i64 4194303>
+}
+
+; Test a doubleword-granularity replicate that has the upper 45 bits set.
+define <2 x i64> @f8() {
+; CHECK-LABEL: f8:
+; CHECK: vgmg %v24, 0, 44
+; CHECK: br %r14
+ ret <2 x i64> <i64 -524288, i64 -524288>
+}
+
+; Test a doubleword-granularity replicate that has middle bits set.
+define <2 x i64> @f9() {
+; CHECK-LABEL: f9:
+; CHECK: vgmg %v24, 31, 42
+; CHECK: br %r14
+ ret <2 x i64> <i64 8587837440, i64 8587837440>
+}
+
+; Test a doubleword-granularity replicate with a wrap-around mask.
+define <2 x i64> @f10() {
+; CHECK-LABEL: f10:
+; CHECK: vgmg %v24, 18, 0
+; CHECK: br %r14
+ ret <2 x i64> <i64 -9223301668110598145, i64 -9223301668110598145>
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-ctlz-01.ll b/llvm/test/CodeGen/SystemZ/vec-ctlz-01.ll
new file mode 100644
index 00000000000..f6502202ef5
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-ctlz-01.ll
@@ -0,0 +1,81 @@
+; Test vector count leading zeros
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+declare <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %src, i1 %is_zero_undef)
+declare <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %src, i1 %is_zero_undef)
+declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %src, i1 %is_zero_undef)
+declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %src, i1 %is_zero_undef)
+
+define <16 x i8> @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: vclzb %v24, %v24
+; CHECK: br %r14
+
+ %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 false)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @f2(<16 x i8> %a) {
+; CHECK-LABEL: f2:
+; CHECK: vclzb %v24, %v24
+; CHECK: br %r14
+
+ %res = call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %a, i1 true)
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @f3(<8 x i16> %a) {
+; CHECK-LABEL: f3:
+; CHECK: vclzh %v24, %v24
+; CHECK: br %r14
+
+ %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 false)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @f4(<8 x i16> %a) {
+; CHECK-LABEL: f4:
+; CHECK: vclzh %v24, %v24
+; CHECK: br %r14
+
+ %res = call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %a, i1 true)
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @f5(<4 x i32> %a) {
+; CHECK-LABEL: f5:
+; CHECK: vclzf %v24, %v24
+; CHECK: br %r14
+
+ %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 false)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @f6(<4 x i32> %a) {
+; CHECK-LABEL: f6:
+; CHECK: vclzf %v24, %v24
+; CHECK: br %r14
+
+ %res = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %a, i1 true)
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @f7(<2 x i64> %a) {
+; CHECK-LABEL: f7:
+; CHECK: vclzg %v24, %v24
+; CHECK: br %r14
+
+ %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 false)
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @f8(<2 x i64> %a) {
+; CHECK-LABEL: f8:
+; CHECK: vclzg %v24, %v24
+; CHECK: br %r14
+
+ %res = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a, i1 true)
+ ret <2 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/vec-ctpop-01.ll b/llvm/test/CodeGen/SystemZ/vec-ctpop-01.ll
new file mode 100644
index 00000000000..0056af73a2e
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-ctpop-01.ll
@@ -0,0 +1,53 @@
+; Test vector population-count instruction
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+declare <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+declare <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+declare <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+
+define <16 x i8> @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: vpopct %v24, %v24, 0
+; CHECK: br %r14
+
+ %popcnt = call <16 x i8> @llvm.ctpop.v16i8(<16 x i8> %a)
+ ret <16 x i8> %popcnt
+}
+
+define <8 x i16> @f2(<8 x i16> %a) {
+; CHECK-LABEL: f2:
+; CHECK: vpopct [[T1:%v[0-9]+]], %v24, 0
+; CHECK: veslh [[T2:%v[0-9]+]], [[T1]], 8
+; CHECK: vah [[T3:%v[0-9]+]], [[T1]], [[T2]]
+; CHECK: vesrlh %v24, [[T3]], 8
+; CHECK: br %r14
+
+ %popcnt = call <8 x i16> @llvm.ctpop.v8i16(<8 x i16> %a)
+ ret <8 x i16> %popcnt
+}
+
+define <4 x i32> @f3(<4 x i32> %a) {
+; CHECK-LABEL: f3:
+; CHECK: vpopct [[T1:%v[0-9]+]], %v24, 0
+; CHECK: vgbm [[T2:%v[0-9]+]], 0
+; CHECK: vsumb %v24, [[T1]], [[T2]]
+; CHECK: br %r14
+
+ %popcnt = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+ ret <4 x i32> %popcnt
+}
+
+define <2 x i64> @f4(<2 x i64> %a) {
+; CHECK-LABEL: f4:
+; CHECK: vpopct [[T1:%v[0-9]+]], %v24, 0
+; CHECK: vgbm [[T2:%v[0-9]+]], 0
+; CHECK: vsumb [[T3:%v[0-9]+]], [[T1]], [[T2]]
+; CHECK: vsumgf %v24, [[T3]], [[T2]]
+; CHECK: br %r14
+
+ %popcnt = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
+ ret <2 x i64> %popcnt
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/vec-cttz-01.ll b/llvm/test/CodeGen/SystemZ/vec-cttz-01.ll
new file mode 100644
index 00000000000..00a0d21b42f
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-cttz-01.ll
@@ -0,0 +1,81 @@
+; Test vector count trailing zeros
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+declare <16 x i8> @llvm.cttz.v16i8(<16 x i8> %src, i1 %is_zero_undef)
+declare <8 x i16> @llvm.cttz.v8i16(<8 x i16> %src, i1 %is_zero_undef)
+declare <4 x i32> @llvm.cttz.v4i32(<4 x i32> %src, i1 %is_zero_undef)
+declare <2 x i64> @llvm.cttz.v2i64(<2 x i64> %src, i1 %is_zero_undef)
+
+define <16 x i8> @f1(<16 x i8> %a) {
+; CHECK-LABEL: f1:
+; CHECK: vctzb %v24, %v24
+; CHECK: br %r14
+
+ %res = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 false)
+ ret <16 x i8> %res
+}
+
+define <16 x i8> @f2(<16 x i8> %a) {
+; CHECK-LABEL: f2:
+; CHECK: vctzb %v24, %v24
+; CHECK: br %r14
+
+ %res = call <16 x i8> @llvm.cttz.v16i8(<16 x i8> %a, i1 true)
+ ret <16 x i8> %res
+}
+
+define <8 x i16> @f3(<8 x i16> %a) {
+; CHECK-LABEL: f3:
+; CHECK: vctzh %v24, %v24
+; CHECK: br %r14
+
+ %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 false)
+ ret <8 x i16> %res
+}
+
+define <8 x i16> @f4(<8 x i16> %a) {
+; CHECK-LABEL: f4:
+; CHECK: vctzh %v24, %v24
+; CHECK: br %r14
+
+ %res = call <8 x i16> @llvm.cttz.v8i16(<8 x i16> %a, i1 true)
+ ret <8 x i16> %res
+}
+
+define <4 x i32> @f5(<4 x i32> %a) {
+; CHECK-LABEL: f5:
+; CHECK: vctzf %v24, %v24
+; CHECK: br %r14
+
+ %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 false)
+ ret <4 x i32> %res
+}
+
+define <4 x i32> @f6(<4 x i32> %a) {
+; CHECK-LABEL: f6:
+; CHECK: vctzf %v24, %v24
+; CHECK: br %r14
+
+ %res = call <4 x i32> @llvm.cttz.v4i32(<4 x i32> %a, i1 true)
+ ret <4 x i32> %res
+}
+
+define <2 x i64> @f7(<2 x i64> %a) {
+; CHECK-LABEL: f7:
+; CHECK: vctzg %v24, %v24
+; CHECK: br %r14
+
+ %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 false)
+ ret <2 x i64> %res
+}
+
+define <2 x i64> @f8(<2 x i64> %a) {
+; CHECK-LABEL: f8:
+; CHECK: vctzg %v24, %v24
+; CHECK: br %r14
+
+ %res = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a, i1 true)
+ ret <2 x i64> %res
+}
+
diff --git a/llvm/test/CodeGen/SystemZ/vec-div-01.ll b/llvm/test/CodeGen/SystemZ/vec-div-01.ll
new file mode 100644
index 00000000000..3c5ec4f54ee
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-div-01.ll
@@ -0,0 +1,62 @@
+; Test vector division. There is no native support for this, so it's really
+; a test of the operation legalization code.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 division.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgp [[REG:%v[0-9]+]],
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 0
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 1
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 2
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 3
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 4
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 5
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 6
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 8
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 9
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 10
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 11
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 12
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 13
+; CHECK-DAG: vlvgb [[REG]], {{%r[0-5]}}, 14
+; CHECK: br %r14
+ %ret = sdiv <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 division.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vlvgp [[REG:%v[0-9]+]],
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 0
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 1
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 2
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 4
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 5
+; CHECK-DAG: vlvgh [[REG]], {{%r[0-5]}}, 6
+; CHECK: br %r14
+ %ret = sdiv <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 division.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vlvgp [[REG:%v[0-9]+]],
+; CHECK-DAG: vlvgf [[REG]], {{%r[0-5]}}, 0
+; CHECK-DAG: vlvgf [[REG]], {{%r[0-5]}}, 2
+; CHECK: br %r14
+ %ret = sdiv <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 division.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vlvgp %v24,
+; CHECK: br %r14
+ %ret = sdiv <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-01.ll b/llvm/test/CodeGen/SystemZ/vec-max-01.ll
new file mode 100644
index 00000000000..ca6f08aa493
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-max-01.ll
@@ -0,0 +1,83 @@
+; Test v16i8 maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmxb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with sle.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmxb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with sgt.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmxb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with sge.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmxb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with ult.
+define <16 x i8> @f5(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmxlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with ule.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmxlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with ugt.
+define <16 x i8> @f7(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmxlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with uge.
+define <16 x i8> @f8(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmxlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <16 x i8> %val1, %val2
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-02.ll b/llvm/test/CodeGen/SystemZ/vec-max-02.ll
new file mode 100644
index 00000000000..2c61603b6f3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-max-02.ll
@@ -0,0 +1,83 @@
+; Test v8i16 maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <8 x i16> @f1(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmxh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with sle.
+define <8 x i16> @f2(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmxh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with sgt.
+define <8 x i16> @f3(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmxh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with sge.
+define <8 x i16> @f4(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmxh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with ult.
+define <8 x i16> @f5(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmxlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with ule.
+define <8 x i16> @f6(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmxlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with ugt.
+define <8 x i16> @f7(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmxlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with uge.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmxlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <8 x i16> %val1, %val2
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-03.ll b/llvm/test/CodeGen/SystemZ/vec-max-03.ll
new file mode 100644
index 00000000000..a4387948399
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-max-03.ll
@@ -0,0 +1,83 @@
+; Test v4i32 maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <4 x i32> @f1(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmxf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with sle.
+define <4 x i32> @f2(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmxf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with sgt.
+define <4 x i32> @f3(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmxf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with sge.
+define <4 x i32> @f4(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmxf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with ult.
+define <4 x i32> @f5(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmxlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with ule.
+define <4 x i32> @f6(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmxlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with ugt.
+define <4 x i32> @f7(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmxlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with uge.
+define <4 x i32> @f8(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmxlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <4 x i32> %val1, %val2
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-max-04.ll b/llvm/test/CodeGen/SystemZ/vec-max-04.ll
new file mode 100644
index 00000000000..ab7c6239127
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-max-04.ll
@@ -0,0 +1,83 @@
+; Test v2i64 maximum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <2 x i64> @f1(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmxg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with sle.
+define <2 x i64> @f2(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmxg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with sgt.
+define <2 x i64> @f3(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmxg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with sge.
+define <2 x i64> @f4(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmxg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with ult.
+define <2 x i64> @f5(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmxlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with ule.
+define <2 x i64> @f6(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmxlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with ugt.
+define <2 x i64> @f7(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmxlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with uge.
+define <2 x i64> @f8(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmxlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <2 x i64> %val1, %val2
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-min-01.ll b/llvm/test/CodeGen/SystemZ/vec-min-01.ll
new file mode 100644
index 00000000000..255dc57e113
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-min-01.ll
@@ -0,0 +1,83 @@
+; Test v16i8 minimum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with sle.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with sgt.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with sge.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmnb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with ult.
+define <16 x i8> @f5(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with ule.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val2, <16 x i8> %val1
+ ret <16 x i8> %ret
+}
+
+; Test with ugt.
+define <16 x i8> @f7(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
+
+; Test with uge.
+define <16 x i8> @f8(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmnlb %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <16 x i8> %val2, %val1
+ %ret = select <16 x i1> %cmp, <16 x i8> %val1, <16 x i8> %val2
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-min-02.ll b/llvm/test/CodeGen/SystemZ/vec-min-02.ll
new file mode 100644
index 00000000000..cad8a61506c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-min-02.ll
@@ -0,0 +1,83 @@
+; Test v8i16 minimum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <8 x i16> @f1(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmnh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with sle.
+define <8 x i16> @f2(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmnh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with sgt.
+define <8 x i16> @f3(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmnh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with sge.
+define <8 x i16> @f4(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmnh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with ult.
+define <8 x i16> @f5(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmnlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with ule.
+define <8 x i16> @f6(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmnlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val2, <8 x i16> %val1
+ ret <8 x i16> %ret
+}
+
+; Test with ugt.
+define <8 x i16> @f7(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmnlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
+
+; Test with uge.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmnlh %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <8 x i16> %val2, %val1
+ %ret = select <8 x i1> %cmp, <8 x i16> %val1, <8 x i16> %val2
+ ret <8 x i16> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-min-03.ll b/llvm/test/CodeGen/SystemZ/vec-min-03.ll
new file mode 100644
index 00000000000..febac50aa46
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-min-03.ll
@@ -0,0 +1,83 @@
+; Test v4i32 minimum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <4 x i32> @f1(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmnf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with sle.
+define <4 x i32> @f2(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmnf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with sgt.
+define <4 x i32> @f3(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmnf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with sge.
+define <4 x i32> @f4(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmnf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with ult.
+define <4 x i32> @f5(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmnlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with ule.
+define <4 x i32> @f6(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmnlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val2, <4 x i32> %val1
+ ret <4 x i32> %ret
+}
+
+; Test with ugt.
+define <4 x i32> @f7(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmnlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
+
+; Test with uge.
+define <4 x i32> @f8(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmnlf %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <4 x i32> %val2, %val1
+ %ret = select <4 x i1> %cmp, <4 x i32> %val1, <4 x i32> %val2
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-min-04.ll b/llvm/test/CodeGen/SystemZ/vec-min-04.ll
new file mode 100644
index 00000000000..765ce1956b5
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-min-04.ll
@@ -0,0 +1,83 @@
+; Test v2i64 minimum.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test with slt.
+define <2 x i64> @f1(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmng %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp slt <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with sle.
+define <2 x i64> @f2(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmng %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sle <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with sgt.
+define <2 x i64> @f3(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmng %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sgt <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with sge.
+define <2 x i64> @f4(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmng %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp sge <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with ult.
+define <2 x i64> @f5(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vmnlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ult <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with ule.
+define <2 x i64> @f6(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmnlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ule <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val2, <2 x i64> %val1
+ ret <2 x i64> %ret
+}
+
+; Test with ugt.
+define <2 x i64> @f7(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vmnlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp ugt <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
+
+; Test with uge.
+define <2 x i64> @f8(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmnlg %v24, {{%v24, %v26|%v26, %v24}}
+; CHECK: br %r14
+ %cmp = icmp uge <2 x i64> %val2, %val1
+ %ret = select <2 x i1> %cmp, <2 x i64> %val1, <2 x i64> %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-01.ll b/llvm/test/CodeGen/SystemZ/vec-move-01.ll
new file mode 100644
index 00000000000..952e5a42126
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-01.ll
@@ -0,0 +1,35 @@
+; Test vector register moves.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 moves.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vlr %v24, %v26
+; CHECK: br %r14
+ ret <16 x i8> %val2
+}
+
+; Test v8i16 moves.
+define <8 x i16> @f2(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vlr %v24, %v26
+; CHECK: br %r14
+ ret <8 x i16> %val2
+}
+
+; Test v4i32 moves.
+define <4 x i32> @f3(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vlr %v24, %v26
+; CHECK: br %r14
+ ret <4 x i32> %val2
+}
+
+; Test v2i64 moves.
+define <2 x i64> @f4(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vlr %v24, %v26
+; CHECK: br %r14
+ ret <2 x i64> %val2
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-02.ll b/llvm/test/CodeGen/SystemZ/vec-move-02.ll
new file mode 100644
index 00000000000..b7b3ab6798d
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-02.ll
@@ -0,0 +1,93 @@
+; Test vector loads.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 loads.
+define <16 x i8> @f1(<16 x i8> *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = load <16 x i8>, <16 x i8> *%ptr
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 loads.
+define <8 x i16> @f2(<8 x i16> *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = load <8 x i16>, <8 x i16> *%ptr
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 loads.
+define <4 x i32> @f3(<4 x i32> *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = load <4 x i32>, <4 x i32> *%ptr
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 loads.
+define <2 x i64> @f4(<2 x i64> *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = load <2 x i64>, <2 x i64> *%ptr
+ ret <2 x i64> %ret
+}
+
+; Test the highest aligned in-range offset.
+define <16 x i8> @f7(<16 x i8> *%base) {
+; CHECK-LABEL: f7:
+; CHECK: vl %v24, 4080(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
+ %ret = load <16 x i8>, <16 x i8> *%ptr
+ ret <16 x i8> %ret
+}
+
+; Test the highest unaligned in-range offset.
+define <16 x i8> @f8(i8 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: vl %v24, 4095(%r2)
+; CHECK: br %r14
+ %addr = getelementptr i8, i8 *%base, i64 4095
+ %ptr = bitcast i8 *%addr to <16 x i8> *
+ %ret = load <16 x i8>, <16 x i8> *%ptr, align 1
+ ret <16 x i8> %ret
+}
+
+; Test the next offset up, which requires separate address logic.
+define <16 x i8> @f9(<16 x i8> *%base) {
+; CHECK-LABEL: f9:
+; CHECK: aghi %r2, 4096
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
+ %ret = load <16 x i8>, <16 x i8> *%ptr
+ ret <16 x i8> %ret
+}
+
+; Test negative offsets, which also require separate address logic.
+define <16 x i8> @f10(<16 x i8> *%base) {
+; CHECK-LABEL: f10:
+; CHECK: aghi %r2, -16
+; CHECK: vl %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
+ %ret = load <16 x i8>, <16 x i8> *%ptr
+ ret <16 x i8> %ret
+}
+
+; Check that indexes are allowed.
+define <16 x i8> @f11(i8 *%base, i64 %index) {
+; CHECK-LABEL: f11:
+; CHECK: vl %v24, 0(%r3,%r2)
+; CHECK: br %r14
+ %addr = getelementptr i8, i8 *%base, i64 %index
+ %ptr = bitcast i8 *%addr to <16 x i8> *
+ %ret = load <16 x i8>, <16 x i8> *%ptr, align 1
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-03.ll b/llvm/test/CodeGen/SystemZ/vec-move-03.ll
new file mode 100644
index 00000000000..ddce4ef209a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-03.ll
@@ -0,0 +1,93 @@
+; Test vector stores.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 stores.
+define void @f1(<16 x i8> %val, <16 x i8> *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ store <16 x i8> %val, <16 x i8> *%ptr
+ ret void
+}
+
+; Test v8i16 stores.
+define void @f2(<8 x i16> %val, <8 x i16> *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ store <8 x i16> %val, <8 x i16> *%ptr
+ ret void
+}
+
+; Test v4i32 stores.
+define void @f3(<4 x i32> %val, <4 x i32> *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ store <4 x i32> %val, <4 x i32> *%ptr
+ ret void
+}
+
+; Test v2i64 stores.
+define void @f4(<2 x i64> %val, <2 x i64> *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ store <2 x i64> %val, <2 x i64> *%ptr
+ ret void
+}
+
+; Test the highest aligned in-range offset.
+define void @f7(<16 x i8> %val, <16 x i8> *%base) {
+; CHECK-LABEL: f7:
+; CHECK: vst %v24, 4080(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 255
+ store <16 x i8> %val, <16 x i8> *%ptr
+ ret void
+}
+
+; Test the highest unaligned in-range offset.
+define void @f8(<16 x i8> %val, i8 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: vst %v24, 4095(%r2)
+; CHECK: br %r14
+ %addr = getelementptr i8, i8 *%base, i64 4095
+ %ptr = bitcast i8 *%addr to <16 x i8> *
+ store <16 x i8> %val, <16 x i8> *%ptr, align 1
+ ret void
+}
+
+; Test the next offset up, which requires separate address logic.
+define void @f9(<16 x i8> %val, <16 x i8> *%base) {
+; CHECK-LABEL: f9:
+; CHECK: aghi %r2, 4096
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 256
+ store <16 x i8> %val, <16 x i8> *%ptr
+ ret void
+}
+
+; Test negative offsets, which also require separate address logic.
+define void @f10(<16 x i8> %val, <16 x i8> *%base) {
+; CHECK-LABEL: f10:
+; CHECK: aghi %r2, -16
+; CHECK: vst %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr <16 x i8>, <16 x i8> *%base, i64 -1
+ store <16 x i8> %val, <16 x i8> *%ptr
+ ret void
+}
+
+; Check that indexes are allowed.
+define void @f11(<16 x i8> %val, i8 *%base, i64 %index) {
+; CHECK-LABEL: f11:
+; CHECK: vst %v24, 0(%r3,%r2)
+; CHECK: br %r14
+ %addr = getelementptr i8, i8 *%base, i64 %index
+ %ptr = bitcast i8 *%addr to <16 x i8> *
+ store <16 x i8> %val, <16 x i8> *%ptr, align 1
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-04.ll b/llvm/test/CodeGen/SystemZ/vec-move-04.ll
new file mode 100644
index 00000000000..f43c0b71491
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-04.ll
@@ -0,0 +1,121 @@
+; Test vector insertion of register variables.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into the first element.
+define <16 x i8> @f1(<16 x i8> %val, i8 %element) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgb %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 0
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into the last element.
+define <16 x i8> @f2(<16 x i8> %val, i8 %element) {
+; CHECK-LABEL: f2:
+; CHECK: vlvgb %v24, %r2, 15
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 15
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into a variable element.
+define <16 x i8> @f3(<16 x i8> %val, i8 %element, i32 %index) {
+; CHECK-LABEL: f3:
+; CHECK: vlvgb %v24, %r2, 0(%r3)
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 %index
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 insertion into the first element.
+define <8 x i16> @f4(<8 x i16> %val, i16 %element) {
+; CHECK-LABEL: f4:
+; CHECK: vlvgh %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 0
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into the last element.
+define <8 x i16> @f5(<8 x i16> %val, i16 %element) {
+; CHECK-LABEL: f5:
+; CHECK: vlvgh %v24, %r2, 7
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 7
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into a variable element.
+define <8 x i16> @f6(<8 x i16> %val, i16 %element, i32 %index) {
+; CHECK-LABEL: f6:
+; CHECK: vlvgh %v24, %r2, 0(%r3)
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 %index
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 insertion into the first element.
+define <4 x i32> @f7(<4 x i32> %val, i32 %element) {
+; CHECK-LABEL: f7:
+; CHECK: vlvgf %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into the last element.
+define <4 x i32> @f8(<4 x i32> %val, i32 %element) {
+; CHECK-LABEL: f8:
+; CHECK: vlvgf %v24, %r2, 3
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into a variable element.
+define <4 x i32> @f9(<4 x i32> %val, i32 %element, i32 %index) {
+; CHECK-LABEL: f9:
+; CHECK: vlvgf %v24, %r2, 0(%r3)
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 %index
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into the first element.
+define <2 x i64> @f10(<2 x i64> %val, i64 %element) {
+; CHECK-LABEL: f10:
+; CHECK: vlvgg %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into the last element.
+define <2 x i64> @f11(<2 x i64> %val, i64 %element) {
+; CHECK-LABEL: f11:
+; CHECK: vlvgg %v24, %r2, 1
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into a variable element.
+define <2 x i64> @f12(<2 x i64> %val, i64 %element, i32 %index) {
+; CHECK-LABEL: f12:
+; CHECK: vlvgg %v24, %r2, 0(%r3)
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 %index
+ ret <2 x i64> %ret
+}
+
+; Test v16i8 insertion into a variable element plus one.
+define <16 x i8> @f19(<16 x i8> %val, i8 %element, i32 %index) {
+; CHECK-LABEL: f19:
+; CHECK: vlvgb %v24, %r2, 1(%r3)
+; CHECK: br %r14
+ %add = add i32 %index, 1
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 %add
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-05.ll b/llvm/test/CodeGen/SystemZ/vec-move-05.ll
new file mode 100644
index 00000000000..60a0666c2f9
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-05.ll
@@ -0,0 +1,161 @@
+; Test vector extraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 extraction of the first element.
+define i8 @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlgvb %r2, %v24, 0
+; CHECK: br %r14
+ %ret = extractelement <16 x i8> %val, i32 0
+ ret i8 %ret
+}
+
+; Test v16i8 extraction of the last element.
+define i8 @f2(<16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlgvb %r2, %v24, 15
+; CHECK: br %r14
+ %ret = extractelement <16 x i8> %val, i32 15
+ ret i8 %ret
+}
+
+; Test v16i8 extraction of an absurd element number.  This must compile
+; but we don't care what it does.
+define i8 @f3(<16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: vlgvb %r2, %v24, 100000
+; CHECK: br %r14
+ %ret = extractelement <16 x i8> %val, i32 100000
+ ret i8 %ret
+}
+
+; Test v16i8 extraction of a variable element.
+define i8 @f4(<16 x i8> %val, i32 %index) {
+; CHECK-LABEL: f4:
+; CHECK: vlgvb %r2, %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = extractelement <16 x i8> %val, i32 %index
+ ret i8 %ret
+}
+
+; Test v8i16 extraction of the first element.
+define i16 @f5(<8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlgvh %r2, %v24, 0
+; CHECK: br %r14
+ %ret = extractelement <8 x i16> %val, i32 0
+ ret i16 %ret
+}
+
+; Test v8i16 extraction of the last element.
+define i16 @f6(<8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlgvh %r2, %v24, 7
+; CHECK: br %r14
+ %ret = extractelement <8 x i16> %val, i32 7
+ ret i16 %ret
+}
+
+; Test v8i16 extraction of an absurd element number.  This must compile
+; but we don't care what it does.
+define i16 @f7(<8 x i16> %val) {
+; CHECK-LABEL: f7:
+; CHECK-NOT: vlgvh %r2, %v24, 100000
+; CHECK: br %r14
+ %ret = extractelement <8 x i16> %val, i32 100000
+ ret i16 %ret
+}
+
+; Test v8i16 extraction of a variable element.
+define i16 @f8(<8 x i16> %val, i32 %index) {
+; CHECK-LABEL: f8:
+; CHECK: vlgvh %r2, %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = extractelement <8 x i16> %val, i32 %index
+ ret i16 %ret
+}
+
+; Test v4i32 extraction of the first element.
+define i32 @f9(<4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlgvf %r2, %v24, 0
+; CHECK: br %r14
+ %ret = extractelement <4 x i32> %val, i32 0
+ ret i32 %ret
+}
+
+; Test v4i32 extraction of the last element.
+define i32 @f10(<4 x i32> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlgvf %r2, %v24, 3
+; CHECK: br %r14
+ %ret = extractelement <4 x i32> %val, i32 3
+ ret i32 %ret
+}
+
+; Test v4i32 extraction of an absurd element number.  This must compile
+; but we don't care what it does.
+define i32 @f11(<4 x i32> %val) {
+; CHECK-LABEL: f11:
+; CHECK-NOT: vlgvf %r2, %v24, 100000
+; CHECK: br %r14
+ %ret = extractelement <4 x i32> %val, i32 100000
+ ret i32 %ret
+}
+
+; Test v4i32 extraction of a variable element.
+define i32 @f12(<4 x i32> %val, i32 %index) {
+; CHECK-LABEL: f12:
+; CHECK: vlgvf %r2, %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = extractelement <4 x i32> %val, i32 %index
+ ret i32 %ret
+}
+
+; Test v2i64 extraction of the first element.
+define i64 @f13(<2 x i64> %val) {
+; CHECK-LABEL: f13:
+; CHECK: vlgvg %r2, %v24, 0
+; CHECK: br %r14
+ %ret = extractelement <2 x i64> %val, i32 0
+ ret i64 %ret
+}
+
+; Test v2i64 extraction of the last element.
+define i64 @f14(<2 x i64> %val) {
+; CHECK-LABEL: f14:
+; CHECK: vlgvg %r2, %v24, 1
+; CHECK: br %r14
+ %ret = extractelement <2 x i64> %val, i32 1
+ ret i64 %ret
+}
+
+; Test v2i64 extraction of an absurd element number.  This must compile
+; but we don't care what it does.
+define i64 @f15(<2 x i64> %val) {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vlgvg %r2, %v24, 100000
+; CHECK: br %r14
+ %ret = extractelement <2 x i64> %val, i32 100000
+ ret i64 %ret
+}
+
+; Test v2i64 extraction of a variable element.
+define i64 @f16(<2 x i64> %val, i32 %index) {
+; CHECK-LABEL: f16:
+; CHECK: vlgvg %r2, %v24, 0(%r2)
+; CHECK: br %r14
+ %ret = extractelement <2 x i64> %val, i32 %index
+ ret i64 %ret
+}
+
+; Test v16i8 extraction of a variable element with an offset.
+define i8 @f27(<16 x i8> %val, i32 %index) {
+; CHECK-LABEL: f27:
+; CHECK: vlgvb %r2, %v24, 1(%r2)
+; CHECK: br %r14
+ %add = add i32 %index, 1
+ %ret = extractelement <16 x i8> %val, i32 %add
+ ret i8 %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-06.ll b/llvm/test/CodeGen/SystemZ/vec-move-06.ll
new file mode 100644
index 00000000000..de3960cad95
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-06.ll
@@ -0,0 +1,13 @@
+; Test vector builds using VLVGP.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test the basic v2i64 usage.
+define <2 x i64> @f1(i64 %a, i64 %b) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgp %v24, %r2, %r3
+; CHECK: br %r14
+ %veca = insertelement <2 x i64> undef, i64 %a, i32 0
+ %vecb = insertelement <2 x i64> %veca, i64 %b, i32 1
+ ret <2 x i64> %vecb
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-07.ll b/llvm/test/CodeGen/SystemZ/vec-move-07.ll
new file mode 100644
index 00000000000..a688b089b97
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-07.ll
@@ -0,0 +1,39 @@
+; Test scalar_to_vector expansion.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8.
+define <16 x i8> @f1(i8 %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgb %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 0
+ ret <16 x i8> %ret
+}
+
+; Test v8i16.
+define <8 x i16> @f2(i16 %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlvgh %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 0
+ ret <8 x i16> %ret
+}
+
+; Test v4i32.
+define <4 x i32> @f3(i32 %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlvgf %v24, %r2, 0
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test v2i64. Here we load %val into both halves.
+define <2 x i64> @f4(i64 %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> undef, i64 %val, i32 0
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-08.ll b/llvm/test/CodeGen/SystemZ/vec-move-08.ll
new file mode 100644
index 00000000000..94a3b3aefba
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-08.ll
@@ -0,0 +1,284 @@
+; Test vector insertion of memory values.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into the first element.
+define <16 x i8> @f1(<16 x i8> %val, i8 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vleb %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 0
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into the last element.
+define <16 x i8> @f2(<16 x i8> %val, i8 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vleb %v24, 0(%r2), 15
+; CHECK: br %r14
+ %element = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 15
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion with the highest in-range offset.
+define <16 x i8> @f3(<16 x i8> %val, i8 *%base) {
+; CHECK-LABEL: f3:
+; CHECK: vleb %v24, 4095(%r2), 10
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i32 4095
+ %element = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 10
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion with the first out-of-range offset.
+define <16 x i8> @f4(<16 x i8> %val, i8 *%base) {
+; CHECK-LABEL: f4:
+; CHECK: aghi %r2, 4096
+; CHECK: vleb %v24, 0(%r2), 5
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i32 4096
+ %element = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 5
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into a variable element.
+define <16 x i8> @f5(<16 x i8> %val, i8 *%ptr, i32 %index) {
+; CHECK-LABEL: f5:
+; CHECK-NOT: vleb
+; CHECK: br %r14
+ %element = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> %val, i8 %element, i32 %index
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 insertion into the first element.
+define <8 x i16> @f6(<8 x i16> %val, i16 *%ptr) {
+; CHECK-LABEL: f6:
+; CHECK: vleh %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 0
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into the last element.
+define <8 x i16> @f7(<8 x i16> %val, i16 *%ptr) {
+; CHECK-LABEL: f7:
+; CHECK: vleh %v24, 0(%r2), 7
+; CHECK: br %r14
+ %element = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 7
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion with the highest in-range offset.
+define <8 x i16> @f8(<8 x i16> %val, i16 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: vleh %v24, 4094(%r2), 5
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i32 2047
+ %element = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 5
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion with the first out-of-range offset.
+define <8 x i16> @f9(<8 x i16> %val, i16 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: aghi %r2, 4096
+; CHECK: vleh %v24, 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i32 2048
+ %element = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 1
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into a variable element.
+define <8 x i16> @f10(<8 x i16> %val, i16 *%ptr, i32 %index) {
+; CHECK-LABEL: f10:
+; CHECK-NOT: vleh
+; CHECK: br %r14
+ %element = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> %val, i16 %element, i32 %index
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 insertion into the first element.
+define <4 x i32> @f11(<4 x i32> %val, i32 *%ptr) {
+; CHECK-LABEL: f11:
+; CHECK: vlef %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into the last element.
+define <4 x i32> @f12(<4 x i32> %val, i32 *%ptr) {
+; CHECK-LABEL: f12:
+; CHECK: vlef %v24, 0(%r2), 3
+; CHECK: br %r14
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the highest in-range offset.
+define <4 x i32> @f13(<4 x i32> %val, i32 *%base) {
+; CHECK-LABEL: f13:
+; CHECK: vlef %v24, 4092(%r2), 2
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i32 1023
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 2
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the first out-of-range offset.
+define <4 x i32> @f14(<4 x i32> %val, i32 *%base) {
+; CHECK-LABEL: f14:
+; CHECK: aghi %r2, 4096
+; CHECK: vlef %v24, 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i32 1024
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into a variable element.
+define <4 x i32> @f15(<4 x i32> %val, i32 *%ptr, i32 %index) {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vlef
+; CHECK: br %r14
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 %index
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into the first element.
+define <2 x i64> @f16(<2 x i64> %val, i64 *%ptr) {
+; CHECK-LABEL: f16:
+; CHECK: vleg %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into the last element.
+define <2 x i64> @f17(<2 x i64> %val, i64 *%ptr) {
+; CHECK-LABEL: f17:
+; CHECK: vleg %v24, 0(%r2), 1
+; CHECK: br %r14
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the highest in-range offset.
+define <2 x i64> @f18(<2 x i64> %val, i64 *%base) {
+; CHECK-LABEL: f18:
+; CHECK: vleg %v24, 4088(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 511
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the first out-of-range offset.
+define <2 x i64> @f19(<2 x i64> %val, i64 *%base) {
+; CHECK-LABEL: f19:
+; CHECK: aghi %r2, 4096
+; CHECK: vleg %v24, 0(%r2), 0
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 512
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into a variable element.
+define <2 x i64> @f20(<2 x i64> %val, i64 *%ptr, i32 %index) {
+; CHECK-LABEL: f20:
+; CHECK-NOT: vleg
+; CHECK: br %r14
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 %index
+ ret <2 x i64> %ret
+}
+
+; Test a v4i32 gather of the first element.
+define <4 x i32> @f31(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f31:
+; CHECK: vgef %v24, 0(%v26,%r2), 0
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 0
+ %ext = zext i32 %elem to i64
+ %add = add i64 %base, %ext
+ %ptr = inttoptr i64 %add to i32 *
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 gather of the last element.
+define <4 x i32> @f32(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f32:
+; CHECK: vgef %v24, 0(%v26,%r2), 3
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 3
+ %ext = zext i32 %elem to i64
+ %add = add i64 %base, %ext
+ %ptr = inttoptr i64 %add to i32 *
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 gather with the highest in-range offset.
+define <4 x i32> @f33(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f33:
+; CHECK: vgef %v24, 4095(%v26,%r2), 1
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 1
+ %ext = zext i32 %elem to i64
+ %add1 = add i64 %base, %ext
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i32 *
+ %element = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> %val, i32 %element, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 gather of the first element.
+define <2 x i64> @f34(<2 x i64> %val, <2 x i64> %index, i64 %base) {
+; CHECK-LABEL: f34:
+; CHECK: vgeg %v24, 0(%v26,%r2), 0
+; CHECK: br %r14
+ %elem = extractelement <2 x i64> %index, i32 0
+ %add = add i64 %base, %elem
+ %ptr = inttoptr i64 %add to i64 *
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 gather of the last element.
+define <2 x i64> @f35(<2 x i64> %val, <2 x i64> %index, i64 %base) {
+; CHECK-LABEL: f35:
+; CHECK: vgeg %v24, 0(%v26,%r2), 1
+; CHECK: br %r14
+ %elem = extractelement <2 x i64> %index, i32 1
+ %add = add i64 %base, %elem
+ %ptr = inttoptr i64 %add to i64 *
+ %element = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> %val, i64 %element, i32 1
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-09.ll b/llvm/test/CodeGen/SystemZ/vec-move-09.ll
new file mode 100644
index 00000000000..7863e4305f9
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-09.ll
@@ -0,0 +1,237 @@
+; Test vector insertion of constants.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into the first element.
+define <16 x i8> @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vleib %v24, 0, 0
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 0, i32 0
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into the last element.
+define <16 x i8> @f2(<16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vleib %v24, 100, 15
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 100, i32 15
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion with the maximum signed value.
+define <16 x i8> @f3(<16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vleib %v24, 127, 10
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 127, i32 10
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion with the minimum signed value.
+define <16 x i8> @f4(<16 x i8> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vleib %v24, -128, 11
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 128, i32 11
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion with the maximum unsigned value.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vleib %v24, -1, 12
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 255, i32 12
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into a variable element.
+define <16 x i8> @f6(<16 x i8> %val, i32 %index) {
+; CHECK-LABEL: f6:
+; CHECK-NOT: vleib
+; CHECK: br %r14
+ %ret = insertelement <16 x i8> %val, i8 0, i32 %index
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 insertion into the first element.
+define <8 x i16> @f7(<8 x i16> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vleih %v24, 0, 0
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 0, i32 0
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into the last element.
+define <8 x i16> @f8(<8 x i16> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vleih %v24, 0, 7
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 0, i32 7
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion with the maximum signed value.
+define <8 x i16> @f9(<8 x i16> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vleih %v24, 32767, 4
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 32767, i32 4
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion with the minimum signed value.
+define <8 x i16> @f10(<8 x i16> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vleih %v24, -32768, 5
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 32768, i32 5
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion with the maximum unsigned value.
+define <8 x i16> @f11(<8 x i16> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vleih %v24, -1, 6
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 65535, i32 6
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into a variable element.
+define <8 x i16> @f12(<8 x i16> %val, i32 %index) {
+; CHECK-LABEL: f12:
+; CHECK-NOT: vleih
+; CHECK: br %r14
+ %ret = insertelement <8 x i16> %val, i16 0, i32 %index
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 insertion into the first element.
+define <4 x i32> @f13(<4 x i32> %val) {
+; CHECK-LABEL: f13:
+; CHECK: vleif %v24, 0, 0
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 0, i32 0
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into the last element.
+define <4 x i32> @f14(<4 x i32> %val) {
+; CHECK-LABEL: f14:
+; CHECK: vleif %v24, 0, 3
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 0, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the maximum value allowed by VLEIF.
+define <4 x i32> @f15(<4 x i32> %val) {
+; CHECK-LABEL: f15:
+; CHECK: vleif %v24, 32767, 1
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 32767, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the next value up.
+define <4 x i32> @f16(<4 x i32> %val) {
+; CHECK-LABEL: f16:
+; CHECK-NOT: vleif
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 32768, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the minimum value allowed by VLEIF.
+define <4 x i32> @f17(<4 x i32> %val) {
+; CHECK-LABEL: f17:
+; CHECK: vleif %v24, -32768, 2
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 -32768, i32 2
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion with the next value down.
+define <4 x i32> @f18(<4 x i32> %val) {
+; CHECK-LABEL: f18:
+; CHECK-NOT: vleif
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 -32769, i32 2
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into a variable element.
+define <4 x i32> @f19(<4 x i32> %val, i32 %index) {
+; CHECK-LABEL: f19:
+; CHECK-NOT: vleif
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> %val, i32 0, i32 %index
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into the first element.
+define <2 x i64> @f20(<2 x i64> %val) {
+; CHECK-LABEL: f20:
+; CHECK: vleig %v24, 0, 0
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 0, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into the last element.
+define <2 x i64> @f21(<2 x i64> %val) {
+; CHECK-LABEL: f21:
+; CHECK: vleig %v24, 0, 1
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 0, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the maximum value allowed by VLEIG.
+define <2 x i64> @f22(<2 x i64> %val) {
+; CHECK-LABEL: f22:
+; CHECK: vleig %v24, 32767, 1
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 32767, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the next value up.
+define <2 x i64> @f23(<2 x i64> %val) {
+; CHECK-LABEL: f23:
+; CHECK-NOT: vleig
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 32768, i32 1
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the minimum value allowed by VLEIG.
+define <2 x i64> @f24(<2 x i64> %val) {
+; CHECK-LABEL: f24:
+; CHECK: vleig %v24, -32768, 0
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 -32768, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion with the next value down.
+define <2 x i64> @f25(<2 x i64> %val) {
+; CHECK-LABEL: f25:
+; CHECK-NOT: vleig
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 -32769, i32 0
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 insertion into a variable element.
+define <2 x i64> @f26(<2 x i64> %val, i32 %index) {
+; CHECK-LABEL: f26:
+; CHECK-NOT: vleig
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> %val, i64 0, i32 %index
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-10.ll b/llvm/test/CodeGen/SystemZ/vec-move-10.ll
new file mode 100644
index 00000000000..852a4a7c4ed
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-10.ll
@@ -0,0 +1,328 @@
+; Test vector extraction to memory.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 extraction from the first element.
+define void @f1(<16 x i8> %val, i8 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vsteb %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = extractelement <16 x i8> %val, i32 0
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v16i8 extraction from the last element.
+define void @f2(<16 x i8> %val, i8 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: vsteb %v24, 0(%r2), 15
+; CHECK: br %r14
+ %element = extractelement <16 x i8> %val, i32 15
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v16i8 extraction of an invalid element. This must compile,
+; but we don't care what it does.
+define void @f3(<16 x i8> %val, i8 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: vsteb %v24, 0(%r2), 16
+; CHECK: br %r14
+ %element = extractelement <16 x i8> %val, i32 16
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v16i8 extraction with the highest in-range offset.
+define void @f4(<16 x i8> %val, i8 *%base) {
+; CHECK-LABEL: f4:
+; CHECK: vsteb %v24, 4095(%r2), 10
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i32 4095
+ %element = extractelement <16 x i8> %val, i32 10
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v16i8 extraction with the first out-of-range offset.
+define void @f5(<16 x i8> %val, i8 *%base) {
+; CHECK-LABEL: f5:
+; CHECK: aghi %r2, 4096
+; CHECK: vsteb %v24, 0(%r2), 5
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i32 4096
+ %element = extractelement <16 x i8> %val, i32 5
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v16i8 extraction from a variable element.
+define void @f6(<16 x i8> %val, i8 *%ptr, i32 %index) {
+; CHECK-LABEL: f6:
+; CHECK-NOT: vsteb
+; CHECK: br %r14
+ %element = extractelement <16 x i8> %val, i32 %index
+ store i8 %element, i8 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction from the first element.
+define void @f7(<8 x i16> %val, i16 *%ptr) {
+; CHECK-LABEL: f7:
+; CHECK: vsteh %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = extractelement <8 x i16> %val, i32 0
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction from the last element.
+define void @f8(<8 x i16> %val, i16 *%ptr) {
+; CHECK-LABEL: f8:
+; CHECK: vsteh %v24, 0(%r2), 7
+; CHECK: br %r14
+ %element = extractelement <8 x i16> %val, i32 7
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction of an invalid element. This must compile,
+; but we don't care what it does.
+define void @f9(<8 x i16> %val, i16 *%ptr) {
+; CHECK-LABEL: f9:
+; CHECK-NOT: vsteh %v24, 0(%r2), 8
+; CHECK: br %r14
+ %element = extractelement <8 x i16> %val, i32 8
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction with the highest in-range offset.
+define void @f10(<8 x i16> %val, i16 *%base) {
+; CHECK-LABEL: f10:
+; CHECK: vsteh %v24, 4094(%r2), 5
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i32 2047
+ %element = extractelement <8 x i16> %val, i32 5
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction with the first out-of-range offset.
+define void @f11(<8 x i16> %val, i16 *%base) {
+; CHECK-LABEL: f11:
+; CHECK: aghi %r2, 4096
+; CHECK: vsteh %v24, 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i32 2048
+ %element = extractelement <8 x i16> %val, i32 1
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v8i16 extraction from a variable element.
+define void @f12(<8 x i16> %val, i16 *%ptr, i32 %index) {
+; CHECK-LABEL: f12:
+; CHECK-NOT: vsteh
+; CHECK: br %r14
+ %element = extractelement <8 x i16> %val, i32 %index
+ store i16 %element, i16 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction from the first element.
+define void @f13(<4 x i32> %val, i32 *%ptr) {
+; CHECK-LABEL: f13:
+; CHECK: vstef %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = extractelement <4 x i32> %val, i32 0
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction from the last element.
+define void @f14(<4 x i32> %val, i32 *%ptr) {
+; CHECK-LABEL: f14:
+; CHECK: vstef %v24, 0(%r2), 3
+; CHECK: br %r14
+ %element = extractelement <4 x i32> %val, i32 3
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction of an invalid element. This must compile,
+; but we don't care what it does.
+define void @f15(<4 x i32> %val, i32 *%ptr) {
+; CHECK-LABEL: f15:
+; CHECK-NOT: vstef %v24, 0(%r2), 4
+; CHECK: br %r14
+ %element = extractelement <4 x i32> %val, i32 4
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction with the highest in-range offset.
+define void @f16(<4 x i32> %val, i32 *%base) {
+; CHECK-LABEL: f16:
+; CHECK: vstef %v24, 4092(%r2), 2
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i32 1023
+ %element = extractelement <4 x i32> %val, i32 2
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction with the first out-of-range offset.
+define void @f17(<4 x i32> %val, i32 *%base) {
+; CHECK-LABEL: f17:
+; CHECK: aghi %r2, 4096
+; CHECK: vstef %v24, 0(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i32 1024
+ %element = extractelement <4 x i32> %val, i32 1
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v4i32 extraction from a variable element.
+define void @f18(<4 x i32> %val, i32 *%ptr, i32 %index) {
+; CHECK-LABEL: f18:
+; CHECK-NOT: vstef
+; CHECK: br %r14
+ %element = extractelement <4 x i32> %val, i32 %index
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction from the first element.
+define void @f19(<2 x i64> %val, i64 *%ptr) {
+; CHECK-LABEL: f19:
+; CHECK: vsteg %v24, 0(%r2), 0
+; CHECK: br %r14
+ %element = extractelement <2 x i64> %val, i32 0
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction from the last element.
+define void @f20(<2 x i64> %val, i64 *%ptr) {
+; CHECK-LABEL: f20:
+; CHECK: vsteg %v24, 0(%r2), 1
+; CHECK: br %r14
+ %element = extractelement <2 x i64> %val, i32 1
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction of an invalid element. This must compile,
+; but we don't care what it does.
+define void @f21(<2 x i64> %val, i64 *%ptr) {
+; CHECK-LABEL: f21:
+; CHECK-NOT: vsteg %v24, 0(%r2), 2
+; CHECK: br %r14
+ %element = extractelement <2 x i64> %val, i32 2
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction with the highest in-range offset.
+define void @f22(<2 x i64> %val, i64 *%base) {
+; CHECK-LABEL: f22:
+; CHECK: vsteg %v24, 4088(%r2), 1
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 511
+ %element = extractelement <2 x i64> %val, i32 1
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction with the first out-of-range offset.
+define void @f23(<2 x i64> %val, i64 *%base) {
+; CHECK-LABEL: f23:
+; CHECK: aghi %r2, 4096
+; CHECK: vsteg %v24, 0(%r2), 0
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 512
+ %element = extractelement <2 x i64> %val, i32 0
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test v2i64 extraction from a variable element.
+define void @f24(<2 x i64> %val, i64 *%ptr, i32 %index) {
+; CHECK-LABEL: f24:
+; CHECK-NOT: vsteg
+; CHECK: br %r14
+ %element = extractelement <2 x i64> %val, i32 %index
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test a v4i32 scatter of the first element.
+define void @f37(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f37:
+; CHECK: vscef %v24, 0(%v26,%r2), 0
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 0
+ %ext = zext i32 %elem to i64
+ %add = add i64 %base, %ext
+ %ptr = inttoptr i64 %add to i32 *
+ %element = extractelement <4 x i32> %val, i32 0
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test a v4i32 scatter of the last element.
+define void @f38(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f38:
+; CHECK: vscef %v24, 0(%v26,%r2), 3
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 3
+ %ext = zext i32 %elem to i64
+ %add = add i64 %base, %ext
+ %ptr = inttoptr i64 %add to i32 *
+ %element = extractelement <4 x i32> %val, i32 3
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test a v4i32 scatter with the highest in-range offset.
+define void @f39(<4 x i32> %val, <4 x i32> %index, i64 %base) {
+; CHECK-LABEL: f39:
+; CHECK: vscef %v24, 4095(%v26,%r2), 1
+; CHECK: br %r14
+ %elem = extractelement <4 x i32> %index, i32 1
+ %ext = zext i32 %elem to i64
+ %add1 = add i64 %base, %ext
+ %add2 = add i64 %add1, 4095
+ %ptr = inttoptr i64 %add2 to i32 *
+ %element = extractelement <4 x i32> %val, i32 1
+ store i32 %element, i32 *%ptr
+ ret void
+}
+
+; Test a v2i64 scatter of the first element.
+define void @f40(<2 x i64> %val, <2 x i64> %index, i64 %base) {
+; CHECK-LABEL: f40:
+; CHECK: vsceg %v24, 0(%v26,%r2), 0
+; CHECK: br %r14
+ %elem = extractelement <2 x i64> %index, i32 0
+ %add = add i64 %base, %elem
+ %ptr = inttoptr i64 %add to i64 *
+ %element = extractelement <2 x i64> %val, i32 0
+ store i64 %element, i64 *%ptr
+ ret void
+}
+
+; Test a v2i64 scatter of the last element.
+define void @f41(<2 x i64> %val, <2 x i64> %index, i64 %base) {
+; CHECK-LABEL: f41:
+; CHECK: vsceg %v24, 0(%v26,%r2), 1
+; CHECK: br %r14
+ %elem = extractelement <2 x i64> %index, i32 1
+ %add = add i64 %base, %elem
+ %ptr = inttoptr i64 %add to i64 *
+ %element = extractelement <2 x i64> %val, i32 1
+ store i64 %element, i64 *%ptr
+ ret void
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-11.ll b/llvm/test/CodeGen/SystemZ/vec-move-11.ll
new file mode 100644
index 00000000000..45bc91b169b
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-11.ll
@@ -0,0 +1,93 @@
+; Test insertions of register values into a nonzero index of an undef.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into an undef, with an arbitrary index.
+define <16 x i8> @f1(i8 %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgb %v24, %r2, 12
+; CHECK-NEXT: br %r14
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 12
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into an undef, with the first good index for VLVGP.
+define <16 x i8> @f2(i8 %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into an undef, with the second good index for VLVGP.
+define <16 x i8> @f3(i8 %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 15
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 insertion into an undef, with an arbitrary index.
+define <8 x i16> @f4(i16 %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlvgh %v24, %r2, 5
+; CHECK-NEXT: br %r14
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 5
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into an undef, with the first good index for VLVGP.
+define <8 x i16> @f5(i16 %val) {
+; CHECK-LABEL: f5:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 3
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into an undef, with the second good index for VLVGP.
+define <8 x i16> @f6(i16 %val) {
+; CHECK-LABEL: f6:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 7
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 insertion into an undef, with an arbitrary index.
+define <4 x i32> @f7(i32 %val) {
+; CHECK-LABEL: f7:
+; CHECK: vlvgf %v24, %r2, 2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 2
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into an undef, with the first good index for VLVGP.
+define <4 x i32> @f8(i32 %val) {
+; CHECK-LABEL: f8:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into an undef, with the second good index for VLVGP.
+define <4 x i32> @f9(i32 %val) {
+; CHECK-LABEL: f9:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into an undef.
+define <2 x i64> @f10(i64 %val) {
+; CHECK-LABEL: f10:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK-NEXT: br %r14
+ %ret = insertelement <2 x i64> undef, i64 %val, i32 1
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-12.ll b/llvm/test/CodeGen/SystemZ/vec-move-12.ll
new file mode 100644
index 00000000000..1fecab688e7
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-12.ll
@@ -0,0 +1,103 @@
+; Test insertions of memory values into a nonzero index of an undef.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into an undef, with an arbitrary index.
+define <16 x i8> @f1(i8 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vlrepb %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 12
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into an undef, with the first good index for VLVGP.
+define <16 x i8> @f2(i8 *%ptr) {
+; CHECK-LABEL: f2:
+; CHECK: {{vlrepb|vllezb}} %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 insertion into an undef, with the second good index for VLVGP.
+define <16 x i8> @f3(i8 *%ptr) {
+; CHECK-LABEL: f3:
+; CHECK: vlrepb %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> undef, i8 %val, i32 15
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 insertion into an undef, with an arbitrary index.
+define <8 x i16> @f4(i16 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: vlreph %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 5
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into an undef, with the first good index for VLVGP.
+define <8 x i16> @f5(i16 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: {{vlreph|vllezh}} %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 3
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 insertion into an undef, with the second good index for VLVGP.
+define <8 x i16> @f6(i16 *%ptr) {
+; CHECK-LABEL: f6:
+; CHECK: vlreph %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> undef, i16 %val, i32 7
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 insertion into an undef, with an arbitrary index.
+define <4 x i32> @f7(i32 *%ptr) {
+; CHECK-LABEL: f7:
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 2
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into an undef, with the first good index for VLVGP.
+define <4 x i32> @f8(i32 *%ptr) {
+; CHECK-LABEL: f8:
+; CHECK: {{vlrepf|vllezf}} %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 insertion into an undef, with the second good index for VLVGP.
+define <4 x i32> @f9(i32 *%ptr) {
+; CHECK-LABEL: f9:
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> undef, i32 %val, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into an undef.
+define <2 x i64> @f10(i64 *%ptr) {
+; CHECK-LABEL: f10:
+; CHECK: vlrepg %v24, 0(%r2)
+; CHECK-NEXT: br %r14
+ %val = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> undef, i64 %val, i32 1
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-13.ll b/llvm/test/CodeGen/SystemZ/vec-move-13.ll
new file mode 100644
index 00000000000..e103affa4b1
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-13.ll
@@ -0,0 +1,47 @@
+; Test insertions of register values into 0.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 insertion into 0.
+define <16 x i8> @f1(i8 %val1, i8 %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vgbm %v24, 0
+; CHECK-DAG: vlvgb %v24, %r2, 2
+; CHECK-DAG: vlvgb %v24, %r3, 12
+; CHECK: br %r14
+ %vec1 = insertelement <16 x i8> zeroinitializer, i8 %val1, i32 2
+ %vec2 = insertelement <16 x i8> %vec1, i8 %val2, i32 12
+ ret <16 x i8> %vec2
+}
+
+; Test v8i16 insertion into 0.
+define <8 x i16> @f2(i16 %val1, i16 %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vgbm %v24, 0
+; CHECK-DAG: vlvgh %v24, %r2, 3
+; CHECK-DAG: vlvgh %v24, %r3, 5
+; CHECK: br %r14
+ %vec1 = insertelement <8 x i16> zeroinitializer, i16 %val1, i32 3
+ %vec2 = insertelement <8 x i16> %vec1, i16 %val2, i32 5
+ ret <8 x i16> %vec2
+}
+
+; Test v4i32 insertion into 0.
+define <4 x i32> @f3(i32 %val) {
+; CHECK-LABEL: f3:
+; CHECK: vgbm %v24, 0
+; CHECK: vlvgf %v24, %r2, 3
+; CHECK: br %r14
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 3
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 insertion into 0.
+define <2 x i64> @f4(i64 %val) {
+; CHECK-LABEL: f4:
+; CHECK: lghi [[REG:%r[0-5]]], 0
+; CHECK: vlvgp %v24, [[REG]], %r2
+; CHECK: br %r14
+ %ret = insertelement <2 x i64> zeroinitializer, i64 %val, i32 1
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-move-14.ll b/llvm/test/CodeGen/SystemZ/vec-move-14.ll
new file mode 100644
index 00000000000..f0c60e7d366
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-move-14.ll
@@ -0,0 +1,76 @@
+; Test insertions of memory values into 0.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test VLLEZB.
+define <16 x i8> @f1(i8 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vllezb %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test VLLEZB with the highest in-range offset.
+define <16 x i8> @f2(i8 *%base) {
+; CHECK-LABEL: f2:
+; CHECK: vllezb %v24, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test VLLEZB with the next highest offset.
+define <16 x i8> @f3(i8 *%base) {
+; CHECK-LABEL: f3:
+; CHECK-NOT: vllezb %v24, 4096(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test that VLLEZB allows an index.
+define <16 x i8> @f4(i8 *%base, i64 %index) {
+; CHECK-LABEL: f4:
+; CHECK: vllezb %v24, 0({{%r2,%r3|%r3,%r2}})
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 %index
+ %val = load i8, i8 *%ptr
+ %ret = insertelement <16 x i8> zeroinitializer, i8 %val, i32 7
+ ret <16 x i8> %ret
+}
+
+; Test VLLEZH.
+define <8 x i16> @f5(i16 *%ptr) {
+; CHECK-LABEL: f5:
+; CHECK: vllezh %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i16, i16 *%ptr
+ %ret = insertelement <8 x i16> zeroinitializer, i16 %val, i32 3
+ ret <8 x i16> %ret
+}
+
+; Test VLLEZF.
+define <4 x i32> @f6(i32 *%ptr) {
+; CHECK-LABEL: f6:
+; CHECK: vllezf %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i32, i32 *%ptr
+ %ret = insertelement <4 x i32> zeroinitializer, i32 %val, i32 1
+ ret <4 x i32> %ret
+}
+
+; Test VLLEZG.
+define <2 x i64> @f7(i64 *%ptr) {
+; CHECK-LABEL: f7:
+; CHECK: vllezg %v24, 0(%r2)
+; CHECK: br %r14
+ %val = load i64, i64 *%ptr
+ %ret = insertelement <2 x i64> zeroinitializer, i64 %val, i32 0
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-01.ll b/llvm/test/CodeGen/SystemZ/vec-mul-01.ll
new file mode 100644
index 00000000000..209582f5893
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-01.ll
@@ -0,0 +1,39 @@
+; Test vector multiplication.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 multiplication.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmlb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = mul <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 multiplication.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmlhw %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = mul <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 multiplication.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmlf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = mul <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 multiplication. There's no vector equivalent.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK-NOT: vmlg
+; CHECK: br %r14
+ %ret = mul <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-mul-02.ll b/llvm/test/CodeGen/SystemZ/vec-mul-02.ll
new file mode 100644
index 00000000000..7323330919a
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-mul-02.ll
@@ -0,0 +1,36 @@
+; Test vector multiply-and-add.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 multiply-and-add.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i8> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vmalb %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %mul = mul <16 x i8> %val1, %val2
+ %ret = add <16 x i8> %mul, %val3
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 multiply-and-add.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i16> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vmalhw %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %mul = mul <8 x i16> %val1, %val2
+ %ret = add <8 x i16> %mul, %val3
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 multiply-and-add.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> %val3) {
+; CHECK-LABEL: f3:
+; CHECK: vmalf %v24, %v26, %v28, %v30
+; CHECK: br %r14
+ %mul = mul <4 x i32> %val1, %val2
+ %ret = add <4 x i32> %mul, %val3
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-neg-01.ll b/llvm/test/CodeGen/SystemZ/vec-neg-01.ll
new file mode 100644
index 00000000000..357648ba4d3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-neg-01.ll
@@ -0,0 +1,39 @@
+; Test vector negation.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 negation.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vlcb %v24, %v26
+; CHECK: br %r14
+ %ret = sub <16 x i8> zeroinitializer, %val
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 negation.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vlch %v24, %v26
+; CHECK: br %r14
+ %ret = sub <8 x i16> zeroinitializer, %val
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 negation.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vlcf %v24, %v26
+; CHECK: br %r14
+ %ret = sub <4 x i32> zeroinitializer, %val
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 negation.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vlcg %v24, %v26
+; CHECK: br %r14
+ %ret = sub <2 x i64> zeroinitializer, %val
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-or-01.ll b/llvm/test/CodeGen/SystemZ/vec-or-01.ll
new file mode 100644
index 00000000000..789150ad2d1
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-or-01.ll
@@ -0,0 +1,39 @@
+; Test vector OR.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 OR.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vo %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = or <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 OR.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vo %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = or <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 OR.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vo %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = or <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 OR.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vo %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = or <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-or-02.ll b/llvm/test/CodeGen/SystemZ/vec-or-02.ll
new file mode 100644
index 00000000000..eeb86e36ff0
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-or-02.ll
@@ -0,0 +1,107 @@
+; Test vector (or (and X, Z), (and Y, (not Z))) patterns.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3) {
+; CHECK-LABEL: f1:
+; CHECK: vsel %v24, %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <16 x i8> %val3, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %and1 = and <16 x i8> %val1, %val3
+ %and2 = and <16 x i8> %val2, %not
+ %ret = or <16 x i8> %and1, %and2
+ ret <16 x i8> %ret
+}
+
+; ...and again with the XOR applied to the other operand of the AND.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2, <16 x i8> %val3) {
+; CHECK-LABEL: f2:
+; CHECK: vsel %v24, %v26, %v24, %v28
+; CHECK: br %r14
+ %not = xor <16 x i8> %val3, <i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1,
+ i8 -1, i8 -1, i8 -1, i8 -1>
+ %and1 = and <16 x i8> %val1, %not
+ %and2 = and <16 x i8> %val2, %val3
+ %ret = or <16 x i8> %and1, %and2
+ ret <16 x i8> %ret
+}
+
+; Test v8i16.
+define <8 x i16> @f3(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3) {
+; CHECK-LABEL: f3:
+; CHECK: vsel %v24, %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <8 x i16> %val3, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %and1 = and <8 x i16> %val1, %val3
+ %and2 = and <8 x i16> %val2, %not
+ %ret = or <8 x i16> %and1, %and2
+ ret <8 x i16> %ret
+}
+
+; ...and again with the XOR applied to the other operand of the AND.
+define <8 x i16> @f4(<8 x i16> %val1, <8 x i16> %val2, <8 x i16> %val3) {
+; CHECK-LABEL: f4:
+; CHECK: vsel %v24, %v26, %v24, %v28
+; CHECK: br %r14
+ %not = xor <8 x i16> %val3, <i16 -1, i16 -1, i16 -1, i16 -1,
+ i16 -1, i16 -1, i16 -1, i16 -1>
+ %and1 = and <8 x i16> %val1, %not
+ %and2 = and <8 x i16> %val2, %val3
+ %ret = or <8 x i16> %and1, %and2
+ ret <8 x i16> %ret
+}
+
+; Test v4i32.
+define <4 x i32> @f5(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3) {
+; CHECK-LABEL: f5:
+; CHECK: vsel %v24, %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <4 x i32> %val3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and1 = and <4 x i32> %val1, %val3
+ %and2 = and <4 x i32> %val2, %not
+ %ret = or <4 x i32> %and1, %and2
+ ret <4 x i32> %ret
+}
+
+; ...and again with the XOR applied to the other operand of the AND.
+define <4 x i32> @f6(<4 x i32> %val1, <4 x i32> %val2, <4 x i32> %val3) {
+; CHECK-LABEL: f6:
+; CHECK: vsel %v24, %v26, %v24, %v28
+; CHECK: br %r14
+ %not = xor <4 x i32> %val3, <i32 -1, i32 -1, i32 -1, i32 -1>
+ %and1 = and <4 x i32> %val1, %not
+ %and2 = and <4 x i32> %val2, %val3
+ %ret = or <4 x i32> %and1, %and2
+ ret <4 x i32> %ret
+}
+
+; Test v2i64.
+define <2 x i64> @f7(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3) {
+; CHECK-LABEL: f7:
+; CHECK: vsel %v24, %v24, %v26, %v28
+; CHECK: br %r14
+ %not = xor <2 x i64> %val3, <i64 -1, i64 -1>
+ %and1 = and <2 x i64> %val1, %val3
+ %and2 = and <2 x i64> %val2, %not
+ %ret = or <2 x i64> %and1, %and2
+ ret <2 x i64> %ret
+}
+
+; ...and again with the XOR applied to the other operand of the AND.
+define <2 x i64> @f8(<2 x i64> %val1, <2 x i64> %val2, <2 x i64> %val3) {
+; CHECK-LABEL: f8:
+; CHECK: vsel %v24, %v26, %v24, %v28
+; CHECK: br %r14
+ %not = xor <2 x i64> %val3, <i64 -1, i64 -1>
+ %and1 = and <2 x i64> %val1, %not
+ %and2 = and <2 x i64> %val2, %val3
+ %ret = or <2 x i64> %and1, %and2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-01.ll b/llvm/test/CodeGen/SystemZ/vec-perm-01.ll
new file mode 100644
index 00000000000..520ff45e7f7
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-01.ll
@@ -0,0 +1,124 @@
+; Test vector splat.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 splat of the first element.
+define <16 x i8> @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: vrepb %v24, %v24, 0
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 splat of the last element.
+define <16 x i8> @f2(<16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vrepb %v24, %v24, 15
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> <i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <16 x i8> @f3(<16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vrepb %v24, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> undef, <16 x i8> %val,
+ <16 x i32> <i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20>
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 splat of the first element.
+define <8 x i16> @f4(<8 x i16> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vreph %v24, %v24, 0
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 splat of the last element.
+define <8 x i16> @f5(<8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vreph %v24, %v24, 7
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> <i32 7, i32 7, i32 7, i32 7,
+ i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <8 x i16> @f6(<8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vreph %v24, %v24, 2
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> undef, <8 x i16> %val,
+ <8 x i32> <i32 10, i32 10, i32 10, i32 10,
+ i32 10, i32 10, i32 10, i32 10>
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 splat of the first element.
+define <4 x i32> @f7(<4 x i32> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vrepf %v24, %v24, 0
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 splat of the last element.
+define <4 x i32> @f8(<4 x i32> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vrepf %v24, %v24, 3
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <4 x i32> @f9(<4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vrepf %v24, %v24, 1
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> undef, <4 x i32> %val,
+ <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 splat of the first element.
+define <2 x i64> @f10(<2 x i64> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vrepg %v24, %v24, 0
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 splat of the last element.
+define <2 x i64> @f11(<2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vrepg %v24, %v24, 1
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-02.ll b/llvm/test/CodeGen/SystemZ/vec-perm-02.ll
new file mode 100644
index 00000000000..93e4112c0ef
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-02.ll
@@ -0,0 +1,144 @@
+; Test replications of a scalar register value, represented as splats.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test v16i8 splat of the first element.
+define <16 x i8> @f1(i8 %scalar) {
+; CHECK-LABEL: f1:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepb %v24, [[REG]], 7
+; CHECK: br %r14
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 0
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 splat of the last element.
+define <16 x i8> @f2(i8 %scalar) {
+; CHECK-LABEL: f2:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepb %v24, [[REG]], 7
+; CHECK: br %r14
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 15
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> <i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15,
+ i32 15, i32 15, i32 15, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test v16i8 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <16 x i8> @f3(i8 %scalar) {
+; CHECK-LABEL: f3:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepb %v24, [[REG]], 7
+; CHECK: br %r14
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 4
+ %ret = shufflevector <16 x i8> undef, <16 x i8> %val,
+ <16 x i32> <i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20,
+ i32 20, i32 20, i32 20, i32 20>
+ ret <16 x i8> %ret
+}
+
+; Test v8i16 splat of the first element.
+define <8 x i16> @f4(i16 %scalar) {
+; CHECK-LABEL: f4:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vreph %v24, [[REG]], 3
+; CHECK: br %r14
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 0
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 splat of the last element.
+define <8 x i16> @f5(i16 %scalar) {
+; CHECK-LABEL: f5:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vreph %v24, [[REG]], 3
+; CHECK: br %r14
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 7
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> <i32 7, i32 7, i32 7, i32 7,
+ i32 7, i32 7, i32 7, i32 7>
+ ret <8 x i16> %ret
+}
+
+; Test v8i16 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <8 x i16> @f6(i16 %scalar) {
+; CHECK-LABEL: f6:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vreph %v24, [[REG]], 3
+; CHECK: br %r14
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 2
+ %ret = shufflevector <8 x i16> undef, <8 x i16> %val,
+ <8 x i32> <i32 10, i32 10, i32 10, i32 10,
+ i32 10, i32 10, i32 10, i32 10>
+ ret <8 x i16> %ret
+}
+
+; Test v4i32 splat of the first element.
+define <4 x i32> @f7(i32 %scalar) {
+; CHECK-LABEL: f7:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepf %v24, [[REG]], 1
+; CHECK: br %r14
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 splat of the last element.
+define <4 x i32> @f8(i32 %scalar) {
+; CHECK-LABEL: f8:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepf %v24, [[REG]], 1
+; CHECK: br %r14
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 3
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> <i32 3, i32 3, i32 3, i32 3>
+ ret <4 x i32> %ret
+}
+
+; Test v4i32 splat of an arbitrary element, using the second operand of
+; the shufflevector.
+define <4 x i32> @f9(i32 %scalar) {
+; CHECK-LABEL: f9:
+; CHECK: vlvgp [[REG:%v[0-9]+]], %r2, %r2
+; CHECK: vrepf %v24, [[REG]], 1
+; CHECK: br %r14
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 1
+ %ret = shufflevector <4 x i32> undef, <4 x i32> %val,
+ <4 x i32> <i32 5, i32 5, i32 5, i32 5>
+ ret <4 x i32> %ret
+}
+
+; Test v2i64 splat of the first element.
+define <2 x i64> @f10(i64 %scalar) {
+; CHECK-LABEL: f10:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK: br %r14
+ %val = insertelement <2 x i64> undef, i64 %scalar, i32 0
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ ret <2 x i64> %ret
+}
+
+; Test v2i64 splat of the last element.
+define <2 x i64> @f11(i64 %scalar) {
+; CHECK-LABEL: f11:
+; CHECK: vlvgp %v24, %r2, %r2
+; CHECK: br %r14
+ %val = insertelement <2 x i64> undef, i64 %scalar, i32 1
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> <i32 1, i32 1>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-03.ll b/llvm/test/CodeGen/SystemZ/vec-perm-03.ll
new file mode 100644
index 00000000000..d74948bdb51
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-03.ll
@@ -0,0 +1,173 @@
+; Test replications of a scalar memory value, represented as splats.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 replicating load with no offset.
+define <16 x i8> @f1(i8 *%ptr) {
+; CHECK-LABEL: f1:
+; CHECK: vlrepb %v24, 0(%r2)
+; CHECK: br %r14
+ %scalar = load i8, i8 *%ptr
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 0
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 replicating load with the maximum in-range offset.
+define <16 x i8> @f2(i8 *%base) {
+; CHECK-LABEL: f2:
+; CHECK: vlrepb %v24, 4095(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4095
+ %scalar = load i8, i8 *%ptr
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 0
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 replicating load with the first out-of-range offset.
+define <16 x i8> @f3(i8 *%base) {
+; CHECK-LABEL: f3:
+; CHECK: aghi %r2, 4096
+; CHECK: vlrepb %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i8, i8 *%base, i64 4096
+ %scalar = load i8, i8 *%ptr
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 0
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 replicating load with no offset.
+define <8 x i16> @f4(i16 *%ptr) {
+; CHECK-LABEL: f4:
+; CHECK: vlreph %v24, 0(%r2)
+; CHECK: br %r14
+ %scalar = load i16, i16 *%ptr
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 0
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 replicating load with the maximum in-range offset.
+define <8 x i16> @f5(i16 *%base) {
+; CHECK-LABEL: f5:
+; CHECK: vlreph %v24, 4094(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i64 2047
+ %scalar = load i16, i16 *%ptr
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 0
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 replicating load with the first out-of-range offset.
+define <8 x i16> @f6(i16 *%base) {
+; CHECK-LABEL: f6:
+; CHECK: aghi %r2, 4096
+; CHECK: vlreph %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i16, i16 *%base, i64 2048
+ %scalar = load i16, i16 *%ptr
+ %val = insertelement <8 x i16> undef, i16 %scalar, i32 0
+ %ret = shufflevector <8 x i16> %val, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 replicating load with no offset.
+define <4 x i32> @f7(i32 *%ptr) {
+; CHECK-LABEL: f7:
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK: br %r14
+ %scalar = load i32, i32 *%ptr
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 replicating load with the maximum in-range offset.
+define <4 x i32> @f8(i32 *%base) {
+; CHECK-LABEL: f8:
+; CHECK: vlrepf %v24, 4092(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i64 1023
+ %scalar = load i32, i32 *%ptr
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 replicating load with the first out-of-range offset.
+define <4 x i32> @f9(i32 *%base) {
+; CHECK-LABEL: f9:
+; CHECK: aghi %r2, 4096
+; CHECK: vlrepf %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i32, i32 *%base, i64 1024
+ %scalar = load i32, i32 *%ptr
+ %val = insertelement <4 x i32> undef, i32 %scalar, i32 0
+ %ret = shufflevector <4 x i32> %val, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 replicating load with no offset.
+define <2 x i64> @f10(i64 *%ptr) {
+; CHECK-LABEL: f10:
+; CHECK: vlrepg %v24, 0(%r2)
+; CHECK: br %r14
+ %scalar = load i64, i64 *%ptr
+ %val = insertelement <2 x i64> undef, i64 %scalar, i32 0
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 replicating load with the maximum in-range offset.
+define <2 x i64> @f11(i64 *%base) {
+; CHECK-LABEL: f11:
+; CHECK: vlrepg %v24, 4088(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 511
+ %scalar = load i64, i64 *%ptr
+ %val = insertelement <2 x i64> undef, i64 %scalar, i32 0
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 replicating load with the first out-of-range offset.
+define <2 x i64> @f12(i64 *%base) {
+; CHECK-LABEL: f12:
+; CHECK: aghi %r2, 4096
+; CHECK: vlrepg %v24, 0(%r2)
+; CHECK: br %r14
+ %ptr = getelementptr i64, i64 *%base, i32 512
+ %scalar = load i64, i64 *%ptr
+ %val = insertelement <2 x i64> undef, i64 %scalar, i32 0
+ %ret = shufflevector <2 x i64> %val, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ ret <2 x i64> %ret
+}
+
+; Test a v16i8 replicating load with an index.
+define <16 x i8> @f19(i8 *%base, i64 %index) {
+; CHECK-LABEL: f19:
+; CHECK: vlrepb %v24, 1023(%r3,%r2)
+; CHECK: br %r14
+ %ptr1 = getelementptr i8, i8 *%base, i64 %index
+ %ptr = getelementptr i8, i8 *%ptr1, i64 1023
+ %scalar = load i8, i8 *%ptr
+ %val = insertelement <16 x i8> undef, i8 %scalar, i32 0
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-04.ll b/llvm/test/CodeGen/SystemZ/vec-perm-04.ll
new file mode 100644
index 00000000000..1d449b9bb34
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-04.ll
@@ -0,0 +1,160 @@
+; Test vector merge high.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a canonical v16i8 merge high.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmrhb %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 0, i32 16, i32 1, i32 17,
+ i32 2, i32 18, i32 3, i32 19,
+ i32 4, i32 20, i32 5, i32 21,
+ i32 6, i32 22, i32 7, i32 23>
+ ret <16 x i8> %ret
+}
+
+; Test a reversed v16i8 merge high.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmrhb %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 16, i32 0, i32 17, i32 1,
+ i32 18, i32 2, i32 19, i32 3,
+ i32 20, i32 4, i32 21, i32 5,
+ i32 22, i32 6, i32 23, i32 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge high with only the first operand being used.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmrhb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 0, i32 0, i32 1, i32 1,
+ i32 2, i32 2, i32 3, i32 3,
+ i32 4, i32 4, i32 5, i32 5,
+ i32 6, i32 6, i32 7, i32 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge high with only the second operand being used.
+; This is converted into @f3 by target-independent code.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmrhb %v24, %v26, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 16, i32 16, i32 17, i32 17,
+ i32 18, i32 18, i32 19, i32 19,
+ i32 20, i32 20, i32 21, i32 21,
+ i32 22, i32 22, i32 23, i32 23>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge with both operands being the same. This too is
+; converted into @f3 by target-independent code.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vmrhb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> %val,
+ <16 x i32> <i32 0, i32 16, i32 17, i32 17,
+ i32 18, i32 2, i32 3, i32 3,
+ i32 20, i32 20, i32 5, i32 5,
+ i32 6, i32 22, i32 23, i32 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge in which some of the indices are don't care.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmrhb %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 0, i32 undef, i32 1, i32 17,
+ i32 undef, i32 18, i32 undef, i32 undef,
+ i32 undef, i32 20, i32 5, i32 21,
+ i32 undef, i32 22, i32 7, i32 undef>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge in which one of the operands is undefined and where
+; indices for that operand are "don't care". Target-independent code
+; converts the indices themselves into "undef"s.
+define <16 x i8> @f7(<16 x i8> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vmrhb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> undef, <16 x i8> %val,
+ <16 x i32> <i32 11, i32 16, i32 17, i32 5,
+ i32 18, i32 10, i32 19, i32 19,
+ i32 20, i32 20, i32 21, i32 3,
+ i32 2, i32 22, i32 9, i32 23>
+ ret <16 x i8> %ret
+}
+
+; Test a canonical v8i16 merge high.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmrhh %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 0, i32 8, i32 1, i32 9,
+ i32 2, i32 10, i32 3, i32 11>
+ ret <8 x i16> %ret
+}
+
+; Test a reversed v8i16 merge high.
+define <8 x i16> @f9(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vmrhh %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 8, i32 0, i32 9, i32 1,
+ i32 10, i32 2, i32 11, i32 3>
+ ret <8 x i16> %ret
+}
+
+; Test a canonical v4i32 merge high.
+define <4 x i32> @f10(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vmrhf %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ ret <4 x i32> %ret
+}
+
+; Test a reversed v4i32 merge high.
+define <4 x i32> @f11(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vmrhf %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 4, i32 0, i32 5, i32 1>
+ ret <4 x i32> %ret
+}
+
+; Test a canonical v2i64 merge high.
+define <2 x i64> @f12(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f12:
+; CHECK: vmrhg %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 0, i32 2>
+ ret <2 x i64> %ret
+}
+
+; Test a reversed v2i64 merge high.
+define <2 x i64> @f13(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f13:
+; CHECK: vmrhg %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 2, i32 0>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-05.ll b/llvm/test/CodeGen/SystemZ/vec-perm-05.ll
new file mode 100644
index 00000000000..636228c56ba
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-05.ll
@@ -0,0 +1,160 @@
+; Test vector merge low.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a canonical v16i8 merge low.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vmrlb %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 8, i32 24, i32 9, i32 25,
+ i32 10, i32 26, i32 11, i32 27,
+ i32 12, i32 28, i32 13, i32 29,
+ i32 14, i32 30, i32 15, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a reversed v16i8 merge low.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vmrlb %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 24, i32 8, i32 25, i32 9,
+ i32 26, i32 10, i32 27, i32 11,
+ i32 28, i32 12, i32 29, i32 13,
+ i32 30, i32 14, i32 31, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge low with only the first operand being used.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vmrlb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 8, i32 8, i32 9, i32 9,
+ i32 10, i32 10, i32 11, i32 11,
+ i32 12, i32 12, i32 13, i32 13,
+ i32 14, i32 14, i32 15, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge low with only the second operand being used.
+; This is converted into @f3 by target-independent code.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vmrlb %v24, %v26, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 24, i32 24, i32 25, i32 25,
+ i32 26, i32 26, i32 27, i32 27,
+ i32 28, i32 28, i32 29, i32 29,
+ i32 30, i32 30, i32 31, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge with both operands being the same. This too is
+; converted into @f3 by target-independent code.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vmrlb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> %val,
+ <16 x i32> <i32 8, i32 24, i32 25, i32 25,
+ i32 26, i32 10, i32 11, i32 11,
+ i32 28, i32 28, i32 13, i32 13,
+ i32 14, i32 30, i32 31, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge in which some of the indices are don't care.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vmrlb %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 8, i32 undef, i32 9, i32 25,
+ i32 undef, i32 26, i32 undef, i32 undef,
+ i32 undef, i32 28, i32 13, i32 29,
+ i32 undef, i32 30, i32 15, i32 undef>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 merge in which one of the operands is undefined and where
+; indices for that operand are "don't care". Target-independent code
+; converts the indices themselves into "undef"s.
+define <16 x i8> @f7(<16 x i8> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vmrlb %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> undef, <16 x i8> %val,
+ <16 x i32> <i32 11, i32 24, i32 25, i32 5,
+ i32 26, i32 10, i32 27, i32 27,
+ i32 28, i32 28, i32 29, i32 3,
+ i32 2, i32 30, i32 9, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a canonical v8i16 merge low.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vmrlh %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 4, i32 12, i32 5, i32 13,
+ i32 6, i32 14, i32 7, i32 15>
+ ret <8 x i16> %ret
+}
+
+; Test a reversed v8i16 merge low.
+define <8 x i16> @f9(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vmrlh %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 12, i32 4, i32 13, i32 5,
+ i32 14, i32 6, i32 15, i32 7>
+ ret <8 x i16> %ret
+}
+
+; Test a canonical v4i32 merge low.
+define <4 x i32> @f10(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vmrlf %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ ret <4 x i32> %ret
+}
+
+; Test a reversed v4i32 merge low.
+define <4 x i32> @f11(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vmrlf %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 6, i32 2, i32 7, i32 3>
+ ret <4 x i32> %ret
+}
+
+; Test a canonical v2i64 merge low.
+define <2 x i64> @f12(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f12:
+; CHECK: vmrlg %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 1, i32 3>
+ ret <2 x i64> %ret
+}
+
+; Test a reversed v2i64 merge low.
+define <2 x i64> @f13(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f13:
+; CHECK: vmrlg %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 3, i32 1>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-06.ll b/llvm/test/CodeGen/SystemZ/vec-perm-06.ll
new file mode 100644
index 00000000000..298fc60e851
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-06.ll
@@ -0,0 +1,140 @@
+; Test vector pack.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a canonical v16i8 pack.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vpkh %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15,
+ i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a reversed v16i8 pack.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vpkh %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31,
+ i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 pack with only the first operand being used.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vpkh %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15,
+ i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 pack with only the second operand being used.
+; This is converted into @f3 by target-independent code.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vpkh %v24, %v26, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31,
+ i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 pack with both operands being the same. This too is
+; converted into @f3 by target-independent code.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vpkh %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> %val,
+ <16 x i32> <i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15,
+ i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 pack in which some of the indices are don't care.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vpkh %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 1, i32 undef, i32 5, i32 7,
+ i32 undef, i32 11, i32 undef, i32 undef,
+ i32 undef, i32 19, i32 21, i32 23,
+ i32 undef, i32 27, i32 29, i32 undef>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 pack in which one of the operands is undefined and where
+; indices for that operand are "don't care". Target-independent code
+; converts the indices themselves into "undef"s.
+define <16 x i8> @f7(<16 x i8> %val) {
+; CHECK-LABEL: f7:
+; CHECK: vpkh %v24, %v24, %v24
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> undef, <16 x i8> %val,
+ <16 x i32> <i32 7, i32 1, i32 9, i32 15,
+ i32 15, i32 3, i32 5, i32 1,
+ i32 17, i32 19, i32 21, i32 23,
+ i32 25, i32 27, i32 29, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a canonical v8i16 pack.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vpkf %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 1, i32 3, i32 5, i32 7,
+ i32 9, i32 11, i32 13, i32 15>
+ ret <8 x i16> %ret
+}
+
+; Test a reversed v8i16 pack.
+define <8 x i16> @f9(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vpkf %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 9, i32 11, i32 13, i32 15,
+ i32 1, i32 3, i32 5, i32 7>
+ ret <8 x i16> %ret
+}
+
+; Test a canonical v4i32 pack.
+define <4 x i32> @f10(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vpkg %v24, %v24, %v26
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ ret <4 x i32> %ret
+}
+
+; Test a reversed v4i32 pack.
+define <4 x i32> @f11(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vpkg %v24, %v26, %v24
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 5, i32 7, i32 1, i32 3>
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-07.ll b/llvm/test/CodeGen/SystemZ/vec-perm-07.ll
new file mode 100644
index 00000000000..40ca3995524
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-07.ll
@@ -0,0 +1,125 @@
+; Test vector shift left double immediate.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift with the lowest useful shift amount.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vsldb %v24, %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 1, i32 2, i32 3, i32 4,
+ i32 5, i32 6, i32 7, i32 8,
+ i32 9, i32 10, i32 11, i32 12,
+ i32 13, i32 14, i32 15, i32 16>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift with the highest useful shift amount.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vsldb %v24, %v24, %v26, 15
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 15, i32 16, i32 17, i32 18,
+ i32 19, i32 20, i32 21, i32 22,
+ i32 23, i32 24, i32 25, i32 26,
+ i32 27, i32 28, i32 29, i32 30>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift in which the operands need to be reversed.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vsldb %v24, %v26, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 20, i32 21, i32 22, i32 23,
+ i32 24, i32 25, i32 26, i32 27,
+ i32 28, i32 29, i32 30, i32 31,
+ i32 0, i32 1, i32 2, i32 3>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift in which the operands need to be duplicated.
+define <16 x i8> @f4(<16 x i8> %val) {
+; CHECK-LABEL: f4:
+; CHECK: vsldb %v24, %v24, %v24, 7
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> <i32 7, i32 8, i32 9, i32 10,
+ i32 11, i32 12, i32 13, i32 14,
+ i32 15, i32 0, i32 1, i32 2,
+ i32 3, i32 4, i32 5, i32 6>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift in which some of the indices are undefs.
+define <16 x i8> @f5(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f5:
+; CHECK: vsldb %v24, %v24, %v26, 11
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef,
+ i32 15, i32 16, i32 undef, i32 18,
+ i32 19, i32 20, i32 21, i32 22,
+ i32 23, i32 24, i32 25, i32 26>
+ ret <16 x i8> %ret
+}
+
+; ...and again with reversed operands.
+define <16 x i8> @f6(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vsldb %v24, %v26, %v24, 13
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 undef, i32 undef, i32 31, i32 0,
+ i32 1, i32 2, i32 3, i32 4,
+ i32 5, i32 6, i32 7, i32 8,
+ i32 9, i32 10, i32 11, i32 12>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift with the lowest useful shift amount.
+define <8 x i16> @f7(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vsldb %v24, %v24, %v26, 2
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 1, i32 2, i32 3, i32 4,
+ i32 5, i32 6, i32 7, i32 8>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift with the highest useful shift amount.
+define <8 x i16> @f8(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vsldb %v24, %v24, %v26, 14
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 7, i32 8, i32 9, i32 10,
+ i32 11, i32 12, i32 13, i32 14>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift with the lowest useful shift amount.
+define <4 x i32> @f9(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vsldb %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 1, i32 2, i32 3, i32 4>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift with the highest useful shift amount.
+define <4 x i32> @f10(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vsldb %v24, %v24, %v26, 12
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i32> %ret
+}
+
+; We use VPDI for v2i64 shuffles.
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-08.ll b/llvm/test/CodeGen/SystemZ/vec-perm-08.ll
new file mode 100644
index 00000000000..4d06377f5a3
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-08.ll
@@ -0,0 +1,130 @@
+; Test vector permutes using VPDI.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a high1/low2 permute for v16i8.
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vpdi %v24, %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 0, i32 1, i32 2, i32 3,
+ i32 4, i32 5, i32 6, i32 7,
+ i32 24, i32 25, i32 26, i32 27,
+ i32 28, i32 29, i32 30, i32 31>
+ ret <16 x i8> %ret
+}
+
+; Test a low2/high1 permute for v16i8.
+define <16 x i8> @f2(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vpdi %v24, %v26, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 24, i32 25, i32 26, i32 27,
+ i32 28, i32 29, i32 30, i32 31,
+ i32 0, i32 1, i32 2, i32 3,
+ i32 4, i32 5, i32 6, i32 7>
+ ret <16 x i8> %ret
+}
+
+; Test a low1/high2 permute for v16i8.
+define <16 x i8> @f3(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vpdi %v24, %v24, %v26, 4
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 8, i32 9, i32 10, i32 undef,
+ i32 12, i32 undef, i32 14, i32 15,
+ i32 16, i32 17, i32 undef, i32 19,
+ i32 20, i32 21, i32 22, i32 undef>
+ ret <16 x i8> %ret
+}
+
+; Test a high2/low1 permute for v16i8.
+define <16 x i8> @f4(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vpdi %v24, %v26, %v24, 1
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 16, i32 17, i32 18, i32 19,
+ i32 20, i32 21, i32 22, i32 23,
+ i32 8, i32 9, i32 10, i32 11,
+ i32 12, i32 13, i32 14, i32 15>
+ ret <16 x i8> %ret
+}
+
+; Test reversing two doublewords in a v16i8.
+define <16 x i8> @f5(<16 x i8> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vpdi %v24, %v24, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <16 x i8> %val, <16 x i8> undef,
+ <16 x i32> <i32 8, i32 9, i32 10, i32 11,
+ i32 12, i32 13, i32 14, i32 15,
+ i32 0, i32 1, i32 2, i32 3,
+ i32 4, i32 5, i32 6, i32 7>
+ ret <16 x i8> %ret
+}
+
+; Test a high1/low2 permute for v8i16.
+define <8 x i16> @f6(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f6:
+; CHECK: vpdi %v24, %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 0, i32 1, i32 2, i32 3,
+ i32 12, i32 13, i32 14, i32 15>
+ ret <8 x i16> %ret
+}
+
+; Test a low2/high1 permute for v8i16.
+define <8 x i16> @f7(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f7:
+; CHECK: vpdi %v24, %v26, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 12, i32 13, i32 14, i32 15,
+ i32 0, i32 1, i32 2, i32 3>
+ ret <8 x i16> %ret
+}
+
+; Test a high1/low2 permute for v4i32.
+define <4 x i32> @f8(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f8:
+; CHECK: vpdi %v24, %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ ret <4 x i32> %ret
+}
+
+; Test a low2/high1 permute for v4i32.
+define <4 x i32> @f9(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f9:
+; CHECK: vpdi %v24, %v26, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 6, i32 7, i32 0, i32 1>
+ ret <4 x i32> %ret
+}
+
+; Test a high1/low2 permute for v2i64.
+define <2 x i64> @f10(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f10:
+; CHECK: vpdi %v24, %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 0, i32 3>
+ ret <2 x i64> %ret
+}
+
+; Test a low2/high1 permute for v2i64.
+define <2 x i64> @f11(<2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f11:
+; CHECK: vpdi %v24, %v26, %v24, 4
+; CHECK: br %r14
+ %ret = shufflevector <2 x i64> %val1, <2 x i64> %val2,
+ <2 x i32> <i32 3, i32 0>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-09.ll b/llvm/test/CodeGen/SystemZ/vec-perm-09.ll
new file mode 100644
index 00000000000..9c9632cf030
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-09.ll
@@ -0,0 +1,38 @@
+; Test general vector permute of a v16i8.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-CODE %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+
+define <16 x i8> @f1(<16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-CODE-LABEL: f1:
+; CHECK-CODE: larl [[REG:%r[0-5]]],
+; CHECK-CODE: vl [[MASK:%v[0-9]+]], 0([[REG]])
+; CHECK-CODE: vperm %v24, %v24, %v26, [[MASK]]
+; CHECK-CODE: br %r14
+;
+; CHECK-VECTOR: .byte 1
+; CHECK-VECTOR-NEXT: .byte 19
+; CHECK-VECTOR-NEXT: .byte 6
+; CHECK-VECTOR-NEXT: .byte 5
+; CHECK-VECTOR-NEXT: .byte 20
+; CHECK-VECTOR-NEXT: .byte 22
+; CHECK-VECTOR-NEXT: .byte 1
+; CHECK-VECTOR-NEXT: .byte 1
+; CHECK-VECTOR-NEXT: .byte 25
+; CHECK-VECTOR-NEXT: .byte 29
+; CHECK-VECTOR-NEXT: .byte 11
+; Any byte would be OK here
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .byte 31
+; CHECK-VECTOR-NEXT: .byte 4
+; CHECK-VECTOR-NEXT: .byte 15
+; CHECK-VECTOR-NEXT: .byte 19
+ %ret = shufflevector <16 x i8> %val1, <16 x i8> %val2,
+ <16 x i32> <i32 1, i32 19, i32 6, i32 5,
+ i32 20, i32 22, i32 1, i32 1,
+ i32 25, i32 29, i32 11, i32 undef,
+ i32 31, i32 4, i32 15, i32 19>
+ ret <16 x i8> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-10.ll b/llvm/test/CodeGen/SystemZ/vec-perm-10.ll
new file mode 100644
index 00000000000..382e6dc4c3f
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-10.ll
@@ -0,0 +1,36 @@
+; Test general vector permute of a v8i16.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-CODE %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+
+define <8 x i16> @f1(<8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-CODE-LABEL: f1:
+; CHECK-CODE: larl [[REG:%r[0-5]]],
+; CHECK-CODE: vl [[MASK:%v[0-9]+]], 0([[REG]])
+; CHECK-CODE: vperm %v24, %v26, %v24, [[MASK]]
+; CHECK-CODE: br %r14
+;
+; CHECK-VECTOR: .byte 0
+; CHECK-VECTOR-NEXT: .byte 1
+; CHECK-VECTOR-NEXT: .byte 26
+; CHECK-VECTOR-NEXT: .byte 27
+; Any 2 bytes would be OK here
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .byte 28
+; CHECK-VECTOR-NEXT: .byte 29
+; CHECK-VECTOR-NEXT: .byte 6
+; CHECK-VECTOR-NEXT: .byte 7
+; CHECK-VECTOR-NEXT: .byte 14
+; CHECK-VECTOR-NEXT: .byte 15
+; CHECK-VECTOR-NEXT: .byte 8
+; CHECK-VECTOR-NEXT: .byte 9
+; CHECK-VECTOR-NEXT: .byte 16
+; CHECK-VECTOR-NEXT: .byte 17
+ %ret = shufflevector <8 x i16> %val1, <8 x i16> %val2,
+ <8 x i32> <i32 8, i32 5, i32 undef, i32 6,
+ i32 11, i32 15, i32 12, i32 0>
+ ret <8 x i16> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-perm-11.ll b/llvm/test/CodeGen/SystemZ/vec-perm-11.ll
new file mode 100644
index 00000000000..c9e29880fe0
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-perm-11.ll
@@ -0,0 +1,35 @@
+; Test general vector permute of a v4i32.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-CODE %s
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | \
+; RUN: FileCheck -check-prefix=CHECK-VECTOR %s
+
+define <4 x i32> @f1(<4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-CODE-LABEL: f1:
+; CHECK-CODE: larl [[REG:%r[0-5]]],
+; CHECK-CODE: vl [[MASK:%v[0-9]+]], 0([[REG]])
+; CHECK-CODE: vperm %v24, %v26, %v24, [[MASK]]
+; CHECK-CODE: br %r14
+;
+; CHECK-VECTOR: .byte 4
+; CHECK-VECTOR-NEXT: .byte 5
+; CHECK-VECTOR-NEXT: .byte 6
+; CHECK-VECTOR-NEXT: .byte 7
+; CHECK-VECTOR-NEXT: .byte 20
+; CHECK-VECTOR-NEXT: .byte 21
+; CHECK-VECTOR-NEXT: .byte 22
+; CHECK-VECTOR-NEXT: .byte 23
+; Any 4 bytes would be OK here
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .space 1
+; CHECK-VECTOR-NEXT: .byte 12
+; CHECK-VECTOR-NEXT: .byte 13
+; CHECK-VECTOR-NEXT: .byte 14
+; CHECK-VECTOR-NEXT: .byte 15
+ %ret = shufflevector <4 x i32> %val1, <4 x i32> %val2,
+ <4 x i32> <i32 5, i32 1, i32 undef, i32 7>
+ ret <4 x i32> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-01.ll b/llvm/test/CodeGen/SystemZ/vec-shift-01.ll
new file mode 100644
index 00000000000..be8605b182c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-01.ll
@@ -0,0 +1,39 @@
+; Test vector shift left with vector shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: veslvb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = shl <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: veslvh %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = shl <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: veslvf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = shl <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: veslvg %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = shl <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-02.ll b/llvm/test/CodeGen/SystemZ/vec-shift-02.ll
new file mode 100644
index 00000000000..2825872e023
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-02.ll
@@ -0,0 +1,39 @@
+; Test vector arithmetic shift right with vector shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vesravb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = ashr <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vesravh %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = ashr <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vesravf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = ashr <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vesravg %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = ashr <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-03.ll b/llvm/test/CodeGen/SystemZ/vec-shift-03.ll
new file mode 100644
index 00000000000..c923d8b5d45
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-03.ll
@@ -0,0 +1,39 @@
+; Test vector logical shift right with vector shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vesrlvb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = lshr <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vesrlvh %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = lshr <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vesrlvf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = lshr <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vesrlvg %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = lshr <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-04.ll b/llvm/test/CodeGen/SystemZ/vec-shift-04.ll
new file mode 100644
index 00000000000..6fd12897bf5
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-04.ll
@@ -0,0 +1,134 @@
+; Test vector shift left with scalar shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift by a variable.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, i32 %shift) {
+; CHECK-LABEL: f1:
+; CHECK: veslb %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i8
+ %shiftvec = insertelement <16 x i8> undef, i8 %truncshift, i32 0
+ %val2 = shufflevector <16 x i8> %shiftvec, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ %ret = shl <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the lowest useful constant.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: veslb %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shl <16 x i8> %val, <i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the highest useful constant.
+define <16 x i8> @f3(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: veslb %v24, %v26, 7
+; CHECK: br %r14
+ %ret = shl <16 x i8> %val, <i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift by a variable.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, i32 %shift) {
+; CHECK-LABEL: f4:
+; CHECK: veslh %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i16
+ %shiftvec = insertelement <8 x i16> undef, i16 %truncshift, i32 0
+ %val2 = shufflevector <8 x i16> %shiftvec, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ %ret = shl <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the lowest useful constant.
+define <8 x i16> @f5(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: veslh %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shl <8 x i16> %val, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the highest useful constant.
+define <8 x i16> @f6(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: veslh %v24, %v26, 15
+; CHECK: br %r14
+ %ret = shl <8 x i16> %val, <i16 15, i16 15, i16 15, i16 15,
+ i16 15, i16 15, i16 15, i16 15>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift by a variable.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x i32> %val1, i32 %shift) {
+; CHECK-LABEL: f7:
+; CHECK: veslf %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %shiftvec = insertelement <4 x i32> undef, i32 %shift, i32 0
+ %val2 = shufflevector <4 x i32> %shiftvec, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ %ret = shl <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the lowest useful constant.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f8:
+; CHECK: veslf %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shl <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the highest useful constant.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: veslf %v24, %v26, 31
+; CHECK: br %r14
+ %ret = shl <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift by a variable.
+define <2 x i64> @f10(<2 x i64> %dummy, <2 x i64> %val1, i32 %shift) {
+; CHECK-LABEL: f10:
+; CHECK: veslg %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %extshift = sext i32 %shift to i64
+ %shiftvec = insertelement <2 x i64> undef, i64 %extshift, i32 0
+ %val2 = shufflevector <2 x i64> %shiftvec, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ %ret = shl <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the lowest useful constant.
+define <2 x i64> @f11(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: veslg %v24, %v26, 1
+; CHECK: br %r14
+ %ret = shl <2 x i64> %val, <i64 1, i64 1>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the highest useful constant.
+define <2 x i64> @f12(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f12:
+; CHECK: veslg %v24, %v26, 63
+; CHECK: br %r14
+ %ret = shl <2 x i64> %val, <i64 63, i64 63>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-05.ll b/llvm/test/CodeGen/SystemZ/vec-shift-05.ll
new file mode 100644
index 00000000000..22ce46b2d0d
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-05.ll
@@ -0,0 +1,134 @@
+; Test vector arithmetic shift right with scalar shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift by a variable.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, i32 %shift) {
+; CHECK-LABEL: f1:
+; CHECK: vesrab %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i8
+ %shiftvec = insertelement <16 x i8> undef, i8 %truncshift, i32 0
+ %val2 = shufflevector <16 x i8> %shiftvec, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ %ret = ashr <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the lowest useful constant.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vesrab %v24, %v26, 1
+; CHECK: br %r14
+ %ret = ashr <16 x i8> %val, <i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the highest useful constant.
+define <16 x i8> @f3(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vesrab %v24, %v26, 7
+; CHECK: br %r14
+ %ret = ashr <16 x i8> %val, <i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift by a variable.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, i32 %shift) {
+; CHECK-LABEL: f4:
+; CHECK: vesrah %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i16
+ %shiftvec = insertelement <8 x i16> undef, i16 %truncshift, i32 0
+ %val2 = shufflevector <8 x i16> %shiftvec, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ %ret = ashr <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the lowest useful constant.
+define <8 x i16> @f5(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vesrah %v24, %v26, 1
+; CHECK: br %r14
+ %ret = ashr <8 x i16> %val, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the highest useful constant.
+define <8 x i16> @f6(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vesrah %v24, %v26, 15
+; CHECK: br %r14
+ %ret = ashr <8 x i16> %val, <i16 15, i16 15, i16 15, i16 15,
+ i16 15, i16 15, i16 15, i16 15>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift by a variable.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x i32> %val1, i32 %shift) {
+; CHECK-LABEL: f7:
+; CHECK: vesraf %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %shiftvec = insertelement <4 x i32> undef, i32 %shift, i32 0
+ %val2 = shufflevector <4 x i32> %shiftvec, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ %ret = ashr <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the lowest useful constant.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vesraf %v24, %v26, 1
+; CHECK: br %r14
+ %ret = ashr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the highest useful constant.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vesraf %v24, %v26, 31
+; CHECK: br %r14
+ %ret = ashr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift by a variable.
+define <2 x i64> @f10(<2 x i64> %dummy, <2 x i64> %val1, i32 %shift) {
+; CHECK-LABEL: f10:
+; CHECK: vesrag %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %extshift = sext i32 %shift to i64
+ %shiftvec = insertelement <2 x i64> undef, i64 %extshift, i32 0
+ %val2 = shufflevector <2 x i64> %shiftvec, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ %ret = ashr <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the lowest useful constant.
+define <2 x i64> @f11(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vesrag %v24, %v26, 1
+; CHECK: br %r14
+ %ret = ashr <2 x i64> %val, <i64 1, i64 1>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the highest useful constant.
+define <2 x i64> @f12(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f12:
+; CHECK: vesrag %v24, %v26, 63
+; CHECK: br %r14
+ %ret = ashr <2 x i64> %val, <i64 63, i64 63>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-06.ll b/llvm/test/CodeGen/SystemZ/vec-shift-06.ll
new file mode 100644
index 00000000000..8a5bb0a9a55
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-06.ll
@@ -0,0 +1,134 @@
+; Test vector logical shift right with scalar shift amount.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 shift by a variable.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, i32 %shift) {
+; CHECK-LABEL: f1:
+; CHECK: vesrlb %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i8
+ %shiftvec = insertelement <16 x i8> undef, i8 %truncshift, i32 0
+ %val2 = shufflevector <16 x i8> %shiftvec, <16 x i8> undef,
+ <16 x i32> zeroinitializer
+ %ret = lshr <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the lowest useful constant.
+define <16 x i8> @f2(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f2:
+; CHECK: vesrlb %v24, %v26, 1
+; CHECK: br %r14
+ %ret = lshr <16 x i8> %val, <i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1,
+ i8 1, i8 1, i8 1, i8 1>
+ ret <16 x i8> %ret
+}
+
+; Test a v16i8 shift by the highest useful constant.
+define <16 x i8> @f3(<16 x i8> %dummy, <16 x i8> %val) {
+; CHECK-LABEL: f3:
+; CHECK: vesrlb %v24, %v26, 7
+; CHECK: br %r14
+ %ret = lshr <16 x i8> %val, <i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7,
+ i8 7, i8 7, i8 7, i8 7>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 shift by a variable.
+define <8 x i16> @f4(<8 x i16> %dummy, <8 x i16> %val1, i32 %shift) {
+; CHECK-LABEL: f4:
+; CHECK: vesrlh %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %truncshift = trunc i32 %shift to i16
+ %shiftvec = insertelement <8 x i16> undef, i16 %truncshift, i32 0
+ %val2 = shufflevector <8 x i16> %shiftvec, <8 x i16> undef,
+ <8 x i32> zeroinitializer
+ %ret = lshr <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the lowest useful constant.
+define <8 x i16> @f5(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f5:
+; CHECK: vesrlh %v24, %v26, 1
+; CHECK: br %r14
+ %ret = lshr <8 x i16> %val, <i16 1, i16 1, i16 1, i16 1,
+ i16 1, i16 1, i16 1, i16 1>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i16 shift by the highest useful constant.
+define <8 x i16> @f6(<8 x i16> %dummy, <8 x i16> %val) {
+; CHECK-LABEL: f6:
+; CHECK: vesrlh %v24, %v26, 15
+; CHECK: br %r14
+ %ret = lshr <8 x i16> %val, <i16 15, i16 15, i16 15, i16 15,
+ i16 15, i16 15, i16 15, i16 15>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 shift by a variable.
+define <4 x i32> @f7(<4 x i32> %dummy, <4 x i32> %val1, i32 %shift) {
+; CHECK-LABEL: f7:
+; CHECK: vesrlf %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %shiftvec = insertelement <4 x i32> undef, i32 %shift, i32 0
+ %val2 = shufflevector <4 x i32> %shiftvec, <4 x i32> undef,
+ <4 x i32> zeroinitializer
+ %ret = lshr <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the lowest useful constant.
+define <4 x i32> @f8(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vesrlf %v24, %v26, 1
+; CHECK: br %r14
+ %ret = lshr <4 x i32> %val, <i32 1, i32 1, i32 1, i32 1>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i32 shift by the highest useful constant.
+define <4 x i32> @f9(<4 x i32> %dummy, <4 x i32> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vesrlf %v24, %v26, 31
+; CHECK: br %r14
+ %ret = lshr <4 x i32> %val, <i32 31, i32 31, i32 31, i32 31>
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 shift by a variable.
+define <2 x i64> @f10(<2 x i64> %dummy, <2 x i64> %val1, i32 %shift) {
+; CHECK-LABEL: f10:
+; CHECK: vesrlg %v24, %v26, 0(%r2)
+; CHECK: br %r14
+ %extshift = sext i32 %shift to i64
+ %shiftvec = insertelement <2 x i64> undef, i64 %extshift, i32 0
+ %val2 = shufflevector <2 x i64> %shiftvec, <2 x i64> undef,
+ <2 x i32> zeroinitializer
+ %ret = lshr <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the lowest useful constant.
+define <2 x i64> @f11(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vesrlg %v24, %v26, 1
+; CHECK: br %r14
+ %ret = lshr <2 x i64> %val, <i64 1, i64 1>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i64 shift by the highest useful constant.
+define <2 x i64> @f12(<2 x i64> %dummy, <2 x i64> %val) {
+; CHECK-LABEL: f12:
+; CHECK: vesrlg %v24, %v26, 63
+; CHECK: br %r14
+ %ret = lshr <2 x i64> %val, <i64 63, i64 63>
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-shift-07.ll b/llvm/test/CodeGen/SystemZ/vec-shift-07.ll
new file mode 100644
index 00000000000..f229c5e25a4
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-shift-07.ll
@@ -0,0 +1,182 @@
+; Test vector sign extensions.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i1->v16i8 extension.
+define <16 x i8> @f1(<16 x i8> %val) {
+; CHECK-LABEL: f1:
+; CHECK: veslb [[REG:%v[0-9]+]], %v24, 7
+; CHECK: vesrab %v24, [[REG]], 7
+; CHECK: br %r14
+ %trunc = trunc <16 x i8> %val to <16 x i1>
+ %ret = sext <16 x i1> %trunc to <16 x i8>
+ ret <16 x i8> %ret
+}
+
+; Test a v8i1->v8i16 extension.
+define <8 x i16> @f2(<8 x i16> %val) {
+; CHECK-LABEL: f2:
+; CHECK: veslh [[REG:%v[0-9]+]], %v24, 15
+; CHECK: vesrah %v24, [[REG]], 15
+; CHECK: br %r14
+ %trunc = trunc <8 x i16> %val to <8 x i1>
+ %ret = sext <8 x i1> %trunc to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test a v8i8->v8i16 extension.
+define <8 x i16> @f3(<8 x i16> %val) {
+; CHECK-LABEL: f3:
+; CHECK: veslh [[REG:%v[0-9]+]], %v24, 8
+; CHECK: vesrah %v24, [[REG]], 8
+; CHECK: br %r14
+ %trunc = trunc <8 x i16> %val to <8 x i8>
+ %ret = sext <8 x i8> %trunc to <8 x i16>
+ ret <8 x i16> %ret
+}
+
+; Test a v4i1->v4i32 extension.
+define <4 x i32> @f4(<4 x i32> %val) {
+; CHECK-LABEL: f4:
+; CHECK: veslf [[REG:%v[0-9]+]], %v24, 31
+; CHECK: vesraf %v24, [[REG]], 31
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i1>
+ %ret = sext <4 x i1> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i8->v4i32 extension.
+define <4 x i32> @f5(<4 x i32> %val) {
+; CHECK-LABEL: f5:
+; CHECK: veslf [[REG:%v[0-9]+]], %v24, 24
+; CHECK: vesraf %v24, [[REG]], 24
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i8>
+ %ret = sext <4 x i8> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v4i16->v4i32 extension.
+define <4 x i32> @f6(<4 x i32> %val) {
+; CHECK-LABEL: f6:
+; CHECK: veslf [[REG:%v[0-9]+]], %v24, 16
+; CHECK: vesraf %v24, [[REG]], 16
+; CHECK: br %r14
+ %trunc = trunc <4 x i32> %val to <4 x i16>
+ %ret = sext <4 x i16> %trunc to <4 x i32>
+ ret <4 x i32> %ret
+}
+
+; Test a v2i1->v2i64 extension.
+define <2 x i64> @f7(<2 x i64> %val) {
+; CHECK-LABEL: f7:
+; CHECK: veslg [[REG:%v[0-9]+]], %v24, 63
+; CHECK: vesrag %v24, [[REG]], 63
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i1>
+ %ret = sext <2 x i1> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i8->v2i64 extension.
+define <2 x i64> @f8(<2 x i64> %val) {
+; CHECK-LABEL: f8:
+; CHECK: vsegb %v24, %v24
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i8>
+ %ret = sext <2 x i8> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i16->v2i64 extension.
+define <2 x i64> @f9(<2 x i64> %val) {
+; CHECK-LABEL: f9:
+; CHECK: vsegh %v24, %v24
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i16>
+ %ret = sext <2 x i16> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test a v2i32->v2i64 extension.
+define <2 x i64> @f10(<2 x i64> %val) {
+; CHECK-LABEL: f10:
+; CHECK: vsegf %v24, %v24
+; CHECK: br %r14
+ %trunc = trunc <2 x i64> %val to <2 x i32>
+ %ret = sext <2 x i32> %trunc to <2 x i64>
+ ret <2 x i64> %ret
+}
+
+; Test an alternative v2i8->v2i64 extension.
+define <2 x i64> @f11(<2 x i64> %val) {
+; CHECK-LABEL: f11:
+; CHECK: vsegb %v24, %v24
+; CHECK: br %r14
+ %shl = shl <2 x i64> %val, <i64 56, i64 56>
+ %ret = ashr <2 x i64> %shl, <i64 56, i64 56>
+ ret <2 x i64> %ret
+}
+
+; Test an alternative v2i16->v2i64 extension.
+define <2 x i64> @f12(<2 x i64> %val) {
+; CHECK-LABEL: f12:
+; CHECK: vsegh %v24, %v24
+; CHECK: br %r14
+ %shl = shl <2 x i64> %val, <i64 48, i64 48>
+ %ret = ashr <2 x i64> %shl, <i64 48, i64 48>
+ ret <2 x i64> %ret
+}
+
+; Test an alternative v2i32->v2i64 extension.
+define <2 x i64> @f13(<2 x i64> %val) {
+; CHECK-LABEL: f13:
+; CHECK: vsegf %v24, %v24
+; CHECK: br %r14
+ %shl = shl <2 x i64> %val, <i64 32, i64 32>
+ %ret = ashr <2 x i64> %shl, <i64 32, i64 32>
+ ret <2 x i64> %ret
+}
+
+; Test an extraction-based v2i8->v2i64 extension.
+define <2 x i64> @f14(<16 x i8> %val) {
+; CHECK-LABEL: f14:
+; CHECK: vsegb %v24, %v24
+; CHECK: br %r14
+ %elt0 = extractelement <16 x i8> %val, i32 7
+ %elt1 = extractelement <16 x i8> %val, i32 15
+ %ext0 = sext i8 %elt0 to i64
+ %ext1 = sext i8 %elt1 to i64
+ %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
+ %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
+ ret <2 x i64> %vec1
+}
+
+; Test an extraction-based v2i16->v2i64 extension.
+define <2 x i64> @f15(<16 x i16> %val) {
+; CHECK-LABEL: f15:
+; CHECK: vsegh %v24, %v24
+; CHECK: br %r14
+ %elt0 = extractelement <16 x i16> %val, i32 3
+ %elt1 = extractelement <16 x i16> %val, i32 7
+ %ext0 = sext i16 %elt0 to i64
+ %ext1 = sext i16 %elt1 to i64
+ %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
+ %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
+ ret <2 x i64> %vec1
+}
+
+; Test an extraction-based v2i32->v2i64 extension.
+define <2 x i64> @f16(<16 x i32> %val) {
+; CHECK-LABEL: f16:
+; CHECK: vsegf %v24, %v24
+; CHECK: br %r14
+ %elt0 = extractelement <16 x i32> %val, i32 1
+ %elt1 = extractelement <16 x i32> %val, i32 3
+ %ext0 = sext i32 %elt0 to i64
+ %ext1 = sext i32 %elt1 to i64
+ %vec0 = insertelement <2 x i64> undef, i64 %ext0, i32 0
+ %vec1 = insertelement <2 x i64> %vec0, i64 %ext1, i32 1
+ ret <2 x i64> %vec1
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-sub-01.ll b/llvm/test/CodeGen/SystemZ/vec-sub-01.ll
new file mode 100644
index 00000000000..9e5b4f81e6d
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-sub-01.ll
@@ -0,0 +1,39 @@
+; Test vector subtraction.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 subtraction.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vsb %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = sub <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 subtraction.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vsh %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = sub <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 subtraction.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vsf %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = sub <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 subtraction.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vsg %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = sub <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
diff --git a/llvm/test/CodeGen/SystemZ/vec-xor-01.ll b/llvm/test/CodeGen/SystemZ/vec-xor-01.ll
new file mode 100644
index 00000000000..063b768117c
--- /dev/null
+++ b/llvm/test/CodeGen/SystemZ/vec-xor-01.ll
@@ -0,0 +1,39 @@
+; Test vector XOR.
+;
+; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s
+
+; Test a v16i8 XOR.
+define <16 x i8> @f1(<16 x i8> %dummy, <16 x i8> %val1, <16 x i8> %val2) {
+; CHECK-LABEL: f1:
+; CHECK: vx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <16 x i8> %val1, %val2
+ ret <16 x i8> %ret
+}
+
+; Test a v8i16 XOR.
+define <8 x i16> @f2(<8 x i16> %dummy, <8 x i16> %val1, <8 x i16> %val2) {
+; CHECK-LABEL: f2:
+; CHECK: vx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <8 x i16> %val1, %val2
+ ret <8 x i16> %ret
+}
+
+; Test a v4i32 XOR.
+define <4 x i32> @f3(<4 x i32> %dummy, <4 x i32> %val1, <4 x i32> %val2) {
+; CHECK-LABEL: f3:
+; CHECK: vx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <4 x i32> %val1, %val2
+ ret <4 x i32> %ret
+}
+
+; Test a v2i64 XOR.
+define <2 x i64> @f4(<2 x i64> %dummy, <2 x i64> %val1, <2 x i64> %val2) {
+; CHECK-LABEL: f4:
+; CHECK: vx %v24, %v26, %v28
+; CHECK: br %r14
+ %ret = xor <2 x i64> %val1, %val2
+ ret <2 x i64> %ret
+}
OpenPOWER on IntegriCloud