Diffstat (limited to 'llvm/test/CodeGen')
 -rw-r--r--  llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll |  62
 -rw-r--r--  llvm/test/CodeGen/PowerPC/vec_cmpd.ll               | 258
 -rw-r--r--  llvm/test/CodeGen/PowerPC/vec_minmax.ll             |  34
 -rw-r--r--  llvm/test/CodeGen/PowerPC/vec_mul_even_odd.ll       |  41
 -rw-r--r--  llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll       |  33
5 files changed, 428 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
new file mode 100644
index 00000000000..013e16e2a73
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec_add_sub_doubleword.ll
@@ -0,0 +1,62 @@
+; Check VMX 64-bit integer operations
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+define <2 x i64> @test_add(<2 x i64> %x, <2 x i64> %y) nounwind {
+  %result = add <2 x i64> %x, %y
+  ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+}
+
+define <2 x i64> @increment_by_one(<2 x i64> %x) nounwind {
+  %result = add <2 x i64> %x, <i64 1, i64 1>
+  ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+}
+
+define <2 x i64> @increment_by_val(<2 x i64> %x, i64 %val) nounwind {
+  %tmpvec = insertelement <2 x i64> <i64 0, i64 0>, i64 %val, i32 0
+  %tmpvec2 = insertelement <2 x i64> %tmpvec, i64 %val, i32 1
+  %result = add <2 x i64> %x, %tmpvec2
+  ret <2 x i64> %result
+; CHECK: vaddudm 2, 2, 3
+; FIXME: This currently generates the following instruction sequence:
+;
+;   std 5, -8(1)
+;   std 5, -16(1)
+;   addi 3, 1, -16
+;   ori 2, 2, 0
+;   lxvd2x 35, 0, 3
+;   vaddudm 2, 2, 3
+;   blr
+;
+; This will almost certainly cause a load-hit-store hazard.
+; Since %val is a value parameter, it should not need to be
+; saved onto the stack at all (unless we're using this to set
+; up the vector register). Instead, it would be better to splat
+; the value into a vector register.
+}
+
+define <2 x i64> @test_sub(<2 x i64> %x, <2 x i64> %y) nounwind {
+  %result = sub <2 x i64> %x, %y
+  ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
+
+define <2 x i64> @decrement_by_one(<2 x i64> %x) nounwind {
+  %result = sub <2 x i64> %x, <i64 -1, i64 -1>
+  ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
+
+define <2 x i64> @decrement_by_val(<2 x i64> %x, i64 %val) nounwind {
+  %tmpvec = insertelement <2 x i64> <i64 0, i64 0>, i64 %val, i32 0
+  %tmpvec2 = insertelement <2 x i64> %tmpvec, i64 %val, i32 1
+  %result = sub <2 x i64> %x, %tmpvec2
+  ret <2 x i64> %result
+; CHECK: vsubudm 2, 2, 3
+}
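The FIXME above suggests splatting the scalar into a vector register rather than bouncing it through the stack. For reference, a minimal C sketch of that approach, assuming the standard <altivec.h> interfaces on a POWER8 target (the function name is illustrative, not part of the test suite):

    #include <altivec.h>

    /* Splat the scalar into both doubleword lanes, then do the vector
     * add. Built with -mcpu=power8 -O2, this should let the value move
     * GPR->VR and be splatted (e.g. mtvsrd/xxspltd on VSX targets)
     * without the store/reload that causes the load-hit-store hazard
     * described in the FIXME. */
    vector unsigned long long
    increment_by_val(vector unsigned long long x, unsigned long long val)
    {
        return vec_add(x, vec_splats(val));
    }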
diff --git a/llvm/test/CodeGen/PowerPC/vec_cmpd.ll b/llvm/test/CodeGen/PowerPC/vec_cmpd.ll
new file mode 100644
index 00000000000..4a06ed9ffaf
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec_cmpd.ll
@@ -0,0 +1,258 @@
+; Test the doubleword comparison instructions that were added in POWER8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+define <2 x i64> @v2si64_cmp(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %cmp = icmp eq <2 x i64> %x, %y
+  %result = sext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %result
+; CHECK-LABEL: v2si64_cmp:
+; CHECK: vcmpequd 2, 2, 3
+}
+
+define <4 x i64> @v4si64_cmp(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+  %cmp = icmp eq <4 x i64> %x, %y
+  %result = sext <4 x i1> %cmp to <4 x i64>
+  ret <4 x i64> %result
+; CHECK-LABEL: v4si64_cmp:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8si64_cmp(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+  %cmp = icmp eq <8 x i64> %x, %y
+  %result = sext <8 x i1> %cmp to <8 x i64>
+  ret <8 x i64> %result
+; CHECK-LABEL: v8si64_cmp:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16si64_cmp(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+  %cmp = icmp eq <16 x i64> %x, %y
+  %result = sext <16 x i1> %cmp to <16 x i64>
+  ret <16 x i64> %result
+; CHECK-LABEL: v16si64_cmp:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32si64_cmp(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+  %cmp = icmp eq <32 x i64> %x, %y
+  %result = sext <32 x i1> %cmp to <32 x i64>
+  ret <32 x i64> %result
+; CHECK-LABEL: v32si64_cmp:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Greater than signed
+define <2 x i64> @v2si64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %cmp = icmp sgt <2 x i64> %x, %y
+  %result = sext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %result
+; CHECK-LABEL: v2si64_cmp_gt:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <4 x i64> @v4si64_cmp_gt(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+  %cmp = icmp sgt <4 x i64> %x, %y
+  %result = sext <4 x i1> %cmp to <4 x i64>
+  ret <4 x i64> %result
+; CHECK-LABEL: v4si64_cmp_gt:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8si64_cmp_gt(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+  %cmp = icmp sgt <8 x i64> %x, %y
+  %result = sext <8 x i1> %cmp to <8 x i64>
+  ret <8 x i64> %result
+; CHECK-LABEL: v8si64_cmp_gt:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16si64_cmp_gt(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+  %cmp = icmp sgt <16 x i64> %x, %y
+  %result = sext <16 x i1> %cmp to <16 x i64>
+  ret <16 x i64> %result
+; CHECK-LABEL: v16si64_cmp_gt:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32si64_cmp_gt(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+  %cmp = icmp sgt <32 x i64> %x, %y
+  %result = sext <32 x i1> %cmp to <32 x i64>
+  ret <32 x i64> %result
+; CHECK-LABEL: v32si64_cmp_gt:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Greater than unsigned
+define <2 x i64> @v2ui64_cmp_gt(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %cmp = icmp ugt <2 x i64> %x, %y
+  %result = sext <2 x i1> %cmp to <2 x i64>
+  ret <2 x i64> %result
+; CHECK-LABEL: v2ui64_cmp_gt:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <4 x i64> @v4ui64_cmp_gt(<4 x i64> %x, <4 x i64> %y) nounwind readnone {
+  %cmp = icmp ugt <4 x i64> %x, %y
+  %result = sext <4 x i1> %cmp to <4 x i64>
+  ret <4 x i64> %result
+; CHECK-LABEL: v4ui64_cmp_gt:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <8 x i64> @v8ui64_cmp_gt(<8 x i64> %x, <8 x i64> %y) nounwind readnone {
+  %cmp = icmp ugt <8 x i64> %x, %y
+  %result = sext <8 x i1> %cmp to <8 x i64>
+  ret <8 x i64> %result
+; CHECK-LABEL: v8ui64_cmp_gt:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <16 x i64> @v16ui64_cmp_gt(<16 x i64> %x, <16 x i64> %y) nounwind readnone {
+  %cmp = icmp ugt <16 x i64> %x, %y
+  %result = sext <16 x i1> %cmp to <16 x i64>
+  ret <16 x i64> %result
+; CHECK-LABEL: v16ui64_cmp_gt:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <32 x i64> @v32ui64_cmp_gt(<32 x i64> %x, <32 x i64> %y) nounwind readnone {
+  %cmp = icmp ugt <32 x i64> %x, %y
+  %result = sext <32 x i1> %cmp to <32 x i64>
+  ret <32 x i64> %result
+; CHECK-LABEL: v32ui64_cmp_gt:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+; Check the intrinsics also
+declare <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpequd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpgtsd.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64>, <2 x i64>) nounwind readnone
+declare i32 @llvm.ppc.altivec.vcmpgtud.p(i32, <2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vcmpequd(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpequd(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpequd:
+; CHECK: vcmpequd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpequd_p(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call i32 @llvm.ppc.altivec.vcmpequd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+  ret i32 %tmp
+; CHECK-LABEL: test_vcmpequd_p:
+; CHECK: vcmpequd. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <2 x i64> @test_vcmpgtsd(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpgtsd:
+; CHECK: vcmpgtsd {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpgtsd_p(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtsd.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+  ret i32 %tmp
+; CHECK-LABEL: test_vcmpgtsd_p:
+; CHECK: vcmpgtsd. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define <2 x i64> @test_vcmpgtud(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK-LABEL: test_vcmpgtud:
+; CHECK: vcmpgtud {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
+
+define i32 @test_vcmpgtud_p(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call i32 @llvm.ppc.altivec.vcmpgtud.p(i32 2, <2 x i64> %x, <2 x i64> %y)
+  ret i32 %tmp
+; CHECK-LABEL: test_vcmpgtud_p:
+; CHECK: vcmpgtud. {{[0-9]+}}, {{[0-9]+}}, {{[0-9]+}}
+}
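For context, these compares are normally reached from C through the AltiVec intrinsics. A minimal sketch, assuming the <altivec.h> interfaces available with -mcpu=power8 (function names are illustrative): vec_cmpeq on 64-bit lanes should select vcmpequd, and the all-lanes predicate vec_all_eq should select the record form vcmpequd. plus a CR6 test, which is what the leading i32 argument of the .p intrinsics above encodes.

    #include <altivec.h>

    /* Element-wise compare; expected to select vcmpequd. */
    vector bool long long eq_mask(vector signed long long a,
                                  vector signed long long b)
    {
        return vec_cmpeq(a, b);
    }

    /* All-lanes predicate; expected to select vcmpequd. and test CR6,
     * corresponding to the i32 2 passed to @llvm.ppc.altivec.vcmpequd.p
     * in test_vcmpequd_p above. */
    int all_eq(vector signed long long a, vector signed long long b)
    {
        return vec_all_eq(a, b);
    }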
diff --git a/llvm/test/CodeGen/PowerPC/vec_minmax.ll b/llvm/test/CodeGen/PowerPC/vec_minmax.ll
new file mode 100644
index 00000000000..e9ba6a01a9b
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec_minmax.ll
@@ -0,0 +1,34 @@
+; Test the vector min/max doubleword instructions added for P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vmaxsd(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmaxsd 2, 2, 3
+}
+
+define <2 x i64> @test_vmaxud(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmaxud(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmaxud 2, 2, 3
+}
+
+define <2 x i64> @test_vminsd(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminsd(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vminsd 2, 2, 3
+}
+
+define <2 x i64> @test_vminud(<2 x i64> %x, <2 x i64> %y) {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vminud(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vminud 2, 2, 3
+}
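The same operations are reachable from C via vec_max/vec_min on 64-bit element types. A sketch under the same assumptions as the earlier examples (<altivec.h>, -mcpu=power8, illustrative names):

    #include <altivec.h>

    /* Expected to select vmaxsd (signed max). */
    vector signed long long max_sd(vector signed long long a,
                                   vector signed long long b)
    {
        return vec_max(a, b);
    }

    /* Expected to select vminud (unsigned min). */
    vector unsigned long long min_ud(vector unsigned long long a,
                                     vector unsigned long long b)
    {
        return vec_min(a, b);
    }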
diff --git a/llvm/test/CodeGen/PowerPC/vec_mul_even_odd.ll b/llvm/test/CodeGen/PowerPC/vec_mul_even_odd.ll
new file mode 100644
index 00000000000..2746ae54943
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec_mul_even_odd.ll
@@ -0,0 +1,41 @@
+; Check the vector multiply even/odd word instructions that were added in P8
+;
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32>, <4 x i32>) nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32>, <4 x i32>) nounwind readnone
+
+define <2 x i64> @test_vmuleuw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmuleuw(<4 x i32> %x, <4 x i32> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmuleuw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulesw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulesw(<4 x i32> %x, <4 x i32> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmulesw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulouw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulouw(<4 x i32> %x, <4 x i32> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmulouw 2, 2, 3
+}
+
+define <2 x i64> @test_vmulosw(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vmulosw(<4 x i32> %x, <4 x i32> %y)
+  ret <2 x i64> %tmp
+; CHECK: vmulosw 2, 2, 3
+}
+
+define <4 x i32> @test_vmuluwm(<4 x i32> %x, <4 x i32> %y) nounwind readnone {
+  %tmp = tail call <4 x i32> @llvm.ppc.altivec.vmuluwm(<4 x i32> %x, <4 x i32> %y)
+  ret <4 x i32> %tmp
+; CHECK: vmuluwm 2, 2, 3
+}

diff --git a/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll b/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll
new file mode 100644
index 00000000000..4dd307c5e5f
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll
@@ -0,0 +1,33 @@
+; Test the vector rotate and shift doubleword instructions that were added in P8
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 < %s | FileCheck %s
+; RUN: llc -mtriple=powerpc64-unknown-linux-gnu -mcpu=pwr8 -mattr=-vsx < %s | FileCheck %s
+
+declare <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64>, <2 x i64>) nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64>, <2 x i64>) nounwind readnone
+
+define <2 x i64> @test_vrld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vrld(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vrld 2, 2, 3
+}
+
+define <2 x i64> @test_vsld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vsld 2, 2, 3
+}
+
+define <2 x i64> @test_vsrd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vsrd 2, 2, 3
+}
+
+define <2 x i64> @test_vsrad(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
+  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64> %x, <2 x i64> %y)
+  ret <2 x i64> %tmp
+; CHECK: vsrad 2, 2, 3
+}
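Finally, a sketch of the C-level entry points for the multiply even/odd and doubleword rotate/shift tests above, under the same assumptions (<altivec.h>, -mcpu=power8, illustrative names):

    #include <altivec.h>

    /* Even/odd widening word multiplies; expected to select
     * vmulesw and vmulouw respectively. */
    vector signed long long mul_even(vector signed int a,
                                     vector signed int b)
    {
        return vec_mule(a, b);
    }

    vector unsigned long long mul_odd(vector unsigned int a,
                                      vector unsigned int b)
    {
        return vec_mulo(a, b);
    }

    /* Doubleword rotate and arithmetic shift; expected to select
     * vrld and vsrad respectively. */
    vector unsigned long long rot_left(vector unsigned long long x,
                                       vector unsigned long long amt)
    {
        return vec_rl(x, amt);
    }

    vector signed long long shift_right_alg(vector signed long long x,
                                            vector unsigned long long amt)
    {
        return vec_sra(x, amt);
    }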

