Diffstat (limited to 'llvm')
-rw-r--r--  llvm/include/llvm/IR/IntrinsicsPowerPC.td   | 19
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrAltivec.td  | 24
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrVSX.td      |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/vsx-p9.ll         | 70
4 files changed, 112 insertions(+), 5 deletions(-)
diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index 77f67903eb1..f002f0ef8a0 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -711,6 +711,22 @@ def int_ppc_altivec_vabsdub : PowerPC_Vec_BBB_Intrinsic<"vabsdub">;
def int_ppc_altivec_vabsduh : PowerPC_Vec_HHH_Intrinsic<"vabsduh">;
def int_ppc_altivec_vabsduw : PowerPC_Vec_WWW_Intrinsic<"vabsduw">;
+// Vector rotates
+def int_ppc_altivec_vrlwnm :
+ PowerPC_Vec_Intrinsic<"vrlwnm", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrlwmi :
+ PowerPC_Vec_Intrinsic<"vrlwmi", [llvm_v4i32_ty],
+ [llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [IntrNoMem]>;
+def int_ppc_altivec_vrldnm :
+ PowerPC_Vec_Intrinsic<"vrldnm", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty], [IntrNoMem]>;
+def int_ppc_altivec_vrldmi :
+ PowerPC_Vec_Intrinsic<"vrldmi", [llvm_v2i64_ty],
+ [llvm_v2i64_ty, llvm_v2i64_ty, llvm_v2i64_ty],
+ [IntrNoMem]>;
+
//===----------------------------------------------------------------------===//
// PowerPC VSX Intrinsic Definitions.
@@ -830,6 +846,9 @@ def int_ppc_vsx_xvcvuxdsp :
def int_ppc_vsx_xvcvdpsp :
PowerPC_VSX_Intrinsic<"xvcvdpsp", [llvm_v4f32_ty],
[llvm_v2f64_ty], [IntrNoMem]>;
+def int_ppc_vsx_xvcvsphp :
+ PowerPC_VSX_Intrinsic<"xvcvsphp", [llvm_v4f32_ty],
+ [llvm_v4f32_ty], [IntrNoMem]>;
}
//===----------------------------------------------------------------------===//
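
The four new Altivec rotate intrinsics follow the operand order exercised by the tests at the end of this diff: the mask forms (vrlwnm/vrldnm) take the source vector and a control vector, while the mask-insert forms (vrlwmi/vrldmi) take a third operand supplying the bits preserved outside the mask, which the instruction patterns below tie to the destination register via RegConstraint<"$vDi = $vD">. A minimal IR sketch of a mask-insert call (the wrapper function name is hypothetical; the same idiom appears in vsx-p9.ll below):

; Hypothetical standalone example mirroring testVRLWMI in the test diff below.
declare <4 x i32> @llvm.ppc.altivec.vrlwmi(<4 x i32>, <4 x i32>, <4 x i32>)

define <4 x i32> @rotate_insert_words(<4 x i32> %src, <4 x i32> %ctl, <4 x i32> %insert) {
entry:
  ; Operand order matches the TableGen pattern: ($vA, $vB, $vDi).
  ; %src is rotated, %ctl carries the rotate/mask controls, and %insert
  ; supplies the destination bits kept outside the mask (tied to the result).
  %r = tail call <4 x i32> @llvm.ppc.altivec.vrlwmi(<4 x i32> %src, <4 x i32> %ctl, <4 x i32> %insert)
  ret <4 x i32> %r
}
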
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index a81fd5cedd6..f9a500bea17 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -1337,10 +1337,26 @@ class VX1_VT5_VA5_VB5<bits<11> xo, string opc, list<dag> pattern>
!strconcat(opc, " $vD, $vA, $vB"), IIC_VecFP, pattern>;
// Vector Rotate Left Mask/Mask-Insert
-def VRLWNM : VX1_VT5_VA5_VB5<389, "vrlwnm", []>;
-def VRLWMI : VX1_VT5_VA5_VB5<133, "vrlwmi", []>;
-def VRLDNM : VX1_VT5_VA5_VB5<453, "vrldnm", []>;
-def VRLDMI : VX1_VT5_VA5_VB5<197, "vrldmi", []>;
+def VRLWNM : VX1_VT5_VA5_VB5<389, "vrlwnm",
+ [(set v4i32:$vD,
+ (int_ppc_altivec_vrlwnm v4i32:$vA,
+ v4i32:$vB))]>;
+def VRLWMI : VXForm_1<133, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vDi),
+ "vrlwmi $vD, $vA, $vB", IIC_VecFP,
+ [(set v4i32:$vD,
+ (int_ppc_altivec_vrlwmi v4i32:$vA, v4i32:$vB,
+ v4i32:$vDi))]>,
+ RegConstraint<"$vDi = $vD">, NoEncode<"$vDi">;
+def VRLDNM : VX1_VT5_VA5_VB5<453, "vrldnm",
+ [(set v2i64:$vD,
+ (int_ppc_altivec_vrldnm v2i64:$vA,
+ v2i64:$vB))]>;
+def VRLDMI : VXForm_1<197, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB, vrrc:$vDi),
+ "vrldmi $vD, $vA, $vB", IIC_VecFP,
+ [(set v2i64:$vD,
+ (int_ppc_altivec_vrldmi v2i64:$vA, v2i64:$vB,
+ v2i64:$vDi))]>,
+ RegConstraint<"$vDi = $vD">, NoEncode<"$vDi">;
// Vector Shift Left/Right
def VSLV : VX1_VT5_VA5_VB5<1860, "vslv",
diff --git a/llvm/lib/Target/PowerPC/PPCInstrVSX.td b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
index 4e7e921c0b6..e9a06f3a381 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrVSX.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrVSX.td
@@ -2144,7 +2144,9 @@ let AddedComplexity = 400, Predicates = [HasP9Vector] in {
// Vector HP -> SP
def XVCVHPSP : XX2_XT6_XO5_XB6<60, 24, 475, "xvcvhpsp", vsrc, []>;
- def XVCVSPHP : XX2_XT6_XO5_XB6<60, 25, 475, "xvcvsphp", vsrc, []>;
+ def XVCVSPHP : XX2_XT6_XO5_XB6<60, 25, 475, "xvcvsphp", vsrc,
+ [(set v4f32:$XT,
+ (int_ppc_vsx_xvcvsphp v4f32:$XB))]>;
class Z23_VT5_R1_VB5_RMC2_EX1<bits<6> opcode, bits<8> xo, bit ex, string opc,
list<dag> pattern>
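
The xvcvsphp pattern exposes the single-precision to half-precision conversion as an intrinsic whose IR result type stays <4 x float>; the test below bitcasts that result to <8 x i16> to return the raw converted lanes. A minimal sketch of the same idiom (wrapper name hypothetical):

; Hypothetical wrapper following the same pattern as testXVCVSPHP below.
declare <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float>)

define <8 x i16> @fp32_to_fp16(<4 x float> %v) {
entry:
  ; The conversion result comes back typed as <4 x float>; reinterpret the
  ; register contents as eight 16-bit lanes.
  %conv = tail call <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float> %v)
  %halves = bitcast <4 x float> %conv to <8 x i16>
  ret <8 x i16> %halves
}
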
diff --git a/llvm/test/CodeGen/PowerPC/vsx-p9.ll b/llvm/test/CodeGen/PowerPC/vsx-p9.ll
index 6514bc9e97f..9d546ec6b15 100644
--- a/llvm/test/CodeGen/PowerPC/vsx-p9.ll
+++ b/llvm/test/CodeGen/PowerPC/vsx-p9.ll
@@ -190,4 +190,74 @@ entry:
; Function Attrs: nounwind readnone
declare <16 x i8> @llvm.ppc.altivec.vsrv(<16 x i8>, <16 x i8>)
+; Function Attrs: nounwind readnone
+define <8 x i16> @testXVCVSPHP(<4 x float> %a) {
+entry:
+; CHECK-LABEL: testXVCVSPHP
+; CHECK: xvcvsphp 34, 34
+; CHECK: blr
+ %0 = tail call <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float> %a)
+ %1 = bitcast <4 x float> %0 to <8 x i16>
+ ret <8 x i16> %1
+}
+
+; Function Attrs: nounwind readnone
+define <4 x i32> @testVRLWMI(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+entry:
+; CHECK-LABEL: testVRLWMI
+; CHECK: vrlwmi 3, 2, 4
+; CHECK: blr
+ %0 = tail call <4 x i32> @llvm.ppc.altivec.vrlwmi(<4 x i32> %a, <4 x i32> %c, <4 x i32> %b)
+ ret <4 x i32> %0
+}
+
+; Function Attrs: nounwind readnone
+define <2 x i64> @testVRLDMI(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+entry:
+; CHECK-LABEL: testVRLDMI
+; CHECK: vrldmi 3, 2, 4
+; CHECK: blr
+ %0 = tail call <2 x i64> @llvm.ppc.altivec.vrldmi(<2 x i64> %a, <2 x i64> %c, <2 x i64> %b)
+ ret <2 x i64> %0
+}
+
+; Function Attrs: nounwind readnone
+define <4 x i32> @testVRLWNM(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
+entry:
+ %0 = tail call <4 x i32> @llvm.ppc.altivec.vrlwnm(<4 x i32> %a, <4 x i32> %b)
+ %and.i = and <4 x i32> %0, %c
+ ret <4 x i32> %and.i
+; CHECK-LABEL: testVRLWNM
+; CHECK: vrlwnm 2, 2, 3
+; CHECK: xxland 34, 34, 36
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+define <2 x i64> @testVRLDNM(<2 x i64> %a, <2 x i64> %b, <2 x i64> %c) {
+entry:
+ %0 = tail call <2 x i64> @llvm.ppc.altivec.vrldnm(<2 x i64> %a, <2 x i64> %b)
+ %and.i = and <2 x i64> %0, %c
+ ret <2 x i64> %and.i
+; CHECK-LABEL: testVRLDNM
+; CHECK: vrldnm 2, 2, 3
+; CHECK: xxland 34, 34, 36
+; CHECK: blr
+}
+
+; Function Attrs: nounwind readnone
+declare <4 x float> @llvm.ppc.vsx.xvcvsphp(<4 x float>)
+
+; Function Attrs: nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.vrlwmi(<4 x i32>, <4 x i32>, <4 x i32>)
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vrldmi(<2 x i64>, <2 x i64>, <2 x i64>)
+
+; Function Attrs: nounwind readnone
+declare <4 x i32> @llvm.ppc.altivec.vrlwnm(<4 x i32>, <4 x i32>)
+
+; Function Attrs: nounwind readnone
+declare <2 x i64> @llvm.ppc.altivec.vrldnm(<2 x i64>, <2 x i64>)
+
declare void @sink(...)