-rw-r--r--  llvm/include/llvm/IR/IntrinsicsPowerPC.td      |  3
-rw-r--r--  llvm/lib/Target/PowerPC/PPCISelLowering.cpp    | 12
-rw-r--r--  llvm/lib/Target/PowerPC/PPCInstrAltivec.td     | 14
-rw-r--r--  llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll  | 13
4 files changed, 26 insertions, 16 deletions
diff --git a/llvm/include/llvm/IR/IntrinsicsPowerPC.td b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
index bb03e053a05..e7b77d31f1d 100644
--- a/llvm/include/llvm/IR/IntrinsicsPowerPC.td
+++ b/llvm/include/llvm/IR/IntrinsicsPowerPC.td
@@ -517,7 +517,6 @@ def int_ppc_altivec_vslo : PowerPC_Vec_WWW_Intrinsic<"vslo">;
 def int_ppc_altivec_vslb : PowerPC_Vec_BBB_Intrinsic<"vslb">;
 def int_ppc_altivec_vslh : PowerPC_Vec_HHH_Intrinsic<"vslh">;
 def int_ppc_altivec_vslw : PowerPC_Vec_WWW_Intrinsic<"vslw">;
-def int_ppc_altivec_vsld : PowerPC_Vec_DDD_Intrinsic<"vsld">;
 
 // Right Shifts.
 def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">;
@@ -526,11 +525,9 @@ def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">;
 def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
 def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
 def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
-def int_ppc_altivec_vsrd : PowerPC_Vec_DDD_Intrinsic<"vsrd">;
 def int_ppc_altivec_vsrab : PowerPC_Vec_BBB_Intrinsic<"vsrab">;
 def int_ppc_altivec_vsrah : PowerPC_Vec_HHH_Intrinsic<"vsrah">;
 def int_ppc_altivec_vsraw : PowerPC_Vec_WWW_Intrinsic<"vsraw">;
-def int_ppc_altivec_vsrad : PowerPC_Vec_DDD_Intrinsic<"vsrad">;
 
 // Rotates.
 def int_ppc_altivec_vrlb : PowerPC_Vec_BBB_Intrinsic<"vrlb">;
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 70770a902af..2251dd1dcbe 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -574,14 +574,18 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
       addRegisterClass(MVT::v4f32, &PPC::VSRCRegClass);
       addRegisterClass(MVT::v2f64, &PPC::VSRCRegClass);
 
-      setOperationAction(ISD::SHL, MVT::v2i64, Expand);
-      setOperationAction(ISD::SRA, MVT::v2i64, Expand);
-      setOperationAction(ISD::SRL, MVT::v2i64, Expand);
-
       if (Subtarget.hasP8Altivec()) {
+        setOperationAction(ISD::SHL, MVT::v2i64, Legal);
+        setOperationAction(ISD::SRA, MVT::v2i64, Legal);
+        setOperationAction(ISD::SRL, MVT::v2i64, Legal);
+
         setOperationAction(ISD::SETCC, MVT::v2i64, Legal);
       }
       else {
+        setOperationAction(ISD::SHL, MVT::v2i64, Expand);
+        setOperationAction(ISD::SRA, MVT::v2i64, Expand);
+        setOperationAction(ISD::SRL, MVT::v2i64, Expand);
+
         setOperationAction(ISD::SETCC, MVT::v2i64, Custom);
 
         // VSX v2i64 only supports non-arithmetic operations.
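Marking ISD::SHL, ISD::SRA, and ISD::SRL as Legal for v2i64 on a POWER8 subtarget tells the SelectionDAG legalizer to leave the generic shift nodes intact, so the selection patterns added to PPCInstrAltivec.td below can match them directly; on older subtargets the operations stay Expand and are scalarized. As a minimal sketch (not part of this commit, function name hypothetical), IR like the following is expected to select a single vsld when compiled with llc -mcpu=pwr8:

; Hypothetical example, not taken from the patch: an element-wise 64-bit
; shift that the new VSLD pattern can match on POWER8.
define <2 x i64> @shift_each_lane(<2 x i64> %val, <2 x i64> %amt) {
  %res = shl <2 x i64> %val, %amt
  ret <2 x i64> %res
}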
diff --git a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
index 2de852b4843..020c22ea770 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -969,11 +969,17 @@ def VMINSD : VX1_Int_Ty<962, "vminsd", int_ppc_altivec_vminsd, v2i64>;
 def VMIDUD : VX1_Int_Ty<706, "vminud", int_ppc_altivec_vminud, v2i64>;
 } // isCommutable
 
+// Vector shifts
 def VRLD : VX1_Int_Ty<196, "vrld", int_ppc_altivec_vrld, v2i64>;
-def VSLD : VX1_Int_Ty<1476, "vsld", int_ppc_altivec_vsld, v2i64>;
-def VSRD : VX1_Int_Ty<1732, "vsrd", int_ppc_altivec_vsrd, v2i64>;
-def VSRAD : VX1_Int_Ty<964, "vsrad", int_ppc_altivec_vsrad, v2i64>;
-
+def VSLD : VXForm_1<1476, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsld $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (shl v2i64:$vA, v2i64:$vB))]>;
+def VSRD : VXForm_1<1732, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsrd $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (srl v2i64:$vA, v2i64:$vB))]>;
+def VSRAD : VXForm_1<964, (outs vrrc:$vD), (ins vrrc:$vA, vrrc:$vB),
+                    "vsrad $vD, $vA, $vB", IIC_VecGeneral,
+                    [(set v2i64:$vD, (sra v2i64:$vA, v2i64:$vB))]>;
 
 // Vector Integer Arithmetic Instructions
 let isCommutable = 1 in {
diff --git a/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll b/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll
index 4dd307c5e5f..1a2e9578e03 100644
--- a/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_rotate_shift.ll
@@ -14,20 +14,23 @@ define <2 x i64> @test_vrld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
 }
 
 define <2 x i64> @test_vsld(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsld(<2 x i64> %x, <2 x i64> %y)
+  %tmp = shl <2 x i64> %x, %y
   ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsld
 ; CHECK: vsld 2, 2, 3
 }
 
 define <2 x i64> @test_vsrd(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrd(<2 x i64> %x, <2 x i64> %y)
-  ret <2 x i64> %tmp
+  %tmp = lshr <2 x i64> %x, %y
+  ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsrd
 ; CHECK: vsrd 2, 2, 3
 }
 
 define <2 x i64> @test_vsrad(<2 x i64> %x, <2 x i64> %y) nounwind readnone {
-  %tmp = tail call <2 x i64> @llvm.ppc.altivec.vsrad(<2 x i64> %x, <2 x i64> %y)
-  ret <2 x i64> %tmp
+  %tmp = ashr <2 x i64> %x, %y
+  ret <2 x i64> %tmp
+; CHECK-LABEL: @test_vsrad
 ; CHECK: vsrad 2, 2, 3
 }
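The tests now exercise the generic IR shift operators instead of the removed intrinsics, and each function gains a CHECK-LABEL so FileCheck ties every vsld/vsrd/vsrad check to the right function body. The file's RUN line sits above this hunk and is not shown; it presumably runs llc for a POWER8 subtarget and pipes the output into FileCheck. A further case in the same style, purely illustrative and not part of the patch, would shift by a splat constant, which still goes through the same v2i64 SRA node:

; Hypothetical extra test, not in the file: the splat shift amount is
; materialized into a vector register, so the VSRAD pattern should still match.
define <2 x i64> @test_vsrad_splat(<2 x i64> %x) nounwind readnone {
  %tmp = ashr <2 x i64> %x, <i64 4, i64 4>
  ret <2 x i64> %tmp
; CHECK-LABEL: @test_vsrad_splat
; CHECK: vsrad
}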

