author    Arnold Schwaighofer <aschwaighofer@apple.com>  2013-04-08 18:05:48 +0000
committer Arnold Schwaighofer <aschwaighofer@apple.com>  2013-04-08 18:05:48 +0000
commit    f47d2d7f6b8b57a227b029710dd9f78f1de4dc11 (patch)
tree      56552b78b4bb0a4921398a3859e21d5b8ae77d4b /llvm
parent    32efd25b939432d1c5b365fbb0438c3644a546f1 (diff)
X86 cost model: Model cost for uitofp and sitofp on SSE2
The costs are overfitted so that I can still use the legalization factor.
For example, the following kernel has about half the throughput when
vectorized compared to unvectorized when compiled with SSE2. Before this
patch we would vectorize it.

  unsigned short A[1024];
  double B[1024];
  void f() {
    int i;
    for (i = 0; i < 1024; ++i) {
      B[i] = (double) A[i];
    }
  }

radar://13599001

llvm-svn: 179033
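To see how the new table entries combine with the legalization factor,
here is a minimal standalone sketch (illustrative only; the in-tree code
uses TLI->getTypeLegalizationCost and ConvertCostTableLookup, and the
names below are hypothetical stand-ins). It reproduces the cost the patch
assigns to a <16 x i16> to <16 x double> sitofp: the destination
legalizes to v2f64, the source splits into two v8i16 halves, so the
result is 2 * (8*10) = 160, matching the new sitofp.ll expectation.

  #include <cstdio>

  enum ISDOp { SINT_TO_FP, UINT_TO_FP };
  enum SimpleVT { v2f64, v8i16 };

  struct ConvEntry { ISDOp Op; SimpleVT Dst; SimpleVT Src; unsigned Cost; };

  // One representative entry from the SSE2 table in the patch.
  static const ConvEntry SSE2ConvTbl[] = {
    { SINT_TO_FP, v2f64, v8i16, 8 * 10 },
  };

  // Mirrors "LTSrc.first * SSE2ConvTbl[Idx].Cost": the per-legal-op cost
  // is scaled by how many pieces legalization splits the source into.
  unsigned getCastCost(ISDOp Op, SimpleVT LegalDst, SimpleVT LegalSrc,
                       unsigned NumSrcSplits) {
    for (const ConvEntry &E : SSE2ConvTbl)
      if (E.Op == Op && E.Dst == LegalDst && E.Src == LegalSrc)
        return NumSrcSplits * E.Cost;
    return 1; // the real code falls through to the generic cost instead
  }

  int main() {
    // <16 x i16> -> <16 x double> on SSE2: source splits into 2 x v8i16.
    printf("cost = %u\n", getCastCost(SINT_TO_FP, v2f64, v8i16, 2)); // 160
  }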
Diffstat (limited to 'llvm')
-rw-r--r--  llvm/lib/Target/X86/X86TargetTransformInfo.cpp |  37
-rw-r--r--  llvm/test/Analysis/CostModel/X86/sitofp.ll     | 281
-rw-r--r--  llvm/test/Analysis/CostModel/X86/uitofp.ll     | 362
3 files changed, 677 insertions, 3 deletions
diff --git a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
index a98c6991192..488c2a42b2d 100644
--- a/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
+++ b/llvm/lib/Target/X86/X86TargetTransformInfo.cpp
@@ -334,12 +334,43 @@ unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
int ISD = TLI->InstructionOpcodeToISD(Opcode);
assert(ISD && "Invalid opcode");
+ std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
+ std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);
+
+ static const TypeConversionCostTblEntry<MVT> SSE2ConvTbl[] = {
+ // These are somewhat magic numbers justified by looking at the output of
+ // Intel's IACA, running some kernels and making sure when we take
+ // legalization into account the throughput will be overestimated.
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
+ { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
+ // There are faster sequences for float conversions.
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+ { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
+ { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
+ };
+
+ if (ST->hasSSE2() && !ST->hasAVX()) {
+ int Idx = ConvertCostTableLookup<MVT>(SSE2ConvTbl,
+ array_lengthof(SSE2ConvTbl),
+ ISD, LTDest.second, LTSrc.second);
+ if (Idx != -1)
+ return LTSrc.first * SSE2ConvTbl[Idx].Cost;
+ }
+
EVT SrcTy = TLI->getValueType(Src);
EVT DstTy = TLI->getValueType(Dst);
- if (!SrcTy.isSimple() || !DstTy.isSimple())
- return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
-
static const TypeConversionCostTblEntry<MVT> AVXConversionTbl[] = {
{ ISD::SIGN_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
{ ISD::ZERO_EXTEND, MVT::v8i32, MVT::v8i16, 1 },
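The two new tests below verify these costs through the cost-model
analysis printer; uitofp.ll additionally checks the SSE2 codegen sequence
(movapd of a constant, subpd, addpd). Mirroring the RUN lines, the
expectations can be reproduced with, e.g.:

  opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < sitofp.ll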
diff --git a/llvm/test/Analysis/CostModel/X86/sitofp.ll b/llvm/test/Analysis/CostModel/X86/sitofp.ll
new file mode 100644
index 00000000000..338d9741652
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/sitofp.ll
@@ -0,0 +1,281 @@
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < %s | FileCheck --check-prefix=SSE2 %s
+
+define <2 x double> @sitofpv2i8v2double(<2 x i8> %a) {
+ ; SSE2: sitofpv2i8v2double
+ ; SSE2: cost of 20 {{.*}} sitofp
+ %1 = sitofp <2 x i8> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i8v4double(<4 x i8> %a) {
+ ; SSE2: sitofpv4i8v4double
+ ; SSE2: cost of 40 {{.*}} sitofp
+ %1 = sitofp <4 x i8> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i8v8double(<8 x i8> %a) {
+ ; SSE2: sitofpv8i8v8double
+ ; SSE2: cost of 80 {{.*}} sitofp
+  %1 = sitofp <8 x i8> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i8v16double(<16 x i8> %a) {
+ ; SSE2: sitofpv16i8v16double
+ ; SSE2: cost of 160 {{.*}} sitofp
+ %1 = sitofp <16 x i8> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i8v32double(<32 x i8> %a) {
+ ; SSE2: sitofpv32i8v32double
+ ; SSE2: cost of 320 {{.*}} sitofp
+ %1 = sitofp <32 x i8> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i16v2double(<2 x i16> %a) {
+ ; SSE2: sitofpv2i16v2double
+ ; SSE2: cost of 20 {{.*}} sitofp
+ %1 = sitofp <2 x i16> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i16v4double(<4 x i16> %a) {
+ ; SSE2: sitofpv4i16v4double
+ ; SSE2: cost of 40 {{.*}} sitofp
+ %1 = sitofp <4 x i16> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i16v8double(<8 x i16> %a) {
+ ; SSE2: sitofpv8i16v8double
+ ; SSE2: cost of 80 {{.*}} sitofp
+ %1 = sitofp <8 x i16> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i16v16double(<16 x i16> %a) {
+ ; SSE2: sitofpv16i16v16double
+ ; SSE2: cost of 160 {{.*}} sitofp
+ %1 = sitofp <16 x i16> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i16v32double(<32 x i16> %a) {
+ ; SSE2: sitofpv32i16v32double
+ ; SSE2: cost of 320 {{.*}} sitofp
+ %1 = sitofp <32 x i16> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i32v2double(<2 x i32> %a) {
+ ; SSE2: sitofpv2i32v2double
+ ; SSE2: cost of 20 {{.*}} sitofp
+ %1 = sitofp <2 x i32> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i32v4double(<4 x i32> %a) {
+ ; SSE2: sitofpv4i32v4double
+ ; SSE2: cost of 40 {{.*}} sitofp
+ %1 = sitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i32v8double(<8 x i32> %a) {
+ ; SSE2: sitofpv8i32v8double
+ ; SSE2: cost of 80 {{.*}} sitofp
+ %1 = sitofp <8 x i32> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i32v16double(<16 x i32> %a) {
+ ; SSE2: sitofpv16i32v16double
+ ; SSE2: cost of 160 {{.*}} sitofp
+ %1 = sitofp <16 x i32> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i32v32double(<32 x i32> %a) {
+ ; SSE2: sitofpv32i32v32double
+ ; SSE2: cost of 320 {{.*}} sitofp
+ %1 = sitofp <32 x i32> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @sitofpv2i64v2double(<2 x i64> %a) {
+ ; SSE2: sitofpv2i64v2double
+ ; SSE2: cost of 20 {{.*}} sitofp
+ %1 = sitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @sitofpv4i64v4double(<4 x i64> %a) {
+ ; SSE2: sitofpv4i64v4double
+ ; SSE2: cost of 40 {{.*}} sitofp
+ %1 = sitofp <4 x i64> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @sitofpv8i64v8double(<8 x i64> %a) {
+  ; SSE2: sitofpv8i64v8double
+  ; SSE2: cost of 80 {{.*}} sitofp
+  %1 = sitofp <8 x i64> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @sitofpv16i64v16double(<16 x i64> %a) {
+ ; SSE2: sitofpv16i64v16double
+ ; SSE2: cost of 160 {{.*}} sitofp
+ %1 = sitofp <16 x i64> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @sitofpv32i64v32double(<32 x i64> %a) {
+ ; SSE2: sitofpv32i64v32double
+ ; SSE2: cost of 320 {{.*}} sitofp
+ %1 = sitofp <32 x i64> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x float> @sitofpv2i8v2float(<2 x i8> %a) {
+ ; SSE2: sitofpv2i8v2float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <2 x i8> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i8v4float(<4 x i8> %a) {
+ ; SSE2: sitofpv4i8v4float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <4 x i8> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i8v8float(<8 x i8> %a) {
+ ; SSE2: sitofpv8i8v8float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <8 x i8> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i8v16float(<16 x i8> %a) {
+ ; SSE2: sitofpv16i8v16float
+ ; SSE2: cost of 8 {{.*}} sitofp
+ %1 = sitofp <16 x i8> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i8v32float(<32 x i8> %a) {
+ ; SSE2: sitofpv32i8v32float
+ ; SSE2: cost of 16 {{.*}} sitofp
+ %1 = sitofp <32 x i8> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i16v2float(<2 x i16> %a) {
+ ; SSE2: sitofpv2i16v2float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <2 x i16> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i16v4float(<4 x i16> %a) {
+ ; SSE2: sitofpv4i16v4float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <4 x i16> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i16v8float(<8 x i16> %a) {
+ ; SSE2: sitofpv8i16v8float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <8 x i16> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i16v16float(<16 x i16> %a) {
+ ; SSE2: sitofpv16i16v16float
+ ; SSE2: cost of 30 {{.*}} sitofp
+ %1 = sitofp <16 x i16> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i16v32float(<32 x i16> %a) {
+ ; SSE2: sitofpv32i16v32float
+ ; SSE2: cost of 60 {{.*}} sitofp
+ %1 = sitofp <32 x i16> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i32v2float(<2 x i32> %a) {
+ ; SSE2: sitofpv2i32v2float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i32v4float(<4 x i32> %a) {
+ ; SSE2: sitofpv4i32v4float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i32v8float(<8 x i32> %a) {
+ ; SSE2: sitofpv8i32v8float
+ ; SSE2: cost of 30 {{.*}} sitofp
+ %1 = sitofp <8 x i32> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i32v16float(<16 x i32> %a) {
+ ; SSE2: sitofpv16i32v16float
+ ; SSE2: cost of 60 {{.*}} sitofp
+ %1 = sitofp <16 x i32> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i32v32float(<32 x i32> %a) {
+ ; SSE2: sitofpv32i32v32float
+ ; SSE2: cost of 120 {{.*}} sitofp
+ %1 = sitofp <32 x i32> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @sitofpv2i64v2float(<2 x i64> %a) {
+ ; SSE2: sitofpv2i64v2float
+ ; SSE2: cost of 15 {{.*}} sitofp
+ %1 = sitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @sitofpv4i64v4float(<4 x i64> %a) {
+ ; SSE2: sitofpv4i64v4float
+ ; SSE2: cost of 30 {{.*}} sitofp
+ %1 = sitofp <4 x i64> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @sitofpv8i64v8float(<8 x i64> %a) {
+ ; SSE2: sitofpv8i64v8float
+ ; SSE2: cost of 60 {{.*}} sitofp
+ %1 = sitofp <8 x i64> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @sitofpv16i64v16float(<16 x i64> %a) {
+ ; SSE2: sitofpv16i64v16float
+ ; SSE2: cost of 120 {{.*}} sitofp
+ %1 = sitofp <16 x i64> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @sitofpv32i64v32float(<32 x i64> %a) {
+ ; SSE2: sitofpv32i64v32float
+ ; SSE2: cost of 240 {{.*}} sitofp
+ %1 = sitofp <32 x i64> %a to <32 x float>
+ ret <32 x float> %1
+}
diff --git a/llvm/test/Analysis/CostModel/X86/uitofp.ll b/llvm/test/Analysis/CostModel/X86/uitofp.ll
new file mode 100644
index 00000000000..aed3f2a67bf
--- /dev/null
+++ b/llvm/test/Analysis/CostModel/X86/uitofp.ll
@@ -0,0 +1,362 @@
+; RUN: llc -mtriple=x86_64-apple-darwin -mcpu=core2 < %s | FileCheck --check-prefix=SSE2-CODEGEN %s
+; RUN: opt -mtriple=x86_64-apple-darwin -mcpu=core2 -cost-model -analyze < %s | FileCheck --check-prefix=SSE2 %s
+
+define <2 x double> @uitofpv2i8v2double(<2 x i8> %a) {
+ ; SSE2: uitofpv2i8v2double
+ ; SSE2: cost of 20 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv2i8v2double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <2 x i8> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i8v4double(<4 x i8> %a) {
+ ; SSE2: uitofpv4i8v4double
+ ; SSE2: cost of 40 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv4i8v4double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <4 x i8> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i8v8double(<8 x i8> %a) {
+ ; SSE2: uitofpv8i8v8double
+ ; SSE2: cost of 80 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv8i8v8double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+  %1 = uitofp <8 x i8> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i8v16double(<16 x i8> %a) {
+ ; SSE2: uitofpv16i8v16double
+ ; SSE2: cost of 160 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv16i8v16double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <16 x i8> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i8v32double(<32 x i8> %a) {
+ ; SSE2: uitofpv32i8v32double
+ ; SSE2: cost of 320 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv32i8v32double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <32 x i8> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i16v2double(<2 x i16> %a) {
+ ; SSE2: uitofpv2i16v2double
+ ; SSE2: cost of 20 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv2i16v2double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <2 x i16> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i16v4double(<4 x i16> %a) {
+ ; SSE2: uitofpv4i16v4double
+ ; SSE2: cost of 40 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv4i16v4double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <4 x i16> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i16v8double(<8 x i16> %a) {
+ ; SSE2: uitofpv8i16v8double
+ ; SSE2: cost of 80 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv8i16v8double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <8 x i16> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i16v16double(<16 x i16> %a) {
+ ; SSE2: uitofpv16i16v16double
+ ; SSE2: cost of 160 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv16i16v16double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <16 x i16> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i16v32double(<32 x i16> %a) {
+ ; SSE2: uitofpv32i16v32double
+ ; SSE2: cost of 320 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv32i16v32double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <32 x i16> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i32v2double(<2 x i32> %a) {
+ ; SSE2: uitofpv2i32v2double
+ ; SSE2: cost of 20 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv2i32v2double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <2 x i32> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i32v4double(<4 x i32> %a) {
+ ; SSE2: uitofpv4i32v4double
+ ; SSE2: cost of 40 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv4i32v4double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <4 x i32> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i32v8double(<8 x i32> %a) {
+ ; SSE2: uitofpv8i32v8double
+ ; SSE2: cost of 80 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv8i32v8double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <8 x i32> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i32v16double(<16 x i32> %a) {
+ ; SSE2: uitofpv16i32v16double
+ ; SSE2: cost of 160 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv16i32v16double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <16 x i32> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i32v32double(<32 x i32> %a) {
+ ; SSE2: uitofpv32i32v32double
+ ; SSE2: cost of 320 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv32i32v32double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <32 x i32> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x double> @uitofpv2i64v2double(<2 x i64> %a) {
+ ; SSE2: uitofpv2i64v2double
+ ; SSE2: cost of 20 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv2i64v2double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <2 x i64> %a to <2 x double>
+ ret <2 x double> %1
+}
+
+define <4 x double> @uitofpv4i64v4double(<4 x i64> %a) {
+ ; SSE2: uitofpv4i64v4double
+ ; SSE2: cost of 40 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv4i64v4double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <4 x i64> %a to <4 x double>
+ ret <4 x double> %1
+}
+
+define <8 x double> @uitofpv8i64v8double(<8 x i64> %a) {
+  ; SSE2: uitofpv8i64v8double
+  ; SSE2: cost of 80 {{.*}} uitofp
+  ; SSE2-CODEGEN: uitofpv8i64v8double
+  ; SSE2-CODEGEN: movapd LCPI
+  ; SSE2-CODEGEN: subpd
+  ; SSE2-CODEGEN: addpd
+  %1 = uitofp <8 x i64> %a to <8 x double>
+ ret <8 x double> %1
+}
+
+define <16 x double> @uitofpv16i64v16double(<16 x i64> %a) {
+ ; SSE2: uitofpv16i64v16double
+ ; SSE2: cost of 160 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv16i64v16double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <16 x i64> %a to <16 x double>
+ ret <16 x double> %1
+}
+
+define <32 x double> @uitofpv32i64v32double(<32 x i64> %a) {
+ ; SSE2: uitofpv32i64v32double
+ ; SSE2: cost of 320 {{.*}} uitofp
+ ; SSE2-CODEGEN: uitofpv32i64v32double
+ ; SSE2-CODEGEN: movapd LCPI
+ ; SSE2-CODEGEN: subpd
+ ; SSE2-CODEGEN: addpd
+ %1 = uitofp <32 x i64> %a to <32 x double>
+ ret <32 x double> %1
+}
+
+define <2 x float> @uitofpv2i8v2float(<2 x i8> %a) {
+ ; SSE2: uitofpv2i8v2float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <2 x i8> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i8v4float(<4 x i8> %a) {
+ ; SSE2: uitofpv4i8v4float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <4 x i8> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i8v8float(<8 x i8> %a) {
+ ; SSE2: uitofpv8i8v8float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <8 x i8> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i8v16float(<16 x i8> %a) {
+ ; SSE2: uitofpv16i8v16float
+ ; SSE2: cost of 8 {{.*}} uitofp
+ %1 = uitofp <16 x i8> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i8v32float(<32 x i8> %a) {
+ ; SSE2: uitofpv32i8v32float
+ ; SSE2: cost of 16 {{.*}} uitofp
+ %1 = uitofp <32 x i8> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i16v2float(<2 x i16> %a) {
+ ; SSE2: uitofpv2i16v2float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <2 x i16> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i16v4float(<4 x i16> %a) {
+ ; SSE2: uitofpv4i16v4float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <4 x i16> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i16v8float(<8 x i16> %a) {
+ ; SSE2: uitofpv8i16v8float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <8 x i16> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i16v16float(<16 x i16> %a) {
+ ; SSE2: uitofpv16i16v16float
+ ; SSE2: cost of 30 {{.*}} uitofp
+ %1 = uitofp <16 x i16> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i16v32float(<32 x i16> %a) {
+ ; SSE2: uitofpv32i16v32float
+ ; SSE2: cost of 60 {{.*}} uitofp
+ %1 = uitofp <32 x i16> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i32v2float(<2 x i32> %a) {
+ ; SSE2: uitofpv2i32v2float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <2 x i32> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i32v4float(<4 x i32> %a) {
+ ; SSE2: uitofpv4i32v4float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <4 x i32> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i32v8float(<8 x i32> %a) {
+ ; SSE2: uitofpv8i32v8float
+ ; SSE2: cost of 30 {{.*}} uitofp
+ %1 = uitofp <8 x i32> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i32v16float(<16 x i32> %a) {
+ ; SSE2: uitofpv16i32v16float
+ ; SSE2: cost of 60 {{.*}} uitofp
+ %1 = uitofp <16 x i32> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i32v32float(<32 x i32> %a) {
+ ; SSE2: uitofpv32i32v32float
+ ; SSE2: cost of 120 {{.*}} uitofp
+ %1 = uitofp <32 x i32> %a to <32 x float>
+ ret <32 x float> %1
+}
+
+define <2 x float> @uitofpv2i64v2float(<2 x i64> %a) {
+ ; SSE2: uitofpv2i64v2float
+ ; SSE2: cost of 15 {{.*}} uitofp
+ %1 = uitofp <2 x i64> %a to <2 x float>
+ ret <2 x float> %1
+}
+
+define <4 x float> @uitofpv4i64v4float(<4 x i64> %a) {
+ ; SSE2: uitofpv4i64v4float
+ ; SSE2: cost of 30 {{.*}} uitofp
+ %1 = uitofp <4 x i64> %a to <4 x float>
+ ret <4 x float> %1
+}
+
+define <8 x float> @uitofpv8i64v8float(<8 x i64> %a) {
+ ; SSE2: uitofpv8i64v8float
+ ; SSE2: cost of 60 {{.*}} uitofp
+ %1 = uitofp <8 x i64> %a to <8 x float>
+ ret <8 x float> %1
+}
+
+define <16 x float> @uitofpv16i64v16float(<16 x i64> %a) {
+ ; SSE2: uitofpv16i64v16float
+ ; SSE2: cost of 120 {{.*}} uitofp
+ %1 = uitofp <16 x i64> %a to <16 x float>
+ ret <16 x float> %1
+}
+
+define <32 x float> @uitofpv32i64v32float(<32 x i64> %a) {
+ ; SSE2: uitofpv32i64v32float
+ ; SSE2: cost of 240 {{.*}} uitofp
+ %1 = uitofp <32 x i64> %a to <32 x float>
+ ret <32 x float> %1
+}