Add a "slow-pmulld" X86 subtarget feature, enable it for Silvermont, and teach
reduceVMULWidth to keep the pmullw+pmulhw expansion on such targets unless
optimizing for minimum size.

-rw-r--r--  llvm/lib/Target/X86/X86.td              |  3 +
-rw-r--r--  llvm/lib/Target/X86/X86ISelLowering.cpp | 13 ++-
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.cpp    |  4 +
-rw-r--r--  llvm/lib/Target/X86/X86Subtarget.h      |  5 +
-rw-r--r--  llvm/test/CodeGen/X86/slow-pmulld.ll    | 71 +++++
5 files changed, 93 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
index e8fbe82902a..dc18a59a30b 100644
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -99,6 +99,8 @@ def FeatureSlowBTMem : SubtargetFeature<"slow-bt-mem", "IsBTMemSlow", "true",
"Bit testing of memory is slow">;
def FeatureSlowSHLD : SubtargetFeature<"slow-shld", "IsSHLDSlow", "true",
"SHLD instruction is slow">;
+def FeatureSlowPMULLD : SubtargetFeature<"slow-pmulld", "IsPMULLDSlow", "true",
+ "PMULLD instruction is slow">;
// FIXME: This should not apply to CPUs that do not have SSE.
def FeatureSlowUAMem16 : SubtargetFeature<"slow-unaligned-mem-16",
"IsUAMem16Slow", "true",
@@ -403,6 +405,7 @@ class SilvermontProc<string Name> : ProcessorModel<Name, SLMModel, [
FeatureSlowLEA,
FeatureSlowIncDec,
FeatureSlowBTMem,
+ FeatureSlowPMULLD,
FeatureLAHFSAHF
]>;
def : SilvermontProc<"silvermont">;
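[Editorial note, not part of the patch: declaring the feature as a SubtargetFeature named "slow-pmulld" also makes it togglable from the llc command line independently of -mcpu. A hypothetical invocation to reproduce the slow lowering on an otherwise generic SSE4.1 target:

    llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1,+slow-pmulld test.ll -o -

Per the assert added in X86Subtarget.cpp below, +slow-pmulld must be paired with SSE4.1.]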
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index bac2c8e8145..eff9a7a92c0 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -29302,10 +29302,17 @@ static bool canReduceVMulWidth(SDNode *N, SelectionDAG &DAG, ShrinkMode &Mode) {
/// generate pmullw+pmulhuw for it (MULU16 mode).
static SDValue reduceVMULWidth(SDNode *N, SelectionDAG &DAG,
const X86Subtarget &Subtarget) {
- // pmulld is supported since SSE41. It is better to use pmulld
- // instead of pmullw+pmulhw.
+ // Check for legality
// pmullw/pmulhw are not supported by SSE.
- if (Subtarget.hasSSE41() || !Subtarget.hasSSE2())
+ if (!Subtarget.hasSSE2())
+ return SDValue();
+
+ // Check for profitability
+ // pmulld is supported since SSE41. It is better to use pmulld
+ // instead of pmullw+pmulhw, except for subtargets where pmulld is slower than
+ // the expansion.
+ bool OptForMinSize = DAG.getMachineFunction().getFunction()->optForMinSize();
+ if (Subtarget.hasSSE41() && (OptForMinSize || !Subtarget.isPMULLDSlow()))
return SDValue();
ShrinkMode Mode;
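[Editorial note, not part of the patch: the expansion kept on slow-pmulld targets rebuilds each 32-bit product from two 16-bit halves. pmullw produces the low 16 bits of each 16x16 product, pmulhw (or pmulhuw in the MULU16 mode mentioned above) the high 16 bits, and punpcklwd interleaves the halves into 32-bit lanes, as the CHECK32/CHECK64 sequences in the new test show. A minimal scalar sketch of one lane, assuming signed 16-bit operands (the MULS16 case):

    #include <cassert>
    #include <cstdint>

    // One lane of the expansion: rebuild the full 32-bit product of two
    // 16-bit operands from its low and high halves.
    int32_t mul_via_halves(int16_t a, int16_t b) {
      int32_t full = int32_t(a) * int32_t(b);        // what pmulld computes per lane
      uint16_t lo  = uint16_t(full);                 // pmullw: low 16 bits
      uint16_t hi  = uint16_t(uint32_t(full) >> 16); // pmulhw: high 16 bits
      return int32_t((uint32_t(hi) << 16) | lo);     // punpcklwd: interleave hi/lo
    }

    int main() {
      // 18778 is the multiplier used in the new test; 0..255 is the zext'd i8 range.
      for (int x = 0; x <= 255; ++x)
        assert(mul_via_halves(18778, int16_t(x)) == 18778 * x);
      return 0;
    }
]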
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
index ee37ea7df1c..f1c490ca6b9 100644
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -228,6 +228,9 @@ void X86Subtarget::initSubtargetFeatures(StringRef CPU, StringRef FS) {
else if (isTargetDarwin() || isTargetLinux() || isTargetSolaris() ||
isTargetKFreeBSD() || In64BitMode)
stackAlignment = 16;
+
+ assert((!isPMULLDSlow() || hasSSE41()) &&
+ "Feature Slow PMULLD can only be set on a subtarget with SSE4.1");
}
void X86Subtarget::initializeEnvironment() {
@@ -275,6 +278,7 @@ void X86Subtarget::initializeEnvironment() {
HasMWAITX = false;
HasMPX = false;
IsBTMemSlow = false;
+ IsPMULLDSlow = false;
IsSHLDSlow = false;
IsUAMem16Slow = false;
IsUAMem32Slow = false;
diff --git a/llvm/lib/Target/X86/X86Subtarget.h b/llvm/lib/Target/X86/X86Subtarget.h
index 6940d1e7ce5..92c16214aa4 100644
--- a/llvm/lib/Target/X86/X86Subtarget.h
+++ b/llvm/lib/Target/X86/X86Subtarget.h
@@ -178,6 +178,10 @@ protected:
/// True if SHLD instructions are slow.
bool IsSHLDSlow;
+ /// True if the PMULLD instruction is slow compared to PMULLW/PMULHW and
+ /// PMULUDQ.
+ bool IsPMULLDSlow;
+
/// True if unaligned memory accesses of 16-bytes are slow.
bool IsUAMem16Slow;
@@ -452,6 +456,7 @@ public:
bool hasMWAITX() const { return HasMWAITX; }
bool isBTMemSlow() const { return IsBTMemSlow; }
bool isSHLDSlow() const { return IsSHLDSlow; }
+ bool isPMULLDSlow() const { return IsPMULLDSlow; }
bool isUnalignedMem16Slow() const { return IsUAMem16Slow; }
bool isUnalignedMem32Slow() const { return IsUAMem32Slow; }
bool hasSSEUnalignedMem() const { return HasSSEUnalignedMem; }
diff --git a/llvm/test/CodeGen/X86/slow-pmulld.ll b/llvm/test/CodeGen/X86/slow-pmulld.ll
new file mode 100644
index 00000000000..ff6682090a2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/slow-pmulld.ll
@@ -0,0 +1,71 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=silvermont | FileCheck %s --check-prefix=CHECK64
+; RUN: llc < %s -mtriple=i386-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-32
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE4-64
+
+define <4 x i32> @foo(<4 x i8> %A) {
+; CHECK32-LABEL: foo:
+; CHECK32: # BB#0:
+; CHECK32-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK32-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK32-NEXT: movdqa %xmm0, %xmm2
+; CHECK32-NEXT: pmullw %xmm1, %xmm0
+; CHECK32-NEXT: pmulhw %xmm1, %xmm2
+; CHECK32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: foo:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0],zero,xmm0[4],zero,xmm0[8],zero,xmm0[12],zero,xmm0[u,u,u,u,u,u,u,u]
+; CHECK64-NEXT: movdqa {{.*#+}} xmm1 = <18778,18778,18778,18778,u,u,u,u>
+; CHECK64-NEXT: movdqa %xmm0, %xmm2
+; CHECK64-NEXT: pmullw %xmm1, %xmm0
+; CHECK64-NEXT: pmulhw %xmm1, %xmm2
+; CHECK64-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: foo:
+; SSE4-32: # BB#0:
+; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: foo:
+; SSE4-64: # BB#0:
+; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: retq
+ %z = zext <4 x i8> %A to <4 x i32>
+ %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+ ret <4 x i32> %m
+}
+
+define <4 x i32> @foo_os(<4 x i8> %A) minsize {
+; CHECK32-LABEL: foo_os:
+; CHECK32: # BB#0:
+; CHECK32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; CHECK32-NEXT: retl
+;
+; CHECK64-LABEL: foo_os:
+; CHECK64: # BB#0:
+; CHECK64-NEXT: pand {{.*}}(%rip), %xmm0
+; CHECK64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; CHECK64-NEXT: retq
+;
+; SSE4-32-LABEL: foo_os:
+; SSE4-32: # BB#0:
+; SSE4-32-NEXT: pand {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: pmulld {{\.LCPI.*}}, %xmm0
+; SSE4-32-NEXT: retl
+;
+; SSE4-64-LABEL: foo_os:
+; SSE4-64: # BB#0:
+; SSE4-64-NEXT: pand {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: pmulld {{.*}}(%rip), %xmm0
+; SSE4-64-NEXT: retq
+ %z = zext <4 x i8> %A to <4 x i32>
+ %m = mul nuw nsw <4 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778>
+ ret <4 x i32> %m
+}
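[Editorial note, not part of the patch: the test runs under lit, and its CHECK lines come from the script named in the NOTE at the top of the file. Illustrative invocations from an LLVM build tree (paths are assumptions):

    ./bin/llvm-lit -v ../llvm/test/CodeGen/X86/slow-pmulld.ll
    python ../llvm/utils/update_llc_test_checks.py ../llvm/test/CodeGen/X86/slow-pmulld.ll
]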