author    Craig Topper <craig.topper@intel.com>    2019-06-27 05:51:56 +0000
committer Craig Topper <craig.topper@intel.com>    2019-06-27 05:51:56 +0000
commit    9ea5a322518ff844c03735eed8c7627567ef98e6 (patch)
tree      108ff041830ac203652412c8900f51bbe86dce98
parent    9f69052394a43b278ed1220c59fd5e36290f5d15 (diff)
[X86] Teach selectScalarSSELoad to not narrow volatile loads.
llvm-svn: 364498
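
"Narrowing" here is the isel fold that turns a full 128-bit vector load feeding a scalar SSE op into the instruction's 32- or 64-bit memory operand. For an ordinary load that merely shrinks the access; for a volatile load it would change the width of a memory access the program explicitly asked for, so the fold has to be skipped. A minimal IR sketch of the contrast (function names are illustrative; the foldable case mirrors the existing @sqrtss_full_size test in the diff below):

; Non-volatile: the 16-byte load may be folded and narrowed to the
; 4 bytes sqrtss actually reads:
;   sqrtss (%rdi), %xmm0
define <4 x float> @fold_ok(<4 x float>* %a) optsize {
  %ld = load <4 x float>, <4 x float>* %a
  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
  ret <4 x float> %res
}

; Volatile: the 16-byte access must be performed as written, so the
; load stays a separate instruction:
;   movaps (%rdi), %xmm0
;   sqrtss %xmm0, %xmm0
define <4 x float> @no_fold(<4 x float>* %a) optsize {
  %ld = load volatile <4 x float>, <4 x float>* %a
  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
  ret <4 x float> %res
}

declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>)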
-rw-r--r--  llvm/lib/Target/X86/X86ISelDAGToDAG.cpp  12
-rw-r--r--  llvm/test/CodeGen/X86/fold-load-unops.ll  34
2 files changed, 41 insertions, 5 deletions
diff --git a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
index 9b7b1cfab76..544a3fa922d 100644
--- a/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
+++ b/llvm/lib/Target/X86/X86ISelDAGToDAG.cpp
@@ -2283,12 +2283,14 @@ bool X86DAGToDAGISel::selectScalarSSELoad(SDNode *Root, SDNode *Parent,
   if (!hasSingleUsesFromRoot(Root, Parent))
     return false;
 
-  // We can allow a full vector load here since narrowing a load is ok.
+  // We can allow a full vector load here since narrowing a load is ok unless
+  // it's volatile.
   if (ISD::isNON_EXTLoad(N.getNode())) {
-    PatternNodeWithChain = N;
-    if (IsProfitableToFold(PatternNodeWithChain, N.getNode(), Root) &&
-        IsLegalToFold(PatternNodeWithChain, Parent, Root, OptLevel)) {
-      LoadSDNode *LD = cast<LoadSDNode>(PatternNodeWithChain);
+    LoadSDNode *LD = cast<LoadSDNode>(N);
+    if (!LD->isVolatile() &&
+        IsProfitableToFold(N, LD, Root) &&
+        IsLegalToFold(N, Parent, Root, OptLevel)) {
+      PatternNodeWithChain = N;
       return selectAddr(LD, LD->getBasePtr(), Base, Scale, Index, Disp,
                         Segment);
     }
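
Note the restructuring in the hunk above: the load is cast to LoadSDNode before the profitability and legality checks so that isVolatile() can gate the fold, and PatternNodeWithChain is now assigned only once the fold is known to proceed.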
diff --git a/llvm/test/CodeGen/X86/fold-load-unops.ll b/llvm/test/CodeGen/X86/fold-load-unops.ll
index c77c6adf2e8..aa6bc720fa9 100644
--- a/llvm/test/CodeGen/X86/fold-load-unops.ll
+++ b/llvm/test/CodeGen/X86/fold-load-unops.ll
@@ -179,6 +179,23 @@ define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
   ret <4 x float> %res
 }
 
+define <4 x float> @sqrtss_full_size_volatile(<4 x float>* %a) optsize{
+; SSE-LABEL: sqrtss_full_size_volatile:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movaps (%rdi), %xmm0
+; SSE-NEXT:    sqrtss %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtss_full_size_volatile:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovaps (%rdi), %xmm0
+; AVX-NEXT:    vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load volatile <4 x float>, <4 x float>* %a
+  %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
+  ret <4 x float> %res
+}
+
 define double @sqrtsd_size(double* %a) optsize {
 ; SSE-LABEL: sqrtsd_size:
 ; SSE:       # %bb.0:
@@ -213,6 +230,23 @@ define <2 x double> @sqrtsd_full_size(<2 x double>* %a) optsize {
   ret <2 x double> %res
 }
 
+define <2 x double> @sqrtsd_full_size_volatile(<2 x double>* %a) optsize {
+; SSE-LABEL: sqrtsd_full_size_volatile:
+; SSE:       # %bb.0:
+; SSE-NEXT:    movapd (%rdi), %xmm0
+; SSE-NEXT:    sqrtsd %xmm0, %xmm0
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: sqrtsd_full_size_volatile:
+; AVX:       # %bb.0:
+; AVX-NEXT:    vmovapd (%rdi), %xmm0
+; AVX-NEXT:    vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT:    retq
+  %ld = load volatile <2 x double>, <2 x double>* %a
+  %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ld)
+  ret <2 x double> %res
+}
+
 declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
 declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone