author	Craig Topper <craig.topper@intel.com>	2019-06-27 05:51:56 +0000
committer	Craig Topper <craig.topper@intel.com>	2019-06-27 05:51:56 +0000
commit	9ea5a322518ff844c03735eed8c7627567ef98e6 (patch)
tree	108ff041830ac203652412c8900f51bbe86dce98 /llvm/test/CodeGen/X86/fold-load-unops.ll
parent	9f69052394a43b278ed1220c59fd5e36290f5d15 (diff)
[X86] Teach selectScalarSSELoad to not narrow volatile loads.
llvm-svn: 364498
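
As a minimal illustrative sketch (not taken from the commit itself), the IR below shows the pattern the new tests exercise: a scalar SSE intrinsic whose vector operand comes from a volatile 128-bit load. Folding that load into the scalar instruction's memory operand would shrink the volatile access from 16 bytes to 4 bytes, so after this change selectScalarSSELoad keeps the full-width load and uses the register form of sqrtss, as the CHECK lines in the added tests show. The function name volatile_load_not_narrowed is illustrative only.

; Illustrative sketch, not part of the commit: the volatile 16-byte load
; must stay full width rather than being folded into sqrtss as a 4-byte
; memory operand.
define <4 x float> @volatile_load_not_narrowed(<4 x float>* %p) {
  %v = load volatile <4 x float>, <4 x float>* %p
  %r = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %v)
  ret <4 x float> %r
}
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone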
Diffstat (limited to 'llvm/test/CodeGen/X86/fold-load-unops.ll')
-rw-r--r--	llvm/test/CodeGen/X86/fold-load-unops.ll | 34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/fold-load-unops.ll b/llvm/test/CodeGen/X86/fold-load-unops.ll
index c77c6adf2e8..aa6bc720fa9 100644
--- a/llvm/test/CodeGen/X86/fold-load-unops.ll
+++ b/llvm/test/CodeGen/X86/fold-load-unops.ll
@@ -179,6 +179,23 @@ define <4 x float> @sqrtss_full_size(<4 x float>* %a) optsize{
ret <4 x float> %res
}
+define <4 x float> @sqrtss_full_size_volatile(<4 x float>* %a) optsize{
+; SSE-LABEL: sqrtss_full_size_volatile:
+; SSE: # %bb.0:
+; SSE-NEXT: movaps (%rdi), %xmm0
+; SSE-NEXT: sqrtss %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrtss_full_size_volatile:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovaps (%rdi), %xmm0
+; AVX-NEXT: vsqrtss %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %ld = load volatile <4 x float>, <4 x float>* %a
+ %res = tail call <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float> %ld)
+ ret <4 x float> %res
+}
+
define double @sqrtsd_size(double* %a) optsize {
; SSE-LABEL: sqrtsd_size:
; SSE: # %bb.0:
@@ -213,6 +230,23 @@ define <2 x double> @sqrtsd_full_size(<2 x double>* %a) optsize {
ret <2 x double> %res
}
+define <2 x double> @sqrtsd_full_size_volatile(<2 x double>* %a) optsize {
+; SSE-LABEL: sqrtsd_full_size_volatile:
+; SSE: # %bb.0:
+; SSE-NEXT: movapd (%rdi), %xmm0
+; SSE-NEXT: sqrtsd %xmm0, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: sqrtsd_full_size_volatile:
+; AVX: # %bb.0:
+; AVX-NEXT: vmovapd (%rdi), %xmm0
+; AVX-NEXT: vsqrtsd %xmm0, %xmm0, %xmm0
+; AVX-NEXT: retq
+ %ld = load volatile <2 x double>, <2 x double>* %a
+ %res = tail call <2 x double> @llvm.x86.sse2.sqrt.sd(<2 x double> %ld)
+ ret <2 x double> %res
+}
+
declare <4 x float> @llvm.x86.sse.rcp.ss(<4 x float>) nounwind readnone
declare <4 x float> @llvm.x86.sse.rsqrt.ss(<4 x float>) nounwind readnone
declare <4 x float> @llvm.x86.sse.sqrt.ss(<4 x float>) nounwind readnone