| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-12-16 22:30:10 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2014-12-16 22:30:10 +0000 |
| commit | bf1e0790054b49ad67b78468a8e201bb511982bb (patch) | |
| tree | 331af2463983fa562653c9a445b908286799b3e7 | |
| parent | 9573a9cf9d1728975ac8a0791bd75f32f8cae4e0 (diff) | |
[X86][SSE] Vector double -> float conversion memory folding (cvtpd2ps)
Added the missing memory folding relationships for the (V)CVTPD2PS instructions - we can safely fold these for stack reloads.
Differential Revision: http://reviews.llvm.org/D6663
llvm-svn: 224383
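For context on what the table entries below mean: each { RegOp, MemOp, Flags } triple tells X86InstrInfo that the register-only form of an instruction may be rewritten into its memory-operand form when a source register has been spilled, with flags such as TB_ALIGN_16 recording that the memory operand must be 16-byte aligned (the non-VEX SSE case). The sketch below is a minimal, hypothetical illustration of that lookup, not LLVM's actual implementation; the names FoldEntry, FoldTable, foldLoadOpcode and kAlign16 are invented for the example.

```cpp
// Minimal sketch of a reg->mem folding-table lookup, assuming a simplified
// model of the { RegOp, MemOp, Flags } triples added by this patch.
// All names here are hypothetical.
#include <cstdint>
#include <unordered_map>

struct FoldEntry {
  unsigned MemOpcode;  // memory-operand form, e.g. CVTPD2PSrm for CVTPD2PSrr
  uint16_t Flags;      // folding constraints, e.g. an alignment requirement
};

constexpr uint16_t kAlign16 = 1 << 0;  // stand-in for TB_ALIGN_16

// Map from register-form opcode to its foldable memory form.
using FoldTable = std::unordered_map<unsigned, FoldEntry>;

// Returns the memory-form opcode to use when folding a stack reload into
// RegOpcode, or 0 if no legal fold exists (unknown opcode, or the spill
// slot is not aligned strictly enough for the SSE form).
unsigned foldLoadOpcode(const FoldTable &Table, unsigned RegOpcode,
                        unsigned SpillSlotAlign) {
  auto It = Table.find(RegOpcode);
  if (It == Table.end())
    return 0;  // no reg->mem relationship registered for this opcode
  if ((It->second.Flags & kAlign16) && SpillSlotAlign < 16)
    return 0;  // alignment requirement not met, keep the explicit reload
  return It->second.MemOpcode;
}
```

With an entry such as { X86::CVTPD2PSrr, X86::CVTPD2PSrm, TB_ALIGN_16 } registered, a reload-then-convert sequence can be replaced by a single conversion that reads its source straight from the spill slot, which is what the new test below checks for the 256-bit AVX form (vcvtpd2psy with a "32-byte Folded Reload" comment).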
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 3 |
| -rw-r--r-- | llvm/test/CodeGen/X86/avx1-stack-reload-folding.ll | 15 |

2 files changed, 18 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index b31d086f867..d745ba68bb2 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -450,6 +450,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::CVTSS2SIrr, X86::CVTSS2SIrm, 0 },
{ X86::CVTDQ2PSrr, X86::CVTDQ2PSrm, TB_ALIGN_16 },
{ X86::CVTPD2DQrr, X86::CVTPD2DQrm, TB_ALIGN_16 },
+ { X86::CVTPD2PSrr, X86::CVTPD2PSrm, TB_ALIGN_16 },
{ X86::CVTPS2DQrr, X86::CVTPS2DQrm, TB_ALIGN_16 },
{ X86::CVTTPD2DQrr, X86::CVTTPD2DQrm, TB_ALIGN_16 },
{ X86::CVTTPS2DQrr, X86::CVTTPS2DQrm, TB_ALIGN_16 },
@@ -531,6 +532,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
{ X86::VCVTSS2SIrr, X86::VCVTSS2SIrm, 0 },
{ X86::VCVTDQ2PSrr, X86::VCVTDQ2PSrm, 0 },
{ X86::VCVTPD2DQrr, X86::VCVTPD2DQXrm, 0 },
+ { X86::VCVTPD2PSrr, X86::VCVTPD2PSXrm, 0 },
{ X86::VCVTPS2DQrr, X86::VCVTPS2DQrm, 0 },
{ X86::VCVTTPD2DQrr, X86::VCVTTPD2DQXrm, 0 },
{ X86::VCVTTPS2DQrr, X86::VCVTTPS2DQrm, 0 },
@@ -569,6 +571,7 @@ X86InstrInfo::X86InstrInfo(X86Subtarget &STI)
// AVX 256-bit foldable instructions
{ X86::VCVTDQ2PSYrr, X86::VCVTDQ2PSYrm, 0 },
{ X86::VCVTPD2DQYrr, X86::VCVTPD2DQYrm, 0 },
+ { X86::VCVTPD2PSYrr, X86::VCVTPD2PSYrm, 0 },
{ X86::VCVTPS2DQYrr, X86::VCVTPS2DQYrm, 0 },
{ X86::VCVTTPD2DQYrr, X86::VCVTTPD2DQYrm, 0 },
{ X86::VCVTTPS2DQYrr, X86::VCVTTPS2DQYrm, 0 },
diff --git a/llvm/test/CodeGen/X86/avx1-stack-reload-folding.ll b/llvm/test/CodeGen/X86/avx1-stack-reload-folding.ll
index 2e669b0fe12..480e3dd6306 100644
--- a/llvm/test/CodeGen/X86/avx1-stack-reload-folding.ll
+++ b/llvm/test/CodeGen/X86/avx1-stack-reload-folding.ll
@@ -37,6 +37,21 @@ define void @stack_fold_cvtdq2ps(<128 x i32>* %a, <128 x i32>* %b, <128 x float>
ret void
}
+define void @stack_fold_cvtpd2ps(<128 x double>* %a, <128 x double>* %b, <128 x float>* %c) {
+ ;CHECK-LABEL: stack_fold_cvtpd2ps
+ ;CHECK: vcvtpd2psy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload
+
+ %1 = load <128 x double>* %a
+ %2 = load <128 x double>* %b
+ %3 = fadd <128 x double> %1, %2
+ %4 = fsub <128 x double> %1, %2
+ %5 = fptrunc <128 x double> %3 to <128 x float>
+ %6 = fptrunc <128 x double> %4 to <128 x float>
+ %7 = fadd <128 x float> %5, %6
+ store <128 x float> %7, <128 x float>* %c
+ ret void
+}
+
define void @stack_fold_cvttpd2dq(<64 x double>* %a, <64 x double>* %b, <64 x i32>* %c) #0 {
;CHECK-LABEL: stack_fold_cvttpd2dq
;CHECK: vcvttpd2dqy {{[0-9]*}}(%rsp), {{%xmm[0-9][0-9]*}} {{.*#+}} 32-byte Folded Reload