author    Craig Topper <craig.topper@gmail.com>  2012-06-01 06:07:48 +0000
committer Craig Topper <craig.topper@gmail.com>  2012-06-01 06:07:48 +0000
commit    00649d511164b31ac976db90eb310c2776c1c060 (patch)
tree      bcb2c02a5f2e9d0e2daea4e1ebca9949c5cd1905 /llvm/lib
parent    2e127b5274a3b2f6d51bbd89407fbd202d064349 (diff)
Remove fadd(fmul) patterns for FMA3. This needs to be implemented by paying attention to FP_CONTRACT and matching @llvm.fma, which is not available yet. This will at least allow us to enable intrinsic use, though.
llvm-svn: 157804
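
For context, a minimal sketch of the direction the commit message points at: once @llvm.fma is available and lowered to the ISD::FMA selection-DAG node (exposed to TableGen patterns as the fma operator), the FMA3 forms could match that node directly, leaving the FP_CONTRACT decision to whatever forms the node. This is an illustrative sketch only, not part of this commit:

// Hypothetical pattern, assuming an ISD::FMA node reaches instruction selection.
// FMA231: dst = src2*src3 + src1, with src1 tied to dst.
def : Pat<(v4f64 (fma VR256:$src2, VR256:$src3, VR256:$src1)),
          (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;

The memory forms would follow the same shape, folding a memopv4f64 load into the third multiplicand the way the removed patterns below do.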
Diffstat (limited to 'llvm/lib')
-rw-r--r--  llvm/lib/Target/X86/X86InstrFMA.td | 212
1 file changed, 0 insertions, 212 deletions
diff --git a/llvm/lib/Target/X86/X86InstrFMA.td b/llvm/lib/Target/X86/X86InstrFMA.td
index 1b7f0949df7..3dd642f2cff 100644
--- a/llvm/lib/Target/X86/X86InstrFMA.td
+++ b/llvm/lib/Target/X86/X86InstrFMA.td
@@ -113,162 +113,6 @@ let ExeDomain = SSEPackedDouble in {
memopv4f64, int_x86_fma4_vfnmsub_pd, int_x86_fma4_vfnmsub_pd_256>, VEX_W;
}
-let Predicates = [HasFMA3], AddedComplexity = 20 in {
-//------------
-// FP double precision ADD - 256
-//------------
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v4f64 (fadd (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
- (VFMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v4f64 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
- (VFMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-
-//------------
-// FP double precision ADD - 128
-//------------
-
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v2f64 (fadd (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
- (VFMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v2f64 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
- (VFMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP double precision SUB - 256
-//------------
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v4f64 (fsub (fmul VR256:$src2, (memopv4f64 addr:$src3)), VR256:$src1)),
- (VFMSUBPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v4f64 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
- (VFMSUBPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-
-//------------
-// FP double precision SUB - 128
-//------------
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v2f64 (fsub (fmul VR128:$src2, (memopv2f64 addr:$src3)), VR128:$src1)),
- (VFMSUBPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v2f64 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
- (VFMSUBPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP double precision FNMADD - 256
-//------------
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, (memopv4f64 addr:$src3)))),
- (VFNMADDPDr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v4f64 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
- (VFNMADDPDr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP double precision FNMADD - 128
-//------------
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, (memopv2f64 addr:$src3)))),
- (VFNMADDPDr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v2f64 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
- (VFNMADDPDr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision ADD - 256
-//------------
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
- (VFMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-// FMA213 : src1 = src2*src1 + src3
-def : Pat<(v8f32 (fadd (fmul VR256:$src1, VR256:$src2), (memopv8f32 addr:$src3))),
- (VFMADDPSr213mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 + src1
-def : Pat<(v8f32 (fadd (fmul (memopv8f32 addr:$src3), VR256:$src2), VR256:$src1)),
- (VFMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA213: src1 = src2*src1 + src3
-def : Pat<(v8f32 (fadd (fmul VR256:$src2, VR256:$src1), VR256:$src3)),
- (VFMADDPSr213rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision ADD - 128
-//------------
-
-// FMA231 : src1 = src2*src3 + src1
-def : Pat<(v4f32 (fadd (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
- (VFMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = src2*src3 + src1
-def : Pat<(v4f32 (fadd (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
- (VFMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision SUB - 256
-//------------
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v8f32 (fsub (fmul VR256:$src2, (memopv8f32 addr:$src3)), VR256:$src1)),
- (VFMSUBPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = src2*src3 - src1
-def : Pat<(v8f32 (fsub (fmul VR256:$src2, VR256:$src3), VR256:$src1)),
- (VFMSUBPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision SUB - 128
-//------------
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub (fmul VR128:$src2, (memopv4f32 addr:$src3)), VR128:$src1)),
- (VFMSUBPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = src2*src3 - src1
-def : Pat<(v4f32 (fsub (fmul VR128:$src2, VR128:$src3), VR128:$src1)),
- (VFMSUBPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-//------------
-// FP single precision FNMADD - 256
-//------------
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, (memopv8f32 addr:$src3)))),
- (VFNMADDPSr231mY VR256:$src1, VR256:$src2, addr:$src3)>;
-
-// FMA231: src1 = - src2*src3 + src1
-def : Pat<(v8f32 (fsub VR256:$src1, (fmul VR256:$src2, VR256:$src3))),
- (VFNMADDPSr231rY VR256:$src1, VR256:$src2, VR256:$src3)>;
-
-//------------
-// FP single precision FNMADD - 128
-//------------
-
-// FMA231 : src1 = - src2*src3 + src1
-def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, (memopv4f32 addr:$src3)))),
- (VFNMADDPSr231m VR128:$src1, VR128:$src2, addr:$src3)>;
-
-// FMA231 : src1 = - src2*src3 + src1
-def : Pat<(v4f32 (fsub VR128:$src1, (fmul VR128:$src2, VR128:$src3))),
- (VFNMADDPSr231r VR128:$src1, VR128:$src2, VR128:$src3)>;
-
-} // HasFMA3
-
-//------------------------------
-// SCALAR
-//------------------------------
let Constraints = "$src1 = $dst" in {
multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
@@ -328,62 +172,6 @@ defm VFNMSUBSD : fma3s_forms<0x9F, 0xAF, 0xBF, "vfnmsub", "sd", f64mem, FR64,
int_x86_fma4_vfnmsub_sd>, VEX_W, VEX_LIG;
-let Predicates = [HasFMA3], AddedComplexity = 20 in {
-
-//------------
-// FP scalar ADD
-//------------
-
-
-// FMADD231 : src1 = src2*src3 + src1
-def : Pat<(f32 (fadd (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
- (VFMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fadd (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
- (VFMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fadd (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
- (VFMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fadd (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
- (VFMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-
-
-//------------
-// FP scalar SUB src2*src3 - src1
-//------------
-
-def : Pat<(f32 (fsub (fmul FR32:$src2, FR32:$src3), FR32:$src1)),
- (VFMSUBSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fsub (fmul FR32:$src2, (loadf32 addr:$src3)), FR32:$src1)),
- (VFMSUBSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fsub (fmul FR64:$src2, FR64:$src3), FR64:$src1)),
- (VFMSUBSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fsub (fmul FR64:$src2, (loadf64 addr:$src3)), FR64:$src1)),
- (VFMSUBSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-//------------
-// FP scalar NADD src1 - src2*src3
-//------------
-
-def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, FR32:$src3))),
- (VFNMADDSSr231r FR32:$src1, FR32:$src2, FR32:$src3)>;
-
-def : Pat<(f32 (fsub FR32:$src1, (fmul FR32:$src2, (loadf32 addr:$src3)))),
- (VFNMADDSSr231m FR32:$src1, FR32:$src2, addr:$src3)>;
-
-def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, FR64:$src3))),
- (VFNMADDSDr231r FR64:$src1, FR64:$src2, FR64:$src3)>;
-
-def : Pat<(f64 (fsub FR64:$src1, (fmul FR64:$src2, (loadf64 addr:$src3)))),
- (VFNMADDSDr231m FR64:$src1, FR64:$src2, addr:$src3)>;
-
-} // HasFMA3
-
//===----------------------------------------------------------------------===//
// FMA4 - AMD 4 operand Fused Multiply-Add instructions
//===----------------------------------------------------------------------===//