author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-08-06 18:40:28 +0000 |
---|---|---|
committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-08-06 18:40:28 +0000 |
commit | 7d168e19e8f0e82257c845a38bf90bc4b5195fd4 (patch) | |
tree | ec55052097574491843b32326c366c6ee713a556 /llvm/lib/Target/X86/X86InstrInfo.cpp | |
parent | 45a574130e68589f3b3b2d169c1bfea2595ef752 (diff) | |
[X86][SSE] Enable commutation between MOVHLPS and UNPCKHPD
Assuming SSE2 is available, we can safely commute between these instructions, removing some unnecessary register moves and improving memory-folding opportunities.
VEX-encoded versions don't benefit, so I haven't added support for them.
llvm-svn: 277930
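For reference, the equivalence this change relies on is visible at the intrinsics level: with SSE2, MOVHLPS dst, src and UNPCKHPD with its operands swapped produce the same 128-bit result, which is why switching the opcode together with the operand indices is safe. The following is a minimal sketch, not part of the commit, that checks this bit-for-bit with SSE/SSE2 intrinsics; the file name and variable names are illustrative.

```cpp
// Minimal sketch (not from the commit): verify that MOVHLPS a, b and
// UNPCKHPD b, a compute the same 128-bit value, which is what makes the
// MOVHLPSrr <-> UNPCKHPDrr commutation legal once SSE2 is available.
// Build with something like: g++ -msse2 movhlps_unpckhpd.cpp
#include <xmmintrin.h>  // SSE:  _mm_movehl_ps
#include <emmintrin.h>  // SSE2: _mm_unpackhi_pd, _mm_castps_pd, _mm_castpd_ps
#include <cstdio>
#include <cstring>

int main() {
  // Two arbitrary 128-bit vectors viewed as 4 x float.
  const __m128 a = _mm_setr_ps(0.0f, 1.0f, 2.0f, 3.0f);
  const __m128 b = _mm_setr_ps(4.0f, 5.0f, 6.0f, 7.0f);

  // MOVHLPS a, b: low 64 bits  <- high 64 bits of b,
  //               high 64 bits <- high 64 bits of a.
  const __m128 viaMovhlps = _mm_movehl_ps(a, b);

  // UNPCKHPD b, a (operands commuted, lanes reinterpreted as 2 x double):
  // low 64 bits <- high half of b, high 64 bits <- high half of a.
  const __m128 viaUnpckhpd =
      _mm_castpd_ps(_mm_unpackhi_pd(_mm_castps_pd(b), _mm_castps_pd(a)));

  const bool Same = std::memcmp(&viaMovhlps, &viaUnpckhpd, sizeof(__m128)) == 0;
  std::printf("identical: %s\n", Same ? "yes" : "no");
  return Same ? 0 : 1;
}
```

Since the non-VEX forms are destructive two-operand instructions, being free to pick whichever opcode matches the commuted operand order gives the register allocator and the memory-folding code more room to work with.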
Diffstat (limited to 'llvm/lib/Target/X86/X86InstrInfo.cpp')
-rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.cpp | 16 |
1 file changed, 16 insertions, 0 deletions
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 0571dd28de4..98779351224 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -3709,6 +3709,22 @@ MachineInstr *X86InstrInfo::commuteInstructionImpl(MachineInstr &MI, bool NewMI,
     return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                    OpIdx1, OpIdx2);
   }
+  case X86::MOVHLPSrr:
+  case X86::UNPCKHPDrr: {
+    if (!Subtarget.hasSSE2())
+      return nullptr;
+
+    unsigned Opc = MI.getOpcode();
+    switch (Opc) {
+    default: llvm_unreachable("Unreachable!");
+    case X86::MOVHLPSrr: Opc = X86::UNPCKHPDrr; break;
+    case X86::UNPCKHPDrr: Opc = X86::MOVHLPSrr; break;
+    }
+    auto &WorkingMI = cloneIfNew(MI);
+    WorkingMI.setDesc(get(Opc));
+    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
+                                                   OpIdx1, OpIdx2);
+  }
   case X86::CMOVB16rr: case X86::CMOVB32rr: case X86::CMOVB64rr:
   case X86::CMOVAE16rr: case X86::CMOVAE32rr: case X86::CMOVAE64rr:
   case X86::CMOVE16rr: case X86::CMOVE32rr: case X86::CMOVE64rr: