| author | Craig Topper <craig.topper@intel.com> | 2019-04-07 19:19:44 +0000 |
|---|---|---|
| committer | Craig Topper <craig.topper@intel.com> | 2019-04-07 19:19:44 +0000 |
| commit | 424417da79c60c8d67de6b0bb55e9d8404764707 | |
| tree | a15231c205515865c3c59ab384d43d460419f14c /llvm/lib | |
| parent | c664c2a5ec6db3ad637d4520d2afe15f64796b19 | |
[X86] Use (SUBREG_TO_REG (MOV32rm)) for extloadi64i8/extloadi64i16 when the load is 4 byte aligned or better and not volatile.
Summary:
Previously we would use MOVZX32rm8/MOVZX32rm16, but those have longer encodings: MOVZX needs a two-byte 0F-escaped opcode (0F B6 / 0F B7), while plain MOV32rm uses the one-byte opcode 8B.
This is similar to what we do in the loadi32 predicate.
Reviewers: RKSimon, spatel
Reviewed By: RKSimon
Subscribers: hiraditya, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D60341
llvm-svn: 357875
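
The center of the patch is the relaxed `extloadi64i32` predicate shown in the diff below. A standalone C++ paraphrase of that check (a hypothetical free function; the committed version lives inline in the PatFrag) makes the order and intent of each test explicit:

```cpp
#include "llvm/CodeGen/SelectionDAGNodes.h"

using namespace llvm;

// Paraphrase of the new extloadi64i32 PatFrag predicate (hypothetical
// helper name; the real check is written inline in X86InstrInfo.td).
static bool widenExtLoadTo32Bits(const LoadSDNode *LD) {
  // Only any-extending loads qualify: for sign/zero-extending loads the
  // high bits of the result are specified, so MOVSX/MOVZX must be kept.
  if (LD->getExtensionType() != ISD::EXTLOAD)
    return false;
  // An i32 extload to i64 is exactly a 32-bit load; always fold it.
  if (LD->getMemoryVT() == MVT::i32)
    return true;
  // An i8/i16 extload may be widened to a 32-bit load only when the wider
  // access cannot fault (4-byte alignment keeps it inside one naturally
  // aligned 4-byte chunk) and the load is not volatile (widening would
  // read bytes the program never asked for).
  return LD->getAlignment() >= 4 && !LD->isVolatile();
}
```

Checking `ISD::EXTLOAD` first is the load-bearing design choice: the fold is only legal because an any-extending load leaves the upper bits of the i64 result undefined, so filling them with whatever a 32-bit load happens to fetch is acceptable.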
Diffstat (limited to 'llvm/lib')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrCompiler.td | 6 |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrInfo.td | 14 |

2 files changed, 17 insertions(+), 3 deletions(-)
```diff
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 1fcc33e8724..dbc2fbcc6e7 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1279,14 +1279,16 @@ def : Pat<(extloadi32i16 addr:$src), (MOVZX32rm16 addr:$src)>;
 
 // For other extloads, use subregs, since the high contents of the register are
 // defined after an extload.
+// NOTE: The extloadi64i32 pattern needs to be first as it will try to form
+// 32-bit loads for 4 byte aligned i8/i16 loads.
+def : Pat<(extloadi64i32 addr:$src),
+          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
 def : Pat<(extloadi64i1 addr:$src),
           (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
 def : Pat<(extloadi64i8 addr:$src),
           (SUBREG_TO_REG (i64 0), (MOVZX32rm8 addr:$src), sub_32bit)>;
 def : Pat<(extloadi64i16 addr:$src),
           (SUBREG_TO_REG (i64 0), (MOVZX32rm16 addr:$src), sub_32bit)>;
-def : Pat<(extloadi64i32 addr:$src),
-          (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src), sub_32bit)>;
 
 // anyext. Define these to do an explicit zero-extend to
 // avoid partial-register updates.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.td b/llvm/lib/Target/X86/X86InstrInfo.td
index dc5e04cdd67..0176c2d707a 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.td
+++ b/llvm/lib/Target/X86/X86InstrInfo.td
@@ -1121,7 +1121,19 @@ def extloadi32i16 : PatFrag<(ops node:$ptr), (i32 (extloadi16 node:$ptr))>;
 def extloadi64i1 : PatFrag<(ops node:$ptr), (i64 (extloadi1 node:$ptr))>;
 def extloadi64i8 : PatFrag<(ops node:$ptr), (i64 (extloadi8 node:$ptr))>;
 def extloadi64i16 : PatFrag<(ops node:$ptr), (i64 (extloadi16 node:$ptr))>;
-def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (extloadi32 node:$ptr))>;
+
+// We can treat an i8/i16 extending load to i64 as a 32 bit load if its known
+// to be 4 byte aligned or better.
+def extloadi64i32 : PatFrag<(ops node:$ptr), (i64 (unindexedload node:$ptr)), [{
+  LoadSDNode *LD = cast<LoadSDNode>(N);
+  ISD::LoadExtType ExtType = LD->getExtensionType();
+  if (ExtType != ISD::EXTLOAD)
+    return false;
+  if (LD->getMemoryVT() == MVT::i32)
+    return true;
+
+  return LD->getAlignment() >= 4 && !LD->isVolatile();
+}]>;
 
 // An 'and' node with a single use.
```
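
Why `getAlignment() >= 4` is enough to make the widened load safe: a 4-byte access at a 4-byte-aligned address stays inside a single naturally aligned 4-byte chunk, so it can never cross a page boundary and fault on a page the original i8/i16 load would not have touched. A minimal self-contained sketch of that arithmetic (the page size and names are illustrative, not from the patch):

```cpp
#include <cassert>
#include <cstdint>

// Illustrative page size; the argument only requires the page size to be
// a multiple of 4, which holds for any power of two >= 4.
constexpr uint64_t kPageSize = 4096;

// True if an access of `Bytes` bytes at `Addr` straddles a page boundary.
static bool crossesPage(uint64_t Addr, uint64_t Bytes) {
  return Addr / kPageSize != (Addr + Bytes - 1) / kPageSize;
}

int main() {
  // Every 4-byte-aligned 4-byte load stays within one page, so widening a
  // 1- or 2-byte load at such an address cannot introduce a fault.
  for (uint64_t Addr = 0; Addr < 4 * kPageSize; Addr += 4)
    assert(!crossesPage(Addr, 4));
  // By contrast, an unaligned 4-byte load can straddle a boundary.
  assert(crossesPage(kPageSize - 1, 4));
  return 0;
}
```

The `!LD->isVolatile()` guard is independent of this: even a fault-free widened load is illegal on a volatile access, because it would read bytes the program never requested.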

