| author | Adam Nemet <anemet@apple.com> | 2014-03-12 21:20:55 +0000 |
|---|---|---|
| committer | Adam Nemet <anemet@apple.com> | 2014-03-12 21:20:55 +0000 |
| commit | d4e56073c768489d45e7941c6a839ab5c1d5eaf8 (patch) | |
| tree | a4922b2e3ec5e841d8a70b521f0c8c06418a8b8a /llvm | |
| parent | f217e099bb4766b25b1f7150cf4a333e95b6b682 (diff) | |
[X86] Add peephole for masked rotate amount
Extend what's currently done for shifts to rotates, since the hardware performs this masking
implicitly:
(rotl:i32 x, (and y, 31)) -> (rotl:i32 x, y)
I use the newly factored-out multiclass, which so far supported only shifts.
For testing, I extended my testcase for the new rotation idiom.
<rdar://problem/15295856>
llvm-svn: 203718
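
For reference, the guarded-rotate idiom the testcase exercises looks roughly like this in LLVM IR. This is a minimal sketch following the shape of `rotate_left_32` in rotate4.ll; only the initial `and` is visible in the hunks below, so the remaining instruction names and the negate-and-mask form are assumptions:

```llvm
define i32 @rotate_left_32(i32 %a, i32 %b) {
entry:
  %and = and i32 %b, 31     ; mask the hardware applies implicitly anyway
  %shl = shl i32 %a, %and
  %neg = sub i32 0, %b      ; negate-and-mask avoids an out-of-range shift amount
  %and2 = and i32 %neg, 31
  %shr = lshr i32 %a, %and2
  %or = or i32 %shl, %shr   ; recognized as a rotl node by the DAG combiner
  ret i32 %or
}
```

With the new patterns, the `and` feeding the rotate amount is folded away and this lowers to a single `roll` with the amount in `%cl`; the added `CHECK-NOT: and` lines in the tests verify exactly that.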
Diffstat (limited to 'llvm')
| -rw-r--r-- | llvm/lib/Target/X86/X86InstrCompiler.td | 2 |
| -rw-r--r-- | llvm/test/CodeGen/X86/rotate4.ll | 8 |

2 files changed, 10 insertions, 0 deletions
```diff
diff --git a/llvm/lib/Target/X86/X86InstrCompiler.td b/llvm/lib/Target/X86/X86InstrCompiler.td
index 370e4d875c6..401849f40ec 100644
--- a/llvm/lib/Target/X86/X86InstrCompiler.td
+++ b/llvm/lib/Target/X86/X86InstrCompiler.td
@@ -1556,6 +1556,8 @@ multiclass MaskedShiftAmountPats<SDNode frag, string name> {
 defm : MaskedShiftAmountPats<shl, "SHL">;
 defm : MaskedShiftAmountPats<srl, "SHR">;
 defm : MaskedShiftAmountPats<sra, "SAR">;
+defm : MaskedShiftAmountPats<rotl, "ROL">;
+defm : MaskedShiftAmountPats<rotr, "ROR">;
 
 // (anyext (setcc_carry)) -> (setcc_carry)
 def : Pat<(i16 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
diff --git a/llvm/test/CodeGen/X86/rotate4.ll b/llvm/test/CodeGen/X86/rotate4.ll
index b549a9bd935..5372612aeab 100644
--- a/llvm/test/CodeGen/X86/rotate4.ll
+++ b/llvm/test/CodeGen/X86/rotate4.ll
@@ -5,6 +5,7 @@
 define i32 @rotate_left_32(i32 %a, i32 %b) {
 ; CHECK-LABEL: rotate_left_32:
+; CHECK-NOT: and
 ; CHECK: roll
 entry:
   %and = and i32 %b, 31
@@ -18,6 +19,7 @@ entry:
 define i32 @rotate_right_32(i32 %a, i32 %b) {
 ; CHECK-LABEL: rotate_right_32:
+; CHECK-NOT: and
 ; CHECK: rorl
 entry:
   %and = and i32 %b, 31
@@ -31,6 +33,7 @@ entry:
 define i64 @rotate_left_64(i64 %a, i64 %b) {
 ; CHECK-LABEL: rotate_left_64:
+; CHECK-NOT: and
 ; CHECK: rolq
 entry:
   %and = and i64 %b, 63
@@ -44,6 +47,7 @@ entry:
 define i64 @rotate_right_64(i64 %a, i64 %b) {
 ; CHECK-LABEL: rotate_right_64:
+; CHECK-NOT: and
 ; CHECK: rorq
 entry:
   %and = and i64 %b, 63
@@ -59,6 +63,7 @@ entry:
 define void @rotate_left_m32(i32 *%pa, i32 %b) {
 ; CHECK-LABEL: rotate_left_m32:
+; CHECK-NOT: and
 ; CHECK: roll
 ; no store:
 ; CHECK-NOT: mov
@@ -76,6 +81,7 @@ entry:
 define void @rotate_right_m32(i32 *%pa, i32 %b) {
 ; CHECK-LABEL: rotate_right_m32:
+; CHECK-NOT: and
 ; CHECK: rorl
 ; no store:
 ; CHECK-NOT: mov
@@ -93,6 +99,7 @@ entry:
 define void @rotate_left_m64(i64 *%pa, i64 %b) {
 ; CHECK-LABEL: rotate_left_m64:
+; CHECK-NOT: and
 ; CHECK: rolq
 ; no store:
 ; CHECK-NOT: mov
@@ -110,6 +117,7 @@ entry:
 define void @rotate_right_m64(i64 *%pa, i64 %b) {
 ; CHECK-LABEL: rotate_right_m64:
+; CHECK-NOT: and
 ; CHECK: rorq
 ; no store:
 ; CHECK-NOT: mov
```
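The memory variants (their bodies are elided in the hunks above) presumably apply the same idiom through a pointer. A sketch under that assumption, in the typed-pointer syntax of this era (the `load i32, i32*` form only arrived later, in LLVM 3.7); instruction names are illustrative:

```llvm
define void @rotate_left_m32(i32 *%pa, i32 %b) {
entry:
  %a = load i32* %pa        ; typed-pointer load syntax of the 2014-era IR
  %and = and i32 %b, 31
  %shl = shl i32 %a, %and
  %neg = sub i32 0, %b
  %and2 = and i32 %neg, 31
  %shr = lshr i32 %a, %and2
  %or = or i32 %shl, %shr
  store i32 %or, i32 *%pa   ; load/rotate/store folds into one roll on memory
  ret void
}
```

The `; no store:` / `CHECK-NOT: mov` pair in these tests then checks that the rotate is performed directly on the memory operand (e.g. `roll %cl, (%rdi)`), with no separate load or store instruction.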

