author | Juergen Ributzka <juergen@apple.com> | 2014-09-04 01:29:18 +0000 |
---|---|---|
committer | Juergen Ributzka <juergen@apple.com> | 2014-09-04 01:29:18 +0000 |
commit | 1dbc15f02d1eaa9ef908014e19276fde331d3758 (patch) | |
tree | e93d3cfa40d545f48879e376b6962ea05d6af0e9 /llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll | |
parent | fc0db222b5a13f988f8e4d7afd71204e8f03175d (diff) | |
[FastISel][AArch64] Add target-specific lowering for logical operations.
This change adds support for immediate and shift-left folding into logical
operations.
This fixes rdar://problem/18223183.
llvm-svn: 217118
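
For context, the shift-left folding means fast-isel can now select an IR shl that feeds a logical operation as the shifted-register form of the AArch64 instruction, instead of emitting the shift separately. A minimal sketch of the pattern (the function name is hypothetical; the new test file below exercises the same cases):

    ; Before this change, fast-isel would emit the shift on its own,
    ; roughly:  lsl wN, w1, #8  followed by  and w0, w0, wN
    ; With the folding it can emit the shifted-register form:
    ;   and w0, w0, w1, lsl #8
    define i32 @fold_shl_into_and(i32 %a, i32 %b) {
      %1 = shl i32 %b, 8
      %2 = and i32 %a, %1
      ret i32 %2
    }

Likewise, a logical operation whose constant operand is a valid AArch64 logical immediate (for example and i32 %a, 255) can be selected directly as and w0, w0, #0xff instead of materializing the constant in a register first.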
Diffstat (limited to 'llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll')
-rw-r--r-- | llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll | 138 |
1 file changed, 138 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll b/llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll
new file mode 100644
index 00000000000..1efe4505f9e
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/fast-isel-logic-op.ll
@@ -0,0 +1,138 @@
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel=0 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=aarch64-apple-darwin -fast-isel=1 -fast-isel-abort -verify-machineinstrs < %s | FileCheck %s
+
+; AND
+define i32 @and_rr_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: and_rr_i32
+; CHECK: and w0, w0, w1
+  %1 = and i32 %a, %b
+  ret i32 %1
+}
+
+define i64 @and_rr_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: and_rr_i64
+; CHECK: and x0, x0, x1
+  %1 = and i64 %a, %b
+  ret i64 %1
+}
+
+define i32 @and_ri_i32(i32 %a) {
+; CHECK-LABEL: and_ri_i32
+; CHECK: and w0, w0, #0xff
+  %1 = and i32 %a, 255
+  ret i32 %1
+}
+
+define i64 @and_ri_i64(i64 %a) {
+; CHECK-LABEL: and_ri_i64
+; CHECK: and x0, x0, #0xff
+  %1 = and i64 %a, 255
+  ret i64 %1
+}
+
+define i32 @and_rs_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: and_rs_i32
+; CHECK: and w0, w0, w1, lsl #8
+  %1 = shl i32 %b, 8
+  %2 = and i32 %a, %1
+  ret i32 %2
+}
+
+define i64 @and_rs_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: and_rs_i64
+; CHECK: and x0, x0, x1, lsl #8
+  %1 = shl i64 %b, 8
+  %2 = and i64 %a, %1
+  ret i64 %2
+}
+
+; OR
+define i32 @or_rr_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: or_rr_i32
+; CHECK: orr w0, w0, w1
+  %1 = or i32 %a, %b
+  ret i32 %1
+}
+
+define i64 @or_rr_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: or_rr_i64
+; CHECK: orr x0, x0, x1
+  %1 = or i64 %a, %b
+  ret i64 %1
+}
+
+define i32 @or_ri_i32(i32 %a) {
+; CHECK-LABEL: or_ri_i32
+; CHECK: orr w0, w0, #0xff
+  %1 = or i32 %a, 255
+  ret i32 %1
+}
+
+define i64 @or_ri_i64(i64 %a) {
+; CHECK-LABEL: or_ri_i64
+; CHECK: orr x0, x0, #0xff
+  %1 = or i64 %a, 255
+  ret i64 %1
+}
+
+define i32 @or_rs_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: or_rs_i32
+; CHECK: orr w0, w0, w1, lsl #8
+  %1 = shl i32 %b, 8
+  %2 = or i32 %a, %1
+  ret i32 %2
+}
+
+define i64 @or_rs_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: or_rs_i64
+; CHECK: orr x0, x0, x1, lsl #8
+  %1 = shl i64 %b, 8
+  %2 = or i64 %a, %1
+  ret i64 %2
+}
+
+; XOR
+define i32 @xor_rr_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: xor_rr_i32
+; CHECK: eor w0, w0, w1
+  %1 = xor i32 %a, %b
+  ret i32 %1
+}
+
+define i64 @xor_rr_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: xor_rr_i64
+; CHECK: eor x0, x0, x1
+  %1 = xor i64 %a, %b
+  ret i64 %1
+}
+
+define i32 @xor_ri_i32(i32 %a) {
+; CHECK-LABEL: xor_ri_i32
+; CHECK: eor w0, w0, #0xff
+  %1 = xor i32 %a, 255
+  ret i32 %1
+}
+
+define i64 @xor_ri_i64(i64 %a) {
+; CHECK-LABEL: xor_ri_i64
+; CHECK: eor x0, x0, #0xff
+  %1 = xor i64 %a, 255
+  ret i64 %1
+}
+
+define i32 @xor_rs_i32(i32 %a, i32 %b) {
+; CHECK-LABEL: xor_rs_i32
+; CHECK: eor w0, w0, w1, lsl #8
+  %1 = shl i32 %b, 8
+  %2 = xor i32 %a, %1
+  ret i32 %2
+}
+
+define i64 @xor_rs_i64(i64 %a, i64 %b) {
+; CHECK-LABEL: xor_rs_i64
+; CHECK: eor x0, x0, x1, lsl #8
+  %1 = shl i64 %b, 8
+  %2 = xor i64 %a, %1
+  ret i64 %2
+}
+