| author | Sanjay Patel <spatel@rotateright.com> | 2018-08-19 14:44:47 +0000 |
|---|---|---|
| committer | Sanjay Patel <spatel@rotateright.com> | 2018-08-19 14:44:47 +0000 |
| commit | 446529b0d9452e3d4d6164f48ce2ccb104cdd6a5 (patch) | |
| tree | 2a304018245f41f376f2cc642a914803b8230d6d /clang/test/CodeGen | |
| parent | 39b4dd2da7120901b1295634dfd0b457c8c173e0 (diff) | |
| download | bcm5719-llvm-446529b0d9452e3d4d6164f48ce2ccb104cdd6a5.tar.gz bcm5719-llvm-446529b0d9452e3d4d6164f48ce2ccb104cdd6a5.zip | |
[CodeGen] add/fix rotate builtins that map to LLVM funnel shift (retry)
This is a retry of rL340135 (reverted at rL340136 because of a gcc host compiler crash)
with two changes:
1. Move the code into a helper to reduce code duplication (and hopefully work around the crash).
2. The original commit had a formatting bug in the docs (missing an underscore).
Original commit message:
This exposes the LLVM funnel shift intrinsics as more familiar bit rotation functions in clang
(when both halves of a funnel shift are the same value, it's a rotate).
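As a minimal sketch of how the new builtins read at the source level (assuming a clang that includes this patch; the wrapper name `rotl32_demo` is only for illustration):

```c
#include <stdio.h>

// Rotate left with the new clang builtin. Per the tests in this patch,
// clang lowers this to a single llvm.fshl.i32 call with the same value
// passed as both funnel-shift halves.
unsigned int rotl32_demo(unsigned int x, unsigned int n) {
  return __builtin_rotateleft32(x, n);
}

int main(void) {
  // 0x80000001 rotated left by 1 becomes 0x3.
  printf("%#x\n", rotl32_demo(0x80000001u, 1));
  return 0;
}
```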
We're free to name these as we want because we're not copying gcc, but if there's some other
existing art (e.g., the Microsoft ops that are modified in this patch) that we want to replicate,
we can change the names.
The funnel shift intrinsics were added here:
https://reviews.llvm.org/D49242
With improved codegen in:
https://reviews.llvm.org/rL337966
https://reviews.llvm.org/rL339359
And basic IR optimization added in:
https://reviews.llvm.org/rL338218
https://reviews.llvm.org/rL340022
...so these are expected to produce asm output that's equal to or better than the multi-instruction
alternatives built from primitive C/IR ops.
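For comparison, here is a sketch of the kind of open-coded alternative meant by "primitive C ops": the masked shift-and-or rotate idiom, which corresponds to the and/shl/sub/lshr/or pattern the old test expectations below check for (the helper name `rotl32_manual` is hypothetical):

```c
// Open-coded 32-bit rotate-left using only primitive C operations.
// Masking both shift amounts with 31 keeps the shifts in range
// (including n == 0), mirroring the removed CHECK lines below.
unsigned int rotl32_manual(unsigned int x, unsigned int n) {
  return (x << (n & 31)) | (x >> (-n & 31));
}
```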
In the motivating loop example from PR37387:
https://bugs.llvm.org/show_bug.cgi?id=37387#c7
...we get the expected 'rolq' x86 instructions if we substitute the rotate builtin into the source.
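As a rough, hypothetical illustration only (the actual loop in PR37387 is different), substituting the builtin into a rotate-heavy loop looks something like this:

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical hashing loop using the new rotate builtin. The rotate in
// the loop body becomes a single llvm.fshl.i64 call, which the x86
// backend can select as a rolq instruction.
uint64_t hash_words(const uint64_t *p, size_t n) {
  uint64_t h = 0;
  for (size_t i = 0; i < n; ++i)
    h = __builtin_rotateleft64(h ^ p[i], 31);
  return h;
}
```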
Differential Revision: https://reviews.llvm.org/D50924
llvm-svn: 340137
Diffstat (limited to 'clang/test/CodeGen')
| -rw-r--r-- | clang/test/CodeGen/builtin-rotate.c | 66 |
| -rw-r--r-- | clang/test/CodeGen/ms-intrinsics-rotations.c | 99 |
2 files changed, 87 insertions, 78 deletions
```diff
diff --git a/clang/test/CodeGen/builtin-rotate.c b/clang/test/CodeGen/builtin-rotate.c
new file mode 100644
index 00000000000..8fc1701c6c9
--- /dev/null
+++ b/clang/test/CodeGen/builtin-rotate.c
@@ -0,0 +1,66 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
+
+unsigned char rotl8(unsigned char x, unsigned char y) {
+// CHECK-LABEL: rotl8
+// CHECK: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK-NEXT: ret i8 [[F]]
+
+  return __builtin_rotateleft8(x, y);
+}
+
+short rotl16(short x, short y) {
+// CHECK-LABEL: rotl16
+// CHECK: [[F:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK-NEXT: ret i16 [[F]]
+
+  return __builtin_rotateleft16(x, y);
+}
+
+int rotl32(int x, unsigned int y) {
+// CHECK-LABEL: rotl32
+// CHECK: [[F:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-NEXT: ret i32 [[F]]
+
+  return __builtin_rotateleft32(x, y);
+}
+
+unsigned long long rotl64(unsigned long long x, long long y) {
+// CHECK-LABEL: rotl64
+// CHECK: [[F:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK-NEXT: ret i64 [[F]]
+
+  return __builtin_rotateleft64(x, y);
+}
+
+char rotr8(char x, char y) {
+// CHECK-LABEL: rotr8
+// CHECK: [[F:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK-NEXT: ret i8 [[F]]
+
+  return __builtin_rotateright8(x, y);
+}
+
+unsigned short rotr16(unsigned short x, unsigned short y) {
+// CHECK-LABEL: rotr16
+// CHECK: [[F:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK-NEXT: ret i16 [[F]]
+
+  return __builtin_rotateright16(x, y);
+}
+
+unsigned int rotr32(unsigned int x, int y) {
+// CHECK-LABEL: rotr32
+// CHECK: [[F:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-NEXT: ret i32 [[F]]
+
+  return __builtin_rotateright32(x, y);
+}
+
+long long rotr64(long long x, unsigned long long y) {
+// CHECK-LABEL: rotr64
+// CHECK: [[F:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK-NEXT: ret i64 [[F]]
+
+  return __builtin_rotateright64(x, y);
+}
+
diff --git a/clang/test/CodeGen/ms-intrinsics-rotations.c b/clang/test/CodeGen/ms-intrinsics-rotations.c
index 735de6e41e6..30428b12aa3 100644
--- a/clang/test/CodeGen/ms-intrinsics-rotations.c
+++ b/clang/test/CodeGen/ms-intrinsics-rotations.c
@@ -30,66 +30,36 @@ unsigned char test_rotl8(unsigned char value, unsigned char shift) {
   return _rotl8(value, shift);
 }
 // CHECK: i8 @test_rotl8
-// CHECK: [[LSHIFT:%[0-9]+]] = and i8 [[SHIFT:%[0-9]+]], 7
-// CHECK: [[HIGH:%[0-9]+]] = shl i8 [[VALUE:%[0-9]+]], [[LSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i8 0, [[SHIFT]]
-// CHECK: [[RSHIFT:%[0-9]+]] = and i8 [[NEGATE]], 7
-// CHECK: [[LOW:%[0-9]+]] = lshr i8 [[VALUE]], [[RSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i8 [[HIGH]], [[LOW]]
-// CHECK: ret i8 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK: ret i8 [[R]]
 
 unsigned short test_rotl16(unsigned short value, unsigned char shift) {
   return _rotl16(value, shift);
 }
 // CHECK: i16 @test_rotl16
-// CHECK: [[LSHIFT:%[0-9]+]] = and i16 [[SHIFT:%[0-9]+]], 15
-// CHECK: [[HIGH:%[0-9]+]] = shl i16 [[VALUE:%[0-9]+]], [[LSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i16 0, [[SHIFT]]
-// CHECK: [[RSHIFT:%[0-9]+]] = and i16 [[NEGATE]], 15
-// CHECK: [[LOW:%[0-9]+]] = lshr i16 [[VALUE]], [[RSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i16 [[HIGH]], [[LOW]]
-// CHECK: ret i16 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK: ret i16 [[R]]
 
 unsigned int test_rotl(unsigned int value, int shift) {
   return _rotl(value, shift);
 }
 // CHECK: i32 @test_rotl
-// CHECK: [[LSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
-// CHECK: [[HIGH:%[0-9]+]] = shl i32 [[VALUE:%[0-9]+]], [[LSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
-// CHECK: [[RSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
-// CHECK: [[LOW:%[0-9]+]] = lshr i32 [[VALUE]], [[RSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
-// CHECK: ret i32 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK: ret i32 [[R]]
 
 unsigned LONG test_lrotl(unsigned LONG value, int shift) {
   return _lrotl(value, shift);
 }
 // CHECK-32BIT-LONG: i32 @test_lrotl
-// CHECK-32BIT-LONG: [[LSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
-// CHECK-32BIT-LONG: [[HIGH:%[0-9]+]] = shl i32 [[VALUE:%[0-9]+]], [[LSHIFT]]
-// CHECK-32BIT-LONG: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
-// CHECK-32BIT-LONG: [[RSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
-// CHECK-32BIT-LONG: [[LOW:%[0-9]+]] = lshr i32 [[VALUE]], [[RSHIFT]]
-// CHECK-32BIT-LONG: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
-// CHECK-32BIT-LONG: ret i32 [[RESULT]]
-// CHECK-32BIT-LONG }
+// CHECK-32BIT-LONG: [[R:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-32BIT-LONG: ret i32 [[R]]
 
 unsigned __int64 test_rotl64(unsigned __int64 value, int shift) {
   return _rotl64(value, shift);
 }
 // CHECK: i64 @test_rotl64
-// CHECK: [[LSHIFT:%[0-9]+]] = and i64 [[SHIFT:%[0-9]+]], 63
-// CHECK: [[HIGH:%[0-9]+]] = shl i64 [[VALUE:%[0-9]+]], [[LSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i64 0, [[SHIFT]]
-// CHECK: [[RSHIFT:%[0-9]+]] = and i64 [[NEGATE]], 63
-// CHECK: [[LOW:%[0-9]+]] = lshr i64 [[VALUE]], [[RSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i64 [[HIGH]], [[LOW]]
-// CHECK: ret i64 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK: ret i64 [[R]]
 
 // rotate right
@@ -97,61 +67,34 @@ unsigned char test_rotr8(unsigned char value, unsigned char shift) {
   return _rotr8(value, shift);
 }
 // CHECK: i8 @test_rotr8
-// CHECK: [[RSHIFT:%[0-9]+]] = and i8 [[SHIFT:%[0-9]+]], 7
-// CHECK: [[LOW:%[0-9]+]] = lshr i8 [[VALUE:%[0-9]+]], [[RSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i8 0, [[SHIFT]]
-// CHECK: [[LSHIFT:%[0-9]+]] = and i8 [[NEGATE]], 7
-// CHECK: [[HIGH:%[0-9]+]] = shl i8 [[VALUE]], [[LSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i8 [[HIGH]], [[LOW]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK: ret i8 [[R]]
 
 unsigned short test_rotr16(unsigned short value, unsigned char shift) {
   return _rotr16(value, shift);
 }
 // CHECK: i16 @test_rotr16
-// CHECK: [[RSHIFT:%[0-9]+]] = and i16 [[SHIFT:%[0-9]+]], 15
-// CHECK: [[LOW:%[0-9]+]] = lshr i16 [[VALUE:%[0-9]+]], [[RSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i16 0, [[SHIFT]]
-// CHECK: [[LSHIFT:%[0-9]+]] = and i16 [[NEGATE]], 15
-// CHECK: [[HIGH:%[0-9]+]] = shl i16 [[VALUE]], [[LSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i16 [[HIGH]], [[LOW]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK: ret i16 [[R]]
 
 unsigned int test_rotr(unsigned int value, int shift) {
   return _rotr(value, shift);
 }
 // CHECK: i32 @test_rotr
-// CHECK: [[RSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
-// CHECK: [[LOW:%[0-9]+]] = lshr i32 [[VALUE:%[0-9]+]], [[RSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
-// CHECK: [[LSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
-// CHECK: [[HIGH:%[0-9]+]] = shl i32 [[VALUE]], [[LSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
-// CHECK: ret i32 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK: ret i32 [[R]]
 
 unsigned LONG test_lrotr(unsigned LONG value, int shift) {
   return _lrotr(value, shift);
 }
 // CHECK-32BIT-LONG: i32 @test_lrotr
-// CHECK-32BIT-LONG: [[RSHIFT:%[0-9]+]] = and i32 [[SHIFT:%[0-9]+]], 31
-// CHECK-32BIT-LONG: [[LOW:%[0-9]+]] = lshr i32 [[VALUE:%[0-9]+]], [[RSHIFT]]
-// CHECK-32BIT-LONG: [[NEGATE:%[0-9]+]] = sub i32 0, [[SHIFT]]
-// CHECK-32BIT-LONG: [[LSHIFT:%[0-9]+]] = and i32 [[NEGATE]], 31
-// CHECK-32BIT-LONG: [[HIGH:%[0-9]+]] = shl i32 [[VALUE]], [[LSHIFT]]
-// CHECK-32BIT-LONG: [[RESULT:%[0-9]+]] = or i32 [[HIGH]], [[LOW]]
-// CHECK-32BIT-LONG: ret i32 [[RESULT]]
-// CHECK-32BIT-LONG }
+// CHECK-32BIT-LONG: [[R:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-32BIT-LONG: ret i32 [[R]]
 
 unsigned __int64 test_rotr64(unsigned __int64 value, int shift) {
   return _rotr64(value, shift);
 }
 // CHECK: i64 @test_rotr64
-// CHECK: [[RSHIFT:%[0-9]+]] = and i64 [[SHIFT:%[0-9]+]], 63
-// CHECK: [[LOW:%[0-9]+]] = lshr i64 [[VALUE:%[0-9]+]], [[RSHIFT]]
-// CHECK: [[NEGATE:%[0-9]+]] = sub i64 0, [[SHIFT]]
-// CHECK: [[LSHIFT:%[0-9]+]] = and i64 [[NEGATE]], 63
-// CHECK: [[HIGH:%[0-9]+]] = shl i64 [[VALUE]], [[LSHIFT]]
-// CHECK: [[RESULT:%[0-9]+]] = or i64 [[HIGH]], [[LOW]]
-// CHECK: ret i64 [[RESULT]]
-// CHECK }
+// CHECK: [[R:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK: ret i64 [[R]]
+
```

