From 446529b0d9452e3d4d6164f48ce2ccb104cdd6a5 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Sun, 19 Aug 2018 14:44:47 +0000
Subject: [CodeGen] add/fix rotate builtins that map to LLVM funnel shift
 (retry)

This is a retry of rL340135 (reverted at rL340136 because of gcc host
compiler crashing) with 2 changes:
1. Move the code into a helper to reduce code duplication (and hopefully
   work-around the crash).
2. The original commit had a formatting bug in the docs (missing an
   underscore).

Original commit message:

This exposes the LLVM funnel shift intrinsics as more familiar bit
rotation functions in clang (when both halves of a funnel shift are the
same value, it's a rotate).

We're free to name these as we want because we're not copying gcc, but
if there's some other existing art (eg, the microsoft ops that are
modified in this patch) that we want to replicate, we can change the
names.

The funnel shift intrinsics were added here:
https://reviews.llvm.org/D49242

With improved codegen in:
https://reviews.llvm.org/rL337966
https://reviews.llvm.org/rL339359

And basic IR optimization added in:
https://reviews.llvm.org/rL338218
https://reviews.llvm.org/rL340022

...so these are expected to produce asm output that's equal or better
to the multi-instruction alternatives using primitive C/IR ops.

In the motivating loop example from PR37387:
https://bugs.llvm.org/show_bug.cgi?id=37387#c7
...we get the expected 'rolq' x86 instructions if we substitute the
rotate builtin into the source.

Differential Revision: https://reviews.llvm.org/D50924

llvm-svn: 340137
---
 clang/test/CodeGen/builtin-rotate.c | 66 +++++++++++++++++++++++++++++++++++++
 1 file changed, 66 insertions(+)
 create mode 100644 clang/test/CodeGen/builtin-rotate.c

diff --git a/clang/test/CodeGen/builtin-rotate.c b/clang/test/CodeGen/builtin-rotate.c
new file mode 100644
index 00000000000..8fc1701c6c9
--- /dev/null
+++ b/clang/test/CodeGen/builtin-rotate.c
@@ -0,0 +1,66 @@
+// RUN: %clang_cc1 %s -emit-llvm -o - | FileCheck %s
+
+unsigned char rotl8(unsigned char x, unsigned char y) {
+// CHECK-LABEL: rotl8
+// CHECK: [[F:%.*]] = call i8 @llvm.fshl.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK-NEXT: ret i8 [[F]]
+
+  return __builtin_rotateleft8(x, y);
+}
+
+short rotl16(short x, short y) {
+// CHECK-LABEL: rotl16
+// CHECK: [[F:%.*]] = call i16 @llvm.fshl.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK-NEXT: ret i16 [[F]]
+
+  return __builtin_rotateleft16(x, y);
+}
+
+int rotl32(int x, unsigned int y) {
+// CHECK-LABEL: rotl32
+// CHECK: [[F:%.*]] = call i32 @llvm.fshl.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-NEXT: ret i32 [[F]]
+
+  return __builtin_rotateleft32(x, y);
+}
+
+unsigned long long rotl64(unsigned long long x, long long y) {
+// CHECK-LABEL: rotl64
+// CHECK: [[F:%.*]] = call i64 @llvm.fshl.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK-NEXT: ret i64 [[F]]
+
+  return __builtin_rotateleft64(x, y);
+}
+
+char rotr8(char x, char y) {
+// CHECK-LABEL: rotr8
+// CHECK: [[F:%.*]] = call i8 @llvm.fshr.i8(i8 [[X:%.*]], i8 [[X]], i8 [[Y:%.*]])
+// CHECK-NEXT: ret i8 [[F]]
+
+  return __builtin_rotateright8(x, y);
+}
+
+unsigned short rotr16(unsigned short x, unsigned short y) {
+// CHECK-LABEL: rotr16
+// CHECK: [[F:%.*]] = call i16 @llvm.fshr.i16(i16 [[X:%.*]], i16 [[X]], i16 [[Y:%.*]])
+// CHECK-NEXT: ret i16 [[F]]
+
+  return __builtin_rotateright16(x, y);
+}
+
+unsigned int rotr32(unsigned int x, int y) {
+// CHECK-LABEL: rotr32
+// CHECK: [[F:%.*]] = call i32 @llvm.fshr.i32(i32 [[X:%.*]], i32 [[X]], i32 [[Y:%.*]])
+// CHECK-NEXT: ret i32 [[F]]
+
+  return __builtin_rotateright32(x, y);
+}
+
+long long rotr64(long long x, unsigned long long y) {
+// CHECK-LABEL: rotr64
+// CHECK: [[F:%.*]] = call i64 @llvm.fshr.i64(i64 [[X:%.*]], i64 [[X]], i64 [[Y:%.*]])
+// CHECK-NEXT: ret i64 [[F]]
+
+  return __builtin_rotateright64(x, y);
+}
+
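For context, a minimal sketch (not part of the patch, function names made up
for illustration) of the trade-off the commit message describes: the
traditional two-shifts-and-an-OR rotate idiom built from primitive C ops
versus the new builtin, which lowers to @llvm.fshl.i64 with both value
operands equal and, per the motivating PR37387 example, is expected to become
a single 'rolq' on x86-64.

#include <stdint.h>

/* Traditional idiom: two shifts and an OR. The masks keep a rotate count of
   0 from producing a shift by the full bit width, which would be undefined
   behavior in C. */
uint64_t rotl64_manual(uint64_t x, unsigned n) {
  return (x << (n & 63)) | (x >> ((64 - n) & 63));
}

/* Same operation written with the new builtin; this maps directly to a
   funnel-shift-left intrinsic whose two value operands are the same, i.e. a
   rotate. */
uint64_t rotl64_builtin(uint64_t x, unsigned n) {
  return __builtin_rotateleft64(x, n);
}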