| | | |
|---|---|---|
| author | James Molloy <james.molloy@arm.com> | 2015-11-13 10:02:36 +0000 |
| committer | James Molloy <james.molloy@arm.com> | 2015-11-13 10:02:36 +0000 |
| commit | bb1dbf530a186cd923e867a40f389a2863f05d38 (patch) | |
| tree | b639587d207f31e8d6d79777cc6ed2cb5453fdc5 /llvm/test | |
| parent | b2f75136d1f0c5a9f4a9695acbfde28e600553b3 (diff) | |
| download | bcm5719-llvm-bb1dbf530a186cd923e867a40f389a2863f05d38.tar.gz bcm5719-llvm-bb1dbf530a186cd923e867a40f389a2863f05d38.zip | |
[SDAG] Fix expansion of BITREVERSE
Richard Trieu noted that UBSan detected an overflowing shift, and the obvious fix caused a crash.
Two things were going wrong: the shiftee (1U) was too narrow for the range of shift amounts it had to handle, and we were using "VT.getSizeInBits()" to get the maximum type bitwidth when we wanted "VT.getScalarSizeInBits()", which returns the vector lane size rather than the size of the entire vector.
Use an APInt for the shift and VT.getScalarSizeInBits(); a sketch of the corrected expansion is shown below.
llvm-svn: 253023
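The code change itself is not shown here (the diffstat below is limited to llvm/test), so what follows is only a minimal sketch of an expansion along the lines the message describes, in LLVM SelectionDAG style; the helper name `expandBitReverseSketch` and the `ShiftVT` parameter are illustrative, not the committed code. The point is that the one-bit mask lives in an APInt sized to the scalar width, so shifting it is well defined for any lane size, and `getScalarSizeInBits()` gives the per-lane width for vector types.

```cpp
// Minimal sketch (not the committed code) of a BITREVERSE expansion.
// Result bit J receives source bit I, with J = Sz-1-I: shift the source
// into place, isolate the bit with a one-bit mask, and OR it in.
#include "llvm/ADT/APInt.h"
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;

static SDValue expandBitReverseSketch(SelectionDAG &DAG, const SDLoc &dl,
                                      SDValue Op, EVT VT, EVT ShiftVT) {
  // Per-lane width: 8 for <8 x i8>. VT.getSizeInBits() would give 64,
  // the width of the whole vector, which was the second bug.
  unsigned Sz = VT.getScalarSizeInBits();
  SDValue Result = DAG.getConstant(0, dl, VT);
  for (unsigned I = 0, J = Sz - 1; I < Sz; ++I, --J) {
    SDValue Tmp =
        I < J ? DAG.getNode(ISD::SHL, dl, VT, Op,
                            DAG.getConstant(J - I, dl, ShiftVT))
              : DAG.getNode(ISD::SRL, dl, VT, Op,
                            DAG.getConstant(I - J, dl, ShiftVT));
    // Build the single-bit mask in an APInt of the scalar width; shifting
    // a plain 1U here is undefined behaviour once J reaches 32 (i64 lanes),
    // which is the overflowing shift UBSan flagged.
    APInt Mask(Sz, 1);
    Mask <<= J;
    Tmp = DAG.getNode(ISD::AND, dl, VT, Tmp, DAG.getConstant(Mask, dl, VT));
    Result = DAG.getNode(ISD::OR, dl, VT, Result, Tmp);
  }
  return Result;
}
```

For <8 x i8>, Sz is 8 rather than 64, which is exactly what the vector test below pins down: eight single-bit masks, four left and four right shifts, eight ANDs, and a tree of ORs.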
Diffstat (limited to 'llvm/test')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/AArch64/bitreverse.ll | 45 |

1 file changed, 45 insertions, 0 deletions
```diff
diff --git a/llvm/test/CodeGen/AArch64/bitreverse.ll b/llvm/test/CodeGen/AArch64/bitreverse.ll
index b780412f765..702581789bb 100644
--- a/llvm/test/CodeGen/AArch64/bitreverse.ll
+++ b/llvm/test/CodeGen/AArch64/bitreverse.ll
@@ -21,3 +21,48 @@ define i8 @g(i8 %a) {
   %b = call i8 @llvm.bitreverse.i8(i8 %a)
   ret i8 %b
 }
+
+declare <8 x i8> @llvm.bitreverse.v8i8(<8 x i8>) readnone
+
+define <8 x i8> @g_vec(<8 x i8> %a) {
+; Try and match as much of the sequence as precisely as possible.
+
+; CHECK-LABEL: g_vec:
+; CHECK-DAG: movi [[M1:v.*]], #0x80
+; CHECK-DAG: movi [[M2:v.*]], #0x40
+; CHECK-DAG: movi [[M3:v.*]], #0x20
+; CHECK-DAG: movi [[M4:v.*]], #0x10
+; CHECK-DAG: movi [[M5:v.*]], #0x8
+; CHECK-DAG: movi [[M6:v.*]], #0x4{{$}}
+; CHECK-DAG: movi [[M7:v.*]], #0x2{{$}}
+; CHECK-DAG: movi [[M8:v.*]], #0x1{{$}}
+; CHECK-DAG: shl [[S1:v.*]], v0.8b, #7
+; CHECK-DAG: shl [[S2:v.*]], v0.8b, #5
+; CHECK-DAG: shl [[S3:v.*]], v0.8b, #3
+; CHECK-DAG: shl [[S4:v.*]], v0.8b, #1
+; CHECK-DAG: ushr [[S5:v.*]], v0.8b, #1
+; CHECK-DAG: ushr [[S6:v.*]], v0.8b, #3
+; CHECK-DAG: ushr [[S7:v.*]], v0.8b, #5
+; CHECK-DAG: ushr [[S8:v.*]], v0.8b, #7
+; CHECK-DAG: and [[A1:v.*]], [[S1]], [[M1]]
+; CHECK-DAG: and [[A2:v.*]], [[S2]], [[M2]]
+; CHECK-DAG: and [[A3:v.*]], [[S3]], [[M3]]
+; CHECK-DAG: and [[A4:v.*]], [[S4]], [[M4]]
+; CHECK-DAG: and [[A5:v.*]], [[S5]], [[M5]]
+; CHECK-DAG: and [[A6:v.*]], [[S6]], [[M6]]
+; CHECK-DAG: and [[A7:v.*]], [[S7]], [[M7]]
+; CHECK-DAG: and [[A8:v.*]], [[S8]], [[M8]]
+
+; The rest can be ORRed together in any order; it's not worth the test
+; maintenance to match them precisely.
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK-DAG: orr
+; CHECK: ret
+  %b = call <8 x i8> @llvm.bitreverse.v8i8(<8 x i8> %a)
+  ret <8 x i8> %b
+}
```

