diff options
| author | Chris Lattner <sabre@nondot.org> | 2005-01-19 08:04:08 +0000 |
|---|---|---|
| committer | Chris Lattner <sabre@nondot.org> | 2005-01-19 08:04:08 +0000 |
| commit | c4adfbbd0b66b861224e22fd888ecb76430f6b56 (patch) | |
| tree | 64490e52bccdbfd3acbd2f53d7d309668053cc46 | |
| parent | 0edf9535b9a425b19e1176b346cd112974b34875 (diff) | |
| download | bcm5719-llvm-c4adfbbd0b66b861224e22fd888ecb76430f6b56.tar.gz bcm5719-llvm-c4adfbbd0b66b861224e22fd888ecb76430f6b56.zip | |
New testcase for rotate instructions. Each function should codegen to a
rotate.
llvm-svn: 19691
| -rw-r--r-- | llvm/test/Regression/CodeGen/X86/rotate.ll | 91 |
1 files changed, 91 insertions, 0 deletions
diff --git a/llvm/test/Regression/CodeGen/X86/rotate.ll b/llvm/test/Regression/CodeGen/X86/rotate.ll
new file mode 100644
index 00000000000..94da9ad6351
--- /dev/null
+++ b/llvm/test/Regression/CodeGen/X86/rotate.ll
@@ -0,0 +1,91 @@
+; RUN: llvm-as < %s | llc -march=x86 -x86-asm-syntax=intel -disable-pattern-isel=0 | grep ro[rl] | wc -l | grep 12
+
+uint %rotl32(uint %A, ubyte %Amt) {
+	%B = shl uint %A, ubyte %Amt
+	%Amt2 = sub ubyte 32, %Amt
+	%C = shr uint %A, ubyte %Amt2
+	%D = or uint %B, %C
+	ret uint %D
+}
+
+uint %rotr32(uint %A, ubyte %Amt) {
+	%B = shr uint %A, ubyte %Amt
+	%Amt2 = sub ubyte 32, %Amt
+	%C = shl uint %A, ubyte %Amt2
+	%D = or uint %B, %C
+	ret uint %D
+}
+
+uint %rotli32(uint %A) {
+	%B = shl uint %A, ubyte 5
+	%C = shr uint %A, ubyte 27
+	%D = or uint %B, %C
+	ret uint %D
+}
+
+uint %rotri32(uint %A) {
+	%B = shr uint %A, ubyte 5
+	%C = shl uint %A, ubyte 27
+	%D = or uint %B, %C
+	ret uint %D
+}
+
+ushort %rotl16(ushort %A, ubyte %Amt) {
+	%B = shl ushort %A, ubyte %Amt
+	%Amt2 = sub ubyte 16, %Amt
+	%C = shr ushort %A, ubyte %Amt2
+	%D = or ushort %B, %C
+	ret ushort %D
+}
+
+ushort %rotr16(ushort %A, ubyte %Amt) {
+	%B = shr ushort %A, ubyte %Amt
+	%Amt2 = sub ubyte 16, %Amt
+	%C = shl ushort %A, ubyte %Amt2
+	%D = or ushort %B, %C
+	ret ushort %D
+}
+
+ushort %rotli16(ushort %A) {
+	%B = shl ushort %A, ubyte 5
+	%C = shr ushort %A, ubyte 11
+	%D = or ushort %B, %C
+	ret ushort %D
+}
+
+ushort %rotri16(ushort %A) {
+	%B = shr ushort %A, ubyte 5
+	%C = shl ushort %A, ubyte 11
+	%D = or ushort %B, %C
+	ret ushort %D
+}
+
+ubyte %rotl8(ubyte %A, ubyte %Amt) {
+	%B = shl ubyte %A, ubyte %Amt
+	%Amt2 = sub ubyte 8, %Amt
+	%C = shr ubyte %A, ubyte %Amt2
+	%D = or ubyte %B, %C
+	ret ubyte %D
+}
+
+ubyte %rotr8(ubyte %A, ubyte %Amt) {
+	%B = shr ubyte %A, ubyte %Amt
+	%Amt2 = sub ubyte 8, %Amt
+	%C = shl ubyte %A, ubyte %Amt2
+	%D = or ubyte %B, %C
+	ret ubyte %D
+}
+
+ubyte %rotli8(ubyte %A) {
+	%B = shl ubyte %A, ubyte 5
+	%C = shr ubyte %A, ubyte 3
+	%D = or ubyte %B, %C
+	ret ubyte %D
+}
+
+ubyte %rotri8(ubyte %A) {
+	%B = shr ubyte %A, ubyte 5
+	%C = shl ubyte %A, ubyte 3
+	%D = or ubyte %B, %C
+	ret ubyte %D
+}

