| author | Scott Michel <scottm@aero.org> | 2007-12-17 22:32:34 +0000 |
|---|---|---|
| committer | Scott Michel <scottm@aero.org> | 2007-12-17 22:32:34 +0000 |
| commit | c5cccb9e60c4f74a0f66585054728550559fa38d (patch) | |
| tree | 3a68ba9543865d73c9bfd37a69766e9f38a6d84a /llvm/test/CodeGen/CellSPU/nand.ll | |
| parent | bd5362511d31142fc62d5f91d13a51f499f509f9 (diff) | |
| download | bcm5719-llvm-c5cccb9e60c4f74a0f66585054728550559fa38d.tar.gz bcm5719-llvm-c5cccb9e60c4f74a0f66585054728550559fa38d.zip | |
- Restore some i8 functionality in CellSPU
- New test case: nand.ll
llvm-svn: 45130
Diffstat (limited to 'llvm/test/CodeGen/CellSPU/nand.ll')
| -rw-r--r-- | llvm/test/CodeGen/CellSPU/nand.ll | 119 |
1 file changed, 119 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/CellSPU/nand.ll b/llvm/test/CodeGen/CellSPU/nand.ll
new file mode 100644
index 00000000000..091f4b2edcc
--- /dev/null
+++ b/llvm/test/CodeGen/CellSPU/nand.ll
@@ -0,0 +1,119 @@
+; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
+; RUN: grep nand %t1.s | count 90
+; RUN: grep and %t1.s | count 94
+; RUN: grep xsbh %t1.s | count 2
+; RUN: grep xshw %t1.s | count 4
+
+define <4 x i32> @nand_v4i32_1(<4 x i32> %arg1, <4 x i32> %arg2) {
+        %A = and <4 x i32> %arg2, %arg1      ; <<4 x i32>> [#uses=1]
+        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+        ret <4 x i32> %B
+}
+
+define <4 x i32> @nand_v4i32_2(<4 x i32> %arg1, <4 x i32> %arg2) {
+        %A = and <4 x i32> %arg1, %arg2      ; <<4 x i32>> [#uses=1]
+        %B = xor <4 x i32> %A, < i32 -1, i32 -1, i32 -1, i32 -1 >
+        ret <4 x i32> %B
+}
+
+define <8 x i16> @nand_v8i16_1(<8 x i16> %arg1, <8 x i16> %arg2) {
+        %A = and <8 x i16> %arg2, %arg1      ; <<8 x i16>> [#uses=1]
+        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
+                                  i16 -1, i16 -1, i16 -1, i16 -1 >
+        ret <8 x i16> %B
+}
+
+define <8 x i16> @nand_v8i16_2(<8 x i16> %arg1, <8 x i16> %arg2) {
+        %A = and <8 x i16> %arg1, %arg2      ; <<8 x i16>> [#uses=1]
+        %B = xor <8 x i16> %A, < i16 -1, i16 -1, i16 -1, i16 -1,
+                                  i16 -1, i16 -1, i16 -1, i16 -1 >
+        ret <8 x i16> %B
+}
+
+define <16 x i8> @nand_v16i8_1(<16 x i8> %arg1, <16 x i8> %arg2) {
+        %A = and <16 x i8> %arg2, %arg1      ; <<16 x i8>> [#uses=1]
+        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                                  i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                                  i8 -1, i8 -1, i8 -1, i8 -1 >
+        ret <16 x i8> %B
+}
+
+define <16 x i8> @nand_v16i8_2(<16 x i8> %arg1, <16 x i8> %arg2) {
+        %A = and <16 x i8> %arg1, %arg2      ; <<16 x i8>> [#uses=1]
+        %B = xor <16 x i8> %A, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                                  i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
+                                  i8 -1, i8 -1, i8 -1, i8 -1 >
+        ret <16 x i8> %B
+}
+
+define i32 @nand_i32_1(i32 %arg1, i32 %arg2) {
+        %A = and i32 %arg2, %arg1            ; <i32> [#uses=1]
+        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
+        ret i32 %B
+}
+
+define i32 @nand_i32_2(i32 %arg1, i32 %arg2) {
+        %A = and i32 %arg1, %arg2            ; <i32> [#uses=1]
+        %B = xor i32 %A, -1                  ; <i32> [#uses=1]
+        ret i32 %B
+}
+
+define i16 @nand_i16_1(i16 signext %arg1, i16 signext %arg2) signext {
+        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
+        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
+        ret i16 %B
+}
+
+define i16 @nand_i16_2(i16 signext %arg1, i16 signext %arg2) signext {
+        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
+        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
+        ret i16 %B
+}
+
+define i16 @nand_i16u_1(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
+        %A = and i16 %arg2, %arg1            ; <i16> [#uses=1]
+        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
+        ret i16 %B
+}
+
+define i16 @nand_i16u_2(i16 zeroext %arg1, i16 zeroext %arg2) zeroext {
+        %A = and i16 %arg1, %arg2            ; <i16> [#uses=1]
+        %B = xor i16 %A, -1                  ; <i16> [#uses=1]
+        ret i16 %B
+}
+
+define i8 @nand_i8u_1(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
+        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
+
+define i8 @nand_i8u_2(i8 zeroext %arg1, i8 zeroext %arg2) zeroext {
+        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
+
+define i8 @nand_i8_1(i8 signext %arg1, i8 signext %arg2) signext {
+        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
+
+define i8 @nand_i8_2(i8 signext %arg1, i8 signext %arg2) signext {
+        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
+
+define i8 @nand_i8_3(i8 %arg1, i8 %arg2) {
+        %A = and i8 %arg2, %arg1             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
+
+define i8 @nand_i8_4(i8 %arg1, i8 %arg2) {
+        %A = and i8 %arg1, %arg2             ; <i8> [#uses=1]
+        %B = xor i8 %A, -1                   ; <i8> [#uses=1]
+        ret i8 %B
+}
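For context on what the test expects the backend to do with this pattern: in LLVM IR, `xor (and a, b), -1` computes a bitwise NAND, which the CellSPU target can select as its single `nand` instruction instead of an `and` followed by a negation; the `xsbh`/`xshw` greps cover the byte-to-halfword and halfword-to-word sign extensions that i8 and i16 values need on SPU. A minimal C sketch of the operation each test function computes (the helper name `nand_u32` is illustrative only, not part of the patch):

```c
#include <stdint.h>

/* Illustrative only: the C-level equivalent of the IR idiom
 *   %A = and i32 %a, %b
 *   %B = xor i32 %A, -1
 * i.e. ~(a & b), a bitwise NAND. On CellSPU the instruction selector
 * is expected to fold the and+xor pair into one 'nand' instruction. */
static inline uint32_t nand_u32(uint32_t a, uint32_t b) {
    return ~(a & b);
}
```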

