| author | Scott Michel <scottm@aero.org> | 2009-01-26 03:31:40 +0000 |
|---|---|---|
| committer | Scott Michel <scottm@aero.org> | 2009-01-26 03:31:40 +0000 |
| commit | 9e3e4a9219dcb07ecbf0897cbd9d9b39d1cd9630 (patch) | |
| tree | 91caf5e19d12ca8baf4005987f658240ea13adbc /llvm/test/CodeGen/CellSPU/select_bits.ll | |
| parent | 624801e87e06c9f4436a2c3ad41289d5d3a82505 (diff) | |
| download | bcm5719-llvm-9e3e4a9219dcb07ecbf0897cbd9d9b39d1cd9630.tar.gz bcm5719-llvm-9e3e4a9219dcb07ecbf0897cbd9d9b39d1cd9630.zip | |
CellSPU:
- Rename the fcmp.ll test to fcmp32.ll; start adding new double-precision tests to fcmp64.ll
- Fix select_bits.ll test
- Capitulate to the DAGCombiner and move i64 constant loads to instruction
selection (SPUISelDAGtoDAG.cpp).
<rant>DAGCombiner will insert all kinds of 64-bit optimizations after
operation legalization occurs, so we now have to do most of the work that
instruction selection should be doing twice: once to determine whether a v2i64
build_vector can be handled by SelectCode(), which then runs all of the
predicates a second time to select the necessary instructions. But
CellSPU is a good citizen.</rant>
llvm-svn: 62990
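For context: every function in select_bits.ll exercises the bitwise-select pattern that the SPU selb instruction implements, (rC & rB) | (~rC & rA), and the RUN line's expected count of 56 matches the 7 groups of tests in the file (v2i64, v4i32, v8i16, v16i8, i32, i16, i8) times 8 pattern variants each, one selb per function. A minimal sketch of the scalar i32 variant as it reads after the rename; the or/ret lines are elided from the hunk context below, so the %D/ret spelling here is an assumption:

; (or (and rC, rB), (and (not rC), rA)) -- expected to lower to a single selb
define i32 @selectbits_i32_01(i32 %rA, i32 %rB, i32 %rC) {
        %C = and i32 %rC, %rB
        %A = xor i32 %rC, -1        ; ~rC
        %B = and i32 %A, %rA
        %D = or i32 %C, %B          ; assumed result name, not shown in the hunk context
        ret i32 %D
}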
Diffstat (limited to 'llvm/test/CodeGen/CellSPU/select_bits.ll')
| -rw-r--r-- | llvm/test/CodeGen/CellSPU/select_bits.ll | 114 |
1 file changed, 57 insertions, 57 deletions
diff --git a/llvm/test/CodeGen/CellSPU/select_bits.ll b/llvm/test/CodeGen/CellSPU/select_bits.ll
index 3a7334d808c..e83e47606c2 100644
--- a/llvm/test/CodeGen/CellSPU/select_bits.ll
+++ b/llvm/test/CodeGen/CellSPU/select_bits.ll
@@ -1,5 +1,5 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep selb %t1.s | count 280
+; RUN: grep selb %t1.s | count 56
 target datalayout = "E-p:32:32:128-f64:64:128-f32:32:128-i64:32:128-i32:32:128-i16:16:128-i8:8:128-i1:8:128-a0:0:128-v128:128:128-s0:128:128"
 target triple = "spu"
@@ -9,7 +9,7 @@ target triple = "spu"
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define <2 x i64> @selb_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %C = and <2 x i64> %rC, %rB
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %A, %rA
@@ -18,7 +18,7 @@ define <2 x i64> @selb_v2i64_01(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define <2 x i64> @selb_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %C = and <2 x i64> %rB, %rC
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %A, %rA
@@ -27,7 +27,7 @@ define <2 x i64> @selb_v2i64_02(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define <2 x i64> @selb_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %A, %rA
 %C = and <2 x i64> %rB, %rC
@@ -36,7 +36,7 @@ define <2 x i64> @selb_v2i64_03(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define <2 x i64> @selb_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %A, %rA
 %C = and <2 x i64> %rC, %rB
@@ -45,7 +45,7 @@ define <2 x i64> @selb_v2i64_04(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define <2 x i64> @selb_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %C = and <2 x i64> %rC, %rB
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %rA, %A
@@ -54,7 +54,7 @@ define <2 x i64> @selb_v2i64_05(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define <2 x i64> @selb_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %C = and <2 x i64> %rB, %rC
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %rA, %A
@@ -63,7 +63,7 @@ define <2 x i64> @selb_v2i64_06(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define <2 x i64> @selb_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %rA, %A
 %C = and <2 x i64> %rB, %rC
@@ -72,7 +72,7 @@ define <2 x i64> @selb_v2i64_07(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define <2 x i64> @selb_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
+define <2 x i64> @selectbits_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 %A = xor <2 x i64> %rC, < i64 -1, i64 -1 >
 %B = and <2 x i64> %rA, %A
 %C = and <2 x i64> %rC, %rB
@@ -85,7 +85,7 @@ define <2 x i64> @selb_v2i64_08(<2 x i64> %rA, <2 x i64> %rB, <2 x i64> %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define <4 x i32> @selb_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %C = and <4 x i32> %rC, %rB
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 >
 %B = and <4 x i32> %A, %rA
@@ -94,7 +94,7 @@ define <4 x i32> @selb_v4i32_01(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define <4 x i32> @selb_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %C = and <4 x i32> %rB, %rC
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 >
 %B = and <4 x i32> %A, %rA
@@ -103,7 +103,7 @@ define <4 x i32> @selb_v4i32_02(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define <4 x i32> @selb_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1 >
 %B = and <4 x i32> %A, %rA
 %C = and <4 x i32> %rB, %rC
@@ -112,7 +112,7 @@ define <4 x i32> @selb_v4i32_03(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define <4 x i32> @selb_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1>
 %B = and <4 x i32> %A, %rA
 %C = and <4 x i32> %rC, %rB
@@ -121,7 +121,7 @@ define <4 x i32> @selb_v4i32_04(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define <4 x i32> @selb_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %C = and <4 x i32> %rC, %rB
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1>
 %B = and <4 x i32> %rA, %A
@@ -130,7 +130,7 @@ define <4 x i32> @selb_v4i32_05(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define <4 x i32> @selb_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %C = and <4 x i32> %rB, %rC
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1>
 %B = and <4 x i32> %rA, %A
@@ -139,7 +139,7 @@ define <4 x i32> @selb_v4i32_06(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define <4 x i32> @selb_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1>
 %B = and <4 x i32> %rA, %A
 %C = and <4 x i32> %rB, %rC
@@ -148,7 +148,7 @@ define <4 x i32> @selb_v4i32_07(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define <4 x i32> @selb_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
+define <4 x i32> @selectbits_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 %A = xor <4 x i32> %rC, < i32 -1, i32 -1, i32 -1, i32 -1>
 %B = and <4 x i32> %rA, %A
 %C = and <4 x i32> %rC, %rB
@@ -161,7 +161,7 @@ define <4 x i32> @selb_v4i32_08(<4 x i32> %rA, <4 x i32> %rB, <4 x i32> %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define <8 x i16> @selb_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %C = and <8 x i16> %rC, %rB
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -171,7 +171,7 @@ define <8 x i16> @selb_v8i16_01(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define <8 x i16> @selb_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %C = and <8 x i16> %rB, %rC
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -181,7 +181,7 @@ define <8 x i16> @selb_v8i16_02(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define <8 x i16> @selb_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 %B = and <8 x i16> %A, %rA
@@ -191,7 +191,7 @@ define <8 x i16> @selb_v8i16_03(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define <8 x i16> @selb_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 %B = and <8 x i16> %A, %rA
@@ -201,7 +201,7 @@ define <8 x i16> @selb_v8i16_04(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define <8 x i16> @selb_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %C = and <8 x i16> %rC, %rB
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -211,7 +211,7 @@ define <8 x i16> @selb_v8i16_05(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define <8 x i16> @selb_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %C = and <8 x i16> %rB, %rC
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
@@ -221,7 +221,7 @@ define <8 x i16> @selb_v8i16_06(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define <8 x i16> @selb_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 %B = and <8 x i16> %rA, %A
@@ -231,7 +231,7 @@ define <8 x i16> @selb_v8i16_07(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define <8 x i16> @selb_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
+define <8 x i16> @selectbits_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 %A = xor <8 x i16> %rC, < i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1 >
 %B = and <8 x i16> %rA, %A
@@ -245,7 +245,7 @@ define <8 x i16> @selb_v8i16_08(<8 x i16> %rA, <8 x i16> %rB, <8 x i16> %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define <16 x i8> @selb_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %C = and <16 x i8> %rC, %rB
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -257,7 +257,7 @@ define <16 x i8> @selb_v16i8_01(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define <16 x i8> @selb_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %C = and <16 x i8> %rB, %rC
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -269,7 +269,7 @@ define <16 x i8> @selb_v16i8_02(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define <16 x i8> @selb_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -281,7 +281,7 @@ define <16 x i8> @selb_v16i8_03(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define <16 x i8> @selb_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -293,7 +293,7 @@ define <16 x i8> @selb_v16i8_04(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define <16 x i8> @selb_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %C = and <16 x i8> %rC, %rB
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -305,7 +305,7 @@ define <16 x i8> @selb_v16i8_05(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define <16 x i8> @selb_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %C = and <16 x i8> %rB, %rC
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -317,7 +317,7 @@ define <16 x i8> @selb_v16i8_06(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define <16 x i8> @selb_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -329,7 +329,7 @@ define <16 x i8> @selb_v16i8_07(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define <16 x i8> @selb_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
+define <16 x i8> @selectbits_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 %A = xor <16 x i8> %rC, < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1,
@@ -345,7 +345,7 @@ define <16 x i8> @selb_v16i8_08(<16 x i8> %rA, <16 x i8> %rB, <16 x i8> %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define i32 @selb_i32_01(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_01(i32 %rA, i32 %rB, i32 %rC) {
 %C = and i32 %rC, %rB
 %A = xor i32 %rC, -1
 %B = and i32 %A, %rA
@@ -354,7 +354,7 @@ define i32 @selb_i32_01(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define i32 @selb_i32_02(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_02(i32 %rA, i32 %rB, i32 %rC) {
 %C = and i32 %rB, %rC
 %A = xor i32 %rC, -1
 %B = and i32 %A, %rA
@@ -363,7 +363,7 @@ define i32 @selb_i32_02(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define i32 @selb_i32_03(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_03(i32 %rA, i32 %rB, i32 %rC) {
 %A = xor i32 %rC, -1
 %B = and i32 %A, %rA
 %C = and i32 %rB, %rC
@@ -372,7 +372,7 @@ define i32 @selb_i32_03(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define i32 @selb_i32_04(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_04(i32 %rA, i32 %rB, i32 %rC) {
 %A = xor i32 %rC, -1
 %B = and i32 %A, %rA
 %C = and i32 %rC, %rB
@@ -381,7 +381,7 @@ define i32 @selb_i32_04(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define i32 @selb_i32_05(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_05(i32 %rA, i32 %rB, i32 %rC) {
 %C = and i32 %rC, %rB
 %A = xor i32 %rC, -1
 %B = and i32 %rA, %A
@@ -390,7 +390,7 @@ define i32 @selb_i32_05(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define i32 @selb_i32_06(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_06(i32 %rA, i32 %rB, i32 %rC) {
 %C = and i32 %rB, %rC
 %A = xor i32 %rC, -1
 %B = and i32 %rA, %A
@@ -399,7 +399,7 @@ define i32 @selb_i32_06(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define i32 @selb_i32_07(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_07(i32 %rA, i32 %rB, i32 %rC) {
 %A = xor i32 %rC, -1
 %B = and i32 %rA, %A
 %C = and i32 %rB, %rC
@@ -408,7 +408,7 @@ define i32 @selb_i32_07(i32 %rA, i32 %rB, i32 %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define i32 @selb_i32_08(i32 %rA, i32 %rB, i32 %rC) {
+define i32 @selectbits_i32_08(i32 %rA, i32 %rB, i32 %rC) {
 %A = xor i32 %rC, -1
 %B = and i32 %rA, %A
 %C = and i32 %rC, %rB
@@ -421,7 +421,7 @@ define i32 @selb_i32_08(i32 %rA, i32 %rB, i32 %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define i16 @selb_i16_01(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_01(i16 %rA, i16 %rB, i16 %rC) {
 %C = and i16 %rC, %rB
 %A = xor i16 %rC, -1
 %B = and i16 %A, %rA
@@ -430,7 +430,7 @@ define i16 @selb_i16_01(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define i16 @selb_i16_02(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_02(i16 %rA, i16 %rB, i16 %rC) {
 %C = and i16 %rB, %rC
 %A = xor i16 %rC, -1
 %B = and i16 %A, %rA
@@ -439,7 +439,7 @@ define i16 @selb_i16_02(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define i16 @selb_i16_03(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_03(i16 %rA, i16 %rB, i16 %rC) {
 %A = xor i16 %rC, -1
 %B = and i16 %A, %rA
 %C = and i16 %rB, %rC
@@ -448,7 +448,7 @@ define i16 @selb_i16_03(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define i16 @selb_i16_04(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_04(i16 %rA, i16 %rB, i16 %rC) {
 %A = xor i16 %rC, -1
 %B = and i16 %A, %rA
 %C = and i16 %rC, %rB
@@ -457,7 +457,7 @@ define i16 @selb_i16_04(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define i16 @selb_i16_05(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_05(i16 %rA, i16 %rB, i16 %rC) {
 %C = and i16 %rC, %rB
 %A = xor i16 %rC, -1
 %B = and i16 %rA, %A
@@ -466,7 +466,7 @@ define i16 @selb_i16_05(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define i16 @selb_i16_06(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_06(i16 %rA, i16 %rB, i16 %rC) {
 %C = and i16 %rB, %rC
 %A = xor i16 %rC, -1
 %B = and i16 %rA, %A
@@ -475,7 +475,7 @@ define i16 @selb_i16_06(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define i16 @selb_i16_07(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_07(i16 %rA, i16 %rB, i16 %rC) {
 %A = xor i16 %rC, -1
 %B = and i16 %rA, %A
 %C = and i16 %rB, %rC
@@ -484,7 +484,7 @@ define i16 @selb_i16_07(i16 %rA, i16 %rB, i16 %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define i16 @selb_i16_08(i16 %rA, i16 %rB, i16 %rC) {
+define i16 @selectbits_i16_08(i16 %rA, i16 %rB, i16 %rC) {
 %A = xor i16 %rC, -1
 %B = and i16 %rA, %A
 %C = and i16 %rC, %rB
@@ -497,7 +497,7 @@ define i16 @selb_i16_08(i16 %rA, i16 %rB, i16 %rC) {
 ;-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~
 ; (or (and rC, rB), (and (not rC), rA))
-define i8 @selb_i8_01(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_01(i8 %rA, i8 %rB, i8 %rC) {
 %C = and i8 %rC, %rB
 %A = xor i8 %rC, -1
 %B = and i8 %A, %rA
@@ -506,7 +506,7 @@ define i8 @selb_i8_01(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and rB, rC), (and (not rC), rA))
-define i8 @selb_i8_02(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_02(i8 %rA, i8 %rB, i8 %rC) {
 %C = and i8 %rB, %rC
 %A = xor i8 %rC, -1
 %B = and i8 %A, %rA
@@ -515,7 +515,7 @@ define i8 @selb_i8_02(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and (not rC), rA), (and rB, rC))
-define i8 @selb_i8_03(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_03(i8 %rA, i8 %rB, i8 %rC) {
 %A = xor i8 %rC, -1
 %B = and i8 %A, %rA
 %C = and i8 %rB, %rC
@@ -524,7 +524,7 @@ define i8 @selb_i8_03(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and (not rC), rA), (and rC, rB))
-define i8 @selb_i8_04(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_04(i8 %rA, i8 %rB, i8 %rC) {
 %A = xor i8 %rC, -1
 %B = and i8 %A, %rA
 %C = and i8 %rC, %rB
@@ -533,7 +533,7 @@ define i8 @selb_i8_04(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and rC, rB), (and rA, (not rC)))
-define i8 @selb_i8_05(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_05(i8 %rA, i8 %rB, i8 %rC) {
 %C = and i8 %rC, %rB
 %A = xor i8 %rC, -1
 %B = and i8 %rA, %A
@@ -542,7 +542,7 @@ define i8 @selb_i8_05(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and rB, rC), (and rA, (not rC)))
-define i8 @selb_i8_06(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_06(i8 %rA, i8 %rB, i8 %rC) {
 %C = and i8 %rB, %rC
 %A = xor i8 %rC, -1
 %B = and i8 %rA, %A
@@ -551,7 +551,7 @@ define i8 @selb_i8_06(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and rA, (not rC)), (and rB, rC))
-define i8 @selb_i8_07(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_07(i8 %rA, i8 %rB, i8 %rC) {
 %A = xor i8 %rC, -1
 %B = and i8 %rA, %A
 %C = and i8 %rB, %rC
@@ -560,7 +560,7 @@ define i8 @selb_i8_07(i8 %rA, i8 %rB, i8 %rC) {
 }
 ; (or (and rA, (not rC)), (and rC, rB))
-define i8 @selb_i8_08(i8 %rA, i8 %rB, i8 %rC) {
+define i8 @selectbits_i8_08(i8 %rA, i8 %rB, i8 %rC) {
 %A = xor i8 %rC, -1
 %B = and i8 %rA, %A
 %C = and i8 %rC, %rB

