Diffstat (limited to 'llvm')
 llvm/lib/Target/X86/X86InstrInfo.cpp              |  6 ++++
 llvm/test/CodeGen/X86/machine-combiner-int-vec.ll | 68 ++++++++++++++++++++++
 2 files changed, 74 insertions(+), 0 deletions(-)
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
index 8b883162999..446d4bce155 100644
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -6408,6 +6408,12 @@ static bool isAssociativeAndCommutative(const MachineInstr &Inst) {
case X86::IMUL16rr:
case X86::IMUL32rr:
case X86::IMUL64rr:
+ case X86::PANDrr:
+ case X86::PORrr:
+ case X86::PXORrr:
+ case X86::VPANDrr:
+ case X86::VPORrr:
+ case X86::VPXORrr:
// Normal min/max instructions are not commutative because of NaN and signed
// zero semantics, but these are. Thus, there's no need to check for global
// relaxed math; the instructions themselves have the properties we need.
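(For context: isAssociativeAndCommutative is a static helper consulted when the MachineCombiner looks for reassociation candidates, so flagging these opcodes lets it rewrite a serial chain of dependent logic ops into a shallower tree. A minimal IR-level sketch of the effect, with hypothetical value names; the actual rewrite happens on machine instructions after instruction selection:

  ; before: add -> and -> and is a 3-deep dependent chain
  %t0 = add <4 x i32> %x0, %x1
  %t1 = and <4 x i32> %x2, %t0
  %t2 = and <4 x i32> %x3, %t1

  ; after reassociation: the (%x2 & %x3) half is independent of the add,
  ; so the critical path shrinks to 2 ops
  %t0 = add <4 x i32> %x0, %x1
  %m0 = and <4 x i32> %x2, %x3
  %t2 = and <4 x i32> %t0, %m0

This is exactly the shape the CHECK lines in the new test below expect, e.g. pand %xmm3, %xmm2 executing independently of the paddd.)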
diff --git a/llvm/test/CodeGen/X86/machine-combiner-int-vec.ll b/llvm/test/CodeGen/X86/machine-combiner-int-vec.ll
new file mode 100644
index 00000000000..8316f66c1dc
--- /dev/null
+++ b/llvm/test/CodeGen/X86/machine-combiner-int-vec.ll
@@ -0,0 +1,68 @@
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=sse < %s | FileCheck %s --check-prefix=SSE
+; RUN: llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx < %s | FileCheck %s --check-prefix=AVX
+
+; Verify that 128-bit vector logical ops are reassociated.
+
+define <4 x i32> @reassociate_and_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_and_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: pand %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_and_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpand %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %t0 = add <4 x i32> %x0, %x1
+ %t1 = and <4 x i32> %x2, %t0
+ %t2 = and <4 x i32> %x3, %t1
+ ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_or_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_or_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_or_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpor %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %t0 = add <4 x i32> %x0, %x1
+ %t1 = or <4 x i32> %x2, %t0
+ %t2 = or <4 x i32> %x3, %t1
+ ret <4 x i32> %t2
+}
+
+define <4 x i32> @reassociate_xor_v4i32(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, <4 x i32> %x3) {
+; SSE-LABEL: reassociate_xor_v4i32:
+; SSE: # BB#0:
+; SSE-NEXT: paddd %xmm1, %xmm0
+; SSE-NEXT: pxor %xmm3, %xmm2
+; SSE-NEXT: pxor %xmm2, %xmm0
+; SSE-NEXT: retq
+;
+; AVX-LABEL: reassociate_xor_v4i32:
+; AVX: # BB#0:
+; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vpxor %xmm3, %xmm2, %xmm1
+; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
+
+ %t0 = add <4 x i32> %x0, %x1
+ %t1 = xor <4 x i32> %x2, %t0
+ %t2 = xor <4 x i32> %x3, %t1
+ ret <4 x i32> %t2
+}
+
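(The test can also be exercised by hand with either RUN line, e.g.:

  llc -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=avx machine-combiner-int-vec.ll -o -

and the emitted vpand/vpor/vpxor sequences should match the AVX CHECK lines above. The SSE and AVX bodies differ only because SSE uses destructive two-operand forms while AVX has non-destructive three-operand forms.)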