author     Sanjay Patel <spatel@rotateright.com>    2018-02-08 23:21:44 +0000
committer  Sanjay Patel <spatel@rotateright.com>    2018-02-08 23:21:44 +0000
commit     b7e13938a94cfa96968ad3beca9bd3d5c9e0d669 (patch)
tree       bb2b5b489a1d9cedb830519d181410a97e0e9a76
parent     03dd6f57390217378942eb74cf23bfd47083bb40 (diff)
[x86] consolidate and add tests for undef binop folds; NFC
As was already shown in the div/rem tests and noted in PR36305, the behavior is inconsistent, but it's not limited to div/rem.

llvm-svn: 324678
-rw-r--r--  llvm/test/CodeGen/X86/combine-sdiv.ll   35
-rw-r--r--  llvm/test/CodeGen/X86/combine-srem.ll   35
-rw-r--r--  llvm/test/CodeGen/X86/combine-udiv.ll   35
-rw-r--r--  llvm/test/CodeGen/X86/combine-urem.ll   35
-rw-r--r--  llvm/test/CodeGen/X86/undef-ops.ll      434
5 files changed, 434 insertions(+), 140 deletions(-)
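
The inconsistency referred to in the commit message is visible in the new undef-ops.ll tests below: the scalar forms of mul, and, or, and the undef-LHS div/rem fold to a constant (e.g. xorl %eax, %eax or movl $-1, %eax), while the equivalent <4 x i32> forms are left unfolded and simply return their input. As a rough sketch of how to reproduce one of these results locally (the path is relative to an LLVM source checkout and assumes llc, FileCheck, and the update script are built and on PATH):

  # Run the new test by hand, mirroring its RUN line:
  llc < llvm/test/CodeGen/X86/undef-ops.ll -mtriple=x86_64-unknown-unknown \
    | FileCheck llvm/test/CodeGen/X86/undef-ops.ll

  # The CHECK lines were autogenerated with the script noted in the test header:
  utils/update_llc_test_checks.py llvm/test/CodeGen/X86/undef-ops.ll
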
diff --git a/llvm/test/CodeGen/X86/combine-sdiv.ll b/llvm/test/CodeGen/X86/combine-sdiv.ll
index 7bba1756b63..665634527c8 100644
--- a/llvm/test/CodeGen/X86/combine-sdiv.ll
+++ b/llvm/test/CodeGen/X86/combine-sdiv.ll
@@ -6,41 +6,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512bw,+avx512vl | FileCheck %s --check-prefixes=CHECK,AVX,AVX2ORLATER,AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx,+xop | FileCheck %s --check-prefixes=CHECK,AVX,XOP
-; fold (sdiv undef, x) -> 0
-define i32 @combine_sdiv_undef0(i32 %x) {
-; CHECK-LABEL: combine_sdiv_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: retq
- %1 = sdiv i32 undef, %x
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_sdiv_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = sdiv <4 x i32> undef, %x
- ret <4 x i32> %1
-}
-
-; fold (sdiv x, undef) -> undef
-define i32 @combine_sdiv_undef1(i32 %x) {
-; CHECK-LABEL: combine_sdiv_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = sdiv i32 %x, undef
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_sdiv_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = sdiv <4 x i32> %x, undef
- ret <4 x i32> %1
-}
-
; fold (sdiv x, 1) -> x
define i32 @combine_sdiv_by_one(i32 %x) {
; CHECK-LABEL: combine_sdiv_by_one:
diff --git a/llvm/test/CodeGen/X86/combine-srem.ll b/llvm/test/CodeGen/X86/combine-srem.ll
index 740bece1374..0d9b35dbc42 100644
--- a/llvm/test/CodeGen/X86/combine-srem.ll
+++ b/llvm/test/CodeGen/X86/combine-srem.ll
@@ -3,41 +3,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
-; fold (srem undef, x) -> 0
-define i32 @combine_srem_undef0(i32 %x) {
-; CHECK-LABEL: combine_srem_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: retq
- %1 = srem i32 undef, %x
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_srem_undef0(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_srem_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = srem <4 x i32> undef, %x
- ret <4 x i32> %1
-}
-
-; fold (srem x, undef) -> undef
-define i32 @combine_srem_undef1(i32 %x) {
-; CHECK-LABEL: combine_srem_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = srem i32 %x, undef
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_srem_undef1(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_srem_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = srem <4 x i32> %x, undef
- ret <4 x i32> %1
-}
-
; fold (srem x, 1) -> 0
define i32 @combine_srem_by_one(i32 %x) {
; CHECK-LABEL: combine_srem_by_one:
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 7313091e64d..6b60065debd 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -3,41 +3,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
-; fold (udiv undef, x) -> 0
-define i32 @combine_udiv_undef0(i32 %x) {
-; CHECK-LABEL: combine_udiv_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: retq
- %1 = udiv i32 undef, %x
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_udiv_undef0(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_udiv_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = udiv <4 x i32> undef, %x
- ret <4 x i32> %1
-}
-
-; fold (udiv x, undef) -> undef
-define i32 @combine_udiv_undef1(i32 %x) {
-; CHECK-LABEL: combine_udiv_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = udiv i32 %x, undef
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_udiv_undef1(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_udiv_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = udiv <4 x i32> %x, undef
- ret <4 x i32> %1
-}
-
; fold (udiv x, 1) -> x
define i32 @combine_udiv_by_one(i32 %x) {
; CHECK-LABEL: combine_udiv_by_one:
diff --git a/llvm/test/CodeGen/X86/combine-urem.ll b/llvm/test/CodeGen/X86/combine-urem.ll
index b00bb3adc7b..c698e8b7236 100644
--- a/llvm/test/CodeGen/X86/combine-urem.ll
+++ b/llvm/test/CodeGen/X86/combine-urem.ll
@@ -3,41 +3,6 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=CHECK --check-prefix=AVX --check-prefix=AVX2
-; fold (urem undef, x) -> 0
-define i32 @combine_urem_undef0(i32 %x) {
-; CHECK-LABEL: combine_urem_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: xorl %eax, %eax
-; CHECK-NEXT: retq
- %1 = urem i32 undef, %x
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_urem_undef0(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_urem_undef0:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = urem <4 x i32> undef, %x
- ret <4 x i32> %1
-}
-
-; fold (urem x, undef) -> undef
-define i32 @combine_urem_undef1(i32 %x) {
-; CHECK-LABEL: combine_urem_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = urem i32 %x, undef
- ret i32 %1
-}
-
-define <4 x i32> @combine_vec_urem_undef1(<4 x i32> %x) {
-; CHECK-LABEL: combine_vec_urem_undef1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: retq
- %1 = urem <4 x i32> %x, undef
- ret <4 x i32> %1
-}
-
; fold (urem x, 1) -> 0
define i32 @combine_urem_by_one(i32 %x) {
; CHECK-LABEL: combine_urem_by_one:
diff --git a/llvm/test/CodeGen/X86/undef-ops.ll b/llvm/test/CodeGen/X86/undef-ops.ll
new file mode 100644
index 00000000000..18a408a7fc5
--- /dev/null
+++ b/llvm/test/CodeGen/X86/undef-ops.ll
@@ -0,0 +1,434 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s
+
+define i32 @add_undef_rhs(i32 %x) {
+; CHECK-LABEL: add_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = add i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @add_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: add_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = add <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @add_undef_lhs(i32 %x) {
+; CHECK-LABEL: add_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = add i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @add_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: add_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = add <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @sub_undef_rhs(i32 %x) {
+; CHECK-LABEL: sub_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sub i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @sub_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: sub_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sub <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @sub_undef_lhs(i32 %x) {
+; CHECK-LABEL: sub_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sub i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @sub_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: sub_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sub <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @mul_undef_rhs(i32 %x) {
+; CHECK-LABEL: mul_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = mul i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @mul_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: mul_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = mul <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @mul_undef_lhs(i32 %x) {
+; CHECK-LABEL: mul_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = mul i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @mul_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: mul_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = mul <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @sdiv_undef_rhs(i32 %x) {
+; CHECK-LABEL: sdiv_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sdiv i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @sdiv_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: sdiv_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sdiv <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @sdiv_undef_lhs(i32 %x) {
+; CHECK-LABEL: sdiv_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = sdiv i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @sdiv_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: sdiv_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = sdiv <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @udiv_undef_rhs(i32 %x) {
+; CHECK-LABEL: udiv_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = udiv i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @udiv_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: udiv_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = udiv <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @udiv_undef_lhs(i32 %x) {
+; CHECK-LABEL: udiv_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = udiv i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @udiv_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: udiv_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = udiv <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @srem_undef_rhs(i32 %x) {
+; CHECK-LABEL: srem_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = srem i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @srem_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: srem_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = srem <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @srem_undef_lhs(i32 %x) {
+; CHECK-LABEL: srem_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = srem i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @srem_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: srem_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = srem <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @urem_undef_rhs(i32 %x) {
+; CHECK-LABEL: urem_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = urem i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @urem_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: urem_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = urem <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @urem_undef_lhs(i32 %x) {
+; CHECK-LABEL: urem_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = urem i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @urem_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: urem_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = urem <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @ashr_undef_rhs(i32 %x) {
+; CHECK-LABEL: ashr_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl %edi, %eax
+; CHECK-NEXT: retq
+ %r = ashr i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @ashr_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: ashr_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = ashr <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @ashr_undef_lhs(i32 %x) {
+; CHECK-LABEL: ashr_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = ashr i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @ashr_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: ashr_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = ashr <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @lshr_undef_rhs(i32 %x) {
+; CHECK-LABEL: lshr_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = lshr i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @lshr_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: lshr_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = lshr <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @lshr_undef_lhs(i32 %x) {
+; CHECK-LABEL: lshr_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = lshr i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @lshr_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: lshr_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = lshr <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @shl_undef_rhs(i32 %x) {
+; CHECK-LABEL: shl_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = shl i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @shl_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: shl_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = shl <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @shl_undef_lhs(i32 %x) {
+; CHECK-LABEL: shl_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = shl i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @shl_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: shl_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = shl <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @and_undef_rhs(i32 %x) {
+; CHECK-LABEL: and_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = and i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @and_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: and_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = and <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @and_undef_lhs(i32 %x) {
+; CHECK-LABEL: and_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: retq
+ %r = and i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @and_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: and_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = and <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @or_undef_rhs(i32 %x) {
+; CHECK-LABEL: or_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %r = or i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @or_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: or_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = or <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @or_undef_lhs(i32 %x) {
+; CHECK-LABEL: or_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: movl $-1, %eax
+; CHECK-NEXT: retq
+ %r = or i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @or_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: or_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = or <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+
+define i32 @xor_undef_rhs(i32 %x) {
+; CHECK-LABEL: xor_undef_rhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = xor i32 %x, undef
+ ret i32 %r
+}
+
+define <4 x i32> @xor_undef_rhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: xor_undef_rhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = xor <4 x i32> %x, undef
+ ret <4 x i32> %r
+}
+
+define i32 @xor_undef_lhs(i32 %x) {
+; CHECK-LABEL: xor_undef_lhs:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = xor i32 undef, %x
+ ret i32 %r
+}
+
+define <4 x i32> @xor_undef_lhs_vec(<4 x i32> %x) {
+; CHECK-LABEL: xor_undef_lhs_vec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: retq
+ %r = xor <4 x i32> undef, %x
+ ret <4 x i32> %r
+}
+