author     Sanjay Patel <spatel@rotateright.com>   2017-06-18 14:45:23 +0000
committer  Sanjay Patel <spatel@rotateright.com>   2017-06-18 14:45:23 +0000
commit     44e3d4c812b0a3eda2b9b0fbd67a30ebcececab5 (patch)
tree       de9025f990a799f531e4496a97399b5cca787d73 /llvm/test/CodeGen/X86
parent     020bf47c6a7c58f4899d4226f23dcf14bfbd7ef4 (diff)
[x86] adjust test constants to maintain coverage; NFC
An increment (add 1) could be transformed into a subtract of -1, and we'd then lose coverage for these patterns.

llvm-svn: 305646
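A minimal LLVM IR sketch (illustrative only, not part of the patch; the function names are hypothetical) of the concern described above: the two functions below are semantically identical, so if a canonicalization rewrote the "add 1" form as the "sub -1" form, a test written against the increment would no longer exercise the broadcast-add lowering it was meant to cover.

define <8 x i32> @increment_as_add(<8 x i32> %x) {
  ; The shape the tests originally used: a splat-of-1 addend.
  %r = add <8 x i32> %x, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  ret <8 x i32> %r
}

define <8 x i32> @increment_as_sub(<8 x i32> %x) {
  ; Equivalent form: x + 1 == x - (-1). If the increment were canonicalized
  ; this way, the broadcast-add CHECK lines would stop matching.
  %r = sub <8 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
  ret <8 x i32> %r
}

Bumping each test's constant to a distinct value other than 1 sidesteps that risk while keeping the broadcastable splat shape of the operand.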
Diffstat (limited to 'llvm/test/CodeGen/X86')
-rw-r--r--  llvm/test/CodeGen/X86/avx2-vbroadcast.ll |  2
-rw-r--r--  llvm/test/CodeGen/X86/avx512-arith.ll    |  8
-rw-r--r--  llvm/test/CodeGen/X86/avx512-logic.ll    | 24
-rw-r--r--  llvm/test/CodeGen/X86/avx512vl-logic.ll  | 32
4 files changed, 33 insertions(+), 33 deletions(-)
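Reader's note (an assumption drawn from the in-test "Force the execution domain with an add" comments, not from the patch itself): in the logic tests below, the integer add applied to one operand pins the subsequent bitwise op to the integer execution domain, so instruction selection is expected to emit the vpandd/vpord/vpxord/vpandq-style instructions the CHECK lines match. A minimal sketch with a hypothetical function name:

define <16 x i32> @force_int_domain(<16 x i32> %a, <16 x i32> %b) {
  ; The integer add keeps the following AND in the integer domain.
  %a2 = add <16 x i32> %a, <i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7, i32 7>
  ; Expected to select an integer-domain instruction such as vpandd.
  %x = and <16 x i32> %a2, %b
  ret <16 x i32> %x
}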
diff --git a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
index ba47e2ba15c..971d03af377 100644
--- a/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
+++ b/llvm/test/CodeGen/X86/avx2-vbroadcast.ll
@@ -653,7 +653,7 @@ define <8 x i32> @V111(<8 x i32> %in) nounwind uwtable readnone ssp {
; X64-AVX512VL-NEXT: vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0
; X64-AVX512VL-NEXT: retq
entry:
- %g = add <8 x i32> %in, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %g = add <8 x i32> %in, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
ret <8 x i32> %g
}
diff --git a/llvm/test/CodeGen/X86/avx512-arith.ll b/llvm/test/CodeGen/X86/avx512-arith.ll
index 26be2084056..d96b5882556 100644
--- a/llvm/test/CodeGen/X86/avx512-arith.ll
+++ b/llvm/test/CodeGen/X86/avx512-arith.ll
@@ -348,7 +348,7 @@ define <8 x i64> @vpaddq_broadcast_test(<8 x i64> %i) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <8 x i64> %i, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %x = add <8 x i64> %i, <i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2, i64 2>
ret <8 x i64> %x
}
@@ -394,7 +394,7 @@ define <16 x i32> @vpaddd_broadcast_test(<16 x i32> %i) nounwind {
; CHECK: ## BB#0:
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0
; CHECK-NEXT: retq
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
ret <16 x i32> %x
}
@@ -446,7 +446,7 @@ define <16 x i32> @vpaddd_mask_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> %i
ret <16 x i32> %r
}
@@ -473,7 +473,7 @@ define <16 x i32> @vpaddd_maskz_broadcast_test(<16 x i32> %i, <16 x i32> %mask1)
; CHECK-NEXT: vpaddd {{.*}}(%rip){1to16}, %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%mask = icmp ne <16 x i32> %mask1, zeroinitializer
- %x = add <16 x i32> %i, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %x = add <16 x i32> %i, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%r = select <16 x i1> %mask, <16 x i32> %x, <16 x i32> zeroinitializer
ret <16 x i32> %r
}
diff --git a/llvm/test/CodeGen/X86/avx512-logic.ll b/llvm/test/CodeGen/X86/avx512-logic.ll
index 7153c1ffaaa..6e08753dbbb 100644
--- a/llvm/test/CodeGen/X86/avx512-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512-logic.ll
@@ -11,8 +11,8 @@ define <16 x i32> @vpandd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnon
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2,
+ i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -25,8 +25,8 @@ define <16 x i32> @vpandnd(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readno
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3,
+ i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <16 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1,
i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <16 x i32> %a2, %b2
@@ -41,8 +41,8 @@ define <16 x i32> @vpord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnone
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4,
+ i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -55,8 +55,8 @@ define <16 x i32> @vpxord(<16 x i32> %a, <16 x i32> %b) nounwind uwtable readnon
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <16 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1,
- i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5,
+ i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <16 x i32> %a2, %b
ret <16 x i32> %x
}
@@ -69,7 +69,7 @@ define <8 x i64> @vpandq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone s
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6, i64 6>
%x = and <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -82,7 +82,7 @@ define <8 x i64> @vpandnq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
%b2 = xor <8 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <8 x i64> %a2, %b2
ret <8 x i64> %x
@@ -96,7 +96,7 @@ define <8 x i64> @vporq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone ss
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
%x = or <8 x i64> %a2, %b
ret <8 x i64> %x
}
@@ -109,7 +109,7 @@ define <8 x i64> @vpxorq(<8 x i64> %a, <8 x i64> %b) nounwind uwtable readnone s
; ALL-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i64> %a, <i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <8 x i64> %a, <i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9, i64 9>
%x = xor <8 x i64> %a2, %b
ret <8 x i64> %x
}
diff --git a/llvm/test/CodeGen/X86/avx512vl-logic.ll b/llvm/test/CodeGen/X86/avx512vl-logic.ll
index 83fa8d4c34c..6e697cf59a4 100644
--- a/llvm/test/CodeGen/X86/avx512vl-logic.ll
+++ b/llvm/test/CodeGen/X86/avx512vl-logic.ll
@@ -12,7 +12,7 @@ define <8 x i32> @vpandd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
%x = and <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -25,7 +25,7 @@ define <8 x i32> @vpandnd256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readno
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
%b2 = xor <8 x i32> %a, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <8 x i32> %a2, %b2
ret <8 x i32> %x
@@ -39,7 +39,7 @@ define <8 x i32> @vpord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
%x = or <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -52,7 +52,7 @@ define <8 x i32> @vpxord256(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <8 x i32> %a, <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
%x = xor <8 x i32> %a2, %b
ret <8 x i32> %x
}
@@ -65,7 +65,7 @@ define <4 x i64> @vpandq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 6, i64 6, i64 6, i64 6>
%x = and <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -78,7 +78,7 @@ define <4 x i64> @vpandnq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readno
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
%b2 = xor <4 x i64> %b, <i64 -1, i64 -1, i64 -1, i64 -1>
%x = and <4 x i64> %a2, %b2
ret <4 x i64> %x
@@ -92,7 +92,7 @@ define <4 x i64> @vporq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 21, i64 21, i64 21, i64 21>
%x = or <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -105,7 +105,7 @@ define <4 x i64> @vpxorq256(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i64> %a, <i64 1, i64 1, i64 1, i64 1>
+ %a2 = add <4 x i64> %a, <i64 22, i64 22, i64 22, i64 22>
%x = xor <4 x i64> %a2, %b
ret <4 x i64> %x
}
@@ -120,7 +120,7 @@ define <4 x i32> @vpandd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 8, i32 8, i32 8, i32 8>
%x = and <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -133,7 +133,7 @@ define <4 x i32> @vpandnd128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readno
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 9, i32 9, i32 9, i32 9>
%b2 = xor <4 x i32> %b, <i32 -1, i32 -1, i32 -1, i32 -1>
%x = and <4 x i32> %a2, %b2
ret <4 x i32> %x
@@ -147,7 +147,7 @@ define <4 x i32> @vpord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnone
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 10, i32 10, i32 10, i32 10>
%x = or <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -160,7 +160,7 @@ define <4 x i32> @vpxord128(<4 x i32> %a, <4 x i32> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <4 x i32> %a, <i32 1, i32 1, i32 1, i32 1>
+ %a2 = add <4 x i32> %a, <i32 11, i32 11, i32 11, i32 11>
%x = xor <4 x i32> %a2, %b
ret <4 x i32> %x
}
@@ -173,7 +173,7 @@ define <2 x i64> @vpandq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 12, i64 12>
%x = and <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -186,7 +186,7 @@ define <2 x i64> @vpandnq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readno
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 13, i64 13>
%b2 = xor <2 x i64> %b, <i64 -1, i64 -1>
%x = and <2 x i64> %a2, %b2
ret <2 x i64> %x
@@ -200,7 +200,7 @@ define <2 x i64> @vporq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnone
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 14, i64 14>
%x = or <2 x i64> %a2, %b
ret <2 x i64> %x
}
@@ -213,7 +213,7 @@ define <2 x i64> @vpxorq128(<2 x i64> %a, <2 x i64> %b) nounwind uwtable readnon
; CHECK-NEXT: retq
entry:
; Force the execution domain with an add.
- %a2 = add <2 x i64> %a, <i64 1, i64 1>
+ %a2 = add <2 x i64> %a, <i64 15, i64 15>
%x = xor <2 x i64> %a2, %b
ret <2 x i64> %x
}