author     Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2014-10-19 21:29:21 +0000
committer  Bill Schmidt <wschmidt@linux.vnet.ibm.com>  2014-10-19 21:29:21 +0000
commit     941a3244ffe0fbed51f977ace7358fbf5c924479 (patch)
tree       8d89b02538508a320e03e9c4559145560fad74c2
parent     87982a1e9b42b18e803a2dda3a79ff665b7b2243 (diff)
[PowerPC] Clean up -mattr=+vsx tests to always specify -mcpu
We recently discovered an issue that reinforces what a good idea it is to
always specify -mcpu in our code generation tests, particularly for
-mattr=+vsx. This patch ensures that all tests that specify -mattr=+vsx
also specify -mcpu=pwr7 or -mcpu=pwr8, as appropriate.

Some of the uses of -mattr=+vsx added recently don't make much sense (when
specified for -mtriple=powerpc-apple-darwin8 or -march=ppc32, for example).
For cases like this I've just removed the extra VSX test commands; there's
enough coverage without them.

llvm-svn: 220173
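To illustrate the convention this patch enforces, here is a minimal sketch of
a VSX RUN line that pins the CPU alongside the attribute (the triple and check
prefix are taken from the vec_mul.ll change below; treat them as illustrative
rather than required):

; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec \
; RUN:   -mattr=+vsx -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-VSX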
-rw-r--r--  llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll   |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/2012-10-12-bitcast.ll     |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll  |  6
-rw-r--r--  llvm/test/CodeGen/PowerPC/fabs.ll                   |  1
-rw-r--r--  llvm/test/CodeGen/PowerPC/fnabs.ll                  |  1
-rw-r--r--  llvm/test/CodeGen/PowerPC/fp-branch.ll              |  1
-rw-r--r--  llvm/test/CodeGen/PowerPC/fp_to_uint.ll             |  1
-rw-r--r--  llvm/test/CodeGen/PowerPC/i64_fp.ll                 |  4
-rw-r--r--  llvm/test/CodeGen/PowerPC/unsafe-math.ll            |  3
-rw-r--r--  llvm/test/CodeGen/PowerPC/vec_mul.ll                |  9
-rw-r--r--  llvm/test/CodeGen/PowerPC/vrspill.ll                |  5
11 files changed, 8 insertions(+), 31 deletions(-)
diff --git a/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll b/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
index 89a4b72288d..bdd91f34571 100644
--- a/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
+++ b/llvm/test/CodeGen/PowerPC/2007-09-08-unaligned.ll
@@ -2,10 +2,6 @@
; RUN: llc -mattr=-vsx < %s | grep stfs | count 1
; RUN: llc -mattr=-vsx < %s | grep lfd | count 2
; RUN: llc -mattr=-vsx < %s | grep lfs | count 2
-; RUN: llc -mattr=+vsx < %s | grep stxsdx | count 3
-; RUN: llc -mattr=+vsx < %s | grep stfs | count 1
-; RUN: llc -mattr=+vsx < %s | grep lxsdx | count 2
-; RUN: llc -mattr=+vsx < %s | grep lfs | count 2
; ModuleID = 'foo.c'
target datalayout = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f128:64:128"
target triple = "powerpc-apple-darwin8"
diff --git a/llvm/test/CodeGen/PowerPC/2012-10-12-bitcast.ll b/llvm/test/CodeGen/PowerPC/2012-10-12-bitcast.ll
index cd714d088f5..fdacef2cdd4 100644
--- a/llvm/test/CodeGen/PowerPC/2012-10-12-bitcast.ll
+++ b/llvm/test/CodeGen/PowerPC/2012-10-12-bitcast.ll
@@ -1,5 +1,5 @@
-; RUN: llc -mattr=-vsx -mattr=+altivec < %s | FileCheck %s
-; RUN: llc -mattr=+vsx -mattr=+altivec < %s | FileCheck -check-prefix=CHECK-VSX %s
+; RUN: llc -mattr=-vsx -mattr=+altivec -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -mattr=+vsx -mattr=+altivec -mcpu=pwr7 < %s | FileCheck -check-prefix=CHECK-VSX %s
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
diff --git a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
index 37feec8e1c7..b70671bfd5c 100644
--- a/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
+++ b/llvm/test/CodeGen/PowerPC/buildvec_canonicalize.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 -mattr=+altivec --enable-unsafe-fp-math | FileCheck %s
-; RUN: llc < %s -mattr=+vsx -march=ppc32 -mattr=+altivec --enable-unsafe-fp-math | FileCheck -check-prefix=CHECK-VSX %s
define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
%tmp = load <4 x float>* %P3 ; <<4 x float>> [#uses=1]
@@ -15,9 +14,6 @@ define void @VXOR(<4 x float>* %P1, <4 x i32>* %P2, <4 x float>* %P3) {
; CHECK: @VXOR
; CHECK: vsplti
; CHECK: vxor
-; CHECK-VSX: @VXOR
-; CHECK-VSX: vxor
-; CHECK-VSX: xvmulsp
define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
store <4 x i32> bitcast (<16 x i8> < i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1 > to <4 x i32>), <4 x i32>* %P2
@@ -26,5 +22,3 @@ define void @VSPLTI(<4 x i32>* %P2, <8 x i16>* %P3) {
}
; CHECK: @VSPLTI
; CHECK: vsplti
-; CHECK-VSX: @VSPLTI
-; CHECK-VSX: vsplti
diff --git a/llvm/test/CodeGen/PowerPC/fabs.ll b/llvm/test/CodeGen/PowerPC/fabs.ll
index a093def16f5..36aa23d0355 100644
--- a/llvm/test/CodeGen/PowerPC/fabs.ll
+++ b/llvm/test/CodeGen/PowerPC/fabs.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 -mtriple=powerpc-apple-darwin | grep "fabs f1, f1"
-; RUN: llc < %s -mattr=+vsx -march=ppc32 -mtriple=powerpc-apple-darwin | grep "xsabsdp f1, f1"
define double @fabs(double %f) {
entry:
diff --git a/llvm/test/CodeGen/PowerPC/fnabs.ll b/llvm/test/CodeGen/PowerPC/fnabs.ll
index 0e5c7a7c417..fc6a04e0094 100644
--- a/llvm/test/CodeGen/PowerPC/fnabs.ll
+++ b/llvm/test/CodeGen/PowerPC/fnabs.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 | grep fnabs
-; RUN: llc < %s -mattr=+vsx -march=ppc32 | grep xsnabsdp
declare double @fabs(double)
diff --git a/llvm/test/CodeGen/PowerPC/fp-branch.ll b/llvm/test/CodeGen/PowerPC/fp-branch.ll
index 926bb8eb1c3..f5857563745 100644
--- a/llvm/test/CodeGen/PowerPC/fp-branch.ll
+++ b/llvm/test/CodeGen/PowerPC/fp-branch.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 | grep fcmp | count 1
-; RUN: llc < %s -mattr=+vsx -march=ppc32 | grep xscmpudp | count 1
declare i1 @llvm.isunordered.f64(double, double)
diff --git a/llvm/test/CodeGen/PowerPC/fp_to_uint.ll b/llvm/test/CodeGen/PowerPC/fp_to_uint.ll
index 21a1b7f0d69..187d2d6ee1e 100644
--- a/llvm/test/CodeGen/PowerPC/fp_to_uint.ll
+++ b/llvm/test/CodeGen/PowerPC/fp_to_uint.ll
@@ -1,5 +1,4 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 | grep fctiwz | count 1
-; RUN: llc < %s -mattr=+vsx -march=ppc32 | grep xscvdpsxws | count 1
define i16 @foo(float %a) {
diff --git a/llvm/test/CodeGen/PowerPC/i64_fp.ll b/llvm/test/CodeGen/PowerPC/i64_fp.ll
index 6c1b645b1fc..67f4e0bc4b6 100644
--- a/llvm/test/CodeGen/PowerPC/i64_fp.ll
+++ b/llvm/test/CodeGen/PowerPC/i64_fp.ll
@@ -17,10 +17,6 @@
; RUN: not grep fcfid
; RUN: llc < %s -mattr=-vsx -march=ppc32 -mcpu=g4 | \
; RUN: not grep fctidz
-; RUN: llc < %s -mattr=+vsx -march=ppc32 -mattr=+64bit | \
-; RUN: grep xscvdpsxds
-; RUN: llc < %s -mattr=+vsx -march=ppc32 -mattr=+64bit | \
-; RUN: grep xscvsxddp
define double @X(double %Y) {
%A = fptosi double %Y to i64 ; <i64> [#uses=1]
diff --git a/llvm/test/CodeGen/PowerPC/unsafe-math.ll b/llvm/test/CodeGen/PowerPC/unsafe-math.ll
index 1a269da1ac3..f6430270eac 100644
--- a/llvm/test/CodeGen/PowerPC/unsafe-math.ll
+++ b/llvm/test/CodeGen/PowerPC/unsafe-math.ll
@@ -1,9 +1,6 @@
; RUN: llc < %s -mattr=-vsx -march=ppc32 | grep fmul | count 2
; RUN: llc < %s -mattr=-vsx -march=ppc32 -enable-unsafe-fp-math | \
; RUN: grep fmul | count 1
-; RUN: llc < %s -mattr=+vsx -march=ppc32 | grep xsmuldp | count 2
-; RUN: llc < %s -mattr=+vsx -march=ppc32 -enable-unsafe-fp-math | \
-; RUN: grep xsmuldp | count 1
define double @foo(double %X) nounwind {
%tmp1 = fmul double %X, 1.23
diff --git a/llvm/test/CodeGen/PowerPC/vec_mul.ll b/llvm/test/CodeGen/PowerPC/vec_mul.ll
index dee03693fc8..86596d4b0a8 100644
--- a/llvm/test/CodeGen/PowerPC/vec_mul.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_mul.ll
@@ -1,9 +1,8 @@
; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec -mattr=-vsx | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx | FileCheck %s
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx | FileCheck %s -check-prefix=CHECK-LE
-; RUN: llc < %s -mtriple=powerpc-unknown-linux-gnu -march=ppc32 -mattr=+altivec -mattr=+vsx | FileCheck %s -check-prefix=CHECK-VSX
-; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx | FileCheck %s -check-prefix=CHECK-VSX
-; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx | FileCheck %s -check-prefix=CHECK-LE-VSX
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr7 | FileCheck %s
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=-vsx -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-LE
+; RUN: llc < %s -mtriple=powerpc64-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr7 | FileCheck %s -check-prefix=CHECK-VSX
+; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -march=ppc64 -mattr=+altivec -mattr=+vsx -mcpu=pwr8 | FileCheck %s -check-prefix=CHECK-LE-VSX
define <4 x i32> @test_v4i32(<4 x i32>* %X, <4 x i32>* %Y) {
%tmp = load <4 x i32>* %X ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/PowerPC/vrspill.ll b/llvm/test/CodeGen/PowerPC/vrspill.ll
index b990442aed8..b55e12960fa 100644
--- a/llvm/test/CodeGen/PowerPC/vrspill.ll
+++ b/llvm/test/CodeGen/PowerPC/vrspill.ll
@@ -1,7 +1,6 @@
; RUN: llc -O0 -mtriple=powerpc-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -verify-machineinstrs < %s | FileCheck %s
-; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -verify-machineinstrs -fast-isel=false < %s | FileCheck %s
-; RUN: llc -O0 -mtriple=powerpc-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -verify-machineinstrs < %s | FileCheck -check-prefix=CHECK-VSX %s
-; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -verify-machineinstrs -fast-isel=false < %s | FileCheck -check-prefix=CHECK-VSX %s
+; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=-vsx -verify-machineinstrs -fast-isel=false -mcpu=pwr7 < %s | FileCheck %s
+; RUN: llc -O0 -mtriple=powerpc64-unknown-linux-gnu -mattr=+altivec -mattr=+vsx -verify-machineinstrs -fast-isel=false -mcpu=pwr7 < %s | FileCheck -check-prefix=CHECK-VSX %s
; This verifies that we generate correct spill/reload code for vector regs.