summaryrefslogtreecommitdiffstats
path: root/llvm/test/CodeGen
diff options
context:
space:
mode:
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--llvm/test/CodeGen/ARM/align.ll1
-rw-r--r--llvm/test/CodeGen/ARM/hello.ll1
-rw-r--r--llvm/test/CodeGen/Alpha/cmov.ll1
-rw-r--r--llvm/test/CodeGen/Alpha/ctlz.ll1
-rw-r--r--llvm/test/CodeGen/Alpha/ctlz_e.ll1
-rw-r--r--llvm/test/CodeGen/Alpha/ctpop.ll1
-rw-r--r--llvm/test/CodeGen/CBackend/2004-02-15-PreexistingExternals.llx1
-rw-r--r--llvm/test/CodeGen/CBackend/2005-07-14-NegationToMinusMinus.ll2
-rw-r--r--llvm/test/CodeGen/Generic/vector-constantexpr.ll1
-rw-r--r--llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx1
-rw-r--r--llvm/test/CodeGen/X86/fp-stack-ret.ll6
-rw-r--r--llvm/test/CodeGen/X86/fp_constant_op.llx1
-rw-r--r--llvm/test/CodeGen/X86/fp_load_fold.llx1
-rw-r--r--llvm/test/CodeGen/X86/sse-fcopysign.ll1
-rw-r--r--llvm/test/CodeGen/X86/sse-load-ret.ll8
-rw-r--r--llvm/test/CodeGen/X86/vec_call.ll1
16 files changed, 23 insertions, 6 deletions
diff --git a/llvm/test/CodeGen/ARM/align.ll b/llvm/test/CodeGen/ARM/align.ll
index 457d883a3ca..8c91b8b5d0e 100644
--- a/llvm/test/CodeGen/ARM/align.ll
+++ b/llvm/test/CodeGen/ARM/align.ll
@@ -6,6 +6,7 @@
; RUN: grep align.*3 | wc | grep 2
; RUN: llvm-upgrade < %s | llvm-as | llc -mtriple=arm-apple-darwin | \
; RUN: grep align.*2 | wc | grep 4
+; XFAIL: *
%a = global bool true
%b = global sbyte 1
diff --git a/llvm/test/CodeGen/ARM/hello.ll b/llvm/test/CodeGen/ARM/hello.ll
index 4fbcb561422..4b713baf439 100644
--- a/llvm/test/CodeGen/ARM/hello.ll
+++ b/llvm/test/CodeGen/ARM/hello.ll
@@ -2,6 +2,7 @@
; RUN: llvm-as < %s | llc -mtriple=arm-linux | grep mov | wc -l | grep 1 &&
; RUN: llvm-as < %s | llc -mtriple=arm-linux --disable-fp-elim | grep mov | wc -l | grep 2
; RUN: llvm-as < %s | llc -mtriple=arm-apple-darwin | grep mov | wc -l | grep 2
+; XFAIL: *
@str = internal constant [12 x i8] c"Hello World\00"
diff --git a/llvm/test/CodeGen/Alpha/cmov.ll b/llvm/test/CodeGen/Alpha/cmov.ll
index c5f9f63c342..8d1c7c5af2f 100644
--- a/llvm/test/CodeGen/Alpha/cmov.ll
+++ b/llvm/test/CodeGen/Alpha/cmov.ll
@@ -1,5 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | llc -march=alpha | not grep cmovlt
; RUN: llvm-upgrade < %s | llvm-as | llc -march=alpha | grep cmoveq
+; XFAIL: *
long %cmov_lt(long %a, long %c) {
entry:
diff --git a/llvm/test/CodeGen/Alpha/ctlz.ll b/llvm/test/CodeGen/Alpha/ctlz.ll
index 0ad014dbf97..e80266fdd5c 100644
--- a/llvm/test/CodeGen/Alpha/ctlz.ll
+++ b/llvm/test/CodeGen/Alpha/ctlz.ll
@@ -4,6 +4,7 @@
; RUN: llvm-as < %s | llc -march=alpha -mcpu=ev6 | not grep -i ctlz
; RUN: llvm-as < %s | llc -march=alpha -mcpu=ev56 | not grep -i ctlz
; RUN: llvm-as < %s | llc -march=alpha -mattr=-CIX | not grep -i ctlz
+; XFAIL: *
declare i32 @llvm.ctlz.i8(i8)
diff --git a/llvm/test/CodeGen/Alpha/ctlz_e.ll b/llvm/test/CodeGen/Alpha/ctlz_e.ll
index 7956f5c13df..0a2faab5cb3 100644
--- a/llvm/test/CodeGen/Alpha/ctlz_e.ll
+++ b/llvm/test/CodeGen/Alpha/ctlz_e.ll
@@ -1,5 +1,6 @@
; Make sure this testcase does not use ctpop
; RUN: llvm-upgrade < %s | llvm-as | llc -march=alpha | not grep -i ctpop
+; XFAIL: *
declare ulong %llvm.ctlz(ulong)
diff --git a/llvm/test/CodeGen/Alpha/ctpop.ll b/llvm/test/CodeGen/Alpha/ctpop.ll
index e68d23708ed..df9524c0bfe 100644
--- a/llvm/test/CodeGen/Alpha/ctpop.ll
+++ b/llvm/test/CodeGen/Alpha/ctpop.ll
@@ -8,6 +8,7 @@
; RUN: not grep -i ctpop
; RUN: llvm-upgrade < %s | llvm-as | llc -march=alpha -mattr=-CIX | \
; RUN: not grep -i 'ctpop'
+; XFAIL: *
declare long %llvm.ctpop(long)
diff --git a/llvm/test/CodeGen/CBackend/2004-02-15-PreexistingExternals.llx b/llvm/test/CodeGen/CBackend/2004-02-15-PreexistingExternals.llx
index d880876afaa..743d0177b2d 100644
--- a/llvm/test/CodeGen/CBackend/2004-02-15-PreexistingExternals.llx
+++ b/llvm/test/CodeGen/CBackend/2004-02-15-PreexistingExternals.llx
@@ -5,6 +5,7 @@
; everything up. :( Test that this does not happen anymore.
;
; RUN: llvm-upgrade < %s | llvm-as | llc -march=c | not grep _memcpy
+; XFAIL: *
declare void %llvm.memcpy(sbyte*, sbyte*, uint,uint)
declare float* %memcpy(int*, uint,int)
diff --git a/llvm/test/CodeGen/CBackend/2005-07-14-NegationToMinusMinus.ll b/llvm/test/CodeGen/CBackend/2005-07-14-NegationToMinusMinus.ll
index 6be1a66c18a..e2c665ecfd7 100644
--- a/llvm/test/CodeGen/CBackend/2005-07-14-NegationToMinusMinus.ll
+++ b/llvm/test/CodeGen/CBackend/2005-07-14-NegationToMinusMinus.ll
@@ -1,4 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | llc -march=c | not grep -- -65535
+; XFAIL: *
+
; ModuleID = '<stdin>'
target endian = little
target pointersize = 32
diff --git a/llvm/test/CodeGen/Generic/vector-constantexpr.ll b/llvm/test/CodeGen/Generic/vector-constantexpr.ll
index 31b60a4d687..bf44f317eca 100644
--- a/llvm/test/CodeGen/Generic/vector-constantexpr.ll
+++ b/llvm/test/CodeGen/Generic/vector-constantexpr.ll
@@ -1,4 +1,5 @@
; RUN: llvm-upgrade < %s | llvm-as | llc
+; XFAIL: *
void ""(float* %inregs, float* %outregs) {
%a_addr.i = alloca <4 x float> ; <<4 x float>*> [#uses=1]
diff --git a/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx b/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
index 69e4c1d011f..005a7dd51f1 100644
--- a/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
+++ b/llvm/test/CodeGen/X86/2004-02-14-InefficientStackPointer.llx
@@ -1,4 +1,5 @@
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 | grep ESP | not grep sub
+; XFAIL: *
int %test(int %X) {
ret int %X
diff --git a/llvm/test/CodeGen/X86/fp-stack-ret.ll b/llvm/test/CodeGen/X86/fp-stack-ret.ll
index 6ed8115945c..42cdb67cf50 100644
--- a/llvm/test/CodeGen/X86/fp-stack-ret.ll
+++ b/llvm/test/CodeGen/X86/fp-stack-ret.ll
@@ -1,7 +1,7 @@
-; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin8 -mcpu=yonah -march=x86 > %t &&
-; RUN: grep fldl %t | wc -l | grep 1 &&
+; RUN: llvm-as < %s | llc -mtriple=i686-apple-darwin8 -mcpu=yonah -march=x86 > %t
+; RUN: grep fldl %t | wc -l | grep 1
; RUN: not grep xmm %t &&
-; RUN: grep 'sub.*esp' %t | wc -l | grep 1
+; RUN: grep {sub.*esp} %t | wc -l | grep 1
; These testcases shouldn't require loading into an XMM register then storing
; to memory, then reloading into an FPStack reg.
diff --git a/llvm/test/CodeGen/X86/fp_constant_op.llx b/llvm/test/CodeGen/X86/fp_constant_op.llx
index 97cb1c0ed13..155673fcd72 100644
--- a/llvm/test/CodeGen/X86/fp_constant_op.llx
+++ b/llvm/test/CodeGen/X86/fp_constant_op.llx
@@ -1,5 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
; RUN: grep ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
+; XFAIL: *
; Test that the load of the constant is folded into the operation.
diff --git a/llvm/test/CodeGen/X86/fp_load_fold.llx b/llvm/test/CodeGen/X86/fp_load_fold.llx
index 1d8d353e4d6..ce272cea093 100644
--- a/llvm/test/CodeGen/X86/fp_load_fold.llx
+++ b/llvm/test/CodeGen/X86/fp_load_fold.llx
@@ -1,5 +1,6 @@
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -x86-asm-syntax=intel | \
; RUN: grep ST | not grep {fadd\\|fsub\\|fdiv\\|fmul}
+; XFAIL: *
; Test that the load of the memory location is folded into the operation.
diff --git a/llvm/test/CodeGen/X86/sse-fcopysign.ll b/llvm/test/CodeGen/X86/sse-fcopysign.ll
index 25d8aa39a74..b82f18d32b3 100644
--- a/llvm/test/CodeGen/X86/sse-fcopysign.ll
+++ b/llvm/test/CodeGen/X86/sse-fcopysign.ll
@@ -1,4 +1,5 @@
; RUN: llvm-as < %s | llc -march=x86 -mattr=+sse2 | not grep test
+; XFAIL: *
define float @test1(float %a, float %b) {
%tmp = tail call float @copysignf( float %b, float %a )
diff --git a/llvm/test/CodeGen/X86/sse-load-ret.ll b/llvm/test/CodeGen/X86/sse-load-ret.ll
index d5f4d19686b..4777e0f551f 100644
--- a/llvm/test/CodeGen/X86/sse-load-ret.ll
+++ b/llvm/test/CodeGen/X86/sse-load-ret.ll
@@ -1,6 +1,8 @@
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse &&
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep movss
-; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep xmm
+; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep movss
+; RUN: llvm-upgrade < %s | llvm-as | \
+; RUN: llc -march=x86 -mcpu=yonah -enable-x86-sse | not grep xmm
+; XFAIL: *
double %test1(double *%P) {
%X = load double* %P
diff --git a/llvm/test/CodeGen/X86/vec_call.ll b/llvm/test/CodeGen/X86/vec_call.ll
index 6875894bfec..96cd7e47c39 100644
--- a/llvm/test/CodeGen/X86/vec_call.ll
+++ b/llvm/test/CodeGen/X86/vec_call.ll
@@ -2,6 +2,7 @@
; RUN: grep {subl.*60}
; RUN: llvm-upgrade < %s | llvm-as | llc -march=x86 -mattr=+sse2 | \
; RUN: grep {movdqa.*32}
+; XFAIL: *
void %test() {
tail call void %xx( int 1, int 2, int 3, int 4, int 5, int 6, int 7, <2 x long> cast (<4 x int> < int 4, int 3, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 7, int 6, int 5 > to <2 x long>), <2 x long> cast (<4 x int> < int 6, int 4, int 2, int 0 > to <2 x long>), <2 x long> cast (<4 x int> < int 8, int 4, int 2, int 1 > to <2 x long>), <2 x long> cast (<4 x int> < int 0, int 1, int 3, int 9 > to <2 x long>) )
OpenPOWER on IntegriCloud