| author | Bob Wilson <bob.wilson@apple.com> | 2009-10-07 23:47:21 +0000 | 
|---|---|---|
| committer | Bob Wilson <bob.wilson@apple.com> | 2009-10-07 23:47:21 +0000 | 
| commit | d1de3b82fffc9357365b6950b6ed1a976d38c152 (patch) | |
| tree | 27633c84d2a2dc1e65910dbac03fa803afe3f154 /llvm/test/CodeGen/ARM/vmul.ll | |
| parent | 5ef3c6d9f49906a273f99e8106e2b70291d9b838 (diff) | |
| download | bcm5719-llvm-d1de3b82fffc9357365b6950b6ed1a976d38c152.tar.gz bcm5719-llvm-d1de3b82fffc9357365b6950b6ed1a976d38c152.zip | |
Convert more NEON tests to use FileCheck.
llvm-svn: 83507
Diffstat (limited to 'llvm/test/CodeGen/ARM/vmul.ll')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | llvm/test/CodeGen/ARM/vmul.ll | 27 |
1 file changed, 21 insertions, 6 deletions
```diff
diff --git a/llvm/test/CodeGen/ARM/vmul.ll b/llvm/test/CodeGen/ARM/vmul.ll
index 38abcca9a38..83ec55e91bc 100644
--- a/llvm/test/CodeGen/ARM/vmul.ll
+++ b/llvm/test/CodeGen/ARM/vmul.ll
@@ -1,11 +1,8 @@
-; RUN: llc < %s -march=arm -mattr=+neon > %t
-; RUN: grep {vmul\\.i8} %t | count 2
-; RUN: grep {vmul\\.i16} %t | count 2
-; RUN: grep {vmul\\.i32} %t | count 2
-; RUN: grep {vmul\\.f32} %t | count 2
-; RUN: grep {vmul\\.p8} %t | count 2
+; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
 
 define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vmuli8:
+;CHECK: vmul.i8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = mul <8 x i8> %tmp1, %tmp2
@@ -13,6 +10,8 @@ define <8 x i8> @vmuli8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 }
 
 define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+;CHECK: vmuli16:
+;CHECK: vmul.i16
 	%tmp1 = load <4 x i16>* %A
 	%tmp2 = load <4 x i16>* %B
 	%tmp3 = mul <4 x i16> %tmp1, %tmp2
@@ -20,6 +19,8 @@ define <4 x i16> @vmuli16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
 }
 
 define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+;CHECK: vmuli32:
+;CHECK: vmul.i32
 	%tmp1 = load <2 x i32>* %A
 	%tmp2 = load <2 x i32>* %B
 	%tmp3 = mul <2 x i32> %tmp1, %tmp2
@@ -27,6 +28,8 @@ define <2 x i32> @vmuli32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
 }
 
 define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
+;CHECK: vmulf32:
+;CHECK: vmul.f32
 	%tmp1 = load <2 x float>* %A
 	%tmp2 = load <2 x float>* %B
 	%tmp3 = mul <2 x float> %tmp1, %tmp2
@@ -34,6 +37,8 @@ define <2 x float> @vmulf32(<2 x float>* %A, <2 x float>* %B) nounwind {
 }
 
 define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+;CHECK: vmulp8:
+;CHECK: vmul.p8
 	%tmp1 = load <8 x i8>* %A
 	%tmp2 = load <8 x i8>* %B
 	%tmp3 = call <8 x i8> @llvm.arm.neon.vmulp.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
@@ -41,6 +46,8 @@ define <8 x i8> @vmulp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
 }
 
 define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vmulQi8:
+;CHECK: vmul.i8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = mul <16 x i8> %tmp1, %tmp2
@@ -48,6 +55,8 @@ define <16 x i8> @vmulQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
 }
 
 define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+;CHECK: vmulQi16:
+;CHECK: vmul.i16
 	%tmp1 = load <8 x i16>* %A
 	%tmp2 = load <8 x i16>* %B
 	%tmp3 = mul <8 x i16> %tmp1, %tmp2
@@ -55,6 +64,8 @@ define <8 x i16> @vmulQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
 }
 
 define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+;CHECK: vmulQi32:
+;CHECK: vmul.i32
 	%tmp1 = load <4 x i32>* %A
 	%tmp2 = load <4 x i32>* %B
 	%tmp3 = mul <4 x i32> %tmp1, %tmp2
@@ -62,6 +73,8 @@ define <4 x i32> @vmulQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
 }
 
 define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
+;CHECK: vmulQf32:
+;CHECK: vmul.f32
 	%tmp1 = load <4 x float>* %A
 	%tmp2 = load <4 x float>* %B
 	%tmp3 = mul <4 x float> %tmp1, %tmp2
@@ -69,6 +82,8 @@ define <4 x float> @vmulQf32(<4 x float>* %A, <4 x float>* %B) nounwind {
 }
 
 define <16 x i8> @vmulQp8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+;CHECK: vmulQp8:
+;CHECK: vmul.p8
 	%tmp1 = load <16 x i8>* %A
 	%tmp2 = load <16 x i8>* %B
 	%tmp3 = call <16 x i8> @llvm.arm.neon.vmulp.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
```
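For readers unfamiliar with the idiom being adopted here: the old RUN lines dumped llc output to a temp file and counted grep hits, so a stray `vmul.i8` emitted anywhere in the file could satisfy the count for the wrong function. FileCheck instead matches its CHECK patterns against the output in order, tying each expected instruction to the function label checked just before it. Below is a minimal sketch of the resulting test style; the function name `@vmul_example` is hypothetical and not part of this patch.

```llvm
; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; Hypothetical test in the style of this patch (era-appropriate untyped
; load/pointer syntax, matching the diff above).

define <8 x i8> @vmul_example(<8 x i8>* %A, <8 x i8>* %B) nounwind {
; CHECK: vmul_example:
; CHECK: vmul.i8
	%tmp1 = load <8 x i8>* %A          ; load the two operand vectors
	%tmp2 = load <8 x i8>* %B
	%tmp3 = mul <8 x i8> %tmp1, %tmp2  ; expected to select to NEON vmul.i8
	ret <8 x i8> %tmp3
}
```

Because FileCheck consumes the output sequentially, `CHECK: vmul_example:` must match the function's label before `CHECK: vmul.i8` can match; the per-function CHECK pairs in the diff above rely on exactly this ordering. (Later LLVM releases added `CHECK-LABEL:` to make that anchoring stricter still.)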

