| author | Chandler Carruth <chandlerc@gmail.com> | 2014-09-18 08:33:04 +0000 | 
|---|---|---|
| committer | Chandler Carruth <chandlerc@gmail.com> | 2014-09-18 08:33:04 +0000 | 
| commit | 0fe4928fbe3a2cdf21efb54f39ee9e150e6bde7a (patch) | |
| tree | 116e9e123d00ed6f3c865b9d65b38e3431f55916 | |
| parent | e747362b56c9fb03dd722f3f92329dc238011fa9 (diff) | |
[x86] Add an SSSE3 run and check mode to the 128-bit v2 tests of the new
vector shuffle lowering. This will be needed for upcoming palignr tests.
llvm-svn: 218037
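For context, each function in this test file pins down the codegen for one fixed shuffle mask, and the digits in the test name encode the mask indices (indices 2 and 3 select from the second operand). The diff below shows only the FileCheck assertions, so the following is a minimal sketch of what one of these tests presumably looks like in the .ll source, with the IR body inferred from that naming convention:

```llvm
; Sketch (inferred from the test naming convention, not shown in this diff):
; shuffle_v2f64_00 broadcasts element 0 of %a, which the checks below expect
; to lower to a single unpcklpd.
define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
  %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %shuffle
}
```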
| -rw-r--r-- | llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll | 47 |

1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
index d23b0794305..621e9a7db80 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -1,5 +1,6 @@
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE3
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSSE3
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=ALL --check-prefix=SSE41
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -57,6 +58,10 @@ define <2 x double> @shuffle_v2f64_00(<2 x double> %a, <2 x double> %b) {
 ; SSE3:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_00
+; SSSE3:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_00
 ; SSE41:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
@@ -88,6 +93,11 @@ define <2 x double> @shuffle_v2f64_22(<2 x double> %a, <2 x double> %b) {
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_22
+; SSSE3:         unpcklpd {{.*}} # xmm1 = xmm1[0,0]
+; SSSE3-NEXT:    movapd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_22
 ; SSE41:         unpcklpd {{.*}} # xmm1 = xmm1[0,0]
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
@@ -119,6 +129,10 @@ define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
 ; SSE3:         shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_03
+; SSSE3:         shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_03
 ; SSE41:         blendpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE41-NEXT:    retq
@@ -136,6 +150,11 @@ define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2f64_21
+; SSSE3:         shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; SSSE3-NEXT:    movapd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2f64_21
 ; SSE41:         blendpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
 ; SSE41-NEXT:    movapd %xmm1, %xmm0
@@ -169,6 +188,10 @@ define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
 ; SSE3:         shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_03
+; SSSE3:         shufpd {{.*}} # xmm0 = xmm0[0],xmm1[1]
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_03
 ; SSE41:         pblendw {{.*}} # xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    retq
@@ -186,6 +209,11 @@ define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_03_copy
+; SSSE3:         shufpd {{.*}} # xmm1 = xmm1[0],xmm2[1]
+; SSSE3-NEXT:    movapd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_03_copy
 ; SSE41:         pblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -250,6 +278,11 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
 ; SSE3-NEXT:    movapd %xmm1, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_21
+; SSSE3:         shufpd {{.*}} # xmm1 = xmm1[0],xmm0[1]
+; SSSE3-NEXT:    movapd %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_21
 ; SSE41:         pblendw {{.*}} # xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
 ; SSE41-NEXT:    movdqa %xmm1, %xmm0
@@ -268,6 +301,11 @@ define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ; SSE3-NEXT:    movapd %xmm2, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @shuffle_v2i64_21_copy
+; SSSE3:         shufpd {{.*}} # xmm2 = xmm2[0],xmm1[1]
+; SSSE3-NEXT:    movapd %xmm2, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @shuffle_v2i64_21_copy
 ; SSE41:         pblendw {{.*}} # xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
 ; SSE41-NEXT:    movdqa %xmm2, %xmm0
@@ -320,6 +358,11 @@ define <2 x double> @insert_dup_reg_v2f64(double %a) {
 ; SSE3-NEXT:    retq
 ;
 ; FIXME: This should match movddup as well!
+; SSSE3-LABEL: @insert_dup_reg_v2f64
+; SSSE3:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
+; SSSE3-NEXT:    retq
+;
+; FIXME: This should match movddup as well!
 ; SSE41-LABEL: @insert_dup_reg_v2f64
 ; SSE41:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
 ; SSE41-NEXT:    retq
@@ -337,6 +380,10 @@ define <2 x double> @insert_dup_mem_v2f64(double* %ptr) {
 ; SSE3:         movddup {{.*}}, %xmm0
 ; SSE3-NEXT:    retq
 ;
+; SSSE3-LABEL: @insert_dup_mem_v2f64
+; SSSE3:         movddup {{.*}}, %xmm0
+; SSSE3-NEXT:    retq
+;
 ; SSE41-LABEL: @insert_dup_mem_v2f64
 ; SSE41:         movddup {{.*}}, %xmm0
 ; SSE41-NEXT:    retq
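Each RUN line above pipes llc output into FileCheck with two prefixes, so expectations shared by every ISA level can be written once under ALL while per-ISA expectations live under SSE2, SSE3, SSSE3, or SSE41. The dedicated SSSE3 run matters because palignr is an SSSE3 instruction, so the upcoming palignr tests need checks at exactly this feature level. Below is a minimal self-contained sketch of the pattern; the test name @dup_lo and its body are hypothetical, and the real file carries three more RUN lines for the other ISA levels:

```llvm
; Illustrative miniature of this file's RUN/check-prefix pattern
; (hypothetical test, not part of the commit).
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+ssse3 \
; RUN:     -x86-experimental-vector-shuffle-lowering \
; RUN:   | FileCheck %s --check-prefix=ALL --check-prefix=SSSE3

define <2 x double> @dup_lo(<2 x double> %a) {
; SSSE3-LABEL: @dup_lo
; SSSE3:         unpcklpd {{.*}} # xmm0 = xmm0[0,0]
; SSSE3-NEXT:    retq
  %r = shufflevector <2 x double> %a, <2 x double> undef, <2 x i32> <i32 0, i32 0>
  ret <2 x double> %r
}
```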

