author      Craig Topper <craig.topper@intel.com>    2019-07-06 17:59:41 +0000
committer   Craig Topper <craig.topper@intel.com>    2019-07-06 17:59:41 +0000
commit      8c036bf784eb8411087fce098eac8353367349ac (patch)
tree        bb688a0a79a20d79b8dfc8ee6ad37f1e583437bc
parent      87856e739c8e55f3b4e0f37baaf93308ec2dbd47 (diff)
download    bcm5719-llvm-8c036bf784eb8411087fce098eac8353367349ac.tar.gz
            bcm5719-llvm-8c036bf784eb8411087fce098eac8353367349ac.zip
[X86] Copy some test cases from vector-shuffle-sse1.ll to vector-shuffle-128-v4.ll and v2 where sse1 did better load folding. NFC
llvm-svn: 365265
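
For context, "load folding" here means turning the unaligned `load ... align 1` into a memory operand of the shuffle instruction itself instead of emitting a separate movups/movupd. A minimal sketch of the contrast, assuming the SSE1-only output in vector-shuffle-sse1.ll looks roughly like the folded form below (the SSE1 assembly is an assumption based on the commit description; the SSE2+ form is taken from the hunks that follow):

    # assumed SSE1 lowering: the 64-bit load folds straight into the shuffle
    movhps (%rdi), %xmm0        # xmm0 = xmm0[0,1],mem[0,1]

    # current SSE2+ lowering (see the checks below): load first, then shuffle
    movups (%rdi), %xmm1
    movlhps %xmm1, %xmm0        # xmm0 = xmm0[0],xmm1[0]

Since the commit is marked NFC, the copied tests only record the current, less efficient SSE2+ codegen so that a later lowering change can be measured against it.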
-rw-r--r--   llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll | 50
-rw-r--r--   llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll | 50
2 files changed, 100 insertions(+), 0 deletions(-)
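
The SSE, SSE2, SSE3, SSSE3, SSE41 and AVX check prefixes below are defined by RUN lines at the top of each test file, which this diff does not show. A minimal sketch of that setup, assuming the usual llc/FileCheck pattern these files follow (the exact -mattr values and prefix groupings are an assumption):

    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2   | FileCheck %s --check-prefixes=SSE,SSE2
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=SSE,SSE41
    ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx    | FileCheck %s --check-prefixes=AVX

Each prefix selects the CHECK lines that must match the assembly llc emits at that feature level.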
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
index 7905d9d45f1..761855e5c62 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -1305,3 +1305,53 @@ define <2 x double> @shuffle_mem_v2f64_31(<2 x double> %a, <2 x double>* %b) {
%f = shufflevector <2 x double> %a, <2 x double> %c, <2 x i32> <i32 3, i32 1>
ret <2 x double> %f
}
+
+define <2 x double> @shuffle_mem_v2f64_02(<2 x double> %a, <2 x double>* %pb) {
+; SSE-LABEL: shuffle_mem_v2f64_02:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm1
+; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_mem_v2f64_02:
+; AVX: # %bb.0:
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: retq
+ %b = load <2 x double>, <2 x double>* %pb, align 1
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 2>
+ ret <2 x double> %shuffle
+}
+
+define <2 x double> @shuffle_mem_v2f64_21(<2 x double> %a, <2 x double>* %pb) {
+; SSE2-LABEL: shuffle_mem_v2f64_21:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movupd (%rdi), %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_mem_v2f64_21:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movupd (%rdi), %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_mem_v2f64_21:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movupd (%rdi), %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_mem_v2f64_21:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movups (%rdi), %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_mem_v2f64_21:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; AVX-NEXT: retq
+ %b = load <2 x double>, <2 x double>* %pb, align 1
+ %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 1>
+ ret <2 x double> %shuffle
+}
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
index 5fde2d1ebca..42869c1e02d 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll
@@ -2437,3 +2437,53 @@ define <4 x i32> @shuffle_v4i32_1z3z(<4 x i32> %a) {
%shuffle = shufflevector <4 x i32> %a, <4 x i32> zeroinitializer, <4 x i32> <i32 1, i32 4, i32 3, i32 4>
ret <4 x i32> %shuffle
}
+
+define <4 x float> @shuffle_mem_v4f32_0145(<4 x float> %a, <4 x float>* %pb) {
+; SSE-LABEL: shuffle_mem_v4f32_0145:
+; SSE: # %bb.0:
+; SSE-NEXT: movups (%rdi), %xmm1
+; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
+; SSE-NEXT: retq
+;
+; AVX-LABEL: shuffle_mem_v4f32_0145:
+; AVX: # %bb.0:
+; AVX-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0]
+; AVX-NEXT: retq
+ %b = load <4 x float>, <4 x float>* %pb, align 1
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+ ret <4 x float> %shuffle
+}
+
+define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
+; SSE2-LABEL: shuffle_mem_v4f32_4523:
+; SSE2: # %bb.0:
+; SSE2-NEXT: movupd (%rdi), %xmm1
+; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE2-NEXT: retq
+;
+; SSE3-LABEL: shuffle_mem_v4f32_4523:
+; SSE3: # %bb.0:
+; SSE3-NEXT: movupd (%rdi), %xmm1
+; SSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSE3-NEXT: retq
+;
+; SSSE3-LABEL: shuffle_mem_v4f32_4523:
+; SSSE3: # %bb.0:
+; SSSE3-NEXT: movupd (%rdi), %xmm1
+; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
+; SSSE3-NEXT: retq
+;
+; SSE41-LABEL: shuffle_mem_v4f32_4523:
+; SSE41: # %bb.0:
+; SSE41-NEXT: movups (%rdi), %xmm1
+; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; SSE41-NEXT: retq
+;
+; AVX-LABEL: shuffle_mem_v4f32_4523:
+; AVX: # %bb.0:
+; AVX-NEXT: vblendps {{.*#+}} xmm0 = mem[0,1],xmm0[2,3]
+; AVX-NEXT: retq
+ %b = load <4 x float>, <4 x float>* %pb, align 1
+ %shuffle = shufflevector <4 x float> %a, <4 x float> %b, <4 x i32> <i32 4, i32 5, i32 2, i32 3>
+ ret <4 x float> %shuffle
+}
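
The CHECK lines in both files are autogenerated rather than hand-written, so after copying the IR the assertions are refreshed with utils/update_llc_test_checks.py. A minimal sketch of that step, assuming a build tree at build/ (the path is an assumption):

    # regenerate the CHECK lines for the touched tests
    llvm/utils/update_llc_test_checks.py --llc-binary build/bin/llc \
        llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll \
        llvm/test/CodeGen/X86/vector-shuffle-128-v4.ll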