From b10c6b8e9ea2389ce390ef500298733b73639966 Mon Sep 17 00:00:00 2001 From: Chandler Carruth Date: Sun, 28 Sep 2014 03:30:25 +0000 Subject: [x86] Fix yet another bug in the new vector shuffle lowering's handling of widening masks. We can't widen a zeroing mask unless both elements that would be merged are either zeroed or undef. This is the only way to widen a mask if it has a zeroed element. Also clean up the code here by ordering the checks in a more logical way and by using the symbolic values for undef and zero. I'm actually torn on using the symbolic values because the existing code is littered with the assumption that -1 is undef, and moreover that entries '< 0' are the special entries. While that works with the values given to these constants, using the symbolic constants actually makes it a bit more opaque why this is the case. llvm-svn: 218575 --- llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll | 37 +++++++++++++++++++++++++ 1 file changed, 37 insertions(+) (limited to 'llvm/test') diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll index 21806489815..db7091485be 100644 --- a/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll +++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v16.ll @@ -665,3 +665,40 @@ entry: ret <16 x i8> %shuffle } + +define <16 x i8> @stress_test2(<16 x i8> %s.0.0, <16 x i8> %s.0.1, <16 x i8> %s.0.2) { +; SSSE3-LABEL: @stress_test2 +; SSSE3: # BB#0: +; SSSE3-NEXT: pshufb {{.*}} # xmm0 = zero,zero,xmm0[2],zero,zero,zero,xmm0[11],zero,zero,xmm0[3,4,5],zero,zero,xmm0[15,5] +; SSSE3-NEXT: movdqa %xmm1, %[[X1:xmm[0-9]+]] +; SSSE3-NEXT: pshufb {{.*}} # [[X1]] = [[X1]][13,14],zero,[[X1]][0,10,5],zero,[[X1]][10,10],zero,zero,zero,[[X1]][14,12],zero,zero +; SSSE3-NEXT: por %xmm0, %[[X1]] +; SSSE3-NEXT: movdqa %xmm2, %xmm0 +; SSSE3-NEXT: pshufb {{.*}} # xmm0 = zero,zero,zero,zero,zero,zero,xmm0[1,6,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: movdqa %xmm1, %[[X2:xmm[0-9]+]] +; SSSE3-NEXT: pshufb {{.*}} # 
[[X2]] = [[X2]][1,12,5,9,1,5],zero,zero,[[X2]][u,u,u,u,u,u,u,u] +; SSSE3-NEXT: por %xmm0, %[[X2]] +; SSSE3-NEXT: pshufb {{.*}} # xmm1 = zero,zero,zero,xmm1[2],zero,zero,xmm1[6,15,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: pshufb {{.*}} # xmm2 = xmm2[15,8,12],zero,xmm2[13,15],zero,zero,xmm2[u,u,u,u,u,u,u,u] +; SSSE3-NEXT: por %xmm1, %xmm2 +; SSSE3-NEXT: punpcklbw {{.*}} # xmm2 = xmm2[0],[[X2]][0],xmm2[1],[[X2]][1],xmm2[2],[[X2]][2],xmm2[3],[[X2]][3],xmm2[4],[[X2]][4],xmm2[5],[[X2]][5],xmm2[6],[[X2]][6],xmm2[7],[[X2]][7] +; SSSE3-NEXT: movdqa %xmm2, %[[X3:xmm[0-9]+]] +; SSSE3-NEXT: pshufb {{.*}} # [[X3]] = [[X3]][6],zero,[[X3]][14,14],zero,zero,[[X3]][11,0,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: movdqa %[[X1]], %xmm0 +; SSSE3-NEXT: pshufb {{.*}} # xmm0 = zero,xmm0[12],zero,zero,xmm0[1,14],zero,zero,xmm0[u,u,u,u,u,u,u,u] +; SSSE3-NEXT: por %[[X3]], %xmm0 +; SSSE3-NEXT: pshufb {{.*}} # xmm2 = zero,xmm2[u,2,3,u,u,u,u,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: movdqa %[[X1]], %[[X3]] +; SSSE3-NEXT: pshufb {{.*}} # [[X3]] = [[X3]][3,u],zero,zero,[[X3]][u,u,u,u,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: por %xmm2, %[[X3]] +; SSSE3-NEXT: pshufb {{.*}} # [[X1]] = [[X1]][1,4,10,13,u,u,u,u,u,u,u,u,u,u,u,u] +; SSSE3-NEXT: punpcklbw {{.*}} # [[X1]] = [[X1]][0],[[X3]][0],[[X1]][1],[[X3]][1],[[X1]][2],[[X3]][2],[[X1]][3],[[X3]][3],[[X1]][4],[[X3]][4],[[X1]][5],[[X3]][5],[[X1]][6],[[X3]][6],[[X1]][7],[[X3]][7] +; SSSE3-NEXT: punpcklbw {{.*}} # xmm0 = xmm0[0],[[X1]][0],xmm0[1],[[X1]][1],xmm0[2],[[X1]][2],xmm0[3],[[X1]][3],xmm0[4],[[X1]][4],xmm0[5],[[X1]][5],xmm0[6],[[X1]][6],xmm0[7],[[X1]][7] +; SSSE3-NEXT: retq +entry: + %s.1.0 = shufflevector <16 x i8> %s.0.0, <16 x i8> %s.0.1, <16 x i32> + %s.1.1 = shufflevector <16 x i8> %s.0.1, <16 x i8> %s.0.2, <16 x i32> + %s.2.0 = shufflevector <16 x i8> %s.1.0, <16 x i8> %s.1.1, <16 x i32> + + ret <16 x i8> %s.2.0 +} -- cgit v1.2.3