Diffstat (limited to 'llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll')
 llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
index 669aee42fe4..ccf64516ce1 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-128-v2.ll
@@ -358,12 +358,12 @@ define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
 ;
 ; AVX2-LABEL: shuffle_v2i64_03:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_03:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
+; AVX512VL-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
   ret <2 x i64> %shuffle
@@ -400,12 +400,12 @@ define <2 x i64> @shuffle_v2i64_03_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ;
 ; AVX2-LABEL: shuffle_v2i64_03_copy:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_03_copy:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
+; AVX512VL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm2[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
   ret <2 x i64> %shuffle
@@ -555,12 +555,12 @@ define <2 x i64> @shuffle_v2i64_21(<2 x i64> %a, <2 x i64> %b) {
 ;
 ; AVX2-LABEL: shuffle_v2i64_21:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_21:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512VL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
   ret <2 x i64> %shuffle
@@ -597,12 +597,12 @@ define <2 x i64> @shuffle_v2i64_21_copy(<2 x i64> %nonce, <2 x i64> %a, <2 x i64
 ;
 ; AVX2-LABEL: shuffle_v2i64_21_copy:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_21_copy:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
+; AVX512VL-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm1[2,3]
 ; AVX512VL-NEXT:    retq
   %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 2, i32 1>
   ret <2 x i64> %shuffle
@@ -773,8 +773,8 @@ define <2 x i64> @shuffle_v2i64_z1(<2 x i64> %a) {
 ;
 ; AVX2-LABEL: shuffle_v2i64_z1:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_z1:
@@ -964,8 +964,8 @@ define <2 x i64> @shuffle_v2i64_bitcast_z123(<2 x i64> %x) {
 ;
 ; AVX2-LABEL: shuffle_v2i64_bitcast_z123:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: shuffle_v2i64_bitcast_z123:
@@ -1119,14 +1119,14 @@ define <2 x i64> @insert_mem_lo_v2i64(i64* %ptr, <2 x i64> %b) {
 ;
 ; AVX2-LABEL: insert_mem_lo_v2i64:
 ; AVX2:       # BB#0:
-; AVX2-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX2-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX2-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX2-NEXT:    retq
 ;
 ; AVX512VL-LABEL: insert_mem_lo_v2i64:
 ; AVX512VL:       # BB#0:
-; AVX512VL-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
-; AVX512VL-NEXT:    vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
+; AVX512VL-NEXT:    vmovsd {{.*#+}} xmm1 = mem[0],zero
+; AVX512VL-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
 ; AVX512VL-NEXT:    retq
   %a = load i64, i64* %ptr
   %v = insertelement <2 x i64> undef, i64 %a, i32 0
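Each hunk above updates the expected AVX2/AVX512VL codegen for a v2i64 shuffle from an integer-domain instruction (vpblendd, vpxor, vmovq) to its floating-point-domain equivalent (vblendps, vxorps, vmovsd). As a minimal sketch of how one of these check blocks is exercised, assuming the usual RUN-line setup for this test family (the real RUN lines sit at the top of the file, outside this diff):

; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX2

; With this change, the blend of two v2i64 values is expected in the
; floating-point domain (vblendps) rather than the integer domain (vpblendd).
; AVX2-LABEL: shuffle_v2i64_03:
; AVX2:       vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
define <2 x i64> @shuffle_v2i64_03(<2 x i64> %a, <2 x i64> %b) {
  %shuffle = shufflevector <2 x i64> %a, <2 x i64> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x i64> %shuffle
}

CHECK lines of this form are typically regenerated with utils/update_llc_test_checks.py rather than edited by hand, which is why every affected prefix in the file changes in lockstep.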

