Diffstat (limited to 'llvm/test')
-rw-r--r--  llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll |  18
-rw-r--r--  llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll     |  14
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-arith.ll             | 112
-rw-r--r--  llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll  |  22
-rw-r--r--  llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll    |  16
-rw-r--r--  llvm/test/CodeGen/X86/promote-vec3.ll                   |   6
-rw-r--r--  llvm/test/CodeGen/X86/shrink_vmul.ll                    | 202
-rw-r--r--  llvm/test/CodeGen/X86/vec_smulo.ll                      | 152
-rw-r--r--  llvm/test/CodeGen/X86/vec_umulo.ll                      | 140
-rw-r--r--  llvm/test/CodeGen/X86/vector-reduce-mul.ll              |  52
-rw-r--r--  llvm/test/CodeGen/X86/xor.ll                            |   7
11 files changed, 379 insertions(+), 362 deletions(-)
diff --git a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
index 375f220fe2c..bf186263b88 100644
--- a/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
+++ b/llvm/test/CodeGen/AArch64/vecreduce-and-legalization.ll
@@ -105,17 +105,17 @@ define i8 @test_v9i8(<9 x i8> %a) nounwind {
; CHECK-NEXT: mov v0.b[14], w8
; CHECK-NEXT: mov v0.b[15], w8
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: and v1.8b, v0.8b, v1.8b
-; CHECK-NEXT: umov w8, v1.b[1]
-; CHECK-NEXT: umov w9, v1.b[0]
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
+; CHECK-NEXT: umov w8, v0.b[1]
+; CHECK-NEXT: umov w9, v0.b[0]
; CHECK-NEXT: and w8, w9, w8
-; CHECK-NEXT: umov w9, v1.b[2]
+; CHECK-NEXT: umov w9, v0.b[2]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v1.b[3]
+; CHECK-NEXT: umov w9, v0.b[3]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v1.b[4]
+; CHECK-NEXT: umov w9, v0.b[4]
; CHECK-NEXT: and w8, w8, w9
-; CHECK-NEXT: umov w9, v1.b[5]
+; CHECK-NEXT: umov w9, v0.b[5]
; CHECK-NEXT: and w8, w8, w9
; CHECK-NEXT: umov w9, v0.b[6]
; CHECK-NEXT: and w8, w8, w9
@@ -132,9 +132,9 @@ define i32 @test_v3i32(<3 x i32> %a) nounwind {
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov v0.s[3], w8
; CHECK-NEXT: ext v1.16b, v0.16b, v0.16b, #8
-; CHECK-NEXT: and v1.8b, v0.8b, v1.8b
+; CHECK-NEXT: and v0.8b, v0.8b, v1.8b
; CHECK-NEXT: mov w8, v0.s[1]
-; CHECK-NEXT: fmov w9, s1
+; CHECK-NEXT: fmov w9, s0
; CHECK-NEXT: and w0, w9, w8
; CHECK-NEXT: ret
%b = call i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32> %a)
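For reference, the IR this hunk exercises can be reconstructed from the context lines above; the declare is assumed, the signature and call are taken verbatim from the hunk, and the only change in the checks is that the reduction now reuses v0 as the accumulator instead of v1.

declare i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32>)

; Sketch of the test function, assembled from the parts shown in this hunk.
define i32 @test_v3i32(<3 x i32> %a) nounwind {
  %b = call i32 @llvm.experimental.vector.reduce.and.v3i32(<3 x i32> %a)
  ret i32 %b
}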
diff --git a/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll b/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
index 08f6d2185e6..de24681470d 100644
--- a/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
+++ b/llvm/test/CodeGen/ARM/dagcombine-anyexttozeroext.ll
@@ -1,4 +1,3 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple armv7 %s -o - | FileCheck %s
define float @f(<4 x i16>* nocapture %in) {
@@ -64,8 +63,8 @@ define <4 x i32> @h(<4 x i8> *%in) {
ret <4 x i32> %13
}
-; FIXME: The vmov.u + sxt can convert to a vmov.s
define float @i(<4 x i16>* nocapture %in) {
+ ; FIXME: The vmov.u + sxt can convert to a vmov.s
; CHECK-LABEL: i:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
@@ -96,8 +95,8 @@ define float @j(<8 x i8>* nocapture %in) {
ret float %3
}
-; FIXME: The vmov.u + sxt can convert to a vmov.s
define float @k(<8 x i8>* nocapture %in) {
+; FIXME: The vmov.u + sxt can convert to a vmov.s
; CHECK-LABEL: k:
; CHECK: @ %bb.0:
; CHECK-NEXT: vldr d16, [r0]
@@ -114,14 +113,17 @@ define float @k(<8 x i8>* nocapture %in) {
}
define float @KnownUpperZero(<4 x i16> %v) {
+; FIXME: uxtb are not required
; CHECK-LABEL: KnownUpperZero:
; CHECK: @ %bb.0:
-; CHECK-NEXT: vmov d16, r0, r1
+; CHECK-NEXT: vmov.i16 d16, #0x3
+; CHECK-NEXT: vmov d17, r0, r1
+; CHECK-NEXT: vand d16, d17, d16
; CHECK-NEXT: vmov.u16 r0, d16[0]
; CHECK-NEXT: vmov.u16 r1, d16[3]
-; CHECK-NEXT: and r0, r0, #3
+; CHECK-NEXT: uxtb r0, r0
; CHECK-NEXT: vmov s0, r0
-; CHECK-NEXT: and r0, r1, #3
+; CHECK-NEXT: uxtb r0, r1
; CHECK-NEXT: vmov s2, r0
; CHECK-NEXT: vcvt.f32.s32 s0, s0
; CHECK-NEXT: vcvt.f32.s32 s2, s2
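The body of @KnownUpperZero is not part of this hunk. A hedged reconstruction of the kind of IR that would produce these checks (each lane masked to 3, so the upper bits are known zero and the uxtb instructions flagged by the FIXME are redundant):

; Hypothetical reconstruction; the actual test body is not shown in this diff.
define float @KnownUpperZero(<4 x i16> %v) {
  %m  = and <4 x i16> %v, <i16 3, i16 3, i16 3, i16 3> ; upper bits known zero after this
  %e0 = extractelement <4 x i16> %m, i32 0
  %e3 = extractelement <4 x i16> %m, i32 3
  %c0 = sitofp i16 %e0 to float                        ; signed convert of a known-small value
  %c3 = sitofp i16 %e3 to float
  %r  = fadd float %c0, %c3                            ; combining step assumed
  ret float %r
}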
diff --git a/llvm/test/CodeGen/WebAssembly/simd-arith.ll b/llvm/test/CodeGen/WebAssembly/simd-arith.ll
index 57bc86efc91..410b2e6f5ab 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-arith.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-arith.ll
@@ -90,18 +90,18 @@ define <16 x i8> @shl_const_v16i8(<16 x i8> %v) {
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shl_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
-; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 14 lanes
; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shl_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = shl <16 x i8> %v, %x
@@ -126,18 +126,18 @@ define <16 x i8> @shr_s_v16i8(<16 x i8> %v, i8 %x) {
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shr_s_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
-; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 14 lanes
-; SIMD128: i8x16.extract_lane_s $push[[L4:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
+; SIMD128: i8x16.extract_lane_s $push[[L0:[0-9]+]]=, $0, 15{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 15, $pop[[L2]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shr_s_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = ashr <16 x i8> %v, %x
@@ -162,18 +162,18 @@ define <16 x i8> @shr_u_v16i8(<16 x i8> %v, i8 %x) {
; NO-SIMD128-NOT: i8x16
; SIMD128-NEXT: .functype shr_u_vec_v16i8 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i8x16.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]
-; SIMD128-NEXT: i8x16.splat $push[[M3:[0-9]+]]=, $pop[[M2]]
+; SIMD128-NEXT: i8x16.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i8x16.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 14 lanes
; SIMD128: i8x16.extract_lane_u $push[[L4:[0-9]+]]=, $0, 15{{$}}
-; SIMD128-NEXT: i8x16.extract_lane_s $push[[L5:[0-9]+]]=, $1, 15{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 7{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 15, $pop[[M6]]{{$}}
+; SIMD128-NEXT: i8x16.extract_lane_u $push[[L5:[0-9]+]]=, $1, 15{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
+; SIMD128-NEXT: i8x16.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 15, $pop[[L6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <16 x i8> @shr_u_vec_v16i8(<16 x i8> %v, <16 x i8> %x) {
%a = lshr <16 x i8> %v, %x
@@ -316,18 +316,18 @@ define <8 x i16> @shl_const_v8i16(<8 x i16> %v) {
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shl_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shl $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shl $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 6 lanes
; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shl $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.shl $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shl_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = shl <8 x i16> %v, %x
@@ -351,18 +351,18 @@ define <8 x i16> @shr_s_v8i16(<8 x i16> %v, i16 %x) {
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shr_s_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 6 lanes
-; SIMD128: i16x8.extract_lane_s $push[[L4:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shr_s $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
+; SIMD128: i16x8.extract_lane_s $push[[L0:[0-9]+]]=, $0, 7{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.shr_s $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop{{[0-9]+}}, 7, $pop[[L2]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shr_s_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = ashr <8 x i16> %v, %x
@@ -386,18 +386,18 @@ define <8 x i16> @shr_u_v8i16(<8 x i16> %v, i16 %x) {
; NO-SIMD128-NOT: i16x8
; SIMD128-NEXT: .functype shr_u_vec_v8i16 (v128, v128) -> (v128){{$}}
; SIMD128-NEXT: i16x8.extract_lane_u $push[[L0:[0-9]+]]=, $0, 0{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L1:[0-9]+]]=, $1, 0{{$}}
; SIMD128-NEXT: i32.const $push[[M0:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M1:[0-9]+]]=, $pop[[L1]], $pop[[M0]]{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[M2:[0-9]+]]=, $pop[[L0]], $pop[[M1]]{{$}}
-; SIMD128-NEXT: i16x8.splat $push[[M3:[0-9]+]]=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[M1:[0-9]+]]=, $pop[[M0]]{{$}}
+; SIMD128-NEXT: v128.and $push[[M2:[0-9]+]]=, $1, $pop[[M1]]{{$}}
+; SIMD128-NEXT: local.tee $push[[M:[0-9]+]]=, $1=, $pop[[M2]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L1:[0-9]+]]=, $pop[[M]], 0{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[L2:[0-9]+]]=, $pop[[L0]], $pop[[L1]]{{$}}
+; SIMD128-NEXT: i16x8.splat $push[[L3:[0-9]+]]=, $pop[[L2]]{{$}}
; Skip 6 lanes
; SIMD128: i16x8.extract_lane_u $push[[L4:[0-9]+]]=, $0, 7{{$}}
-; SIMD128-NEXT: i16x8.extract_lane_s $push[[L5:[0-9]+]]=, $1, 7{{$}}
-; SIMD128-NEXT: i32.const $push[[M4:[0-9]+]]=, 15{{$}}
-; SIMD128-NEXT: i32.and $push[[M5:[0-9]+]]=, $pop[[L5]], $pop[[M4]]{{$}}
-; SIMD128-NEXT: i32.shr_u $push[[M6:[0-9]+]]=, $pop[[L4]], $pop[[M5]]{{$}}
-; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[M7:[0-9]+]], 7, $pop[[M6]]{{$}}
+; SIMD128-NEXT: i16x8.extract_lane_u $push[[L5:[0-9]+]]=, $1, 7{{$}}
+; SIMD128-NEXT: i32.shr_u $push[[L6:[0-9]+]]=, $pop[[L4]], $pop[[L5]]{{$}}
+; SIMD128-NEXT: i16x8.replace_lane $push[[R:[0-9]+]]=, $pop[[L7:[0-9]+]], 7, $pop[[L6]]{{$}}
; SIMD128-NEXT: return $pop[[R]]{{$}}
define <8 x i16> @shr_u_vec_v8i16(<8 x i16> %v, <8 x i16> %x) {
%a = lshr <8 x i16> %v, %x
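The updated checks across these shift tests all reflect one lowering change: the variable shift amount is masked once as a whole vector (splat the lane-width-minus-one constant, one v128.and, then local.tee) instead of masking each extracted lane with a scalar i32.const/i32.and pair, after which the lanes can be extracted unsigned. In IR terms the scalarized lowering now behaves as if the shift amount had been pre-masked; an illustrative sketch for the v8i16 case above (not the actual lowering code):

  %amt = and <8 x i16> %x, <i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15>
  %a   = lshr <8 x i16> %v, %amt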
diff --git a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
index 149b1842b6c..b00fcf68f2d 100644
--- a/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
+++ b/llvm/test/CodeGen/WebAssembly/simd-extended-extract.ll
@@ -21,6 +21,16 @@ define void @foo(<4 x i8>* %p) {
; CHECK: .functype foo (i32) -> ()
; CHECK-NEXT: i32.load8_u 0
; CHECK-NEXT: i32x4.splat
+; CHECK-NEXT: i32.load8_u 1
+; CHECK-NEXT: i32x4.replace_lane 1
+; CHECK-NEXT: i32.const 2
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: i32.load8_u 0
+; CHECK-NEXT: i32x4.replace_lane 2
+; CHECK-NEXT: i32.const 3
+; CHECK-NEXT: i32.add
+; CHECK-NEXT: i32.load8_u 0
+; CHECK-NEXT: i32x4.replace_lane 3
; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 0
; CHECK-NEXT: f64.convert_i32_s
@@ -30,9 +40,6 @@ define void @foo(<4 x i8>* %p) {
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.splat
-; CHECK-NEXT: i32.load8_u 1
-; CHECK-NEXT: i32x4.replace_lane 1
-; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 4
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
@@ -41,11 +48,6 @@ define void @foo(<4 x i8>* %p) {
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.replace_lane 1
-; CHECK-NEXT: i32.const 2
-; CHECK-NEXT: i32.add
-; CHECK-NEXT: i32.load8_u 0
-; CHECK-NEXT: i32x4.replace_lane 2
-; CHECK-NEXT: local.tee
; CHECK-NEXT: i8x16.extract_lane_s 8
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
@@ -54,10 +56,6 @@ define void @foo(<4 x i8>* %p) {
; CHECK-NEXT: f64.add
; CHECK-NEXT: f32.demote_f64
; CHECK-NEXT: f32x4.replace_lane 2
-; CHECK-NEXT: i32.const 3
-; CHECK-NEXT: i32.add
-; CHECK-NEXT: i32.load8_u 0
-; CHECK-NEXT: i32x4.replace_lane 3
; CHECK-NEXT: i8x16.extract_lane_s 12
; CHECK-NEXT: f64.convert_i32_s
; CHECK-NEXT: f64.const 0x0p0
diff --git a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
index 4da750e9ecd..d7611d1de0a 100644
--- a/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
+++ b/llvm/test/CodeGen/X86/avx512-intrinsics-fast-isel.ll
@@ -8319,7 +8319,7 @@ define float @test_mm512_reduce_max_ps(<16 x float> %__W) {
; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
-; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -8336,7 +8336,7 @@ define float @test_mm512_reduce_max_ps(<16 x float> %__W) {
; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0
-; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -8445,7 +8445,7 @@ define float @test_mm512_reduce_min_ps(<16 x float> %__W) {
; X86-NEXT: vminps %xmm1, %xmm0, %xmm0
; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X86-NEXT: vminps %xmm1, %xmm0, %xmm0
-; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X86-NEXT: vminss %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -8462,7 +8462,7 @@ define float @test_mm512_reduce_min_ps(<16 x float> %__W) {
; X64-NEXT: vminps %xmm1, %xmm0, %xmm0
; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X64-NEXT: vminps %xmm1, %xmm0, %xmm0
-; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT: vminss %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -8623,7 +8623,7 @@ define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W)
; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X86-NEXT: vmaxps %xmm1, %xmm0, %xmm0
-; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X86-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -8643,7 +8643,7 @@ define float @test_mm512_mask_reduce_max_ps(i16 zeroext %__M, <16 x float> %__W)
; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0
; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X64-NEXT: vmaxps %xmm1, %xmm0, %xmm0
-; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
@@ -8808,7 +8808,7 @@ define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W)
; X86-NEXT: vminps %xmm1, %xmm0, %xmm0
; X86-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X86-NEXT: vminps %xmm1, %xmm0, %xmm0
-; X86-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X86-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X86-NEXT: vminss %xmm1, %xmm0, %xmm0
; X86-NEXT: vmovss %xmm0, (%esp)
; X86-NEXT: flds (%esp)
@@ -8828,7 +8828,7 @@ define float @test_mm512_mask_reduce_min_ps(i16 zeroext %__M, <16 x float> %__W)
; X64-NEXT: vminps %xmm1, %xmm0, %xmm0
; X64-NEXT: vpermilpd {{.*#+}} xmm1 = xmm0[1,0]
; X64-NEXT: vminps %xmm1, %xmm0, %xmm0
-; X64-NEXT: vmovshdup {{.*#+}} xmm1 = xmm0[1,1,3,3]
+; X64-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT: vminss %xmm1, %xmm0, %xmm0
; X64-NEXT: vzeroupper
; X64-NEXT: retq
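Every change in this file is the same substitution: the shuffle feeding the final scalar vmaxss/vminss switches from vmovshdup to vpermilps. Both are correct here, since only element 0 of the shuffled temporary is consumed and element 1 of the source lands in position 0 under either mask. As shufflevector masks (illustrative only):

  %old = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 1, i32 3, i32 3> ; vmovshdup
  %new = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2> ; vpermilps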
diff --git a/llvm/test/CodeGen/X86/promote-vec3.ll b/llvm/test/CodeGen/X86/promote-vec3.ll
index 19eda22b5b7..61df546e057 100644
--- a/llvm/test/CodeGen/X86/promote-vec3.ll
+++ b/llvm/test/CodeGen/X86/promote-vec3.ll
@@ -8,9 +8,13 @@
define <3 x i16> @zext_i8(<3 x i8>) {
; SSE3-LABEL: zext_i8:
; SSE3: # %bb.0:
-; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %edx
+; SSE3-NEXT: movzbl {{[0-9]+}}(%esp), %eax
+; SSE3-NEXT: movd %eax, %xmm0
+; SSE3-NEXT: pinsrw $1, %edx, %xmm0
+; SSE3-NEXT: pinsrw $2, %ecx, %xmm0
+; SSE3-NEXT: movd %xmm0, %eax
; SSE3-NEXT: # kill: def $ax killed $ax killed $eax
; SSE3-NEXT: # kill: def $dx killed $dx killed $edx
; SSE3-NEXT: # kill: def $cx killed $cx killed $ecx
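From the signature above, the function under test is a plain 3-element zero-extension; a minimal sketch of the body (assumed, and the argument name %x is introduced here for readability):

define <3 x i16> @zext_i8(<3 x i8> %x) {
  %r = zext <3 x i8> %x to <3 x i16>
  ret <3 x i16> %r
}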
diff --git a/llvm/test/CodeGen/X86/shrink_vmul.ll b/llvm/test/CodeGen/X86/shrink_vmul.ll
index 1c1032a4c32..5ceb299befc 100644
--- a/llvm/test/CodeGen/X86/shrink_vmul.ll
+++ b/llvm/test/CodeGen/X86/shrink_vmul.ll
@@ -2085,88 +2085,85 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE-NEXT: movdqa (%eax), %xmm5
-; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-SSE-NEXT: movdqa (%ecx), %xmm2
; X86-SSE-NEXT: movdqa 16(%ecx), %xmm6
-; X86-SSE-NEXT: pxor %xmm1, %xmm1
-; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X86-SSE-NEXT: pxor %xmm0, %xmm0
+; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X86-SSE-NEXT: movdqa %xmm5, %xmm4
-; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
-; X86-SSE-NEXT: movdqa %xmm5, %xmm3
-; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; X86-SSE-NEXT: movdqa %xmm5, %xmm1
-; X86-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X86-SSE-NEXT: movd %xmm1, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,1,2,3]
-; X86-SSE-NEXT: movd %xmm1, %esi
+; X86-SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
+; X86-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
+; X86-SSE-NEXT: movd %xmm0, %eax
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3]
+; X86-SSE-NEXT: movd %xmm0, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
-; X86-SSE-NEXT: movd %edx, %xmm1
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
-; X86-SSE-NEXT: movd %xmm7, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm6[2,3,0,1]
-; X86-SSE-NEXT: movd %xmm7, %esi
+; X86-SSE-NEXT: movd %edx, %xmm0
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[2,3,0,1]
+; X86-SSE-NEXT: movd %xmm3, %eax
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm6[2,3,0,1]
+; X86-SSE-NEXT: movd %xmm3, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
; X86-SSE-NEXT: movd %edx, %xmm7
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1]
-; X86-SSE-NEXT: movd %xmm3, %eax
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm0[0],xmm7[1],xmm0[1]
+; X86-SSE-NEXT: movd %xmm5, %eax
; X86-SSE-NEXT: movd %xmm6, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
-; X86-SSE-NEXT: movd %xmm3, %eax
; X86-SSE-NEXT: movd %edx, %xmm3
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; X86-SSE-NEXT: movd %xmm5, %eax
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
+; X86-SSE-NEXT: movd %xmm5, %esi
+; X86-SSE-NEXT: xorl %edx, %edx
+; X86-SSE-NEXT: divl %esi
+; X86-SSE-NEXT: movd %edx, %xmm5
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm4[3,1,2,3]
+; X86-SSE-NEXT: movd %xmm6, %eax
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
; X86-SSE-NEXT: movd %xmm6, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
; X86-SSE-NEXT: movd %edx, %xmm6
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
-; X86-SSE-NEXT: movdqa %xmm5, %xmm7
-; X86-SSE-NEXT: psrld $16, %xmm7
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,0,1]
; X86-SSE-NEXT: movd %xmm7, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[1,1,2,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
; X86-SSE-NEXT: movd %xmm7, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
; X86-SSE-NEXT: movd %edx, %xmm7
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; X86-SSE-NEXT: movd %xmm4, %eax
; X86-SSE-NEXT: movd %xmm2, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
-; X86-SSE-NEXT: psrlq $48, %xmm5
-; X86-SSE-NEXT: movd %xmm5, %eax
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,1,2,3]
-; X86-SSE-NEXT: movd %xmm5, %esi
-; X86-SSE-NEXT: movd %edx, %xmm5
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
-; X86-SSE-NEXT: xorl %edx, %edx
-; X86-SSE-NEXT: divl %esi
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
; X86-SSE-NEXT: movd %xmm4, %eax
; X86-SSE-NEXT: movd %edx, %xmm4
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X86-SSE-NEXT: movd %xmm2, %esi
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl %esi
; X86-SSE-NEXT: movd %edx, %xmm2
-; X86-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
-; X86-SSE-NEXT: movd %xmm0, %eax
-; X86-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm4[0,0]
-; X86-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X86-SSE-NEXT: pmuludq %xmm0, %xmm7
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3]
-; X86-SSE-NEXT: pmuludq %xmm0, %xmm5
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,2,2,3]
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
-; X86-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm1[0,0]
-; X86-SSE-NEXT: pmuludq %xmm0, %xmm3
-; X86-SSE-NEXT: pmuludq %xmm0, %xmm6
+; X86-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; X86-SSE-NEXT: movd %xmm1, %eax
+; X86-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
+; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
+; X86-SSE-NEXT: pmuludq %xmm1, %xmm4
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
+; X86-SSE-NEXT: pmuludq %xmm1, %xmm2
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X86-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; X86-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm0[0,0]
+; X86-SSE-NEXT: pmuludq %xmm1, %xmm3
+; X86-SSE-NEXT: pmuludq %xmm1, %xmm5
; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
-; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,2,2,3]
+; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
; X86-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; X86-SSE-NEXT: xorl %edx, %edx
; X86-SSE-NEXT: divl 32(%ecx)
@@ -2327,95 +2324,92 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
; X64-SSE-LABEL: PR34947:
; X64-SSE: # %bb.0:
; X64-SSE-NEXT: movdqa (%rdi), %xmm5
-; X64-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X64-SSE-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X64-SSE-NEXT: movdqa (%rsi), %xmm2
; X64-SSE-NEXT: movdqa 16(%rsi), %xmm6
-; X64-SSE-NEXT: pxor %xmm1, %xmm1
-; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
+; X64-SSE-NEXT: pxor %xmm0, %xmm0
+; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; X64-SSE-NEXT: movdqa %xmm5, %xmm3
-; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
-; X64-SSE-NEXT: movdqa %xmm5, %xmm7
-; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm1[4],xmm7[5],xmm1[5],xmm7[6],xmm1[6],xmm7[7],xmm1[7]
-; X64-SSE-NEXT: movdqa %xmm5, %xmm1
-; X64-SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; X64-SSE-NEXT: movd %xmm1, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[3,1,2,3]
-; X64-SSE-NEXT: movd %xmm1, %ecx
+; X64-SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; X64-SSE-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm0[4],xmm5[5],xmm0[5],xmm5[6],xmm0[6],xmm5[7],xmm0[7]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[3,1,2,3]
+; X64-SSE-NEXT: movd %xmm0, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[3,1,2,3]
+; X64-SSE-NEXT: movd %xmm0, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm8
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,3,0,1]
-; X64-SSE-NEXT: movd %xmm1, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,3,0,1]
-; X64-SSE-NEXT: movd %xmm1, %ecx
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,3,0,1]
+; X64-SSE-NEXT: movd %xmm4, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,3,0,1]
+; X64-SSE-NEXT: movd %xmm4, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
-; X64-SSE-NEXT: movd %edx, %xmm1
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
-; X64-SSE-NEXT: movd %xmm7, %eax
+; X64-SSE-NEXT: movd %edx, %xmm7
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1]
+; X64-SSE-NEXT: movd %xmm5, %eax
; X64-SSE-NEXT: movd %xmm6, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm4
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,2,3]
-; X64-SSE-NEXT: movd %xmm7, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,2,3]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
+; X64-SSE-NEXT: movd %xmm5, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm6[1,1,2,3]
+; X64-SSE-NEXT: movd %xmm5, %ecx
+; X64-SSE-NEXT: xorl %edx, %edx
+; X64-SSE-NEXT: divl %ecx
+; X64-SSE-NEXT: movd %edx, %xmm5
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm7[0]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[3,1,2,3]
+; X64-SSE-NEXT: movd %xmm6, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,1,2,3]
; X64-SSE-NEXT: movd %xmm6, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm6
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm1[0]
-; X64-SSE-NEXT: movdqa %xmm5, %xmm1
-; X64-SSE-NEXT: psrld $16, %xmm1
-; X64-SSE-NEXT: movd %xmm1, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,1,2,3]
-; X64-SSE-NEXT: movd %xmm1, %ecx
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[2,3,0,1]
+; X64-SSE-NEXT: movd %xmm7, %eax
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[2,3,0,1]
+; X64-SSE-NEXT: movd %xmm7, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm7
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; X64-SSE-NEXT: movd %xmm3, %eax
; X64-SSE-NEXT: movd %xmm2, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
-; X64-SSE-NEXT: movd %edx, %xmm1
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1]
-; X64-SSE-NEXT: psrlq $48, %xmm5
-; X64-SSE-NEXT: movd %xmm5, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[3,1,2,3]
-; X64-SSE-NEXT: movd %xmm5, %ecx
-; X64-SSE-NEXT: xorl %edx, %edx
-; X64-SSE-NEXT: divl %ecx
-; X64-SSE-NEXT: movd %edx, %xmm5
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
+; X64-SSE-NEXT: movd %edx, %xmm0
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
; X64-SSE-NEXT: movd %xmm3, %eax
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,3]
; X64-SSE-NEXT: movd %xmm2, %ecx
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl %ecx
; X64-SSE-NEXT: movd %edx, %xmm2
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
-; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
-; X64-SSE-NEXT: movd %xmm0, %eax
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm7[0]
+; X64-SSE-NEXT: movd %xmm1, %eax
; X64-SSE-NEXT: xorl %edx, %edx
; X64-SSE-NEXT: divl 32(%rsi)
-; X64-SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,0],xmm5[0,0]
-; X64-SSE-NEXT: movdqa {{.*#+}} xmm0 = [8199,8199,8199,8199]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm7
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[0,2,2,3]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm1
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm4
+; X64-SSE-NEXT: movdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
+; X64-SSE-NEXT: pmuludq %xmm1, %xmm0
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; X64-SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,0],xmm6[0,0]
+; X64-SSE-NEXT: pmuludq %xmm1, %xmm2
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
+; X64-SSE-NEXT: pmuludq %xmm1, %xmm4
; X64-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
-; X64-SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,0],xmm8[0,0]
-; X64-SSE-NEXT: pmuludq %xmm0, %xmm6
-; X64-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm6[0,2,2,3]
-; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
+; X64-SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,0],xmm8[0,0]
+; X64-SSE-NEXT: pmuludq %xmm1, %xmm5
+; X64-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,2,2,3]
+; X64-SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; X64-SSE-NEXT: imull $8199, %edx, %eax # imm = 0x2007
; X64-SSE-NEXT: movl %eax, (%rax)
; X64-SSE-NEXT: movdqa %xmm2, (%rax)
-; X64-SSE-NEXT: movdqa %xmm1, (%rax)
+; X64-SSE-NEXT: movdqa %xmm0, (%rax)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: PR34947:
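A hedged sketch of what PR34947 computes, inferred from the checks: each divl keeps %edx, i.e. the remainder, and the result is scaled by 8199 (imm 0x2007). The exact IR body and store targets are assumptions:

define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) nounwind {
  %a0  = load <9 x i16>, <9 x i16>* %p0
  %a1  = load <9 x i32>, <9 x i32>* %p1
  %ext = zext <9 x i16> %a0 to <9 x i32>
  %rem = urem <9 x i32> %ext, %a1      ; divl followed by movd %edx, i.e. the remainder
  %mul = mul <9 x i32> %rem, <i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199, i32 8199>
  store <9 x i32> %mul, <9 x i32>* %p1 ; store target assumed
  ret void
}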
diff --git a/llvm/test/CodeGen/X86/vec_smulo.ll b/llvm/test/CodeGen/X86/vec_smulo.ll
index 547e5834ecf..385c556e969 100644
--- a/llvm/test/CodeGen/X86/vec_smulo.ll
+++ b/llvm/test/CodeGen/X86/vec_smulo.ll
@@ -195,17 +195,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSE2-NEXT: psubd %xmm2, %xmm4
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSE2-NEXT: movq %xmm2, (%rdi)
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: pcmpeqd %xmm4, %xmm2
-; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
-; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSE2-NEXT: movd %xmm0, 8(%rdi)
-; SSE2-NEXT: movdqa %xmm1, %xmm0
+; SSE2-NEXT: pcmpeqd %xmm0, %xmm0
+; SSE2-NEXT: pxor %xmm2, %xmm0
+; SSE2-NEXT: movd %xmm1, 8(%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: smulo_v3i32:
@@ -225,17 +225,17 @@ define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,3,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; SSSE3-NEXT: psubd %xmm2, %xmm4
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
-; SSSE3-NEXT: movq %xmm2, (%rdi)
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
+; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm2
-; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
-; SSSE3-NEXT: pxor %xmm2, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
-; SSSE3-NEXT: movd %xmm0, 8(%rdi)
-; SSSE3-NEXT: movdqa %xmm1, %xmm0
+; SSSE3-NEXT: pcmpeqd %xmm0, %xmm0
+; SSSE3-NEXT: pxor %xmm2, %xmm0
+; SSSE3-NEXT: movd %xmm1, 8(%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: smulo_v3i32:
@@ -1767,52 +1767,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
; SSE2-NEXT: psrad $8, %xmm0
; SSE2-NEXT: pslld $8, %xmm1
; SSE2-NEXT: psrad $8, %xmm1
-; SSE2-NEXT: pxor %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pcmpgtd %xmm1, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pcmpgtd %xmm0, %xmm3
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: paddd %xmm2, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT: pcmpgtd %xmm0, %xmm4
+; SSE2-NEXT: pand %xmm1, %xmm4
+; SSE2-NEXT: paddd %xmm2, %xmm4
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE2-NEXT: pmuludq %xmm1, %xmm0
-; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pmuludq %xmm5, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; SSE2-NEXT: psubd %xmm3, %xmm5
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSE2-NEXT: movdqa %xmm3, %xmm1
+; SSE2-NEXT: psubd %xmm4, %xmm3
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSE2-NEXT: movdqa %xmm4, %xmm1
; SSE2-NEXT: pslld $8, %xmm1
; SSE2-NEXT: psrad $8, %xmm1
-; SSE2-NEXT: pcmpeqd %xmm3, %xmm1
-; SSE2-NEXT: psrad $31, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
-; SSE2-NEXT: pxor %xmm4, %xmm3
-; SSE2-NEXT: pxor %xmm4, %xmm1
-; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSE2-NEXT: psrad $31, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm4
+; SSE2-NEXT: pcmpeqd %xmm3, %xmm3
+; SSE2-NEXT: pxor %xmm3, %xmm4
+; SSE2-NEXT: pxor %xmm3, %xmm1
+; SSE2-NEXT: por %xmm4, %xmm1
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
-; SSE2-NEXT: movd %xmm2, %edx
-; SSE2-NEXT: movw %dx, 3(%rdi)
+; SSE2-NEXT: movd %xmm2, %ecx
+; SSE2-NEXT: movw %cx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSE2-NEXT: movd %xmm0, %eax
-; SSE2-NEXT: movw %ax, 9(%rdi)
; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
-; SSE2-NEXT: shrl $16, %edx
-; SSE2-NEXT: movb %dl, 5(%rdi)
+; SSE2-NEXT: movb %cl, 5(%rdi)
+; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: movw %ax, 9(%rdi)
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
+; SSE2-NEXT: shrl $16, %ecx
+; SSE2-NEXT: movb %cl, 8(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -1822,52 +1822,52 @@ define <4 x i32> @smulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) noun
; SSSE3-NEXT: psrad $8, %xmm0
; SSSE3-NEXT: pslld $8, %xmm1
; SSSE3-NEXT: psrad $8, %xmm1
-; SSSE3-NEXT: pxor %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pcmpgtd %xmm1, %xmm2
; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: pcmpgtd %xmm0, %xmm3
-; SSSE3-NEXT: pand %xmm1, %xmm3
-; SSSE3-NEXT: paddd %xmm2, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pcmpgtd %xmm0, %xmm4
+; SSSE3-NEXT: pand %xmm1, %xmm4
+; SSSE3-NEXT: paddd %xmm2, %xmm4
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSSE3-NEXT: pmuludq %xmm1, %xmm0
-; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm4, %xmm2
+; SSSE3-NEXT: pmuludq %xmm5, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1]
-; SSSE3-NEXT: psubd %xmm3, %xmm5
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
-; SSSE3-NEXT: movdqa %xmm3, %xmm1
+; SSSE3-NEXT: psubd %xmm4, %xmm3
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
+; SSSE3-NEXT: movdqa %xmm4, %xmm1
; SSSE3-NEXT: pslld $8, %xmm1
; SSSE3-NEXT: psrad $8, %xmm1
-; SSSE3-NEXT: pcmpeqd %xmm3, %xmm1
-; SSSE3-NEXT: psrad $31, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm5, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
-; SSSE3-NEXT: pxor %xmm4, %xmm3
-; SSSE3-NEXT: pxor %xmm4, %xmm1
-; SSSE3-NEXT: por %xmm3, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm4[3,1,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm4[2,3,0,1]
+; SSSE3-NEXT: psrad $31, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm4
+; SSSE3-NEXT: pcmpeqd %xmm3, %xmm3
+; SSSE3-NEXT: pxor %xmm3, %xmm4
+; SSSE3-NEXT: pxor %xmm3, %xmm1
+; SSSE3-NEXT: por %xmm4, %xmm1
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; SSSE3-NEXT: movd %xmm0, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
-; SSSE3-NEXT: movd %xmm2, %edx
-; SSSE3-NEXT: movw %dx, 3(%rdi)
+; SSSE3-NEXT: movd %xmm2, %ecx
+; SSSE3-NEXT: movw %cx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,2,3]
-; SSSE3-NEXT: movd %xmm0, %eax
-; SSSE3-NEXT: movw %ax, 9(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
-; SSSE3-NEXT: shrl $16, %edx
-; SSSE3-NEXT: movb %dl, 5(%rdi)
+; SSSE3-NEXT: movb %cl, 5(%rdi)
+; SSSE3-NEXT: movd %xmm5, %eax
+; SSSE3-NEXT: movw %ax, 9(%rdi)
+; SSSE3-NEXT: movd %xmm6, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
+; SSSE3-NEXT: shrl $16, %ecx
+; SSSE3-NEXT: movb %cl, 8(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
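The check changes in this file are register-renaming churn from the same underlying combine; the tests themselves exercise the vector signed multiply-with-overflow intrinsic. A sketch of the typical shape of such a test, with the body assumed from the signature at the top of the hunk:

declare { <3 x i32>, <3 x i1> } @llvm.smul.with.overflow.v3i32(<3 x i32>, <3 x i32>)

define <3 x i32> @smulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) nounwind {
  %t   = call { <3 x i32>, <3 x i1> } @llvm.smul.with.overflow.v3i32(<3 x i32> %a0, <3 x i32> %a1)
  %val = extractvalue { <3 x i32>, <3 x i1> } %t, 0
  %ovf = extractvalue { <3 x i32>, <3 x i1> } %t, 1
  %res = sext <3 x i1> %ovf to <3 x i32> ; overflow mask returned as all-ones lanes
  store <3 x i32> %val, <3 x i32>* %p2   ; product stored through %p2
  ret <3 x i32> %res
}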
diff --git a/llvm/test/CodeGen/X86/vec_umulo.ll b/llvm/test/CodeGen/X86/vec_umulo.ll
index a77a85e073a..c8146e34468 100644
--- a/llvm/test/CodeGen/X86/vec_umulo.ll
+++ b/llvm/test/CodeGen/X86/vec_umulo.ll
@@ -179,10 +179,12 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
; SSE2-NEXT: pcmpeqd %xmm3, %xmm2
; SSE2-NEXT: pcmpeqd %xmm1, %xmm1
; SSE2-NEXT: pxor %xmm2, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSE2-NEXT: movd %xmm2, 8(%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movq %xmm0, (%rdi)
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSE2-NEXT: movd %xmm0, 8(%rdi)
; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
@@ -199,10 +201,12 @@ define <3 x i32> @umulo_v3i32(<3 x i32> %a0, <3 x i32> %a1, <3 x i32>* %p2) noun
; SSSE3-NEXT: pcmpeqd %xmm3, %xmm2
; SSSE3-NEXT: pcmpeqd %xmm1, %xmm1
; SSSE3-NEXT: pxor %xmm2, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
-; SSSE3-NEXT: movd %xmm2, 8(%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movq %xmm0, (%rdi)
+; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
+; SSSE3-NEXT: movd %xmm0, 8(%rdi)
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
@@ -1563,90 +1567,90 @@ define <2 x i32> @umulo_v2i64(<2 x i64> %a0, <2 x i64> %a1, <2 x i64>* %p2) noun
define <4 x i32> @umulo_v4i24(<4 x i24> %a0, <4 x i24> %a1, <4 x i24>* %p2) nounwind {
; SSE2-LABEL: umulo_v4i24:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: pand %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSE2-NEXT: pand %xmm2, %xmm1
+; SSE2-NEXT: pand %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm1, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSE2-NEXT: pmuludq %xmm4, %xmm2
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE2-NEXT: pxor %xmm4, %xmm4
; SSE2-NEXT: pcmpeqd %xmm4, %xmm3
-; SSE2-NEXT: pcmpeqd %xmm5, %xmm5
-; SSE2-NEXT: pxor %xmm5, %xmm3
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE2-NEXT: psrld $24, %xmm0
-; SSE2-NEXT: pcmpeqd %xmm4, %xmm0
-; SSE2-NEXT: pxor %xmm5, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: movd %xmm2, %eax
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
+; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSE2-NEXT: psrld $24, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm1
+; SSE2-NEXT: pcmpeqd %xmm4, %xmm4
+; SSE2-NEXT: pxor %xmm4, %xmm3
+; SSE2-NEXT: pxor %xmm4, %xmm1
+; SSE2-NEXT: por %xmm3, %xmm1
+; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: movw %ax, (%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE2-NEXT: movd %xmm2, %ecx
-; SSE2-NEXT: movw %cx, 6(%rdi)
-; SSE2-NEXT: movd %xmm1, %edx
-; SSE2-NEXT: movw %dx, 3(%rdi)
+; SSE2-NEXT: movw %cx, 3(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 2(%rdi)
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: movw %ax, 9(%rdi)
; SSE2-NEXT: shrl $16, %ecx
-; SSE2-NEXT: movb %cl, 8(%rdi)
-; SSE2-NEXT: shrl $16, %edx
-; SSE2-NEXT: movb %dl, 5(%rdi)
+; SSE2-NEXT: movb %cl, 5(%rdi)
+; SSE2-NEXT: movd %xmm5, %eax
+; SSE2-NEXT: movw %ax, 9(%rdi)
+; SSE2-NEXT: movd %xmm6, %ecx
+; SSE2-NEXT: movw %cx, 6(%rdi)
; SSE2-NEXT: shrl $16, %eax
; SSE2-NEXT: movb %al, 11(%rdi)
+; SSE2-NEXT: shrl $16, %ecx
+; SSE2-NEXT: movb %cl, 8(%rdi)
+; SSE2-NEXT: movdqa %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: umulo_v4i24:
; SSSE3: # %bb.0:
-; SSSE3-NEXT: movdqa %xmm0, %xmm2
-; SSSE3-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
-; SSSE3-NEXT: pand %xmm0, %xmm1
-; SSSE3-NEXT: pand %xmm0, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm1, %xmm2
-; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,3,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
-; SSSE3-NEXT: pmuludq %xmm0, %xmm1
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,3,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
+; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,255,255,255,0,255,255,255,0,255,255,255,0]
+; SSSE3-NEXT: pand %xmm2, %xmm1
+; SSSE3-NEXT: pand %xmm2, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm1, %xmm0
+; SSSE3-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,3,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,3,3]
+; SSSE3-NEXT: pmuludq %xmm4, %xmm2
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm2[1,3,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSSE3-NEXT: pxor %xmm4, %xmm4
; SSSE3-NEXT: pcmpeqd %xmm4, %xmm3
-; SSSE3-NEXT: pcmpeqd %xmm5, %xmm5
-; SSSE3-NEXT: pxor %xmm5, %xmm3
-; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
-; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,3]
-; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSSE3-NEXT: psrld $24, %xmm0
-; SSSE3-NEXT: pcmpeqd %xmm4, %xmm0
-; SSSE3-NEXT: pxor %xmm5, %xmm0
-; SSSE3-NEXT: por %xmm3, %xmm0
-; SSSE3-NEXT: movd %xmm2, %eax
+; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,2,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,2,2,3]
+; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,1,2,3]
+; SSSE3-NEXT: pshufd {{.*#+}} xmm6 = xmm1[2,3,0,1]
+; SSSE3-NEXT: psrld $24, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm1
+; SSSE3-NEXT: pcmpeqd %xmm4, %xmm4
+; SSSE3-NEXT: pxor %xmm4, %xmm3
+; SSSE3-NEXT: pxor %xmm4, %xmm1
+; SSSE3-NEXT: por %xmm3, %xmm1
+; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: movw %ax, (%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSSE3-NEXT: movd %xmm2, %ecx
-; SSSE3-NEXT: movw %cx, 6(%rdi)
-; SSSE3-NEXT: movd %xmm1, %edx
-; SSSE3-NEXT: movw %dx, 3(%rdi)
+; SSSE3-NEXT: movw %cx, 3(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 2(%rdi)
-; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
-; SSSE3-NEXT: movd %xmm1, %eax
-; SSSE3-NEXT: movw %ax, 9(%rdi)
; SSSE3-NEXT: shrl $16, %ecx
-; SSSE3-NEXT: movb %cl, 8(%rdi)
-; SSSE3-NEXT: shrl $16, %edx
-; SSSE3-NEXT: movb %dl, 5(%rdi)
+; SSSE3-NEXT: movb %cl, 5(%rdi)
+; SSSE3-NEXT: movd %xmm5, %eax
+; SSSE3-NEXT: movw %ax, 9(%rdi)
+; SSSE3-NEXT: movd %xmm6, %ecx
+; SSSE3-NEXT: movw %cx, 6(%rdi)
; SSSE3-NEXT: shrl $16, %eax
; SSSE3-NEXT: movb %al, 11(%rdi)
+; SSSE3-NEXT: shrl $16, %ecx
+; SSSE3-NEXT: movb %cl, 8(%rdi)
+; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: umulo_v4i24:
diff --git a/llvm/test/CodeGen/X86/vector-reduce-mul.ll b/llvm/test/CodeGen/X86/vector-reduce-mul.ll
index a020863a919..b4cca0015bd 100644
--- a/llvm/test/CodeGen/X86/vector-reduce-mul.ll
+++ b/llvm/test/CodeGen/X86/vector-reduce-mul.ll
@@ -1739,11 +1739,14 @@ define i8 @test_v16i8(<16 x i8> %a0) {
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
@@ -1942,11 +1945,14 @@ define i8 @test_v32i8(<32 x i8> %a0) {
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
@@ -2199,11 +2205,14 @@ define i8 @test_v64i8(<64 x i8> %a0) {
; SSE2-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw %xmm2, %xmm0
-; SSE2-NEXT: pand %xmm0, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: pmullw %xmm0, %xmm1
-; SSE2-NEXT: movd %xmm1, %eax
+; SSE2-NEXT: movdqa %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm0, %xmm2
+; SSE2-NEXT: pand %xmm1, %xmm2
+; SSE2-NEXT: packuswb %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
@@ -2563,11 +2572,14 @@ define i8 @test_v128i8(<128 x i8> %a0) {
; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT: pmullw %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm0
-; SSE2-NEXT: packuswb %xmm3, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: pmullw %xmm1, %xmm0
-; SSE2-NEXT: movd %xmm0, %eax
+; SSE2-NEXT: movdqa %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm3, %xmm2
+; SSE2-NEXT: psrlw $8, %xmm2
+; SSE2-NEXT: pmullw %xmm1, %xmm2
+; SSE2-NEXT: pand %xmm0, %xmm2
+; SSE2-NEXT: packuswb %xmm0, %xmm2
+; SSE2-NEXT: movd %xmm2, %eax
; SSE2-NEXT: # kill: def $al killed $al killed $eax
; SSE2-NEXT: retq
;
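These hunks all gain the same movdqa/pand/packuswb tail before the final movd. For reference, the reduction under test, sketched from the signature shown above (the declare is assumed):

declare i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8>)

define i8 @test_v16i8(<16 x i8> %a0) {
  %r = call i8 @llvm.experimental.vector.reduce.mul.v16i8(<16 x i8> %a0)
  ret i8 %r
}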
diff --git a/llvm/test/CodeGen/X86/xor.ll b/llvm/test/CodeGen/X86/xor.ll
index e836b00bb62..654382f7b73 100644
--- a/llvm/test/CodeGen/X86/xor.ll
+++ b/llvm/test/CodeGen/X86/xor.ll
@@ -407,10 +407,13 @@ define i32 @PR17487(i1 %tobool) {
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1]
; X32-NEXT: pandn {{\.LCPI.*}}, %xmm0
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X32-NEXT: movd %xmm1, %ecx
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
-; X32-NEXT: movd %xmm0, %ecx
+; X32-NEXT: movd %xmm0, %edx
+; X32-NEXT: xorl $1, %edx
; X32-NEXT: xorl %eax, %eax
-; X32-NEXT: cmpl $1, %ecx
+; X32-NEXT: orl %ecx, %edx
; X32-NEXT: setne %al
; X32-NEXT: retl
;
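The new sequence tests a full 64-bit lane against 1 on i686 (low half xor 1, OR'd with the high half) rather than comparing only the low 32 bits. A hedged reconstruction of the kind of IR behind PR17487; the actual body is not shown in this diff:

define i32 @PR17487(i1 %tobool) {
  %ins  = insertelement <2 x i1> undef, i1 %tobool, i32 1
  %ext  = zext <2 x i1> %ins to <2 x i64>
  %not  = xor <2 x i64> %ext, <i64 1, i64 1> ; lowered via the pandn above
  %lane = extractelement <2 x i64> %not, i32 1
  %cmp  = icmp ne i64 %lane, 1
  %ret  = zext i1 %cmp to i32
  ret i32 %ret
}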