author     Simon Pilgrim <llvm-dev@redking.me.uk>    2015-09-30 08:17:50 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2015-09-30 08:17:50 +0000
commit     3d11c994f7d85474b80409efb6e0b4916910252d (patch)
tree       3ab63bc2d19c13e267bc224758bd7b193a9a8977 /llvm/test/CodeGen
parent     82d705e6d99812bd66db531414153117f50728a4 (diff)
[X86][XOP] Added support for the lowering of 128-bit vector shifts to XOP shift instructions
The XOP shifts only have logical/arithmetic versions, and the left/right direction is controlled by whether the per-element shift amount is positive or negative. Because of this I've added new X86ISD nodes instead of trying to force them to use the existing shift nodes.

Additionally, Excavator cores (bdver4) support both XOP and AVX2, meaning that they should use the AVX2 shifts when possible and fall back to XOP in the other cases.

Differential Revision: http://reviews.llvm.org/D8690

llvm-svn: 248878
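As a rough illustration of the pattern the new tests check (the function name below is illustrative, not one of the test functions): a variable arithmetic right shift is lowered for XOP by negating the shift amounts and then using the per-element vpsha* shift, which shifts left for positive amounts and right for negative ones.

; Illustrative sketch: variable arithmetic right shift lowered via XOP.
; XOP has no separate right-shift form, so the shift amounts are negated
; and vpshaq performs the arithmetic shift (vpshlq is the logical form).
define <2 x i64> @ashr_v2i64_xop(<2 x i64> %a, <2 x i64> %b) nounwind {
; Expected XOP lowering (compare var_shift_v2i64 below):
;   vpxor  %xmm2, %xmm2, %xmm2   ; zero
;   vpsubq %xmm1, %xmm2, %xmm1   ; negate the per-element shift amounts
;   vpshaq %xmm1, %xmm0, %xmm0   ; arithmetic shift; negative amounts shift right
  %shift = ashr <2 x i64> %a, %b
  ret <2 x i64> %shift
}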
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-128.ll  136
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-ashr-256.ll  305
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-128.ll  133
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-lshr-256.ll  283
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-shl-128.ll   114
-rw-r--r--  llvm/test/CodeGen/X86/vector-shift-shl-256.ll   246
6 files changed, 1217 insertions(+), 0 deletions(-)
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
index 64d4ea03762..b9171811278 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-128.ll
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -67,6 +69,13 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsubq %xmm3, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOP-LABEL: var_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
@@ -155,6 +164,18 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: var_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsravd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
@@ -276,6 +297,13 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; XOP-LABEL: var_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
@@ -437,6 +465,13 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: var_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
@@ -521,6 +556,22 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsubq %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOPAVX1-LABEL: splatvar_shift_v2i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v2i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastq %xmm1, %xmm1
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -557,6 +608,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOP-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
@@ -591,6 +649,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; XOP-NEXT: vpsraw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
@@ -763,6 +828,22 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: splatvar_shift_v16i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -875,6 +956,13 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsubq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOP-LABEL: constant_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [0,2147483648,0,2147483648]
@@ -941,6 +1029,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: constant_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -1026,6 +1124,13 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; XOP-LABEL: constant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -1172,6 +1277,13 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: constant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
@@ -1271,6 +1383,13 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3]
; AVX2-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -1295,6 +1414,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpsrad $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpsrad $5, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrad $5, %xmm0
@@ -1314,6 +1438,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsraw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpsraw $3, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psraw $3, %xmm0
@@ -1341,6 +1470,13 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpsubb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
index 51ded9ef5ba..25a94a5a1de 100644
--- a/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-ashr-256.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Variable Shifts
@@ -40,6 +42,27 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshaq %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm2, %ymm3
+; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsubq %ymm3, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -79,6 +102,23 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshad %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshad %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -132,6 +172,30 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshaw %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshaw %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -219,6 +283,30 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshab %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshab %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -250,6 +338,26 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vmovddup {{.*#+}} xmm1 = xmm1[0,0]
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm2
+; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
+; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpxor %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsubq %ymm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = ashr <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -272,6 +380,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpsrad %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsrad %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX2-NEXT: vpsrad %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = ashr <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -296,6 +421,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vmovd %xmm1, %eax
+; XOPAVX1-NEXT: movzwl %ax, %eax
+; XOPAVX1-NEXT: vmovd %eax, %xmm1
+; XOPAVX1-NEXT: vpsraw %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsraw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vmovd %xmm1, %eax
+; XOPAVX2-NEXT: movzwl %ax, %eax
+; XOPAVX2-NEXT: vmovd %eax, %xmm1
+; XOPAVX2-NEXT: vpsraw %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = ashr <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -379,6 +523,30 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshab %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = ashr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -414,6 +582,25 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshaq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [4611686018427387904,72057594037927936,4294967296,2]
+; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -441,6 +628,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshad {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsravd {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -490,6 +690,28 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshaw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT: vpshaw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT: vpshaw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -571,6 +793,26 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT: vpshab %xmm1, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -598,6 +840,24 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshaq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpbroadcastq {{.*}}(%rip), %ymm1
+; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsubq %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -615,6 +875,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrad $5, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsrad $5, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrad $5, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -632,6 +905,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsraw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsraw $3, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsraw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -661,6 +947,25 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshab %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
+; XOPAVX2-NEXT: vpxor %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpsubb %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = ashr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
index 1c988946a46..1886f27a4bb 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-128.ll
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -43,6 +45,18 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: var_shift_v2i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsubq %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v2i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -124,6 +138,18 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: var_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsubd %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm1, %xmm2
@@ -245,6 +271,13 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; XOP-LABEL: var_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubw %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
@@ -355,6 +388,13 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: var_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1
@@ -404,6 +444,11 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -436,6 +481,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOP-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
@@ -470,6 +522,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; XOP-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
@@ -580,6 +639,22 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: splatvar_shift_v16i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -653,6 +728,18 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: constant_shift_v2i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v2i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -712,6 +799,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: constant_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -797,6 +894,13 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; XOP-LABEL: constant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -889,6 +993,13 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: constant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
@@ -939,6 +1050,11 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpsrlq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpsrlq $7, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlq $7, %xmm0
@@ -958,6 +1074,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpsrld $5, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrld $5, %xmm0
@@ -977,6 +1098,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsrlw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpsrlw $3, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
@@ -998,6 +1124,13 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOP-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psrlw $3, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
index 03cf0f018d7..96c273005d0 100644
--- a/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-lshr-256.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Variable Shifts
@@ -25,6 +27,23 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshlq %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubq %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvq %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -64,6 +83,23 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubd %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshld %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubd %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -117,6 +153,30 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshlw %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubw %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -171,6 +231,30 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -192,6 +276,19 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsrlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = lshr <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -214,6 +311,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpsrld %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsrld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX2-NEXT: vpsrld %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -238,6 +352,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vmovd %xmm1, %eax
+; XOPAVX1-NEXT: movzwl %ax, %eax
+; XOPAVX1-NEXT: vmovd %eax, %xmm1
+; XOPAVX1-NEXT: vpsrlw %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsrlw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vmovd %xmm1, %eax
+; XOPAVX2-NEXT: movzwl %ax, %eax
+; XOPAVX2-NEXT: vmovd %eax, %xmm1
+; XOPAVX2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -292,6 +425,30 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb %xmm1, %xmm2, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; XOPAVX2-NEXT: vpsubb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm4
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm4, %xmm2
+; XOPAVX2-NEXT: vpsubb %xmm1, %xmm3, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -318,6 +475,22 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpsubq {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -345,6 +518,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlvd {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -394,6 +580,28 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT: vpshlw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vpsubw {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -446,6 +654,26 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -467,6 +695,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsrlq $7, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlq $7, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -484,6 +725,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsrld $5, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrld $5, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -501,6 +755,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsrlw $3, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -522,6 +789,22 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsubb {{.*}}(%rip), %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsrlw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = lshr <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
index fa6533d39cb..2818c288700 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-128.ll
@@ -2,6 +2,8 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Just one 32-bit run to make sure we do reasonable things for i64 shifts.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mcpu=x86-64 | FileCheck %s --check-prefix=ALL --check-prefix=X32-SSE --check-prefix=X32-SSE2
@@ -43,6 +45,16 @@ define <2 x i64> @var_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: var_shift_v2i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v2i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvq %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
@@ -94,6 +106,16 @@ define <4 x i32> @var_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: var_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvd %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $23, %xmm1
@@ -206,6 +228,11 @@ define <8 x i16> @var_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
+; XOP-LABEL: var_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $12, %xmm1
@@ -313,6 +340,11 @@ define <16 x i8> @var_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: var_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: var_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $5, %xmm1
@@ -361,6 +393,11 @@ define <2 x i64> @splatvar_shift_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
; AVX-NEXT: vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movq {{.*#+}} xmm1 = xmm1[0],zero
@@ -393,6 +430,13 @@ define <4 x i32> @splatvar_shift_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
; AVX-NEXT: vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOP-NEXT: vpslld %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: xorps %xmm2, %xmm2
@@ -427,6 +471,13 @@ define <8 x i16> @splatvar_shift_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX-NEXT: vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatvar_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOP-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3,4,5,6,7]
+; XOP-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movd %xmm1, %eax
@@ -533,6 +584,19 @@ define <16 x i8> @splatvar_shift_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX2-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: splatvar_shift_v16i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: splatvar_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
@@ -605,6 +669,16 @@ define <2 x i64> @constant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: constant_shift_v2i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v2i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa %xmm0, %xmm1
@@ -645,6 +719,16 @@ define <4 x i32> @constant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
;
+; XOPAVX1-LABEL: constant_shift_v4i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX2-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm1 = [16,32,64,128]
@@ -671,6 +755,11 @@ define <8 x i16> @constant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: constant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pmullw .LCPI10_0, %xmm0
@@ -748,6 +837,11 @@ define <16 x i8> @constant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpblendvb %xmm1, %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: constant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: constant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
@@ -797,6 +891,11 @@ define <2 x i64> @splatconstant_shift_v2i64(<2 x i64> %a) nounwind {
; AVX-NEXT: vpsllq $7, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v2i64:
+; XOP: # BB#0:
+; XOP-NEXT: vpsllq $7, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v2i64:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllq $7, %xmm0
@@ -816,6 +915,11 @@ define <4 x i32> @splatconstant_shift_v4i32(<4 x i32> %a) nounwind {
; AVX-NEXT: vpslld $5, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v4i32:
+; XOP: # BB#0:
+; XOP-NEXT: vpslld $5, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v4i32:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: pslld $5, %xmm0
@@ -835,6 +939,11 @@ define <8 x i16> @splatconstant_shift_v8i16(<8 x i16> %a) nounwind {
; AVX-NEXT: vpsllw $3, %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v8i16:
+; XOP: # BB#0:
+; XOP-NEXT: vpsllw $3, %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v8i16:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
@@ -856,6 +965,11 @@ define <16 x i8> @splatconstant_shift_v16i8(<16 x i8> %a) nounwind {
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
;
+; XOP-LABEL: splatconstant_shift_v16i8:
+; XOP: # BB#0:
+; XOP-NEXT: vpshlb {{.*}}(%rip), %xmm0, %xmm0
+; XOP-NEXT: retq
+;
; X32-SSE-LABEL: splatconstant_shift_v16i8:
; X32-SSE: # BB#0:
; X32-SSE-NEXT: psllw $3, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
index 6e28494123c..f0ed82a1651 100644
--- a/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-shift-shl-256.ll
@@ -1,5 +1,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+xop,+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=XOP --check-prefix=XOPAVX2
;
; Variable Shifts
@@ -25,6 +27,20 @@ define <4 x i64> @var_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshlq %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpshlq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvq %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, %b
ret <4 x i64> %shift
}
@@ -50,6 +66,20 @@ define <8 x i32> @var_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshld %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpshld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, %b
ret <8 x i32> %shift
}
@@ -103,6 +133,24 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT: vpshlw %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vpshlw %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, %b
ret <16 x i16> %shift
}
@@ -153,6 +201,24 @@ define <32 x i8> @var_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: var_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm3, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: var_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm3
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm3, %xmm2
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, %b
ret <32 x i8> %shift
}
@@ -174,6 +240,19 @@ define <4 x i64> @splatvar_shift_v4i64(<4 x i64> %a, <4 x i64> %b) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpsllq %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsllq %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <4 x i64> %b, <4 x i64> undef, <4 x i32> zeroinitializer
%shift = shl <4 x i64> %a, %splat
ret <4 x i64> %shift
@@ -196,6 +275,23 @@ define <8 x i32> @splatvar_shift_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
; AVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
; AVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpslld %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpslld %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3,4,5,6,7]
+; XOPAVX2-NEXT: vpslld %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
%shift = shl <8 x i32> %a, %splat
ret <8 x i32> %shift
@@ -220,6 +316,25 @@ define <16 x i16> @splatvar_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind
; AVX2-NEXT: vmovd %eax, %xmm1
; AVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vmovd %xmm1, %eax
+; XOPAVX1-NEXT: movzwl %ax, %eax
+; XOPAVX1-NEXT: vmovd %eax, %xmm1
+; XOPAVX1-NEXT: vpsllw %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsllw %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vmovd %xmm1, %eax
+; XOPAVX2-NEXT: movzwl %ax, %eax
+; XOPAVX2-NEXT: vmovd %eax, %xmm1
+; XOPAVX2-NEXT: vpsllw %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
%shift = shl <16 x i16> %a, %splat
ret <16 x i16> %shift
@@ -270,6 +385,26 @@ define <32 x i8> @splatvar_shift_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatvar_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatvar_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpbroadcastb %xmm1, %ymm1
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
+; XOPAVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
+; XOPAVX2-NEXT: vpshlb %xmm3, %xmm2, %xmm2
+; XOPAVX2-NEXT: vpshlb %xmm1, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%splat = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
%shift = shl <32 x i8> %a, %splat
ret <32 x i8> %shift
@@ -296,6 +431,19 @@ define <4 x i64> @constant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlq {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvq {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, <i64 1, i64 7, i64 31, i64 62>
ret <4 x i64> %shift
}
@@ -313,6 +461,19 @@ define <8 x i32> @constant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshld {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllvd {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <8 x i32> %shift
}
@@ -330,6 +491,19 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpshlw {{.*}}(%rip), %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpmullw {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <16 x i16> %shift
}
@@ -378,6 +552,24 @@ define <32 x i8> @constant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpaddb %ymm1, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm1, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: constant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: constant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
+; XOPAVX2-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm1, %xmm1
+; XOPAVX2-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <32 x i8> %shift
}
@@ -399,6 +591,19 @@ define <4 x i64> @splatconstant_shift_v4i64(<4 x i64> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllq $7, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v4i64:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsllq $7, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v4i64:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllq $7, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <4 x i64> %a, <i64 7, i64 7, i64 7, i64 7>
ret <4 x i64> %shift
}
@@ -416,6 +621,19 @@ define <8 x i32> @splatconstant_shift_v8i32(<8 x i32> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $5, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v8i32:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpslld $5, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v8i32:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpslld $5, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <8 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <8 x i32> %shift
}
@@ -433,6 +651,19 @@ define <16 x i16> @splatconstant_shift_v16i16(<16 x i16> %a) nounwind {
; AVX2: # BB#0:
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v16i16:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm1
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
+; XOPAVX1-NEXT: vpsllw $3, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v16i16:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <16 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <16 x i16> %shift
}
@@ -454,6 +685,21 @@ define <32 x i8> @splatconstant_shift_v32i8(<32 x i8> %a) nounwind {
; AVX2-NEXT: vpsllw $3, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: retq
+;
+; XOPAVX1-LABEL: splatconstant_shift_v32i8:
+; XOPAVX1: # BB#0:
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpshlb %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: retq
+;
+; XOPAVX2-LABEL: splatconstant_shift_v32i8:
+; XOPAVX2: # BB#0:
+; XOPAVX2-NEXT: vpsllw $3, %ymm0, %ymm0
+; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
+; XOPAVX2-NEXT: retq
%shift = shl <32 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <32 x i8> %shift
}