author     Simon Pilgrim <llvm-dev@redking.me.uk>    2016-04-30 07:32:19 +0000
committer  Simon Pilgrim <llvm-dev@redking.me.uk>    2016-04-30 07:32:19 +0000
commit     8e38a5439bd4aad10a382019efea4236d1bfb406 (patch)
tree       d2d40f230b0bb5a1d2939b1e1f55cc533d53a3a5
parent     640f9964c707bf0d63d1829b1d2f9f360c27eb8a (diff)
[InstCombine][AVX] Split off VPERMILVAR tests and added additional tests for UNDEF mask elements
llvm-svn: 268159
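
For context: vpermilvar selects lanes using the low bits of each mask element. vpermilvar.ps reads bits [1:0] of each i32 (within its 128-bit lane), and vpermilvar.pd reads bit 1 of each i64, which is why the pd mask <2, 0> in the tests below corresponds to shuffle indices <1, 0>. With a fully constant mask the permute is static, so instcombine can rewrite the intrinsic call as a plain shufflevector. A minimal standalone reproducer of that fold, sketched here using the same opt/FileCheck pattern as the new test file (the function name @fold_reverse_example is made up for illustration and is not part of the commit):

; RUN: opt < %s -instcombine -S | FileCheck %s
declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)

define <4 x float> @fold_reverse_example(<4 x float> %v) {
; The constant mask <3,2,1,0> makes the permute static, so the call is
; expected to fold to a shufflevector with the same indices:
; CHECK-LABEL: @fold_reverse_example(
; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
  ret <4 x float> %a
}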
 llvm/test/Transforms/InstCombine/vec_demanded_elts.ll |  60 ----
 llvm/test/Transforms/InstCombine/x86-avx.ll           | 124 ++++
 2 files changed, 124 insertions(+), 60 deletions(-)
diff --git a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
index 0b9663300c3..e744b59ec46 100644
--- a/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
+++ b/llvm/test/Transforms/InstCombine/vec_demanded_elts.ll
@@ -194,66 +194,6 @@ define <4 x float> @test_select(float %f, float %g) {
   ret <4 x float> %ret
 }
 
-declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
-define <4 x float> @test_vpermilvar_ps(<4 x float> %v) {
-; CHECK-LABEL: @test_vpermilvar_ps(
-; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
-  ret <4 x float> %a
-}
-
-declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
-define <8 x float> @test_vpermilvar_ps_256(<8 x float> %v) {
-; CHECK-LABEL: @test_vpermilvar_ps_256(
-; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
-  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
-  ret <8 x float> %a
-}
-
-declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
-define <2 x double> @test_vpermilvar_pd(<2 x double> %v) {
-; CHECK-LABEL: @test_vpermilvar_pd(
-; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 0>
-  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 2, i64 0>)
-  ret <2 x double> %a
-}
-
-declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)
-define <4 x double> @test_vpermilvar_pd_256(<4 x double> %v) {
-; CHECK-LABEL: @test_vpermilvar_pd_256(
-; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
-  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 3, i64 1, i64 2, i64 0>)
-  ret <4 x double> %a
-}
-
-define <4 x float> @test_vpermilvar_ps_zero(<4 x float> %v) {
-; CHECK-LABEL: @test_vpermilvar_ps_zero(
-; CHECK: shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
-  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
-  ret <4 x float> %a
-}
-
-define <8 x float> @test_vpermilvar_ps_256_zero(<8 x float> %v) {
-; CHECK-LABEL: @test_vpermilvar_ps_256_zero(
-; CHECK: shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
-  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializer)
-  ret <8 x float> %a
-}
-
-define <2 x double> @test_vpermilvar_pd_zero(<2 x double> %v) {
-; CHECK-LABEL: @test_vpermilvar_pd_zero(
-; CHECK: shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
-  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> zeroinitializer)
-  ret <2 x double> %a
-}
-
-define <4 x double> @test_vpermilvar_pd_256_zero(<4 x double> %v) {
-; CHECK-LABEL: @test_vpermilvar_pd_256_zero(
-; CHECK: shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
-  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> zeroinitializer)
-  ret <4 x double> %a
-}
-
 define <2 x i64> @PR24922(<2 x i64> %v) {
 ; CHECK-LABEL: @PR24922
 ; CHECK: select <2 x i1>
diff --git a/llvm/test/Transforms/InstCombine/x86-avx.ll b/llvm/test/Transforms/InstCombine/x86-avx.ll
new file mode 100644
index 00000000000..71cc2d316f7
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/x86-avx.ll
@@ -0,0 +1,124 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instcombine -S | FileCheck %s
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+
+; Verify that instcombine is able to fold identity shuffles.
+
+define <4 x float> @identity_test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @identity_test_vpermilvar_ps(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT:    ret <4 x float> [[TMP1]]
+;
+  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 3, i32 2, i32 1, i32 0>)
+  ret <4 x float> %a
+}
+
+define <8 x float> @identity_test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @identity_test_vpermilvar_ps_256(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 3, i32 2, i32 1, i32 0, i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT:    ret <8 x float> [[TMP1]]
+;
+  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>)
+  ret <8 x float> %a
+}
+
+define <2 x double> @identity_test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @identity_test_vpermilvar_pd(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; CHECK-NEXT:    ret <2 x double> [[TMP1]]
+;
+  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 2, i64 0>)
+  ret <2 x double> %a
+}
+
+define <4 x double> @identity_test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @identity_test_vpermilvar_pd_256(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 1, i32 0, i32 3, i32 2>
+; CHECK-NEXT:    ret <4 x double> [[TMP1]]
+;
+  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 3, i64 1, i64 2, i64 0>)
+  ret <4 x double> %a
+}
+
+; Instcombine should be able to fold the following byte shuffle to a builtin shufflevector
+; with a shuffle mask of all zeroes.
+
+define <4 x float> @zero_test_vpermilvar_ps_zero(<4 x float> %v) {
+; CHECK-LABEL: @zero_test_vpermilvar_ps_zero(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> zeroinitializer
+; CHECK-NEXT:    ret <4 x float> [[TMP1]]
+;
+  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> zeroinitializer)
+  ret <4 x float> %a
+}
+
+define <8 x float> @zero_test_vpermilvar_ps_256_zero(<8 x float> %v) {
+; CHECK-LABEL: @zero_test_vpermilvar_ps_256_zero(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <8 x float> %v, <8 x float> undef, <8 x i32> <i32 0, i32 0, i32 0, i32 0, i32 4, i32 4, i32 4, i32 4>
+; CHECK-NEXT:    ret <8 x float> [[TMP1]]
+;
+  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> zeroinitializer)
+  ret <8 x float> %a
+}
+
+define <2 x double> @zero_test_vpermilvar_pd_zero(<2 x double> %v) {
+; CHECK-LABEL: @zero_test_vpermilvar_pd_zero(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <2 x double> %v, <2 x double> undef, <2 x i32> zeroinitializer
+; CHECK-NEXT:    ret <2 x double> [[TMP1]]
+;
+  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> zeroinitializer)
+  ret <2 x double> %a
+}
+
+define <4 x double> @zero_test_vpermilvar_pd_256_zero(<4 x double> %v) {
+; CHECK-LABEL: @zero_test_vpermilvar_pd_256_zero(
+; CHECK-NEXT:    [[TMP1:%.*]] = shufflevector <4 x double> %v, <4 x double> undef, <4 x i32> <i32 0, i32 0, i32 2, i32 2>
+; CHECK-NEXT:    ret <4 x double> [[TMP1]]
+;
+  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> zeroinitializer)
+  ret <4 x double> %a
+}
+
+; FIXME: Verify that instcombine is able to fold constant byte shuffles with undef mask elements.
+
+define <4 x float> @undef_test_vpermilvar_ps(<4 x float> %v) {
+; CHECK-LABEL: @undef_test_vpermilvar_ps(
+; CHECK-NEXT:    [[A:%.*]] = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>)
+; CHECK-NEXT:    ret <4 x float> [[A]]
+;
+  %a = tail call <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float> %v, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>)
+  ret <4 x float> %a
+}
+
+define <8 x float> @undef_test_vpermilvar_ps_256(<8 x float> %v) {
+; CHECK-LABEL: @undef_test_vpermilvar_ps_256(
+; CHECK-NEXT:    [[A:%.*]] = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 undef, i32 6, i32 5, i32 undef, i32 3, i32 2, i32 1, i32 0>)
+; CHECK-NEXT:    ret <8 x float> [[A]]
+;
+  %a = tail call <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float> %v, <8 x i32> <i32 undef, i32 6, i32 5, i32 undef, i32 3, i32 2, i32 1, i32 0>)
+  ret <8 x float> %a
+}
+
+define <2 x double> @undef_test_vpermilvar_pd(<2 x double> %v) {
+; CHECK-LABEL: @undef_test_vpermilvar_pd(
+; CHECK-NEXT:    [[A:%.*]] = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 undef, i64 0>)
+; CHECK-NEXT:    ret <2 x double> [[A]]
+;
+  %a = tail call <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double> %v, <2 x i64> <i64 undef, i64 0>)
+  ret <2 x double> %a
+}
+
+define <4 x double> @undef_test_vpermilvar_pd_256(<4 x double> %v) {
+; CHECK-LABEL: @undef_test_vpermilvar_pd_256(
+; CHECK-NEXT:    [[A:%.*]] = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 undef, i64 1, i64 2, i64 undef>)
+; CHECK-NEXT:    ret <4 x double> [[A]]
+;
+  %a = tail call <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double> %v, <4 x i64> <i64 undef, i64 1, i64 2, i64 undef>)
  ret <4 x double> %a
+}
+
+declare <2 x double> @llvm.x86.avx.vpermilvar.pd(<2 x double>, <2 x i64>)
+declare <4 x double> @llvm.x86.avx.vpermilvar.pd.256(<4 x double>, <4 x i64>)
+
+declare <4 x float> @llvm.x86.avx.vpermilvar.ps(<4 x float>, <4 x i32>)
+declare <8 x float> @llvm.x86.avx.vpermilvar.ps.256(<8 x float>, <8 x i32>)
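
A note on the FIXME cases: the undef_test functions are left untouched by instcombine at this point, since the fold handles only fully constant integer masks. One plausible extension, sketched here as hypothetical desired output rather than anything this commit implements, would map each undef mask element to an undef shufflevector index, since an undef selector may resolve to any lane:

; Hypothetical post-fold IR for @undef_test_vpermilvar_ps (assumption,
; not produced as of this commit):
;   %1 = shufflevector <4 x float> %v, <4 x float> undef, <4 x i32> <i32 undef, i32 2, i32 1, i32 undef>
;   ret <4 x float> %1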