| author | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-06-04 13:42:46 +0000 |
|---|---|---|
| committer | Simon Pilgrim <llvm-dev@redking.me.uk> | 2016-06-04 13:42:46 +0000 |
| commit | fda22d66fc531865fe8c4ebc2fafb39815c9f261 | |
| tree | dce934c6b99c1d1bb2c77ec685a68aed1ac30800 | |
| parent | fe9466fe2ce7ac9b50c09b3f3c2d14370147f4dc | |
[InstCombine][MMX] Extend SimplifyDemandedUseBits MOVMSK support to MMX
Add the MMX implementation to the SimplifyDemandedUseBits SSE/AVX MOVMSK support added in D19614
This requires a minor tweak, as llvm.x86.mmx.pmovmskb takes an x86_mmx argument - so we have to be explicit about the implied v8i8 vector type.
llvm-svn: 271789
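The demanded-bits reasoning behind the change: each MOVMSK/PMOVMSKB variant packs one sign bit per vector element into the low bits of its i32 result and zeroes everything above them, so a mask that only touches those upper bits can be dropped, and a use of only the upper bits folds to zero. The standalone C++ sketch below illustrates that logic; it is not the actual InstCombine patch, and the `MovmskKind`/`simplifyMaskedMovmsk` names are made up for illustration. It also shows the tweak the message mentions: the MMX variant's x86_mmx argument carries no element count, so the implied v8i8 width (8 live bits) has to be hard-coded.

```cpp
#include <cassert>
#include <cstdint>

// Illustrative sketch only: models the demanded-bits facts for the x86
// MOVMSK/PMOVMSKB intrinsics, not the real LLVM InstCombine code.
enum class MovmskKind {
  MmxPmovmskb,   // llvm.x86.mmx.pmovmskb   - x86_mmx arg, implied <8 x i8>
  SseMovmskPs,   // llvm.x86.sse.movmsk.ps  - <4 x float>
  Sse2MovmskPd,  // llvm.x86.sse2.movmsk.pd - <2 x double>
  Avx2Pmovmskb,  // llvm.x86.avx2.pmovmskb  - <32 x i8>
};

// Number of result bits that can ever be non-zero: one sign bit per element.
// The x86_mmx argument of the MMX variant carries no element count, so the
// implied v8i8 width is hard-coded -- the "minor tweak" the commit describes.
static unsigned movmskActiveBits(MovmskKind K) {
  switch (K) {
  case MovmskKind::MmxPmovmskb:  return 8;   // implied <8 x i8>
  case MovmskKind::SseMovmskPs:  return 4;
  case MovmskKind::Sse2MovmskPd: return 2;
  case MovmskKind::Avx2Pmovmskb: return 32;  // whole 32-bit result is live
  }
  return 32;
}

// Given "and (movmsk ...), Mask", decide what a demanded-bits combine can do:
//  - if the mask keeps every bit the intrinsic can set, the and is redundant;
//  - if the mask keeps none of them, the whole expression is zero.
enum class Simplification { None, DropMask, FoldToZero };

static Simplification simplifyMaskedMovmsk(MovmskKind K, uint32_t Mask) {
  unsigned Active = movmskActiveBits(K);
  uint32_t LiveBits = Active >= 32 ? ~0u : ((1u << Active) - 1);
  if ((Mask & LiveBits) == LiveBits)
    return Simplification::DropMask;   // e.g. and pmovmskb(mmx), 255
  if ((Mask & LiveBits) == 0)
    return Simplification::FoldToZero; // e.g. and pmovmskb(mmx), -256
  return Simplification::None;
}

int main() {
  // Mirrors the two new tests: and with 255 keeps every live bit,
  // and with -256 (0xFFFFFF00) keeps none of them.
  assert(simplifyMaskedMovmsk(MovmskKind::MmxPmovmskb, 255u) ==
         Simplification::DropMask);
  assert(simplifyMaskedMovmsk(MovmskKind::MmxPmovmskb, 0xFFFFFF00u) ==
         Simplification::FoldToZero);
  // avx2.pmovmskb uses all 32 bits, so masking the top byte cannot be dropped.
  assert(simplifyMaskedMovmsk(MovmskKind::Avx2Pmovmskb, 0x00FFFFFFu) ==
         Simplification::None);
  return 0;
}
```

In the real transform the SSE/AVX cases can read the element count from the argument's vector type; only the MMX case needs the explicit 8.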
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/Transforms/InstCombine/x86-movmsk.ll | 20 |
1 file changed, 20 insertions, 0 deletions
```diff
diff --git a/llvm/test/Transforms/InstCombine/x86-movmsk.ll b/llvm/test/Transforms/InstCombine/x86-movmsk.ll
index 767899432b0..3b644cba8a2 100644
--- a/llvm/test/Transforms/InstCombine/x86-movmsk.ll
+++ b/llvm/test/Transforms/InstCombine/x86-movmsk.ll
@@ -7,6 +7,16 @@ target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 ; DemandedBits - MOVMSK zeros the upper bits of the result.
 ;
 
+define i32 @test_upper_x86_mmx_pmovmskb(x86_mmx %a0) {
+; CHECK-LABEL: @test_upper_x86_mmx_pmovmskb(
+; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
+; CHECK-NEXT:    ret i32 [[TMP1]]
+;
+  %1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
+  %2 = and i32 %1, 255
+  ret i32 %2
+}
+
 define i32 @test_upper_x86_sse_movmsk_ps(<4 x float> %a0) {
 ; CHECK-LABEL: @test_upper_x86_sse_movmsk_ps(
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i32 @llvm.x86.sse.movmsk.ps(<4 x float> %a0)
@@ -63,6 +73,15 @@ define i32 @test_upper_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; DemandedBits - If we don't use the lower bits then we just return zero.
 ;
 
+define i32 @test_lower_x86_mmx_pmovmskb(x86_mmx %a0) {
+; CHECK-LABEL: @test_lower_x86_mmx_pmovmskb(
+; CHECK-NEXT:    ret i32 0
+;
+  %1 = call i32 @llvm.x86.mmx.pmovmskb(x86_mmx %a0)
+  %2 = and i32 %1, -256
+  ret i32 %2
+}
+
 define i32 @test_lower_x86_sse_movmsk_ps(<4 x float> %a0) {
 ; CHECK-LABEL: @test_lower_x86_sse_movmsk_ps(
 ; CHECK-NEXT:    ret i32 0
@@ -110,6 +129,7 @@ define i32 @test_lower_x86_avx_movmsk_pd_256(<4 x double> %a0) {
 ; llvm.x86.avx2.pmovmskb uses the whole of the 32-bit register.
 
+declare i32 @llvm.x86.mmx.pmovmskb(x86_mmx)
 declare i32 @llvm.x86.sse.movmsk.ps(<4 x float>)
 declare i32 @llvm.x86.sse2.movmsk.pd(<2 x double>)
```

