| Field | Value | Date |
|---|---|---|
| author | Eli Friedman <eli.friedman@gmail.com> | 2011-11-11 03:16:38 +0000 |
| committer | Eli Friedman <eli.friedman@gmail.com> | 2011-11-11 03:16:38 +0000 |
| commit | c4a001478c813b8f6f14d781f325da2ef4f689fd | |
| tree | 4b5b8290c1c75c38812cf84abe44189cee010fa6 /llvm | |
| parent | 0a917b7ad46ba4a5be4931c0de4b7464a4d49efa | |
Make sure to expand SIGN_EXTEND_INREG for NEON vectors. PR11319, round 3.
llvm-svn: 144361
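
Background on the one-line fix: when the `ashr <2 x i8>` in the new test is type-promoted to a vector with wider integer lanes, re-sign-extending the narrow values produces an `ISD::SIGN_EXTEND_INREG` node that the NEON patterns cannot select; setting its action to Expand tells legalization to rewrite it in terms of operations the target does handle (canonically a shift pair) rather than letting it reach instruction selection. Below is a minimal LLVM IR sketch of that semantics, assuming i8 values promoted into i32 lanes; the function name is illustrative and not part of the commit.

```llvm
; Sketch only: sign_extend_inreg of the low i8 of each i32 lane is
; equivalent to a shift-left/arithmetic-shift-right pair by 32 - 8 = 24.
define <2 x i32> @sext_inreg_expansion_sketch(<2 x i32> %v) {
  %shl = shl <2 x i32> %v, <i32 24, i32 24>
  %sra = ashr <2 x i32> %shl, <i32 24, i32 24>
  ret <2 x i32> %sra
}
```

Shifting left by 24 and arithmetically back right by 24 replicates the sign bit of each low byte across the upper bits, which is what the promoted `ashr` operand needs.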
Diffstat (limited to 'llvm')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | llvm/lib/Target/ARM/ARMISelLowering.cpp | 1 |
| -rw-r--r-- | llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll | 9 |

2 files changed, 10 insertions, 0 deletions
```diff
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 6f2b3b83bc3..b55ef700f5c 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -127,6 +127,7 @@ void ARMTargetLowering::addTypeForNEON(EVT VT, EVT PromotedLdStVT,
   setOperationAction(ISD::EXTRACT_SUBVECTOR, VT.getSimpleVT(), Legal);
   setOperationAction(ISD::SELECT, VT.getSimpleVT(), Expand);
   setOperationAction(ISD::SELECT_CC, VT.getSimpleVT(), Expand);
+  setOperationAction(ISD::SIGN_EXTEND_INREG, VT.getSimpleVT(), Expand);
   if (VT.isInteger()) {
     setOperationAction(ISD::SHL, VT.getSimpleVT(), Custom);
     setOperationAction(ISD::SRA, VT.getSimpleVT(), Custom);
diff --git a/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll b/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
index af43671791a..113cbfe3962 100644
--- a/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
+++ b/llvm/test/CodeGen/ARM/2011-11-07-PromoteVectorLoadStore.ll
@@ -13,3 +13,12 @@ define void @test_neon_vector_add_2xi8() nounwind {
   store <2 x i8> %3, <2 x i8>* @i8_res
   ret void
 }
+
+define void @test_neon_ld_st_volatile_with_ashr_2xi8() {
+; CHECK: test_neon_ld_st_volatile_with_ashr_2xi8:
+  %1 = load volatile <2 x i8>* @i8_src1
+  %2 = load volatile <2 x i8>* @i8_src2
+  %3 = ashr <2 x i8> %1, %2
+  store volatile <2 x i8> %3, <2 x i8>* @i8_res
+  ret void
+}
```

