diff options
| author | Nadav Rotem <nrotem@apple.com> | 2013-01-20 08:35:56 +0000 |
|---|---|---|
| committer | Nadav Rotem <nrotem@apple.com> | 2013-01-20 08:35:56 +0000 |
| commit | 9450fcfff145cd20dc2bddad938d19dce1cc74eb (patch) | |
| tree | 67722a61fcff86be92dfd741d8aa70cd14983fc6 /llvm/test | |
| parent | 87a736ca9fa0007d5590b111011b0b40bd2042dd (diff) | |
| download | bcm5719-llvm-9450fcfff145cd20dc2bddad938d19dce1cc74eb.tar.gz bcm5719-llvm-9450fcfff145cd20dc2bddad938d19dce1cc74eb.zip | |
Revert 172708.
The optimization handles esoteric cases but adds a lot of complexity both to the X86 backend and to other backends.
This optimization disables an important canonicalization of chains of SEXT nodes and makes SEXT and ZEXT asymmetrical.
Disabling the canonicalization of consecutive SEXT nodes into a single node disables other DAG optimizations that assume
that there is only one SEXT node. The AVX mask optimization is one example. Additionally, this optimization does not update the cost model.
llvm-svn: 172968
Diffstat (limited to 'llvm/test')
| -rwxr-xr-x | llvm/test/CodeGen/X86/avx-sext.ll | 56 | ||||
| -rwxr-xr-x | llvm/test/CodeGen/X86/avx2-conversions.ll | 12 |
2 files changed, 0 insertions, 68 deletions
diff --git a/llvm/test/CodeGen/X86/avx-sext.ll b/llvm/test/CodeGen/X86/avx-sext.ll index adee9bbe247..8d7d79db7de 100755 --- a/llvm/test/CodeGen/X86/avx-sext.ll +++ b/llvm/test/CodeGen/X86/avx-sext.ll @@ -142,59 +142,3 @@ define <8 x i16> @load_sext_test6(<8 x i8> *%ptr) { %Y = sext <8 x i8> %X to <8 x i16> ret <8 x i16>%Y } -; AVX: sext_1 -; AVX: vpmovsxbd -; AVX: vpmovsxdq -; AVX: vpmovsxdq -; AVX: ret -define void @sext_1(<4 x i8>* %inbuf, <4 x i64>* %outbuf) { - %v0 = load <4 x i8>* %inbuf - %r = sext <4 x i8> %v0 to <4 x i64> - store <4 x i64> %r, <4 x i64>* %outbuf - ret void -} - -; AVX: sext_2 -; AVX: vpmovsxbd -; AVX: ret -define void @sext_2(<4 x i8>* %inbuf, <4 x i32>* %outbuf) { - %v0 = load <4 x i8>* %inbuf - %r = sext <4 x i8> %v0 to <4 x i32> - store <4 x i32> %r, <4 x i32>* %outbuf - ret void -} - -; AVX: sext_3 -; AVX: vpmovsxwd -; AVX: ret -define void @sext_3(<4 x i16>* %inbuf, <4 x i32>* %outbuf) { - %v0 = load <4 x i16>* %inbuf - %r = sext <4 x i16> %v0 to <4 x i32> - store <4 x i32> %r, <4 x i32>* %outbuf - ret void -} - -; AVX: sext_4 -; AVX: vpmovsxwd -; AVX: vpmovsxdq -; AVX: vpmovsxdq -; AVX: ret -define void @sext_4(<4 x i16>* %inbuf, <4 x i64>* %outbuf) { - %v0 = load <4 x i16>* %inbuf - %r = sext <4 x i16> %v0 to <4 x i64> - store <4 x i64> %r, <4 x i64>* %outbuf - ret void -} - -; AVX: sext_6 -; AVX: vpmovsxbw -; AVX: vpmovsxwd -; AVX: vpmovsxwd -; AVX: ret -define void @sext_6(<8 x i8>* %inbuf, <8 x i32>* %outbuf) { - %v0 = load <8 x i8>* %inbuf - %r = sext <8 x i8> %v0 to <8 x i32> - store <8 x i32> %r, <8 x i32>* %outbuf - ret void -} - diff --git a/llvm/test/CodeGen/X86/avx2-conversions.ll b/llvm/test/CodeGen/X86/avx2-conversions.ll index 17bd10a76e7..3ce08dcc737 100755 --- a/llvm/test/CodeGen/X86/avx2-conversions.ll +++ b/llvm/test/CodeGen/X86/avx2-conversions.ll @@ -107,15 +107,3 @@ define <8 x i32> @load_sext_test5(<8 x i8> *%ptr) { %Y = sext <8 x i8> %X to <8 x i32> ret <8 x i32>%Y } - -; CHECK: load_sext_test6 -; CHECK: 
vpmovsxbd (%r{{[^,]*}}), %ymm{{.*}} -; CHECK: vpmovsxdq -; CHECK: vpmovsxdq -; CHECK: ret -define <8 x i64> @load_sext_test6(<8 x i8> *%ptr) { - %X = load <8 x i8>* %ptr - %Y = sext <8 x i8> %X to <8 x i64> - ret <8 x i64>%Y -} - |

