author | Bill Wendling <isanbard@gmail.com> | 2011-04-12 22:46:31 +0000
---|---|---
committer | Bill Wendling <isanbard@gmail.com> | 2011-04-12 22:46:31 +0000
commit | 47c24875a1d1ce1a4b09c45d87723c229a7af3c9 (patch)
tree | 4200c20a52a48a9bd4aa5133eaa5fb43c4a552e6 /llvm/test/CodeGen
parent | de9d58569ffca4c370346f3aed398db5827a813e (diff)
Remove the unaligned load intrinsics in favor of using native unaligned loads.
Now that we have a first-class way to represent unaligned loads, the unaligned
load intrinsics are superfluous.
First part of <rdar://problem/8460511>.
llvm-svn: 129401
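To illustrate the change (this sketch is not part of the patch; the function name is hypothetical and the IR uses the typed-pointer syntax of the time), a native unaligned load is simply a first-class load marked "align 1", with no intrinsic call:

define <16 x i8> @loadu_dq_native(i8* %a0) {
  ; Reinterpret the raw byte pointer as a vector pointer.
  %ptr = bitcast i8* %a0 to <16 x i8>*
  ; "align 1" states that no alignment may be assumed, so the backend must
  ; select an unaligned vector move (e.g. vmovups under AVX).
  %res = load <16 x i8>* %ptr, align 1
  ret <16 x i8> %res
}

This is what makes @llvm.x86.sse2.loadu.dq and friends redundant: the alignment attribute on an ordinary load already carries the same information.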
Diffstat (limited to 'llvm/test/CodeGen')
-rw-r--r-- | llvm/test/CodeGen/X86/avx-intrinsics-x86.ll | 4
1 file changed, 2 insertions, 2 deletions
diff --git a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
index 6c32396a417..5201688686d 100644
--- a/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
+++ b/llvm/test/CodeGen/X86/avx-intrinsics-x86.ll
@@ -247,7 +247,7 @@ declare <2 x double> @llvm.x86.sse2.div.sd(<2 x double>, <2 x double>) nounwind
 
 define <16 x i8> @test_x86_sse2_loadu_dq(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovdqu
+  ; CHECK: vmovups
   %res = call <16 x i8> @llvm.x86.sse2.loadu.dq(i8* %a0) ; <<16 x i8>> [#uses=1]
   ret <16 x i8> %res
 }
@@ -256,7 +256,7 @@ declare <16 x i8> @llvm.x86.sse2.loadu.dq(i8*) nounwind readonly
 
 define <2 x double> @test_x86_sse2_loadu_pd(i8* %a0) {
   ; CHECK: movl
-  ; CHECK: vmovupd
+  ; CHECK: vmovups
   %res = call <2 x double> @llvm.x86.sse2.loadu.pd(i8* %a0) ; <<2 x double>> [#uses=1]
   ret <2 x double> %res
 }
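Both CHECK lines now expect vmovups: once the intrinsic calls are lowered as ordinary unaligned loads, the element type no longer forces a type-specific move, and the backend apparently settles on the same unaligned instruction in both tests. A minimal sketch of the double-precision case written with a native load (hypothetical function name, era-appropriate typed-pointer syntax; not taken from the patch):

define <2 x double> @loadu_pd_native(i8* %a0) {
  %ptr = bitcast i8* %a0 to <2 x double>*
  ; Equivalent to the old @llvm.x86.sse2.loadu.pd call: an unaligned <2 x double> load.
  %res = load <2 x double>* %ptr, align 1
  ret <2 x double> %res
}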