summaryrefslogtreecommitdiffstats
path: root/clang/test/CodeGen/avx-builtins.c
diff options
context:
space:
mode:
authorCraig Topper <craig.topper@gmail.com>2016-05-30 17:10:30 +0000
committerCraig Topper <craig.topper@gmail.com>2016-05-30 17:10:30 +0000
commit09175dab317616cde85f043fb8d89ba2f36a8e2f (patch)
treeaba040b35b30dfc789bbcfca31f0d65894463f52 /clang/test/CodeGen/avx-builtins.c
parent83817327fcc06878a277c2b98aa2ee01ed263492 (diff)
downloadbcm5719-llvm-09175dab317616cde85f043fb8d89ba2f36a8e2f.tar.gz
bcm5719-llvm-09175dab317616cde85f043fb8d89ba2f36a8e2f.zip
[X86] Replace unaligned store builtins in SSE/AVX intrinsic files with code that will compile to a native unaligned store. Remove the builtins since they are no longer used.
Intrinsics will be removed from llvm in a future commit. llvm-svn: 271214
Diffstat (limited to 'clang/test/CodeGen/avx-builtins.c')
-rw-r--r--clang/test/CodeGen/avx-builtins.c21
1 file changed, 12 insertions, 9 deletions
diff --git a/clang/test/CodeGen/avx-builtins.c b/clang/test/CodeGen/avx-builtins.c
index 9558421191f..756ea3634b3 100644
--- a/clang/test/CodeGen/avx-builtins.c
+++ b/clang/test/CodeGen/avx-builtins.c
@@ -1154,46 +1154,49 @@ void test_mm256_store_si256(__m256i* A, __m256i B) {
void test_mm256_storeu_pd(double* A, __m256d B) {
// CHECK-LABEL: test_mm256_storeu_pd
- // CHECK: call void @llvm.x86.avx.storeu.pd.256(i8* %{{.*}}, <4 x double> %{{.*}})
+ // CHECK: store <4 x double> %{{.*}}, <4 x double>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm256_storeu_pd(A, B);
}
void test_mm256_storeu_ps(float* A, __m256 B) {
// CHECK-LABEL: test_mm256_storeu_ps
- // CHECK: call void @llvm.x86.avx.storeu.ps.256(i8* %{{.*}}, <8 x float> %{{.*}})
+ // CHECK: store <8 x float> %{{.*}}, <8 x float>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm256_storeu_ps(A, B);
}
void test_mm256_storeu_si256(__m256i* A, __m256i B) {
// CHECK-LABEL: test_mm256_storeu_si256
- // CHECK: call void @llvm.x86.avx.storeu.dq.256(i8* %{{.*}}, <32 x i8> %{{.*}})
+ // CHECK: store <4 x i64> %{{.*}}, <4 x i64>* %{{.*}}, align 1{{$}}
+ // CHECK-NEXT: ret void
_mm256_storeu_si256(A, B);
}
void test_mm256_storeu2_m128(float* A, float* B, __m256 C) {
// CHECK-LABEL: test_mm256_storeu2_m128
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <8 x float> %{{.*}}, <8 x float> %{{.*}}, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
- // CHECK: call void @llvm.x86.sse.storeu.ps(i8* %{{.*}}, <4 x float> %{{.*}})
+ // CHECK: store <4 x float> %{{.*}}, <4 x float>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128(A, B, C);
}
void test_mm256_storeu2_m128d(double* A, double* B, __m256d C) {
// CHECK-LABEL: test_mm256_storeu2_m128d
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 0, i32 1>
- // CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x double> %{{.*}}, <4 x double> %{{.*}}, <2 x i32> <i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse2.storeu.pd(i8* %{{.*}}, <2 x double> %{{.*}})
+ // CHECK: store <2 x double> %{{.*}}, <2 x double>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128d(A, B, C);
}
void test_mm256_storeu2_m128i(__m128i* A, __m128i* B, __m256i C) {
// CHECK-LABEL: test_mm256_storeu2_m128i
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 0, i32 1>
- // CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
// CHECK: shufflevector <4 x i64> %{{.*}}, <4 x i64> %{{.*}}, <2 x i32> <i32 2, i32 3>
- // CHECK: call void @llvm.x86.sse2.storeu.dq(i8* %{{.*}}, <16 x i8> %{{.*}})
+ // CHECK: store <2 x i64> %{{.*}}, <2 x i64>* %{{.*}}, align 1{{$}}
_mm256_storeu2_m128i(A, B, C);
}
OpenPOWER on IntegriCloud