diff options
| author | Tim Northover <tnorthover@apple.com> | 2015-10-26 21:32:53 +0000 |
|---|---|---|
| committer | Tim Northover <tnorthover@apple.com> | 2015-10-26 21:32:53 +0000 |
| commit | 939f089242cd22a0192fd30ed4216f66e77b3cf6 (patch) | |
| tree | b3e6dd3f0d472a1ebe89b825d0812d6cc585f4c6 /llvm/test | |
| parent | 2a81c0c43a83dd530fd886048a3ed68c0d5f983d (diff) | |
| download | bcm5719-llvm-939f089242cd22a0192fd30ed4216f66e77b3cf6.tar.gz bcm5719-llvm-939f089242cd22a0192fd30ed4216f66e77b3cf6.zip | |
ARM: make sure VFP loads and stores are properly aligned.
Both VLDRS and VLDRD fault if the memory is not 4 byte aligned, which wasn't
really being checked before, leading to faults at runtime.
llvm-svn: 251352
Diffstat (limited to 'llvm/test')
| -rw-r--r-- | llvm/test/CodeGen/ARM/unaligned_load_store_vfp.ll | 98 |
1 file changed, 98 insertions, 0 deletions
diff --git a/llvm/test/CodeGen/ARM/unaligned_load_store_vfp.ll b/llvm/test/CodeGen/ARM/unaligned_load_store_vfp.ll
new file mode 100644
index 00000000000..90d17e19c28
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/unaligned_load_store_vfp.ll
@@ -0,0 +1,98 @@
+; RUN: llc -mtriple=thumbv7-linux-gnueabihf %s -o - | FileCheck %s
+
+define float @test_load_s32_float(i32* %addr) {
+; CHECK-LABEL: test_load_s32_float:
+; CHECK: ldr [[TMP:r[0-9]+]], [r0]
+; CHECK: vmov [[RES_INT:s[0-9]+]], [[TMP]]
+; CHECK: vcvt.f32.s32 s0, [[RES_INT]]
+
+  %val = load i32, i32* %addr, align 1
+  %res = sitofp i32 %val to float
+  ret float %res
+}
+
+define double @test_load_s32_double(i32* %addr) {
+; CHECK-LABEL: test_load_s32_double:
+; CHECK: ldr [[TMP:r[0-9]+]], [r0]
+; CHECK: vmov [[RES_INT:s[0-9]+]], [[TMP]]
+; CHECK: vcvt.f64.s32 d0, [[RES_INT]]
+
+  %val = load i32, i32* %addr, align 1
+  %res = sitofp i32 %val to double
+  ret double %res
+}
+
+define float @test_load_u32_float(i32* %addr) {
+; CHECK-LABEL: test_load_u32_float:
+; CHECK: ldr [[TMP:r[0-9]+]], [r0]
+; CHECK: vmov [[RES_INT:s[0-9]+]], [[TMP]]
+; CHECK: vcvt.f32.u32 s0, [[RES_INT]]
+
+  %val = load i32, i32* %addr, align 1
+  %res = uitofp i32 %val to float
+  ret float %res
+}
+
+define double @test_load_u32_double(i32* %addr) {
+; CHECK-LABEL: test_load_u32_double:
+; CHECK: ldr [[TMP:r[0-9]+]], [r0]
+; CHECK: vmov [[RES_INT:s[0-9]+]], [[TMP]]
+; CHECK: vcvt.f64.u32 d0, [[RES_INT]]
+
+  %val = load i32, i32* %addr, align 1
+  %res = uitofp i32 %val to double
+  ret double %res
+}
+
+define void @test_store_f32(float %in, float* %addr) {
+; CHECK-LABEL: test_store_f32:
+; CHECK: vmov [[TMP:r[0-9]+]], s0
+; CHECK: str [[TMP]], [r0]
+
+  store float %in, float* %addr, align 1
+  ret void
+}
+
+define void @test_store_float_s32(float %in, i32* %addr) {
+; CHECK-LABEL: test_store_float_s32:
+; CHECK: vcvt.s32.f32 [[TMP:s[0-9]+]], s0
+; CHECK: vmov [[TMP_INT:r[0-9]+]], [[TMP]]
+; CHECK: str [[TMP_INT]], [r0]
+
+  %val = fptosi float %in to i32
+  store i32 %val, i32* %addr, align 1
+  ret void
+}
+
+define void @test_store_double_s32(double %in, i32* %addr) {
+; CHECK-LABEL: test_store_double_s32:
+; CHECK: vcvt.s32.f64 [[TMP:s[0-9]+]], d0
+; CHECK: vmov [[TMP_INT:r[0-9]+]], [[TMP]]
+; CHECK: str [[TMP_INT]], [r0]
+
+  %val = fptosi double %in to i32
+  store i32 %val, i32* %addr, align 1
+  ret void
+}
+
+define void @test_store_float_u32(float %in, i32* %addr) {
+; CHECK-LABEL: test_store_float_u32:
+; CHECK: vcvt.u32.f32 [[TMP:s[0-9]+]], s0
+; CHECK: vmov [[TMP_INT:r[0-9]+]], [[TMP]]
+; CHECK: str [[TMP_INT]], [r0]
+
+  %val = fptoui float %in to i32
+  store i32 %val, i32* %addr, align 1
+  ret void
+}
+
+define void @test_store_double_u32(double %in, i32* %addr) {
+; CHECK-LABEL: test_store_double_u32:
+; CHECK: vcvt.u32.f64 [[TMP:s[0-9]+]], d0
+; CHECK: vmov [[TMP_INT:r[0-9]+]], [[TMP]]
+; CHECK: str [[TMP_INT]], [r0]
+
+  %val = fptoui double %in to i32
+  store i32 %val, i32* %addr, align 1
+  ret void
+}

