| | | |
|---|---|---|
| author | Krzysztof Parzyszek <kparzysz@codeaurora.org> | 2018-02-14 20:46:06 +0000 |
| committer | Krzysztof Parzyszek <kparzysz@codeaurora.org> | 2018-02-14 20:46:06 +0000 |
| commit | ad83ce4cb45771aecd4ed916abe368787da55b72 (patch) | |
| tree | 104cd6bb2cd84d25a020d10c44f3815456f54816 /llvm/test/CodeGen/Hexagon/select-instr-align.ll | |
| parent | 668664889c790227fef496bffcbe6b21eb33007b (diff) | |
| download | bcm5719-llvm-ad83ce4cb45771aecd4ed916abe368787da55b72.tar.gz bcm5719-llvm-ad83ce4cb45771aecd4ed916abe368787da55b72.zip | |
[Hexagon] Split HVX vector pair loads/stores, expand unaligned loads
llvm-svn: 325169
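
The updated test below now passes `-hvx-expand-unaligned-loads=0` so that the original `vmemu` selection can still be checked once unaligned HVX loads are expanded by default. A minimal sketch of the affected case, assuming a hypothetical file name `unaligned.ll` (the function and attributes mirror the test; the exact expanded output is not shown here):

```llvm
; A 64-byte HVX vector loaded with only 32-byte alignment -- the case this
; commit starts expanding. Passing -hvx-expand-unaligned-loads=0, as the
; updated RUN line does, keeps the single vmemu selection instead.
;
;   llc -march=hexagon unaligned.ll -o -                                ; expanded by default
;   llc -march=hexagon -hvx-expand-unaligned-loads=0 unaligned.ll -o -  ; selects vmemu

define <16 x i32> @unaligned_load(<16 x i32>* %p) #0 {
  %v = load <16 x i32>, <16 x i32>* %p, align 32
  ret <16 x i32> %v
}

attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
```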
Diffstat (limited to 'llvm/test/CodeGen/Hexagon/select-instr-align.ll')
| -rw-r--r-- | llvm/test/CodeGen/Hexagon/select-instr-align.ll | 26 |
1 file changed, 13 insertions, 13 deletions
```diff
diff --git a/llvm/test/CodeGen/Hexagon/select-instr-align.ll b/llvm/test/CodeGen/Hexagon/select-instr-align.ll
index 368ee3c5726..9d8939282c6 100644
--- a/llvm/test/CodeGen/Hexagon/select-instr-align.ll
+++ b/llvm/test/CodeGen/Hexagon/select-instr-align.ll
@@ -1,31 +1,31 @@
-; RUN: llc -march=hexagon -mcpu=hexagonv60 -mattr=+hvxv60,hvx-length64b < %s | FileCheck %s
+; RUN: llc -march=hexagon -hvx-expand-unaligned-loads=0 < %s | FileCheck %s
+
 ; CHECK-LABEL: aligned_load:
 ; CHECK: = vmem({{.*}})
-; CHECK-LABEL: aligned_store:
-; CHECK: vmem({{.*}}) =
-; CHECK-LABEL: unaligned_load:
-; CHECK: = vmemu({{.*}})
-; CHECK-LABEL: unaligned_store:
-; CHECK: vmemu({{.*}}) =
-
-define <16 x i32> @aligned_load(<16 x i32>* %p, <16 x i32> %a) {
+define <16 x i32> @aligned_load(<16 x i32>* %p, <16 x i32> %a) #0 {
   %v = load <16 x i32>, <16 x i32>* %p, align 64
   ret <16 x i32> %v
 }
 
-define void @aligned_store(<16 x i32>* %p, <16 x i32> %a) {
+; CHECK-LABEL: aligned_store:
+; CHECK: vmem({{.*}}) =
+define void @aligned_store(<16 x i32>* %p, <16 x i32> %a) #0 {
   store <16 x i32> %a, <16 x i32>* %p, align 64
   ret void
 }
 
-define <16 x i32> @unaligned_load(<16 x i32>* %p, <16 x i32> %a) {
+; CHECK-LABEL: unaligned_load:
+; CHECK: = vmemu({{.*}})
+define <16 x i32> @unaligned_load(<16 x i32>* %p, <16 x i32> %a) #0 {
   %v = load <16 x i32>, <16 x i32>* %p, align 32
   ret <16 x i32> %v
 }
 
-define void @unaligned_store(<16 x i32>* %p, <16 x i32> %a) {
+; CHECK-LABEL: unaligned_store:
+; CHECK: vmemu({{.*}}) =
+define void @unaligned_store(<16 x i32>* %p, <16 x i32> %a) #0 {
   store <16 x i32> %a, <16 x i32>* %p, align 32
   ret void
 }
 
-
+attributes #0 = { nounwind "target-cpu"="hexagonv60" "target-features"="+hvxv60,+hvx-length64b" }
```

