diff options
author | Matthew Simpson <mssimpso@codeaurora.org> | 2016-12-15 18:36:59 +0000 |
---|---|---|
committer | Matthew Simpson <mssimpso@codeaurora.org> | 2016-12-15 18:36:59 +0000 |
commit | 2c8de192a1eab0f60c3c3ccc3652e22cba275a82 (patch) | |
tree | 4270af75ada1c147a736e2e6221687360da1266b /llvm/test | |
parent | 2a26a5f1f0694c07a3d6f4dcb312476c912d1ea9 (diff) | |
download | bcm5719-llvm-2c8de192a1eab0f60c3c3ccc3652e22cba275a82.tar.gz bcm5719-llvm-2c8de192a1eab0f60c3c3ccc3652e22cba275a82.zip |
[AArch64] Guard Misaligned 128-bit store penalty by subtarget feature
This patch ensures that the misaligned 128-bit store penalty in getMemoryOpCost
is applied only when the SlowMisaligned128Store subtarget feature is set.
Differential Revision: https://reviews.llvm.org/D27677
llvm-svn: 289845
Diffstat (limited to 'llvm/test')
-rw-r--r-- | llvm/test/Analysis/CostModel/AArch64/store.ll | 18 |
1 files changed, 12 insertions, 6 deletions
diff --git a/llvm/test/Analysis/CostModel/AArch64/store.ll b/llvm/test/Analysis/CostModel/AArch64/store.ll index 307f8f8ee97..58750721cb9 100644 --- a/llvm/test/Analysis/CostModel/AArch64/store.ll +++ b/llvm/test/Analysis/CostModel/AArch64/store.ll @@ -1,10 +1,16 @@ -; RUN: opt < %s -cost-model -analyze -mtriple=arm64-apple-ios -mcpu=cyclone | FileCheck %s +; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios | FileCheck %s +; RUN: opt < %s -cost-model -analyze -mtriple=aarch64-apple-ios -mattr=slow-misaligned-128store | FileCheck %s --check-prefix=SLOW_MISALIGNED_128_STORE + target datalayout = "e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:32:64-v128:32:128-a0:0:32-n32-S32" -; CHECK-LABEL: store -define void @store() { - ; Stores of <2 x i64> should be expensive because we don't split them and - ; and unaligned 16b stores have bad performance. - ; CHECK: cost of 12 {{.*}} store +; CHECK-LABEL: getMemoryOpCost +; SLOW_MISALIGNED_128_STORE-LABEL: getMemoryOpCost +define void @getMemoryOpCost() { + ; If FeatureSlowMisaligned128Store is set, we penalize <2 x i64> stores. On + ; Cyclone, for example, such stores should be expensive because we don't + ; split them and misaligned 16b stores have bad performance. + ; + ; CHECK: cost of 1 {{.*}} store + ; SLOW_MISALIGNED_128_STORE: cost of 12 {{.*}} store store <2 x i64> undef, <2 x i64> * undef ; We scalarize the loads/stores because there is no vector register name for |