Diffstat (limited to 'llvm')
 llvm/lib/Transforms/Utils/VNCoercion.cpp          |  4 ++--
 llvm/test/Transforms/GVN/non-integral-pointers.ll | 31 +++++++++++++++++++++++
 2 files changed, 33 insertions(+), 2 deletions(-)
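What the change does, as read from the diff below: canCoerceMustAliasedValueToLoad used to compare the stored and loaded types directly, and DataLayout::isNonIntegralPointerType answers false for a vector type even when its elements are non-integral pointers. Comparing the scalar types instead makes the "don't coerce non-integral pointers" rule cover vectors such as <1 x i8 addrspace(4)*>; the two new GVN tests pin down that neither a memset pattern nor a plain i64 store may be forwarded to such a vector load.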
diff --git a/llvm/lib/Transforms/Utils/VNCoercion.cpp b/llvm/lib/Transforms/Utils/VNCoercion.cpp
index 74572eb2d46..8e21472f2c1 100644
--- a/llvm/lib/Transforms/Utils/VNCoercion.cpp
+++ b/llvm/lib/Transforms/Utils/VNCoercion.cpp
@@ -31,8 +31,8 @@ bool canCoerceMustAliasedValueToLoad(Value *StoredVal, Type *LoadTy,
     return false;
 
   // Don't coerce non-integral pointers to integers or vice versa.
-  if (DL.isNonIntegralPointerType(StoredVal->getType()) !=
-      DL.isNonIntegralPointerType(LoadTy)) {
+  if (DL.isNonIntegralPointerType(StoredVal->getType()->getScalarType()) !=
+      DL.isNonIntegralPointerType(LoadTy->getScalarType())) {
     // As a special case, allow coercion of memset used to initialize
     // an array w/null. Despite non-integral pointers not generally having a
     // specific bit pattern, we do assume null is zero.
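A note on why getScalarType() is the right fix: Type::getScalarType() is the identity on scalar types and returns the element type on vector types, so both i8 addrspace(4)* and <1 x i8 addrspace(4)*> map to the same pointer type before the non-integral query. A minimal sketch of the corrected predicate as a standalone helper (the helper name is illustrative, not code from the patch):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    // Sketch of the check after this patch. DataLayout::isNonIntegralPointerType
    // dyn_casts its argument to PointerType, so it returns false for a vector of
    // pointers; querying the scalar type closes that hole for vectors.
    static bool differInNonIntegralness(Type *StoredTy, Type *LoadTy,
                                        const DataLayout &DL) {
      // <1 x i8 addrspace(4)*> -> i8 addrspace(4)*; scalar types pass through.
      return DL.isNonIntegralPointerType(StoredTy->getScalarType()) !=
             DL.isNonIntegralPointerType(LoadTy->getScalarType());
    }

In the patch this comparison is inlined in canCoerceMustAliasedValueToLoad, with the null-memset initialization case as the one allowed exception.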
diff --git a/llvm/test/Transforms/GVN/non-integral-pointers.ll b/llvm/test/Transforms/GVN/non-integral-pointers.ll
index c222a8954bd..c5a281e1423 100644
--- a/llvm/test/Transforms/GVN/non-integral-pointers.ll
+++ b/llvm/test/Transforms/GVN/non-integral-pointers.ll
@@ -77,6 +77,22 @@ define i8 addrspace(4)* @neg_forward_memset(i8 addrspace(4)* addrspace(4)* %loc)
   ret i8 addrspace(4)* %ref
 }
 
+define <1 x i8 addrspace(4)*> @neg_forward_memset_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
+; CHECK-LABEL: @neg_forward_memset_vload(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i8 addrspace(4)*
+; CHECK-NEXT:    call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 [[LOC_BC]], i8 7, i64 8, i1 false)
+; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
+; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
+;
+  entry:
+  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i8 addrspace(4)*
+  call void @llvm.memset.p4i8.i64(i8 addrspace(4)* align 4 %loc.bc, i8 7, i64 8, i1 false)
+  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
+  ret <1 x i8 addrspace(4)*> %ref
+}
+
+
 ; Can forward since we can do so w/o breaking types
 define i8 addrspace(4)* @forward_memset_zero(i8 addrspace(4)* addrspace(4)* %loc) {
 ; CHECK-LABEL: @forward_memset_zero(
@@ -108,6 +124,21 @@ define i8 addrspace(4)* @neg_forward_store(i8 addrspace(4)* addrspace(4)* %loc)
   ret i8 addrspace(4)* %ref
 }
 
+define <1 x i8 addrspace(4)*> @neg_forward_store_vload(<1 x i8 addrspace(4)*> addrspace(4)* %loc) {
+; CHECK-LABEL: @neg_forward_store_vload(
+; CHECK-NEXT:  entry:
+; CHECK-NEXT:    [[LOC_BC:%.*]] = bitcast <1 x i8 addrspace(4)*> addrspace(4)* [[LOC:%.*]] to i64 addrspace(4)*
+; CHECK-NEXT:    store i64 5, i64 addrspace(4)* [[LOC_BC]]
+; CHECK-NEXT:    [[REF:%.*]] = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* [[LOC]]
+; CHECK-NEXT:    ret <1 x i8 addrspace(4)*> [[REF]]
+;
+  entry:
+  %loc.bc = bitcast <1 x i8 addrspace(4)*> addrspace(4)* %loc to i64 addrspace(4)*
+  store i64 5, i64 addrspace(4)* %loc.bc
+  %ref = load <1 x i8 addrspace(4)*>, <1 x i8 addrspace(4)*> addrspace(4)* %loc
+  ret <1 x i8 addrspace(4)*> %ref
+}
+
 ; TODO: missed optimization, we can forward the null.
 define i8 addrspace(4)* @forward_store_zero(i8 addrspace(4)* addrspace(4)* %loc) {
 ; CHECK-LABEL: @forward_store_zero(