summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
author    Daniel Dunbar <daniel@zuster.org> 2010-04-22 15:22:33 +0000
committer Daniel Dunbar <daniel@zuster.org> 2010-04-22 15:22:33 +0000
commit    5981377698591b640c9abf1e79cc11a803c43fe3 (patch)
tree      38f35768b7aada5a21886fd768be90ab99269612
parent    5d6c07e0e9f38b63484f9ae4cc47cc0414e78339 (diff)
download  bcm5719-llvm-5981377698591b640c9abf1e79cc11a803c43fe3.tar.gz
download  bcm5719-llvm-5981377698591b640c9abf1e79cc11a803c43fe3.zip
IRgen: Fix another case where we generated an invalid access component when we
immediately narrowed the access size. Fix this (and previous case) by just choosing a better access size up-front. llvm-svn: 102068
-rw-r--r-- clang/lib/CodeGen/CGRecordLayoutBuilder.cpp | 25
-rw-r--r-- clang/test/CodeGen/bitfield-2.c             | 21
2 files changed, 33 insertions(+), 13 deletions(-)
diff --git a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
index ee8ae5224fd..6302cf8d1fc 100644
--- a/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -191,7 +191,14 @@ static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
// Round down from the field offset to find the first access position that is
// at an aligned offset of the initial access type.
- uint64_t AccessStart = FieldOffset - (FieldOffset % TypeSizeInBits);
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > 8 &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
while (AccessedTargetBits < FieldSize) {
// Check that we can access using a type of this size, without reading off
@@ -210,20 +217,12 @@ static CGBitFieldInfo ComputeBitFieldInfo(CodeGenTypes &Types,
// target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
// intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
// in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
uint64_t AccessBitsInFieldSize =
- std::min(AccessWidth - (AccessBitsInFieldStart - AccessStart),
- FieldSize - (AccessBitsInFieldStart-FieldOffset));
-
- // If we haven't accessed any target bits yet and narrowed the access size,
- // we might not have reached any target bits yet.
- //
- // FIXME: This test is unnecessarily once we choose the initial acccess size
- // more intelligently.
- if (!AccessedTargetBits && AccessBitsInFieldSize == 0) {
- AccessStart += AccessWidth;
- continue;
- }
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
assert(NumComponents < 3 && "Unexpected number of components!");
CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
diff --git a/clang/test/CodeGen/bitfield-2.c b/clang/test/CodeGen/bitfield-2.c
index 121bd7cd4fd..e91859fb728 100644
--- a/clang/test/CodeGen/bitfield-2.c
+++ b/clang/test/CodeGen/bitfield-2.c
@@ -345,3 +345,24 @@ unsigned test_8() {
res ^= g8.f0 ^ g8.f2 ^ g8.f3;
return res;
}
+
+/***/
+
+// This is another case where we narrow the access width immediately.
+//
+// <rdar://problem/7893760>
+
+struct __attribute__((packed)) s9 {
+ unsigned f0 : 7;
+ unsigned f1 : 7;
+ unsigned f2 : 7;
+ unsigned f3 : 7;
+ unsigned f4 : 7;
+ unsigned f5 : 7;
+ unsigned f6 : 7;
+ unsigned f7 : 7;
+};
+
+int f9_load(struct s9 *a0) {
+ return a0->f7;
+}
OpenPOWER on IntegriCloud