Diffstat (limited to 'compiler-rt/lib/sanitizer_common')
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_common.h      | 15
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc | 13
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h  |  2
3 files changed, 29 insertions(+), 1 deletion(-)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 27cc3082537..0a98012b679 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -23,12 +23,25 @@ namespace __sanitizer {
// Constants.
const uptr kWordSize = __WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
+#if defined(__powerpc__) || defined(__powerpc64__)
+// Current PPC64 kernels use 64K page sizes, but they can be
+// configured with 4K or even other sizes.
+// We may want to use getpagesize() or sysconf(_SC_PAGESIZE) here rather than
+// hardcoding the values, but today these values need to be compile-time
+// constants.
+const uptr kPageSizeBits = 16;
+const uptr kPageSize = 1UL << kPageSizeBits;
+const uptr kCacheLineSize = 128;
+const uptr kMmapGranularity = kPageSize;
+#elif !defined(_WIN32)
const uptr kPageSizeBits = 12;
const uptr kPageSize = 1UL << kPageSizeBits;
const uptr kCacheLineSize = 64;
-#ifndef _WIN32
const uptr kMmapGranularity = kPageSize;
#else
+const uptr kPageSizeBits = 12;
+const uptr kPageSize = 1UL << kPageSizeBits;
+const uptr kCacheLineSize = 64;
const uptr kMmapGranularity = 1UL << 16;
#endif
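
A note on the hardcoded constants above: the comment concedes that PPC64 kernels can be configured with page sizes other than 64K, yet kPageSize has to stay a compile-time constant. What follows is a minimal standalone sketch, not part of the patch, of the runtime cross-check that comment alludes to, using sysconf(_SC_PAGESIZE); the names kPageSizeBits and kPageSize mirror the values added for PPC.

// Sketch only: confirm at startup that the compile-time page size constant
// is at least as large as the kernel's real page size.
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

typedef unsigned long uptr;

static const uptr kPageSizeBits = 16;            // 64K pages assumed, as above
static const uptr kPageSize = 1UL << kPageSizeBits;

int main() {
  long runtime_page_size = sysconf(_SC_PAGESIZE);
  if (runtime_page_size > 0 &&
      static_cast<uptr>(runtime_page_size) > kPageSize) {
    // A kernel built with pages larger than 64K would invalidate the
    // hardcoded constant, so report it rather than fail later in mmap code.
    fprintf(stderr, "page size %ld exceeds compile-time kPageSize %lu\n",
            runtime_page_size, (unsigned long)kPageSize);
    return EXIT_FAILURE;
  }
  printf("kPageSize=%lu, runtime page size=%ld\n",
         (unsigned long)kPageSize, runtime_page_size);
  return 0;
}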
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
index 28ee9600d0a..964c5241fac 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.cc
@@ -33,7 +33,12 @@ static uptr patch_pc(uptr pc) {
// Cancel Thumb bit.
pc = pc & (~1);
#endif
+#if defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#else
return pc - 1;
+#endif
}
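
For context on the pc - 4 special case: patch_pc maps a return address back inside the call that produced it, so the symbolizer attributes the frame to the calling line rather than the line after it. On PPC every instruction is a fixed 4 bytes, so stepping back one full instruction is exact; on variable-length ISAs such as x86, subtracting 1 is enough to land inside the call instruction. Below is a standalone sketch of that logic; the __arm__ guard is assumed from the visible "Cancel Thumb bit" context, which the hunk does not show in full.

// Sketch of the pc adjustment shown in the hunk above, compiled outside the
// sanitizer tree; the surrounding #if structure is reconstructed.
#include <cstdio>

typedef unsigned long uptr;

static uptr patch_pc(uptr pc) {
#if defined(__arm__)
  // Cancel the Thumb bit so the address is even before adjusting it.
  pc = pc & (~1);
#endif
#if defined(__powerpc__) || defined(__powerpc64__)
  // Fixed 4-byte instructions: step back one whole instruction to land on
  // the branch-and-link whose return address we were given.
  return pc - 4;
#else
  // Variable-length instructions: any byte inside the call is enough for the
  // symbolizer, so subtracting 1 suffices.
  return pc - 1;
#endif
}

int main() {
  uptr return_address = 0x10001234;
  printf("return address 0x%lx -> call-site pc 0x%lx\n",
         return_address, patch_pc(return_address));
  return 0;
}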
static void PrintStackFramePrefix(uptr frame_num, uptr pc) {
@@ -138,6 +143,14 @@ void StackTrace::FastUnwindStack(uptr pc, uptr bp,
}
}
+void StackTrace::PopStackFrames(uptr count) {
+ CHECK(size > count);
+ size -= count;
+ for (uptr i = 0; i < size; i++) {
+ trace[i] = trace[i + count];
+ }
+}
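
The new PopStackFrames drops the count innermost entries by shifting the rest of trace[] down, which lets callers hide their own wrapper frames before a trace is printed or stored. Here is a simplified, self-contained model of that behaviour, with a plain struct standing in for the real StackTrace (which has more members than shown here).

// Simplified model of the PopStackFrames logic added above.
#include <cassert>
#include <cstdio>

typedef unsigned long uptr;

struct MiniTrace {
  static const uptr kMaxFrames = 64;
  uptr size;
  uptr trace[kMaxFrames];

  void PopStackFrames(uptr count) {
    assert(size > count);           // mirrors CHECK(size > count)
    size -= count;
    for (uptr i = 0; i < size; i++)
      trace[i] = trace[i + count];  // shift remaining frames toward slot 0
  }
};

int main() {
  MiniTrace t;
  t.size = 4;
  t.trace[0] = 0x1000;  // innermost frame (e.g. an interceptor wrapper)
  t.trace[1] = 0x2000;
  t.trace[2] = 0x3000;
  t.trace[3] = 0x4000;  // outermost frame

  t.PopStackFrames(1);  // hide the innermost wrapper frame

  for (uptr i = 0; i < t.size; i++)
    printf("frame %lu: 0x%lx\n", i, t.trace[i]);
  return 0;
}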
+
// On 32-bits we don't compress stack traces.
// On 64-bits we compress stack traces: if a given pc differs slightly from
// the previous one, we record a 31-bit offset instead of the full pc.
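
That comment is the contract for CompressStack: on 64-bit targets, a pc close to the previous one is stored as a 31-bit offset rather than a full word. The sketch below only illustrates that fits-in-31-bits decision; it is not the actual CompressStack/UncompressStack encoding from the tree.

// Illustration only: decide whether a pc can be encoded as a 31-bit signed
// offset from the previous pc, as the comment above describes.
#include <cstdio>
#include <cstdint>

static bool FitsIn31BitOffset(uint64_t prev_pc, uint64_t pc) {
  int64_t delta = static_cast<int64_t>(pc - prev_pc);
  // A signed 31-bit field covers offsets in [-2^30, 2^30).
  return delta >= -(INT64_C(1) << 30) && delta < (INT64_C(1) << 30);
}

int main() {
  uint64_t prev = 0x00007f0000401000ULL;
  printf("nearby pc fits:  %d\n", FitsIn31BitOffset(prev, prev + 0x40));
  printf("distant pc fits: %d\n",
         FitsIn31BitOffset(prev, prev + (INT64_C(1) << 40)));
  return 0;
}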
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
index b823a7e0d85..fe2dcf06424 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stacktrace.h
@@ -45,6 +45,8 @@ struct StackTrace {
void FastUnwindStack(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom);
+ void PopStackFrames(uptr count);
+
static uptr GetCurrentPc();
static uptr CompressStack(StackTrace *stack,