author     Kostya Serebryany <kcc@google.com>    2013-11-23 12:49:19 +0000
committer  Kostya Serebryany <kcc@google.com>    2013-11-23 12:49:19 +0000
commit     a2fde9484f98686d39f54618cf40b63dde92acf7 (patch)
tree       91559e7d9a2eb1b168ab0fe5eb0b7bf3ed2d5bee /compiler-rt/lib
parent     364b59e7f2d2ad0225d5afea042df36f5a4f4d00 (diff)
[sanitizer] use 16-byte aligned bzero in performance critical place (mostly for lsan)
llvm-svn: 195549
Diffstat (limited to 'compiler-rt/lib')
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_allocator.h |  2 +-
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_libc.cc     | 15 +++++++++++++++
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_libc.h      |  2 ++
3 files changed, 18 insertions(+), 1 deletion(-)
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
index 6075cfe92aa..81e40ed1516 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.h
@@ -1191,7 +1191,7 @@ class CombinedAllocator {
if (alignment > 8)
CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
if (cleared && res)
- internal_memset(res, 0, size);
+ internal_bzero_aligned16(res, RoundUpTo(size, 16));
return res;
}
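The replacement rounds the cleared length up to a multiple of 16 because internal_bzero_aligned16 (added below) requires both the pointer and the size to be multiples of 16. A minimal sketch of the round-up arithmetic assumed for RoundUpTo(size, 16) at this call site; the helper name and the static_assert checks are illustrative, not part of the patch:

#include <cstdint>

// Power-of-two round-up, matching what RoundUpTo(size, 16) is assumed to
// compute here: the smallest multiple of 'boundary' that is >= 'size'.
constexpr uint64_t round_up_to(uint64_t size, uint64_t boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

// A 37-byte request is zeroed as 48 bytes; exact multiples are unchanged.
static_assert(round_up_to(37, 16) == 48, "rounds up to the next multiple of 16");
static_assert(round_up_to(64, 16) == 64, "multiples of 16 stay as-is");

Zeroing a few bytes past the requested size is only sound if the allocator never returns a chunk smaller than the rounded length, which is presumably guaranteed by the allocator's size classes and alignment here.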
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libc.cc b/compiler-rt/lib/sanitizer_common/sanitizer_libc.cc
index 72ddf0fd3fb..c93c9cfbc8e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libc.cc
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libc.cc
@@ -16,6 +16,11 @@
namespace __sanitizer {
+// Make the compiler think that something is going on there.
+static inline void break_optimization(void *arg) {
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+}
+
s64 internal_atoll(const char *nptr) {
return internal_simple_strtoll(nptr, (char**)0, 10);
}
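The empty inline-asm statement acts as a compiler barrier: the "memory" clobber and the opaque use of arg keep the optimizer from proving that a zeroing loop is a memset and replacing it with a library call, which an internal, interceptor-free routine must avoid. A standalone sketch of the same idiom, assuming a GCC/Clang-style asm extension; the names are illustrative:

#include <cstddef>

// Opaque use of 'arg' plus a "memory" clobber: the compiler must assume any
// memory may be read or written here, so it cannot pattern-match the
// caller's stores into a memset() call.
static inline void opaque_use(void *arg) {
  __asm__ __volatile__("" : : "r"(arg) : "memory");
}

// Without opaque_use(), Clang/GCC commonly rewrite this loop as memset();
// with it, the explicit stores survive.
void zero_bytes(unsigned char *p, size_t n) {
  for (size_t i = 0; i < n; i++) {
    p[i] = 0;
    opaque_use(p);
  }
}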
@@ -62,6 +67,16 @@ void *internal_memmove(void *dest, const void *src, uptr n) {
return dest;
}
+// Semi-fast bzero for 16-aligned data. Still far from peak performance.
+void internal_bzero_aligned16(void *s, uptr n) {
+ struct S16 { u64 a, b; } ALIGNED(16);
+ CHECK_EQ((reinterpret_cast<uptr>(s) | n) & 15, 0);
+ for (S16 *p = reinterpret_cast<S16*>(s), *end = p + n / 16; p < end; p++) {
+ p->a = p->b = 0;
+ break_optimization(0); // Make sure this does not become memset.
+ }
+}
+
void *internal_memset(void* s, int c, uptr n) {
// The next line prevents Clang from making a call to memset() instead of the
// loop below.
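A self-contained version of the same 16-byte zeroing scheme, with standard C++ stand-ins for the sanitizer-internal u64/uptr types, CHECK_EQ, ALIGNED(16), and break_optimization; this is a sketch of the technique, not the runtime's own code:

#include <cassert>
#include <cstdint>

static inline void opaque_use(void *arg) {
  __asm__ __volatile__("" : : "r"(arg) : "memory");  // compiler barrier
}

// Zero [s, s + n) two 64-bit stores at a time; both s and n must be
// multiples of 16, mirroring the CHECK_EQ in the patch above.
void bzero_aligned16_sketch(void *s, uintptr_t n) {
  struct alignas(16) S16 { uint64_t a, b; };
  assert(((reinterpret_cast<uintptr_t>(s) | n) & 15) == 0);
  for (S16 *p = static_cast<S16 *>(s), *end = p + n / 16; p < end; p++) {
    p->a = p->b = 0;
    opaque_use(nullptr);  // keep the loop from being turned into memset()
  }
}

The per-iteration barrier costs a little, but the wide 16-byte stores still beat the byte-at-a-time internal_memset, and the loop is guaranteed not to collapse back into a memset() call that the runtime cannot rely on.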
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
index 187a714a3b2..9a1b4b53935 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_libc.h
@@ -29,6 +29,8 @@ void *internal_memchr(const void *s, int c, uptr n);
int internal_memcmp(const void* s1, const void* s2, uptr n);
void *internal_memcpy(void *dest, const void *src, uptr n);
void *internal_memmove(void *dest, const void *src, uptr n);
+// Set [s, s + n) to 0. Both s and n should be 16-aligned.
+void internal_bzero_aligned16(void *s, uptr n);
// Should not be used in performance-critical places.
void *internal_memset(void *s, int c, uptr n);
char* internal_strchr(const char *s, int c);
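The declared contract mirrors the CHECK_EQ inside the implementation: both the address and the length must be multiples of 16. A small self-contained sketch of that precondition from the caller's side (the helper and buffer names are made up for illustration):

#include <cstdint>

// True when both the address and the length are multiples of 16, i.e. when
// the pair may legally be passed to internal_bzero_aligned16().
inline bool fits_bzero_aligned16(const void *s, uintptr_t n) {
  return ((reinterpret_cast<uintptr_t>(s) | n) & 15) == 0;
}

// Example: 16-byte-aligned static storage whose size is a multiple of 16
// satisfies the contract by construction.
alignas(16) static unsigned char g_scratch[256];
static_assert(sizeof(g_scratch) % 16 == 0, "size must be a multiple of 16");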