author     Kostya Serebryany <kcc@google.com>  2013-03-29 09:44:16 +0000
committer  Kostya Serebryany <kcc@google.com>  2013-03-29 09:44:16 +0000
commit     91952cda76f6efa17f583680d03ae465a39153b0 (patch)
tree       44b113e95f0aa3e0992b3833c8208c1e3dfca587
parent     b042555cd3d30be2549db4e4a0ec2cf2aaacc1ea (diff)
[tsan] restore performance critical inlining in tsan: remove static from ALWAYS_INLINE, use ALWAYS_INLINE USED for critical functions.
llvm-svn: 178341
-rw-r--r--  compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h  | 2
-rw-r--r--  compiler-rt/lib/tsan/rtl/tsan_rtl.cc                        | 8
2 files changed, 5 insertions, 5 deletions
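The change itself is small, but the two attributes interact in a non-obvious way. Dropping `static` from ALWAYS_INLINE lets the annotated functions keep external linkage, which matters because entry points such as MemoryAccess, FuncEntry, and FuncExit are presumably declared in a header and referenced from other translation units. Pairing the macro with USED then keeps an out-of-line copy of each function in the object file even though always_inline causes every call visible in the defining translation unit to be inlined. The sketch below illustrates that interaction; it is not code from the tree, Hot() and main() are hypothetical, and the __attribute__((used)) expansion of USED is assumed for the non-MSVC branch (only the MSVC branch of the header appears in the hunk below).

// sketch.cpp -- minimal, self-contained illustration (hypothetical; build
// with `clang++ -O2 sketch.cpp` or `g++ -O2 sketch.cpp`).
#include <cstdio>

// Post-patch non-MSVC macro definitions; USED's expansion is assumed here.
#define ALWAYS_INLINE inline __attribute__((always_inline))
#define USED __attribute__((used))

// Without `static` the function keeps external linkage, so a declaration in
// a header still resolves to this definition from other translation units.
// `used` forces the out-of-line copy to be emitted even though every call
// the compiler can see here is inlined, so external callers can still link.
ALWAYS_INLINE USED
void Hot(int x) {
  std::printf("hot path: %d\n", x);
}

int main() {
  Hot(42);  // inlined at the call site; the standalone Hot symbol remains
  return 0;
}

With the pre-patch `static` in the macro, the same annotation would instead give Hot internal linkage, so callers in other translation units could not reference it at all.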
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
index e5a7fb7abca..8c0e601fcf3 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_internal_defs.h
@@ -130,7 +130,7 @@ using namespace __sanitizer; // NOLINT
# define USED
# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */
#else // _MSC_VER
-# define ALWAYS_INLINE static inline __attribute__((always_inline))
+# define ALWAYS_INLINE inline __attribute__((always_inline))
# define ALIAS(x) __attribute__((alias(x)))
# define ALIGNED(x) __attribute__((aligned(x)))
# define FORMAT(f, a) __attribute__((format(printf, f, a)))
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
index 36868740234..9209cd70f6a 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cc
@@ -385,7 +385,7 @@ static inline bool HappensBefore(Shadow old, ThreadState *thr) {
return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

-// FIXME: should be ALWAYS_INLINE for performance reasons?
+ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
u64 *shadow_mem, Shadow cur) {
@@ -459,7 +459,7 @@ void MemoryAccessImpl(ThreadState *thr, uptr addr,
return;
}

-// FIXME: should be ALWAYS_INLINE for performance reasons?
+ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
u64 *shadow_mem = (u64*)MemToShadow(addr);
@@ -597,7 +597,7 @@ void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
MemoryRangeSet(thr, pc, addr, size, s.raw());
}

-// FIXME: should be ALWAYS_INLINE for performance reasons?
+ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncEnter);
@@ -627,7 +627,7 @@ void FuncEntry(ThreadState *thr, uptr pc) {
thr->shadow_stack_pos++;
}

-// FIXME: should be ALWAYS_INLINE for performance reasons?
+ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
DCHECK_EQ(thr->in_rtl, 0);
StatInc(thr, StatFuncExit);