| author | Kostya Serebryany <kcc@google.com> | 2016-09-09 01:17:03 +0000 |
|---|---|---|
| committer | Kostya Serebryany <kcc@google.com> | 2016-09-09 01:17:03 +0000 |
| commit | 5c04bd250e84ed67624896550b65236a9170efcc (patch) | |
| tree | 6202d3fd522334b6eeb4a047585840c05ffc9a12 /llvm/lib/Fuzzer/FuzzerTraceState.cpp | |
| parent | 2c3ea554985b1433db28a504c629d5d8c0eea185 (diff) | |
[libFuzzer] remove use_traces=1 since use_value_profile seems to be strictly better
llvm-svn: 281007
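For context (not part of the commit): value profile turns every comparison into coverage-like features instead of recording and replaying operand traces, which is why it can subsume use_traces. The commit keeps the fuzzer::AddValueForCmp calls in the CMP hooks below; the snippet here is only a minimal, self-contained sketch of the idea, assuming a flat feature bitmap and a PC-plus-Hamming-distance feature. Names such as AddValueForCmpSketch and the map size are illustrative, not libFuzzer's actual implementation.

```cpp
// Minimal sketch of comparison value profiling -- NOT libFuzzer's code.
#include <bitset>
#include <cstdint>
#include <cstdio>

// Feature map; the size (2^17 bits) is illustrative.
static std::bitset<1 << 17> ValueBitMap;

// Derive a feature from the call site and the Hamming distance between the
// compared operands; a new feature means the input moved "closer" to
// satisfying the comparison, which is the same signal use_traces tried to
// exploit by recording the operands themselves.
static bool AddValueForCmpSketch(uintptr_t PC, uint64_t Arg1, uint64_t Arg2) {
  unsigned HammingDistance = __builtin_popcountll(Arg1 ^ Arg2);  // 0..64
  size_t Feature = ((PC & 0x3ff) << 7) | HammingDistance;        // fold into map
  bool IsNew = !ValueBitMap.test(Feature);
  ValueBitMap.set(Feature);
  return IsNew;
}

int main() {
  uintptr_t FakePC = 0x123;  // stand-in for __builtin_return_address(0)
  // As a mutated input approaches the "magic" constant, the Hamming distance
  // shrinks and new features appear, so the mutation is kept.
  std::printf("%d\n", AddValueForCmpSketch(FakePC, 0xDEADBEEF, 0x00000000));
  std::printf("%d\n", AddValueForCmpSketch(FakePC, 0xDEADBEEF, 0xDEAD0000));
  std::printf("%d\n", AddValueForCmpSketch(FakePC, 0xDEADBEEF, 0xDEADBEEF));
}
```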
Diffstat (limited to 'llvm/lib/Fuzzer/FuzzerTraceState.cpp')
-rw-r--r-- | llvm/lib/Fuzzer/FuzzerTraceState.cpp | 55 |
1 file changed, 9 insertions(+), 46 deletions(-)
diff --git a/llvm/lib/Fuzzer/FuzzerTraceState.cpp b/llvm/lib/Fuzzer/FuzzerTraceState.cpp
index 63dd5a6c649..77946ef9763 100644
--- a/llvm/lib/Fuzzer/FuzzerTraceState.cpp
+++ b/llvm/lib/Fuzzer/FuzzerTraceState.cpp
@@ -170,7 +170,6 @@ struct TraceBasedMutation {
 };
 
 // Declared as static globals for faster checks inside the hooks.
-static bool RecordingTraces = false;
 static bool RecordingMemcmp = false;
 static bool RecordingMemmem = false;
 static bool RecordingValueProfile = false;
@@ -209,9 +208,8 @@ public:
                           const uint8_t *DesiredData, size_t DataSize);
 
   void StartTraceRecording() {
-    if (!Options.UseTraces && !Options.UseMemcmp)
+    if (!Options.UseMemcmp)
       return;
-    RecordingTraces = Options.UseTraces;
     RecordingMemcmp = Options.UseMemcmp;
     RecordingMemmem = Options.UseMemmem;
     NumMutations = 0;
@@ -220,9 +218,8 @@ public:
   }
 
   void StopTraceRecording() {
-    if (!RecordingTraces && !RecordingMemcmp)
+    if (!RecordingMemcmp)
       return;
-    RecordingTraces = false;
     RecordingMemcmp = false;
     for (size_t i = 0; i < NumMutations; i++) {
       auto &M = Mutations[i];
@@ -332,7 +329,7 @@ void TraceState::DFSanCmpCallback(uintptr_t PC, size_t CmpSize, size_t CmpType,
                                   uint64_t Arg1, uint64_t Arg2, dfsan_label L1,
                                   dfsan_label L2) {
   assert(ReallyHaveDFSan());
-  if (!RecordingTraces || !F->InFuzzingThread()) return;
+  if (!F->InFuzzingThread()) return;
   if (L1 == 0 && L2 == 0)
     return;  // Not actionable.
   if (L1 != 0 && L2 != 0)
@@ -381,7 +378,7 @@ void TraceState::DFSanSwitchCallback(uint64_t PC, size_t ValSizeInBits,
                                      uint64_t Val, size_t NumCases,
                                      uint64_t *Cases, dfsan_label L) {
   assert(ReallyHaveDFSan());
-  if (!RecordingTraces || !F->InFuzzingThread()) return;
+  if (!F->InFuzzingThread()) return;
   if (!L) return;  // Not actionable.
   LabelRange LR = GetLabelRange(L);
   size_t ValSize = ValSizeInBits / 8;
@@ -451,7 +448,7 @@ int TraceState::TryToAddDesiredData(const uint8_t *PresentData,
 
 void TraceState::TraceCmpCallback(uintptr_t PC, size_t CmpSize, size_t CmpType,
                                   uint64_t Arg1, uint64_t Arg2) {
-  if (!RecordingTraces || !F->InFuzzingThread()) return;
+  if (!F->InFuzzingThread()) return;
   if ((CmpType == ICMP_EQ || CmpType == ICMP_NE) && Arg1 == Arg2)
     return;  // No reason to mutate.
   int Added = 0;
@@ -482,7 +479,7 @@ void TraceState::TraceMemcmpCallback(size_t CmpSize, const uint8_t *Data1,
 void TraceState::TraceSwitchCallback(uintptr_t PC, size_t ValSizeInBits,
                                      uint64_t Val, size_t NumCases,
                                      uint64_t *Cases) {
-  if (!RecordingTraces || !F->InFuzzingThread()) return;
+  if (!F->InFuzzingThread()) return;
   size_t ValSize = ValSizeInBits / 8;
   bool TryShort = IsTwoByteData(Val);
   for (size_t i = 0; i < NumCases; i++)
@@ -512,7 +509,7 @@ void Fuzzer::StopTraceRecording() {
 }
 
 void Fuzzer::AssignTaintLabels(uint8_t *Data, size_t Size) {
-  if (!Options.UseTraces && !Options.UseMemcmp) return;
+  if (!Options.UseMemcmp) return;
   if (!ReallyHaveDFSan()) return;
   TS->EnsureDfsanLabels(Size);
   for (size_t i = 0; i < Size; i++)
@@ -520,7 +517,7 @@ void Fuzzer::AssignTaintLabels(uint8_t *Data, size_t Size) {
 }
 
 void Fuzzer::InitializeTraceState() {
-  if (!Options.UseTraces && !Options.UseMemcmp) return;
+  if (!Options.UseMemcmp) return;
   TS = new TraceState(MD, Options, this);
 }
 
@@ -603,7 +600,6 @@ static void AddValueForSingleVal(void *PCptr, uintptr_t Val) {
 }  // namespace fuzzer
 
 using fuzzer::TS;
-using fuzzer::RecordingTraces;
 using fuzzer::RecordingMemcmp;
 using fuzzer::RecordingValueProfile;
 
@@ -611,21 +607,11 @@ extern "C" {
 void __dfsw___sanitizer_cov_trace_cmp(uint64_t SizeAndType, uint64_t Arg1,
                                       uint64_t Arg2, dfsan_label L0,
                                       dfsan_label L1, dfsan_label L2) {
-  if (!RecordingTraces) return;
-  assert(L0 == 0);
-  uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-  uint64_t CmpSize = (SizeAndType >> 32) / 8;
-  uint64_t Type = (SizeAndType << 32) >> 32;
-  TS->DFSanCmpCallback(PC, CmpSize, Type, Arg1, Arg2, L1, L2);
 }
 
 #define DFSAN_CMP_CALLBACK(N) \
   void __dfsw___sanitizer_cov_trace_cmp##N(uint64_t Arg1, uint64_t Arg2, \
                                            dfsan_label L1, dfsan_label L2) { \
-    if (RecordingTraces) \
-      TS->DFSanCmpCallback( \
-          reinterpret_cast<uintptr_t>(__builtin_return_address(0)), N, \
-          fuzzer::ICMP_EQ, Arg1, Arg2, L1, L2); \
   }
 
 DFSAN_CMP_CALLBACK(1)
@@ -636,9 +622,6 @@ DFSAN_CMP_CALLBACK(8)
 
 void __dfsw___sanitizer_cov_trace_switch(uint64_t Val, uint64_t *Cases,
                                          dfsan_label L1, dfsan_label L2) {
-  if (!RecordingTraces) return;
-  uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-  TS->DFSanSwitchCallback(PC, Cases[1], Val, Cases[0], Cases+2, L1);
 }
 
 void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
@@ -750,50 +733,30 @@ void __sanitizer_weak_hook_memmem(void *called_pc, const void *s1, size_t len1,
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_cmp(uint64_t SizeAndType, uint64_t Arg1,
                                uint64_t Arg2) {
-  if (RecordingTraces) {
-    uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-    uint64_t CmpSize = (SizeAndType >> 32) / 8;
-    uint64_t Type = (SizeAndType << 32) >> 32;
-    TS->TraceCmpCallback(PC, CmpSize, Type, Arg1, Arg2);
-  }
   if (RecordingValueProfile)
     fuzzer::AddValueForCmp(__builtin_return_address(0), Arg1, Arg2);
 }
 
-// Adding if(RecordingTraces){...} slows down the VP callbacks.
-// Once we prove that VP is as strong as traces, delete this.
-#define MAYBE_RECORD_TRACE(N) \
-  if (RecordingTraces) { \
-    uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0)); \
-    TS->TraceCmpCallback(PC, N, fuzzer::ICMP_EQ, Arg1, Arg2); \
-  }
-
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_cmp8(uint64_t Arg1, int64_t Arg2) {
   fuzzer::AddValueForCmp(__builtin_return_address(0), Arg1, Arg2);
-  MAYBE_RECORD_TRACE(8);
 }
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_cmp4(uint32_t Arg1, int32_t Arg2) {
   fuzzer::AddValueForCmp(__builtin_return_address(0), Arg1, Arg2);
-  MAYBE_RECORD_TRACE(4);
 }
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_cmp2(uint16_t Arg1, int16_t Arg2) {
   fuzzer::AddValueForCmp(__builtin_return_address(0), Arg1, Arg2);
-  MAYBE_RECORD_TRACE(2);
 }
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_cmp1(uint8_t Arg1, int8_t Arg2) {
   fuzzer::AddValueForCmp(__builtin_return_address(0), Arg1, Arg2);
-  MAYBE_RECORD_TRACE(1);
 }
 
 __attribute__((visibility("default")))
 void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t *Cases) {
-  if (!RecordingTraces) return;
-  uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
-  TS->TraceSwitchCallback(PC, Cases[1], Val, Cases[0], Cases + 2);
+  // TODO(kcc): support value profile here.
 }
 
 __attribute__((visibility("default")))
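The rewritten __sanitizer_cov_trace_switch is left as a stub with a TODO. Below is a hedged sketch of one way that TODO could be addressed, assuming the Cases layout used by the removed TraceSwitchCallback call (Cases[0] = number of cases, Cases[1] = value size in bits, case values from Cases[2] on): treat the switch as one comparison of Val per case and record a per-case feature, mirroring what the CMP hooks feed through fuzzer::AddValueForCmp. Everything named *Sketch is hypothetical, not the committed code.

```cpp
// Hypothetical sketch only -- NOT part of this commit (which leaves a TODO).
#include <cstdint>
#include <cstdio>
#include <unordered_set>

static std::unordered_set<uint64_t> SwitchFeatures;  // illustrative feature set

// Cases[0] = number of cases, Cases[1] = value size in bits, case values
// start at Cases[2] -- the layout the removed TraceSwitchCallback call used.
static void SwitchValueProfileSketch(uintptr_t PC, uint64_t Val,
                                     const uint64_t *Cases) {
  uint64_t NumCases = Cases[0];
  for (uint64_t i = 0; i < NumCases; i++) {
    // Feature per (call site, case index, Hamming distance to the case value).
    uint64_t Dist = __builtin_popcountll(Val ^ Cases[2 + i]);
    SwitchFeatures.insert((uint64_t(PC) << 16) ^ (i << 8) ^ Dist);
  }
}

int main() {
  // A 32-bit switch with 3 cases: 10, 20, 0xDEADBEEF.
  const uint64_t Cases[] = {3, 32, 10, 20, 0xDEADBEEF};
  SwitchValueProfileSketch(0x42, 0xDEADBE00, Cases);  // close to the third case
  std::printf("features: %zu\n", SwitchFeatures.size());
}
```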