Diffstat (limited to 'lldb/source/Target')
-rw-r--r--  lldb/source/Target/StackFrame.cpp          40
-rw-r--r--  lldb/source/Target/StackFrameList.cpp      195
-rw-r--r--  lldb/source/Target/ThreadPlanStepOut.cpp    30
3 files changed, 234 insertions, 31 deletions
diff --git a/lldb/source/Target/StackFrame.cpp b/lldb/source/Target/StackFrame.cpp
index 0e9c6df2bf7..5ed15099612 100644
--- a/lldb/source/Target/StackFrame.cpp
+++ b/lldb/source/Target/StackFrame.cpp
@@ -49,20 +49,18 @@ using namespace lldb_private;
 StackFrame::StackFrame(const ThreadSP &thread_sp, user_id_t frame_idx,
                        user_id_t unwind_frame_index, addr_t cfa,
-                       bool cfa_is_valid, addr_t pc, uint32_t stop_id,
-                       bool stop_id_is_valid, bool is_history_frame,
+                       bool cfa_is_valid, addr_t pc, StackFrame::Kind kind,
                        const SymbolContext *sc_ptr)
     : m_thread_wp(thread_sp), m_frame_index(frame_idx),
       m_concrete_frame_index(unwind_frame_index), m_reg_context_sp(),
       m_id(pc, cfa, nullptr), m_frame_code_addr(pc), m_sc(), m_flags(),
       m_frame_base(), m_frame_base_error(), m_cfa_is_valid(cfa_is_valid),
-      m_stop_id(stop_id), m_stop_id_is_valid(stop_id_is_valid),
-      m_is_history_frame(is_history_frame), m_variable_list_sp(),
+      m_stack_frame_kind(kind), m_variable_list_sp(),
       m_variable_list_value_objects(), m_disassembly(), m_mutex() {
   // If we don't have a CFA value, use the frame index for our StackID so that
   // recursive functions properly aren't confused with one another on a history
   // stack.
-  if (m_is_history_frame && !m_cfa_is_valid) {
+  if (IsHistorical() && !m_cfa_is_valid) {
     m_id.SetCFA(m_frame_index);
   }
@@ -80,10 +78,9 @@ StackFrame::StackFrame(const ThreadSP &thread_sp, user_id_t frame_idx,
       m_concrete_frame_index(unwind_frame_index),
       m_reg_context_sp(reg_context_sp), m_id(pc, cfa, nullptr),
       m_frame_code_addr(pc), m_sc(), m_flags(), m_frame_base(),
-      m_frame_base_error(), m_cfa_is_valid(true), m_stop_id(0),
-      m_stop_id_is_valid(false), m_is_history_frame(false),
-      m_variable_list_sp(), m_variable_list_value_objects(), m_disassembly(),
-      m_mutex() {
+      m_frame_base_error(), m_cfa_is_valid(true),
+      m_stack_frame_kind(StackFrame::Kind::Regular), m_variable_list_sp(),
+      m_variable_list_value_objects(), m_disassembly(), m_mutex() {
   if (sc_ptr != nullptr) {
     m_sc = *sc_ptr;
     m_flags.Set(m_sc.GetResolvedMask());
@@ -106,10 +103,9 @@ StackFrame::StackFrame(const ThreadSP &thread_sp, user_id_t frame_idx,
       m_id(pc_addr.GetLoadAddress(thread_sp->CalculateTarget().get()), cfa,
            nullptr),
       m_frame_code_addr(pc_addr), m_sc(), m_flags(), m_frame_base(),
-      m_frame_base_error(), m_cfa_is_valid(true), m_stop_id(0),
-      m_stop_id_is_valid(false), m_is_history_frame(false),
-      m_variable_list_sp(), m_variable_list_value_objects(), m_disassembly(),
-      m_mutex() {
+      m_frame_base_error(), m_cfa_is_valid(true),
+      m_stack_frame_kind(StackFrame::Kind::Regular), m_variable_list_sp(),
+      m_variable_list_value_objects(), m_disassembly(), m_mutex() {
   if (sc_ptr != nullptr) {
     m_sc = *sc_ptr;
     m_flags.Set(m_sc.GetResolvedMask());
@@ -210,7 +206,7 @@ const Address &StackFrame::GetFrameCodeAddress() {
 bool StackFrame::ChangePC(addr_t pc) {
   std::lock_guard<std::recursive_mutex> guard(m_mutex);
   // We can't change the pc value of a history stack frame - it is immutable.
-  if (m_is_history_frame)
+  if (IsHistorical())
     return false;
   m_frame_code_addr.SetRawAddress(pc);
   m_sc.Clear(false);
@@ -456,7 +452,7 @@ StackFrame::GetInScopeVariableList(bool get_file_globals,
                                    bool must_have_valid_location) {
   std::lock_guard<std::recursive_mutex> guard(m_mutex);
   // We can't fetch variable information for a history stack frame.
-  if (m_is_history_frame)
+  if (IsHistorical())
     return VariableListSP();
 
   VariableListSP var_list_sp(new VariableList);
@@ -490,7 +486,7 @@ ValueObjectSP StackFrame::GetValueForVariableExpressionPath(
     VariableSP &var_sp, Status &error) {
   llvm::StringRef original_var_expr = var_expr;
   // We can't fetch variable information for a history stack frame.
-  if (m_is_history_frame)
+  if (IsHistorical())
     return ValueObjectSP();
 
   if (var_expr.empty()) {
@@ -1135,7 +1131,7 @@ StackFrame::GetValueObjectForFrameVariable(const VariableSP &variable_sp,
                                            DynamicValueType use_dynamic) {
   std::lock_guard<std::recursive_mutex> guard(m_mutex);
   ValueObjectSP valobj_sp;
-  if (m_is_history_frame) {
+  if (IsHistorical()) {
     return valobj_sp;
   }
   VariableList *var_list = GetVariableList(true);
@@ -1164,7 +1160,7 @@ StackFrame::GetValueObjectForFrameVariable(const VariableSP &variable_sp,
 ValueObjectSP StackFrame::TrackGlobalVariable(const VariableSP &variable_sp,
                                               DynamicValueType use_dynamic) {
   std::lock_guard<std::recursive_mutex> guard(m_mutex);
-  if (m_is_history_frame)
+  if (IsHistorical())
     return ValueObjectSP();
 
   // Check to make sure we aren't already tracking this variable?
@@ -1194,6 +1190,14 @@ bool StackFrame::IsInlined() {
   return false;
 }
 
+bool StackFrame::IsHistorical() const {
+  return m_stack_frame_kind == StackFrame::Kind::History;
+}
+
+bool StackFrame::IsArtificial() const {
+  return m_stack_frame_kind == StackFrame::Kind::Artificial;
+}
+
 lldb::LanguageType StackFrame::GetLanguage() {
   CompileUnit *cu = GetSymbolContext(eSymbolContextCompUnit).comp_unit;
   if (cu)
diff --git a/lldb/source/Target/StackFrameList.cpp b/lldb/source/Target/StackFrameList.cpp
index f177df0ccdf..a01de7a36d9 100644
--- a/lldb/source/Target/StackFrameList.cpp
+++ b/lldb/source/Target/StackFrameList.cpp
@@ -27,6 +27,7 @@
 #include "lldb/Target/Thread.h"
 #include "lldb/Target/Unwind.h"
 #include "lldb/Utility/Log.h"
+#include "llvm/ADT/SmallPtrSet.h"
 
 //#define DEBUG_STACK_FRAMES 1
 
@@ -240,6 +241,178 @@ void StackFrameList::GetOnlyConcreteFramesUpTo(uint32_t end_idx,
   m_frames.resize(num_frames);
 }
 
+/// Find the unique path through the call graph from \p begin (with return PC
+/// \p return_pc) to \p end. On success this path is stored into \p path, and
+/// on failure \p path is unchanged.
+static void FindInterveningFrames(Function &begin, Function &end,
+                                  Target &target, addr_t return_pc,
+                                  std::vector<Function *> &path,
+                                  ModuleList &images, Log *log) {
+  LLDB_LOG(log, "Finding frames between {0} and {1}, retn-pc={2:x}",
+           begin.GetDisplayName(), end.GetDisplayName(), return_pc);
+
+  // Find a non-tail calling edge with the correct return PC.
+  auto first_level_edges = begin.GetCallEdges();
+  if (log)
+    for (const CallEdge &edge : first_level_edges)
+      LLDB_LOG(log, "FindInterveningFrames: found call with retn-PC = {0:x}",
+               edge.GetReturnPCAddress(begin, target));
+  auto first_edge_it = std::lower_bound(
+      first_level_edges.begin(), first_level_edges.end(), return_pc,
+      [&](const CallEdge &edge, addr_t target_pc) {
+        return edge.GetReturnPCAddress(begin, target) < target_pc;
+      });
+  if (first_edge_it == first_level_edges.end() ||
+      first_edge_it->GetReturnPCAddress(begin, target) != return_pc) {
+    LLDB_LOG(log, "No call edge outgoing from {0} with retn-PC == {1:x}",
+             begin.GetDisplayName(), return_pc);
+    return;
+  }
+  CallEdge &first_edge = const_cast<CallEdge &>(*first_edge_it);
+
+  // The first callee may not be resolved, or there may be nothing to fill in.
+  Function *first_callee = first_edge.GetCallee(images);
+  if (!first_callee) {
+    LLDB_LOG(log, "Could not resolve callee");
+    return;
+  }
+  if (first_callee == &end) {
+    LLDB_LOG(log, "Not searching further, first callee is {0} (retn-PC: {1:x})",
+             end.GetDisplayName(), return_pc);
+    return;
+  }
+
+  // Run DFS on the tail-calling edges out of the first callee to find \p end.
+  // Fully explore the set of functions reachable from the first edge via tail
+  // calls in order to detect ambiguous executions.
+  struct DFS {
+    std::vector<Function *> active_path = {};
+    std::vector<Function *> solution_path = {};
+    llvm::SmallPtrSet<Function *, 2> visited_nodes = {};
+    bool ambiguous = false;
+    Function *end;
+    ModuleList &images;
+
+    DFS(Function *end, ModuleList &images) : end(end), images(images) {}
+
+    void search(Function *first_callee, std::vector<Function *> &path) {
+      dfs(first_callee);
+      if (!ambiguous)
+        path = std::move(solution_path);
+    }
+
+    void dfs(Function *callee) {
+      // Found a path to the target function.
+      if (callee == end) {
+        if (solution_path.empty())
+          solution_path = active_path;
+        else
+          ambiguous = true;
+        return;
+      }
+
+      // Terminate the search if tail recursion is found, or more generally if
+      // there's more than one way to reach a target. This errs on the side of
+      // caution: it conservatively stops searching when some solutions are
+      // still possible to save time in the average case.
+      if (!visited_nodes.insert(callee).second) {
+        ambiguous = true;
+        return;
+      }
+
+      // Search the calls made from this callee.
+      active_path.push_back(callee);
+      for (CallEdge &edge : callee->GetTailCallingEdges()) {
+        Function *next_callee = edge.GetCallee(images);
+        if (!next_callee)
+          continue;
+
+        dfs(next_callee);
+        if (ambiguous)
+          return;
+      }
+      active_path.pop_back();
+    }
+  };
+
+  DFS(&end, images).search(first_callee, path);
+}
+
+/// Given that \p next_frame will be appended to the frame list, synthesize
+/// tail call frames between the current end of the list and \p next_frame.
+/// If any frames are added, adjust the frame index of \p next_frame.
+///
+///   --------------
+///   |    ...     | <- Completed frames.
+///   --------------
+///   | prev_frame |
+///   --------------
+///   |    ...     | <- Artificial frames inserted here.
+///   --------------
+///   | next_frame |
+///   --------------
+///   |    ...     | <- Not-yet-visited frames.
+///   --------------
+void StackFrameList::SynthesizeTailCallFrames(StackFrame &next_frame) {
+  TargetSP target_sp = next_frame.CalculateTarget();
+  if (!target_sp)
+    return;
+
+  lldb::RegisterContextSP next_reg_ctx_sp = next_frame.GetRegisterContext();
+  if (!next_reg_ctx_sp)
+    return;
+
+  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
+
+  assert(!m_frames.empty() && "Cannot synthesize frames in an empty stack");
+  StackFrame &prev_frame = *m_frames.back().get();
+
+  // Find the functions prev_frame and next_frame are stopped in. The function
+  // objects are needed to search the lazy call graph for intervening frames.
+  Function *prev_func =
+      prev_frame.GetSymbolContext(eSymbolContextFunction).function;
+  if (!prev_func) {
+    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find previous function");
+    return;
+  }
+  Function *next_func =
+      next_frame.GetSymbolContext(eSymbolContextFunction).function;
+  if (!next_func) {
+    LLDB_LOG(log, "SynthesizeTailCallFrames: can't find next function");
+    return;
+  }
+
+  // Try to find the unique sequence of (tail) calls which led from next_frame
+  // to prev_frame.
+  std::vector<Function *> path;
+  addr_t return_pc = next_reg_ctx_sp->GetPC();
+  Target &target = *target_sp.get();
+  ModuleList &images = next_frame.CalculateTarget()->GetImages();
+  FindInterveningFrames(*next_func, *prev_func, target, return_pc, path, images,
+                        log);
+
+  // Push synthetic tail call frames.
+  for (Function *callee : llvm::reverse(path)) {
+    uint32_t frame_idx = m_frames.size();
+    uint32_t concrete_frame_idx = next_frame.GetConcreteFrameIndex();
+    addr_t cfa = LLDB_INVALID_ADDRESS;
+    bool cfa_is_valid = false;
+    addr_t pc =
+        callee->GetAddressRange().GetBaseAddress().GetLoadAddress(&target);
+    SymbolContext sc;
+    callee->CalculateSymbolContext(&sc);
+    auto synth_frame = std::make_shared<StackFrame>(
+        m_thread.shared_from_this(), frame_idx, concrete_frame_idx, cfa,
+        cfa_is_valid, pc, StackFrame::Kind::Artificial, &sc);
+    m_frames.push_back(synth_frame);
+    LLDB_LOG(log, "Pushed frame {0}", callee->GetDisplayName());
+  }
+
+  // If any frames were created, adjust next_frame's index.
+  if (!path.empty())
+    next_frame.SetFrameIndex(m_frames.size());
+}
+
 void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
   // Do not fetch frames for an invalid thread.
   if (!m_thread.IsValid())
@@ -315,11 +488,15 @@ void StackFrameList::GetFramesUpTo(uint32_t end_idx) {
         break;
       }
       const bool cfa_is_valid = true;
-      const bool stop_id_is_valid = false;
-      const bool is_history_frame = false;
-      unwind_frame_sp.reset(new StackFrame(
-          m_thread.shared_from_this(), m_frames.size(), idx, cfa, cfa_is_valid,
-          pc, 0, stop_id_is_valid, is_history_frame, nullptr));
+      unwind_frame_sp.reset(
+          new StackFrame(m_thread.shared_from_this(), m_frames.size(), idx, cfa,
+                         cfa_is_valid, pc, StackFrame::Kind::Regular, nullptr));
+
+      // Create synthetic tail call frames between the previous frame and the
+      // newly-found frame. The new frame's index may change after this call,
+      // although its concrete index will stay the same.
+      SynthesizeTailCallFrames(*unwind_frame_sp.get());
+
       m_frames.push_back(unwind_frame_sp);
     }
@@ -491,11 +668,9 @@ StackFrameSP StackFrameList::GetFrameAtIndex(uint32_t idx) {
       addr_t pc, cfa;
       if (unwinder->GetFrameInfoAtIndex(idx, cfa, pc)) {
         const bool cfa_is_valid = true;
-        const bool stop_id_is_valid = false;
-        const bool is_history_frame = false;
-        frame_sp.reset(new StackFrame(
-            m_thread.shared_from_this(), idx, idx, cfa, cfa_is_valid, pc, 0,
-            stop_id_is_valid, is_history_frame, nullptr));
+        frame_sp.reset(new StackFrame(m_thread.shared_from_this(), idx, idx,
+                                      cfa, cfa_is_valid, pc,
+                                      StackFrame::Kind::Regular, nullptr));
 
         Function *function =
             frame_sp->GetSymbolContext(eSymbolContextFunction).function;
diff --git a/lldb/source/Target/ThreadPlanStepOut.cpp b/lldb/source/Target/ThreadPlanStepOut.cpp
index f2db6e4b6f4..9acc17c7622 100644
--- a/lldb/source/Target/ThreadPlanStepOut.cpp
+++ b/lldb/source/Target/ThreadPlanStepOut.cpp
@@ -48,18 +48,36 @@ ThreadPlanStepOut::ThreadPlanStepOut(
       m_return_addr(LLDB_INVALID_ADDRESS), m_stop_others(stop_others),
       m_immediate_step_from_function(nullptr),
       m_calculate_return_value(gather_return_value) {
+  Log *log(lldb_private::GetLogIfAllCategoriesSet(LIBLLDB_LOG_STEP));
   SetFlagsToDefault();
   SetupAvoidNoDebug(step_out_avoids_code_without_debug_info);
 
   m_step_from_insn = m_thread.GetRegisterContext()->GetPC(0);
 
-  StackFrameSP return_frame_sp(m_thread.GetStackFrameAtIndex(frame_idx + 1));
+  uint32_t return_frame_index = frame_idx + 1;
+  StackFrameSP return_frame_sp(
+      m_thread.GetStackFrameAtIndex(return_frame_index));
   StackFrameSP immediate_return_from_sp(
       m_thread.GetStackFrameAtIndex(frame_idx));
 
   if (!return_frame_sp || !immediate_return_from_sp)
     return; // we can't do anything here. ValidatePlan() will return false.
 
+  // While stepping out, behave as-if artificial frames are not present.
+  while (return_frame_sp->IsArtificial()) {
+    m_stepped_past_frames.push_back(return_frame_sp);
+
+    ++return_frame_index;
+    return_frame_sp = m_thread.GetStackFrameAtIndex(return_frame_index);
+
+    // We never expect to see an artificial frame without a regular ancestor.
+    // If this happens, log the issue and defensively refuse to step out.
+    if (!return_frame_sp) {
+      LLDB_LOG(log, "Can't step out of frame with artificial ancestors");
+      return;
+    }
+  }
+
   m_step_out_to_id = return_frame_sp->GetStackID();
   m_immediate_step_from_id = immediate_return_from_sp->GetStackID();
 
@@ -67,7 +85,7 @@ ThreadPlanStepOut::ThreadPlanStepOut(
   // have to be a little more careful. It is non-trivial to determine the real
   // "return code address" for an inlined frame, so we have to work our way to
   // that frame and then step out.
-  if (immediate_return_from_sp && immediate_return_from_sp->IsInlined()) {
+  if (immediate_return_from_sp->IsInlined()) {
     if (frame_idx > 0) {
       // First queue a plan that gets us to this inlined frame, and when we get
       // there we'll queue a second plan that walks us out of this frame.
@@ -82,7 +100,7 @@ ThreadPlanStepOut::ThreadPlanStepOut(
       // just do that now.
       QueueInlinedStepPlan(false);
     }
-  } else if (return_frame_sp) {
+  } else {
     // Find the return address and set a breakpoint there:
     // FIXME - can we do this more securely if we know first_insn?
@@ -198,6 +216,12 @@ void ThreadPlanStepOut::GetDescription(Stream *s,
       s->Printf(" using breakpoint site %d", m_return_bp_id);
     }
   }
+
+  s->Printf("\n");
+  for (StackFrameSP frame_sp : m_stepped_past_frames) {
+    s->Printf("Stepped out past: ");
+    frame_sp->DumpUsingSettingsFormat(s);
+  }
 }
 
 bool ThreadPlanStepOut::ValidatePlan(Stream *error) {
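Note on the search strategy: FindInterveningFrames above only trusts a chain of tail calls when it is unambiguous, i.e. the DFS records the first complete path from the first callee to the target function and gives up as soon as a second path (or a revisited node) turns up. The following standalone sketch models that behavior on a plain adjacency map; TailCallGraph, find_unique_path, and the node names are illustrative stand-ins, not LLDB API.

// Standalone sketch of the ambiguity-aware DFS used above. The call graph is
// modeled as a plain adjacency map; the names here are hypothetical.
#include <functional>
#include <map>
#include <set>
#include <string>
#include <vector>

using TailCallGraph = std::map<std::string, std::vector<std::string>>;

// Returns the unique path from `from` to `to` through tail-calling edges
// (excluding `to` itself), or an empty vector if no path exists or more than
// one path is possible.
std::vector<std::string> find_unique_path(const TailCallGraph &graph,
                                          const std::string &from,
                                          const std::string &to) {
  std::vector<std::string> active, solution;
  std::set<std::string> visited;
  bool ambiguous = false;

  // Recursive lambda mirroring DFS::dfs: keep the first complete path and
  // flag ambiguity if a second path (or a revisited node) is found.
  std::function<void(const std::string &)> dfs = [&](const std::string &node) {
    if (node == to) {
      if (solution.empty())
        solution = active;
      else
        ambiguous = true;
      return;
    }
    if (!visited.insert(node).second) {
      ambiguous = true; // Tail recursion or a second route: give up.
      return;
    }
    active.push_back(node);
    auto it = graph.find(node);
    if (it != graph.end())
      for (const std::string &callee : it->second) {
        dfs(callee);
        if (ambiguous)
          return;
      }
    active.pop_back();
  };

  dfs(from);
  return ambiguous ? std::vector<std::string>() : solution;
}

For a graph {{"entry", {"helper"}}, {"helper", {"sink"}}}, find_unique_path(graph, "entry", "sink") yields {"entry", "helper"}; adding a second route to "sink" makes the result empty, which mirrors how the patch refuses to synthesize frames it cannot prove.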

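A second detail worth calling out: the first-level call-edge lookup uses std::lower_bound, which is only valid if the edges come back sorted by return PC address. A minimal model of that lookup, with a hypothetical Edge struct standing in for LLDB's CallEdge:

// Minimal model of the sorted-edge lookup: find the call edge whose return
// address equals return_pc. "Edge" and "find_edge_by_return_pc" are
// illustrative, not LLDB API.
#include <algorithm>
#include <cstdint>
#include <vector>

struct Edge {
  uint64_t return_pc; // Address of the instruction after the call.
  int callee_id;      // Stand-in for the resolved callee function.
};

// Edges must be sorted by return_pc; otherwise lower_bound's answer is
// meaningless and the lookup silently fails.
const Edge *find_edge_by_return_pc(const std::vector<Edge> &sorted_edges,
                                   uint64_t return_pc) {
  auto it = std::lower_bound(
      sorted_edges.begin(), sorted_edges.end(), return_pc,
      [](const Edge &edge, uint64_t pc) { return edge.return_pc < pc; });
  if (it == sorted_edges.end() || it->return_pc != return_pc)
    return nullptr; // No call site returns to this PC.
  return &*it;
}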

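On the ThreadPlanStepOut side, the change boils down to walking up the frame list past any artificial frames, remembering what was skipped so GetDescription can report it, and refusing to step out when only artificial ancestors remain. A simplified standalone version of that selection loop, using a plain Frame struct rather than LLDB's StackFrame (names are illustrative):

// Simplified model of the step-out target selection: skip artificial frames,
// remember them, and leave the target null if every ancestor is artificial.
#include <memory>
#include <vector>

struct Frame {
  bool artificial = false;
  int id = 0;
};

struct StepOutTarget {
  std::shared_ptr<Frame> return_frame;         // Frame to step out to.
  std::vector<std::shared_ptr<Frame>> skipped; // Artificial frames passed over.
};

StepOutTarget pick_step_out_target(
    const std::vector<std::shared_ptr<Frame>> &frames, size_t frame_idx) {
  StepOutTarget target;
  size_t return_idx = frame_idx + 1;
  while (return_idx < frames.size() && frames[return_idx]->artificial) {
    target.skipped.push_back(frames[return_idx]);
    ++return_idx;
  }
  if (return_idx < frames.size())
    target.return_frame = frames[return_idx];
  // If return_frame is null, only artificial ancestors were found; the caller
  // should refuse to step out, as the patch does.
  return target;
}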