// stackwalker_amd64.cc
// Copyright 2010 Google LLC
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google LLC nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// stackwalker_amd64.cc: amd64-specific stackwalker.
//
// See stackwalker_amd64.h for documentation.
32 // 33 // Author: Mark Mentovai, Ted Mielczarek 34 35 #ifdef HAVE_CONFIG_H 36 #include <config.h> // Must come first 37 #endif 38 39 #include <assert.h> 40 41 #include "common/scoped_ptr.h" 42 #include "google_breakpad/processor/call_stack.h" 43 #include "google_breakpad/processor/memory_region.h" 44 #include "google_breakpad/processor/source_line_resolver_interface.h" 45 #include "google_breakpad/processor/stack_frame_cpu.h" 46 #include "google_breakpad/processor/system_info.h" 47 #include "processor/cfi_frame_info.h" 48 #include "processor/logging.h" 49 #include "processor/stackwalker_amd64.h" 50 51 namespace google_breakpad { 52 53 54 const StackwalkerAMD64::CFIWalker::RegisterSet 55 StackwalkerAMD64::cfi_register_map_[] = { 56 // It may seem like $rip and $rsp are callee-saves, because the callee is 57 // responsible for having them restored upon return. But the callee_saves 58 // flags here really means that the walker should assume they're 59 // unchanged if the CFI doesn't mention them --- clearly wrong for $rip 60 // and $rsp. 
61 { "$rax", NULL, false, 62 StackFrameAMD64::CONTEXT_VALID_RAX, &MDRawContextAMD64::rax }, 63 { "$rdx", NULL, false, 64 StackFrameAMD64::CONTEXT_VALID_RDX, &MDRawContextAMD64::rdx }, 65 { "$rcx", NULL, false, 66 StackFrameAMD64::CONTEXT_VALID_RCX, &MDRawContextAMD64::rcx }, 67 { "$rbx", NULL, true, 68 StackFrameAMD64::CONTEXT_VALID_RBX, &MDRawContextAMD64::rbx }, 69 { "$rsi", NULL, false, 70 StackFrameAMD64::CONTEXT_VALID_RSI, &MDRawContextAMD64::rsi }, 71 { "$rdi", NULL, false, 72 StackFrameAMD64::CONTEXT_VALID_RDI, &MDRawContextAMD64::rdi }, 73 { "$rbp", NULL, true, 74 StackFrameAMD64::CONTEXT_VALID_RBP, &MDRawContextAMD64::rbp }, 75 { "$rsp", ".cfa", false, 76 StackFrameAMD64::CONTEXT_VALID_RSP, &MDRawContextAMD64::rsp }, 77 { "$r8", NULL, false, 78 StackFrameAMD64::CONTEXT_VALID_R8, &MDRawContextAMD64::r8 }, 79 { "$r9", NULL, false, 80 StackFrameAMD64::CONTEXT_VALID_R9, &MDRawContextAMD64::r9 }, 81 { "$r10", NULL, false, 82 StackFrameAMD64::CONTEXT_VALID_R10, &MDRawContextAMD64::r10 }, 83 { "$r11", NULL, false, 84 StackFrameAMD64::CONTEXT_VALID_R11, &MDRawContextAMD64::r11 }, 85 { "$r12", NULL, true, 86 StackFrameAMD64::CONTEXT_VALID_R12, &MDRawContextAMD64::r12 }, 87 { "$r13", NULL, true, 88 StackFrameAMD64::CONTEXT_VALID_R13, &MDRawContextAMD64::r13 }, 89 { "$r14", NULL, true, 90 StackFrameAMD64::CONTEXT_VALID_R14, &MDRawContextAMD64::r14 }, 91 { "$r15", NULL, true, 92 StackFrameAMD64::CONTEXT_VALID_R15, &MDRawContextAMD64::r15 }, 93 { "$rip", ".ra", false, 94 StackFrameAMD64::CONTEXT_VALID_RIP, &MDRawContextAMD64::rip }, 95 }; 96 97 StackwalkerAMD64::StackwalkerAMD64(const SystemInfo* system_info, 98 const MDRawContextAMD64* context, 99 MemoryRegion* memory, 100 const CodeModules* modules, 101 StackFrameSymbolizer* resolver_helper) 102 : Stackwalker(system_info, memory, modules, resolver_helper), 103 context_(context), 104 cfi_walker_(cfi_register_map_, 105 (sizeof(cfi_register_map_) / sizeof(cfi_register_map_[0]))) { 106 } 107 108 uint64_t 
StackFrameAMD64::ReturnAddress() const { 109 assert(context_validity & StackFrameAMD64::CONTEXT_VALID_RIP); 110 return context.rip; 111 } 112 113 StackFrame* StackwalkerAMD64::GetContextFrame() { 114 if (!context_) { 115 BPLOG(ERROR) << "Can't get context frame without context"; 116 return NULL; 117 } 118 119 StackFrameAMD64* frame = new StackFrameAMD64(); 120 121 // The instruction pointer is stored directly in a register, so pull it 122 // straight out of the CPU context structure. 123 frame->context = *context_; 124 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_ALL; 125 frame->trust = StackFrame::FRAME_TRUST_CONTEXT; 126 frame->instruction = frame->context.rip; 127 128 return frame; 129 } 130 131 StackFrameAMD64* StackwalkerAMD64::GetCallerByCFIFrameInfo( 132 const vector<StackFrame*>& frames, 133 CFIFrameInfo* cfi_frame_info) { 134 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back()); 135 136 scoped_ptr<StackFrameAMD64> frame(new StackFrameAMD64()); 137 if (!cfi_walker_ 138 .FindCallerRegisters(*memory_, *cfi_frame_info, 139 last_frame->context, last_frame->context_validity, 140 &frame->context, &frame->context_validity)) 141 return NULL; 142 143 // Make sure we recovered all the essentials. 144 static const int essentials = (StackFrameAMD64::CONTEXT_VALID_RIP 145 | StackFrameAMD64::CONTEXT_VALID_RSP); 146 if ((frame->context_validity & essentials) != essentials) 147 return NULL; 148 149 if (!frame->context.rip || !frame->context.rsp) { 150 BPLOG(ERROR) << "invalid rip/rsp"; 151 return NULL; 152 } 153 154 frame->trust = StackFrame::FRAME_TRUST_CFI; 155 return frame.release(); 156 } 157 158 // Returns true if `ptr` is not in x86-64 canonical form. 
159 // https://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details 160 static bool is_non_canonical(uint64_t ptr) { 161 return ptr > 0x7FFFFFFFFFFF && ptr < 0xFFFF800000000000; 162 } 163 164 StackFrameAMD64* StackwalkerAMD64::GetCallerByFramePointerRecovery( 165 const vector<StackFrame*>& frames) { 166 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back()); 167 uint64_t last_rbp = last_frame->context.rbp; 168 169 // Assume the presence of a frame pointer. This is not mandated by the 170 // AMD64 ABI, c.f. section 3.2.2 footnote 7, though it is typical for 171 // compilers to still preserve the frame pointer and not treat %rbp as a 172 // general purpose register. 173 // 174 // With this assumption, the CALL instruction pushes the return address 175 // onto the stack and sets %rip to the procedure to enter. The procedure 176 // then establishes the stack frame with a prologue that PUSHes the current 177 // %rbp onto the stack, MOVes the current %rsp to %rbp, and then allocates 178 // space for any local variables. Using this procedure linking information, 179 // it is possible to locate frame information for the callee: 180 // 181 // %caller_rsp = *(%callee_rbp + 16) 182 // %caller_rip = *(%callee_rbp + 8) 183 // %caller_rbp = *(%callee_rbp) 184 185 // If rbp is not 8-byte aligned it can't be a frame pointer. 186 if (last_rbp % 8 != 0) { 187 return NULL; 188 } 189 190 uint64_t caller_rip, caller_rbp; 191 if (memory_->GetMemoryAtAddress(last_rbp + 8, &caller_rip) && 192 memory_->GetMemoryAtAddress(last_rbp, &caller_rbp)) { 193 uint64_t caller_rsp = last_rbp + 16; 194 195 // If the recovered rip is not a canonical address it can't be 196 // the return address, so rbp must not have been a frame pointer. 
197 if (is_non_canonical(caller_rip)) { 198 return NULL; 199 } 200 201 // Check that rbp is within the right frame 202 if (caller_rsp <= last_rbp || caller_rbp < caller_rsp) { 203 return NULL; 204 } 205 206 // Sanity check that resulting rbp is still inside stack memory. 207 uint64_t unused; 208 if (!memory_->GetMemoryAtAddress(caller_rbp, &unused)) { 209 return NULL; 210 } 211 212 StackFrameAMD64* frame = new StackFrameAMD64(); 213 frame->trust = StackFrame::FRAME_TRUST_FP; 214 frame->context = last_frame->context; 215 frame->context.rip = caller_rip; 216 frame->context.rsp = caller_rsp; 217 frame->context.rbp = caller_rbp; 218 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP | 219 StackFrameAMD64::CONTEXT_VALID_RSP | 220 StackFrameAMD64::CONTEXT_VALID_RBP; 221 return frame; 222 } 223 224 return NULL; 225 } 226 227 StackFrameAMD64* StackwalkerAMD64::GetCallerBySimulatingReturn( 228 const vector<StackFrame*>& frames) { 229 assert(frames.back()->trust == StackFrame::FRAME_TRUST_CONTEXT); 230 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back()); 231 uint64_t last_rsp = last_frame->context.rsp; 232 uint64_t caller_rip_address, caller_rip; 233 int searchwords = 1; 234 if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip, 235 searchwords)) { 236 // No plausible return address at the top of the stack. Unable to simulate 237 // a return. 238 return NULL; 239 } 240 241 // Create a new stack frame (ownership will be transferred to the caller) 242 // and fill it in. 243 StackFrameAMD64* frame = new StackFrameAMD64(); 244 245 frame->trust = StackFrame::FRAME_TRUST_LEAF; 246 frame->context = last_frame->context; 247 frame->context.rip = caller_rip; 248 // The caller's %rsp is directly underneath the return address pushed by 249 // the call. 
250 frame->context.rsp = caller_rip_address + 8; 251 frame->context_validity = last_frame->context_validity; 252 253 return frame; 254 } 255 256 StackFrameAMD64* StackwalkerAMD64::GetCallerByStackScan( 257 const vector<StackFrame*>& frames) { 258 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back()); 259 uint64_t last_rsp = last_frame->context.rsp; 260 uint64_t caller_rip_address, caller_rip; 261 262 if (!ScanForReturnAddress(last_rsp, &caller_rip_address, &caller_rip, 263 /*is_context_frame=*/last_frame->trust == 264 StackFrame::FRAME_TRUST_CONTEXT)) { 265 // No plausible return address was found. 266 return NULL; 267 } 268 269 // Create a new stack frame (ownership will be transferred to the caller) 270 // and fill it in. 271 StackFrameAMD64* frame = new StackFrameAMD64(); 272 273 frame->trust = StackFrame::FRAME_TRUST_SCAN; 274 frame->context = last_frame->context; 275 frame->context.rip = caller_rip; 276 // The caller's %rsp is directly underneath the return address pushed by 277 // the call. 278 frame->context.rsp = caller_rip_address + 8; 279 frame->context_validity = StackFrameAMD64::CONTEXT_VALID_RIP | 280 StackFrameAMD64::CONTEXT_VALID_RSP; 281 282 // Other unwinders give up if they don't have an %rbp value, so see if we 283 // can pass some plausible value on. 284 if (last_frame->context_validity & StackFrameAMD64::CONTEXT_VALID_RBP) { 285 // Functions typically push their caller's %rbp immediately upon entry, 286 // and then set %rbp to point to that. So if the callee's %rbp is 287 // pointing to the first word below the alleged return address, presume 288 // that the caller's %rbp is saved there. 
289 if (caller_rip_address - 8 == last_frame->context.rbp) { 290 uint64_t caller_rbp = 0; 291 if (memory_->GetMemoryAtAddress(last_frame->context.rbp, &caller_rbp) && 292 caller_rbp > caller_rip_address) { 293 frame->context.rbp = caller_rbp; 294 frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP; 295 } 296 } else if (last_frame->context.rbp >= caller_rip_address + 8) { 297 // If the callee's %rbp is plausible as a value for the caller's 298 // %rbp, presume that the callee left it unchanged. 299 frame->context.rbp = last_frame->context.rbp; 300 frame->context_validity |= StackFrameAMD64::CONTEXT_VALID_RBP; 301 } 302 } 303 304 return frame; 305 } 306 307 StackFrame* StackwalkerAMD64::GetCallerFrame(const CallStack* stack, 308 bool stack_scan_allowed) { 309 if (!memory_ || !stack) { 310 BPLOG(ERROR) << "Can't get caller frame without memory or stack"; 311 return NULL; 312 } 313 314 const vector<StackFrame*>& frames = *stack->frames(); 315 StackFrameAMD64* last_frame = static_cast<StackFrameAMD64*>(frames.back()); 316 scoped_ptr<StackFrameAMD64> new_frame; 317 318 // If we have CFI information, use it. 319 scoped_ptr<CFIFrameInfo> cfi_frame_info( 320 frame_symbolizer_->FindCFIFrameInfo(last_frame)); 321 if (cfi_frame_info.get()) 322 new_frame.reset(GetCallerByCFIFrameInfo(frames, cfi_frame_info.get())); 323 324 // If CFI was not available and this is a Windows x64 stack, check whether 325 // this is a leaf function which doesn't touch any callee-saved registers. 326 // According to https://reviews.llvm.org/D24748, LLVM doesn't generate unwind 327 // info for such functions. According to MSDN, leaf functions can be unwound 328 // simply by simulating a return. 329 if (!new_frame.get() && 330 last_frame->trust == StackFrame::FRAME_TRUST_CONTEXT && 331 system_info_->os_short == "windows") { 332 new_frame.reset(GetCallerBySimulatingReturn(frames)); 333 } 334 335 // If CFI was not available or failed, try using frame pointer recovery. 
336 // Never try to use frame pointer unwinding on Windows x64 stack. MSVC never 337 // generates code that works with frame pointer chasing, and LLVM does the 338 // same. Stack scanning would be better. 339 if (!new_frame.get() && system_info_->os_short != "windows") { 340 new_frame.reset(GetCallerByFramePointerRecovery(frames)); 341 } 342 343 // If all else fails, fall back to stack scanning. 344 if (stack_scan_allowed && !new_frame.get()) { 345 new_frame.reset(GetCallerByStackScan(frames)); 346 } 347 348 // If nothing worked, tell the caller. 349 if (!new_frame.get()) 350 return NULL; 351 352 if (system_info_->os_short == "nacl") { 353 // Apply constraints from Native Client's x86-64 sandbox. These 354 // registers have the 4GB-aligned sandbox base address (from r15) 355 // added to them, and only the bottom 32 bits are relevant for 356 // stack walking. 357 new_frame->context.rip = static_cast<uint32_t>(new_frame->context.rip); 358 new_frame->context.rsp = static_cast<uint32_t>(new_frame->context.rsp); 359 new_frame->context.rbp = static_cast<uint32_t>(new_frame->context.rbp); 360 } 361 362 // Should we terminate the stack walk? (end-of-stack or broken invariant) 363 if (TerminateWalk(new_frame->context.rip, new_frame->context.rsp, 364 last_frame->context.rsp, 365 /*first_unwind=*/last_frame->trust == 366 StackFrame::FRAME_TRUST_CONTEXT)) { 367 return NULL; 368 } 369 370 // new_frame->context.rip is the return address, which is the instruction 371 // after the CALL that caused us to arrive at the callee. Set 372 // new_frame->instruction to one less than that, so it points within the 373 // CALL instruction. See StackFrame::instruction for details, and 374 // StackFrameAMD64::ReturnAddress. 375 new_frame->instruction = new_frame->context.rip - 1; 376 377 return new_frame.release(); 378 } 379 380 } // namespace google_breakpad