/*
 * Copyright (c) 2016-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stddef.h>
#include <stdint.h>

#include <kern/assert.h>
#include <kern/backtrace.h>
#include <kern/cambria_layout.h>
#include <kern/thread.h>
#include <sys/errno.h>
#include <vm/vm_map.h>

#if defined(__arm__) || defined(__arm64__)
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#endif

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

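/*
 * Under XNU_MONITOR, the PPL (page protection layer) runs on dedicated
 * stacks bounded by pmap_stacks_start and pmap_stacks_end, so a valid
 * frame pointer may point there rather than into the thread's kernel
 * stack.
 */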
#if XNU_MONITOR
#define IN_PPLSTK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)pmap_stacks_start) && \
	((uintptr_t)(__addr) < (uintptr_t)pmap_stacks_end))
#endif

unsigned int __attribute__((noinline))
backtrace(uintptr_t *bt, unsigned int max_frames, bool *was_truncated_out)
{
	return backtrace_frame(bt, max_frames, __builtin_frame_address(0),
	           was_truncated_out);
}
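
/*
 * Example: capturing and printing the current kernel stack (an
 * illustrative sketch; FRAMES_MAX and the printf consumer are
 * hypothetical, not part of this interface):
 *
 *	uintptr_t frames[FRAMES_MAX];
 *	bool truncated = false;
 *	unsigned int n = backtrace(frames, FRAMES_MAX, &truncated);
 *	for (unsigned int i = 0; i < n; i++) {
 *		printf("frame[%u] = %#lx\n", i, (unsigned long)frames[i]);
 *	}
 *	if (truncated) {
 *		printf("... truncated at %u frames\n", n);
 *	}
 */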

/*
 * This function captures a backtrace from the current stack and returns the
 * number of frames captured, limited by max_frames and starting at
 * start_frame.  It's fast because it does no checking to make sure there
 * isn't bad data.  Since it's only called from threads that we're going to
 * keep executing, if there's bad data we were going to die eventually.  If
 * this function is inlined, it doesn't record the frame of the function
 * it's inside (because there's no stack frame).
 */
unsigned int __attribute__((noinline, not_tail_called))
backtrace_frame(uintptr_t *bt, unsigned int max_frames, void *start_frame,
    bool *was_truncated_out)
{
	thread_t thread = current_thread();
	uintptr_t *fp;
	unsigned int frame_index = 0;
	uintptr_t top, bottom;
	bool in_valid_stack;

	assert(bt != NULL);
	assert(max_frames > 0);

	fp = start_frame;
	bottom = thread->kernel_stack;
	top = bottom + kernel_stack_size;

#define IN_STK_BOUNDS(__addr) \
	(((uintptr_t)(__addr) >= (uintptr_t)bottom) && \
	((uintptr_t)(__addr) < (uintptr_t)top))

	in_valid_stack = IN_STK_BOUNDS(fp);
#if XNU_MONITOR
	in_valid_stack |= IN_PPLSTK_BOUNDS(fp);
#endif /* XNU_MONITOR */

	if (!in_valid_stack) {
		fp = NULL;
	}

	while (fp != NULL && frame_index < max_frames) {
		uintptr_t *next_fp = (uintptr_t *)*fp;
		/* the return address is stored one word above the frame pointer */
		uintptr_t ret_addr = *(fp + 1);

		/*
		 * If the frame pointer is 0, backtracing has reached the top of
		 * the stack and there is no return address.  Some stacks might not
		 * have set this up, so bounds check, as well.
		 */
		in_valid_stack = IN_STK_BOUNDS(next_fp);
#if XNU_MONITOR
		in_valid_stack |= IN_PPLSTK_BOUNDS(next_fp);
#endif /* XNU_MONITOR */

		if (next_fp == NULL || !in_valid_stack) {
			break;
		}

#if defined(HAS_APPLE_PAC)
		/* return addresses are signed per the arm64e ABI; strip the signature */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
#if XNU_MONITOR
			bool fp_in_pplstack = IN_PPLSTK_BOUNDS(fp);
			bool fp_in_kstack = IN_STK_BOUNDS(fp);
			bool next_fp_in_pplstack = IN_PPLSTK_BOUNDS(next_fp);
			bool next_fp_in_kstack = IN_STK_BOUNDS(next_fp);

			/*
			 * This check is verbose; it is basically checking whether
			 * we are switching between the kernel stack and the cpu
			 * stack.  If so, we ignore the fact that fp has switched
			 * directions (as it is a symptom of switching stacks).
			 */
			if (!(fp_in_pplstack && next_fp_in_kstack) &&
			    !(fp_in_kstack && next_fp_in_pplstack)) {
				break;
			}
#else /* XNU_MONITOR */
			break;
#endif /* !XNU_MONITOR */
		}
		fp = next_fp;
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != NULL && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	return frame_index;
#undef IN_STK_BOUNDS
}
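
/*
 * For reference, the frame record layout that backtrace_frame() walks
 * (the standard frame-pointer chain on x86_64 and arm64; illustrative
 * only, since the exact layout is defined by each architecture's ABI):
 *
 *	higher addresses
 *	+------------------+
 *	| return address   |  <- fp + 1 (one word above the frame pointer)
 *	+------------------+
 *	| caller's fp      |  <- fp (links to the next frame up the stack)
 *	+------------------+
 *	lower addresses (stacks grow down)
 */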

#if defined(__x86_64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	x86_saved_state_t *state;
	bool state_64;
	uint64_t cs;

	state = current_cpu_datap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	state_64 = is_saved_state64(state);

	if (state_64) {
		cs = saved_state64(state)->isf.cs;
	} else {
		cs = saved_state32(state)->cs;
	}
	/* return early if we interrupted a thread in user space */
	if ((cs & SEL_PL) == SEL_PL_U) {
		return KERN_FAILURE;
	}

	if (state_64) {
		*pc = saved_state64(state)->isf.rip;
		*fp = saved_state64(state)->rbp;
	} else {
		*pc = saved_state32(state)->eip;
		*fp = saved_state32(state)->ebp;
	}
	return KERN_SUCCESS;
}

#elif defined(__arm64__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if we interrupted a thread in user space */
	if (PSR64_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#elif defined(__arm__)

static kern_return_t
interrupted_kernel_pc_fp(uintptr_t *pc, uintptr_t *fp)
{
	struct arm_saved_state *state;

	state = getCpuDatap()->cpu_int_state;
	if (!state) {
		return KERN_FAILURE;
	}

	/* return early if we interrupted a thread in user space */
	if (PSR_IS_USER(get_saved_state_cpsr(state))) {
		return KERN_FAILURE;
	}

	*pc = get_saved_state_pc(state);
	*fp = get_saved_state_fp(state);
	return KERN_SUCCESS;
}

#else /* defined(__arm__) */
#error "interrupted_kernel_pc_fp: unsupported architecture"
#endif /* !defined(__arm__) */

unsigned int
backtrace_interrupted(uintptr_t *bt, unsigned int max_frames,
    bool *was_truncated_out)
{
	uintptr_t pc;
	uintptr_t fp;
	kern_return_t kr;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert(ml_at_interrupt_context() == TRUE);

	kr = interrupted_kernel_pc_fp(&pc, &fp);
	if (kr != KERN_SUCCESS) {
		return 0;
	}

	bt[0] = pc;
	if (max_frames == 1) {
		return 1;
	}

	return backtrace_frame(bt + 1, max_frames - 1, (void *)fp,
	           was_truncated_out) + 1;
}
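
/*
 * Example: sampling the interrupted thread's kernel stack from an
 * interrupt handler (an illustrative sketch; FRAMES_MAX and
 * record_sample() are hypothetical):
 *
 *	uintptr_t frames[FRAMES_MAX];
 *	bool truncated = false;
 *	unsigned int n = backtrace_interrupted(frames, FRAMES_MAX, &truncated);
 *	if (n > 0) {
 *		// frames[0] is the interrupted PC; the rest are return
 *		// addresses from the interrupted thread's stack
 *		record_sample(frames, n);
 *	}
 */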

unsigned int
backtrace_user(uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out)
{
	return backtrace_thread_user(current_thread(), bt, max_frames,
	           error_out, user_64_out, was_truncated_out, true);
}

unsigned int
backtrace_thread_user(void *thread, uintptr_t *bt, unsigned int max_frames,
    int *error_out, bool *user_64_out, bool *was_truncated_out,
    __unused bool faults_permitted)
{
	bool user_64;
	uintptr_t pc = 0, fp = 0, next_fp = 0;
	vm_map_t map = NULL, old_map = NULL;
	unsigned int frame_index = 0;
	int err = 0;
	size_t frame_size = 0;

	assert(bt != NULL);
	assert(max_frames > 0);
	assert((max_frames == 1) || (faults_permitted == true));

#if defined(__x86_64__)

	/* don't allow a malformed user stack to copyin arbitrary kernel data */
#define INVALID_USER_FP(FP) ((FP) == 0 || !IS_USERADDR64_CANONICAL((FP)))

	x86_saved_state_t *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	if (user_64) {
		pc = saved_state64(state)->isf.rip;
		fp = saved_state64(state)->rbp;
	} else {
		pc = saved_state32(state)->eip;
		fp = saved_state32(state)->ebp;
	}

#elif defined(__arm64__)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = is_saved_state64(state);
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

	/*
	 * ARM expects stack frames to be aligned to 16 bytes, but the check
	 * below only enforces 4-byte (word) alignment of the frame pointer.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

#elif defined(__arm__)

	/*
	 * ARM expects stack frames to be aligned to 16 bytes, but the check
	 * below only enforces 4-byte (word) alignment of the frame pointer.
	 */
#define INVALID_USER_FP(FP) ((FP) == 0 || ((FP) & 0x3UL) != 0UL)

	struct arm_saved_state *state = get_user_regs(thread);
	if (!state) {
		return EINVAL;
	}

	user_64 = false;
	pc = get_saved_state_pc(state);
	fp = get_saved_state_fp(state);

#else /* defined(__arm__) */
#error "backtrace_thread_user: unsupported architecture"
#endif /* !defined(__arm__) */

	bt[frame_index++] = pc;

	if (frame_index >= max_frames) {
		goto out;
	}

	if (INVALID_USER_FP(fp)) {
		goto out;
	}

	assert(ml_get_interrupts_enabled() == TRUE);
	if (!ml_get_interrupts_enabled()) {
		goto out;
	}

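	/*
	 * A user frame record is a pair of words: the saved frame pointer
	 * followed by the return address.  The union below describes both
	 * the 64-bit and 32-bit layouts; frame_size selects which half
	 * copyin() fills in.
	 */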
	union {
		struct {
			uint64_t fp;
			uint64_t ret;
		} u64;
		struct {
			uint32_t fp;
			uint32_t ret;
		} u32;
	} frame;

	frame_size = 2 * (user_64 ? 8 : 4);

	/* switch to the correct map, for copyin */
	if (thread != current_thread()) {
		map = get_task_map_reference(get_threadtask(thread));
		if (map == NULL) {
			goto out;
		}
		old_map = vm_map_switch(map);
	} else {
		map = NULL;
	}

	while (fp != 0 && frame_index < max_frames) {
		err = copyin(fp, (char *)&frame, frame_size);
		if (err) {
			if (was_truncated_out) {
				*was_truncated_out = true;
			}
			goto out;
		}

		next_fp = user_64 ? frame.u64.fp : frame.u32.fp;

		if (INVALID_USER_FP(next_fp)) {
			break;
		}

		uintptr_t ret_addr = user_64 ? frame.u64.ret : frame.u32.ret;
#if defined(HAS_APPLE_PAC)
		/* return addresses are signed per the arm64e ABI; strip the signature */
		bt[frame_index++] = (uintptr_t)ptrauth_strip((void *)ret_addr,
		    ptrauth_key_return_address);
#else /* defined(HAS_APPLE_PAC) */
		bt[frame_index++] = ret_addr;
#endif /* !defined(HAS_APPLE_PAC) */

		/* stacks grow down; backtracing should be moving to higher addresses */
		if (next_fp <= fp) {
			break;
		}
		fp = next_fp;
	}

out:
	if (map) {
		(void)vm_map_switch(old_map);
		vm_map_deallocate(map);
	}

	/* NULL-terminate the list, if space is available */
	if (frame_index != max_frames) {
		bt[frame_index] = 0;
	}

	if (fp != 0 && frame_index == max_frames && was_truncated_out) {
		*was_truncated_out = true;
	}

	if (user_64_out) {
		*user_64_out = user_64;
	}
	if (error_out) {
		*error_out = err;
	}

	return frame_index;
#undef INVALID_USER_FP
}
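
/*
 * Example: capturing a user backtrace from the current thread (an
 * illustrative sketch; FRAMES_MAX and log_user_stack() are
 * hypothetical):
 *
 *	uintptr_t frames[FRAMES_MAX];
 *	int error = 0;
 *	bool user_64 = false, truncated = false;
 *	unsigned int n = backtrace_user(frames, FRAMES_MAX, &error,
 *	    &user_64, &truncated);
 *	if (error == 0) {
 *		// entries have any arm64e return-address signatures stripped
 *		log_user_stack(frames, n, user_64);
 *	}
 */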