/*
 * Copyright (c) 2013-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/mach_vm.h>
#include <mach/clock_types.h>
#include <sys/errno.h>
#include <sys/stackshot.h>
#ifdef IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#endif
#include <sys/appleapiopts.h>
#include <kern/debug.h>
#include <kern/block_hint.h>
#include <uuid/uuid.h>

#include <kdp/kdp_dyld.h>
#include <kdp/kdp_en_debugger.h>

#include <libsa/types.h>
#include <libkern/version.h>
#include <libkern/section_keywords.h>

#include <string.h> /* bcopy */

#include <kern/cambria_layout.h>
#include <kern/coalition.h>
#include <kern/processor.h>
#include <kern/host_statistics.h>
#include <kern/counter.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/task.h>
#include <kern/telemetry.h>
#include <kern/clock.h>
#include <kern/policy_internal.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>
#include <vm/vm_fault.h>
#include <vm/vm_shared_region.h>
#include <vm/vm_compressor.h>
#include <libkern/OSKextLibPrivate.h>
#include <os/log.h>

#if defined(__x86_64__)
#include <i386/mp.h>
#include <i386/cpu_threads.h>
#endif

#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#endif /* MONOTONIC */

#include <san/kasan.h>

#if DEBUG || DEVELOPMENT
# define STACKSHOT_COLLECTS_LATENCY_INFO 1
#else
# define STACKSHOT_COLLECTS_LATENCY_INFO 0
#endif /* DEBUG || DEVELOPMENT */

extern unsigned int not_in_kdp;


/* indicate to the compiler that some accesses are unaligned */
typedef uint64_t unaligned_u64 __attribute__((aligned(1)));

extern addr64_t kdp_vtophys(pmap_t pmap, addr64_t va);

int kdp_snapshot                            = 0;
static kern_return_t stack_snapshot_ret     = 0;
static uint32_t stack_snapshot_bytes_traced = 0;
static uint32_t stack_snapshot_bytes_uncompressed  = 0;

#if STACKSHOT_COLLECTS_LATENCY_INFO
static bool collect_latency_info = true;
#endif
static kcdata_descriptor_t stackshot_kcdata_p = NULL;
static void *stack_snapshot_buf;
static uint32_t stack_snapshot_bufsize;
int stack_snapshot_pid;
static uint64_t stack_snapshot_flags;
static uint64_t stack_snapshot_delta_since_timestamp;
static uint32_t stack_snapshot_pagetable_mask;
static boolean_t panic_stackshot;

static boolean_t stack_enable_faulting = FALSE;
static struct stackshot_fault_stats fault_stats;

static uint32_t stackshot_initial_estimate;
static uint64_t stackshot_duration_prior_abs;   /* prior attempts, abs */
static unaligned_u64 * stackshot_duration_outer;
static uint64_t stackshot_microsecs;

void * kernel_stackshot_buf   = NULL; /* Pointer to buffer for stackshots triggered from the kernel and retrieved later */
int kernel_stackshot_buf_size = 0;

void * stackshot_snapbuf = NULL; /* Used by stack_snapshot2 (to be removed) */

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
#endif

__private_extern__ void stackshot_init( void );
static boolean_t memory_iszero(void *addr, size_t size);
uint32_t                get_stackshot_estsize(uint32_t prev_size_hint);
kern_return_t           kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config,
    size_t stackshot_config_size, boolean_t stackshot_from_user);
kern_return_t           do_stackshot(void *);
void                    kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint64_t flags, kcdata_descriptor_t data_p, uint64_t since_timestamp, uint32_t pagetable_mask);
boolean_t               stackshot_thread_is_idle_worker_unsafe(thread_t thread);
static int              kdp_stackshot_kcdata_format(int pid, uint64_t trace_flags, uint32_t *pBytesTraced, uint32_t *pBytesUncompressed);
uint32_t                kdp_stack_snapshot_bytes_traced(void);
uint32_t                kdp_stack_snapshot_bytes_uncompressed(void);
static void             kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap);
static boolean_t        kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_result);
static int              kdp_copyin_string(task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results);
static boolean_t        kdp_copyin_word(task_t task, uint64_t addr, uint64_t *result, boolean_t try_fault, uint32_t *kdp_fault_results);
static uint64_t         proc_was_throttled_from_task(task_t task);
static void             stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t * waitinfo);
static int              stackshot_thread_has_valid_waitinfo(thread_t thread);
static void             stackshot_thread_turnstileinfo(thread_t thread, thread_turnstileinfo_t *tsinfo);
static int              stackshot_thread_has_valid_turnstileinfo(thread_t thread);

#if CONFIG_COALITIONS
static void             stackshot_coalition_jetsam_count(void *arg, int i, coalition_t coal);
static void             stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal);
#endif /* CONFIG_COALITIONS */

#if CONFIG_THREAD_GROUPS
static void             stackshot_thread_group_count(void *arg, int i, struct thread_group *tg);
static void             stackshot_thread_group_snapshot(void *arg, int i, struct thread_group *tg);
#endif /* CONFIG_THREAD_GROUPS */

extern uint32_t         workqueue_get_pwq_state_kdp(void *proc);

struct proc;
extern int              proc_pid(struct proc *p);
extern uint64_t         proc_uniqueid(void *p);
extern uint64_t         proc_was_throttled(void *p);
extern uint64_t         proc_did_throttle(void *p);
extern int              proc_exiting(void *p);
extern int              proc_in_teardown(void *p);
static uint64_t         proc_did_throttle_from_task(task_t task);
extern void             proc_name_kdp(task_t task, char * buf, int size);
extern int              proc_threadname_kdp(void * uth, char * buf, size_t size);
extern void             proc_starttime_kdp(void * p, uint64_t * tv_sec, uint64_t * tv_usec, uint64_t * abstime);
extern void             proc_archinfo_kdp(void* p, cpu_type_t* cputype, cpu_subtype_t* cpusubtype);
extern boolean_t        proc_binary_uuid_kdp(task_t task, uuid_t uuid);
extern int              memorystatus_get_pressure_status_kdp(void);
extern void             memorystatus_proc_flags_unsafe(void * v, boolean_t *is_dirty, boolean_t *is_dirty_tracked, boolean_t *allow_idle_exit);

extern int count_busy_buffers(void); /* must track with declaration in bsd/sys/buf_internal.h */
extern void bcopy_phys(addr64_t, addr64_t, vm_size_t);

#if CONFIG_TELEMETRY
extern kern_return_t stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval);
#endif /* CONFIG_TELEMETRY */

extern kern_return_t kern_stack_snapshot_with_reason(char* reason);
extern kern_return_t kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user);

/*
 * Validates that the given address is both a valid page and has
 * default caching attributes for the current map.  Returns
 * 0 if the address is invalid, and a kernel virtual address for
 * the given address if it is valid.
 */
vm_offset_t machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags);

#define KDP_FAULT_RESULT_PAGED_OUT   0x1 /* some data was unable to be retrieved */
#define KDP_FAULT_RESULT_TRIED_FAULT 0x2 /* tried to fault in data */
#define KDP_FAULT_RESULT_FAULTED_IN  0x4 /* successfully faulted in data */

/*
 * Looks up the physical translation for the given address in the target map, attempting
 * to fault data in if requested and it is not resident. Populates thread_trace_flags if requested
 * as well.
 */
vm_offset_t kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32_t *kdp_fault_results);

static size_t stackshot_strlcpy(char *dst, const char *src, size_t maxlen);
void stackshot_memcpy(void *dst, const void *src, size_t len);

/*
 * Clears caching information used by the above validation routine
 * (in case the current map has been changed or cleared).
 */
void machine_trace_thread_clear_validation_cache(void);

#define MAX_FRAMES 1000
#define MAX_LOADINFOS 500
#define TASK_IMP_WALK_LIMIT 20

typedef struct thread_snapshot *thread_snapshot_t;
typedef struct task_snapshot *task_snapshot_t;

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
extern kdp_send_t    kdp_en_send_pkt;
#endif

/*
 * Globals to support machine_trace_thread_get_kva.
 */
static vm_offset_t prev_target_page = 0;
static vm_offset_t prev_target_kva = 0;
static boolean_t validate_next_addr = TRUE;

/*
 * Stackshot locking and other defines.
 */
static LCK_GRP_DECLARE(stackshot_subsys_lck_grp, "stackshot_subsys_lock");
static LCK_MTX_DECLARE(stackshot_subsys_mutex, &stackshot_subsys_lck_grp);

#define STACKSHOT_SUBSYS_LOCK() lck_mtx_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_TRY_LOCK() lck_mtx_try_lock(&stackshot_subsys_mutex)
#define STACKSHOT_SUBSYS_UNLOCK() lck_mtx_unlock(&stackshot_subsys_mutex)

#define SANE_BOOTPROFILE_TRACEBUF_SIZE (64ULL * 1024ULL * 1024ULL)
#define SANE_TRACEBUF_SIZE (8ULL * 1024ULL * 1024ULL)

#define TRACEBUF_SIZE_PER_GB (1024ULL * 1024ULL)
#define GIGABYTES (1024ULL * 1024ULL * 1024ULL)

SECURITY_READ_ONLY_LATE(static uint32_t) max_tracebuf_size = SANE_TRACEBUF_SIZE;

/*
 * We currently set a ceiling of 3 milliseconds spent in the kdp fault path
 * for non-panic stackshots where faulting is requested.
 */
#define KDP_FAULT_PATH_MAX_TIME_PER_STACKSHOT_NSECS (3 * NSEC_PER_MSEC)

#define STACKSHOT_SUPP_SIZE (16 * 1024) /* Minimum stackshot size */
#define TASK_UUID_AVG_SIZE (16 * sizeof(uuid_t)) /* Average space consumed by UUIDs/task */

#ifndef ROUNDUP
#define ROUNDUP(x, y)            ((((x)+(y)-1)/(y))*(y))
#endif

#define STACKSHOT_QUEUE_LABEL_MAXSIZE  64

/*
 * Initialize the mutex governing access to the stack snapshot subsystem
 * and other stackshot related bits.
 */
__private_extern__ void
stackshot_init( void )
{
	mach_timebase_info_data_t timebase;

	clock_timebase_info(&timebase);
	fault_stats.sfs_system_max_fault_time = ((KDP_FAULT_PATH_MAX_TIME_PER_STACKSHOT_NSECS * timebase.denom) / timebase.numer);

	max_tracebuf_size = MAX(max_tracebuf_size, ((ROUNDUP(max_mem, GIGABYTES) / GIGABYTES) * TRACEBUF_SIZE_PER_GB));

	PE_parse_boot_argn("stackshot_maxsz", &max_tracebuf_size, sizeof(max_tracebuf_size));
}
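
/*
 * Worked example of the sizing above (a sketch; the constants are the
 * SANE_TRACEBUF_SIZE and TRACEBUF_SIZE_PER_GB values defined earlier).
 * On a machine with max_mem = 16 GB:
 *
 *	ROUNDUP(16 GB, GIGABYTES) / GIGABYTES = 16
 *	16 * TRACEBUF_SIZE_PER_GB             = 16 MB
 *	max_tracebuf_size = MAX(8 MB, 16 MB)  = 16 MB
 *
 * A stackshot_maxsz boot-arg, when present, then overrides the computed
 * value.
 */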

/*
 * Method for grabbing timer values safely, in the sense that no infinite loop will occur.
 * Certain flavors of the timer_grab function, which would seem to be the thing to use,
 * can loop infinitely if called while the timer is in the process of being updated.
 * Unfortunately, it is (rarely) possible to get inconsistent top and bottom halves of
 * the timer using this method. This seems insoluble, since stackshot runs in a context
 * where the timer might be half-updated, and has no way of yielding control just long
 * enough to finish the update.
 */

static uint64_t
safe_grab_timer_value(struct timer *t)
{
#if defined(__LP64__)
	return t->all_bits;
#else
	uint64_t time = t->high_bits; /* endian independent grab */
	time = (time << 32) | t->low_bits;
	return time;
#endif
}

/*
 * Called with interrupts disabled after stackshot context has been
 * initialized. Updates stack_snapshot_ret.
 */
static kern_return_t
stackshot_trap(void)
{
	kern_return_t   rv;

#if defined(__x86_64__)
	/*
	 * Since mp_rendezvous and stackshot both attempt to capture cpus then perform an
	 * operation, it's essential to apply mutual exclusion to the other when one
	 * mechanism is in operation, lest there be a deadlock as the mechanisms race to
	 * capture CPUs.
	 *
	 * Further, we assert that invoking stackshot from mp_rendezvous*() is not
	 * allowed, so we check to ensure there is no rendezvous in progress before
	 * trying to grab the lock (if there is, a deadlock will occur when we try to
	 * grab the lock).  This is accomplished by setting cpu_rendezvous_in_progress
	 * to TRUE in the mp rendezvous action function.  If stackshot_trap() is called
	 * from within the call chain of the mp rendezvous action, this flag will be
	 * set and can be used to detect the inevitable deadlock that would occur if
	 * this thread tried to grab the rendezvous lock.
	 */

	if (current_cpu_datap()->cpu_rendezvous_in_progress == TRUE) {
		panic("Calling stackshot from a rendezvous is not allowed!");
	}

	mp_rendezvous_lock();
#endif

	rv = DebuggerTrapWithState(DBOP_STACKSHOT, NULL, NULL, NULL, 0, NULL, FALSE, 0);

#if defined(__x86_64__)
	mp_rendezvous_unlock();
#endif
	return rv;
}


kern_return_t
stack_snapshot_from_kernel(int pid, void *buf, uint32_t size, uint64_t flags, uint64_t delta_since_timestamp, uint32_t pagetable_mask, unsigned *bytes_traced)
{
	kern_return_t error = KERN_SUCCESS;
	boolean_t istate;

#if DEVELOPMENT || DEBUG
	if (kern_feature_override(KF_STACKSHOT_OVRD) == TRUE) {
		/* return directly; the "out" path unlocks a mutex we do not hold yet */
		return KERN_NOT_SUPPORTED;
	}
#endif
	if ((buf == NULL) || (size <= 0) || (bytes_traced == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* cap an individual stackshot to max_tracebuf_size */
	if (size > max_tracebuf_size) {
		size = max_tracebuf_size;
	}

	/* Serialize tracing */
	if (flags & STACKSHOT_TRYLOCK) {
		if (!STACKSHOT_SUBSYS_TRY_LOCK()) {
			return KERN_LOCK_OWNED;
		}
	} else {
		STACKSHOT_SUBSYS_LOCK();
	}

	struct kcdata_descriptor kcdata;
	uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ?
	    KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT : KCDATA_BUFFER_BEGIN_STACKSHOT;

	error = kcdata_memory_static_init(&kcdata, (mach_vm_address_t)buf, hdr_tag, size,
	    KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER);
	if (error) {
		goto out;
	}

	stackshot_initial_estimate = 0;
	stackshot_duration_prior_abs = 0;
	stackshot_duration_outer = NULL;
	uint64_t time_start      = mach_absolute_time();

	istate = ml_set_interrupts_enabled(FALSE);

	/* Preload trace parameters */
	kdp_snapshot_preflight(pid, buf, size, flags, &kcdata,
	    delta_since_timestamp, pagetable_mask);

	/*
	 * Trap to the debugger to obtain a coherent stack snapshot; this populates
	 * the trace buffer
	 */
	error = stackshot_trap();

	ml_set_interrupts_enabled(istate);

	uint64_t time_end = mach_absolute_time();
	if (stackshot_duration_outer) {
		*stackshot_duration_outer = time_end - time_start;
	}
	*bytes_traced = kdp_stack_snapshot_bytes_traced();

out:
	stackshot_kcdata_p = NULL;
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
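
/*
 * Usage sketch for the in-kernel API above (hypothetical caller; the
 * buffer allocation, size, and flag choices are illustrative only):
 *
 *	unsigned bytes_traced = 0;
 *	uint32_t size = get_stackshot_estsize(0);
 *	void *buf = <size bytes allocated from kernel_map>;
 *
 *	kern_return_t kr = stack_snapshot_from_kernel(-1, buf, size,
 *	    STACKSHOT_KCDATA_FORMAT | STACKSHOT_TRYLOCK, 0, 0, &bytes_traced);
 *
 * A pid of -1 captures all tasks; STACKSHOT_TRYLOCK returns KERN_LOCK_OWNED
 * instead of blocking when another stackshot holds the subsystem lock.
 */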

#if CONFIG_TELEMETRY
kern_return_t
stack_microstackshot(user_addr_t tracebuf, uint32_t tracebuf_size, uint32_t flags, int32_t *retval)
{
	int error = KERN_SUCCESS;
	uint32_t bytes_traced = 0;

	*retval = -1;

	/*
	 * Control related operations
	 */
	if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_ENABLE) {
		telemetry_global_ctl(1);
		*retval = 0;
		goto exit;
	} else if (flags & STACKSHOT_GLOBAL_MICROSTACKSHOT_DISABLE) {
		telemetry_global_ctl(0);
		*retval = 0;
		goto exit;
	}

	/*
	 * Data related operations
	 */
	*retval = -1;

	if ((((void*)tracebuf) == NULL) || (tracebuf_size == 0)) {
		error = KERN_INVALID_ARGUMENT;
		goto exit;
	}

	STACKSHOT_SUBSYS_LOCK();

	if (flags & STACKSHOT_GET_MICROSTACKSHOT) {
		if (tracebuf_size > max_tracebuf_size) {
			error = KERN_INVALID_ARGUMENT;
			goto unlock_exit;
		}

		bytes_traced = tracebuf_size;
		error = telemetry_gather(tracebuf, &bytes_traced,
		    (flags & STACKSHOT_SET_MICROSTACKSHOT_MARK) ? TRUE : FALSE);
		*retval = (int)bytes_traced;
		goto unlock_exit;
	}

unlock_exit:
	STACKSHOT_SUBSYS_UNLOCK();
exit:
	return error;
}
#endif /* CONFIG_TELEMETRY */

/*
 * Return the estimated size of a stackshot based on the
 * number of currently running threads and tasks.
 */
uint32_t
get_stackshot_estsize(uint32_t prev_size_hint)
{
	vm_size_t thread_total;
	vm_size_t task_total;
	uint32_t estimated_size;
	size_t est_thread_size = sizeof(struct thread_snapshot);
	size_t est_task_size = sizeof(struct task_snapshot) + TASK_UUID_AVG_SIZE;

#if STACKSHOT_COLLECTS_LATENCY_INFO
	if (collect_latency_info) {
		est_thread_size += sizeof(struct stackshot_latency_thread);
		est_task_size += sizeof(struct stackshot_latency_task);
	}
#endif

	thread_total = (threads_count * est_thread_size);
	task_total = (tasks_count * est_task_size);

	estimated_size = (uint32_t) VM_MAP_ROUND_PAGE((thread_total + task_total + STACKSHOT_SUPP_SIZE), PAGE_MASK);
	if (estimated_size < prev_size_hint) {
		estimated_size = (uint32_t) VM_MAP_ROUND_PAGE(prev_size_hint, PAGE_MASK);
	}

	return estimated_size;
}
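
/*
 * For example (thread/task counts are illustrative): with 500 threads and
 * 300 tasks the estimate works out to
 *
 *	VM_MAP_ROUND_PAGE(500 * est_thread_size + 300 * est_task_size
 *	    + STACKSHOT_SUPP_SIZE, PAGE_MASK)
 *
 * and a larger prev_size_hint, page-rounded, takes precedence over the
 * computed estimate.
 */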

/*
 * stackshot_remap_buffer:	Utility function to remap bytes_traced bytes starting at stackshotbuf
 *				into the current task's user space and subsequently copy out the address
 *				at which the buffer has been mapped in user space to out_buffer_addr.
 *
 * Inputs:			stackshotbuf - pointer to the original buffer in the kernel's address space
 *				bytes_traced - length of the buffer to remap starting from stackshotbuf
 *				out_buffer_addr - pointer to a user-space placeholder that receives the address
 *				at which the buffer was mapped
 *				out_size_addr - pointer to a user-space placeholder that receives the size of
 *				the buffer
 *
 * Outputs:			ENOSPC if there is not enough free space in the task's address space to remap the buffer,
 *				EINVAL for all other errors returned by task_remap_buffer/mach_vm_remap,
 *				or an error from copyout
 */
static kern_return_t
stackshot_remap_buffer(void *stackshotbuf, uint32_t bytes_traced, uint64_t out_buffer_addr, uint64_t out_size_addr)
{
	int                     error = 0;
	mach_vm_offset_t        stackshotbuf_user_addr = (mach_vm_offset_t)NULL;
	vm_prot_t               cur_prot, max_prot;

	error = mach_vm_remap_kernel(get_task_map(current_task()), &stackshotbuf_user_addr, bytes_traced, 0,
	    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_NONE, kernel_map, (mach_vm_offset_t)stackshotbuf, FALSE, &cur_prot, &max_prot, VM_INHERIT_DEFAULT);
	/*
	 * If the call to mach_vm_remap fails, we return the appropriate converted error
	 */
	if (error == KERN_SUCCESS) {
		/*
		 * If we fail to copy out the address or size of the new buffer, we remove the buffer mapping that
		 * we just made in the task's user space.
		 */
		error = copyout(CAST_DOWN(void *, &stackshotbuf_user_addr), (user_addr_t)out_buffer_addr, sizeof(stackshotbuf_user_addr));
		if (error != KERN_SUCCESS) {
			mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
			return error;
		}
		error = copyout(&bytes_traced, (user_addr_t)out_size_addr, sizeof(bytes_traced));
		if (error != KERN_SUCCESS) {
			mach_vm_deallocate(get_task_map(current_task()), stackshotbuf_user_addr, (mach_vm_size_t)bytes_traced);
			return error;
		}
	}
	return error;
}

kern_return_t
kern_stack_snapshot_internal(int stackshot_config_version, void *stackshot_config, size_t stackshot_config_size, boolean_t stackshot_from_user)
{
	int error = 0;
	boolean_t prev_interrupt_state;
	uint32_t bytes_traced = 0;
	uint32_t stackshotbuf_size = 0;
	void * stackshotbuf = NULL;
	kcdata_descriptor_t kcdata_p = NULL;

	void * buf_to_free = NULL;
	int size_to_free = 0;

	/* Parsed arguments */
	uint64_t                out_buffer_addr;
	uint64_t                out_size_addr;
	int                     pid = -1;
	uint64_t                flags;
	uint64_t                since_timestamp;
	uint32_t                size_hint = 0;
	uint32_t                pagetable_mask = STACKSHOT_PAGETABLES_MASK_ALL;

	if (stackshot_config == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
#if DEVELOPMENT || DEBUG
	/* TBD: ask stackshot clients to avoid issuing stackshots in this
	 * configuration in lieu of the kernel feature override.
	 */
	if (kern_feature_override(KF_STACKSHOT_OVRD) == TRUE) {
		return KERN_NOT_SUPPORTED;
	}
#endif

	switch (stackshot_config_version) {
	case STACKSHOT_CONFIG_TYPE:
		if (stackshot_config_size != sizeof(stackshot_config_t)) {
			return KERN_INVALID_ARGUMENT;
		}
		stackshot_config_t *config = (stackshot_config_t *) stackshot_config;
		out_buffer_addr = config->sc_out_buffer_addr;
		out_size_addr = config->sc_out_size_addr;
		pid = config->sc_pid;
		flags = config->sc_flags;
		since_timestamp = config->sc_delta_timestamp;
		if (config->sc_size <= max_tracebuf_size) {
			size_hint = config->sc_size;
		}
		/*
		 * Retain the pre-sc_pagetable_mask behavior of STACKSHOT_PAGE_TABLES:
		 * dump every level if the pagetable_mask is not set.
		 */
		if (flags & STACKSHOT_PAGE_TABLES && config->sc_pagetable_mask) {
			pagetable_mask = config->sc_pagetable_mask;
		}
		break;
	default:
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * Currently, saving a kernel buffer and trylock are only supported from the
	 * internal/KEXT API.
	 */
	if (stackshot_from_user) {
		if (flags & (STACKSHOT_TRYLOCK | STACKSHOT_SAVE_IN_KERNEL_BUFFER | STACKSHOT_FROM_PANIC)) {
			return KERN_NO_ACCESS;
		}
#if !DEVELOPMENT && !DEBUG
		if (flags & (STACKSHOT_DO_COMPRESS)) {
			return KERN_NO_ACCESS;
		}
#endif
	} else {
		if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
			return KERN_NOT_SUPPORTED;
		}
	}

	if (!((flags & STACKSHOT_KCDATA_FORMAT) || (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER))) {
		return KERN_NOT_SUPPORTED;
	}

	/* Compressed delta stackshots or page dumps are not yet supported */
	if (((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) || (flags & STACKSHOT_PAGE_TABLES))
	    && (flags & STACKSHOT_DO_COMPRESS)) {
		return KERN_NOT_SUPPORTED;
	}

	/*
	 * If we're not saving the stackshot in a kernel buffer, we need user addresses
	 * to copy the buffer's mapping and size out to.
	 */
	if ((!out_buffer_addr || !out_size_addr) && !(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (since_timestamp != 0 && ((flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) == 0)) {
		return KERN_INVALID_ARGUMENT;
	}

#if MONOTONIC
	if (!mt_core_supported) {
		flags &= ~STACKSHOT_INSTRS_CYCLES;
	}
#else /* MONOTONIC */
	flags &= ~STACKSHOT_INSTRS_CYCLES;
#endif /* !MONOTONIC */

	STACKSHOT_SUBSYS_LOCK();

	if (flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER) {
		/*
		 * Don't overwrite an existing stackshot
		 */
		if (kernel_stackshot_buf != NULL) {
			error = KERN_MEMORY_PRESENT;
			goto error_exit;
		}
	} else if (flags & STACKSHOT_RETRIEVE_EXISTING_BUFFER) {
		if ((kernel_stackshot_buf == NULL) || (kernel_stackshot_buf_size <= 0)) {
			error = KERN_NOT_IN_SET;
			goto error_exit;
		}
		error = stackshot_remap_buffer(kernel_stackshot_buf, kernel_stackshot_buf_size,
		    out_buffer_addr, out_size_addr);
		/*
		 * If we successfully remapped the buffer into the user's address space, we
		 * set buf_to_free and size_to_free so the prior kernel mapping will be removed
		 * and then clear the kernel stackshot pointer and associated size.
		 */
		if (error == KERN_SUCCESS) {
			buf_to_free = kernel_stackshot_buf;
			size_to_free = (int) VM_MAP_ROUND_PAGE(kernel_stackshot_buf_size, PAGE_MASK);
			kernel_stackshot_buf = NULL;
			kernel_stackshot_buf_size = 0;
		}

		goto error_exit;
	}

	if (flags & STACKSHOT_GET_BOOT_PROFILE) {
		void *bootprofile = NULL;
		uint32_t len = 0;
#if CONFIG_TELEMETRY
		bootprofile_get(&bootprofile, &len);
#endif
		if (!bootprofile || !len) {
			error = KERN_NOT_IN_SET;
			goto error_exit;
		}
		error = stackshot_remap_buffer(bootprofile, len, out_buffer_addr, out_size_addr);
		goto error_exit;
	}

	stackshot_duration_prior_abs = 0;
	stackshotbuf_size = get_stackshot_estsize(size_hint);
	stackshot_initial_estimate = stackshotbuf_size;

	for (; stackshotbuf_size <= max_tracebuf_size; stackshotbuf_size <<= 1) {
		if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&stackshotbuf, stackshotbuf_size, VM_KERN_MEMORY_DIAG, KMA_ZERO) != KERN_SUCCESS) {
			error = KERN_RESOURCE_SHORTAGE;
			goto error_exit;
		}

		uint32_t hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT
		    : (flags & STACKSHOT_DO_COMPRESS) ? KCDATA_BUFFER_BEGIN_COMPRESSED
		    : KCDATA_BUFFER_BEGIN_STACKSHOT;
		kcdata_p = kcdata_memory_alloc_init((mach_vm_address_t)stackshotbuf, hdr_tag, stackshotbuf_size,
		    KCFLAG_USE_MEMCOPY | KCFLAG_NO_AUTO_ENDBUFFER);

		stackshot_duration_outer = NULL;
		uint64_t time_start      = mach_absolute_time();

		/* if compression was requested, allocate the extra zlib scratch area */
		if (flags & STACKSHOT_DO_COMPRESS) {
			hdr_tag = (flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) ? KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT
			    : KCDATA_BUFFER_BEGIN_STACKSHOT;
			error = kcdata_init_compress(kcdata_p, hdr_tag, stackshot_memcpy, KCDCT_ZLIB);
			if (error != KERN_SUCCESS) {
				os_log(OS_LOG_DEFAULT, "failed to initialize compression: %d!\n",
				    (int) error);
				goto error_exit;
			}
		}

		/*
		 * Disable interrupts and save the current interrupt state.
		 */
		prev_interrupt_state = ml_set_interrupts_enabled(FALSE);

		/*
		 * Load stackshot parameters.
		 */
		kdp_snapshot_preflight(pid, stackshotbuf, stackshotbuf_size, flags, kcdata_p, since_timestamp,
		    pagetable_mask);

		error = stackshot_trap();

		ml_set_interrupts_enabled(prev_interrupt_state);

		/* record the duration that interrupts were disabled */

		uint64_t time_end = mach_absolute_time();
		if (stackshot_duration_outer) {
			*stackshot_duration_outer = time_end - time_start;
		}

		if (error != KERN_SUCCESS) {
			if (kcdata_p != NULL) {
				kcdata_memory_destroy(kcdata_p);
				kcdata_p = NULL;
				stackshot_kcdata_p = NULL;
			}
			kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
			stackshotbuf = NULL;
			if (error == KERN_INSUFFICIENT_BUFFER_SIZE) {
				/*
				 * If we didn't allocate a big enough buffer, deallocate and try again.
				 */
				stackshot_duration_prior_abs +=
				    (time_end - time_start);
				continue;
			} else {
				goto error_exit;
			}
		}

		bytes_traced = kdp_stack_snapshot_bytes_traced();

		if (bytes_traced <= 0) {
			error = KERN_ABORTED;
			goto error_exit;
		}

		assert(bytes_traced <= stackshotbuf_size);
		if (!(flags & STACKSHOT_SAVE_IN_KERNEL_BUFFER)) {
			error = stackshot_remap_buffer(stackshotbuf, bytes_traced, out_buffer_addr, out_size_addr);
			goto error_exit;
		}

		/*
		 * Save the stackshot in the kernel buffer.
		 */
		kernel_stackshot_buf = stackshotbuf;
		kernel_stackshot_buf_size = bytes_traced;
		/*
		 * Figure out if we didn't use all the pages in the buffer. If so, we set buf_to_free to the beginning of
		 * the next page after the end of the stackshot in the buffer so that the kmem_free clips the buffer, and we
		 * update size_to_free for kmem_free accordingly.
		 */
		size_to_free = stackshotbuf_size - (int) VM_MAP_ROUND_PAGE(bytes_traced, PAGE_MASK);

		assert(size_to_free >= 0);

		if (size_to_free != 0) {
			buf_to_free = (void *)((uint64_t)stackshotbuf + stackshotbuf_size - size_to_free);
		}

		stackshotbuf = NULL;
		stackshotbuf_size = 0;
		goto error_exit;
	}

	if (stackshotbuf_size > max_tracebuf_size) {
		error = KERN_RESOURCE_SHORTAGE;
	}

error_exit:
	if (kcdata_p != NULL) {
		kcdata_memory_destroy(kcdata_p);
		kcdata_p = NULL;
		stackshot_kcdata_p = NULL;
	}

	if (stackshotbuf != NULL) {
		kmem_free(kernel_map, (vm_offset_t)stackshotbuf, stackshotbuf_size);
	}
	if (buf_to_free != NULL) {
		kmem_free(kernel_map, (vm_offset_t)buf_to_free, size_to_free);
	}
	STACKSHOT_SUBSYS_UNLOCK();
	return error;
}
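
/*
 * Sketch of the two-step kernel-buffer flow handled above (the direct call
 * is illustrative; user requests normally arrive via the stackshot syscall,
 * and the fields are the stackshot_config_t members parsed earlier). After
 * a kernel client captures with STACKSHOT_SAVE_IN_KERNEL_BUFFER, a later
 * request retrieves it:
 *
 *	stackshot_config_t config = {
 *		.sc_flags = STACKSHOT_KCDATA_FORMAT |
 *		    STACKSHOT_RETRIEVE_EXISTING_BUFFER,
 *		.sc_out_buffer_addr = <user address receiving the mapping>,
 *		.sc_out_size_addr   = <user address receiving the size>,
 *	};
 *	kern_stack_snapshot_internal(STACKSHOT_CONFIG_TYPE, &config,
 *	    sizeof(config), TRUE);
 *
 * This remaps kernel_stackshot_buf into the caller and clears the kernel
 * copy, so each saved stackshot can be retrieved exactly once.
 */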

/*
 * Cache stack snapshot parameters in preparation for a trace.
 */
void
kdp_snapshot_preflight(int pid, void * tracebuf, uint32_t tracebuf_size, uint64_t flags,
    kcdata_descriptor_t data_p, uint64_t since_timestamp, uint32_t pagetable_mask)
{
	uint64_t microsecs = 0, secs = 0;
	clock_get_calendar_microtime((clock_sec_t *)&secs, (clock_usec_t *)&microsecs);

	stackshot_microsecs = microsecs + (secs * USEC_PER_SEC);
	stack_snapshot_pid = pid;
	stack_snapshot_buf = tracebuf;
	stack_snapshot_bufsize = tracebuf_size;
	stack_snapshot_flags = flags;
	stack_snapshot_delta_since_timestamp = since_timestamp;
	stack_snapshot_pagetable_mask = pagetable_mask;

	panic_stackshot = ((flags & STACKSHOT_FROM_PANIC) != 0);

	assert(data_p != NULL);
	assert(stackshot_kcdata_p == NULL);
	stackshot_kcdata_p = data_p;

	stack_snapshot_bytes_traced = 0;
	stack_snapshot_bytes_uncompressed = 0;
}

void
panic_stackshot_reset_state(void)
{
	stackshot_kcdata_p = NULL;
}

boolean_t
stackshot_active(void)
{
	return stackshot_kcdata_p != NULL;
}

uint32_t
kdp_stack_snapshot_bytes_traced(void)
{
	return stack_snapshot_bytes_traced;
}

uint32_t
kdp_stack_snapshot_bytes_uncompressed(void)
{
	return stack_snapshot_bytes_uncompressed;
}

static boolean_t
memory_iszero(void *addr, size_t size)
{
	char *data = (char *)addr;
	for (size_t i = 0; i < size; i++) {
		if (data[i] != 0) {
			return FALSE;
		}
	}
	return TRUE;
}

#define kcd_end_address(kcd) ((void *)((uint64_t)((kcd)->kcd_addr_begin) + kcdata_memory_get_used_bytes((kcd))))
#define kcd_max_address(kcd) ((void *)((kcd)->kcd_addr_begin + (kcd)->kcd_length))
/*
 * Use of the kcd_exit_on_error(action) macro requires a local
 * 'kern_return_t error' variable and 'error_exit' label.
 */
#define kcd_exit_on_error(action)                      \
	do {                                               \
	        if (KERN_SUCCESS != (error = (action))) {      \
	                if (error == KERN_RESOURCE_SHORTAGE) {     \
	                        error = KERN_INSUFFICIENT_BUFFER_SIZE; \
	                }                                          \
	                goto error_exit;                           \
	        }                                              \
	} while (0); /* end kcd_exit_on_error */
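
/*
 * Typical shape of a recorder using kcd_exit_on_error() (a sketch mirroring
 * the kcdata_record_* functions below; the payload is a placeholder):
 *
 *	kern_return_t error = KERN_SUCCESS;
 *
 *	kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_IOSTATS,
 *	    sizeof(payload), &payload));
 *	...
 * error_exit:
 *	return error;
 *
 * Note that a KERN_RESOURCE_SHORTAGE from kcdata is rewritten to
 * KERN_INSUFFICIENT_BUFFER_SIZE, which the retry loop in
 * kern_stack_snapshot_internal() uses to double the buffer and try again.
 */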

static uint64_t
kcdata_get_task_ss_flags(task_t task)
{
	uint64_t ss_flags = 0;
	boolean_t task_64bit_addr = task_has_64Bit_addr(task);

	if (task_64bit_addr) {
		ss_flags |= kUser64_p;
	}
	if (!task->active || task_is_a_corpse(task) || proc_exiting(task->bsd_info)) {
		ss_flags |= kTerminatedSnapshot;
	}
	if (task->pidsuspended) {
		ss_flags |= kPidSuspended;
	}
	if (task->frozen) {
		ss_flags |= kFrozen;
	}
	if (task->effective_policy.tep_darwinbg == 1) {
		ss_flags |= kTaskDarwinBG;
	}
	if (task->requested_policy.trp_role == TASK_FOREGROUND_APPLICATION) {
		ss_flags |= kTaskIsForeground;
	}
	if (task->requested_policy.trp_boosted == 1) {
		ss_flags |= kTaskIsBoosted;
	}
	if (task->effective_policy.tep_sup_active == 1) {
		ss_flags |= kTaskIsSuppressed;
	}
#if CONFIG_MEMORYSTATUS
	boolean_t dirty = FALSE, dirty_tracked = FALSE, allow_idle_exit = FALSE;
	memorystatus_proc_flags_unsafe(task->bsd_info, &dirty, &dirty_tracked, &allow_idle_exit);
	if (dirty) {
		ss_flags |= kTaskIsDirty;
	}
	if (dirty_tracked) {
		ss_flags |= kTaskIsDirtyTracked;
	}
	if (allow_idle_exit) {
		ss_flags |= kTaskAllowIdleExit;
	}
#endif
	if (task->effective_policy.tep_tal_engaged) {
		ss_flags |= kTaskTALEngaged;
	}

	ss_flags |= (0x7 & workqueue_get_pwq_state_kdp(task->bsd_info)) << 17;

#if IMPORTANCE_INHERITANCE
	if (task->task_imp_base) {
		if (task->task_imp_base->iit_donor) {
			ss_flags |= kTaskIsImpDonor;
		}
		if (task->task_imp_base->iit_live_donor) {
			ss_flags |= kTaskIsLiveImpDonor;
		}
	}
#endif
	return ss_flags;
}

static kern_return_t
kcdata_record_shared_cache_info(kcdata_descriptor_t kcd, task_t task, unaligned_u64 *task_snap_ss_flags)
{
	kern_return_t error = KERN_SUCCESS;

	uint64_t shared_cache_slide = 0;
	uint64_t shared_cache_first_mapping = 0;
	uint32_t kdp_fault_results = 0;
	struct dyld_shared_cache_loadinfo shared_cache_data = {0};

	assert(task_snap_ss_flags != NULL);

	/* Get basic info about the shared region pointer, regardless of any failures */
	if (task->shared_region == NULL) {
		*task_snap_ss_flags |= kTaskSharedRegionNone;
	} else if (task->shared_region == primary_system_shared_region) {
		*task_snap_ss_flags |= kTaskSharedRegionSystem;
	} else {
		*task_snap_ss_flags |= kTaskSharedRegionOther;
	}

	if (task->shared_region && ml_validate_nofault((vm_offset_t)task->shared_region, sizeof(struct vm_shared_region))) {
		struct vm_shared_region *sr = task->shared_region;
		shared_cache_first_mapping = sr->sr_base_address + sr->sr_first_mapping;
	} else {
		*task_snap_ss_flags |= kTaskSharedRegionInfoUnavailable;
		goto error_exit;
	}

	/* We haven't copied in the shared region UUID yet as part of setup */
	if (!shared_cache_first_mapping || !task->shared_region->sr_uuid_copied) {
		goto error_exit;
	}

	/*
	 * No refcounting here, but we are in debugger context, so that should be safe.
	 */
	shared_cache_slide = task->shared_region->sr_slide;

	if (task->shared_region == primary_system_shared_region) {
		/* skip adding shared cache info -- it's the same as the system level one */
		goto error_exit;
	}

	/*
	 * Historically, this data was in a dyld_uuid_info_64 structure, but the
	 * naming of both the structure and fields for this use wasn't great.  The
	 * dyld_shared_cache_loadinfo structure has better names, but the same
	 * layout and content as the original.
	 *
	 * The imageSlidBaseAddress/sharedCacheUnreliableSlidBaseAddress field
	 * has been used inconsistently for STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
	 * entries; here, it's the slid first mapping, and we leave it that way
	 * for backwards compatibility.
	 */
	shared_cache_data.sharedCacheSlide = shared_cache_slide;
	stackshot_memcpy(&shared_cache_data.sharedCacheUUID, task->shared_region->sr_uuid, sizeof(task->shared_region->sr_uuid));
	shared_cache_data.sharedCacheUnreliableSlidBaseAddress = shared_cache_first_mapping;
	shared_cache_data.sharedCacheSlidFirstMapping = shared_cache_first_mapping;
	kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO, sizeof(shared_cache_data), &shared_cache_data));

error_exit:
	if (kdp_fault_results & KDP_FAULT_RESULT_PAGED_OUT) {
		*task_snap_ss_flags |= kTaskUUIDInfoMissing;
	}

	if (kdp_fault_results & KDP_FAULT_RESULT_TRIED_FAULT) {
		*task_snap_ss_flags |= kTaskUUIDInfoTriedFault;
	}

	if (kdp_fault_results & KDP_FAULT_RESULT_FAULTED_IN) {
		*task_snap_ss_flags |= kTaskUUIDInfoFaultedIn;
	}

	return error;
}

static kern_return_t
kcdata_record_uuid_info(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 *task_snap_ss_flags)
{
	boolean_t save_loadinfo_p         = ((trace_flags & STACKSHOT_SAVE_LOADINFO) != 0);
	boolean_t save_kextloadinfo_p     = ((trace_flags & STACKSHOT_SAVE_KEXT_LOADINFO) != 0);
	boolean_t should_fault            = (trace_flags & STACKSHOT_ENABLE_UUID_FAULTING);

	kern_return_t error        = KERN_SUCCESS;
	mach_vm_address_t out_addr = 0;

	uint32_t uuid_info_count         = 0;
	mach_vm_address_t uuid_info_addr = 0;
	uint64_t uuid_info_timestamp     = 0;
	uint32_t kdp_fault_results       = 0;

	assert(task_snap_ss_flags != NULL);

	int task_pid     = pid_from_task(task);
	boolean_t task_64bit_addr = task_has_64Bit_addr(task);

	if (save_loadinfo_p && have_pmap && task->active && task_pid > 0) {
		/* Read the dyld_all_image_infos struct from the task memory to get UUID array count and location */
		if (task_64bit_addr) {
			struct user64_dyld_all_image_infos task_image_infos;
			if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
			    sizeof(struct user64_dyld_all_image_infos), should_fault, &kdp_fault_results)) {
				uuid_info_count = (uint32_t)task_image_infos.uuidArrayCount;
				uuid_info_addr = task_image_infos.uuidArray;
				if (task_image_infos.version >= DYLD_ALL_IMAGE_INFOS_TIMESTAMP_MINIMUM_VERSION) {
					uuid_info_timestamp = task_image_infos.timestamp;
				}
			}
		} else {
			struct user32_dyld_all_image_infos task_image_infos;
			if (kdp_copyin(task->map, task->all_image_info_addr, &task_image_infos,
			    sizeof(struct user32_dyld_all_image_infos), should_fault, &kdp_fault_results)) {
				uuid_info_count = task_image_infos.uuidArrayCount;
				uuid_info_addr = task_image_infos.uuidArray;
				if (task_image_infos.version >= DYLD_ALL_IMAGE_INFOS_TIMESTAMP_MINIMUM_VERSION) {
					uuid_info_timestamp = task_image_infos.timestamp;
				}
			}
		}

		/*
		 * If we get a NULL uuid_info_addr (which can happen when we catch dyld in the middle of updating
		 * this data structure), we zero the uuid_info_count so that we won't even try to save load info
		 * for this task.
		 */
		if (!uuid_info_addr) {
			uuid_info_count = 0;
		}
	}

	if (have_pmap && task_pid == 0) {
		if (save_kextloadinfo_p && ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader))) {
			uuid_info_count = gLoadedKextSummaries->numSummaries + 1; /* include main kernel UUID */
		} else {
			uuid_info_count = 1; /* include kernelcache UUID (embedded) or kernel UUID (desktop) */
		}
	}

	if (save_loadinfo_p && task_pid > 0 && (uuid_info_count < MAX_LOADINFOS)) {
		uint32_t copied_uuid_count = 0;
		uint32_t uuid_info_size = (uint32_t)(task_64bit_addr ? sizeof(struct user64_dyld_uuid_info) : sizeof(struct user32_dyld_uuid_info));
		uint32_t uuid_info_array_size = 0;

		/* Open a compression window to avoid overflowing the stack */
		kcdata_compression_window_open(kcd);

		/* If we found some UUID information, first try to copy it in -- this will only be non-zero if we had a pmap above */
		if (uuid_info_count > 0) {
			uuid_info_array_size = uuid_info_count * uuid_info_size;

			kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, (task_64bit_addr ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO),
			    uuid_info_size, uuid_info_count, &out_addr));

			if (!kdp_copyin(task->map, uuid_info_addr, (void *)out_addr, uuid_info_array_size, should_fault, &kdp_fault_results)) {
				bzero((void *)out_addr, uuid_info_array_size);
			} else {
				copied_uuid_count = uuid_info_count;
			}
		}

		uuid_t binary_uuid;
		if (!copied_uuid_count && proc_binary_uuid_kdp(task, binary_uuid)) {
			/* We failed to copy in the UUID information; try to store the UUID of the main binary we have in the proc */
			if (uuid_info_array_size == 0) {
				/* We just need to store one UUID */
				uuid_info_array_size = uuid_info_size;
				kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, (task_64bit_addr ? KCDATA_TYPE_LIBRARY_LOADINFO64 : KCDATA_TYPE_LIBRARY_LOADINFO),
				    uuid_info_size, 1, &out_addr));
			}

			if (task_64bit_addr) {
				struct user64_dyld_uuid_info *uuid_info = (struct user64_dyld_uuid_info *)out_addr;
				uint64_t image_load_address = task->mach_header_vm_address;

				stackshot_memcpy(&uuid_info->imageUUID, binary_uuid, sizeof(uuid_t));
				stackshot_memcpy(&uuid_info->imageLoadAddress, &image_load_address, sizeof(image_load_address));
			} else {
				struct user32_dyld_uuid_info *uuid_info = (struct user32_dyld_uuid_info *)out_addr;
				uint32_t image_load_address = (uint32_t) task->mach_header_vm_address;

				stackshot_memcpy(&uuid_info->imageUUID, binary_uuid, sizeof(uuid_t));
				stackshot_memcpy(&uuid_info->imageLoadAddress, &image_load_address, sizeof(image_load_address));
			}
		}

		kcd_exit_on_error(kcdata_compression_window_close(kcd));
	} else if (task_pid == 0 && uuid_info_count > 0 && uuid_info_count < MAX_LOADINFOS) {
		uintptr_t image_load_address;

		do {
#if defined(__arm__) || defined(__arm64__)
			if (kernelcache_uuid_valid && !save_kextloadinfo_p) {
				struct dyld_uuid_info_64 kc_uuid = {0};
				kc_uuid.imageLoadAddress = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
				stackshot_memcpy(&kc_uuid.imageUUID, &kernelcache_uuid, sizeof(uuid_t));
				kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_KERNELCACHE_LOADINFO, sizeof(struct dyld_uuid_info_64), &kc_uuid));
				break;
			}
#endif /* defined(__arm__) || defined(__arm64__) */

			if (!kernel_uuid || !ml_validate_nofault((vm_offset_t)kernel_uuid, sizeof(uuid_t))) {
				/* Kernel UUID not found or inaccessible */
				break;
			}

			uint32_t uuid_type = KCDATA_TYPE_LIBRARY_LOADINFO;
			if (sizeof(kernel_uuid_info) == sizeof(struct user64_dyld_uuid_info)) {
				uuid_type = KCDATA_TYPE_LIBRARY_LOADINFO64;
#if defined(__arm64__)
				kc_format_t primary_kc_type = KCFormatUnknown;
				if (PE_get_primary_kc_format(&primary_kc_type) && (primary_kc_type == KCFormatFileset)) {
					/* return TEXT_EXEC based load information on arm devices running with fileset kernelcaches */
					uuid_type = STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC;
				}
#endif
			}

			/*
			 * The element count of the array can vary - avoid overflowing the
			 * stack by opening a window.
			 */
			kcdata_compression_window_open(kcd);
			kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, uuid_type,
			    sizeof(kernel_uuid_info), uuid_info_count, &out_addr));
			kernel_uuid_info *uuid_info_array = (kernel_uuid_info *)out_addr;

			image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(vm_kernel_stext);
#if defined(__arm64__)
			if (uuid_type == STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC) {
				/* If we're reporting TEXT_EXEC load info, populate the TEXT_EXEC base instead */
				extern vm_offset_t segTEXTEXECB;
				image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(segTEXTEXECB);
			}
#endif
			uuid_info_array[0].imageLoadAddress = image_load_address;
			stackshot_memcpy(&uuid_info_array[0].imageUUID, kernel_uuid, sizeof(uuid_t));

			if (save_kextloadinfo_p &&
			    ml_validate_nofault((vm_offset_t)(gLoadedKextSummaries), sizeof(OSKextLoadedKextSummaryHeader)) &&
			    ml_validate_nofault((vm_offset_t)(&gLoadedKextSummaries->summaries[0]),
			    gLoadedKextSummaries->entry_size * gLoadedKextSummaries->numSummaries)) {
				uint32_t kexti;
				for (kexti = 0; kexti < gLoadedKextSummaries->numSummaries; kexti++) {
					image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].address);
#if defined(__arm64__)
					if (uuid_type == STACKSHOT_KCTYPE_LOADINFO64_TEXT_EXEC) {
						/* If we're reporting TEXT_EXEC load info, populate the TEXT_EXEC base instead */
						image_load_address = (uintptr_t)VM_KERNEL_UNSLIDE(gLoadedKextSummaries->summaries[kexti].text_exec_address);
					}
#endif
					uuid_info_array[kexti + 1].imageLoadAddress = image_load_address;
					stackshot_memcpy(&uuid_info_array[kexti + 1].imageUUID, &gLoadedKextSummaries->summaries[kexti].uuid, sizeof(uuid_t));
				}
			}
			kcd_exit_on_error(kcdata_compression_window_close(kcd));
		} while (0);
	}

error_exit:
	if (kdp_fault_results & KDP_FAULT_RESULT_PAGED_OUT) {
		*task_snap_ss_flags |= kTaskUUIDInfoMissing;
	}

	if (kdp_fault_results & KDP_FAULT_RESULT_TRIED_FAULT) {
		*task_snap_ss_flags |= kTaskUUIDInfoTriedFault;
	}

	if (kdp_fault_results & KDP_FAULT_RESULT_FAULTED_IN) {
		*task_snap_ss_flags |= kTaskUUIDInfoFaultedIn;
	}

	return error;
}

static kern_return_t
kcdata_record_task_iostats(kcdata_descriptor_t kcd, task_t task)
{
	kern_return_t error = KERN_SUCCESS;
	mach_vm_address_t out_addr = 0;

	/* I/O Statistics if any counters are non-zero */
	assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
	if (task->task_io_stats && !memory_iszero(task->task_io_stats, sizeof(struct io_stat_info))) {
		/* struct io_stats_snapshot is quite large - avoid overflowing the stack. */
		kcdata_compression_window_open(kcd);
		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
		struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
		_iostat->ss_disk_reads_count = task->task_io_stats->disk_reads.count;
		_iostat->ss_disk_reads_size = task->task_io_stats->disk_reads.size;
		_iostat->ss_disk_writes_count = (task->task_io_stats->total_io.count - task->task_io_stats->disk_reads.count);
		_iostat->ss_disk_writes_size = (task->task_io_stats->total_io.size - task->task_io_stats->disk_reads.size);
		_iostat->ss_paging_count = task->task_io_stats->paging.count;
		_iostat->ss_paging_size = task->task_io_stats->paging.size;
		_iostat->ss_non_paging_count = (task->task_io_stats->total_io.count - task->task_io_stats->paging.count);
		_iostat->ss_non_paging_size = (task->task_io_stats->total_io.size - task->task_io_stats->paging.size);
		_iostat->ss_metadata_count = task->task_io_stats->metadata.count;
		_iostat->ss_metadata_size = task->task_io_stats->metadata.size;
		_iostat->ss_data_count = (task->task_io_stats->total_io.count - task->task_io_stats->metadata.count);
		_iostat->ss_data_size = (task->task_io_stats->total_io.size - task->task_io_stats->metadata.size);
		for (int i = 0; i < IO_NUM_PRIORITIES; i++) {
			_iostat->ss_io_priority_count[i] = task->task_io_stats->io_priority[i].count;
			_iostat->ss_io_priority_size[i] = task->task_io_stats->io_priority[i].size;
		}
		kcd_exit_on_error(kcdata_compression_window_close(kcd));
	}

error_exit:
	return error;
}

#if MONOTONIC
static kern_return_t
kcdata_record_task_instrs_cycles(kcdata_descriptor_t kcd, task_t task)
{
	struct instrs_cycles_snapshot instrs_cycles = {0};
	uint64_t ics_instructions;
	uint64_t ics_cycles;

	mt_stackshot_task(task, &ics_instructions, &ics_cycles);
	instrs_cycles.ics_instructions = ics_instructions;
	instrs_cycles.ics_cycles = ics_cycles;

	return kcdata_push_data(kcd, STACKSHOT_KCTYPE_INSTRS_CYCLES, sizeof(instrs_cycles), &instrs_cycles);
}
#endif /* MONOTONIC */

static kern_return_t
kcdata_record_task_cpu_architecture(kcdata_descriptor_t kcd, task_t task)
{
	struct stackshot_cpu_architecture cpu_architecture = {0};
	int32_t cputype;
	int32_t cpusubtype;

	proc_archinfo_kdp(task->bsd_info, &cputype, &cpusubtype);
	cpu_architecture.cputype = cputype;
	cpu_architecture.cpusubtype = cpusubtype;

	return kcdata_push_data(kcd, STACKSHOT_KCTYPE_TASK_CPU_ARCHITECTURE, sizeof(struct stackshot_cpu_architecture), &cpu_architecture);
}
1329  
1330  static kern_return_t
1331  #if STACKSHOT_COLLECTS_LATENCY_INFO
1332  kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags, struct stackshot_latency_task *latency_info)
1333  #else
1334  kcdata_record_task_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags)
1335  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1336  {
1337  	boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
1338  	boolean_t collect_iostats         = !collect_delta_stackshot && !(trace_flags & STACKSHOT_NO_IO_STATS);
1339  #if MONOTONIC
1340  	boolean_t collect_instrs_cycles   = ((trace_flags & STACKSHOT_INSTRS_CYCLES) != 0);
1341  #endif /* MONOTONIC */
1342  #if __arm__ || __arm64__
1343  	boolean_t collect_asid            = ((trace_flags & STACKSHOT_ASID) != 0);
1344  #endif
1345  	boolean_t collect_pagetables       = ((trace_flags & STACKSHOT_PAGE_TABLES) != 0);
1346  
1347  
1348  	kern_return_t error                 = KERN_SUCCESS;
1349  	mach_vm_address_t out_addr          = 0;
1350  	struct task_snapshot_v2 * cur_tsnap = NULL;
1351  #if STACKSHOT_COLLECTS_LATENCY_INFO
1352  	latency_info->cur_tsnap_latency = mach_absolute_time();
1353  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1354  
1355  	int task_pid           = pid_from_task(task);
1356  	uint64_t task_uniqueid = get_task_uniqueid(task);
1357  	uint64_t proc_starttime_secs = 0;
1358  
1359  	if (task_pid && (task_did_exec_internal(task) || task_is_exec_copy_internal(task))) {
1360  		/*
1361  		 * if this task is a transit task from another one, show the pid as
1362  		 * negative
1363  		 */
1364  		task_pid = 0 - task_pid;
1365  	}
1366  
1367  	/* the task_snapshot_v2 struct is large - avoid overflowing the stack */
1368  	kcdata_compression_window_open(kcd);
1369  	kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_TASK_SNAPSHOT, sizeof(struct task_snapshot_v2), &out_addr));
1370  	cur_tsnap = (struct task_snapshot_v2 *)out_addr;
1371  	bzero(cur_tsnap, sizeof(*cur_tsnap));
1372  
1373  	cur_tsnap->ts_unique_pid = task_uniqueid;
1374  	cur_tsnap->ts_ss_flags = kcdata_get_task_ss_flags(task);
1375  	cur_tsnap->ts_ss_flags |= task_snap_ss_flags;
1376  	cur_tsnap->ts_user_time_in_terminated_threads = task->total_user_time;
1377  	cur_tsnap->ts_system_time_in_terminated_threads = task->total_system_time;
1378  
1379  	proc_starttime_kdp(task->bsd_info, &proc_starttime_secs, NULL, NULL);
1380  	cur_tsnap->ts_p_start_sec = proc_starttime_secs;
1381  	cur_tsnap->ts_task_size = have_pmap ? get_task_phys_footprint(task) : 0;
1382  	cur_tsnap->ts_max_resident_size = get_task_resident_max(task);
1383  	cur_tsnap->ts_was_throttled = (uint32_t) proc_was_throttled_from_task(task);
1384  	cur_tsnap->ts_did_throttle = (uint32_t) proc_did_throttle_from_task(task);
1385  
1386  	cur_tsnap->ts_suspend_count = task->suspend_count;
1387  	cur_tsnap->ts_faults = counter_load(&task->faults);
1388  	cur_tsnap->ts_pageins = task->pageins;
1389  	cur_tsnap->ts_cow_faults = task->cow_faults;
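	/*
	 * Package the latency QoS tier: the "unspecified" sentinel passes through
	 * unchanged, while a real tier is tagged with 0xFF in bits 16..23 so it
	 * cannot be confused with the sentinel. A consumer can recover the raw
	 * tier with (ts_latency_qos & 0xFFFF).
	 */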
1390  	cur_tsnap->ts_latency_qos = (task->effective_policy.tep_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED) ?
1391  	    LATENCY_QOS_TIER_UNSPECIFIED : ((0xFF << 16) | task->effective_policy.tep_latency_qos);
1392  	cur_tsnap->ts_pid = task_pid;
1393  
1394  	/* Add the BSD process identifiers */
1395  	if (task_pid != -1 && task->bsd_info != NULL) {
1396  		proc_name_kdp(task, cur_tsnap->ts_p_comm, sizeof(cur_tsnap->ts_p_comm));
1397  	} else {
1398  		cur_tsnap->ts_p_comm[0] = '\0';
1399  #if IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG)
1400  		if (task->task_imp_base != NULL) {
1401  			stackshot_strlcpy(cur_tsnap->ts_p_comm, &task->task_imp_base->iit_procname[0],
1402  			    MIN((int)sizeof(task->task_imp_base->iit_procname), (int)sizeof(cur_tsnap->ts_p_comm)));
1403  		}
1404  #endif /* IMPORTANCE_INHERITANCE && (DEVELOPMENT || DEBUG) */
1405  	}
1406  
1407  	kcd_exit_on_error(kcdata_compression_window_close(kcd));
1408  
1409  #if CONFIG_COALITIONS
1410  	if (task_pid != -1 && task->bsd_info != NULL &&
1411  	    ((trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) && (task->coalition[COALITION_TYPE_JETSAM] != NULL))) {
1412  		uint64_t jetsam_coal_id = coalition_id(task->coalition[COALITION_TYPE_JETSAM]);
1413  		kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_JETSAM_COALITION, sizeof(jetsam_coal_id), &jetsam_coal_id));
1414  	}
1415  #endif /* CONFIG_COALITIONS */
1416  
1417  #if __arm__ || __arm64__
1418  	if (collect_asid && have_pmap) {
1419  		uint32_t asid = PMAP_VASID(task->map->pmap);
1420  		kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_ASID, sizeof(asid), &asid));
1421  	}
1422  #endif
1423  
1424  #if STACKSHOT_COLLECTS_LATENCY_INFO
1425  	latency_info->cur_tsnap_latency = mach_absolute_time() - latency_info->cur_tsnap_latency;
1426  	latency_info->pmap_latency = mach_absolute_time();
1427  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1428  
1429  	if (collect_pagetables && have_pmap) {
1430  #if INTERRUPT_MASKED_DEBUG
1431  		// pagetable dumps can be large; reset the interrupt timeout to avoid a panic
1432  		ml_spin_debug_clear_self();
1433  #endif
1434  		size_t bytes_dumped = 0;
1435  		error = pmap_dump_page_tables(task->map->pmap, kcd_end_address(kcd), kcd_max_address(kcd), stack_snapshot_pagetable_mask, &bytes_dumped);
1436  		if (error != KERN_SUCCESS) {
1437  			goto error_exit;
1438  		} else {
1439  			/* Variable size array - better not have it on the stack. */
1440  			kcdata_compression_window_open(kcd);
1441  			kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_PAGE_TABLES,
1442  			    sizeof(uint64_t), (uint32_t)(bytes_dumped / sizeof(uint64_t)), &out_addr));
1443  			kcd_exit_on_error(kcdata_compression_window_close(kcd));
1444  		}
1445  	}
1446  
1447  #if STACKSHOT_COLLECTS_LATENCY_INFO
1448  	latency_info->pmap_latency = mach_absolute_time() - latency_info->pmap_latency;
1449  	latency_info->bsd_proc_ids_latency = mach_absolute_time();
1450  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1451  
1452  #if STACKSHOT_COLLECTS_LATENCY_INFO
1453  	latency_info->bsd_proc_ids_latency = mach_absolute_time() - latency_info->bsd_proc_ids_latency;
1454  	latency_info->end_latency = mach_absolute_time();
1455  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1456  
1457  	if (collect_iostats) {
1458  		kcd_exit_on_error(kcdata_record_task_iostats(kcd, task));
1459  	}
1460  
1461  #if MONOTONIC
1462  	if (collect_instrs_cycles) {
1463  		kcd_exit_on_error(kcdata_record_task_instrs_cycles(kcd, task));
1464  	}
1465  #endif /* MONOTONIC */
1466  
1467  	kcd_exit_on_error(kcdata_record_task_cpu_architecture(kcd, task));
1468  
1469  #if STACKSHOT_COLLECTS_LATENCY_INFO
1470  	latency_info->end_latency = mach_absolute_time() - latency_info->end_latency;
1471  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1472  
1473  error_exit:
1474  	return error;
1475  }
1476  
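/*
 * Record the smaller task_delta_snapshot_v2 for a task that has not run since
 * the previous full stackshot; only the mutable accounting fields (plus the
 * optional ASID and instruction/cycle counts) are captured.
 */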
1477  static kern_return_t
1478  kcdata_record_task_delta_snapshot(kcdata_descriptor_t kcd, task_t task, uint64_t trace_flags, boolean_t have_pmap, unaligned_u64 task_snap_ss_flags)
1479  {
1480  #if !MONOTONIC
1481  #pragma unused(trace_flags)
1482  #endif /* !MONOTONIC */
1483  	kern_return_t error                       = KERN_SUCCESS;
1484  	struct task_delta_snapshot_v2 * cur_tsnap = NULL;
1485  	mach_vm_address_t out_addr                = 0;
1487  #if __arm__ || __arm64__
1488  	boolean_t collect_asid                    = ((trace_flags & STACKSHOT_ASID) != 0);
1489  #endif
1490  #if MONOTONIC
1491  	boolean_t collect_instrs_cycles           = ((trace_flags & STACKSHOT_INSTRS_CYCLES) != 0);
1492  #endif /* MONOTONIC */
1493  
1494  	uint64_t task_uniqueid = get_task_uniqueid(task);
1495  
1496  	kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_TASK_DELTA_SNAPSHOT, sizeof(struct task_delta_snapshot_v2), &out_addr));
1497  
1498  	cur_tsnap = (struct task_delta_snapshot_v2 *)out_addr;
1499  
1500  	cur_tsnap->tds_unique_pid = task_uniqueid;
1501  	cur_tsnap->tds_ss_flags = kcdata_get_task_ss_flags(task);
1502  	cur_tsnap->tds_ss_flags |= task_snap_ss_flags;
1503  
1504  	cur_tsnap->tds_user_time_in_terminated_threads = task->total_user_time;
1505  	cur_tsnap->tds_system_time_in_terminated_threads = task->total_system_time;
1506  
1507  	cur_tsnap->tds_task_size = have_pmap ? get_task_phys_footprint(task) : 0;
1508  
1509  	cur_tsnap->tds_max_resident_size = get_task_resident_max(task);
1510  	cur_tsnap->tds_suspend_count = task->suspend_count;
1511  	cur_tsnap->tds_faults            = counter_load(&task->faults);
1512  	cur_tsnap->tds_pageins           = task->pageins;
1513  	cur_tsnap->tds_cow_faults        = task->cow_faults;
1514  	cur_tsnap->tds_was_throttled     = (uint32_t)proc_was_throttled_from_task(task);
1515  	cur_tsnap->tds_did_throttle      = (uint32_t)proc_did_throttle_from_task(task);
1516  	cur_tsnap->tds_latency_qos       = (task->effective_policy.tep_latency_qos == LATENCY_QOS_TIER_UNSPECIFIED)
1517  	    ? LATENCY_QOS_TIER_UNSPECIFIED
1518  	    : ((0xFF << 16) | task->effective_policy.tep_latency_qos);
1519  
1520  #if __arm__ || __arm64__
1521  	if (collect_asid && have_pmap) {
1522  		uint32_t asid = PMAP_VASID(task->map->pmap);
1523  		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_ASID, sizeof(uint32_t), &out_addr));
1524  		stackshot_memcpy((void*)out_addr, &asid, sizeof(asid));
1525  	}
1526  #endif
1527  
1528  #if MONOTONIC
1529  	if (collect_instrs_cycles) {
1530  		kcd_exit_on_error(kcdata_record_task_instrs_cycles(kcd, task));
1531  	}
1532  #endif /* MONOTONIC */
1533  
1534  error_exit:
1535  	return error;
1536  }
1537  
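/*
 * Record per-thread I/O statistics; the thread-level mirror of
 * kcdata_record_task_iostats, sourced from thread->thread_io_stats.
 */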
1538  static kern_return_t
1539  kcdata_record_thread_iostats(kcdata_descriptor_t kcd, thread_t thread)
1540  {
1541  	kern_return_t error = KERN_SUCCESS;
1542  	mach_vm_address_t out_addr = 0;
1543  
1544  	/* I/O Statistics */
1545  	assert(IO_NUM_PRIORITIES == STACKSHOT_IO_NUM_PRIORITIES);
1546  	if (thread->thread_io_stats && !memory_iszero(thread->thread_io_stats, sizeof(struct io_stat_info))) {
1547  		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_IOSTATS, sizeof(struct io_stats_snapshot), &out_addr));
1548  		struct io_stats_snapshot *_iostat = (struct io_stats_snapshot *)out_addr;
1549  		_iostat->ss_disk_reads_count = thread->thread_io_stats->disk_reads.count;
1550  		_iostat->ss_disk_reads_size = thread->thread_io_stats->disk_reads.size;
1551  		_iostat->ss_disk_writes_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->disk_reads.count);
1552  		_iostat->ss_disk_writes_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->disk_reads.size);
1553  		_iostat->ss_paging_count = thread->thread_io_stats->paging.count;
1554  		_iostat->ss_paging_size = thread->thread_io_stats->paging.size;
1555  		_iostat->ss_non_paging_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->paging.count);
1556  		_iostat->ss_non_paging_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->paging.size);
1557  		_iostat->ss_metadata_count = thread->thread_io_stats->metadata.count;
1558  		_iostat->ss_metadata_size = thread->thread_io_stats->metadata.size;
1559  		_iostat->ss_data_count = (thread->thread_io_stats->total_io.count - thread->thread_io_stats->metadata.count);
1560  		_iostat->ss_data_size = (thread->thread_io_stats->total_io.size - thread->thread_io_stats->metadata.size);
1561  		for (int i = 0; i < IO_NUM_PRIORITIES; i++) {
1562  			_iostat->ss_io_priority_count[i] = thread->thread_io_stats->io_priority[i].count;
1563  			_iostat->ss_io_priority_size[i] = thread->thread_io_stats->io_priority[i].size;
1564  		}
1565  	}
1566  
1567  error_exit:
1568  	return error;
1569  }
1570  
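/*
 * Record a full thread snapshot: the thread_snapshot_v4 header, the optional
 * dispatch queue serial number and label, the thread name, cpu times, user
 * and kernel backtraces, and any flag-dependent extras.
 */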
1571  static kern_return_t
1572  kcdata_record_thread_snapshot(
1573  	kcdata_descriptor_t kcd, thread_t thread, task_t task, uint64_t trace_flags, boolean_t have_pmap, boolean_t thread_on_core)
1574  {
1575  	boolean_t dispatch_p              = ((trace_flags & STACKSHOT_GET_DQ) != 0);
1576  	boolean_t active_kthreads_only_p  = ((trace_flags & STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY) != 0);
1577  	boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
1578  	boolean_t collect_iostats         = !collect_delta_stackshot && !(trace_flags & STACKSHOT_NO_IO_STATS);
1579  #if MONOTONIC
1580  	boolean_t collect_instrs_cycles   = ((trace_flags & STACKSHOT_INSTRS_CYCLES) != 0);
1581  #endif /* MONOTONIC */
1582  	kern_return_t error        = KERN_SUCCESS;
1583  
1584  #if STACKSHOT_COLLECTS_LATENCY_INFO
1585  	struct stackshot_latency_thread latency_info;
1586  	latency_info.cur_thsnap1_latency = mach_absolute_time();
1587  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1588  
1589  	mach_vm_address_t out_addr = 0;
1590  	int saved_count            = 0;
1591  
1592  	struct thread_snapshot_v4 * cur_thread_snap = NULL;
1593  	char cur_thread_name[STACKSHOT_MAX_THREAD_NAME_SIZE];
1594  	uint64_t tval    = 0;
1595  	const boolean_t is_64bit_data = task_has_64Bit_data(task);
1596  
1597  	kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_THREAD_SNAPSHOT, sizeof(struct thread_snapshot_v4), &out_addr));
1598  	cur_thread_snap = (struct thread_snapshot_v4 *)out_addr;
1599  
1600  	/* Populate the thread snapshot header */
1601  	cur_thread_snap->ths_ss_flags = 0;
1602  	cur_thread_snap->ths_thread_id = thread_tid(thread);
1603  	cur_thread_snap->ths_wait_event = VM_KERNEL_UNSLIDE_OR_PERM(thread->wait_event);
1604  	cur_thread_snap->ths_continuation = VM_KERNEL_UNSLIDE(thread->continuation);
1605  	cur_thread_snap->ths_total_syscalls = thread->syscalls_mach + thread->syscalls_unix;
1606  
1607  	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
1608  		cur_thread_snap->ths_voucher_identifier = VM_KERNEL_ADDRPERM(thread->ith_voucher);
1609  	} else {
1610  		cur_thread_snap->ths_voucher_identifier = 0;
1611  	}
1612  
1613  #if STACKSHOT_COLLECTS_LATENCY_INFO
1614  	latency_info.cur_thsnap1_latency = mach_absolute_time() - latency_info.cur_thsnap1_latency;
1615  	latency_info.dispatch_serial_latency = mach_absolute_time();
1616  	latency_info.dispatch_label_latency = 0;
1617  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1618  
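	/*
	 * Read the dispatch queue serial number: fetch the queue pointer from the
	 * per-thread slot that thread_dispatchqaddr() reports, then read the
	 * serial number at the task's advertised offset inside the queue object.
	 * Every user-space read goes through kdp_copyin_word(), since task memory
	 * cannot be dereferenced directly in this context.
	 */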
1619  	cur_thread_snap->ths_dqserialnum = 0;
1620  	if (dispatch_p && (task != kernel_task) && (task->active) && have_pmap) {
1621  		uint64_t dqkeyaddr = thread_dispatchqaddr(thread);
1622  		if (dqkeyaddr != 0) {
1623  			uint64_t dqaddr = 0;
1624  			boolean_t copyin_ok = kdp_copyin_word(task, dqkeyaddr, &dqaddr, FALSE, NULL);
1625  			if (copyin_ok && dqaddr != 0) {
1626  				uint64_t dqserialnumaddr = dqaddr + get_task_dispatchqueue_serialno_offset(task);
1627  				uint64_t dqserialnum = 0;
1628  				copyin_ok = kdp_copyin_word(task, dqserialnumaddr, &dqserialnum, FALSE, NULL);
1629  				if (copyin_ok) {
1630  					cur_thread_snap->ths_ss_flags |= kHasDispatchSerial;
1631  					cur_thread_snap->ths_dqserialnum = dqserialnum;
1632  				}
1633  
1634  #if STACKSHOT_COLLECTS_LATENCY_INFO
1635  				latency_info.dispatch_serial_latency = mach_absolute_time() - latency_info.dispatch_serial_latency;
1636  				latency_info.dispatch_label_latency = mach_absolute_time();
1637  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1638  
1639  				/* try copying in the queue label */
1640  				uint64_t label_offs = get_task_dispatchqueue_label_offset(task);
1641  				if (label_offs) {
1642  					uint64_t dqlabeladdr = dqaddr + label_offs;
1643  					uint64_t actual_dqlabeladdr = 0;
1644  
1645  					copyin_ok = kdp_copyin_word(task, dqlabeladdr, &actual_dqlabeladdr, FALSE, NULL);
1646  					if (copyin_ok && actual_dqlabeladdr != 0) {
1647  						char label_buf[STACKSHOT_QUEUE_LABEL_MAXSIZE];
1648  						int len;
1649  
1650  						bzero(label_buf, STACKSHOT_QUEUE_LABEL_MAXSIZE * sizeof(char));
1651  						len = kdp_copyin_string(task, actual_dqlabeladdr, label_buf, STACKSHOT_QUEUE_LABEL_MAXSIZE, FALSE, NULL);
1652  						if (len > 0) {
1653  							mach_vm_address_t label_addr = 0;
1654  							kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_THREAD_DISPATCH_QUEUE_LABEL, len, &label_addr));
1655  							stackshot_strlcpy((char*)label_addr, &label_buf[0], len);
1656  						}
1657  					}
1658  				}
1659  #if STACKSHOT_COLLECTS_LATENCY_INFO
1660  				latency_info.dispatch_label_latency = mach_absolute_time() - latency_info.dispatch_label_latency;
1661  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1662  			}
1663  		}
1664  	}
1665  
1666  #if STACKSHOT_COLLECTS_LATENCY_INFO
1667  	if ((cur_thread_snap->ths_ss_flags & kHasDispatchSerial) == 0) {
1668  		latency_info.dispatch_serial_latency = 0;
1669  	}
1670  	latency_info.cur_thsnap2_latency = mach_absolute_time();
1671  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1672  
1673  	tval = safe_grab_timer_value(&thread->user_timer);
1674  	cur_thread_snap->ths_user_time = tval;
1675  	tval = safe_grab_timer_value(&thread->system_timer);
1676  
1677  	if (thread->precise_user_kernel_time) {
1678  		cur_thread_snap->ths_sys_time = tval;
1679  	} else {
1680  		cur_thread_snap->ths_user_time += tval;
1681  		cur_thread_snap->ths_sys_time = 0;
1682  	}
1683  
1684  	if (thread->thread_tag & THREAD_TAG_MAINTHREAD) {
1685  		cur_thread_snap->ths_ss_flags |= kThreadMain;
1686  	}
1687  	if (thread->effective_policy.thep_darwinbg) {
1688  		cur_thread_snap->ths_ss_flags |= kThreadDarwinBG;
1689  	}
1690  	if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
1691  		cur_thread_snap->ths_ss_flags |= kThreadIOPassive;
1692  	}
1693  	if (thread->suspend_count > 0) {
1694  		cur_thread_snap->ths_ss_flags |= kThreadSuspended;
1695  	}
1696  	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1697  		cur_thread_snap->ths_ss_flags |= kGlobalForcedIdle;
1698  	}
1699  	if (thread_on_core) {
1700  		cur_thread_snap->ths_ss_flags |= kThreadOnCore;
1701  	}
1702  	if (stackshot_thread_is_idle_worker_unsafe(thread)) {
1703  		cur_thread_snap->ths_ss_flags |= kThreadIdleWorker;
1704  	}
1705  
1706  	/* make sure state flags defined in kcdata.h still match internal flags */
1707  	static_assert(SS_TH_WAIT == TH_WAIT);
1708  	static_assert(SS_TH_SUSP == TH_SUSP);
1709  	static_assert(SS_TH_RUN == TH_RUN);
1710  	static_assert(SS_TH_UNINT == TH_UNINT);
1711  	static_assert(SS_TH_TERMINATE == TH_TERMINATE);
1712  	static_assert(SS_TH_TERMINATE2 == TH_TERMINATE2);
1713  	static_assert(SS_TH_IDLE == TH_IDLE);
1714  
1715  	cur_thread_snap->ths_last_run_time           = thread->last_run_time;
1716  	cur_thread_snap->ths_last_made_runnable_time = thread->last_made_runnable_time;
1717  	cur_thread_snap->ths_state                   = thread->state;
1718  	cur_thread_snap->ths_sched_flags             = thread->sched_flags;
1719  	cur_thread_snap->ths_base_priority = thread->base_pri;
1720  	cur_thread_snap->ths_sched_priority = thread->sched_pri;
1721  	cur_thread_snap->ths_eqos = thread->effective_policy.thep_qos;
1722  	cur_thread_snap->ths_rqos = thread->requested_policy.thrp_qos;
1723  	cur_thread_snap->ths_rqos_override = MAX(thread->requested_policy.thrp_qos_override,
1724  	    thread->requested_policy.thrp_qos_workq_override);
1725  	cur_thread_snap->ths_io_tier = (uint8_t) proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
1726  	cur_thread_snap->ths_thread_t = VM_KERNEL_UNSLIDE_OR_PERM(thread);
1727  
1728  	static_assert(sizeof(thread->effective_policy) == sizeof(uint64_t));
1729  	static_assert(sizeof(thread->requested_policy) == sizeof(uint64_t));
1730  	cur_thread_snap->ths_requested_policy = *(unaligned_u64 *) &thread->requested_policy;
1731  	cur_thread_snap->ths_effective_policy = *(unaligned_u64 *) &thread->effective_policy;
1732  
1733  #if STACKSHOT_COLLECTS_LATENCY_INFO
	latency_info.cur_thsnap2_latency = mach_absolute_time() - latency_info.cur_thsnap2_latency;
1735  	latency_info.thread_name_latency = mach_absolute_time();
1736  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1737  
	/* if the thread has a name, add it to the buffer */
1739  	cur_thread_name[0] = '\0';
1740  	proc_threadname_kdp(thread->uthread, cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE);
1741  	if (strnlen(cur_thread_name, STACKSHOT_MAX_THREAD_NAME_SIZE) > 0) {
1742  		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_THREAD_NAME, sizeof(cur_thread_name), &out_addr));
1743  		stackshot_memcpy((void *)out_addr, (void *)cur_thread_name, sizeof(cur_thread_name));
1744  	}
1745  
1746  #if STACKSHOT_COLLECTS_LATENCY_INFO
	latency_info.thread_name_latency = mach_absolute_time() - latency_info.thread_name_latency;
1748  	latency_info.sur_times_latency = mach_absolute_time();
1749  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1750  
1751  	/* record system, user, and runnable times */
1752  	time_value_t user_time, system_time, runnable_time;
1753  	thread_read_times(thread, &user_time, &system_time, &runnable_time);
1754  	kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_CPU_TIMES, sizeof(struct stackshot_cpu_times_v2), &out_addr));
1755  	struct stackshot_cpu_times_v2 *stackshot_cpu_times = (struct stackshot_cpu_times_v2 *)out_addr;
1756  	*stackshot_cpu_times = (struct stackshot_cpu_times_v2){
1757  		.user_usec = (uint64_t)user_time.seconds * USEC_PER_SEC + user_time.microseconds,
1758  		.system_usec = (uint64_t)system_time.seconds * USEC_PER_SEC + system_time.microseconds,
1759  		.runnable_usec = (uint64_t)runnable_time.seconds * USEC_PER_SEC + runnable_time.microseconds,
1760  	};
1761  
1762  #if STACKSHOT_COLLECTS_LATENCY_INFO
	latency_info.sur_times_latency = mach_absolute_time() - latency_info.sur_times_latency;
1764  	latency_info.user_stack_latency = mach_absolute_time();
1765  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1766  
1767  	/* Trace user stack, if any */
1768  	if (!active_kthreads_only_p && task->active && thread->task->map != kernel_map) {
1769  		uint32_t thread_snapshot_flags = 0;
1770  
1771  		/* Uses 64-bit machine state? */
1772  		if (is_64bit_data) {
1773  			uint64_t sp = 0;
1774  			out_addr    = (mach_vm_address_t)kcd_end_address(kcd);
1775  
1776  			uintptr_t fp = 0;
1777  
1778  
1779  			saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE,
1780  			    &thread_snapshot_flags, &sp, fp);
1781  			if (saved_count > 0) {
1782  				int frame_size = sizeof(uint64_t);
1783  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_USER_STACKLR64,
1784  				    frame_size, saved_count / frame_size, &out_addr));
1785  				cur_thread_snap->ths_ss_flags |= kUser64_p;
1786  			}
1787  #if __x86_64__
1788  			if (sp) {
1789  				// I'm using 8 here and not sizeof(stack_contents) because this
1790  				// code would not work if you just made stack_contents bigger.
1791  				vm_offset_t kern_virt_addr = machine_trace_thread_get_kva(sp, thread->task->map, &thread_snapshot_flags);
1792  				if (kern_virt_addr && (kern_virt_addr % 8) == 0) {
1793  					kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_USER_STACKTOP, sizeof(struct stack_snapshot_stacktop), &out_addr));
1794  					struct stack_snapshot_stacktop *stacktop = (struct stack_snapshot_stacktop *)out_addr;
1795  					stacktop->sp = sp;
1796  					memcpy(stacktop->stack_contents, (void*) kern_virt_addr, 8);
1797  				}
1798  			}
1799  #endif /* __x86_64__ */
1800  		} else {
1801  			out_addr    = (mach_vm_address_t)kcd_end_address(kcd);
1802  			saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, TRUE,
1803  			    &thread_snapshot_flags);
1804  			if (saved_count > 0) {
1805  				int frame_size = sizeof(uint32_t);
1806  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_USER_STACKLR,
1807  				    frame_size, saved_count / frame_size, &out_addr));
1808  			}
1809  		}
1810  
1811  		if (thread_snapshot_flags != 0) {
1812  			cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
1813  		}
1814  	}
1815  
1816  #if STACKSHOT_COLLECTS_LATENCY_INFO
	latency_info.user_stack_latency = mach_absolute_time() - latency_info.user_stack_latency;
1818  	latency_info.kernel_stack_latency = mach_absolute_time();
1819  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1820  
	/*
	 * Call through to the machine-specific trace routines.
	 * Frames are added past the snapshot header.
	 */
1824  	if (thread->kernel_stack != 0) {
1825  		uint32_t thread_snapshot_flags = 0;
1826  #if defined(__LP64__)
1827  		out_addr    = (mach_vm_address_t)kcd_end_address(kcd);
1828  		saved_count = machine_trace_thread64(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE,
1829  		    &thread_snapshot_flags, NULL, 0);
1830  		if (saved_count > 0) {
1831  			int frame_size = sizeof(uint64_t);
1832  			cur_thread_snap->ths_ss_flags |= kKernel64_p;
1833  			kcd_exit_on_error(kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_KERN_STACKLR64,
1834  			    frame_size, saved_count / frame_size, &out_addr));
1835  		}
1836  #else
1837  		out_addr             = (mach_vm_address_t)kcd_end_address(kcd);
1838  		saved_count = machine_trace_thread(thread, (char *)out_addr, (char *)kcd_max_address(kcd), MAX_FRAMES, FALSE,
1839  		    &thread_snapshot_flags);
1840  		if (saved_count > 0) {
1841  			int frame_size = sizeof(uint32_t);
1842  			kcd_exit_on_error(
1843  				kcdata_get_memory_addr_for_array(kcd, STACKSHOT_KCTYPE_KERN_STACKLR, frame_size,
1844  				saved_count / frame_size, &out_addr));
1845  		}
1846  #endif
1847  		if (thread_snapshot_flags != 0) {
1848  			cur_thread_snap->ths_ss_flags |= thread_snapshot_flags;
1849  		}
1850  	}
1851  
1852  #if STACKSHOT_COLLECTS_LATENCY_INFO
	latency_info.kernel_stack_latency = mach_absolute_time() - latency_info.kernel_stack_latency;
1854  	latency_info.misc_latency = mach_absolute_time();
1855  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1856  
1857  #if CONFIG_THREAD_GROUPS
1858  	if (trace_flags & STACKSHOT_THREAD_GROUP) {
1859  		uint64_t thread_group_id = thread->thread_group ? thread_group_get_id(thread->thread_group) : 0;
1860  		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_THREAD_GROUP, sizeof(thread_group_id), &out_addr));
1861  		stackshot_memcpy((void*)out_addr, &thread_group_id, sizeof(uint64_t));
1862  	}
1863  #endif /* CONFIG_THREAD_GROUPS */
1864  
1865  	if (collect_iostats) {
1866  		kcd_exit_on_error(kcdata_record_thread_iostats(kcd, thread));
1867  	}
1868  
1869  #if MONOTONIC
1870  	if (collect_instrs_cycles) {
1871  		uint64_t instrs = 0, cycles = 0;
1872  		mt_stackshot_thread(thread, &instrs, &cycles);
1873  
1874  		kcd_exit_on_error(kcdata_get_memory_addr(kcd, STACKSHOT_KCTYPE_INSTRS_CYCLES, sizeof(struct instrs_cycles_snapshot), &out_addr));
1875  		struct instrs_cycles_snapshot *instrs_cycles = (struct instrs_cycles_snapshot *)out_addr;
1876  		instrs_cycles->ics_instructions = instrs;
1877  		instrs_cycles->ics_cycles = cycles;
1878  	}
1879  #endif /* MONOTONIC */
1880  
1881  #if STACKSHOT_COLLECTS_LATENCY_INFO
1882  	latency_info.misc_latency = mach_absolute_time() - latency_info.misc_latency;
1883  	if (collect_latency_info) {
1884  		kcd_exit_on_error(kcdata_push_data(kcd, STACKSHOT_KCTYPE_LATENCY_INFO_THREAD, sizeof(latency_info), &latency_info));
1885  	}
1886  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
1887  
1888  error_exit:
1889  	return error;
1890  }
1891  
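/*
 * Fill one preallocated thread_delta_snapshot_v3 entry for a thread that has
 * not run since the previous full stackshot. Unlike the full path, this
 * writes into an array slot supplied by the caller and cannot fail.
 */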
1892  static int
1893  kcdata_record_thread_delta_snapshot(struct thread_delta_snapshot_v3 * cur_thread_snap, thread_t thread, boolean_t thread_on_core)
1894  {
1895  	cur_thread_snap->tds_thread_id = thread_tid(thread);
1896  	if (IPC_VOUCHER_NULL != thread->ith_voucher) {
1897  		cur_thread_snap->tds_voucher_identifier  = VM_KERNEL_ADDRPERM(thread->ith_voucher);
1898  	} else {
1899  		cur_thread_snap->tds_voucher_identifier = 0;
1900  	}
1901  
1902  	cur_thread_snap->tds_ss_flags = 0;
1903  	if (thread->effective_policy.thep_darwinbg) {
1904  		cur_thread_snap->tds_ss_flags |= kThreadDarwinBG;
1905  	}
1906  	if (proc_get_effective_thread_policy(thread, TASK_POLICY_PASSIVE_IO)) {
1907  		cur_thread_snap->tds_ss_flags |= kThreadIOPassive;
1908  	}
1909  	if (thread->suspend_count > 0) {
1910  		cur_thread_snap->tds_ss_flags |= kThreadSuspended;
1911  	}
1912  	if (thread->options & TH_OPT_GLOBAL_FORCED_IDLE) {
1913  		cur_thread_snap->tds_ss_flags |= kGlobalForcedIdle;
1914  	}
1915  	if (thread_on_core) {
1916  		cur_thread_snap->tds_ss_flags |= kThreadOnCore;
1917  	}
1918  	if (stackshot_thread_is_idle_worker_unsafe(thread)) {
1919  		cur_thread_snap->tds_ss_flags |= kThreadIdleWorker;
1920  	}
1921  
1922  	cur_thread_snap->tds_last_made_runnable_time = thread->last_made_runnable_time;
1923  	cur_thread_snap->tds_state                   = thread->state;
1924  	cur_thread_snap->tds_sched_flags             = thread->sched_flags;
1925  	cur_thread_snap->tds_base_priority           = thread->base_pri;
1926  	cur_thread_snap->tds_sched_priority          = thread->sched_pri;
1927  	cur_thread_snap->tds_eqos                    = thread->effective_policy.thep_qos;
1928  	cur_thread_snap->tds_rqos                    = thread->requested_policy.thrp_qos;
1929  	cur_thread_snap->tds_rqos_override           = MAX(thread->requested_policy.thrp_qos_override,
1930  	    thread->requested_policy.thrp_qos_workq_override);
1931  	cur_thread_snap->tds_io_tier                 = (uint8_t) proc_get_effective_thread_policy(thread, TASK_POLICY_IO);
1932  
1933  	static_assert(sizeof(thread->effective_policy) == sizeof(uint64_t));
1934  	static_assert(sizeof(thread->requested_policy) == sizeof(uint64_t));
1935  	cur_thread_snap->tds_requested_policy = *(unaligned_u64 *) &thread->requested_policy;
1936  	cur_thread_snap->tds_effective_policy = *(unaligned_u64 *) &thread->effective_policy;
1937  
1938  	return 0;
1939  }
1940  
1941  /*
1942   * Why 12?  12 strikes a decent balance between allocating a large array on
 * the stack and having large kcdata item overheads for recording nonrunnable
1944   * tasks.
1945   */
1946  #define UNIQUEIDSPERFLUSH 12
1947  
1948  struct saved_uniqueids {
1949  	uint64_t ids[UNIQUEIDSPERFLUSH];
1950  	unsigned count;
1951  };
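/*
 * A minimal usage sketch (the helper and kcdata type below are named for
 * illustration only): callers append ids and flush a full batch as a single
 * kcdata array item.
 *
 *	static void
 *	flush_saved_uniqueids(kcdata_descriptor_t kcd, struct saved_uniqueids *s)
 *	{
 *		if (s->count > 0) {
 *			kcdata_push_array(kcd, STACKSHOT_KCTYPE_NONRUNNABLE_TIDS,
 *			    sizeof(uint64_t), s->count, s->ids);
 *			s->count = 0;
 *		}
 *	}
 */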
1952  
1953  enum thread_classification {
1954  	tc_full_snapshot,  /* take a full snapshot */
1955  	tc_delta_snapshot, /* take a delta snapshot */
1956  };
1957  
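/*
 * Decide whether a thread needs a full snapshot or can be recorded as a
 * delta entry, and report whether it is currently on core. A thread on a
 * running (or shutting-down) processor, or one that has run since the delta
 * timestamp, always gets the full snapshot.
 */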
1958  static enum thread_classification
1959  classify_thread(thread_t thread, boolean_t * thread_on_core_p, uint64_t trace_flags)
1960  {
1961  	boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
1962  
1963  	processor_t last_processor = thread->last_processor;
1964  
1965  	boolean_t thread_on_core =
1966  	    (last_processor != PROCESSOR_NULL &&
1967  	    (last_processor->state == PROCESSOR_SHUTDOWN || last_processor->state == PROCESSOR_RUNNING) &&
1968  	    last_processor->active_thread == thread);
1969  
1970  	*thread_on_core_p = thread_on_core;
1971  
1972  	/* Capture the full thread snapshot if this is not a delta stackshot or if the thread has run subsequent to the
1973  	 * previous full stackshot */
1974  	if (!collect_delta_stackshot || thread_on_core || (thread->last_run_time > stack_snapshot_delta_since_timestamp)) {
1975  		return tc_full_snapshot;
1976  	} else {
1977  		return tc_delta_snapshot;
1978  	}
1979  }
1980  
1981  struct stackshot_context {
1982  	int pid;
1983  	uint64_t trace_flags;
1984  };
1985  
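/*
 * Record everything for a single task: the task (or task-delta) snapshot, a
 * full or delta entry per thread, wait/turnstile owner info, and any
 * importance-donating pids. For delta stackshots, the thread list is walked
 * an extra time up front to decide whether the task needs full treatment.
 */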
1986  static kern_return_t
1987  kdp_stackshot_record_task(struct stackshot_context *ctx, task_t task)
1988  {
1989  	boolean_t active_kthreads_only_p  = ((ctx->trace_flags & STACKSHOT_ACTIVE_KERNEL_THREADS_ONLY) != 0);
1990  	boolean_t save_donating_pids_p    = ((ctx->trace_flags & STACKSHOT_SAVE_IMP_DONATION_PIDS) != 0);
1991  	boolean_t collect_delta_stackshot = ((ctx->trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
1992  	boolean_t save_owner_info         = ((ctx->trace_flags & STACKSHOT_THREAD_WAITINFO) != 0);
1993  
1994  	kern_return_t error = KERN_SUCCESS;
1995  	mach_vm_address_t out_addr = 0;
1996  	int saved_count = 0;
1997  
1998  	int task_pid                   = 0;
1999  	uint64_t task_uniqueid         = 0;
2000  	int num_delta_thread_snapshots = 0;
2001  	int num_waitinfo_threads       = 0;
2002  	int num_turnstileinfo_threads  = 0;
2003  
2004  	uint64_t task_start_abstime    = 0;
2005  	boolean_t have_map = FALSE, have_pmap = FALSE;
2006  	boolean_t some_thread_ran = FALSE;
2007  	unaligned_u64 task_snap_ss_flags = 0;
2008  
2009  #if STACKSHOT_COLLECTS_LATENCY_INFO
2010  	struct stackshot_latency_task latency_info;
2011  	latency_info.setup_latency = mach_absolute_time();
2012  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2013  
2014  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2015  	uint64_t task_begin_cpu_cycle_count = 0;
2016  	if (!panic_stackshot) {
2017  		task_begin_cpu_cycle_count = mt_cur_cpu_cycles();
2018  	}
2019  #endif
2020  
2021  	if ((task == NULL) || !ml_validate_nofault((vm_offset_t)task, sizeof(struct task))) {
2022  		error = KERN_FAILURE;
2023  		goto error_exit;
2024  	}
2025  
2026  	have_map = (task->map != NULL) && (ml_validate_nofault((vm_offset_t)(task->map), sizeof(struct _vm_map)));
2027  	have_pmap = have_map && (task->map->pmap != NULL) && (ml_validate_nofault((vm_offset_t)(task->map->pmap), sizeof(struct pmap)));
2028  
2029  	task_pid = pid_from_task(task);
2030  	task_uniqueid = get_task_uniqueid(task);
2031  
2032  	if (!task->active || task_is_a_corpse(task) || task_is_a_corpse_fork(task)) {
2033  		/*
2034  		 * Not interested in terminated tasks without threads.
2035  		 */
2036  		if (queue_empty(&task->threads) || task_pid == -1) {
2037  			return KERN_SUCCESS;
2038  		}
2039  	}
2040  
2041  	/* All PIDs should have the MSB unset */
2042  	assert((task_pid & (1ULL << 31)) == 0);
2043  
2044  #if STACKSHOT_COLLECTS_LATENCY_INFO
2045  	latency_info.setup_latency = mach_absolute_time() - latency_info.setup_latency;
2046  	latency_info.task_uniqueid = task_uniqueid;
2047  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2048  
2049  	/* Trace everything, unless a process was specified */
2050  	if ((ctx->pid == -1) || (ctx->pid == task_pid)) {
2051  		/* add task snapshot marker */
2052  		kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN,
2053  		    STACKSHOT_KCCONTAINER_TASK, task_uniqueid));
2054  
2055  		if (collect_delta_stackshot) {
2056  			/*
2057  			 * For delta stackshots we need to know if a thread from this task has run since the
2058  			 * previous timestamp to decide whether we're going to record a full snapshot and UUID info.
2059  			 */
2060  			thread_t thread = THREAD_NULL;
2061  			queue_iterate(&task->threads, thread, thread_t, task_threads)
2062  			{
2063  				if ((thread == NULL) || !ml_validate_nofault((vm_offset_t)thread, sizeof(struct thread))) {
2064  					error = KERN_FAILURE;
2065  					goto error_exit;
2066  				}
2067  
2068  				if (active_kthreads_only_p && thread->kernel_stack == 0) {
2069  					continue;
2070  				}
2071  
2072  				boolean_t thread_on_core;
2073  				enum thread_classification thread_classification = classify_thread(thread, &thread_on_core, ctx->trace_flags);
2074  
2075  				switch (thread_classification) {
2076  				case tc_full_snapshot:
2077  					some_thread_ran = TRUE;
2078  					break;
2079  				case tc_delta_snapshot:
2080  					num_delta_thread_snapshots++;
2081  					break;
2082  				}
2083  			}
2084  		}
2085  
2086  		if (collect_delta_stackshot) {
2087  			proc_starttime_kdp(task->bsd_info, NULL, NULL, &task_start_abstime);
2088  		}
2089  
2090  		/* Next record any relevant UUID info and store the task snapshot */
2091  		if (!collect_delta_stackshot ||
2092  		    (task_start_abstime == 0) ||
2093  		    (task_start_abstime > stack_snapshot_delta_since_timestamp) ||
2094  		    some_thread_ran) {
2095  			/*
2096  			 * Collect full task information in these scenarios:
2097  			 *
2098  			 * 1) a full stackshot
2099  			 * 2) a delta stackshot where the task started after the previous full stackshot
2100  			 * 3) a delta stackshot where any thread from the task has run since the previous full stackshot
2101  			 *
			 * because the task may have exec'ed, changing its name, architecture, load info, etc.
2103  			 */
2104  
2105  			kcd_exit_on_error(kcdata_record_shared_cache_info(stackshot_kcdata_p, task, &task_snap_ss_flags));
2106  			kcd_exit_on_error(kcdata_record_uuid_info(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, &task_snap_ss_flags));
2107  #if STACKSHOT_COLLECTS_LATENCY_INFO
2108  			kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags, &latency_info));
2109  #else
2110  			kcd_exit_on_error(kcdata_record_task_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags));
2111  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2112  		} else {
2113  			kcd_exit_on_error(kcdata_record_task_delta_snapshot(stackshot_kcdata_p, task, ctx->trace_flags, have_pmap, task_snap_ss_flags));
2114  		}
2115  
2116  #if STACKSHOT_COLLECTS_LATENCY_INFO
2117  		latency_info.misc_latency = mach_absolute_time();
2118  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2119  
2120  		struct thread_delta_snapshot_v3 * delta_snapshots = NULL;
2121  		int current_delta_snapshot_index                  = 0;
2122  		if (num_delta_thread_snapshots > 0) {
2123  			kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_DELTA_SNAPSHOT,
2124  			    sizeof(struct thread_delta_snapshot_v3),
2125  			    num_delta_thread_snapshots, &out_addr));
2126  			delta_snapshots = (struct thread_delta_snapshot_v3 *)out_addr;
2127  		}
2128  
2129  
2130  #if STACKSHOT_COLLECTS_LATENCY_INFO
2131  		latency_info.task_thread_count_loop_latency = mach_absolute_time();
2132  #endif
2133  		/*
2134  		 * Iterate over the task threads to save thread snapshots and determine
2135  		 * how much space we need for waitinfo and turnstile info
2136  		 */
2137  		thread_t thread = THREAD_NULL;
2138  		queue_iterate(&task->threads, thread, thread_t, task_threads)
2139  		{
2140  			if ((thread == NULL) || !ml_validate_nofault((vm_offset_t)thread, sizeof(struct thread))) {
2141  				error = KERN_FAILURE;
2142  				goto error_exit;
2143  			}
2144  
2145  			uint64_t thread_uniqueid;
2146  			if (active_kthreads_only_p && thread->kernel_stack == 0) {
2147  				continue;
2148  			}
2149  			thread_uniqueid = thread_tid(thread);
2150  
2151  			boolean_t thread_on_core;
2152  			enum thread_classification thread_classification = classify_thread(thread, &thread_on_core, ctx->trace_flags);
2153  
2154  			switch (thread_classification) {
2155  			case tc_full_snapshot:
2156  				/* add thread marker */
2157  				kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_BEGIN,
2158  				    STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
2159  
2160  				/* thread snapshot can be large, including strings, avoid overflowing the stack. */
2161  				kcdata_compression_window_open(stackshot_kcdata_p);
2162  
2163  				kcd_exit_on_error(kcdata_record_thread_snapshot(stackshot_kcdata_p, thread, task, ctx->trace_flags, have_pmap, thread_on_core));
2164  
2165  				kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p));
2166  
2167  				/* mark end of thread snapshot data */
2168  				kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END,
2169  				    STACKSHOT_KCCONTAINER_THREAD, thread_uniqueid));
2170  				break;
2171  			case tc_delta_snapshot:
2172  				kcd_exit_on_error(kcdata_record_thread_delta_snapshot(&delta_snapshots[current_delta_snapshot_index++], thread, thread_on_core));
2173  				break;
2174  			}
2175  
			/*
			 * We want to report owner information regardless of whether the
			 * thread has changed since the last delta, whether this is a normal
			 * stackshot, or whether the thread is nonrunnable.
			 */
2181  			if (save_owner_info) {
2182  				if (stackshot_thread_has_valid_waitinfo(thread)) {
2183  					num_waitinfo_threads++;
2184  				}
2185  
2186  				if (stackshot_thread_has_valid_turnstileinfo(thread)) {
2187  					num_turnstileinfo_threads++;
2188  				}
2189  			}
2190  		}
2191  #if STACKSHOT_COLLECTS_LATENCY_INFO
2192  		latency_info.task_thread_count_loop_latency = mach_absolute_time() - latency_info.task_thread_count_loop_latency;
2193  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2194  
2195  
2196  		thread_waitinfo_t *thread_waitinfo           = NULL;
2197  		thread_turnstileinfo_t *thread_turnstileinfo = NULL;
2198  		int current_waitinfo_index              = 0;
2199  		int current_turnstileinfo_index         = 0;
		/* allocate space for the wait and turnstile info */
2201  		if (num_waitinfo_threads > 0 || num_turnstileinfo_threads > 0) {
2202  			/* thread waitinfo and turnstileinfo can be quite large, avoid overflowing the stack */
2203  			kcdata_compression_window_open(stackshot_kcdata_p);
2204  
2205  			if (num_waitinfo_threads > 0) {
2206  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_WAITINFO,
2207  				    sizeof(thread_waitinfo_t), num_waitinfo_threads, &out_addr));
2208  				thread_waitinfo = (thread_waitinfo_t *)out_addr;
2209  			}
2210  
2211  			if (num_turnstileinfo_threads > 0) {
2212  				/* get space for the turnstile info */
2213  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_TURNSTILEINFO,
2214  				    sizeof(thread_turnstileinfo_t), num_turnstileinfo_threads, &out_addr));
2215  				thread_turnstileinfo = (thread_turnstileinfo_t *)out_addr;
2216  			}
2217  		}
2218  
2219  #if STACKSHOT_COLLECTS_LATENCY_INFO
2220  		latency_info.misc_latency = mach_absolute_time() - latency_info.misc_latency;
2221  		latency_info.task_thread_data_loop_latency = mach_absolute_time();
2222  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2223  
2224  		/* Iterate over the task's threads to save the wait and turnstile info */
2225  		queue_iterate(&task->threads, thread, thread_t, task_threads)
2226  		{
2227  			uint64_t thread_uniqueid;
2228  
2229  			if (active_kthreads_only_p && thread->kernel_stack == 0) {
2230  				continue;
2231  			}
2232  
2233  			thread_uniqueid = thread_tid(thread);
2234  
			/* If we want owner info, capture it regardless of the thread's classification */
2236  			if (save_owner_info) {
2237  				if (stackshot_thread_has_valid_waitinfo(thread)) {
2238  					stackshot_thread_wait_owner_info(
2239  						thread,
2240  						&thread_waitinfo[current_waitinfo_index++]);
2241  				}
2242  
2243  				if (stackshot_thread_has_valid_turnstileinfo(thread)) {
2244  					stackshot_thread_turnstileinfo(
2245  						thread,
2246  						&thread_turnstileinfo[current_turnstileinfo_index++]);
2247  				}
2248  			}
2249  		}
2250  
2251  #if STACKSHOT_COLLECTS_LATENCY_INFO
2252  		latency_info.task_thread_data_loop_latency = mach_absolute_time() - latency_info.task_thread_data_loop_latency;
2253  		latency_info.misc2_latency = mach_absolute_time();
2254  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2255  
2256  #if DEBUG || DEVELOPMENT
2257  		if (current_delta_snapshot_index != num_delta_thread_snapshots) {
2258  			panic("delta thread snapshot count mismatch while capturing snapshots for task %p. expected %d, found %d", task,
2259  			    num_delta_thread_snapshots, current_delta_snapshot_index);
2260  		}
2261  		if (current_waitinfo_index != num_waitinfo_threads) {
2262  			panic("thread wait info count mismatch while capturing snapshots for task %p. expected %d, found %d", task,
2263  			    num_waitinfo_threads, current_waitinfo_index);
2264  		}
2265  #endif
2266  
2267  		if (num_waitinfo_threads > 0 || num_turnstileinfo_threads > 0) {
2268  			kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p));
2269  		}
2270  
2271  #if IMPORTANCE_INHERITANCE
2272  		if (save_donating_pids_p) {
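			/*
			 * Verify up front that the buffer can hold a worst-case pid list,
			 * let task_importance_list_pids() write directly into the unused
			 * tail of the buffer, then register the array header once the
			 * actual count is known.
			 */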
2273  			kcd_exit_on_error(
2274  				((((mach_vm_address_t)kcd_end_address(stackshot_kcdata_p) + (TASK_IMP_WALK_LIMIT * sizeof(int32_t))) <
2275  				(mach_vm_address_t)kcd_max_address(stackshot_kcdata_p))
2276  				? KERN_SUCCESS
2277  				: KERN_RESOURCE_SHORTAGE));
2278  			saved_count = task_importance_list_pids(task, TASK_IMP_LIST_DONATING_PIDS,
2279  			    (void *)kcd_end_address(stackshot_kcdata_p), TASK_IMP_WALK_LIMIT);
2280  			if (saved_count > 0) {
2281  				/* Variable size array - better not have it on the stack. */
2282  				kcdata_compression_window_open(stackshot_kcdata_p);
2283  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_DONATING_PIDS,
2284  				    sizeof(int32_t), saved_count, &out_addr));
2285  				kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p));
2286  			}
2287  		}
2288  #endif
2289  
2290  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2291  		if (!panic_stackshot) {
2292  			kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - task_begin_cpu_cycle_count),
2293  			    "task_cpu_cycle_count"));
2294  		}
2295  #endif
2296  
2297  #if STACKSHOT_COLLECTS_LATENCY_INFO
2298  		latency_info.misc2_latency = mach_absolute_time() - latency_info.misc2_latency;
2299  		if (collect_latency_info) {
2300  			kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_LATENCY_INFO_TASK, sizeof(latency_info), &latency_info));
2301  		}
2302  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2303  
2304  		/* mark end of task snapshot data */
2305  		kcd_exit_on_error(kcdata_add_container_marker(stackshot_kcdata_p, KCDATA_TYPE_CONTAINER_END, STACKSHOT_KCCONTAINER_TASK,
2306  		    task_uniqueid));
2307  	}
2308  
2309  
2310  error_exit:
2311  	return error;
2312  }
2313  
2314  
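/*
 * Top-level stackshot formatter: emits the global header items (flags,
 * versions, timebase, shared cache info, memory stats, thread groups), walks
 * the live task list, records jetsam coalitions, walks the terminated task
 * list, and finishes with fault stats, latency info, durations, and the
 * final output flags.
 */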
2315  static kern_return_t
2316  kdp_stackshot_kcdata_format(int pid, uint64_t trace_flags, uint32_t * pBytesTraced, uint32_t * pBytesUncompressed)
2317  {
2318  	kern_return_t error        = KERN_SUCCESS;
2319  	mach_vm_address_t out_addr = 0;
2320  	uint64_t abs_time = 0, abs_time_end = 0;
2321  	uint64_t system_state_flags = 0;
2322  	task_t task = TASK_NULL;
2323  	mach_timebase_info_data_t timebase = {0, 0};
2324  	uint32_t length_to_copy = 0, tmp32 = 0;
2325  	abs_time = mach_absolute_time();
2326  	uint64_t last_task_start_time = 0;
2327  
2328  #if STACKSHOT_COLLECTS_LATENCY_INFO
2329  	struct stackshot_latency_collection latency_info;
2330  #endif
2331  
2332  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2333  	uint64_t stackshot_begin_cpu_cycle_count = 0;
2334  
2335  	if (!panic_stackshot) {
2336  		stackshot_begin_cpu_cycle_count = mt_cur_cpu_cycles();
2337  	}
2338  #endif
2339  
2340  #if STACKSHOT_COLLECTS_LATENCY_INFO
	collect_latency_info = !(trace_flags & STACKSHOT_DISABLE_LATENCY_INFO);
2342  #endif
2343  
2344  	/* process the flags */
2345  	boolean_t collect_delta_stackshot = ((trace_flags & STACKSHOT_COLLECT_DELTA_SNAPSHOT) != 0);
2346  	boolean_t use_fault_path          = ((trace_flags & (STACKSHOT_ENABLE_UUID_FAULTING | STACKSHOT_ENABLE_BT_FAULTING)) != 0);
2347  	stack_enable_faulting = (trace_flags & (STACKSHOT_ENABLE_BT_FAULTING));
2348  
2349  	/* Currently we only support returning explicit KEXT load info on fileset kernels */
2350  	kc_format_t primary_kc_type = KCFormatUnknown;
2351  	if (PE_get_primary_kc_format(&primary_kc_type) && (primary_kc_type != KCFormatFileset)) {
2352  		trace_flags &= ~(STACKSHOT_SAVE_KEXT_LOADINFO);
2353  	}
2354  
2355  	struct stackshot_context ctx = {};
2356  	ctx.trace_flags = trace_flags;
2357  	ctx.pid = pid;
2358  
2359  	if (use_fault_path) {
2360  		fault_stats.sfs_pages_faulted_in = 0;
2361  		fault_stats.sfs_time_spent_faulting = 0;
2362  		fault_stats.sfs_stopped_faulting = (uint8_t) FALSE;
2363  	}
2364  
2365  	if (sizeof(void *) == 8) {
2366  		system_state_flags |= kKernel64_p;
2367  	}
2368  
2369  	if (stackshot_kcdata_p == NULL || pBytesTraced == NULL) {
2370  		error = KERN_INVALID_ARGUMENT;
2371  		goto error_exit;
2372  	}
2373  
	/* Set up the timebase info: copied out below in some cases, and needed to convert since_timestamp to seconds for proc start times */
2375  	clock_timebase_info(&timebase);
2376  
2377  	/* begin saving data into the buffer */
2378  	*pBytesTraced = 0;
2379  	if (pBytesUncompressed) {
2380  		*pBytesUncompressed = 0;
2381  	}
2382  	kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, trace_flags, "stackshot_in_flags"));
2383  	kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, (uint32_t)pid, "stackshot_in_pid"));
2384  	kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, system_state_flags, "system_state_flags"));
2385  	if (trace_flags & STACKSHOT_PAGE_TABLES) {
2386  		kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, stack_snapshot_pagetable_mask, "stackshot_pagetable_mask"));
2387  	}
2388  	if (stackshot_initial_estimate != 0) {
2389  		kcd_exit_on_error(kcdata_add_uint32_with_description(stackshot_kcdata_p, stackshot_initial_estimate, "stackshot_size_estimate"));
2390  	}
2391  
2392  #if STACKSHOT_COLLECTS_LATENCY_INFO
2393  	latency_info.setup_latency = mach_absolute_time();
2394  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2395  
2396  #if CONFIG_JETSAM
2397  	tmp32 = memorystatus_get_pressure_status_kdp();
2398  	kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_LEVEL, sizeof(uint32_t), &tmp32));
2399  #endif
2400  
2401  	if (!collect_delta_stackshot) {
2402  		tmp32 = THREAD_POLICY_INTERNAL_STRUCT_VERSION;
2403  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_POLICY_VERSION, sizeof(uint32_t), &tmp32));
2404  
2405  		tmp32 = PAGE_SIZE;
2406  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_KERN_PAGE_SIZE, sizeof(uint32_t), &tmp32));
2407  
2408  		/* save boot-args and osversion string */
		length_to_copy = MIN((uint32_t)(strlen(version) + 1), OSVERSIZE);
2410  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_OSVERSION, length_to_copy, (const void *)version));
2411  
2412  
		length_to_copy = MIN((uint32_t)(strlen(PE_boot_args()) + 1), BOOT_LINE_LENGTH);
2414  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_BOOTARGS, length_to_copy, PE_boot_args()));
2415  
2416  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_TIMEBASE, sizeof(timebase), &timebase));
2417  	} else {
2418  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_DELTA_SINCE_TIMESTAMP, sizeof(uint64_t), &stack_snapshot_delta_since_timestamp));
2419  	}
2420  
2421  	kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_MACH_ABSOLUTE_TIME, sizeof(uint64_t), &abs_time));
2422  
2423  	kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, KCDATA_TYPE_USECS_SINCE_EPOCH, sizeof(uint64_t), &stackshot_microsecs));
2424  
2425  	/* record system level shared cache load info (if available) */
2426  	if (!collect_delta_stackshot && primary_system_shared_region &&
2427  	    ml_validate_nofault((vm_offset_t)primary_system_shared_region, sizeof(struct vm_shared_region))) {
2428  		struct dyld_shared_cache_loadinfo sys_shared_cache_info = {0};
2429  
2430  		/*
2431  		 * Historically, this data was in a dyld_uuid_info_64 structure, but the
2432  		 * naming of both the structure and fields for this use isn't great.  The
2433  		 * dyld_shared_cache_loadinfo structure has better names, but the same
2434  		 * layout and content as the original.
2435  		 *
2436  		 * The imageSlidBaseAddress/sharedCacheUnreliableSlidBaseAddress field
2437  		 * has been used inconsistently for STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT
2438  		 * entries; here, it's the slid base address, and we leave it that way
2439  		 * for backwards compatibility.
2440  		 */
2441  		stackshot_memcpy(sys_shared_cache_info.sharedCacheUUID, &primary_system_shared_region->sr_uuid, sizeof(primary_system_shared_region->sr_uuid));
2442  		sys_shared_cache_info.sharedCacheSlide =
2443  		    primary_system_shared_region->sr_slide;
2444  		sys_shared_cache_info.sharedCacheUnreliableSlidBaseAddress =
2445  		    primary_system_shared_region->sr_slide + primary_system_shared_region->sr_base_address;
2446  		sys_shared_cache_info.sharedCacheSlidFirstMapping =
2447  		    primary_system_shared_region->sr_base_address + primary_system_shared_region->sr_first_mapping;
2448  
2449  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO,
2450  		    sizeof(sys_shared_cache_info), &sys_shared_cache_info));
2451  
2452  		if (trace_flags & STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT) {
2453  			/*
2454  			 * Include a map of the system shared cache layout if it has been populated
2455  			 * (which is only when the system is using a custom shared cache).
2456  			 */
2457  			if (primary_system_shared_region->sr_images && ml_validate_nofault((vm_offset_t)primary_system_shared_region->sr_images,
2458  			    (primary_system_shared_region->sr_images_count * sizeof(struct dyld_uuid_info_64)))) {
2459  				assert(primary_system_shared_region->sr_images_count != 0);
2460  				kcd_exit_on_error(kcdata_push_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT, sizeof(struct dyld_uuid_info_64), primary_system_shared_region->sr_images_count, primary_system_shared_region->sr_images));
2461  			}
2462  		}
2463  	}
2464  
2465  	/* Add requested information first */
2466  	if (trace_flags & STACKSHOT_GET_GLOBAL_MEM_STATS) {
2467  		struct mem_and_io_snapshot mais = {0};
2468  		kdp_mem_and_io_snapshot(&mais);
2469  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_GLOBAL_MEM_STATS, sizeof(mais), &mais));
2470  	}
2471  
2472  #if CONFIG_THREAD_GROUPS
2473  	struct thread_group_snapshot_v2 *thread_groups = NULL;
2474  	int num_thread_groups = 0;
2475  
2476  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2477  	uint64_t thread_group_begin_cpu_cycle_count = 0;
2478  
2479  	if (!panic_stackshot && (trace_flags & STACKSHOT_THREAD_GROUP)) {
2480  		thread_group_begin_cpu_cycle_count = mt_cur_cpu_cycles();
2481  	}
2482  #endif
2483  
2484  
2485  	/* Iterate over thread group names */
2486  	if (trace_flags & STACKSHOT_THREAD_GROUP) {
2487  		/* Variable size array - better not have it on the stack. */
2488  		kcdata_compression_window_open(stackshot_kcdata_p);
2489  
2490  		if (thread_group_iterate_stackshot(stackshot_thread_group_count, &num_thread_groups) != KERN_SUCCESS) {
2491  			trace_flags &= ~(STACKSHOT_THREAD_GROUP);
2492  		}
2493  
2494  		if (num_thread_groups > 0) {
2495  			kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT, sizeof(struct thread_group_snapshot_v2), num_thread_groups, &out_addr));
2496  			thread_groups = (struct thread_group_snapshot_v2 *)out_addr;
2497  		}
2498  
2499  		if (thread_group_iterate_stackshot(stackshot_thread_group_snapshot, thread_groups) != KERN_SUCCESS) {
2500  			error = KERN_FAILURE;
2501  			goto error_exit;
2502  		}
2503  
2504  		kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p));
2505  	}
2506  
2507  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2508  	if (!panic_stackshot && (thread_group_begin_cpu_cycle_count != 0)) {
2509  		kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - thread_group_begin_cpu_cycle_count),
2510  		    "thread_groups_cpu_cycle_count"));
2511  	}
2512  #endif
2513  #else
2514  	trace_flags &= ~(STACKSHOT_THREAD_GROUP);
2515  #endif /* CONFIG_THREAD_GROUPS */
2516  
2517  
2518  #if STACKSHOT_COLLECTS_LATENCY_INFO
2519  	latency_info.setup_latency = mach_absolute_time() - latency_info.setup_latency;
2520  	latency_info.total_task_iteration_latency = mach_absolute_time();
2521  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2522  
2523  	/* Iterate over tasks */
2524  	queue_iterate(&tasks, task, task_t, tasks)
2525  	{
2526  		if (collect_delta_stackshot) {
2527  			uint64_t abstime;
2528  			proc_starttime_kdp(task->bsd_info, NULL, NULL, &abstime);
2529  
2530  			if (abstime > last_task_start_time) {
2531  				last_task_start_time = abstime;
2532  			}
2533  		}
2534  
2535  		error = kdp_stackshot_record_task(&ctx, task);
2536  		if (error) {
2537  			goto error_exit;
2538  		}
2539  	}
2540  
2541  
2542  #if STACKSHOT_COLLECTS_LATENCY_INFO
2543  	latency_info.total_task_iteration_latency = mach_absolute_time() - latency_info.total_task_iteration_latency;
2544  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2545  
2546  #if CONFIG_COALITIONS
	/* Don't collect jetsam coalition data in delta stackshots - these don't change */
2548  	if (!collect_delta_stackshot || (last_task_start_time > stack_snapshot_delta_since_timestamp)) {
2549  		int num_coalitions = 0;
2550  		struct jetsam_coalition_snapshot *coalitions = NULL;
2551  
2552  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2553  		uint64_t coalition_begin_cpu_cycle_count = 0;
2554  
2555  		if (!panic_stackshot && (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS)) {
2556  			coalition_begin_cpu_cycle_count = mt_cur_cpu_cycles();
2557  		}
2558  #endif /* INTERRUPT_MASKED_DEBUG && MONOTONIC */
2559  
2560  		/* Iterate over coalitions */
2561  		if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) {
2562  			if (coalition_iterate_stackshot(stackshot_coalition_jetsam_count, &num_coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) {
2563  				trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS);
2564  			}
2565  		}
2566  		if (trace_flags & STACKSHOT_SAVE_JETSAM_COALITIONS) {
2567  			if (num_coalitions > 0) {
2568  				/* Variable size array - better not have it on the stack. */
2569  				kcdata_compression_window_open(stackshot_kcdata_p);
2570  				kcd_exit_on_error(kcdata_get_memory_addr_for_array(stackshot_kcdata_p, STACKSHOT_KCTYPE_JETSAM_COALITION_SNAPSHOT, sizeof(struct jetsam_coalition_snapshot), num_coalitions, &out_addr));
2571  				coalitions = (struct jetsam_coalition_snapshot*)out_addr;
2572  
2573  				if (coalition_iterate_stackshot(stackshot_coalition_jetsam_snapshot, coalitions, COALITION_TYPE_JETSAM) != KERN_SUCCESS) {
2574  					error = KERN_FAILURE;
2575  					goto error_exit;
2576  				}
2577  
2578  				kcd_exit_on_error(kcdata_compression_window_close(stackshot_kcdata_p));
2579  			}
2580  		}
2581  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2582  		if (!panic_stackshot && (coalition_begin_cpu_cycle_count != 0)) {
2583  			kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - coalition_begin_cpu_cycle_count),
2584  			    "coalitions_cpu_cycle_count"));
2585  		}
2586  #endif /* INTERRUPT_MASKED_DEBUG && MONOTONIC */
2587  	}
2588  #else
2589  	trace_flags &= ~(STACKSHOT_SAVE_JETSAM_COALITIONS);
2590  #endif /* CONFIG_COALITIONS */
2591  
2592  #if STACKSHOT_COLLECTS_LATENCY_INFO
2593  	latency_info.total_terminated_task_iteration_latency = mach_absolute_time();
2594  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2595  
2596  	/*
2597  	 * Iterate over the tasks in the terminated tasks list. We only inspect
2598  	 * tasks that have a valid bsd_info pointer where P_LPEXIT is NOT set.
2599  	 * We're only interested in tasks that have remaining threads (which
2600  	 * could be involved in a deadlock, etc), and the last thread that tears
2601  	 * itself down during exit sets P_LPEXIT during proc_exit().
2602  	 */
2603  	queue_iterate(&terminated_tasks, task, task_t, tasks)
2604  	{
2605  		if (task->bsd_info && !proc_in_teardown(task->bsd_info)) {
2606  			error = kdp_stackshot_record_task(&ctx, task);
2607  			if (error) {
2608  				goto error_exit;
2609  			}
2610  		}
2611  	}
2612  
2613  #if STACKSHOT_COLLECTS_LATENCY_INFO
2614  	latency_info.total_terminated_task_iteration_latency = mach_absolute_time() - latency_info.total_terminated_task_iteration_latency;
2615  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2616  
2617  	if (use_fault_path) {
2618  		kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_FAULT_STATS,
2619  		    sizeof(struct stackshot_fault_stats), &fault_stats);
2620  	}
2621  
2622  #if STACKSHOT_COLLECTS_LATENCY_INFO
2623  	if (collect_latency_info) {
2624  		latency_info.latency_version = 1;
2625  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_LATENCY_INFO, sizeof(latency_info), &latency_info));
2626  	}
2627  #endif /* STACKSHOT_COLLECTS_LATENCY_INFO */
2628  
2629  	/* update timestamp of the stackshot */
2630  	abs_time_end = mach_absolute_time();
2631  	struct stackshot_duration_v2 stackshot_duration = {
2632  		.stackshot_duration         = (abs_time_end - abs_time),
2633  		.stackshot_duration_outer   = 0,
2634  		.stackshot_duration_prior   = stackshot_duration_prior_abs,
2635  	};
2636  
2637  	if ((trace_flags & STACKSHOT_DO_COMPRESS) == 0) {
2638  		kcd_exit_on_error(kcdata_get_memory_addr(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_DURATION,
2639  		    sizeof(struct stackshot_duration_v2), &out_addr));
2640  		struct stackshot_duration_v2 *duration_p = (void *) out_addr;
2641  		stackshot_memcpy(duration_p, &stackshot_duration, sizeof(*duration_p));
2642  		stackshot_duration_outer                   = (unaligned_u64 *)&duration_p->stackshot_duration_outer;
2643  	} else {
2644  		kcd_exit_on_error(kcdata_push_data(stackshot_kcdata_p, STACKSHOT_KCTYPE_STACKSHOT_DURATION, sizeof(stackshot_duration), &stackshot_duration));
2645  		stackshot_duration_outer = NULL;
2646  	}
2647  
2648  #if INTERRUPT_MASKED_DEBUG && MONOTONIC
2649  	if (!panic_stackshot) {
2650  		kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, (mt_cur_cpu_cycles() - stackshot_begin_cpu_cycle_count),
2651  		    "stackshot_total_cpu_cycle_cnt"));
2652  	}
2653  #endif /* INTERRUPT_MASKED_DEBUG && MONOTONIC */
2654  
2655  	kcd_finalize_compression(stackshot_kcdata_p);
2656  	kcd_exit_on_error(kcdata_add_uint64_with_description(stackshot_kcdata_p, trace_flags, "stackshot_out_flags"));
2657  
2658  	kcd_exit_on_error(kcdata_write_buffer_end(stackshot_kcdata_p));
2659  
2660  	/*  === END of populating stackshot data === */
2661  
2662  	*pBytesTraced = (uint32_t) kcdata_memory_get_used_bytes(stackshot_kcdata_p);
2663  	*pBytesUncompressed = (uint32_t) kcdata_memory_get_uncompressed_bytes(stackshot_kcdata_p);
2664  
2665  error_exit:;
2666  
2667  #if INTERRUPT_MASKED_DEBUG
2668  	bool disable_interrupts_masked_check = kern_feature_override(
2669  		KF_INTERRUPT_MASKED_DEBUG_STACKSHOT_OVRD) ||
2670  	    (trace_flags & STACKSHOT_DO_COMPRESS) != 0;
2671  
2672  #if STACKSHOT_INTERRUPTS_MASKED_CHECK_DISABLED
2673  	disable_interrupts_masked_check = true;
2674  #endif /* STACKSHOT_INTERRUPTS_MASKED_CHECK_DISABLED */
2675  
2676  	if (disable_interrupts_masked_check) {
2677  		ml_spin_debug_clear_self();
2678  	}
2679  
2680  	if (!panic_stackshot && interrupt_masked_debug) {
2681  		/*
2682  		 * Try to catch instances where stackshot takes too long BEFORE returning from
2683  		 * the debugger
2684  		 */
2685  		ml_check_stackshot_interrupt_disabled_duration(current_thread());
2686  	}
2687  #endif /* INTERRUPT_MASKED_DEBUG */
2688  
2689  	stack_enable_faulting = FALSE;
2690  
2691  	return error;
2692  }
2693  
2694  static uint64_t
2695  proc_was_throttled_from_task(task_t task)
2696  {
2697  	uint64_t was_throttled = 0;
2698  
2699  	if (task->bsd_info) {
2700  		was_throttled = proc_was_throttled(task->bsd_info);
2701  	}
2702  
2703  	return was_throttled;
2704  }
2705  
2706  static uint64_t
2707  proc_did_throttle_from_task(task_t task)
2708  {
2709  	uint64_t did_throttle = 0;
2710  
2711  	if (task->bsd_info) {
2712  		did_throttle = proc_did_throttle(task->bsd_info);
2713  	}
2714  
2715  	return did_throttle;
2716  }
2717  
2718  static void
2719  kdp_mem_and_io_snapshot(struct mem_and_io_snapshot *memio_snap)
2720  {
2721  	unsigned int pages_reclaimed;
2722  	unsigned int pages_wanted;
2723  	kern_return_t kErr;
2724  
2725  	uint64_t compressions = 0;
2726  	uint64_t decompressions = 0;
2727  
2728  	compressions = counter_load(&vm_statistics_compressions);
2729  	decompressions = counter_load(&vm_statistics_decompressions);
2730  
2731  	memio_snap->snapshot_magic = STACKSHOT_MEM_AND_IO_SNAPSHOT_MAGIC;
2732  	memio_snap->free_pages = vm_page_free_count;
2733  	memio_snap->active_pages = vm_page_active_count;
2734  	memio_snap->inactive_pages = vm_page_inactive_count;
2735  	memio_snap->purgeable_pages = vm_page_purgeable_count;
2736  	memio_snap->wired_pages = vm_page_wire_count;
2737  	memio_snap->speculative_pages = vm_page_speculative_count;
2738  	memio_snap->throttled_pages = vm_page_throttled_count;
2739  	memio_snap->busy_buffer_count = count_busy_buffers();
2740  	memio_snap->filebacked_pages = vm_page_pageable_external_count;
2741  	memio_snap->compressions = (uint32_t)compressions;
2742  	memio_snap->decompressions = (uint32_t)decompressions;
2743  	memio_snap->compressor_size = VM_PAGE_COMPRESSOR_COUNT;
2744  	kErr = mach_vm_pressure_monitor(FALSE, VM_PRESSURE_TIME_WINDOW, &pages_reclaimed, &pages_wanted);
2745  
2746  	if (!kErr) {
2747  		memio_snap->pages_wanted = (uint32_t)pages_wanted;
2748  		memio_snap->pages_reclaimed = (uint32_t)pages_reclaimed;
2749  		memio_snap->pages_wanted_reclaimed_valid = 1;
2750  	} else {
2751  		memio_snap->pages_wanted = 0;
2752  		memio_snap->pages_reclaimed = 0;
2753  		memio_snap->pages_wanted_reclaimed_valid = 0;
2754  	}
2755  }
2756  
2757  void
2758  stackshot_memcpy(void *dst, const void *src, size_t len)
2759  {
2760  #if defined(__arm__) || defined(__arm64__)
2761  	if (panic_stackshot) {
2762  		uint8_t *dest_bytes = (uint8_t *)dst;
2763  		const uint8_t *src_bytes = (const uint8_t *)src;
2764  		for (size_t i = 0; i < len; i++) {
2765  			dest_bytes[i] = src_bytes[i];
2766  		}
2767  	} else
2768  #endif /* defined(__arm__) || defined(__arm64__) */
2769  	memcpy(dst, src, len);
2770  }
2771  
2772  size_t
2773  stackshot_strlcpy(char *dst, const char *src, size_t maxlen)
2774  {
2775  	const size_t srclen = strlen(src);
2776  
2777  	if (srclen < maxlen) {
2778  		stackshot_memcpy(dst, src, srclen + 1);
2779  	} else if (maxlen != 0) {
2780  		stackshot_memcpy(dst, src, maxlen - 1);
2781  		dst[maxlen - 1] = '\0';
2782  	}
2783  
2784  	return srclen;
2785  }
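      /*
       * Illustrative usage (a sketch, not an existing call site): as with
       * strlcpy(), the return value is the full source length, so callers
       * can detect truncation by comparing it against the buffer size:
       *
       *	char name[16];
       *	if (stackshot_strlcpy(name, src, sizeof(name)) >= sizeof(name)) {
       *		// 'src' was truncated to 15 bytes plus the NUL terminator
       *	}
       */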
2786  
2787  static inline void
2788  kdp_extract_page_mask_and_size(vm_map_t map, int *effective_page_mask, int *effective_page_size)
2789  {
2790  	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
2791  		*effective_page_mask = VM_MAP_PAGE_MASK(map);
2792  		*effective_page_size = VM_MAP_PAGE_SIZE(map);
2793  	} else {
2794  		*effective_page_mask = PAGE_MASK;
2795  		*effective_page_size = PAGE_SIZE;
2796  	}
2797  }
2798  
2799  /*
2800   * Returns the physical address of the specified map:target address,
2801   * using the kdp fault path if requested and the page is not resident.
2802   */
2803  vm_offset_t
2804  kdp_find_phys(vm_map_t map, vm_offset_t target_addr, boolean_t try_fault, uint32_t *kdp_fault_results)
2805  {
2806  	vm_offset_t cur_phys_addr;
2807  	unsigned cur_wimg_bits;
2808  	uint64_t fault_start_time = 0;
2809  	int effective_page_mask, effective_page_size;
2810  
2811  	if (map == VM_MAP_NULL) {
2812  		return 0;
2813  	}
2814  
2815  	kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size);
2816  
2817  	cur_phys_addr = kdp_vtophys(map->pmap, target_addr);
2818  	if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
2819  		if (!try_fault || fault_stats.sfs_stopped_faulting) {
2820  			if (kdp_fault_results) {
2821  				*kdp_fault_results |= KDP_FAULT_RESULT_PAGED_OUT;
2822  			}
2823  
2824  			return 0;
2825  		}
2826  
2827  		/*
2828  		 * The pmap doesn't have a valid page so we start at the top level
2829  		 * vm map and try a lightweight fault. Update fault path usage stats.
2830  		 */
2831  		fault_start_time = mach_absolute_time();
2832  		cur_phys_addr = kdp_lightweight_fault(map, (target_addr & ~effective_page_mask));
2833  		fault_stats.sfs_time_spent_faulting += (mach_absolute_time() - fault_start_time);
2834  
2835  		if ((fault_stats.sfs_time_spent_faulting >= fault_stats.sfs_system_max_fault_time) && !panic_stackshot) {
2836  			fault_stats.sfs_stopped_faulting = (uint8_t) TRUE;
2837  		}
2838  
2839  		cur_phys_addr += (target_addr & effective_page_mask);
2840  
2841  		if (!pmap_valid_page((ppnum_t) atop(cur_phys_addr))) {
2842  			if (kdp_fault_results) {
2843  				*kdp_fault_results |= (KDP_FAULT_RESULT_TRIED_FAULT | KDP_FAULT_RESULT_PAGED_OUT);
2844  			}
2845  
2846  			return 0;
2847  		}
2848  
2849  		if (kdp_fault_results) {
2850  			*kdp_fault_results |= KDP_FAULT_RESULT_FAULTED_IN;
2851  		}
2852  
2853  		fault_stats.sfs_pages_faulted_in++;
2854  	} else {
2855  		/*
2856  		 * This check is done in kdp_lightweight_fault for the fault path.
2857  		 */
2858  		cur_wimg_bits = pmap_cache_attributes((ppnum_t) atop(cur_phys_addr));
2859  
2860  		if ((cur_wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
2861  			return 0;
2862  		}
2863  	}
2864  
2865  	return cur_phys_addr;
2866  }
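      /*
       * Illustrative (hypothetical) caller: translate a user address with
       * the fault path enabled and inspect the accumulated result flags;
       * a zero return means no safe translation exists.
       *
       *	uint32_t fault_results = 0;
       *	vm_offset_t phys = kdp_find_phys(map, uaddr, TRUE, &fault_results);
       *	if (phys == 0 && (fault_results & KDP_FAULT_RESULT_PAGED_OUT)) {
       *		// page unresolved; callers typically flag the backtrace as
       *		// truncated, as machine_trace_thread_get_kva does below
       *	}
       */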
2867  
2868  boolean_t
2869  kdp_copyin_word(
2870  	task_t task, uint64_t addr, uint64_t *result, boolean_t try_fault, uint32_t *kdp_fault_results)
2871  {
2872  	if (task_has_64Bit_addr(task)) {
2873  		return kdp_copyin(task->map, addr, result, sizeof(uint64_t), try_fault, kdp_fault_results);
2874  	} else {
2875  		uint32_t buf;
2876  		boolean_t r = kdp_copyin(task->map, addr, &buf, sizeof(uint32_t), try_fault, kdp_fault_results);
2877  		*result = buf;
2878  		return r;
2879  	}
2880  }
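      /*
       * Illustrative usage: staging through a uint32_t zero-extends the
       * word for 32-bit tasks, so callers can treat *result as a user
       * pointer regardless of the task's address-space width:
       *
       *	uint64_t value = 0;
       *	if (kdp_copyin_word(task, addr, &value, FALSE, NULL)) {
       *		// 'value' holds the 4- or 8-byte word at 'addr'
       *	}
       */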
2881  
2882  static int
2883  kdp_copyin_string_slowpath(
2884  	task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results)
2885  {
2886  	int i;
2887  	uint64_t validated = 0, valid_from;
2888  	uint64_t phys_src, phys_dest;
2889  	int effective_page_mask, effective_page_size;
2890  	vm_map_t map = task->map;
2891  
2892  	kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size);
2893  
2894  	for (i = 0; i < buf_sz; i++) {
2895  		if (validated == 0) {
2896  			valid_from = i;
2897  			phys_src = kdp_find_phys(map, addr + i, try_fault, kdp_fault_results);
2898  			phys_dest = kvtophys((vm_offset_t)&buf[i]);
2899  			uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
2900  			uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
2901  			if (phys_src && phys_dest) {
2902  				validated = MIN(src_rem, dst_rem);
2903  				if (validated) {
2904  					bcopy_phys(phys_src, phys_dest, 1);
2905  					validated--;
2906  				} else {
2907  					return 0;
2908  				}
2909  			} else {
2910  				return 0;
2911  			}
2912  		} else {
2913  			bcopy_phys(phys_src + (i - valid_from), phys_dest + (i - valid_from), 1);
2914  			validated--;
2915  		}
2916  
2917  		if (buf[i] == '\0') {
2918  			return i + 1;
2919  		}
2920  	}
2921  
2922  	/* ran out of space */
2923  	return -1;
2924  }
2925  
2926  int
2927  kdp_copyin_string(
2928  	task_t task, uint64_t addr, char *buf, int buf_sz, boolean_t try_fault, uint32_t *kdp_fault_results)
2929  {
2930  	/* try to opportunistically copyin 32 bytes; most strings should fit */
2931  	char optbuffer[32];
2932  	boolean_t res;
2933  
2934  	bzero(optbuffer, sizeof(optbuffer));
2935  	res = kdp_copyin(task->map, addr, optbuffer, sizeof(optbuffer), try_fault, kdp_fault_results);
2936  	if (res == FALSE || strnlen(optbuffer, sizeof(optbuffer)) == sizeof(optbuffer)) {
2937  		/* try the slowpath */
2938  		return kdp_copyin_string_slowpath(task, addr, buf, buf_sz, try_fault, kdp_fault_results);
2939  	}
2940  
2941  	/* success */
2942  	return (int) strlcpy(buf, optbuffer, buf_sz) + 1;
2943  }
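      /*
       * Illustrative usage: a positive return counts the bytes copied
       * including the NUL terminator, while -1 means the string did not
       * fit in 'buf':
       *
       *	char name[64];
       *	int len = kdp_copyin_string(task, addr, name, sizeof(name), FALSE, NULL);
       *	if (len > 0) {
       *		// 'name' is NUL-terminated; its length is len - 1
       *	}
       */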
2944  
2945  boolean_t
2946  kdp_copyin(vm_map_t map, uint64_t uaddr, void *dest, size_t size, boolean_t try_fault, uint32_t *kdp_fault_results)
2947  {
2948  	size_t rem = size;
2949  	char *kvaddr = dest;
2950  	int effective_page_mask, effective_page_size;
2951  
2952  	kdp_extract_page_mask_and_size(map, &effective_page_mask, &effective_page_size);
2953  
2954  #if defined(__arm__) || defined(__arm64__)
2955  	/* Identify if destination buffer is in panic storage area */
2956  	if (panic_stackshot && ((vm_offset_t)dest >= gPanicBase) && ((vm_offset_t)dest < (gPanicBase + gPanicSize))) {
2957  		if (((vm_offset_t)dest + size) > (gPanicBase + gPanicSize)) {
2958  			return FALSE;
2959  		}
2960  	}
2961  #endif /* defined(__arm__) || defined(__arm64__) */
2962  
2963  	while (rem) {
2964  		uint64_t phys_src = kdp_find_phys(map, uaddr, try_fault, kdp_fault_results);
2965  		uint64_t phys_dest = kvtophys((vm_offset_t)kvaddr);
2966  		uint64_t src_rem = effective_page_size - (phys_src & effective_page_mask);
2967  		uint64_t dst_rem = PAGE_SIZE - (phys_dest & PAGE_MASK);
2968  		size_t cur_size = (uint32_t) MIN(src_rem, dst_rem);
2969  		cur_size = MIN(cur_size, rem);
2970  
2971  		if (phys_src && phys_dest) {
2972  #if defined(__arm__) || defined(__arm64__)
2973  			/*
2974  			 * On arm devices the panic buffer is mapped as device memory and doesn't allow
2975  			 * unaligned accesses. To prevent these, we copy over bytes individually here.
2976  			 */
2977  			if (panic_stackshot) {
2978  				stackshot_memcpy(kvaddr, (const void *)phystokv(phys_src), cur_size);
2979  			} else
2980  #endif /* defined(__arm__) || defined(__arm64__) */
2981  			bcopy_phys(phys_src, phys_dest, cur_size);
2982  		} else {
2983  			break;
2984  		}
2985  
2986  		uaddr += cur_size;
2987  		kvaddr += cur_size;
2988  		rem -= cur_size;
2989  	}
2990  
2991  	return rem == 0;
2992  }
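      /*
       * Worked example of the chunking above (assuming 4 KiB pages on both
       * sides): copying 6000 bytes starting at offset 0xF00 within a user
       * page proceeds as 256 bytes (to the end of the first source page),
       * then 4096, then the remaining 1648 -- with each chunk additionally
       * clipped to the distance to the next destination page boundary.
       */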
2993  
2994  kern_return_t
2995  do_stackshot(void *context)
2996  {
2997  #pragma unused(context)
2998  	kdp_snapshot++;
2999  
3000  	stack_snapshot_ret = kdp_stackshot_kcdata_format(stack_snapshot_pid,
3001  	    stack_snapshot_flags,
3002  	    &stack_snapshot_bytes_traced,
3003  	    &stack_snapshot_bytes_uncompressed);
3004  
3005  	if (stack_snapshot_ret == KERN_SUCCESS && (stack_snapshot_flags & STACKSHOT_DO_COMPRESS)) {
3006  		kcdata_finish_compression(stackshot_kcdata_p);
3007  	}
3008  
3009  	kdp_snapshot--;
3010  	return stack_snapshot_ret;
3011  }
3012  
3013  /*
3014   * A fantastical routine that tries to be fast about returning
3015   * translations.  Caches the last page we found a translation
3016   * for, so that we can be quick about multiple queries to the
3017   * same page.  It turns out this is exactly the workflow
3018   * machine_trace_thread and its relatives tend to throw at us.
3019   *
3020   * Please reset the validation cache this uses (via
3021   * machine_trace_thread_clear_validation_cache) after a bulk lookup;
3022   * this isn't safe across a switch of the map or changes to a pmap.
3023   *
3024   * This also means that if zero is a valid KVA, we are
3025   * screwed.  Sucks to be us.  Fortunately, this should never
3026   * happen.
3027   */
3028  vm_offset_t
3029  machine_trace_thread_get_kva(vm_offset_t cur_target_addr, vm_map_t map, uint32_t *thread_trace_flags)
3030  {
3031  	vm_offset_t cur_target_page;
3032  	vm_offset_t cur_phys_addr;
3033  	vm_offset_t kern_virt_target_addr;
3034  	uint32_t kdp_fault_results = 0;
3035  
3036  	cur_target_page = atop(cur_target_addr);
3037  
3038  	if ((cur_target_page != prev_target_page) || validate_next_addr) {
3039  		/*
3040  		 * Alright; it wasn't our previous page.  So
3041  		 * we must validate that there is a page
3042  		 * table entry for this address under the
3043  		 * current pmap, and that it has default
3044  		 * cache attributes (otherwise it may not be
3045  		 * safe to access it).
3046  		 */
3047  		cur_phys_addr = kdp_find_phys(map, cur_target_addr, stack_enable_faulting, &kdp_fault_results);
3048  		if (thread_trace_flags) {
3049  			if (kdp_fault_results & KDP_FAULT_RESULT_PAGED_OUT) {
3050  				*thread_trace_flags |= kThreadTruncatedBT;
3051  			}
3052  
3053  			if (kdp_fault_results & KDP_FAULT_RESULT_TRIED_FAULT) {
3054  				*thread_trace_flags |= kThreadTriedFaultBT;
3055  			}
3056  
3057  			if (kdp_fault_results & KDP_FAULT_RESULT_FAULTED_IN) {
3058  				*thread_trace_flags |= kThreadFaultedBT;
3059  			}
3060  		}
3061  
3062  		if (cur_phys_addr == 0) {
3063  			return 0;
3064  		}
3065  		kern_virt_target_addr = phystokv(cur_phys_addr);
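      		/* Stash this translation for the next lookup on the same page */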
3066  		prev_target_page = cur_target_page;
3067  		prev_target_kva = (kern_virt_target_addr & ~PAGE_MASK);
3068  		validate_next_addr = FALSE;
3069  	} else {
3070  		/* Cache hit: reuse the translation stashed for this page */
3071  		kern_virt_target_addr = prev_target_kva + (cur_target_addr & PAGE_MASK);
3072  	}
3073  
3074  #if KASAN
3075  	kasan_notify_address(kern_virt_target_addr, sizeof(uint64_t));
3076  #endif /* KASAN */
3077  	return kern_virt_target_addr;
3078  }
3079  
3080  void
3081  machine_trace_thread_clear_validation_cache(void)
3082  {
3083  	validate_next_addr = TRUE;
3084  }
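      /*
       * Illustrative bulk-lookup pattern (hypothetical caller; 'frames',
       * 'nframes', and 'flags' are assumed locals): translate a run of
       * addresses against one map, then clear the one-entry cache before
       * the map or any pmap can change underneath it:
       *
       *	for (i = 0; i < nframes; i++) {
       *		vm_offset_t kva = machine_trace_thread_get_kva(frames[i], map, &flags);
       *		if (kva == 0) {
       *			break;	// unmapped or unsafe page; stop the walk
       *		}
       *		// ... read the frame through 'kva' ...
       *	}
       *	machine_trace_thread_clear_validation_cache();
       */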
3085  
3086  boolean_t
3087  stackshot_thread_is_idle_worker_unsafe(thread_t thread)
3088  {
3089  	/* When the pthread kext puts a worker thread to sleep, it will
3090  	 * set kThreadWaitParkedWorkQueue in the block_hint of the thread
3091  	 * struct. See parkit() in kern/kern_support.c in libpthread.
3092  	 */
3093  	return (thread->state & TH_WAIT) &&
3094  	       (thread->block_hint == kThreadWaitParkedWorkQueue);
3095  }
3096  
3097  #if CONFIG_COALITIONS
3098  static void
3099  stackshot_coalition_jetsam_count(void *arg, int i, coalition_t coal)
3100  {
3101  #pragma unused(i, coal)
3102  	unsigned int *coalition_count = (unsigned int*)arg;
3103  	(*coalition_count)++;
3104  }
3105  
3106  static void
3107  stackshot_coalition_jetsam_snapshot(void *arg, int i, coalition_t coal)
3108  {
3109  	if (coalition_type(coal) != COALITION_TYPE_JETSAM) {
3110  		return;
3111  	}
3112  
3113  	struct jetsam_coalition_snapshot *coalitions = (struct jetsam_coalition_snapshot*)arg;
3114  	struct jetsam_coalition_snapshot *jcs = &coalitions[i];
3115  	task_t leader = TASK_NULL;
3116  	jcs->jcs_id = coalition_id(coal);
3117  	jcs->jcs_flags = 0;
3118  	jcs->jcs_thread_group = 0;
3119  
3120  	if (coalition_term_requested(coal)) {
3121  		jcs->jcs_flags |= kCoalitionTermRequested;
3122  	}
3123  	if (coalition_is_terminated(coal)) {
3124  		jcs->jcs_flags |= kCoalitionTerminated;
3125  	}
3126  	if (coalition_is_reaped(coal)) {
3127  		jcs->jcs_flags |= kCoalitionReaped;
3128  	}
3129  	if (coalition_is_privileged(coal)) {
3130  		jcs->jcs_flags |= kCoalitionPrivileged;
3131  	}
3132  
3133  #if CONFIG_THREAD_GROUPS
3134  	struct thread_group *thread_group = kdp_coalition_get_thread_group(coal);
3135  	if (thread_group) {
3136  		jcs->jcs_thread_group = thread_group_get_id(thread_group);
3137  	}
3138  #endif /* CONFIG_THREAD_GROUPS */
3139  
3140  	leader = kdp_coalition_get_leader(coal);
3141  	if (leader) {
3142  		jcs->jcs_leader_task_uniqueid = get_task_uniqueid(leader);
3143  	} else {
3144  		jcs->jcs_leader_task_uniqueid = 0;
3145  	}
3146  }
3147  #endif /* CONFIG_COALITIONS */
3148  
3149  #if CONFIG_THREAD_GROUPS
3150  static void
3151  stackshot_thread_group_count(void *arg, int i, struct thread_group *tg)
3152  {
3153  #pragma unused(i, tg)
3154  	unsigned int *n = (unsigned int*)arg;
3155  	(*n)++;
3156  }
3157  
3158  static void
3159  stackshot_thread_group_snapshot(void *arg, int i, struct thread_group *tg)
3160  {
3161  	struct thread_group_snapshot_v2 *thread_groups = (struct thread_group_snapshot_v2 *)arg;
3162  	struct thread_group_snapshot_v2 *tgs = &thread_groups[i];
3163  	uint32_t flags = thread_group_get_flags(tg);
3164  	tgs->tgs_id = thread_group_get_id(tg);
3165  	stackshot_memcpy(tgs->tgs_name, thread_group_get_name(tg), THREAD_GROUP_MAXNAME);
3166  	tgs->tgs_flags = ((flags & THREAD_GROUP_FLAGS_EFFICIENT) ? kThreadGroupEfficient : 0) |
3167  	    ((flags & THREAD_GROUP_FLAGS_UI_APP) ? kThreadGroupUIApp : 0);
3168  }
3169  #endif /* CONFIG_THREAD_GROUPS */
3170  
3171  /* Determine if a thread has waitinfo that stackshot can provide */
3172  static int
3173  stackshot_thread_has_valid_waitinfo(thread_t thread)
3174  {
3175  	if (!(thread->state & TH_WAIT)) {
3176  		return 0;
3177  	}
3178  
3179  	switch (thread->block_hint) {
3180  	// If set to None or is a parked work queue, ignore it
3181  	case kThreadWaitParkedWorkQueue:
3182  	case kThreadWaitNone:
3183  		return 0;
3184  	// There is a short window where the pthread kext removes a thread
3185  	// from its ksyn wait queue before waking the thread up
3186  	case kThreadWaitPThreadMutex:
3187  	case kThreadWaitPThreadRWLockRead:
3188  	case kThreadWaitPThreadRWLockWrite:
3189  	case kThreadWaitPThreadCondVar:
3190  		return kdp_pthread_get_thread_kwq(thread) != NULL;
3191  	// All other cases are valid block hints if in a wait state
3192  	default:
3193  		return 1;
3194  	}
3195  }
3196  
3197  /* Determine if a thread has turnstileinfo that stackshot can provide */
3198  static int
3199  stackshot_thread_has_valid_turnstileinfo(thread_t thread)
3200  {
3201  	struct turnstile *ts = thread_get_waiting_turnstile(thread);
3202  
3203  	return stackshot_thread_has_valid_waitinfo(thread) &&
3204  	       ts != TURNSTILE_NULL;
3205  }
3206  
3207  static void
3208  stackshot_thread_turnstileinfo(thread_t thread, thread_turnstileinfo_t *tsinfo)
3209  {
3210  	struct turnstile *ts;
3211  
3212  	/* acquire turnstile information and store it in the stackshot */
3213  	ts = thread_get_waiting_turnstile(thread);
3214  	tsinfo->waiter = thread_tid(thread);
3215  	kdp_turnstile_fill_tsinfo(ts, tsinfo);
3216  }
3217  
3218  static void
3219  stackshot_thread_wait_owner_info(thread_t thread, thread_waitinfo_t *waitinfo)
3220  {
3221  	waitinfo->waiter        = thread_tid(thread);
3222  	waitinfo->wait_type     = thread->block_hint;
3223  
3224  	switch (waitinfo->wait_type) {
3225  	case kThreadWaitKernelMutex:
3226  		kdp_lck_mtx_find_owner(thread->waitq, thread->wait_event, waitinfo);
3227  		break;
3228  	case kThreadWaitPortReceive:
3229  		kdp_mqueue_recv_find_owner(thread->waitq, thread->wait_event, waitinfo);
3230  		break;
3231  	case kThreadWaitPortSend:
3232  		kdp_mqueue_send_find_owner(thread->waitq, thread->wait_event, waitinfo);
3233  		break;
3234  	case kThreadWaitSemaphore:
3235  		kdp_sema_find_owner(thread->waitq, thread->wait_event, waitinfo);
3236  		break;
3237  	case kThreadWaitUserLock:
3238  		kdp_ulock_find_owner(thread->waitq, thread->wait_event, waitinfo);
3239  		break;
3240  	case kThreadWaitKernelRWLockRead:
3241  	case kThreadWaitKernelRWLockWrite:
3242  	case kThreadWaitKernelRWLockUpgrade:
3243  		kdp_rwlck_find_owner(thread->waitq, thread->wait_event, waitinfo);
3244  		break;
3245  	case kThreadWaitPThreadMutex:
3246  	case kThreadWaitPThreadRWLockRead:
3247  	case kThreadWaitPThreadRWLockWrite:
3248  	case kThreadWaitPThreadCondVar:
3249  		kdp_pthread_find_owner(thread, waitinfo);
3250  		break;
3251  	case kThreadWaitWorkloopSyncWait:
3252  		kdp_workloop_sync_wait_find_owner(thread, thread->wait_event, waitinfo);
3253  		break;
3254  	case kThreadWaitOnProcess:
3255  		kdp_wait4_find_process(thread, thread->wait_event, waitinfo);
3256  		break;
3257  	case kThreadWaitSleepWithInheritor:
3258  		kdp_sleep_with_inheritor_find_owner(thread->waitq, thread->wait_event, waitinfo);
3259  		break;
3260  	case kThreadWaitEventlink:
3261  		kdp_eventlink_find_owner(thread->waitq, thread->wait_event, waitinfo);
3262  		break;
3263  	case kThreadWaitCompressor:
3264  		kdp_compressor_busy_find_owner(thread->wait_event, waitinfo);
3265  		break;
3266  	default:
3267  		waitinfo->owner = 0;
3268  		waitinfo->context = 0;
3269  		break;
3270  	}
3271  }