// duct-tape/src/task.c
   1  #include "darlingserver/rpc.h"
   2  #include "mach/kern_return.h"
   3  #include "mach/task_info.h"
   4  #include <darlingserver/duct-tape/stubs.h>
   5  #include <darlingserver/duct-tape.h>
   6  #include <darlingserver/duct-tape/task.h>
   7  #include <darlingserver/duct-tape/memory.h>
   8  #include <darlingserver/duct-tape/psynch.h>
   9  #include <darlingserver/duct-tape/hooks.internal.h>
  10  #include <darlingserver/duct-tape/log.h>
  11  
  12  #include <kern/task.h>
  13  #include <kern/ipc_tt.h>
  14  #include <kern/policy_internal.h>
  15  #include <ipc/ipc_importance.h>
  16  #include <kern/restartable.h>
  17  #include <kern/sync_sema.h>
  18  #include <mach/mach_traps.h>
  19  #include <mach/mach_port.h>
  20  #include <ipc/ipc_hash.h>
  21  
  22  #include <stdlib.h>
  23  
// The single kernel task; assigned by the first dtape_task_create(NULL, 0, ...)
// call (made from dtape_task_init()).
task_t kernel_task = NULL;
  25  
  26  void dtape_task_init(void) {
  27  	// this will assign to kernel_task
  28  	dserver_rpc_architecture_t arch = dserver_rpc_architecture_invalid;
  29  
  30  #if __x86_64__
  31  	arch = dserver_rpc_architecture_x86_64;
  32  #elif __i386__
  33  	arch = dserver_rpc_architecture_i386;
  34  #elif __aarch64__
  35  	arch = dserver_rpc_architecture_arm64;
  36  #elif __arm__
  37  	arch = dserver_rpc_architecture_arm32;
  38  #else
  39  	#error Unknown architecture
  40  #endif
  41  
  42  	if (!dtape_task_create(NULL, 0, NULL, arch)) {
  43  		panic("Failed to create kernel task");
  44  	}
  45  };
  46  
// Creates a new duct-taped task (wrapping an XNU task_t), or attaches the
// given context to the kernel task when called with (NULL, 0, ...).
//
// parent_task:  task to inherit security/audit tokens and IPC importance
//               from, or NULL for the kernel task / parentless tasks.
// nsid:         PID to record for this task (published in audit_token.val[5]
//               and returned by pid_from_task()).
// context:      opaque darlingserver Task context, passed back through
//               dtape_hooks; owned by the managing Task instance.
// architecture: the task's code architecture; 64-bit architectures are
//               marked via task_set_64Bit_addr()/task_set_64Bit_data().
//
// Returns the new task, or NULL if allocation failed.
dtape_task_t* dtape_task_create(dtape_task_t* parent_task, uint32_t nsid, void* context, dserver_rpc_architecture_t architecture) {
	// (parent == NULL, nsid == 0) identifies the kernel task; if it already
	// exists, this call just attaches the context to it
	if (parent_task == NULL && nsid == 0 && kernel_task) {
		dtape_task_t* task = dtape_task_for_xnu_task(kernel_task);

		// don't acquire an additional reference;
		// the managing Task instance acquires ownership of the kernel task
		//task_reference(kernel_task);

		if (task->context) {
			panic("The kernel task already has a context");
		} else {
			task->context = context;
		}
		return task;
	}

	dtape_task_t* task = malloc(sizeof(dtape_task_t));
	if (!task) {
		return NULL;
	}

	task->context = context;
	task->saved_pid = nsid;
	task->architecture = architecture;
	task->has_sigexc = false;
	task->dyld_info_addr = 0;
	task->dyld_info_length = 0;
	task->p_ident.eid = dtape_hooks->task_eternal_id(context);
	dtape_mutex_init(&task->dyld_info_lock);
	dtape_condvar_init(&task->dyld_info_condvar);
	memset(&task->xnu_task, 0, sizeof(task->xnu_task));

	// this next section uses code adapted from XNU's task_create_internal() in osfmk/kern/task.c

	os_ref_init(&task->xnu_task.ref_count, NULL);

	lck_mtx_init(&task->xnu_task.lock, LCK_GRP_NULL, LCK_ATTR_NULL);
	queue_init(&task->xnu_task.threads);

	task->xnu_task.active = true;

	task->xnu_task.map = dtape_vm_map_create(task);

	queue_init(&task->xnu_task.semaphore_list);

	ipc_task_init(&task->xnu_task, parent_task ? &parent_task->xnu_task : NULL);

	if (parent_task) {
		task_importance_init_from_parent(&task->xnu_task, &parent_task->xnu_task);
	}

	// this is a hack to force all tasks to have an IPC importance structure associated with them
	// since i'm not sure where it's normally acquired in XNU.
	// (this is necessary because ipc_importance_send() needs the task to have a valid `task_imp_base`)
	if (task->xnu_task.task_imp_base == IIT_NULL) {
		ipc_importance_task_t imp = ipc_importance_for_task(&task->xnu_task, false);
		// the new IPC importance structure has 2 references:
		//   * one that the task gets,
		//   * and another one that we (the caller) get
		// we don't actually want a reference; we only want the task to have one.
		ipc_importance_task_release(imp);
	}

	// inherit the parent's security/audit tokens; parentless tasks get the
	// kernel tokens
	if (parent_task != NULL) {
		task->xnu_task.sec_token = parent_task->xnu_task.sec_token;
		task->xnu_task.audit_token = parent_task->xnu_task.audit_token;
	} else {
		task->xnu_task.sec_token = KERNEL_SECURITY_TOKEN;
		task->xnu_task.audit_token = KERNEL_AUDIT_TOKEN;
	}

	// audit_token.val[5] carries the PID (see dtape_task_uidgid for other slots)
	task->xnu_task.audit_token.val[5] = task->saved_pid;

	if (architecture == dserver_rpc_architecture_x86_64 || architecture == dserver_rpc_architecture_arm64) {
		task_set_64Bit_addr(&task->xnu_task);
		task_set_64Bit_data(&task->xnu_task);
	}

	ipc_task_enable(&task->xnu_task);

	dtape_psynch_task_init(task);

	// the first (NULL, 0) task created becomes the kernel task
	if (parent_task == NULL && nsid == 0) {
		if (kernel_task) {
			panic("Another kernel task has been created");
		}

		kernel_task = &task->xnu_task;
	}

	return task;
};
 139  
// Tears down a duct-taped task once its last reference is dropped (called
// from task_deallocate() below). The teardown order mirrors XNU: deactivate
// and disable IPC first, then destroy semaphores, the IPC space, the VM map,
// and finally the lock and the allocation itself.
void dtape_task_destroy(dtape_task_t* task) {
	dtape_log_debug("%d: task being destroyed", task->saved_pid);

	dtape_psynch_task_destroy(task);

	// this next section uses code adapted from XNU's task_deallocate() in osfmk/kern/task.c

	// mark the task inactive under its lock so in-flight users see it dying
	task_lock(&task->xnu_task);
	task->xnu_task.active = false;
	ipc_task_disable(&task->xnu_task);
	task_unlock(&task->xnu_task);

	semaphore_destroy_all(&task->xnu_task);

	ipc_space_terminate(task->xnu_task.itk_space);

	ipc_task_terminate(&task->xnu_task);

	dtape_vm_map_destroy(task->xnu_task.map);

	// drop the space reference (presumably the one from ipc_task_init — TODO confirm)
	is_release(task->xnu_task.itk_space);

	lck_mtx_destroy(&task->xnu_task.lock, LCK_GRP_NULL);

	// let darlingserver dispose of its side of the context
	dtape_hooks->task_context_dispose(task->context);

	free(task);
};
 168  
 169  void dtape_task_uidgid(dtape_task_t* task, int new_uid, int new_gid, int* old_uid, int* old_gid) {
 170  	task_lock(&task->xnu_task);
 171  	if (old_uid) {
 172  		*old_uid = task->xnu_task.audit_token.val[1];
 173  	}
 174  	if (old_gid) {
 175  		*old_gid = task->xnu_task.audit_token.val[2];
 176  	}
 177  	if (new_uid >= 0) {
 178  		task->xnu_task.audit_token.val[1] = new_uid;
 179  	}
 180  	if (new_gid >= 0) {
 181  		task->xnu_task.audit_token.val[2] = new_gid;
 182  	}
 183  	task_unlock(&task->xnu_task);
 184  };
 185  
// Acquires an additional reference on the task.
void dtape_task_retain(dtape_task_t* task) {
	task_reference(&task->xnu_task);
};

// Drops a reference on the task; the task is destroyed once the last
// reference is gone (see task_deallocate() below).
void dtape_task_release(dtape_task_t* task) {
	task_deallocate(&task->xnu_task);
};

// Notification from darlingserver that the task is about to die.
void dtape_task_dying(dtape_task_t* task) {
	// nothing for now
};
 197  
// Publishes the task's dyld all-image-info address/length and wakes any
// threads blocked in task_info(TASK_DYLD_INFO) waiting for it.
void dtape_task_set_dyld_info(dtape_task_t* task, uint64_t address, uint64_t length) {
	dtape_mutex_lock(&task->dyld_info_lock);
	dtape_log_debug("setting dyld info to %llu bytes at %llx", length, address);
	task->dyld_info_addr = address;
	task->dyld_info_length = length;
	dtape_mutex_unlock(&task->dyld_info_lock);
	// wake all waiters; they re-check the values under the lock
	dtape_condvar_signal(&task->dyld_info_condvar, SIZE_MAX);
};

// Flags whether signal-based exception delivery is active for this task.
void dtape_task_set_sigexc_enabled(dtape_task_t* task, bool enabled) {
	// FIXME: we should probably have a lock for this
	task->has_sigexc = enabled;
};
 211  
 212  bool dtape_task_try_resume(dtape_task_t* task) {
 213  	if (task->xnu_task.user_stop_count) {
 214  		dtape_log_debug("sigexc target task is stopped (%d), resuming", task->xnu_task.user_stop_count);
 215  		return task_resume(&task->xnu_task) == KERN_SUCCESS;
 216  	}
 217  	return false;
 218  };
 219  
// XNU override: drops a task reference; destroys the task when the last
// reference is released.
void task_deallocate(task_t xtask) {
	dtape_task_t* task = dtape_task_for_xnu_task(xtask);
	os_ref_count_t count = os_ref_release(&xtask->ref_count);
	if (count > 0) {
		// IPC importance info might be holding the last reference on the task
		if (count == 1) {
			if (IIT_NULL != task->xnu_task.task_imp_base) {
				// breaking the link lets the importance subsystem drop its
				// reference, which will re-enter here and reach count == 0
				ipc_importance_disconnect_task(&task->xnu_task);
			}
		}
		return;
	}
	dtape_task_destroy(task);
};
 234  
// XNU override: returns the PID recorded for the task at creation time.
int pid_from_task(task_t xtask) {
	dtape_task_t* task = dtape_task_for_xnu_task(xtask);
	return task->saved_pid;
};
 239  
 240  int proc_get_effective_task_policy(task_t task, int flavor) {
 241  	dtape_stub();
 242  	if (flavor == TASK_POLICY_ROLE) {
 243  		return TASK_UNSPECIFIED;
 244  	} else {
 245  		panic("Unimplemented proc_get_effective_task_policy flavor: %d", flavor);
 246  	}
 247  };
 248  
// XNU override: alias for pid_from_task().
int task_pid(task_t task) {
	return pid_from_task(task);
};
 252  
// The following task-policy / port-notification entry points are
// unimplemented stubs (dtape_stub() only records that they were hit).

void task_policy_update_complete_unlocked(task_t task, task_pend_token_t pend_token) {
	dtape_stub();
};

void task_port_notify(mach_msg_header_t* msg) {
	dtape_stub();
};

void task_port_with_flavor_notify(mach_msg_header_t* msg) {
	dtape_stub();
};

// Stubbed; reports "not handled" to the caller.
boolean_t task_suspension_notify(mach_msg_header_t* request_header) {
	dtape_stub();
	return FALSE;
};

void task_update_boost_locked(task_t task, boolean_t boost_active, task_pend_token_t pend_token) {
	dtape_stub();
};

void task_watchport_elem_deallocate(struct task_watchport_elem* watchport_elem) {
	dtape_stub();
};
 277  
// The following RPCs are "unsafe" stubs (dtape_stub_unsafe()): they are not
// expected to be reached in practice.

kern_return_t task_create_suid_cred(task_t task, suid_cred_path_t path, suid_cred_uid_t uid, suid_cred_t* sc_p) {
	dtape_stub_unsafe();
};

kern_return_t task_dyld_process_info_notify_deregister(task_t task, mach_port_name_t rcv_name) {
	dtape_stub_unsafe();
};

kern_return_t task_dyld_process_info_notify_register(task_t task, ipc_port_t sright) {
	dtape_stub_unsafe();
};

kern_return_t task_generate_corpse(task_t task, ipc_port_t* corpse_task_port) {
	dtape_stub_unsafe();
};

kern_return_t task_get_assignment(task_t task, processor_set_t* pset) {
	dtape_stub_unsafe();
};

kern_return_t task_get_state(task_t  task, int flavor, thread_state_t state, mach_msg_type_number_t* state_count) {
	dtape_stub_unsafe();
};
 301  
 302  kern_return_t task_info(task_t xtask, task_flavor_t flavor, task_info_t task_info_out, mach_msg_type_number_t* task_info_count) {
 303  	dtape_task_t* task = dtape_task_for_xnu_task(xtask);
 304  
 305  	switch (flavor) {
 306  		case TASK_BASIC_INFO_32:
 307  		case TASK_BASIC_INFO_64:
 308  		case MACH_TASK_BASIC_INFO: {
 309  			uint64_t utimeus;
 310  			uint64_t stimeus;
 311  			dtape_memory_info_t mem_info;
 312  
 313  			dtape_hooks->task_get_memory_info(task->context, &mem_info);
 314  
 315  			dtape_log_debug("%s: TODO: fetch utimeus and stimeus somehow", __FUNCTION__);
 316  			utimeus = 0;
 317  			stimeus = 0;
 318  
 319  			if (flavor == TASK_BASIC_INFO_32) {
 320  				struct task_basic_info_32* info = (void*)task_info_out;
 321  
 322  				if (*task_info_count < TASK_BASIC_INFO_32_COUNT) {
 323  					return KERN_INVALID_ARGUMENT;
 324  				}
 325  
 326  				*task_info_count = TASK_BASIC_INFO_32_COUNT;
 327  
 328  				info->suspend_count = task->xnu_task.user_stop_count;
 329  				info->virtual_size = mem_info.virtual_size;
 330  				info->resident_size = mem_info.resident_size;
 331  				info->user_time.seconds = utimeus / USEC_PER_SEC;
 332  				info->user_time.microseconds = utimeus % USEC_PER_SEC;
 333  				info->system_time.seconds = stimeus / USEC_PER_SEC;
 334  				info->system_time.microseconds = stimeus % USEC_PER_SEC;
 335  				info->policy = 0;
 336  			} else if (flavor == TASK_BASIC_INFO_64) {
 337  				struct task_basic_info_64* info = (void*)task_info_out;
 338  
 339  				if (*task_info_count < TASK_BASIC_INFO_64_COUNT) {
 340  					return KERN_INVALID_ARGUMENT;
 341  				}
 342  
 343  				*task_info_count = TASK_BASIC_INFO_64_COUNT;
 344  
 345  				info->suspend_count = task->xnu_task.user_stop_count;
 346  				info->virtual_size = mem_info.virtual_size;
 347  				info->resident_size = mem_info.resident_size;
 348  				info->user_time.seconds = utimeus / USEC_PER_SEC;
 349  				info->user_time.microseconds = utimeus % USEC_PER_SEC;
 350  				info->system_time.seconds = stimeus / USEC_PER_SEC;
 351  				info->system_time.microseconds = stimeus % USEC_PER_SEC;
 352  				info->policy = 0;
 353  			} else {
 354  				struct mach_task_basic_info* info = (void*)task_info_out;
 355  
 356  				if (*task_info_count < MACH_TASK_BASIC_INFO_COUNT) {
 357  					return KERN_INVALID_ARGUMENT;
 358  				}
 359  
 360  				*task_info_count = MACH_TASK_BASIC_INFO_COUNT;
 361  
 362  				info->suspend_count = task->xnu_task.user_stop_count;
 363  				info->virtual_size = mem_info.virtual_size;
 364  				info->resident_size = mem_info.resident_size;
 365  				info->user_time.seconds = utimeus / USEC_PER_SEC;
 366  				info->user_time.microseconds = utimeus % USEC_PER_SEC;
 367  				info->system_time.seconds = stimeus / USEC_PER_SEC;
 368  				info->system_time.microseconds = stimeus % USEC_PER_SEC;
 369  				info->policy = 0;
 370  			}
 371  
 372  			return KERN_SUCCESS;
 373  		};
 374  
 375  		case TASK_THREAD_TIMES_INFO: {
 376  			task_thread_times_info_data_t* info = (task_thread_times_info_data_t*)task_info_out;
 377  
 378  			if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
 379  				return KERN_INVALID_ARGUMENT;
 380  			}
 381  			*task_info_count = TASK_THREAD_TIMES_INFO_COUNT;
 382  
 383  			dtape_log_debug("%s: TODO: fetch utimeus and stimeus somehow", __FUNCTION__);
 384  			uint64_t utimeus = 0;
 385  			uint64_t stimeus = 0;
 386  
 387  			info->user_time.seconds = utimeus / USEC_PER_SEC;
 388  			info->user_time.microseconds = utimeus % USEC_PER_SEC;
 389  			info->system_time.seconds = stimeus / USEC_PER_SEC;
 390  			info->system_time.microseconds = stimeus % USEC_PER_SEC;
 391  
 392  			return KERN_SUCCESS;
 393  		};
 394  
 395  		case TASK_DYLD_INFO: {
 396  			task_dyld_info_t info = (task_dyld_info_t)task_info_out;
 397  
 398  			/*
 399  			* We added the format field to TASK_DYLD_INFO output.  For
 400  			* temporary backward compatibility, accept the fact that
 401  			* clients may ask for the old version - distinquished by the
 402  			* size of the expected result structure.
 403  			*/
 404  #define TASK_LEGACY_DYLD_INFO_COUNT \
 405  			offsetof(struct task_dyld_info, all_image_info_format)/sizeof(natural_t)
 406  
 407  			if (*task_info_count < TASK_LEGACY_DYLD_INFO_COUNT) {
 408  				return KERN_INVALID_ARGUMENT;
 409  			}
 410  
 411  			// DARLING:
 412  			// This call may block, waiting for Darling to provide this information
 413  			// shortly after startup.
 414  
 415  			dtape_log_debug("going to read dyld info for task %p (%d)", task, task->saved_pid);
 416  
 417  			dtape_mutex_lock(&task->dyld_info_lock);
 418  
 419  			while (task->dyld_info_addr == 0 && task->dyld_info_length == 0) {
 420  				dtape_log_debug("going to wait for dyld info for task %p (%d)", task, task->saved_pid);
 421  				dtape_condvar_wait(&task->dyld_info_condvar, &task->dyld_info_lock);
 422  				dtape_log_debug("awoken from dyld info wait for task %p (%d)", task, task->saved_pid);
 423  			}
 424  
 425  			info->all_image_info_addr = task->dyld_info_addr;
 426  			info->all_image_info_size = task->dyld_info_length;
 427  
 428  			dtape_mutex_unlock(&task->dyld_info_lock);
 429  
 430  			dtape_log_debug("got dyld info for task %p (%d): %llu bytes at %llx", task, task->saved_pid, info->all_image_info_addr, info->all_image_info_size);
 431  
 432  			/* only set format on output for those expecting it */
 433  			if (*task_info_count >= TASK_DYLD_INFO_COUNT) {
 434  				info->all_image_info_format = task_has_64Bit_addr(xtask) ? TASK_DYLD_ALL_IMAGE_INFO_64 : TASK_DYLD_ALL_IMAGE_INFO_32 ;
 435  				*task_info_count = TASK_DYLD_INFO_COUNT;
 436  			} else {
 437  				*task_info_count = TASK_LEGACY_DYLD_INFO_COUNT;
 438  			}
 439  
 440  			return KERN_SUCCESS;
 441  		};
 442  
 443  		case TASK_AUDIT_TOKEN: {
 444  			audit_token_t   *audit_token_p;
 445  
 446  			if (*task_info_count < TASK_AUDIT_TOKEN_COUNT) {
 447  				return KERN_INVALID_ARGUMENT;
 448  			}
 449  
 450  			audit_token_p = (audit_token_t *) task_info_out;
 451  			*audit_token_p = task->xnu_task.audit_token;
 452  			*task_info_count = TASK_AUDIT_TOKEN_COUNT;
 453  
 454  			return KERN_SUCCESS;
 455  		};
 456  
 457  		case TASK_VM_INFO: {
 458  			task_vm_info_t info = (task_vm_info_t)task_info_out;
 459  			mach_msg_type_number_t orig_info_count = *task_info_count;
 460  
 461  			if (orig_info_count < TASK_VM_INFO_REV0_COUNT) {
 462  				return KERN_INVALID_ARGUMENT;
 463  			}
 464  
 465  			memset(info, 0, orig_info_count * sizeof(natural_t));
 466  
 467  			dtape_memory_info_t meminfo;
 468  			dtape_hooks->task_get_memory_info(task->context, &meminfo);
 469  
 470  			info->page_size = meminfo.page_size;
 471  			info->resident_size = meminfo.resident_size;
 472  			info->resident_size_peak = meminfo.resident_size;
 473  			info->virtual_size = meminfo.virtual_size;
 474  
 475  			// TODO: fill in other stuff
 476  
 477  			*task_info_count = TASK_VM_INFO_REV0_COUNT;
 478  
 479  			if (orig_info_count >= TASK_VM_INFO_REV1_COUNT) {
 480  
 481  				*task_info_count = TASK_VM_INFO_REV1_COUNT;
 482  
 483  				if (orig_info_count >= TASK_VM_INFO_REV2_COUNT) {
 484  
 485  					*task_info_count = TASK_VM_INFO_REV2_COUNT;
 486  
 487  					if (orig_info_count >= TASK_VM_INFO_REV3_COUNT) {
 488  
 489  						*task_info_count = TASK_VM_INFO_REV3_COUNT;
 490  
 491  						if (orig_info_count >= TASK_VM_INFO_REV4_COUNT) {
 492  
 493  							*task_info_count = TASK_VM_INFO_REV4_COUNT;
 494  
 495  							if (orig_info_count >= TASK_VM_INFO_REV5_COUNT) {
 496  								*task_info_count = TASK_VM_INFO_REV5_COUNT;
 497  							}
 498  						}
 499  					}
 500  				}
 501  			}
 502  
 503  			return KERN_SUCCESS;
 504  		};
 505  
 506  		case TASK_FLAGS_INFO: {
 507  			task_flags_info_t info = (task_flags_info_t)task_info_out;
 508  			mach_msg_type_number_t orig_info_count = *task_info_count;
 509  
 510  			if (orig_info_count < TASK_FLAGS_INFO_COUNT) {
 511  				return KERN_INVALID_ARGUMENT;
 512  			}
 513  
 514  			info->flags = 0;
 515  
 516  			if (task->architecture == dserver_rpc_architecture_x86_64 || task->architecture == dserver_rpc_architecture_arm64) {
 517  				info->flags = TF_LP64 | TF_64B_DATA;
 518  			}
 519  
 520  			return KERN_SUCCESS;
 521  		};
 522  
 523  		default:
 524  			dtape_stub_unsafe("unimplemented flavor");
 525  	}
 526  };
 527  
// Safe stub: task inspection is unsupported; always reports failure.
kern_return_t task_inspect(task_inspect_t task_insp, task_inspect_flavor_t flavor, task_inspect_info_t info_out, mach_msg_type_number_t* size_in_out) {
	dtape_stub_safe();
	return KERN_FAILURE;
};

// Safe stub: no task is ever considered a DriverKit driver here.
bool task_is_driver(task_t task) {
	dtape_stub_safe();
	return false;
};
 537  
// More unreachable "unsafe" stubs (dtape_stub_unsafe()).

kern_return_t task_map_corpse_info(task_t task, task_t corpse_task, vm_address_t* kcd_addr_begin, uint32_t* kcd_size) {
	dtape_stub_unsafe();
};

kern_return_t task_map_corpse_info_64(task_t task, task_t corpse_task, mach_vm_address_t* kcd_addr_begin, mach_vm_size_t* kcd_size) {
	dtape_stub_unsafe();
};

void task_name_deallocate(task_name_t task_name) {
	dtape_stub_unsafe();
};

kern_return_t task_policy_get(task_t task, task_policy_flavor_t flavor, task_policy_t policy_info, mach_msg_type_number_t* count, boolean_t* get_default) {
	dtape_stub_unsafe();
};

void task_policy_get_deallocate(task_policy_get_t task_policy_get) {
	dtape_stub_unsafe();
};
 557  
// Safe stub: policy changes are accepted but ignored.
kern_return_t task_policy_set(task_t task, task_policy_flavor_t flavor, task_policy_t policy_info, mach_msg_type_number_t count) {
	dtape_stub_safe();
	return KERN_SUCCESS;
};

// Mirrors XNU: deallocating a task_policy_set_t just drops a task reference.
void task_policy_set_deallocate(task_policy_set_t task_policy_set) {
	return task_deallocate((task_t)task_policy_set);
};
 566  
// Remaining unimplemented task RPCs, all "unsafe" stubs (dtape_stub_unsafe()).

kern_return_t task_purgable_info(task_t task, task_purgable_info_t* stats) {
	dtape_stub_unsafe();
};

kern_return_t task_register_dyld_image_infos(task_t task, dyld_kernel_image_info_array_t infos_copy, mach_msg_type_number_t infos_len) {
	dtape_stub_unsafe();
};

kern_return_t task_register_dyld_shared_cache_image_info(task_t task, dyld_kernel_image_info_t cache_img, boolean_t no_cache, boolean_t private_cache) {
	dtape_stub_unsafe();
};

kern_return_t task_restartable_ranges_register(task_t task, task_restartable_range_t* ranges, mach_msg_type_number_t count) {
	dtape_stub_unsafe();
};

kern_return_t task_restartable_ranges_synchronize(task_t task) {
	dtape_stub_unsafe();
};

kern_return_t task_set_exc_guard_behavior(task_t task, task_exc_guard_behavior_t behavior) {
	dtape_stub_unsafe();
};

kern_return_t task_set_info(task_t task, task_flavor_t flavor, task_info_t task_info_in, mach_msg_type_number_t task_info_count) {
	dtape_stub_unsafe();
};

kern_return_t task_set_phys_footprint_limit(task_t task, int new_limit_mb, int* old_limit_mb) {
	dtape_stub_unsafe();
};

kern_return_t task_set_state(task_t task, int flavor, thread_state_t state, mach_msg_type_number_t state_count) {
	dtape_stub_unsafe();
};

void task_suspension_token_deallocate(task_suspension_token_t token) {
	dtape_stub_unsafe();
};

kern_return_t task_terminate(task_t task) {
	dtape_stub_unsafe();
};

kern_return_t task_unregister_dyld_image_infos(task_t task, dyld_kernel_image_info_array_t infos_copy, mach_msg_type_number_t infos_len) {
	dtape_stub_unsafe();
};
 614  
// Common implementation of the task_for_pid() and task_name_for_pid() traps.
//
// target_tport: port name (in the caller's IPC space) of the task that will
//               receive the new right.
// pid:          PID of the task to look up (via darlingserver hooks).
// t:            userspace address to which the resulting port name is copied.
// task_name:    if true, produce an inspection-only task-name port instead of
//               a full task port.
//
// Uses the goto-cleanup pattern: on any failure, every reference or right
// acquired so far is released before returning KERN_FAILURE.
static kern_return_t task_for_pid_internal(mach_port_name_t target_tport, int pid, uintptr_t t, bool task_name) {
	kern_return_t kr = KERN_FAILURE;
	task_t receiving_task = TASK_NULL;
	dtape_task_t* looked_up_task = NULL;
	ipc_port_t right = IPC_PORT_NULL;
	mach_port_name_t out_name = MACH_PORT_NULL;

	// produces a task reference; released in the cleanup path below
	receiving_task = port_name_to_task(target_tport);
	if (receiving_task == TASK_NULL) {
		goto out;
	}

	// returned with a reference that we own until it's handed off to
	// convert_task*_to_port below
	looked_up_task = dtape_hooks->task_lookup(pid, true, true);
	if (!looked_up_task) {
		goto out;
	}

	if (task_name) {
		right = convert_task_name_to_port(&looked_up_task->xnu_task);
	} else {
		if (&looked_up_task->xnu_task == current_task()) {
			right = convert_task_to_port_pinned(&looked_up_task->xnu_task);
		} else {
			right = convert_task_to_port(&looked_up_task->xnu_task);
		}
	}

	// consumed by convert_task{,_name}_to_port{,_pinned}
	looked_up_task = NULL;

	if (right == IPC_PORT_NULL) {
		goto out;
	}

	out_name = ipc_port_copyout_send(right, receiving_task->itk_space);

	// consumed by ipc_port_copyout_send
	right = IPC_PORT_NULL;

	if (!MACH_PORT_VALID(out_name)) {
		goto out;
	}

	if (copyout(&out_name, t, sizeof(out_name))) {
		goto out;
	}

	// successfully handed off to userspace; don't deallocate it in cleanup
	out_name = MACH_PORT_NULL;

	kr = KERN_SUCCESS;

out:
	if (MACH_PORT_VALID(out_name)) {
		mach_port_deallocate(receiving_task->itk_space, out_name);
	}
	if (right != IPC_PORT_NULL) {
		ipc_port_release_send(right);
	}
	if (looked_up_task) {
		dtape_task_release(looked_up_task);
	}
	if (receiving_task != TASK_NULL) {
		task_deallocate(receiving_task);
	}
	return kr;
};
 682  
// Trap: obtain a full task port for the given PID.
kern_return_t task_for_pid(struct task_for_pid_args* args) {
	return task_for_pid_internal(args->target_tport, args->pid, args->t, false);
};

// Trap: obtain an inspection-only task-name port for the given PID.
kern_return_t task_name_for_pid(struct task_name_for_pid_args* args) {
	return task_for_pid_internal(args->target_tport, args->pid, args->t, true);
};
 690  
 691  kern_return_t pid_for_task(struct pid_for_task_args* args) {
 692  	kern_return_t kr = KERN_FAILURE;
 693  	task_t converted_task = TASK_NULL;
 694  	int pid = -1;
 695  
 696  	converted_task = port_name_to_task_name(args->t);
 697  	if (converted_task == TASK_NULL) {
 698  		goto out;
 699  	}
 700  
 701  	pid = task_pid(converted_task);
 702  
 703  	if (pid < 0) {
 704  		goto out;
 705  	}
 706  
 707  	if (copyout(&pid, args->pid, sizeof(pid))) {
 708  		goto out;
 709  	}
 710  
 711  	kr = KERN_SUCCESS;
 712  
 713  out:
 714  	if (converted_task != TASK_NULL) {
 715  		task_deallocate(converted_task);
 716  	}
 717  	return kr;
 718  };
 719  
// Safe stub: no task is ever treated as an exec copy here. Note this also
// means task_importance_init_from_parent() never inherits the receiver mark.
boolean_t task_is_exec_copy(task_t task) {
	dtape_stub_safe();
	return FALSE;
};

void task_wait_locked(task_t task, boolean_t until_not_runnable) {
	// this was stubbed in the LKM, so it should be safe to stub here
	dtape_stub_safe();
};
 729  
 730  //
 731  // for task_ident.c
 732  //
 733  
// In duct-tape, a "proc" is simply the owning dtape_task_t.

// Looks up a task by its eternal ID; presumably returns it with a reference
// that proc_rele() drops — TODO confirm against task_lookup_eternal.
void* proc_find_ident(struct proc_ident const *i) {
	return dtape_hooks->task_lookup_eternal(i->eid, true);
};

// Drops the reference acquired by proc_find_ident().
int proc_rele(void* p) {
	dtape_task_release(p);
	return 0;
};

// Returns the XNU task embedded in the given "proc".
task_t proc_task(void* p) {
	return &((dtape_task_t*)p)->xnu_task;
};

// Returns the identity (eternal ID) recorded at task creation.
struct proc_ident proc_ident(void* p) {
	return ((dtape_task_t*)p)->p_ident;
};
 750  
 751  //
 752  // end for task_ident.c
 753  //
 754  
 755  // <copied from="xnu://7195.141.2/osfmk/kern/task_policy.c">
 756  
 757  /*
 758   * Check if this task should donate importance.
 759   *
 760   * May be called without taking the task lock. In that case, donor status can change
 761   * so you must check only once for each donation event.
 762   */
 763  boolean_t
 764  task_is_importance_donor(task_t task)
 765  {
 766  	if (task->task_imp_base == IIT_NULL) {
 767  		return FALSE;
 768  	}
 769  	return ipc_importance_task_is_donor(task->task_imp_base);
 770  }
 771  
 772  /*
 773   *      task_policy
 774   *
 775   *	Set scheduling policy and parameters, both base and limit, for
 776   *	the given task. Policy must be a policy which is enabled for the
 777   *	processor set. Change contained threads if requested.
 778   */
 779  kern_return_t
 780  task_policy(
 781  	__unused task_t                 task,
 782  	__unused policy_t                       policy_id,
 783  	__unused policy_base_t          base,
 784  	__unused mach_msg_type_number_t count,
 785  	__unused boolean_t                      set_limit,
 786  	__unused boolean_t                      change)
 787  {
 788  	return KERN_FAILURE;
 789  }
 790  
 791  /*
 792   * Query the status of the task's donor mark.
 793   */
 794  boolean_t
 795  task_is_marked_importance_donor(task_t task)
 796  {
 797  	if (task->task_imp_base == IIT_NULL) {
 798  		return FALSE;
 799  	}
 800  	return ipc_importance_task_is_marked_donor(task->task_imp_base);
 801  }
 802  
 803  /*
 804   * Query the status of the task's live donor and donor mark.
 805   */
 806  boolean_t
 807  task_is_marked_live_importance_donor(task_t task)
 808  {
 809  	if (task->task_imp_base == IIT_NULL) {
 810  		return FALSE;
 811  	}
 812  	return ipc_importance_task_is_marked_live_donor(task->task_imp_base);
 813  }
 814  
 815  /*
 816   * Query the task's receiver mark.
 817   */
 818  boolean_t
 819  task_is_marked_importance_receiver(task_t task)
 820  {
 821  	if (task->task_imp_base == IIT_NULL) {
 822  		return FALSE;
 823  	}
 824  	return ipc_importance_task_is_marked_receiver(task->task_imp_base);
 825  }
 826  
 827  /*
 828   * This routine may be called without holding task lock
 829   * since the value of de-nap receiver can never be unset.
 830   */
 831  boolean_t
 832  task_is_importance_denap_receiver(task_t task)
 833  {
 834  	if (task->task_imp_base == IIT_NULL) {
 835  		return FALSE;
 836  	}
 837  	return ipc_importance_task_is_denap_receiver(task->task_imp_base);
 838  }
 839  
 840  /*
 841   * Query the task's de-nap receiver mark.
 842   */
 843  boolean_t
 844  task_is_marked_importance_denap_receiver(task_t task)
 845  {
 846  	if (task->task_imp_base == IIT_NULL) {
 847  		return FALSE;
 848  	}
 849  	return ipc_importance_task_is_marked_denap_receiver(task->task_imp_base);
 850  }
 851  
/* Verbatim XNU copy: propagate the parent's IPC importance marks to a new task. */
void
task_importance_init_from_parent(task_t new_task, task_t parent_task)
{
#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t new_task_imp = IIT_NULL;

	new_task->task_imp_base = NULL;
	if (!parent_task) {
		return;
	}

	if (task_is_marked_importance_donor(parent_task)) {
		new_task_imp = ipc_importance_for_task(new_task, FALSE);
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_donor(new_task_imp, TRUE);
	}
	if (task_is_marked_live_importance_donor(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_live_donor(new_task_imp, TRUE);
	}
	/* Do not inherit 'receiver' on fork, vfexec or true spawn */
	/* NOTE(review): task_is_exec_copy() is stubbed to FALSE in this file,
	 * so the receiver mark is never inherited here. */
	if (task_is_exec_copy(new_task) &&
	    task_is_marked_importance_receiver(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_receiver(new_task_imp, TRUE);
	}
	if (task_is_marked_importance_denap_receiver(parent_task)) {
		if (IIT_NULL == new_task_imp) {
			new_task_imp = ipc_importance_for_task(new_task, FALSE);
		}
		assert(IIT_NULL != new_task_imp);
		ipc_importance_task_mark_denap_receiver(new_task_imp, TRUE);
	}
	if (IIT_NULL != new_task_imp) {
		assert(new_task->task_imp_base == new_task_imp);
		/* drop the caller's reference; the task keeps its own */
		ipc_importance_task_release(new_task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */
}
 897  
 898  // </copied>
 899  
 900  // <copied from="xnu://7195.141.2/osfmk/kern/task.c">
 901  
/*
 * Returns whether the task has TF_FILTER_MSG set.
 * t_flags is read atomically (relaxed), so no task lock is required.
 */
boolean_t
task_get_filter_msg_flag(
	task_t task)
{
	uint32_t flags = 0;

	if (!task) {
		return false;
	}

	flags = os_atomic_load(&task->t_flags, relaxed);
	return (flags & TF_FILTER_MSG) ? TRUE : FALSE;
}
 915  
 916  /*
 917   *	task_assign:
 918   *
 919   *	Change the assigned processor set for the task
 920   */
kern_return_t
task_assign(
	__unused task_t         task,
	__unused processor_set_t        new_pset,
	__unused boolean_t      assign_threads)
{
	/* processor-set reassignment is not supported */
	return KERN_FAILURE;
}

/*
 *	task_assign_default:
 *
 *	Version of task_assign to assign to default processor set.
 */
kern_return_t
task_assign_default(
	task_t          task,
	boolean_t       assign_threads)
{
	return task_assign(task, &pset0, assign_threads);
}

/* Mach task_create() RPC: always fails (see comment below). */
kern_return_t
task_create(
	task_t                          parent_task,
	__unused ledger_port_array_t    ledger_ports,
	__unused mach_msg_type_number_t num_ledger_ports,
	__unused boolean_t              inherit_memory,
	__unused task_t                 *child_task)    /* OUT */
{
	if (parent_task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * No longer supported: too many calls assume that a task has a valid
	 * process attached.
	 */
	return KERN_FAILURE;
}
 961  
 962  kern_return_t
 963  task_get_dyld_image_infos(__unused task_t task,
 964      __unused dyld_kernel_image_info_array_t * dyld_images,
 965      __unused mach_msg_type_number_t * dyld_imagesCnt)
 966  {
 967  	return KERN_NOT_SUPPORTED;
 968  }
 969  
 970  kern_return_t
 971  task_get_exc_guard_behavior(
 972  	task_t task,
 973  	task_exc_guard_behavior_t *behaviorp)
 974  {
 975  	if (task == TASK_NULL) {
 976  		return KERN_INVALID_TASK;
 977  	}
 978  	*behaviorp = task->task_exc_guard;
 979  	return KERN_SUCCESS;
 980  }
 981  
 982  /* Placeholders for the task set/get voucher interfaces */
 983  kern_return_t
 984  task_get_mach_voucher(
 985  	task_t                  task,
 986  	mach_voucher_selector_t __unused which,
 987  	ipc_voucher_t           *voucher)
 988  {
 989  	if (TASK_NULL == task) {
 990  		return KERN_INVALID_TASK;
 991  	}
 992  
 993  	*voucher = NULL;
 994  	return KERN_SUCCESS;
 995  }
 996  
 997  kern_return_t
 998  task_set_mach_voucher(
 999  	task_t                  task,
1000  	ipc_voucher_t           __unused voucher)
1001  {
1002  	if (TASK_NULL == task) {
1003  		return KERN_INVALID_TASK;
1004  	}
1005  
1006  	return KERN_SUCCESS;
1007  }
1008  
1009  kern_return_t
1010  task_swap_mach_voucher(
1011  	__unused task_t         task,
1012  	__unused ipc_voucher_t  new_voucher,
1013  	ipc_voucher_t          *in_out_old_voucher)
1014  {
1015  	/*
1016  	 * Currently this function is only called from a MIG generated
1017  	 * routine which doesn't release the reference on the voucher
1018  	 * addressed by in_out_old_voucher. To avoid leaking this reference,
1019  	 * a call to release it has been added here.
1020  	 */
1021  	ipc_voucher_release(*in_out_old_voucher);
1022  	return KERN_NOT_SUPPORTED;
1023  }
1024  
1025  /*
1026   *	task_inspect_deallocate:
1027   *
1028   *	Drop a task inspection reference.
1029   */
1030  void
1031  task_inspect_deallocate(
1032  	task_inspect_t          task_inspect)
1033  {
1034  	return task_deallocate((task_t)task_inspect);
1035  }
1036  
1037  kern_return_t
1038  task_register_dyld_set_dyld_state(__unused task_t task,
1039      __unused uint8_t dyld_state)
1040  {
1041  	return KERN_NOT_SUPPORTED;
1042  }
1043  
1044  kern_return_t
1045  task_register_dyld_get_process_state(__unused task_t task,
1046      __unused dyld_kernel_process_info_t * dyld_process_state)
1047  {
1048  	return KERN_NOT_SUPPORTED;
1049  }
1050  
1051  /*
1052   *	task_set_policy
1053   *
1054   *	Set scheduling policy and parameters, both base and limit, for
1055   *	the given task. Policy can be any policy implemented by the
1056   *	processor set, whether enabled or not. Change contained threads
1057   *	if requested.
1058   */
1059  kern_return_t
1060  task_set_policy(
1061  	__unused task_t                 task,
1062  	__unused processor_set_t                pset,
1063  	__unused policy_t                       policy_id,
1064  	__unused policy_base_t          base,
1065  	__unused mach_msg_type_number_t base_count,
1066  	__unused policy_limit_t         limit,
1067  	__unused mach_msg_type_number_t limit_count,
1068  	__unused boolean_t                      change)
1069  {
1070  	return KERN_FAILURE;
1071  }
1072  
1073  kern_return_t
1074  task_set_ras_pc(
1075  	__unused task_t task,
1076  	__unused vm_offset_t    pc,
1077  	__unused vm_offset_t    endpc)
1078  {
1079  	return KERN_FAILURE;
1080  }
1081  
1082  /*
1083   * This routine finds a thread in a task by its unique id
1084   * Returns a referenced thread or THREAD_NULL if the thread was not found
1085   *
1086   * TODO: This is super inefficient - it's an O(threads in task) list walk!
1087   *       We should make a tid hash, or transition all tid clients to thread ports
1088   *
1089   * Precondition: No locks held (will take task lock)
1090   */
1091  thread_t
1092  task_findtid(task_t task, uint64_t tid)
1093  {
1094  	thread_t self           = current_thread();
1095  	thread_t found_thread   = THREAD_NULL;
1096  	thread_t iter_thread    = THREAD_NULL;
1097  
1098  	/* Short-circuit the lookup if we're looking up ourselves */
1099  	if (tid == self->thread_id || tid == TID_NULL) {
1100  		assert(self->task == task);
1101  
1102  		thread_reference(self);
1103  
1104  		return self;
1105  	}
1106  
1107  	task_lock(task);
1108  
1109  	queue_iterate(&task->threads, iter_thread, thread_t, task_threads) {
1110  		if (iter_thread->thread_id == tid) {
1111  			found_thread = iter_thread;
1112  			thread_reference(found_thread);
1113  			break;
1114  		}
1115  	}
1116  
1117  	task_unlock(task);
1118  
1119  	return found_thread;
1120  }
1121  
1122  /*
1123   * task_info_from_user
1124   *
1125   * When calling task_info from user space,
1126   * this function will be executed as mig server side
1127   * instead of calling directly into task_info.
1128   * This gives the possibility to perform more security
1129   * checks on task_port.
1130   *
1131   * In the case of TASK_DYLD_INFO, we require the more
1132   * privileged task_read_port not the less-privileged task_name_port.
1133   *
1134   */
1135  kern_return_t
1136  task_info_from_user(
1137  	mach_port_t             task_port,
1138  	task_flavor_t           flavor,
1139  	task_info_t             task_info_out,
1140  	mach_msg_type_number_t  *task_info_count)
1141  {
1142  	task_t task;
1143  	kern_return_t ret;
1144  
1145  	if (flavor == TASK_DYLD_INFO) {
1146  		task = convert_port_to_task_read(task_port);
1147  	} else {
1148  		task = convert_port_to_task_name(task_port);
1149  	}
1150  
1151  	ret = task_info(task, flavor, task_info_out, task_info_count);
1152  
1153  	task_deallocate(task);
1154  
1155  	return ret;
1156  }
1157  
/*
 * task_threads_internal:
 *
 * Build an array of ports for every thread in the task, at the
 * privilege level given by `flavor` (control/read/inspect).  On
 * success, *threads_out receives a kalloc'd array of `*count` ports;
 * ownership of the array and the port rights transfers to the caller
 * (normally MIG).  With zero threads, *threads_out is NULL and
 * *count is 0.
 *
 * Takes and drops the task lock internally; no locks held on entry.
 */
static kern_return_t
task_threads_internal(
	task_t                      task,
	thread_act_array_t         *threads_out,
	mach_msg_type_number_t     *count,
	mach_thread_flavor_t        flavor)
{
	mach_msg_type_number_t  actual;
	thread_t                                *thread_list;
	thread_t                                thread;
	vm_size_t                               size, size_needed;
	void                                    *addr;
	unsigned int                    i, j;

	size = 0; addr = NULL;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	assert(flavor <= THREAD_FLAVOR_INSPECT);

	/*
	 * Allocate-and-retry loop: the thread count can change while the
	 * task is unlocked for allocation, so loop until the buffer
	 * allocated is large enough for the count observed under the lock.
	 */
	for (;;) {
		task_lock(task);
		if (!task->active) {
			task_unlock(task);

			if (size != 0) {
				kfree(addr, size);
			}

			return KERN_FAILURE;
		}

		actual = task->thread_count;

		/* do we have the memory we need? */
		size_needed = actual * sizeof(mach_port_t);
		if (size_needed <= size) {
			break;
		}

		/* unlock the task and allocate more memory */
		task_unlock(task);

		if (size != 0) {
			kfree(addr, size);
		}

		assert(size_needed > 0);
		size = size_needed;

		addr = kalloc(size);
		if (addr == 0) {
			return KERN_RESOURCE_SHORTAGE;
		}
	}

	/* OK, have memory and the task is locked & active */
	thread_list = (thread_t *)addr;

	i = j = 0;

	/* collect a reference on each thread while the task lock pins the list */
	for (thread = (thread_t)queue_first(&task->threads); i < actual;
	    ++i, thread = (thread_t)queue_next(&thread->task_threads)) {
		thread_reference_internal(thread);
		thread_list[j++] = thread;
	}

	assert(queue_end(&task->threads, (queue_entry_t)thread));

	actual = j;
	size_needed = actual * sizeof(mach_port_t);

	/* can unlock task now that we've got the thread refs */
	task_unlock(task);

	if (actual == 0) {
		/* no threads, so return null pointer and deallocate memory */

		*threads_out = NULL;
		*count = 0;

		if (size != 0) {
			kfree(addr, size);
		}
	} else {
		/* if we allocated too much, must copy */

		if (size_needed < size) {
			void *newaddr;

			newaddr = kalloc(size_needed);
			if (newaddr == 0) {
				/* undo the per-thread references taken above */
				for (i = 0; i < actual; ++i) {
					thread_deallocate(thread_list[i]);
				}
				kfree(addr, size);
				return KERN_RESOURCE_SHORTAGE;
			}

			bcopy(addr, newaddr, size_needed);
			kfree(addr, size);
			thread_list = (thread_t *)newaddr;
		}

		*threads_out = thread_list;
		*count = actual;

		/* do the conversion that Mig should handle */

		/*
		 * Each convert_* call consumes the thread reference taken
		 * above and replaces the thread pointer with a port, in place.
		 */
		switch (flavor) {
		case THREAD_FLAVOR_CONTROL:
			if (task == current_task()) {
				for (i = 0; i < actual; ++i) {
					((ipc_port_t *) thread_list)[i] = convert_thread_to_port_pinned(thread_list[i]);
				}
			} else {
				for (i = 0; i < actual; ++i) {
					((ipc_port_t *) thread_list)[i] = convert_thread_to_port(thread_list[i]);
				}
			}
			break;
		case THREAD_FLAVOR_READ:
			for (i = 0; i < actual; ++i) {
				((ipc_port_t *) thread_list)[i] = convert_thread_read_to_port(thread_list[i]);
			}
			break;
		case THREAD_FLAVOR_INSPECT:
			for (i = 0; i < actual; ++i) {
				((ipc_port_t *) thread_list)[i] = convert_thread_inspect_to_port(thread_list[i]);
			}
			break;
		}
	}

	return KERN_SUCCESS;
}
1296  
/*
 * task_threads_from_user:
 *
 * MIG-facing entry for task_threads.  Converts the port (accepting
 * anything at least as privileged as TASK_FLAVOR_INSPECT) and maps
 * the port's kobject type onto the matching thread flavor, so a
 * read-only task port can only yield read-only thread ports.
 */
kern_return_t
task_threads_from_user(
	mach_port_t                 port,
	thread_act_array_t         *threads_out,
	mach_msg_type_number_t     *count)
{
	ipc_kobject_type_t kotype;
	kern_return_t kr;

	task_t task = convert_port_to_task_check_type(port, &kotype, TASK_FLAVOR_INSPECT, FALSE);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (kotype) {
	case IKOT_TASK_CONTROL:
		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_CONTROL);
		break;
	case IKOT_TASK_READ:
		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_READ);
		break;
	case IKOT_TASK_INSPECT:
		kr = task_threads_internal(task, threads_out, count, THREAD_FLAVOR_INSPECT);
		break;
	default:
		/* conversion above only yields the three types handled here */
		panic("strange kobject type");
		break;
	}

	/* drop the reference taken by the port conversion */
	task_deallocate(task);
	return kr;
}
1330  
1331  /*
1332   *	task_release_locked:
1333   *
1334   *	Release a kernel hold on a task.
1335   *
1336   *      CONDITIONS: the task is locked and active
1337   */
1338  void
1339  task_release_locked(
1340  	task_t          task)
1341  {
1342  	thread_t        thread;
1343  
1344  	assert(task->active);
1345  	assert(task->suspend_count > 0);
1346  
1347  	if (--task->suspend_count > 0) {
1348  		return;
1349  	}
1350  
1351  #ifndef __DARLING__
1352  	if (task->bsd_info) {
1353  		workq_proc_resumed(task->bsd_info);
1354  	}
1355  #endif // __DARLING__
1356  
1357  	queue_iterate(&task->threads, thread, thread_t, task_threads) {
1358  		thread_mtx_lock(thread);
1359  		thread_release(thread);
1360  		thread_mtx_unlock(thread);
1361  	}
1362  }
1363  
1364  /*
1365   *	task_hold_locked:
1366   *
1367   *	Suspend execution of the specified task.
1368   *	This is a recursive-style suspension of the task, a count of
1369   *	suspends is maintained.
1370   *
1371   *	CONDITIONS: the task is locked and active.
1372   */
1373  void
1374  task_hold_locked(
1375  	task_t          task)
1376  {
1377  	thread_t        thread;
1378  
1379  	assert(task->active);
1380  
1381  	if (task->suspend_count++ > 0) {
1382  		return;
1383  	}
1384  
1385  #ifndef __DARLING__
1386  	if (task->bsd_info) {
1387  		workq_proc_suspended(task->bsd_info);
1388  	}
1389  #endif // __DARLING__
1390  
1391  	/*
1392  	 *	Iterate through all the threads and hold them.
1393  	 */
1394  	queue_iterate(&task->threads, thread, thread_t, task_threads) {
1395  		thread_mtx_lock(thread);
1396  		thread_hold(thread);
1397  		thread_mtx_unlock(thread);
1398  	}
1399  }
1400  
/*
 * Hold modes for place_task_hold()/release_task_hold(), selecting which
 * suspension accounting a hold participates in.
 */
#define TASK_HOLD_NORMAL        0  /* token-based suspend (task_suspend_internal/task_resume_internal) */
#define TASK_HOLD_PIDSUSPEND    1  /* hold paired with the task's pidsuspended flag */
#define TASK_HOLD_LEGACY        2  /* old-style task_suspend()/task_resume() hold */
#define TASK_HOLD_LEGACY_ALL    3  /* drop every outstanding legacy hold at once (see release_task_hold) */
1405  
/*
 * place_task_hold:
 *
 * Record one user-level suspension of `mode` on the task, and on the
 * first suspension (user_stop_count 0 -> 1) take the kernel-level hold
 * and wait for the threads to leave user code.
 *
 * Called with the task locked (see task_suspend/task_suspend_internal).
 */
static kern_return_t
place_task_hold(
	task_t task,
	int mode)
{
	if (!task->active && !task_is_a_corpse(task)) {
		return KERN_FAILURE;
	}

	/* Return success for corpse task */
	if (task_is_a_corpse(task)) {
		return KERN_SUCCESS;
	}

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_SUSPEND),
	    task_pid(task),
	    task->thread_count > 0 ?((thread_t)queue_first(&task->threads))->thread_id : 0,
	    task->user_stop_count, task->user_stop_count + 1);

#if MACH_ASSERT
	/* debug-only accounting of suspends issued by the current task */
	current_task()->suspends_outstanding++;
#endif

	if (mode == TASK_HOLD_LEGACY) {
		/* legacy holds are counted separately so they can be bulk-released */
		task->legacy_stop_count++;
	}

	if (task->user_stop_count++ > 0) {
		/*
		 *	If the stop count was positive, the task is
		 *	already stopped and we can exit.
		 */
		return KERN_SUCCESS;
	}

	/*
	 * Put a kernel-level hold on the threads in the task (all
	 * user-level task suspensions added together represent a
	 * single kernel-level hold).  We then wait for the threads
	 * to stop executing user code.
	 */
	task_hold_locked(task);
	task_wait_locked(task, FALSE);

	return KERN_SUCCESS;
}
1452  
/*
 * release_task_hold:
 *
 * Undo one (or, for TASK_HOLD_LEGACY_ALL, every outstanding legacy)
 * user-level suspension of `mode`; when the user stop count reaches
 * zero the kernel-level hold is released.  Fails if no matching
 * suspension is outstanding.
 *
 * Called with the task locked (see task_resume/task_resume_internal).
 */
static kern_return_t
release_task_hold(
	task_t          task,
	int                     mode)
{
	boolean_t release = FALSE;

	if (!task->active && !task_is_a_corpse(task)) {
		return KERN_FAILURE;
	}

	/* Return success for corpse task */
	if (task_is_a_corpse(task)) {
		return KERN_SUCCESS;
	}

	if (mode == TASK_HOLD_PIDSUSPEND) {
		/* a pidsuspend release is only valid while pidsuspended is set */
		if (task->pidsuspended == FALSE) {
			return KERN_FAILURE;
		}
		task->pidsuspended = FALSE;
	}

	/* a lingering pidsuspend keeps one stop count reserved */
	if (task->user_stop_count > (task->pidsuspended ? 1 : 0)) {
		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		    MACHDBG_CODE(DBG_MACH_IPC, MACH_TASK_RESUME) | DBG_FUNC_NONE,
		    task_pid(task), ((thread_t)queue_first(&task->threads))->thread_id,
		    task->user_stop_count, mode, task->legacy_stop_count);

#if MACH_ASSERT
		/*
		 * This is obviously not robust; if we suspend one task and then resume a different one,
		 * we'll fly under the radar. This is only meant to catch the common case of a crashed
		 * or buggy suspender.
		 */
		current_task()->suspends_outstanding--;
#endif

		if (mode == TASK_HOLD_LEGACY_ALL) {
			if (task->legacy_stop_count >= task->user_stop_count) {
				task->user_stop_count = 0;
				release = TRUE;
			} else {
				task->user_stop_count -= task->legacy_stop_count;
			}
			task->legacy_stop_count = 0;
		} else {
			if (mode == TASK_HOLD_LEGACY && task->legacy_stop_count > 0) {
				task->legacy_stop_count--;
			}
			if (--task->user_stop_count == 0) {
				release = TRUE;
			}
		}
	} else {
		/* nothing outstanding to release */
		return KERN_FAILURE;
	}

	/*
	 *	Release the task if necessary.
	 */
	if (release) {
		task_release_locked(task);
	}

	return KERN_SUCCESS;
}
1520  
1521  /*
1522   *	task_suspend:
1523   *
1524   *	Implement an (old-fashioned) user-level suspension on a task.
1525   *
1526   *	Because the user isn't expecting to have to manage a suspension
1527   *	token, we'll track it for him in the kernel in the form of a naked
1528   *	send right to the task's resume port.  All such send rights
1529   *	account for a single suspension against the task (unlike task_suspend2()
1530   *	where each caller gets a unique suspension count represented by a
1531   *	unique send-once right).
1532   *
1533   * Conditions:
1534   *      The caller holds a reference to the task
1535   */
1536  kern_return_t
1537  task_suspend(
1538  	task_t          task)
1539  {
1540  	kern_return_t                   kr;
1541  	mach_port_t                     port;
1542  	mach_port_name_t                name;
1543  
1544  	if (task == TASK_NULL || task == kernel_task) {
1545  		return KERN_INVALID_ARGUMENT;
1546  	}
1547  
1548  	task_lock(task);
1549  
1550  	/*
1551  	 * place a legacy hold on the task.
1552  	 */
1553  	kr = place_task_hold(task, TASK_HOLD_LEGACY);
1554  	if (kr != KERN_SUCCESS) {
1555  		task_unlock(task);
1556  		return kr;
1557  	}
1558  
1559  	/*
1560  	 * Claim a send right on the task resume port, and request a no-senders
1561  	 * notification on that port (if none outstanding).
1562  	 */
1563  	(void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *) &task->itk_resume,
1564  	    (ipc_kobject_t)task, IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE, true,
1565  	    OS_PTRAUTH_DISCRIMINATOR("task.itk_resume"));
1566  	port = task->itk_resume;
1567  	task_unlock(task);
1568  
1569  	/*
1570  	 * Copyout the send right into the calling task's IPC space.  It won't know it is there,
1571  	 * but we'll look it up when calling a traditional resume.  Any IPC operations that
1572  	 * deallocate the send right will auto-release the suspension.
1573  	 */
1574  	if (IP_VALID(port)) {
1575  		kr = ipc_object_copyout(current_space(), ip_to_object(port),
1576  		    MACH_MSG_TYPE_MOVE_SEND, IPC_OBJECT_COPYOUT_FLAGS_NONE,
1577  		    NULL, NULL, &name);
1578  	} else {
1579  		kr = KERN_SUCCESS;
1580  	}
1581  	if (kr != KERN_SUCCESS) {
1582  #ifndef __DARLING__
1583  		printf("warning: %s(%d) failed to copyout suspension "
1584  		    "token for pid %d with error: %d\n",
1585  		    proc_name_address(current_task()->bsd_info),
1586  		    proc_pid(current_task()->bsd_info),
1587  		    task_pid(task), kr);
1588  #endif // __DARLING__
1589  	}
1590  
1591  	return kr;
1592  }
1593  
1594  /*
1595   *	task_resume:
1596   *		Release a user hold on a task.
1597   *
1598   * Conditions:
1599   *		The caller holds a reference to the task
1600   */
1601  kern_return_t
1602  task_resume(
1603  	task_t  task)
1604  {
1605  	kern_return_t    kr;
1606  	mach_port_name_t resume_port_name;
1607  	ipc_entry_t              resume_port_entry;
1608  	ipc_space_t              space = current_task()->itk_space;
1609  
1610  	if (task == TASK_NULL || task == kernel_task) {
1611  		return KERN_INVALID_ARGUMENT;
1612  	}
1613  
1614  	/* release a legacy task hold */
1615  	task_lock(task);
1616  	kr = release_task_hold(task, TASK_HOLD_LEGACY);
1617  	task_unlock(task);
1618  
1619  	is_write_lock(space);
1620  	if (is_active(space) && IP_VALID(task->itk_resume) &&
1621  	    ipc_hash_lookup(space, ip_to_object(task->itk_resume), &resume_port_name, &resume_port_entry) == TRUE) {
1622  		/*
1623  		 * We found a suspension token in the caller's IPC space. Release a send right to indicate that
1624  		 * we are holding one less legacy hold on the task from this caller.  If the release failed,
1625  		 * go ahead and drop all the rights, as someone either already released our holds or the task
1626  		 * is gone.
1627  		 */
1628  		if (kr == KERN_SUCCESS) {
1629  			ipc_right_dealloc(space, resume_port_name, resume_port_entry);
1630  		} else {
1631  			ipc_right_destroy(space, resume_port_name, resume_port_entry, FALSE, 0);
1632  		}
1633  		/* space unlocked */
1634  	} else {
1635  		is_write_unlock(space);
1636  		if (kr == KERN_SUCCESS) {
1637  #ifndef __DARLING__
1638  			printf("warning: %s(%d) performed out-of-band resume on pid %d\n",
1639  			    proc_name_address(current_task()->bsd_info), proc_pid(current_task()->bsd_info),
1640  			    task_pid(task));
1641  #endif // __DARLING__
1642  		}
1643  	}
1644  
1645  	return kr;
1646  }
1647  
1648  /*
1649   * Suspend the target task.
1650   * Making/holding a token/reference/port is the callers responsibility.
1651   */
1652  kern_return_t
1653  task_suspend_internal(task_t task)
1654  {
1655  	kern_return_t    kr;
1656  
1657  	if (task == TASK_NULL || task == kernel_task) {
1658  		return KERN_INVALID_ARGUMENT;
1659  	}
1660  
1661  	task_lock(task);
1662  	kr = place_task_hold(task, TASK_HOLD_NORMAL);
1663  	task_unlock(task);
1664  	return kr;
1665  }
1666  
1667  /*
1668   * Suspend the target task, and return a suspension token. The token
1669   * represents a reference on the suspended task.
1670   */
1671  kern_return_t
1672  task_suspend2(
1673  	task_t                  task,
1674  	task_suspension_token_t *suspend_token)
1675  {
1676  	kern_return_t    kr;
1677  
1678  	kr = task_suspend_internal(task);
1679  	if (kr != KERN_SUCCESS) {
1680  		*suspend_token = TASK_NULL;
1681  		return kr;
1682  	}
1683  
1684  	/*
1685  	 * Take a reference on the target task and return that to the caller
1686  	 * as a "suspension token," which can be converted into an SO right to
1687  	 * the now-suspended task's resume port.
1688  	 */
1689  	task_reference_internal(task);
1690  	*suspend_token = task;
1691  
1692  	return KERN_SUCCESS;
1693  }
1694  
1695  /*
1696   * Resume the task
1697   * (reference/token/port management is caller's responsibility).
1698   */
1699  kern_return_t
1700  task_resume_internal(
1701  	task_suspension_token_t         task)
1702  {
1703  	kern_return_t kr;
1704  
1705  	if (task == TASK_NULL || task == kernel_task) {
1706  		return KERN_INVALID_ARGUMENT;
1707  	}
1708  
1709  	task_lock(task);
1710  	kr = release_task_hold(task, TASK_HOLD_NORMAL);
1711  	task_unlock(task);
1712  	return kr;
1713  }
1714  
1715  /*
1716   * Resume the task using a suspension token. Consumes the token's ref.
1717   */
1718  kern_return_t
1719  task_resume2(
1720  	task_suspension_token_t         task)
1721  {
1722  	kern_return_t kr;
1723  
1724  	kr = task_resume_internal(task);
1725  	task_suspension_token_deallocate(task);
1726  
1727  	return kr;
1728  }
1729  
1730  /*
1731   *	task_read_deallocate:
1732   *
1733   *	Drop a reference on task read port.
1734   */
1735  void
1736  task_read_deallocate(
1737  	task_read_t          task_read)
1738  {
1739  	return task_deallocate((task_t)task_read);
1740  }
1741  
1742  // </copied>
1743  
1744  // <copied from="xnu://7195.141.2/osfmk/kern/zalloc.c">
1745  
1746  kern_return_t
1747  task_zone_info(
1748  	__unused task_t                                 task,
1749  	__unused mach_zone_name_array_t *namesp,
1750  	__unused mach_msg_type_number_t *namesCntp,
1751  	__unused task_zone_info_array_t *infop,
1752  	__unused mach_msg_type_number_t *infoCntp)
1753  {
1754  	return KERN_FAILURE;
1755  }
1756  
1757  // </copied>